gpt4 book ai didi

python - 来自保存模型的 TF 估计器 : Can't Load *. pb

转载 作者:行者123 更新时间:2023-12-01 09:04:14 25 4
gpt4 key购买 nike

我使用 TF Estimator 创建一个简单模型,并使用 export_savedmodel 函数保存模型。我使用一个简单的 Iris 数据集,它有 4 个特征。

# Dataset sizing: 50 passes over the 120 training rows; 30 held-out test rows.
num_epoch = 50
num_train = 120
num_test = 30

# 1 Define input function
def input_function(x, y, is_train):
    """Build a tf.data.Dataset over (features, labels) for the estimator.

    Features are wrapped in a dict under the key "thisisinput" to match the
    feature column defined in main(). Training mode shuffles the whole
    training set and repeats for num_epoch epochs; evaluation mode emits the
    test set as one batch.
    """
    features = {
        "thisisinput": x,
    }

    ds = tf.data.Dataset.from_tensor_slices((features, y))

    if is_train:
        ds = ds.shuffle(num_train).repeat(num_epoch).batch(num_train)
    else:
        ds = ds.batch(num_test)

    return ds

def my_serving_input_fn():
    """Serving-input receiver: one raw float32 placeholder of shape [None, 4].

    NOTE(review): passing the same tensor dict as both features and receiver
    tensors appears to export the signature under the key 'predict' rather
    than 'serving_default' (see the ValueError discussed below) — confirm
    against the exported SavedModel.
    """
    raw_input = tf.placeholder(tf.float32, [None, 4], name='inputtensors')
    tensors = {"thisisinput": raw_input}
    return tf.estimator.export.ServingInputReceiver(tensors, tensors)

def main(argv):
    """Train, evaluate, and export the Iris DNN classifier as a SavedModel."""
    tf.set_random_seed(1103)  # avoiding different result of random

    # 2 Define feature columns: a single 4-wide numeric feature.
    feature_columns = [
        tf.feature_column.numeric_column(key="thisisinput", shape=4),
    ]

    # 3 Define an estimator
    classifier = tf.estimator.DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[10],
        n_classes=3,
        optimizer=tf.train.GradientDescentOptimizer(0.001),
        activation_fn=tf.nn.relu,
        model_dir='modeliris2/',
    )

    # Train the model (xtrain/ytrain are expected to be defined at module level).
    classifier.train(input_fn=lambda: input_function(xtrain, ytrain, True))

    # Evaluate the model on the held-out test split.
    eval_result = classifier.evaluate(
        input_fn=lambda: input_function(xtest, ytest, False))

    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
    print('\nSaving models...')
    classifier.export_savedmodel("modeliris2pb", my_serving_input_fn)


if __name__ == "__main__":
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    tf.app.run(main)

运行程序后,它会生成一个包含saved_model.pb的文件夹。我看到很多教程建议使用 contrib.predictor 加载 saved_model.pb 但我不能。我使用 contrib.predictor 函数来加载模型:

def main(a):
    """Load the exported SavedModel and build a prediction function.

    Fix: the export above registers its signature under the key 'predict',
    not the default 'serving_default' (the quoted ValueError says exactly
    that: "Available signatures are ['predict']"). Passing
    signature_def_key='predict' selects the signature that actually exists.
    The explicit tf.Session wrapper was removed: from_saved_model creates
    and manages its own session.
    """
    PB_PATH = "modeliris2pb/1536219836/"
    predict_fn = predictor.from_saved_model(PB_PATH, signature_def_key="predict")
    return predict_fn


if __name__ == "__main__":
    main()

但它会产生错误:

ValueError: Got signature_def_key "serving_default". Available signatures are ['predict']. Original error: No SignatureDef with key 'serving_default' found in MetaGraphDef.

还有其他更好的方法来加载 *.pb 文件吗?为什么会出现这个错误?我怀疑这是因为 my_serving_input_fn() 函数,但我不知道为什么

最佳答案

我遇到了同样的问题,我尝试在网络上搜索,但没有对此的解释,所以我尝试了不同的方法:

节省:

首先,您需要以字典格式定义特征长度,如下所示:

# Parse spec for serialized tf.Example protos: one length-4 float32 feature 'x'.
feature_spec = {'x': tf.FixedLenFeature([4],tf.float32)}

然后您必须构建一个具有相同形状特征的占位符的函数,并使用 tf.estimator.export.ServingInputReceiver 返回

def serving_input_receiver_fn():
    """Receive serialized tf.Example strings and parse them via feature_spec.

    The string placeholder is exposed under the request key 'inputs'; each
    proto is parsed into the 'x' feature the classifier consumes.
    """
    examples = tf.placeholder(dtype=tf.string, shape=[None],
                              name='input_tensors')
    parsed = tf.parse_example(examples, feature_spec)
    return tf.estimator.export.ServingInputReceiver(
        features=parsed, receiver_tensors={'inputs': examples})

然后只需使用export_savedmodel保存:

# Export the trained estimator as a SavedModel (timestamped subdir of dir_path).
classifier.export_savedmodel(dir_path, serving_input_receiver_fn)

完整示例代码:

import os
from six.moves.urllib.request import urlopen

import numpy as np
import tensorflow as tf


# NOTE(review): os.path.dirname('.') evaluates to '' (i.e. the current
# directory when joined below) — confirm this is the intended base path.
dir_path = os.path.dirname('.')

# CSV files for the Iris training/test splits.
IRIS_TRAINING = os.path.join(dir_path, "iris_training.csv")
IRIS_TEST = os.path.join(dir_path, "iris_test.csv")

# Parse spec for serialized tf.Example protos: one length-4 float32 feature 'x'.
feature_spec = {'x': tf.FixedLenFeature([4],tf.float32)}

def serving_input_receiver_fn():
    """Build a ServingInputReceiver that accepts serialized tf.Example protos.

    Exposes a string placeholder named 'input_tensors' under the request key
    'inputs' and parses each proto according to feature_spec.
    """
    proto_batch = tf.placeholder(dtype=tf.string, shape=[None],
                                 name='input_tensors')
    features = tf.parse_example(proto_batch, feature_spec)
    return tf.estimator.export.ServingInputReceiver(
        features, {'inputs': proto_batch})




def main():
    """Train a DNN on the Iris CSVs and export it as a SavedModel."""
    # Load both CSV splits (header row encodes row/feature counts).
    # NOTE(review): test_set is loaded but never used in this example.
    training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
        filename=IRIS_TRAINING,
        target_dtype=np.int,
        features_dtype=np.float32)
    test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
        filename=IRIS_TEST,
        target_dtype=np.int,
        features_dtype=np.float32)

    # One 4-wide numeric feature matching the 'x' key in feature_spec.
    feature_columns = [tf.feature_column.numeric_column("x", shape=[4])]

    classifier = tf.estimator.DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[10, 20, 10],
        n_classes=3,
        model_dir=dir_path)

    # Define the training inputs: shuffle forever; training is step-bounded.
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": np.array(training_set.data)},
        y=np.array(training_set.target),
        num_epochs=None,
        shuffle=True)

    # Train model.
    classifier.train(input_fn=train_input_fn, steps=200)

    # Export the trained model together with the serving receiver.
    classifier.export_savedmodel(dir_path, serving_input_receiver_fn)


if __name__ == "__main__":
    main()

正在恢复

现在让我们恢复模型:

import tensorflow as tf 
import os

# NOTE(review): os.path.dirname('.') evaluates to '' (current directory).
dir_path = os.path.dirname('.')  # current directory
exported_path = os.path.join(dir_path, "1536315752")  # timestamped export dir


def main():
    """Restore the exported SavedModel and run one prediction.

    Fix vs. original: the explicit tf.Session, tf.saved_model.loader.load
    call, and the get_tensor_by_name lookup were dead code — the returned
    input_tensor was never used, and tf.contrib.predictor.from_saved_model
    builds its own graph and session from the export directory. Removing
    them avoids loading the model twice.
    """
    # Encode one Iris measurement as a serialized tf.Example proto, matching
    # the length-4 float feature 'x' that the serving receiver parses.
    model_input = tf.train.Example(features=tf.train.Features(feature={
        'x': tf.train.Feature(float_list=tf.train.FloatList(value=[6.4, 3.2, 4.5, 1.5]))
    }))

    predict_fn = tf.contrib.predictor.from_saved_model(exported_path)

    serialized = model_input.SerializeToString()
    output_dict = predict_fn({"inputs": [serialized]})

    print(" prediction is ", output_dict['scores'])


if __name__ == "__main__":
    main()

这里有一个 IPython notebook 示例(demo),其中包含数据和相关解释:

关于python - 来自保存模型的 TF 估计器 : Can't Load *. pb,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/52200016/

25 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com