
python - Making predictions with the TensorFlow graph from a Keras model


I have a model trained with Keras using TensorFlow as the backend, but now I need to turn the model into a TensorFlow graph for a particular application. I attempted to do this and then make predictions to confirm that it works correctly, but when comparing against the results gathered from model.predict(), I get very different values. For example:

from keras.models import load_model
import numpy as np
import tensorflow as tf

model = load_model('model_file.h5')

x_placeholder = tf.placeholder(tf.float32, shape=(None, 7214, 1))
y = model(x_placeholder)

x = np.ones((1, 7214, 1))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print("Predictions from:\ntf graph: " + str(sess.run(y, feed_dict={x_placeholder: x})))
    print("keras predict: " + str(model.predict(x)))

This returns:

Predictions from:
tf graph: [[-0.1015993 0.07432419 0.0592984 ]]
keras predict: [[ 0.39339241 0.57949686 -3.67846966]]

The values from keras predict are correct, but the tf graph results are not.
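For reference, one likely reason the snippet above disagrees with model.predict() is that tf.global_variables_initializer() re-initializes the weights that load_model() had already restored, so the graph is evaluated with fresh random weights. A minimal sketch of running the same graph through the session Keras already holds (assuming the same model_file.h5):

from keras import backend as K
from keras.models import load_model
import numpy as np
import tensorflow as tf

model = load_model('model_file.h5')

x_placeholder = tf.placeholder(tf.float32, shape=(None, 7214, 1))
y = model(x_placeholder)
x = np.ones((1, 7214, 1))

# Reuse Keras's own session so the restored weights are kept instead of
# being overwritten by a fresh global initialization.
sess = K.get_session()
print("tf graph:      " + str(sess.run(y, feed_dict={x_placeholder: x})))
print("keras predict: " + str(model.predict(x)))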

In case it helps to know the final intended application: I will be using the tf.gradients() function to create a Jacobian matrix, but at the moment it does not return correct results when compared against Theano's jacobian function, which does give the correct Jacobian. Here is my TensorFlow Jacobian code:

x = tf.placeholder(tf.float32, shape=(None,7214,1))
y = tf.reshape(model(x)[0],[-1])
y_list = tf.unstack(y)

jacobian_list = [tf.gradients(y_, x)[0] for y_ in y_list]
jacobian = tf.stack(jacobian_list)

EDIT: Model code

import numpy as np

from keras.models import Sequential
from keras.layers import Dense, InputLayer, Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ReduceLROnPlateau

# activation function used following every layer except for the output layers
activation = 'relu'

# model weight initializer
initializer = 'he_normal'

# shape of input data that is fed into the input layer
input_shape = (None,7214,1)

# number of filters used in the convolutional layers
num_filters = [4,16]

# length of the filters in the convolutional layers
filter_length = 8

# length of the maxpooling window
pool_length = 4

# number of nodes in each of the hidden fully connected layers
num_hidden_nodes = [256,128]

# number of samples fed into model at once during training
batch_size = 64

# maximum number of interations for model training
max_epochs = 30

# initial learning rate for optimization algorithm
lr = 0.0007

# exponential decay rate for the 1st moment estimates for optimization algorithm
beta_1 = 0.9

# exponential decay rate for the 2nd moment estimates for optimization algorithm
beta_2 = 0.999

# a small constant for numerical stability for optimization algorithm
optimizer_epsilon = 1e-08

model = Sequential([
    InputLayer(batch_input_shape=input_shape),
    Conv1D(kernel_initializer=initializer, activation=activation, padding="same",
           filters=num_filters[0], kernel_size=filter_length),
    Conv1D(kernel_initializer=initializer, activation=activation, padding="same",
           filters=num_filters[1], kernel_size=filter_length),
    MaxPooling1D(pool_size=pool_length),
    Flatten(),
    Dense(units=num_hidden_nodes[0], kernel_initializer=initializer, activation=activation),
    Dense(units=num_hidden_nodes[1], kernel_initializer=initializer, activation=activation),
    Dense(units=3, activation="linear", input_dim=num_hidden_nodes[1]),
])

# compile model
loss_function = 'mean_squared_error'
early_stopping_min_delta = 0.0001
early_stopping_patience = 4
reduce_lr_factor = 0.5
reduce_lr_epsilon = 0.0009
reduce_lr_patience = 2
reduce_lr_min = 0.00008

optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=optimizer_epsilon, decay=0.0)

early_stopping = EarlyStopping(monitor='val_loss', min_delta=early_stopping_min_delta,
                               patience=early_stopping_patience, verbose=2, mode='min')

reduce_lr = ReduceLROnPlateau(monitor='loss', factor=reduce_lr_factor, epsilon=reduce_lr_epsilon,
                              patience=reduce_lr_patience, min_lr=reduce_lr_min, mode='min', verbose=2)

model.compile(optimizer=optimizer, loss=loss_function)

model.fit(train_x, train_y, validation_data=(cv_x, cv_y),
          epochs=max_epochs, batch_size=batch_size, verbose=2,
          callbacks=[reduce_lr, early_stopping])

model.save('model_file.h5')

Best answer

@frankyjuang linked me to

https://github.com/amir-abdi/keras_to_tensorflow

and, combining that with code from

https://github.com/metaflow-ai/blog/blob/master/tf-freeze/load.py

https://github.com/tensorflow/tensorflow/issues/675

I found a solution for making predictions with the tf graph and for building the Jacobian function:

import tensorflow as tf
import numpy as np

# Create function to convert saved keras model to tensorflow graph
def convert_to_pb(weight_file, input_fld='', output_fld=''):

    import os
    import os.path as osp
    from tensorflow.python.framework import graph_util
    from tensorflow.python.framework import graph_io
    from keras.models import load_model
    from keras import backend as K

    # weight_file is a .h5 keras model file
    output_node_names_of_input_network = ["pred0"]
    output_node_names_of_final_network = 'output_node'

    # change filename to a .pb tensorflow file
    output_graph_name = weight_file[:-2] + 'pb'
    weight_file_path = osp.join(input_fld, weight_file)

    net_model = load_model(weight_file_path)

    num_output = len(output_node_names_of_input_network)
    pred = [None] * num_output
    pred_node_names = [None] * num_output

    for i in range(num_output):
        pred_node_names[i] = output_node_names_of_final_network + str(i)
        pred[i] = tf.identity(net_model.output[i], name=pred_node_names[i])

    sess = K.get_session()

    constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), pred_node_names)
    graph_io.write_graph(constant_graph, output_fld, output_graph_name, as_text=False)
    print('saved the constant graph (ready for inference) at: ', osp.join(output_fld, output_graph_name))

    return output_fld + output_graph_name

Call it with:

tf_model_path = convert_to_pb('model_file.h5','/model_dir/','/model_dir/')

Create a function to load the tf model as a graph:

def load_graph(frozen_graph_filename):
    # We load the protobuf file from the disk and parse it to retrieve the
    # unserialized graph_def
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # Then, we can use again a convenient built-in function to import a graph_def into the
    # current default Graph
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(
            graph_def,
            input_map=None,
            return_elements=None,
            name="prefix",
            op_dict=None,
            producer_op_list=None
        )

    input_name = graph.get_operations()[0].name + ':0'
    output_name = graph.get_operations()[-1].name + ':0'

    return graph, input_name, output_name

Create a function to make model predictions with the tf graph:

def predict(model_path, input_data):
    # load tf graph
    tf_model, tf_input, tf_output = load_graph(model_path)

    # Create tensors for model input and output
    x = tf_model.get_tensor_by_name(tf_input)
    y = tf_model.get_tensor_by_name(tf_output)

    # Number of model outputs
    num_outputs = y.shape.as_list()[0]
    predictions = np.zeros((input_data.shape[0], num_outputs))
    for i in range(input_data.shape[0]):
        with tf.Session(graph=tf_model) as sess:
            y_out = sess.run(y, feed_dict={x: input_data[i:i+1]})
            predictions[i] = y_out

    return predictions

Make predictions:

tf_predictions = predict(tf_model_path,test_data)
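One detail worth noting: predict() opens a fresh tf.Session for every sample, which adds noticeable start-up overhead on large test sets. A minimal variant below hoists the session out of the loop; the helper name predict_single_session is just illustrative, and it reuses load_graph defined above.

def predict_single_session(model_path, input_data):
    # Same per-sample loop as predict() above, but the Session is created
    # once and reused for every sample.
    tf_model, tf_input, tf_output = load_graph(model_path)
    x = tf_model.get_tensor_by_name(tf_input)
    y = tf_model.get_tensor_by_name(tf_output)
    num_outputs = y.shape.as_list()[0]
    predictions = np.zeros((input_data.shape[0], num_outputs))
    with tf.Session(graph=tf_model) as sess:
        for i in range(input_data.shape[0]):
            predictions[i] = sess.run(y, feed_dict={x: input_data[i:i+1]})
    return predictions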

Jacobian function:

def compute_jacobian(model_path, input_data):

    tf_model, tf_input, tf_output = load_graph(model_path)

    x = tf_model.get_tensor_by_name(tf_input)
    y = tf_model.get_tensor_by_name(tf_output)
    y_list = tf.unstack(y)
    num_outputs = y.shape.as_list()[0]
    jacobian = np.zeros((num_outputs, input_data.shape[0], input_data.shape[1]))
    for i in range(input_data.shape[0]):
        with tf.Session(graph=tf_model) as sess:
            y_out = sess.run([tf.gradients(y_, x)[0] for y_ in y_list], feed_dict={x: input_data[i:i+1]})
            jac_temp = np.asarray(y_out)
            jacobian[:, i:i+1, :] = jac_temp[:, :, :, 0]
    return jacobian

Compute the Jacobian matrix:

jacobians = compute_jacobian(tf_model_path,test_data)
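Likewise, compute_jacobian() calls tf.gradients() inside the loop, so new gradient ops are added to the graph and a new session is opened on every iteration. A sketch of one possible refinement (the name compute_jacobian_single_session is illustrative) builds the gradient ops and the session once:

def compute_jacobian_single_session(model_path, input_data):
    # Same computation as compute_jacobian() above, but the gradient ops
    # and the Session are created once; the loop only runs them.
    tf_model, tf_input, tf_output = load_graph(model_path)
    x = tf_model.get_tensor_by_name(tf_input)
    y = tf_model.get_tensor_by_name(tf_output)
    num_outputs = y.shape.as_list()[0]
    with tf_model.as_default():
        grad_ops = [tf.gradients(y_, x)[0] for y_ in tf.unstack(y)]
    jacobian = np.zeros((num_outputs, input_data.shape[0], input_data.shape[1]))
    with tf.Session(graph=tf_model) as sess:
        for i in range(input_data.shape[0]):
            grads = np.asarray(sess.run(grad_ops, feed_dict={x: input_data[i:i+1]}))
            jacobian[:, i:i+1, :] = grads[:, :, :, 0]
    return jacobian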

Regarding python - making predictions with the TensorFlow graph from a Keras model, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/44274701/
