I have a custom neural network that I am training on data, and I am trying to constrain the network's output values to lie between two arbitrary constants: [lower_bound, upper_bound]. Is there a best practice for encoding this constraint in the loss function?
Below I have written a minimal working example in which I build and train a neural network on generated data, and impose the arbitrary constraint that the output should lie within [lower_bound, upper_bound] = [-0.5, 0.75] via the loss function being optimized. My attempt uses a relatively crude approach: find all instances where the predicted value falls outside the bounds and simply set the loss term for those instances to a large value (and to zero if the prediction is within the given bounds):
lower_bound = -0.5 #a guessed a priori lower bound on the output
upper_bound = 0.75 #a guessed a priori upper bound on the output
# prediction above the upper bound
cond_v1_1 = tf.greater(self.v1_pred[:,0], upper_bound*tf.ones(tf.shape(self.v1_pred[:,0])))
# prediction below the lower bound: pred < lower_bound  <=>  -pred > -lower_bound
cond_v1_2 = tf.greater(-1.0*self.v1_pred[:,0], -lower_bound*tf.ones(tf.shape(self.v1_pred[:,0])))
self.red_v1 = tf.where(cond_v1_1, 100000.0*tf.ones(tf.shape(self.v1_pred[:,0])), tf.zeros(tf.shape(self.v1_pred[:,0])))
self.red_v1 = tf.where(cond_v1_2, 100000.0*tf.ones(tf.shape(self.v1_pred[:,0])), self.red_v1)
self.loss_cond = tf.reduce_sum(tf.square(self.red_v1))
Is there a better way, or a better loss function, to encode this constraint when training the neural network? Perhaps a smoother loss function that the optimizer can handle more easily, and/or modifications to my code itself (one smoother, hinge-style alternative is sketched after the full example below)? Any comments and further thoughts on best practices for penalizing/training the neural network in the code below would be greatly appreciated.
import numpy as np
import tensorflow as tf
end_it = 1000 #number of iterations
frac_train = 1.0 #randomly sampled fraction of data to create training set
frac_sample_train = 0.01 #randomly sampled fraction of data from training set to train in batches
layers = [2, 20, 20, 20, 1]
#Generate training data
len_data = 10000
x_x = np.array([np.linspace(0.,1.,len_data)])
x_y = np.array([np.linspace(0.,1.,len_data)])
y_true = np.array([np.linspace(-0.2,0.2,len_data)])
N_train = int(frac_train*len_data)
idx = np.random.choice(len_data, N_train, replace=False)
x_train = x_x.T[idx,:]
y_train = x_y.T[idx,:]
v1_train = y_true.T[idx,:]
sample_batch_size = int(frac_sample_train*N_train)
np.random.seed(1234)
tf.set_random_seed(1234)
import logging
logging.getLogger('tensorflow').setLevel(logging.ERROR)
tf.logging.set_verbosity(tf.logging.ERROR)
class NeuralNet:
    def __init__(self, x, y, v1, layers):
        X = np.concatenate([x, y], 1)
        self.lb = X.min(0)
        self.ub = X.max(0)
        self.X = X
        self.x = X[:,0:1]
        self.y = X[:,1:2]
        self.v1 = v1
        self.layers = layers
        self.weights_v1, self.biases_v1 = self.initialize_NN(layers)
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=False,
                                                     log_device_placement=False))
        self.x_tf = tf.placeholder(tf.float32, shape=[None, self.x.shape[1]])
        self.y_tf = tf.placeholder(tf.float32, shape=[None, self.y.shape[1]])
        self.v1_tf = tf.placeholder(tf.float32, shape=[None, self.v1.shape[1]])
        self.v1_pred = self.net(self.x_tf, self.y_tf)
        lower_bound = -0.5  # a guessed a priori lower bound on the output
        upper_bound = 0.75  # a guessed a priori upper bound on the output
        # prediction above the upper bound
        cond_v1_1 = tf.greater(self.v1_pred[:,0], upper_bound*tf.ones(tf.shape(self.v1_pred[:,0])))
        # prediction below the lower bound: pred < lower_bound  <=>  -pred > -lower_bound
        cond_v1_2 = tf.greater(-1.0*self.v1_pred[:,0], -lower_bound*tf.ones(tf.shape(self.v1_pred[:,0])))
        self.red_v1 = tf.where(cond_v1_1, 100000.0*tf.ones(tf.shape(self.v1_pred[:,0])), tf.zeros(tf.shape(self.v1_pred[:,0])))
        self.red_v1 = tf.where(cond_v1_2, 100000.0*tf.ones(tf.shape(self.v1_pred[:,0])), self.red_v1)
        self.loss_cond = tf.reduce_sum(tf.square(self.red_v1))  # penalty for out-of-bound predictions
        self.loss_data = tf.reduce_mean(tf.square(self.v1_tf - self.v1_pred))  # data-fitting loss
        self.loss = self.loss_cond + self.loss_data
        self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss,
                                                                var_list=self.weights_v1+self.biases_v1,
                                                                method='L-BFGS-B',
                                                                options={'maxiter': 50,
                                                                         'maxfun': 50000,
                                                                         'maxcor': 50,
                                                                         'maxls': 50,
                                                                         'ftol': 1.0*np.finfo(float).eps})
        self.optimizer_Adam = tf.train.AdamOptimizer()
        self.train_op_Adam_v1 = self.optimizer_Adam.minimize(self.loss, var_list=self.weights_v1+self.biases_v1)
        init = tf.global_variables_initializer()
        self.sess.run(init)

    def initialize_NN(self, layers):
        weights = []
        biases = []
        num_layers = len(layers)
        for l in range(0, num_layers-1):
            W = self.xavier_init(size=[layers[l], layers[l+1]])
            b = tf.Variable(tf.zeros([1, layers[l+1]], dtype=tf.float32), dtype=tf.float32)
            weights.append(W)
            biases.append(b)
        return weights, biases

    def xavier_init(self, size):
        in_dim = size[0]
        out_dim = size[1]
        xavier_stddev = np.sqrt(2/(in_dim + out_dim))
        return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)

    def neural_net(self, X, weights, biases):
        num_layers = len(weights) + 1
        # scale inputs to [-1, 1] before the tanh hidden layers
        H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0
        for l in range(0, num_layers-2):
            W = weights[l]
            b = biases[l]
            H = tf.tanh(tf.add(tf.matmul(H, W), b))
        W = weights[-1]
        b = biases[-1]
        Y = tf.add(tf.matmul(H, W), b)  # linear (unbounded) output layer
        return Y

    def net(self, x, y):
        v1_out = self.neural_net(tf.concat([x, y], 1), self.weights_v1, self.biases_v1)
        v1 = v1_out[:,0:1]
        return v1

    def callback(self, loss):
        global Nfeval
        print(str(Nfeval)+' - Loss in loop: %.3e' % (loss))
        Nfeval += 1

    def fetch_minibatch(self, x_in, y_in, v1_in, N_train_sample):
        idx_batch = np.random.choice(len(x_in), N_train_sample, replace=False)
        x_batch = x_in[idx_batch,:]
        y_batch = y_in[idx_batch,:]
        v1_batch = v1_in[idx_batch,:]
        return x_batch, y_batch, v1_batch

    def train(self, end_it):
        it = 0
        while it < end_it:
            x_res_batch, y_res_batch, v1_res_batch = self.fetch_minibatch(self.x, self.y, self.v1, sample_batch_size)  # fetch residual mini-batch
            tf_dict = {self.x_tf: x_res_batch, self.y_tf: y_res_batch,
                       self.v1_tf: v1_res_batch}
            self.sess.run(self.train_op_Adam_v1, tf_dict)
            self.optimizer.minimize(self.sess,
                                    feed_dict=tf_dict,
                                    fetches=[self.loss],
                                    loss_callback=self.callback)
            it = it + 1

    def predict(self, x_star, y_star):
        tf_dict = {self.x_tf: x_star, self.y_tf: y_star}
        v1_star = self.sess.run(self.v1_pred, tf_dict)
        return v1_star
model = NeuralNet(x_train, y_train, v1_train, layers)
Nfeval = 1
model.train(end_it)
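For reference, the smoother hinge-style penalty mentioned above could look like the following sketch (an illustration only, not necessarily best practice). It reuses self.v1_pred, lower_bound and upper_bound from the example; loss_weight is a hypothetical scale factor that would need tuning. Instead of a constant 100000 outside the bounds, it penalizes the squared distance by which a prediction exceeds either bound, so the penalty is zero inside [lower_bound, upper_bound] and grows smoothly outside it.
# Sketch of a smoother out-of-bounds penalty (same TF1-style graph as above).
over = tf.nn.relu(self.v1_pred[:,0] - upper_bound)    # amount above the upper bound
under = tf.nn.relu(lower_bound - self.v1_pred[:,0])   # amount below the lower bound
loss_weight = 100.0  # hypothetical weight balancing penalty vs. data loss
self.loss_cond = loss_weight*tf.reduce_mean(tf.square(over) + tf.square(under))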
Best Answer
The best way (IMHO) to do this kind of thing is to enforce it through the output activation function. We can use tf.nn.sigmoid as a basis, which is bounded between [0, 1], and shift and scale it slightly.
def bounded_output(x, lower, upper):
    scale = upper - lower
    return scale * tf.nn.sigmoid(x) + lower
In your case, call it with lower=-0.5 and upper=0.75. This shifts the sigmoid so that the lowest output is -0.5, and the range is 0.75 + 0.5 = 1.25, which puts the upper limit at 0.75. Adding this as the output activation in the last layer of the network means the output can never fall outside the bounds.
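As a concrete illustration (a minimal sketch based on the question's code, not part of the original answer), the last two lines of neural_net could be replaced so that bounded_output is applied to the final linear layer:
# Hypothetical change to the end of neural_net from the question,
# assuming bounded_output as defined in the answer above.
W = weights[-1]
b = biases[-1]
Y = bounded_output(tf.add(tf.matmul(H, W), b), lower=-0.5, upper=0.75)
return Y
With this change, the out-of-bounds penalty term in the loss becomes unnecessary, since the output can never leave [-0.5, 0.75].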
Regarding "tensorflow - restricting the output of a neural network to an arbitrary range", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/62562463/