
python - How to implement early stopping in TensorFlow

Reposted · Author: 太空狗 · Updated: 2023-10-29 17:04:51

def train():
    # Model
    model = Model()

    # Loss, Optimizer
    global_step = tf.Variable(1, dtype=tf.int32, trainable=False, name='global_step')
    loss_fn = model.loss()
    optimizer = tf.train.AdamOptimizer(learning_rate=TrainConfig.LR).minimize(loss_fn, global_step=global_step)

    # Summaries
    summary_op = summaries(model, loss_fn)

    with tf.Session(config=TrainConfig.session_conf) as sess:

        # Initialize variables, load saved state
        sess.run(tf.global_variables_initializer())
        model.load_state(sess, TrainConfig.CKPT_PATH)

        writer = tf.summary.FileWriter(TrainConfig.GRAPH_PATH, sess.graph)

        # Input source
        data = Data(TrainConfig.DATA_PATH)

        loss = Diff()
        for step in xrange(global_step.eval(), TrainConfig.FINAL_STEP):

            mixed_wav, src1_wav, src2_wav, _ = data.next_wavs(TrainConfig.SECONDS, TrainConfig.NUM_WAVFILE, step)

            mixed_spec = to_spectrogram(mixed_wav)
            mixed_mag = get_magnitude(mixed_spec)

            src1_spec, src2_spec = to_spectrogram(src1_wav), to_spectrogram(src2_wav)
            src1_mag, src2_mag = get_magnitude(src1_spec), get_magnitude(src2_spec)

            src1_batch, _ = model.spec_to_batch(src1_mag)
            src2_batch, _ = model.spec_to_batch(src2_mag)
            mixed_batch, _ = model.spec_to_batch(mixed_mag)

            # Initialize our callback.
            # early_stopping_cb = EarlyStoppingCallback(val_acc_thresh=0.5)

            l, _, summary = sess.run([loss_fn, optimizer, summary_op],
                                     feed_dict={model.x_mixed: mixed_batch, model.y_src1: src1_batch,
                                                model.y_src2: src2_batch})

            loss.update(l)
            print('step-{}\td_loss={:2.2f}\tloss={}'.format(step, loss.diff * 100, loss.value))

            writer.add_summary(summary, global_step=step)

            # Save state
            if step % TrainConfig.CKPT_STEP == 0:
                tf.train.Saver().save(sess, TrainConfig.CKPT_PATH + '/checkpoint', global_step=step)

        writer.close()

I have this neural network code that separates music from voice in .wav files. How can I introduce an early stopping algorithm to stop the training part? I have seen some projects that talk about a ValidationMonitor. Can someone help me?

Best Answer

Here is my implementation of early stopping; you can adapt it to your code:

Early stopping can be applied at certain stages of the training process, for example at the end of each epoch. Specifically, in my case, I monitor the test (validation) loss at every epoch, and if the loss has not improved after 20 epochs (self.require_improvement = 20), training is interrupted.

You can set the maximum number of epochs to 10000, 20000, or whatever you want (self.max_epochs = 10000).

self.require_improvement = 20
self.max_epochs = 10000

Here is my training function, which uses early stopping:

def train(self):

    # training data
    train_input = self.Normalize(self.x_train)
    train_output = self.y_train.copy()
    # ===============
    save_sess = self.sess  # used to compare the result of the previous session with the current one
    # ===============
    # costs history:
    costs = []
    costs_inter = []
    # =================
    # for early stopping:
    best_cost = 1000000
    stop = False
    last_improvement = 0
    # ================
    n_samples = train_input.shape[0]  # size of the training set
    # ===============
    # train the mini-batch model using the early stopping criteria
    epoch = 0
    while epoch < self.max_epochs and stop == False:
        # train the model on the training set by mini-batches:
        # shuffle, then split the training set into mini-batches of size self.batch_size
        seq = list(range(n_samples))
        random.shuffle(seq)
        mini_batches = [
            seq[k:k + self.batch_size]
            for k in range(0, n_samples, self.batch_size)
        ]

        avg_cost = 0.  # the average cost over the mini-batches
        step = 0

        for sample in mini_batches:

            batch_x = train_input.iloc[sample, :]  # normalized training data (was x_train, undefined here)
            batch_y = train_output.iloc[sample, :]
            batch_y = np.array(batch_y).flatten()

            feed_dict = {self.X: batch_x, self.Y: batch_y, self.is_train: True}

            _, cost, acc = self.sess.run([self.train_step, self.loss_, self.accuracy_], feed_dict=feed_dict)
            avg_cost += cost * len(sample) / n_samples
            print('epoch[{}] step [{}] train -- loss : {}, accuracy : {}'.format(epoch, step, avg_cost, acc))
            step += 100

        # cost history since the last best cost
        costs_inter.append(avg_cost)

        # early stopping based on the validation set / max steps without decrease of the loss value: require_improvement
        if avg_cost < best_cost:
            save_sess = self.sess  # save session
            best_cost = avg_cost
            costs += costs_inter  # costs history of the validation set
            last_improvement = 0
            costs_inter = []
        else:
            last_improvement += 1
        if last_improvement > self.require_improvement:
            print("No improvement found during the last {} iterations, stopping optimization.".format(self.require_improvement))
            # Break out from the loop.
            stop = True
            self.sess = save_sess  # restore the session with the best cost

        # Run validation after every epoch:
        print('---------------------------------------------------------')
        self.y_validation = np.array(self.y_validation).flatten()
        loss_valid, acc_valid = self.sess.run([self.loss_, self.accuracy_],
                                              feed_dict={self.X: self.x_validation, self.Y: self.y_validation,
                                                         self.is_train: False})  # evaluation mode (the original fed True here)
        print("Epoch: {0}, validation loss: {1:.2f}, validation accuracy: {2:.01%}".format(epoch + 1, loss_valid, acc_valid))
        print('---------------------------------------------------------')

        epoch += 1
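One caveat before the recap: save_sess = self.sess only copies the Python reference, since both names point to the same Session object, so by itself it does not snapshot the best weights. To truly roll back to the best model in TF 1.x you can checkpoint it with tf.train.Saver. Here is a minimal sketch, where ./best_model is a placeholder path and the conditional mirrors the loop above:

import tensorflow as tf

saver = tf.train.Saver(max_to_keep=1)   # keep only the single best checkpoint
best_ckpt = './best_model/model.ckpt'   # placeholder path, adjust to your setup

# inside the epoch loop, in place of save_sess = self.sess:
if avg_cost < best_cost:
    best_cost = avg_cost
    last_improvement = 0
    saver.save(self.sess, best_ckpt)    # snapshot the weights of the best model
else:
    last_improvement += 1
if last_improvement > self.require_improvement:
    stop = True
    saver.restore(self.sess, best_ckpt)  # roll back to the best weights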

We can recap the important code here:

def train(self):
    ...
    # costs history:
    costs = []
    costs_inter = []
    # for early stopping:
    best_cost = 1000000
    stop = False
    last_improvement = 0
    # train the mini-batch model using the early stopping criteria
    epoch = 0
    while epoch < self.max_epochs and stop == False:
        ...
        for sample in mini_batches:
            ...
        # cost history since the last best cost
        costs_inter.append(avg_cost)

        # early stopping based on the validation set / max steps without decrease of the loss value: require_improvement
        if avg_cost < best_cost:
            save_sess = self.sess  # save session
            best_cost = avg_cost
            costs += costs_inter  # costs history of the validation set
            last_improvement = 0
            costs_inter = []
        else:
            last_improvement += 1
        if last_improvement > self.require_improvement:
            print("No improvement found during the last {} iterations, stopping optimization.".format(self.require_improvement))
            # Break out from the loop.
            stop = True
            self.sess = save_sess  # restore the session with the best cost
        ...
        epoch += 1
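To apply the same counter-based pattern to the step-driven loop in the question, here is a minimal sketch. The val_mixed_batch, val_src1_batch and val_src2_batch names stand for a held-out validation batch, and VAL_STEP and REQUIRE_IMPROVEMENT for config constants; none of them exist in the original code:

# Hypothetical adaptation of the question's train() loop; the val_* names
# and the two constants below are placeholders, not part of the original project.
best_val_loss = float('inf')
last_improvement = 0
saver = tf.train.Saver(max_to_keep=1)

for step in xrange(global_step.eval(), TrainConfig.FINAL_STEP):
    # ... run the training step and summaries exactly as before ...

    if step % VAL_STEP == 0:
        # evaluate the loss on the held-out validation batch
        val_loss = sess.run(loss_fn, feed_dict={model.x_mixed: val_mixed_batch,
                                                model.y_src1: val_src1_batch,
                                                model.y_src2: val_src2_batch})
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            last_improvement = 0
            saver.save(sess, TrainConfig.CKPT_PATH + '/best')  # keep the best model
        else:
            last_improvement += 1
        if last_improvement > REQUIRE_IMPROVEMENT:
            print('No improvement for {} validation checks, stopping at step {}.'
                  .format(REQUIRE_IMPROVEMENT, step))
            break

Restoring from TrainConfig.CKPT_PATH + '/best' afterwards gives you the weights that achieved the lowest validation loss.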

Hope it helps someone :).

Regarding python - How to implement early stopping in TensorFlow, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/46428604/
