
machine-learning - What to do when a Seq2Seq network repeats words over and over in its output?


So, I've been working on a project for a while now. We have very little data, and I know the results would improve if we could put together a much larger dataset. That aside, my problem right now is that when I feed in a sentence as input, my output currently looks like this:

contactid contactid contactid contactid

One word gets the focus and is repeated over and over again. What can I do to get past this hurdle?

Things I have tried:

  1. Double-checked that I am appending start/stop tokens and that those tokens are placed correctly at the top of their vocab file; I am sharing the vocabulary.
  2. I found a post saying this can be caused by poor word embeddings. To check this I looked at TensorBoard, and sure enough its PCA view showed a very dense cluster of points. Seeing that, I grabbed Facebook's public pretrained word vectors and loaded them as the embedding. Trained again, and this time the TensorBoard PCA showed a much healthier picture. (A sketch of keeping the embedding matrix aligned with the vocab file follows this list.)
  3. Switched my training helper from the basic one to SampledScheduling (scheduled sampling), so that during training the ground-truth decoder inputs are occasionally swapped for the model's own sampled outputs.
  4. Switched my decoder to use the beam search decoder, on the theory that this might give more robust responses when word choices sit close together in the intermediate feature space.
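
Regarding item 2, one subtlety worth checking is that the pretrained embedding matrix's row order must match the vocab file's line order, since the lookup tables map each word to its line index. A hedged sketch of building such a matrix (the function name, file handling, and random-init scheme are my assumptions, not code from the project):

import numpy as np
from gensim.models import KeyedVectors

def build_embedding_matrix(vocab_file, vectors_path, embed_dim):
    # Row i must correspond to line i of the vocab file, because
    # index_table_from_file maps each word to its line number.
    vectors = KeyedVectors.load_word2vec_format(vectors_path)
    with open(vocab_file) as f:
        words = [line.strip() for line in f]
    # Words missing from the pretrained vectors get small random rows.
    matrix = np.random.uniform(-0.1, 0.1, (len(words), embed_dim)).astype(np.float32)
    for i, word in enumerate(words):
        if word in vectors:
            matrix[i] = vectors[word]
    return matrix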

For what it's worth, my perplexity is steadily decreasing.

Here is my dataset preparation code:

import tensorflow as tf
from tensorflow.python.ops import lookup_ops

UNK_ID = 0  # assumption: id 0 is reserved for unknown tokens in the vocab files

# IteratorInitializerHook (defined elsewhere in the project) is a
# SessionRunHook whose iterator_initializer_func is called once the
# session has been created.


class ModelInputs(object):
    """Factory to construct various input hooks and functions depending on mode """

    def __init__(
        self, vocab_files, batch_size,
        share_vocab=True, src_eos_id=1, tgt_eos_id=2
    ):
        self.batch_size = batch_size
        self.vocab_files = vocab_files
        self.share_vocab = share_vocab
        self.src_eos_id = src_eos_id
        self.tgt_eos_id = tgt_eos_id

    def get_inputs(self, file_path, num_infer=None, mode=tf.estimator.ModeKeys.TRAIN):
        self.mode = mode
        if self.mode == tf.estimator.ModeKeys.TRAIN:
            return self._training_input_hook(file_path)
        if self.mode == tf.estimator.ModeKeys.EVAL:
            return self._validation_input_hook(file_path)
        if self.mode == tf.estimator.ModeKeys.PREDICT:
            if num_infer is None:
                raise ValueError('If performing inference must supply number of predictions to be made.')
            return self._infer_input_hook(file_path, num_infer)

    def _prepare_data(self, dataset, out=False):
        # Split each line into tokens, pair the tokens with their count, then
        # map tokens to ids through the appropriate vocab table.
        prep_set = dataset.map(lambda string: tf.string_split([string]).values)
        prep_set = prep_set.map(lambda words: (words, tf.size(words)))
        if out:
            return prep_set.map(lambda words, size: (self.vocab_tables[1].lookup(words), size))
        return prep_set.map(lambda words, size: (self.vocab_tables[0].lookup(words), size))

    def _batch_data(self, dataset, src_eos_id, tgt_eos_id):
        # Pad every sequence in the batch to the longest one, using the EOS id
        # as the padding value.
        batched_set = dataset.padded_batch(
            self.batch_size,
            padded_shapes=((tf.TensorShape([None]), tf.TensorShape([])),
                           (tf.TensorShape([None]), tf.TensorShape([]))),
            padding_values=((src_eos_id, 0), (tgt_eos_id, 0))
        )
        return batched_set

    def _batch_infer_data(self, dataset, src_eos_id):
        batched_set = dataset.padded_batch(
            self.batch_size,
            padded_shapes=(tf.TensorShape([None]), tf.TensorShape([])),
            padding_values=(src_eos_id, 0)
        )
        return batched_set

    def _create_vocab_tables(self, vocab_files, share_vocab=False):
        if vocab_files[1] is None and not share_vocab:
            raise ValueError('If share_vocab is set to false must provide target vocab. '
                             '(src_vocab_file, target_vocab_file)')

        src_vocab_table = lookup_ops.index_table_from_file(
            vocab_files[0],
            default_value=UNK_ID
        )

        if share_vocab:
            tgt_vocab_table = src_vocab_table
        else:
            tgt_vocab_table = lookup_ops.index_table_from_file(
                vocab_files[1],
                default_value=UNK_ID
            )

        return src_vocab_table, tgt_vocab_table

    def _prepare_iterator_hook(self, hook, scope_name, iterator, file_path, name_placeholder):
        # Train/eval feed two file paths (source and target); inference feeds one.
        if self.mode == tf.estimator.ModeKeys.TRAIN or self.mode == tf.estimator.ModeKeys.EVAL:
            feed_dict = {
                name_placeholder[0]: file_path[0],
                name_placeholder[1]: file_path[1]
            }
        else:
            feed_dict = {name_placeholder: file_path}

        with tf.name_scope(scope_name):
            hook.iterator_initializer_func = \
                lambda sess: sess.run(
                    iterator.initializer,
                    feed_dict=feed_dict,
                )

    def _set_up_train_or_eval(self, scope_name, file_path):
        hook = IteratorInitializerHook()

        def input_fn():
            with tf.name_scope(scope_name):
                with tf.name_scope('sentence_markers'):
                    src_eos_id = tf.constant(self.src_eos_id, dtype=tf.int64)
                    tgt_eos_id = tf.constant(self.tgt_eos_id, dtype=tf.int64)
                self.vocab_tables = self._create_vocab_tables(self.vocab_files, self.share_vocab)
                in_file = tf.placeholder(tf.string, shape=())
                in_dataset = self._prepare_data(tf.contrib.data.TextLineDataset(in_file).repeat(None))
                out_file = tf.placeholder(tf.string, shape=())
                # out=True so targets go through the target vocab table (the
                # original passed the default and relied on share_vocab=True).
                out_dataset = self._prepare_data(tf.contrib.data.TextLineDataset(out_file).repeat(None), out=True)
                dataset = tf.contrib.data.Dataset.zip((in_dataset, out_dataset))
                dataset = self._batch_data(dataset, src_eos_id, tgt_eos_id)
                iterator = dataset.make_initializable_iterator()
                next_example, next_label = iterator.get_next()
                self._prepare_iterator_hook(hook, scope_name, iterator, file_path, (in_file, out_file))
                return next_example, next_label

        return (input_fn, hook)

    def _training_input_hook(self, file_path):
        input_fn, hook = self._set_up_train_or_eval('train_inputs', file_path)
        return (input_fn, hook)

    def _validation_input_hook(self, file_path):
        input_fn, hook = self._set_up_train_or_eval('eval_inputs', file_path)
        return (input_fn, hook)

    def _infer_input_hook(self, file_path, num_infer):
        hook = IteratorInitializerHook()

        def input_fn():
            with tf.name_scope('infer_inputs'):
                with tf.name_scope('sentence_markers'):
                    src_eos_id = tf.constant(self.src_eos_id, dtype=tf.int64)
                self.vocab_tables = self._create_vocab_tables(self.vocab_files, self.share_vocab)
                infer_file = tf.placeholder(tf.string, shape=())
                dataset = tf.contrib.data.TextLineDataset(infer_file)
                dataset = self._prepare_data(dataset)
                dataset = self._batch_infer_data(dataset, src_eos_id)
                iterator = dataset.make_initializable_iterator()
                next_example, seq_len = iterator.get_next()
                self._prepare_iterator_hook(hook, 'infer_inputs', iterator, file_path, infer_file)
                return ((next_example, seq_len), None)

        return (input_fn, hook)
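
For context, this is roughly how the (input_fn, hook) pairs above are meant to be consumed by a tf.estimator.Estimator; the file names and model_fn here are placeholders, not part of the question's code:

# Hypothetical wiring; file names and model_fn are placeholders.
inputs = ModelInputs(('vocab.src', None), batch_size=32, share_vocab=True)
train_input_fn, train_hook = inputs.get_inputs(
    ('train.src', 'train.tgt'), mode=tf.estimator.ModeKeys.TRAIN)

estimator = tf.estimator.Estimator(model_fn=model_fn)
# The hook runs the iterator initializer once the session exists, feeding
# the file-path placeholders that input_fn created.
estimator.train(input_fn=train_input_fn, hooks=[train_hook], steps=1000)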

And here is my model:

import numpy as np
from gensim.models import KeyedVectors
from tensorflow.python.layers import core as layers_core

UNK = '<unk>'  # assumption: the unknown-token string used in the vocab files


class Seq2Seq():

    def __init__(
        self, batch_size, inputs,
        outputs, inp_vocab_size, tgt_vocab_size,
        embed_dim, mode, time_major=False,
        enc_embedding=None, dec_embedding=None, average_across_batch=True,
        average_across_timesteps=True, vocab_path=None, embedding_path='./data_files/wiki.simple.vec'
    ):
        embed_np = self._get_embedding(embedding_path)
        if not enc_embedding:
            # Encoder embedding, initialized from the pretrained vectors but
            # still trainable.
            self.enc_embedding = tf.contrib.layers.embed_sequence(
                inputs,
                inp_vocab_size,
                embed_dim,
                trainable=True,
                scope='embed',
                initializer=tf.constant_initializer(value=embed_np, dtype=tf.float32)
            )
        else:
            self.enc_embedding = enc_embedding
        if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
            if not dec_embedding:
                # Reuse the same 'embed' variable for the decoder side.
                embed_outputs = tf.contrib.layers.embed_sequence(
                    outputs,
                    tgt_vocab_size,
                    embed_dim,
                    trainable=True,
                    scope='embed',
                    reuse=True
                )
                with tf.variable_scope('embed', reuse=True):
                    dec_embedding = tf.get_variable('embeddings')
                self.embed_outputs = embed_outputs
                self.dec_embedding = dec_embedding
            else:
                self.dec_embedding = dec_embedding
        else:
            with tf.variable_scope('embed', reuse=True):
                self.dec_embedding = tf.get_variable('embeddings')

        if mode == tf.estimator.ModeKeys.PREDICT and vocab_path is None:
            raise ValueError('If mode is predict, must supply vocab_path')
        self.vocab_path = vocab_path
        self.inp_vocab_size = inp_vocab_size
        self.tgt_vocab_size = tgt_vocab_size
        self.average_across_batch = average_across_batch
        self.average_across_timesteps = average_across_timesteps
        self.time_major = time_major
        self.batch_size = batch_size
        self.mode = mode

    def _get_embedding(self, embedding_path):
        # NOTE: rows come out in the pretrained model's own vocab order; this
        # only lines up with the lookup tables if the vocab file was written
        # in that same order.
        model = KeyedVectors.load_word2vec_format(embedding_path)
        return np.array([model.word_vec(k) for k in model.vocab.keys()])

    def _get_lstm(self, num_units):
        return tf.nn.rnn_cell.BasicLSTMCell(num_units)

    def encode(self, num_units, num_layers, seq_len, cell_fw=None, cell_bw=None):
        if cell_fw and cell_bw:
            fw_cell = cell_fw
            bw_cell = cell_bw
        else:
            fw_cell = self._get_lstm(num_units)
            bw_cell = self._get_lstm(num_units)
        encoder_outputs, bi_encoder_state = tf.nn.bidirectional_dynamic_rnn(
            fw_cell,
            bw_cell,
            self.enc_embedding,
            sequence_length=seq_len,
            time_major=self.time_major,
            dtype=tf.float32
        )
        # Concatenate the forward and backward states so they can feed a
        # decoder cell of size 2 * num_units.
        c_state = tf.concat([bi_encoder_state[0].c, bi_encoder_state[1].c], axis=1)
        h_state = tf.concat([bi_encoder_state[0].h, bi_encoder_state[1].h], axis=1)
        encoder_state = tf.contrib.rnn.LSTMStateTuple(c=c_state, h=h_state)
        return tf.concat(encoder_outputs, -1), encoder_state

    def _train_decoder(self, decoder_cell, out_seq_len, encoder_state, helper):
        if not helper:
            # Scheduled sampling: with probability 0.3 feed back the decoder's
            # own sampled output instead of the ground-truth token.
            helper = tf.contrib.seq2seq.ScheduledEmbeddingTrainingHelper(
                self.embed_outputs,
                out_seq_len,
                self.dec_embedding,
                0.3,
            )
            # helper = tf.contrib.seq2seq.TrainingHelper(
            #     self.dec_embedding,
            #     out_seq_len,
            # )
        projection_layer = layers_core.Dense(self.tgt_vocab_size, use_bias=False)
        decoder = tf.contrib.seq2seq.BasicDecoder(
            decoder_cell,
            helper,
            encoder_state,
            output_layer=projection_layer
        )
        return decoder

    def _predict_decoder(self, cell, encoder_state, beam_width, length_penalty_weight):
        # Beam search needs the encoder state tiled beam_width times.
        tiled_encoder_state = tf.contrib.seq2seq.tile_batch(
            encoder_state, multiplier=beam_width
        )
        with tf.name_scope('sentence_markers'):
            sos_id = tf.constant(1, dtype=tf.int32)
            eos_id = tf.constant(2, dtype=tf.int32)
        start_tokens = tf.fill([self.batch_size], sos_id)
        end_token = eos_id
        projection_layer = layers_core.Dense(self.tgt_vocab_size, use_bias=False)
        emb = tf.squeeze(self.dec_embedding)  # note: computed but unused
        decoder = tf.contrib.seq2seq.BeamSearchDecoder(
            cell=cell,
            embedding=self.dec_embedding,
            start_tokens=start_tokens,
            end_token=end_token,
            initial_state=tiled_encoder_state,
            beam_width=beam_width,
            output_layer=projection_layer,
            length_penalty_weight=length_penalty_weight
        )
        return decoder

    def decode(
        self, num_units, out_seq_len,
        encoder_state, cell=None, helper=None,
        beam_width=None, length_penalty_weight=None
    ):
        with tf.name_scope('Decode'):
            if cell:
                decoder_cell = cell
            else:
                # 2 * num_units to match the concatenated bidirectional state.
                decoder_cell = tf.nn.rnn_cell.BasicLSTMCell(2 * num_units)
            if self.mode != tf.estimator.ModeKeys.PREDICT:
                decoder = self._train_decoder(decoder_cell, out_seq_len, encoder_state, helper)
            else:
                decoder = self._predict_decoder(decoder_cell, encoder_state, beam_width, length_penalty_weight)
            outputs = tf.contrib.seq2seq.dynamic_decode(
                decoder,
                maximum_iterations=20,
                swap_memory=True,
            )
            outputs = outputs[0]
            if self.mode != tf.estimator.ModeKeys.PREDICT:
                return outputs.rnn_output, outputs.sample_id
            else:
                return outputs.beam_search_decoder_output, outputs.predicted_ids

    def prepare_predict(self, sample_id):
        # Map predicted ids back to word strings for the EstimatorSpec.
        rev_table = lookup_ops.index_to_string_table_from_file(
            self.vocab_path, default_value=UNK)
        predictions = rev_table.lookup(tf.to_int64(sample_id))
        return tf.estimator.EstimatorSpec(
            predictions=predictions,
            mode=tf.estimator.ModeKeys.PREDICT
        )

    def prepare_train_eval(
        self, t_out,
        out_seq_len, labels, lr,
        train_op=None, loss=None
    ):
        if not loss:
            # Mask out the padded time steps so they do not contribute to the loss.
            weights = tf.sequence_mask(
                out_seq_len,
                dtype=t_out.dtype
            )
            loss = tf.contrib.seq2seq.sequence_loss(
                t_out,
                labels,
                weights,
                average_across_batch=self.average_across_batch,
                average_across_timesteps=self.average_across_timesteps,
            )

        if not train_op:
            train_op = tf.contrib.layers.optimize_loss(
                loss,
                tf.train.get_global_step(),
                optimizer='SGD',
                learning_rate=lr,
                summaries=['loss', 'learning_rate']
            )

        return tf.estimator.EstimatorSpec(
            mode=self.mode,
            loss=loss,
            train_op=train_op,
        )
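
And a hedged sketch (my assumption of how the pieces connect, not code from the question) of a model_fn tying ModelInputs and Seq2Seq together; every params key is a placeholder:

def model_fn(features, labels, mode, params):
    # features/labels arrive from ModelInputs as (token_ids, sequence_length).
    src_ids, src_len = features
    tgt_ids, tgt_len = labels if labels is not None else (None, None)

    model = Seq2Seq(
        params['batch_size'], src_ids, tgt_ids,
        params['inp_vocab_size'], params['tgt_vocab_size'],
        params['embed_dim'], mode, vocab_path=params.get('vocab_path')
    )
    _, encoder_state = model.encode(params['num_units'], 1, src_len)

    if mode == tf.estimator.ModeKeys.PREDICT:
        _, predicted_ids = model.decode(
            params['num_units'], None, encoder_state,
            beam_width=10, length_penalty_weight=1.0)
        return model.prepare_predict(predicted_ids)

    logits, _ = model.decode(params['num_units'], tgt_len, encoder_state)
    return model.prepare_train_eval(logits, tgt_len, tgt_ids, params['lr'])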

Best Answer

This type of repetition is called "text degeneration".

There is an excellent 2019 paper that analyzes this phenomenon: The Curious Case of Neural Text Degeneration by Ari Holtzman et al. from the Allen Institute for AI.

The repetition can come from the type of text search (text sampling) used on the decoder side. Many people implement this simply by taking the most likely next word proposed by the model (argmax over the final softmax layer), or by so-called beam search. In fact, beam search is today's industry standard.
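
To make the distinction concrete, here is a minimal numpy sketch (my illustration, not code from the question) of greedy argmax decoding versus sampling from the softmax distribution for a single decoding step; logits is a stand-in for the decoder's output scores:

import numpy as np

def softmax(logits):
    # Numerically stable softmax over the vocabulary.
    z = logits - np.max(logits)
    e = np.exp(z)
    return e / e.sum()

logits = np.array([2.0, 1.0, 0.5, 0.1])  # toy scores for a 4-word vocab

# Greedy decoding: deterministically pick the single most likely word.
# Repeated over many steps, this is what tends to loop.
greedy_id = int(np.argmax(logits))

# Stochastic decoding: sample the next word from the full distribution,
# which breaks deterministic repetition loops.
probs = softmax(logits)
sampled_id = int(np.random.choice(len(probs), p=probs))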

Here is the beam search example from the paper:

Continuation (BeamSearch, b=10):

"The unicorns were able to communicate with each other, they said unicorns. a statement of the unicorns. Professor of the department of Los Angeles, the most important place in the world to be recognized by the world to be a part of the world to be a part of the world to be a part of the world to be a part of the world to be a part of the world to be a part of the world to be a part of the world to be a part of the world to be a part of the world to be a part of the world..."

As you can see, there is a huge amount of repetition.

According to the paper, this curious behavior can be explained by the fact that each repetition of a word sequence has a higher probability than the sequence without the next repetition. (A figure from the paper illustrating this is not reproduced here.)

The paper proposes some workarounds based on having the decoder sample words, as sketched below. This definitely calls for more study, but it is the best explanation we have today.
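
Concretely, the sampling scheme the paper proposes is nucleus (top-p) sampling. A rough numpy sketch of the idea (my paraphrase, not the paper's reference implementation):

import numpy as np

def nucleus_sample(probs, p=0.9):
    # Sample from the smallest set of words whose cumulative probability
    # exceeds p (nucleus / top-p sampling, Holtzman et al. 2019).
    order = np.argsort(probs)[::-1]                    # word ids, most likely first
    cumulative = np.cumsum(probs[order])
    cutoff = int(np.searchsorted(cumulative, p)) + 1   # size of the nucleus
    nucleus = order[:cutoff]
    nucleus_probs = probs[nucleus] / probs[nucleus].sum()  # renormalize
    return int(np.random.choice(nucleus, p=nucleus_probs))

In the TF 1.x setup from the question, the closest built-in alternative is tf.contrib.seq2seq.SampleEmbeddingHelper, which samples from the output distribution at each step (optionally with a softmax temperature) instead of taking the argmax.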

The other possibility is that your model needs more training. In many cases I have seen similar behavior when I had a large training set and the model still could not generalize well over the full diversity of the data. To test this hypothesis, try training on a smaller dataset and see whether it generalizes (produces meaningful results).

However, even if your model generalizes well enough, that does not mean you will never see repetitive patterns. Unless you change the decoder's sampling scheme, it remains a common occurrence.

Regarding machine-learning - What to do when a Seq2Seq network repeats words over and over in its output?, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/46924452/
