
python - Is there a way to reduce the amount of code for RMSProp?


I have some code for a simple recurrent neural network and would like to know if there is a way to reduce the amount of code needed for the update stage. My code looks like this:

import numpy as np

class RNN(object):
    def __init__(self, data, hidden_size, eps=0.0001):
        self.data = data
        self.hidden_size = hidden_size
        self.weights_hidden = np.random.rand(hidden_size, hidden_size) * 0.1   # W
        self.weights_input = np.random.rand(hidden_size, len(data[0])) * 0.1   # U
        self.weights_output = np.random.rand(len(data[0]), hidden_size) * 0.1  # V
        self.bias_hidden = np.array([np.random.rand(hidden_size)]).T   # b
        self.bias_output = np.array([np.random.rand(len(data[0]))]).T  # c

        self.cache_w_hid, self.cache_w_in, self.cache_w_out = 0, 0, 0
        self.cache_b_hid, self.cache_b_out = 0, 0
        self.eps = eps

    def train(self, seq_length, epochs, eta, decay_rate=0.9, learning_decay=0.0):
        # Other stuff
        self.update(seq, epoch, eta, decay_rate, learning_decay)
        # Other stuff

    def update(self, seq, epoch, eta, decay_rate, learning_decay):
        """Updates the network's weights and biases by applying gradient
        descent using backpropagation through time and RMSProp.
        """
        delta_nabla_c, delta_nabla_b, \
            delta_nabla_V, delta_nabla_W, delta_nabla_U = self.backward_pass(seq)

        eta = eta * np.exp(-epoch * learning_decay)

        self.cache_w_hid = decay_rate * self.cache_w_hid \
            + (1 - decay_rate) * delta_nabla_W**2
        self.weights_hidden -= eta * delta_nabla_W / (np.sqrt(self.cache_w_hid) + self.eps)

        self.cache_w_in = decay_rate * self.cache_w_in \
            + (1 - decay_rate) * delta_nabla_U**2
        self.weights_input -= eta * delta_nabla_U / (np.sqrt(self.cache_w_in) + self.eps)

        self.cache_w_out = decay_rate * self.cache_w_out \
            + (1 - decay_rate) * delta_nabla_V**2
        self.weights_output -= eta * delta_nabla_V / (np.sqrt(self.cache_w_out) + self.eps)

        self.cache_b_hid = decay_rate * self.cache_b_hid \
            + (1 - decay_rate) * delta_nabla_b**2
        self.bias_hidden -= eta * delta_nabla_b / (np.sqrt(self.cache_b_hid) + self.eps)

        self.cache_b_out = decay_rate * self.cache_b_out \
            + (1 - decay_rate) * delta_nabla_c**2
        self.bias_output -= eta * delta_nabla_c / (np.sqrt(self.cache_b_out) + self.eps)

Every cache/parameter pair in update follows the RMSProp update rule, namely:

cache = decay_rate * cache + (1 - decay_rate) * dx**2
x += - learning_rate * dx / (np.sqrt(cache) + eps)
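
As a minimal, self-contained sketch of this rule applied to a single parameter (the names w and grad and the toy numbers below are made up for illustration; they are not part of the network above):

import numpy as np

decay_rate, learning_rate, eps = 0.9, 0.01, 1e-4

w = np.array([0.5, -0.3])    # a toy parameter vector
cache = np.zeros_like(w)     # running average of squared gradients

for grad in (np.array([0.2, -0.1]), np.array([0.25, -0.05])):
    cache = decay_rate * cache + (1 - decay_rate) * grad**2
    w += -learning_rate * grad / (np.sqrt(cache) + eps)

print(w)    # each coordinate's step is scaled by 1 / sqrt(cache)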

I have declared the cache_ attributes alongside self.weights_ and self.bias_, and I would like to write this more compactly. I was thinking of using zip(), but I'm not sure how to go about it.

Best Answer

Judging from your question, I am guessing that you are trying to improve readability/elegance here rather than any other kind of optimization.

You can introduce a function that implements the update rule and then call it once per variable. The trick here is that Python lets you access attributes by name, so you can pass in the names of the cache and weight attributes instead of their values. This will let you update the values for future passes:

def update_rule(self, cache_attr, x_attr, decay_rate, learning_rate, dx):
    cache = getattr(self, cache_attr)
    cache = decay_rate * cache + (1 - decay_rate) * dx**2
    setattr(self, cache_attr, cache)

    x = getattr(self, x_attr)
    x += -learning_rate * dx / (np.sqrt(cache) + self.eps)
    setattr(self, x_attr, x)

def update(self, seq, epoch, eta, decay_rate, learning_decay):
    """Updates the network's weights and biases by applying gradient
    descent using backpropagation through time and RMSProp.
    """
    delta_nabla_c, delta_nabla_b, \
        delta_nabla_V, delta_nabla_W, delta_nabla_U = self.backward_pass(seq)

    eta = eta * np.exp(-epoch * learning_decay)

    self.update_rule('cache_w_hid', 'weights_hidden', decay_rate, eta, delta_nabla_W)
    self.update_rule('cache_w_in', 'weights_input', decay_rate, eta, delta_nabla_U)
    self.update_rule('cache_w_out', 'weights_output', decay_rate, eta, delta_nabla_V)
    self.update_rule('cache_b_hid', 'bias_hidden', decay_rate, eta, delta_nabla_b)
    self.update_rule('cache_b_out', 'bias_output', decay_rate, eta, delta_nabla_c)
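
To make the attribute-by-name trick concrete, here is a tiny standalone sketch (the Box class and its fields are invented purely for illustration; they are not part of the RNN above):

import numpy as np

class Box(object):
    def __init__(self):
        self.cache_w = 0
        self.w = np.array([1.0, 2.0])

box = Box()

# getattr/setattr look an attribute up by its name as a string,
# which is what lets a single update_rule serve every cache/weight pair.
cache = getattr(box, 'cache_w')
setattr(box, 'cache_w', 0.9 * cache + 0.1 * 4.0)

print(box.cache_w)           # 0.4
print(getattr(box, 'w'))     # [1. 2.]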

In fact, you can save the extra parameters and avoid exposing what is basically a private method by putting update_rule inside update. That exposes update's namespace to update_rule when it is called, so you do not have to pass in decay_rate or learning_rate:

def update(self, seq, epoch, eta, decay_rate, learning_decay):
    """Updates the network's weights and biases by applying gradient
    descent using backpropagation through time and RMSProp.
    """

    def update_rule(cache_attr, x_attr, dx):
        cache = getattr(self, cache_attr)
        cache = decay_rate * cache + (1 - decay_rate) * dx**2
        setattr(self, cache_attr, cache)

        x = getattr(self, x_attr)
        x += -eta * dx / (np.sqrt(cache) + self.eps)
        setattr(self, x_attr, x)

    delta_nabla_c, delta_nabla_b, \
        delta_nabla_V, delta_nabla_W, delta_nabla_U = self.backward_pass(seq)

    eta = eta * np.exp(-epoch * learning_decay)

    update_rule('cache_w_hid', 'weights_hidden', delta_nabla_W)
    update_rule('cache_w_in', 'weights_input', delta_nabla_U)
    update_rule('cache_w_out', 'weights_output', delta_nabla_V)
    update_rule('cache_b_hid', 'bias_hidden', delta_nabla_b)
    update_rule('cache_b_out', 'bias_output', delta_nabla_c)
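
The closure behaviour this relies on can be seen in a minimal standalone sketch (outer, inner, scale, and offset are invented names, used only to mirror how the nested update_rule reads eta and decay_rate from update):

def outer(scale):
    offset = 1.0

    def inner(x):
        # `scale` and `offset` come from outer's namespace; inner never
        # receives them as arguments, just as update_rule never receives
        # eta or decay_rate.
        return scale * x + offset

    return inner(10.0)

print(outer(0.5))    # 6.0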

Finally, if you really want to, you can use zip to call update_rule in a loop. Note that in this version the order of the calls has been changed to match the order of the values returned by self.backward_pass. Personally, I would not use this last version unless you really had a lot of updates to do, because it starts to look cluttered and it is very sensitive to what backward_pass returns.

def update(self, seq, epoch, eta, decay_rate, learning_decay):
    """Updates the network's weights and biases by applying gradient
    descent using backpropagation through time and RMSProp.
    """

    def update_rule(cache_attr, x_attr, dx):
        cache = getattr(self, cache_attr)
        cache = decay_rate * cache + (1 - decay_rate) * dx**2
        setattr(self, cache_attr, cache)

        x = getattr(self, x_attr)
        x += -eta * dx / (np.sqrt(cache) + self.eps)
        setattr(self, x_attr, x)

    dx = self.backward_pass(seq)

    eta = eta * np.exp(-epoch * learning_decay)

    cache_attrs = ('cache_b_out', 'cache_b_hid', 'cache_w_out', 'cache_w_hid', 'cache_w_in')
    x_attrs = ('bias_output', 'bias_hidden', 'weights_output', 'weights_hidden', 'weights_input')

    for args in zip(cache_attrs, x_attrs, dx):
        update_rule(*args)
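
As a quick usage note, zip simply pairs up the i-th entries of the three sequences; the sketch below uses made-up string placeholders in place of the gradient arrays returned by backward_pass:

cache_attrs = ('cache_b_out', 'cache_b_hid')
x_attrs = ('bias_output', 'bias_hidden')
dx = ('grad_c', 'grad_b')    # placeholders for the real gradient arrays

print(list(zip(cache_attrs, x_attrs, dx)))
# [('cache_b_out', 'bias_output', 'grad_c'), ('cache_b_hid', 'bias_hidden', 'grad_b')]

If backward_pass ever changes the order of its return values, this pairing silently becomes wrong, which is exactly the sensitivity mentioned above.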

Regarding python - Is there a way to reduce the amount of code for RMSProp, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/39375173/
