python - TensorFlow: Invalid Calculations

I am new to TensorFlow and am trying to understand how the computation graph works. I am working through the very basic linear regression example on the TensorFlow website. I have the following code:

import numpy as np
import tensorflow as tf


def manual_loss(_w, _b, _x, _y):
    _loss = 0.0
    n = len(_x)
    for j in range(n):
        _loss += (_w * _x[j] + _b - _y[j]) ** 2
    return _loss


def manual_grads(_w, _b, _x, _y):
    n = len(_x)
    g_w = 0.0
    g_b = 0.0
    for j in range(n):
        g_w += 2.0 * (_w * _x[j] + _b - _y[j]) * _x[j]
        g_b += 2.0 * (_w * _x[j] + _b - _y[j])
    return g_w, g_b


# Model parameters
W = tf.Variable([0.3], dtype=tf.float32)
b = tf.Variable([-0.3], dtype=tf.float32)
_W = 0.3
_b = -0.3
# Model input and output
x = tf.placeholder(tf.float32)
linear_model = W * x + b
y = tf.placeholder(tf.float32)
# loss
loss = tf.reduce_sum(tf.square(linear_model - y))  # sum of the squares
grads = tf.gradients(loss, [W, b])
# training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
lr = 0.001
for i in range(1000):
    results = sess.run([loss, W, b, grads], {x: x_train, y: y_train})
    loss_value = results[0]
    W_value = results[1]
    b_value = results[2]
    grad_W = results[3][0]
    grad_b = results[3][1]
    manual_loss_value = manual_loss(_w=_W, _b=_b, _x=x_train, _y=y_train)
    manual_grad_W, manual_grad_b = manual_grads(_w=_W, _b=_b, _x=x_train, _y=y_train)
    new_W_value = W_value - lr * grad_W
    new_b_value = b_value - lr * grad_b
    W = tf.assign(W, value=new_W_value)
    b = tf.assign(b, value=new_b_value)
    print("***********************")
    print("loss={0}".format(loss_value))
    print("manual_loss_value={0}".format(manual_loss_value))
    print("W={0}".format(W_value))
    print("b={0}".format(b_value))
    print("manual_W={0}".format(_W))
    print("manual_b={0}".format(_b))
    print("grad_W={0}".format(grad_W))
    print("grad_b={0}".format(grad_b))
    print("manual_grad_W={0}".format(manual_grad_W))
    print("manual_grad_b={0}".format(manual_grad_b))
    print("***********************")
    _W -= lr * manual_grad_W
    _b -= lr * manual_grad_b

I am simply trying to apply gradient descent to the simple (w*x + b - y)^2 model. I am deliberately not using TensorFlow's built-in optimizers, because I want to understand the underlying graph-update mechanics. To check whether the system computes the correct gradients, I also implemented my own loss and gradient computations for linear regression.
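For reference, manual_loss and manual_grads simply evaluate the closed-form loss and its derivatives for this model (restated here from the code above):

loss(w, b) = sum_j (w*x_j + b - y_j)^2
dloss/dw   = sum_j 2*(w*x_j + b - y_j)*x_j
dloss/db   = sum_j 2*(w*x_j + b - y_j)

At the starting point w=0.3, b=-0.3 with the training data above, these evaluate to loss=23.66, dloss/dw=52.0 and dloss/db=15.6, which matches the first block of output. Unfortunately, after that TensorFlow does not seem to compute the loss and the gradients as expected. Here is the output I get: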

***********************
loss=23.65999984741211
manual_loss_value=23.659999999999997
W=[ 0.30000001]
b=[-0.30000001]
manual_W=0.3
manual_b=-0.3
grad_W=[ 52.]
grad_b=[ 15.59999943]
manual_grad_W=52.0
manual_grad_b=15.599999999999998
***********************
***********************
loss=23.65999984741211
manual_loss_value=20.81095744
W=[ 0.24800001]
b=[-0.31560001]
manual_W=0.248
manual_b=-0.3156
grad_W=[ 52.]
grad_b=[ 15.59999943]
manual_grad_W=48.568
manual_grad_b=14.4352
***********************

As you can see, in the second iteration TensorFlow computes the wrong loss value and wrong gradients for W and b; in fact, they are the same as in the first iteration. In some runs it starts deviating from the true values at the third or fourth iteration, not always the second. Am I doing something wrong here? Once I have the values of W and b and their gradients, I update them in the training loop with tf.assign(). Is that where the problem lies? Is this the wrong way to update variables in TensorFlow? Running into a problem like this right at the start is quite discouraging.

Best Answer

I think the problem is your use of tf.assign. tf.assign only creates an assign node in the graph, and that node must actually be run for the assignment to take effect. In your loop, W = tf.assign(W, value=new_W_value) keeps adding new assign nodes (and rebinds the Python name W) instead of running one fixed update node, so the variable is not updated when you expect. You should change it to something like:

assign_W_placeholder = tf.placeholder(tf.float32)
assign_b_placeholder = tf.placeholder(tf.float32)
assign_W_node = tf.assign(W, assign_W_placeholder)
assign_b_node = tf.assign(b, assign_b_placeholder)

Then, inside the for loop, run these pre-built assign nodes with something like:

sess.run(assign_W_node, feed_dict={assign_W_placeholder: new_W_value})
sess.run(assign_b_node, feed_dict={assign_b_placeholder: new_b_value})

With these changes, the TensorFlow and manual computations give identical results.
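As an aside, the placeholder round-trip can be avoided entirely by expressing the update in the graph itself. A minimal sketch, assuming the same loss, grads, lr and session as above (tf.assign_sub subtracts a value from a variable in place):

update_W = tf.assign_sub(W, lr * grads[0])
update_b = tf.assign_sub(b, lr * grads[1])
train_step = tf.group(update_W, update_b)

for i in range(1000):
    # one run computes the gradients and applies both updates
    sess.run(train_step, {x: x_train, y: y_train})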

Full code:

import numpy as np
import tensorflow as tf


def manual_loss(_w, _b, _x, _y):
    _loss = 0.0
    n = len(_x)
    for j in range(n):
        _loss += (_w * _x[j] + _b - _y[j]) ** 2
    return _loss


def manual_grads(_w, _b, _x, _y):
    n = len(_x)
    g_w = 0.0
    g_b = 0.0
    for j in range(n):
        g_w += 2.0 * (_w * _x[j] + _b - _y[j]) * _x[j]
        g_b += 2.0 * (_w * _x[j] + _b - _y[j])
    return g_w, g_b


# Model parameters
W = tf.Variable([0.3], dtype=tf.float32)
b = tf.Variable([-0.3], dtype=tf.float32)
_W = 0.3
_b = -0.3
# Model input and output
x = tf.placeholder(tf.float32)
linear_model = W * x + b
y = tf.placeholder(tf.float32)

# assign nodes: built once, outside the training loop
assign_W_placeholder = tf.placeholder(tf.float32)
assign_b_placeholder = tf.placeholder(tf.float32)
assign_W_node = tf.assign(W, assign_W_placeholder)
assign_b_node = tf.assign(b, assign_b_placeholder)

# loss
loss = tf.reduce_sum(tf.square(linear_model - y))  # sum of the squares
grads = tf.gradients(loss, [W, b])
# training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
lr = 0.001
for i in range(1000):
    results = sess.run([loss, W, b, grads], {x: x_train, y: y_train})
    loss_value = results[0]
    W_value = results[1]
    b_value = results[2]
    grad_W = results[3][0]
    grad_b = results[3][1]
    manual_loss_value = manual_loss(_w=_W, _b=_b, _x=x_train, _y=y_train)
    manual_grad_W, manual_grad_b = manual_grads(_w=_W, _b=_b, _x=x_train, _y=y_train)
    new_W_value = W_value - lr * grad_W
    new_b_value = b_value - lr * grad_b
    # run the assign nodes so the updates actually take effect
    sess.run([assign_W_node, assign_b_node],
             feed_dict={assign_W_placeholder: new_W_value, assign_b_placeholder: new_b_value})
    print("***********************")
    print("loss={0}".format(loss_value))
    print("manual_loss_value={0}".format(manual_loss_value))
    print("W={0}".format(W_value))
    print("b={0}".format(b_value))
    print("manual_W={0}".format(_W))
    print("manual_b={0}".format(_b))
    print("grad_W={0}".format(grad_W))
    print("grad_b={0}".format(grad_b))
    print("manual_grad_W={0}".format(manual_grad_W))
    print("manual_grad_b={0}".format(manual_grad_b))
    print("***********************")
    _W -= lr * manual_grad_W
    _b -= lr * manual_grad_b
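
For completeness: the built-in optimizer that the question deliberately avoids performs this same update internally. A minimal sketch of the equivalent training loop, assuming the same graph, training data and session as above:

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_step = optimizer.minimize(loss)

sess.run(tf.global_variables_initializer())  # re-initialize W and b for a fresh run
for i in range(1000):
    sess.run(train_step, {x: x_train, y: y_train})

This converges toward W=-1, b=1, which is the exact least-squares solution for this training data.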

Regarding python - TensorFlow: Invalid Calculations, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/44782544/
