I'm trying to compute the derivative of a neural network with 2 or more hidden layers with respect to its inputs. So this is not "standard backpropagation", since I'm not interested in how the output changes as the weights change. And I don't intend to use it to train my network (if that means the backpropagation tag should be removed, let me know, but I suspect what I need is not too different).

The reason I'm interested in the derivative here is that my test set sometimes gives me a matching [x1, x2] : [y] pair, and sometimes an [x1, x2] : [d(y)/dx1] or [x1, x2] : [d(y)/dx2] pair. I then train my network with a particle swarm algorithm.
I like diagrams, so here is my network in brief:

[network diagram omitted: inputs x1 and x2 feed the hidden layer(s), which feed a single output y]
What I want is for the compute_derivative method to return a numpy array with one entry per input, i.e. of the form [[d(y)/dx1, d(y)/dx2]].
Here is my attempt so far, but I can't seem to end up with an array that matches my number of inputs, and I can't figure out what I'm doing wrong.
def compute_derivative(self):
    """Computes the network derivative and returns an array with the change in output with respect to each input"""
    self.compute_layer_derivative(0)
    for l in np.arange(1,self.size):
        dl = self.compute_layer_derivative(l)
        dprev = self.layers[l-1].derivatives
        self.output_derivatives = dl.T.dot(dprev)
    return self.output_derivatives

def compute_layer_derivative(self, l_id):
    wL = self.layers[l_id].w
    zL = self.layers[l_id].output
    daL = self.layers[l_id].f(zL, div=1)
    daLM = np.repeat(daL,wL.shape[0], axis=0)
    self.layers[l_id].derivatives = np.multiply(daLM,wL)
    return self.layers[l_id].derivatives
In case you want to run the whole code, I've made a cut-down, commented version that works with copy-paste (see below). Thanks for your help!
# -*- coding: utf-8 -*-
import numpy as np

def sigmoid(x, div = 0):
    if div == 1: #first derivative f'
        return np.exp(-x) / (1. + np.exp(-x))**2.
    if div == 2: # second derivative f''
        return - np.exp(x) * (np.exp(x) - 1) / (1. + np.exp(x))**3.
    return 1. / (1. + np.exp(-x)) # f

def linear(x, div = 0):
    if div == 1: #first derivative f'
        return np.full(x.shape,1)
    if div == 2: # second derivative f''
        return np.zeros(x.shape)
    return x # f

class Layer():
    def __init__(self, in_n, h_n, activation, bias = True, debug = False):
        self.w = 2*np.random.random((in_n, h_n)) - 1 # synaptic weights with 0 mean
        self.f = activation
        self.output = None
        self.activation = None
        self.derivatives = np.array([[None for i in range(in_n+1)]]) #+1 for global dev
        if bias:
            self.b = 2*np.random.random((1, h_n)) - 1
        else:
            self.b = None
        if debug:
            self.w = np.full((in_n, h_n), 1.)
            if self.b is not None: self.b = np.full((1, h_n), 1.)

    def compute(self, inputs):
        if self.w.shape[0] != inputs.shape[1]:
            raise ValueError("Inputs dimensions do not match test data dim.")
        if self.b is None:
            self.output = np.dot(inputs, self.w)
        else:
            self.output = np.dot(inputs, self.w) + self.b
        self.activation = self.f(self.output)

class NeuralNetwork():
    def __init__(self, nb_layers, in_NN, h_density, out_NN, debug = False):
        self.debug = debug
        self.layers = []
        self.size = nb_layers+1
        self.output_derivatives = None
        self.output = None
        self.in_N = in_NN
        self.out_N = out_NN
        if debug:
            print("Input Layer with {} inputs.".format(in_NN))

        #create hidden layers
        current_inputs = in_NN
        for l in range(self.size - 1):
            self.layers.append(Layer(current_inputs, h_density, sigmoid, debug = debug))
            current_inputs = h_density
            if debug:
                print("Hidden Layer {} with {} inputs and {} neurons.".format(l+1, self.layers[l].w.shape[0], self.layers[l].w.shape[1]))

        #create output layer
        self.layers.append(Layer(current_inputs, out_NN, linear, bias=False, debug = debug))
        if debug:
            print("Output Layer with {} inputs and {} outputs.".format(self.layers[-1].w.shape[0], self.layers[-1].w.shape[1]))
            #print("with w: {}".format(self.layers[l].w))
            print("ANN size = {}, with {} Layers\n\n".format( self.size, len(self.layers)))

    def compute(self, point):
        curr_inputs = point
        for l in range(self.size):
            self.layers[l].compute(curr_inputs)
            curr_inputs = self.layers[l].activation
        self.output = curr_inputs
        if self.debug: print("ANN output: ",curr_inputs)
        return self.output

    def compute_derivative(self, order, point):
        """ If the network has not been computed, compute it before getting
            the derivative. This might be a bit expensive..."""
        if self.layers[self.size-1].output is None:
            self.compute(point)

        #Compute output layer total derivative
        self.compute_layer_derivative(self.size-1, order)
        self.output_derivatives = self.get_partial_derivatives_to_outputs(self.size-1)
        print(self.output_derivatives)

        for l in np.arange(1,self.size):
            l = self.size-1 - l
            self.compute_layer_derivative(l, order)
            if l > 0: #if we are not at first hidden layer compute the total derivative
                self.output_derivatives *= self.get_total_derivative_to_inputs(l)
            else: # get each output derivative with respect to each input
                backprop_dev_to_outs = np.repeat(np.matrix(self.output_derivatives),self.in_N, axis=0).T
                dev_to_inputs = np.repeat(np.matrix(self.get_partial_derivatives_to_inputs(l)).T,self.out_N, axis=1).T
                self.output_derivatives = np.multiply(backprop_dev_to_outs, dev_to_inputs)

        if self.debug: print("output derivatives: ",self.output_derivatives)
        return self.output_derivatives

    def get_total_derivative(self,l_id):
        return np.sum(self.get_partial_derivatives_to_inputs(l_id))

    def get_total_derivative_to_inputs(self,l_id):
        return np.sum(self.get_partial_derivatives_to_inputs(l_id))

    def get_partial_derivatives_to_inputs(self,l_id):
        return np.sum(self.layers[l_id].derivatives, axis=1)

    def get_partial_derivatives_to_outputs(self,l_id):
        return np.sum(self.layers[l_id].derivatives, axis=0)

    def compute_layer_derivative(self, l_id, order):
        if self.debug: print("\n\ncurrent layer is ", l_id)
        wL = self.layers[l_id].w
        zL = self.layers[l_id].output
        daL = self.layers[l_id].f(zL, order)
        daLM = np.repeat(daL,wL.shape[0], axis=0)
        self.layers[l_id].derivatives = np.multiply(daLM,wL)
        if self.debug:
            print("L_id: {}, a_f: {}".format(l_id, self.layers[l_id].f))
            print("L_id: {}, dev: {}".format(l_id, self.get_total_derivative_to_inputs(l_id)))
        return self.layers[l_id].derivatives

#nb_layers, in_NN, h_density, out_NN, debug = False
nn = NeuralNetwork(1,2,2,1, debug= True)

nn.compute(np.array([[1,1]])) # head value
nn.compute_derivative(1,np.array([[1,1]])) #first derivative
Answer, edited based on Sirguy's reply:
# Here we assume that the layer has sigmoid activation
# (sigmoid_d denotes the first derivative of the sigmoid, i.e. sigmoid(x, div=1) above)
def Jacobian(x = np.array([[1,1]]), w = np.array([[1,1],[1,1]]), b = np.array([[1,1]])):
    return sigmoid_d(x.dot(w) + b) * w # J(S, x)

In the case of a network with 2 hidden layers with sigmoid activation and an output layer that also has sigmoid activation (so that we can reuse the same function as above), we have:

J_L1 = Jacobian(x = np.array([[1,1]])) # where [1,1] are the inputs to the network (i.e. the neuron values of the input layer)
J_L2 = Jacobian(x = np.array([[3,3]])) # where [3,3] are the neuron values of layer 1 before activation
# in the output layer the weights and biases are adjusted, as there is 1 neuron rather than 2
J_Lout = Jacobian(x = np.array([[2.90514825, 2.90514825]]), w = np.array([[1],[1]]), b = np.array([[1]])) # where [2.905, 2.905] are the neuron values of layer 2 before activation

J_out_to_in = J_Lout.T.dot(J_L2).dot(J_L1)
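For completeness, here is a minimal sketch (assuming the same all-ones debug weights as above) of the forward pass that produces the pre-activation values used in these Jacobian calls:

import numpy as np

def sigmoid(x):
    return 1. / (1. + np.exp(-x))

x = np.array([[1., 1.]])
w = np.array([[1., 1.], [1., 1.]])
b = np.array([[1., 1.]])

z1 = x.dot(w) + b  # [[3., 3.]] -- pre-activation values of layer 1
a1 = sigmoid(z1)   # [[0.95257413, 0.95257413]]
z2 = a1.dot(w) + b # [[2.90514825, 2.90514825]] -- pre-activation values of layer 2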
Best Answer
Here is how I derived what your example should give:
# i'th component of the vector-valued function S(x) (the sigmoid-weighted layer)
S_i(x) = 1 / (1 + exp(-(w_i . x + b_i))) # . stands for matrix multiplication here
# i'th component of the vector-valued function L(x) (the linear-weighted layer)
L_i(x) = w_i . x # different weights than S

# as it happens our L(x) outputs 1 value, so it is in fact a scalar function
F(x) = L(S(x)) # final output value

# derivative of F, denoted J(F, x), meaning the Jacobian of the function F evaluated at x
J(F, x) = J(L(S(x)), x) = J(L, S(x)) . J(S, x) # chain rule for multivariable, vector-valued functions

# First, what's the derivative of L?
J(L, S(x)) = L # i.e. the weight matrix of the linear layer
This is a mildly surprising result, but you can verify it yourself by computing the partial derivatives of M . x for some random matrix M. If you compute all the derivatives and put them into the Jacobian, you get M back.
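Here is a minimal numpy sketch of that check, building the Jacobian of f(x) = M . x column by column with finite differences:

import numpy as np

M = np.random.random((3, 3))
x = np.random.random(3)
eps = 1e-6

J = np.empty((3, 3))
for j in range(3):
    dx = np.zeros(3)
    dx[j] = eps # perturb one coordinate at a time
    J[:, j] = (M.dot(x + dx) - M.dot(x)) / eps

print(np.allclose(J, M)) # True: the Jacobian of a linear map is its matrix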
# Now what's the derivative of S? Compute via the formula
d(S_i(x))/dx_j = w_ij * exp(-(w_i . x + b_i)) / (1 + exp(-(w_i . x + b_i)))**2 # w_ij is the j'th component of the vector w_i

# For the gradient of S_i (which is just one component of S), we get
J(S_i, x) = (exp(-(w_i . x + b_i)) / (1 + exp(-(w_i . x + b_i)))**2) * w_i # remember this is a vector because w_i is a vector
Now take the debug example where everything is 1:
w_i = b = x = [1, 1]
#define a to make this less cluttered
a = exp(-(w_i . x + b)) = exp(-3)
J(S_i, x) = a / (1 + a)^2 * [1, 1]
J(S, x) = a / (1 + a)^2 * [[1, 1], [1, 1]]
J(L, S(x)) = [1, 1] #Doesn't depend on S(x)
J(F, x) = J(L, S(x)) . J(S, x) = (a / (1 + a)**2) * [1, 1] . [[1, 1], [1, 1]]
J(F, x) = (a / (1 + a)**2) * [2, 2] = (2 * a / (1 + a)**2) * [1, 1]
J(F, x) = [0.0903533, 0.0903533]
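This hand computation is easy to verify numerically; here is a minimal numpy sketch of the same all-ones setup:

import numpy as np

w = np.array([[1., 1.], [1., 1.]])   # sigmoid-layer weights
b = np.array([1., 1.])               # sigmoid-layer biases
v = np.array([1., 1.])               # linear output-layer weights

x = np.array([1., 1.])
z = w.dot(x) + b                     # [3., 3.]
a = np.exp(-z)                       # a = exp(-3) in both components
J_S = (a / (1. + a)**2)[:, None] * w # J(S, x): row i of w scaled by sigmoid'(z_i)
J_F = v.dot(J_S)                     # chain rule: J(L, S(x)) . J(S, x)
print(J_F)                           # [0.09035332 0.09035332]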
Hopefully this helps you reorganize your code a bit. You can't compute the derivative here using only the value of w_i . x; you need w_i and x separately to compute everything correctly.
EDIT
Since I found this stuff interesting, here is my Python script for computing the value and first derivative of a neural network:
import numpy as np

class Layer:
    def __init__(self, weights_matrix, bias_vector, sigmoid_activation = True):
        self.weights_matrix = weights_matrix
        self.bias_vector = bias_vector
        self.sigmoid_activation = sigmoid_activation

    def compute_value(self, x_vector):
        result = np.add(np.dot(self.weights_matrix, x_vector), self.bias_vector)
        if self.sigmoid_activation:
            result = np.exp(-result)
            result = 1 / (1 + result)
        return result

    def compute_value_and_derivative(self, x_vector):
        if not self.sigmoid_activation:
            return (self.compute_value(x_vector), self.weights_matrix)
        temp = np.add(np.dot(self.weights_matrix, x_vector), self.bias_vector)
        temp = np.exp(-temp)
        value = 1.0 / (1 + temp)
        temp = temp / (1 + temp)**2
        #pre-multiplying by a diagonal matrix multiplies each row by
        #the corresponding diagonal element
        #(1st row with 1st value, 2nd row with 2nd value, etc...)
        jacobian = np.dot(np.diag(temp), self.weights_matrix)
        return (value, jacobian)

class Network:
    def __init__(self, layers):
        self.layers = layers

    def compute_value(self, x_vector):
        for l in self.layers:
            x_vector = l.compute_value(x_vector)
        return x_vector

    def compute_value_and_derivative(self, x_vector):
        x_vector, jacobian = self.layers[0].compute_value_and_derivative(x_vector)
        for l in self.layers[1:]:
            x_vector, j = l.compute_value_and_derivative(x_vector)
            jacobian = np.dot(j, jacobian)
        return x_vector, jacobian

#first weights
l1w = np.array([[1,1],[1,1]])
l1b = np.array([1,1])
l2w = np.array([[1,1],[1,1]])
l2b = np.array([1,1])
l3w = np.array([1, 1])
l3b = np.array([0])

nn = Network([Layer(l1w, l1b),
              Layer(l2w, l2b),
              Layer(l3w, l3b, False)])

r = nn.compute_value_and_derivative(np.array([1,1]))
print(r)
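Running this prints roughly (array([1.8962]), array([0.00889, 0.00889])). As a sanity check, the analytic Jacobian can be compared against finite differences; a minimal sketch reusing the nn object defined above:

value, jacobian = nn.compute_value_and_derivative(np.array([1., 1.]))

# perturb each input in turn and compare the finite-difference slope
# with the analytic Jacobian
eps = 1e-6
fd = np.zeros(2)
for j in range(2):
    xp = np.array([1., 1.])
    xp[j] += eps
    fd[j] = (nn.compute_value(xp) - value)[0] / eps

print(np.allclose(fd, jacobian, atol=1e-5)) # should print True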
On python - computing the partial derivatives of a deep neural network with respect to its inputs, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/52185475/