
python - MLP classifier in theano settles at a local minimum


I have written an MLP classifier using theano. The training function, which uses the backpropagation algorithm, is as follows:

self.weights=[theano.shared(numpy.random.random((network.architecture[i+1],network.architecture[i]))) for i in range(len(network.architecture)-1)]
self.bias=[theano.shared(numpy.random.random(network.architecture[i+1])) for i in range(len(network.architecture)-1)]
self.layers=network.layers
self.prev_rate=[theano.shared(numpy.zeros((network.architecture[i+1],network.architecture[i]))) for i in range(len(network.architecture)-1)]+[theano.shared(numpy.zeros(network.architecture[i+1])) for i in range(len(network.architecture)-1)]
prediction=T.dmatrix()
output=T.dmatrix()
reg_lambda=T.dscalar()
alpha=T.dscalar()
momentum=T.dscalar()
cost=T.nnet.categorical_crossentropy(prediction,output).mean()
for i,j in zip(self.weights,self.bias):
    cost+=T.sum(i**2)*reg_lambda
    cost+=T.sum(j**2)*reg_lambda
parameters=self.weights+self.bias
rates=[(alpha*T.grad(cost,parameter)+momentum*prev_rate) for parameter,prev_rate in zip(parameters,self.prev_rate)]
updates=[(weight,weight-rate) for weight,rate in zip(parameters,rates)]+[(prev_rate,rate) for prev_rate,rate in zip(self.prev_rate,rates)]
self.backprop=theano.function([prediction,output,reg_lambda,alpha,momentum],cost,updates=updates)

I tried training the classifier on the XOR problem. The setup is:

network=FeedForwardNetwork([2,2,2])
network.initialize()
network.train(numpy.array([[0.,0.],[0.,1.],[1.,0.],[1.,1.],[0.,0.],[0.,1.],[1.,0.],[1.,1.]]),numpy.array([[0.,1.],[1.,0.],[1.,0.],[0.,1.],[0.,1.],[1.,0.],[1.,0.],[0.,1.]]),alpha=0.01,epochs=1000000000000000,momentum=0.9)
print network.predict(numpy.array([[1.,0.]]))
print network.predict(numpy.array([[0.,0.]]))

The initialize() method simply compiles all the functions in the backend, i.e. the backprop function, the forward-pass function used to compute predictions, and a few other theano functions. Now, when I run this code, training settles at a local minimum:

0.69314718056
0.69314718056
0.69314718056
0.69314718056
0.69314718056
...

Training starts with a loss of about 0.92. It steadily decreases to the value above and then stays there. I have tried changing the values of alpha and momentum. What am I doing wrong?

P.S. The full code is here: network.py

import theano
import theano.tensor as T
import numpy
from layers import *
from backend import NetworkBackend

class Network:

    def __init__(self,architecture):
        self.architecture=architecture
        self.layers=[]
        self.weights=[]
        self.bias=[]

    def __str__(self):
        banner=''
        for i in range(len(self.weights)):
            banner+=str(self.weights[i])+'\n'
            banner+=str(self.bias[i])+'\n'
        return banner

class FeedForwardNetwork(Network):

    def initialize(self):
        self.layers.append(InputLayer(units=self.architecture[0]))
        for i in range(1,len(self.architecture[:-1])):
            self.layers.append(SigmoidLayer(units=self.architecture[i]))
        self.layers.append(SoftmaxLayer(units=self.architecture[-1]))
        self.backend=NetworkBackend(self)

    def predict(self,inputs):
        return self.backend.activate(inputs)

    def train(self,X,y,alpha=100,reg_lambda=0.0001,epochs=10000,momentum=0.9):
        cost=1
        while cost>0.01 and epochs:
            prediction=self.predict(X)
            cost=self.backend.backprop(prediction,y,reg_lambda,alpha,momentum)
            print cost
            epochs-=1


if __name__=='__main__':
    network=FeedForwardNetwork([2,2,2])
    network.initialize()
    network.train(numpy.array([[0.,0.],[0.,1.],[1.,0.],[1.,1.],[0.,0.],[0.,1.],[1.,0.],[1.,1.]]),numpy.array([[0.,1.],[1.,0.],[1.,0.],[0.,1.],[0.,1.],[1.,0.],[1.,0.],[0.,1.]]),alpha=0.01,epochs=1000000000000000,momentum=0.9)
    print network.predict(numpy.array([[1.,0.]]))
    print network.predict(numpy.array([[0.,0.]]))

layers.py

import theano
import theano.tensor as T
import scipy
from backend import ComputationBackend

class Layer:

    def __init__(self,units):
        self.units=units
        self.backend=ComputationBackend()

    def __str__(self):
        banner=self.__class__.__name__
        banner+=" Units:%d"%self.units
        return banner

class SigmoidLayer(Layer):

    def forwardPass(self,inputs):
        return self.backend.sigmoid(inputs)


class InputLayer(Layer):

    def forwardPass(self,inputs):
        return inputs

class SoftmaxLayer(Layer):

    def forwardPass(self,inputs):
        return self.backend.softmax(inputs)

backend.py

import theano
import theano.tensor as T
import numpy

class NetworkBackend:

    def __init__(self,network):

        # initialize shared variables
        self.weights=[theano.shared(numpy.random.random((network.architecture[i+1],network.architecture[i]))) for i in range(len(network.architecture)-1)]
        self.bias=[theano.shared(numpy.random.random(network.architecture[i+1])) for i in range(len(network.architecture)-1)]
        self.layers=network.layers
        self.prev_rate=[theano.shared(numpy.zeros((network.architecture[i+1],network.architecture[i]))) for i in range(len(network.architecture)-1)]+[theano.shared(numpy.zeros(network.architecture[i+1])) for i in range(len(network.architecture)-1)]

        # activation for network layers
        inputs=T.dmatrix()
        temp=self.layers[0].forwardPass(inputs)
        for i in range(1,len(self.layers[:-1])):
            temp=self.layers[i].forwardPass(T.dot(temp,self.weights[i-1].transpose())+self.bias[i-1])
        output=self.layers[-1].forwardPass(T.dot(temp,self.weights[-1].transpose())+self.bias[-1])
        self.activate=theano.function([inputs],output)

        # regularized cross-entropy cost and momentum-based updates
        prediction=T.dmatrix()
        output=T.dmatrix()
        reg_lambda=T.dscalar()
        alpha=T.dscalar()
        momentum=T.dscalar()
        cost=T.nnet.categorical_crossentropy(prediction,output).mean()
        for i,j in zip(self.weights,self.bias):
            cost+=T.sum(i**2)*reg_lambda
            cost+=T.sum(j**2)*reg_lambda
        parameters=self.weights+self.bias
        rates=[(alpha*T.grad(cost,parameter)+momentum*prev_rate) for parameter,prev_rate in zip(parameters,self.prev_rate)]
        updates=[(weight,weight-rate) for weight,rate in zip(parameters,rates)]+[(prev_rate,rate) for prev_rate,rate in zip(self.prev_rate,rates)]
        self.backprop=theano.function([prediction,output,reg_lambda,alpha,momentum],cost,updates=updates)


class ComputationBackend:

    def __init__(self):

        # sigmoid activation
        self.sigmoid=T.nnet.sigmoid

        # softmax activation
        self.softmax=T.nnet.softmax

Best Answer

This is probably caused by the parameter initialization. The following code example implements a basic XOR learner using a neural network with a single hidden layer.

import numpy
import theano
import theano.tensor as tt


def compile(input_size, hidden_size):
    w_h = theano.shared(numpy.random.standard_normal(size=(input_size, hidden_size)).astype(theano.config.floatX))
    b_h = theano.shared(numpy.zeros((hidden_size,), dtype=theano.config.floatX))
    w_y = theano.shared(numpy.zeros((hidden_size,), dtype=theano.config.floatX))
    b_y = theano.shared(numpy.zeros(1, dtype=theano.config.floatX), broadcastable=(True,))
    x = tt.matrix()
    z = tt.ivector()
    learning_rate = tt.scalar()
    h = tt.tanh(tt.dot(x, w_h) + b_h)
    y = tt.nnet.sigmoid(tt.dot(h, w_y) + b_y)
    cost = tt.nnet.binary_crossentropy(y, z).mean()
    updates = [(p, p - learning_rate * tt.grad(cost, p)) for p in [w_h, b_h, w_y, b_y]]
    return theano.function([x, z, learning_rate], outputs=cost, updates=updates), theano.function([x], outputs=y)


def main():
    numpy.random.seed(5)
    train, test = compile(2, 2)
    for _ in xrange(100000):
        print train([[1, 1], [1, 0], [0, 1], [0, 0]], [0, 1, 1, 0], 0.1)
    print test([[1, 1], [1, 0], [0, 1], [0, 0]])


main()

Note the random number generator seed value. With a seed of 5 the learner converges to a good solution and, given enough time, appears to be heading toward a perfect one. However, if the seed is changed to 1, the network gets stuck in a local optimum; it is able to discriminate in the second dimension but not the first.

A different random initialization approach, i.e. one that is less sensitive to the RNG seed, might produce better results.
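
A minimal sketch of what such an initialization could look like is shown below. The zero-mean, scaled-uniform ("Glorot/Xavier"-style) range and the init_weight helper are illustrative assumptions, not something prescribed by the answer above.

import numpy
import theano


def init_weight(fan_in, fan_out, rng=numpy.random):
    # Zero-mean, scaled-uniform ("Glorot/Xavier"-style) initialization.
    # The sqrt(6 / (fan_in + fan_out)) bound is a common heuristic, not part of the answer above.
    limit = numpy.sqrt(6.0 / (fan_in + fan_out))
    values = rng.uniform(low=-limit, high=limit, size=(fan_in, fan_out))
    return theano.shared(values.astype(theano.config.floatX))


# hypothetical usage inside compile() above: w_h = init_weight(input_size, hidden_size)

Because the values are symmetric around zero and scaled to the layer sizes, different RNG seeds start the hidden units in more comparable regimes, which is the kind of reduced seed sensitivity suggested above.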

Regarding "python - MLP classifier in theano settles at a local minimum", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/34397120/
