I am trying to implement a recurrent neural network in C, but it does not work. I read some documentation on the internet, but I do not understand the complicated math, so I adapted the computations of a multi-layer perceptron.
For the first few learning steps, the output of my network is a number, but soon the output becomes "not a number" (-1.#IND00).
1. The computations.
My first concern is the computation of the values, the errors and the weight changes.
For a forward link between two neurons N1->N2, I compute:
(value of N2) += (value of N1) * (weight of link N1->N2)
(error of N1) += (error of N2) * (weight of link N1->N2)
and for an output neuron: (error) = (value of neuron) - (target output)
(new weight) = (old weight) - derivative(value of N2) * (error of N2) * (value of N1) * learning_rate
For a recurrent link between two neurons N2->N1, I compute:
(value of N1) += (previous value of N2) * (weight of link N2->N1)
The final value of N1, accumulated from all its forward and recurrent links, is then passed through a sigmoid function (tanh), except for the output neurons.
(error of N2) += (previous error of N1) * (weight of link N2->N1)
(new weight) = (old weight) - derivative(value of N1) * (error of N1) * (previous value of N2) * learning_rate
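A compilable sketch of the per-link rules above (the struct Unit and the three helper functions are hypothetical names used only for illustration; they are not the structures of the program below):
/* Minimal sketch of the per-link formulas (hypothetical names). */
typedef struct {
    double value;   /* activation after the forward pass          */
    double error;   /* error accumulated during the backward pass */
} Unit;
/* Forward pass contribution of the link N1->N2. */
static void forward_value(const Unit *n1, Unit *n2, double w) {
    n2->value += n1->value * w;
}
/* Backward pass contribution of the link N1->N2 (error flows back to N1). */
static void backward_error(Unit *n1, const Unit *n2, double w) {
    n1->error += n2->error * w;
}
/* Weight update of the link N1->N2, assuming n2->value already went through
 * tanh, so derivative(value of N2) = 1 - (value of N2)^2. */
static void update_weight(double *w, const Unit *n1, const Unit *n2,
                          double learning_rate) {
    double d = 1.0 - n2->value * n2->value;
    *w -= d * n2->error * n1->value * learning_rate;
}
The recurrent-link rules are the same formulas applied with the previous-step values value_prev and error_prev of the neurons.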
2. The code.
You can look at the functions rnnset(), rnnsetstart() and rnnlearn() below to see the forward and backward passes; in these 3 functions the recurrent links are disabled (commented lines/blocks). rnnsetstart() must be called before rnnset(), in order to store the values of the last forward pass in the neuron variable value_prev; the intended call order is sketched right after this paragraph.
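One training step then looks like this (taken from the training loop in main() below; netrnn, inc and outc are the variables used there):
rnnsetstart(netrnn);             /* store the values of the last forward pass in value_prev */
rnnset(netrnn, &inc);            /* forward pass with the new input                         */
rnnlearn(netrnn, &outc, 0.03);   /* backward pass and weight update                         */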
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <malloc.h>
#include <stdlib.h>
#include "mlp.h"
typedef struct _neuron NEURON;
struct _neuron {
int layer;
double * weight;
int nbsynapsesin;
NEURON ** synapsesin;
double bias;
double value;
double value_prev;
double error;
double error_prev;
};
typedef struct _rnn RNN;
struct _rnn {
int * layersize;
int nbneurons;
NEURON * n;
};
typedef struct _config CONFIG;
struct _config {
int nbneurons;
int * layersize;
int nbsynapses;
int * synapses;
};
CONFIG * createconfig(int * layersize) {
CONFIG * conf = (CONFIG*)malloc(sizeof(CONFIG));
int i;
conf->nbneurons = 0;
for(i=1; i<layersize[0]+1; i++) conf->nbneurons += layersize[i];
conf->layersize = (int*)malloc((layersize[0]+1)*sizeof(int));
for(i=0; i<layersize[0]+1; i++) conf->layersize[i] = layersize[i];
conf->nbsynapses = 0;
for(i=1; i<layersize[0]; i++) conf->nbsynapses += layersize[i] * layersize[i+1];
conf->nbsynapses *= 2;
conf->synapses = (int*)malloc(2*conf->nbsynapses*sizeof(int));
// creation of the synapses:
int j,k=0,l,k2=0,k3=0;
for(i=1;i<layersize[0];i++) {
k3 += layersize[i];
for(j=0; j<layersize[i]; j++) {
for(l=0; l<layersize[i+1]; l++) {
// forward link/synapse:
conf->synapses[k] = k2+j;
k++;
conf->synapses[k] = k3+l;
k++;
// Recurrent link/synapse:
conf->synapses[k] = k3+l;
k++;
conf->synapses[k] = k2+j;
k++;
}
}
k2 += layersize[i];
}
return conf;
}
void freeconfig(CONFIG* conf) {
free(conf->synapses);
free(conf->layersize);
free(conf);
}
RNN * creaternn(CONFIG * conf) {
RNN * net = (RNN*)malloc(sizeof(RNN));
net->nbneurons = conf->nbneurons;
net->layersize = (int*)malloc((conf->layersize[0]+1)*sizeof(int));
int i;
for(i=0; i<conf->layersize[0]+1; i++) net->layersize[i] = conf->layersize[i];
net->n = (NEURON*)malloc(conf->nbneurons*sizeof(NEURON));
int j=0,k=0;
for(i=0; i<conf->nbneurons; i++) {
if(k==0) { k = conf->layersize[j+1]; j++; }
net->n[i].layer = j-1;
net->n[i].nbsynapsesin = 0;
k--;
}
k=0;
for(i=0; i<conf->nbsynapses; i++) {
k++;
net->n[conf->synapses[k]].nbsynapsesin++;
k++;
}
for(i=0; i<conf->nbneurons; i++) {
net->n[i].weight = (double*)malloc(net->n[i].nbsynapsesin*sizeof(double));
net->n[i].synapsesin = (NEURON**)malloc(net->n[i].nbsynapsesin*sizeof(NEURON*));
net->n[i].nbsynapsesin = 0;
}
// Link the incoming synapses with the neurons:
k=0;
for(i=0; i<conf->nbsynapses; i++) {
k++;
net->n[conf->synapses[k]].synapsesin[net->n[conf->synapses[k]].nbsynapsesin] = &(net->n[conf->synapses[k-1]]);
net->n[conf->synapses[k]].nbsynapsesin++;
k++;
}
// Initialization of the values, errors, and weights:
for(i=0; i<net->nbneurons; i++) {
for(j=0; j<net->n[i].nbsynapsesin; j++) {
net->n[i].weight[j] = 1.0 * (double)rand() / RAND_MAX - 1.0/2;
}
net->n[i].bias = 1.0 * (double)rand() / RAND_MAX - 1.0/2;
net->n[i].value = 0.0;
net->n[i].value_prev = 0.0;
net->n[i].error_prev = 0.0;
net->n[i].error = 0.0;
}
return net;
}
void freernn(RNN * net) {
int i;
for(i=0; i<net->nbneurons; i++) {
free(net->n[i].weight);
free(net->n[i].synapsesin);
}
free(net->n);
free(net->layersize);
free(net);
}
void rnnget(RNN * net, double * out) {
int i,k=0;
for(i=net->nbneurons-1; i>net->nbneurons-net->layersize[net->layersize[0]]-1; i--) { out[k] = net->n[i].value; k++; }
}
void rnnset(RNN * net, double * in) {
int i,j,k;
double v;
NEURON *ni,*nj;
// For each neuron:
for(i=0; i<net->nbneurons; i++) {
ni = &(net->n[i]);
if(i<net->layersize[1]) ni->value = in[i]; else ni->value = ni->bias;
// For each incoming synapse:
for(j=0; j<ni->nbsynapsesin; j++) {
nj = ni->synapsesin[j];
// If it is a forward link/synapse:
if(ni->layer > nj->layer) ni->value += nj->value * ni->weight[j];
// Uncomment the following line to activate recurrent links computation:
//else ni->value += nj->value_prev * ni->weight[j];
}
// If NOT the output layer, then tanh the value:
if(ni->layer != net->layersize[0]-1) ni->value = tanh(ni->value);
}
}
void rnnsetstart(RNN * net) {
int i,j;
NEURON *ni,*nj;
// For each neuron, update value_prev:
for(i=0; i<net->nbneurons; i++) {
ni = &(net->n[i]);
// If NOT the output layer, then the value is already computed by tanh:
if(ni->layer != net->layersize[0]-1) {
ni->value_prev = ni->value;
} else {
ni->value_prev = tanh(ni->value);
}
}
}
void rnnlearn(RNN * net, double * out, double learningrate) {
int i,j,k;
k=0;
NEURON *ni,*nj;
// Initialize error to zero for the output layer:
for(i=net->nbneurons-1; i>=net->nbneurons-net->layersize[net->layersize[0]]; i--) net->n[i].error = 0.0;
// Compute the error for output neurons:
for(i=net->nbneurons-1; i>=0; i--) {
ni = &(net->n[i]);
// If ni is an output neuron, update the error:
if(ni->layer == net->layersize[0]-1) {
ni->error += ni->value - out[k];
k++;
} else {
ni->error = 0.0;
}
// Uncomment the following block to activate recurrent links computation:
/*
// For each incoming synapse from output layer:
for(j=0; j<ni->nbsynapsesin; j++) {
nj = ni->synapsesin[j];
// If neuron nj is in output layer, then update the error:
if(nj->layer == net->layersize[0]-1) nj->error += ni->error_prev * ni->weight[j];
}
*/
}
// Compute error for all other neurons:
for(i=net->nbneurons-1; i>=0; i--) {
ni = &(net->n[i]);
// For each input synapse NOT from output layer:
for(j=0; j<ni->nbsynapsesin; j++) {
nj = ni->synapsesin[j];
// If neuron nj is NOT in output layer, then update the error:
if(nj->layer != net->layersize[0]-1) {
// If it is a forward link/synapse:
if(ni->layer > nj->layer) nj->error += ni->error * ni->weight[j];
// Uncomment the following line to activate recurrent links computation:
//else nj->error += ni->error_prev * ni->weight[j];
}
}
}
// Update weights:
for(i=0; i<net->nbneurons; i++) {
ni = &(net->n[i]);
double wchange,derivative;
// For the output layer:
if(ni->layer == net->layersize[0]-1) {
derivative = ni->error * learningrate;
// For each incoming synapse:
for(j=0; j<ni->nbsynapsesin; j++) {
nj = ni->synapsesin[j];
wchange = derivative;
// If it is a forward link/synapse:
if(ni->layer > nj->layer) wchange *= nj->value;
else wchange *= nj->value_prev;
ni->weight[j] -= wchange;
if(ni->weight[j] > 5) ni->weight[j] = 5;
if(ni->weight[j] < -5) ni->weight[j] = -5;
}
ni->bias -= derivative;
if(ni->bias > 5) ni->bias = 5;
if(ni->bias < -5) ni->bias = -5;
// For the other layers:
} else {
derivative = 1.0 - ni->value * ni->value;
derivative *= ni->error * learningrate;
// For each incoming synapse:
for(j=0; j<ni->nbsynapsesin; j++) {
nj = ni->synapsesin[j];
wchange = derivative;
// If it is a forward link/synapse:
if(ni->layer > nj->layer) wchange *= nj->value;
else wchange *= nj->value_prev;
ni->weight[j] -= wchange;
}
ni->bias -= derivative;
}
}
// Update error_prev:
for(i=0; i<net->nbneurons; i++) net->n[i].error_prev = net->n[i].error;
}
int main() {
srand(time(NULL));
int layersize[] = {1, 25, 12, 1};
int layersize_netrnn[] = { 4, 1, 25, 12, 1 };
mlp * netmlp = create_mlp (4, layersize);
CONFIG * configrnn = createconfig(layersize_netrnn);
RNN * netrnn = creaternn(configrnn);
double inc,outc;
double global_error = 1;
double global_error2 = 1;
int iter,i1=0,i2=0;
//////////////////////////////////////////////////////
// Training of the Multi-Layer Perceptron:
//////////////////////////////////////////////////////
while(global_error > 0.005 && i1<1000) {
for (iter=0; iter < 100; iter++) {
inc = 1.0*rand()/(RAND_MAX+1.0);
outc = inc*inc;
set_mlp(netmlp,&inc);
learn_mlp(netmlp,&outc,0.03);
}
global_error = 0;
int k;
for (k=0; k < 100; k++) {
inc = 1.0*rand()/(RAND_MAX+1.0);
outc = inc*inc;
set_mlp(netmlp,&inc);
get_mlp(netmlp,&outc);
mlp_float desired_out = inc*inc;
global_error += (desired_out - outc)*(desired_out - outc);
}
global_error /= 100;
global_error = sqrt(global_error);
i1++;
}
//////////////////////////////////////////////////////
// Training of the Recurrent Neural Network:
//////////////////////////////////////////////////////
while(global_error2 > 0.005 && i2<1000) {
for (iter=0; iter < 100; iter++) {
inc = 1.0*rand()/(RAND_MAX+1.0);
outc = inc*inc;
rnnsetstart(netrnn);
rnnset(netrnn,&inc);
double outc2;
rnnlearn(netrnn,&outc,0.03);
}
global_error2 = 0;
int k;
for (k=0; k < 100; k++) {
inc = 1.0*rand()/(RAND_MAX+1.0);
outc = inc*inc;
double desired_out = inc*inc;
rnnsetstart(netrnn);
rnnset(netrnn,&inc);
rnnget(netrnn,&outc);
global_error2 += (desired_out - outc)*(desired_out - outc);
}
global_error2 /= 100;
global_error2 = sqrt(global_error2);
if(!isnormal(global_error2)) global_error2 = 100;
i2++;
}
//////////////////////////////////////////////////////
// Performance test for both networks:
//////////////////////////////////////////////////////
global_error = 0;
global_error2 = 0;
int k;
for (k=0; k < 10000; k++) {
inc = 1.0*rand()/(RAND_MAX+1.0);
outc = inc*inc;
double desired_out = inc*inc;
rnnsetstart(netrnn);
rnnset(netrnn,&inc);
rnnget(netrnn,&outc);
global_error2 += (desired_out - outc)*(desired_out - outc);
set_mlp(netmlp,&inc);
get_mlp(netmlp,&outc);
global_error += (desired_out - outc)*(desired_out - outc);
}
global_error /= 10000;
global_error = sqrt(global_error);
printf("\n MLP: i: %5d error: %f",i1,global_error);
global_error2 /= 10000;
global_error2 = sqrt(global_error2);
printf("\n RNN: i: %5d error: %f",i2,global_error2);
free_mlp(netmlp);
freeconfig(configrnn);
freernn(netrnn);
}
And the file mlp.h:
typedef double mlp_float;
typedef struct {
mlp_float *synaptic_weight;
mlp_float *neuron_value;
mlp_float *neuron_error_value;
mlp_float *input_neuron;
mlp_float *output_neuron;
mlp_float *output_error_value;
int *layer_index;
int *layer_size;
int *synapse_index;
int layer_number;
int neuron_number;
int synapse_number;
int input_layer_size;
int output_layer_size;
} mlp;
static mlp_float MAGICAL_WEIGHT_NUMBER = 1.0f;
static mlp_float MAGICAL_LEARNING_NUMBER = 0.4f;
void reinit_mlp(mlp * network) {
int i;
for (i = 0; i < network->synapse_number; i++) {
network->synaptic_weight[i] = /*0.001;*/MAGICAL_WEIGHT_NUMBER * (mlp_float)rand() / RAND_MAX - MAGICAL_WEIGHT_NUMBER/2;
}
}
mlp *create_mlp(int layer_number, int *layer_size) {
mlp *network = (mlp*)malloc(sizeof * network);
network->layer_number = layer_number;
network->layer_size = (int*)malloc(sizeof * network->layer_size * network->layer_number);
network->layer_index = (int*)malloc(sizeof * network->layer_index * network->layer_number);
int i;
network->neuron_number = 0;
for (i = 0; i < layer_number; i++) {
network->layer_size[i] = layer_size[i];
network->layer_index[i] = network->neuron_number;
network->neuron_number += layer_size[i];
}
network->neuron_value = (mlp_float*)malloc(sizeof * network->neuron_value * network->neuron_number);
network->neuron_error_value = (mlp_float*)malloc(sizeof * network->neuron_error_value * network->neuron_number);
network->input_layer_size = layer_size[0];
network->output_layer_size = layer_size[layer_number-1];
network->input_neuron = network->neuron_value;
network->output_neuron = &network->neuron_value[network->layer_index[layer_number-1]];
network->output_error_value = &network->neuron_error_value[network->layer_index[layer_number-1]];
network->synapse_index = (int*)malloc(sizeof * network->synapse_index * (network->layer_number-1));
network->synapse_number = 0;
for (i = 0; i < layer_number - 1; i++) {
network->synapse_index[i] = network->synapse_number;
network->synapse_number += (network->layer_size[i]+1) * network->layer_size[i+1];
}
network->synaptic_weight = (mlp_float*)malloc(sizeof * network->synaptic_weight * network->synapse_number);
for (i = 0; i < network->synapse_number; i++) {
network->synaptic_weight[i] = MAGICAL_WEIGHT_NUMBER * (mlp_float)rand() / RAND_MAX - MAGICAL_WEIGHT_NUMBER/2;
}
return network;
}
void free_mlp (mlp *network) {
free(network->layer_size);
free(network->layer_index);
free(network->neuron_value);
free(network->neuron_error_value);
free(network->synapse_index);
free(network->synaptic_weight);
free(network);
}
void set_mlp (mlp * network, mlp_float *vector) {
if (vector != NULL) {
int i;
for (i = 0; i < network->input_layer_size; i++) {
network->input_neuron[i] = vector[i];
}
}
int i;
int synapse_index;
synapse_index = 0;
for (i = 1; i < network->layer_number; i++) {
int j;
for (j = network->layer_index[i]; j < network->layer_index[i] + network->layer_size[i]; j++) {
mlp_float weighted_sum = 0.0;
int k;
for (k = network->layer_index[i-1]; k < network->layer_index[i-1] + network->layer_size[i-1]; k++) {
weighted_sum += network->neuron_value[k] * network->synaptic_weight[synapse_index];
synapse_index++;
}
weighted_sum += network->synaptic_weight[synapse_index];
synapse_index++;
network->neuron_value[j] = weighted_sum;
if (i != network->layer_number - 1) network->neuron_value[j] = tanh(network->neuron_value[j]);
}
}
}
void get_mlp (mlp *network, mlp_float *vector) {
int i;
for (i = 0; i < network->output_layer_size; i++) {
vector[i] = network->output_neuron[i];
}
}
void learn_mlp (mlp *network, mlp_float *desired_out, mlp_float learning_rate) {
int i;
mlp_float global_error = 0;
int synapse_index = network->synapse_index[network->layer_number-2];
for (i = 0; i < network->output_layer_size; i++) {
network->output_error_value[i] = network->output_neuron[i] - desired_out[i];
int j;
for (j = network->layer_index[network->layer_number-2]; j < network->layer_index[network->layer_number-2] + network->layer_size[network->layer_number-2]; j++) {
mlp_float weightChange;
weightChange = learning_rate * network->output_error_value[i] * network->neuron_value[j];
network->synaptic_weight[synapse_index] -= weightChange;
if (network->synaptic_weight[synapse_index] > 5) network->synaptic_weight[synapse_index] = 5;
if (network->synaptic_weight[synapse_index] < -5) network->synaptic_weight[synapse_index] = -5;
synapse_index++;
}
mlp_float weightChange;
weightChange = learning_rate * network->output_error_value[i];
network->synaptic_weight[synapse_index] -= weightChange;
if (network->synaptic_weight[synapse_index] > 5) network->synaptic_weight[synapse_index] = 5;
if (network->synaptic_weight[synapse_index] < -5) network->synaptic_weight[synapse_index] = -5;
synapse_index++;
}
for (i = network->layer_number - 2; i > 0; i--) {
int j;
int jj= 0;
int synapse_index = network->synapse_index[i-1];
for (j = network->layer_index[i]; j < network->layer_index[i] + network->layer_size[i]; j++,jj++) {
int k;
int synapse_index2 = network->synapse_index[i] + jj;
network->neuron_error_value[j] = 0;
for (k = network->layer_index[i+1]; k < network->layer_index[i+1] + network->layer_size[i+1]; k++) {
network->neuron_error_value[j] += network->synaptic_weight[synapse_index2] * network->neuron_error_value[k];
synapse_index2+=network->layer_size[i]+1;
}
for (k = network->layer_index[i-1]; k < network->layer_index[i-1] + network->layer_size[i-1]; k++) {
mlp_float weightChange;
weightChange = 1.0 - network->neuron_value[j] * network->neuron_value[j];
weightChange *= network->neuron_error_value[j] * learning_rate;
weightChange *= network->neuron_value[k];
network->synaptic_weight[synapse_index] -= weightChange;
synapse_index++;
}
mlp_float weightChange;
weightChange = 1.0 - network->neuron_value[j] * network->neuron_value[j];
weightChange *= network->neuron_error_value[j] * learning_rate;
network->synaptic_weight[synapse_index] -= weightChange;
synapse_index++;
}
}
}
void get_mlp_inputs (mlp *network, mlp_float *vector) {
if (vector != NULL) {
int i;
for (i = 0; i < network->input_layer_size; i++) {
vector[i] = network->input_neuron[i];
}
}
}
Best answer
Regarding the computation of the recurrent links, I finally found a document. If I understood it correctly, a recurrent link between two neurons N1<-N2 should be computed by:
(value of N1) += (previous value of N2) * (weight of link N1<-N2)
No error backpropagation through recurrent links
(new weight) = (old weight) - derivative(value of N1) * (error of N1) * (previous value of N2) * learning_rate
Another problem was that tanh was applied to the value of the input neurons, whereas the value of an input neuron should not be modified.
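A minimal sketch of this corrected recurrent-link rule (hypothetical names; the actual implementation is in rnnset() and rnnlearn() below):
/* Sketch of the corrected recurrent-link rule (hypothetical names). */
typedef struct {
    double value;        /* activation of the current time step  */
    double value_prev;   /* activation of the previous time step */
    double error;        /* error of the current time step       */
} Node;
/* Forward contribution of the recurrent link N1<-N2: only the
 * previous-step value of N2 is used. */
static void recurrent_forward(Node *n1, const Node *n2, double w) {
    n1->value += n2->value_prev * w;
}
/* Weight update of the recurrent link N1<-N2. No error is backpropagated
 * through the link; its weight is adjusted from the error of N1 and the
 * previous value of N2. */
static void recurrent_weight_update(double *w, const Node *n1,
                                    const Node *n2, double learning_rate) {
    double d = 1.0 - n1->value * n1->value;   /* tanh derivative */
    *w -= d * n1->error * n2->value_prev * learning_rate;
}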
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <malloc.h>
#include <stdlib.h>
#include "mlp.h"
typedef struct _neuron NEURON;
struct _neuron {
int layer;
double * weight; // table of weights for incoming synapses
int nbsynapsesin; // number of incoming synapses
NEURON ** synapsesin; // table of pointer to the neurons from
// which are coming the synapses
double bias;
double value;
double value_prev;
double error;
double error_prev;
};
typedef struct _rnn RNN;
struct _rnn {
int * layersize;
int nbneurons;
NEURON * n;
};
typedef struct _config CONFIG;
struct _config {
int nbneurons;
int * layersize;
int nbsynapses;
int * synapses;
};
CONFIG * createconfig(int * layersize) {
CONFIG * conf = (CONFIG*)malloc(sizeof(CONFIG));
int i;
conf->nbneurons = 0;
for(i=1; i<layersize[0]+1; i++) conf->nbneurons += layersize[i];
conf->layersize = (int*)malloc((layersize[0]+1)*sizeof(int));
for(i=0; i<layersize[0]+1; i++) conf->layersize[i] = layersize[i];
// Compute the number of synapses:
conf->nbsynapses = 0;
for(i=1; i<layersize[0]; i++) conf->nbsynapses += layersize[i] * layersize[i+1];
conf->nbsynapses *= 2;
// Allocate the table of synapses:
conf->synapses = (int*)malloc(2*conf->nbsynapses*sizeof(int));
// creation of the synapses:
int j,k=0,l,k2=0,k3=0;
for(i=1;i<layersize[0];i++) {
k3 += layersize[i];
for(j=0; j<layersize[i]; j++) {
for(l=0; l<layersize[i+1]; l++) {
// forward link/synapse:
conf->synapses[k] = k2+j;
k++;
conf->synapses[k] = k3+l;
k++;
// Recurrent link/synapse:
conf->synapses[k] = k3+l;
k++;
conf->synapses[k] = k2+j;
k++;
}
}
k2 += layersize[i];
}
return conf;
}
void freeconfig(CONFIG* conf) {
free(conf->synapses);
free(conf->layersize);
free(conf);
}
RNN * creaternn(CONFIG * conf) {
RNN * net = (RNN*)malloc(sizeof(RNN));
net->nbneurons = conf->nbneurons;
net->layersize = (int*)malloc((conf->layersize[0]+1)*sizeof(int));
int i;
for(i=0; i<conf->layersize[0]+1; i++) net->layersize[i] = conf->layersize[i];
// Allocate the neuron table of the Recurrent Neural Network:
net->n = (NEURON*)malloc(conf->nbneurons*sizeof(NEURON));
// Initialize some neuron values:
int j=0,k=0;
for(i=0; i<conf->nbneurons; i++) {
if(k==0) { k = conf->layersize[j+1]; j++; }
net->n[i].layer = j-1;
net->n[i].nbsynapsesin = 0;
k--;
}
// Count the incoming synapses for each neuron:
k=0;
for(i=0; i<conf->nbsynapses; i++) {
k++;
net->n[conf->synapses[k]].nbsynapsesin++;
k++;
}
// Allocate weight table in neurons, and the table of pointer to neuron
// that represent the incoming synapses:
for(i=0; i<conf->nbneurons; i++) {
net->n[i].weight = (double*)malloc(net->n[i].nbsynapsesin*sizeof(double));
net->n[i].synapsesin = (NEURON**)malloc(net->n[i].nbsynapsesin*sizeof(NEURON*));
net->n[i].nbsynapsesin = 0;
}
// Link the incoming synapses with the neurons:
k=0;
for(i=0; i<conf->nbsynapses; i++) {
k++;
net->n[conf->synapses[k]].synapsesin[net->n[conf->synapses[k]].nbsynapsesin] = &(net->n[conf->synapses[k-1]]);
net->n[conf->synapses[k]].nbsynapsesin++;
k++;
}
// Initialization of the values, errors, and weights:
for(i=0; i<net->nbneurons; i++) {
for(j=0; j<net->n[i].nbsynapsesin; j++) {
net->n[i].weight[j] = 1.0 * (double)rand() / RAND_MAX - 1.0/2;
}
net->n[i].bias = 1.0 * (double)rand() / RAND_MAX - 1.0/2;
net->n[i].value = 0.0;
net->n[i].value_prev = 0.0;
net->n[i].error_prev = 0.0;
net->n[i].error = 0.0;
}
return net;
}
void freernn(RNN * net) {
int i;
for(i=0; i<net->nbneurons; i++) {
free(net->n[i].weight);
free(net->n[i].synapsesin);
}
free(net->n);
free(net->layersize);
free(net);
}
void rnnget(RNN * net, double * out) {
int i,k=0;
// Store the output of the network in the variable table "out":
for(i=net->nbneurons-1; i>=(net->nbneurons - net->layersize[net->layersize[0]]); i--) { out[k] = net->n[i].value; k++; }
}
void rnnsetstart(RNN * net) {
int i,j;
NEURON *ni,*nj;
// For each neuron, update value_prev:
for(i=0; i<net->nbneurons; i++) {
ni = &(net->n[i]);
// If NOT the output layer, then the value is already computed by tanh:
if(ni->layer != net->layersize[0]-1) ni->value_prev = ni->value;
else ni->value_prev = tanh(ni->value);
}
}
void rnnset(RNN * net, double * in) {
int i,j,k;
double v;
NEURON *ni,*nj;
// For each neuron:
for(i=0; i<net->nbneurons; i++) {
ni = &(net->n[i]);
// If it is an input neuron:
if(i<net->layersize[1]) ni->value = in[i];
else ni->value = ni->bias;
// If the neuron is NOT in input layer, then
// compute the value from the incoming synapses:
if(i>=net->layersize[1]) {
// For each incoming synapse:
for(j=0; j<ni->nbsynapsesin; j++) {
nj = ni->synapsesin[j];
// If the synapse is from input layer to output layer, then tanh the value:
if(nj->layer == 0 && ni->layer == (net->layersize[0]-1)) {
////////////////////////////////////////////////////////////////////////
// Recurrent link computation (comment out the following line to disable it):
ni->value += tanh(nj->value_prev) * ni->weight[j];
////////////////////////////////////////////////////////////////////////
} else {
// If it is a forward link/synapse:
if(ni->layer > nj->layer) ni->value += nj->value * ni->weight[j];
////////////////////////////////////////////////////////////////////////
// Recurrent link computation (comment out the following line to disable it):
else ni->value += nj->value_prev * ni->weight[j];
////////////////////////////////////////////////////////////////////////
}
}
}
// If NOT the input layer NOR the output layer, then tanh the value:
if(ni->layer != 0 && ni->layer != net->layersize[0]-1) ni->value = tanh(ni->value);
}
}
void rnnlearnstart(RNN * net) {
int i;
// For each neuron, initialize error_prev and value_prev for a
// new training cycle:
for(i=0; i<net->nbneurons; i++) { net->n[i].error_prev = 0.0; net->n[i].value_prev = 0.0; }
}
void rnnlearn(RNN * net, double * out, double learningrate) {
int i,j,k;
k=0;
NEURON *ni,*nj;
// Initialize error to zero for the output layer:
for(i=net->nbneurons-1; i>=net->nbneurons-net->layersize[net->layersize[0]]; i--) net->n[i].error = 0.0;
// Compute the error for output neurons, and
// initialize it to 0 for the other neurons:
for(i=net->nbneurons-1; i>=0; i--) {
ni = &(net->n[i]);
// If ni is an output neuron, update the error:
if(ni->layer == net->layersize[0]-1) {
ni->error += ni->value - out[k];
k++;
} else {
ni->error = 0.0;
}
}
// Compute error for all other neurons:
for(i=net->nbneurons-1; i>=0; i--) {
ni = &(net->n[i]);
// For each incoming synapse NOT from output layer:
for(j=0; j<ni->nbsynapsesin; j++) {
nj = ni->synapsesin[j];
// If it is a forward link/synapse:
if(ni->layer > nj->layer) nj->error += ni->error * ni->weight[j];
}
}
// Update weights:
for(i=0; i<net->nbneurons; i++) {
ni = &(net->n[i]);
double wchange,derivative;
// For the output layer:
if(ni->layer == net->layersize[0]-1) {
derivative = ni->error * learningrate;
// For each incoming synapse:
for(j=0; j<ni->nbsynapsesin; j++) {
nj = ni->synapsesin[j];
wchange = derivative;
// If it is a forward link/synapse:
if(ni->layer > nj->layer) wchange *= nj->value;
else wchange *= nj->value_prev;
ni->weight[j] -= wchange;
if(ni->weight[j] > 5) ni->weight[j] = 5;
if(ni->weight[j] < -5) ni->weight[j] = -5;
}
ni->bias -= derivative;
if(ni->bias > 5) ni->bias = 5;
if(ni->bias < -5) ni->bias = -5;
// For the other layers:
} else {
derivative = 1.0 - ni->value * ni->value;
derivative *= ni->error * learningrate;
// For each incoming synapse:
for(j=0; j<ni->nbsynapsesin; j++) {
nj = ni->synapsesin[j];
wchange = derivative;
// If it is a forward link/synapse:
if(ni->layer > nj->layer) wchange *= nj->value;
else wchange *= nj->value_prev;
ni->weight[j] -= wchange;
}
ni->bias -= derivative;
}
}
// Update error_prev:
for(i=0; i<net->nbneurons; i++) net->n[i].error_prev = net->n[i].error;
}
int main() {
srand(time(NULL));
int layersize[] = {1, 25, 12, 1};
int layersize_netrnn[] = { 4, 1, 25, 12, 1 };
mlp * netmlp = create_mlp (4, layersize);
srand(time(NULL));
CONFIG * configrnn = createconfig(layersize_netrnn);
RNN * netrnn = creaternn(configrnn);
double inc,outc;
double global_error = 1;
double global_error2 = 1;
int iter,i1=0,i2=0;
//////////////////////////////////////////////////////
// Training of the Multi-Layer Perceptron:
//////////////////////////////////////////////////////
while(global_error > 0.005 && i1<1000) {
for (iter=0; iter < 100; iter++) {
inc = 1.0*rand()/(RAND_MAX+1.0);
outc = inc*inc;
set_mlp(netmlp,&inc);
learn_mlp(netmlp,&outc,0.03);
}
global_error = 0;
int k;
for (k=0; k < 100; k++) {
inc = 1.0*rand()/(RAND_MAX+1.0);
outc = inc*inc;
set_mlp(netmlp,&inc);
get_mlp(netmlp,&outc);
mlp_float desired_out = inc*inc;
global_error += (desired_out - outc)*(desired_out - outc);
}
global_error /= 100;
global_error = sqrt(global_error);
i1++;
}
//////////////////////////////////////////////////////
// Training of the Recurrent Neural Network:
//////////////////////////////////////////////////////
while(global_error2 > 0.005 && i2<1000) {
rnnlearnstart(netrnn);
for (iter=0; iter < 100; iter++) {
inc = 1.0*rand()/(RAND_MAX+1.0);
outc = inc*inc;
rnnsetstart(netrnn);
rnnset(netrnn,&inc);
double outc2;
rnnlearn(netrnn,&outc,0.03);
}
global_error2 = 0;
int k;
for (k=0; k < 100; k++) {
inc = 1.0*rand()/(RAND_MAX+1.0);
outc = inc*inc;
double desired_out = inc*inc;
rnnsetstart(netrnn);
rnnset(netrnn,&inc);
rnnget(netrnn,&outc);
global_error2 += (desired_out - outc)*(desired_out - outc);
}
global_error2 /= 100;
global_error2 = sqrt(global_error2);
if(!isnormal(global_error2)) global_error2 = 100;
i2++;
}
//////////////////////////////////////////////////////
// Performance test for both networks:
//////////////////////////////////////////////////////
global_error = 0;
global_error2 = 0;
int k;
for (k=0; k < 10000; k++) {
inc = 1.0*rand()/(RAND_MAX+1.0);
outc = inc*inc;
double desired_out = inc*inc;
rnnsetstart(netrnn);
rnnset(netrnn,&inc);
rnnget(netrnn,&outc);
global_error2 += (desired_out - outc)*(desired_out - outc);
set_mlp(netmlp,&inc);
get_mlp(netmlp,&outc);
global_error += (desired_out - outc)*(desired_out - outc);
}
global_error /= 10000;
global_error = sqrt(global_error);
printf("\n MLP: Training cycles: %5d Error: %f",i1,global_error);
global_error2 /= 10000;
global_error2 = sqrt(global_error2);
printf("\n RNN: Training cycles: %5d Error: %f",i2,global_error2);
free_mlp(netmlp);
freeconfig(configrnn);
freernn(netrnn);
}
About c - recurrent neural network implementation, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/39910695/