
c - Neural network only works sometimes


So, with the help of the YouTube channel "Coding Train" series, I have finally written my first neural network (something I've always wanted to do). The "only" difference is that I wrote it in C instead of JS.

I tried to model XOR: the structure is two input nodes, two hidden nodes, and one output node. After training, I found that it does not work correctly.

I have already looked at this post and several others, but they did not help me: XOR Neural Network sometimes outputs 0.5

Here is my training data:

Training data:
IN | OUT
00 | 0
01 | 1
10 | 1
11 | 0

I trained it many times, each session with more than 10000 iterations and learning rates ranging from 0.5 down to 0.01, and expected the corresponding results. In the table below I have listed the most common outputs after different training sessions, no matter which learning rate I chose.

(One training session = many training iterations.)

Actual output after training (OUTn is the nth training session):
IN | OUT1 | OUT2 | OUT3 | OUT4 | OUT5
00 | 0.01 | 0.01 | 0.01 | 0.66 | 0.01
01 | 0.99 | 0.99 | 0.50 | 0.66 | 0.66
10 | 0.99 | 0.50 | 0.99 | 0.66 | 0.66
11 | 0.01 | 0.50 | 0.50 | 0.66 | 0.66

Most of the time it outputs something very strange. After hours of bug hunting and so on, I still cannot figure out where the error is. Maybe someone reading this will find one?

The code is below.

I defined a GETRANDOM macro, and my network via a struct so that I can easily pass it around, modify it, and return it:

#define GETRANDOM   ( (double)rand() / RAND_MAX * 2.0 - 1.0 )   // random number between -1 and 1

// network structure
struct sNetwork {
    // node count
    int input_nodes;
    int hidden_nodes;
    int output_nodes;
    // values
    double* input_values;
    double* hidden_values;
    double* output_values;
    double* expected_values;
    // error
    double* hidden_error;
    double* output_error;
    // bias
    double* bias_h;
    double* bias_o;
    // weights
    double** weights_ih;
    double** weights_ho;
};
typedef struct sNetwork tNetwork;

For this I also wrote a setup function:

tNetwork* setup_network(tNetwork* tNet)
{
    // general error check
    if(tNet == NULL)
    {
        return NULL;
    }
    if((*tNet).input_nodes == 0 || (*tNet).hidden_nodes == 0 || (*tNet).output_nodes == 0)
    {
        return NULL;
    }

    // based on the defined size, set up the weights

    // set up the input to hidden weights
    (*tNet).weights_ih = (double**)malloc((*tNet).input_nodes * sizeof(double*));
    for(int i = 0; i < (*tNet).input_nodes; i++)
    {
        (*tNet).weights_ih[i] = (double*)malloc((*tNet).hidden_nodes * sizeof(double));
        for(int j = 0; j < (*tNet).hidden_nodes; j++)
        {
            (*tNet).weights_ih[i][j] = GETRANDOM;
        }
    }

    // set up the hidden to output weights
    (*tNet).weights_ho = (double**)malloc((*tNet).hidden_nodes * sizeof(double*));
    for(int i = 0; i < (*tNet).hidden_nodes; i++)
    {
        (*tNet).weights_ho[i] = (double*)malloc((*tNet).output_nodes * sizeof(double));
        for(int j = 0; j < (*tNet).output_nodes; j++)
        {
            (*tNet).weights_ho[i][j] = GETRANDOM;
        }
    }

    // set up the bias

    // set up hidden bias and value
    (*tNet).bias_h = (double*)malloc((*tNet).hidden_nodes * sizeof(double));
    for(int i = 0; i < (*tNet).hidden_nodes; i++)
    {
        (*tNet).bias_h[i] = GETRANDOM;
    }

    // set up the output bias and value
    (*tNet).bias_o = (double*)malloc((*tNet).output_nodes * sizeof(double));
    for(int i = 0; i < (*tNet).output_nodes; i++)
    {
        (*tNet).bias_o[i] = GETRANDOM;
    }

    // set up the values
    (*tNet).hidden_values = (double*)malloc((*tNet).hidden_nodes * sizeof(double));
    (*tNet).output_values = (double*)malloc((*tNet).output_nodes * sizeof(double));
    (*tNet).input_values = (double*)malloc((*tNet).input_nodes * sizeof(double));
    (*tNet).expected_values = (double*)malloc((*tNet).output_nodes * sizeof(double));

    // set up the error stuff
    (*tNet).hidden_error = (double*)malloc((*tNet).hidden_nodes * sizeof(double));
    (*tNet).output_error = (double*)malloc((*tNet).output_nodes * sizeof(double));

    return tNet;
}
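
As a side note (not in the original post): setup_network allocates everything with malloc, and nothing here ever frees it. That is harmless for a toy program that runs once, but a matching cleanup sketch, assuming the allocation pattern above, could look like this:

// hypothetical cleanup, mirroring setup_network's allocations (not in the original post)
void free_network(tNetwork* tNet)
{
    if(tNet == NULL)
    {
        return;
    }

    // free the weight matrices row by row, then the row-pointer arrays
    for(int i = 0; i < tNet->input_nodes; i++)
    {
        free(tNet->weights_ih[i]);
    }
    free(tNet->weights_ih);

    for(int i = 0; i < tNet->hidden_nodes; i++)
    {
        free(tNet->weights_ho[i]);
    }
    free(tNet->weights_ho);

    // free the flat arrays
    free(tNet->bias_h);
    free(tNet->bias_o);
    free(tNet->input_values);
    free(tNet->hidden_values);
    free(tNet->output_values);
    free(tNet->expected_values);
    free(tNet->hidden_error);
    free(tNet->output_error);

    free(tNet);
}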

The sigmoid functions:

double sigmoid(double x)
{
    return 1 / (1 + exp(-x));
}

// note: expects the already-activated output sigmoid(x), not x itself
double dsigmoid(double x)
{
    return x * (1 - x);
}
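
A side note, since this trips people up: dsigmoid takes the already-activated value, i.e. it computes the derivative as y · (1 − y) where y = sigmoid(x). That is consistent with how it is called later, always on hidden_values and output_values, which are post-sigmoid. A small sanity check (my own sketch, not part of the original program, assuming sigmoid and dsigmoid from above are in scope) can confirm this against a finite difference:

// hypothetical sanity check, not part of the original program:
// dsigmoid(sigmoid(x)) should match a central finite-difference derivative of sigmoid at x
#include <stdio.h>

int main(void)
{
    const double h = 1e-6;
    for(double x = -2.0; x <= 2.0; x += 1.0)
    {
        double analytic  = dsigmoid(sigmoid(x));                         // y * (1 - y)
        double numerical = (sigmoid(x + h) - sigmoid(x - h)) / (2 * h);  // central difference
        printf("x = %+.1f  analytic = %.6f  numerical = %.6f\n", x, analytic, numerical);
    }
    return 0;
}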

Then I wrote the feed-forward function:

tNetwork* feed_forward(tNetwork* tNet)
{
    // calculate the hidden outputs
    for(int i = 0; i < (*tNet).hidden_nodes; i++)
    {
        (*tNet).hidden_values[i] = (*tNet).bias_h[i]; // add bias to weighted sum

        for(int j = 0; j < (*tNet).input_nodes; j++)
        {
            (*tNet).hidden_values[i] += ( (*tNet).input_values[j] * (*tNet).weights_ih[j][i] ); // build the weighted sum
        }

        (*tNet).hidden_values[i] = sigmoid((*tNet).hidden_values[i]);
    }

    // calculate the output
    for(int i = 0; i < (*tNet).output_nodes; i++)
    {
        (*tNet).output_values[i] = (*tNet).bias_o[i]; // add bias to weighted sum

        for(int j = 0; j < (*tNet).hidden_nodes; j++)
        {
            (*tNet).output_values[i] += ( (*tNet).hidden_values[j] * (*tNet).weights_ho[j][i] ); // build the weighted sum
        }
        (*tNet).output_values[i] = sigmoid((*tNet).output_values[i]);
    }

    return tNet;
}

After that, the train function:

tNetwork* train(tNetwork* tNet, double learning_rate)
{
    // first of all feed the network
    tNet = feed_forward(tNet);

    // init the hidden errors
    for(int i = 0; i < (*tNet).hidden_nodes; i++)
    {
        (*tNet).hidden_error[i] = 0;
    }

    // calculate the output error
    for(int i = 0; i < (*tNet).output_nodes; i++)
    {
        (*tNet).output_error[i] = (*tNet).expected_values[i] - (*tNet).output_values[i];
    }

    // calculate the hidden error
    for(int i = 0; i < (*tNet).hidden_nodes; i++)
    {
        for(int j = 0; j < (*tNet).output_nodes; j++)
        {
            (*tNet).hidden_error[i] += ( (*tNet).weights_ho[i][j] * (*tNet).output_error[j] );
        }
    }

    // adjust outputs
    for(int i = 0; i < (*tNet).output_nodes; i++)
    {
        // adjust output bias
        double gradient = learning_rate * (*tNet).output_error[i] * dsigmoid((*tNet).output_values[i]);
        (*tNet).bias_o[i] += gradient;

        for(int j = 0; j < (*tNet).hidden_nodes; j++)
        {
            // adjust hidden->output weights
            (*tNet).weights_ho[j][i] += gradient * (*tNet).hidden_values[j];
        }
    }

    // adjust hiddens
    for(int j = 0; j < (*tNet).hidden_nodes; j++)
    {
        // adjust hidden bias
        double gradient = learning_rate * (*tNet).hidden_error[j] * dsigmoid((*tNet).hidden_values[j]);
        (*tNet).bias_h[j] += gradient;

        for(int k = 0; k < (*tNet).input_nodes; k++)
        {
            // adjust input->hidden weights
            (*tNet).weights_ih[k][j] += gradient * (*tNet).input_values[k];
        }
    }

    return tNet;
}
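
For reference (a note of mine, not stated in the original post): these updates are the standard delta rule for a squared-error loss. With learning rate η, target t, output y = sigmoid(z), and hidden activation h[j], the output-layer step computed above is

    Δbias_o = η · (t − y) · y · (1 − y)
    Δw_ho[j][i] = Δbias_o · h[j]

which is exactly gradient = learning_rate * output_error * dsigmoid(output_values) followed by weights_ho += gradient * hidden_values. One detail worth flagging: hidden_error is the raw output error pushed back through weights_ho, without the output layer's dsigmoid factor. Strict backpropagation would propagate the full output delta instead, but with a single output unit this only rescales the true hidden gradient by the positive factor y · (1 − y), so the update still points downhill and works for this toy problem.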

Finally, in my main function I did this:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>

int main(void)
{
    // initialize
    srand(time(NULL));

    // create neural network
    tNetwork* network = (tNetwork*)malloc(sizeof(tNetwork));

    // set up the properties of the network and initialize it
    network->input_nodes = 2;
    network->hidden_nodes = 2;
    network->output_nodes = 1;
    network = setup_network(network);

    // train
    for(int i = 0; i < 50000; i++)
    {
        switch(rand() % 4)
        {
            case 0:
                // train #1
                network->input_values[0] = 0;
                network->input_values[1] = 0;
                network->expected_values[0] = 0;
                network = train(network, 0.1);
                break;
            case 1:
                // train #2
                network->input_values[0] = 1;
                network->input_values[1] = 0;
                network->expected_values[0] = 1;
                network = train(network, 0.1);
                break;
            case 2:
                // train #3
                network->input_values[0] = 0;
                network->input_values[1] = 1;
                network->expected_values[0] = 1;
                network = train(network, 0.1);
                break;
            case 3:
                // train #4
                network->input_values[0] = 1;
                network->input_values[1] = 1;
                network->expected_values[0] = 0;
                network = train(network, 0.1);
                break;
            default:
                break;
        }
    }

    // check the functionality

    network->input_values[0] = 0;
    network->input_values[1] = 0;
    network = feed_forward(network);
    printf("%f\n", network->output_values[0]);

    network->input_values[0] = 0;
    network->input_values[1] = 1;
    network = feed_forward(network);
    printf("%f\n", network->output_values[0]);

    network->input_values[0] = 1;
    network->input_values[1] = 0;
    network = feed_forward(network);
    printf("%f\n", network->output_values[0]);

    network->input_values[0] = 1;
    network->input_values[1] = 1;
    network = feed_forward(network);
    printf("%f\n", network->output_values[0]);

    return 0;
}
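
A small diagnostic that could help with bug hunting (a sketch, not part of the program above): accumulate the squared output error inside the training loop and print it every few thousand iterations, to see whether training converges at all or gets stuck:

// hypothetical diagnostic: track the mean squared error over blocks of iterations
double err_sum = 0.0;
for(int i = 0; i < 50000; i++)
{
    /* ... pick a training case and call train() exactly as above ... */

    double e = network->output_error[0];   // (expected - actual), left over from the last train() call
    err_sum += e * e;

    if((i + 1) % 5000 == 0)
    {
        printf("iteration %6d: mean squared error %.6f\n", i + 1, err_sum / 5000.0);
        err_sum = 0.0;
    }
}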

If anyone has actually read this far, I'm impressed, and if any error is found and explained, I'd be very grateful. Thanks in advance!!

Best Answer

I think your code is perfectly fine (I'm not great at programming in C... I come from a Java background), and the output is off because some manual tuning is needed. For example, I think the training loop should run longer, and instead of randomly choosing among the 4 cases, it should go through all of the cases and train on each one (this is because if we pick training samples at random, some cases may be picked more often than others, causing the network to learn incorrectly). I tweaked your code to address these issues (while also increasing the learning rate to 0.2), and I almost always get good classification. Please try the following code:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>

#define GETRANDOM   ( (double)rand() / RAND_MAX * 2.0 - 1.0 )   // random number between -1 and 1

// network structure
struct sNetwork {
    // node count
    int input_nodes;
    int hidden_nodes;
    int output_nodes;
    // values
    double* input_values;
    double* hidden_values;
    double* output_values;
    double* expected_values;
    // error
    double* hidden_error;
    double* output_error;
    // bias
    double* bias_h;
    double* bias_o;
    // weights
    double** weights_ih;
    double** weights_ho;
};
typedef struct sNetwork tNetwork;

tNetwork* setup_network(tNetwork* tNet)
{
    // general error check
    if(tNet == NULL)
    {
        return NULL;
    }
    if((*tNet).input_nodes == 0 || (*tNet).hidden_nodes == 0 || (*tNet).output_nodes == 0)
    {
        return NULL;
    }

    // based on the defined size, set up the weights

    // set up the input to hidden weights
    (*tNet).weights_ih = (double**)malloc((*tNet).input_nodes * sizeof(double*));
    for(int i = 0; i < (*tNet).input_nodes; i++)
    {
        (*tNet).weights_ih[i] = (double*)malloc((*tNet).hidden_nodes * sizeof(double));
        for(int j = 0; j < (*tNet).hidden_nodes; j++)
        {
            (*tNet).weights_ih[i][j] = GETRANDOM;
        }
    }

    // set up the hidden to output weights
    (*tNet).weights_ho = (double**)malloc((*tNet).hidden_nodes * sizeof(double*));
    for(int i = 0; i < (*tNet).hidden_nodes; i++)
    {
        (*tNet).weights_ho[i] = (double*)malloc((*tNet).output_nodes * sizeof(double));
        for(int j = 0; j < (*tNet).output_nodes; j++)
        {
            (*tNet).weights_ho[i][j] = GETRANDOM;
        }
    }

    // set up the bias

    // set up hidden bias and value
    (*tNet).bias_h = (double*)malloc((*tNet).hidden_nodes * sizeof(double));
    for(int i = 0; i < (*tNet).hidden_nodes; i++)
    {
        (*tNet).bias_h[i] = GETRANDOM;
    }

    // set up the output bias and value
    (*tNet).bias_o = (double*)malloc((*tNet).output_nodes * sizeof(double));
    for(int i = 0; i < (*tNet).output_nodes; i++)
    {
        (*tNet).bias_o[i] = GETRANDOM;
    }

    // set up the values
    (*tNet).hidden_values = (double*)malloc((*tNet).hidden_nodes * sizeof(double));
    (*tNet).output_values = (double*)malloc((*tNet).output_nodes * sizeof(double));
    (*tNet).input_values = (double*)malloc((*tNet).input_nodes * sizeof(double));
    (*tNet).expected_values = (double*)malloc((*tNet).output_nodes * sizeof(double));

    // set up the error stuff
    (*tNet).hidden_error = (double*)malloc((*tNet).hidden_nodes * sizeof(double));
    (*tNet).output_error = (double*)malloc((*tNet).output_nodes * sizeof(double));

    return tNet;
}

double sigmoid(double x)
{
    return 1 / (1 + exp(-x));
}

double dsigmoid(double x)
{
    return x * (1 - x);
}

tNetwork* feed_forward(tNetwork* tNet)
{
    // calculate the hidden outputs
    for(int i = 0; i < (*tNet).hidden_nodes; i++)
    {
        (*tNet).hidden_values[i] = (*tNet).bias_h[i]; // add bias to weighted sum

        for(int j = 0; j < (*tNet).input_nodes; j++)
        {
            (*tNet).hidden_values[i] += ( (*tNet).input_values[j] * (*tNet).weights_ih[j][i] ); // build the weighted sum
        }

        (*tNet).hidden_values[i] = sigmoid((*tNet).hidden_values[i]);
    }

    // calculate the output
    for(int i = 0; i < (*tNet).output_nodes; i++)
    {
        (*tNet).output_values[i] = (*tNet).bias_o[i]; // add bias to weighted sum

        for(int j = 0; j < (*tNet).hidden_nodes; j++)
        {
            (*tNet).output_values[i] += ( (*tNet).hidden_values[j] * (*tNet).weights_ho[j][i] ); // build the weighted sum
        }
        (*tNet).output_values[i] = sigmoid((*tNet).output_values[i]);
    }

    return tNet;
}

tNetwork* train(tNetwork* tNet, double learning_rate)
{
    // first of all feed the network
    tNet = feed_forward(tNet);

    // init the hidden errors
    for(int i = 0; i < (*tNet).hidden_nodes; i++)
    {
        (*tNet).hidden_error[i] = 0;
    }

    // calculate the output error
    for(int i = 0; i < (*tNet).output_nodes; i++)
    {
        (*tNet).output_error[i] = ((*tNet).expected_values[i] - (*tNet).output_values[i]);
    }

    // calculate the hidden error
    for(int i = 0; i < (*tNet).hidden_nodes; i++)
    {
        for(int j = 0; j < (*tNet).output_nodes; j++)
        {
            (*tNet).hidden_error[i] += ( (*tNet).weights_ho[i][j] * (*tNet).output_error[j] );
        }
    }

    // adjust outputs
    for(int i = 0; i < (*tNet).output_nodes; i++)
    {
        // adjust output bias
        double gradient = learning_rate * (*tNet).output_error[i] * dsigmoid((*tNet).output_values[i]);
        (*tNet).bias_o[i] += gradient;

        for(int j = 0; j < (*tNet).hidden_nodes; j++)
        {
            // adjust hidden->output weights
            (*tNet).weights_ho[j][i] += gradient * (*tNet).hidden_values[j];
        }
    }

    // adjust hiddens
    for(int j = 0; j < (*tNet).hidden_nodes; j++)
    {
        // adjust hidden bias
        double gradient = learning_rate * (*tNet).hidden_error[j] * dsigmoid((*tNet).hidden_values[j]);
        (*tNet).bias_h[j] += gradient;

        for(int k = 0; k < (*tNet).input_nodes; k++)
        {
            // adjust input->hidden weights
            (*tNet).weights_ih[k][j] += gradient * (*tNet).input_values[k];
        }
    }

    return tNet;
}

int main(void)
{
    // initialize
    srand(time(NULL));

    // create neural network
    tNetwork* network = (tNetwork*)malloc(sizeof(tNetwork));

    // set up the properties of the network and initialize it
    network->input_nodes = 2;
    network->hidden_nodes = 2;
    network->output_nodes = 1;
    network = setup_network(network);

    // train
    for(int i = 0; i < 10000; i++)
    {
        double learnRate = 0.2;

        network->input_values[0] = 0;
        network->input_values[1] = 0;
        network->expected_values[0] = 0;
        network = train(network, learnRate);

        network->input_values[0] = 1;
        network->input_values[1] = 0;
        network->expected_values[0] = 1;
        network = train(network, learnRate);

        network->input_values[0] = 0;
        network->input_values[1] = 1;
        network->expected_values[0] = 1;
        network = train(network, learnRate);

        network->input_values[0] = 1;
        network->input_values[1] = 1;
        network->expected_values[0] = 0;
        network = train(network, learnRate);
    }

    // check the functionality

    network->input_values[0] = 0;
    network->input_values[1] = 0;
    network = feed_forward(network);
    printf("%f\n", network->output_values[0]);

    network->input_values[0] = 0;
    network->input_values[1] = 1;
    network = feed_forward(network);
    printf("%f\n", network->output_values[0]);

    network->input_values[0] = 1;
    network->input_values[1] = 0;
    network = feed_forward(network);
    printf("%f\n", network->output_values[0]);

    network->input_values[0] = 1;
    network->input_values[1] = 1;
    network = feed_forward(network);
    printf("%f\n", network->output_values[0]);

    return 0;
}

First run:

0.004500
0.995514
0.994496
0.004476

Second run:

0.026612
0.976464
0.976448
0.025998

Third run: (sometimes the network does not learn, but I guess that's okay, since there is no guarantee that a neural network will always learn everything)

0.016715
0.980586
0.490094
0.490994

To increase the chances of the network learning well, we can increase the size of the hidden layer (for example, from 2 to 10) and run the training loop 100000 times. The classification then gets even better:
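
Concretely, those two changes amount to the following edits in main (a sketch; everything else stays the same as in the code above):

// a wider hidden layer ...
network->input_nodes = 2;
network->hidden_nodes = 10;   // was 2
network->output_nodes = 1;
network = setup_network(network);

// ... and a longer training loop
for(int i = 0; i < 100000; i++)   // was 10000
{
    /* same four training cases as before */
}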

First run:

0.001796
0.997434
0.997245
0.003259

Second run:

0.002740
0.997007
0.997539
0.002883

Third run:

0.000807
0.996993
0.996345
0.004765

Regarding "c - Neural network only works sometimes", the original question can be found on Stack Overflow: https://stackoverflow.com/questions/62506755/
