
javascript - Boolean perceptron for the NOT function


I am trying to implement a simple perceptron for basic Boolean expressions, but I cannot get the NOT perceptron to train correctly.

I successfully trained the AND and OR perceptrons, and they return the correct values for the given input sets. The problem appears when I try to train NOT.

This is how I do it:

The AND and OR perceptrons have two inputs, two weights, and a bias (the bias input is fixed to 1).

All weights of every perceptron start at 0. I then generate random input values (0 or 1) to train the perceptron, and keep looping until it guesses correctly 10 times in a row.

The learning rate is 0.1.

This is the training process:

Guessing a value:
For each input, I multiply the input by its weight and then sum all the values (including the bias).

sum = (weight1 * input1) + (weight2 * input2) + (biasWeight * biasInput) -- bias input is fixed to 1
return = if (sum > 0) then 1 else 0
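
A minimal, runnable sketch of that weighted-sum-plus-step computation in plain JavaScript (the function and parameter names here are only illustrative, they are not taken from the code further down):

// Sketch of the "guess" step described above: weighted sum of the inputs plus the
// bias term, followed by a step activation.
function guess(inputs, weights, biasWeight) {
    var sum = biasWeight * 1; // bias input is fixed to 1
    for (var i = 0; i < inputs.length; i++) {
        sum += weights[i] * inputs[i];
    }
    return (sum > 0) ? 1 : 0; // step activation
}

// Example: guess([1, 1], [0.1, 0.1], -0.1) === 1, guess([1, 0], [0.1, 0.1], -0.1) === 0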

Training the perceptron:
I get a guess from the perceptron:

val = and.guess(1, 0) -- this will return 0 or 1
error = answer - val

Then, for each input, I perform this update:

weight = weight + (input * error * rate)

And I do the same for the bias:

biasWeight = biasWeight + (biasInput * error * rate) -- bias input is fixed to 1
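
A minimal sketch of that update rule, assuming the same 0/1 inputs and a bias input fixed to 1 as above (again, the names are only illustrative):

// Perceptron learning rule as described: weight += input * error * rate.
// Mutates the weights array in place and returns the updated bias weight.
function adjustWeights(inputs, weights, biasWeight, error, rate) {
    for (var i = 0; i < inputs.length; i++) {
        weights[i] += inputs[i] * error * rate;
    }
    return biasWeight + (1 * error * rate); // bias input is fixed to 1
}

Note that when the guess is already correct the error is 0, so nothing moves; the weights only change on a wrong guess.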

With this process I can successfully train the AND and OR perceptrons.

The only difference between the AND/OR and the NOT perceptron is the number of inputs (just 1 for NOT).

But the NOT perceptron just keeps moving its weights in steps of the learning rate, without ever settling.

Sometimes, depending on the order of the training samples, the NOT perceptron does produce the correct values once a weight reaches 0.5.

Update: once I got home to post the code (HTML, JavaScript), I actually found the bug. The calc function, which should return weight * input, was returning weight + input, and that happens to work for the AND training.

<!DOCTYPE html>

<html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8" />
<title></title>
<script src="jquery-3.2.1.js"></script>

<script type="text/javascript">
function Show(text) {
if (!text) {
text = '';
}

document.writeln(text + '<br />');
}

//returns a random integer: 0 or 1
function getRandom() {
return Math.floor(Math.random() * 2);
};

function PerceptronData(input, weight) {
this.input = input;
this.weight = weight;
}
PerceptronData.prototype.calc = function () {
var result = this.input + this.weight;//BUG (see the accepted answer below): this should be this.input * this.weight
return result;
};
PerceptronData.prototype.adjust = function (error, rate) {
this.weight += (this.input * error * rate);
};
PerceptronData.prototype.print = function () {
return '(' + this.input + ', ' + this.weight + ')';
}

function Perceptron(n) {
this.data = [];//Data array [input, weight]
this.bias = new PerceptronData(1, 0);
this.rate = 0.1;//learning rate

//initial data
for (var index = 0; index < n; index++) {
this.data.push(new PerceptronData(0, 0));
}
}
//called from the "guess" function of each concrete perceptron
Perceptron.prototype.process = function (inputs) {
var data = this.data;

if (inputs.length != data.length) {
throw "The number of inputs [" + inputs.length + "] doesn't match with the start value [" + data.length + "] of the Perceptron.";
}

var dataSum = 0;
for (var index = 0; index < data.length; index++) {
data[index].input = parseInt(inputs[index]);
dataSum += data[index].calc();
}

dataSum += this.bias.calc();

return dataSum;
};
//tweak the weight of every data entry
Perceptron.prototype.adjust = function (value, answer) {
var data = this.data;
var error = answer - value;

for (var index = 0; index < data.length; index++) {
data[index].adjust(error, this.rate);
}

this.bias.adjust(error, this.rate);
};
Perceptron.prototype.print = function () {
var data = this.data;
var result = '';
for (var index = 0; index < data.length; index++) {
result += 'data[' + index + ']' + data[index].print() + ' > ';
}

return result + 'bias' + this.bias.print();
};

function NotPerceptron() {
Perceptron.call(this, 1);
}
NotPerceptron.prototype = Object.create(Perceptron.prototype);
NotPerceptron.prototype.guess = function (value) {
var data = this.process([value]);

//activation function
return ((data > 0) ? 1 : 0);
};
NotPerceptron.prototype.train = function (value, answer) {
var result = this.guess([value]);
this.adjust(result, answer);
};

function AndPerceptron() {
Perceptron.call(this, 2);
}
AndPerceptron.prototype = Object.create(Perceptron.prototype);
AndPerceptron.prototype.guess = function (valueA, valueB) {
var data = this.process([valueA, valueB]);

//activation function
return ((data > 0) ? 1 : 0);
};
AndPerceptron.prototype.train = function (valueA, valueB, answer) {
var result = this.guess(valueA, valueB);

this.adjust(result, answer);
};

function OrPerceptron() {
Perceptron.call(this, 2);
}
OrPerceptron.prototype = Object.create(Perceptron.prototype);
OrPerceptron.prototype.guess = function (valueA, valueB) {
var data = this.process([valueA, valueB]);

//activation function
return ((data > 0) ? 1 : 0);
};
OrPerceptron.prototype.train = function (valueA, valueB, answer) {
var result = this.guess(valueA, valueB);

this.adjust(result, answer);
};
</script>
</head>
<body>
<script type="text/javascript">
Show('Training AND...');
Show();
var and = new AndPerceptron();

var count = 0;
var total = 0;
var max = 100;

while (count < 10 && total < max) {
total++;
var a = getRandom();
var b = getRandom();
var answer = ((a === 1 && b === 1) ? 1 : 0);

and.train(a, b, answer);

a = getRandom();
b = getRandom();
answer = ((a === 1 && b === 1) ? 1 : 0);

var guess = and.guess(a, b);

if (guess === answer) {
count++;
} else {
count = 0;
}

Show(' > AND(' + a + ', ' + b + ') = ' + guess + ' > [' + and.print() + ']');

if (count == 10) {
//final test
if (and.guess(0, 0) == 1) {
count = 0;
}

if (and.guess(0, 1) == 1) {
count = 0;
}

if (and.guess(1, 0) == 1) {
count = 0;
}

if (and.guess(1, 1) == 0) {
count = 0;
}
}
}
Show();

if (total >= max) {
Show('AND training failed...');
} else {
Show('AND trained with [' + total + '] interactions. [' + and.print() + ']');
}

Show();
Show('AND(0, 0) = ' + and.guess(0, 0));
Show('AND(0, 1) = ' + and.guess(0, 1));
Show('AND(1, 0) = ' + and.guess(1, 0));
Show('AND(1, 1) = ' + and.guess(1, 1));

Show();
Show('Training OR...');
Show();
var or = new OrPerceptron();

count = 0;
total = 0;
max = 100;

while (count < 10 && total < max) {
total++;
var a = getRandom();
var b = getRandom();
var answer = ((a === 1 || b === 1) ? 1 : 0);

or.train(a, b, answer);

a = getRandom();
b = getRandom();
answer = ((a === 1 || b === 1) ? 1 : 0);

var guess = or.guess(a, b);

if (guess === answer) {
count++;
} else {
count = 0;
}

Show(' > OR(' + a + ', ' + b + ') = ' + guess + ' > [' + or.print() + ']');

if (count == 10) {
//final test
if (or.guess(0, 0) == 1) {
count = 0;
}

if (or.guess(0, 1) == 0) {
count = 0;
}

if (or.guess(1, 0) == 0) {
count = 0;
}

if (or.guess(1, 1) == 0) {
count = 0;
}
}
}
Show();

if (total >= max) {
Show('OR training failed...');
} else {
Show('OR trained with [' + total + '] interactions. [' + or.print() + ']');
}

Show();
Show('OR(0, 0) = ' + or.guess(0, 0));
Show('OR(0, 1) = ' + or.guess(0, 1));
Show('OR(1, 0) = ' + or.guess(1, 0));
Show('OR(1, 1) = ' + or.guess(1, 1));

Show();
Show('Training NOT...');
Show();
var not = new NotPerceptron();
not.rate = 0.1;

count = 0;
total = 0;
max = 100;

while (count < 10 && total < max) {
total++;
var test = getRandom();
var answer = ((test === 1) ? 0 : 1);

not.train(test, answer);

test = getRandom();
answer = ((test === 1) ? 0 : 1);

var guess = not.guess(test);

if (guess === answer) {
count++;
} else {
count = 0;
}

Show(' > NOT(' + test + ') = ' + guess + ' > [' + not.print() + ']');

if (count == 10) {
//final test
if (not.guess(0) == 0) {
count = 0;
}

if (not.guess(1) == 1) {
count = 0;
}
}
}
Show();

if (total >= max) {
Show('NOT training failed...');
} else {
Show('NOT trained with [' + total + '] interactions. [' + not.print() + ']');
}

Show();
Show('NOT(1) = ' + not.guess(1));
Show('NOT(0) = ' + not.guess(0));
</script>
</body>
</html>

Output:

Training AND...

> AND(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, 0.1)]
> AND(1, 1) = 1 > [data[0](1, 0.1) > data[1](1, 0) > bias(1, 0)]
> AND(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0) > bias(1, 0)]
> AND(1, 1) = 1 > [data[0](1, 0.1) > data[1](1, 0) > bias(1, 0)]
> AND(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0) > bias(1, 0)]
> AND(0, 1) = 0 > [data[0](0, 0.1) > data[1](1, 0) > bias(1, 0)]
> AND(0, 1) = 0 > [data[0](0, 0) > data[1](1, 0) > bias(1, -0.1)]
> AND(0, 1) = 1 > [data[0](0, 0.1) > data[1](1, 0.1) > bias(1, 0)]
> AND(0, 1) = 0 > [data[0](0, 0.1) > data[1](1, 0) > bias(1, -0.1)]
> AND(1, 1) = 0 > [data[0](1, 0.1) > data[1](1, 0) > bias(1, -0.1)]
> AND(1, 1) = 0 > [data[0](1, 0.1) > data[1](1, 0) > bias(1, -0.1)]
> AND(1, 0) = 0 > [data[0](1, 0.1) > data[1](0, 0) > bias(1, -0.1)]
> AND(1, 1) = 1 > [data[0](1, 0.2) > data[1](1, 0.1) > bias(1, 0)]
> AND(0, 0) = 0 > [data[0](0, 0.1) > data[1](0, 0.1) > bias(1, -0.1)]
> AND(1, 0) = 0 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, -0.1)]
> AND(1, 0) = 0 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, -0.1)]
> AND(1, 0) = 0 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, -0.1)]
> AND(1, 0) = 0 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, -0.1)]
> AND(0, 0) = 0 > [data[0](0, 0.1) > data[1](0, 0.1) > bias(1, -0.1)]
> AND(1, 0) = 0 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, -0.1)]
> AND(0, 0) = 0 > [data[0](0, 0.1) > data[1](0, 0.1) > bias(1, -0.1)]

AND trained with [21] interactions. [data[0](1, 0.1) > data[1](1, 0.1) > bias(1, -0.1)]

AND(0, 0) = 0
AND(0, 1) = 0
AND(1, 0) = 0
AND(1, 1) = 1

Training OR...

> OR(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, 0.1)]
> OR(0, 1) = 1 > [data[0](0, 0.1) > data[1](1, 0.1) > bias(1, 0.1)]
> OR(0, 1) = 1 > [data[0](0, 0.1) > data[1](1, 0.1) > bias(1, 0.1)]
> OR(0, 0) = 1 > [data[0](0, 0.1) > data[1](0, 0.1) > bias(1, 0.1)]
> OR(0, 0) = 1 > [data[0](0, 0.1) > data[1](0, 0.1) > bias(1, 0.1)]
> OR(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, 0.1)]
> OR(0, 1) = 1 > [data[0](0, 0.1) > data[1](1, 0.1) > bias(1, 0.1)]
> OR(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, 0)]
> OR(0, 0) = 0 > [data[0](0, 0.1) > data[1](0, 0.1) > bias(1, 0)]
> OR(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, 0)]
> OR(0, 0) = 0 > [data[0](0, 0.1) > data[1](0, 0.1) > bias(1, 0)]
> OR(0, 0) = 0 > [data[0](0, 0.1) > data[1](0, 0.1) > bias(1, 0)]
> OR(1, 1) = 1 > [data[0](1, 0.1) > data[1](1, 0.1) > bias(1, 0)]
> OR(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, 0)]
> OR(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, 0)]

OR trained with [15] interactions. [data[0](1, 0.1) > data[1](1, 0.1) > bias(1, 0)]

OR(0, 0) = 0
OR(0, 1) = 1
OR(1, 0) = 1
OR(1, 1) = 1

Training NOT...

> NOT(0) = 0 > [data[0](0, 0) > bias(1, 0)]
> NOT(1) = 1 > [data[0](1, 0) > bias(1, 0.1)]
> NOT(0) = 1 > [data[0](0, 0) > bias(1, 0.1)]
> NOT(1) = 1 > [data[0](1, 0) > bias(1, 0.1)]
> NOT(0) = 0 > [data[0](0, -0.1) > bias(1, 0)]
> NOT(1) = 1 > [data[0](1, -0.2) > bias(1, -0.1)]
> NOT(1) = 1 > [data[0](1, -0.2) > bias(1, -0.1)]
> NOT(0) = 1 > [data[0](0, -0.2) > bias(1, -0.1)]
> NOT(0) = 1 > [data[0](0, -0.30000000000000004) > bias(1, -0.2)]
> NOT(1) = 1 > [data[0](1, -0.30000000000000004) > bias(1, -0.2)]
> NOT(0) = 1 > [data[0](0, -0.30000000000000004) > bias(1, -0.2)]
> NOT(1) = 1 > [data[0](1, -0.4) > bias(1, -0.30000000000000004)]
> NOT(1) = 1 > [data[0](1, -0.5) > bias(1, -0.4)]
> NOT(1) = 1 > [data[0](1, -0.5) > bias(1, -0.4)]
> NOT(1) = 1 > [data[0](1, -0.6) > bias(1, -0.5)]
> NOT(1) = 1 > [data[0](1, -0.6) > bias(1, -0.5)]
> NOT(1) = 1 > [data[0](1, -0.7) > bias(1, -0.6)]
> NOT(1) = 1 > [data[0](1, -0.7999999999999999) > bias(1, -0.7)]
> NOT(0) = 1 > [data[0](0, -0.8999999999999999) > bias(1, -0.7999999999999999)]
> NOT(0) = 1 > [data[0](0, -0.8999999999999999) > bias(1, -0.7999999999999999)]
> NOT(0) = 1 > [data[0](0, -0.9999999999999999) > bias(1, -0.8999999999999999)]
> NOT(0) = 1 > [data[0](0, -0.9999999999999999) > bias(1, -0.8999999999999999)]
> NOT(1) = 1 > [data[0](1, -0.9999999999999999) > bias(1, -0.8999999999999999)]
> NOT(0) = 1 > [data[0](0, -0.9999999999999999) > bias(1, -0.8999999999999999)]
> NOT(0) = 1 > [data[0](0, -1.0999999999999999) > bias(1, -0.9999999999999999)]
> NOT(1) = 1 > [data[0](1, -1.2) > bias(1, -1.0999999999999999)]
> NOT(0) = 1 > [data[0](0, -1.2) > bias(1, -1.0999999999999999)]
> NOT(1) = 1 > [data[0](1, -1.2) > bias(1, -1.0999999999999999)]
> NOT(0) = 1 > [data[0](0, -1.2) > bias(1, -1.0999999999999999)]
> NOT(0) = 1 > [data[0](0, -1.2) > bias(1, -1.0999999999999999)]
> NOT(1) = 1 > [data[0](1, -1.2) > bias(1, -1.0999999999999999)]
> NOT(1) = 1 > [data[0](1, -1.3) > bias(1, -1.2)]
> NOT(0) = 1 > [data[0](0, -1.4000000000000001) > bias(1, -1.3)]
> NOT(0) = 1 > [data[0](0, -1.5000000000000002) > bias(1, -1.4000000000000001)]
> NOT(1) = 1 > [data[0](1, -1.6000000000000003) > bias(1, -1.5000000000000002)]
> NOT(1) = 1 > [data[0](1, -1.6000000000000003) > bias(1, -1.5000000000000002)]
> NOT(0) = 1 > [data[0](0, -1.6000000000000003) > bias(1, -1.5000000000000002)]
> NOT(0) = 1 > [data[0](0, -1.7000000000000004) > bias(1, -1.6000000000000003)]
> NOT(0) = 1 > [data[0](0, -1.8000000000000005) > bias(1, -1.7000000000000004)]
> NOT(1) = 1 > [data[0](1, -1.9000000000000006) > bias(1, -1.8000000000000005)]
> NOT(1) = 1 > [data[0](1, -1.9000000000000006) > bias(1, -1.8000000000000005)]
> NOT(1) = 1 > [data[0](1, -1.9000000000000006) > bias(1, -1.8000000000000005)]
> NOT(1) = 1 > [data[0](1, -1.9000000000000006) > bias(1, -1.8000000000000005)]
> NOT(0) = 1 > [data[0](0, -2.0000000000000004) > bias(1, -1.9000000000000006)]
> NOT(1) = 1 > [data[0](1, -2.1000000000000005) > bias(1, -2.0000000000000004)]
> NOT(1) = 1 > [data[0](1, -2.2000000000000006) > bias(1, -2.1000000000000005)]
> NOT(1) = 1 > [data[0](1, -2.3000000000000007) > bias(1, -2.2000000000000006)]
> NOT(0) = 1 > [data[0](0, -2.3000000000000007) > bias(1, -2.2000000000000006)]
> NOT(0) = 1 > [data[0](0, -2.400000000000001) > bias(1, -2.3000000000000007)]
> NOT(0) = 1 > [data[0](0, -2.500000000000001) > bias(1, -2.400000000000001)]
> NOT(1) = 1 > [data[0](1, -2.600000000000001) > bias(1, -2.500000000000001)]
> NOT(0) = 1 > [data[0](0, -2.700000000000001) > bias(1, -2.600000000000001)]
> NOT(1) = 1 > [data[0](1, -2.800000000000001) > bias(1, -2.700000000000001)]
> NOT(0) = 1 > [data[0](0, -2.9000000000000012) > bias(1, -2.800000000000001)]
> NOT(1) = 1 > [data[0](1, -3.0000000000000013) > bias(1, -2.9000000000000012)]
> NOT(1) = 1 > [data[0](1, -3.0000000000000013) > bias(1, -2.9000000000000012)]
> NOT(1) = 1 > [data[0](1, -3.0000000000000013) > bias(1, -2.9000000000000012)]
> NOT(0) = 1 > [data[0](0, -3.1000000000000014) > bias(1, -3.0000000000000013)]
> NOT(0) = 1 > [data[0](0, -3.1000000000000014) > bias(1, -3.0000000000000013)]
> NOT(1) = 1 > [data[0](1, -3.2000000000000015) > bias(1, -3.1000000000000014)]
> NOT(0) = 1 > [data[0](0, -3.3000000000000016) > bias(1, -3.2000000000000015)]
> NOT(1) = 1 > [data[0](1, -3.4000000000000017) > bias(1, -3.3000000000000016)]
> NOT(0) = 1 > [data[0](0, -3.5000000000000018) > bias(1, -3.4000000000000017)]
> NOT(0) = 1 > [data[0](0, -3.600000000000002) > bias(1, -3.5000000000000018)]
> NOT(1) = 1 > [data[0](1, -3.700000000000002) > bias(1, -3.600000000000002)]
> NOT(1) = 1 > [data[0](1, -3.700000000000002) > bias(1, -3.600000000000002)]
> NOT(1) = 1 > [data[0](1, -3.800000000000002) > bias(1, -3.700000000000002)]
> NOT(0) = 1 > [data[0](0, -3.800000000000002) > bias(1, -3.700000000000002)]
> NOT(1) = 1 > [data[0](1, -3.900000000000002) > bias(1, -3.800000000000002)]
> NOT(1) = 1 > [data[0](1, -4.000000000000002) > bias(1, -3.900000000000002)]
> NOT(1) = 1 > [data[0](1, -4.000000000000002) > bias(1, -3.900000000000002)]
> NOT(0) = 1 > [data[0](0, -4.000000000000002) > bias(1, -3.900000000000002)]
> NOT(0) = 1 > [data[0](0, -4.000000000000002) > bias(1, -3.900000000000002)]
> NOT(1) = 1 > [data[0](1, -4.100000000000001) > bias(1, -4.000000000000002)]
> NOT(1) = 1 > [data[0](1, -4.100000000000001) > bias(1, -4.000000000000002)]
> NOT(1) = 1 > [data[0](1, -4.200000000000001) > bias(1, -4.100000000000001)]
> NOT(0) = 1 > [data[0](0, -4.300000000000001) > bias(1, -4.200000000000001)]
> NOT(1) = 1 > [data[0](1, -4.300000000000001) > bias(1, -4.200000000000001)]
> NOT(1) = 1 > [data[0](1, -4.4) > bias(1, -4.300000000000001)]
> NOT(0) = 1 > [data[0](0, -4.5) > bias(1, -4.4)]
> NOT(0) = 1 > [data[0](0, -4.5) > bias(1, -4.4)]
> NOT(0) = 1 > [data[0](0, -4.5) > bias(1, -4.4)]
> NOT(0) = 1 > [data[0](0, -4.6) > bias(1, -4.5)]
> NOT(1) = 1 > [data[0](1, -4.699999999999999) > bias(1, -4.6)]
> NOT(0) = 1 > [data[0](0, -4.799999999999999) > bias(1, -4.699999999999999)]
> NOT(1) = 1 > [data[0](1, -4.799999999999999) > bias(1, -4.699999999999999)]
> NOT(0) = 1 > [data[0](0, -4.899999999999999) > bias(1, -4.799999999999999)]
> NOT(0) = 1 > [data[0](0, -4.999999999999998) > bias(1, -4.899999999999999)]
> NOT(0) = 1 > [data[0](0, -5.099999999999998) > bias(1, -4.999999999999998)]
> NOT(0) = 1 > [data[0](0, -5.1999999999999975) > bias(1, -5.099999999999998)]
> NOT(0) = 1 > [data[0](0, -5.299999999999997) > bias(1, -5.1999999999999975)]
> NOT(0) = 1 > [data[0](0, -5.299999999999997) > bias(1, -5.1999999999999975)]
> NOT(0) = 1 > [data[0](0, -5.299999999999997) > bias(1, -5.1999999999999975)]
> NOT(1) = 1 > [data[0](1, -5.299999999999997) > bias(1, -5.1999999999999975)]
> NOT(0) = 1 > [data[0](0, -5.299999999999997) > bias(1, -5.1999999999999975)]
> NOT(0) = 1 > [data[0](0, -5.299999999999997) > bias(1, -5.1999999999999975)]
> NOT(0) = 1 > [data[0](0, -5.399999999999997) > bias(1, -5.299999999999997)]
> NOT(0) = 1 > [data[0](0, -5.4999999999999964) > bias(1, -5.399999999999997)]
> NOT(1) = 1 > [data[0](1, -5.599999999999996) > bias(1, -5.4999999999999964)]
> NOT(0) = 1 > [data[0](0, -5.699999999999996) > bias(1, -5.599999999999996)]
> NOT(1) = 1 > [data[0](1, -5.799999999999995) > bias(1, -5.699999999999996)]
> NOT(0) = 1 > [data[0](0, -5.899999999999995) > bias(1, -5.799999999999995)]
> NOT(0) = 1 > [data[0](0, -5.999999999999995) > bias(1, -5.899999999999995)]
> NOT(0) = 1 > [data[0](0, -6.099999999999994) > bias(1, -5.999999999999995)]
> NOT(1) = 1 > [data[0](1, -6.199999999999994) > bias(1, -6.099999999999994)]
> NOT(0) = 1 > [data[0](0, -6.199999999999994) > bias(1, -6.099999999999994)]
> NOT(1) = 1 > [data[0](1, -6.199999999999994) > bias(1, -6.099999999999994)]
> NOT(1) = 1 > [data[0](1, -6.199999999999994) > bias(1, -6.099999999999994)]
> NOT(0) = 1 > [data[0](0, -6.199999999999994) > bias(1, -6.099999999999994)]
> NOT(1) = 1 > [data[0](1, -6.199999999999994) > bias(1, -6.099999999999994)]
> NOT(0) = 1 > [data[0](0, -6.199999999999994) > bias(1, -6.099999999999994)]
> NOT(1) = 1 > [data[0](1, -6.299999999999994) > bias(1, -6.199999999999994)]
> NOT(0) = 1 > [data[0](0, -6.399999999999993) > bias(1, -6.299999999999994)]

Best answer

Following @Stanislav Kralin's suggestion, I updated the question again to show where the problem was. Here is the solution.

The problem was in the calc function, which should multiply the input by the weight; I was adding them instead.

Unfortunately, I was so focused on whether I should use a sigmoid or some other activation function, on the learning rate, and on linear versus non-linear functions, that I did not see this bug.

The fact that the AND and OR perceptrons worked fine led me astray.

PerceptronData.prototype.calc = function () {
//var result = this.input + this.weight;//This was wrong... :(
var result = this.input * this.weight;
return result;
};
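
For completeness, here is a small standalone sketch (not part of the original code) showing that, once calc multiplies instead of adds, a single-input perceptron converges to NOT after a handful of updates:

// Training a 1-input perceptron for NOT with the corrected rule (input * weight).
var weight = 0, biasWeight = 0, rate = 0.1;

function notGuess(x) {
    var sum = (x * weight) + (1 * biasWeight); // corrected: multiply input by weight
    return (sum > 0) ? 1 : 0;
}

for (var i = 0; i < 100; i++) {
    var x = Math.floor(Math.random() * 2); // random 0 or 1
    var answer = (x === 1) ? 0 : 1;        // NOT truth table
    var error = answer - notGuess(x);
    weight += x * error * rate;
    biasWeight += 1 * error * rate;
}

// After training: notGuess(0) === 1 and notGuess(1) === 0
// (with these samples the weights typically settle around weight = -0.1, biasWeight = +0.1)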

Original question on Stack Overflow: https://stackoverflow.com/questions/45640357/
