I'm trying to implement a simple perceptron for basic boolean expressions, but I can't get the NOT perceptron to train correctly.
I successfully trained AND and OR perceptrons that return the correct values for the given inputs, but training NOT fails.
This is how I did it:
The AND and OR perceptrons have two inputs, two weights, and a bias (whose input is fixed to 1).
All weights of every perceptron start at 0. I then generate random values (0 or 1) to train the perceptrons and keep looping until I get 10 correct guesses in a row.
The learning rate is 0.1.
This is the training process (a minimal sketch of it follows below):
Guessing a value:
For each input I multiply the input by its weight and then sum all the terms, including the bias:
sum = (weight1 * input1) + (weight2 * input2) + (biasWeight * biasInput) -- the bias input is fixed to 1
return = if (sum > 0) then 1 else 0
Training the perceptron:
I get a guess from the perceptron:
val = and.guess(1, 0) -- this returns 0 or 1
error = answer - val
For each input I apply this update:
weight = weight + (input * error * rate)
Then I do the same for the bias:
biasWeight = biasWeight + (biasInput * error * rate) -- the bias input is fixed to 1
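The following is only a minimal sketch of the rule described above, not the posted code; the function and variable names (guess, trainStep, weights, biasWeight) are illustrative only:

//Minimal sketch of the guess/train rule; names are illustrative, not the author's
function guess(inputs, weights, biasWeight) {
    var sum = biasWeight * 1; //bias input is fixed to 1
    for (var i = 0; i < inputs.length; i++) {
        sum += weights[i] * inputs[i]; //weight times input, summed up
    }
    return (sum > 0) ? 1 : 0; //step activation
}
function trainStep(inputs, answer, weights, biasWeight, rate) {
    var error = answer - guess(inputs, weights, biasWeight);
    for (var i = 0; i < inputs.length; i++) {
        weights[i] += inputs[i] * error * rate;
    }
    return biasWeight + 1 * error * rate; //caller keeps the new bias weight
}

For AND you would call trainStep([a, b], answer, weights, biasWeight, 0.1) in a loop and keep the returned bias weight for the next step.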
With this process I can successfully train the AND and OR perceptrons.
The only difference between the AND/OR perceptrons and the NOT perceptron is the number of inputs (just one for NOT).
But the NOT perceptron just keeps moving its weights by the learning-rate step without ever converging.
Sometimes, depending on the order of the training samples, the NOT perceptron gets the right values around the time the weight reaches 0.5.
Update: now that I'm home I've posted the code (HTML, JavaScript), and I actually found the bug. The calc function that should return weight * input was returning weight + input, and that version happens to work for the AND and OR training.
<!DOCTYPE html>
<html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8" />
<title></title>
<script src="jquery-3.2.1.js"></script>
<script type="text/javascript">
function Show(text) {
if (!text) {
text = '';
}
document.writeln(text + '<br />');
}
//return a random value, 0 or 1
function getRandom() {
return Math.floor(Math.random() * 2);
};
function PerceptronData(input, weight) {
this.input = input;
this.weight = weight;
}
PerceptronData.prototype.calc = function () {
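//BUG (see the accepted answer below): this should be this.input * this.weight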
var result = this.input + this.weight;
return result;
};
PerceptronData.prototype.adjust = function (error, rate) {
this.weight += (this.input * error * rate);
};
PerceptronData.prototype.print = function () {
return '(' + this.input + ', ' + this.weight + ')';
}
function Perceptron(n) {
this.data = [];//Data array [input, weight]
this.bias = new PerceptronData(1, 0);
this.rate = 0.1;//learning rate
//initial data
for (var index = 0; index < n; index++) {
this.data.push(new PerceptronData(0, 0));
}
}
//called from the "guess" function of each derived perceptron
Perceptron.prototype.process = function (inputs) {
var data = this.data;
if (inputs.length != data.length) {
throw "The number os inputs [" + inputs.length + "] doesn't match with the start value [" + data.length + "] of the Perceptron.";
}
var dataSum = 0;
for (var index = 0; index < data.length; index++) {
data[index].input = parseInt(inputs[index]);
dataSum += data[index].calc();
}
dataSum += this.bias.calc();
return dataSum;
};
//tweak the weight of every data entry
Perceptron.prototype.adjust = function (value, answer) {
var data = this.data;
var error = answer - value;
for (var index = 0; index < data.length; index++) {
data[index].adjust(error, this.rate);
}
this.bias.adjust(error, this.rate);
};
Perceptron.prototype.print = function () {
var data = this.data;
var result = '';
for (var index = 0; index < data.length; index++) {
result += 'data[' + index + ']' + data[index].print() + ' > ';
}
return result + 'bias' + this.bias.print();
};
function NotPerceptron() {
Perceptron.call(this, 1);
}
NotPerceptron.prototype = Object.create(Perceptron.prototype);
NotPerceptron.prototype.guess = function (value) {
var data = this.process([value]);
//activation function
return ((data > 0) ? 1 : 0);
};
NotPerceptron.prototype.train = function (value, answer) {
var result = this.guess(value);
this.adjust(result, answer);
};
function AndPerceptron() {
Perceptron.call(this, 2);
}
AndPerceptron.prototype = Object.create(Perceptron.prototype);
AndPerceptron.prototype.guess = function (valueA, valueB) {
var data = this.process([valueA, valueB]);
//activation function
return ((data > 0) ? 1 : 0);
};
AndPerceptron.prototype.train = function (valueA, valueB, answer) {
var result = this.guess(valueA, valueB);
this.adjust(result, answer);
};
function OrPerceptron() {
Perceptron.call(this, 2);
}
OrPerceptron.prototype = Object.create(Perceptron.prototype);
OrPerceptron.prototype.guess = function (valueA, valueB) {
var data = this.process([valueA, valueB]);
//activation function
return ((data > 0) ? 1 : 0);
};
OrPerceptron.prototype.train = function (valueA, valueB, answer) {
var result = this.guess(valueA, valueB);
this.adjust(result, answer);
};
</script>
</head>
<body>
<script type="text/javascript">
Show('Training AND...');
Show();
var and = new AndPerceptron();
var count = 0;
var total = 0;
var max = 100;
while (count < 10 && total < max) {
total++;
var a = getRandom();
var b = getRandom();
var answer = ((a === 1 && b === 1) ? 1 : 0);
and.train(a, b, answer);
a = getRandom();
b = getRandom();
answer = ((a === 1 && b === 1) ? 1 : 0);
var guess = and.guess(a, b);
if (guess === answer) {
count++;
} else {
count = 0;
}
Show(' > AND(' + a + ', ' + b + ') = ' + guess + ' > [' + and.print() + ']');
if (count == 10) {
//final test
if (and.guess(0, 0) == 1) {
count = 0;
}
if (and.guess(0, 1) == 1) {
count = 0;
}
if (and.guess(1, 0) == 1) {
count = 0;
}
if (and.guess(1, 1) == 0) {
count = 0;
}
}
}
Show();
if (total >= max) {
Show('AND training failed...');
} else {
Show('AND trained with [' + total + '] interactions. [' + and.print() + ']');
}
Show();
Show('AND(0, 0) = ' + and.guess(0, 0));
Show('AND(0, 1) = ' + and.guess(0, 1));
Show('AND(1, 0) = ' + and.guess(1, 0));
Show('AND(1, 1) = ' + and.guess(1, 1));
Show();
Show('Training OR...');
Show();
var or = new OrPerceptron();
count = 0;
total = 0;
max = 100;
while (count < 10 && total < max) {
total++;
var a = getRandom();
var b = getRandom();
var answer = ((a === 1 || b === 1) ? 1 : 0);
or.train(a, b, answer);
a = getRandom();
b = getRandom();
answer = ((a === 1 || b === 1) ? 1 : 0);
var guess = or.guess(a, b);
if (guess === answer) {
count++;
} else {
count = 0;
}
Show(' > OR(' + a + ', ' + b + ') = ' + guess + ' > [' + or.print() + ']');
if (count == 10) {
//final test
if (or.guess(0, 0) == 1) {
count = 0;
}
if (or.guess(0, 1) == 0) {
count = 0;
}
if (or.guess(1, 0) == 0) {
count = 0;
}
if (or.guess(1, 1) == 0) {
count = 0;
}
}
}
Show();
if (total >= max) {
Show('OR training failed...');
} else {
Show('OR trained with [' + total + '] interactions. [' + or.print() + ']');
}
Show();
Show('OR(0, 0) = ' + or.guess(0, 0));
Show('OR(0, 1) = ' + or.guess(0, 1));
Show('OR(1, 0) = ' + or.guess(1, 0));
Show('OR(1, 1) = ' + or.guess(1, 1));
Show();
Show('Training NOT...');
Show();
var not = new NotPerceptron();
not.rate = 0.1;
count = 0;
total = 0;
max = 100;
while (count < 10 && total < max) {
total++;
var test = getRandom();
var answer = ((test === 1) ? 0 : 1);
not.train(test, answer);
test = getRandom();
answer = ((test === 1) ? 0 : 1);
var guess = not.guess(test);
if (guess === answer) {
count++;
} else {
count = 0;
}
Show(' > NOT(' + test + ') = ' + guess + ' > [' + not.print() + ']');
if (count == 10) {
//final test
if (not.guess(0) == 0) {
count = 0;
}
if (not.guess(1) == 1) {
count = 0;
}
}
}
Show();
if (total >= max) {
Show('NOT training failed...');
} else {
Show('NOT trained with [' + total + '] interactions. [' + not.print() + ']');
}
Show();
Show('NOT(1) = ' + not.guess(1));
Show('NOT(0) = ' + not.guess(0));
</script>
</body>
</html>
Output:
Training AND...
> AND(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, 0.1)]
> AND(1, 1) = 1 > [data[0](1, 0.1) > data[1](1, 0) > bias(1, 0)]
> AND(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0) > bias(1, 0)]
> AND(1, 1) = 1 > [data[0](1, 0.1) > data[1](1, 0) > bias(1, 0)]
> AND(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0) > bias(1, 0)]
> AND(0, 1) = 0 > [data[0](0, 0.1) > data[1](1, 0) > bias(1, 0)]
> AND(0, 1) = 0 > [data[0](0, 0) > data[1](1, 0) > bias(1, -0.1)]
> AND(0, 1) = 1 > [data[0](0, 0.1) > data[1](1, 0.1) > bias(1, 0)]
> AND(0, 1) = 0 > [data[0](0, 0.1) > data[1](1, 0) > bias(1, -0.1)]
> AND(1, 1) = 0 > [data[0](1, 0.1) > data[1](1, 0) > bias(1, -0.1)]
> AND(1, 1) = 0 > [data[0](1, 0.1) > data[1](1, 0) > bias(1, -0.1)]
> AND(1, 0) = 0 > [data[0](1, 0.1) > data[1](0, 0) > bias(1, -0.1)]
> AND(1, 1) = 1 > [data[0](1, 0.2) > data[1](1, 0.1) > bias(1, 0)]
> AND(0, 0) = 0 > [data[0](0, 0.1) > data[1](0, 0.1) > bias(1, -0.1)]
> AND(1, 0) = 0 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, -0.1)]
> AND(1, 0) = 0 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, -0.1)]
> AND(1, 0) = 0 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, -0.1)]
> AND(1, 0) = 0 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, -0.1)]
> AND(0, 0) = 0 > [data[0](0, 0.1) > data[1](0, 0.1) > bias(1, -0.1)]
> AND(1, 0) = 0 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, -0.1)]
> AND(0, 0) = 0 > [data[0](0, 0.1) > data[1](0, 0.1) > bias(1, -0.1)]
AND trained with [21] interactions. [data[0](1, 0.1) > data[1](1, 0.1) > bias(1, -0.1)]
AND(0, 0) = 0
AND(0, 1) = 0
AND(1, 0) = 0
AND(1, 1) = 1
Training OR...
> OR(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, 0.1)]
> OR(0, 1) = 1 > [data[0](0, 0.1) > data[1](1, 0.1) > bias(1, 0.1)]
> OR(0, 1) = 1 > [data[0](0, 0.1) > data[1](1, 0.1) > bias(1, 0.1)]
> OR(0, 0) = 1 > [data[0](0, 0.1) > data[1](0, 0.1) > bias(1, 0.1)]
> OR(0, 0) = 1 > [data[0](0, 0.1) > data[1](0, 0.1) > bias(1, 0.1)]
> OR(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, 0.1)]
> OR(0, 1) = 1 > [data[0](0, 0.1) > data[1](1, 0.1) > bias(1, 0.1)]
> OR(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, 0)]
> OR(0, 0) = 0 > [data[0](0, 0.1) > data[1](0, 0.1) > bias(1, 0)]
> OR(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, 0)]
> OR(0, 0) = 0 > [data[0](0, 0.1) > data[1](0, 0.1) > bias(1, 0)]
> OR(0, 0) = 0 > [data[0](0, 0.1) > data[1](0, 0.1) > bias(1, 0)]
> OR(1, 1) = 1 > [data[0](1, 0.1) > data[1](1, 0.1) > bias(1, 0)]
> OR(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, 0)]
> OR(1, 0) = 1 > [data[0](1, 0.1) > data[1](0, 0.1) > bias(1, 0)]
OR trained with [15] interactions. [data[0](1, 0.1) > data[1](1, 0.1) > bias(1, 0)]
OR(0, 0) = 0
OR(0, 1) = 1
OR(1, 0) = 1
OR(1, 1) = 1
Training NOT...
> NOT(0) = 0 > [data[0](0, 0) > bias(1, 0)]
> NOT(1) = 1 > [data[0](1, 0) > bias(1, 0.1)]
> NOT(0) = 1 > [data[0](0, 0) > bias(1, 0.1)]
> NOT(1) = 1 > [data[0](1, 0) > bias(1, 0.1)]
> NOT(0) = 0 > [data[0](0, -0.1) > bias(1, 0)]
> NOT(1) = 1 > [data[0](1, -0.2) > bias(1, -0.1)]
> NOT(1) = 1 > [data[0](1, -0.2) > bias(1, -0.1)]
> NOT(0) = 1 > [data[0](0, -0.2) > bias(1, -0.1)]
> NOT(0) = 1 > [data[0](0, -0.30000000000000004) > bias(1, -0.2)]
> NOT(1) = 1 > [data[0](1, -0.30000000000000004) > bias(1, -0.2)]
> NOT(0) = 1 > [data[0](0, -0.30000000000000004) > bias(1, -0.2)]
> NOT(1) = 1 > [data[0](1, -0.4) > bias(1, -0.30000000000000004)]
> NOT(1) = 1 > [data[0](1, -0.5) > bias(1, -0.4)]
> NOT(1) = 1 > [data[0](1, -0.5) > bias(1, -0.4)]
> NOT(1) = 1 > [data[0](1, -0.6) > bias(1, -0.5)]
> NOT(1) = 1 > [data[0](1, -0.6) > bias(1, -0.5)]
> NOT(1) = 1 > [data[0](1, -0.7) > bias(1, -0.6)]
> NOT(1) = 1 > [data[0](1, -0.7999999999999999) > bias(1, -0.7)]
> NOT(0) = 1 > [data[0](0, -0.8999999999999999) > bias(1, -0.7999999999999999)]
> NOT(0) = 1 > [data[0](0, -0.8999999999999999) > bias(1, -0.7999999999999999)]
> NOT(0) = 1 > [data[0](0, -0.9999999999999999) > bias(1, -0.8999999999999999)]
> NOT(0) = 1 > [data[0](0, -0.9999999999999999) > bias(1, -0.8999999999999999)]
> NOT(1) = 1 > [data[0](1, -0.9999999999999999) > bias(1, -0.8999999999999999)]
> NOT(0) = 1 > [data[0](0, -0.9999999999999999) > bias(1, -0.8999999999999999)]
> NOT(0) = 1 > [data[0](0, -1.0999999999999999) > bias(1, -0.9999999999999999)]
> NOT(1) = 1 > [data[0](1, -1.2) > bias(1, -1.0999999999999999)]
> NOT(0) = 1 > [data[0](0, -1.2) > bias(1, -1.0999999999999999)]
> NOT(1) = 1 > [data[0](1, -1.2) > bias(1, -1.0999999999999999)]
> NOT(0) = 1 > [data[0](0, -1.2) > bias(1, -1.0999999999999999)]
> NOT(0) = 1 > [data[0](0, -1.2) > bias(1, -1.0999999999999999)]
> NOT(1) = 1 > [data[0](1, -1.2) > bias(1, -1.0999999999999999)]
> NOT(1) = 1 > [data[0](1, -1.3) > bias(1, -1.2)]
> NOT(0) = 1 > [data[0](0, -1.4000000000000001) > bias(1, -1.3)]
> NOT(0) = 1 > [data[0](0, -1.5000000000000002) > bias(1, -1.4000000000000001)]
> NOT(1) = 1 > [data[0](1, -1.6000000000000003) > bias(1, -1.5000000000000002)]
> NOT(1) = 1 > [data[0](1, -1.6000000000000003) > bias(1, -1.5000000000000002)]
> NOT(0) = 1 > [data[0](0, -1.6000000000000003) > bias(1, -1.5000000000000002)]
> NOT(0) = 1 > [data[0](0, -1.7000000000000004) > bias(1, -1.6000000000000003)]
> NOT(0) = 1 > [data[0](0, -1.8000000000000005) > bias(1, -1.7000000000000004)]
> NOT(1) = 1 > [data[0](1, -1.9000000000000006) > bias(1, -1.8000000000000005)]
> NOT(1) = 1 > [data[0](1, -1.9000000000000006) > bias(1, -1.8000000000000005)]
> NOT(1) = 1 > [data[0](1, -1.9000000000000006) > bias(1, -1.8000000000000005)]
> NOT(1) = 1 > [data[0](1, -1.9000000000000006) > bias(1, -1.8000000000000005)]
> NOT(0) = 1 > [data[0](0, -2.0000000000000004) > bias(1, -1.9000000000000006)]
> NOT(1) = 1 > [data[0](1, -2.1000000000000005) > bias(1, -2.0000000000000004)]
> NOT(1) = 1 > [data[0](1, -2.2000000000000006) > bias(1, -2.1000000000000005)]
> NOT(1) = 1 > [data[0](1, -2.3000000000000007) > bias(1, -2.2000000000000006)]
> NOT(0) = 1 > [data[0](0, -2.3000000000000007) > bias(1, -2.2000000000000006)]
> NOT(0) = 1 > [data[0](0, -2.400000000000001) > bias(1, -2.3000000000000007)]
> NOT(0) = 1 > [data[0](0, -2.500000000000001) > bias(1, -2.400000000000001)]
> NOT(1) = 1 > [data[0](1, -2.600000000000001) > bias(1, -2.500000000000001)]
> NOT(0) = 1 > [data[0](0, -2.700000000000001) > bias(1, -2.600000000000001)]
> NOT(1) = 1 > [data[0](1, -2.800000000000001) > bias(1, -2.700000000000001)]
> NOT(0) = 1 > [data[0](0, -2.9000000000000012) > bias(1, -2.800000000000001)]
> NOT(1) = 1 > [data[0](1, -3.0000000000000013) > bias(1, -2.9000000000000012)]
> NOT(1) = 1 > [data[0](1, -3.0000000000000013) > bias(1, -2.9000000000000012)]
> NOT(1) = 1 > [data[0](1, -3.0000000000000013) > bias(1, -2.9000000000000012)]
> NOT(0) = 1 > [data[0](0, -3.1000000000000014) > bias(1, -3.0000000000000013)]
> NOT(0) = 1 > [data[0](0, -3.1000000000000014) > bias(1, -3.0000000000000013)]
> NOT(1) = 1 > [data[0](1, -3.2000000000000015) > bias(1, -3.1000000000000014)]
> NOT(0) = 1 > [data[0](0, -3.3000000000000016) > bias(1, -3.2000000000000015)]
> NOT(1) = 1 > [data[0](1, -3.4000000000000017) > bias(1, -3.3000000000000016)]
> NOT(0) = 1 > [data[0](0, -3.5000000000000018) > bias(1, -3.4000000000000017)]
> NOT(0) = 1 > [data[0](0, -3.600000000000002) > bias(1, -3.5000000000000018)]
> NOT(1) = 1 > [data[0](1, -3.700000000000002) > bias(1, -3.600000000000002)]
> NOT(1) = 1 > [data[0](1, -3.700000000000002) > bias(1, -3.600000000000002)]
> NOT(1) = 1 > [data[0](1, -3.800000000000002) > bias(1, -3.700000000000002)]
> NOT(0) = 1 > [data[0](0, -3.800000000000002) > bias(1, -3.700000000000002)]
> NOT(1) = 1 > [data[0](1, -3.900000000000002) > bias(1, -3.800000000000002)]
> NOT(1) = 1 > [data[0](1, -4.000000000000002) > bias(1, -3.900000000000002)]
> NOT(1) = 1 > [data[0](1, -4.000000000000002) > bias(1, -3.900000000000002)]
> NOT(0) = 1 > [data[0](0, -4.000000000000002) > bias(1, -3.900000000000002)]
> NOT(0) = 1 > [data[0](0, -4.000000000000002) > bias(1, -3.900000000000002)]
> NOT(1) = 1 > [data[0](1, -4.100000000000001) > bias(1, -4.000000000000002)]
> NOT(1) = 1 > [data[0](1, -4.100000000000001) > bias(1, -4.000000000000002)]
> NOT(1) = 1 > [data[0](1, -4.200000000000001) > bias(1, -4.100000000000001)]
> NOT(0) = 1 > [data[0](0, -4.300000000000001) > bias(1, -4.200000000000001)]
> NOT(1) = 1 > [data[0](1, -4.300000000000001) > bias(1, -4.200000000000001)]
> NOT(1) = 1 > [data[0](1, -4.4) > bias(1, -4.300000000000001)]
> NOT(0) = 1 > [data[0](0, -4.5) > bias(1, -4.4)]
> NOT(0) = 1 > [data[0](0, -4.5) > bias(1, -4.4)]
> NOT(0) = 1 > [data[0](0, -4.5) > bias(1, -4.4)]
> NOT(0) = 1 > [data[0](0, -4.6) > bias(1, -4.5)]
> NOT(1) = 1 > [data[0](1, -4.699999999999999) > bias(1, -4.6)]
> NOT(0) = 1 > [data[0](0, -4.799999999999999) > bias(1, -4.699999999999999)]
> NOT(1) = 1 > [data[0](1, -4.799999999999999) > bias(1, -4.699999999999999)]
> NOT(0) = 1 > [data[0](0, -4.899999999999999) > bias(1, -4.799999999999999)]
> NOT(0) = 1 > [data[0](0, -4.999999999999998) > bias(1, -4.899999999999999)]
> NOT(0) = 1 > [data[0](0, -5.099999999999998) > bias(1, -4.999999999999998)]
> NOT(0) = 1 > [data[0](0, -5.1999999999999975) > bias(1, -5.099999999999998)]
> NOT(0) = 1 > [data[0](0, -5.299999999999997) > bias(1, -5.1999999999999975)]
> NOT(0) = 1 > [data[0](0, -5.299999999999997) > bias(1, -5.1999999999999975)]
> NOT(0) = 1 > [data[0](0, -5.299999999999997) > bias(1, -5.1999999999999975)]
> NOT(1) = 1 > [data[0](1, -5.299999999999997) > bias(1, -5.1999999999999975)]
> NOT(0) = 1 > [data[0](0, -5.299999999999997) > bias(1, -5.1999999999999975)]
> NOT(0) = 1 > [data[0](0, -5.299999999999997) > bias(1, -5.1999999999999975)]
> NOT(0) = 1 > [data[0](0, -5.399999999999997) > bias(1, -5.299999999999997)]
> NOT(0) = 1 > [data[0](0, -5.4999999999999964) > bias(1, -5.399999999999997)]
> NOT(1) = 1 > [data[0](1, -5.599999999999996) > bias(1, -5.4999999999999964)]
> NOT(0) = 1 > [data[0](0, -5.699999999999996) > bias(1, -5.599999999999996)]
> NOT(1) = 1 > [data[0](1, -5.799999999999995) > bias(1, -5.699999999999996)]
> NOT(0) = 1 > [data[0](0, -5.899999999999995) > bias(1, -5.799999999999995)]
> NOT(0) = 1 > [data[0](0, -5.999999999999995) > bias(1, -5.899999999999995)]
> NOT(0) = 1 > [data[0](0, -6.099999999999994) > bias(1, -5.999999999999995)]
> NOT(1) = 1 > [data[0](1, -6.199999999999994) > bias(1, -6.099999999999994)]
> NOT(0) = 1 > [data[0](0, -6.199999999999994) > bias(1, -6.099999999999994)]
> NOT(1) = 1 > [data[0](1, -6.199999999999994) > bias(1, -6.099999999999994)]
> NOT(1) = 1 > [data[0](1, -6.199999999999994) > bias(1, -6.099999999999994)]
> NOT(0) = 1 > [data[0](0, -6.199999999999994) > bias(1, -6.099999999999994)]
> NOT(1) = 1 > [data[0](1, -6.199999999999994) > bias(1, -6.099999999999994)]
> NOT(0) = 1 > [data[0](0, -6.199999999999994) > bias(1, -6.099999999999994)]
> NOT(1) = 1 > [data[0](1, -6.299999999999994) > bias(1, -6.199999999999994)]
> NOT(0) = 1 > [data[0](0, -6.399999999999993) > bias(1, -6.299999999999994)]
Best answer
Following @Stanislav Kralin's suggestion I updated the question again so that it shows where the problem is, and here is the solution.
The problem was in the calc function, which should multiply the input by the weight; instead I was adding them.
Unfortunately I was so focused on whether I should use a sigmoid or some other activation function, on the learning rate, and on linear versus non-linear functions, that I didn't see this bug.
The fact that the AND and OR perceptrons worked fine led me astray.
PerceptronData.prototype.calc = function () {
//var result = this.input + this.weight;//This was wrong... :(
var result = this.input * this.weight;
return result;
};
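As a side note (my own check, not part of the original answer): with the buggy additive calc, the pre-activation of the single-input NOT perceptron is (input + weight) + (1 + biasWeight), since the bias input is fixed to 1. A quick sketch (the helper name buggySum is mine) shows that no pair of weights can satisfy both NOT cases at once:

//Buggy additive pre-activation of the NOT perceptron (illustrative helper)
function buggySum(input, w, biasW) {
    return (input + w) + (1 + biasW); //bias input is fixed to 1
}
//NOT(0) must be 1  =>  buggySum(0, w, biasW) > 0   =>  w + biasW > -1
//NOT(1) must be 0  =>  buggySum(1, w, biasW) <= 0  =>  w + biasW <= -2
//No (w, biasW) satisfies both, so the updates never stop and the weights
//drift toward -Infinity, which is the runaway visible in the NOT log above.
console.log(buggySum(0, -1.5, -0.6) > 0);  //false: NOT(0) is already broken...
console.log(buggySum(1, -1.5, -0.6) <= 0); //true: ...once NOT(1) finally holds

For AND and OR, by contrast, weight combinations do exist for which the additive sum lands on the correct side of zero for all four input pairs, which is presumably why those two perceptrons happened to train despite the bug.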
Regarding "javascript - Boolean perceptron for a NOT function", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/45640357/