Neural network output stays the same on every run
Problem description:
I'm working on a very simple feed-forward neural network to practice my programming skills. There are 3 classes:
- Neural::Net; builds the network and feeds it input values (no back-propagation for now)
- Neural::Neuron; holds the neuron's characteristics (index, output, weight, etc.)
- Neural::Connection; a struct-like class that randomizes the weight and holds the output, delta weight, etc.
The program is very basic: I build the network with 2 hidden layers and random weights, then ask it to feed-forward the same input values.
My problem is this: I expect the program to end each run with different output values, yet the output is always the same. I've put trace output everywhere to understand why it keeps computing the same thing over and over, but I can't put my finger on the error.
Here is the code:
#include <iostream>
#include <cassert>
#include <cstdlib>
#include <cmath> // for exp() used in the transfer function
#include <vector>
#include "ConsoleColor.hpp"
using namespace std;
namespace Neural {

class Neuron;
typedef vector<Neuron> Layer;

// ******************** Class: Connection ******************** //
class Connection {
public:
    Connection();
    void setOutput(const double& outputVal) { myOutputVal = outputVal; }
    void setWeight(const double& weight) { myDeltaWeight = myWeight - weight; myWeight = weight; }
    double getOutput(void) const { return myOutputVal; }
    double getWeight(void) const { return myWeight; }
private:
    static double randomizeWeight(void) { return rand()/double(RAND_MAX); }
    double myOutputVal;
    double myWeight;
    double myDeltaWeight;
};

Connection::Connection() {
    myOutputVal = 0;
    myWeight = Connection::randomizeWeight();
    myDeltaWeight = myWeight;
    cout << "Weight: " << myWeight << endl;
}
// ******************** Class: Neuron ************************ //
class Neuron {
public:
    Neuron();
    void setIndex(const unsigned int& index) { myIndex = index; }
    void setOutput(const double& output) { myConnection.setOutput(output); }
    unsigned int getIndex(void) const { return myIndex; }
    double getOutput(void) const { return myConnection.getOutput(); }
    void feedForward(const Layer& prevLayer);
    void printOutput(void) const;
private:
    inline static double transfer(const double& weightedSum);
    Connection myConnection;
    unsigned int myIndex;
};

Neuron::Neuron() : myIndex(0), myConnection() { }

double Neuron::transfer(const double& weightedSum) { return 1/double((1 + exp(-weightedSum))); }

void Neuron::printOutput(void) const { cout << "Neuron " << myIndex << ':' << myConnection.getOutput() << endl; }

void Neuron::feedForward(const Layer& prevLayer) {
    // Weighted sum of the previous layer's output values
    double weightedSum = 0;
    for (unsigned int i = 0; i < prevLayer.size(); ++i) {
        weightedSum += prevLayer[i].getOutput()*myConnection.getWeight();
        cout << "Neuron " << i << " from prevLayer has output: " << prevLayer[i].getOutput() << endl;
        cout << "Weighted sum: " << weightedSum << endl;
    }
    // Transfer function
    myConnection.setOutput(Neuron::transfer(weightedSum));
    cout << "Transfer: " << myConnection.getOutput() << endl;
}
// ******************** Class: Net *************************** //
class Net {
public:
    Net(const vector<unsigned int>& topology);
    void setTarget(const vector<double>& targetVals);
    void feedForward(const vector<double>& inputVals);
    void backPropagate(void);
    void printOutput(void) const;
private:
    vector<Layer> myLayers;
    vector<double> myTargetVals;
};

Net::Net(const vector<unsigned int>& topology) : myTargetVals() {
    assert(topology.size() > 0);
    for (unsigned int i = 0; i < topology.size(); ++i) { // Creating the layers
        myLayers.push_back(Layer(((i + 1) == topology.size()) ? topology[i] : topology[i] + 1)); // +1 is for bias neuron
        // Setting each neuron's index inside the layer
        for (unsigned int j = 0; j < myLayers[i].size(); ++j) {
            myLayers[i][j].setIndex(j);
        }
        // Console log
        cout << red;
        if (i == 0) {
            cout << "Input layer (" << myLayers[i].size() << " neurons including bias neuron) created." << endl;
            myLayers[i].back().setOutput(1);
        }
        else if (i < topology.size() - 1) {
            cout << "Hidden layer " << i << " (" << myLayers[i].size() << " neurons including bias neuron) created." << endl;
            myLayers[i].back().setOutput(1);
        }
        else { cout << "Output layer (" << myLayers[i].size() << " neurons) created." << endl; }
        cout << white;
    }
}

void Net::setTarget(const vector<double>& targetVals) { assert(targetVals.size() == myLayers.back().size()); myTargetVals = targetVals; }

void Net::feedForward(const vector<double>& inputVals) {
    assert(myLayers[0].size() - 1 == inputVals.size());
    for (unsigned int i = 0; i < inputVals.size(); ++i) { // Setting input vals to input layer
        cout << yellow << "Setting input vals...";
        myLayers[0][i].setOutput(inputVals[i]); // myLayers[0] is the input layer
        cout << "myLayer[0][" << i << "].getOutput()==" << myLayers[0][i].getOutput() << white << endl;
    }
    for (unsigned int i = 1; i < myLayers.size() - 1; ++i) { // Updating hidden layers
        for (unsigned int j = 0; j < myLayers[i].size() - 1; ++j) { // - 1 because bias neurons do not have input
            cout << "myLayers[" << i << "].size()==" << myLayers[i].size() << endl;
            cout << green << "Updating neuron " << j << " inside layer " << i << white << endl;
            myLayers[i][j].feedForward(myLayers[i - 1]); // Updating the neuron's output based on the neurons of the previous layer
        }
    }
    for (unsigned int i = 0; i < myLayers.back().size(); ++i) { // Updating output layer
        cout << green << "Updating output neuron " << i << ": " << white << endl;
        const Layer& prevLayer = myLayers[myLayers.size() - 2];
        myLayers.back()[i].feedForward(prevLayer); // Updating the neuron's output based on the neurons of the previous layer
    }
}

void Net::printOutput(void) const {
    for (unsigned int i = 0; i < myLayers.back().size(); ++i) {
        cout << blue; myLayers.back()[i].printOutput(); cout << white;
    }
}

void Net::backPropagate(void) {
}

} // namespace Neural
int main(int argc, char* argv[]) {
    vector<unsigned int> myTopology;
    myTopology.push_back(3);
    myTopology.push_back(4);
    myTopology.push_back(2);
    myTopology.push_back(2);
    cout << myTopology.size() << endl << endl; // myTopology == {3, 4, 2, 2}
    vector<double> myTargetVals = {0.5, 1};
    vector<double> myInputVals = {1, 0.5, 1};
    Neural::Net myNet(myTopology);
    myNet.feedForward(myInputVals);
    myNet.printOutput();
    return 0;
}
Edit: One thing I noticed is that the bias neuron in the input layer was correctly set to output 1, while the ones in the hidden layers were set to 0, and I fixed that. However, the output is still the same on every run. I also did the math by hand on paper. Below is the output (color-coded for clarity):
I expected the values to be random, just like the weights. Isn't that supposed to be the case? I'm confused.
Answer:
I found my mistake. I assumed that rand() automatically initializes its own seed. Silly of me, I know. I added srand(time(NULL)); at the beginning of the program, and now it runs as intended.
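For reference, a minimal sketch of where the fix goes, assuming main() otherwise stays exactly as posted (time() additionally requires <ctime>):

#include <cstdlib> // srand, rand
#include <ctime>   // time

int main(int argc, char* argv[]) {
    srand(static_cast<unsigned int>(time(NULL))); // seed rand() once, before any Connection is constructed
    // ... build myTopology and the Neural::Net exactly as above; every
    // Connection::randomizeWeight() call now produces different weights per run.
    return 0;
}

As a side note (not part of the original answer), modern C++ would usually draw the weights from the <random> facilities instead of rand(), e.g. a std::mt19937 seeded from std::random_device together with a std::uniform_real_distribution<double>, which avoids rand()'s hidden global state altogether.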