Add the demo of BP Neural Network.
This is a demo of a BP (backpropagation) neural network. The code runs correctly in MATLAB 2015a.
Yang Zhen committed May 4, 2017
1 parent f81b0d0 commit c8eb453
Showing 31 changed files with 681 additions and 0 deletions.
10 changes: 10 additions & 0 deletions BPNN(BP Neural Network)/ActiveFunc.m
@@ -0,0 +1,10 @@
function y = ActiveFunc( x )
%Activation function of a single neuron
%This example uses the bipolar sigmoid, with output range [-1,1]

alpha = 2;

y = 2 / (1+exp(-alpha*x)) - 1;

end

9 changes: 9 additions & 0 deletions BPNN(BP Neural Network)/ActiveFuncDerivative.m
@@ -0,0 +1,9 @@
function dy = ActiveFuncDerivative( x )
%Derivative of the activation function of a single neuron
%This example uses the bipolar sigmoid

alpha = 2;
dy = alpha * (1-ActiveFunc(x)^2) / 2;

end
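
A quick way to sanity-check the two functions above is to compare the analytic derivative against a central finite difference. A minimal sketch, assuming nothing beyond the two files above (the step h and the test points are arbitrary choices):

% Sketch: compare ActiveFuncDerivative against a central finite difference.
h = 1e-6;%arbitrary step size
for x = [-2, -0.5, 0, 0.5, 2]%arbitrary test points
numeric = (ActiveFunc(x+h) - ActiveFunc(x-h)) / (2*h);
analytic = ActiveFuncDerivative(x);
fprintf('x=%5.2f analytic=%.6f numeric=%.6f\n', x, analytic, numeric);
end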

106 changes: 106 additions & 0 deletions BPNN(BP Neural Network)/BackPropagation.m
@@ -0,0 +1,106 @@
function network = BackPropagation(network, outputData, learningRate, times)
%Error backpropagation + weight update
%outputData = row vector of the target outputs Y
%learningRate = learning rate
%times = index of the current training epoch

%Number of neurons in each layer
xDim = length( network.inputNeure(1).weight );
inputNeureNum = size(network.inputNeure, 2);
hiddenNeureNum = size(network.hiddenNeure, 2);
outputNeureNum = size(network.outputNeure, 2);

%Check dimensions
if outputNeureNum ~= length(outputData)
disp(['error dim for outputNeureNum/outputData=', num2str(outputNeureNum), '/', num2str(length(outputData))]);
return;
end

%1) Output layer
%Backpropagation
delta = zeros(1, outputNeureNum);
for outputIndex = 1: outputNeureNum
diffY = outputData(outputIndex) - network.outputNeure(outputIndex).output;%target output _yi_ - network output yi
v_k = network.outputNeure(outputIndex).netSum;%net input v_1_k...v_nk_k fed into the derivative f'
delta(outputIndex) = diffY * ActiveFuncDerivative(v_k);%[3.16 a]
network.error(times, outputIndex) = diffY;%record each output-layer neuron's error
end
%Weight update
for outputIndex = 1: outputNeureNum
for hiddenIndex = 1: hiddenNeureNum
x_k_1 = network.hiddenNeure(hiddenIndex).output;%output vector of layer k-1 = hidden-layer output

w_k_s = network.outputNeure(outputIndex).weight(hiddenIndex);%current weight
network.outputNeure(outputIndex).weight(hiddenIndex) = w_k_s + learningRate*x_k_1*delta(outputIndex);
end
end

%2) Hidden layer
%current layer k = the hidden layer
%Backpropagation
delta_k1 = delta;%delta(k+1): the output layer's delta = [1 x outputNeureNum]
w_k1 = zeros(outputNeureNum, hiddenNeureNum);%W(k+1): the output layer's weights
for outputIndex = 1 : outputNeureNum
for hiddenIndex = 1: hiddenNeureNum
w_k1(outputIndex, hiddenIndex) = network.outputNeure(outputIndex).weight(hiddenIndex);
end
end
f_k = zeros(hiddenNeureNum, hiddenNeureNum);%F(k): diagonal matrix of hidden-layer activation derivatives
for hiddenIndex = 1: hiddenNeureNum
v_k = network.hiddenNeure(hiddenIndex).netSum;
f_k(hiddenIndex, hiddenIndex) = ActiveFuncDerivative(v_k);
end
delta_k = delta_k1 * w_k1 * f_k;%backpropagation recursion delta(k) = delta(k+1)*W(k+1)*F(k) = [1 x hiddenNeureNum]
%Weight update
w_k_s = zeros(hiddenNeureNum, inputNeureNum);%W(k)(s): hidden-layer weights
for hiddenIndex = 1: hiddenNeureNum
for inputIndex = 1: inputNeureNum
w_k_s(hiddenIndex, inputIndex) = network.hiddenNeure(hiddenIndex).weight(inputIndex);
end
end
x_k_1 = zeros(inputNeureNum, 1);%output vector of layer k-1 = input-layer outputs, X(k-1)
for inputIndex = 1: inputNeureNum
x_k_1(inputIndex) = network.inputNeure(inputIndex).output;
end
w_k_s1 = w_k_s + learningRate * (x_k_1*delta_k)';%weight update W(k)(s+1) = W(k)(s) + LR*(X(k-1)*delta(k))'
for inputIndex = 1: inputNeureNum
for hiddenIndex = 1: hiddenNeureNum
network.hiddenNeure(hiddenIndex).weight(inputIndex) = w_k_s1(hiddenIndex, inputIndex);
end
end

%3) Input layer
%Backpropagation
delta_k1 = delta_k;%delta(k+1): the hidden layer's delta = [1 x hiddenNeureNum]
w_k1 = zeros(hiddenNeureNum, inputNeureNum);%W(k+1): the hidden layer's weights
for hiddenIndex = 1 : hiddenNeureNum
for inputIndex = 1: inputNeureNum
w_k1(hiddenIndex, inputIndex) = network.hiddenNeure(hiddenIndex).weight(inputIndex);
end
end
f_k = zeros(inputNeureNum, inputNeureNum);%F(k): diagonal matrix of input-layer activation derivatives
for inputIndex = 1: inputNeureNum
v_k = network.inputNeure(inputIndex).netSum;
f_k(inputIndex, inputIndex) = ActiveFuncDerivative(v_k);
end
delta_k = delta_k1 * w_k1 * f_k;%backpropagation recursion delta(k) = delta(k+1)*W(k+1)*F(k) = [1 x inputNeureNum]
%Weight update
w_k_s = zeros(inputNeureNum, xDim);%W(k)(s): input-layer weights
for inputIndex = 1: inputNeureNum
for xIndex = 1: xDim
w_k_s(inputIndex, xIndex) = network.inputNeure(inputIndex).weight(xIndex);
end
end
x_k_1 = zeros(xDim, 1);%output vector of layer k-1 = the sample's input, X(k-1)
for xIndex = 1: xDim
x_k_1(xIndex) = network.inputNeure(1).input(xIndex);
end
w_k_s1 = w_k_s + learningRate * (x_k_1*delta_k)';%weight update W(k)(s+1) = W(k)(s) + LR*(X(k-1)*delta(k))'
for inputIndex = 1: inputNeureNum
for xIndex = 1: xDim
network.inputNeure(inputIndex).weight(xIndex) = w_k_s1(inputIndex, xIndex);
end
end

end
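
The nested loops above implement the matrix recursion delta(k) = delta(k+1)*W(k+1)*F(k) element by element. For reference, each weight update is a rank-1 matrix update; a hedged sketch of the output-layer case, where w_out and x_hidden are hypothetical stand-ins, not fields of the demo's network struct:

% Sketch: vectorized form of the output-layer weight update above.
% w_out (outputNeureNum x hiddenNeureNum) and x_hidden (hiddenNeureNum x 1)
% are hypothetical names standing in for the per-neuron struct fields.
w_out = w_out + learningRate * delta' * x_hidden';%rank-1 update: (n_out x 1)*(1 x n_hidden)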

22 changes: 22 additions & 0 deletions BPNN(BP Neural Network)/FigError.m
@@ -0,0 +1,22 @@
function FigError(network, figNum)
%Plot the error recorded at each training epoch in network

[epochs, yDim] = size(network.error);

error = zeros(epochs, 1);
for times = 1: epochs
for yIndex = 1: yDim
error(times) = error(times) + network.error(times, yIndex)^2;
end
error(times) = sqrt(error(times)) / 3.0;%note: the fixed divisor 3.0 assumes yDim = 3
end

figure(figNum);
hold on;
plot(error);
title('error');
xlabel('epochs');
ylabel('error');

end

20 changes: 20 additions & 0 deletions BPNN(BP Neural Network)/GetAccuracny.m
@@ -0,0 +1,20 @@
function accurancy = GetAccuracny(result, yDim)
%Compute the classification accuracy
%result = [target outputs, raw network outputs, post-processed network outputs]

rightCnt = 0;
totalSample = size(result,1);
for sampleIndex = 1: totalSample
for yIndex = 1: yDim
%target == 1 at this position AND post-processed network output == 1 at the same position
if result(sampleIndex, yIndex)==1 && result(sampleIndex, yIndex+2*yDim)==1
rightCnt = rightCnt+1;
break;
end
end
end

accurancy = rightCnt/totalSample;

end
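
GetAccuracny assumes result holds three yDim-wide blocks laid side by side: target one-hot codes, raw network outputs, and thresholded (winner-take-all) outputs. A hedged toy example for yDim = 3, with made-up numbers:

% Sketch: layout expected by GetAccuracny (yDim = 3, values made up).
% Columns 1-3: targets; 4-6: raw outputs; 7-9: thresholded outputs.
result = [1 0 0  0.9 -0.8 -0.7  1 0 0;%counted: target and thresholded agree at column 1
          0 1 0  0.2  0.1 -0.9  1 0 0];%not counted: thresholded picks class 1, target is class 2
accurancy = GetAccuracny(result, 3)%returns 0.5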

38 changes: 38 additions & 0 deletions BPNN(BP Neural Network)/InitNeuralNetwork.m
@@ -0,0 +1,38 @@
function network = InitNeuralNetwork(inputNum, hiddenNum, outputNum, xDim, epochs)
%inputNum  number of neurons in the input layer
%hiddenNum number of neurons in the hidden layer
%outputNum number of neurons in the output layer
%xDim      dimension of the input feature vector X
%epochs    number of training epochs

%The weight dimension of each neuron = the output dimension of the previous layer
%Weights are initialized in the range [-1, 1]

%Input layer
for i = 1: inputNum
singleNeure.weight = 2*rand(xDim,1)-1;%xDim
singleNeure.input = zeros(xDim, 1);
singleNeure.output = 0;
singleNeure.netSum = 0;
network.inputNeure(i) = singleNeure;
end

%Hidden layer
for i = 1: hiddenNum
singleNeure.weight = 2*rand(inputNum,1)-1;%inputNum
singleNeure.input = zeros(inputNum, 1);
network.hiddenNeure(i) = singleNeure;
end

%Output layer
for i = 1: outputNum
singleNeure.weight = 2*rand(hiddenNum,1)-1;%hiddenNum
singleNeure.input = zeros(hiddenNum, 1);
network.outputNeure(i) = singleNeure;
end

%Per-epoch error of each output-layer neuron
network.error = zeros(epochs, outputNum);

end
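
A hedged construction example (the 4/6/3 layer sizes and 500 epochs are illustrative choices for a 4-feature, 3-class data set, not values from the demo):

% Sketch: build a network for 4-dimensional inputs and 3 output classes.
net = InitNeuralNetwork(4, 6, 3, 4, 500);%layer sizes are illustrative
size(net.error)%-> [500 3], one row of output-layer errors per epoch
length(net.inputNeure)%-> 4 input-layer neurons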

26 changes: 26 additions & 0 deletions BPNN(BP Neural Network)/NeureCalculate.m
@@ -0,0 +1,26 @@
function [fx, sum] = NeureCalculate(neure, input)
%Given a single neuron's own weights and the neuron's input vector input,
%compute the neuron's activated output

%Check dimensions
lenInput = length(input);
lenNeure = length(neure.input);
if lenInput ~= lenNeure
disp(['error dim for input/neure.input=', num2str(lenInput), '/', num2str(lenNeure)]);
return;
end

%Weighted-sum net input
sum = 0;
for i = 1: lenInput%for each input
sum = sum + input(i)*neure.weight(i);%input i * weight i
end
% if length(neure.weight) == lenInput+1%bias input x0 = -1
% sum = sum - neure.weight(lenInput+1);
% end

%Output fx after the activation function
fx = ActiveFunc(sum);

end
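
The accumulation loop above is a plain dot product; a behavior-preserving vectorized sketch of the same net-sum computation:

% Sketch: vectorized equivalent of the accumulation loop above.
netSum = neure.weight(1:lenInput)' * input(:);%dot product of weights and inputs
fx = ActiveFunc(netSum);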

54 changes: 54 additions & 0 deletions BPNN(BP Neural Network)/NeureNetworkCalculate.m
@@ -0,0 +1,54 @@
function network = NeureNetworkCalculate(network, data)
%Run one forward pass of the neural network
%Given the input row vector X from data, compute the outputs of every layer

%Number of neurons in each layer
inputNeureNum = size(network.inputNeure, 2);
hiddenNeureNum = size(network.hiddenNeure, 2);
outputNeureNum = size(network.outputNeure, 2);

%1) Compute the input-layer outputs
layerInput = data;%input vector of the input layer = the data input vector

for neureIndex = 1: inputNeureNum%each neuron
singleNeure = network.inputNeure(neureIndex);%a single neuron
[fx, sum] = NeureCalculate(singleNeure, layerInput);%net sum + activation

network.inputNeure(neureIndex).input = layerInput;
network.inputNeure(neureIndex).output = fx;
network.inputNeure(neureIndex).netSum = sum;
end

%2) Compute the hidden-layer outputs
layerInput = zeros(1, inputNeureNum);%input vector of the hidden layer = input-layer outputs
for neureIndex = 1: inputNeureNum%each input-layer neuron
layerInput(neureIndex) = network.inputNeure(neureIndex).output;
end

for neureIndex = 1: hiddenNeureNum%each neuron
singleNeure = network.hiddenNeure(neureIndex);%a single neuron
[fx, sum] = NeureCalculate(singleNeure, layerInput);%net sum + activation

network.hiddenNeure(neureIndex).input = layerInput;
network.hiddenNeure(neureIndex).output = fx;
network.hiddenNeure(neureIndex).netSum = sum;
end

%3) Compute the output-layer outputs
layerInput = zeros(1, hiddenNeureNum);%input vector of the output layer = hidden-layer outputs
for neureIndex = 1: hiddenNeureNum%each hidden-layer neuron
layerInput(neureIndex) = network.hiddenNeure(neureIndex).output;
end

for neureIndex = 1: outputNeureNum%each neuron
singleNeure = network.outputNeure(neureIndex);%a single neuron
[fx, sum] = NeureCalculate(singleNeure, layerInput);%net sum + activation

network.outputNeure(neureIndex).input = layerInput;
network.outputNeure(neureIndex).output = fx;
network.outputNeure(neureIndex).netSum = sum;
end

end
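
After a forward pass, every layer's activations are cached on the struct. A hedged sketch of reading the output layer back as a row vector (x is an assumed, already-normalized input row, not a variable from the demo):

% Sketch: collect the output layer after a forward pass (x is assumed).
network = NeureNetworkCalculate(network, x);
yHat = [network.outputNeure.output];%struct-array expansion -> 1 x outputNeureNum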

37 changes: 37 additions & 0 deletions BPNN(BP Neural Network)/NormalizeData.m
@@ -0,0 +1,37 @@
function [result, maxVector, minVector] = NormalizeData(data)
%Normalize the data matrix
%data = [x1, x2, x3, x4, y]
%each row of data = one sample
%each column of data = one feature

[row, column] = size(data);

%result = -1*ones(row, column+2);%y expands to [y1 y2 y3] for a 3-dimensional output
result = zeros(row, column+2);%y expands to [y1 y2 y3] for a 3-dimensional output
for rowIndex = 1: row
switch data(rowIndex, column)
case 1%y1=1
result(rowIndex, column) = 1;
case 2%y2=1
result(rowIndex, column+1) = 1;
case 3%y3=1
result(rowIndex, column+2) = 1;
end
end

%Min/max of each feature X
minVector = min(data(:, 1:column-1), [], 1);%row vector = column-wise minima
maxVector = max(data(:, 1:column-1), [], 1);%row vector = column-wise maxima
for columnIndex = 1: column-1%normalize each column (each feature X)
%min/max of this column
minNum = minVector(columnIndex);
maxNum = maxVector(columnIndex);

for rowIndex = 1: row%each row: every value of this feature
x = data(rowIndex, columnIndex);
result(rowIndex, columnIndex) = 2 * (x-minNum) / (maxNum-minNum) - 1;%linear mapping onto [-1, 1]
end
end

end

42 changes: 42 additions & 0 deletions BPNN(BP Neural Network)/NormalizeDataWithRange.m
@@ -0,0 +1,42 @@
function result = NormalizeDataWithRange(data, maxVector, minVector)
%Normalize data using the supplied maxVector/minVector
%data = [x1, x2, x3, x4, y]
%each row of data = one sample
%each column of data = one feature

[row, column] = size(data);

result = -1*ones(row, column+2);%y expands to [y1 y2 y3] for a 3-dimensional output
for rowIndex = 1: row
switch data(rowIndex, column)
case 1%y1=1
result(rowIndex, column) = 1;
case 2%y2=1
result(rowIndex, column+1) = 1;
case 3%y3=1
result(rowIndex, column+2) = 1;
end
end

%Supplied min/max of each feature X
for columnIndex = 1: column-1%normalize each column (each feature X)
%min/max of this column
minNum = minVector(columnIndex);
maxNum = maxVector(columnIndex);

for rowIndex = 1: row%each row: every value of this feature
x = data(rowIndex, columnIndex);
normalizedNum = 2 * (x-minNum) / (maxNum-minNum) - 1;%linear mapping onto [-1, 1]
if normalizedNum < -1
normalizedNum = -1;
elseif normalizedNum > 1
normalizedNum = 1;
end

result(rowIndex, columnIndex) = normalizedNum;
end
end


end
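
The two normalization routines are meant to be paired: NormalizeData fixes the per-feature ranges on the training set, and NormalizeDataWithRange reuses those ranges on the test set. The demo's driver script is not part of this excerpt, so the following end-to-end sketch is only an assumption about how the files fit together; the layer sizes, learning rate, epoch count, and the variable names trainData/testData are all illustrative:

% Hedged end-to-end sketch; the actual demo script is not shown above.
xDim = 4; yDim = 3; epochs = 300; lr = 0.05;%illustrative hyper-parameters
[trainSet, maxV, minV] = NormalizeData(trainData);%fit ranges on the training set
testSet = NormalizeDataWithRange(testData, maxV, minV);%reuse those ranges
network = InitNeuralNetwork(4, 6, yDim, xDim, epochs);
for epoch = 1: epochs
for s = 1: size(trainSet, 1)
network = NeureNetworkCalculate(network, trainSet(s, 1:xDim));%forward pass
network = BackPropagation(network, trainSet(s, xDim+1:xDim+yDim), lr, epoch);%backward pass
end
end
FigError(network, 1);%plot per-epoch error

% Evaluate: build the [target, raw, thresholded] matrix GetAccuracny expects.
n = size(testSet, 1);
result = zeros(n, 3*yDim);
for s = 1: n
network = NeureNetworkCalculate(network, testSet(s, 1:xDim));
raw = [network.outputNeure.output];%1 x yDim raw outputs
[~, winner] = max(raw);%winner-take-all
processed = zeros(1, yDim); processed(winner) = 1;
result(s, :) = [testSet(s, xDim+1:xDim+yDim), raw, processed];
end
disp(GetAccuracny(result, yDim));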
