Multilayer Neural Network: Implementing the Back-Propagation Algorithm (Scilab Program)

//Multilayer Neural Network BP Algorithm
//Author : Srinath Krishnamoorthy
//Date : 10-08-2017
//(c)Copyright Srinath Krishnamoorthy-2017

clear
clc

//Initialise the input values at Layer 1
x=[0 0
0 1
1 0
1 1];

yd=[0;1;1;0];//Desired Output at Y
ya=rand(4,1);//Actual Output

//Initialise the weights from i/p to hidden layer

w_ih=rand(2,2);
w_initial=w_ih;

//Initialise the weights from hidden to output layer

w_h1y=rand(1); //Hidden Neuron 1 to o/p neuron Y
w_h2y=rand(1); //Hidden Neuron 2 to o/p neuron Y

w_h1y_initial=w_h1y;
w_h2y_initial=w_h2y;

//Set the bias of the neurons-h1,h2 and y

bh1=-1;
bh2=-1;
by=-1;

//Set the thresholds for each neuron

th1=rand(1);
th2=rand(1);
ty=rand(1);
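//Note: each threshold acts as a weight on a fixed bias input of -1 (the convention used in Negnevitsky's textbook),
//so the net input becomes sum(w*x) - threshold, and the thresholds are adjusted during training just like the weights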

//Error at Y

err_y=0.00;

//Error gradient at h1,h2 and Y
err_grad_h1=0.00;
err_grad_h2=0.00;
err_grad_y =0.00;

lr=0.5;//Learning rate

flag=0;
net_h1=0;//Net output at h1
net_h2=0;//Net output at h2
net_y =0;//Net output at Y

//Actual output of h1,h2 and Y will be the sigmoid of their net outputs.
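//sigmoid(v) = 1/(1+e^(-v)); its derivative, sigmoid(v)*(1-sigmoid(v)), is what appears in the error-gradient formulas below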

actual_h1=0.00;
actual_h2=0.00;
actual_y=0.00;

epoch=0;//Counts the number of cycles

delta_wh1y=0.00;
delta_wh2y=0.00;
delta_ty=0.00;

//Sum of squared errors. Training continues until it falls below a set threshold

sum_sqr_err=0.00;
errors=zeros(4,1);

while flag==0 do

for i=1:4
//calculate the net output of hidden neuron h1
for j=1:2
net_h1=net_h1+[x(i,j)*w_ih(j,1)];
end;

//calculate the net output of hidden neuron h2
for j=1:2
net_h2=net_h2+[x(i,j)*w_ih(j,2)];
end;
//Apply the bias and threshold; the net values at h1 and h2 become
net_h1=net_h1+(bh1*th1);
net_h2=net_h2+(bh2*th2);

//Actual Output is the Sigmoid of net output at h1 and h2

actual_h1=1/[1+%e^(-1*net_h1)];
actual_h2=1/[1+%e^(-1*net_h2)];

//Now we need to calculate the net output at Y
net_y=(actual_h1*w_h1y)+(actual_h2*w_h2y)+(by*ty);
//Thus actual output at Y is sigmoid of net_y
actual_y=1/[1+%e^(-1*net_y)];

//Calculate the error at Y
err_y=yd(i,1)-actual_y;
ya(i,1)=actual_y;
errors(i,1)=err_y;
//Calculate the error gradient at Y
err_grad_y=actual_y*(1-actual_y)*err_y;
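//actual_y*(1-actual_y) is the derivative of the sigmoid at the output, so this is the delta rule for the output neuron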
//Now we go for weight correction
delta_wh1y=lr*actual_h1*err_grad_y;
delta_wh2y=lr*actual_h2*err_grad_y;
delta_ty=lr*by*err_grad_y;

// Now we calculate the err gradient of hidden neurons
err_grad_h1=actual_h1*(1-actual_h1)*err_grad_y*w_h1y;
err_grad_h2=actual_h2*(1-actual_h2)*err_grad_y*w_h2y;
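//General form: err_grad_h = y_h*(1-y_h)*sum of (err_grad of each output neuron * weight from h to that neuron); Y is the only output neuron here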

//Weight corrections for hidden neuron h1:

for j=1:2
w_ih(j,1)=w_ih(j,1)+[lr*x(i,j)*err_grad_h1];
end;
//Adjust the threshold of the hidden neuron h1
th1=th1+[lr*bh1*err_grad_h1];

//Weight corrections for hidden neuron h2:

for j=1:2
w_ih(j,2)=w_ih(j,2)+[lr*x(i,j)*err_grad_h2];
end;
//Adjust the threshold of the hidden neuron h2
th2=th2+[lr*bh2*err_grad_h2];

//Now we adjust all weights and threshold levels from hidden layer to output layer

w_h1y=w_h1y+delta_wh1y;
w_h2y=w_h2y+delta_wh2y;
ty=ty+delta_ty;

//We reset the accumulated values before the next iteration
net_h1=0.00;
net_h2=0.00;
net_y=0.00;
actual_h1=0.00;
actual_h2=0.00;
actual_y=0.00;
err_y=0.00;
err_grad_y=0.00;
err_grad_h1=0.00;
err_grad_h2=0.00;
delta_wh1y=0.00;
delta_wh2y=0.00;
delta_ty=0.00;
end //End of main for() loop
epoch=epoch+1;

for k=1:4
sum_sqr_err=sum_sqr_err + [errors(k,1)^2];
end;
//The sum of squared errors (SSE) is a useful indicator of the network's performance. Following Negnevitsky (3rd edition, page 183), training stops when the SSE falls to 0.001 or below
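//SSE = (yd(1)-ya(1))^2 + (yd(2)-ya(2))^2 + (yd(3)-ya(3))^2 + (yd(4)-ya(4))^2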

if sum_sqr_err > 0.0010 then
flag=0;
else
flag=1;
end;
disp(sum_sqr_err,'Sum of Squared Errors = ');
disp(errors,'The errors after this epoch are: ');
sum_sqr_err=0.00;
errors=zeros(4,1);
disp(ya,'Actual Output = ');
disp(yd,'Desired Output');
disp(epoch,'End of epoch');
disp('********************************************************************************');
end
//End of the while loop
disp(epoch,'The number of epochs required is:',lr,'For the learning rate')

disp(w_initial,'Initial weights between the input layer and the hidden layer: ');

disp(w_ih,'Final weights between the input layer and the hidden layer: ');

disp(w_h1y_initial,'Initial weight between hidden neuron h1 and output neuron Y: ');

disp(w_h2y_initial,'Initial weight between hidden neuron h2 and output neuron Y: ');

disp(w_h1y,'Final weight between hidden neuron h1 and output neuron Y: ');

disp(w_h2y,'Final weight between hidden neuron h2 and output neuron Y: ');

plot(yd,ya);
//Plot of desired vs. actual output; after successful training the points lie close to the line y = x
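If you want to double-check what the network has learned, the short forward pass below re-evaluates the four XOR patterns with the final weights. It is a minimal sketch meant to be run in the same Scilab session, straight after the training loop, since it reuses x, yd, w_ih, w_h1y, w_h2y, th1, th2 and ty exactly as the program leaves them.

//Sanity check: forward pass over all four XOR patterns with the trained weights
for i=1:4
net_h1 = x(i,1)*w_ih(1,1) + x(i,2)*w_ih(2,1) - th1; //net input to h1 (bias of -1 times threshold)
net_h2 = x(i,1)*w_ih(1,2) + x(i,2)*w_ih(2,2) - th2; //net input to h2
actual_h1 = 1/(1+%e^(-net_h1));
actual_h2 = 1/(1+%e^(-net_h2));
net_y = actual_h1*w_h1y + actual_h2*w_h2y - ty; //net input to Y
actual_y = 1/(1+%e^(-net_y));
mprintf('Input [%d %d] -> output %f (target %d)\n', x(i,1), x(i,2), actual_y, yd(i,1));
end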

[Figures: Backpropagation algorithm output]

If you have any queries, you can e-mail me at srinath.krishnamoorthy@villacollege.edu.mv

About me :

I'm Srinath Krishnamoorthy. I hold an MTech in Computer Science and Engineering from MA College of Engineering, Kothamangalam, and a BTech in Information Technology from Government Engineering College, Palakkad. I teach Artificial Intelligence for the University of the West of England (BSc Computer Science) in Malé, Maldives. My areas of interest are AI, Data Analytics, Computational Intelligence and Theory of Computation.


Scilab Program for Training a Perceptron

//Author : Srinath Krishnamoorthy
//Date : 10-07-2017

clear
clc
//initialise the inputs
x=[1 0 0
1 0 1
1 1 0
1 1 1];
disp('Input is:');
disp(' B x1 x2');
disp(x);
yd=[0;0;0;1]; //Target for the AND gate; you can change it to any linearly separable function of your choice
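//For example, the OR gate would use yd=[0;1;1;1];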
disp('Target Output Yd is:');
disp(yd);

ya=rand(4,1);

//Initialise the weights
w=rand(1,3);
w1=w; //keep a copy of the initial weights

disp('Initial Random Weights:');
disp(' W1 W2 W3');
disp(w);

lr=0.5;
disp('Learning Coefficient =');
disp(lr);

flag=0;
net=0;
err=0;
epoch=0;
thresh=0;

while flag==0 do
for i=1:4
for j=1:3
net=net+w(1,j)*x(i,j);
end;
if net >= thresh then
ya(i,1)=1;
else
ya(i,1)=0;
end;

err=yd(i,1)-ya(i,1);
for j=1:3
w(1,j)=w(1,j)+ (lr*x(i,j)*err);
end;
net=0.00; //Reset net for next iteration
end
disp(ya,'Actual Output');
disp(yd,'Desired Output');

epoch=epoch+1;
disp('End of Epoch No:');
disp(epoch);
disp('************************************************************');
if epoch > 1000 then
disp('Learning Attempt Failed!')
break
end;

if yd(1,1) == ya(1,1)& yd(2,1) == ya(2,1) & yd(3,1) == ya(3,1) & yd(4,1) == ya(4,1) then
flag=1;
else
flag=0;
end
end
disp('Initial Random Weights -');
disp(w1);
disp('Final Adjusted Weights -');
disp(w);
disp(lr,'Learning rate is - ')
disp('***********************************')
plot(yd,ya);
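As a quick usage example, the sketch below applies the final weight vector to each training pattern using the same step activation as the training loop. It is a minimal sketch that assumes it runs in the same Scilab session, reusing w, x and thresh from the program above.

//Classify every training pattern with the final weights
for i=1:4
net = w(1,1)*x(i,1) + w(1,2)*x(i,2) + w(1,3)*x(i,3); //column 1 of x is the bias input
if net >= thresh then
y = 1;
else
y = 0;
end
mprintf('x1 = %d, x2 = %d -> y = %d\n', x(i,2), x(i,3), y);
end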