% Single-output-neuron delta-rule demo: train one sigmoid unit to respond
% with ~1 to an "X" bitmap and ~0 to an "O" bitmap.
clear all;
close all;
clc;
% how many trials training goes on for
nRuns = 100;
% 7x7 bitmap of an X (1 = pixel on)
inputs{1} = [1 0 0 0 0 0 1;
             0 1 0 0 0 1 0;
             0 0 1 0 1 0 0;
             0 0 0 1 0 0 0;
             0 0 1 0 1 0 0;
             0 1 0 0 0 1 0;
             1 0 0 0 0 0 1];
% 7x7 bitmap of an O (drawn as a square outline)
inputs{2} = [1 1 1 1 1 1 1;
             1 0 0 0 0 0 1;
             1 0 0 0 0 0 1;
             1 0 0 0 0 0 1;
             1 0 0 0 0 0 1;
             1 0 0 0 0 0 1;
             1 1 1 1 1 1 1];
% show the two training patterns
subplot(2,2,1);
imagesc(inputs{1});
subplot(2,2,2);
imagesc(inputs{2});
% side length of the (square) input patterns
inputDimension = size(inputs{1},1);
% start from all-zero weights (sigmoid(0) = 0.5, i.e. maximally uncertain);
% uncomment the next line instead to randomize weights to [-1, +1]
% weights = rand(inputDimension)*2 - 1;
weights = zeros(inputDimension);
% show the initial (untrained) weights
subplot(2,2,3);
imagesc(weights);
% the learning rate of the model (step size of each weight update)
learningRate = 0.1;
% per-trial output activations, recorded separately for X and O trials
xActivations = [];
oActivations = [];
for runs = 1:nRuns
    % randomly pick the X (1) or the O (2) pattern for this trial
    whichInput = randi(2);
    % net input to the output neuron: sum of weights over active pixels
    netInput = sum(sum(inputs{whichInput} .* weights));
    % squash through the logistic sigmoid to get an activation in (0,1)
    netOutput = 1 / (1 + exp(-1*netInput));
    % delta rule: target is 1 for the X pattern, 0 for the O pattern
    % (X updates are rewards, pushing active weights up; O updates are
    % punishments, pushing active weights down)
    target = 2 - whichInput;
    % update only the weights whose input pixel is on
    activePixels = inputs{whichInput} == 1;
    weights(activePixels) = weights(activePixels) + (target - netOutput)*learningRate;
    % clamp the updated weights to [-1, 1] so they do not blow up
    weights(activePixels) = max(min(weights(activePixels), 1), -1);
    % store the activation for this trial under its pattern type
    if whichInput == 1
        xActivations = [xActivations netOutput]; %#ok<AGROW>
    else
        oActivations = [oActivations netOutput]; %#ok<AGROW>
    end
end
% final weight matrix after training
subplot(2,2,4);
imagesc(weights);
% activation history: X trials should climb toward 1, O trials fall toward 0
figure;
hold on;
plot(xActivations);
plot(oActivations);
title('X and O Activations across trials');