neuralnetwork.m
clear
% class 1: mean [0 0 0], covariance I
MU1 = [0 0 0];
SIGMA1 = [1 0 0; 0 1 0; 0 0 1];
DATA1 = mvnrnd(MU1, SIGMA1, 1000);
% scatter3(DATA1(:,1), DATA1(:,2), DATA1(:,3))
% class 2: mean [0 1 0]
MU2 = [0 1 0];
SIGMA2 = [1 0 1; 0 2 2; 1 2 5];
DATA2 = mvnrnd(MU2, SIGMA2, 1000);
% class 3: mean [-1 0 1]
MU3 = [-1 0 1];
SIGMA3 = [2 0 0; 0 6 0; 0 0 1];
DATA3 = mvnrnd(MU3, SIGMA3, 1000);
% class 4: mean [0 0.5 1]
MU4 = [0 0.5 1];
SIGMA4 = [2 0 0; 0 1 0; 0 0 3];
DATA4 = mvnrnd(MU4, SIGMA4, 1000);
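To see how far the four clouds overlap before training, all four samples can be drawn into one axes, extending the scatter3 call commented out above. A minimal sketch (colors and marker size are arbitrary choices, not part of the original script):

% visualize the four Gaussian samples in one figure (sketch)
figure; hold on
scatter3(DATA1(:,1), DATA1(:,2), DATA1(:,3), 10, 'r')
scatter3(DATA2(:,1), DATA2(:,2), DATA2(:,3), 10, 'g')
scatter3(DATA3(:,1), DATA3(:,2), DATA3(:,3), 10, 'b')
scatter3(DATA4(:,1), DATA4(:,2), DATA4(:,3), 10, 'k')
hold off; grid on; view(3)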
% evaluate each sample's probability under each of the four distributions
y1 = probability(DATA1, MU1, SIGMA1);
y2 = probability(DATA1, MU2, SIGMA2);
y3 = probability(DATA1, MU3, SIGMA3);
y4 = probability(DATA1, MU4, SIGMA4);
y = [y1 y2 y3 y4];
z = sum(y, 2);
% normalize each row so every sample gets a posterior probability per class
for i = 1:length(y)
    for j = 1:4
        y(i, j) = y(i, j) / z(i);
    end
end
[label, labelidx] = max(y, [], 2);
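The double loop above divides each row of y by its row sum; the same normalization can be written in one line. A minimal sketch, assuming R2016b or later for implicit expansion (bsxfun covers older releases):

y = y ./ z;                      % divide each row by its sum
% y = bsxfun(@rdivide, y, z);   % pre-R2016b equivalent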
% initialize the weight matrix between the input layer and the hidden layer
w1 = 2*rand(3) - 1;
% initialize the weight matrix between the hidden layer and the output layer
w2 = 2*rand(3, 4) - 1;
% scalar biases b1, b2
b1 = 2*rand(1) - 1;
b2 = 2*rand(1) - 1;
learningrate = 0.01;
for m = 1:50
    for i = 1:length(DATA1)
        % forward propagation: 3-3-4 network with a softmax output layer
        neuron1 = DATA1(i,:) * w1 + b1;
        neuron1_active = sigmoid(neuron1);
        neuron2 = neuron1_active * w2 + b2;
        output = softmax(neuron2);
        % backpropagation: derivative of the cross-entropy with respect to
        % the output, using the normalized posteriors as targets
        loss = output - y(i,:);
        softmaxloss = softmax_loss(neuron2) .* loss;
        % gradient of the weight matrix w2
        w2_loss = [neuron1_active' neuron1_active' neuron1_active' neuron1_active'];
        for j = 1:3
            w2_loss(j,:) = w2_loss(j,:) .* softmaxloss;
        end
        % loss propagated back to the hidden-layer activations
        neuron_active_losstemp = w2;
        for j = 1:3
            neuron_active_losstemp(j,:) = w2(j,:) .* softmaxloss;
        end
        neuron_active_loss = sum(neuron_active_losstemp, 2);
        neuron1_loss = sigmoid_loss(neuron1) .* neuron_active_loss';
        % gradient of the weight matrix w1
        w1_loss = [DATA1(i,:)' DATA1(i,:)' DATA1(i,:)'];
        for j = 1:3
            w1_loss(j,:) = w1_loss(j,:) .* neuron1_loss;
        end
        % gradient-descent updates
        b1 = b1 - sum(neuron1_loss, 2) * learningrate;
        w1 = w1 - w1_loss * learningrate;
        b2 = b2 - sum(neuron_active_loss) * learningrate;
        w2 = w2 - w2_loss * learningrate;
    end
end
% count how many samples the trained network labels the same way as the posteriors
value = 0;
for i = 1:length(DATA1)
    % forward propagation through the trained network
    neuron1 = DATA1(i,:) * w1 + b1;
    neuron1_active = sigmoid(neuron1);
    neuron2 = neuron1_active * w2 + b2;
    output = softmax(neuron2);
    [~, tlabelidx] = max(output, [], 2);
    if tlabelidx == labelidx(i)
        value = value + 1;
    end
end
rate = value / 1000;
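The test loop runs one sample at a time; since every step is a matrix operation, the whole evaluation can also be done in a single vectorized pass. A minimal sketch (the row-wise softmax is inlined because the softmax helper below normalizes a single row; implicit expansion again assumes R2016b or later):

% vectorized evaluation over all 1000 samples (sketch)
h = 1 ./ (1 + exp(-(DATA1 * w1 + b1)));   % hidden activations, 1000x3
s = exp(h * w2 + b2);
p = s ./ sum(s, 2);                       % row-wise softmax, 1000x4
[~, pred] = max(p, [], 2);
rate = mean(pred == labelidx);            % same accuracy as the loop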
probability.m
function output = probability(x, mu, sigma)
%PROBABILITY  Product of per-dimension normal CDF values for each row of x,
%   treating the three dimensions as independent; the diagonal entries of
%   sigma are used as the per-dimension spread parameters.
mu_diag = diag(mu);
output = [];
for i = 1:length(x)
    y1 = prod(diag(normcdf(diag(x(i,:)), mu_diag, sigma)));
    output = [output; y1];
end
end
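The CDF product above is an independence approximation. For the exact joint density, the Statistics and Machine Learning Toolbox provides mvnpdf, which evaluates every row of x at once; its absolute values differ from the CDF product, but after the row normalization in the main script the resulting posteriors are comparable. A one-line alternative sketch:

output = mvnpdf(x, mu, sigma);   % exact N(mu, sigma) density for each row of x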
sigmoid.m
function output = sigmoid(x)
%SIGMOID  Logistic function 1/(1 + exp(-x)), applied elementwise.
output = 1 ./ (1 + exp(-x));
end
sigmoid_loss.m
function output = sigmoid_loss(x)
%SIGMOID_LOSS  Elementwise derivative of the sigmoid:
%   d/dx 1/(1+exp(-x)) = exp(-x) ./ (1+exp(-x)).^2
output = exp(-x) ./ (1 + exp(-x)).^2;
end
softmax.m
function output = softmax(x)
%SOFTMAX  Normalized exponentials of a row vector of logits.
output = exp(x) ./ sum(exp(x));
end
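exp overflows once a logit exceeds roughly 709 in double precision, so a common variant subtracts max(x) before exponentiating; the shift cancels in the ratio, leaving the softmax value unchanged. A minimal sketch (softmax_stable is a hypothetical name, not part of the original code):

function output = softmax_stable(x)
%SOFTMAX_STABLE  Same value as softmax(x), robust to large logits.
x = x - max(x);                   % largest logit becomes 0, so exp cannot overflow
output = exp(x) ./ sum(exp(x));
end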
softmax_loss.m
function output = softmax_loss(x)
%SOFTMAX_LOSS  Diagonal of the softmax Jacobian:
%   d softmax_k(x) / d x_k = s_k - s_k^2, where s = softmax(x)
s = exp(x) ./ sum(exp(x));
output = s - s.^2;
end
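A central finite difference is a quick sanity check for the two derivative helpers. A minimal sketch in script form (step size is an arbitrary choice; softmax_loss is compared against the diagonal entries of the softmax Jacobian):

% finite-difference check of sigmoid_loss and softmax_loss (sketch)
x = randn(1, 4);
h = 1e-6;
num_sig = (sigmoid(x + h) - sigmoid(x - h)) / (2*h);
disp(max(abs(num_sig - sigmoid_loss(x))))        % expect roughly 1e-10
num_soft = zeros(1, 4);
for k = 1:4
    e = zeros(1, 4); e(k) = h;
    sp = softmax(x + e); sm = softmax(x - e);
    num_soft(k) = (sp(k) - sm(k)) / (2*h);       % d softmax_k / d x_k
end
disp(max(abs(num_soft - softmax_loss(x))))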