mirror of https://github.com/esiur/esiur-dotnet.git synced 2025-05-06 11:32:59 +00:00
commit b9eb9c6adf
parent 989d20812e
Author: Ahmed Zamil
Date:   2023-03-15 20:34:48 +03:00


@@ -50,7 +50,7 @@ namespace Esiur.Analysis.Neural
             for(var i = 0; i < target.Length; i++)
             {
-                var z = -(target[i] - output[i]) *
+                //var z = -(target[i] - output[i]) *
             }
             //for (int i = 0; i < output.Length; i++)
             //    totalError += (float)Math.Pow(output[i] - expected[i], 2);//calculated cost of network
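The truncated expression disabled in this hunk appears to be the start of the squared-error gradient, and the already-commented totalError lines accumulate the squared-error cost itself. A minimal sketch of that gradient, assuming the conventional per-output cost E = 0.5 * (target - output)^2; SquaredErrorGradient is a hypothetical helper for illustration, not part of Esiur:

    // For E = 0.5 * (t - o)^2 the derivative is dE/do = -(t - o),
    // which matches the sign of the disabled expression above.
    static float[] SquaredErrorGradient(float[] target, float[] output)
    {
        var grad = new float[target.Length];
        for (var i = 0; i < target.Length; i++)
            grad[i] = -(target[i] - output[i]);
        return grad;
    }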
@@ -59,43 +59,43 @@ namespace Esiur.Analysis.Neural
-            var gamma = neuralLayers.Select(x => x.Neurons.Select(n => n.Value).ToArray()).ToArray();
-            int layer = layers.Length - 2;
-            for (int i = 0; i < output.Length; i++) gamma[layers.Length - 1][i] = (output[i] - expected[i]) * activateDer(output[i], layer);//Gamma calculation
-            for (int i = 0; i < layers[layers.Length - 1]; i++)//calculates the w' and b' for the last layer in the network
-            {
-                biases[layers.Length - 2][i] -= gamma[layers.Length - 1][i] * learningRate;
-                for (int j = 0; j < layers[layers.Length - 2]; j++)
-                {
-                    weights[layers.Length - 2][i][j] -= gamma[layers.Length - 1][i] * neurons[layers.Length - 2][j] * learningRate;//*learning
-                }
-            }
-            for (int i = layers.Length - 2; i > 0; i--)//runs on all hidden layers
-            {
-                layer = i - 1;
-                for (int j = 0; j < layers[i]; j++)//outputs
-                {
-                    gamma[i][j] = 0;
-                    for (int k = 0; k < gamma[i + 1].Length; k++)
-                    {
-                        gamma[i][j] += gamma[i + 1][k] * weights[i][k][j];
-                    }
-                    gamma[i][j] *= activateDer(neurons[i][j], layer);//calculate gamma
-                }
-                for (int j = 0; j < layers[i]; j++)//itterate over outputs of layer
-                {
-                    biases[i - 1][j] -= gamma[i][j] * learningRate;//modify biases of network
-                    for (int k = 0; k < layers[i - 1]; k++)//itterate over inputs to layer
-                    {
-                        weights[i - 1][j][k] -= gamma[i][j] * neurons[i - 1][k] * learningRate;//modify weights of network
-                    }
-                }
-            }
+            //var gamma = neuralLayers.Select(x => x.Neurons.Select(n => n.Value).ToArray()).ToArray();
+            //int layer = layers.Length - 2;
+            //for (int i = 0; i < output.Length; i++) gamma[layers.Length - 1][i] = (output[i] - expected[i]) * activateDer(output[i], layer);//Gamma calculation
+            //for (int i = 0; i < layers[layers.Length - 1]; i++)//calculates the w' and b' for the last layer in the network
+            //{
+            //    biases[layers.Length - 2][i] -= gamma[layers.Length - 1][i] * learningRate;
+            //    for (int j = 0; j < layers[layers.Length - 2]; j++)
+            //    {
+            //        weights[layers.Length - 2][i][j] -= gamma[layers.Length - 1][i] * neurons[layers.Length - 2][j] * learningRate;//*learning
+            //    }
+            //}
+            //for (int i = layers.Length - 2; i > 0; i--)//runs on all hidden layers
+            //{
+            //    layer = i - 1;
+            //    for (int j = 0; j < layers[i]; j++)//outputs
+            //    {
+            //        gamma[i][j] = 0;
+            //        for (int k = 0; k < gamma[i + 1].Length; k++)
+            //        {
+            //            gamma[i][j] += gamma[i + 1][k] * weights[i][k][j];
+            //        }
+            //        gamma[i][j] *= activateDer(neurons[i][j], layer);//calculate gamma
+            //    }
+            //    for (int j = 0; j < layers[i]; j++)//itterate over outputs of layer
+            //    {
+            //        biases[i - 1][j] -= gamma[i][j] * learningRate;//modify biases of network
+            //        for (int k = 0; k < layers[i - 1]; k++)//itterate over inputs to layer
+            //        {
+            //            weights[i - 1][j][k] -= gamma[i][j] * neurons[i - 1][k] * learningRate;//modify weights of network
+            //        }
+            //    }
+            //}
         }
     }
 }
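The block disabled by this commit is a standard backpropagation pass: gamma[l][i] holds dE/dz for neuron i of layer l, the output layer's gamma is (output - expected) * f'(output), each hidden gamma is the weighted sum of the downstream gammas, and biases and weights are stepped against the gradient scaled by learningRate. Below is a self-contained sketch of the same rule, assuming the diff's array layout (weights[l - 1][j][k] connects neuron k of layer l - 1 to neuron j of layer l) and a sigmoid-style derivative; BackpropSketch, BackPropagate and ActivateDer are illustrative names, not Esiur APIs:

    class BackpropSketch
    {
        // Assumed sigmoid derivative, written in terms of the activation value a = f(z).
        static float ActivateDer(float a) => a * (1f - a);

        // layers[l]          : neuron count of layer l
        // neurons[l]         : activations of layer l (neurons[0] is the input)
        // biases[l - 1]      : biases feeding layer l
        // weights[l - 1][j][k]: weight from neuron k in layer l - 1 to neuron j in layer l
        static void BackPropagate(int[] layers, float[][] neurons, float[][] biases,
                                  float[][][] weights, float[] expected, float learningRate)
        {
            var last = layers.Length - 1;
            var gamma = new float[layers.Length][];
            for (var l = 0; l < layers.Length; l++)
                gamma[l] = new float[layers[l]];

            // Output layer: gamma = dE/dz = (output - expected) * f'(output).
            for (var i = 0; i < layers[last]; i++)
                gamma[last][i] = (neurons[last][i] - expected[i]) * ActivateDer(neurons[last][i]);

            // Walk backwards; each hidden gamma is the weighted sum of downstream gammas.
            for (var i = last; i > 0; i--)
            {
                for (var j = 0; j < layers[i]; j++)
                {
                    if (i < last)
                    {
                        gamma[i][j] = 0;
                        for (var k = 0; k < layers[i + 1]; k++)
                            gamma[i][j] += gamma[i + 1][k] * weights[i][k][j];
                        gamma[i][j] *= ActivateDer(neurons[i][j]);
                    }
                    // Gradient-descent step, as in the disabled block.
                    biases[i - 1][j] -= gamma[i][j] * learningRate;
                    for (var k = 0; k < layers[i - 1]; k++)
                        weights[i - 1][j][k] -= gamma[i][j] * neurons[i - 1][k] * learningRate;
                }
            }
        }
    }

Folding the output-layer update into the backward loop avoids duplicating the weight/bias step that the original spells out separately for the last layer; the arithmetic is otherwise identical to the commented-out code.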