Nir4s
Nir4s

Reputation: 15

Backpropagation() to file

I want to log the backpropagation process to a file in as much detail as possible. I tried adding an ofstream member ("ofstream File") to the Matrix class so each method could append its results to the file, but then most of the Matrix members of the SimpleNeuralNetwork class were flagged with errors (for example _weightMatrices).

// A simple fully-connected feed-forward neural network built on Matrix2D<float>.
// NOTE(review): the constructor body and the class terminator are truncated in
// this excerpt; documentation below covers only what is visible here.
class SimpleNeuralNetwork
{
    public:
        std::vector<uint32_t> _topology;              // number of neurons in each layer
        std::vector<Matrix2D<float>> _weightMatrices; // weights between consecutive layers
        std::vector<Matrix2D<float>> _valueMatrices;  // per-layer values (presumably activated outputs — confirm against feedForward)
        std::vector<Matrix2D<float>> _biasMatrices;   // one bias matrix per weight matrix
        float _learningRate;                          // scale factor applied to gradients during training
    public:
        
        // topology defines the no.of neurons for each layer
        // learning rate defines how much modification should be done in each backward
        // propagation pass, i.e. during training
        SimpleNeuralNetwork(std::vector<uint32_t> topology,float learningRate = 0.1f)
            :_topology(topology),
            _weightMatrices({}),
            _valueMatrices({}),
            _biasMatrices({}),
            _learningRate(learningRate)
        {

// Runs one backpropagation pass against the given expected output.
//
// targetOutput must have exactly as many entries as the output layer
// (_topology.back()); otherwise the call is rejected.
//
// Returns true when the weights and biases were updated, false when the
// target vector size does not match the output layer.
bool backPropagate(std::vector<float> targetOutput)
{
    if (targetOutput.size() != _topology.back())
        return false;

    // Output-layer error: error = target - output
    Matrix2D<float> errors(targetOutput.size(), 1);
    errors._vals = targetOutput;
    errors = errors.add(_valueMatrices.back().negetive());

    // Propagate the error backwards from the output layer to the input
    // layer, adjusting the weight and bias matrices along the way.
    // static_cast avoids an implicit size_t -> int32_t narrowing.
    for (int32_t i = static_cast<int32_t>(_weightMatrices.size()) - 1; i >= 0; i--)
    {
        // Error attributed to the previous layer (for the next iteration).
        Matrix2D<float> prevErrors = errors.multiply(_weightMatrices[i].transpose());

        // Gradient, i.e. delta weight (dw):
        //   dw = lr * error * d/dx(activated value)
        Matrix2D<float> dOutputs = _valueMatrices[i + 1].applyFunction(DSigmoid);
        Matrix2D<float> gradients = errors.multiplyElements(dOutputs);
        gradients = gradients.multiplyScaler(_learningRate);
        Matrix2D<float> weightGradients = _valueMatrices[i].transpose().multiply(gradients);

        // Apply the updates to this layer's bias and weights.
        _biasMatrices[i] = _biasMatrices[i].add(gradients);
        _weightMatrices[i] = _weightMatrices[i].add(weightGradients);
        errors = prevErrors;
    }

    // BUG FIX: the original fell off the end of a non-void function after a
    // successful pass, which is undefined behavior in C++ and left callers
    // unable to detect success.
    return true;
}

Upvotes: 0

Views: 42

Answers (0)

Related Questions