Masters of programming at #hovnokod

Java #4694

Training a neural network using backpropagation.

public NeuralNetwork backpropError(double targetResult) {
        neurons[7].setError((targetResult - neurons[7].getOutput()) * neurons[7].derivative());
        neurons[7].setBias(neurons[7].getBias() + LEARNING_RATE * neurons[7].getError());
        neurons[7].getWeights()[0] = neurons[7].getWeights()[0] + LEARNING_RATE * neurons[7].getError() * neurons[1].getOutput();
        neurons[7].getWeights()[1] = neurons[7].getWeights()[1] + LEARNING_RATE * neurons[7].getError() * neurons[2].getOutput();
        neurons[7].getWeights()[2] = neurons[7].getWeights()[2] + LEARNING_RATE * neurons[7].getError() * neurons[3].getOutput();
        neurons[7].getWeights()[3] = neurons[7].getWeights()[3] + LEARNING_RATE * neurons[7].getError() * neurons[4].getOutput();
        neurons[7].getWeights()[4] = neurons[7].getWeights()[4] + LEARNING_RATE * neurons[7].getError() * neurons[5].getOutput();
        neurons[7].getWeights()[5] = neurons[7].getWeights()[5] + LEARNING_RATE * neurons[7].getError() * neurons[6].getOutput();
        neurons[6].setError((neurons[7].getWeights()[5] * neurons[7].getError()) * neurons[6].derivative());
        neurons[6].setBias(neurons[6].getBias() + LEARNING_RATE * neurons[6].getError());
        neurons[6].getWeights()[0] = neurons[6].getWeights()[0] + LEARNING_RATE * neurons[6].getError() * neurons[0].getOutput();
        neurons[5].setError((neurons[7].getWeights()[4] * neurons[7].getError()) * neurons[5].derivative());
        neurons[5].setBias(neurons[5].getBias() + LEARNING_RATE * neurons[5].getError());
        neurons[5].getWeights()[0] = neurons[5].getWeights()[0] + LEARNING_RATE * neurons[5].getError() * neurons[0].getOutput();
        neurons[4].setError((neurons[7].getWeights()[3] * neurons[7].getError()) * neurons[4].derivative());
        neurons[4].setBias(neurons[4].getBias() + LEARNING_RATE * neurons[4].getError());
        neurons[4].getWeights()[0] = neurons[4].getWeights()[0] + LEARNING_RATE * neurons[4].getError() * neurons[0].getOutput();
        neurons[3].setError((neurons[7].getWeights()[2] * neurons[7].getError()) * neurons[3].derivative());
        neurons[3].setBias(neurons[3].getBias() + LEARNING_RATE * neurons[3].getError());
        neurons[3].getWeights()[0] = neurons[3].getWeights()[0] + LEARNING_RATE * neurons[3].getError() * neurons[0].getOutput();
        neurons[2].setError((neurons[7].getWeights()[1] * neurons[7].getError()) * neurons[2].derivative());
        neurons[2].setBias(neurons[2].getBias() + LEARNING_RATE * neurons[2].getError());
        neurons[2].getWeights()[0] = neurons[2].getWeights()[0] + LEARNING_RATE * neurons[2].getError() * neurons[0].getOutput();
        neurons[1].setError((neurons[7].getWeights()[0] * neurons[7].getError()) * neurons[1].derivative());
        neurons[1].setBias(neurons[1].getBias() + LEARNING_RATE * neurons[1].getError());
        neurons[1].getWeights()[0] = neurons[1].getWeights()[0] + LEARNING_RATE * neurons[1].getError() * neurons[0].getOutput();

        return this;

    }

Anonymous,
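
For contrast, here is a minimal sketch of how the same update could be written with loops instead of copy-paste. It assumes the Neuron API visible in the snippet (getOutput, derivative, getError/setError, getBias/setBias, getWeights) and the layout implied by the indices: neurons[0] is the single input, neurons[1]..neurons[6] are the hidden layer, and neurons[7] is the output. Like the original, it updates the output weights before computing the hidden deltas, so the hidden errors see the already-updated weights; textbook backpropagation would use the pre-update weights.

public NeuralNetwork backpropError(double targetResult) {
    Neuron out = neurons[7];

    // Output neuron: delta = (target - output) * f'(net), then nudge bias and weights.
    out.setError((targetResult - out.getOutput()) * out.derivative());
    out.setBias(out.getBias() + LEARNING_RATE * out.getError());
    for (int h = 1; h <= 6; h++) {
        // Weight h-1 of the output neuron connects hidden neuron h to the output.
        out.getWeights()[h - 1] += LEARNING_RATE * out.getError() * neurons[h].getOutput();
    }

    // Hidden neurons: delta = (outgoing weight * output delta) * f'(net).
    for (int h = 1; h <= 6; h++) {
        Neuron hidden = neurons[h];
        hidden.setError(out.getWeights()[h - 1] * out.getError() * hidden.derivative());
        hidden.setBias(hidden.getBias() + LEARNING_RATE * hidden.getError());
        // Each hidden neuron has a single weight, coming from the lone input neuron.
        hidden.getWeights()[0] += LEARNING_RATE * hidden.getError() * neurons[0].getOutput();
    }

    return this;
}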