#include <stdio.h>
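/*
 * A 2-input, 2-output linear layer trained with mean squared error:
 * one forward pass, the loss, back propagation of the error, and a
 * single gradient-descent update with learning rate 0.01, all written
 * out as explicit scalar operations.
 */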
double x1 = 2, x2 = 3; // input layer
double w1 = 3, w2 = 4; // weight (1/2)
double w3 = 5, w4 = 6; // weight (2/2)
double b1 = 1, b2 = 2; // bias
double y1, y2; // output layer
double E, E1, E2; // Error(=loss)
double yT1 = 27, yT2 = -30; // Target value, label
double yE1, yE2; // back propagation (Output)
double xE1, xE2; // back propagation (Input)
double w1E, w2E; // back propagation (Weight 1/2)
double w3E, w4E; // back propagation (Weight 2/2)
double b1E, b2E; // back propagation (Bias)
double lr = 0.01; // Learning rate
int main(void)
{
// Forward propagation
y1 = (x1 * w1) + (x2 * w2) + (1 * b1);
y2 = (x1 * w3) + (x2 * w4) + (1 * b2);
printf("예측값(y1) : %.2lf, (y2) : %.2lf\n", y1, y2);
// Mean Squared Error (MSE)
E1 = (y1 - yT1) * (y1 - yT1) / 2;
E2 = (y2 - yT2) * (y2 - yT2) / 2;
E = E1 + E2;
printf("오차(E) : %.2lf, (E1) : %.2lf, (E2) : %.2lf\n", E, E1, E2);
// Back propagation (Output)
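// With E = (y - yT)^2 / 2, the gradient of the loss with respect to each
// output is simply dE/dy = y - yT.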
yE1 = y1 - yT1;
yE2 = y2 - yT2;
printf("출력 역전파 오차(yE1) : %.2lf, (yE2) : %.2lf\n", yE1, yE2);
// Back propagation (Input)
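// Each input's error is the sum of the output errors weighted by the
// connections it feeds into: dE/dx1 = yE1*w1 + yE2*w3 and dE/dx2 = yE1*w2 + yE2*w4.
// (Not needed for the parameter update; shown for completeness.)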
xE1 = (yE1 * w1) + (yE2 * w3);
xE2 = (yE1 * w2) + (yE2 * w4);
// Forward propagation (Weight, Bias)
y1 = (w1 * x1) + (w2 * x2) + (b1 * 1);
y2 = (w3 * x1) + (w4 * x2) + (b2 * 1);
printf("가중치, 편향 순전파(y1) : %.2lf, (y2) : %.2lf\n", y1, y2);
// Back propagation (Weight, Bias)
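// Since y = w*x + b, each weight's gradient is the output error times the
// input it multiplies (e.g. dE/dw1 = yE1*x1), and each bias gradient is the
// output error times 1.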
w1E = yE1 * x1;
w2E = yE1 * x2;
w3E = yE2 * x1;
w4E = yE2 * x2;
b1E = yE1 * 1;
b2E = yE2 * 1;
printf("가중치 역전파(w1E) : %.2lf, (w2E) : %.2lf, (w3E) : %.2lf, (w4E) : %.2lf\n", w1E, w2E, w3E, w4E);
printf("편향 역전파(b1E) : %.2lf, (b2E) : %.2lf\n", b1E, b2E);
// Neural network Learning (Weight, Bias)
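// Plain gradient descent: each parameter moves against its gradient,
// scaled by the learning rate.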
w1 -= lr * w1E;
w2 -= lr * w2E;
w3 -= lr * w3E;
w4 -= lr * w4E;
b1 -= lr * b1E;
b2 -= lr * b2E;
printf("가중치 학습(w1) : %.2lf, (w2) : %.2lf, (w3) : %.2lf, (w4) : %.2lf\n", w1, w2, w3, w4);
printf("편향 학습(b1) : %.2lf, (b2) : %.2lf\n", b1, b2);
// Predict with learned neural networks
y1 = (x1 * w1) + (x2 * w2) + (1 * b1);
y2 = (x1 * w3) + (x2 * w4) + (1 * b2);
printf("y1 : %.2lf, y2 : %.2lf\n", y1, y2);
/*
Predicted value (y1) : 19.00, (y2) : 30.00
Error (E) : 1832.00, (E1) : 32.00, (E2) : 1800.00
Output back-propagation error (yE1) : -8.00, (yE2) : 60.00
Weight/bias forward propagation (y1) : 19.00, (y2) : 30.00
Weight back-propagation (w1E) : -16.00, (w2E) : -24.00, (w3E) : 120.00, (w4E) : 180.00
Bias back-propagation (b1E) : -8.00, (b2E) : 60.00
Weight update (w1) : 3.16, (w2) : 4.24, (w3) : 3.80, (w4) : 4.20
Bias update (b1) : 1.08, (b2) : 1.40
y1 : 20.12, y2 : 21.60
*/
return 0;
}
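For reference, the same one-step update can be written with arrays and loops instead of one scalar variable per parameter. The sketch below is my own rework of the listing above; the helper name forward() and the array layout are assumptions, not part of the original code, but with the same inputs, weights, biases, targets, and learning rate it reproduces the same numbers (19.00 and 30.00 before the update, 20.12 and 21.60 after).

#include <stdio.h>

#define NIN  2   /* number of inputs  */
#define NOUT 2   /* number of outputs */

/* Hypothetical helper (not in the original listing): y[o] = sum_i w[o][i]*x[i] + b[o] */
static void forward(const double x[], double w[][NIN], const double b[], double y[])
{
    for (int o = 0; o < NOUT; o++) {
        y[o] = b[o];
        for (int i = 0; i < NIN; i++)
            y[o] += w[o][i] * x[i];
    }
}

int main(void)
{
    double x[NIN]       = { 2, 3 };               /* input layer                 */
    double w[NOUT][NIN] = { { 3, 4 }, { 5, 6 } }; /* weights, one row per output */
    double b[NOUT]      = { 1, 2 };               /* biases                      */
    double yT[NOUT]     = { 27, -30 };            /* target values               */
    double y[NOUT];
    double lr = 0.01;                             /* learning rate               */

    forward(x, w, b, y);                          /* prediction: 19.00, 30.00    */
    for (int o = 0; o < NOUT; o++) {
        double yE = y[o] - yT[o];                 /* dE/dy                       */
        for (int i = 0; i < NIN; i++)
            w[o][i] -= lr * yE * x[i];            /* dE/dw = dE/dy * x           */
        b[o] -= lr * yE;                          /* dE/db = dE/dy               */
    }
    forward(x, w, b, y);                          /* after update: 20.12, 21.60  */
    printf("y1 : %.2lf, y2 : %.2lf\n", y[0], y[1]);
    return 0;
}

Keeping the parameters in arrays means the same loops work for any layer size just by changing NIN and NOUT, which is awkward to do with individually named scalars.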
#include <stdio.h>
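/*
 * The same 2-input, 2-output network, now trained for up to 200 epochs,
 * with early stopping once the loss E falls below 1e-7.
 */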
void dnn_test();
double x1 = 2, x2 = 3; // input layer
double w1 = 3, w2 = 4; // weight (1/2)
double w3 = 5, w4 = 6; // weight (2/2)
double b1 = 1, b2 = 2; // bias
double y1, y2; // output layer
double y1T = 27, y2T = -30; // Target value, label
double E, E1, E2; // Mean Squared Error (MSE)
double y1E, y2E; // Back propagation (Output)
double x1E, x2E; // Back propagation (Input)
double w1E, w2E; // Back propagation (Weight 1/2)
double w3E, w4E; // Back propagation (Weight 2/2)
double b1E, b2E; // Back propagation (Bias)
double lr = 0.01; // Learning rate
int main(void)
{
dnn_test();
return 0;
}
void dnn_test()
{
for (int epoch = 0; epoch < 200; epoch++)
{
printf("epoch = %d\n", epoch);
// Feed forward
y1 = (x1 * w1) + (x2 * w2) + (1 * b1);
y2 = (x1 * w3) + (x2 * w4) + (1 * b2);
printf("Predicted value (y1) : %.2lf, (y2) : %.2lf\n", y1, y2);
// Mean Squared Error (MSE)
E1 = (y1 - y1T) * (y1 - y1T) / 2;
E2 = (y2 - y2T) * (y2 - y2T) / 2;
E = E1 + E2;
printf("평균 제곱 오차(E) : %.2lf, (E1) : %.2lf, (E2) : %.2lf\n", E, E1, E2);
// Back Propagation (Output)
y1E = y1 - y1T;
y2E = y2 - y2T;
printf("출력 역전파 오차(y1E) : %.2lf, (y2E) : %.2lf\n", y1E, y2E);
// Back propagation (Input)
x1E = (y1E * w1) + (y2E * w3);
x2E = (y1E * w2) + (y2E * w4);
printf("입력 역전파 오차(x1E) : %.2lf, (x2E) : %.2lf\n", x1E, x2E);
// Forward propagation (Weight, Bias)
y1 = (w1 * x1) + (w2 * x2) + (b1 * 1);
y2 = (w3 * x1) + (w4 * x2) + (b2 * 1);
printf("Weight/bias forward propagation (y1) : %.2lf, (y2) : %.2lf\n", y1, y2);
// Back propagation (Weight, Bias)
w1E = y1E * x1;
w2E = y1E * x2;
w3E = y2E * x1;
w4E = y2E * x2;
b1E = y1E * 1;
b2E = y2E * 1;
printf("Weight back-propagation error (w1E) : %.2lf, (w2E) : %.2lf, (w3E) : %.2lf, (w4E) : %.2lf\n", w1E, w2E, w3E, w4E);
printf("Bias back-propagation error (b1E) : %.2lf, (b2E) : %.2lf\n", b1E, b2E);
// Neural networks learning (Weight, Bias)
w1 -= lr * w1E;
w2 -= lr * w2E;
w3 -= lr * w3E;
w4 -= lr * w4E;
b1 -= lr * b1E;
b2 -= lr * b2E;
printf("가중치 학습(w1) : %.2lf, (w2) : %.2lf, (w3) : %.2lf, (w4) : %.2lf\n", w1, w2, w3, w4);
printf("편향 학습(b1) : %.2lf, (b2) : %.2lf\n", b1, b2);
// Predict with learned neural networks
y1 = (x1 * w1) + (x2 * w2) + (1 * b1);
y2 = (x1 * w3) + (x2 * w4) + (1 * b2);
printf("y1 : % .2lf, y2 : % .2lf\n", y1, y2);
printf("------------------------------------------\n");
if (E < 0.0000001) break;
}
}
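As a back-of-the-envelope check, assuming the forward pass shown here (each bias entering once), every epoch multiplies both output errors by the same factor, 1 - lr * (x1*x1 + x2*x2 + 1) = 1 - 0.01 * 14 = 0.86, so the loss E shrinks by roughly 0.86^2 ≈ 0.74 per epoch. Starting from E = 1832, it should fall below the 1e-7 stop threshold after roughly 80 epochs, and the loop exits well before the 200-epoch limit.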