#include <stdio.h>

double x1 = 2, x2 = 3;			// input layer
double w1 = 3, w2 = 4;			// weight
double b = 1;					// bias
double y;						// output layer

double E;						// Error (=loss)
double yT = 27;					// Target value, label

double yE;						// back propagation (output 1/1)
double x1E;						// back propagation (input 1/2)
double x2E;						// back propagation (input 2/2)
double w1E;						// back propagation (weight 1/2)
double w2E;						// back propagation (weight 2/2)
double bE;						// back propagation (bias 1/1)

double lr = 0.01;				// Learning rate


int main(void)
{
	// Forward propagation
	y = (x1 * w1) + (x2 * w2) + (1 * b);
	printf("예측값(y) : %.2lf\n", y);

	// Mean Squared Error (MSE)
	E = (y - yT) * (y - yT) / 2;
	printf("오차(E) : %.2lf\n", E);

	// Back propagation (Output)
	yE = y - yT;
	printf("출력 역전파 오차(yE) : %.2lf\n", yE);

	// Back propagation (Input)
	x1E = yE * w1;
	x2E = yE * w2;
	printf("입력 역전파 오차(x1E) : %.2lf, (x2E) : %.2lf\n", x1E, x2E);

	// Forward propagation, rewritten from the weight/bias point of view
	y = (w1 * x1) + (w2 * x2) + (b * 1);
	printf("Weight/bias forward propagation (y) : %.2lf\n", y);

	// Weight, Bias Back propagation
	w1E = yE * x1;
	w2E = yE * x2;
	bE = yE * 1;
	printf("가중치 역전파 (w1E) : %.2lf, (w2E) :%.2lf\n", w1E, w2E);
	printf("편향 역전파(bE) : %.2lf\n", bE);

	// Neural network learning (Weight, Bias)
	w1 -= lr * w1E;
	w2 -= lr * w2E;
	b -= lr * bE;
	printf("가중치 학습(w1) : %.2lf, (w2) : %.2lf\n", w1, w2);
	printf("편향 학습(b) : %.2lf\n", b);

	// Predict with learned neural networks
	y = (x1 * w1) + (x2 * w2) + (1 * b);
	printf("y : %f\n", y);

	/*
	Predicted value (y) : 19.00
	Error (E) : 32.00
	Output backpropagation error (yE) : -8.00
	Input backpropagation error (x1E) : -24.00, (x2E) : -32.00
	Weight/bias forward propagation (y) : 19.00
	Weight backpropagation (w1E) : -16.00, (w2E) : -24.00
	Bias backpropagation (bE) : -8.00
	Learned weights (w1) : 3.16, (w2) : 4.24
	Learned bias (b) : 1.08
	y : 20.120000
	*/

	return 0;
}
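
For reference, the backpropagation values printed above are just the partial derivatives of E = (y - yT)^2 / 2 with y = x1*w1 + x2*w2 + b: dE/dy = y - yT, dE/dw1 = (y - yT) * x1, dE/dw2 = (y - yT) * x2, and dE/db = y - yT (the 1/2 in the error cancels the 2 from the square). The short sketch below is my own check rather than part of the original program; the helper name forward and the step size h are assumptions. It compares the analytic gradient for w1 with a numerical central-difference estimate, so the value -16.00 printed above can be verified independently.

#include <stdio.h>

/* Hypothetical helper: the same single-neuron forward pass as in the post. */
static double forward(double w1, double w2, double b, double x1, double x2)
{
	return (x1 * w1) + (x2 * w2) + (1 * b);
}

int main(void)
{
	double x1 = 2, x2 = 3, w1 = 3, w2 = 4, b = 1, yT = 27;
	double h = 1e-6;						// small step for the finite difference (assumed value)

	// Analytic gradient, as in the post: dE/dw1 = (y - yT) * x1
	double y = forward(w1, w2, b, x1, x2);
	double w1E = (y - yT) * x1;

	// Numerical gradient: (E(w1 + h) - E(w1 - h)) / (2h)
	double yp = forward(w1 + h, w2, b, x1, x2);
	double ym = forward(w1 - h, w2, b, x1, x2);
	double Ep = (yp - yT) * (yp - yT) / 2;
	double Em = (ym - yT) * (ym - yT) / 2;
	double w1E_num = (Ep - Em) / (2 * h);

	printf("analytic w1E = %.6f, numerical w1E = %.6f\n", w1E, w1E_num);

	return 0;
}

The second listing below repeats the same update rule in a loop of up to 200 epochs and stops once the error E falls below 0.0000001.
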
#include <stdio.h>

double x1 = 2, x2 = 3;		// input layer (2 inputs)
double w1 = 3, w2 = 4;		// weights
double b = 1;				// bias
double yT = 27;				// target value, label
double lr = 0.01;			// learning rate

void dnn_test();

int main(void)
{
	dnn_test();

	return 0;
}

void dnn_test()
{
	for (int epoch = 0; epoch < 200; epoch++)
	{
		double y = (x1 * w1) + (x2 * w2) + (1 * b);		// Forward propagation
		double E = (y - yT) * (y - yT) / 2;				// Mean squared error
		double yE = y - yT;								// Back propagation (Output)
		double x1E = yE * w1;							// Back propagation (Input 1/2; not used in the update)
		double x2E = yE * w2;							// Back propagation (Input 2/2; not used in the update)
		double w1E = yE * x1;							// Back propagation (Weight 1/2)
		double w2E = yE * x2;							// Back propagation (Weight 2/2)
		double bE = yE * 1;								// Back propagation (Bias)

		w1 -= lr * w1E;									// Weight learning (1/2)
		w2 -= lr * w2E;									// Weight learning (2/2)
		b -= lr * bE;									// bias learning

		printf("epoch = %d\n", epoch);
		printf("예측값(y) = %6.3f\n", y);
		printf("가중치(w1) = %6.3f, (w2) = %6.3f\n", w1, w2);
		printf("가중치 역전파(w1E) = %6.3f, (w2E) = %6.3f, 편향 역전파(bE) = %6.3f\n", w1E, w2E, bE);
		printf("-----------------------------------------------------------------------------\n");

		if (E < 0.0000001) break;
	}
}
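
Because the neuron is linear, every epoch shrinks the remaining error y - yT by the constant factor 1 - lr * (x1*x1 + x2*x2 + 1) = 0.86, so the E < 0.0000001 stopping condition is reached well before the 200-epoch cap. As a minimal usage sketch (my own addition, not in the original code), the learned parameters can be checked once more after dnn_test() returns:

	// Assumed usage: placed in main() right after the dnn_test() call.
	// Re-run the forward pass with the learned w1, w2, b and compare to yT.
	double y_final = (x1 * w1) + (x2 * w2) + (1 * b);
	printf("final prediction = %.6f (target yT = %.2f)\n", y_final, yT);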