# "728x90" / "반응형" — blog ad-banner/layout residue from the scraped page, not code.
import numpy as np

# Three small matrices: a 2x2, a 1x2 row vector, and a 2x1 column vector.
A = np.array([[1, 2], [3, 4]])
B = np.array([[1, 2]])
C = np.array([[1], [2]])

for matrix in (A, B, C):
    print(matrix)

# Expected output:
# [[1 2]
#  [3 4]]
# [[1 2]]
# [[1]
#  [2]]
import numpy as np

A = np.array([[1, 2], [3, 4]])  # 2 x 2
B = np.array([[1, 2]])          # 1 x 2 row vector
C = np.array([[1], [2]])        # 2 x 1 column vector

# .shape reports (rows, columns) for each matrix.
for matrix in (A, B, C):
    print(matrix.shape)

# Expected output: (2, 2) / (1, 2) / (2, 1)
import numpy as np

A = np.array([[1, 2]])
B = np.array([[3, 4]])
C = np.array([[1, 2], [3, 4]])
D = np.array([[5, 6], [7, 8]])

# Element-wise addition/subtraction and scalar multiplication.
print(A + B)   # [[4 6]]
print(A - B)   # [[-2 -2]]
print(C + D)   # [[ 6  8]
               #  [10 12]]
print(C - D)   # [[-4 -4]
               #  [-4 -4]]
print(2 * C)   # [[2 4]
               #  [6 8]]
import numpy as np

A = np.array([[1, 2]])          # 1 x 2 matrix
B = np.array([[3, 4], [5, 6]])  # 2 x 2 matrix
C = np.array([[1], [2]])        # 2 x 1 matrix
D = np.array([[5, 6]])          # 1 x 2 matrix

# Matrix products with @:
#   (1x2)@(2x2) -> 1x2
#   (2x1)@(1x2) -> 2x2 (outer product)
#   (1x2)@(2x1) -> 1x1 (inner product)
print(A @ B)   # [[13 16]]
print(C @ D)   # [[ 5  6]
               #  [10 12]]
print(A @ C)   # [[5]]
import numpy as np  # transpose — used when running back-propagation "in reverse"

A = np.array([[1, 2]])          # 1 x 2 matrix
B = np.array([[1, 2], [3, 4]])  # 2 x 2 matrix

# .T swaps rows and columns.
print(A)     # [[1 2]]
print(A.T)   # [[1]
             #  [2]]
print(B)     # [[1 2]
             #  [3 4]]
print(B.T)   # [[1 3]
             #  [2 4]]
import numpy as np  # building block of Mean Squared Error

A = np.array([[1, 2]])  # 1 x 2 matrix

# A @ A.T yields the sum of squared entries as a 1x1 matrix: 1*1 + 2*2 = 5.
print(A)        # [[1 2]]
print(A.T)      # [[1]
                #  [2]]
print(A @ A.T)  # [[5]]
import numpy as np

X = np.array([[2, 3]])          # input layer (1 x 2)
W = np.array([[3, 5], [4, 6]])  # weights     (2 x 2)
B = np.array([[1, 2]])          # bias        (1 x 2)

# Forward propagation through one linear layer.
Y = X @ W + B
print(Y)  # [[19 30]]
import numpy as np  # Mean Squared Error

X = np.array([[2, 3]])
W = np.array([[3, 5], [4, 6]])
B = np.array([[1, 2]])
YT = np.array([[27, -30]])  # target output

Y = X @ W + B  # prediction: [[19 30]]

# Error vector, its transpose, and the squared-error (1x1) matrices.
diff = Y - YT
print(diff)               # [[-8 60]]
print(diff.T)             # [[-8]
                          #  [60]]
print(diff @ diff.T)      # [[3664]]
print(diff @ diff.T / 2)  # [[1832.]]
import numpy as np  # Mean Squared Error + start of back-propagation

X = np.array([[2, 3]])
W = np.array([[3, 5], [4, 6]])
B = np.array([[1, 2]])
YT = np.array([[27, -30]])  # target output

Y = X @ W + B

print(Y - YT)                     # [[-8 60]]
print((Y - YT).T)                 # [[-8]
                                  #  [60]]
print((Y - YT) @ (Y - YT).T)      # [[3664]]
print((Y - YT) @ (Y - YT).T / 2)  # [[1832.]]

E = (Y - YT) @ (Y - YT).T / 2  # 1x1 error matrix
e = E[0, 0]                    # scalar error value

# Back-propagation starts from the output-layer error.
YE = Y - YT
print(YE)  # [[-8 60]]
import numpy as np  # Mean Squared Error + weight/bias gradients

X = np.array([[2, 3]])
W = np.array([[3, 5], [4, 6]])
B = np.array([[1, 2]])
YT = np.array([[27, -30]])  # target output

Y = X @ W + B

print(Y - YT)
print((Y - YT).T)
print((Y - YT) @ (Y - YT).T)
print((Y - YT) @ (Y - YT).T / 2)

E = (Y - YT) @ (Y - YT).T / 2  # 1x1 error matrix
e = E[0, 0]                    # scalar error value

YE = Y - YT    # output error (back-propagation)

WE = X.T @ YE  # weight gradient ("weight error")
BE = 1 * YE    # bias gradient ("bias error")

print(WE)
print(BE)
# Expected output:
# [[-8 60]]
# [[-8]
#  [60]]
# [[3664]]
# [[1832.]]
# [[-16 120]
#  [-24 180]]
# [[-8 60]]
import numpy as np  # Mean Squared Error + one gradient-descent step

X = np.array([[2, 3]])

# W and B must be FLOAT arrays: with the original int arrays, the in-place
# updates `W -= lr * WE` / `B -= lr * BE` raise a numpy UFuncTypeError
# (float64 gradients cannot be cast into an int64 array in place).
W = np.array([[3.0, 5.0], [4.0, 6.0]])
B = np.array([[1.0, 2.0]])

YT = np.array([[27, -30]])  # target output

Y = X @ W + B  # forward propagation

print(Y - YT)
print((Y - YT).T)
print((Y - YT) @ (Y - YT).T)
print((Y - YT) @ (Y - YT).T / 2)

E = (Y - YT) @ (Y - YT).T / 2  # 1x1 error matrix
e = E[0, 0]                    # scalar error value

YE = Y - YT    # output error (back-propagation)

WE = X.T @ YE  # weight gradient
BE = 1 * YE    # bias gradient

lr = 0.01      # learning rate
W -= lr * WE   # weight update (gradient descent)
B -= lr * BE   # bias update

print(W)  # [[3.16 3.8 ] [4.24 4.2 ]]
print(B)  # [[1.08 1.4 ]]
import numpy as np

# Print numpy floats with 3 decimal places.
np.set_printoptions(formatter={'float_kind': lambda x: "{0:6.3f}".format(x)})

X = np.array([[2, 3]])

# Float arrays so the in-place gradient updates below are legal (the
# original int arrays made `W -= lr * WE` raise a numpy casting error).
W = np.array([[3.0, 5.0], [4.0, 6.0]])
B = np.array([[1.0, 2.0]])

YT = np.array([[27, -30]])  # target output
lr = 0.01                   # learning rate

# The original snippet placed `break` at module top level — a SyntaxError.
# The early-stopping test only makes sense inside a training loop, so the
# single gradient step is wrapped in one here (same pattern as the
# two-layer example that follows in this post).
for epoch in range(2000):
    Y = X @ W + B                  # forward propagation
    E = (Y - YT) @ (Y - YT).T / 2  # 1x1 squared-error matrix
    e = E[0, 0]                    # scalar error value

    YE = Y - YT    # output error (back-propagation)
    WE = X.T @ YE  # weight gradient
    BE = 1 * YE    # bias gradient

    W -= lr * WE   # weight update
    B -= lr * BE   # bias update

    print(W)
    print(B)

    if e < 0.0000001:  # early stop once the error is negligible
        break
import numpy as np
# (dropped the accidental `from ast import While` — an editor auto-import
# that nothing in this file uses)

# Print numpy floats with 3 decimal places.
np.set_printoptions(formatter={'float_kind': lambda x: "{0:6.3f}".format(x)})

# Two-layer linear network (no activation) trained by gradient descent.
X = np.array([[0.05, 0.10]])                 # input (1 x 2)
WH = np.array([[0.15, 0.25], [0.20, 0.30]])  # input -> hidden weights
BH = np.array([[0.35, 0.35]])                # hidden bias
WY = np.array([[0.40, 0.50], [0.45, 0.55]])  # hidden -> output weights
BY = np.array([[0.60, 0.60]])                # output bias
YT = np.array([[0.01, 0.99]])                # target output
lr = 0.01                                    # learning rate

for epoch in range(2000):
    # Forward propagation.
    H = X @ WH + BH
    Y = H @ WY + BY

    # Squared error (1x1 matrix) and its scalar value.
    E = (Y - YT) @ (Y - YT).T / 2
    e = E[0, 0]

    # Back-propagation: output error, then gradients layer by layer.
    YE = Y - YT        # output error
    WYE = H.T @ YE     # output-weight gradient
    BYE = 1 * YE       # output-bias gradient
    HE = YE @ WY.T     # error propagated back to the hidden layer
    WHE = X.T @ HE     # hidden-weight gradient
    BHE = 1 * HE       # hidden-bias gradient

    # Gradient-descent updates.
    WY -= lr * WYE
    BY -= lr * BYE
    WH -= lr * WHE
    BH -= lr * BHE

    print('epoch = ', epoch)
    print(Y)
    print(WY)
    print(BY)
    print(WH)
    print(BH)

    if e < 0.0000001:  # early stop once the error is negligible
        break

# Sample converged output (moved out of the loop body, where the original
# kept it as a string literal re-evaluated every iteration):
# epoch =  665
# [[ 0.010  0.990]]
# [[ 0.203  0.533]
#  [ 0.253  0.583]]
# [[-0.095  0.730]]
# [[ 0.143  0.242]
#  [ 0.186  0.284]]
# [[ 0.213  0.186]]
"""
30 page practice exam

import numpy as np

# forward propagation
H = X @ WH + 1 * BH     # W -> H
M = H @ WM + 1 * BM     # H -> M
Y = M @ WY + 1 * BY     # M -> H

# Mean Squared Error (MSE)
ME = YE @ WY.T
HE = ME @ WM.T

WYE = M.T @ YE
BYE = 1 * YE
WME = H.T @ ME
BME = 1 * ME
WHE =  X. T @ HE
BHE = 1 * HE

# Error
E = (Y - YT) @ (Y - YT).T / 2
e = E[0, 0]

# Back Propagation (Output)
YE = Y - YT



"""
# "728x90" / "반응형" / "+ Recent posts" — end-of-post blog navigation residue, not code.