import numpy as np
import matplotlib.pyplot as plt

x = np.arange(-5.0, 5.0, 0.1)

"""
# Activation: Step Function
def step_function(x):
    return np.array(x > 0, dtype=int)   # note: np.int was removed in recent NumPy; use the builtin int

y = step_function(x)

plt.plot(x,y)
plt.ylim(-0.1, 1.1)
plt.show()
"""

"""
# Activation : Sigmoid Function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))     # np.exp computes e raised to the power of each element

y = sigmoid(x)

plt.plot(x, y)
plt.ylim(-0.1, 1.1)
plt.show()
"""
"""
# Activation : Relu Function
def relu(x):
    return np.maximum(0, x)

y = relu(x)

plt.plot(x, y)
plt.ylim(-0.1, 1.1)
plt.show()
"""
# Loss function (Cross Entropy Error)

import numpy as np

np.set_printoptions(formatter = {'float_kind':lambda x : "{0:6.3f}".format(x)})

# ----------------------------------------------

Y = np.array([[.02, .90, .05, .01, .02]])
YT = np.array([[0, 1, 0, 0, 0]])

print(YT * np.log(Y))
print('%.2f' % np.sum(-YT * np.log(Y)))
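
# A reusable cross-entropy helper is a natural next step; a minimal sketch.
# The small epsilon is an assumption, added only to avoid log(0) when a
# predicted probability is exactly zero.
def cross_entropy_error(Y, YT, eps=1e-12):
    return -np.sum(YT * np.log(Y + eps))

print('%.2f' % cross_entropy_error(Y, YT))      # same value as above
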
# Activation Function (Sigmoid)

import numpy as np

np.set_printoptions(formatter = {'float_kind': lambda x: "{0:6.3f}".format(x)})

# ----------------------------------------------------
# Function (Sigmoid)
"""
def sigmoid_f(X) :                  # Sigmoid Forward propagation. Fn
    return 1 / (1 + np.exp(-X))

def sigmoid_b(XE, X) :              # Sigmoid Back propagation. Fn
    return X * (1 - X) * XE
"""
# ----------------------------------------------------
# Function (relu)
"""
def relu_f(X) :                     # relu Forward propagation. Fn
    return (X > 0) * X

def relu_b(XE, X) :                 # relu Back propagation. Fn
    return (X > 0) * 1 * XE
"""
# ----------------------------------------------------
#X = np.array([[-5, 10]])

#X = (X > 0) * X                # relu Forward propagation, Xd = X dash
#X = 1 / (1 + np.exp(-X))       # Sigmoid Forward propagation, Xd = X dash
#X = sigmoid_f(X)                # Sigmoid Function Call (F_p)
#X = relu_f(X)                    # relu Function Call (F_p)
#print(X)

#print(Xd)
# -----------------------------------------------------
#XE = np.array([[2, 1]])        # XdE = X dash Error

#XE = (Xd > 0) * 1 * XdE         # XdE = X dash Error (relu)
#XE = Xd * (1 - Xd) * XdE        # Sigmoid Back Propagation
#XE = sigmoid_b(XdE, X)          # Sigmoid Function Call (B_p)
#XE = relu_b(XE, X)              # relu Function Call (B_p)

#print(XE)
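
# Sanity check (sketch): compare the analytic sigmoid gradient from sigmoid_b
# with a central-difference numerical derivative. The step size h is an
# assumption; passing XE = 1 isolates the local gradient.
h = 1e-5
X0 = np.array([[-5.0, 10.0]])
num_grad = (sigmoid_f(X0 + h) - sigmoid_f(X0 - h)) / (2 * h)
ana_grad = sigmoid_b(np.array([[1.0, 1.0]]), sigmoid_f(X0))
print(num_grad)
print(ana_grad)         # should match the numerical estimate
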
# Activation function (Sigmoid, ReLU Fn: iteration)

import numpy as np

np.set_printoptions(formatter = {'float_kind':lambda x : "{0:6.3f}".format(x)})

# ----------------------------------------------
"""
# Define a Sigmoid Function
def sigmoid_f(X) :
    return 1 / (1 + np.exp(-X))

def sigmoid_b(XE, X) :
    return X * (1 - X) * XE
"""
# ----------------------------------------------
"""
# Define a Relu Function
def relu_f(X) :
    return (X > 0) * X

def relu_b(XE, X) :
    return (X > 0) * 1 * XE
"""
# ----------------------------------------------

X = np.array([[0.05, 0.10]])
WH = np.array([[0.15, 0.25], [0.20, 0.30]])
BH = np.array([[0.35, 0.35]])
WY = np.array([[0.40, 0.50],[0.45, 0.55]])
BY = np.array([[0.60, 0.60]])
YT = np.array([[0.01, 0.99]])
lr = 0.01

# ----------------------------------------------
for epoch in range(10000):
    
    H = sigmoid_f(X @ WH + BH)          # F.p of the Hidden layer (Call a Sigmoid Fn)
    Y = sigmoid_f(H @ WY + BY)          # F.p of the Output layer (Call a Sigmoid Fn)

    #H = relu_f(X @ WH + BH)            # F.p of the Hidden layer (Call a ReLU Fn)
    #Y = relu_f(H @ WY + BY)            # F.p of the Output layer (Call a ReLU Fn)
    
    E = (Y - YT) @ (Y - YT).T / 2
    e = E[0, 0]
    
    YE = sigmoid_b(Y - YT, Y)           # B.p of the Output layer (Call a Sigmoid Fn)
    HE = sigmoid_b(YE @ WY.T, H)        # B.p of the Hidden layer (Call a Sigmoid Fn)

    #YE = relu_b(Y - YT, Y)             # B.p of the Output layer (Call a ReLU Fn)
    #HE = relu_b(YE @ WY.T, H)          # B.p of the Hidden layer (Call a ReLU Fn)
        
    WYE = H.T @ YE
    BYE = 1 * YE
    WHE = X.T @ HE
    BHE = 1 * HE
    
    WY -= lr * WYE
    BY -= lr * BYE
    WH -= lr * WHE
    BH -= lr * BHE
    
    print('epoch =', epoch)
    print(Y)
    
    if e < 0.0000001 : break
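
# Note (sketch): the matrix form (Y - YT) @ (Y - YT).T / 2 used above is just
# a compact way of writing half the summed squared error over the outputs.
D = Y - YT
print(D @ D.T / 2)                              # 1x1 matrix form used in the loop
print('%.7f' % (np.sum((Y - YT) ** 2) / 2))     # element-wise form, same value
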
# Activation function (Softmax Fn)

import numpy as np

np.set_printoptions(formatter = {'float_kind':lambda x : "{0:6.3f}".format(x)})

# ----------------------------------------------

Y = np.array([[1.3, 5.1, 2.2, 0.7, 1.1]])   # Output layer

print(np.exp(Y))

sumY = np.sum(np.exp(Y))        # sum of all the exponentiated values
print('{:.2f}'.format(sumY))

Y = np.exp(Y) / sumY            # divide each element by the total sum
print(Y)
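
# Quick check (sketch): softmax turns the outputs into a probability
# distribution, so the values should sum to 1.
print('{:.2f}'.format(np.sum(Y)))       # 1.00
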
# Activation function (Reduce the denominator size of the Softmax function)

import numpy as np

np.set_printoptions(formatter = {'float_kind':lambda x : "{0:6.3f}".format(x)})

# ----------------------------------------------

Y = np.array([[1.3, 5.1, 2.2, 0.7, 1.1]])   # Output layer

YMax = np.max(Y)
print(YMax)

Y = Y - YMax
print(Y)

print(np.exp(Y))

sumY = np.sum(np.exp(Y))
print('%.2f' %sumY)

Y = np.exp(Y) / sumY
print(Y)
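
# Why subtracting the maximum is safe (sketch): it rescales numerator and
# denominator of the softmax by the same factor exp(-YMax), so the resulting
# probabilities are identical to the plain computation.
Y0 = np.array([[1.3, 5.1, 2.2, 0.7, 1.1]])
plain   = np.exp(Y0) / np.sum(np.exp(Y0))
shifted = np.exp(Y0 - np.max(Y0)) / np.sum(np.exp(Y0 - np.max(Y0)))
print(np.allclose(plain, shifted))      # True
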
# Activation function (Softmax Fn)

import numpy as np

np.set_printoptions(formatter = {'float_kind':lambda x : "{0:6.3f}".format(x)})

# ----------------------------------------------

def softmax_f(X) :
    XMax = np.max(X)
    X = X - XMax
    sumX = np.sum(np.exp(X))
    return np.exp(X) / sumX

def softmax_b(XE, X) :
    return XE

# ----------------------------------------------

X = np.array([[1.3, 5.1, 2.2, 0.7, 1.1]])

X = softmax_f(X)        # Call Fn

print(X)    # [[ 0.020  0.903  0.050  0.011  0.017]]
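
# Why softmax_b simply passes the error through (sketch): when softmax is
# combined with cross entropy, the gradient of the error with respect to the
# pre-softmax values simplifies to Y - YT, so the caller already hands in the
# finished gradient. Numerical check with an assumed one-hot target T:
Z = np.array([[1.3, 5.1, 2.2, 0.7, 1.1]])
T = np.array([[0, 1, 0, 0, 0]])
h = 1e-6
num = np.zeros_like(Z)
for i in range(Z.shape[1]):
    Zp = Z.copy(); Zp[0, i] += h
    Zm = Z.copy(); Zm[0, i] -= h
    Ep = -np.sum(T * np.log(softmax_f(Zp)))
    Em = -np.sum(T * np.log(softmax_f(Zm)))
    num[0, i] = (Ep - Em) / (2 * h)
print(num)                      # numerical gradient dE/dZ
print(softmax_f(Z) - T)         # analytic Y - YT, should match
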
# Activation function (iteration)
# Hidden layer (ReLU Fn) // Output layer (Softmax Fn)

import numpy as np

np.set_printoptions(formatter = {'float_kind':lambda x : "{0:6.3f}".format(x)})

# ----------------------------------------------
# Define a Relu Function
def relu_f(X) :
    return (X > 0) * X

def relu_b(XE, X) :
    return (X > 0) * 1 * XE
# ----------------------------------------------
# Define a Softmax Function
def softmax_f(X) :
    XMax = np.max(X)
    X = X - XMax
    sumX = np.sum(np.exp(X))
    return np.exp(X) / sumX

def softmax_b(XE, X) :
    return XE
# ----------------------------------------------

X = np.array([[0.05, 0.10]])
WH = np.array([[0.15, 0.25], [0.20, 0.30]])
BH = np.array([[0.35, 0.35]])
WY = np.array([[0.40, 0.50],[0.45, 0.55]])
BY = np.array([[0.60, 0.60]])
YT = np.array([[0, 1]])     # Cross Entropy (0, 1)
lr = 0.01

# ----------------------------------------------
for epoch in range(10000):
       
    H = relu_f(X @ WH + BH)             # F.p of the Hidden layer (Call a Relu Fn)
    Y = softmax_f(H @ WY + BY)             # F.p of the Output layer (Call a Softmax Fn)
    
    e = -np.sum(YT * np.log(Y))
        
    YE = softmax_b(Y - YT, Y)              # B.p of the Output layer (Call a Softmax Fn)
    HE = relu_b(YE @ WY.T, H)           # B.p of the Hidden layer (Call a Relu Fn)
        
    WYE = H.T @ YE
    BYE = 1 * YE
    WHE = X.T @ HE
    BHE = 1 * HE
    
    WY -= lr * WYE
    BY -= lr * BYE
    WH -= lr * WHE
    BH -= lr * BHE
    
    print('epoch =', epoch)
    print(Y)
    
    if e < 0.0000001 : break
# Weight Initialize
# Method "HE" & "Lecun" Initialize

import numpy as np

np.set_printoptions(formatter = {'float_kind':lambda x : "{0:6.3f}".format(x)})

# ----------------------------------------------
# Seed NumPy's random number generator so the results are reproducible
np.random.seed(0)

# Create a 2x2 matrix of uniform random numbers between -1 and 1
W = np.random.uniform(-1, 1, (2, 2))

print(W)
print(W * np.sqrt(6.0 / W.shape[0]))    # He initialization > ReLU
print(W * np.sqrt(3.0 / W.shape[0]))    # LeCun initialization > Sigmoid, Softmax
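
# Quick check (sketch): both scalings shrink the spread of the initial
# weights as the fan-in (number of input rows) grows.
for n in (2, 8, 32):
    W = np.random.uniform(-1, 1, (n, n))
    print(n,
          '%.3f' % np.std(W * np.sqrt(6.0 / W.shape[0])),    # He (ReLU)
          '%.3f' % np.std(W * np.sqrt(3.0 / W.shape[0])))    # LeCun (Sigmoid/Softmax)
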
# Activation function (iteration)
# Hidden layer (ReLU Fn) // Output layer (Softmax Fn) // Weight initialize (He)

import numpy as np

np.set_printoptions(formatter = {'float_kind':lambda x : "{0:6.3f}".format(x)})

# ----------------------------------------------
# Define a Relu Function
def relu_f(X) :
    return (X > 0) * X

def relu_b(XE, X) :
    return (X > 0) * 1 * XE
# ----------------------------------------------
# Define a Softmax Function
def softmax_f(X) :
    XMax = np.max(X)
    X = X - XMax
    sumX = np.sum(np.exp(X))
    return np.exp(X) / sumX

def softmax_b(XE, X) :
    return XE
# ----------------------------------------------
# Initialize weight
# He
def initialize_weight_He(nRow, nCol) :
    W = np.random.uniform(-1, 1, (nRow, nCol))
    return W * np.sqrt(6.0 / W.shape[0])
# Lecun
def initialize_weight_Le(nRow, nCol) :
    W = np.random.uniform(-1, 1, (nRow, nCol))
    return W * np.sqrt(3.0 / W.shape[0])
# ----------------------------------------------

X = np.array([[0.05, 0.10]])
WH = initialize_weight_He(2, 2)     # initialize weights (He)
BH = np.zeros((1, 2))               # biases start at zero
WY = initialize_weight_Le(2, 2)     # initialize weights (LeCun)
BY = np.zeros((1, 2))
YT = np.array([[0, 1]])     # Cross Entropy (0, 1)
lr = 0.01

# ----------------------------------------------
for epoch in range(0, 10000):
       
    H = relu_f(X @ WH + BH)             # F.p of the Hidden layer (Call a Relu Fn)
    Y = softmax_f(H @ WY + BY)             # F.p of the Output layer (Call a Softmax Fn)
    
    e = -np.sum(YT * np.log(Y))
        
    YE = softmax_b(Y - YT, Y)              # B.p of the Output layer (Call a Softmax Fn)
    HE = relu_b(YE @ WY.T, H)           # B.p of the Hidden layer (Call a Relu Fn)
        
    WYE = H.T @ YE
    BYE = 1 * YE
    WHE = X.T @ HE
    BHE = 1 * HE
    
    WY -= lr * WYE
    BY -= lr * BYE
    WH -= lr * WHE
    BH -= lr * BHE
    
    print('epoch =', epoch)
    print(Y)
    
    if e < 0.0000001 : break
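
# Post-training check (sketch): one more forward pass with the trained
# weights; Y should have moved toward the one-hot target YT = [[0, 1]].
H = relu_f(X @ WH + BH)
Y = softmax_f(H @ WY + BY)
print('final cross entropy = %.7f' % -np.sum(YT * np.log(Y)))
print(Y)
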

 
