Machine Learning (Andrew Ng) ex4: Backpropagation algorithm

import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import matplotlib
import scipy.optimize as opt
from sklearn.metrics import classification_report  # classification report for evaluating predictions
def load_data(path):
    data=sio.loadmat(path)
    X=data.get('X')
    y=data.get('y')
    return X,y
X_raw,y_raw =load_data('ex4data1.mat')
X=np.insert(X_raw,0,np.ones(X_raw.shape[0]),axis=1)
X.shape
(5000, 401)
y_raw.shape
(5000, 1)
def expand_y(y):
    # one row per example; column k-1 is set to 1 for label k (labels run 1..10)
    res = [[0] * 10 for _ in range(y.shape[0])]
    for i,k in zip(y,res):
        k[int(i)-1]=1
    return np.array(res)
    
y = expand_y(y_raw)
y.shape

(5000, 10)
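
As a quick sanity check on the one-hot encoding, a vectorized equivalent (a sketch; expand_y_vec is not part of the exercise and assumes the labels are 1..10 as in ex4data1.mat) should agree with the loop version:

# vectorized one-hot encoding: column k-1 is 1 exactly when the label equals k
def expand_y_vec(y):
    return (np.arange(1, 11) == y.reshape(-1, 1)).astype(int)

print(np.array_equal(expand_y(y_raw), expand_y_vec(y_raw)))  # expected: True
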
def load_weight(path):
    data = sio.loadmat(path)
    return data['Theta1'], data['Theta2']
def serialize(a, b):
    """Flatten two weight matrices into a single 1-D parameter vector."""
    return np.concatenate((np.ravel(a), np.ravel(b)))
t1, t2 = load_weight('ex4weights.mat')
t1.shape, t2.shape
theta=serialize(t1,t2)

Forward propagation

def sigmoid(z):
    return 1 / (1 + np.exp(-z))
def feed_forward(theta_1, theta_2, X):
    t1 = theta_1
    t2 = theta_2
    z2 = X @ t1.T                                                  # (5000, 25) hidden-layer pre-activation
    a2 = np.insert(sigmoid(z2), 0, np.ones(X.shape[0]), axis=1)    # add bias column -> (5000, 26)
    z3 = a2 @ t2.T                                                 # (5000, 10) output-layer pre-activation
    h = sigmoid(z3)                                                # hypothesis, one probability per class
    return X, z2, a2, z3, h
    
_, _, _, _, h = feed_forward(t1,t2, X)
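
Before moving on to the cost, it is worth confirming that the intermediate shapes match the architecture (400 pixels + bias, 25 hidden units, 10 output classes); a quick check reusing feed_forward:

a1, z2, a2, z3, h = feed_forward(t1, t2, X)
print(a1.shape, z2.shape, a2.shape, z3.shape, h.shape)
# expected: (5000, 401) (5000, 25) (5000, 26) (5000, 10) (5000, 10)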

cost function

$$J(\theta)=\frac{1}{m} \sum_{i=1}^{m} \sum_{k=1}^{K}\left[-y_{k}^{(i)} \log\left(\left(h_{\theta}\left(x^{(i)}\right)\right)_{k}\right)-\left(1-y_{k}^{(i)}\right) \log\left(1-\left(h_{\theta}\left(x^{(i)}\right)\right)_{k}\right)\right]$$

def cost(theta_1,theta_2,X,y):
    m=X.shape[0]
    _, _, _, _, h = feed_forward(theta_1,theta_2, X)
    J=(1/m)*np.sum((-np.multiply(np.log(h),y)-np.multiply(1-y,np.log(1-h))))
    return J
cost(t1,t2,X,y)

0.2876291651613189

regularized cost function

$$J(\theta)=\frac{1}{m} \sum_{i=1}^{m} \sum_{k=1}^{K}\left[-y_{k}^{(i)} \log\left(\left(h_{\theta}\left(x^{(i)}\right)\right)_{k}\right)-\left(1-y_{k}^{(i)}\right) \log\left(1-\left(h_{\theta}\left(x^{(i)}\right)\right)_{k}\right)\right]+\frac{\lambda}{2 m}\left[\sum_{j=1}^{25} \sum_{k=1}^{400}\left(\Theta_{j, k}^{(1)}\right)^{2}+\sum_{j=1}^{10} \sum_{k=1}^{25}\left(\Theta_{j, k}^{(2)}\right)^{2}\right]$$

def deserialize(seq):
    """Split a flat parameter vector into ndarrays of shape (25, 401) and (10, 26)."""
    return seq[:25 * 401].reshape(25, 401), seq[25 * 401:].reshape(10, 26)

theta.shape
(10285,)
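
A round-trip check of serialize/deserialize (a small sketch using the two helpers above) confirms the 10285 = 25*401 + 10*26 parameters split back into the original matrices:

t1_back, t2_back = deserialize(theta)
print(np.array_equal(t1_back, t1), np.array_equal(t2_back, t2))  # expected: True True
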
def regularized_cost(theta,X,y,l=1):
    t1,t2=deserialize(theta)
    m=X.shape[0]
    re=(l/(2*m))*(np.sum(np.power(t1[:,1:],2))+np.sum(np.power(t2[:,1:],2)))
    return cost(t1,t2,X,y)+re
regularized_cost(theta,X,y)
0.38376985909092365

Backpropagation

def sigmoid_gradient(z):
    return sigmoid(z)*(1-sigmoid(z))
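
The identity g'(z) = g(z)(1 - g(z)) can be verified numerically; a minimal sketch with a central difference:

z_test = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
eps = 1e-5
numeric = (sigmoid(z_test + eps) - sigmoid(z_test - eps)) / (2 * eps)
print(np.max(np.abs(numeric - sigmoid_gradient(z_test))))  # should be tiny, around 1e-10 or smaller
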
def gradient(t1,t2,X,y):  # one change ripples through everything: the chain rule couples all the layers
    m=X.shape[0]
    delta1 = np.zeros(t1.shape)  # (25, 401)
    delta2 = np.zeros(t2.shape)  # (10, 26)
    a1, z2, a2, z3, h = feed_forward(t1,t2, X)
    for i in range(m):
        a1i=a1[i,:]
        z2i=z2[i,:]
        a2i=a2[i,:]
        hi=h[i,:]
        yi=y[i,:]
        d3i=hi-yi
        z2i=np.insert(z2i,0,np.ones(1))
        d2i=np.multiply(t2.T@d3i,sigmoid_gradient(z2i))
        delta2+=np.matrix(d3i).T@np.matrix(a2i)
        delta1 += np.matrix(d2i[1:]).T @ np.matrix(a1i)  # (1, 25).T @ (1, 401) -> (25, 401)

    delta1 = delta1 / m
    delta2 = delta2 / m
    return delta1,delta2
d1,d2=gradient(t1,t2,X,y)
d1.shape,d2.shape
((25, 401), (10, 26))
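
The per-example loop is easy to follow but slow; the same gradients can be computed fully vectorized. This is a sketch (not the course's reference code) in which d3 and d2 hold the errors for the whole batch at once:

def gradient_vectorized(t1, t2, X, y):
    m = X.shape[0]
    a1, z2, a2, z3, h = feed_forward(t1, t2, X)
    d3 = h - y                                    # (5000, 10) output-layer error
    d2 = (d3 @ t2[:, 1:]) * sigmoid_gradient(z2)  # (5000, 25), bias column of t2 dropped
    delta2 = d3.T @ a2 / m                        # (10, 26)
    delta1 = d2.T @ a1 / m                        # (25, 401)
    return delta1, delta2

v1, v2 = gradient_vectorized(t1, t2, X, y)
print(np.allclose(v1, d1), np.allclose(v2, d2))   # expected: True True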

Gradient checking

theta=serialize(t1,t2)


def gradient_checking(theta, X, y, epsilon, regularized=False):
    def a_numeric_grad(plus, minus, regularized=False):
        """calculate a partial gradient with respect to 1 theta"""
        if regularized:
            return (regularized_cost(plus, X, y) - regularized_cost(minus, X, y)) / (epsilon * 2)
        else:
            return (cost(*deserialize(plus), X, y) - cost(*deserialize(minus), X, y)) / (epsilon * 2)

    theta_matrix = expand_array(theta)  # expand to (10285, 10285)
    epsilon_matrix = np.identity(len(theta)) * epsilon

    plus_matrix = theta_matrix + epsilon_matrix
    minus_matrix = theta_matrix - epsilon_matrix

    # numerical gradient: each row perturbs exactly one theta (the diagonal entry) by +/- epsilon
    numeric_grad = np.array([a_numeric_grad(plus_matrix[i], minus_matrix[i], regularized)
                                    for i in range(len(theta))])

    # analytical gradient from backpropagation, regularized or not
    if regularized:
        analytic_grad = regularized_gradient(theta, X, y)
    else:
        analytic_grad = serialize(*gradient(*deserialize(theta), X, y))

    # If you have a correct implementation, and assuming you used EPSILON = 0.0001,
    # the diff below should be less than 1e-9.
    # This is how the original MATLAB code does gradient checking.
    diff = np.linalg.norm(numeric_grad - analytic_grad) / np.linalg.norm(numeric_grad + analytic_grad)
    print('Relative difference between the backprop gradient and the numerical gradient: {}'.format(diff))


def expand_array(arr):
    """replicate array into matrix
    [1, 2, 3]

    [[1, 2, 3],
     [1, 2, 3],
     [1, 2, 3]]
    """
    # turn matrix back to ndarray
    return np.array(np.matrix(np.ones(arr.shape[0])).T @ np.matrix(arr))
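
The same replication can also be written in one line with np.tile; an equivalent sketch:

def expand_array_tile(arr):
    # repeat the 1-D parameter vector once per parameter, giving an (n, n) matrix
    return np.tile(arr, (arr.shape[0], 1))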


def regularized_gradient(theta, X, y, l=1):
    t1, t2 = deserialize(theta)
    m = X.shape[0]
    delta1, delta2 = gradient(t1, t2, X, y)

    copy1 = t1.copy()
    copy1[:, 0] = 0                      # do not regularize the bias column
    delta1 = delta1 + (l / m) * copy1

    copy2 = t2.copy()
    copy2[:, 0] = 0
    delta2 = delta2 + (l / m) * copy2

    return serialize(delta1, delta2)
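
Before launching the full (and very slow) numerical check, a shape check on the analytic gradient costs almost nothing; it should come back as one flat vector with the same length as theta:

print(regularized_gradient(theta, X, y).shape)  # expected: (10285,)
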
gradient_checking(theta, X, y, epsilon=0.0001, regularized=True)
print("buaa")

Training the model

def random_init(size):
    return np.random.uniform(-0.12, 0.12, size)
def nn_training(X, y):
    """regularized version
    the architecture is hard coded here... won't generalize
    """
    init_theta = random_init(10285)  # 25*401 + 10*26

    res = opt.minimize(fun=regularized_cost,
                       x0=init_theta,
                       args=(X, y, 1),
                       method='TNC',
                       jac=regularized_gradient,
                       options={'maxiter': 400})
    return res
res = nn_training(X, y)
res
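
The returned object is a standard scipy.optimize OptimizeResult; the fields worth inspecting are its usual attributes:

print(res.success)   # whether TNC reported convergence
print(res.fun)       # final regularized cost at the optimum found
print(res.x.shape)   # flat parameter vector, expected (10285,)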

Accuracy rate

_, y_answer = load_data('ex4data1.mat')
y_answer[:20]
final_theta = res.x
def show_accuracy(theta, X, y):
    t1, t2 = deserialize(theta)          # feed_forward expects the two weight matrices
    _, _, _, _, h = feed_forward(t1, t2, X)

    y_pred = np.argmax(h, axis=1) + 1    # column k corresponds to digit k+1

    print(classification_report(y, y_pred))
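
A usage sketch (assuming the training run above finished and res.x holds the learned parameters); y_answer is flattened because classification_report expects 1-D labels:

show_accuracy(final_theta, X, y_answer.ravel())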

Output the hidden layer

def plot_hidden_layer(theta):
    final_theta1, _ = deserialize(theta)
    hidden_layer = final_theta1[:, 1:]   # drop the bias column -> (25, 400), one row per hidden unit
    fig, ax_array = plt.subplots(nrows=5, ncols=5, sharey=True, sharex=True, figsize=(5, 5))
    for r in range(5):
        for c in range(5):
            ax_array[r, c].matshow(hidden_layer[5 * r + c].reshape((20, 20)),
                                   cmap=matplotlib.cm.binary)
plot_hidden_layer(final_theta)
plt.show()

The results never came out: while gradient checking was running, the machine's fan sped up noticeably and no output ever appeared. The backpropagation algorithm is a bit involved.
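
The full check above evaluates the cost 2 x 10285 times and also builds several (10285, 10285) matrices, which explains the slowdown. A cheaper variant (a sketch, not part of the original exercise) spot-checks only a random handful of parameters:

def gradient_checking_subset(theta, X, y, epsilon=1e-4, n_checks=20):
    """Compare backprop against numerical gradients for a few randomly chosen parameters."""
    analytic = regularized_gradient(theta, X, y)
    for i in np.random.choice(len(theta), n_checks, replace=False):
        plus, minus = theta.copy(), theta.copy()
        plus[i] += epsilon
        minus[i] -= epsilon
        numeric = (regularized_cost(plus, X, y) - regularized_cost(minus, X, y)) / (2 * epsilon)
        print(i, numeric, analytic[i])  # the two gradient values should agree to roughly 1e-9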
