Andrew Ng Machine Learning Programming Exercise 4 (Neural Networks: Backpropagation) (Python)

import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from scipy.optimize import minimize

data = sio.loadmat('ex4data1.mat')
raw_X = data['X']
raw_y = data['y']

X = np.insert(raw_X, 0, values=1, axis=1)  # prepend the bias column
X.shape  # (5000, 401)

def one_hot_encoder(raw_y):
    result = []
    for i in raw_y:  # labels run 1-10 (the digit 0 is stored as 10)
        y_temp = np.zeros(10)
        y_temp[i - 1] = 1
        result.append(y_temp)
    return np.array(result)


y = one_hot_encoder(raw_y)

y.shape  # (5000, 10)
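The loop above can also be written without explicit iteration; a minimal vectorized sketch (equivalent output, purely a stylistic alternative):

# Row k of eye(10) is the one-hot vector for label k+1, so indexing
# with (raw_y - 1) encodes every label at once.
y_vec = np.eye(10)[raw_y.flatten() - 1]
assert (y_vec == y).all()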

theta = sio.loadmat('ex4weights.mat')

theta1, theta2 = theta['Theta1'], theta['Theta2']
theta1.shape, theta2.shape  # ((25, 401), (10, 26))


# Serialize the two weight matrices into a single flat vector
def serialize(a, b):
    return np.append(a.flatten(), b.flatten())


theta_serialize = serialize(theta1, theta2)  # flatten theta1 and theta2

theta_serialize.shape  # (10285,) = 25*401 + 10*26

# Deserialize the flat vector back into the two weight matrices
def deserialize(theta_serialize):
    theta1 = theta_serialize[:25 * 401].reshape(25, 401)
    theta2 = theta_serialize[25 * 401:].reshape(10, 26)
    return theta1, theta2


# Sanity check: the round trip restores the original shapes
# theta1, theta2 = deserialize(theta_serialize)
# theta1.shape, theta2.shape  # ((25, 401), (10, 26))


def sigmoid(z):
    return 1/(1 + np.exp(-z))


def feed_forward(theta_serialize, X):
    theta1, theta2 = deserialize(theta_serialize)
    a1 = X                                   # (5000, 401), bias column already included
    z2 = a1 @ theta1.T                       # (5000, 25)
    a2 = sigmoid(z2)
    a2 = np.insert(a2, 0, values=1, axis=1)  # add bias -> (5000, 26)
    z3 = a2 @ theta2.T                       # (5000, 10)
    h = sigmoid(z3)
    return a1, z2, a2, z3, h
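In matrix form, with examples as rows and g the sigmoid, the forward pass above computes:

$$z^{(2)} = a^{(1)} \Theta_1^{\top}, \qquad a^{(2)} = \big[1;\; g(z^{(2)})\big], \qquad z^{(3)} = a^{(2)} \Theta_2^{\top}, \qquad h = g(z^{(3)})$$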


def cost(theta_serialize,X,y):
    a1,z2,a2,z3,h = feed_forward(theta_serialize, X)
    J = -np.sum(y*np.log(h)+(1-y)*np.log(1-h))/len(X)
    return J
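The vectorized line above computes the standard multi-class cross-entropy cost over m examples and K = 10 output units:

$$J(\Theta) = -\frac{1}{m}\sum_{i=1}^{m}\sum_{k=1}^{K}\Big[y_k^{(i)}\log h_k^{(i)} + \big(1-y_k^{(i)}\big)\log\big(1-h_k^{(i)}\big)\Big]$$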


cost_initial = cost(theta_serialize, X, y)  # should be ≈ 0.2876 with the pretrained weights

def reg_cost(theta_serialize, X, y, lamda):
    # Deserialize here: the regularization term must use the current
    # parameters, not the globals loaded from ex4weights.mat
    theta1, theta2 = deserialize(theta_serialize)
    sum1 = np.sum(np.power(theta1[:, 1:], 2))  # bias columns are not regularized
    sum2 = np.sum(np.power(theta2[:, 1:], 2))
    reg = (sum1 + sum2) * lamda / (2 * len(X))
    return cost(theta_serialize, X, y) + reg


lamda = 1
reg_cost_initial = reg_cost(theta_serialize, X, y, lamda)  # should be ≈ 0.3838

def sigmoid_gradient(z):
    return sigmoid(z)*(1-sigmoid(z))
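This identity follows directly from differentiating the sigmoid:

$$g'(z) = \frac{d}{dz}\,\frac{1}{1+e^{-z}} = \frac{e^{-z}}{(1+e^{-z})^2} = g(z)\,\big(1 - g(z)\big)$$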


def gradient(theta_serialize, X, y):
    theta1, theta2 = deserialize(theta_serialize)
    a1, z2, a2, z3, h = feed_forward(theta_serialize, X)
    d3 = h - y                                      # output-layer error (5000, 10)
    d2 = d3 @ theta2[:, 1:] * sigmoid_gradient(z2)  # hidden-layer error, bias column dropped
    D2 = (d3.T @ a2) / len(X)                       # (10, 26)
    D1 = (d2.T @ a1) / len(X)                       # (25, 401)
    return serialize(D1, D2)
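With examples as rows, these lines implement the backpropagation equations from the exercise:

$$\delta^{(3)} = h - y, \qquad \delta^{(2)} = \delta^{(3)}\,\Theta_2[:,1{:}] \odot g'(z^{(2)})$$

$$D_1 = \frac{1}{m}\,{\delta^{(2)}}^{\top} a^{(1)}, \qquad D_2 = \frac{1}{m}\,{\delta^{(3)}}^{\top} a^{(2)}$$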


def reg_gradient(theta_serialize, X, y, lamda):
    D = gradient(theta_serialize, X, y)
    D1, D2 = deserialize(D)

    theta1, theta2 = deserialize(theta_serialize)
    # Regularize every column except the bias column
    D1[:, 1:] = D1[:, 1:] + theta1[:, 1:] * lamda / len(X)
    D2[:, 1:] = D2[:, 1:] + theta2[:, 1:] * lamda / len(X)

    return serialize(D1, D2)
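Before training, it is worth checking the analytic gradient against two-sided finite differences. A minimal sketch; the helper name gradient_check and the choice to sample only a handful of the 10,285 entries (a full check would be slow) are my own:

def gradient_check(theta, X, y, lamda, eps=1e-4, n_checks=5):
    # Compare a few entries of the analytic gradient with
    # two-sided finite differences of the regularized cost.
    analytic = reg_gradient(theta, X, y, lamda)
    rng = np.random.default_rng(0)
    for i in rng.choice(len(theta), size=n_checks, replace=False):
        theta_plus, theta_minus = theta.copy(), theta.copy()
        theta_plus[i] += eps
        theta_minus[i] -= eps
        numeric = (reg_cost(theta_plus, X, y, lamda)
                   - reg_cost(theta_minus, X, y, lamda)) / (2 * eps)
        print(f'{i}: numeric={numeric:.6e}  analytic={analytic[i]:.6e}')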



def nn_training(X, y):
    # Random initialization breaks symmetry; 10285 = 25*401 + 10*26 parameters
    init_theta = np.random.uniform(-0.5, 0.5, 10285)
    res = minimize(fun=reg_cost,
                   x0=init_theta,
                   args=(X, y, lamda),  # lamda is read from the enclosing scope
                   method='TNC',
                   jac=reg_gradient,
                   options={'maxiter': 300})
    return res


lamda = 10
res = nn_training(X, y)

raw_y = data['y'].reshape(5000,)  # back to flat 1-10 labels for evaluation

_, _, _, _, h = feed_forward(res.x, X)
y_pred = np.argmax(h, axis=1) + 1  # argmax gives 0-9; +1 maps back to labels 1-10
acc = np.mean(y_pred == raw_y)     # training-set accuracy

acc

def plot_hidden_layer(theta):
    theta1, _ = deserialize(theta)
    hidden_layer = theta1[:, 1:]  # (25, 400): drop the bias column

    fig, ax = plt.subplots(ncols=5, nrows=5, figsize=(8, 8), sharex=True, sharey=True)

    for r in range(5):
        for c in range(5):
            # Each row of theta1 is a 20x20 image of the input pattern
            # that hidden unit responds to
            ax[r, c].imshow(hidden_layer[5 * r + c].reshape(20, 20).T, cmap='gray_r')

    plt.xticks([])
    plt.yticks([])

    plt.show()
    
    
plot_hidden_layer(res.x)
