Machine Learning: Homework 4

Every assignment is explained in detail~

1. Using one-hot encoding

from sklearn.preprocessing import OneHotEncoder

# Turn the integer class labels into one-hot row vectors.
# Note: y must be 2-D, e.g. shape (m, 1); in scikit-learn >= 1.2 the
# keyword is sparse_output=False rather than sparse=False.
encoder = OneHotEncoder(sparse=False)
y_onehot = encoder.fit_transform(y)
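
As a quick sanity check, a minimal sketch of what the encoder produces (the toy labels below are illustrative, not from the assignment data):

import numpy as np

y_demo = np.array([[1], [2], [3], [1]])   # labels must be a column vector
demo_encoder = OneHotEncoder(sparse=False)
print(demo_encoder.fit_transform(y_demo))
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]
#  [1. 0. 0.]]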

2. Forward propagation function

def forward_propagate(X, theta1, theta2):
    m = X.shape[0]
    # Prepend a bias column of ones, then propagate through both layers.
    a1 = np.insert(X, 0, values=np.ones(m), axis=1)
    z2 = a1 * theta1.T          # inputs are np.matrix, so * is matrix multiplication
    a2 = np.insert(sigmoid(z2), 0, values=np.ones(m), axis=1)
    z3 = a2 * theta2.T
    h = sigmoid(z3)
    return a1, z2, a2, z3, h
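
The code above calls sigmoid, which the post never shows. A minimal definition (an assumption, but it is the standard logistic function this exercise uses):

import numpy as np

def sigmoid(z):
    # Logistic function, applied element-wise.
    return 1 / (1 + np.exp(-z))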

3. Cost function (with regularization)

def cost(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)
    # Unroll the flat parameter vector back into the two weight matrices.
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, input_size + 1)))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, hidden_size + 1)))
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
    # Cross-entropy cost, accumulated one example at a time.
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i, :], np.log(h[i, :]))
        second_term = np.multiply((1 - y[i, :]), np.log(1 - h[i, :]))
        J += np.sum(first_term - second_term)
    J = J / m
    # Add the regularization term (bias columns are excluded).
    J += (float(learning_rate) / (2 * m)) * (np.sum(np.power(theta1[:, 1:], 2)) + np.sum(np.power(theta2[:, 1:], 2)))

    return J
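
Calling cost requires the network sizes and a flat initial parameter vector. A sketch under the usual ex4 setup (400 input pixels, 25 hidden units, 10 classes; the random initialization scale is an assumption borrowed from the common Python port of this exercise, and X / y_onehot are the training matrix and one-hot labels from section 1):

input_size = 400
hidden_size = 25
num_labels = 10
learning_rate = 1

# Random initialization of the unrolled parameter vector.
params = (np.random.random(size=hidden_size * (input_size + 1) + num_labels * (hidden_size + 1)) - 0.5) * 0.25

print(cost(params, input_size, hidden_size, num_labels, X, y_onehot, learning_rate))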

4. Sigmoid gradient and backpropagation
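
The backprop routine below calls sigmoid_gradient, which the post never defines. A minimal definition consistent with the sigmoid above (an assumed helper, matching this section's heading):

def sigmoid_gradient(z):
    # g'(z) = g(z) * (1 - g(z)) for the logistic function.
    s = sigmoid(z)
    return np.multiply(s, (1 - s))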

def backprop(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, input_size + 1)))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, hidden_size + 1)))
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
    J = 0
    delta1 = np.zeros(theta1.shape)
    delta2 = np.zeros(theta2.shape)
    # Regularized cost, same computation as in cost() above.
    for i in range(m):
        first_term = np.multiply(-y[i, :], np.log(h[i, :]))
        second_term = np.multiply((1 - y[i, :]), np.log(1 - h[i, :]))
        J += np.sum(first_term - second_term)
    J = J / m
    J += (float(learning_rate) / (2 * m)) * (np.sum(np.power(theta1[:, 1:], 2)) + np.sum(np.power(theta2[:, 1:], 2)))
    # Backpropagate the error one example at a time.
    for t in range(m):
        a1t = a1[t, :]
        z2t = z2[t, :]
        a2t = a2[t, :]
        ht = h[t, :]
        yt = y[t, :]
        d3t = ht - yt                                 # output-layer error
        z2t = np.insert(z2t, 0, values=np.ones(1))    # account for the bias unit
        d2t = np.multiply((theta2.T * d3t.T).T, sigmoid_gradient(z2t))
        delta1 += (d2t[:, 1:]).T * a1t
        delta2 += d3t.T * a2t
    delta1 = delta1 / m
    delta2 = delta2 / m
    # The cost above is regularized, so the gradient must include the
    # matching term (bias columns excluded).
    delta1[:, 1:] = delta1[:, 1:] + (theta1[:, 1:] * learning_rate) / m
    delta2[:, 1:] = delta2[:, 1:] + (theta2[:, 1:] * learning_rate) / m
    # Unroll the gradients into a single vector for the optimizer.
    grad = np.concatenate((np.ravel(delta1), np.ravel(delta2)))
    return J, grad

5. Finding the trained parameters
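
The snippet below reads fmin.x, but the post never shows where fmin comes from; presumably it is the result of scipy.optimize.minimize, with backprop supplying both the cost and the gradient. A sketch (the method and iteration cap are assumptions, borrowed from the common Python port of this exercise):

from scipy.optimize import minimize

# jac=True because backprop returns the pair (J, grad).
fmin = minimize(fun=backprop, x0=params,
                args=(input_size, hidden_size, num_labels, X, y_onehot, learning_rate),
                method='TNC', jac=True, options={'maxiter': 250})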

X = np.matrix(X)
# Recover the trained weight matrices from the optimizer's solution vector.
theta1 = np.matrix(np.reshape(fmin.x[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
theta2 = np.matrix(np.reshape(fmin.x[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))

a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
# argmax is 0-based but the class labels run from 1 to 10, hence the + 1.
y_pred = np.array(np.argmax(h, axis=1) + 1)
y_pred

6. Prediction accuracy

# Compare each prediction with the true label and average the hits.
correct = [1 if a == b else 0 for (a, b) in zip(y_pred, y)]
accuracy = sum(map(int, correct)) / float(len(correct))
print('accuracy = {0}%'.format(accuracy * 100))
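
Equivalently, the comparison can be vectorized with NumPy (a sketch, assuming y is the original (m, 1) label array rather than the one-hot matrix):

accuracy = np.mean(np.array(y_pred).ravel() == np.array(y).ravel())
print('accuracy = {0}%'.format(accuracy * 100))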