Notes on Andrew Ng's Deep Learning Course, Week 3 Programming Assignment

Write-ups of this assignment (code included) are all over the internet, so I'll mainly talk about my own understanding. Completing a neural network takes three steps:

1. Forward propagation

2. Backward propagation

3. Gradient descent

These are the three core functions; beyond them there are random initialization and the cost computation. Random initialization matters a great deal: if all the weights started at zero, every hidden unit would compute the same thing and the symmetry would never be broken. The cost function is secondary. For reference, the formulas these functions implement are summarized below.
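Read off the code later in this post (a one-hidden-layer network with a tanh hidden layer and a sigmoid output; m is the number of examples):

$$
\begin{aligned}
Z^{[1]} &= W^{[1]}X + b^{[1]}, \qquad A^{[1]} = \tanh\!\big(Z^{[1]}\big),\\
Z^{[2]} &= W^{[2]}A^{[1]} + b^{[2]}, \qquad A^{[2]} = \sigma\!\big(Z^{[2]}\big),\\
J &= -\frac{1}{m}\sum_{i=1}^{m}\Big[y^{(i)}\log a^{[2](i)} + \big(1-y^{(i)}\big)\log\big(1-a^{[2](i)}\big)\Big],\\
dZ^{[2]} &= A^{[2]} - Y, \qquad dW^{[2]} = \frac{1}{m}\,dZ^{[2]}A^{[1]T}, \qquad db^{[2]} = \frac{1}{m}\sum dZ^{[2]},\\
dZ^{[1]} &= W^{[2]T}dZ^{[2]} \odot \big(1 - (A^{[1]})^{2}\big), \qquad dW^{[1]} = \frac{1}{m}\,dZ^{[1]}X^{T}, \qquad db^{[1]} = \frac{1}{m}\sum dZ^{[1]}.
\end{aligned}
$$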

Throughout all of this, you have to keep the dimensions of every parameter straight; I've listed the shapes below as a quick reference. Those are the points I think deserve attention after finishing this assignment. My code is attached after that (it also borrows from others' work).
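To make the dimension bookkeeping concrete: with m training examples, the planar dataset gives n_x = 2 input features and n_y = 1 output, and the hidden layer has n_h units. Reading the shapes straight off the code:

W1: (n_h, n_x)    b1: (n_h, 1)    Z1, A1: (n_h, m)
W2: (n_y, n_h)    b2: (n_y, 1)    Z2, A2: (n_y, m)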

import numpy as np
import matplotlib.pyplot as plt
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets

# Load the planar "flower" dataset: X has shape (2, m), Y has shape (1, m)
X, Y = load_planar_dataset()
def size_xy(X, Y):
    # Layer sizes: n_x input features, n_y outputs.
    # (The hidden size n_h is chosen by the caller; the original version
    # hardcoded n_h = 4 here, which silently overrode nn_model's n_h argument.)
    n_x = X.shape[0]
    n_y = Y.shape[0]
    return n_x, n_y
def chushihua(n_x, n_y, n_h):
    # Random initialization: small random weights break the symmetry between
    # hidden units; the biases can safely start at zero
    W1 = np.random.randn(n_h, n_x) * 0.01
    W2 = np.random.randn(n_y, n_h) * 0.01
    b1 = np.zeros(shape=(n_h, 1))
    b2 = np.zeros(shape=(n_y, 1))
    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2}
    return parameters
def qianxiang(X, parameters):
    # Forward propagation: tanh hidden layer, sigmoid output layer
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    Z1 = np.dot(W1, X) + b1   # (n_h, m)
    A1 = np.tanh(Z1)
    Z2 = np.dot(W2, A1) + b2  # (n_y, m)
    A2 = sigmoid(Z2)
    cache = {"Z1": Z1,
             "A1": A1,
             "Z2": Z2,
             "A2": A2}
    return cache
def cost_compute(A2, Y):
    # Cross-entropy cost averaged over the m examples
    # (the original took an unused `parameters` argument, dropped here)
    m = Y.shape[1]
    logprobs = np.multiply(Y, np.log(A2)) + np.multiply((1 - Y), np.log(1 - A2))
    cost = -np.sum(logprobs) / m
    return cost
def houchuan(parameters, cache, X, Y):
    # Backward propagation
    m = X.shape[1]
    W2 = parameters["W2"]
    A1 = cache["A1"]
    A2 = cache["A2"]
    dZ2 = A2 - Y  # sigmoid output + cross-entropy cost simplifies to A2 - Y
    dW2 = np.dot(dZ2, A1.T) / m
    db2 = np.sum(dZ2, axis=1, keepdims=True) / m
    dZ1 = np.multiply(np.dot(W2.T, dZ2), 1 - np.power(A1, 2))  # tanh'(Z1) = 1 - A1^2
    dW1 = np.dot(dZ1, X.T) / m
    db1 = np.sum(dZ1, axis=1, keepdims=True) / m
    grads = {"dW1": dW1,
             "db1": db1,
             "dW2": dW2,
             "db2": db2}
    return grads
def tiduxiajiang(grads, parameters, learn=1.2):
    # Gradient descent: step each parameter against its gradient
    W1, W2 = parameters["W1"], parameters["W2"]
    b1, b2 = parameters["b1"], parameters["b2"]
    dW1, dW2 = grads["dW1"], grads["dW2"]
    db1, db2 = grads["db1"], grads["db2"]
    W1 = W1 - learn * dW1
    W2 = W2 - learn * dW2
    b1 = b1 - learn * db1
    b2 = b2 - learn * db2
    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2}
    return parameters
def nn_model(X, Y, n_h, num_diedai, print_cost=True):
    np.random.seed(1)
    # Use the caller's n_h (the original overwrote it with the hardcoded 4 from size_xy)
    n_x, n_y = size_xy(X, Y)
    parameters = chushihua(n_x, n_y, n_h)
    for i in range(num_diedai):
        cache = qianxiang(X, parameters)
        A2 = cache["A2"]
        grads = houchuan(parameters, cache, X, Y)
        cost = cost_compute(A2, Y)
        parameters = tiduxiajiang(grads, parameters, learn=0.5)
        if print_cost and i % 1000 == 0:
            print("Iteration " + str(i) + ", cost " + str(cost))
    return parameters
def predict(parameters, X):
    # Threshold the output probabilities at 0.5 (np.round maps >0.5 to 1, else 0)
    cache = qianxiang(X, parameters)
    A2 = cache["A2"]
    predictions = np.round(A2)
    return predictions

parameters = nn_model(X, Y, 4, 10000, True)

# Plot the decision boundary
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
plt.title("Decision Boundary for hidden layer size " + str(4))
plt.show()
predictions = predict(parameters, X)
print('Accuracy: %d' % float((np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100) + '%')
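Since nn_model now honors its n_h argument, a natural follow-up is to compare a few hidden layer sizes, as the original assignment also does. A minimal sketch (the sizes and iteration count here are my own arbitrary choices):

# Compare training-set accuracy across hidden layer sizes
for n_h in [1, 2, 3, 4, 5, 20]:
    params = nn_model(X, Y, n_h, 5000, print_cost=False)
    preds = predict(params, X)
    acc = float((np.dot(Y, preds.T) + np.dot(1 - Y, 1 - preds.T)) / float(Y.size) * 100)
    print("n_h = %d: accuracy %.1f%%" % (n_h, acc))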
