Andrew Ng's Coursera Deep Learning Course (DeepLearning.ai) Programming Assignment: Gradient Checking (2-1.3)



Gradient_check.py

import numpy as np
from testCases import gradient_check_n_test_case
from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector
def gradient_check(x, theta, epsilon=1e-7):
    # Forward propagation for the 1-D example: J(theta) = theta * x
    J = x * theta
    # Analytic gradient: dJ/dtheta = x
    dtheta = x
    # Numerical gradient via the centered difference quotient
    gradapprox = (x * (theta + epsilon) - x * (theta - epsilon)) / (2 * epsilon)
    grad = dtheta
    # Relative error between the analytic and numerical gradients
    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator
    if difference < epsilon:
        print("the gradient is correct")
    else:
        print("the gradient is not so ideal")
    return J, difference
    
x, theta = 2, 4
J, difference = gradient_check(x, theta)
print("difference = " + str(difference))


def forward_propagation_n(X, Y, parameters):
    # Forward pass through the 3-layer model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    m = X.shape[1]
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)
    # Cross-entropy cost averaged over the m examples
    cost = (-1.0 / m) * np.sum(Y * np.log(A3) + (1 - Y) * np.log(1 - A3))
    # Cache every intermediate value needed by backward propagation
    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
    return cost, cache
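
The cache returned by forward_propagation_n feeds backward_propagation_n below, which applies the chain rule layer by layer; the ReLU derivative is implemented as the indicator np.int64(Z > 0).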

def backward_propagation_n(X, Y, cache):
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
    m = X.shape[1]
    # Output layer (sigmoid + cross-entropy): dZ3 = A3 - Y
    dZ3 = A3 - Y
    dW3 = (1.0 / m) * np.dot(dZ3, A2.T)
    db3 = (1.0 / m) * np.sum(dZ3, axis=1, keepdims=True)

    # Second hidden layer (ReLU)
    dA2 = np.dot(W3.T, dZ3)
    dZ2 = np.multiply(dA2, np.int64(Z2 > 0))
    dW2 = (1.0 / m) * np.dot(dZ2, A1.T)
    db2 = (1.0 / m) * np.sum(dZ2, axis=1, keepdims=True)

    # First hidden layer (ReLU)
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(Z1 > 0))
    dW1 = (1.0 / m) * np.dot(dZ1, X.T)
    db1 = (1.0 / m) * np.sum(dZ1, axis=1, keepdims=True)

    grads = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
             "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
             "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
    return grads
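
The assignment then checks these gradients numerically with an N-dimensional version of the check, which is not shown above. Below is a minimal sketch of that function, assuming the gc_utils helpers dictionary_to_vector, vector_to_dictionary and gradients_to_vector flatten and restore the parameter and gradient dictionaries in a consistent order, as in the course materials; the 2e-7 tolerance is likewise an assumption on the order of the usual threshold.

def gradient_check_n(parameters, gradients, X, Y, epsilon=1e-7):
    # Flatten the parameter dictionary into a column vector (dictionary_to_vector is
    # assumed to return (values, keys) as in gc_utils)
    parameters_values, _ = dictionary_to_vector(parameters)
    grad = gradients_to_vector(gradients)
    num_parameters = parameters_values.shape[0]
    J_plus = np.zeros((num_parameters, 1))
    J_minus = np.zeros((num_parameters, 1))
    gradapprox = np.zeros((num_parameters, 1))

    # Perturb each parameter by +/- epsilon and recompute the cost
    for i in range(num_parameters):
        theta_plus = np.copy(parameters_values)
        theta_plus[i][0] += epsilon
        J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(theta_plus))

        theta_minus = np.copy(parameters_values)
        theta_minus[i][0] -= epsilon
        J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(theta_minus))

        gradapprox[i] = (J_plus[i] - J_minus[i]) / (2.0 * epsilon)

    # Relative error between analytic and numerical gradients
    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator

    # Tolerance on the order of 1e-7 (2e-7 used here as an assumed threshold)
    if difference > 2e-7:
        print("There is probably a mistake in backward propagation! difference = " + str(difference))
    else:
        print("Backward propagation looks correct. difference = " + str(difference))
    return difference

A usage sketch, assuming gradient_check_n_test_case from testCases returns (X, Y, parameters):

X, Y, parameters = gradient_check_n_test_case()
cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y)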