Building a CNN with NumPy (forward and backward propagation)

  • Padding in a CNN (zero-padding is applied only to the height and width)
import numpy as np

# GRADED FUNCTION: zero_pad

def zero_pad(X, pad):
    """
    Pad with zeros all images of the dataset X. The padding is applied to the height and width of an image, 
    as illustrated in Figure 1.
    
    Argument:
    X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images
    pad -- integer, amount of padding around each image on vertical and horizontal dimensions
    
    Returns:
    X_pad -- padded image of shape (m, n_H + 2 * pad, n_W + 2 * pad, n_C)
    """
    
    # (≈ 1 line)
    # X_pad = None
    # YOUR CODE STARTS HERE
    X_pad = np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0)), mode='constant', constant_values=(0, 0))
    
    # YOUR CODE ENDS HERE
    
    return X_pad
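
A quick sanity check on zero_pad (a minimal sketch; the seed and shapes below are illustrative choices, not the assignment's official tests):

np.random.seed(1)
x = np.random.randn(4, 3, 3, 2)   # batch of 4 images, 3x3, 2 channels
x_pad = zero_pad(x, 2)

print(x.shape)                    # (4, 3, 3, 2)
print(x_pad.shape)                # (4, 7, 7, 2) -- only height and width grow, by 2*pad each
print(x_pad[0, 0:2, 0:2, 0])      # the padded border is all zeros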

  • The convolution operation (strictly speaking, this is not the convolution from mathematics; it is an element-wise product of the window with the filter, summed over all entries)
# GRADED FUNCTION: conv_single_step

def conv_single_step(a_slice_prev, W, b):
    """
    Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation 
    of the previous layer.
    
    Arguments:
    a_slice_prev -- slice of input data of shape (f, f, n_C_prev)
    W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)
    b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)
    
    Returns:
    Z -- a scalar value, the result of convolving the sliding window (W, b) on a slice x of the input data
    """

    #(≈ 3 lines of code)
    # Element-wise product between a_slice_prev and W. Do not add the bias yet.
    # s = None
    # Sum over all entries of the volume s.
    # Z = None
    # Add bias b to Z. Cast b to a float() so that Z results in a scalar value.
    # Z = None
    # YOUR CODE STARTS HERE
    s = a_slice_prev * W
    Z = np.sum(s)
    Z = Z + np.squeeze(b)   # squeeze b to a scalar so that Z is a scalar
    # YOUR CODE ENDS HERE

    return Z
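
For example, one window and one filter (a minimal sketch; the random data is for illustration only):

np.random.seed(1)
a_slice_prev = np.random.randn(4, 4, 3)   # one f x f x n_C_prev window
W = np.random.randn(4, 4, 3)              # one filter
b = np.random.randn(1, 1, 1)

Z = conv_single_step(a_slice_prev, W, b)
print(Z)   # a single scalar: sum(a_slice_prev * W) + b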

  • The CONV layer forward pass (the activation function is not included)
# GRADED FUNCTION: conv_forward

def conv_forward(A_prev, W, b, hparameters):
    """
    Implements the forward propagation for a convolution function
    
    Arguments:
    A_prev -- output activations of the previous layer, 
        numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
    W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)
    b -- Biases, numpy array of shape (1, 1, 1, n_C)
    hparameters -- python dictionary containing "stride" and "pad"
        
    Returns:
    Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)
    cache -- cache of values needed for the conv_backward() function
    """
    
    # Retrieve dimensions from A_prev's shape (≈1 line)  
    # (m, n_H_prev, n_W_prev, n_C_prev) = None
    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
    # Retrieve dimensions from W's shape (≈1 line)
    # (f, f, n_C_prev, n_C) = None
    (f, f, n_C_prev, n_C) = W.shape
    # Retrieve information from "hparameters" (≈2 lines)
    # stride = None
    # pad = None
    stride = hparameters['stride']
    pad = hparameters['pad']
    # Compute the dimensions of the CONV output volume using the formula given above. 
    # Hint: use int() to apply the 'floor' operation. (≈2 lines)
    # n_H = None
    # n_W = None
    n_H = int((n_H_prev - f + 2 * pad) / stride) + 1
    n_W = int((n_W_prev - f + 2 * pad) / stride) + 1
    # Initialize the output volume Z with zeros. (≈1 line)
    # Z = None
    Z = np.zeros((m, n_H, n_W, n_C))
    # Create A_prev_pad by padding A_prev
    # A_prev_pad = None
    A_prev_pad = zero_pad(A_prev, pad)
    # for i in range(None):               # loop over the batch of training examples
        # a_prev_pad = None               # Select ith training example's padded activation
        # for h in range(None):           # loop over vertical axis of the output volume
            # Find the vertical start and end of the current "slice" (≈2 lines)
            # vert_start = None
            # vert_end = None
            
            # for w in range(None):       # loop over horizontal axis of the output volume
                # Find the horizontal start and end of the current "slice" (≈2 lines)
                # horiz_start = None
                # horiz_end = None
                
                # for c in range(None):   # loop over channels (= #filters) of the output volume
                                        
                    # Use the corners to define the (3D) slice of a_prev_pad (See Hint above the cell). (≈1 line)
                    # a_slice_prev = None
                    
                    # Convolve the (3D) slice with the correct filter W and bias b, to get back one output neuron. (≈3 line)
                    # weights = None
                    # biases = None
                    # Z[i, h, w, c] = None
    # YOUR CODE STARTS HERE
    for i in range(m):
        a_prev_pad = A_prev_pad[i]
        for h in range(n_H):
            vert_start = stride * h
            vert_end = vert_start + f
            for w in range(n_W):
                horiz_start = stride * w
                horiz_end = horiz_start + f
                for c in range(n_C):
                    a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
                    weights = W[:, :, :, c]
                    biases = b[:, :, :, c]
                    Z[i, h, w, c] = conv_single_step(a_slice_prev, weights, biases)
    
    # YOUR CODE ENDS HERE
    
    # Save information in "cache" for the backprop
    cache = (A_prev, W, b, hparameters)
    
    return Z, cache
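
To check the output-shape formula in practice (a minimal sketch; the shapes and hyperparameters are arbitrary):

np.random.seed(1)
A_prev = np.random.randn(2, 5, 7, 4)
W = np.random.randn(3, 3, 4, 8)
b = np.random.randn(1, 1, 1, 8)
hparameters = {"pad": 1, "stride": 2}

Z, cache_conv = conv_forward(A_prev, W, b, hparameters)
# n_H = floor((5 - 3 + 2*1) / 2) + 1 = 3, n_W = floor((7 - 3 + 2*1) / 2) + 1 = 4
print(Z.shape)   # (2, 3, 4, 8)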

  • The pooling layer forward pass
# GRADED FUNCTION: pool_forward

def pool_forward(A_prev, hparameters, mode = "max"):
    """
    Implements the forward pass of the pooling layer
    
    Arguments:
    A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
    hparameters -- python dictionary containing "f" and "stride"
    mode -- the pooling mode you would like to use, defined as a string ("max" or "average")
    
    Returns:
    A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C)
    cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters 
    """
    
    # Retrieve dimensions from the input shape
    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
    
    # Retrieve hyperparameters from "hparameters"
    f = hparameters["f"]
    stride = hparameters["stride"]
    
    # Define the dimensions of the output
    n_H = int(1 + (n_H_prev - f) / stride)
    n_W = int(1 + (n_W_prev - f) / stride)
    n_C = n_C_prev
    
    # Initialize output matrix A
    A = np.zeros((m, n_H, n_W, n_C))              
    
    # for i in range(None):                         # loop over the training examples
        # for h in range(None):                     # loop on the vertical axis of the output volume
            # Find the vertical start and end of the current "slice" (≈2 lines)
            # vert_start = None
            # vert_end = None
            
            # for w in range(None):                 # loop on the horizontal axis of the output volume
            # Find the horizontal start and end of the current "slice" (≈2 lines)
                # horiz_start = None
                # horiz_end = None
                
                # for c in range (None):            # loop over the channels of the output volume
                    
                    # Use the corners to define the current slice on the ith training example of A_prev, channel c. (≈1 line)
                    # a_prev_slice = None
                    
                    # Compute the pooling operation on the slice. 
                    # Use an if statement to differentiate the modes. 
                    # Use np.max and np.mean.
                    # if mode == "max":
                        # A[i, h, w, c] = None
                    # elif mode == "average":
                        # A[i, h, w, c] = None
    
    # YOUR CODE STARTS HERE
    for i in range(m):
        for h in range(n_H):
            vert_start = h * stride
            vert_end = vert_start + f
            for w in range(n_W):
                horiz_start = w * stride
                horiz_end = horiz_start + f
                for c in range(n_C):
                    a_prev_slice = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]
                    if mode == 'max':
                        A[i, h, w, c] = np.max(a_prev_slice)
                    elif mode == 'average':
                        A[i, h, w, c] = np.mean(a_prev_slice)
    
    # YOUR CODE ENDS HERE
    
    # Store the input and hparameters in "cache" for pool_backward()
    cache = (A_prev, hparameters)
    
    # Making sure your output shape is correct
    assert(A.shape == (m, n_H, n_W, n_C))
    
    return A, cache
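
A quick check of both pooling modes (a minimal sketch with random data):

np.random.seed(1)
A_prev = np.random.randn(2, 5, 5, 3)
hparameters = {"stride": 1, "f": 3}

A_max, _ = pool_forward(A_prev, hparameters, mode="max")
A_avg, _ = pool_forward(A_prev, hparameters, mode="average")
# n_H = n_W = 1 + (5 - 3) / 1 = 3; channels are pooled independently
print(A_max.shape, A_avg.shape)   # (2, 3, 3, 3) (2, 3, 3, 3)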

Next, the backward propagation of the CNN.

In most deep learning frameworks you only need to write the forward pass and the backward pass is computed automatically, but it is still instructive to implement it by hand.

  • The CONV layer backward pass. The derivations require matrix calculus; the book "The Matrix Cookbook" is a good reference.
    $$dA \mathrel{+}= \sum_{h=0}^{n_H} \sum_{w=0}^{n_W} W_c \times dZ_{hw} \tag{1}$$

    $$dW_c \mathrel{+}= \sum_{h=0}^{n_H} \sum_{w=0}^{n_W} a_{slice} \times dZ_{hw} \tag{2}$$

    $$db = \sum_{h} \sum_{w} dZ_{hw} \tag{3}$$
def conv_backward(dZ, cache):
    """
    Implement the backward propagation for a convolution function
    
    Arguments:
    dZ -- gradient of the cost with respect to the output of the conv layer (Z), numpy array of shape (m, n_H, n_W, n_C)
    cache -- cache of values needed for the conv_backward(), output of conv_forward()
    
    Returns:
    dA_prev -- gradient of the cost with respect to the input of the conv layer (A_prev),
               numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
    dW -- gradient of the cost with respect to the weights of the conv layer (W)
          numpy array of shape (f, f, n_C_prev, n_C)
    db -- gradient of the cost with respect to the biases of the conv layer (b)
          numpy array of shape (1, 1, 1, n_C)
    """    
    
        
    # Retrieve information from "cache"
    # (A_prev, W, b, hparameters) = None
    # Retrieve dimensions from A_prev's shape
    # (m, n_H_prev, n_W_prev, n_C_prev) = None
    # Retrieve dimensions from W's shape
    # (f, f, n_C_prev, n_C) = None
    (A_prev, W, b, hparameters) = cache
    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
    (f, f, n_C_prev, n_C) = W.shape
    # Retrieve information from "hparameters"
    # stride = None
    # pad = None
    stride = hparameters['stride']
    pad = hparameters['pad']
    # Retrieve dimensions from dZ's shape
    # (m, n_H, n_W, n_C) = None
    (m, n_H, n_W, n_C) = dZ.shape
    # Initialize dA_prev, dW, db with the correct shapes
    # dA_prev = None                          
    # dW = None
    # db = None
    dA_prev = np.zeros(A_prev.shape)
    dW = np.zeros(W.shape)
    db = np.zeros((1, 1, 1, n_C))
    # Pad A_prev and dA_prev
    # A_prev_pad = zero_pad(A_prev, pad)
    # dA_prev_pad = zero_pad(dA_prev, pad)
    A_prev_pad = zero_pad(A_prev,pad)
    dA_prev_pad = zero_pad(dA_prev,pad)
    #for i in range(m):                       # loop over the training examples
        
        # select ith training example from A_prev_pad and dA_prev_pad
        # a_prev_pad = None
        # da_prev_pad = None
        
        #for h in range(n_H):                   # loop over vertical axis of the output volume
        #    for w in range(n_W):               # loop over horizontal axis of the output volume
        #        for c in range(n_C):           # loop over the channels of the output volume
                    
                    # Find the corners of the current "slice"
                    # vert_start = None
                    # vert_end = None
                    # horiz_start = None
                    # horiz_end = None

                    # Use the corners to define the slice from a_prev_pad
                    # a_slice = None

                    # Update gradients for the window and the filter's parameters using the code formulas given above
                    # da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += None
                    # dW[:,:,:,c] += None
                    # db[:,:,:,c] += None
                    
        # Set the ith training example's dA_prev to the unpadded da_prev_pad (Hint: use X[pad:-pad, pad:-pad, :])
        # dA_prev[i, :, :, :] = None
    # YOUR CODE STARTS HERE
    for i in range(m):
        a_prev_pad = A_prev_pad[i, :, :, :]
        da_prev_pad = dA_prev_pad[i, :, :, :]
        for h in range(n_H):
            for w in range(n_W):
                for c in range(n_C):
                    vert_start = stride * h
                    vert_end = vert_start + f
                    horiz_start = stride * w
                    horiz_end = horiz_start + f
                    a_slice = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
                    da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:, :, :, c] * dZ[i, h, w, c]
                    dW[:, :, :, c] += a_slice * dZ[i, h, w, c]
                    db[:, :, :, c] += dZ[i, h, w, c]
        dA_prev[i, :, :, :] = da_prev_pad[pad:-pad, pad:-pad, :]
    
    # YOUR CODE ENDS HERE
    
    # Making sure your output shape is correct
    assert(dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev))
    
    return dA_prev, dW, db
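
A cheap consistency check (a minimal sketch; it feeds Z back in as a stand-in for dZ, since any array of the right shape will do). Because db[..., c] is just the sum of dZ over the batch and spatial axes, it can be verified directly:

np.random.seed(1)
A_prev = np.random.randn(10, 4, 4, 3)
W = np.random.randn(2, 2, 3, 8)
b = np.random.randn(1, 1, 1, 8)
hparameters = {"pad": 2, "stride": 2}

Z, cache_conv = conv_forward(A_prev, W, b, hparameters)
dA, dW, db = conv_backward(Z, cache_conv)

print(dA.shape, dW.shape, db.shape)   # (10, 4, 4, 3) (2, 2, 3, 8) (1, 1, 1, 8)
print(np.allclose(db, Z.sum(axis=(0, 1, 2)).reshape(1, 1, 1, -1)))   # True
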
  • Although the pooling layer has no parameters, we still need to propagate the gradient through it, and max and average pooling must be handled differently.

Max pooling routes the incoming gradient entirely to the coordinate that held the maximum value.

def create_mask_from_window(x):
    """
    Creates a mask from an input matrix x, to identify the max entry of x.
    
    Arguments:
    x -- Array of shape (f, f)
    
    Returns:
    mask -- Array of the same shape as window, contains a True at the position corresponding to the max entry of x.
    """    
    # (≈1 line)
    # mask = None
    # YOUR CODE STARTS HERE
    biggest = np.max(x)
    mask = (x == biggest)
    # YOUR CODE ENDS HERE
    return mask
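
For instance (a minimal sketch; note that if the maximum occurs more than once, every occurrence is marked True):

np.random.seed(1)
x = np.random.randn(3, 3)
mask = create_mask_from_window(x)
print(x)
print(mask)   # True only at the position(s) of the maximum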

Average pooling spreads the gradient evenly over the (f, f) window.

def distribute_value(dz, shape):
    """
    Distributes the input value in the matrix of dimension shape
    
    Arguments:
    dz -- input scalar
    shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz
    
    Returns:
    a -- Array of size (n_H, n_W) for which we distributed the value of dz
    """    
    # Retrieve dimensions from shape (≈1 line)
    # (n_H, n_W) = None
    (n_H, n_W) = shape
    # Compute the value to distribute on the matrix (≈1 line)
    # average = None
    average = dz / (n_H * n_W)
    # Create a matrix where every entry is the "average" value (≈1 line)
    # a = None
    # YOUR CODE STARTS HERE
    a = np.ones((n_H, n_W)) * average
    
    # YOUR CODE ENDS HERE
    return a
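
For example, distributing dz = 2 over a 2x2 window gives 0.5 everywhere, and the entries sum back to dz:

a = distribute_value(2, (2, 2))
print(a)         # [[0.5 0.5]
                 #  [0.5 0.5]]
print(a.sum())   # 2.0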

def pool_backward(dA, cache, mode = "max"):
    """
    Implements the backward pass of the pooling layer
    
    Arguments:
    dA -- gradient of cost with respect to the output of the pooling layer, same shape as A
    cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters 
    mode -- the pooling mode you would like to use, defined as a string ("max" or "average")
    
    Returns:
    dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev
    """
    # Retrieve information from cache (≈1 line)
    # (A_prev, hparameters) = None
    
    # Retrieve hyperparameters from "hparameters" (≈2 lines)
    # stride = None
    # f = None
    
    # Retrieve dimensions from A_prev's shape and dA's shape (≈2 lines)
    # m, n_H_prev, n_W_prev, n_C_prev = None
    # m, n_H, n_W, n_C = None
    
    # Initialize dA_prev with zeros (≈1 line)
    # dA_prev = None
    
    # for i in range(None): # loop over the training examples
        
        # select training example from A_prev (≈1 line)
        # a_prev = None
        
        # for h in range(n_H):                   # loop on the vertical axis
            # for w in range(n_W):               # loop on the horizontal axis
                # for c in range(n_C):           # loop over the channels (depth)
        
                    # Find the corners of the current "slice" (≈4 lines)
                    # vert_start = None
                    # vert_end = None
                    # horiz_start = None
                    # horiz_end = None
                    
                    # Compute the backward propagation in both modes.
                    # if mode == "max":
                        
                        # Use the corners and "c" to define the current slice from a_prev (≈1 line)
                        # a_prev_slice = None
                        
                        # Create the mask from a_prev_slice (≈1 line)
                        # mask = None

                        # Set dA_prev to be dA_prev + (the mask multiplied by the correct entry of dA) (≈1 line)
                        # dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += None
                        
                    # elif mode == "average":
                        
                        # Get the value da from dA (≈1 line)
                        # da = None
                        
                        # Define the shape of the filter as fxf (≈1 line)
                        # shape = None

                        # Distribute it to get the correct slice of dA_prev. i.e. Add the distributed value of da. (≈1 line)
                        # dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += None
    # YOUR CODE STARTS HERE
    (A_prev, hparameters) = cache
    stride = hparameters['stride']
    f = hparameters['f']
    m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape
    m, n_H, n_W, n_C = dA.shape
    dA_prev = np.zeros(A_prev.shape)
    for i in range(m):
        a_prev = A_prev[i, :, :, :]
        for h in range(n_H):
            for w in range(n_W):
                for c in range(n_C):
                    vert_start = h * stride
                    vert_end = vert_start + f
                    horiz_start = w * stride
                    horiz_end = horiz_start + f

                    if mode == 'max':
                        a_prev_slice = a_prev[vert_start:vert_end, horiz_start:horiz_end, c]
                        mask = create_mask_from_window(a_prev_slice)
                        dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, c] += mask * dA[i, h, w, c]
                    elif mode == 'average':
                        da = dA[i, h, w, c]
                        shape = (f, f)
                        dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, c] += distribute_value(da, shape)

                        
    # YOUR CODE ENDS HERE
    
    # Making sure your output shape is correct
    assert(dA_prev.shape == A_prev.shape)
    
    return dA_prev
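
A final sanity check (a minimal sketch with random data). In the average mode, the total gradient mass is preserved, so dA_prev.sum() should match dA.sum():

np.random.seed(1)
A_prev = np.random.randn(5, 5, 3, 2)
hparameters = {"stride": 1, "f": 2}
A, cache = pool_forward(A_prev, hparameters)
dA = np.random.randn(5, 4, 2, 2)   # same shape as A

dA_prev_max = pool_backward(dA, cache, mode="max")
dA_prev_avg = pool_backward(dA, cache, mode="average")
print(dA_prev_max.shape, dA_prev_avg.shape)   # both (5, 5, 3, 2)
print(np.isclose(dA.sum(), dA_prev_avg.sum()))   # True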