Andrew Ng Deep Learning, Course 1, Week 4, Assignment 4_2

# Import packages and data

import time

import numpy as np

import h5py

import matplotlib.pyplot as plt

import scipy

from PIL import Image

from scipy import ndimage

from dnn_app_utils_v2 import *


%matplotlib inline

plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots

plt.rcParams['image.interpolation'] = 'nearest'

plt.rcParams['image.cmap'] = 'gray'


%load_ext autoreload

%autoreload 2


np.random.seed(1)

Load the data

train_x_orig, train_y, test_x_orig, test_y, classes = load_data()

# Example of a picture

index = 7

plt.imshow(train_x_orig[index])

print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") +  " picture.")

y = 1. It's a cat picture.

m_train = train_x_orig.shape[0]

num_px = train_x_orig.shape[1]

m_test = test_x_orig.shape[0]


print ("Number of training examples: " + str(m_train))

print ("Number of testing examples: " + str(m_test))

print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")

print ("train_x_orig shape: " + str(train_x_orig.shape))

print ("train_y shape: " + str(train_y.shape))

print ("test_x_orig shape: " + str(test_x_orig.shape))

print ("test_y shape: " + str(test_y.shape))

Number of training examples: 209
Number of testing examples: 50
Each image is of size: (64, 64, 3)
train_x_orig shape: (209, 64, 64, 3)
train_y shape: (1, 209)
test_x_orig shape: (50, 64, 64, 3)
test_y shape: (1, 50)

# Reshape the training and test examples 

train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T   # The "-1" makes reshape flatten the remaining dimensions

test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T


# Standardize data to have feature values between 0 and 1.

train_x = train_x_flatten/255.

test_x = test_x_flatten/255.


print ("train_x's shape: " + str(train_x.shape))

print ("test_x's shape: " + str(test_x.shape))

train_x's shape: (12288, 209)
test_x's shape: (12288, 50)

n_x = 12288

n_h = 7

n_y = 1

layers_dims = (n_x,n_h,n_y)

Import the packages and the dataset, load the data, and run a quick check.

Confirm the shapes of the loaded data, flatten each image into a one-dimensional column vector, and fix the size of the input fed to the neural network.
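A quick sanity check of the flattening (illustrative only, reusing the example index from above): each column of train_x_flatten is one image, so reshaping a column back to (num_px, num_px, 3) should reproduce the original picture exactly.

# Each column of train_x_flatten is one flattened image; undoing the
# reshape must give back the original (64, 64, 3) picture.
index = 7
recovered = train_x_flatten[:, index].reshape(num_px, num_px, 3)
assert (recovered == train_x_orig[index]).all()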

def two_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost = False):
    np.random.seed(1)
    grads = {}
    costs = []                       # record the cost every 100 iterations
    (n_x, n_h, n_y) = layers_dims

    # Initialize the parameters of the two-layer network
    parameters = initialize_parameters(n_x, n_h, n_y)

    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]

    for i in range(0, num_iterations):
        # Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID
        A1, cache1 = linear_activation_forward(X, W1, b1, 'relu')
        A2, cache2 = linear_activation_forward(A1, W2, b2, 'sigmoid')

        # Cross-entropy cost
        cost = compute_cost(A2, Y)

        # Backward propagation, starting from the derivative of the cost w.r.t. A2
        dA2 = -(np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))
        dA1, dW2, db2 = linear_activation_backward(dA2, cache2, 'sigmoid')
        dA0, dW1, db1 = linear_activation_backward(dA1, cache1, 'relu')

        grads['dW1'] = dW1
        grads['db1'] = db1
        grads['dW2'] = dW2
        grads['db2'] = db2

        # One gradient-descent step
        parameters = update_parameters(parameters, grads, learning_rate)

        W1 = parameters['W1']
        b1 = parameters['b1']
        W2 = parameters['W2']
        b2 = parameters['b2']

        # Record and optionally print the cost every 100 iterations
        if print_cost and i % 100 == 0:
            print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
            costs.append(cost)

    # Plot the learning curve
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters
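For reference, the dA2 line inside the loop is the derivative of the cross-entropy cost with respect to the output activation A2, which backward propagation needs as its starting point:

% Cross-entropy loss and its derivative w.r.t. A2; this is exactly what
% dA2 = -(np.divide(Y, A2) - np.divide(1 - Y, 1 - A2)) computes.
\mathcal{L}(A_2, Y) = -\bigl(Y \log A_2 + (1 - Y) \log(1 - A_2)\bigr)
\qquad
\frac{\partial \mathcal{L}}{\partial A_2} = -\left(\frac{Y}{A_2} - \frac{1 - Y}{1 - A_2}\right)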

First, build a two-layer neural network model on top of the functions written in Part 1 of this assignment, and train and test it on the input data.

Initialize the parameters according to the two-layer architecture, set the number of iterations, and print the training cost every 100 iterations.

Training finally returns the learned parameters; a usage sketch follows below.
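The post does not show the training call for the two-layer model; a minimal sketch, assuming the same predict helper from dnn_app_utils_v2 that is used at the end of this post (it prints the accuracy and returns the predictions):

# Train the two-layer model and measure accuracy on both splits.
parameters = two_layer_model(train_x, train_y, layers_dims=(n_x, n_h, n_y),
                             num_iterations=2500, print_cost=True)
predictions_train = predict(train_x, train_y, parameters)
predictions_test = predict(test_x, test_y, parameters)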

def L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost = False):
    np.random.seed(1)
    costs = []                       # record the cost every 100 iterations

    # Initialize the parameters of the L-layer network
    parameters = initialize_parameters_deep(layers_dims)

    for i in range(0, num_iterations):
        # Forward propagation: [LINEAR -> RELU] * (L-1) -> LINEAR -> SIGMOID
        AL, caches = L_model_forward(X, parameters)

        # Cross-entropy cost
        cost = compute_cost(AL, Y)

        # Backward propagation through all layers
        grads = L_model_backward(AL, Y, caches)

        # One gradient-descent step
        parameters = update_parameters(parameters, grads, learning_rate)

        # Record and optionally print the cost every 100 iterations
        if print_cost and i % 100 == 0:
            print("cost after iteration %i %f " % (i, cost))
            costs.append(cost)

    # Plot the learning curve
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters

For the deep (L-layer) neural network, the main steps in each iteration are:

Forward propagation: takes the input X and the initialized parameters, and outputs the final prediction AL together with the caches saved along the way.

Cost computation: takes AL from the previous step and the original labels Y, and outputs the cost.

Backward propagation: takes AL, the labels Y, and the caches from the forward pass, and outputs grads (the gradients for each layer).

Parameter update: takes the gradients grads, the parameters, and the learning rate learning_rate, performs one gradient-descent step, and stores the updated parameters.
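Note that the training call below reuses the name layers_dims, which was last set to the two-layer spec (n_x, n_h, n_y). The original assignment redefines it to a 4-layer architecture at this point, which is also the architecture the cost values printed below correspond to:

# 4-layer model: 12288 -> 20 -> 7 -> 5 -> 1
layers_dims = [12288, 20, 7, 5, 1]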

parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)

cost after iteration 0 0.771749 
cost after iteration 100 0.672053 
cost after iteration 200 0.648263 
cost after iteration 300 0.611507 
cost after iteration 400 0.567047 
cost after iteration 500 0.540138 
cost after iteration 600 0.527930 
cost after iteration 700 0.465477 
cost after iteration 800 0.369126 
cost after iteration 900 0.391747 
cost after iteration 1000 0.315187 
cost after iteration 1100 0.272700 
cost after iteration 1200 0.237419 
cost after iteration 1300 0.199601 
cost after iteration 1400 0.189263 
cost after iteration 1500 0.161189 
cost after iteration 1600 0.148214 
cost after iteration 1700 0.137775 
cost after iteration 1800 0.129740 
cost after iteration 1900 0.121225 
cost after iteration 2000 0.113821 
cost after iteration 2100 0.107839 
cost after iteration 2200 0.102855 
cost after iteration 2300 0.100897 
cost after iteration 2400 0.092878 

Here, as gradient descent proceeds, the cost mostly decreases, but it also fluctuates at times (for example between iterations 800 and 900 above).

The cause of the fluctuations still needs further study; one plausible explanation is that a fixed learning rate occasionally overshoots along steep directions of the cost surface, so the cost rises temporarily before falling again.
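One way to probe the overshoot hypothesis (an illustrative experiment, not part of the assignment) is to retrain with a smaller learning rate and compare the curves; a smoother but slower-falling cost would support it.

# Smaller step size: if overshooting causes the bumps, this curve
# should be smoother, at the price of slower convergence.
parameters_slow = L_layer_model(train_x, train_y, layers_dims,
                                learning_rate=0.001, num_iterations=2500,
                                print_cost=True)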

my_image = "test4.jpg"   # name of the image file to test
my_image_y = [0]         # true label of this image (1 = cat, 0 = non-cat)

fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
# scipy.ndimage.imread(fname, flatten=False) converts the picture into an array;
# with the default mode 'RGB' the returned array has shape (height, width, 3)

# scipy.misc.imresize rescales the image to (num_px, num_px, 3); mind the matrix
# shape: it is then flattened into a (num_px*num_px*3, 1) column vector
my_image = scipy.misc.imresize(image, size=(num_px, num_px)).reshape((num_px*num_px*3, 1))

my_predicted_image = predict(my_image, my_image_y, parameters)

plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image))].decode("utf-8") + "\" picture")

When testing I tried many of my own pictures, but they were all classified as y = 1, even though the model's accuracy on the provided test set was basically fine; I inspected the intermediate parameters and the provided test model without finding anything wrong, so where is the problem?

A likely cause: the training and test sets were standardized by dividing by 255, but my_image above is passed to predict with raw pixel values in [0, 255]. Inputs roughly 255 times larger than anything seen during training saturate the final sigmoid, so almost every picture comes out as a cat. A corrected sketch follows.
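A minimal corrected sketch: standardize the image the same way as the training data, and (since scipy.ndimage.imread and scipy.misc.imresize have been removed from recent SciPy releases) load and resize with the already-imported PIL instead:

# Load and resize with Pillow instead of the removed SciPy helpers.
img = Image.open(fname).convert("RGB").resize((num_px, num_px))

# Flatten to a column vector and, crucially, divide by 255 so the input
# matches the standardization applied to train_x and test_x above.
my_image = np.array(img).reshape((num_px * num_px * 3, 1)) / 255.

my_predicted_image = predict(my_image, my_image_y, parameters)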
