Deep Neural Network for Image Classification: Application

上一篇文章中实现了一个两层神经网络和L层神经网络需要用到的函数

本篇我们利用这些函数来实现一个深层神经网络来实现图片的分类

1.首先是导入需要的包

import os
import time

import h5py
import matplotlib.pyplot as plt
import numpy as np
import scipy
from PIL import Image
from scipy import ndimage

from dnn_app_utils_v2 import *
np.random.seed(1)  # fix the global RNG seed so every run produces the same random numbers

2.加载数据集(采用的数据集是第二个作业的"cat vs non-cat" dataset),

训练数据集在train_cat.h5,测试数据集在test_cat.h5中

之前采用的Logistic Regression来区分是否是猫 达到的正确率只有70%,显然,正确率并不高,因此,通过建立一个神经网络来提升正确率

def load_data(train_path='train_cat.h5', test_path='test_cat.h5'):
    """Load the "cat vs non-cat" dataset from two HDF5 files.

    Parameters
    ----------
    train_path : str, optional
        HDF5 file holding "train_set_x" (features) and "train_set_y" (labels).
        Defaults to the original hard-coded 'train_cat.h5'.
    test_path : str, optional
        HDF5 file holding "test_set_x", "test_set_y" and "list_classes".
        Defaults to the original hard-coded 'test_cat.h5'.

    Returns
    -------
    tuple
        (train_set_x_orig, train_set_y_orig, test_set_x_orig,
         test_set_y_orig, classes). Label arrays are reshaped from (m,)
        to (1, m) to match the network's convention.
    """
    # Use context managers so the HDF5 file handles are always closed
    # (the original left both files open).
    with h5py.File(train_path, "r") as train_dataset:
        train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # train features
        train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # train labels

    with h5py.File(test_path, "r") as test_dataset:
        test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # test features
        test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # test labels
        classes = np.array(test_dataset["list_classes"][:])  # class names as bytes

    # Reshape labels to row vectors of shape (1, m).
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
调用函数

# BUG FIX: the original line read `rain_x_orig` — a typo for `train_x_orig` —
# which would leave `train_x_orig` undefined and raise NameError below.
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
# Shapes (from the "cat vs non-cat" dataset):
#   train_x_orig: (209, 64, 64, 3)   train_y: (1, 209)
#   test_x_orig:  (50, 64, 64, 3)    test_y:  (1, 50)
#   classes: (2,) == [b'non-cat' b'cat']

m_train = train_x_orig.shape[0]  # number of training examples
num_px = train_x_orig.shape[1]   # image height/width in pixels (images are square)
m_test = test_x_orig.shape[0]    # number of test examples

# Flatten each (num_px, num_px, 3) image into one column per example.
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T  # (64*64*3, 209)
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T     # (64*64*3, 50)

# Standardize the data to have feature values between 0 and 1.
train_x = train_x_flatten / 255
test_x = test_x_flatten / 255
此处,如果你想把train_cat.h5,test_cat.h5中所有的图片读出并分别保存到文件夹"F:\train_cat"和"F:\test_cat"中,就把下面的代码复制下来

# Export all dataset images as numbered JPEGs into F:\train_cat and F:\test_cat.
train_dir = os.path.abspath('F:\\train_cat')
t = 0
for i in range(209):  # 209 training images
    img = Image.fromarray(train_x_orig[i])
    dst = os.path.join(train_dir, "%06d" % t + '.jpg')
    img.save(dst)
    print("y="+str(train_y[0][i])+".It's a "+classes[train_y[0][i]].decode("utf-8")+" picture")
    t = t + 1

test_dir = os.path.abspath('F:\\test_cat')
k = 0
for i in range(50):  # 50 test images
    img = Image.fromarray(test_x_orig[i])
    # BUG FIX: the original built this filename from the stale counter `t`
    # (left at 209 by the loop above), so all 50 test images were written to
    # the same file "000209.jpg". Use this loop's own counter `k`.
    dst = os.path.join(test_dir, "%06d" % k + '.jpg')
    img.save(dst)
    print("y="+str(test_y[0][i])+".It's a "+classes[test_y[0][i]].decode("utf-8")+" picture")
    k = k + 1
# NOTE: the original also called plt.show() inside each loop with no imshow,
# opening 259 empty blocking figure windows; removed.
3.两层神经网络

# Layer sizes for the two-layer model.
n_x=12288  # input size: 64*64*3 flattened pixels
n_h=7  # hidden-layer units
n_y=1  # output units (binary: cat / non-cat)
layer_dims=(n_x,n_h,n_y)
def two_layer_model(X,Y,layer_dims,learning_rate=0.0075,num_iterations=3000,print_cost=False):
    """Train a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.

    X: input data, shape (n_x, number of examples)
    Y: true labels, shape (1, number of examples)
    layer_dims: tuple (n_x, n_h, n_y) of layer sizes
    learning_rate: gradient-descent step size
    num_iterations: number of iterations of the optimization loop
    print_cost: if True, print/record the cost every 100 iterations and plot it

    Returns:
    parameters -- a dictionary containing the learned W1, b1, W2, b2
    """
    np.random.seed(1)  # reproducible initialization
    grads = {}
    costs = []
    (n_x, n_h, n_y) = layer_dims
    parameters = initialize_parameters(n_x, n_h, n_y)  # W1, b1, W2, b2

    for i in range(num_iterations):
        # Retrieve the current parameters (updated each iteration below).
        W1, b1 = parameters["W1"], parameters["b1"]
        W2, b2 = parameters["W2"], parameters["b2"]

        # Forward propagation.
        A1, cache1 = linear_activation_forward(X, W1, b1, activation="relu")
        A2, cache2 = linear_activation_forward(A1, W2, b2, activation="sigmoid")

        # Compute the cross-entropy cost.
        cost = compute_cost(A2, Y)

        # Backward propagation, seeded with dJ/dA2 for the sigmoid output.
        dA2 = -(np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))
        dA1, dW2, db2 = linear_activation_backward(dA2, cache2, activation="sigmoid")
        dA0, dW1, db1 = linear_activation_backward(dA1, cache1, activation="relu")
        grads["dW1"] = dW1
        grads["db1"] = db1
        grads["dW2"] = dW2
        grads["db2"] = db2

        # Gradient-descent update.
        parameters = update_parameters(parameters, grads, learning_rate)

        # The original duplicated this condition; print and record together.
        if print_cost and i % 100 == 0:
            print("cost after iterations{}:{}".format(i,np.squeeze(cost)))
            costs.append(cost)

    # Plot the learning curve only when costs were actually recorded
    # (the original plotted an empty list when print_cost was False).
    if costs:
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations(per hundreds)')
        plt.title("learning rate="+str(learning_rate))
        plt.show()
    return parameters
调用上面的函数

# Train the two-layer model, then evaluate on the train and test sets.
parameters=two_layer_model(train_x,train_y,layer_dims=(n_x,n_h,n_y),learning_rate=0.0075,num_iterations=3000,print_cost=True)

prediction_train=predict(train_x,train_y,parameters)  # reported train accuracy: 1.0
prediction_test=predict(test_x,test_y,parameters)  # reported test accuracy: 0.72
训练集   Accuracy: 1.0

测试集  Accuracy: 0.72

显然测试集正确率并不高,因此我们采用多层神经网络

# Layer sizes for the deep model: 12288 inputs -> 20 -> 7 -> 5 -> 1 output.
layer_dims=[12288,20,7,5,1]  #5-layer model
def L_layer_model(X,Y,layer_dims,learning_rate=0.0075,num_iterations=3000,print_cost=False):
    """
    Implements an L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.

    :param X: input data, shape (num_px*num_px*3, number of examples)
    :param Y: true labels, shape (1, number of examples)
    :param layer_dims: list of layer sizes, e.g. [12288, 20, 7, 5, 1]
    :param learning_rate: gradient-descent step size
    :param num_iterations: number of iterations of the optimization loop
    :param print_cost: if True, print/record the cost every 100 iterations and plot it
    :return: parameters learned by the model, they can be used to predict
    """
    np.random.seed(1)  # reproducible initialization
    costs = []
    # Initialize the parameters for every layer.
    parameters = initialize_parameters_deep(layer_dims)
    for i in range(num_iterations):
        AL, caches = L_model_forward(X, parameters)
        cost = compute_cost(AL, Y)
        grads = L_model_backward(AL, Y, caches)
        # BUG FIX: the original passed the hard-coded literal 0.0075 here,
        # silently ignoring the `learning_rate` argument of this function.
        parameters = update_parameters(parameters, grads, learning_rate=learning_rate)
        # The original duplicated this condition; print and record together.
        if print_cost and i % 100 == 0:
            print("cost after iterations{}:{}".format(i,np.squeeze(cost)))
            costs.append(cost)
    # Plot the learning curve only when costs were actually recorded.
    if costs:
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations(per hundreds)')
        plt.title("learning rate=" + str(learning_rate))
        plt.show()
    return parameters

调用该函数:

# Train the 5-layer model, then evaluate on the train and test sets.
parameters=L_layer_model(train_x,train_y,layer_dims ,learning_rate=0.0075,num_iterations=2500,print_cost=True)
pred_train=predict(train_x,train_y,parameters)  # reported train accuracy: ~0.986
pred_test=predict(test_x,test_y,parameters)  # reported test accuracy: 0.8
print("parameters="+str(parameters))

训练集 Accuracy: 0.985645933014

测试集  Accuracy: 0.8


利用上面学得的参数,你可以对自己的图片进行预测

# Predict on a custom image using the learned parameters.
my_image = "000039.jpg"
my_label_y = [0]  # true label of the image (0 = non-cat, 1 = cat)
fname = "F:\\test_cat\\" + my_image
# FIX: scipy.ndimage.imread and scipy.misc.imresize were deprecated and then
# removed from SciPy; use PIL (already imported) to load and resize instead.
image = np.array(Image.open(fname))
resized = np.array(Image.fromarray(image).resize((num_px, num_px)))
# BUG FIX: the model was trained on inputs scaled to [0, 1] (divided by 255),
# so the custom image must be scaled the same way before prediction.
my_image = resized.reshape((num_px * num_px * 3, 1)) / 255
print(my_image.shape)

my_predicted_image = predict(my_image, my_label_y, parameters)

print(my_predicted_image)
plt.imshow(image)
print("y="+str(np.squeeze(my_predicted_image))+", your L-layer model predicts is a "+classes[int
(np.squeeze(my_predicted_image))].decode("utf-8")+" picture")

 该神经网络采用的数据集图片大小是64*64px,而我在网上下载的图片都很大,所以在进行resize后,出现图片的严重失真,对图片的特征辨别不正确,因此,并没有在我自己的图片上达到很好的效果










评论 6
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值