# Andrew Ng neural-network course - Course 1, Week 2, Assignment 2
#
# Build a logistic-regression classifier to recognize cats. This assignment
# walks you through the neural-network mindset step by step while sharpening
# your intuition for deep learning.

import numpy as np
import matplotlib.pyplot as plt #matplotlib是一个著名的Python图形库
import h5py  #h5py是一个常用的包,可以处理存储为H5文件格式的数据集
from PIL import Image
from lr_utils import load_dataset
import scipy
from scipy import misc

# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes=load_dataset()
# Each entry of train_set_x_orig / test_set_x_orig is an array representing one
# image. Run the code below to visualize an example; feel free to change the
# `index` value and rerun to look at other images.
index=5
plt.imshow(train_set_x_orig[index]) # render the image
plt.show() # display it
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") +  "' picture.")


# Common preprocessing steps for a dataset are:
# 1. Figure out the sizes and shapes of the data (m_train, m_test, num_px, ...)
# 2. Reshape the dataset so every example is a vector of size (num_px * num_px * 3, 1)
# 3. "Standardize" the data

# Find the values of:
# m_train (number of training examples)
# m_test (number of test examples)
# num_px (= height of a training image = width of a training image)
# train_set_x_orig is a numpy array of shape (m_train, num_px, num_px, 3)

m_train=train_set_x_orig.shape[0]
m_test=test_set_x_orig.shape[0]
num_px=train_set_x_orig.shape[1]

print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))


# Reshape the training and test sets so each (num_px, num_px, 3) image is
# flattened into a single column vector of shape (num_px*num_px*3, 1).
# Afterwards each dataset is a numpy array in which every column represents one
# flattened image; there should be m_train (resp. m_test) columns.
# train_set_x_orig=train_set_x_orig.reshape(num_px*num_px*3,m_train)
# test_set_x_orig=test_set_x_orig.reshape(num_px*num_px*3,m_test)
# Trick for flattening a matrix X of shape (a,b,c,d) into X_flatten of shape (b*c*d, a):
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0],-1).T # reshape(a,-1) keeps a rows and infers the rest
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T

print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0])) # sanity-check a few values after reshaping

# A common preprocessing step in machine learning is to center and standardize
# the dataset (subtract the whole array's mean from every example, then divide
# by the whole array's standard deviation). For image data it is simpler and
# almost as effective to just divide every row by 255 (the maximum value of a
# pixel channel).
train_set_x=train_set_x_flatten/255
test_set_x=test_set_x_flatten/255


#The main steps to build the neural network are:
# 1. Define the model structure (e.g. the number of input features)
# 2. Initialize the model's parameters
# 3. Loop:
#
#      compute the current loss (forward propagation)
#      compute the current gradients (backward propagation)
#      update the parameters (gradient descent)
# You usually build steps 1-3 separately and then combine them into a single
# function called "model()".

# Helper function
def sigmoid(z):
    """Return the logistic sigmoid 1 / (1 + e^{-z}) of a scalar or numpy array."""
    return 1.0 / (1.0 + np.exp(-z))

# print(sigmoid(np.array([0,2])))

# Parameter initialization
def initialize_with_zeros(dim):
    """Create a zero weight column vector of shape (dim, 1) and a scalar bias b = 0."""
    w = np.zeros(shape=(dim, 1))
    b = 0
    assert w.shape == (dim, 1)
    assert isinstance(b, (float, int))  # bias must be a plain Python scalar
    return w, b

# Smoke-test the initializer with a tiny dimension.
dim = 2
w, b = initialize_with_zeros(dim)
# print ("w = " + str(w))
# print ("b = " + str(b))

# Forward and backward propagation
# propagate() computes the cost function and its gradients.
def propagate(w,b,X,Y):
    """Compute the logistic-regression cost and its gradients.

    Arguments:
    w -- weights, numpy array of shape (n, 1)
    b -- bias, a scalar
    X -- data, numpy array of shape (n, m)
    Y -- true labels, numpy array of shape (1, m) with entries in {0, 1}

    Returns:
    grads -- dict with "dw" (same shape as w) and "db" (a scalar)
    cost -- negative log-likelihood cost, a scalar
    """
    m=X.shape[1] # number of examples in the dataset
    A=sigmoid(np.dot(w.T,X)+b) # activations, shape (1, m)
    # Clip the activations away from exactly 0/1 before taking logs, so the
    # cost never becomes -inf/nan when the sigmoid saturates; for
    # non-saturated activations this clip is a no-op.
    A_safe=np.clip(A, 1e-15, 1 - 1e-15)
    cost=-1/m*np.sum(Y*np.log(A_safe)+(1-Y)*np.log(1-A_safe)) # cross-entropy cost
    dw=1/m*np.dot(X,(A-Y).T) # gradient of the cost w.r.t. w
    db=1/m*np.sum(A-Y)       # gradient of the cost w.r.t. b
    grads={
        "dw":dw,
        "db":db
    }
    return grads,cost

# Quick numerical check of propagate() on tiny toy inputs.
w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))

# Optimization loop:
#   - compute the cost function and its gradients (via propagate)
#   - update the parameters with gradient descent
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
    """Learn w and b by running gradient descent.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (0 if non-cat, 1 if cat), shape (1, number of examples)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- True to print the loss every 100 steps

    Returns:
    params -- dictionary containing the weights w and bias b
    grads -- dictionary with the last gradients of the weights and bias
    costs -- list of costs recorded every 100 iterations (for the learning curve)
    """
    costs = []
    for step in range(num_iterations):
        grads, cost = propagate(w, b, X, Y)
        dw, db = grads["dw"], grads["db"]
        # Gradient-descent update; rebind rather than mutate the caller's arrays.
        w = w - learning_rate * dw
        b = b - learning_rate * db
        # Record (and optionally report) the cost every 100 iterations.
        if step % 100 == 0:
            costs.append(cost)
            if print_cost:
                print("Cost after iteration %i: %f" % (step, cost))
    params = {"w": w, "b": b}
    grads = {"dw": dw, "db": db}
    return params, grads, costs

# Run a short optimization on the toy data above.
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)

# print ("w = " + str(params["w"]))
# print ("b = " + str(params["b"]))
# print ("dw = " + str(grads["dw"]))
# print ("db = " + str(grads["db"]))
# print(costs)

# The previous function outputs the learned w and b; use them to predict the
# labels of a dataset X. Entries of the activation A are mapped to 0 (if
# activation <= 0.5) or 1 (if activation > 0.5) and stored in "Y_prediction".
def predict(w,b,X):
    """Predict binary labels for X with learned logistic-regression parameters.

    Arguments:
    w -- weights, numpy array of shape (n, 1)
    b -- bias, a scalar
    X -- data of shape (n, m)

    Returns:
    Y_prediction -- numpy array of shape (1, m) with entries 0. or 1.
    """
    m=X.shape[1]
    A = sigmoid(np.dot(w.T, X) + b) # probabilities, shape (1, m)
    # Vectorized thresholding replaces the original per-element Python loop:
    # A > 0.5 -> 1., otherwise 0. (identical results, one C-level pass).
    Y_prediction = (A > 0.5).astype(float)
    assert (Y_prediction.shape == (1, m))
    return Y_prediction

# print ("predictions = " + str(predict(w, b, X)))

# You have already implemented the following pieces:
# 1. initialize (w, b)
# 2. iteratively optimize the loss to learn the parameters (w, b):
#     compute the loss and its gradients
#     update the parameters with gradient descent
# 3. use the learned (w, b) to predict labels for a given set of examples

# Combine everything into one model:
# Y_prediction_test -- predictions on the test set
# Y_prediction_train -- predictions on the training set
# w, costs, grads -- outputs of optimize()
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
    """Train a logistic-regression model and report train/test accuracy.

    Arguments:
    X_train -- training set, numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels, numpy array (vector) of shape (1, m_train)
    X_test -- test set, numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels, numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter: number of optimization iterations
    learning_rate -- hyperparameter: learning rate used in optimize()'s update rule
    print_cost -- set to True to print the cost every 100 iterations

    Returns:
    d -- dict with costs, predictions, learned parameters and hyperparameters
    """
    weights, bias = initialize_with_zeros(X_train.shape[0])
    parameters, grads, costs = optimize(weights, bias, X_train, Y_train, num_iterations, learning_rate, print_cost)
    weights, bias = parameters["w"], parameters["b"]
    Y_prediction_test = predict(weights, bias, X_test)
    Y_prediction_train = predict(weights, bias, X_train)
    # Accuracy = 100% minus the mean absolute difference between predictions and labels.
    train_accuracy = 100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100
    test_accuracy = 100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100
    print("train accuracy: {} %".format(train_accuracy))
    print("test accuracy: {} %".format(test_accuracy))

    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train": Y_prediction_train,
         "w": weights,
         "b": bias,
         "learning_rate": learning_rate,
         "num_iterations": num_iterations}

    return d

# Train the model on the cat dataset.
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)

# Plot the learning curve (cost recorded every 100 iterations).
# costs=np.squeeze(d["costs"]) # np.squeeze removes single-dimensional entries from an array's shape
costs=d["costs"]
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()

#Different learning rates lead to different costs and therefore to different
# predictions. If the learning rate is too large (0.01) the cost may oscillate
# up and down; it may even diverge (although in this example 0.01 still ends up
# with a reasonably low cost eventually).
# A lower cost does not necessarily mean a better model. Overfitting happens
# when the training accuracy is much higher than the test accuracy.
# In deep learning we usually recommend that you:
#      choose the learning rate that best minimizes the cost function;
#      if the model overfits, use other techniques to reduce overfitting.

# Test the trained model on your own image.
fname = './images/my_image.jpg'
image = np.array(plt.imread(fname))
# scipy.misc.imresize was deprecated in SciPy 1.0 and removed in 1.3; resize
# with PIL (already imported) instead. Also divide by 255 so the pixel values
# match the [0, 1] preprocessing applied to the training data above.
my_image = np.array(Image.fromarray(image).resize((num_px, num_px))).reshape((1, num_px*num_px*3)).T / 255
my_predicted_image = predict(d["w"], d["b"], my_image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") +  "\" picture.")
plt.imshow(image)
plt.show()

  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值