A homework write-up done in fits and starts | Criticism and corrections welcome

Logistic Regression with a Neural Network mindset

Dataset

Imports

import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset

# h5py is a file format (and matching library) designed for storing and processing large volumes of scientific data
# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()

# We add "_orig" to the image datasets (train and test) because we are going to preprocess them; after preprocessing we get train_set_x and test_set_x
# The labels train_set_y and test_set_y need no preprocessing
# Each row of train_set_x_orig and test_set_x_orig is an array representing one image
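For reference, lr_utils is the course-provided helper. A minimal sketch of what its load_dataset typically does, assuming the standard train_catvnoncat.h5 / test_catvnoncat.h5 files from the assignment (the file paths and HDF5 keys here are assumptions, not the author's code):

# Sketch of the course helper; paths and dataset keys are assumed
def load_dataset_sketch():
    train_dataset = h5py.File("datasets/train_catvnoncat.h5", "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])   # image data
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])   # labels

    test_dataset = h5py.File("datasets/test_catvnoncat.h5", "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])

    classes = np.array(test_dataset["list_classes"][:])            # class names

    # reshape the labels into row vectors of shape (1, m)
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes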

Dimensions and Sizes

# train_set_x_orig ==> numpy array (m_train, num_px, num_px, 3)
# 3 for the RGB channels
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]

print("Number of training examples: m_train =" + str(m_train))
print("Number of testing examples: m_test = " + str(m_test))
print("Height / width of each image: num_px = " + str(num_px))
print("train_set_x shape:" + str(train_set_x_orig.shape))
print("train_set_y shape:" + str(train_set_y.shape))
print("test_set_x shape:" + str(test_set_x_orig.shape))
print("test_set_y shape:" + str(test_set_y.shape))

Flatten and Reshape


# to reshape a matrix X of shape (a, b, c, d) into X_flatten of shape (b*c*d, a):
# X_flatten = X.reshape(X.shape[0], -1).T
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape((test_set_x_orig.shape[0], -1)).T

print(("train_set_x_flatten shape:" + str(train_set_x_flatten.shape)))
print("test_set_x_flatten shape:" + str(test_set_x_flatten.shape))

Standardize the Data 

Normalization and standardization

# standardize the data
# because the maximum value of a pixel channel in image data is 255
train_set_x = train_set_x_flatten / 255
test_set_x = test_set_x_flatten / 255

Functions

Sigmoid

def sigmoid(z):
    s = 1.0 / (1.0 + np.exp(-1.0 * z))
    return s
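This is fine for the assignment, but np.exp(-z) overflows (with a RuntimeWarning) when z is a large negative number. A numerically stable variant, offered only as an optional hardening and not part of the original assignment:

def sigmoid_stable(z):
    # evaluate exp only on non-positive arguments so it can never overflow
    z = np.asarray(z, dtype=float)
    s = np.empty_like(z)
    pos = z >= 0
    s[pos] = 1.0 / (1.0 + np.exp(-z[pos]))
    ez = np.exp(z[~pos])           # for z < 0: sigmoid(z) = e^z / (1 + e^z)
    s[~pos] = ez / (1.0 + ez)
    return s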

Initialization

# initialize with zeros
# create a vector of shape (dim,1) for w, and b = 0
def initialize_with_zeros(dim):
    w = np.zeros((dim, 1))
    b = 0
    assert (w.shape == (dim, 1))  # for debugging: raises an AssertionError if the condition fails
    assert (isinstance(b, float) or isinstance(b, int))
    return w, b

Gradient Descent


def propagate(w, b, X, Y):
    # forward propagation
    m = X.shape[1]
    A = sigmoid(np.dot(w.T, X) + b)
    cost = -(1.0 / m) * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))

    # backward propagation
    dw = (1.0 / m) * np.dot(X, (A - Y).T)
    db = (1.0 / m) * np.sum(A - Y)

    assert (dw.shape == w.shape)
    assert (db.dtype == float)
    cost = np.squeeze(cost)
    assert (cost.shape == ())

    grads = {"dw": dw,
             "db": db}
    return grads, cost
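A quick sanity check with toy inputs (numbers chosen arbitrarily for illustration) confirms the shapes come out as expected:

w_t = np.array([[1.0], [2.0]])
b_t = 2.0
X_t = np.array([[1.0, 2.0, -1.0], [3.0, 4.0, -3.2]])
Y_t = np.array([[1, 0, 1]])
grads_t, cost_t = propagate(w_t, b_t, X_t, Y_t)
print(grads_t["dw"].shape)    # (2, 1), matches the shape of w
print(grads_t["db"], cost_t)  # db and cost are scalars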

Update


# optimization
# print_cost -- True to print the loss every 100 steps
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost):
    costs = []

    for i in range(num_iterations):

        grads, cost = propagate(w, b, X, Y)

        dw = grads["dw"]
        db = grads["db"]

        # update
        w = w - learning_rate * dw
        b = b - learning_rate * db

        # record the costs every 100 iterations
        # list.append adds an element to the end of the list
        if i % 100 == 0:
            costs.append(cost)

        # print the cost every 100 iterations
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))

    params = {"w": w,
              "b": b}
    grads = {"dw": dw,
             "db": db}

    return params, grads, costs

Prediction

# predict whether a label is 0 or 1 using the learned parameters (w, b)
def predict(w, b, X):
    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)
    A = sigmoid(np.dot(w.T, X) + b)

    for i in range(A.shape[1]):
        if A[0, i] > 0.5:
            Y_prediction[0, i] = 1
        else:
            Y_prediction[0, i] = 0

    assert (Y_prediction.shape == (1, m))
    return Y_prediction
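Toy usage (arbitrary numbers), showing that predict thresholds the sigmoid activations at 0.5 and returns a (1, m) row of 0/1 labels:

w_p = np.array([[0.1], [0.2]])
b_p = -0.3
X_p = np.array([[1.0, -1.1, -3.2], [1.2, 2.0, 0.1]])
print(predict(w_p, b_p, X_p))   # shape (1, 3), entries in {0, 1}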

Model 

# Builds the logistic regression model by calling the functions above
def model(X_train, Y_train, X_test, Y_test, num_iterations, learning_rate, print_cost):
    """
    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
    learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
    print_cost -- Set to true to print the cost every 100 iterations

    Returns:
    d -- dictionary containing information about the model.
    """
    # initialize parameters
    w, b = initialize_with_zeros(X_train.shape[0])

    # Gradient descent
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)

    # Retrieve parameters w and b
    w = parameters["w"]
    b = parameters["b"]

    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)

    # Print train/test Errors
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train": Y_prediction_train,
         "w": w,
         "b": b,
         "learning_rate": learning_rate,
         "num_iterations": num_iterations}

    return d
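The Result section below appears to come from a call like the following; num_iterations = 2000 is inferred from the cost log ending at iteration 1900, and learning_rate = 0.005 is the course's default value, so treat both as assumptions:

d = model(train_set_x, train_set_y, test_set_x, test_set_y,
          num_iterations=2000, learning_rate=0.005, print_cost=True)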

Result

Number of training examples: m_train = 209
Number of testing examples: m_test = 50
Height / width of each image: num_px = 64
train_set_x shape:(209, 64, 64, 3)
train_set_y shape:(1, 209)
test_set_x shape:(50, 64, 64, 3)
test_set_y shape:(1, 50)
train_set_x_flatten shape:(12288, 209)
test_set_x_flatten shape:(12288, 50)


Cost after iteration 0: 0.693147
Cost after iteration 100: 0.584508
Cost after iteration 200: 0.466949
Cost after iteration 300: 0.376007
Cost after iteration 400: 0.331463
Cost after iteration 500: 0.303273
Cost after iteration 600: 0.279880
Cost after iteration 700: 0.260042
Cost after iteration 800: 0.242941
Cost after iteration 900: 0.228004
Cost after iteration 1000: 0.214820
Cost after iteration 1100: 0.203078
Cost after iteration 1200: 0.192544
Cost after iteration 1300: 0.183033
Cost after iteration 1400: 0.174399
Cost after iteration 1500: 0.166521
Cost after iteration 1600: 0.159305
Cost after iteration 1700: 0.152667
Cost after iteration 1800: 0.146542
Cost after iteration 1900: 0.140872
train accuracy: 99.04306220095694 %
test accuracy: 70.0 %

Learning Rate

# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()

 

…Why does my plot look different from Professor Ng's? Is it a matter of fitting?
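Two likely explanations: as originally written, the cost-recording condition i / 100 == 0 only appends the cost at iteration 0 (fixed above to i % 100 == 0), so there is almost nothing to plot; and the course figure also overlays the cost curves of several learning rates rather than a single run. A sketch of that comparison, assuming the course's suggested rates:

# the learning rates below are the course's suggested values (an assumption here)
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for lr in learning_rates:
    print("learning rate: " + str(lr))
    models[str(lr)] = model(train_set_x, train_set_y, test_set_x, test_set_y,
                            num_iterations=1500, learning_rate=lr, print_cost=False)

for lr in learning_rates:
    plt.plot(np.squeeze(models[str(lr)]["costs"]), label=str(lr))

plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.legend(loc='upper right')
plt.show()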

Cat Pictures

This cat looks nice haha (index = 10)

index = 1
plt.imshow(test_set_x[:, index].reshape((num_px, num_px, 3)))
plt.show()
print("y = " + str(test_set_y[:, index]) + ", it's a '" + classes[np.squeeze(test_set_y[:, index])].decode(
      "utf-8") + "' picture." + "You predict it's a '" + classes[int(d["Y_prediction_test"][0, index])].decode(
      "utf-8") + "' picture.")

Errors

Cost after iteration 0: 0.693147

Bug: the console prints only one iteration's cost

Analysis:

# optimization
# print_cost -- True to print the loss every 100 steps
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost):
    costs = []

    for i in range(num_iterations):

        grads, cost = propagate(w, b, X, Y)

        dw = grads["dw"]
        db = grads["db"]

        # update
        w = w - learning_rate * dw
        b = b - learning_rate * db

        # record the costs every 100 iterations
        # list.append adds an element to the end of the list
        if i % 100 == 0:
            costs.append(cost)

        # print the cost every 100 iterations
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))

# Wrong indentation:
"""
        params = {"w": w,
                  "b": b}
        grads = {"dw": dw,
                 "db": db}

        return params, grads, costs
"""


# Correct indentation:
    params = {"w": w,
              "b": b}
    grads = {"dw": dw,
             "db": db}

    return params, grads, costs

 

The indentation error nested the return statement inside the for loop, so the function returned after the first iteration and only one set of w, b, dw, db values was ever computed.

numpy.array

Python provides the list container, which can serve as an array, but a list's elements can be arbitrary objects, so the list actually stores pointers to those objects. Storing the simple list [1, 2, 3] therefore takes three pointers plus three integer objects, which is clearly inefficient for numerical work.

Python also ships an array module, but it supports only one-dimensional arrays (whereas frameworks like TensorFlow lean on a matrix view of data), has no multi-dimensional arrays, and offers no mathematical functions, so it too is unsuitable for numerical computing.

NumPy fills these gaps.
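A small comparison makes the difference concrete: element-wise math on a list needs an explicit Python-level loop, while a NumPy array stores homogeneous values in one contiguous buffer and vectorizes the operation:

lst = [1, 2, 3]                  # list of pointers to int objects
arr = np.array([1, 2, 3])        # one contiguous buffer of fixed-size ints

print([x * 2 for x in lst])      # list: explicit Python loop -> [2, 4, 6]
print(arr * 2)                   # array: vectorized in C     -> [2 4 6]
print(arr.dtype, arr.itemsize)   # homogeneous dtype, fixed element size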

numpy.squeeze(a, axis=None)

1) a is the input array;
2) axis specifies the dimensions to remove; each specified dimension must have size 1, otherwise an error is raised;
3) axis may be None, an int, or a tuple of ints (optional); if axis is None, all single-dimensional entries are removed;
4) return value: an array;
5) the original array is not modified.
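A quick demo of these points:

a = np.ones((1, 3, 1))
print(np.squeeze(a).shape)           # (3,)    all size-1 axes removed
print(np.squeeze(a, axis=0).shape)   # (3, 1)  only axis 0 removed
print(a.shape)                       # (1, 3, 1)  the original is unchanged
# np.squeeze(a, axis=1) would raise ValueError because axis 1 has size 3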
 
