Andrew Ng's Programming Assignments, Course 1 - Neural Networks and Deep Learning - Week 2

Reposted from: https://blog.csdn.net/u013733326/article/details/79639509

import numpy as np
import h5py
import matplotlib.pyplot as plt
from lr_utils import load_dataset

# The sigmoid activation function, applied elementwise: s = 1 / (1 + e^(-z))
def sigmoid(z):
    s = 1 / (1 + np.exp(-z))
    return s
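
# Aside (my addition, not in the original post): sigmoid'(z) = s * (1 - s).
# A quick finite-difference check of this identity, commented out in the same
# style as the tests near the bottom of the script:
# z0, eps = 0.5, 1e-6
# numeric = (sigmoid(z0 + eps) - sigmoid(z0 - eps)) / (2 * eps)
# analytic = sigmoid(z0) * (1 - sigmoid(z0))
# print(abs(numeric - analytic))  # on the order of 1e-12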

# Initialize the parameters w and b.
# Creates a zero vector of shape (dim, 1) for w and sets b to 0.
# dim is the size of the w vector (one weight per input feature).
# Returns w, a (dim, 1) zero vector, and b, a scalar initialized to 0.
# Zero initialization is fine for logistic regression: with a single layer
# there is no symmetry to break, unlike in deeper networks.
def initialize_with_zeros(dim):
    w = np.zeros(shape = (dim, 1))
    b = 0
    # Assertions to make sure the shapes and types are as expected
    assert(w.shape == (dim, 1))
    assert(isinstance(b, float) or isinstance(b, int))

    return (w, b)
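
# Example (my addition): initialize_with_zeros(2) returns
# (array([[0.], [0.]]), 0).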

# Propagation function (forward and backward pass).
# w is the weight vector, b the bias, X the stacked training images (one
# example per column), and Y the corresponding labels, shape (1, m).
# Returns cost, the cross-entropy cost, together with dw, the derivative of
# the cost with respect to w, and db, the derivative with respect to b.
def propagate(w, b, X, Y):
    m = X.shape[1]

    # Forward propagation
    A = sigmoid(np.dot(w.T, X) + b)  # activations, shape (1, m)
    cost = (-1 / m) * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))  # cross-entropy cost

    # Backward propagation
    dw = (1 / m) * np.dot(X, (A - Y).T)
    db = (1 / m) * np.sum(A - Y)

    # Assertions to make sure the data is correct
    assert(dw.shape == w.shape)
    assert(db.dtype == float)

    cost = np.squeeze(cost)
    assert(cost.shape == ())

    grads = {
        'dw': dw,
        'db': db
    }

    return (grads, cost)
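
# Optional gradient check (my addition, not part of the original post;
# commented out like the other tests below): compare dw[0, 0] from propagate()
# against a centered finite difference of the cost.
# eps = 1e-7
# w0, b0 = np.array([[1.], [2.]]), 2.
# X0, Y0 = np.array([[1., 2.], [3., 4.]]), np.array([[1., 0.]])
# grads0, _ = propagate(w0, b0, X0, Y0)
# w_plus, w_minus = w0.copy(), w0.copy()
# w_plus[0, 0] += eps
# w_minus[0, 0] -= eps
# _, cost_plus = propagate(w_plus, b0, X0, Y0)
# _, cost_minus = propagate(w_minus, b0, X0, Y0)
# print(abs((cost_plus - cost_minus) / (2 * eps) - grads0['dw'][0, 0]))  # ~1e-10 or smaller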


# Optimize w and b by gradient descent.
# num_iterations is the number of update steps, learning_rate the step size.
# print_cost controls whether the cost is printed every 100 iterations.
# Returns the learned parameters w and b together with the final gradients
# and the costs recorded during training.
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
    costs = []

    for i in range(num_iterations):
        grads, cost = propagate(w, b, X, Y)
        dw = grads['dw']
        db = grads['db']
        w = w - learning_rate * dw
        b = b - learning_rate * db

        # Record the cost every 100 iterations, and optionally print it
        if i % 100 == 0:
            costs.append(cost)
            if print_cost:
                print('Cost after iteration %i: %f' % (i, cost))

    # Package the learned parameters and the final gradients
    params = {
        "w": w,
        "b": b
    }
    grads = {
        "dw": dw,
        "db": db
    }

    return (params, grads, costs)

# Prediction function
# Returns Y_prediction, a (1, m) vector of 0/1 predictions for every image in X
def predict(w, b, X):
    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)

    A = sigmoid(np.dot(w.T, X) + b)
    # Convert the probabilities into hard 0/1 predictions
    for i in range(A.shape[1]):
        Y_prediction[0, i] = 1 if A[0, i] > 0.5 else 0

    assert(Y_prediction.shape == (1, m))

    return Y_prediction
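
# Note (my addition): the loop above can be replaced by a single vectorized
# line with identical results:
# Y_prediction = (A > 0.5).astype(float)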

# Wrap the functions above into a single model; calling this one function
# runs the whole pipeline.
# Returns d, a dictionary containing information about the trained model.
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
    w, b = initialize_with_zeros(X_train.shape[0])  # initialize the parameters
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)

    w, b = parameters['w'], parameters['b']

    # Predict on the test and training sets with the learned w and b
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)

    # Report accuracy: predictions and labels are both 0/1, so
    # np.mean(np.abs(prediction - Y)) is exactly the error rate.
    print('Train accuracy: %.2f %%' % (100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print('Test accuracy: %.2f %%' % (100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    d = {
        'costs': costs,
        'Y_prediction_test': Y_prediction_test,
        'Y_prediction_train': Y_prediction_train,
        'w': w,
        'b': b,
        'learning_rate': learning_rate,
        'num_iterations': num_iterations
    }

    return d

# Load the training and test sets
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()

# Display one example image from the training set
index = 6
plt.imshow(train_set_x_orig[index])
plt.show()

# Print the label of the current training example.
# np.squeeze collapses singleton dimensions: train_set_y[:, index] is the
# array [1], while np.squeeze(train_set_y[:, index]) is the scalar 1; only
# the squeezed value can be used to index classes and decode the result.
print("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode('utf-8') + "' picture")

m_train = train_set_y.shape[1]  # number of training images
m_test = test_set_y.shape[1]  # number of test images
num_px = train_set_x_orig.shape[1]  # width/height of each (square) image

print('Number of training examples:', str(m_train))
print('Number of test examples:', str(m_test))
print('Image width/height:', str(num_px))
print('Shape of the training images:', str(train_set_x_orig.shape))
print('Shape of the training labels:', str(train_set_y.shape))
print('Shape of the test images:', str(test_set_x_orig.shape))
print('Shape of the test labels:', str(test_set_y.shape))

# Flatten and transpose the training and test images so each column is one flattened image
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
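
# What the reshape does, on a tiny example (my illustration, commented out):
# each image of shape (num_px, num_px, 3) is flattened into one column.
# demo = np.arange(2 * 2 * 2 * 3).reshape(2, 2, 2, 3)  # two "images" of shape (2, 2, 3)
# print(demo.reshape(demo.shape[0], -1).T.shape)       # (12, 2)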
print('Shape of the flattened training set:', str(train_set_x_flatten.shape))
print('Shape of the training labels:', str(train_set_y.shape))
print('Shape of the flattened test set:', str(test_set_x_flatten.shape))
print('Shape of the test labels:', str(test_set_y.shape))

# Standardize the dataset: pixel values range from 0 to 255, so dividing by
# 255 rescales every feature to [0, 1], which keeps the gradients on a common
# scale and speeds up gradient descent.
train_set_x = train_set_x_flatten / 255
test_set_x = test_set_x_flatten / 255
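
# Aside (my addition): for general tabular data one would usually standardize
# per feature instead, e.g. (commented out; not needed for pixel data):
# mu = train_set_x_flatten.mean(axis = 1, keepdims = True)
# sigma = train_set_x_flatten.std(axis = 1, keepdims = True)
# train_set_x_standardized = (train_set_x_flatten - mu) / sigma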

# print('Testing sigmoid')
# print('sigmoid(0) = ', sigmoid(0))
# print('sigmoid(-0.1) = ', sigmoid(-0.1))
# print('sigmoid(10) = ', sigmoid(10))
#
# print('Testing propagate')
# w, b, X, Y = np.array([[1], [2]]), 2, np.array([[1, 2], [3, 4]]), np.array([[1, 0]])
# grads, cost = propagate(w, b, X, Y)
# print('dw =', str(grads['dw']))
# print('db =', str(grads['db']))
# print('cost =', str(cost))
#
# print()
# print('Testing optimize')
# w, b, X, Y = np.array([[1], [2]]), 2, np.array([[1, 2], [3, 4]]), np.array([[1, 0]])
# params, grads, costs = optimize(w, b, X, Y, num_iterations = 100, learning_rate = 0.009, print_cost = False)
# print('w =', str(params['w']))
# print('b =', str(params['b']))
# print('dw =', str(grads['dw']))
# print('db =', str(grads['db']))
#
# print()
# print('Testing predict')
# w, b, X, Y = np.array([[1], [2]]), 2, np.array([[1, 2], [3, 4]]), np.array([[1, 0]])
# print('predictions:', str(predict(w, b, X)))

print('==================== Testing the model ======================')
# d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 5000, learning_rate = 0.005, print_cost = True)
#
# # Plot the cost curve recorded during training
# costs = np.squeeze(d['costs'])
# plt.plot(costs)
# plt.ylabel('cost')
# plt.xlabel('iterations (per hundreds)')
# plt.title('Learning_rate = ' + str(d['learning_rate']))
# plt.show()


# Compare several learning rates
learning_rates = [0.01, 0.001, 0.0001]
models = {}

# Train one model per learning rate
for i in learning_rates:
    models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)

# Plot the cost curve of each model on the same axes
for i in learning_rates:
    plt.plot(np.squeeze(models[str(i)]['costs']), label = str(models[str(i)]['learning_rate']))
plt.ylabel('cost')
plt.xlabel('iterations (hundreds)')

legend = plt.legend(loc = 'upper center', shadow = True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
