Andrew Ng Deep Learning Follow-Along Assignment (5) --------- Intro to TensorFlow (IMUDGES)

Reference blog: https://blog.csdn.net/u013733326/article/details/79971488

This post is mainly a record of my own learning; personally I think the reference blog above is the better read.


Without further ado, here's the code:
import numpy as np
import h5py
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import keras
import tensorflow as tf
from tensorflow.python.framework import ops
import tf_utils
import time

### Part 1: getting familiar with TensorFlow

np.random.seed(1)

y_hat = tf.constant(36, name="y_hat") #define y_hat as a constant, 36
y = tf.constant(39, name="y") #define y as a constant, 39

loss = tf.Variable((y-y_hat)**2, name="loss")  #create a variable for the loss

#note: this is TF1-style graph code; under TensorFlow 2.x you would need the
#tf.compat.v1 API throughout, plus tf.compat.v1.disable_eager_execution()
init = tf.compat.v1.global_variables_initializer()  #when session.run(init) runs later,
                                          #the loss variable is initialized and ready to compute
session = tf.compat.v1.Session()                      #create a session
session.run(init)                           #initialize the variables
#print(session.run(loss))
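#session.run(loss) prints 9, since loss = (y - y_hat)^2 = (39 - 36)^2 = 9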

a = tf.constant(2)
b = tf.constant(10)
c = tf.multiply(a, b)
#print(c)

sess = tf.Session()
#print(sess.run(c))
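#print(c) alone only shows an un-evaluated Tensor object; sess.run(c) actually evaluates the graph and prints 20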

#use feed_dict to supply a value for the placeholder x
x = tf.placeholder(tf.int64,name="x")
#print(sess.run(2 * x,feed_dict={x:3}))
sess.close()

#linear function
def linear_function():
    """
    Implements a linear function:
    initializes W as a random tensor of shape (4,3)
    initializes X as a random tensor of shape (3,1)
    initializes b as a random tensor of shape (4,1)
    :return: result - the result of running the session on Y = WX + b
    """
    np.random.seed(1) #set the random seed
    X = np.random.randn(3, 1)
    W = np.random.randn(4, 3)
    b = np.random.randn(4, 1)

    Y = tf.add(tf.matmul(W, X), b)  #tf.matmul is matrix multiplication
    # Y = tf.matmul(W,X) + b #this works as well

    #create a session and run it
    sess = tf.Session()
    result = sess.run(Y)

    #done with the session, close it
    sess.close()

    return result

#test the linear function
#print("result = " +  str(linear_function()))

#compute the sigmoid
def sigmoid(z):
    """
    Computes the sigmoid of z
    """
    #create a placeholder x, named "x"
    x = tf.placeholder(tf.float32, name="x")

    #compute sigmoid(x); z is fed in when the session runs
    sigmoid = tf.sigmoid(x)

    #create a session and run it
    sess = tf.Session()
    result = sess.run(sigmoid, feed_dict={x: z})
    sess.close()

    return result

#test
'''
print ("sigmoid(0) = " + str(sigmoid(0)))
print ("sigmoid(12) = " + str(sigmoid(12)))  '''

#computing the cost
#use a built-in function to compute the neural network's cost
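#the post skips the code for this step; below is a minimal sketch following the
#original assignment, which uses the built-in tf.nn.sigmoid_cross_entropy_with_logits
def cost(logits, labels):
    """
    Computes the cost using the sigmoid cross entropy
    :param logits: vector containing z, the output of the last linear unit
    :param labels: vector of labels y (1 or 0)
    :return: cost - the cost computed by the session
    """
    z = tf.placeholder(tf.float32, name="z")
    y = tf.placeholder(tf.float32, name="y")

    #the built-in op applies the sigmoid and the cross-entropy loss in one step
    cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y)

    sess = tf.Session()
    cost = sess.run(cost, feed_dict={z: logits, y: labels})
    sess.close()

    return cost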

#one-hot (0/1) encoding
def one_hot_matrix(labels, C):
    """
    Creates a matrix in which row i corresponds to class i and column j
    corresponds to training example j
    :param labels: vector containing the labels
    :param C: number of classes
    :return: one_hot - the one-hot matrix
    """
    #create a tf.constant equal to C, named "C"
    C = tf.constant(C, name="C")

    #use tf.one_hot; mind the axis
    one_hot_matrix = tf.one_hot(indices=labels, depth=C, axis=0)

    #create a session
    sess = tf.Session()

    #run the session
    one_hot = sess.run(one_hot_matrix)

    #close the session
    sess.close()

    return one_hot

#test
'''
labels = np.array([1,2,3,0,2,1])
one_hot = one_hot_matrix(labels, C=4) 
print(one_hot)   '''
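#for reference, the expected output has shape (4, 6):
# [[0. 0. 0. 1. 0. 0.]
#  [1. 0. 0. 0. 0. 1.]
#  [0. 1. 0. 0. 1. 0.]
#  [0. 0. 1. 0. 0. 0.]]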

#initializing with zeros and ones (here ones, via tf.ones; tf.zeros works the same way)
def ones(shape):
    """
    Creates an array of the given shape, with all values equal to 1
    :param shape: shape of the array to create
    :return: an array containing only ones
    """
    #use tf.ones()
    ones = tf.ones(shape)

    #create a session
    sess = tf.Session()

    #run the session
    ones = sess.run(ones)

    #close the session
    sess.close()

    return ones

#test
#print ("ones = " + str(ones([3])))

### Part 2

#####################################
#                                   #
#  Building your first neural       #
#  network with TensorFlow          #
#                                   #
#####################################

#load the dataset
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = tf_utils.load_dataset()

#flatten the data
X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T #each column is one example
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T

#normalize the data
X_train = X_train_flatten / 255
X_test = X_test_flatten / 255

#convert the labels to one-hot (0/1) matrices
Y_train = tf_utils.convert_to_one_hot(Y_train_orig, 6)
Y_test = tf_utils.convert_to_one_hot(Y_test_orig, 6)
'''
print("number of training examples = " + str(X_train.shape[1]))
print("number of test examples = " + str(X_test.shape[1]))
print("X_train.shape: " + str(X_train.shape))
print("Y_train.shape: " + str(Y_train.shape))
print("X_test.shape: " + str(X_test.shape))
print("Y_test.shape: " + str(Y_test.shape))  '''

#create placeholders for X and Y
def create_placeholders(n_x, n_y):
    """
    Creates the placeholders for the tensorflow session
    :param n_x: an integer, size of an image vector (64*64*3 = 12288)
    :param n_y: an integer, number of classes (0 through 5, so n_y = 6)
    :return: X - placeholder for the data input, of shape [n_x, None] and dtype "float"
             Y - placeholder for the input labels, of shape [n_y, None] and dtype "float"

    Hint:
        Using None lets us stay flexible about the number of examples the
        placeholders hold; in fact, the number of examples differs between
        training and testing.

    """
    X = tf.placeholder(tf.float32, [n_x, None], name="X")
    Y = tf.placeholder(tf.float32, [n_y, None], name="Y")

    return X, Y

#test

X, Y = create_placeholders(12288, 6)
'''
print(str(X))
print(str(Y))  '''

#initialize the parameters
def initialize_parameters():
    """
    Initializes the parameters of the network, with the following shapes:
        W1 : [25, 12288]
        b1 : [25, 1]
        W2 : [12, 25]
        b2 : [12, 1]
        W3 : [6, 12]
        b3 : [6, 1]
    :return: parameters - a dictionary containing the Ws and bs
    """
    tf.set_random_seed(1)

    W1 = tf.get_variable("W1", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b1 = tf.get_variable("b1", [25, 1], initializer=tf.zeros_initializer())
    W2 = tf.get_variable("W2", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b2 = tf.get_variable("b2", [12, 1], initializer=tf.zeros_initializer())
    W3 = tf.get_variable("W3", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b3 = tf.get_variable("b3", [6, 1], initializer=tf.zeros_initializer())

    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2,
                  "W3": W3,
                  "b3": b3}

    return parameters
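
#note: tf.contrib was removed in TensorFlow 2.x; if you run this under
#tf.compat.v1, one drop-in replacement (Xavier uniform is the same scheme
#as Glorot uniform) would be, e.g.:
#    W1 = tf.get_variable("W1", [25, 12288], initializer=tf.compat.v1.glorot_uniform_initializer(seed=1))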

#test

tf.reset_default_graph() #clears the default graph stack and resets the global default graph
'''
with tf.Session() as sess:
    parameters = initialize_parameters()
    print("W1 = " + str(parameters["W1"]))
    print("b1 = " + str(parameters["b1"]))
    print("W2 = " + str(parameters["W2"]))
    print("b2 = " + str(parameters["b2"]))  '''

#forward propagation
def forward_propagation(X,parameters):
    """
    Implements the forward pass for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX
    :param X: placeholder for the input data, of shape (number of input nodes, number of examples)
    :param parameters: dictionary containing the Ws and bs
    :return: Z3 - the output of the last LINEAR unit (no softmax here; it is
             folded into the cost computation)
    """
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']

    Z1 = tf.add(tf.matmul(W1,X),b1)   #Z1 = np.dot(W1, X) + b1
    #Z1 = tf.matmul(W1,X)+b1    #this works as well
    A1 = tf.nn.relu(Z1)               #A1 = relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)  # Z2 = np.dot(W2, A1) + b2
    A2 = tf.nn.relu(Z2)  # A2 = relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)  # Z3 = np.dot(W3, A2) + b3


    return Z3

#test
'''
tf.reset_default_graph() #clears the default graph stack and resets the global default graph
with tf.Session() as sess:
    X,Y = create_placeholders(12288,6)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X,parameters)
    #print("Z3 = " + str(Z3)) '''

#compute the cost
def compute_cost(Z3, Y):
    """
    Computes the cost
    :param Z3: output of forward propagation
    :param Y: labels placeholder, same shape as Z3
    :return: cost - the cost value
    """
    #transpose: the built-in op expects shape (number of examples, number of classes)
    logits = tf.transpose(Z3)
    labels = tf.transpose(Y)

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=labels))
    return cost
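
#for reference, this computes the mean softmax cross-entropy over the mini-batch:
#    J = -(1/m) * sum_j sum_i y_ij * log(softmax(Z3)_ij)
#(on newer TF versions this op is deprecated in favor of
# tf.nn.softmax_cross_entropy_with_logits_v2)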

#test

'''
tf.reset_default_graph()

sess = tf.Session()
X, Y = create_placeholders(12288,6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
cost = compute_cost(Z3, Y)
#print("cost=" + str(cost)) '''

#backward propagation and parameter updates
#tensorflow builds the backward pass automatically: one line creates the optimizer,
#optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost)
#and one line runs it on a mini-batch:
#_ , c = sess.run([optimizer,cost],feed_dict={X:mini_batch_X,Y:mini_batch_Y})

#build the model
def model(X_train,Y_train,X_test,Y_test,
        learning_rate=0.0002,num_epochs=2000,minibatch_size=32,
        print_cost=True,is_plot=True):
    """
    Implements a three-layer TensorFlow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX

    Arguments:
        X_train - training set, of shape (input size (number of input nodes) = 12288, number of examples = 1080)
        Y_train - training labels, of shape (output size (number of output nodes) = 6, number of examples = 1080)
        X_test - test set, of shape (input size (number of input nodes) = 12288, number of examples = 120)
        Y_test - test labels, of shape (output size (number of output nodes) = 6, number of examples = 120)
        learning_rate - learning rate of the optimization
        num_epochs - number of passes over the whole training set
        minibatch_size - size of each mini-batch
        print_cost - whether to print the cost, once every 100 epochs
        is_plot - whether to plot the cost curve

    Returns:
        parameters - the parameters learnt by the model

    """
    ops.reset_default_graph()                #lets us rerun the model without overwriting tf variables
    tf.set_random_seed(1)
    seed = 3
    (n_x , m)  = X_train.shape               #number of input nodes and number of examples
    n_y = Y_train.shape[0]                   #number of output nodes
    costs = []                               #to keep track of the costs

    #create placeholders for X and Y
    X,Y = create_placeholders(n_x,n_y)

    #initialize the parameters
    parameters = initialize_parameters()

    #forward propagation
    Z3 = forward_propagation(X,parameters)

    #compute the cost
    cost = compute_cost(Z3,Y)

    #backward propagation, using the Adam optimizer
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    #initialize all the variables
    init = tf.global_variables_initializer()

    #start the session and compute
    with tf.Session() as sess:
        #initialization
        sess.run(init)

        #the training loop
        for epoch in range(num_epochs):

            epoch_cost = 0  #cost for this epoch
            num_minibatches = int(m / minibatch_size)    #total number of mini-batches
            seed = seed + 1
            minibatches = tf_utils.random_mini_batches(X_train,Y_train,minibatch_size,seed)

            for minibatch in minibatches:

                #select a minibatch
                (minibatch_X,minibatch_Y) = minibatch

                #the data is ready, run the session
                _ , minibatch_cost = sess.run([optimizer,cost],feed_dict={X:minibatch_X,Y:minibatch_Y})

                #accumulate this minibatch's share of the epoch cost
                epoch_cost = epoch_cost + minibatch_cost / num_minibatches

            #record and print the cost
            ## record the cost every 5 epochs
            if epoch % 5 == 0:
                costs.append(epoch_cost)
                #print every 100 epochs:
                if print_cost and epoch % 100 == 0:
                        print("epoch = " + str(epoch) + "    epoch_cost = " + str(epoch_cost))

        #plot the cost curve if requested
        if is_plot:
            plt.plot(np.squeeze(costs))
            plt.ylabel('cost')
            plt.xlabel('epochs (per 5)')
            plt.title("Learning rate =" + str(learning_rate))
            plt.show()

        #fetch the learnt parameters out of the session
        parameters = sess.run(parameters)
        print("Parameters have been saved.")

        #compute the correct predictions
        correct_prediction = tf.equal(tf.argmax(Z3),tf.argmax(Y))

        #compute the accuracy
        accuracy = tf.reduce_mean(tf.cast(correct_prediction,"float"))

        print("Train accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print("Test accuracy:", accuracy.eval({X: X_test, Y: Y_test}))

        return parameters

#start time
start_time = time.time()
#train the model
parameters = model(X_train, Y_train, X_test, Y_test)
#end time
end_time = time.time()
#elapsed time
#print("CPU execution time = " + str(end_time - start_time) + " seconds")

#test with your own image

my_image1 = "1.png"
fileName1 = "datasets/" + my_image1
image1 = mpimg.imread(fileName1)
plt.imshow(image1)
plt.show()

my_image1 = image1.reshape(1, 64*64*3).T
my_image_prediction = tf_utils.predict(my_image1,parameters)
print("Prediction: Y = " + str(np.squeeze(my_image_prediction)))

tf_utils.py

#tf_utils.py

import h5py
import numpy as np
import tensorflow as tf
import math

def load_dataset():
    train_dataset = h5py.File('datasets/train_signs.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels

    test_dataset = h5py.File('datasets/test_signs.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels

    classes = np.array(test_dataset["list_classes"][:]) # the list of classes

    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes


def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
    """
    Creates a list of random minibatches from (X, Y)

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
    mini_batch_size -- size of the mini-batches, integer
    seed -- this is only for the purpose of grading, so that your "random" minibatches are the same as ours.

    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """

    m = X.shape[1]                  # number of training examples
    mini_batches = []
    np.random.seed(seed)

    # Step 1: Shuffle (X, Y)
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    shuffled_Y = Y[:, permutation].reshape((Y.shape[0],m))

    # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitioning
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]
        mini_batch_Y = shuffled_Y[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    # Handling the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : m]
        mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size : m]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    return mini_batches
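
# e.g. with m = 1080 examples and mini_batch_size = 32, this yields
# 33 complete mini-batches plus a final mini-batch of 1080 - 33*32 = 24 examples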

def convert_to_one_hot(Y, C):
    Y = np.eye(C)[Y.reshape(-1)].T
    return Y
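
# e.g. convert_to_one_hot(np.array([[1, 0, 2]]), 3) returns
# [[0. 1. 0.]
#  [1. 0. 0.]
#  [0. 0. 1.]]
# where column j is the one-hot encoding of example j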


def predict(X, parameters):

    W1 = tf.convert_to_tensor(parameters["W1"])
    b1 = tf.convert_to_tensor(parameters["b1"])
    W2 = tf.convert_to_tensor(parameters["W2"])
    b2 = tf.convert_to_tensor(parameters["b2"])
    W3 = tf.convert_to_tensor(parameters["W3"])
    b3 = tf.convert_to_tensor(parameters["b3"])

    params = {"W1": W1,
              "b1": b1,
              "W2": W2,
              "b2": b2,
              "W3": W3,
              "b3": b3}

    x = tf.placeholder("float", [12288, 1])

    z3 = forward_propagation_for_predict(x, params)
    p = tf.argmax(z3)

    sess = tf.Session()
    prediction = sess.run(p, feed_dict = {x: X})

    return prediction

def forward_propagation_for_predict(X, parameters):
    """
    Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
                  the shapes are given in initialize_parameters
    Returns:
    Z3 -- the output of the last LINEAR unit
    """

    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']
                                                           # Numpy Equivalents:
    Z1 = tf.add(tf.matmul(W1, X), b1)                      # Z1 = np.dot(W1, X) + b1
    A1 = tf.nn.relu(Z1)                                    # A1 = relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)                     # Z2 = np.dot(W2, a1) + b2
    A2 = tf.nn.relu(Z2)                                    # A2 = relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)                     # Z3 = np.dot(W3,Z2) + b3

    return Z3
There is also a hands-on part at the end... emm!~~ see the original blog for that.