Rewriting TensorFlow 1 code with TensorFlow 2.6.0, using the TF2 compatibility API (tf.compat.v1) to keep the TF1 programming style.
1 - Exploring the TensorFlow Library
1.1 Importing the Libraries
import math
import h5py
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from tf_utils import convert_to_one_hot, load_dataset, predict, random_mini_batches
%matplotlib inline
np.random.seed(1)
1.2 Computing a Loss
# Disable eager execution so the TensorFlow 1.x graph/session style can be used
tf.compat.v1.disable_eager_execution()
# Define the constants
y_hat = tf.constant(36, name="y_hat")
y = tf.constant(39, name="y")
# Define the loss as a variable in the graph
loss = tf.Variable((y - y_hat) ** 2, name="loss")
# Create the variable-initialization op
init = tf.compat.v1.global_variables_initializer()
# Create a session
with tf.compat.v1.Session() as session:
    # Run the initialization
    session.run(init)
    # Evaluate the loss
    loss_value = session.run(loss)
    # Print the loss value
    print(loss_value)
# Output
# 9
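For comparison, here is a minimal sketch of the same loss in native TF2 eager mode. This assumes a fresh program where disable_eager_execution() has not been called; the value is computed immediately, with no session or initializer.
y_hat = tf.constant(36, name="y_hat")
y = tf.constant(39, name="y")
loss = (y - y_hat) ** 2  # evaluated eagerly
print(loss.numpy())  # 9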
1.3 Building a Computation Graph
a = tf.constant(2)
b = tf.constant(10)
c = tf.multiply(a, b)
print(c)
# Output
# Tensor("Mul:0", shape=(), dtype=int32)
As expected, printing c only shows the tensor's metadata: the multiplication has been added to the graph but not executed yet. To actually get the value 20, the graph must be run in a session, as the next section shows.
1.4 Running a Session
# Use tf.compat.v1.Session() to run the static graph
with tf.compat.v1.Session() as sess:
    # c is already an op in the default graph, so we can run it directly
    result = sess.run(c)
    print(result)
# Output
# 20
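With eager execution on (again, assuming disable_eager_execution() was never called), the multiplication is evaluated immediately and no session is needed:
c = tf.multiply(tf.constant(2), tf.constant(10))
print(c)  # tf.Tensor(20, shape=(), dtype=int32)
print(c.numpy())  # 20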
1.5 Placeholders and Feeding Values
# Change the value of x in the feed_dict
x = tf.compat.v1.placeholder(tf.int64, name="x")
with tf.compat.v1.Session() as sess:
    # Run the graph, feeding 3 in for the placeholder x
    print(sess.run(2 * x, feed_dict={x: 3}))
# Output
# 6
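The native TF2 replacement for the placeholder/feed_dict pattern is a plain Python function, optionally wrapped in tf.function to build a graph behind the scenes. A minimal sketch, assuming a fresh program where eager execution is enabled:
@tf.function
def double(x):
    return 2 * x

print(double(tf.constant(3, dtype=tf.int64)))  # tf.Tensor(6, shape=(), dtype=int64)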
1.6 Linear Function
# GRADED FUNCTION: linear_function
def linear_function():
    """
    Implements a linear function:
    Initializes W to be a random tensor of shape (4,3)
    Initializes X to be a random tensor of shape (3,1)
    Initializes b to be a random tensor of shape (4,1)
    Returns:
    result -- runs the session for Y = WX + b
    """
    np.random.seed(1)
    ### START CODE HERE ### (4 lines of code)
    X = tf.constant(np.random.randn(3, 1), name="X")
    W = tf.constant(np.random.randn(4, 3), name="W")
    b = tf.constant(np.random.randn(4, 1), name="b")
    Y = tf.add(tf.matmul(W, X), b)
    ### END CODE HERE ###
    # Create the session using tf.compat.v1.Session() and run it with sess.run(...) on the tensor you want to evaluate
    ### START CODE HERE ###
    sess = tf.compat.v1.Session()
    result = sess.run(Y)
    ### END CODE HERE ###
    # close the session
    sess.close()
    return result
print("result = " + str(linear_function()))
# Output
'''
result = [[-2.15657382]
[ 2.95891446]
[-1.08926781]
[-0.84538042]]
'''
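An eager-mode sketch of the same Y = WX + b computation (assuming eager execution has not been disabled); the constants are float64 NumPy arrays drawn in the same order, so the result matches:
np.random.seed(1)
X = tf.constant(np.random.randn(3, 1))
W = tf.constant(np.random.randn(4, 3))
b = tf.constant(np.random.randn(4, 1))
print((tf.matmul(W, X) + b).numpy())  # same values as result above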
1.7 Computing the Sigmoid
# GRADED FUNCTION: sigmoid
def sigmoid(z):
    """
    Computes the sigmoid of z
    Arguments:
    z -- input value, scalar or vector
    Returns:
    results -- the sigmoid of z
    """
    x = tf.compat.v1.placeholder(tf.float32, name="x")
    sigmoid = tf.sigmoid(x)
    with tf.compat.v1.Session() as sess:
        # Run the session, feeding z in for the placeholder x
        result = sess.run(sigmoid, feed_dict={x: z})
    return result
print("sigmoid(0) = " + str(sigmoid(0)))
print("sigmoid(12) = " + str(sigmoid(12)))
# Output
'''
sigmoid(0) = 0.5
sigmoid(12) = 0.9999939
'''
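An eager-mode one-liner for the same computation (assuming eager execution has not been disabled):
print(tf.sigmoid(tf.constant(12.0)).numpy())  # ~0.9999939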
1.8 Computing the Cost
# GRADED FUNCTION: cost
def cost(logits, labels):
    """
    Computes the cost using the sigmoid cross entropy
    Arguments:
    logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)
    labels -- vector of labels y (1 or 0)
    Note: What we've been calling "z" and "y" in this class are respectively called "logits" and "labels"
    in the TensorFlow documentation. So logits will feed into z, and labels into y.
    Returns:
    cost -- runs the session of the cost (formula (2))
    """
    ### START CODE HERE ###
    # Create the placeholders for "logits" (z) and "labels" (y) (approx. 2 lines)
    z = tf.compat.v1.placeholder(tf.float32, name="z")
    y = tf.compat.v1.placeholder(tf.float32, name="y")
    # Use the loss function (approx. 1 line)
    cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y)
    # Create a session (approx. 1 line). See method 1 above.
    with tf.compat.v1.Session() as sess:
        # Run the session (approx. 1 line); there are no variables to initialize here
        cost_value = sess.run(cost, feed_dict={z: logits, y: labels})
    ### END CODE HERE ###
    return cost_value

# Example inputs: probabilities from the sigmoid function above and binary labels
logits = sigmoid(np.array([0.2, 0.4, 0.7, 0.9]))
labels = np.array([0, 0, 1, 1])
cost = cost(logits, labels)
print("cost = " + str(cost))
# Output
# cost = [1.0053872 1.0366409 0.41385433 0.39956614]
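As a sanity check, the same per-element losses can be reproduced in plain NumPy from the cross-entropy formula -(y*log(a) + (1-y)*log(1-a)) with a = sigmoid(z), where z is the logits array fed above:
a = 1 / (1 + np.exp(-logits))  # sigmoid applied to the fed logits
print(-(labels * np.log(a) + (1 - labels) * np.log(1 - a)))
# matches the TF output above (up to float precision)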
1.9 One-Hot Encoding
# GRADED FUNCTION: one_hot_matrix
def one_hot_matrix(labels, C):
    """
    Creates a matrix where the i-th row corresponds to the ith class number and the jth column
    corresponds to the jth training example. So if example j had a label i, then entry (i,j)
    will be 1.
    Arguments:
    labels -- vector containing the labels
    C -- number of classes, the depth of the one hot dimension
    Returns:
    one_hot -- one hot matrix
    """
    ### START CODE HERE ###
    # Create a tf.constant equal to C (depth), name it 'C'. (approx. 1 line)
    C = tf.constant(value=C, name="C")
    # Use tf.one_hot, be careful with the axis (approx. 1 line)
    one_hot_matrix = tf.one_hot(labels, C, axis=0)
    with tf.compat.v1.Session() as sess:
        # Run the session (approx. 1 line)
        one_hot = sess.run(one_hot_matrix)
    ### END CODE HERE ###
    return one_hot
labels = np.array([1, 2, 3, 0, 2, 1])
one_hot = one_hot_matrix(labels, C=4)
print("one_hot = " + str(one_hot))
# Output
'''
one_hot = [[0. 0. 0. 1. 0. 0.]
[1. 0. 0. 0. 0. 1.]
[0. 1. 0. 0. 1. 0.]
[0. 0. 1. 0. 0. 0.]]
'''
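An eager-mode equivalent (assuming eager execution has not been disabled):
print(tf.one_hot([1, 2, 3, 0, 2, 1], depth=4, axis=0).numpy())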
1.10 Initializing with Ones
# GRADED FUNCTION: ones
def ones(shape):
    """
    Creates an array of ones of dimension shape
    Arguments:
    shape -- shape of the array you want to create
    Returns:
    ones -- array containing only ones
    """
    ### START CODE HERE ###
    # Create "ones" tensor using tf.ones(...). (approx. 1 line)
    ones = tf.ones(shape)
    with tf.compat.v1.Session() as sess:
        # Run the session (approx. 1 line)
        ones = sess.run(ones)
    ### END CODE HERE ###
    return ones
print("ones = " + str(ones([3])))
# Output
# ones = [1. 1. 1.]
2 - Building your first neural network in TensorFlow
2.1 Loading the Dataset
# Loading the dataset
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Example of a picture
index = 11
plt.imshow(X_train_orig[index])
print("y = " + str(np.squeeze(Y_train_orig[:, index])))
# Output
# y = 1
2.2 Data Preprocessing
# Flatten the training and test images
X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T
# Normalize image vectors
X_train = X_train_flatten / 255.0
X_test = X_test_flatten / 255.0
# Convert training and test labels to one hot matrices
Y_train = convert_to_one_hot(Y_train_orig, 6)
Y_test = convert_to_one_hot(Y_test_orig, 6)
print("number of training examples = " + str(X_train.shape[1]))
print("number of test examples = " + str(X_test.shape[1]))
print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_test shape: " + str(X_test.shape))
print("Y_test shape: " + str(Y_test.shape))
# Output
'''
number of training examples = 1080
number of test examples = 120
X_train shape: (12288, 1080)
Y_train shape: (6, 1080)
X_test shape: (12288, 120)
Y_test shape: (6, 120)
'''
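convert_to_one_hot is a helper from the course's tf_utils module. A minimal NumPy sketch of what it plausibly computes, inferred from its name and the printed shapes above (an assumption, not the helper's actual source):
def convert_to_one_hot_sketch(Y, C):
    # np.eye(C) has one-hot rows; indexing by the labels and transposing
    # gives a (C, m) matrix, matching Y_train's shape of (6, 1080)
    return np.eye(C)[Y.reshape(-1)].T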
2.3 Creating Placeholders
# GRADED FUNCTION: create_placeholders
def create_placeholders(n_x, n_y):
    """
    Creates the placeholders for the tensorflow session.
    Arguments:
    n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288)
    n_y -- scalar, number of classes (from 0 to 5, so -> 6)
    Returns:
    X -- placeholder for the data input, of shape [n_x, None] and dtype "float"
    Y -- placeholder for the input labels, of shape [n_y, None] and dtype "float"
    Tips:
    - You will use None because it lets us be flexible about the number of examples fed to the placeholders.
      In fact, the number of examples during test/train is different.
    """
    ### START CODE HERE ### (approx. 2 lines)
    X = tf.compat.v1.placeholder(tf.float32, shape=[n_x, None], name="X")
    Y = tf.compat.v1.placeholder(tf.float32, shape=[n_y, None], name="Y")
    ### END CODE HERE ###
    return X, Y
X, Y = create_placeholders(12288, 6)
print("X = " + str(X))
print("Y = " + str(Y))
# Output
'''
X = Tensor("X_5:0", shape=(12288, None), dtype=float32)
Y = Tensor("Y_2:0", shape=(6, None), dtype=float32)
'''
2.4 Initializing the Parameters
# GRADED FUNCTION: initialize_parameters
def initialize_parameters():
    """
    Initializes parameters to build a neural network with tensorflow. The shapes are:
    W1 : [25, 12288]
    b1 : [25, 1]
    W2 : [12, 25]
    b2 : [12, 1]
    W3 : [6, 12]
    b3 : [6, 1]
    Returns:
    parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
    """
    tf.random.set_seed(1)  # so that your "random" numbers match ours
    ### START CODE HERE ### (approx. 6 lines of code)
    W1 = tf.compat.v1.get_variable("W1", [25, 12288], initializer=tf.initializers.GlorotUniform(seed=1))
    b1 = tf.compat.v1.get_variable("b1", [25, 1], initializer=tf.zeros_initializer())
    W2 = tf.compat.v1.get_variable("W2", [12, 25], initializer=tf.initializers.GlorotUniform(seed=1))
    b2 = tf.compat.v1.get_variable("b2", [12, 1], initializer=tf.zeros_initializer())
    W3 = tf.compat.v1.get_variable("W3", [6, 12], initializer=tf.initializers.GlorotUniform(seed=1))
    b3 = tf.compat.v1.get_variable("b3", [6, 1], initializer=tf.zeros_initializer())
    ### END CODE HERE ###
    parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2, "W3": W3, "b3": b3}
    return parameters

ops.reset_default_graph()
with tf.compat.v1.Session() as sess:
    parameters = initialize_parameters()
    print("W1 = " + str(parameters["W1"]))
    print("b1 = " + str(parameters["b1"]))
    print("W2 = " + str(parameters["W2"]))
    print("b2 = " + str(parameters["b2"]))
# Output
'''
W1 = <tf.Variable 'W1:0' shape=(25, 12288) dtype=float32>
b1 = <tf.Variable 'b1:0' shape=(25, 1) dtype=float32>
W2 = <tf.Variable 'W2:0' shape=(12, 25) dtype=float32>
b2 = <tf.Variable 'b2:0' shape=(12, 1) dtype=float32>
'''
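For intuition on the Glorot (Xavier) uniform initializer used above: it samples from U(-limit, limit) with limit = sqrt(6 / (fan_in + fan_out)). A quick check of the bound for W1:
limit = np.sqrt(6.0 / (12288 + 25))  # fan_in=12288, fan_out=25 for W1
print(limit)  # about 0.0221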
2.5 Forward Propagation
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
    """
    Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX
    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
                  the shapes are given in initialize_parameters
    Returns:
    Z3 -- the output of the last LINEAR unit
    """
    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]
    ### START CODE HERE ### (approx. 5 lines)  # NumPy equivalents:
    Z1 = tf.add(tf.matmul(W1, X), b1)          # Z1 = np.dot(W1, X) + b1
    A1 = tf.nn.relu(Z1)                        # A1 = relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)         # Z2 = np.dot(W2, A1) + b2
    A2 = tf.nn.relu(Z2)                        # A2 = relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)         # Z3 = np.dot(W3, A2) + b3
    ### END CODE HERE ###
    return Z3

ops.reset_default_graph()
with tf.compat.v1.Session() as sess:
    X, Y = create_placeholders(12288, 6)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X, parameters)
    print("Z3 = " + str(Z3))
# Output
# Z3 = Tensor("Add_2:0", shape=(6, None), dtype=float32)
Note that forward propagation stops at Z3 and applies no softmax: tf.nn.softmax_cross_entropy_with_logits, used in the next section, expects raw logits and applies the softmax internally.
2.6 Computing the Cost
# GRADED FUNCTION: compute_cost
def compute_cost(Z3, Y):
    """
    Computes the cost
    Arguments:
    Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)
    Y -- "true" labels vector placeholder, same shape as Z3
    Returns:
    cost - Tensor of the cost function
    """
    # Transpose to fit the tensorflow requirement for tf.nn.softmax_cross_entropy_with_logits(...),
    # which expects shape (number of examples, number of classes)
    logits = tf.transpose(Z3)
    labels = tf.transpose(Y)
    ### START CODE HERE ### (1 line of code)
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    )
    ### END CODE HERE ###
    return cost

ops.reset_default_graph()
with tf.compat.v1.Session() as sess:
    X, Y = create_placeholders(12288, 6)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X, parameters)
    cost = compute_cost(Z3, Y)
    print("cost = " + str(cost))
# Output
# cost = Tensor("Mean:0", shape=(), dtype=float32)
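For reference, a short NumPy sketch of the per-example loss that tf.nn.softmax_cross_entropy_with_logits computes, assuming z and y are 1-D arrays for a single example:
def softmax_cross_entropy(z, y):
    p = np.exp(z - np.max(z))  # numerically stable softmax
    p = p / p.sum()
    return -np.sum(y * np.log(p))  # cross-entropy against one-hot y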
2.7 Building the Model
def model(
    X_train,
    Y_train,
    X_test,
    Y_test,
    learning_rate=0.0001,
    num_epochs=1500,
    minibatch_size=32,
    print_cost=True,
):
    """
    Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.
    Arguments:
    X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
    Y_train -- training labels, of shape (output size = 6, number of training examples = 1080)
    X_test -- test set, of shape (input size = 12288, number of test examples = 120)
    Y_test -- test labels, of shape (output size = 6, number of test examples = 120)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 100 epochs
    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    ops.reset_default_graph()  # to be able to rerun the model without overwriting tf variables
    tf.random.set_seed(1)  # to keep consistent results
    seed = 3  # to keep consistent results
    (n_x, m) = X_train.shape  # (n_x: input size, m: number of examples in the train set)
    n_y = Y_train.shape[0]  # n_y: output size
    costs = []  # To keep track of the cost

    # Create Placeholders of shape (n_x, n_y)
    ### START CODE HERE ### (1 line)
    X, Y = create_placeholders(n_x, n_y)
    ### END CODE HERE ###

    # Initialize parameters
    ### START CODE HERE ### (1 line)
    parameters = initialize_parameters()
    ### END CODE HERE ###

    # Forward propagation: Build the forward propagation in the tensorflow graph
    ### START CODE HERE ### (1 line)
    Z3 = forward_propagation(X, parameters)
    ### END CODE HERE ###

    # Cost function: Add cost function to tensorflow graph
    ### START CODE HERE ### (1 line)
    cost = compute_cost(Z3, Y)
    ### END CODE HERE ###

    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
    ### START CODE HERE ### (1 line)
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    ### END CODE HERE ###

    # Initialize all the variables
    init = tf.compat.v1.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.compat.v1.Session() as sess:
        # Run the initialization
        sess.run(init)
        # Do the training loop
        for epoch in range(num_epochs):
            epoch_cost = 0.0  # Defines a cost related to an epoch
            # number of minibatches of size minibatch_size in the train set
            num_minibatches = int(m / minibatch_size)
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
            for minibatch in minibatches:
                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch
                # IMPORTANT: The line that runs the graph on a minibatch.
                # Run the session to execute the "optimizer" and the "cost";
                # the feed_dict should contain a minibatch for (X, Y).
                ### START CODE HERE ### (1 line)
                _, minibatch_cost = sess.run(
                    [optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y}
                )
                ### END CODE HERE ###
                epoch_cost += minibatch_cost / num_minibatches
            # Print the cost every 100 epochs; record it every 5 for the plot
            if print_cost and epoch % 100 == 0:
                print("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost and epoch % 5 == 0:
                costs.append(epoch_cost)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel("cost")
        plt.xlabel("iterations (per fives)")
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # save the trained parameters as NumPy arrays
        parameters = sess.run(parameters)
        print("Parameters have been trained!")

        # Calculate the correct predictions
        correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))
        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))
        return parameters
parameters = model(X_train, Y_train, X_test, Y_test, minibatch_size=64)
# Output
'''
Cost after epoch 0: 1.969848
Cost after epoch 100: 0.945937
Cost after epoch 200: 0.701537
Cost after epoch 300: 0.575044
Cost after epoch 400: 0.486772
Cost after epoch 500: 0.422395
Cost after epoch 600: 0.355608
Cost after epoch 700: 0.304337
Cost after epoch 800: 0.253242
Cost after epoch 900: 0.204498
Cost after epoch 1000: 0.166818
Cost after epoch 1100: 0.132297
Cost after epoch 1200: 0.098340
Cost after epoch 1300: 0.076905
Cost after epoch 1400: 0.057851
Parameters have been trained!
Train Accuracy: 0.99722224
Test Accuracy: 0.80833334
'''
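Because sess.run(parameters) returned plain NumPy arrays, inference can also be done without TensorFlow. A minimal NumPy sketch of a hand-rolled alternative to tf_utils.predict (predict_numpy and image_vector are hypothetical names, not part of tf_utils):
def predict_numpy(image_vector, parameters):
    # Forward pass mirroring forward_propagation, in NumPy
    relu = lambda v: np.maximum(v, 0)
    A1 = relu(parameters["W1"] @ image_vector + parameters["b1"])
    A2 = relu(parameters["W2"] @ A1 + parameters["b2"])
    Z3 = parameters["W3"] @ A2 + parameters["b3"]
    return np.argmax(Z3, axis=0)  # predicted class per column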
2.8 Testing with Your Own Image
from PIL import Image
import imageio.v2 as im
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "thumbs_up.jpg"
## END CODE HERE ##
# We preprocess your image to fit your algorithm.
fname = "images/" + my_image
image = np.array(im.imread(fname))
# scipy.misc.imresize was removed from SciPy, so resize with PIL instead
my_image = np.array(Image.fromarray(image).resize((64, 64))).reshape((1, 64 * 64 * 3)).T
my_image_prediction = predict(my_image, parameters)
plt.imshow(image)
print("Your algorithm predicts: y = " + str(np.squeeze(my_image_prediction)))
# Output
# Your algorithm predicts: y = 3