Andrew Ng Assignment (4): Building a Three-Layer Neural Network in TensorFlow and Applying It to Hand-Sign Recognition

Problem

Given an image of a hand sign, predict the digit it represents. For example:
(image: a hand showing the digit 5)

Output: the digit 5

Dataset

  1. train_signs.h5:
    the training set, with shape (1080, 64, 64, 3), i.e. 1080 color images of size 64×64
  2. test_signs.h5:
    the test set, with shape (120, 64, 64, 3), i.e. 120 color images of size 64×64
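As a quick sanity check before writing the full loader, you can list the keys and shapes in the files (a minimal sketch; it assumes the two .h5 files sit in the working directory):

import h5py

# peek at the training file: it holds the images, the labels, and the class list
with h5py.File('train_signs.h5', 'r') as f:
    for key in f.keys():
        print(key, f[key].shape)
# expected keys: list_classes, train_set_x, train_set_y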


Loading the dataset

# imports used throughout this post
import time
import h5py
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

# the code below uses tf.compat.v1 placeholders and sessions,
# so eager execution must be disabled under TensorFlow 2
tf.compat.v1.disable_eager_execution()

# load and preprocess the data
def load_data():
    # open the raw training and test files (read-only)
    train_data_org = h5py.File('train_signs.h5', 'r')
    test_data_org = h5py.File('test_signs.h5', 'r')
    
    # an h5 file behaves like a dict: values are retrieved by key
    for key in train_data_org.keys():
        print(key)
    '''
    Output:
    list_classes
    train_set_x
    train_set_y
    '''
    
    # training set
    train_x_org = train_data_org["train_set_x"]
    train_y_org = train_data_org["train_set_y"]
    
    # test set
    test_x_org = test_data_org["test_set_x"]
    test_y_org = test_data_org["test_set_y"]
    
    # dimensions and sample counts
    # number of features per sample
    n = np.shape(train_x_org)[1]*np.shape(train_x_org)[2]*np.shape(train_x_org)[3]
    # number of training samples
    m_train = np.shape(train_x_org)[0]
    # number of test samples
    m_test = np.shape(test_x_org)[0]
    
    # flatten the labels into row vectors of shape (1, m)
    train_y = np.array(train_y_org).reshape(1,m_train)
    test_y = np.array(test_y_org).reshape(1,m_test)
    
    # each image is 64*64*3 = 12288 values; reshape the (m, 64, 64, 3)
    # arrays into (12288, m), one column per sample
    train_x = np.array(train_x_org).reshape(m_train,-1).T
    test_x = np.array(test_x_org).reshape(m_test, -1).T
    
    return train_x_org,test_x_org,train_x,test_x,train_y,test_y

Defining the basic functions

Creating placeholders for x and y

Create placeholders for the input data train_x, test_x, train_y, test_y, and create Variables for the parameters that gradient descent will update.

'''
n_x: dimension of the input data, i.e. the number of features
n_y: dimension of the output data, i.e. the number of classes C
'''
def create_placeholders(n_x,n_y):
    x = tf.compat.v1.placeholder(tf.float32,[n_x,None],name='x')
    y = tf.compat.v1.placeholder(tf.float32,[n_y,None],name='y')
    return x,y
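A minimal sketch of how a placeholder is fed at run time (it assumes eager execution has been disabled, as in the loader above); the second dimension is None so any batch size can be fed:

x_demo, y_demo = create_placeholders(12288, 6)
with tf.compat.v1.Session() as sess:
    # feed a dummy batch of 4 samples; None accepts any batch size
    out = sess.run(x_demo, feed_dict={x_demo: np.zeros((12288, 4))})
    print(out.shape)  # (12288, 4)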

Converting the labels to one-hot encoding

def convert_to_one_hot(Y, C):
    # row i of np.eye(C) is the one-hot vector for class i; indexing by the
    # flattened labels and transposing gives shape (C, m)
    Y = np.eye(C)[Y.reshape(-1)].T
    return Y
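For example, with C = 3 classes, each label becomes a one-hot column:

Y = np.array([[1, 2, 0]])
print(convert_to_one_hot(Y, 3))
# [[0. 0. 1.]
#  [1. 0. 0.]
#  [0. 1. 0.]]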

Parameter initialization

This builds a three-layer network with layer sizes [12288, 25, 12, 6]:
x -> ReLU -> ReLU -> softmax -> y

def init_parameters():
    # fix the random seed so runs are reproducible
    tf.random.set_seed(1)
    # Xavier initialization (glorot_normal is Xavier, not He; the biases are
    # initialized the same way here, though zeros are more conventional)
    initializer = tf.keras.initializers.glorot_normal()
    # define the Variables (weights and biases) for the structure [12288, 25, 12, 6]
    w1 = tf.Variable(initializer([25,12288]))
    b1 = tf.Variable(initializer([25,1]))
    w2 = tf.Variable(initializer([12,25]))
    b2 = tf.Variable(initializer([12,1]))
    w3 = tf.Variable(initializer([6,12]))
    b3 = tf.Variable(initializer([6,1]))
    
    parameters = {'w1':w1,
                  'b1':b1,
                  'w2':w2,
                  'b2':b2,
                  'w3':w3,
                  'b3':b3
                 }
    return parameters

Forward propagation

'''
x: input data
parameters: dict of weights w and biases b
'''
def forward_propagation(x,parameters):
    w1 = parameters['w1']
    b1 = parameters['b1']
    w2 = parameters['w2']
    b2 = parameters['b2']
    w3 = parameters['w3']
    b3 = parameters['b3']
    
    z1 = tf.matmul(w1,x)+b1
    a1 = tf.nn.relu(z1)
    
    z2 = tf.matmul(w2,a1)+b2
    a2 = tf.nn.relu(z2)
    
    #z3 feeds the loss function directly, so a3 is not needed (in TensorFlow,
    #the output of the final linear layer is the input to the loss function)
    z3 = tf.matmul(w3,a2)+b3
    
    return z3
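A quick shape check of the graph this builds (a throwaway sketch using the helpers above): z1 is (25, m), z2 is (12, m), and z3 is (6, m):

x_chk = tf.compat.v1.placeholder(tf.float32, [12288, None])
z3_chk = forward_propagation(x_chk, init_parameters())
print(z3_chk.shape)  # (6, None): 6 logits per sample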

Cost function

def costfunction(z3,y):
    # transpose: tf.nn.softmax_cross_entropy_with_logits expects
    # shape (batch_size, num_classes)
    z3 = tf.transpose(z3)
    y = tf.transpose(y)
    
    # mean softmax cross-entropy over the batch
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=z3,labels=y))
    
    return cost
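To make the loss concrete, here is the same computation for a single sample in plain NumPy (an illustrative sketch, not part of the model):

logits = np.array([2.0, 1.0, 0.1])           # z3 column for one sample
label = np.array([1.0, 0.0, 0.0])            # one-hot y
p = np.exp(logits) / np.sum(np.exp(logits))  # softmax probabilities
loss = -np.sum(label * np.log(p))            # cross-entropy, about 0.417
print(loss)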

Splitting the dataset for mini-batch gradient descent

'''
Mini-batch gradient descent: split (x, y) into batches of mini_batch_size.
'''
def mini_batch(x,y,mini_batch_size):
    # note: a fixed seed means the shuffle is identical on every call/epoch
    np.random.seed(0)
    # number of samples
    m = x.shape[1]
    
    mini_batches = []
    
    # shuffle x and y with the same random permutation of column indices
    index = np.array(np.random.permutation(m))
    shuffled_x = x[:,index]
    shuffled_y = y[:,index]
    
    # split into full-size batches
    mini_batch_num = m//mini_batch_size
    for k in range(mini_batch_num):
        x_ = shuffled_x[:,k*mini_batch_size:(k+1)*mini_batch_size]
        y_ = shuffled_y[:,k*mini_batch_size:(k+1)*mini_batch_size]
        
        # append the batch to the list
        mini_batches.append([x_,y_])
    
    # if the split is not even, keep the leftovers as a final smaller batch
    if m%mini_batch_size !=0:
        x_ = shuffled_x[:,mini_batch_num*mini_batch_size:]
        y_ = shuffled_y[:,mini_batch_num*mini_batch_size:]
        
        mini_batches.append([x_,y_])
    
    return mini_batches
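With the 1080 training samples and mini_batch_size = 32 this yields 33 full batches (33 * 32 = 1056) plus one leftover batch of 24 samples, 34 in total (a sketch assuming train_x and train_y from the loading step further below):

batches = mini_batch(train_x, train_y, 32)
print(len(batches))          # 34
print(batches[-1][0].shape)  # (12288, 24)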

Back propagation

TensorFlow's optimizer handles back propagation and gradient descent for us:

#minimize the cost, i.e. find the Variable values that minimize the cost function
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
#run the optimizer, i.e. perform one pass of back propagation and gradient
#descent; _ receives the value we do not need
_,mini_batch_cost = sess.run([optimizer,cost],feed_dict={x:mini_x,y:mini_y})

Putting the model together

def nn(X_train,Y_train,X_test,Y_test,learning_rate=0.001,epochs=1500,mini_batch_size=32):
    
    n_x,m = X_train.shape
    n_y = Y_train.shape[0]
    
    costs = []
    
    # create the placeholders
    x,y = create_placeholders(n_x,n_y)
    
    # initialize the parameters
    parameters = init_parameters()
    
    # forward propagation
    z3 = forward_propagation(x,parameters)
    
    # cost function
    cost = costfunction(z3,y)
    
    # back propagation
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    
    # op that initializes all the variables
    init = tf.compat.v1.global_variables_initializer()
    
    # create a session
    with tf.compat.v1.Session() as sess:
        sess.run(init)

        for epoch in range(epochs):
            # accumulated cost for this epoch
            epoch_cost = 0
            # split the training set into mini-batches
            mini_batches = mini_batch(X_train,Y_train,mini_batch_size)
            # number of mini-batches (including a possible smaller final batch)
            mini_batch_num = len(mini_batches)
            # one step of back propagation and gradient descent per mini-batch
            for mini in mini_batches:
                [mini_x,mini_y] = mini

                _,mini_batch_cost = sess.run([optimizer,cost],feed_dict={x:mini_x,y:mini_y})
                # summing each mini-batch cost divided by the number of batches
                # gives the average cost over the epoch
                epoch_cost = epoch_cost + mini_batch_cost/mini_batch_num

            if epoch%5 == 0:
                costs.append(epoch_cost)
                if epoch%100 == 0:
                    print("epoch = "+str(epoch)+"  epoch_cost = "+str(epoch_cost))
        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('epoch')
        plt.show()

        # fetch the trained parameter values from the session
        parameters = sess.run(parameters)

        # argmax over the logits gives the predicted class; compare it with the label
        correct_prediction = tf.equal(tf.argmax(z3),tf.argmax(y))

        # accuracy = fraction of correct predictions
        accuracy = tf.compat.v1.reduce_mean(tf.cast(correct_prediction,"float"))
        # accuracy.eval(feed_dict) evaluates the tensor inside the current session
        print("Training set accuracy:", accuracy.eval({x: X_train, y: Y_train}))
        print("Test set accuracy:", accuracy.eval({x: X_test, y: Y_test}))
    
    return parameters

Loading and preprocessing the data

Normalize x and convert y to one-hot encoding.

train_x_org,test_x_org,train_x,test_x,train_y,test_y = load_data()
#plt.imshow(train_x_org[6])

# scale pixel values from [0, 255] to [0, 1]
train_x,test_x = train_x/255,test_x/255
train_y,test_y = convert_to_one_hot(train_y,6),convert_to_one_hot(test_y,6)
print('Original training set shape:',train_x_org.shape)
print('Training set shape:',train_x.shape)

Running the model

start_time = time.perf_counter()
parameters = nn(train_x,train_y,test_x,test_y,learning_rate=0.0001,epochs=1500,mini_batch_size=32)
end_time = time.perf_counter()
print("CPU execution time = " + str(end_time - start_time) + " seconds")

Results

list_classes
train_set_x
train_set_y
Original training set shape: (1080, 64, 64, 3)
Training set shape: (12288, 1080)
epoch = 0  epoch_cost = 1.8585812756509497
epoch = 100  epoch_cost = 0.7151738224607526
epoch = 200  epoch_cost = 0.40799333019690076
epoch = 300  epoch_cost = 0.25242444647080975
epoch = 400  epoch_cost = 0.1505744911052964
epoch = 500  epoch_cost = 0.094912569292567
epoch = 600  epoch_cost = 0.10143834719377935
epoch = 700  epoch_cost = 0.05500200864943591
epoch = 800  epoch_cost = 0.030319687966821773
epoch = 900  epoch_cost = 0.016815169530948908
epoch = 1000  epoch_cost = 0.0380156317330671
epoch = 1100  epoch_cost = 0.008082679885608906
epoch = 1200  epoch_cost = 0.0040393410758538684
epoch = 1300  epoch_cost = 0.0025025757689339416
epoch = 1400  epoch_cost = 0.0013717231517093198

(plot: training cost decreasing over epochs)

Training set accuracy: 1.0
Test set accuracy: 0.8333333
CPU execution time = 264.22956630000044 seconds

Prediction function

#prediction is just forward propagation plus an argmax over the logits
def predict(X,parameters):
    
    x = tf.compat.v1.placeholder("float", [12288, 1])
    
    z3 = forward_propagation(x, parameters)
    p = tf.argmax(z3)
    
    sess = tf.compat.v1.Session()
    prediction = sess.run(p, feed_dict = {x: X})
        
    return prediction
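For example, to classify a single training image (a sketch; the index 6 is arbitrary):

# take one flattened, normalized image as a (12288, 1) column
sample = train_x[:, 6].reshape(12288, 1)
print(predict(sample, parameters))  # predicted digit as a length-1 array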

Summary

  • Create placeholders for the input data train_x, test_x, train_y, test_y, and create Variables for the parameters that gradient descent will update.
  • In TensorFlow, the output of the final linear layer is the input to the loss function.