Tensorflow标准5层神经网络的人脸分类识别

Tensorflow标准5层神经网络的人脸分类识别


神经网络模型流程图:

(此处原为神经网络模型流程图图片,抓取后缺失)

源码如下:

 
import numpy as np
import tensorflow as tf
from scipy.misc import imread
import os
# Load the training images: 5 photos per class, all pre-resized to 300x400 pixels.
def initImageData():
    """Load the 10 training images and their one-hot labels from disk.

    Files whose names start with "r" get label [0, 1]; files starting with
    "c" get label [1, 0]. Returns (x, y) where x has shape (10, 400, 300, 3)
    with pixel values scaled to [0, 1] and y has shape (10, 2).

    NOTE(review): scipy.misc.imread was removed in SciPy 1.2; this code
    requires an old SciPy (with Pillow) to run.
    """
    dirpath = "E:/data/cat/"
    y = np.empty([10, 2])
    x = np.empty([10, 400, 300, 3])
    index = 0
    for file in os.listdir(dirpath):
        # Only advance `index` when a file actually matches a class prefix;
        # the original incremented unconditionally, so any stray file in the
        # directory left an uninitialized (np.empty garbage) row behind.
        if file.startswith("r"):
            y[index, 0] = 0
            y[index, 1] = 1
            x[index, :, :, :] = imread(dirpath + file)
            index += 1
        elif file.startswith("c"):
            y[index, 0] = 1
            y[index, 1] = 0
            x[index, :, :, :] = imread(dirpath + file)
            index += 1
    return x/255, y
# Build a trainable weight tensor for the given shape.
def w_variable(shape):
    """Return a tf.Variable of `shape`, initialized from a truncated
    normal distribution with stddev 0.1 (float32)."""
    init_values = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)
    return tf.Variable(init_values)
# Build a trainable bias tensor for the given shape.
def b_variable(shape):
    """Return a tf.Variable of `shape`, filled with the constant 0.1 (float32)."""
    init_values = tf.constant(0.1, shape=shape, dtype=tf.float32)
    return tf.Variable(init_values)
# Build and train the network: two conv+max-pool stages followed by three fully-connected layers.
def runTensorFlow(X,y):
    """Build the CNN graph, train it for 200 Adam steps, and print losses/predictions.

    X: training images, shape (m, 400, 300, 3), pixel values in [0, 1]
    y: one-hot labels, shape (m, 2)

    NOTE(review): `y` is a numpy array baked into the loss as a graph constant
    (there is no label placeholder), so the graph can only ever be trained on
    this exact label array.
    """
    m = X.shape[0]  # NOTE(review): computed but never used
    grap = tf.get_default_graph()
    # First conv layer filters (6 filters, f=5, stride=2)
    filter_w_1 = w_variable([5, 5, 3, 6])
    # First conv layer biases
    filter_b_1 = b_variable([6])
    # Second conv layer filters (16 filters, f=5, stride=2)
    filter_w_2 = w_variable([5, 5, 6, 16])
    # Second conv layer biases
    filter_b_2 = b_variable([16])
    # Fully-connected weights/biases. 6528 = 24*17*16, the flattened size of pool_2.
    f_w_1 = w_variable([6528, 1200])
    f_b_1 = b_variable([1, 1200])
    f_w_2 = w_variable([1200, 600])
    f_b_2 = b_variable([1, 600])
    f_w_3 = w_variable([600, 2])
    f_b_3 = b_variable([1, 2])
    # Input placeholder; named "x" so restoreAndPredict can look it up by name.
    x = tf.placeholder(tf.float32, [None, 400, 300, 3], name="x")
    z_conv_1 = tf.add(tf.nn.conv2d(x, filter_w_1, [1, 2, 2, 1], padding="VALID"), filter_b_1)
    # First conv layer activation
    a_conv_1 = tf.nn.relu(z_conv_1, name="conv1")
    # First max-pool (f=2, s=2); first list is ksize, second is strides
    pool_1 = tf.nn.max_pool(a_conv_1, [1, 2, 2, 1], [1, 2, 2, 1], padding="VALID", name="pool1")
    z_conv_2 = tf.add(tf.nn.conv2d(pool_1, filter_w_2, [1, 2, 2, 1], padding="VALID"), filter_b_2)
    # Second conv layer activation
    a_conv_2 = tf.nn.relu(z_conv_2, name="conv2")
    # Second max-pool (f=2, s=2)
    pool_2 = tf.nn.max_pool(a_conv_2, [1, 2, 2, 1], [1, 2, 2, 1], padding="VALID", name="pool2")
    # Flatten for the fully-connected layers
    f_input = tf.reshape(pool_2, [-1, 6528])
    # Fully-connected layer 1 output
    f_a_1 =  tf.nn.relu(tf.matmul(f_input, f_w_1) + f_b_1, name="fa1")
    # Fully-connected layer 2 output
    f_a_2 =  tf.nn.relu(tf.matmul(f_a_1, f_w_2) + f_b_2, name="fa2")
    # Output layer logits
    z = tf.matmul(f_a_2, f_w_3) + f_b_3
    p = tf.nn.softmax(z, name="prediction")
    # Stored in a collection so restoreAndPredict can retrieve it after a model restore.
    grap.add_to_collection(name="prediction", value = p)
    # Manual softmax cross-entropy, kept for reference:
    #loss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(p),reduction_indices=[1]), name="loss")
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=z, name="loss"))
    grap.add_to_collection(name="loss", value=loss)
    # Minimize the loss with the Adam optimizer (learning rate 0.001)
    optimizer = tf.train.AdamOptimizer(0.001).minimize(loss)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as session:
        session.run(init)
        # Print the initial loss to gauge training progress
        print(session.run(loss, feed_dict={x : X}))
        for i in range(200):
            _, loss_ = session.run([optimizer, loss], feed_dict={x: X})
            print(loss_)
        # Persist the trained model (disabled; restoreAndPredict expects this path)
        #saver.save(session, "E:/data/cat-model/model_test")
        # Print the final loss after training
        print(session.run(loss, feed_dict={x: X}))
        # Print the predicted class indices on the training set
        print(np.argmax(session.run(p, feed_dict={x: X}), axis=1))
# Reload the saved model from disk and run prediction on new inputs.
def restoreAndPredict(X):
    """Restore the checkpointed graph and print predicted class indices for X.

    X: images of shape (batch, 400, 300, 3), same preprocessing as training.
    """
    with tf.Session() as session:
        # Rebuild the graph structure, then load the trained variable values.
        loader = tf.train.import_meta_graph('E:/data/cat-model/model_test.meta')
        loader.restore(session, tf.train.latest_checkpoint('E:/data/cat-model/'))
        restored_graph = tf.get_default_graph()
        input_tensor = restored_graph.get_tensor_by_name('x:0')
        # get_collection returns a list; element 0 is the softmax output tensor.
        prediction_ops = restored_graph.get_collection("prediction")
        probabilities = session.run(prediction_ops, feed_dict={input_tensor: X})[0]
        print(np.argmax(probabilities, axis=1))
# Script entry point: load the 10 training images and train end-to-end.
x, y = initImageData()
runTensorFlow(x, y)
#x_test = np.empty([1, 400, 300, 3])
# a single cross-validation example
#x_test[0, :, :, :] = imread("E:/data/cat/t_r.JPG")
#restoreAndPredict(x_test)


评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值