TensorFlow: Facial Expression Classification with a Convolutional Neural Network
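
This post walks through two TFLearn convolutional networks for a two-class expression task on 48x48 mouth-region crops (FER2013). Both are trained with a manual 4-fold split (four contiguous 1000-sample test slices), and each fold's best test-set predictions are written to disk for a later voting step.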

1. Model 1

from __future__ import division, print_function, absolute_import

import numpy as np
import tensorflow as tf
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.layers.normalization import batch_normalization

# Load 48x48 mouth-region crops and their one-hot labels; scale pixels to
# [0, 1] and add a channel axis so the shape becomes (N, 48, 48, 1).
X = np.load(r"D:\FER2013\aaa\ffff\8-8/fer-mouth.npy") / 255
Y = np.load(r"D:\FER2013\aaa\ffff\8-8/fer-mouth-label.npy")
X = X[:, :, :, np.newaxis]


def net():
    # Feature-wise zero-centering and std-normalization, applied to each input batch.
    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    network = input_data(shape=[None, 48, 48, 1], data_preprocessing=img_prep)

    # Inception-style (GoogLeNet 3a/3b) branches and a tanh fully-connected
    # head were also tried at this point, but are not part of the final model.

    # Three light conv blocks; shape comments assume the 48x48 input.
    network = conv_2d(network, 64, 3, activation='relu')    # 48x48x64
    network = max_pool_2d(network, 3, strides=2)            # 24x24x64
    network = batch_normalization(network)
    network = conv_2d(network, 96, 3, activation='relu')    # 24x24x96
    network = max_pool_2d(network, 3, strides=2)            # 12x12x96
    network = batch_normalization(network)
    network = conv_2d(network, 96, 1, activation='relu')    # 12x12x96 (1x1 conv)
    network = dropout(network, 0.5)
    # restore=False lets this layer be re-initialized when loading a checkpoint
    # that was trained with a different final layer.
    network = fully_connected(network, 2, activation='softmax', restore=False)
    network = regression(network,
                         optimizer=tflearn.optimizers.Momentum(lr_decay=0.96, decay_step=2000),
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    return network
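
Once a fold's best checkpoint has been saved by the training loop below, the same net() graph can be rebuilt for single-image inference. The sketch below is a minimal usage example (it relies on the imports and net() defined above); the checkpoint path and input file are placeholders.

# Hypothetical inference sketch: rebuild the graph, load a saved checkpoint,
# and classify one 48x48 grayscale image.
tf.reset_default_graph()
model = tflearn.DNN(net())
model.load(r"D:\FER2013\aaa\ffff\8-8/0-0new_model.tfl")  # placeholder checkpoint

img = np.load("one_face.npy")              # placeholder: (48, 48) grayscale, 0-255
img = (img / 255).reshape(1, 48, 48, 1)    # same scaling and shape as training
probs = model.predict(img)                 # -> [[p_class0, p_class1]]
print("predicted class:", int(np.argmax(probs[0])))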



# Training: manual 4-fold cross-validation. Each fold holds out a contiguous
# 1000-sample slice as the test set and trains on the rest.
test_all = []
train_all = []
ss = []

for i in range(4):
    prediction = []
    max_test_acu = 0  # reset per fold so each fold keeps its own best model
    test_X = X[1000 * i:1000 * i + 1000, :, :, :]
    np.save(r'D:\FER2013\aaa\ffff\8-8/' + "test" + str(i) + "-img", test_X)
    test_Y = Y[1000 * i:1000 * i + 1000, :]
    np.save(r'D:\FER2013\aaa\ffff\8-8/' + "test" + str(i) + "-label", test_Y)
    train_X = np.delete(X, range(1000 * i, 1000 * i + 1000), axis=0)
    np.save(r'D:\FER2013\aaa\ffff\8-8/' + "train" + str(i) + "-img", train_X)
    train_Y = np.delete(Y, range(1000 * i, 1000 * i + 1000), axis=0)
    np.save(r'D:\FER2013\aaa\ffff\8-8/' + "train" + str(i) + "-label", train_Y)
    test_acu = []
    train_acu = []
    tf.reset_default_graph()  # fresh graph and weights for every fold
    network = net()
    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0)
    for j in range(100):
        print("******** fold %d, epoch %d" % (i, j))
        model.fit(train_X, train_Y, n_epoch=1, shuffle=True,
                  show_metric=True, batch_size=8, snapshot_step=1000,
                  snapshot_epoch=False, run_id='alexnet_fer2013')
        acu_train = model.evaluate(train_X, train_Y)
        acu_test = model.evaluate(test_X, test_Y)
        train_acu.append(acu_train[0])
        test_acu.append(acu_test[0])
        # Whenever test accuracy improves, snapshot the model and overwrite this
        # fold's saved test-set predictions (used later for voting).
        if acu_test[0] > max_test_acu:
            max_test_acu = acu_test[0]
            model.save(r'D:\FER2013\aaa\ffff\8-8/' + str(j) + "-" + str(i) + "new_model.tfl")
            prediction = model.predict(test_X)  # predict the whole 1000-image slice at once
            np.save(r'D:\FER2013\aaa\ffff\8-8/' + str(i) + "-prediction1.npy",
                    np.array(prediction))
    test_all.append(test_acu)
    train_all.append(train_acu)
aaa = np.array(test_all).reshape(4, 100)   # per-fold test accuracy, 100 epochs each
bbb = np.array(train_all).reshape(4, 100)
np.save(r'D:\FER2013\aaa\ffff\8-8/test_all.npy', aaa)
np.save(r'D:\FER2013\aaa\ffff\8-8/train_all.npy', bbb)

# Collect each fold's best test accuracy and report the average.
avg = 0
for m in range(4):
    s = max(aaa[m])
    avg += s
    ss.append(s)
ss = np.array(ss)
np.save(r'D:\FER2013\aaa\ffff\8-8/test.npy', ss)
print("average of per-fold best test accuracy:", avg / 4)
print("per-fold best test accuracies:", ss)
print('fcn_Done!')
model.save(r"D:\FER2013\aaa\ffff\8-8/FCN.tfl")

2. Model 2

from __future__ import division, print_function, absolute_import

import numpy as np
import tensorflow as tf
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.layers.normalization import batch_normalization

# Same data as Model 1: 48x48 mouth-region crops scaled to [0, 1], with a
# channel axis added.
X = np.load(r"D:\FER2013\aaa\ffff\8-8/fer-mouth.npy") / 255
Y = np.load(r"D:\FER2013\aaa\ffff\8-8/fer-mouth-label.npy")
X = X[:, :, :, np.newaxis]

# Build a small AlexNet-inspired CNN: two conv/pool blocks followed by two
# 1024-unit tanh fully-connected layers.
def net():
    # Feature-wise zero-centering and std-normalization, applied to each input batch.
    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    network = input_data(shape=[None, 48, 48, 1], data_preprocessing=img_prep)
    network = conv_2d(network, 64, 3, activation='relu')    # 48x48x64
    network = max_pool_2d(network, 3, strides=2)            # 24x24x64
    network = batch_normalization(network)
    network = conv_2d(network, 128, 3, activation='relu')   # 24x24x128
    network = max_pool_2d(network, 3, strides=2)            # 12x12x128
    network = batch_normalization(network)
    network = fully_connected(network, 1024, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 1024, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network,
                         optimizer=tflearn.optimizers.Momentum(lr_decay=0.96, decay_step=2000),
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    return network
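
The two variants differ mainly in their heads: Model 1 is nearly fully convolutional (a 1x1 convolution feeding the 2-way softmax directly, hence its FCN.tfl checkpoint name), while Model 2 funnels the pooled features through two 1024-unit tanh fully-connected layers with dropout before the softmax.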




# Training: the same manual 4-fold cross-validation as Model 1, with outputs
# written under D:/toupiao/ and D:/tou_piao/ ("toupiao" means voting).
test_all = []
train_all = []
ss = []

for i in range(4):
    prediction = []
    max_test_acu = 0  # reset per fold
    test_X = X[1000 * i:1000 * i + 1000, :, :, :]
    np.save('D:/toupiao/' + "test" + str(i) + "-img", test_X)
    test_Y = Y[1000 * i:1000 * i + 1000, :]
    np.save('D:/toupiao/' + "test" + str(i) + "-label", test_Y)
    train_X = np.delete(X, range(1000 * i, 1000 * i + 1000), axis=0)
    np.save('D:/toupiao/' + "train" + str(i) + "-img", train_X)
    train_Y = np.delete(Y, range(1000 * i, 1000 * i + 1000), axis=0)
    np.save('D:/toupiao/' + "train" + str(i) + "-label", train_Y)
    test_acu = []
    train_acu = []
    tf.reset_default_graph()  # fresh graph and weights for every fold
    network = net()
    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0)
    for j in range(100):
        print("******** fold %d, epoch %d" % (i, j))
        model.fit(train_X, train_Y, n_epoch=1, shuffle=True,
                  show_metric=True, batch_size=8, snapshot_step=1000,
                  snapshot_epoch=False, run_id='alexnet_fer2013')
        acu_train = model.evaluate(train_X, train_Y)
        acu_test = model.evaluate(test_X, test_Y)
        train_acu.append(acu_train[0])
        test_acu.append(acu_test[0])
        # Whenever test accuracy improves, overwrite this fold's saved
        # test-set predictions (used later for voting).
        if acu_test[0] > max_test_acu:
            max_test_acu = acu_test[0]
            prediction = model.predict(test_X)  # predict the whole 1000-image slice at once
            np.save('D:/tou_piao/' + str(i) + "-prediction1.npy",
                    np.array(prediction))

    test_all.append(test_acu)
    train_all.append(train_acu)
aaa = np.array(test_all).reshape(4, 100)   # per-fold test accuracy, 100 epochs each
bbb = np.array(train_all).reshape(4, 100)
np.save('D:/tou_piao/test_all.npy', aaa)
np.save('D:/tou_piao/train_all.npy', bbb)

# Collect each fold's best test accuracy and report the average.
avg = 0
for m in range(4):
    s = max(aaa[m])
    avg += s
    ss.append(s)
ss = np.array(ss)
np.save('D:/tou_piao/test.npy', ss)
print("average of per-fold best test accuracy:", avg / 4)
print("per-fold best test accuracies:", ss)
print('fourfold_Done!')
model.save("D:/train/bb/yu_Model.tfl")