Face Attractiveness Prediction with TensorFlow + OpenCV + Dlib


References:
1、http://blog.csdn.net/wc781708249/article/details/78604740
2、http://blog.csdn.net/xingchenbingbuyu/article/details/52804013


Data download: http://www.hcii-lab.net/data/SCUT-FBP/EN/introduce.html


Note: this approach can also be adapted for face recognition; given suitable data and a small change to the code, it works as-is (see the sketch below).
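As a rough illustration (not part of the original post), the main change is to give the network one output per person and train it with a softmax cross-entropy loss instead of the squared-error regression loss. The sketch below reuses the cifarnet model, placeholders and arg scope defined in the training script further down; num_identities and the integer identity labels are assumptions for illustration only:

# Hypothetical sketch: swap the regression head for a classification head (face recognition).
# Assumes cifarnet(), image_shaped_input, is_training and keep from the training script below,
# and that each face comes with an integer identity label instead of an attractiveness score.
num_identities = 10                                   # assumed number of people in the dataset
y_id = tf.placeholder(tf.int64, [None], 'y_id')       # one integer identity label per face
logits, _ = cifarnet(images=image_shaped_input, num_classes=num_identities,
                     is_training=is_training, dropout_keep_prob=keep)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_id, logits=logits))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), y_id), tf.float32))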


Convert the images and labels to a pickle file

# -*- coding: UTF-8 -*-

"""
生成图像数据和标签
数据下载:http://www.hcii-lab.net/data/SCUT-FBP/EN/introduce.html
"""
import pandas as pd
import numpy as np
import cv2
import glob
from sklearn import preprocessing
import dlib

file_path="./Data_Collection/*.jpg"
img_files=glob.glob(file_path)

img_names=[]
imgs=[]

# 1. Use dlib's built-in frontal_face_detector as the face detector
detector = dlib.get_frontal_face_detector()

for img_file in img_files:
    img_name = img_file.split('\\')[-1].split('.')[0].split('-')[-1]  # on Linux the path separator is '/'
    img_names.append(img_name)  # record the image name so it can be matched to its label later
    img = cv2.imread(img_file)
    rects = detector(img, 1)
    assert len(rects) == 1, 'more than one face detected in the image'
    face = rects[0]
    [x1, x2, y1, y2] = [face.left(), face.right(), face.top(), face.bottom()]
    img = img[np.max((y1-50, 0)):(y2+50), np.max((x1-50, 0)):(x2+50)]  # crop the face region with a 50-pixel margin
    img = cv2.resize(img, (100, 100))  # resize to a uniform size (a larger size also works)
    # cv2.imshow('dst', img)
    # cv2.waitKey(0)
    img = img.flatten()  # flatten so that each row represents one image
    imgs.append(img)

imgs=np.array(imgs,np.float32)  # shape becomes (n_images, 100*100*3), i.e. (500, 30000) for the full SCUT-FBP set
# L2-normalize each image vector
normalizer = preprocessing.Normalizer(norm='l2').fit(imgs)
imgs=normalizer.transform(imgs)

# the data will go through a TensorFlow CNN, so no dimensionality reduction is applied here

# load the labels that correspond to the images
labels_path="./Rating_Collection/Attractiveness label.xlsx"
labels=pd.read_excel(labels_path)
labels=labels.values[:,:2]
labels=dict(labels)

# match each image to its label via the image name
labels2=[labels[int(img_name)] for img_name in img_names]
# imgs and labels2 are now aligned row by row
labels2=np.array(labels2,np.float32)[:,np.newaxis]

data=np.hstack((imgs,labels2)) # the last column holds the label

# save to file
pd.to_pickle(data,'./data.pkl')
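After the script finishes, a quick sanity check is to reload data.pkl and confirm its shape and label column (a minimal sketch; the 500-image count assumes the full SCUT-FBP set):

# quick sanity check on the generated pickle file
import numpy as np
import pandas as pd

data = pd.read_pickle('./data.pkl')
print(data.shape)        # expected (n_images, 100*100*3 + 1), e.g. (500, 30001) for the full SCUT-FBP set
print(data[:5, -1])      # the last column holds the attractiveness scores
assert not np.isnan(data).any(), 'NaN values found in the saved data'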

Load the pkl file and train the model

# -*- coding: UTF-8 -*-

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import pandas as pd
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim

# load the data
data=pd.read_pickle('./data.pkl')

# split into train_datas and test_datas (the last 50 rows are held out for testing)
np.random.shuffle(data) # shuffle the rows
train_datas=data[0:-50]
test_datas=data[-50:]

# -------------- model ----------------
# reference: http://blog.csdn.net/wc781708249/article/details/78414028
def cifarnet(images,num_classes=10,is_training=False,
             dropout_keep_prob=0.5,
             prediction_fn=slim.softmax,
             scope='CifarNet'):
    """
    Creates a variant of the CifarNet model.
    :param images: 输入图像数据 形状[n,h,w,c]
    :param num_classes: 类别数
    :param is_training: 是否训练 模型训练设置为True,测试、推理设置为False
    :param dropout_keep_prob: droupout保持率
    :param prediction_fn: 输出层的激活函数
    :param scope: 节点名
    :return: 
        net:2D Tensor ,logits (pre-softmax激活)如果num_classes
            是非零整数,或者如果num_classes为0或None输入到逻辑层           
        end_points:从网络组件到相应的字典激活。
    """

    end_points = {}
    # images has shape [n, 100, 100, 3]
    with tf.variable_scope(scope, 'CifarNet', [images]): # [images] is the list of inputs to the scope
        net = slim.conv2d(images, 64, [5,5], scope='conv1') # 5x5 conv, 64 output channels, default stride 1; shape [n,100,100,64]
        end_points['conv1'] = net
        net = slim.max_pool2d(net, [2, 2], 2, scope='pool1') # 2x2 max pool, stride 2; shape [n,50,50,64]
        end_points['pool1'] = net
        net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1') # local response normalization (usually optional)
        net = slim.conv2d(net, 64, [5, 5], scope='conv2') # 5x5 conv, 64 output channels, default stride 1; shape [n,50,50,64]
        end_points['conv2'] = net
        net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
        net = slim.max_pool2d(net, [2, 2], 2, scope='pool2') # 2x2 max pool, stride 2; shape [n,25,25,64]
        end_points['pool2'] = net
        net = slim.flatten(net) # flatten; shape [n, 25*25*64]
        end_points['Flatten'] = net
        net = slim.fully_connected(net, 384, scope='fc3') # fully connected layer, 384 units; shape [n,384]
        end_points['fc3'] = net
        # net = slim.dropout(net, dropout_keep_prob, is_training=is_training,scope='dropout3')  # slim.dropout unexpectedly raised an error here, while tf.nn.dropout below does not
        net = tf.nn.dropout(net, dropout_keep_prob, name='dropout3') # dropout layer; shape [n,384]
        net = slim.fully_connected(net, 192, scope='fc4') # fully connected layer, 192 units; shape [n,192]
        end_points['fc4'] = net
        if not num_classes:  # if num_classes is 0 or None, return the features before the final output layer
            return net, end_points
        logits = slim.fully_connected(net, num_classes,
                                      biases_initializer=tf.zeros_initializer(),
                                      weights_initializer=tf.truncated_normal_initializer(1 / 192.0),
                                      weights_regularizer=None,
                                      activation_fn=None,
                                      scope='logits') # output layer; shape [n, num_classes]

        end_points['Logits'] = logits
        end_points['Predictions'] = prediction_fn(logits, scope='Predictions')

    return logits, end_points

cifarnet.default_image_size = 100

def inception_resnet_v2_arg_scope(weight_decay=0.00004,
                                  batch_norm_decay=0.9997,
                                  batch_norm_epsilon=0.001,
                                  activation_fn=tf.nn.relu):
  """Returns the scope with the default parameters for inception_resnet_v2.
  Uses batch_norm; in practice this tends to work better than a plain cifarnet arg scope, so it is the recommended way to configure the per-layer defaults.
  Args:
    weight_decay: the weight decay for weights variables.
    batch_norm_decay: decay for the moving average of batch_norm momentums.
    batch_norm_epsilon: small float added to variance to avoid dividing by zero.
    activation_fn: Activation function for conv2d.
  Returns:
    a arg_scope with the parameters needed for inception_resnet_v2.
  """
  # Set weight_decay for weights in conv2d and fully_connected layers.
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      biases_regularizer=slim.l2_regularizer(weight_decay)):

    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'fused': None,  # Use fused batch norm if possible.
    }
    # Set activation_fn and parameters for batch_norm.
    with slim.arg_scope([slim.conv2d], activation_fn=activation_fn,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params) as scope:
      return scope


# hand-rolled next_batch() to feed the data in mini-batches
start_index=0
def next_batch(data,batch_size,img_pixel=100,channels=3):
    global start_index  # must be global so the position persists across calls
    global second_index  # must be global as well

    second_index=start_index+batch_size
    if second_index>len(data):
        second_index=len(data)
    data1=data[start_index:second_index]
    # lab=labels[start_index:second_index]
    start_index=second_index
    if start_index>=len(data):
        start_index = 0

    # shuffle the rows within this batch
    index = [i for i in range(len(data1))]  # len(data1) is the number of rows
    np.random.shuffle(index)  # shuffle the indices
    data1 = data1[index]

    # split out the images and labels
    img = data1[:, 0:-1].astype(np.float32)

    label = data1[:, -1][:,np.newaxis]
    label = label.astype(np.float32)  # type conversion

    return img,label



with tf.name_scope('input'):
   x = tf.placeholder(tf.float32, [None, 100 * 100 * 3], 'x')
   y_ = tf.placeholder(tf.float32, [None, 1], 'y_')
   keep = tf.placeholder(tf.float32)
   is_training = tf.placeholder(tf.bool, name='MODE')

image_shaped_input = tf.reshape(x, [-1, 100, 100, 3])
with slim.arg_scope(inception_resnet_v2_arg_scope()):
   y, _ = cifarnet(images=image_shaped_input, num_classes=1, is_training=is_training, dropout_keep_prob=keep)

# regression loss: mean squared error between predicted and true scores
# (kept under the name cross_entropy from the original code, although it is not a cross-entropy)
cross_entropy=tf.reduce_mean(tf.reduce_sum(
                tf.square(y- y_), reduction_indices=[1]))
global_step = tf.Variable(0, trainable=False)
train_op=tf.train.AdamOptimizer(1e-3).minimize(cross_entropy,global_step=global_step)
init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())


if __name__=="__main__":
   with tf.Session() as sess:
      sess.run(init)
      for step in range(10000):
         batch_xs, batch_ys = next_batch(train_datas,30)
         train_op.run({x: batch_xs, y_: batch_ys, keep: 0.7, is_training: True})

         if step % 100 == 0:
            # acc = accuracy.eval({x: batch_xs, y_: batch_ys, keep: 1., is_training: False})
            print("step", step,
                  'loss', cross_entropy.eval({x: batch_xs, y_: batch_ys, keep: 1., is_training: False}))

      # test
      test_x, test_y = test_datas[:,:-1],test_datas[:,-1][:,np.newaxis]
      # acc = accuracy.eval({x: test_x, y_: test_y, keep: 1., is_training: False})
      print("pred value:",y.eval({x: test_x, y_: test_y, keep: 1., is_training: False}))
      print('True value', test_y)

print("Generate Model Successfully!")