一、安装Intel Caffe修改版
1、下载项目
git clone https://github.com/LMdeLiangMi/caffe.git
2、安装依赖
sudo apt-get update
sudo apt-get install build-essential cmake git pkg-config
sudo apt-get install libprotobuf-dev libleveldb-dev libsnappy-dev libhdf5-serial-dev protobuf-compiler
sudo apt-get install libatlas-base-dev
sudo apt-get install --no-install-recommends libboost-all-dev
sudo apt-get install libgflags-dev libgoogle-glog-dev liblmdb-dev
sudo apt-get install libopencv-dev
3、其他配置
Install Protocol Buffers 3.4.0, following this guide: http://blog.csdn.net/twilightdream/article/details/72953338
sudo -H pip install --upgrade protobuf==3.1.0.post1
(Note: the pip protobuf version above, 3.1.0.post1, does not match the protoc 3.4.0 installed in the previous step — verify which version this Caffe fork actually requires before pinning.)
sudo apt-get install libhdf5-dev
sudo apt-get install python-h5py
4、编译安装
cp Makefile.config.example Makefile.config
make all -j24
make pycaffe
sudo vi ~/.bashrc
# add export PYTHONPATH=/path/to/caffe/python:$PYTHONPATH
source ~/.bashrc  # note: `source` is a shell builtin — it cannot be run under sudo, and doing so would not affect the current shell anyway
二、准备数据
1、计算Optical Flow图
git clone https://github.com/pathak22/pyflow.git
cd pyflow/
python setup.py build_ext -i
python demo.py # -viz option to visualize output
主要是python并行处理,加速计算
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# from __future__ import unicode_literals
import numpy as np
from PIL import Image
import pyflow
import os
from concurrent.futures import ProcessPoolExecutor, as_completed
alpha = 0.012
ratio = 0.75
minWidth = 20
nOuterFPIterations = 7
nInnerFPIterations = 1
nSORIterations = 30
colType = 0
def createFlow(base, savebase, impath1, impath2):
im1 = np.array(Image.open(os.path.join(base, impath1)))
im2 = np.array(Image.open(os.path.join(base, impath2)))
im1 = im1.astype(float) / 255.
im2 = im2.astype(float) / 255.
#s = time.time()
u, v, im2W = pyflow.coarse2fine_flow(
im1, im2, alpha, ratio, minWidth, nOuterFPIterations, nInnerFPIterations,
nSORIterations, colType)
#e = time.time()
#print('Time Taken: %.2f seconds for image of size (%d, %d, %d)' % (
# e - s, im1.shape[0], im1.shape[1], im1.shape[2]))
flow = np.concatenate((u[..., None], v[..., None]), axis=2)
import cv2
hsv = np.zeros(im1.shape, dtype=np.uint8)
hsv[:, :, 0] = 255
hsv[:, :, 1] = 255
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imwrite(os.path.join(savebase, impath2), rgb)
def create(base, savebase):
futures = set()
with ProcessPoolExecutor() as executor:
frames = os.listdir(os.path.join(base))
frames.sort()
print(frames)
for i in range(16):
j = i + 7
k = j + 1
future = executor.submit(createFlow, base, savebase, frames[j], frames[k])
futures.add(future)
try:
for future in as_completed(futures):
err = future.exception()
if err is not None:
raise err
except KeyboardInterrupt:
print("stopped by hand")
2、生成txt文件
python caffe_gen_path_flow -train=/home/ubuntu/data/Emotion/model/signLanguageVideo/flowFrames -shuffle=True
python caffe_gen_path_lstm -train=/home/ubuntu/data/Emotion/model/signLanguageVideo/dataset -shuffle=True
vi train_flow.txt test_flow.txt
0,100y
:n
p
:wq
三、训练LRCN模型
script -f train.txt
bash run_singleFrame_flow.sh
bash run_lstm_flow.sh
四、测试模型
五、配置服务器
sudo apt-get install screen
sudo apt-get install supervisor
sudo pip install gunicorn
cd /etc/supervisor/conf.d/
vi Emotion.conf
[program:Emotion]
; Supervisor takes a single comma-separated environment= line; a second
; environment= line would override the first. Also note PATH must be a
; directory list (e.g. /usr/bin), not the path to the python binary itself.
environment=PYTHONPATH="/home/ubuntu/data/caffe/python"
directory=/home/ubuntu/data/Emotion
; gunicorn's short worker option is "-w 1" (or --workers=1); "-w=1" is parsed as workers "=1" and fails
command=gunicorn -w 1 main:app -b 0.0.0.0:8080
autostart=true
autorestart=true
supervisorctl reload