# Basic environment
#《深度学习原理与TensorFlow实战》04 CNN看懂世界 02 vgg_network.py
# 书源码地址:https://github.com/DeepVisionTeam/TensorFlowBook.git
# 视频讲座地址:http://edu.csdn.net/course/detail/5222
# win10 Tensorflow1.2.0 python3.6.1
# CUDA v8.0 cudnn-8.0-windows10-x64-v5.1
# https://github.com/tflearn/tflearn/blob/master/examples/images/vgg_network.py
# vgg_network占用内存较大,看python内存占用大约8GB+,本机gpu为4GB,不够用,所以采用cpu only版本做测试。速度较慢。
# flowers17的数据会自动下载,且速度较快。
# 硬件配置:
# Intel(R) Xeon(R) CPU E3-1505M v5 @ 2.80GHz 2.81GHz
# 内存 64GB
# 系统: win10 x64
# 大约24小时内跑50+个epoch
# vgg_network.py
""" Very Deep Convolutional Networks for Large-Scale Visual Recognition.
Applying VGG 16-layers convolutional network to Oxford's 17 Category Flower
Dataset classification task.
References:
Very Deep Convolutional Networks for Large-Scale Image Recognition.
K. Simonyan, A. Zisserman. arXiv technical report, 2014.
Links:
http://arxiv.org/pdf/1409.1556
"""
from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
import tflearn.datasets.oxflower17 as oxflower17
# Load Oxford Flowers-17, resized to the 224x224 RGB input VGG expects.
X, Y = oxflower17.load_data(one_hot=True, resize_pics=(224, 224))

# VGG-16 feature extractor: five convolutional blocks with (filters, conv
# layers) of (64,2), (128,2), (256,3), (512,3), (512,3); every block ends
# with a 2x2 max pool of stride 2. Layer-creation order matches the
# original line-by-line version, so tflearn's auto-generated names are
# identical.
net = input_data(shape=[None, 224, 224, 3])
for n_filters, n_convs in ((64, 2), (128, 2), (256, 3), (512, 3), (512, 3)):
    for _ in range(n_convs):
        net = conv_2d(net, n_filters, 3, activation='relu')
    net = max_pool_2d(net, 2, strides=2)

# Classifier head: two dropout-regularized 4096-unit ReLU layers, then a
# softmax over the 17 flower categories.
for _ in range(2):
    net = fully_connected(net, 4096, activation='relu')
    net = dropout(net, 0.5)
net = fully_connected(net, 17, activation='softmax')

# Training target: RMSProp with categorical cross-entropy.
network = regression(net, optimizer='rmsprop',
                     loss='categorical_crossentropy',
                     learning_rate=0.0001)

# Keep only the latest checkpoint; minimal TensorBoard logging.
model = tflearn.DNN(network, checkpoint_path='model_vgg',
                    max_checkpoints=1, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=500, shuffle=True,
          show_metric=True, batch_size=32, snapshot_step=500,
          snapshot_epoch=False, run_id='vgg_oxflowers17')
'''
Run id: vgg_oxflowers17
Log directory: /tmp/tflearn_logs/
Training samples: 1360
Validation samples: 0
Training Step: 1 | time: 39.151s
| RMSProp | epoch: 001 | loss: 0.00000 - acc: 0.0000
Training Step: 2 | total loss: 2.55066 | time: 78.160s
| RMSProp | epoch: 001 | loss: 2.55066 - acc: 0.0562
Training Step: 3 | total loss: 2.78507 | time: 117.662s
| RMSProp | epoch: 001 | loss: 2.78507 - acc: 0.0358
Training Step: 4 | total loss: 2.82211 | time: 163.628s
| RMSProp | epoch: 001 | loss: 2.82211 - acc: 0.0558
Training Step: 5 | total loss: 2.83343 | time: 212.448s
| RMSProp | epoch: 001 | loss: 2.83343 - acc: 0.0172
Training Step: 6 | total loss: 2.82952 | time: 257.656s
| RMSProp | epoch: 001 | loss: 2.82952 - acc: 0.0664
Training Step: 7 | total loss: 2.83349 | time: 306.941s
| RMSProp | epoch: 001 | loss: 2.83349 - acc: 0.0266
Training Step: 8 | total loss: 2.83455 | time: 355.456s
| RMSProp | epoch: 001 | loss: 2.83455 - acc: 0.0468
Training Step: 9 | total loss: 2.83402 | time: 402.628s
| RMSProp | epoch: 001 | loss: 2.83402 - acc: 0.0716
Training Step: 10 | total loss: 2.83300 | time: 449.701s
| RMSProp | epoch: 001 | loss: 2.83300 - acc: 0.0514
Training Step: 11 | total loss: 2.83271 | time: 495.893s
| RMSProp | epoch: 001 | loss: 2.83271 - acc: 0.0863
Training Step: 12 | total loss: 2.83074 | time: 537.937s
| RMSProp | epoch: 001 | loss: 2.83074 - acc: 0.0615
Training Step: 13 | total loss: 2.83104 | time: 580.727s
| RMSProp | epoch: 001 | loss: 2.83104 - acc: 0.0619
Training Step: 14 | total loss: 2.83621 | time: 623.209s
| RMSProp | epoch: 001 | loss: 2.83621 - acc: 0.0366
Training Step: 15 | total loss: 2.83518 | time: 664.900s
| RMSProp | epoch: 001 | loss: 2.83518 - acc: 0.0345
Training Step: 16 | total loss: 2.83590 | time: 706.464s
| RMSProp | epoch: 001 | loss: 2.83590 - acc: 0.0333
Training Step: 17 | total loss: 2.83771 | time: 746.936s
| RMSProp | epoch: 001 | loss: 2.83771 - acc: 0.0213
Training Step: 18 | total loss: 2.83808 | time: 788.010s
| RMSProp | epoch: 001 | loss: 2.83808 - acc: 0.0139
Training Step: 19 | total loss: 2.83710 | time: 829.101s
| RMSProp | epoch: 001 | loss: 2.83710 - acc: 0.0093
Training Step: 20 | total loss: 2.83573 | time: 869.866s
| RMSProp | epoch: 001 | loss: 2.83573 - acc: 0.0163
Training Step: 21 | total loss: 2.83293 | time: 910.728s
| RMSProp | epoch: 001 | loss: 2.83293 - acc: 0.0307
Training Step: 22 | total loss: 2.83208 | time: 951.732s
| RMSProp | epoch: 001 | loss: 2.83208 - acc: 0.0496
Training Step: 23 | total loss: 2.83391 | time: 992.608s
| RMSProp | epoch: 001 | loss: 2.83391 - acc: 0.0443
Training Step: 24 | total loss: 2.83574 | time: 1033.661s
| RMSProp | epoch: 001 | loss: 2.83574 - acc: 0.0406
Training Step: 25 | total loss: 2.83504 | time: 1074.329s
| RMSProp | epoch: 001 | loss: 2.83504 - acc: 0.0466
Training Step: 26 | total loss: 2.83532 | time: 1115.530s
| RMSProp | epoch: 001 | loss: 2.83532 - acc: 0.0342
Training Step: 27 | total loss: 2.83454 | time: 1156.374s
| RMSProp | epoch: 001 | loss: 2.83454 - acc: 0.0335
Training Step: 28 | total loss: 2.83370 | time: 1196.976s
| RMSProp | epoch: 001 | loss: 2.83370 - acc: 0.0329
Training Step: 29 | total loss: 2.83326 | time: 1237.924s
| RMSProp | epoch: 001 | loss: 2.83326 - acc: 0.0401
Training Step: 30 | total loss: 2.83426 | time: 1278.819s
| RMSProp | epoch: 001 | loss: 2.83426 - acc: 0.0380
Training Step: 31 | total loss: 2.83360 | time: 1319.769s
| RMSProp | epoch: 001 | loss: 2.83360 - acc: 0.0365
Training Step: 32 | total loss: 2.83191 | time: 1360.613s
| RMSProp | epoch: 001 | loss: 2.83191 - acc: 0.0493
Training Step: 33 | total loss: 2.83120 | time: 1401.292s
| RMSProp | epoch: 001 | loss: 2.83120 - acc: 0.0591
Training Step: 34 | total loss: 2.83001 | time: 1442.211s
| RMSProp | epoch: 001 | loss: 2.83001 - acc: 0.0665
Training Step: 35 | total loss: 2.82995 | time: 1482.782s
| RMSProp | epoch: 001 | loss: 2.82995 - acc: 0.0591
Training Step: 36 | total loss: 2.83233 | time: 1523.651s
| RMSProp | epoch: 001 | loss: 2.83233 - acc: 0.0534
Training Step: 37 | total loss: 2.83467 | time: 1564.673s
| RMSProp | epoch: 001 | loss: 2.83467 - acc: 0.0552
Training Step: 38 | total loss: 2.83420 | time: 1605.479s
| RMSProp | epoch: 001 | loss: 2.83420 - acc: 0.0506
Training Step: 39 | total loss: 2.83358 | time: 1646.481s
| RMSProp | epoch: 001 | loss: 2.83358 - acc: 0.0528
Training Step: 40 | total loss: 2.83449 | time: 1687.051s
| RMSProp | epoch: 001 | loss: 2.83449 - acc: 0.0547
Training Step: 41 | total loss: 2.83445 | time: 1727.702s
| RMSProp | epoch: 001 | loss: 2.83445 - acc: 0.0504
Training Step: 42 | total loss: 2.83419 | time: 1768.542s
| RMSProp | epoch: 001 | loss: 2.83419 - acc: 0.0525
Training Step: 43 | total loss: 2.83316 | time: 1789.223s
| RMSProp | epoch: 001 | loss: 2.83316 - acc: 0.0543
Training Step: 44 | total loss: 2.83455 | time: 20.706s
| RMSProp | epoch: 002 | loss: 2.83455 - acc: 0.0449
Training Step: 45 | total loss: 2.83637 | time: 61.555s
| RMSProp | epoch: 002 | loss: 2.83637 - acc: 0.0373
Training Step: 46 | total loss: 2.83504 | time: 102.520s
| RMSProp | epoch: 002 | loss: 2.83504 - acc: 0.0467
Training Step: 47 | total loss: 2.83507 | time: 143.075s
| RMSProp | epoch: 002 | loss: 2.83507 - acc: 0.0390
Training Step: 48 | total loss: 2.83601 | time: 184.085s
...
| RMSProp | epoch: 059 | loss: 0.07563 - acc: 0.9953
Training Step: 2509 | total loss: 0.06807 | time: 545.071s
| RMSProp | epoch: 059 | loss: 0.06807 - acc: 0.9957
Training Step: 2510 | total loss: 0.06129 | time: 584.407s
| RMSProp | epoch: 059 | loss: 0.06129 - acc: 0.9962
Training Step: 2511 | total loss: 0.05854 | time: 623.157s
| RMSProp | epoch: 059 | loss: 0.05854 - acc: 0.9934
Training Step: 2512 | total loss: 0.05310 | time: 663.058s
| RMSProp | epoch: 059 | loss: 0.05310 - acc: 0.9941
Training Step: 2513 | total loss: 0.05105 | time: 702.142s
| RMSProp | epoch: 059 | loss: 0.05105 - acc: 0.9947
'''