共分3个部分:
1. Caffe命令 usage
root@ip-172-30-0-251:/caffe#
root@ip-172-30-0-251:/caffe# build/tools/caffe
caffe: command line brew
usage: caffe <command> <args>
commands:
train train or finetune a model
test score a model
device_query show GPU diagnostic information
time benchmark model execution time
Flags from tools/caffe.cpp:
-gpu (Optional; run in GPU mode on given device IDs separated by ','. Use
'-gpu all' to run on all available GPUs. The effective training batch
size is multiplied by the number of devices.) type: string default: ""
-iterations (The number of iterations to run.) type: int32 default: 50
currently: 100
-level (Optional; network level.) type: int32 default: 0
-model (The model definition protocol buffer text file.) type: string
default: "" currently: "examples/mnist/lenet_train_test.prototxt"
-phase (Optional; network phase (TRAIN or TEST). Only used for 'time'.)
type: string default: ""
-sighup_effect (Optional; action to take when a SIGHUP signal is received:
snapshot, stop or none.) type: string default: "snapshot"
-sigint_effect (Optional; action to take when a SIGINT signal is received:
snapshot, stop or none.) type: string default: "stop"
-snapshot (Optional; the snapshot solver state to resume training.)
type: string default: ""
-solver (The solver definition protocol buffer text file.) type: string
default: ""
-stage (Optional; network stages (not to be confused with phase), separated
by ','.) type: string default: ""
-weights (Optional; the pretrained weights to initialize finetuning,
separated by ','. Cannot be set simultaneously with snapshot.)
type: string default: ""
currently: "examples/mnist/lenet_iter_10000.caffemodel"
2. 训练过程输出
I1228 12:42:22.826128 1478 layer_factory.hpp:77] Creating layer mnist
I1228 12:42:22.829514 1478 net.cpp:100] Creating Layer mnist
I1228 12:42:22.829562 1478 net.cpp:408] mnist -> data
I1228 12:42:22.829612 1478 net.cpp:408] mnist -> label
I1228 12:42:22.829762 1479 db_lmdb.cpp:35] Opened lmdb examples/mnist/mnist_train_lmdb
I1228 12:42:22.840992 1478 data_layer.cpp:41] output data size: 64,1,28,28
I1228 12:42:22.841316 1478 net.cpp:150] Setting up mnist
I1228 12:42:22.841362 1478 net.cpp:157] Top shape: 64 1 28 28 (50176)
I1228 12:42:22.841388 1478 net.cpp:157] Top shape: 64 (64)
I1228 12:42:22.841411 1478 net.cpp:165] Memory required for data: 200960
I1228 12:42:22.841439 1478 layer_factory.hpp:77] Creating layer conv1
I1228 12:42:22.841478 1478 net.cpp:100] Creating Layer conv1
I1228 12:42:22.841506 1478 net.cpp:434] conv1 <- data
I1228 12:42:22.841538 1478 net.cpp:408] conv1 -> conv1
I1228 12:42:22.842957 1478 net.cpp:150] Setting up conv1
I1228 12:42:22.842998 1478 net.cpp:157] Top shape: 64 20 24 24 (737280)
I1228 12:42:22.843020 1478 net.cpp:165] Memory required for data: 3150080
I1228 12:42:22.843055 1478 layer_factory.hpp:77] Creating layer pool1
I1228 12:42:22.843087 1478 net.cpp:100] Creating Layer pool1
I1228 12:42:22.843112 1478 net.cpp:434] pool1 <- conv1
I1228 12:42:22.843163 1478 net.cpp:408] pool1 -> pool1
I1228 12:42:22.843204 1478 net.cpp:150] Setting up pool1
I1228 12:42:22.843231 1478 net.cpp:157] Top shape: 64 20 12 12 (184320)
I1228 12:42:22.843253 1478 net.cpp:165] Memory required for data: 3887360
I1228 12:42:22.843276 1478 layer_factory.hpp:77] Creating layer conv2
I1228 12:42:22.843304 1478 net.cpp:100] Creating Layer conv2
I1228 12:42:22.843328 1478 net.cpp:434] conv2 <- pool1
I1228 12:42:22.843353 1478 net.cpp:408] conv2 -> conv2
I1228 12:42:22.843607 1478 net.cpp:150] Setting up conv2
I1228 12:42:22.843641 1478 net.cpp:157] Top shape: 64 50 8 8 (204800)
I1228 12:42:22.843663 1478 net.cpp:165] Memory required for data: 4706560
I1228 12:42:22.843691 1478 layer_factory.hpp:77] Creating layer pool2
I1228 12:42:22.843719 1478 net.cpp:100] Creating Layer pool2
I1228 12:42:22.843742 1478 net.cpp:434] pool2 <- conv2
I1228 12:42:22.843767 1478 net.cpp:408] pool2 -> pool2
I1228 12:42:22.843796 1478 net.cpp:150] Setting up pool2
I1228 12:42:22.843822 1478 net.cpp:157] Top shape: 64 50 4 4 (51200)
I1228 12:42:22.843844 1478 net.cpp:165] Memory required for data: 4911360
I1228 12:42:22.843868 1478 layer_factory.hpp:77] Creating layer ip1
I1228 12:42:22.843897 1478 net.cpp:100] Creating Layer ip1
I1228 12:42:22.843921 1478 net.cpp:434] ip1 <- pool2
I1228 12:42:22.843946 1478 net.cpp:408] ip1 -> ip1
I1228 12:42:22.847321 1478 net.cpp:150] Setting up ip1
I1228 12:42:22.847625 1478 net.cpp:157] Top shape: 64 500 (32000)
I1228 12:42:22.847650 1478 net.cpp:165] Memory required for data: 5039360
I1228 12:42:22.847679 1478 layer_factory.hpp:77] Creating layer relu1
I1228 12:42:22.847707 1478 net.cpp:100] Creating Layer relu1
I1228 12:42:22.847731 1478 net.cpp:434] relu1 <- ip1
I1228 12:42:22.847756 1478 net.cpp:395] relu1 -> ip1 (in-place)
I1228 12:42:22.847787 1478 net.cpp:150] Setting up relu1
I1228 12:42:22.847815 1478 net.cpp:157] Top shape: 64 500 (32000)
I1228 12:42:22.847836 1478 net.cpp:165] Memory required for data: 5167360
I1228 12:42:22.847859 1478 layer_factory.hpp:77] Creating layer ip2
I1228 12:42:22.847885 1478 net.cpp:100] Creating Layer ip2
I1228 12:42:22.847909 1478 net.cpp:434] ip2 <- ip1
I1228 12:42:22.847935 1478 net.cpp:408] ip2 -> ip2
I1228 12:42:22.848016 1478 net.cpp:150] Setting up ip2
I1228 12:42:22.848045 1478 net.cpp:157] Top shape: 64 10 (640)
I1228 12:42:22.848068 1478 net.cpp:165] Memory required for data: 5169920
I1228 12:42:22.848093 1478 layer_factory.hpp:77] Creating layer loss
I1228 12:42:22.848122 1478 net.cpp:100] Creating Layer loss
I1228 12:42:22.848145 1478 net.cpp:434] loss <- ip2
I1228 12:42:22.848168 1478 net.cpp:434] loss <- label
I1228 12:42:22.848194 1478 net.cpp:408] loss -> loss
I1228 12:42:22.848229 1478 layer_factory.hpp:77] Creating layer loss
I1228 12:42:22.848270 1478 net.cpp:150] Setting up loss
I1228 12:42:22.848297 1478 net.cpp:157] Top shape: (1)
I1228 12:42:22.848318 1478 net.cpp:160] with loss weight 1
I1228 12:42:22.848358 1478 net.cpp:165] Memory required for data: 5169924
I1228 12:42:22.848381 1478 net.cpp:226] loss needs backward computation.
I1228 12:42:22.848404 1478 net.cpp:226] ip2 needs backward computation.
I1228 12:42:22.848426 1478 net.cpp:226] relu1 needs backward computation.
I1228 12:42:22.848448 1478 net.cpp:226] ip1 needs backward computation.
I1228 12:42:22.848470 1478 net.cpp:226] pool2 needs backward computation.
I1228 12:42:22.848492 1478 net.cpp:226] conv2 needs backward computation.
I1228 12:42:22.848515 1478 net.cpp:226] pool1 needs backward computation.
I1228 12:42:22.848541 1478 net.cpp:226] conv1 needs backward computation.
I1228 12:42:22.848563 1478 net.cpp:228] mnist does not need backward computation.
I1228 12:42:22.848585 1478 net.cpp:270] This network produces output loss
I1228 12:42:22.848613 1478 net.cpp:283] Network initialization done.
I1228 12:42:22.850889 1478 solver.cpp:181] Creating test net (#0) specified by net file: examples/mnist/lenet_train_test.prototxt
I1228 12:42:22.850950 1478 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer mnist
I1228 12:42:22.851075 1478 net.cpp:58] Initializing net from parameters:
name: "LeNet"
state {
phase: TEST
}
layer {
name: "mnist"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
scale: 0.00390625
}
data_param {
source: "examples/mnist/mnist_test_lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 20
kernel_size: 5
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 50
kernel_size: 5
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool2"
top: "ip1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 500
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "ip1"
top: "ip1"
}
layer {
name: "ip2"
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 10
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "ip2"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
}
I1228 12:42:22.852957 1478 layer_factory.hpp:77] Creating layer mnist
I1228 12:42:22.853076 1478 net.cpp:100] Creating Layer mnist
I1228 12:42:22.853106 1478 net.cpp:408] mnist -> data
I1228 12:42:22.853135 1478 net.cpp:408] mnist -> label
I1228 12:42:22.853212 1481 db_lmdb.cpp:35] Opened lmdb examples/mnist/mnist_test_lmdb
I1228 12:42:22.853286 1478 data_layer.cpp:41] output data size: 100,1,28,28
I1228 12:42:22.853688 1478 net.cpp:150] Setting up mnist
I1228 12:42:22.853718 1478 net.cpp:157] Top shape: 100 1 28 28 (78400)
I1228 12:42:22.853739 1478 net.cpp:157] Top shape: 100 (100)
I1228 12:42:22.853757 1478 net.cpp:165] Memory required for data: 314000
I1228 12:42:22.853777 1478 layer_factory.hpp:77] Creating layer label_mnist_1_split
I1228 12:42:22.853802 1478 net.cpp:100] Creating Layer label_mnist_1_split
I1228 12:42:22.853821 1478 net.cpp:434] label_mnist_1_split <- label
I1228 12:42:22.853842 1478 net.cpp:408] label_mnist_1_split -> label_mnist_1_split_0
I1228 12:42:22.853866 1478 net.cpp:408] label_mnist_1_split -> label_mnist_1_split_1
I1228 12:42:22.853889 1478 net.cpp:150] Setting up label_mnist_1_split
I1228 12:42:22.853909 1478 net.cpp:157] Top shape: 100 (100)
I1228 12:42:22.853925 1478 net.cpp:157] Top shape: 100 (100)
I1228 12:42:22.853940 1478 net.cpp:165] Memory required for data: 314800
I1228 12:42:22.853955 1478 layer_factory.hpp:77] Creating layer conv1
I1228 12:42:22.853977 1478 net.cpp:100] Creating Layer conv1
I1228 12:42:22.853994 1478 net.cpp:434] conv1 <- data
I1228 12:42:22.854012 1478 net.cpp:408] conv1 -> conv1
I1228 12:42:22.854054 1478 net.cpp:150] Setting up conv1
I1228 12:42:22.854075 1478 net.cpp:157] Top shape: 100 20 24 24 (1152000)
I1228 12:42:22.854090 1478 net.cpp:165] Memory required for data: 4922800
I1228 12:42:22.854111 1478 layer_factory.hpp:77] Creating layer pool1
I1228 12:42:22.854132 1478 net.cpp:100] Creating Layer pool1
I1228 12:42:22.854161 1478 net.cpp:434] pool1 <- conv1
I1228 12:42:22.854178 1478 net.cpp:408] pool1 -> pool1
I1228 12:42:22.854200 1478 net.cpp:150] Setting up pool1
I1228 12:42:22.854220 1478 net.cpp:157] Top shape: 100 20 12 12 (288000)
I1228 12:42:22.854235 1478 net.cpp:165] Memory required for data: 6074800
I1228 12:42:22.854250 1478 layer_factory.hpp:77] Creating layer conv2
I1228 12:42:22.854270 1478 net.cpp:100] Creating Layer conv2
I1228 12:42:22.854286 1478 net.cpp:434] conv2 <- pool1
I1228 12:42:22.854306 1478 net.cpp:408] conv2 -> conv2
I1228 12:42:22.854542 1478 net.cpp:150] Setting up conv2
I1228 12:42:22.854564 1478 net.cpp:157] Top shape: 100 50 8 8 (320000)
I1228 12:42:22.854579 1478 net.cpp:165] Memory required for data: 7354800
I1228 12:42:22.854599 1478 layer_factory.hpp:77] Creating layer pool2
I1228 12:42:22.854617 1478 net.cpp:100] Creating Layer pool2
I1228 12:42:22.854636 1478 net.cpp:434] pool2 <- conv2
I1228 12:42:22.854655 1478 net.cpp:408] pool2 -> pool2
I1228 12:42:22.854677 1478 net.cpp:150] Setting up pool2
I1228 12:42:22.854694 1478 net.cpp:157] Top shape: 100 50 4 4 (80000)
I1228 12:42:22.854709 1478 net.cpp:165] Memory required for data: 7674800
I1228 12:42:22.854723 1478 layer_factory.hpp:77] Creating layer ip1
I1228 12:42:22.854742 1478 net.cpp:100] Creating Layer ip1
I1228 12:42:22.854759 1478 net.cpp:434] ip1 <- pool2
I1228 12:42:22.854776 1478 net.cpp:408] ip1 -> ip1
I1228 12:42:22.858683 1478 net.cpp:150] Setting up ip1
I1228 12:42:22.858729 1478 net.cpp:157] Top shape: 100 500 (50000)
I1228 12:42:22.858747 1478 net.cpp:165] Memory required for data: 7874800
I1228 12:42:22.858768 1478 layer_factory.hpp:77] Creating layer relu1
I1228 12:42:22.858789 1478 net.cpp:100] Creating Layer relu1
I1228 12:42:22.858805 1478 net.cpp:434] relu1 <- ip1
I1228 12:42:22.858822 1478 net.cpp:395] relu1 -> ip1 (in-place)
I1228 12:42:22.858842 1478 net.cpp:150] Setting up relu1
I1228 12:42:22.858860 1478 net.cpp:157] Top shape: 100 500 (50000)
I1228 12:42:22.858873 1478 net.cpp:165] Memory required for data: 8074800
I1228 12:42:22.858888 1478 layer_factory.hpp:77] Creating layer ip2
I1228 12:42:22.858908 1478 net.cpp:100] Creating Layer ip2
I1228 12:42:22.858924 1478 net.cpp:434] ip2 <- ip1
I1228 12:42:22.858942 1478 net.cpp:408] ip2 -> ip2
I1228 12:42:22.859010 1478 net.cpp:150] Setting up ip2
I1228 12:42:22.859030 1478 net.cpp:157] Top shape: 100 10 (1000)
I1228 12:42:22.859045 1478 net.cpp:165] Memory required for data: 8078800
I1228 12:42:22.859061 1478 layer_factory.hpp:77] Creating layer ip2_ip2_0_split
I1228 12:42:22.859079 1478 net.cpp:100] Creating Layer ip2_ip2_0_split
I1228 12:42:22.859094 1478 net.cpp:434] ip2_ip2_0_split <- ip2
I1228 12:42:22.859110 1478 net.cpp:408] ip2_ip2_0_split -> ip2_ip2_0_split_0
I1228 12:42:22.859129 1478 net.cpp:408] ip2_ip2_0_split -> ip2_ip2_0_split_1
I1228 12:42:22.859148 1478 net.cpp:150] Setting up ip2_ip2_0_split
I1228 12:42:22.859164 1478 net.cpp:157] Top shape: 100 10 (1000)
I1228 12:42:22.859186 1478 net.cpp:157] Top shape: 100 10 (1000)
I1228 12:42:22.859201 1478 net.cpp:165] Memory required for data: 8086800
I1228 12:42:22.859216 1478 layer_factory.hpp:77] Creating layer accuracy
I1228 12:42:22.859237 1478 net.cpp:100] Creating Layer accuracy
I1228 12:42:22.859253 1478 net.cpp:434] accuracy <- ip2_ip2_0_split_0
I1228 12:42:22.859268 1478 net.cpp:434] accuracy <- label_mnist_1_split_0
I1228 12:42:22.859288 1478 net.cpp:408] accuracy -> accuracy
I1228 12:42:22.888463 1478 net.cpp:150] Setting up accuracy
I1228 12:42:22.888525 1478 net.cpp:157] Top shape: (1)
I1228 12:42:22.888541 1478 net.cpp:165] Memory required for data: 8086804
I1228 12:42:22.888559 1478 layer_factory.hpp:77] Creating layer loss
I1228 12:42:22.888581 1478 net.cpp:100] Creating Layer loss
I1228 12:42:22.888599 1478 net.cpp:434] loss <- ip2_ip2_0_split_1
I1228 12:42:22.888617 1478 net.cpp:434] loss <- label_mnist_1_split_1
I1228 12:42:22.888635 1478 net.cpp:408] loss -> loss
I1228 12:42:22.888660 1478 layer_factory.hpp:77] Creating layer loss
I1228 12:42:22.888725 1478 net.cpp:150] Setting up loss
I1228 12:42:22.888746 1478 net.cpp:157] Top shape: (1)
I1228 12:42:22.888761 1478 net.cpp:160] with loss weight 1
I1228 12:42:22.888783 1478 net.cpp:165] Memory required for data: 8086808
I1228 12:42:22.888799 1478 net.cpp:226] loss needs backward computation.
I1228 12:42:22.888815 1478 net.cpp:228] accuracy does not need backward computation.
I1228 12:42:22.888831 1478 net.cpp:226] ip2_ip2_0_split needs backward computation.
I1228 12:42:22.888846 1478 net.cpp:226] ip2 needs backward computation.
I1228 12:42:22.888862 1478 net.cpp:226] relu1 needs backward computation.
I1228 12:42:22.888877 1478 net.cpp:226] ip1 needs backward computation.
I1228 12:42:22.888892 1478 net.cpp:226] pool2 needs backward computation.
I1228 12:42:22.888909 1478 net.cpp:226] conv2 needs backward computation.
I1228 12:42:22.888926 1478 net.cpp:226] pool1 needs backward computation.
I1228 12:42:22.888941 1478 net.cpp:226] conv1 needs backward computation.
I1228 12:42:22.888957 1478 net.cpp:228] label_mnist_1_split does not need backward computation.
I1228 12:42:22.888972 1478 net.cpp:228] mnist does not need backward computation.
I1228 12:42:22.888988 1478 net.cpp:270] This network produces output accuracy
I1228 12:42:22.889003 1478 net.cpp:270] This network produces output loss
I1228 12:42:22.889026 1478 net.cpp:283] Network initialization done.
I1228 12:42:22.889114 1478 solver.cpp:60] Solver scaffolding done.
I1228 12:42:22.889156 1478 caffe.cpp:251] Starting Optimization
I1228 12:42:22.889174 1478 solver.cpp:279] Solving LeNet
I1228 12:42:22.889189 1478 solver.cpp:280] Learning Rate Policy: inv
I1228 12:42:22.889972 1478 solver.cpp:337] Iteration 0, Testing net (#0)
I1228 12:42:28.686501 1478 solver.cpp:404] Test net output #0: accuracy = 0.0946
I1228 12:42:28.686633 1478 solver.cpp:404] Test net output #1: loss = 2.33963 (* 1 = 2.33963 loss)
I1228 12:42:28.778022 1478 solver.cpp:228] Iteration 0, loss = 2.30006
I1228 12:42:28.778156 1478 solver.cpp:244] Train net output #0: loss = 2.30006 (* 1 = 2.30006 loss)
I1228 12:42:28.778203 1478 sgd_solver.cpp:106] Iteration 0, lr = 0.01
I1228 12:42:37.817469 1478 solver.cpp:228] Iteration 100, loss = 0.18433
I1228 12:42:37.817602 1478 solver.cpp:244] Train net output #0: loss = 0.184329 (* 1 = 0.184329 loss)
I1228 12:42:37.817647 1478 sgd_solver.cpp:106] Iteration 100, lr = 0.00992565
I1228 12:42:46.837074 1478 solver.cpp:228] Iteration 200, loss = 0.128627
I1228 12:42:46.837229 1478 solver.cpp:244] Train net output #0: loss = 0.128627 (* 1 = 0.128627 loss)
I1228 12:42:46.837281 1478 sgd_solver.cpp:106] Iteration 200, lr = 0.00985258
I1228 12:42:55.860499 1478 solver.cpp:228] Iteration 300, loss = 0.184737
I1228 12:42:55.860676 1478 solver.cpp:244] Train net output #0: loss = 0.184737 (* 1 = 0.184737 loss)
I1228 12:42:55.860715 1478 sgd_solver.cpp:106] Iteration 300, lr = 0.00978075
I1228 12:43:04.895009 1478 solver.cpp:228] Iteration 400, loss = 0.072573
I1228 12:43:04.895154 1478 solver.cpp:244] Train net output #0: loss = 0.072573 (* 1 = 0.072573 loss)
I1228 12:43:04.895196 1478 sgd_solver.cpp:106] Iteration 400, lr = 0.00971013
I1228 12:43:13.807948 1478 solver.cpp:337] Iteration 500, Testing net (#0)
I1228 12:43:19.521517 1478 solver.cpp:404] Test net output #0: accuracy = 0.9725
I1228 12:43:19.521663 1478 solver.cpp:404] Test net output #1: loss = 0.0851369 (* 1 = 0.0851369 loss)
I1228 12:43:19.610584 1478 solver.cpp:228] Iteration 500, loss = 0.0924923
I1228 12:43:19.610713 1478 solver.cpp:244] Train net output #0: loss = 0.0924922 (* 1 = 0.0924922 loss)
I1228 12:43:19.610759 1478 sgd_solver.cpp:106] Iteration 500, lr = 0.00964069
I1228 12:43:28.614856 1478 solver.cpp:228] Iteration 600, loss = 0.0846298
I1228 12:43:28.615057 1478 solver.cpp:244] Train net output #0: loss = 0.0846298 (* 1 = 0.0846298 loss)
I1228 12:43:28.615099 1478 sgd_solver.cpp:106] Iteration 600, lr = 0.0095724
I1228 12:43:37.611666 1478 solver.cpp:228] Iteration 700, loss = 0.152674
I1228 12:43:37.611802 1478 solver.cpp:244] Train net output #0: loss = 0.152674 (* 1 = 0.152674 loss)
I1228 12:43:37.611852 1478 sgd_solver.cpp:106] Iteration 700, lr = 0.00950522
I1228 12:43:46.585642 1478 solver.cpp:228] Iteration 800, loss = 0.197292
I1228 12:43:46.585780 1478 solver.cpp:244] Train net output #0: loss = 0.197292 (* 1 = 0.197292 loss)
I1228 12:43:46.585820 1478 sgd_solver.cpp:106] Iteration 800, lr = 0.00943913
I1228 12:43:55.564452 1478 solver.cpp:228] Iteration 900, loss = 0.229118
I1228 12:43:55.564601 1478 solver.cpp:244] Train net output #0: loss = 0.229118 (* 1 = 0.229118 loss)
I1228 12:43:55.564649 1478 sgd_solver.cpp:106] Iteration 900, lr = 0.00937411
I1228 12:44:04.491788 1478 solver.cpp:337] Iteration 1000, Testing net (#0)
I1228 12:44:10.195562 1478 solver.cpp:404] Test net output #0: accuracy = 0.9813
I1228 12:44:10.195693 1478 solver.cpp:404] Test net output #1: loss = 0.0588604 (* 1 = 0.0588604 loss)
I1228 12:44:10.284278 1478 solver.cpp:228] Iteration 1000, loss = 0.122648
I1228 12:44:10.284402 1478 solver.cpp:244] Train net output #0: loss = 0.122648 (* 1 = 0.122648 loss)
I1228 12:44:10.284446 1478 sgd_solver.cpp:106] Iteration 1000, lr = 0.00931012
I1228 12:44:19.278929 1478 solver.cpp:228] Iteration 1100, loss = 0.00652424
I1228 12:44:19.279067 1478 solver.cpp:244] Train net output #0: loss = 0.00652422 (* 1 = 0.00652422 loss)
I1228 12:44:19.279112 1478 sgd_solver.cpp:106] Iteration 1100, lr = 0.00924715
I1228 12:44:28.262037 1478 solver.cpp:228] Iteration 1200, loss = 0.0227062
I1228 12:44:28.262176 1478 solver.cpp:244] Train net output #0: loss = 0.0227062 (* 1 = 0.0227062 loss)
I1228 12:44:28.262217 1478 sgd_solver.cpp:106] Iteration 1200, lr = 0.00918515
I1228 12:44:37.261036 1478 solver.cpp:228] Iteration 1300, loss = 0.0162803
I1228 12:44:37.261204 1478 solver.cpp:244] Train net output #0: loss = 0.0162802 (* 1 = 0.0162802 loss)
I1228 12:44:37.261263 1478 sgd_solver.cpp:106] Iteration 1300, lr = 0.00912412
I1228 12:44:46.274796 1478 solver.cpp:228] Iteration 1400, loss = 0.0083404
I1228 12:44:46.274945 1478 solver.cpp:244] Train net output #0: loss = 0.0083403 (* 1 = 0.0083403 loss)
I1228 12:44:46.274991 1478 sgd_solver.cpp:106] Iteration 1400, lr = 0.00906403
I1228 12:44:55.182224 1478 solver.cpp:337] Iteration 1500, Testing net (#0)
I1228 12:45:00.875882 1478 solver.cpp:404] Test net output #0: accuracy = 0.9858
I1228 12:45:00.876024 1478 solver.cpp:404] Test net output #1: loss = 0.0460209 (* 1 = 0.0460209 loss)
I1228 12:45:00.964514 1478 solver.cpp:228] Iteration 1500, loss = 0.0671758
I1228 12:45:00.964635 1478 solver.cpp:244] Train net output #0: loss = 0.0671757 (* 1 = 0.0671757 loss)
I1228 12:45:00.964679 1478 sgd_solver.cpp:106] Iteration 1500, lr = 0.00900485
I1228 12:45:10.080106 1478 solver.cpp:228] Iteration 1600, loss = 0.106931
I1228 12:45:10.080278 1478 solver.cpp:244] Train net output #0: loss = 0.106931 (* 1 = 0.106931 loss)
I1228 12:45:10.080320 1478 sgd_solver.cpp:106] Iteration 1600, lr = 0.00894657
I1228 12:45:19.073309 1478 solver.cpp:228] Iteration 1700, loss = 0.0179893
I1228 12:45:19.073454 1478 solver.cpp:244] Train net output #0: loss = 0.0179892 (* 1 = 0.0179892 loss)
I1228 12:45:19.073495 1478 sgd_solver.cpp:106] Iteration 1700, lr = 0.00888916
I1228 12:45:28.069286 1478 solver.cpp:228] Iteration 1800, loss = 0.0228243
I1228 12:45:28.069427 1478 solver.cpp:244] Train net output #0: loss = 0.0228242 (* 1 = 0.0228242 loss)
I1228 12:45:28.069473 1478 sgd_solver.cpp:106] Iteration 1800, lr = 0.0088326
I1228 12:45:37.070579 1478 solver.cpp:228] Iteration 1900, loss = 0.124677
I1228 12:45:37.070720 1478 solver.cpp:244] Train net output #0: loss = 0.124677 (* 1 = 0.124677 loss)
I1228 12:45:37.070765 1478 sgd_solver.cpp:106] Iteration 1900, lr = 0.00877687
I1228 12:45:45.984365 1478 solver.cpp:337] Iteration 2000, Testing net (#0)
I1228 12:45:51.697198 1478 solver.cpp:404] Test net output #0: accuracy = 0.9848
I1228 12:45:51.697345 1478 solver.cpp:404] Test net output #1: loss = 0.0456813 (* 1 = 0.0456813 loss)
I1228 12:45:51.786160 1478 solver.cpp:228] Iteration 2000, loss = 0.0130174
I1228 12:45:51.786289 1478 solver.cpp:244] Train net output #0: loss = 0.0130173 (* 1 = 0.0130173 loss)
I1228 12:45:51.786335 1478 sgd_solver.cpp:106] Iteration 2000, lr = 0.00872196
I1228 12:46:00.796350 1478 solver.cpp:228] Iteration 2100, loss = 0.0242315
I1228 12:46:00.796484 1478 solver.cpp:244] Train net output #0: loss = 0.0242314 (* 1 = 0.0242314 loss)
I1228 12:46:00.796530 1478 sgd_solver.cpp:106] Iteration 2100, lr = 0.00866784
I1228 12:46:09.774925 1478 solver.cpp:228] Iteration 2200, loss = 0.0239079
I1228 12:46:09.775059 1478 solver.cpp:244] Train net output #0: loss = 0.0239079 (* 1 = 0.0239079 loss)
I1228 12:46:09.775106 1478 sgd_solver.cpp:106] Iteration 2200, lr = 0.0086145
I1228 12:46:18.773983 1478 solver.cpp:228] Iteration 2300, loss = 0.0938903
I1228 12:46:18.774143 1478 solver.cpp:244] Train net output #0: loss = 0.0938903 (* 1 = 0.0938903 loss)
I1228 12:46:18.774189 1478 sgd_solver.cpp:106] Iteration 2300, lr = 0.00856192
I1228 12:46:27.773385 1478 solver.cpp:228] Iteration 2400, loss = 0.0137466
I1228 12:46:27.773525 1478 solver.cpp:244] Train net output #0: loss = 0.0137466 (* 1 = 0.0137466 loss)
I1228 12:46:27.773576 1478 sgd_solver.cpp:106] Iteration 2400, lr = 0.00851008
(注:自本行起至文末的内容为上文第 2 部分(网络定义及训练日志)的逐行重复,疑为粘贴错误;原文按原样保留如下。)
num_output: 10
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "ip2"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
}
I1228 12:42:22.852957 1478 layer_factory.hpp:77] Creating layer mnist
I1228 12:42:22.853076 1478 net.cpp:100] Creating Layer mnist
I1228 12:42:22.853106 1478 net.cpp:408] mnist -> data
I1228 12:42:22.853135 1478 net.cpp:408] mnist -> label
I1228 12:42:22.853212 1481 db_lmdb.cpp:35] Opened lmdb examples/mnist/mnist_test_lmdb
I1228 12:42:22.853286 1478 data_layer.cpp:41] output data size: 100,1,28,28
I1228 12:42:22.853688 1478 net.cpp:150] Setting up mnist
I1228 12:42:22.853718 1478 net.cpp:157] Top shape: 100 1 28 28 (78400)
I1228 12:42:22.853739 1478 net.cpp:157] Top shape: 100 (100)
I1228 12:42:22.853757 1478 net.cpp:165] Memory required for data: 314000
I1228 12:42:22.853777 1478 layer_factory.hpp:77] Creating layer label_mnist_1_split
I1228 12:42:22.853802 1478 net.cpp:100] Creating Layer label_mnist_1_split
I1228 12:42:22.853821 1478 net.cpp:434] label_mnist_1_split <- label
I1228 12:42:22.853842 1478 net.cpp:408] label_mnist_1_split -> label_mnist_1_split_0
I1228 12:42:22.853866 1478 net.cpp:408] label_mnist_1_split -> label_mnist_1_split_1
I1228 12:42:22.853889 1478 net.cpp:150] Setting up label_mnist_1_split
I1228 12:42:22.853909 1478 net.cpp:157] Top shape: 100 (100)
I1228 12:42:22.853925 1478 net.cpp:157] Top shape: 100 (100)
I1228 12:42:22.853940 1478 net.cpp:165] Memory required for data: 314800
I1228 12:42:22.853955 1478 layer_factory.hpp:77] Creating layer conv1
I1228 12:42:22.853977 1478 net.cpp:100] Creating Layer conv1
I1228 12:42:22.853994 1478 net.cpp:434] conv1 <- data
I1228 12:42:22.854012 1478 net.cpp:408] conv1 -> conv1
I1228 12:42:22.854054 1478 net.cpp:150] Setting up conv1
I1228 12:42:22.854075 1478 net.cpp:157] Top shape: 100 20 24 24 (1152000)
I1228 12:42:22.854090 1478 net.cpp:165] Memory required for data: 4922800
I1228 12:42:22.854111 1478 layer_factory.hpp:77] Creating layer pool1
I1228 12:42:22.854132 1478 net.cpp:100] Creating Layer pool1
I1228 12:42:22.854161 1478 net.cpp:434] pool1 <- conv1
I1228 12:42:22.854178 1478 net.cpp:408] pool1 -> pool1
I1228 12:42:22.854200 1478 net.cpp:150] Setting up pool1
I1228 12:42:22.854220 1478 net.cpp:157] Top shape: 100 20 12 12 (288000)
I1228 12:42:22.854235 1478 net.cpp:165] Memory required for data: 6074800
I1228 12:42:22.854250 1478 layer_factory.hpp:77] Creating layer conv2
I1228 12:42:22.854270 1478 net.cpp:100] Creating Layer conv2
I1228 12:42:22.854286 1478 net.cpp:434] conv2 <- pool1
I1228 12:42:22.854306 1478 net.cpp:408] conv2 -> conv2
I1228 12:42:22.854542 1478 net.cpp:150] Setting up conv2
I1228 12:42:22.854564 1478 net.cpp:157] Top shape: 100 50 8 8 (320000)
I1228 12:42:22.854579 1478 net.cpp:165] Memory required for data: 7354800
I1228 12:42:22.854599 1478 layer_factory.hpp:77] Creating layer pool2
I1228 12:42:22.854617 1478 net.cpp:100] Creating Layer pool2
I1228 12:42:22.854636 1478 net.cpp:434] pool2 <- conv2
I1228 12:42:22.854655 1478 net.cpp:408] pool2 -> pool2
I1228 12:42:22.854677 1478 net.cpp:150] Setting up pool2
I1228 12:42:22.854694 1478 net.cpp:157] Top shape: 100 50 4 4 (80000)
I1228 12:42:22.854709 1478 net.cpp:165] Memory required for data: 7674800
I1228 12:42:22.854723 1478 layer_factory.hpp:77] Creating layer ip1
I1228 12:42:22.854742 1478 net.cpp:100] Creating Layer ip1
I1228 12:42:22.854759 1478 net.cpp:434] ip1 <- pool2
I1228 12:42:22.854776 1478 net.cpp:408] ip1 -> ip1
I1228 12:42:22.858683 1478 net.cpp:150] Setting up ip1
I1228 12:42:22.858729 1478 net.cpp:157] Top shape: 100 500 (50000)
I1228 12:42:22.858747 1478 net.cpp:165] Memory required for data: 7874800
I1228 12:42:22.858768 1478 layer_factory.hpp:77] Creating layer relu1
I1228 12:42:22.858789 1478 net.cpp:100] Creating Layer relu1
I1228 12:42:22.858805 1478 net.cpp:434] relu1 <- ip1
I1228 12:42:22.858822 1478 net.cpp:395] relu1 -> ip1 (in-place)
I1228 12:42:22.858842 1478 net.cpp:150] Setting up relu1
I1228 12:42:22.858860 1478 net.cpp:157] Top shape: 100 500 (50000)
I1228 12:42:22.858873 1478 net.cpp:165] Memory required for data: 8074800
I1228 12:42:22.858888 1478 layer_factory.hpp:77] Creating layer ip2
I1228 12:42:22.858908 1478 net.cpp:100] Creating Layer ip2
I1228 12:42:22.858924 1478 net.cpp:434] ip2 <- ip1
I1228 12:42:22.858942 1478 net.cpp:408] ip2 -> ip2
I1228 12:42:22.859010 1478 net.cpp:150] Setting up ip2
I1228 12:42:22.859030 1478 net.cpp:157] Top shape: 100 10 (1000)
I1228 12:42:22.859045 1478 net.cpp:165] Memory required for data: 8078800
I1228 12:42:22.859061 1478 layer_factory.hpp:77] Creating layer ip2_ip2_0_split
I1228 12:42:22.859079 1478 net.cpp:100] Creating Layer ip2_ip2_0_split
I1228 12:42:22.859094 1478 net.cpp:434] ip2_ip2_0_split <- ip2
I1228 12:42:22.859110 1478 net.cpp:408] ip2_ip2_0_split -> ip2_ip2_0_split_0
I1228 12:42:22.859129 1478 net.cpp:408] ip2_ip2_0_split -> ip2_ip2_0_split_1
I1228 12:42:22.859148 1478 net.cpp:150] Setting up ip2_ip2_0_split
I1228 12:42:22.859164 1478 net.cpp:157] Top shape: 100 10 (1000)
I1228 12:42:22.859186 1478 net.cpp:157] Top shape: 100 10 (1000)
I1228 12:42:22.859201 1478 net.cpp:165] Memory required for data: 8086800
I1228 12:42:22.859216 1478 layer_factory.hpp:77] Creating layer accuracy
I1228 12:42:22.859237 1478 net.cpp:100] Creating Layer accuracy
I1228 12:42:22.859253 1478 net.cpp:434] accuracy <- ip2_ip2_0_split_0
I1228 12:42:22.859268 1478 net.cpp:434] accuracy <- label_mnist_1_split_0
I1228 12:42:22.859288 1478 net.cpp:408] accuracy -> accuracy
I1228 12:42:22.888463 1478 net.cpp:150] Setting up accuracy
I1228 12:42:22.888525 1478 net.cpp:157] Top shape: (1)
I1228 12:42:22.888541 1478 net.cpp:165] Memory required for data: 8086804
I1228 12:42:22.888559 1478 layer_factory.hpp:77] Creating layer loss
I1228 12:42:22.888581 1478 net.cpp:100] Creating Layer loss
I1228 12:42:22.888599 1478 net.cpp:434] loss <- ip2_ip2_0_split_1
I1228 12:42:22.888617 1478 net.cpp:434] loss <- label_mnist_1_split_1
I1228 12:42:22.888635 1478 net.cpp:408] loss -> loss
I1228 12:42:22.888660 1478 layer_factory.hpp:77] Creating layer loss
I1228 12:42:22.888725 1478 net.cpp:150] Setting up loss
I1228 12:42:22.888746 1478 net.cpp:157] Top shape: (1)
I1228 12:42:22.888761 1478 net.cpp:160] with loss weight 1
I1228 12:42:22.888783 1478 net.cpp:165] Memory required for data: 8086808
I1228 12:42:22.888799 1478 net.cpp:226] loss needs backward computation.
I1228 12:42:22.888815 1478 net.cpp:228] accuracy does not need backward computation.
I1228 12:42:22.888831 1478 net.cpp:226] ip2_ip2_0_split needs backward computation.
I1228 12:42:22.888846 1478 net.cpp:226] ip2 needs backward computation.
I1228 12:42:22.888862 1478 net.cpp:226] relu1 needs backward computation.
I1228 12:42:22.888877 1478 net.cpp:226] ip1 needs backward computation.
I1228 12:42:22.888892 1478 net.cpp:226] pool2 needs backward computation.
I1228 12:42:22.888909 1478 net.cpp:226] conv2 needs backward computation.
I1228 12:42:22.888926 1478 net.cpp:226] pool1 needs backward computation.
I1228 12:42:22.888941 1478 net.cpp:226] conv1 needs backward computation.
I1228 12:42:22.888957 1478 net.cpp:228] label_mnist_1_split does not need backward computation.
I1228 12:42:22.888972 1478 net.cpp:228] mnist does not need backward computation.
I1228 12:42:22.888988 1478 net.cpp:270] This network produces output accuracy
I1228 12:42:22.889003 1478 net.cpp:270] This network produces output loss
I1228 12:42:22.889026 1478 net.cpp:283] Network initialization done.
I1228 12:42:22.889114 1478 solver.cpp:60] Solver scaffolding done.
I1228 12:42:22.889156 1478 caffe.cpp:251] Starting Optimization
I1228 12:42:22.889174 1478 solver.cpp:279] Solving LeNet
I1228 12:42:22.889189 1478 solver.cpp:280] Learning Rate Policy: inv
I1228 12:42:22.889972 1478 solver.cpp:337] Iteration 0, Testing net (#0)
I1228 12:42:28.686501 1478 solver.cpp:404] Test net output #0: accuracy = 0.0946
I1228 12:42:28.686633 1478 solver.cpp:404] Test net output #1: loss = 2.33963 (* 1 = 2.33963 loss)
I1228 12:42:28.778022 1478 solver.cpp:228] Iteration 0, loss = 2.30006
I1228 12:42:28.778156 1478 solver.cpp:244] Train net output #0: loss = 2.30006 (* 1 = 2.30006 loss)
I1228 12:42:28.778203 1478 sgd_solver.cpp:106] Iteration 0, lr = 0.01
I1228 12:42:37.817469 1478 solver.cpp:228] Iteration 100, loss = 0.18433
I1228 12:42:37.817602 1478 solver.cpp:244] Train net output #0: loss = 0.184329 (* 1 = 0.184329 loss)
I1228 12:42:37.817647 1478 sgd_solver.cpp:106] Iteration 100, lr = 0.00992565
I1228 12:42:46.837074 1478 solver.cpp:228] Iteration 200, loss = 0.128627
I1228 12:42:46.837229 1478 solver.cpp:244] Train net output #0: loss = 0.128627 (* 1 = 0.128627 loss)
I1228 12:42:46.837281 1478 sgd_solver.cpp:106] Iteration 200, lr = 0.00985258
I1228 12:42:55.860499 1478 solver.cpp:228] Iteration 300, loss = 0.184737
I1228 12:42:55.860676 1478 solver.cpp:244] Train net output #0: loss = 0.184737 (* 1 = 0.184737 loss)
I1228 12:42:55.860715 1478 sgd_solver.cpp:106] Iteration 300, lr = 0.00978075
I1228 12:43:04.895009 1478 solver.cpp:228] Iteration 400, loss = 0.072573
I1228 12:43:04.895154 1478 solver.cpp:244] Train net output #0: loss = 0.072573 (* 1 = 0.072573 loss)
I1228 12:43:04.895196 1478 sgd_solver.cpp:106] Iteration 400, lr = 0.00971013
I1228 12:43:13.807948 1478 solver.cpp:337] Iteration 500, Testing net (#0)
I1228 12:43:19.521517 1478 solver.cpp:404] Test net output #0: accuracy = 0.9725
I1228 12:43:19.521663 1478 solver.cpp:404] Test net output #1: loss = 0.0851369 (* 1 = 0.0851369 loss)
I1228 12:43:19.610584 1478 solver.cpp:228] Iteration 500, loss = 0.0924923
I1228 12:43:19.610713 1478 solver.cpp:244] Train net output #0: loss = 0.0924922 (* 1 = 0.0924922 loss)
I1228 12:43:19.610759 1478 sgd_solver.cpp:106] Iteration 500, lr = 0.00964069
I1228 12:43:28.614856 1478 solver.cpp:228] Iteration 600, loss = 0.0846298
I1228 12:43:28.615057 1478 solver.cpp:244] Train net output #0: loss = 0.0846298 (* 1 = 0.0846298 loss)
I1228 12:43:28.615099 1478 sgd_solver.cpp:106] Iteration 600, lr = 0.0095724
I1228 12:43:37.611666 1478 solver.cpp:228] Iteration 700, loss = 0.152674
I1228 12:43:37.611802 1478 solver.cpp:244] Train net output #0: loss = 0.152674 (* 1 = 0.152674 loss)
I1228 12:43:37.611852 1478 sgd_solver.cpp:106] Iteration 700, lr = 0.00950522
I1228 12:43:46.585642 1478 solver.cpp:228] Iteration 800, loss = 0.197292
I1228 12:43:46.585780 1478 solver.cpp:244] Train net output #0: loss = 0.197292 (* 1 = 0.197292 loss)
I1228 12:43:46.585820 1478 sgd_solver.cpp:106] Iteration 800, lr = 0.00943913
I1228 12:43:55.564452 1478 solver.cpp:228] Iteration 900, loss = 0.229118
I1228 12:43:55.564601 1478 solver.cpp:244] Train net output #0: loss = 0.229118 (* 1 = 0.229118 loss)
I1228 12:43:55.564649 1478 sgd_solver.cpp:106] Iteration 900, lr = 0.00937411
I1228 12:44:04.491788 1478 solver.cpp:337] Iteration 1000, Testing net (#0)
I1228 12:44:10.195562 1478 solver.cpp:404] Test net output #0: accuracy = 0.9813
I1228 12:44:10.195693 1478 solver.cpp:404] Test net output #1: loss = 0.0588604 (* 1 = 0.0588604 loss)
I1228 12:44:10.284278 1478 solver.cpp:228] Iteration 1000, loss = 0.122648
I1228 12:44:10.284402 1478 solver.cpp:244] Train net output #0: loss = 0.122648 (* 1 = 0.122648 loss)
I1228 12:44:10.284446 1478 sgd_solver.cpp:106] Iteration 1000, lr = 0.00931012
I1228 12:44:19.278929 1478 solver.cpp:228] Iteration 1100, loss = 0.00652424
I1228 12:44:19.279067 1478 solver.cpp:244] Train net output #0: loss = 0.00652422 (* 1 = 0.00652422 loss)
I1228 12:44:19.279112 1478 sgd_solver.cpp:106] Iteration 1100, lr = 0.00924715
I1228 12:44:28.262037 1478 solver.cpp:228] Iteration 1200, loss = 0.0227062
I1228 12:44:28.262176 1478 solver.cpp:244] Train net output #0: loss = 0.0227062 (* 1 = 0.0227062 loss)
I1228 12:44:28.262217 1478 sgd_solver.cpp:106] Iteration 1200, lr = 0.00918515
I1228 12:44:37.261036 1478 solver.cpp:228] Iteration 1300, loss = 0.0162803
I1228 12:44:37.261204 1478 solver.cpp:244] Train net output #0: loss = 0.0162802 (* 1 = 0.0162802 loss)
I1228 12:44:37.261263 1478 sgd_solver.cpp:106] Iteration 1300, lr = 0.00912412
I1228 12:44:46.274796 1478 solver.cpp:228] Iteration 1400, loss = 0.0083404
I1228 12:44:46.274945 1478 solver.cpp:244] Train net output #0: loss = 0.0083403 (* 1 = 0.0083403 loss)
I1228 12:44:46.274991 1478 sgd_solver.cpp:106] Iteration 1400, lr = 0.00906403
I1228 12:44:55.182224 1478 solver.cpp:337] Iteration 1500, Testing net (#0)
I1228 12:45:00.875882 1478 solver.cpp:404] Test net output #0: accuracy = 0.9858
I1228 12:45:00.876024 1478 solver.cpp:404] Test net output #1: loss = 0.0460209 (* 1 = 0.0460209 loss)
I1228 12:45:00.964514 1478 solver.cpp:228] Iteration 1500, loss = 0.0671758
I1228 12:45:00.964635 1478 solver.cpp:244] Train net output #0: loss = 0.0671757 (* 1 = 0.0671757 loss)
I1228 12:45:00.964679 1478 sgd_solver.cpp:106] Iteration 1500, lr = 0.00900485
I1228 12:45:10.080106 1478 solver.cpp:228] Iteration 1600, loss = 0.106931
I1228 12:45:10.080278 1478 solver.cpp:244] Train net output #0: loss = 0.106931 (* 1 = 0.106931 loss)
I1228 12:45:10.080320 1478 sgd_solver.cpp:106] Iteration 1600, lr = 0.00894657
I1228 12:45:19.073309 1478 solver.cpp:228] Iteration 1700, loss = 0.0179893
I1228 12:45:19.073454 1478 solver.cpp:244] Train net output #0: loss = 0.0179892 (* 1 = 0.0179892 loss)
I1228 12:45:19.073495 1478 sgd_solver.cpp:106] Iteration 1700, lr = 0.00888916
I1228 12:45:28.069286 1478 solver.cpp:228] Iteration 1800, loss = 0.0228243
I1228 12:45:28.069427 1478 solver.cpp:244] Train net output #0: loss = 0.0228242 (* 1 = 0.0228242 loss)
I1228 12:45:28.069473 1478 sgd_solver.cpp:106] Iteration 1800, lr = 0.0088326
I1228 12:45:37.070579 1478 solver.cpp:228] Iteration 1900, loss = 0.124677
I1228 12:45:37.070720 1478 solver.cpp:244] Train net output #0: loss = 0.124677 (* 1 = 0.124677 loss)
I1228 12:45:37.070765 1478 sgd_solver.cpp:106] Iteration 1900, lr = 0.00877687
I1228 12:45:45.984365 1478 solver.cpp:337] Iteration 2000, Testing net (#0)
I1228 12:45:51.697198 1478 solver.cpp:404] Test net output #0: accuracy = 0.9848
I1228 12:45:51.697345 1478 solver.cpp:404] Test net output #1: loss = 0.0456813 (* 1 = 0.0456813 loss)
I1228 12:45:51.786160 1478 solver.cpp:228] Iteration 2000, loss = 0.0130174
I1228 12:45:51.786289 1478 solver.cpp:244] Train net output #0: loss = 0.0130173 (* 1 = 0.0130173 loss)
I1228 12:45:51.786335 1478 sgd_solver.cpp:106] Iteration 2000, lr = 0.00872196
I1228 12:46:00.796350 1478 solver.cpp:228] Iteration 2100, loss = 0.0242315
I1228 12:46:00.796484 1478 solver.cpp:244] Train net output #0: loss = 0.0242314 (* 1 = 0.0242314 loss)
I1228 12:46:00.796530 1478 sgd_solver.cpp:106] Iteration 2100, lr = 0.00866784
I1228 12:46:09.774925 1478 solver.cpp:228] Iteration 2200, loss = 0.0239079
I1228 12:46:09.775059 1478 solver.cpp:244] Train net output #0: loss = 0.0239079 (* 1 = 0.0239079 loss)
I1228 12:46:09.775106 1478 sgd_solver.cpp:106] Iteration 2200, lr = 0.0086145
I1228 12:46:18.773983 1478 solver.cpp:228] Iteration 2300, loss = 0.0938903
I1228 12:46:18.774143 1478 solver.cpp:244] Train net output #0: loss = 0.0938903 (* 1 = 0.0938903 loss)
I1228 12:46:18.774189 1478 sgd_solver.cpp:106] Iteration 2300, lr = 0.00856192
I1228 12:46:27.773385 1478 solver.cpp:228] Iteration 2400, loss = 0.0137466
I1228 12:46:27.773525 1478 solver.cpp:244] Train net output #0: loss = 0.0137466 (* 1 = 0.0137466 loss)
I1228 12:46:27.773576 1478 sgd_solver.cpp:106] Iteration 2400, lr = 0.00851008
I1228 12:46:36.668257 1478 solver.cpp:337] Iteration 2500, Testing net (#0)
I1228 12:46:42.371877 1478 solver.cpp:404] Test net output #0: accuracy = 0.9871
I1228 12:46:42.372016 1478 solver.cpp:404] Test net output #1: loss = 0.0411888 (* 1 = 0.0411888 loss)
I1228 12:46:42.460232 1478 solver.cpp:228] Iteration 2500, loss = 0.026106
I1228 12:46:42.460366 1478 solver.cpp:244] Train net output #0: loss = 0.0261059 (* 1 = 0.0261059 loss)
I1228 12:46:42.460404 1478 sgd_solver.cpp:106] Iteration 2500, lr = 0.00845897
I1228 12:46:51.466620 1478 solver.cpp:228] Iteration 2600, loss = 0.0685898
I1228 12:46:51.466787 1478 solver.cpp:244] Train net output #0: loss = 0.0685898 (* 1 = 0.0685898 loss)
I1228 12:46:51.466833 1478 sgd_solver.cpp:106] Iteration 2600, lr = 0.00840857
I1228 12:47:00.456198 1478 solver.cpp:228] Iteration 2700, loss = 0.0783101
I1228 12:47:00.456333 1478 solver.cpp:244] Train net output #0: loss = 0.0783101 (* 1 = 0.0783101 loss)
I1228 12:47:00.456378 1478 sgd_solver.cpp:106] Iteration 2700, lr = 0.00835886
I1228 12:47:09.458505 1478 solver.cpp:228] Iteration 2800, loss = 0.00134948
I1228 12:47:09.458650 1478 solver.cpp:244] Train net output #0: loss = 0.00134948 (* 1 = 0.00134948 loss)
I1228 12:47:09.458689 1478 sgd_solver.cpp:106] Iteration 2800, lr = 0.00830984
I1228 12:47:18.460925 1478 solver.cpp:228] Iteration 2900, loss = 0.0173515
I1228 12:47:18.461069 1478 solver.cpp:244] Train net output #0: loss = 0.0173515 (* 1 = 0.0173515 loss)
I1228 12:47:18.461120 1478 sgd_solver.cpp:106] Iteration 2900, lr = 0.00826148
I1228 12:47:27.370965 1478 solver.cpp:337] Iteration 3000, Testing net (#0)
I1228 12:47:33.058574 1478 solver.cpp:404] Test net output #0: accuracy = 0.9878
I1228 12:47:33.058715 1478 solver.cpp:404] Test net output #1: loss = 0.0368021 (* 1 = 0.0368021 loss)
I1228 12:47:33.146149 1478 solver.cpp:228] Iteration 3000, loss = 0.0130952
I1228 12:47:33.146278 1478 solver.cpp:244] Train net output #0: loss = 0.0130951 (* 1 = 0.0130951 loss)
I1228 12:47:33.146327 1478 sgd_solver.cpp:106] Iteration 3000, lr = 0.00821377
I1228 12:47:42.119889 1478 solver.cpp:228] Iteration 3100, loss = 0.0163137
I1228 12:47:42.120026 1478 solver.cpp:244] Train net output #0: loss = 0.0163136 (* 1 = 0.0163136 loss)
I1228 12:47:42.120066 1478 sgd_solver.cpp:106] Iteration 3100, lr = 0.0081667
I1228 12:47:51.101455 1478 solver.cpp:228] Iteration 3200, loss = 0.00634108
I1228 12:47:51.101596 1478 solver.cpp:244] Train net output #0: loss = 0.00634104 (* 1 = 0.00634104 loss)
I1228 12:47:51.101644 1478 sgd_solver.cpp:106] Iteration 3200, lr = 0.00812025
I1228 12:48:00.102923 1478 solver.cpp:228] Iteration 3300, loss = 0.00822882
I1228 12:48:00.103108 1478 solver.cpp:244] Train net output #0: loss = 0.00822874 (* 1 = 0.00822874 loss)
I1228 12:48:00.103154 1478 sgd_solver.cpp:106] Iteration 3300, lr = 0.00807442
I1228 12:48:09.112653 1478 solver.cpp:228] Iteration 3400, loss = 0.00865752
I1228 12:48:09.112803 1478 solver.cpp:244] Train net output #0: loss = 0.00865745 (* 1 = 0.00865745 loss)
I1228 12:48:09.112845 1478 sgd_solver.cpp:106] Iteration 3400, lr = 0.00802918
I1228 12:48:18.008867 1478 solver.cpp:337] Iteration 3500, Testing net (#0)
I1228 12:48:23.709991 1478 solver.cpp:404] Test net output #0: accuracy = 0.9855
I1228 12:48:23.710139 1478 solver.cpp:404] Test net output #1: loss = 0.0418341 (* 1 = 0.0418341 loss)
I1228 12:48:23.797904 1478 solver.cpp:228] Iteration 3500, loss = 0.00539367
I1228 12:48:23.798038 1478 solver.cpp:244] Train net output #0: loss = 0.00539362 (* 1 = 0.00539362 loss)
I1228 12:48:23.798079 1478 sgd_solver.cpp:106] Iteration 3500, lr = 0.00798454
I1228 12:48:32.793352 1478 solver.cpp:228] Iteration 3600, loss = 0.0325999
I1228 12:48:32.793519 1478 solver.cpp:244] Train net output #0: loss = 0.0325999 (* 1 = 0.0325999 loss)
I1228 12:48:32.793565 1478 sgd_solver.cpp:106] Iteration 3600, lr = 0.00794046
I1228 12:48:41.778705 1478 solver.cpp:228] Iteration 3700, loss = 0.0312646
I1228 12:48:41.778842 1478 solver.cpp:244] Train net output #0: loss = 0.0312646 (* 1 = 0.0312646 loss)
I1228 12:48:41.778890 1478 sgd_solver.cpp:106] Iteration 3700, lr = 0.00789695
I1228 12:48:50.761559 1478 solver.cpp:228] Iteration 3800, loss = 0.0199443
I1228 12:48:50.761703 1478 solver.cpp:244] Train net output #0: loss = 0.0199443 (* 1 = 0.0199443 loss)
I1228 12:48:50.761754 1478 sgd_solver.cpp:106] Iteration 3800, lr = 0.007854
I1228 12:48:59.751957 1478 solver.cpp:228] Iteration 3900, loss = 0.0247828
I1228 12:48:59.752090 1478 solver.cpp:244] Train net output #0: loss = 0.0247827 (* 1 = 0.0247827 loss)
I1228 12:48:59.752137 1478 sgd_solver.cpp:106] Iteration 3900, lr = 0.00781158
I1228 12:49:08.663188 1478 solver.cpp:337] Iteration 4000, Testing net (#0)
I1228 12:49:14.386026 1478 solver.cpp:404] Test net output #0: accuracy = 0.9901
I1228 12:49:14.386165 1478 solver.cpp:404] Test net output #1: loss = 0.031397 (* 1 = 0.031397 loss)
I1228 12:49:14.474560 1478 solver.cpp:228] Iteration 4000, loss = 0.0101118
I1228 12:49:14.474681 1478 solver.cpp:244] Train net output #0: loss = 0.0101117 (* 1 = 0.0101117 loss)
I1228 12:49:14.474720 1478 sgd_solver.cpp:106] Iteration 4000, lr = 0.0077697
I1228 12:49:23.466630 1478 solver.cpp:228] Iteration 4100, loss = 0.0178342
I1228 12:49:23.466775 1478 solver.cpp:244] Train net output #0: loss = 0.0178341 (* 1 = 0.0178341 loss)
I1228 12:49:23.466816 1478 sgd_solver.cpp:106] Iteration 4100, lr = 0.00772833
I1228 12:49:32.453449 1478 solver.cpp:228] Iteration 4200, loss = 0.00712245
I1228 12:49:32.453586 1478 solver.cpp:244] Train net output #0: loss = 0.00712239 (* 1 = 0.00712239 loss)
I1228 12:49:32.453632 1478 sgd_solver.cpp:106] Iteration 4200, lr = 0.00768748
I1228 12:49:41.431674 1478 solver.cpp:228] Iteration 4300, loss = 0.0362492
I1228 12:49:41.431838 1478 solver.cpp:244] Train net output #0: loss = 0.0362491 (* 1 = 0.0362491 loss)
I1228 12:49:41.431886 1478 sgd_solver.cpp:106] Iteration 4300, lr = 0.00764712
I1228 12:49:50.409802 1478 solver.cpp:228] Iteration 4400, loss = 0.0190512
I1228 12:49:50.409947 1478 solver.cpp:244] Train net output #0: loss = 0.0190512 (* 1 = 0.0190512 loss)
I1228 12:49:50.409997 1478 sgd_solver.cpp:106] Iteration 4400, lr = 0.00760726
I1228 12:49:59.300992 1478 solver.cpp:337] Iteration 4500, Testing net (#0)
I1228 12:50:05.038296 1478 solver.cpp:404] Test net output #0: accuracy = 0.9882
I1228 12:50:05.038434 1478 solver.cpp:404] Test net output #1: loss = 0.0357055 (* 1 = 0.0357055 loss)
I1228 12:50:05.127156 1478 solver.cpp:228] Iteration 4500, loss = 0.00624241
I1228 12:50:05.127286 1478 solver.cpp:244] Train net output #0: loss = 0.00624236 (* 1 = 0.00624236 loss)
I1228 12:50:05.127332 1478 sgd_solver.cpp:106] Iteration 4500, lr = 0.00756788
I1228 12:50:14.118090 1478 solver.cpp:228] Iteration 4600, loss = 0.0156433
I1228 12:50:14.118289 1478 solver.cpp:244] Train net output #0: loss = 0.0156433 (* 1 = 0.0156433 loss)
I1228 12:50:14.118346 1478 sgd_solver.cpp:106] Iteration 4600, lr = 0.00752897
I1228 12:50:23.106927 1478 solver.cpp:228] Iteration 4700, loss = 0.00575534
I1228 12:50:23.107070 1478 solver.cpp:244] Train net output #0: loss = 0.00575529 (* 1 = 0.00575529 loss)
I1228 12:50:23.107112 1478 sgd_solver.cpp:106] Iteration 4700, lr = 0.00749052
I1228 12:50:32.103515 1478 solver.cpp:228] Iteration 4800, loss = 0.0172193
I1228 12:50:32.103652 1478 solver.cpp:244] Train net output #0: loss = 0.0172193 (* 1 = 0.0172193 loss)
I1228 12:50:32.103698 1478 sgd_solver.cpp:106] Iteration 4800, lr = 0.00745253
I1228 12:50:41.092864 1478 solver.cpp:228] Iteration 4900, loss = 0.0088059
I1228 12:50:41.092998 1478 solver.cpp:244] Train net output #0: loss = 0.00880586 (* 1 = 0.00880586 loss)
I1228 12:50:41.093045 1478 sgd_solver.cpp:106] Iteration 4900, lr = 0.00741498
I1228 12:50:50.007995 1478 solver.cpp:454] Snapshotting to binary proto file examples/mnist/lenet_iter_5000.caffemodel
I1228 12:50:50.013608 1478 sgd_solver.cpp:273] Snapshotting solver state to binary proto file examples/mnist/lenet_iter_5000.solverstate
I1228 12:50:50.016250 1478 solver.cpp:337] Iteration 5000, Testing net (#0)
I1228 12:50:55.714702 1478 solver.cpp:404] Test net output #0: accuracy = 0.99
I1228 12:50:55.714843 1478 solver.cpp:404] Test net output #1: loss = 0.0308458 (* 1 = 0.0308458 loss)
I1228 12:50:55.802348 1478 solver.cpp:228] Iteration 5000, loss = 0.0337758
I1228 12:50:55.802469 1478 solver.cpp:244] Train net output #0: loss = 0.0337758 (* 1 = 0.0337758 loss)
I1228 12:50:55.802513 1478 sgd_solver.cpp:106] Iteration 5000, lr = 0.00737788
I1228 12:51:04.774809 1478 solver.cpp:228] Iteration 5100, loss = 0.0166655
I1228 12:51:04.774946 1478 solver.cpp:244] Train net output #0: loss = 0.0166655 (* 1 = 0.0166655 loss)
I1228 12:51:04.774987 1478 sgd_solver.cpp:106] Iteration 5100, lr = 0.0073412
I1228 12:51:13.770524 1478 solver.cpp:228] Iteration 5200, loss = 0.00703603
I1228 12:51:13.770663 1478 solver.cpp:244] Train net output #0: loss = 0.00703599 (* 1 = 0.00703599 loss)
I1228 12:51:13.770704 1478 sgd_solver.cpp:106] Iteration 5200, lr = 0.00730495
I1228 12:51:22.773340 1478 solver.cpp:228] Iteration 5300, loss = 0.00212657
I1228 12:51:22.773501 1478 solver.cpp:244] Train net output #0: loss = 0.00212655 (* 1 = 0.00212655 loss)
I1228 12:51:22.773547 1478 sgd_solver.cpp:106] Iteration 5300, lr = 0.00726911
I1228 12:51:31.756722 1478 solver.cpp:228] Iteration 5400, loss = 0.00780362
I1228 12:51:31.756865 1478 solver.cpp:244] Train net output #0: loss = 0.00780359 (* 1 = 0.00780359 loss)
I1228 12:51:31.756906 1478 sgd_solver.cpp:106] Iteration 5400, lr = 0.00723368
I1228 12:51:40.641762 1478 solver.cpp:337] Iteration 5500, Testing net (#0)
I1228 12:51:46.328320 1478 solver.cpp:404] Test net output #0: accuracy = 0.9892
I1228 12:51:46.328452 1478 solver.cpp:404] Test net output #1: loss = 0.0326295 (* 1 = 0.0326295 loss)
I1228 12:51:46.417279 1478 solver.cpp:228] Iteration 5500, loss = 0.0109335
I1228 12:51:46.417410 1478 solver.cpp:244] Train net output #0: loss = 0.0109334 (* 1 = 0.0109334 loss)
I1228 12:51:46.417450 1478 sgd_solver.cpp:106] Iteration 5500, lr = 0.00719865
I1228 12:51:55.415185 1478 solver.cpp:228] Iteration 5600, loss = 0.000549599
I1228 12:51:55.415355 1478 solver.cpp:244] Train net output #0: loss = 0.000549559 (* 1 = 0.000549559 loss)
I1228 12:51:55.415397 1478 sgd_solver.cpp:106] Iteration 5600, lr = 0.00716402
I1228 12:52:04.402909 1478 solver.cpp:228] Iteration 5700, loss = 0.00133583
I1228 12:52:04.403048 1478 solver.cpp:244] Train net output #0: loss = 0.00133578 (* 1 = 0.00133578 loss)
I1228 12:52:04.403093 1478 sgd_solver.cpp:106] Iteration 5700, lr = 0.00712977
I1228 12:52:13.359767 1478 solver.cpp:228] Iteration 5800, loss = 0.0199287
I1228 12:52:13.359917 1478 solver.cpp:244] Train net output #0: loss = 0.0199286 (* 1 = 0.0199286 loss)
I1228 12:52:13.359971 1478 sgd_solver.cpp:106] Iteration 5800, lr = 0.0070959
I1228 12:52:22.356218 1478 solver.cpp:228] Iteration 5900, loss = 0.00755135
I1228 12:52:22.356365 1478 solver.cpp:244] Train net output #0: loss = 0.00755131 (* 1 = 0.00755131 loss)
I1228 12:52:22.356408 1478 sgd_solver.cpp:106] Iteration 5900, lr = 0.0070624
I1228 12:52:31.254874 1478 solver.cpp:337] Iteration 6000, Testing net (#0)
I1228 12:52:36.958190 1478 solver.cpp:404] Test net output #0: accuracy = 0.9903
I1228 12:52:36.958329 1478 solver.cpp:404] Test net output #1: loss = 0.0295362 (* 1 = 0.0295362 loss)
I1228 12:52:37.046069 1478 solver.cpp:228] Iteration 6000, loss = 0.00353343
I1228 12:52:37.046198 1478 solver.cpp:244] Train net output #0: loss = 0.0035334 (* 1 = 0.0035334 loss)
I1228 12:52:37.046244 1478 sgd_solver.cpp:106] Iteration 6000, lr = 0.00702927
I1228 12:52:46.023576 1478 solver.cpp:228] Iteration 6100, loss = 0.00124939
I1228 12:52:46.023715 1478 solver.cpp:244] Train net output #0: loss = 0.00124934 (* 1 = 0.00124934 loss)
I1228 12:52:46.023766 1478 sgd_solver.cpp:106] Iteration 6100, lr = 0.0069965
I1228 12:52:55.006597 1478 solver.cpp:228] Iteration 6200, loss = 0.0103351
I1228 12:52:55.006745 1478 solver.cpp:244] Train net output #0: loss = 0.010335 (* 1 = 0.010335 loss)
I1228 12:52:55.006791 1478 sgd_solver.cpp:106] Iteration 6200, lr = 0.00696408
I1228 12:53:04.008935 1478 solver.cpp:228] Iteration 6300, loss = 0.00820768
I1228 12:53:04.009102 1478 solver.cpp:244] Train net output #0: loss = 0.00820762 (* 1 = 0.00820762 loss)
I1228 12:53:04.009150 1478 sgd_solver.cpp:106] Iteration 6300, lr = 0.00693201
I1228 12:53:13.010681 1478 solver.cpp:228] Iteration 6400, loss = 0.00429668
I1228 12:53:13.010815 1478 solver.cpp:244] Train net output #0: loss = 0.0042966 (* 1 = 0.0042966 loss)
I1228 12:53:13.010862 1478 sgd_solver.cpp:106] Iteration 6400, lr = 0.00690029
I1228 12:53:21.929111 1478 solver.cpp:337] Iteration 6500, Testing net (#0)
I1228 12:53:27.614408 1478 solver.cpp:404] Test net output #0: accuracy = 0.9904
I1228 12:53:27.614542 1478 solver.cpp:404] Test net output #1: loss = 0.0322547 (* 1 = 0.0322547 loss)
I1228 12:53:27.703261 1478 solver.cpp:228] Iteration 6500, loss = 0.0182292
I1228 12:53:27.703389 1478 solver.cpp:244] Train net output #0: loss = 0.0182291 (* 1 = 0.0182291 loss)
I1228 12:53:27.703429 1478 sgd_solver.cpp:106] Iteration 6500, lr = 0.0068689
I1228 12:53:36.712234 1478 solver.cpp:228] Iteration 6600, loss = 0.0449371
I1228 12:53:36.712409 1478 solver.cpp:244] Train net output #0: loss = 0.044937 (* 1 = 0.044937 loss)
I1228 12:53:36.712451 1478 sgd_solver.cpp:106] Iteration 6600, lr = 0.00683784
I1228 12:53:45.693488 1478 solver.cpp:228] Iteration 6700, loss = 0.0062013
I1228 12:53:45.693635 1478 solver.cpp:244] Train net output #0: loss = 0.00620122 (* 1 = 0.00620122 loss)
I1228 12:53:45.693684 1478 sgd_solver.cpp:106] Iteration 6700, lr = 0.00680711
I1228 12:53:54.686039 1478 solver.cpp:228] Iteration 6800, loss = 0.00341845
I1228 12:53:54.686187 1478 solver.cpp:244] Train net output #0: loss = 0.00341837 (* 1 = 0.00341837 loss)
I1228 12:53:54.686236 1478 sgd_solver.cpp:106] Iteration 6800, lr = 0.0067767
I1228 12:54:03.675786 1478 solver.cpp:228] Iteration 6900, loss = 0.00433415
I1228 12:54:03.675925 1478 solver.cpp:244] Train net output #0: loss = 0.00433407 (* 1 = 0.00433407 loss)
I1228 12:54:03.675966 1478 sgd_solver.cpp:106] Iteration 6900, lr = 0.0067466
I1228 12:54:12.580624 1478 solver.cpp:337] Iteration 7000, Testing net (#0)
I1228 12:54:18.301859 1478 solver.cpp:404] Test net output #0: accuracy = 0.9903
I1228 12:54:18.302001 1478 solver.cpp:404] Test net output #1: loss = 0.0303683 (* 1 = 0.0303683 loss)
I1228 12:54:18.389706 1478 solver.cpp:228] Iteration 7000, loss = 0.00686864
I1228 12:54:18.389844 1478 solver.cpp:244] Train net output #0: loss = 0.00686856 (* 1 = 0.00686856 loss)
I1228 12:54:18.389890 1478 sgd_solver.cpp:106] Iteration 7000, lr = 0.00671681
I1228 12:54:27.372422 1478 solver.cpp:228] Iteration 7100, loss = 0.00886238
I1228 12:54:27.372568 1478 solver.cpp:244] Train net output #0: loss = 0.00886229 (* 1 = 0.00886229 loss)
I1228 12:54:27.372619 1478 sgd_solver.cpp:106] Iteration 7100, lr = 0.00668733
I1228 12:54:36.339958 1478 solver.cpp:228] Iteration 7200, loss = 0.00408799
I1228 12:54:36.340101 1478 solver.cpp:244] Train net output #0: loss = 0.00408792 (* 1 = 0.00408792 loss)
I1228 12:54:36.340149 1478 sgd_solver.cpp:106] Iteration 7200, lr = 0.00665815
I1228 12:54:45.325973 1478 solver.cpp:228] Iteration 7300, loss = 0.0171202
I1228 12:54:45.326149 1478 solver.cpp:244] Train net output #0: loss = 0.0171201 (* 1 = 0.0171201 loss)
I1228 12:54:45.326191 1478 sgd_solver.cpp:106] Iteration 7300, lr = 0.00662927
I1228 12:54:54.329740 1478 solver.cpp:228] Iteration 7400, loss = 0.00486002
I1228 12:54:54.329880 1478 solver.cpp:244] Train net output #0: loss = 0.00485994 (* 1 = 0.00485994 loss)
I1228 12:54:54.329922 1478 sgd_solver.cpp:106] Iteration 7400, lr = 0.00660067
I1228 12:55:03.241461 1478 solver.cpp:337] Iteration 7500, Testing net (#0)
I1228 12:55:08.933132 1478 solver.cpp:404] Test net output #0: accuracy = 0.99
I1228 12:55:08.933291 1478 solver.cpp:404] Test net output #1: loss = 0.0325328 (* 1 = 0.0325328 loss)
I1228 12:55:09.021898 1478 solver.cpp:228] Iteration 7500, loss = 0.00268578
I1228 12:55:09.022032 1478 solver.cpp:244] Train net output #0: loss = 0.0026857 (* 1 = 0.0026857 loss)
I1228 12:55:09.022073 1478 sgd_solver.cpp:106] Iteration 7500, lr = 0.00657236
I1228 12:55:18.020838 1478 solver.cpp:228] Iteration 7600, loss = 0.0053811
I1228 12:55:18.021009 1478 solver.cpp:244] Train net output #0: loss = 0.00538101 (* 1 = 0.00538101 loss)
I1228 12:55:18.021050 1478 sgd_solver.cpp:106] Iteration 7600, lr = 0.00654433
I1228 12:55:26.999490 1478 solver.cpp:228] Iteration 7700, loss = 0.0290023
I1228 12:55:26.999635 1478 solver.cpp:244] Train net output #0: loss = 0.0290022 (* 1 = 0.0290022 loss)
I1228 12:55:26.999687 1478 sgd_solver.cpp:106] Iteration 7700, lr = 0.00651658
I1228 12:55:35.992398 1478 solver.cpp:228] Iteration 7800, loss = 0.00495144
I1228 12:55:35.992539 1478 solver.cpp:244] Train net output #0: loss = 0.00495136 (* 1 = 0.00495136 loss)
I1228 12:55:35.992589 1478 sgd_solver.cpp:106] Iteration 7800, lr = 0.00648911
I1228 12:55:44.981251 1478 solver.cpp:228] Iteration 7900, loss = 0.00728552
I1228 12:55:44.981393 1478 solver.cpp:244] Train net output #0: loss = 0.00728544 (* 1 = 0.00728544 loss)
I1228 12:55:44.981439 1478 sgd_solver.cpp:106] Iteration 7900, lr = 0.0064619
I1228 12:55:53.876646 1478 solver.cpp:337] Iteration 8000, Testing net (#0)
I1228 12:55:59.564527 1478 solver.cpp:404] Test net output #0: accuracy = 0.9903
I1228 12:55:59.564671 1478 solver.cpp:404] Test net output #1: loss = 0.0303976 (* 1 = 0.0303976 loss)
I1228 12:55:59.653228 1478 solver.cpp:228] Iteration 8000, loss = 0.00487312
I1228 12:55:59.653352 1478 solver.cpp:244] Train net output #0: loss = 0.00487304 (* 1 = 0.00487304 loss)
I1228 12:55:59.653396 1478 sgd_solver.cpp:106] Iteration 8000, lr = 0.00643496
I1228 12:56:08.654803 1478 solver.cpp:228] Iteration 8100, loss = 0.00874484
I1228 12:56:08.654949 1478 solver.cpp:244] Train net output #0: loss = 0.00874476 (* 1 = 0.00874476 loss)
I1228 12:56:08.654991 1478 sgd_solver.cpp:106] Iteration 8100, lr = 0.00640827
I1228 12:56:17.630285 1478 solver.cpp:228] Iteration 8200, loss = 0.00797972
I1228 12:56:17.630429 1478 solver.cpp:244] Train net output #0: loss = 0.00797963 (* 1 = 0.00797963 loss)
I1228 12:56:17.630470 1478 sgd_solver.cpp:106] Iteration 8200, lr = 0.00638185
I1228 12:56:26.607856 1478 solver.cpp:228] Iteration 8300, loss = 0.0234358
I1228 12:56:26.608052 1478 solver.cpp:244] Train net output #0: loss = 0.0234357 (* 1 = 0.0234357 loss)
I1228 12:56:26.608094 1478 sgd_solver.cpp:106] Iteration 8300, lr = 0.00635567
I1228 12:56:35.597169 1478 solver.cpp:228] Iteration 8400, loss = 0.00902765
I1228 12:56:35.597326 1478 solver.cpp:244] Train net output #0: loss = 0.00902755 (* 1 = 0.00902755 loss)
I1228 12:56:35.597374 1478 sgd_solver.cpp:106] Iteration 8400, lr = 0.00632975
I1228 12:56:44.483781 1478 solver.cpp:337] Iteration 8500, Testing net (#0)
I1228 12:56:50.179303 1478 solver.cpp:404] Test net output #0: accuracy = 0.9906
I1228 12:56:50.179446 1478 solver.cpp:404] Test net output #1: loss = 0.0297353 (* 1 = 0.0297353 loss)
I1228 12:56:50.268074 1478 solver.cpp:228] Iteration 8500, loss = 0.00623272
I1228 12:56:50.268204 1478 solver.cpp:244] Train net output #0: loss = 0.00623262 (* 1 = 0.00623262 loss)
I1228 12:56:50.268244 1478 sgd_solver.cpp:106] Iteration 8500, lr = 0.00630407
I1228 12:56:59.266109 1478 solver.cpp:228] Iteration 8600, loss = 0.000967596
I1228 12:56:59.266285 1478 solver.cpp:244] Train net output #0: loss = 0.000967495 (* 1 = 0.000967495 loss)
I1228 12:56:59.266335 1478 sgd_solver.cpp:106] Iteration 8600, lr = 0.00627864
I1228 12:57:08.254199 1478 solver.cpp:228] Iteration 8700, loss = 0.00239873
I1228 12:57:08.254345 1478 solver.cpp:244] Train net output #0: loss = 0.00239862 (* 1 = 0.00239862 loss)
I1228 12:57:08.254386 1478 sgd_solver.cpp:106] Iteration 8700, lr = 0.00625344
I1228 12:57:17.251121 1478 solver.cpp:228] Iteration 8800, loss = 0.00144548
I1228 12:57:17.251268 1478 solver.cpp:244] Train net output #0: loss = 0.00144538 (* 1 = 0.00144538 loss)
I1228 12:57:17.251318 1478 sgd_solver.cpp:106] Iteration 8800, lr = 0.00622847
I1228 12:57:26.228776 1478 solver.cpp:228] Iteration 8900, loss = 0.000345843
I1228 12:57:26.228924 1478 solver.cpp:244] Train net output #0: loss = 0.000345736 (* 1 = 0.000345736 loss)
I1228 12:57:26.228965 1478 sgd_solver.cpp:106] Iteration 8900, lr = 0.00620374
I1228 12:57:35.132206 1478 solver.cpp:337] Iteration 9000, Testing net (#0)
I1228 12:57:40.816022 1478 solver.cpp:404] Test net output #0: accuracy = 0.9909
I1228 12:57:40.816154 1478 solver.cpp:404] Test net output #1: loss = 0.0278701 (* 1 = 0.0278701 loss)
I1228 12:57:40.904713 1478 solver.cpp:228] Iteration 9000, loss = 0.0132574
I1228 12:57:40.904846 1478 solver.cpp:244] Train net output #0: loss = 0.0132573 (* 1 = 0.0132573 loss)
I1228 12:57:40.904887 1478 sgd_solver.cpp:106] Iteration 9000, lr = 0.00617924
I1228 12:57:49.887485 1478 solver.cpp:228] Iteration 9100, loss = 0.00660216
I1228 12:57:49.887621 1478 solver.cpp:244] Train net output #0: loss = 0.00660205 (* 1 = 0.00660205 loss)
I1228 12:57:49.887666 1478 sgd_solver.cpp:106] Iteration 9100, lr = 0.00615496
I1228 12:57:58.880218 1478 solver.cpp:228] Iteration 9200, loss = 0.00292292
I1228 12:57:58.880364 1478 solver.cpp:244] Train net output #0: loss = 0.00292282 (* 1 = 0.00292282 loss)
I1228 12:57:58.880406 1478 sgd_solver.cpp:106] Iteration 9200, lr = 0.0061309
I1228 12:58:07.862749 1478 solver.cpp:228] Iteration 9300, loss = 0.00458688
I1228 12:58:07.862910 1478 solver.cpp:244] Train net output #0: loss = 0.00458676 (* 1 = 0.00458676 loss)
I1228 12:58:07.862957 1478 sgd_solver.cpp:106] Iteration 9300, lr = 0.00610706
I1228 12:58:16.836311 1478 solver.cpp:228] Iteration 9400, loss = 0.0341082
I1228 12:58:16.836454 1478 solver.cpp:244] Train net output #0: loss = 0.0341081 (* 1 = 0.0341081 loss)
I1228 12:58:16.836496 1478 sgd_solver.cpp:106] Iteration 9400, lr = 0.00608343
I1228 12:58:25.749003 1478 solver.cpp:337] Iteration 9500, Testing net (#0)
I1228 12:58:31.442245 1478 solver.cpp:404] Test net output #0: accuracy = 0.989
I1228 12:58:31.442381 1478 solver.cpp:404] Test net output #1: loss = 0.0340806 (* 1 = 0.0340806 loss)
I1228 12:58:31.531092 1478 solver.cpp:228] Iteration 9500, loss = 0.00251447
I1228 12:58:31.531222 1478 solver.cpp:244] Train net output #0: loss = 0.00251435 (* 1 = 0.00251435 loss)
I1228 12:58:31.531267 1478 sgd_solver.cpp:106] Iteration 9500, lr = 0.00606002
I1228 12:58:40.530058 1478 solver.cpp:228] Iteration 9600, loss = 0.0033457
I1228 12:58:40.530247 1478 solver.cpp:244] Train net output #0: loss = 0.00334557 (* 1 = 0.00334557 loss)
I1228 12:58:40.530294 1478 sgd_solver.cpp:106] Iteration 9600, lr = 0.00603682
I1228 12:58:49.502943 1478 solver.cpp:228] Iteration 9700, loss = 0.00308436
I1228 12:58:49.503093 1478 solver.cpp:244] Train net output #0: loss = 0.00308424 (* 1 = 0.00308424 loss)
I1228 12:58:49.503145 1478 sgd_solver.cpp:106] Iteration 9700, lr = 0.00601382
I1228 12:58:58.485988 1478 solver.cpp:228] Iteration 9800, loss = 0.00986355
I1228 12:58:58.486138 1478 solver.cpp:244] Train net output #0: loss = 0.00986342 (* 1 = 0.00986342 loss)
I1228 12:58:58.486186 1478 sgd_solver.cpp:106] Iteration 9800, lr = 0.00599102
I1228 12:59:07.481578 1478 solver.cpp:228] Iteration 9900, loss = 0.00657413
I1228 12:59:07.481719 1478 solver.cpp:244] Train net output #0: loss = 0.006574 (* 1 = 0.006574 loss)
I1228 12:59:07.481765 1478 sgd_solver.cpp:106] Iteration 9900, lr = 0.00596843
I1228 12:59:16.380491 1478 solver.cpp:454] Snapshotting to binary proto file examples/mnist/lenet_iter_10000.caffemodel
I1228 12:59:16.386615 1478 sgd_solver.cpp:273] Snapshotting solver state to binary proto file examples/mnist/lenet_iter_10000.solverstate
I1228 12:59:16.425662 1478 solver.cpp:317] Iteration 10000, loss = 0.00357283
I1228 12:59:16.425776 1478 solver.cpp:337] Iteration 10000, Testing net (#0)
I1228 12:59:22.108726 1478 solver.cpp:404] Test net output #0: accuracy = 0.9913
I1228 12:59:22.108870 1478 solver.cpp:404] Test net output #1: loss = 0.0273851 (* 1 = 0.0273851 loss)
I1228 12:59:22.108907 1478 solver.cpp:322] Optimization Done.
I1228 12:59:22.108942 1478 caffe.cpp:254] Optimization Done.
root@ip-172-30-0-251:/caffe#
(注:原记录此处出现 "Write failed: Broken pipe" 以及另一台机器的提示符 [root@xuyongshi aws.rds]#,这是 SSH 会话断开产生的无关输出,并非 Caffe 的报错信息)
3. 预测过程部分输出
root@ip-172-30-0-251:/caffe# build/tools/caffe test --model=examples/mnist/lenet_train_test.prototxt --weights examples/mnist/lenet_iter_10000.caffemodel -iterations 100
I1229 08:30:11.945741 4159 caffe.cpp:279] Use CPU.
I1229 08:30:11.947610 4159 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer mnist
I1229 08:30:11.947794 4159 net.cpp:58] Initializing net from parameters:
name: "LeNet"
state {
phase: TEST
level: 0
stage: ""
}
layer {
name: "mnist"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
scale: 0.00390625
}
data_param {
source: "examples/mnist/mnist_test_lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 20
kernel_size: 5
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 50
kernel_size: 5
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool2"
top: "ip1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 500
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "ip1"
top: "ip1"
}
layer {
name: "ip2"
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 10
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "ip2"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
}
I1229 08:30:11.950266 4159 layer_factory.hpp:77] Creating layer mnist
I1229 08:30:11.950832 4159 net.cpp:100] Creating Layer mnist
I1229 08:30:11.950876 4159 net.cpp:408] mnist -> data
I1229 08:30:11.950924 4159 net.cpp:408] mnist -> label
I1229 08:30:11.951063 4160 db_lmdb.cpp:35] Opened lmdb examples/mnist/mnist_test_lmdb
I1229 08:30:11.951145 4159 data_layer.cpp:41] output data size: 100,1,28,28
I1229 08:30:11.951928 4159 net.cpp:150] Setting up mnist
I1229 08:30:11.951970 4159 net.cpp:157] Top shape: 100 1 28 28 (78400)
I1229 08:30:11.951997 4159 net.cpp:157] Top shape: 100 (100)
I1229 08:30:11.952021 4159 net.cpp:165] Memory required for data: 314000
I1229 08:30:11.952049 4159 layer_factory.hpp:77] Creating layer label_mnist_1_split
I1229 08:30:11.952078 4159 net.cpp:100] Creating Layer label_mnist_1_split
I1229 08:30:11.952105 4159 net.cpp:434] label_mnist_1_split <- label
I1229 08:30:11.952137 4159 net.cpp:408] label_mnist_1_split -> label_mnist_1_split_0
I1229 08:30:11.952167 4159 net.cpp:408] label_mnist_1_split -> label_mnist_1_split_1
I1229 08:30:11.952200 4159 net.cpp:150] Setting up label_mnist_1_split
I1229 08:30:11.952227 4159 net.cpp:157] Top shape: 100 (100)
I1229 08:30:11.952252 4159 net.cpp:157] Top shape: 100 (100)
I1229 08:30:11.952275 4159 net.cpp:165] Memory required for data: 314800
I1229 08:30:11.952298 4159 layer_factory.hpp:77] Creating layer conv1
I1229 08:30:11.952334 4159 net.cpp:100] Creating Layer conv1
I1229 08:30:11.952360 4159 net.cpp:434] conv1 <- data
I1229 08:30:11.952386 4159 net.cpp:408] conv1 -> conv1
I1229 08:30:11.952474 4159 net.cpp:150] Setting up conv1
I1229 08:30:11.952507 4159 net.cpp:157] Top shape: 100 20 24 24 (1152000)
I1229 08:30:11.952530 4159 net.cpp:165] Memory required for data: 4922800
I1229 08:30:11.952565 4159 layer_factory.hpp:77] Creating layer pool1
I1229 08:30:11.952615 4159 net.cpp:100] Creating Layer pool1
I1229 08:30:11.952641 4159 net.cpp:434] pool1 <- conv1
I1229 08:30:11.952667 4159 net.cpp:408] pool1 -> pool1
I1229 08:30:11.952705 4159 net.cpp:150] Setting up pool1
I1229 08:30:11.952733 4159 net.cpp:157] Top shape: 100 20 12 12 (288000)
I1229 08:30:11.952756 4159 net.cpp:165] Memory required for data: 6074800
I1229 08:30:11.952780 4159 layer_factory.hpp:77] Creating layer conv2
I1229 08:30:11.952808 4159 net.cpp:100] Creating Layer conv2
I1229 08:30:11.952833 4159 net.cpp:434] conv2 <- pool1
I1229 08:30:11.952859 4159 net.cpp:408] conv2 -> conv2
I1229 08:30:11.953111 4159 net.cpp:150] Setting up conv2
I1229 08:30:11.953145 4159 net.cpp:157] Top shape: 100 50 8 8 (320000)
I1229 08:30:11.953168 4159 net.cpp:165] Memory required for data: 7354800
I1229 08:30:11.953197 4159 layer_factory.hpp:77] Creating layer pool2
I1229 08:30:11.953263 4159 net.cpp:100] Creating Layer pool2
I1229 08:30:11.953294 4159 net.cpp:434] pool2 <- conv2
I1229 08:30:11.953320 4159 net.cpp:408] pool2 -> pool2
I1229 08:30:11.953351 4159 net.cpp:150] Setting up pool2
I1229 08:30:11.953377 4159 net.cpp:157] Top shape: 100 50 4 4 (80000)
I1229 08:30:11.953400 4159 net.cpp:165] Memory required for data: 7674800
I1229 08:30:11.953423 4159 layer_factory.hpp:77] Creating layer ip1
I1229 08:30:11.953454 4159 net.cpp:100] Creating Layer ip1
I1229 08:30:11.953480 4159 net.cpp:434] ip1 <- pool2
I1229 08:30:11.953505 4159 net.cpp:408] ip1 -> ip1
I1229 08:30:11.956861 4159 net.cpp:150] Setting up ip1
I1229 08:30:11.957449 4159 net.cpp:157] Top shape: 100 500 (50000)
I1229 08:30:11.957475 4159 net.cpp:165] Memory required for data: 7874800
I1229 08:30:11.957505 4159 layer_factory.hpp:77] Creating layer relu1
I1229 08:30:11.957592 4159 net.cpp:100] Creating Layer relu1
I1229 08:30:11.957619 4159 net.cpp:434] relu1 <- ip1
I1229 08:30:11.957644 4159 net.cpp:395] relu1 -> ip1 (in-place)
I1229 08:30:11.957675 4159 net.cpp:150] Setting up relu1
I1229 08:30:11.957725 4159 net.cpp:157] Top shape: 100 500 (50000)
I1229 08:30:11.957777 4159 net.cpp:165] Memory required for data: 8074800
I1229 08:30:11.957818 4159 layer_factory.hpp:77] Creating layer ip2
I1229 08:30:11.957871 4159 net.cpp:100] Creating Layer ip2
I1229 08:30:11.957929 4159 net.cpp:434] ip2 <- ip1
I1229 08:30:11.957976 4159 net.cpp:408] ip2 -> ip2
I1229 08:30:11.958083 4159 net.cpp:150] Setting up ip2
I1229 08:30:11.958230 4159 net.cpp:157] Top shape: 100 10 (1000)
I1229 08:30:11.958276 4159 net.cpp:165] Memory required for data: 8078800
I1229 08:30:11.958319 4159 layer_factory.hpp:77] Creating layer ip2_ip2_0_split
I1229 08:30:11.958362 4159 net.cpp:100] Creating Layer ip2_ip2_0_split
I1229 08:30:11.958406 4159 net.cpp:434] ip2_ip2_0_split <- ip2
I1229 08:30:11.958449 4159 net.cpp:408] ip2_ip2_0_split -> ip2_ip2_0_split_0
I1229 08:30:11.958498 4159 net.cpp:408] ip2_ip2_0_split -> ip2_ip2_0_split_1
I1229 08:30:11.958545 4159 net.cpp:150] Setting up ip2_ip2_0_split
I1229 08:30:11.958600 4159 net.cpp:157] Top shape: 100 10 (1000)
I1229 08:30:11.958642 4159 net.cpp:157] Top shape: 100 10 (1000)
I1229 08:30:11.958678 4159 net.cpp:165] Memory required for data: 8086800
I1229 08:30:11.958710 4159 layer_factory.hpp:77] Creating layer accuracy
I1229 08:30:11.958757 4159 net.cpp:100] Creating Layer accuracy
I1229 08:30:11.958796 4159 net.cpp:434] accuracy <- ip2_ip2_0_split_0
I1229 08:30:11.958844 4159 net.cpp:434] accuracy <- label_mnist_1_split_0
I1229 08:30:11.958890 4159 net.cpp:408] accuracy -> accuracy
I1229 08:30:11.958942 4159 net.cpp:150] Setting up accuracy
I1229 08:30:11.958997 4159 net.cpp:157] Top shape: (1)
I1229 08:30:11.959038 4159 net.cpp:165] Memory required for data: 8086804
I1229 08:30:11.959071 4159 layer_factory.hpp:77] Creating layer loss
I1229 08:30:11.959116 4159 net.cpp:100] Creating Layer loss
I1229 08:30:11.959161 4159 net.cpp:434] loss <- ip2_ip2_0_split_1
I1229 08:30:11.959203 4159 net.cpp:434] loss <- label_mnist_1_split_1
I1229 08:30:11.959287 4159 net.cpp:408] loss -> loss
I1229 08:30:11.959345 4159 layer_factory.hpp:77] Creating layer loss
I1229 08:30:11.959434 4159 net.cpp:150] Setting up loss
I1229 08:30:11.959507 4159 net.cpp:157] Top shape: (1)
I1229 08:30:11.959543 4159 net.cpp:160] with loss weight 1
I1229 08:30:11.959638 4159 net.cpp:165] Memory required for data: 8086808
I1229 08:30:11.959679 4159 net.cpp:226] loss needs backward computation.
I1229 08:30:11.959713 4159 net.cpp:228] accuracy does not need backward computation.
I1229 08:30:11.959748 4159 net.cpp:226] ip2_ip2_0_split needs backward computation.
I1229 08:30:11.959791 4159 net.cpp:226] ip2 needs backward computation.
I1229 08:30:11.959825 4159 net.cpp:226] relu1 needs backward computation.
I1229 08:30:11.959856 4159 net.cpp:226] ip1 needs backward computation.
I1229 08:30:11.959890 4159 net.cpp:226] pool2 needs backward computation.
I1229 08:30:11.959930 4159 net.cpp:226] conv2 needs backward computation.
I1229 08:30:11.959962 4159 net.cpp:226] pool1 needs backward computation.
I1229 08:30:11.960003 4159 net.cpp:226] conv1 needs backward computation.
I1229 08:30:11.960037 4159 net.cpp:228] label_mnist_1_split does not need backward computation.
I1229 08:30:11.960079 4159 net.cpp:228] mnist does not need backward computation.
I1229 08:30:11.960114 4159 net.cpp:270] This network produces output accuracy
I1229 08:30:11.960155 4159 net.cpp:270] This network produces output loss
I1229 08:30:11.960197 4159 net.cpp:283] Network initialization done.
I1229 08:30:11.965471 4159 caffe.cpp:285] Running for 100 iterations.
I1229 08:30:12.025677 4159 caffe.cpp:308] Batch 0, accuracy = 1
I1229 08:30:12.025766 4159 caffe.cpp:308] Batch 0, loss = 0.00755897
I1229 08:30:12.083183 4159 caffe.cpp:308] Batch 1, accuracy = 0.99
I1229 08:30:12.083266 4159 caffe.cpp:308] Batch 1, loss = 0.0125238
I1229 08:30:12.140435 4159 caffe.cpp:308] Batch 2, accuracy = 0.99
I1229 08:30:12.140517 4159 caffe.cpp:308] Batch 2, loss = 0.0366887
I1229 08:30:12.197609 4159 caffe.cpp:308] Batch 3, accuracy = 0.99
I1229 08:30:12.197691 4159 caffe.cpp:308] Batch 3, loss = 0.0179997
I1229 08:30:12.255146 4159 caffe.cpp:308] Batch 4, accuracy = 0.99
I1229 08:30:12.255257 4159 caffe.cpp:308] Batch 4, loss = 0.042092
I1229 08:30:12.312587 4159 caffe.cpp:308] Batch 5, accuracy = 0.99
I1229 08:30:12.312680 4159 caffe.cpp:308] Batch 5, loss = 0.0517442
I1229 08:30:12.369791 4159 caffe.cpp:308] Batch 6, accuracy = 0.98
I1229 08:30:12.369884 4159 caffe.cpp:308] Batch 6, loss = 0.0612323
I1229 08:30:12.427008 4159 caffe.cpp:308] Batch 7, accuracy = 0.99
I1229 08:30:12.427106 4159 caffe.cpp:308] Batch 7, loss = 0.041436
I1229 08:30:12.484457 4159 caffe.cpp:308] Batch 8, accuracy = 0.99
I1229 08:30:12.484560 4159 caffe.cpp:308] Batch 8, loss = 0.0160605
I1229 08:30:12.541947 4159 caffe.cpp:308] Batch 9, accuracy = 0.98
I1229 08:30:12.542069 4159 caffe.cpp:308] Batch 9, loss = 0.0331129
I1229 08:30:12.599222 4159 caffe.cpp:308] Batch 10, accuracy = 0.98
I1229 08:30:12.599315 4159 caffe.cpp:308] Batch 10, loss = 0.0573284
I1229 08:30:12.656574 4159 caffe.cpp:308] Batch 11, accuracy = 0.98
I1229 08:30:12.656666 4159 caffe.cpp:308] Batch 11, loss = 0.0382567
I1229 08:30:12.714007 4159 caffe.cpp:308] Batch 12, accuracy = 0.96
I1229 08:30:12.714097 4159 caffe.cpp:308] Batch 12, loss = 0.138023
I1229 08:30:12.771430 4159 caffe.cpp:308] Batch 13, accuracy = 0.98
I1229 08:30:12.771523 4159 caffe.cpp:308] Batch 13, loss = 0.0342435
I1229 08:30:12.829116 4159 caffe.cpp:308] Batch 14, accuracy = 1
I1229 08:30:12.829265 4159 caffe.cpp:308] Batch 14, loss = 0.0119491
I1229 08:30:12.887130 4159 caffe.cpp:308] Batch 15, accuracy = 0.97
I1229 08:30:12.887259 4159 caffe.cpp:308] Batch 15, loss = 0.0427386
I1229 08:30:12.944875 4159 caffe.cpp:308] Batch 16, accuracy = 0.99
I1229 08:30:12.944993 4159 caffe.cpp:308] Batch 16, loss = 0.0290488
I1229 08:30:13.002748 4159 caffe.cpp:308] Batch 17, accuracy = 0.99
I1229 08:30:13.002882 4159 caffe.cpp:308] Batch 17, loss = 0.0328682
I1229 08:30:13.060534 4159 caffe.cpp:308] Batch 18, accuracy = 1
I1229 08:30:13.060644 4159 caffe.cpp:308] Batch 18, loss = 0.0121858
I1229 08:30:13.118321 4159 caffe.cpp:308] Batch 19, accuracy = 0.99
I1229 08:30:13.118448 4159 caffe.cpp:308] Batch 19, loss = 0.0610938
I1229 08:30:13.175621 4159 caffe.cpp:308] Batch 20, accuracy = 0.98
I1229 08:30:13.175751 4159 caffe.cpp:308] Batch 20, loss = 0.0924508
I1229 08:30:13.233448 4159 caffe.cpp:308] Batch 21, accuracy = 0.97
I1229 08:30:13.233566 4159 caffe.cpp:308] Batch 21, loss = 0.0909196
I1229 08:30:13.291142 4159 caffe.cpp:308] Batch 22, accuracy = 0.99
I1229 08:30:13.291272 4159 caffe.cpp:308] Batch 22, loss = 0.0139248
I1229 08:30:13.348948 4159 caffe.cpp:308] Batch 23, accuracy = 1
I1229 08:30:13.349058 4159 caffe.cpp:308] Batch 23, loss = 0.0198691
I1229 08:30:13.406636 4159 caffe.cpp:308] Batch 24, accuracy = 0.98
I1229 08:30:13.406746 4159 caffe.cpp:308] Batch 24, loss = 0.0412692
I1229 08:30:13.464308 4159 caffe.cpp:308] Batch 25, accuracy = 0.99
I1229 08:30:13.464418 4159 caffe.cpp:308] Batch 25, loss = 0.0626006
I1229 08:30:13.522017 4159 caffe.cpp:308] Batch 26, accuracy = 0.99
I1229 08:30:13.522138 4159 caffe.cpp:308] Batch 26, loss = 0.129049
I1229 08:30:13.579826 4159 caffe.cpp:308] Batch 27, accuracy = 0.99
I1229 08:30:13.579942 4159 caffe.cpp:308] Batch 27, loss = 0.0203413
I1229 08:30:13.637822 4159 caffe.cpp:308] Batch 28, accuracy = 0.99
I1229 08:30:13.637951 4159 caffe.cpp:308] Batch 28, loss = 0.0473171
I1229 08:30:13.695449 4159 caffe.cpp:308] Batch 29, accuracy = 0.95
I1229 08:30:13.695564 4159 caffe.cpp:308] Batch 29, loss = 0.130175
I1229 08:30:13.753072 4159 caffe.cpp:308] Batch 30, accuracy = 0.99
I1229 08:30:13.753187 4159 caffe.cpp:308] Batch 30, loss = 0.0209849
I1229 08:30:13.810647 4159 caffe.cpp:308] Batch 31, accuracy = 1
I1229 08:30:13.810770 4159 caffe.cpp:308] Batch 31, loss = 0.00194038
I1229 08:30:13.868512 4159 caffe.cpp:308] Batch 32, accuracy = 0.99
I1229 08:30:13.868621 4159 caffe.cpp:308] Batch 32, loss = 0.0168306
I1229 08:30:13.926297 4159 caffe.cpp:308] Batch 33, accuracy = 1
I1229 08:30:13.926412 4159 caffe.cpp:308] Batch 33, loss = 0.00384053
I1229 08:30:13.984012 4159 caffe.cpp:308] Batch 34, accuracy = 0.98
I1229 08:30:13.984127 4159 caffe.cpp:308] Batch 34, loss = 0.0660918
I1229 08:30:14.041839 4159 caffe.cpp:308] Batch 35, accuracy = 0.95
I1229 08:30:14.041949 4159 caffe.cpp:308] Batch 35, loss = 0.118765
I1229 08:30:14.099517 4159 caffe.cpp:308] Batch 36, accuracy = 1
I1229 08:30:14.099625 4159 caffe.cpp:308] Batch 36, loss = 0.00772053
I1229 08:30:14.157325 4159 caffe.cpp:308] Batch 37, accuracy = 0.98
I1229 08:30:14.157438 4159 caffe.cpp:308] Batch 37, loss = 0.0586364
I1229 08:30:14.215265 4159 caffe.cpp:308] Batch 38, accuracy = 1
I1229 08:30:14.215390 4159 caffe.cpp:308] Batch 38, loss = 0.0119813
I1229 08:30:14.273030 4159 caffe.cpp:308] Batch 39, accuracy = 0.99
I1229 08:30:14.273146 4159 caffe.cpp:308] Batch 39, loss = 0.0247472
I1229 08:30:14.330781 4159 caffe.cpp:308] Batch 40, accuracy = 1
I1229 08:30:14.330905 4159 caffe.cpp:308] Batch 40, loss = 0.0181162
I1229 08:30:14.388540 4159 caffe.cpp:308] Batch 41, accuracy = 0.98
I1229 08:30:14.388654 4159 caffe.cpp:308] Batch 41, loss = 0.0517173
I1229 08:30:14.446426 4159 caffe.cpp:308] Batch 42, accuracy = 0.98
I1229 08:30:14.446543 4159 caffe.cpp:308] Batch 42, loss = 0.0362782
I1229 08:30:14.504629 4159 caffe.cpp:308] Batch 43, accuracy = 1
I1229 08:30:14.504768 4159 caffe.cpp:308] Batch 43, loss = 0.00602194
I1229 08:30:14.562206 4159 caffe.cpp:308] Batch 44, accuracy = 1
I1229 08:30:14.562331 4159 caffe.cpp:308] Batch 44, loss = 0.0127355
I1229 08:30:14.619529 4159 caffe.cpp:308] Batch 45, accuracy = 0.99
I1229 08:30:14.619657 4159 caffe.cpp:308] Batch 45, loss = 0.0432564
I1229 08:30:14.676769 4159 caffe.cpp:308] Batch 46, accuracy = 1
I1229 08:30:14.676884 4159 caffe.cpp:308] Batch 46, loss = 0.00795186
I1229 08:30:14.734272 4159 caffe.cpp:308] Batch 47, accuracy = 1
I1229 08:30:14.734398 4159 caffe.cpp:308] Batch 47, loss = 0.00829059
I1229 08:30:14.791604 4159 caffe.cpp:308] Batch 48, accuracy = 0.96
I1229 08:30:14.791720 4159 caffe.cpp:308] Batch 48, loss = 0.062666
I1229 08:30:14.849009 4159 caffe.cpp:308] Batch 49, accuracy = 1
I1229 08:30:14.849145 4159 caffe.cpp:308] Batch 49, loss = 0.012306
I1229 08:30:14.906517 4159 caffe.cpp:308] Batch 50, accuracy = 1
I1229 08:30:14.906632 4159 caffe.cpp:308] Batch 50, loss = 0.00024388
I1229 08:30:14.963975 4159 caffe.cpp:308] Batch 51, accuracy = 1
I1229 08:30:14.964099 4159 caffe.cpp:308] Batch 51, loss = 0.00388429
I1229 08:30:15.021663 4159 caffe.cpp:308] Batch 52, accuracy = 1
I1229 08:30:15.021785 4159 caffe.cpp:308] Batch 52, loss = 0.00335451
I1229 08:30:15.079262 4159 caffe.cpp:308] Batch 53, accuracy = 1
I1229 08:30:15.079386 4159 caffe.cpp:308] Batch 53, loss = 0.00241596
I1229 08:30:15.136984 4159 caffe.cpp:308] Batch 54, accuracy = 0.99
I1229 08:30:15.137111 4159 caffe.cpp:308] Batch 54, loss = 0.0142214
I1229 08:30:15.194694 4159 caffe.cpp:308] Batch 55, accuracy = 1
I1229 08:30:15.194811 4159 caffe.cpp:308] Batch 55, loss = 0.000404293
I1229 08:30:15.252305 4159 caffe.cpp:308] Batch 56, accuracy = 1
I1229 08:30:15.252424 4159 caffe.cpp:308] Batch 56, loss = 0.00623298
I1229 08:30:15.310086 4159 caffe.cpp:308] Batch 57, accuracy = 0.99
I1229 08:30:15.310197 4159 caffe.cpp:308] Batch 57, loss = 0.0176835
I1229 08:30:15.367950 4159 caffe.cpp:308] Batch 58, accuracy = 1
I1229 08:30:15.368067 4159 caffe.cpp:308] Batch 58, loss = 0.00307279
I1229 08:30:15.425412 4159 caffe.cpp:308] Batch 59, accuracy = 0.96
I1229 08:30:15.425529 4159 caffe.cpp:308] Batch 59, loss = 0.0957446
I1229 08:30:15.482833 4159 caffe.cpp:308] Batch 60, accuracy = 1
I1229 08:30:15.482949 4159 caffe.cpp:308] Batch 60, loss = 0.0111263
I1229 08:30:15.540472 4159 caffe.cpp:308] Batch 61, accuracy = 1
I1229 08:30:15.540583 4159 caffe.cpp:308] Batch 61, loss = 0.00620457
I1229 08:30:15.598043 4159 caffe.cpp:308] Batch 62, accuracy = 1
I1229 08:30:15.598151 4159 caffe.cpp:308] Batch 62, loss = 3.23028e-05
I1229 08:30:15.655606 4159 caffe.cpp:308] Batch 63, accuracy = 1
I1229 08:30:15.655725 4159 caffe.cpp:308] Batch 63, loss = 0.000154275
I1229 08:30:15.713317 4159 caffe.cpp:308] Batch 64, accuracy = 1
I1229 08:30:15.713426 4159 caffe.cpp:308] Batch 64, loss = 0.000742791
I1229 08:30:15.770719 4159 caffe.cpp:308] Batch 65, accuracy = 0.95
I1229 08:30:15.770843 4159 caffe.cpp:308] Batch 65, loss = 0.11012
I1229 08:30:15.828063 4159 caffe.cpp:308] Batch 66, accuracy = 0.98
I1229 08:30:15.828179 4159 caffe.cpp:308] Batch 66, loss = 0.0724991
I1229 08:30:15.886113 4159 caffe.cpp:308] Batch 67, accuracy = 0.99
I1229 08:30:15.886236 4159 caffe.cpp:308] Batch 67, loss = 0.0278719
I1229 08:30:15.943961 4159 caffe.cpp:308] Batch 68, accuracy = 1
I1229 08:30:15.944084 4159 caffe.cpp:308] Batch 68, loss = 0.00463332
I1229 08:30:16.001819 4159 caffe.cpp:308] Batch 69, accuracy = 1
I1229 08:30:16.001940 4159 caffe.cpp:308] Batch 69, loss = 0.000807585
I1229 08:30:16.059298 4159 caffe.cpp:308] Batch 70, accuracy = 1
I1229 08:30:16.059428 4159 caffe.cpp:308] Batch 70, loss = 0.000793165
I1229 08:30:16.116801 4159 caffe.cpp:308] Batch 71, accuracy = 1
I1229 08:30:16.116930 4159 caffe.cpp:308] Batch 71, loss = 0.00123211
I1229 08:30:16.174362 4159 caffe.cpp:308] Batch 72, accuracy = 1
I1229 08:30:16.174492 4159 caffe.cpp:308] Batch 72, loss = 0.012733
I1229 08:30:16.231920 4159 caffe.cpp:308] Batch 73, accuracy = 1
I1229 08:30:16.232060 4159 caffe.cpp:308] Batch 73, loss = 7.07769e-05
I1229 08:30:16.289247 4159 caffe.cpp:308] Batch 74, accuracy = 1
I1229 08:30:16.289373 4159 caffe.cpp:308] Batch 74, loss = 0.00166314
I1229 08:30:16.346583 4159 caffe.cpp:308] Batch 75, accuracy = 1
I1229 08:30:16.346707 4159 caffe.cpp:308] Batch 75, loss = 0.00133687
I1229 08:30:16.404086 4159 caffe.cpp:308] Batch 76, accuracy = 1
I1229 08:30:16.404251 4159 caffe.cpp:308] Batch 76, loss = 0.00015642
I1229 08:30:16.461724 4159 caffe.cpp:308] Batch 77, accuracy = 1
I1229 08:30:16.461879 4159 caffe.cpp:308] Batch 77, loss = 0.000267936
I1229 08:30:16.519292 4159 caffe.cpp:308] Batch 78, accuracy = 1
I1229 08:30:16.519414 4159 caffe.cpp:308] Batch 78, loss = 0.00102958
I1229 08:30:16.576902 4159 caffe.cpp:308] Batch 79, accuracy = 1
I1229 08:30:16.577018 4159 caffe.cpp:308] Batch 79, loss = 0.0019392
I1229 08:30:16.634402 4159 caffe.cpp:308] Batch 80, accuracy = 1
I1229 08:30:16.634522 4159 caffe.cpp:308] Batch 80, loss = 0.00750751
I1229 08:30:16.691875 4159 caffe.cpp:308] Batch 81, accuracy = 1
I1229 08:30:16.691999 4159 caffe.cpp:308] Batch 81, loss = 0.00151326
I1229 08:30:16.751351 4159 caffe.cpp:308] Batch 82, accuracy = 1
I1229 08:30:16.751477 4159 caffe.cpp:308] Batch 82, loss = 0.00455984
I1229 08:30:16.808917 4159 caffe.cpp:308] Batch 83, accuracy = 1
I1229 08:30:16.809056 4159 caffe.cpp:308] Batch 83, loss = 0.0110726
I1229 08:30:16.867089 4159 caffe.cpp:308] Batch 84, accuracy = 0.99
I1229 08:30:16.867207 4159 caffe.cpp:308] Batch 84, loss = 0.0229276
I1229 08:30:16.924979 4159 caffe.cpp:308] Batch 85, accuracy = 0.99
I1229 08:30:16.925104 4159 caffe.cpp:308] Batch 85, loss = 0.0181782
I1229 08:30:16.982977 4159 caffe.cpp:308] Batch 86, accuracy = 1
I1229 08:30:16.983095 4159 caffe.cpp:308] Batch 86, loss = 0.000105467
I1229 08:30:17.040793 4159 caffe.cpp:308] Batch 87, accuracy = 1
I1229 08:30:17.040918 4159 caffe.cpp:308] Batch 87, loss = 8.85461e-05
I1229 08:30:17.098577 4159 caffe.cpp:308] Batch 88, accuracy = 1
I1229 08:30:17.098690 4159 caffe.cpp:308] Batch 88, loss = 1.90067e-05
I1229 08:30:17.156373 4159 caffe.cpp:308] Batch 89, accuracy = 1
I1229 08:30:17.156487 4159 caffe.cpp:308] Batch 89, loss = 2.68037e-05
I1229 08:30:17.214165 4159 caffe.cpp:308] Batch 90, accuracy = 0.97
I1229 08:30:17.214289 4159 caffe.cpp:308] Batch 90, loss = 0.0997072
I1229 08:30:17.272058 4159 caffe.cpp:308] Batch 91, accuracy = 1
I1229 08:30:17.272197 4159 caffe.cpp:308] Batch 91, loss = 1.74496e-05
I1229 08:30:17.329752 4159 caffe.cpp:308] Batch 92, accuracy = 1
I1229 08:30:17.329874 4159 caffe.cpp:308] Batch 92, loss = 0.000416481
I1229 08:30:17.387363 4159 caffe.cpp:308] Batch 93, accuracy = 1
I1229 08:30:17.387485 4159 caffe.cpp:308] Batch 93, loss = 0.00121327
I1229 08:30:17.444933 4159 caffe.cpp:308] Batch 94, accuracy = 1
I1229 08:30:17.445055 4159 caffe.cpp:308] Batch 94, loss = 0.000482124
I1229 08:30:17.502532 4159 caffe.cpp:308] Batch 95, accuracy = 1
I1229 08:30:17.502652 4159 caffe.cpp:308] Batch 95, loss = 0.00230492
I1229 08:30:17.560281 4159 caffe.cpp:308] Batch 96, accuracy = 0.99
I1229 08:30:17.560406 4159 caffe.cpp:308] Batch 96, loss = 0.0368026
I1229 08:30:17.617797 4159 caffe.cpp:308] Batch 97, accuracy = 0.98
I1229 08:30:17.617933 4159 caffe.cpp:308] Batch 97, loss = 0.0985361
I1229 08:30:17.674965 4159 caffe.cpp:308] Batch 98, accuracy = 1
I1229 08:30:17.675092 4159 caffe.cpp:308] Batch 98, loss = 0.00306735
I1229 08:30:17.732564 4159 caffe.cpp:308] Batch 99, accuracy = 1
I1229 08:30:17.732704 4159 caffe.cpp:308] Batch 99, loss = 0.0103088
I1229 08:30:17.732751 4159 caffe.cpp:313] Loss: 0.0273851
I1229 08:30:17.732795 4159 caffe.cpp:325] accuracy = 0.9913
I1229 08:30:17.732837 4159 caffe.cpp:325] loss = 0.0273851 (* 1 = 0.0273851 loss)
root@ip-172-30-0-251:/caffe#