Caffe的LeNet实现

lenet_solver.prototxt:

# Solver settings for training LeNet on MNIST (net defined in lenet_train_test.prototxt).
net: "/home/zhaoys/myf/lenet/lenet_caffe/lenet_train_test.prototxt"
# Number of forward batches per test pass. Should satisfy
# test_iter * test_batch_size = test set size (here 100 * 100 = 10000),
# so the whole MNIST test set is covered.
test_iter: 100
# Run a full test pass every 500 training iterations.
# NOTE(review): the original comment claimed test_interval * train_batch_size
# covers the training set, but 500 * 64 = 32000 < 60000 MNIST training images
# — this is roughly half an epoch between tests, which is fine but not "one epoch".
test_interval: 500
# Base learning rate; should be neither too large nor too small.
base_lr: 0.01
# Momentum, for smooth and fast convergence.
momentum: 0.9
# Weight decay (L2 regularization penalty) to reduce overfitting.
weight_decay: 0.0005
# Learning-rate decay policy.
lr_policy: "inv"
# "inv" policy parameters: lr = base_lr * (1 + gamma * iter)^(-power)
# (matches the log below, e.g. lr = 0.00992565 at iteration 100).
gamma: 0.0001
power: 0.75
# Print the training loss every 100 iterations.
display: 100
# Total number of training iterations.
max_iter: 10000
# Save a model/solver-state snapshot every 5000 iterations.
snapshot: 5000
snapshot_prefix: "/home/zhaoys/myf/lenet/lenet_caffe/snapshot/train"
# Run the solver on the GPU.
solver_mode: GPU

lenet_train_test.prototxt:

# LeNet network definition, shared between the TRAIN and TEST phases.
# Layers without an include rule run in both phases; the two "mnist" data
# layers carry phase rules, so exactly one of them is active at a time.
name: "LeNet"
# Training data: MNIST train LMDB, batches of 64.
# scale 0.00390625 = 1/256 maps 8-bit pixel values into [0, 1).
layer {
	name: "mnist"
	type: "Data"
	top: "data"
	top: "label"
	include {
		phase: TRAIN
	}
	transform_param {
		scale: 0.00390625
	}
	data_param {
		source: "/home/zhaoys/myf/dataset/mnist/mnist_train_lmdb"
		batch_size: 64
		backend: LMDB
	}
}
# Test data: MNIST test LMDB, batches of 100 (with test_iter 100 in the
# solver this covers the full 10000-image test set).
layer {
	name: "mnist"
	type: "Data"
	top: "data"
	top: "label"
	include {
		phase: TEST
	}
	transform_param {
		scale: 0.00390625
	}
	data_param {
		source: "/home/zhaoys/myf/dataset/mnist/mnist_test_lmdb"
		batch_size: 100
		backend: LMDB
	}
}
# conv1: 20 filters of 5x5, stride 1 (28x28 input -> 24x24 output per the
# log below). The two param blocks are the learning-rate multipliers for
# the weights (1x base_lr) and the bias (2x base_lr), in that order.
layer {
	name: "conv1"
	type: "Convolution"
	bottom: "data"
	top: "conv1"
	param {
		lr_mult: 1
	}
	param {
		lr_mult: 2
	}
	convolution_param {
		num_output: 20
		kernel_size: 5
		stride: 1
		weight_filler {
			type: "xavier"
		}
		bias_filler {
			type: "constant"
		}
	}
}
# pool1: 2x2 max pooling with stride 2 — halves each spatial dimension.
layer {
	name: "pool1"
	type: "Pooling"
	bottom: "conv1"
	top: "pool1"
	pooling_param {
		pool: MAX
		kernel_size: 2
		stride: 2
	}
}
# conv2: 50 filters of 5x5, stride 1 (12x12 -> 8x8 per the log below).
layer {
	name: "conv2"
	type: "Convolution"
	bottom: "pool1"
	top: "conv2"
	param {
		lr_mult: 1
	}
	param {
		lr_mult: 2
	}
	convolution_param {
		num_output: 50
		kernel_size: 5
		stride: 1
		weight_filler {
			type: "xavier"
		}
		bias_filler {
			type: "constant"
		}
	}
}

# pool2: 2x2 max pooling with stride 2 (8x8 -> 4x4).
layer {
	name: "pool2"
	type: "Pooling"
	bottom: "conv2"
	top: "pool2"
	pooling_param {
		pool: MAX
		kernel_size: 2
		stride: 2
	}
}
# ip1: fully-connected layer with 500 outputs.
layer {
	name: "ip1"
	type: "InnerProduct"
	bottom: "pool2"
	top: "ip1"
	param {
		lr_mult: 1
	}
	param {
		lr_mult: 2
	}
	inner_product_param {
		num_output: 500
		weight_filler {
			type: "xavier"
		}
		bias_filler {
			type: "constant"
		}
	}
}
# relu1: ReLU applied in place on ip1 (bottom == top saves memory).
layer {
	name: "relu1"
	type: "ReLU"
	bottom: "ip1"
	top: "ip1"
}
# ip2: fully-connected layer with 10 outputs — one score per digit class.
layer {
	name: "ip2"
	type: "InnerProduct"
	bottom: "ip1"
	top: "ip2"
	param {
		lr_mult: 1
	}
	param {
		lr_mult: 2
	}
	inner_product_param {
		num_output: 10
		weight_filler {
			type: "xavier"
		}
		bias_filler {
			type: "constant"
		}
	}
}
# accuracy: classification accuracy on ip2 scores; TEST phase only.
layer {
	name: "accuracy"
	type: "Accuracy"
	bottom: "ip2"
	bottom: "label"
	top: "accuracy"
	include {
		phase: TEST
	}
}
# loss: softmax over ip2 followed by multinomial logistic loss
# (active in both phases; drives backpropagation during training).
layer {
	name: "loss"
	type: "SoftmaxWithLoss"
	bottom: "ip2"
	bottom: "label"
	top: "loss"
}

train_lenet.sh:

#!/usr/bin/env sh
# Launch Caffe training for LeNet with the solver config in this directory.
# Bug fix: the shebang was "#!/usr/bin/env/sh" — an invalid interpreter path
# (no such file /usr/bin/env/sh); it must be "env" + space + "sh".
set -e
# GLOG_logtostderr=0 sends glog output to files under GLOG_log_dir
# instead of stderr; both are plain environment variables for the
# single caffe invocation below.
GLOG_logtostderr=0 \
    GLOG_log_dir='/home/zhaoys/myf/lenet/lenet_caffe/log' \
    /home/panyun/sourcecode/caffe/cmake_build/tools/caffe \
    train --solver=lenet_solver.prototxt

lenet.png:

info:

Log file created at: 2018/12/31 05:14:03
Running on machine: sophie
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1231 05:14:03.284312 112226 caffe.cpp:218] Using GPUs 0
I1231 05:14:03.325053 112226 caffe.cpp:223] GPU 0: GeForce GTX 1080 Ti
I1231 05:14:03.616346 112226 solver.cpp:44] Initializing solver from parameters: 
test_iter: 100
test_interval: 500
base_lr: 0.01
display: 100
max_iter: 10000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 5000
snapshot_prefix: "/home/zhaoys/myf/lenet/lenet_caffe/snapshot/train"
solver_mode: GPU
device_id: 0
net: "/home/zhaoys/myf/lenet/lenet_caffe/lenet_train_test.prototxt"
train_state {
  level: 0
  stage: ""
}
I1231 05:14:03.616485 112226 solver.cpp:87] Creating training net from net file: /home/zhaoys/myf/lenet/lenet_caffe/lenet_train_test.prototxt
I1231 05:14:03.616652 112226 net.cpp:294] The NetState phase (0) differed from the phase (1) specified by a rule in layer mnist
I1231 05:14:03.616668 112226 net.cpp:294] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy
I1231 05:14:03.616734 112226 net.cpp:51] Initializing net from parameters: 
name: "LeNet"
state {
  phase: TRAIN
  level: 0
  stage: ""
}
layer {
  name: "mnist"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  transform_param {
    scale: 0.00390625
  }
  data_param {
    source: "/home/zhaoys/myf/dataset/mnist/mnist_train_lmdb"
    batch_size: 64
    backend: LMDB
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 20
    kernel_size: 5
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 50
    kernel_size: 5
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "ip1"
  type: "InnerProduct"
  bottom: "pool2"
  top: "ip1"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  inner_product_param {
    num_output: 500
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "ip1"
  top: "ip1"
}
layer {
  name: "ip2"
  type: "InnerProduct"
  bottom: "ip1"
  top: "ip2"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  inner_product_param {
    num_output: 10
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "ip2"
  bottom: "label"
  top: "loss"
}
I1231 05:14:03.616830 112226 layer_factory.hpp:77] Creating layer mnist
I1231 05:14:03.616914 112226 db_lmdb.cpp:35] Opened lmdb /home/zhaoys/myf/dataset/mnist/mnist_train_lmdb
I1231 05:14:03.616936 112226 net.cpp:84] Creating Layer mnist
I1231 05:14:03.616943 112226 net.cpp:380] mnist -> data
I1231 05:14:03.616960 112226 net.cpp:380] mnist -> label
I1231 05:14:03.617887 112226 data_layer.cpp:45] output data size: 64,1,28,28
I1231 05:14:03.619812 112226 net.cpp:122] Setting up mnist
I1231 05:14:03.619823 112226 net.cpp:129] Top shape: 64 1 28 28 (50176)
I1231 05:14:03.619828 112226 net.cpp:129] Top shape: 64 (64)
I1231 05:14:03.619832 112226 net.cpp:137] Memory required for data: 200960
I1231 05:14:03.619838 112226 layer_factory.hpp:77] Creating layer conv1
I1231 05:14:03.619874 112226 net.cpp:84] Creating Layer conv1
I1231 05:14:03.619879 112226 net.cpp:406] conv1 <- data
I1231 05:14:03.619892 112226 net.cpp:380] conv1 -> conv1
I1231 05:14:04.082386 112226 net.cpp:122] Setting up conv1
I1231 05:14:04.082428 112226 net.cpp:129] Top shape: 64 20 24 24 (737280)
I1231 05:14:04.082432 112226 net.cpp:137] Memory required for data: 3150080
I1231 05:14:04.082495 112226 layer_factory.hpp:77] Creating layer pool1
I1231 05:14:04.082514 112226 net.cpp:84] Creating Layer pool1
I1231 05:14:04.082518 112226 net.cpp:406] pool1 <- conv1
I1231 05:14:04.082530 112226 net.cpp:380] pool1 -> pool1
I1231 05:14:04.082574 112226 net.cpp:122] Setting up pool1
I1231 05:14:04.082581 112226 net.cpp:129] Top shape: 64 20 12 12 (184320)
I1231 05:14:04.082584 112226 net.cpp:137] Memory required for data: 3887360
I1231 05:14:04.082587 112226 layer_factory.hpp:77] Creating layer conv2
I1231 05:14:04.082600 112226 net.cpp:84] Creating Layer conv2
I1231 05:14:04.082604 112226 net.cpp:406] conv2 <- pool1
I1231 05:14:04.082609 112226 net.cpp:380] conv2 -> conv2
I1231 05:14:04.084203 112226 net.cpp:122] Setting up conv2
I1231 05:14:04.084215 112226 net.cpp:129] Top shape: 64 50 8 8 (204800)
I1231 05:14:04.084218 112226 net.cpp:137] Memory required for data: 4706560
I1231 05:14:04.084225 112226 layer_factory.hpp:77] Creating layer pool2
I1231 05:14:04.084233 112226 net.cpp:84] Creating Layer pool2
I1231 05:14:04.084237 112226 net.cpp:406] pool2 <- conv2
I1231 05:14:04.084242 112226 net.cpp:380] pool2 -> pool2
I1231 05:14:04.084270 112226 net.cpp:122] Setting up pool2
I1231 05:14:04.084275 112226 net.cpp:129] Top shape: 64 50 4 4 (51200)
I1231 05:14:04.084281 112226 net.cpp:137] Memory required for data: 4911360
I1231 05:14:04.084285 112226 layer_factory.hpp:77] Creating layer ip1
I1231 05:14:04.084292 112226 net.cpp:84] Creating Layer ip1
I1231 05:14:04.084295 112226 net.cpp:406] ip1 <- pool2
I1231 05:14:04.084300 112226 net.cpp:380] ip1 -> ip1
I1231 05:14:04.086802 112226 net.cpp:122] Setting up ip1
I1231 05:14:04.086815 112226 net.cpp:129] Top shape: 64 500 (32000)
I1231 05:14:04.086818 112226 net.cpp:137] Memory required for data: 5039360
I1231 05:14:04.086827 112226 layer_factory.hpp:77] Creating layer relu1
I1231 05:14:04.086833 112226 net.cpp:84] Creating Layer relu1
I1231 05:14:04.086838 112226 net.cpp:406] relu1 <- ip1
I1231 05:14:04.086843 112226 net.cpp:367] relu1 -> ip1 (in-place)
I1231 05:14:04.087512 112226 net.cpp:122] Setting up relu1
I1231 05:14:04.087523 112226 net.cpp:129] Top shape: 64 500 (32000)
I1231 05:14:04.087527 112226 net.cpp:137] Memory required for data: 5167360
I1231 05:14:04.087529 112226 layer_factory.hpp:77] Creating layer ip2
I1231 05:14:04.087536 112226 net.cpp:84] Creating Layer ip2
I1231 05:14:04.087540 112226 net.cpp:406] ip2 <- ip1
I1231 05:14:04.087546 112226 net.cpp:380] ip2 -> ip2
I1231 05:14:04.088604 112226 net.cpp:122] Setting up ip2
I1231 05:14:04.088616 112226 net.cpp:129] Top shape: 64 10 (640)
I1231 05:14:04.088619 112226 net.cpp:137] Memory required for data: 5169920
I1231 05:14:04.088625 112226 layer_factory.hpp:77] Creating layer loss
I1231 05:14:04.088634 112226 net.cpp:84] Creating Layer loss
I1231 05:14:04.088637 112226 net.cpp:406] loss <- ip2
I1231 05:14:04.088641 112226 net.cpp:406] loss <- label
I1231 05:14:04.088649 112226 net.cpp:380] loss -> loss
I1231 05:14:04.094878 112226 layer_factory.hpp:77] Creating layer loss
I1231 05:14:04.095325 112226 net.cpp:122] Setting up loss
I1231 05:14:04.095335 112226 net.cpp:129] Top shape: (1)
I1231 05:14:04.095340 112226 net.cpp:132]     with loss weight 1
I1231 05:14:04.095353 112226 net.cpp:137] Memory required for data: 5169924
I1231 05:14:04.095356 112226 net.cpp:198] loss needs backward computation.
I1231 05:14:04.095363 112226 net.cpp:198] ip2 needs backward computation.
I1231 05:14:04.095367 112226 net.cpp:198] relu1 needs backward computation.
I1231 05:14:04.095371 112226 net.cpp:198] ip1 needs backward computation.
I1231 05:14:04.095374 112226 net.cpp:198] pool2 needs backward computation.
I1231 05:14:04.095377 112226 net.cpp:198] conv2 needs backward computation.
I1231 05:14:04.095381 112226 net.cpp:198] pool1 needs backward computation.
I1231 05:14:04.095384 112226 net.cpp:198] conv1 needs backward computation.
I1231 05:14:04.095388 112226 net.cpp:200] mnist does not need backward computation.
I1231 05:14:04.095392 112226 net.cpp:242] This network produces output loss
I1231 05:14:04.095417 112226 net.cpp:255] Network initialization done.
I1231 05:14:04.095551 112226 solver.cpp:172] Creating test net (#0) specified by net file: /home/zhaoys/myf/lenet/lenet_caffe/lenet_train_test.prototxt
I1231 05:14:04.095569 112226 net.cpp:294] The NetState phase (1) differed from the phase (0) specified by a rule in layer mnist
I1231 05:14:04.095638 112226 net.cpp:51] Initializing net from parameters: 
name: "LeNet"
state {
  phase: TEST
}
layer {
  name: "mnist"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TEST
  }
  transform_param {
    scale: 0.00390625
  }
  data_param {
    source: "/home/zhaoys/myf/dataset/mnist/mnist_test_lmdb"
    batch_size: 100
    backend: LMDB
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 20
    kernel_size: 5
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 50
    kernel_size: 5
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "ip1"
  type: "InnerProduct"
  bottom: "pool2"
  top: "ip1"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  inner_product_param {
    num_output: 500
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "ip1"
  top: "ip1"
}
layer {
  name: "ip2"
  type: "InnerProduct"
  bottom: "ip1"
  top: "ip2"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  inner_product_param {
    num_output: 10
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "ip2"
  bottom: "label"
  top: "accuracy"
  include {
    phase: TEST
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "ip2"
  bottom: "label"
  top: "loss"
}
I1231 05:14:04.095754 112226 layer_factory.hpp:77] Creating layer mnist
I1231 05:14:04.095804 112226 db_lmdb.cpp:35] Opened lmdb /home/zhaoys/myf/dataset/mnist/mnist_test_lmdb
I1231 05:14:04.095819 112226 net.cpp:84] Creating Layer mnist
I1231 05:14:04.095824 112226 net.cpp:380] mnist -> data
I1231 05:14:04.095830 112226 net.cpp:380] mnist -> label
I1231 05:14:04.095901 112226 data_layer.cpp:45] output data size: 100,1,28,28
I1231 05:14:04.097389 112226 net.cpp:122] Setting up mnist
I1231 05:14:04.097401 112226 net.cpp:129] Top shape: 100 1 28 28 (78400)
I1231 05:14:04.097405 112226 net.cpp:129] Top shape: 100 (100)
I1231 05:14:04.097407 112226 net.cpp:137] Memory required for data: 314000
I1231 05:14:04.097410 112226 layer_factory.hpp:77] Creating layer label_mnist_1_split
I1231 05:14:04.097416 112226 net.cpp:84] Creating Layer label_mnist_1_split
I1231 05:14:04.097421 112226 net.cpp:406] label_mnist_1_split <- label
I1231 05:14:04.097447 112226 net.cpp:380] label_mnist_1_split -> label_mnist_1_split_0
I1231 05:14:04.097455 112226 net.cpp:380] label_mnist_1_split -> label_mnist_1_split_1
I1231 05:14:04.097487 112226 net.cpp:122] Setting up label_mnist_1_split
I1231 05:14:04.097493 112226 net.cpp:129] Top shape: 100 (100)
I1231 05:14:04.097497 112226 net.cpp:129] Top shape: 100 (100)
I1231 05:14:04.097501 112226 net.cpp:137] Memory required for data: 314800
I1231 05:14:04.097504 112226 layer_factory.hpp:77] Creating layer conv1
I1231 05:14:04.097517 112226 net.cpp:84] Creating Layer conv1
I1231 05:14:04.097522 112226 net.cpp:406] conv1 <- data
I1231 05:14:04.097527 112226 net.cpp:380] conv1 -> conv1
I1231 05:14:04.099556 112226 net.cpp:122] Setting up conv1
I1231 05:14:04.099567 112226 net.cpp:129] Top shape: 100 20 24 24 (1152000)
I1231 05:14:04.099570 112226 net.cpp:137] Memory required for data: 4922800
I1231 05:14:04.099577 112226 layer_factory.hpp:77] Creating layer pool1
I1231 05:14:04.099584 112226 net.cpp:84] Creating Layer pool1
I1231 05:14:04.099588 112226 net.cpp:406] pool1 <- conv1
I1231 05:14:04.099594 112226 net.cpp:380] pool1 -> pool1
I1231 05:14:04.099628 112226 net.cpp:122] Setting up pool1
I1231 05:14:04.099633 112226 net.cpp:129] Top shape: 100 20 12 12 (288000)
I1231 05:14:04.099637 112226 net.cpp:137] Memory required for data: 6074800
I1231 05:14:04.099640 112226 layer_factory.hpp:77] Creating layer conv2
I1231 05:14:04.099649 112226 net.cpp:84] Creating Layer conv2
I1231 05:14:04.099654 112226 net.cpp:406] conv2 <- pool1
I1231 05:14:04.099660 112226 net.cpp:380] conv2 -> conv2
I1231 05:14:04.101852 112226 net.cpp:122] Setting up conv2
I1231 05:14:04.101866 112226 net.cpp:129] Top shape: 100 50 8 8 (320000)
I1231 05:14:04.101871 112226 net.cpp:137] Memory required for data: 7354800
I1231 05:14:04.101881 112226 layer_factory.hpp:77] Creating layer pool2
I1231 05:14:04.101889 112226 net.cpp:84] Creating Layer pool2
I1231 05:14:04.101893 112226 net.cpp:406] pool2 <- conv2
I1231 05:14:04.101899 112226 net.cpp:380] pool2 -> pool2
I1231 05:14:04.101932 112226 net.cpp:122] Setting up pool2
I1231 05:14:04.101939 112226 net.cpp:129] Top shape: 100 50 4 4 (80000)
I1231 05:14:04.101958 112226 net.cpp:137] Memory required for data: 7674800
I1231 05:14:04.101964 112226 layer_factory.hpp:77] Creating layer ip1
I1231 05:14:04.101974 112226 net.cpp:84] Creating Layer ip1
I1231 05:14:04.101990 112226 net.cpp:406] ip1 <- pool2
I1231 05:14:04.102005 112226 net.cpp:380] ip1 -> ip1
I1231 05:14:04.104290 112226 net.cpp:122] Setting up ip1
I1231 05:14:04.104302 112226 net.cpp:129] Top shape: 100 500 (50000)
I1231 05:14:04.104305 112226 net.cpp:137] Memory required for data: 7874800
I1231 05:14:04.104311 112226 layer_factory.hpp:77] Creating layer relu1
I1231 05:14:04.104317 112226 net.cpp:84] Creating Layer relu1
I1231 05:14:04.104321 112226 net.cpp:406] relu1 <- ip1
I1231 05:14:04.104326 112226 net.cpp:367] relu1 -> ip1 (in-place)
I1231 05:14:04.104709 112226 net.cpp:122] Setting up relu1
I1231 05:14:04.104718 112226 net.cpp:129] Top shape: 100 500 (50000)
I1231 05:14:04.104722 112226 net.cpp:137] Memory required for data: 8074800
I1231 05:14:04.104725 112226 layer_factory.hpp:77] Creating layer ip2
I1231 05:14:04.104733 112226 net.cpp:84] Creating Layer ip2
I1231 05:14:04.104737 112226 net.cpp:406] ip2 <- ip1
I1231 05:14:04.104741 112226 net.cpp:380] ip2 -> ip2
I1231 05:14:04.104845 112226 net.cpp:122] Setting up ip2
I1231 05:14:04.104851 112226 net.cpp:129] Top shape: 100 10 (1000)
I1231 05:14:04.104854 112226 net.cpp:137] Memory required for data: 8078800
I1231 05:14:04.104859 112226 layer_factory.hpp:77] Creating layer ip2_ip2_0_split
I1231 05:14:04.104866 112226 net.cpp:84] Creating Layer ip2_ip2_0_split
I1231 05:14:04.104869 112226 net.cpp:406] ip2_ip2_0_split <- ip2
I1231 05:14:04.104873 112226 net.cpp:380] ip2_ip2_0_split -> ip2_ip2_0_split_0
I1231 05:14:04.104879 112226 net.cpp:380] ip2_ip2_0_split -> ip2_ip2_0_split_1
I1231 05:14:04.104907 112226 net.cpp:122] Setting up ip2_ip2_0_split
I1231 05:14:04.104912 112226 net.cpp:129] Top shape: 100 10 (1000)
I1231 05:14:04.104916 112226 net.cpp:129] Top shape: 100 10 (1000)
I1231 05:14:04.104919 112226 net.cpp:137] Memory required for data: 8086800
I1231 05:14:04.104923 112226 layer_factory.hpp:77] Creating layer accuracy
I1231 05:14:04.104928 112226 net.cpp:84] Creating Layer accuracy
I1231 05:14:04.104933 112226 net.cpp:406] accuracy <- ip2_ip2_0_split_0
I1231 05:14:04.104938 112226 net.cpp:406] accuracy <- label_mnist_1_split_0
I1231 05:14:04.104943 112226 net.cpp:380] accuracy -> accuracy
I1231 05:14:04.104950 112226 net.cpp:122] Setting up accuracy
I1231 05:14:04.104954 112226 net.cpp:129] Top shape: (1)
I1231 05:14:04.104957 112226 net.cpp:137] Memory required for data: 8086804
I1231 05:14:04.104975 112226 layer_factory.hpp:77] Creating layer loss
I1231 05:14:04.104982 112226 net.cpp:84] Creating Layer loss
I1231 05:14:04.104985 112226 net.cpp:406] loss <- ip2_ip2_0_split_1
I1231 05:14:04.104990 112226 net.cpp:406] loss <- label_mnist_1_split_1
I1231 05:14:04.104995 112226 net.cpp:380] loss -> loss
I1231 05:14:04.105001 112226 layer_factory.hpp:77] Creating layer loss
I1231 05:14:04.105728 112226 net.cpp:122] Setting up loss
I1231 05:14:04.105739 112226 net.cpp:129] Top shape: (1)
I1231 05:14:04.105741 112226 net.cpp:132]     with loss weight 1
I1231 05:14:04.105749 112226 net.cpp:137] Memory required for data: 8086808
I1231 05:14:04.105752 112226 net.cpp:198] loss needs backward computation.
I1231 05:14:04.105758 112226 net.cpp:200] accuracy does not need backward computation.
I1231 05:14:04.105762 112226 net.cpp:198] ip2_ip2_0_split needs backward computation.
I1231 05:14:04.105765 112226 net.cpp:198] ip2 needs backward computation.
I1231 05:14:04.105768 112226 net.cpp:198] relu1 needs backward computation.
I1231 05:14:04.105772 112226 net.cpp:198] ip1 needs backward computation.
I1231 05:14:04.105775 112226 net.cpp:198] pool2 needs backward computation.
I1231 05:14:04.105780 112226 net.cpp:198] conv2 needs backward computation.
I1231 05:14:04.105783 112226 net.cpp:198] pool1 needs backward computation.
I1231 05:14:04.105787 112226 net.cpp:198] conv1 needs backward computation.
I1231 05:14:04.105790 112226 net.cpp:200] label_mnist_1_split does not need backward computation.
I1231 05:14:04.105793 112226 net.cpp:200] mnist does not need backward computation.
I1231 05:14:04.105796 112226 net.cpp:242] This network produces output accuracy
I1231 05:14:04.105801 112226 net.cpp:242] This network produces output loss
I1231 05:14:04.105811 112226 net.cpp:255] Network initialization done.
I1231 05:14:04.105844 112226 solver.cpp:56] Solver scaffolding done.
I1231 05:14:04.106065 112226 caffe.cpp:248] Starting Optimization
I1231 05:14:04.106072 112226 solver.cpp:272] Solving LeNet
I1231 05:14:04.106076 112226 solver.cpp:273] Learning Rate Policy: inv
I1231 05:14:04.106482 112226 solver.cpp:330] Iteration 0, Testing net (#0)
I1231 05:14:04.113574 112226 blocking_queue.cpp:49] Waiting for data
I1231 05:14:04.161089 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:04.161952 112226 solver.cpp:397]     Test net output #0: accuracy = 0.1022
I1231 05:14:04.161972 112226 solver.cpp:397]     Test net output #1: loss = 2.39148 (* 1 = 2.39148 loss)
I1231 05:14:04.164427 112226 solver.cpp:218] Iteration 0 (0 iter/s, 0.0583279s/100 iters), loss = 2.46638
I1231 05:14:04.164444 112226 solver.cpp:237]     Train net output #0: loss = 2.46638 (* 1 = 2.46638 loss)
I1231 05:14:04.164453 112226 sgd_solver.cpp:105] Iteration 0, lr = 0.01
I1231 05:14:04.297490 112226 solver.cpp:218] Iteration 100 (751.7 iter/s, 0.133032s/100 iters), loss = 0.230898
I1231 05:14:04.297534 112226 solver.cpp:237]     Train net output #0: loss = 0.230898 (* 1 = 0.230898 loss)
I1231 05:14:04.297541 112226 sgd_solver.cpp:105] Iteration 100, lr = 0.00992565
I1231 05:14:04.422021 112226 solver.cpp:218] Iteration 200 (803.339 iter/s, 0.124481s/100 iters), loss = 0.165111
I1231 05:14:04.422052 112226 solver.cpp:237]     Train net output #0: loss = 0.165111 (* 1 = 0.165111 loss)
I1231 05:14:04.422060 112226 sgd_solver.cpp:105] Iteration 200, lr = 0.00985258
I1231 05:14:04.544306 112226 solver.cpp:218] Iteration 300 (818.064 iter/s, 0.12224s/100 iters), loss = 0.157251
I1231 05:14:04.544351 112226 solver.cpp:237]     Train net output #0: loss = 0.157251 (* 1 = 0.157251 loss)
I1231 05:14:04.544366 112226 sgd_solver.cpp:105] Iteration 300, lr = 0.00978075
I1231 05:14:04.662237 112226 solver.cpp:218] Iteration 400 (848.329 iter/s, 0.117879s/100 iters), loss = 0.0765152
I1231 05:14:04.662271 112226 solver.cpp:237]     Train net output #0: loss = 0.0765152 (* 1 = 0.0765152 loss)
I1231 05:14:04.662276 112226 sgd_solver.cpp:105] Iteration 400, lr = 0.00971013
I1231 05:14:04.775310 112226 solver.cpp:330] Iteration 500, Testing net (#0)
I1231 05:14:04.831015 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:04.831811 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9706
I1231 05:14:04.831848 112226 solver.cpp:397]     Test net output #1: loss = 0.090288 (* 1 = 0.090288 loss)
I1231 05:14:04.832991 112226 solver.cpp:218] Iteration 500 (585.778 iter/s, 0.170713s/100 iters), loss = 0.116756
I1231 05:14:04.833009 112226 solver.cpp:237]     Train net output #0: loss = 0.116756 (* 1 = 0.116756 loss)
I1231 05:14:04.833017 112226 sgd_solver.cpp:105] Iteration 500, lr = 0.00964069
I1231 05:14:04.949153 112226 solver.cpp:218] Iteration 600 (861.143 iter/s, 0.116125s/100 iters), loss = 0.078895
I1231 05:14:04.949213 112226 solver.cpp:237]     Train net output #0: loss = 0.078895 (* 1 = 0.078895 loss)
I1231 05:14:04.949223 112226 sgd_solver.cpp:105] Iteration 600, lr = 0.0095724
I1231 05:14:05.069152 112226 solver.cpp:218] Iteration 700 (833.828 iter/s, 0.119929s/100 iters), loss = 0.142484
I1231 05:14:05.069192 112226 solver.cpp:237]     Train net output #0: loss = 0.142484 (* 1 = 0.142484 loss)
I1231 05:14:05.069200 112226 sgd_solver.cpp:105] Iteration 700, lr = 0.00950522
I1231 05:14:05.184397 112226 solver.cpp:218] Iteration 800 (868.055 iter/s, 0.1152s/100 iters), loss = 0.211653
I1231 05:14:05.184422 112226 solver.cpp:237]     Train net output #0: loss = 0.211653 (* 1 = 0.211653 loss)
I1231 05:14:05.184430 112226 sgd_solver.cpp:105] Iteration 800, lr = 0.00943913
I1231 05:14:05.299252 112226 solver.cpp:218] Iteration 900 (870.904 iter/s, 0.114823s/100 iters), loss = 0.174603
I1231 05:14:05.299284 112226 solver.cpp:237]     Train net output #0: loss = 0.174603 (* 1 = 0.174603 loss)
I1231 05:14:05.299291 112226 sgd_solver.cpp:105] Iteration 900, lr = 0.00937411
I1231 05:14:05.337515 112271 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:05.413573 112226 solver.cpp:330] Iteration 1000, Testing net (#0)
I1231 05:14:05.470590 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:05.472153 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9809
I1231 05:14:05.472216 112226 solver.cpp:397]     Test net output #1: loss = 0.0572349 (* 1 = 0.0572349 loss)
I1231 05:14:05.473395 112226 solver.cpp:218] Iteration 1000 (574.397 iter/s, 0.174095s/100 iters), loss = 0.096166
I1231 05:14:05.473451 112226 solver.cpp:237]     Train net output #0: loss = 0.0961661 (* 1 = 0.0961661 loss)
I1231 05:14:05.473464 112226 sgd_solver.cpp:105] Iteration 1000, lr = 0.00931012
I1231 05:14:05.590864 112226 solver.cpp:218] Iteration 1100 (851.755 iter/s, 0.117405s/100 iters), loss = 0.00682953
I1231 05:14:05.590906 112226 solver.cpp:237]     Train net output #0: loss = 0.00682963 (* 1 = 0.00682963 loss)
I1231 05:14:05.590914 112226 sgd_solver.cpp:105] Iteration 1100, lr = 0.00924715
I1231 05:14:05.705976 112226 solver.cpp:218] Iteration 1200 (869.114 iter/s, 0.11506s/100 iters), loss = 0.0163695
I1231 05:14:05.706015 112226 solver.cpp:237]     Train net output #0: loss = 0.0163696 (* 1 = 0.0163696 loss)
I1231 05:14:05.706025 112226 sgd_solver.cpp:105] Iteration 1200, lr = 0.00918515
I1231 05:14:05.821393 112226 solver.cpp:218] Iteration 1300 (866.807 iter/s, 0.115366s/100 iters), loss = 0.0149529
I1231 05:14:05.821424 112226 solver.cpp:237]     Train net output #0: loss = 0.014953 (* 1 = 0.014953 loss)
I1231 05:14:05.821432 112226 sgd_solver.cpp:105] Iteration 1300, lr = 0.00912412
I1231 05:14:05.935075 112226 solver.cpp:218] Iteration 1400 (880.008 iter/s, 0.113635s/100 iters), loss = 0.0102752
I1231 05:14:05.935109 112226 solver.cpp:237]     Train net output #0: loss = 0.0102753 (* 1 = 0.0102753 loss)
I1231 05:14:05.935118 112226 sgd_solver.cpp:105] Iteration 1400, lr = 0.00906403
I1231 05:14:06.049176 112226 solver.cpp:330] Iteration 1500, Testing net (#0)
I1231 05:14:06.100338 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:06.100885 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9849
I1231 05:14:06.100903 112226 solver.cpp:397]     Test net output #1: loss = 0.050994 (* 1 = 0.050994 loss)
I1231 05:14:06.101977 112226 solver.cpp:218] Iteration 1500 (599.298 iter/s, 0.166862s/100 iters), loss = 0.0748195
I1231 05:14:06.102027 112226 solver.cpp:237]     Train net output #0: loss = 0.0748197 (* 1 = 0.0748197 loss)
I1231 05:14:06.102036 112226 sgd_solver.cpp:105] Iteration 1500, lr = 0.00900485
I1231 05:14:06.215562 112226 solver.cpp:218] Iteration 1600 (880.884 iter/s, 0.113522s/100 iters), loss = 0.148792
I1231 05:14:06.215605 112226 solver.cpp:237]     Train net output #0: loss = 0.148793 (* 1 = 0.148793 loss)
I1231 05:14:06.215622 112226 sgd_solver.cpp:105] Iteration 1600, lr = 0.00894657
I1231 05:14:06.329977 112226 solver.cpp:218] Iteration 1700 (874.374 iter/s, 0.114368s/100 iters), loss = 0.0334666
I1231 05:14:06.330003 112226 solver.cpp:237]     Train net output #0: loss = 0.0334667 (* 1 = 0.0334667 loss)
I1231 05:14:06.330009 112226 sgd_solver.cpp:105] Iteration 1700, lr = 0.00888916
I1231 05:14:06.443758 112226 solver.cpp:218] Iteration 1800 (879.127 iter/s, 0.113749s/100 iters), loss = 0.0173706
I1231 05:14:06.443785 112226 solver.cpp:237]     Train net output #0: loss = 0.0173707 (* 1 = 0.0173707 loss)
I1231 05:14:06.443794 112226 sgd_solver.cpp:105] Iteration 1800, lr = 0.0088326
I1231 05:14:06.524282 112271 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:06.558614 112226 solver.cpp:218] Iteration 1900 (870.899 iter/s, 0.114824s/100 iters), loss = 0.140631
I1231 05:14:06.558640 112226 solver.cpp:237]     Train net output #0: loss = 0.140631 (* 1 = 0.140631 loss)
I1231 05:14:06.558646 112226 sgd_solver.cpp:105] Iteration 1900, lr = 0.00877687
I1231 05:14:06.670892 112226 solver.cpp:330] Iteration 2000, Testing net (#0)
I1231 05:14:06.718638 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:06.719323 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9862
I1231 05:14:06.719348 112226 solver.cpp:397]     Test net output #1: loss = 0.0417459 (* 1 = 0.0417459 loss)
I1231 05:14:06.720479 112226 solver.cpp:218] Iteration 2000 (617.924 iter/s, 0.161832s/100 iters), loss = 0.02471
I1231 05:14:06.720502 112226 solver.cpp:237]     Train net output #0: loss = 0.0247102 (* 1 = 0.0247102 loss)
I1231 05:14:06.720510 112226 sgd_solver.cpp:105] Iteration 2000, lr = 0.00872196
I1231 05:14:06.836062 112226 solver.cpp:218] Iteration 2100 (865.44 iter/s, 0.115548s/100 iters), loss = 0.0129391
I1231 05:14:06.836104 112226 solver.cpp:237]     Train net output #0: loss = 0.0129393 (* 1 = 0.0129393 loss)
I1231 05:14:06.836114 112226 sgd_solver.cpp:105] Iteration 2100, lr = 0.00866784
I1231 05:14:06.951599 112226 solver.cpp:218] Iteration 2200 (865.877 iter/s, 0.11549s/100 iters), loss = 0.0219193
I1231 05:14:06.951627 112226 solver.cpp:237]     Train net output #0: loss = 0.0219195 (* 1 = 0.0219195 loss)
I1231 05:14:06.951632 112226 sgd_solver.cpp:105] Iteration 2200, lr = 0.0086145
I1231 05:14:07.072963 112226 solver.cpp:218] Iteration 2300 (824.262 iter/s, 0.121321s/100 iters), loss = 0.0956042
I1231 05:14:07.073019 112226 solver.cpp:237]     Train net output #0: loss = 0.0956043 (* 1 = 0.0956043 loss)
I1231 05:14:07.073026 112226 sgd_solver.cpp:105] Iteration 2300, lr = 0.00856192
I1231 05:14:07.186334 112226 solver.cpp:218] Iteration 2400 (882.527 iter/s, 0.113311s/100 iters), loss = 0.0106698
I1231 05:14:07.186359 112226 solver.cpp:237]     Train net output #0: loss = 0.0106699 (* 1 = 0.0106699 loss)
I1231 05:14:07.186367 112226 sgd_solver.cpp:105] Iteration 2400, lr = 0.00851008
I1231 05:14:07.298082 112226 solver.cpp:330] Iteration 2500, Testing net (#0)
I1231 05:14:07.345970 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:07.346665 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9843
I1231 05:14:07.346683 112226 solver.cpp:397]     Test net output #1: loss = 0.0502878 (* 1 = 0.0502878 loss)
I1231 05:14:07.347754 112226 solver.cpp:218] Iteration 2500 (619.627 iter/s, 0.161387s/100 iters), loss = 0.0344929
I1231 05:14:07.347771 112226 solver.cpp:237]     Train net output #0: loss = 0.034493 (* 1 = 0.034493 loss)
I1231 05:14:07.347805 112226 sgd_solver.cpp:105] Iteration 2500, lr = 0.00845897
I1231 05:14:07.462756 112226 solver.cpp:218] Iteration 2600 (869.82 iter/s, 0.114966s/100 iters), loss = 0.0772851
I1231 05:14:07.462828 112226 solver.cpp:237]     Train net output #0: loss = 0.0772852 (* 1 = 0.0772852 loss)
I1231 05:14:07.462841 112226 sgd_solver.cpp:105] Iteration 2600, lr = 0.00840857
I1231 05:14:07.576750 112226 solver.cpp:218] Iteration 2700 (877.864 iter/s, 0.113913s/100 iters), loss = 0.0620485
I1231 05:14:07.576797 112226 solver.cpp:237]     Train net output #0: loss = 0.0620486 (* 1 = 0.0620486 loss)
I1231 05:14:07.576807 112226 sgd_solver.cpp:105] Iteration 2700, lr = 0.00835886
I1231 05:14:07.690094 112226 solver.cpp:218] Iteration 2800 (882.703 iter/s, 0.113288s/100 iters), loss = 0.00376383
I1231 05:14:07.690132 112226 solver.cpp:237]     Train net output #0: loss = 0.00376396 (* 1 = 0.00376396 loss)
I1231 05:14:07.690140 112226 sgd_solver.cpp:105] Iteration 2800, lr = 0.00830984
I1231 05:14:07.699453 112271 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:07.803148 112226 solver.cpp:218] Iteration 2900 (884.872 iter/s, 0.113011s/100 iters), loss = 0.0225361
I1231 05:14:07.803174 112226 solver.cpp:237]     Train net output #0: loss = 0.0225363 (* 1 = 0.0225363 loss)
I1231 05:14:07.803179 112226 sgd_solver.cpp:105] Iteration 2900, lr = 0.00826148
I1231 05:14:07.914687 112226 solver.cpp:330] Iteration 3000, Testing net (#0)
I1231 05:14:07.971788 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:07.972503 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9878
I1231 05:14:07.972627 112226 solver.cpp:397]     Test net output #1: loss = 0.0385816 (* 1 = 0.0385816 loss)
I1231 05:14:07.974078 112226 solver.cpp:218] Iteration 3000 (585.199 iter/s, 0.170882s/100 iters), loss = 0.00977571
I1231 05:14:07.974169 112226 solver.cpp:237]     Train net output #0: loss = 0.00977591 (* 1 = 0.00977591 loss)
I1231 05:14:07.974200 112226 sgd_solver.cpp:105] Iteration 3000, lr = 0.00821377
I1231 05:14:08.110509 112226 solver.cpp:218] Iteration 3100 (750.428 iter/s, 0.133257s/100 iters), loss = 0.0216236
I1231 05:14:08.110647 112226 solver.cpp:237]     Train net output #0: loss = 0.0216238 (* 1 = 0.0216238 loss)
I1231 05:14:08.110666 112226 sgd_solver.cpp:105] Iteration 3100, lr = 0.0081667
I1231 05:14:08.228408 112226 solver.cpp:218] Iteration 3200 (849.185 iter/s, 0.11776s/100 iters), loss = 0.0130305
I1231 05:14:08.228459 112226 solver.cpp:237]     Train net output #0: loss = 0.0130307 (* 1 = 0.0130307 loss)
I1231 05:14:08.228464 112226 sgd_solver.cpp:105] Iteration 3200, lr = 0.00812025
I1231 05:14:08.341622 112226 solver.cpp:218] Iteration 3300 (883.719 iter/s, 0.113158s/100 iters), loss = 0.0661136
I1231 05:14:08.341646 112226 solver.cpp:237]     Train net output #0: loss = 0.0661139 (* 1 = 0.0661139 loss)
I1231 05:14:08.341655 112226 sgd_solver.cpp:105] Iteration 3300, lr = 0.00807442
I1231 05:14:08.454787 112226 solver.cpp:218] Iteration 3400 (883.897 iter/s, 0.113135s/100 iters), loss = 0.00975509
I1231 05:14:08.454813 112226 solver.cpp:237]     Train net output #0: loss = 0.00975529 (* 1 = 0.00975529 loss)
I1231 05:14:08.454819 112226 sgd_solver.cpp:105] Iteration 3400, lr = 0.00802918
I1231 05:14:08.567121 112226 solver.cpp:330] Iteration 3500, Testing net (#0)
I1231 05:14:08.617847 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:08.618598 112226 solver.cpp:397]     Test net output #0: accuracy = 0.987
I1231 05:14:08.618615 112226 solver.cpp:397]     Test net output #1: loss = 0.0409665 (* 1 = 0.0409665 loss)
I1231 05:14:08.619675 112226 solver.cpp:218] Iteration 3500 (606.587 iter/s, 0.164857s/100 iters), loss = 0.00479197
I1231 05:14:08.619693 112226 solver.cpp:237]     Train net output #0: loss = 0.00479215 (* 1 = 0.00479215 loss)
I1231 05:14:08.619700 112226 sgd_solver.cpp:105] Iteration 3500, lr = 0.00798454
I1231 05:14:08.734412 112226 solver.cpp:218] Iteration 3600 (871.782 iter/s, 0.114708s/100 iters), loss = 0.0306457
I1231 05:14:08.734477 112226 solver.cpp:237]     Train net output #0: loss = 0.0306458 (* 1 = 0.0306458 loss)
I1231 05:14:08.734484 112226 sgd_solver.cpp:105] Iteration 3600, lr = 0.00794046
I1231 05:14:08.848691 112226 solver.cpp:218] Iteration 3700 (875.626 iter/s, 0.114204s/100 iters), loss = 0.0209788
I1231 05:14:08.848737 112226 solver.cpp:237]     Train net output #0: loss = 0.020979 (* 1 = 0.020979 loss)
I1231 05:14:08.848744 112226 sgd_solver.cpp:105] Iteration 3700, lr = 0.00789695
I1231 05:14:08.900290 112271 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:08.965936 112226 solver.cpp:218] Iteration 3800 (853.504 iter/s, 0.117164s/100 iters), loss = 0.00585029
I1231 05:14:08.966074 112226 solver.cpp:237]     Train net output #0: loss = 0.00585046 (* 1 = 0.00585046 loss)
I1231 05:14:08.966104 112226 sgd_solver.cpp:105] Iteration 3800, lr = 0.007854
I1231 05:14:09.091514 112226 solver.cpp:218] Iteration 3900 (797.207 iter/s, 0.125438s/100 iters), loss = 0.0287976
I1231 05:14:09.091565 112226 solver.cpp:237]     Train net output #0: loss = 0.0287977 (* 1 = 0.0287977 loss)
I1231 05:14:09.091573 112226 sgd_solver.cpp:105] Iteration 3900, lr = 0.00781158
I1231 05:14:09.204576 112226 solver.cpp:330] Iteration 4000, Testing net (#0)
I1231 05:14:09.255736 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:09.256489 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9894
I1231 05:14:09.256515 112226 solver.cpp:397]     Test net output #1: loss = 0.0315164 (* 1 = 0.0315164 loss)
I1231 05:14:09.257594 112226 solver.cpp:218] Iteration 4000 (602.322 iter/s, 0.166024s/100 iters), loss = 0.020385
I1231 05:14:09.257612 112226 solver.cpp:237]     Train net output #0: loss = 0.0203852 (* 1 = 0.0203852 loss)
I1231 05:14:09.257618 112226 sgd_solver.cpp:105] Iteration 4000, lr = 0.0077697
I1231 05:14:09.371057 112226 solver.cpp:218] Iteration 4100 (881.541 iter/s, 0.113438s/100 iters), loss = 0.0153677
I1231 05:14:09.371074 112226 solver.cpp:237]     Train net output #0: loss = 0.0153679 (* 1 = 0.0153679 loss)
I1231 05:14:09.371078 112226 sgd_solver.cpp:105] Iteration 4100, lr = 0.00772833
I1231 05:14:09.485442 112226 solver.cpp:218] Iteration 4200 (874.46 iter/s, 0.114356s/100 iters), loss = 0.0126334
I1231 05:14:09.485476 112226 solver.cpp:237]     Train net output #0: loss = 0.0126335 (* 1 = 0.0126335 loss)
I1231 05:14:09.485482 112226 sgd_solver.cpp:105] Iteration 4200, lr = 0.00768748
I1231 05:14:09.601124 112226 solver.cpp:218] Iteration 4300 (864.796 iter/s, 0.115634s/100 iters), loss = 0.038657
I1231 05:14:09.601166 112226 solver.cpp:237]     Train net output #0: loss = 0.0386571 (* 1 = 0.0386571 loss)
I1231 05:14:09.601173 112226 sgd_solver.cpp:105] Iteration 4300, lr = 0.00764712
I1231 05:14:09.714756 112226 solver.cpp:218] Iteration 4400 (880.405 iter/s, 0.113584s/100 iters), loss = 0.0174188
I1231 05:14:09.714783 112226 solver.cpp:237]     Train net output #0: loss = 0.017419 (* 1 = 0.017419 loss)
I1231 05:14:09.714790 112226 sgd_solver.cpp:105] Iteration 4400, lr = 0.00760726
I1231 05:14:09.826902 112226 solver.cpp:330] Iteration 4500, Testing net (#0)
I1231 05:14:09.876176 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:09.876894 112226 solver.cpp:397]     Test net output #0: accuracy = 0.988
I1231 05:14:09.876917 112226 solver.cpp:397]     Test net output #1: loss = 0.0381423 (* 1 = 0.0381423 loss)
I1231 05:14:09.877930 112226 solver.cpp:218] Iteration 4500 (612.965 iter/s, 0.163142s/100 iters), loss = 0.00435871
I1231 05:14:09.877979 112226 solver.cpp:237]     Train net output #0: loss = 0.00435888 (* 1 = 0.00435888 loss)
I1231 05:14:09.877987 112226 sgd_solver.cpp:105] Iteration 4500, lr = 0.00756788
I1231 05:14:09.997242 112226 solver.cpp:218] Iteration 4600 (838.528 iter/s, 0.119257s/100 iters), loss = 0.0074584
I1231 05:14:09.997275 112226 solver.cpp:237]     Train net output #0: loss = 0.00745859 (* 1 = 0.00745859 loss)
I1231 05:14:09.997282 112226 sgd_solver.cpp:105] Iteration 4600, lr = 0.00752897
I1231 05:14:10.093076 112271 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:10.112258 112226 solver.cpp:218] Iteration 4700 (869.734 iter/s, 0.114978s/100 iters), loss = 0.00427773
I1231 05:14:10.112284 112226 solver.cpp:237]     Train net output #0: loss = 0.00427795 (* 1 = 0.00427795 loss)
I1231 05:14:10.112290 112226 sgd_solver.cpp:105] Iteration 4700, lr = 0.00749052
I1231 05:14:10.226490 112226 solver.cpp:218] Iteration 4800 (875.668 iter/s, 0.114199s/100 iters), loss = 0.0174331
I1231 05:14:10.226513 112226 solver.cpp:237]     Train net output #0: loss = 0.0174333 (* 1 = 0.0174333 loss)
I1231 05:14:10.226521 112226 sgd_solver.cpp:105] Iteration 4800, lr = 0.00745253
I1231 05:14:10.341311 112226 solver.cpp:218] Iteration 4900 (871.145 iter/s, 0.114791s/100 iters), loss = 0.00480582
I1231 05:14:10.341336 112226 solver.cpp:237]     Train net output #0: loss = 0.00480605 (* 1 = 0.00480605 loss)
I1231 05:14:10.341343 112226 sgd_solver.cpp:105] Iteration 4900, lr = 0.00741498
I1231 05:14:10.453488 112226 solver.cpp:447] Snapshotting to binary proto file /home/zhaoys/myf/lenet/lenet_caffe/snapshot/train_iter_5000.caffemodel
I1231 05:14:10.461127 112226 sgd_solver.cpp:273] Snapshotting solver state to binary proto file /home/zhaoys/myf/lenet/lenet_caffe/snapshot/train_iter_5000.solverstate
I1231 05:14:10.464032 112226 solver.cpp:330] Iteration 5000, Testing net (#0)
I1231 05:14:10.510946 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:10.511458 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9897
I1231 05:14:10.511488 112226 solver.cpp:397]     Test net output #1: loss = 0.0317376 (* 1 = 0.0317376 loss)
I1231 05:14:10.512598 112226 solver.cpp:218] Iteration 5000 (583.925 iter/s, 0.171255s/100 iters), loss = 0.0539401
I1231 05:14:10.512640 112226 solver.cpp:237]     Train net output #0: loss = 0.0539403 (* 1 = 0.0539403 loss)
I1231 05:14:10.512650 112226 sgd_solver.cpp:105] Iteration 5000, lr = 0.00737788
I1231 05:14:10.626550 112226 solver.cpp:218] Iteration 5100 (877.929 iter/s, 0.113904s/100 iters), loss = 0.0210981
I1231 05:14:10.626577 112226 solver.cpp:237]     Train net output #0: loss = 0.0210984 (* 1 = 0.0210984 loss)
I1231 05:14:10.626585 112226 sgd_solver.cpp:105] Iteration 5100, lr = 0.0073412
I1231 05:14:10.740978 112226 solver.cpp:218] Iteration 5200 (874.155 iter/s, 0.114396s/100 iters), loss = 0.00715472
I1231 05:14:10.741003 112226 solver.cpp:237]     Train net output #0: loss = 0.00715494 (* 1 = 0.00715494 loss)
I1231 05:14:10.741010 112226 sgd_solver.cpp:105] Iteration 5200, lr = 0.00730495
I1231 05:14:10.855633 112226 solver.cpp:218] Iteration 5300 (872.461 iter/s, 0.114618s/100 iters), loss = 0.00303569
I1231 05:14:10.855677 112226 solver.cpp:237]     Train net output #0: loss = 0.00303591 (* 1 = 0.00303591 loss)
I1231 05:14:10.855684 112226 sgd_solver.cpp:105] Iteration 5300, lr = 0.00726911
I1231 05:14:10.969705 112226 solver.cpp:218] Iteration 5400 (877.013 iter/s, 0.114023s/100 iters), loss = 0.00846723
I1231 05:14:10.969732 112226 solver.cpp:237]     Train net output #0: loss = 0.00846745 (* 1 = 0.00846745 loss)
I1231 05:14:10.969738 112226 sgd_solver.cpp:105] Iteration 5400, lr = 0.00723368
I1231 05:14:11.082397 112226 solver.cpp:330] Iteration 5500, Testing net (#0)
I1231 05:14:11.130837 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:11.131626 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9891
I1231 05:14:11.131644 112226 solver.cpp:397]     Test net output #1: loss = 0.0324316 (* 1 = 0.0324316 loss)
I1231 05:14:11.132758 112226 solver.cpp:218] Iteration 5500 (613.415 iter/s, 0.163022s/100 iters), loss = 0.0167479
I1231 05:14:11.132776 112226 solver.cpp:237]     Train net output #0: loss = 0.0167481 (* 1 = 0.0167481 loss)
I1231 05:14:11.132781 112226 sgd_solver.cpp:105] Iteration 5500, lr = 0.00719865
I1231 05:14:11.246398 112226 solver.cpp:218] Iteration 5600 (880.166 iter/s, 0.113615s/100 iters), loss = 0.000499987
I1231 05:14:11.246423 112226 solver.cpp:237]     Train net output #0: loss = 0.00050021 (* 1 = 0.00050021 loss)
I1231 05:14:11.246464 112226 sgd_solver.cpp:105] Iteration 5600, lr = 0.00716402
I1231 05:14:11.269551 112271 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:11.359910 112226 solver.cpp:218] Iteration 5700 (881.2 iter/s, 0.113482s/100 iters), loss = 0.00245953
I1231 05:14:11.359935 112226 solver.cpp:237]     Train net output #0: loss = 0.00245974 (* 1 = 0.00245974 loss)
I1231 05:14:11.359942 112226 sgd_solver.cpp:105] Iteration 5700, lr = 0.00712977
I1231 05:14:11.477833 112226 solver.cpp:218] Iteration 5800 (848.263 iter/s, 0.117888s/100 iters), loss = 0.0389832
I1231 05:14:11.477866 112226 solver.cpp:237]     Train net output #0: loss = 0.0389835 (* 1 = 0.0389835 loss)
I1231 05:14:11.477871 112226 sgd_solver.cpp:105] Iteration 5800, lr = 0.0070959
I1231 05:14:11.593122 112226 solver.cpp:218] Iteration 5900 (867.754 iter/s, 0.11524s/100 iters), loss = 0.00710046
I1231 05:14:11.593165 112226 solver.cpp:237]     Train net output #0: loss = 0.00710067 (* 1 = 0.00710067 loss)
I1231 05:14:11.593173 112226 sgd_solver.cpp:105] Iteration 5900, lr = 0.0070624
I1231 05:14:11.707418 112226 solver.cpp:330] Iteration 6000, Testing net (#0)
I1231 05:14:11.758401 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:11.759146 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9907
I1231 05:14:11.759176 112226 solver.cpp:397]     Test net output #1: loss = 0.0290163 (* 1 = 0.0290163 loss)
I1231 05:14:11.760239 112226 solver.cpp:218] Iteration 6000 (598.555 iter/s, 0.167069s/100 iters), loss = 0.00378046
I1231 05:14:11.760258 112226 solver.cpp:237]     Train net output #0: loss = 0.00378066 (* 1 = 0.00378066 loss)
I1231 05:14:11.760263 112226 sgd_solver.cpp:105] Iteration 6000, lr = 0.00702927
I1231 05:14:11.872548 112226 solver.cpp:218] Iteration 6100 (890.593 iter/s, 0.112285s/100 iters), loss = 0.00106458
I1231 05:14:11.872566 112226 solver.cpp:237]     Train net output #0: loss = 0.00106477 (* 1 = 0.00106477 loss)
I1231 05:14:11.872571 112226 sgd_solver.cpp:105] Iteration 6100, lr = 0.0069965
I1231 05:14:11.989754 112226 solver.cpp:218] Iteration 6200 (853.497 iter/s, 0.117165s/100 iters), loss = 0.014281
I1231 05:14:11.989837 112226 solver.cpp:237]     Train net output #0: loss = 0.0142812 (* 1 = 0.0142812 loss)
I1231 05:14:11.989850 112226 sgd_solver.cpp:105] Iteration 6200, lr = 0.00696408
I1231 05:14:12.109879 112226 solver.cpp:218] Iteration 6300 (833.088 iter/s, 0.120035s/100 iters), loss = 0.00767381
I1231 05:14:12.109920 112226 solver.cpp:237]     Train net output #0: loss = 0.00767398 (* 1 = 0.00767398 loss)
I1231 05:14:12.109926 112226 sgd_solver.cpp:105] Iteration 6300, lr = 0.00693201
I1231 05:14:12.223362 112226 solver.cpp:218] Iteration 6400 (881.605 iter/s, 0.11343s/100 iters), loss = 0.00609869
I1231 05:14:12.223404 112226 solver.cpp:237]     Train net output #0: loss = 0.00609886 (* 1 = 0.00609886 loss)
I1231 05:14:12.223414 112226 sgd_solver.cpp:105] Iteration 6400, lr = 0.00690029
I1231 05:14:12.335515 112226 solver.cpp:330] Iteration 6500, Testing net (#0)
I1231 05:14:12.386811 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:12.387568 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9906
I1231 05:14:12.387588 112226 solver.cpp:397]     Test net output #1: loss = 0.0315652 (* 1 = 0.0315652 loss)
I1231 05:14:12.388682 112226 solver.cpp:218] Iteration 6500 (605.06 iter/s, 0.165273s/100 iters), loss = 0.0120906
I1231 05:14:12.388700 112226 solver.cpp:237]     Train net output #0: loss = 0.0120908 (* 1 = 0.0120908 loss)
I1231 05:14:12.388705 112226 sgd_solver.cpp:105] Iteration 6500, lr = 0.0068689
I1231 05:14:12.455711 112271 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:12.503032 112226 solver.cpp:218] Iteration 6600 (874.71 iter/s, 0.114324s/100 iters), loss = 0.0408356
I1231 05:14:12.503060 112226 solver.cpp:237]     Train net output #0: loss = 0.0408357 (* 1 = 0.0408357 loss)
I1231 05:14:12.503067 112226 sgd_solver.cpp:105] Iteration 6600, lr = 0.00683784
I1231 05:14:12.616698 112226 solver.cpp:218] Iteration 6700 (880.024 iter/s, 0.113633s/100 iters), loss = 0.0120867
I1231 05:14:12.616721 112226 solver.cpp:237]     Train net output #0: loss = 0.0120869 (* 1 = 0.0120869 loss)
I1231 05:14:12.616727 112226 sgd_solver.cpp:105] Iteration 6700, lr = 0.00680711
I1231 05:14:12.730803 112226 solver.cpp:218] Iteration 6800 (876.611 iter/s, 0.114076s/100 iters), loss = 0.00273887
I1231 05:14:12.730825 112226 solver.cpp:237]     Train net output #0: loss = 0.00273904 (* 1 = 0.00273904 loss)
I1231 05:14:12.730832 112226 sgd_solver.cpp:105] Iteration 6800, lr = 0.0067767
I1231 05:14:12.848994 112226 solver.cpp:218] Iteration 6900 (846.324 iter/s, 0.118158s/100 iters), loss = 0.0063858
I1231 05:14:12.849032 112226 solver.cpp:237]     Train net output #0: loss = 0.00638596 (* 1 = 0.00638596 loss)
I1231 05:14:12.849041 112226 sgd_solver.cpp:105] Iteration 6900, lr = 0.0067466
I1231 05:14:12.968724 112226 solver.cpp:330] Iteration 7000, Testing net (#0)
I1231 05:14:13.027690 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:13.028416 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9914
I1231 05:14:13.028482 112226 solver.cpp:397]     Test net output #1: loss = 0.0280982 (* 1 = 0.0280982 loss)
I1231 05:14:13.029805 112226 solver.cpp:218] Iteration 7000 (553.213 iter/s, 0.180762s/100 iters), loss = 0.00817811
I1231 05:14:13.029870 112226 solver.cpp:237]     Train net output #0: loss = 0.00817828 (* 1 = 0.00817828 loss)
I1231 05:14:13.029883 112226 sgd_solver.cpp:105] Iteration 7000, lr = 0.00671681
I1231 05:14:13.155633 112226 solver.cpp:218] Iteration 7100 (795.18 iter/s, 0.125758s/100 iters), loss = 0.00938367
I1231 05:14:13.155678 112226 solver.cpp:237]     Train net output #0: loss = 0.00938383 (* 1 = 0.00938383 loss)
I1231 05:14:13.155695 112226 sgd_solver.cpp:105] Iteration 7100, lr = 0.00668733
I1231 05:14:13.271452 112226 solver.cpp:218] Iteration 7200 (863.825 iter/s, 0.115764s/100 iters), loss = 0.00807085
I1231 05:14:13.271486 112226 solver.cpp:237]     Train net output #0: loss = 0.00807101 (* 1 = 0.00807101 loss)
I1231 05:14:13.271493 112226 sgd_solver.cpp:105] Iteration 7200, lr = 0.00665815
I1231 05:14:13.387769 112226 solver.cpp:218] Iteration 7300 (860.097 iter/s, 0.116266s/100 iters), loss = 0.0224124
I1231 05:14:13.387817 112226 solver.cpp:237]     Train net output #0: loss = 0.0224126 (* 1 = 0.0224126 loss)
I1231 05:14:13.387827 112226 sgd_solver.cpp:105] Iteration 7300, lr = 0.00662927
I1231 05:14:13.505359 112226 solver.cpp:218] Iteration 7400 (850.806 iter/s, 0.117536s/100 iters), loss = 0.00614564
I1231 05:14:13.505391 112226 solver.cpp:237]     Train net output #0: loss = 0.00614582 (* 1 = 0.00614582 loss)
I1231 05:14:13.505398 112226 sgd_solver.cpp:105] Iteration 7400, lr = 0.00660067
I1231 05:14:13.616973 112271 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:13.620801 112226 solver.cpp:330] Iteration 7500, Testing net (#0)
I1231 05:14:13.672514 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:13.673836 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9897
I1231 05:14:13.673854 112226 solver.cpp:397]     Test net output #1: loss = 0.0328055 (* 1 = 0.0328055 loss)
I1231 05:14:13.675462 112226 solver.cpp:218] Iteration 7500 (589.968 iter/s, 0.169501s/100 iters), loss = 0.00213244
I1231 05:14:13.675484 112226 solver.cpp:237]     Train net output #0: loss = 0.00213262 (* 1 = 0.00213262 loss)
I1231 05:14:13.675490 112226 sgd_solver.cpp:105] Iteration 7500, lr = 0.00657236
I1231 05:14:13.792430 112226 solver.cpp:218] Iteration 7600 (855.208 iter/s, 0.116931s/100 iters), loss = 0.00654828
I1231 05:14:13.792476 112226 solver.cpp:237]     Train net output #0: loss = 0.00654845 (* 1 = 0.00654845 loss)
I1231 05:14:13.792485 112226 sgd_solver.cpp:105] Iteration 7600, lr = 0.00654433
I1231 05:14:13.909584 112226 solver.cpp:218] Iteration 7700 (853.959 iter/s, 0.117102s/100 iters), loss = 0.0209833
I1231 05:14:13.909611 112226 solver.cpp:237]     Train net output #0: loss = 0.0209835 (* 1 = 0.0209835 loss)
I1231 05:14:13.909656 112226 sgd_solver.cpp:105] Iteration 7700, lr = 0.00651658
I1231 05:14:14.032776 112226 solver.cpp:218] Iteration 7800 (812.002 iter/s, 0.123152s/100 iters), loss = 0.00290527
I1231 05:14:14.032826 112226 solver.cpp:237]     Train net output #0: loss = 0.00290546 (* 1 = 0.00290546 loss)
I1231 05:14:14.032837 112226 sgd_solver.cpp:105] Iteration 7800, lr = 0.00648911
I1231 05:14:14.150871 112226 solver.cpp:218] Iteration 7900 (847.181 iter/s, 0.118039s/100 iters), loss = 0.0050287
I1231 05:14:14.150908 112226 solver.cpp:237]     Train net output #0: loss = 0.00502888 (* 1 = 0.00502888 loss)
I1231 05:14:14.150919 112226 sgd_solver.cpp:105] Iteration 7900, lr = 0.0064619
I1231 05:14:14.266319 112226 solver.cpp:330] Iteration 8000, Testing net (#0)
I1231 05:14:14.313983 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:14.315620 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9915
I1231 05:14:14.315639 112226 solver.cpp:397]     Test net output #1: loss = 0.0284429 (* 1 = 0.0284429 loss)
I1231 05:14:14.316642 112226 solver.cpp:218] Iteration 8000 (603.39 iter/s, 0.16573s/100 iters), loss = 0.00472042
I1231 05:14:14.316660 112226 solver.cpp:237]     Train net output #0: loss = 0.0047206 (* 1 = 0.0047206 loss)
I1231 05:14:14.316665 112226 sgd_solver.cpp:105] Iteration 8000, lr = 0.00643496
I1231 05:14:14.432391 112226 solver.cpp:218] Iteration 8100 (864.125 iter/s, 0.115724s/100 iters), loss = 0.0119718
I1231 05:14:14.432417 112226 solver.cpp:237]     Train net output #0: loss = 0.0119719 (* 1 = 0.0119719 loss)
I1231 05:14:14.432425 112226 sgd_solver.cpp:105] Iteration 8100, lr = 0.00640827
I1231 05:14:14.548878 112226 solver.cpp:218] Iteration 8200 (858.704 iter/s, 0.116455s/100 iters), loss = 0.0115943
I1231 05:14:14.548904 112226 solver.cpp:237]     Train net output #0: loss = 0.0115945 (* 1 = 0.0115945 loss)
I1231 05:14:14.548912 112226 sgd_solver.cpp:105] Iteration 8200, lr = 0.00638185
I1231 05:14:14.663906 112226 solver.cpp:218] Iteration 8300 (869.602 iter/s, 0.114995s/100 iters), loss = 0.0245502
I1231 05:14:14.663933 112226 solver.cpp:237]     Train net output #0: loss = 0.0245504 (* 1 = 0.0245504 loss)
I1231 05:14:14.663938 112226 sgd_solver.cpp:105] Iteration 8300, lr = 0.00635567
I1231 05:14:14.779588 112226 solver.cpp:218] Iteration 8400 (864.683 iter/s, 0.115649s/100 iters), loss = 0.00964116
I1231 05:14:14.779618 112226 solver.cpp:237]     Train net output #0: loss = 0.00964134 (* 1 = 0.00964134 loss)
I1231 05:14:14.779628 112226 sgd_solver.cpp:105] Iteration 8400, lr = 0.00632975
I1231 05:14:14.818081 112271 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:14.893966 112226 solver.cpp:330] Iteration 8500, Testing net (#0)
I1231 05:14:14.946905 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:14.947444 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9897
I1231 05:14:14.947468 112226 solver.cpp:397]     Test net output #1: loss = 0.0298499 (* 1 = 0.0298499 loss)
I1231 05:14:14.948550 112226 solver.cpp:218] Iteration 8500 (591.983 iter/s, 0.168924s/100 iters), loss = 0.00518011
I1231 05:14:14.948603 112226 solver.cpp:237]     Train net output #0: loss = 0.00518029 (* 1 = 0.00518029 loss)
I1231 05:14:14.948618 112226 sgd_solver.cpp:105] Iteration 8500, lr = 0.00630407
I1231 05:14:15.067173 112226 solver.cpp:218] Iteration 8600 (843.458 iter/s, 0.11856s/100 iters), loss = 0.00088433
I1231 05:14:15.067211 112226 solver.cpp:237]     Train net output #0: loss = 0.000884513 (* 1 = 0.000884513 loss)
I1231 05:14:15.067220 112226 sgd_solver.cpp:105] Iteration 8600, lr = 0.00627864
I1231 05:14:15.178897 112226 solver.cpp:218] Iteration 8700 (895.435 iter/s, 0.111678s/100 iters), loss = 0.0020867
I1231 05:14:15.178933 112226 solver.cpp:237]     Train net output #0: loss = 0.00208688 (* 1 = 0.00208688 loss)
I1231 05:14:15.178941 112226 sgd_solver.cpp:105] Iteration 8700, lr = 0.00625344
I1231 05:14:15.291426 112226 solver.cpp:218] Iteration 8800 (889.028 iter/s, 0.112482s/100 iters), loss = 0.00102185
I1231 05:14:15.291489 112226 solver.cpp:237]     Train net output #0: loss = 0.00102203 (* 1 = 0.00102203 loss)
I1231 05:14:15.291497 112226 sgd_solver.cpp:105] Iteration 8800, lr = 0.00622847
I1231 05:14:15.403802 112226 solver.cpp:218] Iteration 8900 (890.432 iter/s, 0.112305s/100 iters), loss = 0.0014528
I1231 05:14:15.403837 112226 solver.cpp:237]     Train net output #0: loss = 0.00145299 (* 1 = 0.00145299 loss)
I1231 05:14:15.403843 112226 sgd_solver.cpp:105] Iteration 8900, lr = 0.00620374
I1231 05:14:15.515713 112226 solver.cpp:330] Iteration 9000, Testing net (#0)
I1231 05:14:15.563016 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:15.563760 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9904
I1231 05:14:15.563781 112226 solver.cpp:397]     Test net output #1: loss = 0.0289717 (* 1 = 0.0289717 loss)
I1231 05:14:15.564855 112226 solver.cpp:218] Iteration 9000 (621.069 iter/s, 0.161013s/100 iters), loss = 0.0110505
I1231 05:14:15.564873 112226 solver.cpp:237]     Train net output #0: loss = 0.0110507 (* 1 = 0.0110507 loss)
I1231 05:14:15.564880 112226 sgd_solver.cpp:105] Iteration 9000, lr = 0.00617924
I1231 05:14:15.676730 112226 solver.cpp:218] Iteration 9100 (894.083 iter/s, 0.111846s/100 iters), loss = 0.00823897
I1231 05:14:15.676772 112226 solver.cpp:237]     Train net output #0: loss = 0.00823915 (* 1 = 0.00823915 loss)
I1231 05:14:15.676781 112226 sgd_solver.cpp:105] Iteration 9100, lr = 0.00615496
I1231 05:14:15.789703 112226 solver.cpp:218] Iteration 9200 (885.554 iter/s, 0.112924s/100 iters), loss = 0.00273166
I1231 05:14:15.789738 112226 solver.cpp:237]     Train net output #0: loss = 0.00273185 (* 1 = 0.00273185 loss)
I1231 05:14:15.789746 112226 sgd_solver.cpp:105] Iteration 9200, lr = 0.0061309
I1231 05:14:15.902246 112226 solver.cpp:218] Iteration 9300 (888.907 iter/s, 0.112498s/100 iters), loss = 0.00619383
I1231 05:14:15.902281 112226 solver.cpp:237]     Train net output #0: loss = 0.00619402 (* 1 = 0.00619402 loss)
I1231 05:14:15.902290 112226 sgd_solver.cpp:105] Iteration 9300, lr = 0.00610706
I1231 05:14:15.983361 112271 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:16.016964 112226 solver.cpp:218] Iteration 9400 (872.007 iter/s, 0.114678s/100 iters), loss = 0.0341899
I1231 05:14:16.016996 112226 solver.cpp:237]     Train net output #0: loss = 0.0341901 (* 1 = 0.0341901 loss)
I1231 05:14:16.017004 112226 sgd_solver.cpp:105] Iteration 9400, lr = 0.00608343
I1231 05:14:16.127959 112226 solver.cpp:330] Iteration 9500, Testing net (#0)
I1231 05:14:16.173899 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:16.174634 112226 solver.cpp:397]     Test net output #0: accuracy = 0.989
I1231 05:14:16.174654 112226 solver.cpp:397]     Test net output #1: loss = 0.0339525 (* 1 = 0.0339525 loss)
I1231 05:14:16.175726 112226 solver.cpp:218] Iteration 9500 (630.021 iter/s, 0.158725s/100 iters), loss = 0.00653742
I1231 05:14:16.175745 112226 solver.cpp:237]     Train net output #0: loss = 0.00653761 (* 1 = 0.00653761 loss)
I1231 05:14:16.175751 112226 sgd_solver.cpp:105] Iteration 9500, lr = 0.00606002
I1231 05:14:16.291050 112226 solver.cpp:218] Iteration 9600 (867.354 iter/s, 0.115293s/100 iters), loss = 0.00260782
I1231 05:14:16.291090 112226 solver.cpp:237]     Train net output #0: loss = 0.00260802 (* 1 = 0.00260802 loss)
I1231 05:14:16.291101 112226 sgd_solver.cpp:105] Iteration 9600, lr = 0.00603682
I1231 05:14:16.405395 112226 solver.cpp:218] Iteration 9700 (874.93 iter/s, 0.114295s/100 iters), loss = 0.00365551
I1231 05:14:16.405433 112226 solver.cpp:237]     Train net output #0: loss = 0.0036557 (* 1 = 0.0036557 loss)
I1231 05:14:16.405441 112226 sgd_solver.cpp:105] Iteration 9700, lr = 0.00601382
I1231 05:14:16.520783 112226 solver.cpp:218] Iteration 9800 (866.969 iter/s, 0.115344s/100 iters), loss = 0.013783
I1231 05:14:16.520808 112226 solver.cpp:237]     Train net output #0: loss = 0.0137832 (* 1 = 0.0137832 loss)
I1231 05:14:16.520841 112226 sgd_solver.cpp:105] Iteration 9800, lr = 0.00599102
I1231 05:14:16.636044 112226 solver.cpp:218] Iteration 9900 (867.875 iter/s, 0.115224s/100 iters), loss = 0.00643541
I1231 05:14:16.636071 112226 solver.cpp:237]     Train net output #0: loss = 0.00643561 (* 1 = 0.00643561 loss)
I1231 05:14:16.636078 112226 sgd_solver.cpp:105] Iteration 9900, lr = 0.00596843
I1231 05:14:16.748425 112226 solver.cpp:447] Snapshotting to binary proto file /home/zhaoys/myf/lenet/lenet_caffe/snapshot/train_iter_10000.caffemodel
I1231 05:14:16.754078 112226 sgd_solver.cpp:273] Snapshotting solver state to binary proto file /home/zhaoys/myf/lenet/lenet_caffe/snapshot/train_iter_10000.solverstate
I1231 05:14:16.757302 112226 solver.cpp:310] Iteration 10000, loss = 0.00403373
I1231 05:14:16.757325 112226 solver.cpp:330] Iteration 10000, Testing net (#0)
I1231 05:14:16.807796 112273 data_layer.cpp:73] Restarting data prefetching from start.
I1231 05:14:16.808288 112226 solver.cpp:397]     Test net output #0: accuracy = 0.9913
I1231 05:14:16.808310 112226 solver.cpp:397]     Test net output #1: loss = 0.0287575 (* 1 = 0.0287575 loss)
I1231 05:14:16.808320 112226 solver.cpp:315] Optimization Done.
I1231 05:14:16.808333 112226 caffe.cpp:259] Optimization Done.

 

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值