Basic network module
import tensorflow as tf
import tensorflow.contrib.slim as slim
import collections
from datetime import datetime
import time
import math
from tensorflow.contrib.layers.python.layers import utils
#Define the Block class
class Block(collections.namedtuple('Block',['scope','unit_fn','args'])):
    """
    A Block is a namedtuple with three fields:
    'scope': the variable scope name
    'unit_fn': the residual unit generating function
    'args': a list of (depth, depth_bottleneck, stride) tuples, one per residual unit
    """
#Helper functions
#Downsampling
def subsample(inputs,factor,scope=None):
    """
    inputs: input tensor
    factor: subsampling factor (the stride)
    scope: variable scope
    :return: the (possibly) downsampled tensor
    """
if factor==1:
return inputs
else:
        #Note that the pooling kernel here is 1x1, so this is a pure stride-factor downsample
return slim.max_pool2d(inputs,kernel_size=[1,1],stride=factor,scope=scope)
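#A minimal shape check (a sketch, not part of the model): with factor=2,
#subsample halves the spatial dimensions of an NHWC tensor, e.g.
#subsample(tf.zeros([1,56,56,64]),factor=2).get_shape() -> (1, 28, 28, 64)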
#Convolution helper that reproduces 'SAME' behavior for any stride
def conv2d_same(inputs,num_outputs,kernel_size,stride,scope=None):
    """
    num_outputs: number of convolution kernels (output channels)
    padding: for stride>1, explicit zero-padding followed by a VALID conv reproduces 'SAME' behavior
    """
if stride==1:
return slim.conv2d(inputs,num_outputs,kernel_size,stride=1,padding='SAME',scope=scope)
else:
        #Pad the inputs with zeros, splitting the padding evenly across both sides
        padding_total=kernel_size-1
        padding_beg=padding_total//2
        padding_end=padding_total-padding_beg
inputs=tf.pad(inputs,[[0,0],[padding_beg,padding_end],[padding_beg,padding_end],[0,0]])
return slim.conv2d(inputs,num_outputs,kernel_size,stride=stride,padding='VALID',scope=scope)
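#Worked example (a sketch): for kernel_size=7, stride=2, padding_total=6,
#padding_beg=3, padding_end=3, so a 224x224 input is padded to 230x230 and
#the VALID conv yields (230-7)//2+1 = 112:
#conv2d_same(tf.zeros([1,224,224,3]),64,7,stride=2).get_shape() -> (1, 112, 112, 64)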
#Stack the blocks into the full network: unit -> block -> net, handling naming and arguments
@slim.add_arg_scope
def stack_blocks_dense(net,blocks,outputs_collections=None):
    """
    net: the input tensor
    blocks: a list of Block namedtuples
    outputs_collections: collection used to gather the end_points
    """
for block in blocks:
        #Two nested loops: the outer walks the blocks, the inner stacks the residual units
        with tf.variable_scope(block.scope,'block',[net]) as sc:
            #Two levels of tf.variable_scope name each residual unit like block1/unit_1
            for i,unit in enumerate(block.args):
                with tf.variable_scope('unit_%d'%(i+1),values=[net]):
                    #The inner loop unpacks each entry of block.args into depth, depth_bottleneck, stride
                    unit_depth,unit_depth_bottleneck,unit_stride=unit
                    #Call the residual unit generator unit_fn to create and chain all the residual units in order
                    net=block.unit_fn(net,
                                      depth=unit_depth,
                                      depth_bottleneck=unit_depth_bottleneck,
                                      stride=unit_stride)
            #Add the block output to the collection via utils.collect_named_outputs
            net=utils.collect_named_outputs(outputs_collections,sc.name,net)
return net
#Define the shared ResNet arg_scope
def resnet_arg_scope(is_training=True,
weight_decay=0.0001,
batch_norm_decay=0.97,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
    #Create the arg_scope shared by all ResNet variants
batch_norm_params={
'is_training':is_training,
'decay':batch_norm_decay,
'epsilon':batch_norm_epsilon,
'scale':batch_norm_scale,
'updates_collections':tf.GraphKeys.UPDATE_OPS }
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params ):
with slim.arg_scope([slim.batch_norm],**batch_norm_params):
with slim.arg_scope([slim.max_pool2d],padding='SAME') as arg_sc:
return arg_sc
#The bottleneck residual unit (the core building block)
@slim.add_arg_scope
def bottleneck(inputs,depth,depth_bottleneck,stride,outputs_collections=None,scope=None):
with tf.variable_scope(scope,'bottleneck_v2',[inputs]) as sc:
        #Read the number of input channels from the last dimension of the input
        depth_in=utils.last_dimension(inputs.get_shape(),min_rank=4)
        #Batch-normalize the input, then pre-activate it with ReLU
        preact=slim.batch_norm(inputs,activation_fn=tf.nn.relu,scope='preact')
        if depth==depth_in:
            #If the unit's input and output channel counts match, just subsample the inputs
            shortcut=subsample(inputs,stride,'shortcut')
        else:
            #Otherwise use a strided 1x1 convolution to change the channel count so that the input and output depths match
            shortcut=slim.conv2d(preact,depth,[1,1],stride=stride,normalizer_fn=None,activation_fn=None,scope='shortcut')
        #The residual branch:
        #Step 1: 1x1 conv, stride 1, depth_bottleneck output channels
        #Step 2: 3x3 conv, stride `stride`, depth_bottleneck output channels
        #Step 3: 1x1 conv, stride 1, depth output channels
        residual=slim.conv2d(preact,depth_bottleneck,[1,1],stride=1,scope='conv1')
        residual=slim.conv2d(residual,depth_bottleneck,[3,3],stride=stride,scope='conv2')
        residual=slim.conv2d(residual,depth,[1,1],stride=1,scope='conv3')
output=shortcut+residual
        #Add the result to outputs_collections
return utils.collect_named_outputs(outputs_collections,sc.name,output)
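#Shape-flow sketch for a single unit (assuming the helpers above): with
#depth=256, depth_bottleneck=64, stride=2, a [1,56,56,64] input yields a
#[1,28,28,256] output -- the 3x3 conv halves H and W, the final 1x1 conv
#expands the channels to depth, and the shortcut is a strided 1x1 conv.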
#The main resnet_v2 generator
def resnet_v2(inputs,
blocks,
num_classes=None,
global_pool=True,
include_root_block=True,
reuse=None,
scope=None):
with tf.variable_scope(scope,'resnet_v2',[inputs],reuse=reuse) as sc:
end_point_collections=sc.original_name_scope+'_end_points'
        #Use slim.arg_scope to set the outputs_collections argument of slim.conv2d, bottleneck and stack_blocks_dense to end_point_collections
        with slim.arg_scope([slim.conv2d,bottleneck,stack_blocks_dense],outputs_collections=end_point_collections):
net=inputs
if include_root_block:
                #When include_root_block is set, create the root convolution layer at the very front of the ResNet
                with slim.arg_scope([slim.conv2d],activation_fn=None,normalizer_fn=None):
                    net=conv2d_same(net,64,7,stride=2,scope='conv1')
                net=slim.max_pool2d(net,[3,3],stride=2,scope='pool1')
            #Build all the residual blocks with stack_blocks_dense
            net=stack_blocks_dense(net,blocks)
net=slim.batch_norm(net,activation_fn=tf.nn.relu,scope='postnorm')
if global_pool:
                #Optionally add a global average pooling layer
net=tf.reduce_mean(net,[1,2],name='pool5',keep_dims=True)
if num_classes is not None:
                #If a class count is given, add a 1x1 convolution with num_classes output channels
net=slim.conv2d(net,num_classes,[1,1],activation_fn=None,normalizer_fn=None,scope='logits')
            #utils.convert_collection_to_dict turns the collection into a dict
end_points=utils.convert_collection_to_dict(end_point_collections)
if num_classes is not None:
                #Add a softmax output layer
end_points['prediction']=slim.softmax(net,scope='prediction')
return net,end_points
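#After construction, end_points maps scope names (e.g.
#'resnet_v2_50/block1/unit_1/bottleneck_v2') to their output tensors, plus
#the 'prediction' softmax added above when num_classes is set.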
#Generator for resnet_v2_50
def resnet_v2_50(inputs,
num_classes=None,
global_pool=True,
reuse=None,
scope='resnet_v2_50'
):
    #A 50-layer ResNet
    #The four blocks contain 3, 4, 6 and 3 units: (3+4+6+3)*3+2=50 layers in total
    #The first 3 blocks each end in a stride-2 unit, so the spatial size shrinks to 224/(4*2*2*2)=7 and the channel count grows to 2048
    blocks=[
        Block('block1',bottleneck,[(256,64,1)]*2+[(256,64,2)]),
Block('block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
Block('block3', bottleneck, [(1024, 256, 1) ]* 5 + [(1024, 256, 2)]),
Block('block4', bottleneck, [(2048, 512, 1)] * 3)
]
return resnet_v2(inputs,blocks,num_classes,global_pool,include_root_block=True,reuse=reuse,scope=scope)
#Generator for resnet_v2_101
def resnet_v2_101(inputs,
                  num_classes=None,
                  global_pool=True,
                  reuse=None,
                  scope='resnet_v2_101'
                  ):
    #A 101-layer ResNet
    #The four blocks contain 3, 4, 23 and 3 units: (3+4+23+3)*3+2=101 layers in total
    #The first 3 blocks each end in a stride-2 unit, so the spatial size shrinks to 224/(4*2*2*2)=7 and the channel count grows to 2048
    blocks = [
        Block('block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
Block('block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
Block('block3', bottleneck, [(1024, 256, 1)] * 22 + [(1024, 256, 2)]),
Block('block4', bottleneck, [(2048, 512, 1)] * 3)
]
return resnet_v2(inputs,blocks,num_classes,global_pool,include_root_block=True,reuse=reuse,scope=scope)
#Helper that benchmarks the per-batch run time
def time_tensorflow_run(session, target, info_string):
num_steps_burn_in = 10
total_duration = 0.0
total_duration_squared = 0.0
for i in range(num_batches+num_steps_burn_in):
start_time = time.time()
_ = session.run(target)
duration = time.time()-start_time
if i >= num_steps_burn_in:
if not i % 10:
print('%s: step %d, duration = %.3f' %(datetime.now(), i-num_steps_burn_in, duration))
total_duration += duration
total_duration_squared += duration*duration
mn = total_duration/num_batches
vr = total_duration_squared/num_batches-mn*mn
sd = math.sqrt(vr)
print('%s: %s across %d steps, %.3f +/- %3.3f sec/batch' %(datetime.now(), info_string, num_batches, mn, sd))
if __name__=='__main__':
batch_size = 32
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(resnet_arg_scope(is_training=False)):
net, end_points = resnet_v2_50(inputs, 1000)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
num_batches = 100
time_tensorflow_run(sess, net, 'Forward')
Image augmentation
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from PIL import Image
import os
#pic_path: path of the source photo
#augmentation_path: directory where the augmented results are saved
pic_path = r'./3ac79f3df8dcd100755525327e8b4710b8122fdc.jpg'
augmentation_path = r'./data_augmentation'
#Define the data_gen image generator
"""
rotation_range: range of random rotation angles in degrees
width(height)_shift_range: range of random horizontal/vertical shifts, as a fraction of the image size
zoom_range: range of random zoom
fill_mode: how newly exposed pixels are filled after a transform
"""
data_gen = ImageDataGenerator(
rotation_range=30,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2,
fill_mode='nearest')
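#Optional check (a sketch): random_transform applies one random augmentation
#to a single 3-D image array in memory, without writing anything to disk:
#sample = data_gen.random_transform(img_to_array(load_img(pic_path)))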
#save_to_dir must already exist, so create it before generating
if not os.path.exists(augmentation_path):
    os.makedirs(augmentation_path)
img = load_img(pic_path)
x = img_to_array(img)
x = x.reshape((1,) + x.shape)
n = 1
#Generate six augmented photos
for batch in data_gen.flow(x, batch_size=1, save_to_dir=augmentation_path, save_prefix='train', save_format='jpeg'):
n += 1
if n > 6:
break
#Size normalization
img_path = r'./data_augmentation'
resize_path = r'./resize_image'
#Walk the directory with os
if not os.path.exists(resize_path):
    os.makedirs(resize_path)
for i in os.listdir(img_path):
    im = Image.open(os.path.join(img_path, i))
    out = im.resize((224, 224))
    out.save(os.path.join(resize_path, i))
Generating TFRecord-format image files
import os
from PIL import Image
import tensorflow as tf
#Layout: a root folder of label folders, each holding that label's photos
cwd = r"./brand_picture/"  # image root; both label folders live under this directory
file_path = r"./"  # directory where the tfrecord files are saved
bestnum = 1000  # number of images stored per tfrecord file
num = 0  # index of the current image
recordfilenum = 0  # index of the current TFRecord file
classes = []  # the labels (the names of all entries under cwd)
for i in os.listdir(cwd):
    classes.append(i)  # label folders such as 1 and 2
#Name of the tfrecords file
ftrecordfilename = ("traindata_63.tfrecords-%.3d" % recordfilenum)
#Create the writer, pointing it at the output path
writer = tf.python_io.TFRecordWriter(os.path.join(file_path, ftrecordfilename))
for index, name in enumerate(classes):
    class_path = os.path.join(cwd, name)  # path of this label's folder
    for img_name in os.listdir(class_path):
        num = num + 1
        if num > bestnum:  # past 1000 images, close this file and start the next tfrecord
            num = 1
            recordfilenum += 1
            writer.close()
            ftrecordfilename = ("traindata_63.tfrecords-%.3d" % recordfilenum)
            writer = tf.python_io.TFRecordWriter(os.path.join(file_path, ftrecordfilename))
        img_path = os.path.join(class_path, img_name)  # path of a single image
        img = Image.open(img_path, 'r')
        img_raw = img.tobytes()  # serialize the image to raw bytes; assumes images were already resized to 224x224x3
"""
example有Features和SerializeToString属性
Features:存在属性feature,
feature:传入字典
tf.train.Feature:将数据转换
tf.train.Int64List(value=[index])二进制数据的存储
tf.train.BytesList(value=[img_raw])其他类型数据的存储
SerializeToString:将对象序列转换为字符串序列
"""
example = tf.train.Example(
features=tf.train.Features(feature={
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[index])),
'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
}))
        writer.write(example.SerializeToString())  # serialize to a string and write it to the current tfrecord file
writer.close()
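#Optional sanity check (a sketch): count the records written to the first shard.
#count = sum(1 for _ in tf.python_io.tf_record_iterator('traindata_63.tfrecords-000'))
#print('records in shard 0:', count)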
Efficiently reading data in TFRecord format
import tensorflow as tf
from PIL import Image
import matplotlib.pyplot as plt
def read_and_decode_tfrecord(filename):
    filename_deque = tf.train.string_input_producer(filename)  # push the filename strings into the input pipeline queue
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_deque)
features = tf.parse_single_example(serialized_example, features={
'label': tf.FixedLenFeature([], tf.int64),
'img_raw': tf.FixedLenFeature([], tf.string)})
label = tf.cast(features['label'], tf.int32)
img = tf.decode_raw(features['img_raw'], tf.uint8)
img = tf.reshape(img, [224, 224, 3])
    # img = tf.cast(img, tf.float32) / 255.0  # normalize the matrix to [0, 1]
return img, label
train_list = ['traindata_63.tfrecords-000']
img, label = read_and_decode_tfrecord(train_list)
#batch controls how many tensors are pulled from the pipeline at a time
img_batch, label_batch = tf.train.batch([img, label], num_threads=2, batch_size=2, capacity=1000)
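#img_batch stacks batch_size decoded images, so its static shape is
#(2, 224, 224, 3) and label_batch's is (2,):
print(img_batch.get_shape())  # (2, 224, 224, 3)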
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
    # Create a coordinator to manage the threads
    coord = tf.train.Coordinator()
    # Start the QueueRunners; the filename queue is filled at this point
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # Preview a few batches; an unbounded `while True` here would never reach request_stop
    for _ in range(5):
        b_image, b_label = sess.run([img_batch, label_batch])
        b_image = Image.fromarray(b_image[0])
        plt.imshow(b_image)
        plt.axis('off')
        plt.show()
    coord.request_stop()
    # join returns only after all the other threads have shut down
    coord.join(threads)
Training
import tensorflow as tf
import tensorflow.contrib.slim.nets as nets
def read_and_decode_tfrecord(filename):
filename_deque = tf.train.string_input_producer(filename)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_deque)
features = tf.parse_single_example(serialized_example, features={
'label': tf.FixedLenFeature([], tf.int64),
'img_raw': tf.FixedLenFeature([], tf.string)})
label = tf.cast(features['label'], tf.int32)
img = tf.decode_raw(features['img_raw'], tf.uint8)
img = tf.reshape(img, [224, 224, 3])
    img = tf.cast(img, tf.float32) / 255.0  # normalize the matrix to [0, 1]
return img, label
save_dir = r"F:/ResNET2/ResNet/train_image_63.model"
batch_size_ = 2
lr = tf.Variable(0.0001, dtype=tf.float32)
x = tf.placeholder(tf.float32, [None, 224, 224, 3])
y_ = tf.placeholder(tf.float32, [None])
train_list = ['traindata_63.tfrecords-005','traindata_63.tfrecords-006', 'traindata_63.tfrecords-007', 'traindata_63.tfrecords-008',
'traindata_63.tfrecords-009', 'traindata_63.tfrecords-010', 'traindata_63.tfrecords-011',
'traindata_63.tfrecords-012', 'traindata_63.tfrecords-013', 'traindata_63.tfrecords-014',
'traindata_63.tfrecords-015', 'traindata_63.tfrecords-016', 'traindata_63.tfrecords-017',
'traindata_63.tfrecords-018', 'traindata_63.tfrecords-019', 'traindata_63.tfrecords-020',
'traindata_63.tfrecords-021']
# shuffle_batch randomly shuffles the sample order
img, label = read_and_decode_tfrecord(train_list)
img_batch, label_batch = tf.train.shuffle_batch([img, label], num_threads=2, batch_size=batch_size_, capacity=10000,
min_after_dequeue=9900)
"""
num_threads:线程数
capacity:队列的容量,在这里设置成10000
min_after_dequeue:队列里保留的最小数据量,控制随机的程度,设置成9900的意思是,当
队列中的数据出列100个,剩下9900个的时候,就要重新补充100个数据进来并打乱顺序
"""
# One-hot encode the labels over 100 classes
# is_training controls whether the network's internal statistics (e.g. batch norm) are updated
one_hot_labels = tf.one_hot(indices=tf.cast(y_, tf.int32), depth=100)
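# Quick illustration (a sketch): tf.one_hot maps class index 2 with depth 5
# to [0., 0., 1., 0., 0.], so one_hot_labels has shape [batch_size, 100].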
pred, end_points = nets.resnet_v2.resnet_v2_50(x, num_classes=100, is_training=True)
pred = tf.reshape(pred, shape=[-1, 100])
# Define the loss and the optimizer; softmax cross-entropy is the standard choice for
# single-label classification (the original sigmoid_cross_entropy_with_logits treats each class independently)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=one_hot_labels))
optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)
# Accuracy
a = tf.argmax(pred, 1)
b = tf.argmax(one_hot_labels, 1)
correct_pred = tf.equal(a, b)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
    # Create a coordinator to manage the threads
    coord = tf.train.Coordinator()
    # Start the QueueRunners; the filename queue is filled at this point
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
i = 0
while True:
i += 1
b_image, b_label = sess.run([img_batch, label_batch])
_, loss_, y_t, y_p, a_, b_ = sess.run([optimizer, loss, one_hot_labels, pred, a, b], feed_dict={x: b_image,
y_: b_label})
print('step: {}, train_loss: {}'.format(i, loss_))
if i % 20 == 0:
_loss, acc_train = sess.run([loss, accuracy], feed_dict={x: b_image, y_: b_label})
print('--------------------------------------------------------')
print('step: {} train_acc: {} loss: {}'.format(i, acc_train, _loss))
print('--------------------------------------------------------')
if i == 200000:
saver.save(sess, save_dir, global_step=i)
elif i == 300000:
saver.save(sess, save_dir, global_step=i)
elif i == 400000:
saver.save(sess, save_dir, global_step=i)
break
coord.request_stop()
    # join returns only after all the other threads have shut down
coord.join(threads)
Testing
import tensorflow as tf
import tensorflow.contrib.slim.nets as nets
from PIL import Image
import numpy as np
import os
test_dir = r'F:/ResNET2/ResNet/test'  # the raw test folder holding the images to predict
model_dir = r'F:/ResNET2/ResNet/train_image_63.model-300000'  # checkpoint path
test_txt_dir = r'F:/ResNET2/ResNet/test.txt'  # the raw test.txt file
result_dir = r'F:/ResNET2/ResNet/result.txt'  # where the predictions are written
x = tf.placeholder(tf.float32, [None, 224, 224, 3])
classes = ['1', '10', '100', '11', '12', '13', '14', '15', '16', '17', '18', '19', '2', '20', '21', '22', '23', '24',
'25', '26', '27', '28', '29', '3', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '4', '40',
'41', '42', '43', '44', '45', '46', '47', '48', '49', '5', '50', '51', '52', '53', '54', '55', '56', '57',
'58', '59', '6', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '7', '70', '71', '72', '73',
'74', '75', '76', '77', '78', '79', '8', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '9',
           '90', '91', '92', '93', '94', '95', '96', '97', '98', '99']  # label order
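# Note (an assumption to verify): this ordering must match the label indices assigned at
# TFRecord-writing time, i.e. the order in which os.listdir(cwd) returned the label folders.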
pred, end_points = nets.resnet_v2.resnet_v2_50(x, num_classes=100, is_training=False)  # is_training=False at inference so batch norm uses its moving statistics
pred = tf.reshape(pred, shape=[-1, 100])
a = tf.argmax(pred, 1)
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, model_dir)
with open(test_txt_dir, 'r') as f:
data = f.readlines()
for i in data:
test_name = i.split()[0]
for pic in os.listdir(test_dir):
if pic == test_name:
img_path = os.path.join(test_dir, pic)
img = Image.open(img_path)
img = img.resize((224, 224))
                    # Preprocess in numpy rather than building new tf ops on every loop iteration
                    b_image = np.asarray(img, dtype=np.float32).reshape(1, 224, 224, 3) / 255.0
t_label = sess.run(a, feed_dict={x: b_image})
index_ = t_label[0]
predict = classes[index_]
with open(result_dir, 'a') as f1:
print(test_name, predict, file=f1)
break