如何训练自己的图片模型-基于inception模型(未完成)

一共有三种方法来训练自己的图片模型:

  1. 拿到数据集和准备好的代码,从头训练。(需要数据集大,训练时间长)
  2. 冻结参数-迁移学习(本文要练习的)
  3. 和迁移学习类似,只是训练好的参数会当初始值参与训练,并且学习率调的很低

迁移学习-基于inception模型的迁移学习

第一步是下载tensorflow教程代码github.com/tensorflow/…

第二步是下载要训练的数据集www.robots.ox.ac.uk/~vgg/data/(我下载了5个分类)

然后利用参数设置之后进行训练: 编写了一个windows里的批处理文件(bat)

rem Retrain the final layer of Inception on the images under --image_dir.
rem Fix: the flag was misspelled --how_many_traing_steps; retrain.py only
rem recognizes --how_many_training_steps, so the 200-step setting was ignored.
python E:/tf2/tensorflow-master/tensorflow/examples/image_retraining/retrain.py ^
--bottleneck_dir bottleneck ^
--how_many_training_steps 200 ^
--model_dir E:/tf2/inception_model/ ^
--output_graph output_graph.pb ^
--image_dir E:/tf2/retrain/data
pause
复制代码

训练完之后得到一个.pb格式的图文件,bottleneck文件夹中是计算训练集得到的数据。

在同根目录下的tmp文件夹中有两个输出:一个是tensorboard的日志,一个是保存好的模型。

检测训练好的模型效果是否良好

类似于juejin.im/post/5aa249…链接中的做法,使用训练好的模型对图片进行分类,代码如下:

import tensorflow as tf
import os
import numpy as np
import re
from PIL import Image
import matplotlib.pyplot as plt


# Read the retrain label file: one class name per line; build a
# mapping of line number (= class id) -> class name, newline stripped.
lines = tf.gfile.GFile('retrain/output_labels.txt').readlines()
uid_to_human = {uid: line.strip('\n') for uid, line in enumerate(lines)}

def id_to_string(node_id):
    """Return the human-readable class name for node_id, or '' if unknown."""
    return uid_to_human.get(node_id, '')


# Load the retrained Inception graph from the frozen .pb file
# and import it into the default graph.
with tf.gfile.FastGFile('retrain/output_graph.pb', 'rb') as model_file:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(model_file.read())
    tf.import_graph_def(graph_def, name='')

with tf.Session() as sess:
    # Output tensor of the retrained graph: softmax over the flower classes.
    softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
    # Walk the image directory and classify every file found.
    for root, dirs, files in os.walk('retrain/images/'):
        for file in files:
            # Fix: the path was built twice with os.path.join; build it once
            # and reuse it for both reading and display.
            image_path = os.path.join(root, file)
            # Raw encoded bytes; the graph's DecodeJpeg node expects jpg input.
            image_data = tf.gfile.FastGFile(image_path, 'rb').read()
            predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})
            # Flatten the (1, num_classes) result to 1-D.
            predictions = np.squeeze(predictions)

            # Print the image path, then show the image itself.
            print(image_path)
            img = Image.open(image_path)
            plt.imshow(img)
            plt.axis('off')
            plt.show()

            # Class ids sorted by descending confidence.
            top_k = predictions.argsort()[::-1]
            print(top_k)
            for node_id in top_k:
                # Class name and its confidence score.
                human_string = id_to_string(node_id)
                score = predictions[node_id]
                print('%s (score = %.5f)' % (human_string, score))
            print()

复制代码

结果如下: (我偷懒在训练集拿的图片,所以准确度很高)

retrain/images/105806915_a9c13e2106_n.jpg
[0 1 3 4 2]
daisy (score = 0.99892)
dandelion (score = 0.00098)
sunflowers (score = 0.00006)
tulips (score = 0.00003)
roses (score = 0.00000)

retrain/images/107592979_aaa9cdfe78_m.jpg
[0 3 1 4 2]
daisy (score = 0.90189)
sunflowers (score = 0.08454)
dandelion (score = 0.00644)
tulips (score = 0.00542)
roses (score = 0.00170)

retrain/images/10791227_7168491604.jpg
[4 2 3 0 1]
tulips (score = 0.72338)
roses (score = 0.24626)
sunflowers (score = 0.02642)
daisy (score = 0.00245)
dandelion (score = 0.00150)

retrain/images/11746080_963537acdc.jpg
[4 2 0 1 3]
tulips (score = 0.93767)
roses (score = 0.05531)
daisy (score = 0.00390)
dandelion (score = 0.00241)
sunflowers (score = 0.00072)
复制代码

拿到数据集和准备好的代码,从头训练

  1. 将图片转为tfrecord文件

import tensorflow as tf
import os
import random
import math
import sys

# Number of images held out for the test split.
_NUM_TEST = 500
# Seed so the train/test shuffle is reproducible.
_RANDOM_SEED = 0
# Number of tfrecord shards per split.
_NUM_SHARDS = 5
# Root directory of the image dataset (one sub-folder per class).
DATASET_DIR = "E:/tf2/slim/images/"
# Output path of the labels file mapping "id:class_name".
LABELS_FILENAME = "E:/tf2/slim/images/labels.txt"

# Build the full path of one tfrecord shard file.
def _get_dataset_filename(dataset_dir, split_name, shard_id):
	"""Return dataset_dir/image_<split>_<shard_id>-of-<_NUM_SHARDS>.tfrecord."""
	shard_file = 'image_%s_%05d-of-%05d.tfrecord' % (split_name, shard_id, _NUM_SHARDS)
	return os.path.join(dataset_dir, shard_file)
	
# Return True only if every tfrecord shard of both splits already exists.
def _dataset_exists(dataset_dir):
	for split_name in ['train', 'test']:
		for shard_id in range(_NUM_SHARDS):
			# Full path of this shard's tfrecord file.
			output_filename = _get_dataset_filename(dataset_dir, split_name, shard_id)
			# Bug fix: this existence check used to sit OUTSIDE the shard
			# loop, so only the last shard of each split was verified and
			# a partially-converted dataset was reported as complete.
			if not tf.gfile.Exists(output_filename):
				return False
	return True
	
# Collect every image path and every class (sub-directory) name.
def _get_filenames_and_classes(dataset_dir):
	"""Return (photo_filenames, class_names) for a dataset laid out as
	dataset_dir/<class_name>/<image files>.

	photo_filenames is the flat list of all image paths; class_names is
	the list of sub-directory names, one per class.
	"""
	# Per-class directories and their names.
	directories = []
	class_names = []
	for filename in os.listdir(dataset_dir):
		path = os.path.join(dataset_dir, filename)
		# Each sub-directory of dataset_dir is one image class.
		if os.path.isdir(path):
			directories.append(path)
			class_names.append(filename)

	photo_filenames = []
	for directory in directories:
		# Bug fix: the original did `for filename in directory:` which
		# iterates over the CHARACTERS of the path string; list the
		# directory's contents instead.
		for filename in os.listdir(directory):
			path = os.path.join(directory, filename)
			photo_filenames.append(path)

	return photo_filenames, class_names

def int64_feature(values):
	"""Wrap an int (or list/tuple of ints) in a tf.train Int64List Feature."""
	if isinstance(values, (tuple, list)):
		wrapped = values
	else:
		# Single scalar: Int64List requires a list.
		wrapped = [values]
	return tf.train.Feature(int64_list=tf.train.Int64List(value=wrapped))
	
def bytes_feature(values):
	"""Wrap bytes (or a list/tuple of byte strings) in a BytesList Feature.

	Bug fix: BytesList expects a *list* of byte strings, but the original
	passed `values` straight through; image_to_tfexample calls this with a
	raw bytes object (image_data, b'jpg'), which BytesList rejects. Wrap
	scalars in a list, consistent with int64_feature above.
	"""
	if not isinstance(values, (tuple, list)):
		values = [values]
	return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
	
def image_to_tfexample(image_data, image_format, class_id):
	"""Build a tf.train.Example holding one encoded image, its format and label."""
	feature_map = {
		'image/encoded': bytes_feature(image_data),
		'image/format': bytes_feature(image_format),
		'image/class/label': int64_feature(class_id),
	}
	return tf.train.Example(features=tf.train.Features(feature=feature_map))
	
def write_label_file(labels_to_class_names, dataset_dir, filename=LABELS_FILENAME):
	"""Write one 'label_id:class_name' line per class to the labels file."""
	labels_filename = os.path.join(dataset_dir, filename)
	with tf.gfile.Open(labels_filename, 'w') as label_file:
		for label_id, class_name in labels_to_class_names.items():
			label_file.write('%d:%s\n' % (label_id, class_name))

# Convert a list of image files into sharded tfrecord files.
def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):
	"""Write `filenames` into `_NUM_SHARDS` tfrecord shard files.

	Args:
		split_name: 'train' or 'test'; becomes part of each shard file name.
		filenames: list of image paths; the parent directory name of each
			file is taken as its class name.
		class_names_to_ids: dict mapping class name -> integer label id.
		dataset_dir: directory the shard files are written into.
	"""
	assert split_name in ['train', 'test']
	# Number of images per shard (integer division; the end index below is
	# clamped so the last shard cannot overrun the list).
	num_per_shard = int(len(filenames) / _NUM_SHARDS)
	with tf.Graph().as_default():
		with tf.Session() as sess:
			for shard_id in range(_NUM_SHARDS):
				# Full path of this shard's tfrecord file.
				output_filename = _get_dataset_filename(dataset_dir, split_name, shard_id)
				with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
					# Start/end index of the images belonging to this shard.
					start_ndx = shard_id * num_per_shard
					end_ndx = min((shard_id+1) * num_per_shard, len(filenames))
					for i in range(start_ndx, end_ndx):
						try:
							sys.stdout.write('\r>> Converting image %d/%d shard %d' % (i+1, len(filenames), shard_id))
							sys.stdout.flush()
							# Bug fix: images must be opened in BINARY mode
							# ('rb'); the original used 'r', which attempts a
							# text decode of JPEG data and fails on Python 3.
							image_data = tf.gfile.FastGFile(filenames[i], 'rb').read()
							# Class name is the image's parent directory name.
							class_name = os.path.basename(os.path.dirname(filenames[i]))
							class_id = class_names_to_ids[class_name]
							# Serialize one Example into the shard.
							example = image_to_tfexample(image_data, b'jpg', class_id)
							tfrecord_writer.write(example.SerializeToString())
						except IOError as e:
							# Best-effort: skip unreadable images instead of aborting.
							print("could not read: ", filenames[i])
							print("error: ", e)
							print("skip it \n")

	sys.stdout.write('\n')
	sys.stdout.flush()
					



if __name__ == '__main__':
	# Skip conversion entirely if the tfrecord shards already exist.
	if _dataset_exists(DATASET_DIR):
		print("tfrecord文件已存在")
	else:
		# Gather every image path and the list of class names.
		photo_filenames, class_names = _get_filenames_and_classes(DATASET_DIR)
		# Map class name -> integer id, e.g. {'house': 3, 'flower': 1, ...}.
		class_names_to_ids = dict(zip(class_names, range(len(class_names))))
		
		# Shuffle reproducibly, then split into train and test sets.
		random.seed(_RANDOM_SEED)
		random.shuffle(photo_filenames)
		training_filenames = photo_filenames[_NUM_TEST:]
		testing_filenames = photo_filenames[:_NUM_TEST]
		
		# Convert both splits to sharded tfrecord files.
		_convert_dataset('train', training_filenames, class_names_to_ids, DATASET_DIR)
		_convert_dataset('test', testing_filenames, class_names_to_ids,DATASET_DIR)
		
		# Write the labels file mapping id -> class name.
		labels_to_class_name = dict(zip(range(len(class_names)), class_names))
		write_label_file(labels_to_class_name, DATASET_DIR)
复制代码

转载于:https://juejin.im/post/5aa38747f265da23906b9ff4

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值