2.simple_models

1. Linear regression

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
x_data=np.linspace(-0.5,0.5,200)[:,np.newaxis]# 200 rows x 1 column of 2-D data
noise=np.random.normal(0,0.02,x_data.shape)# noise with the same 200x1 shape
y_data=np.square(x_data)+noise
#1. define placeholders for the data x, y
x=tf.placeholder(tf.float32,[None,1])# (batch, feature dimension)
y=tf.placeholder(tf.float32,[None,1])
#2. network structure (1 input, 10 hidden, 1 output neurons)
weights_L1=tf.Variable(tf.random_normal([1,10]))
biases_L1=tf.Variable(tf.zeros([1,10]))
wx_plus_b_L1=tf.matmul(x,weights_L1)+biases_L1
L1=tf.nn.tanh(wx_plus_b_L1)
weights_L2=tf.Variable(tf.random_normal([10,1]))
biases_L2=tf.Variable(tf.zeros([1,1]))
wx_plus_b_L2=tf.matmul(L1,weights_L2)+biases_L2
prediction=tf.nn.tanh(wx_plus_b_L2)
#3. minimize the loss, update the parameters
loss=tf.reduce_mean(tf.square(y-prediction))
train_step=tf.train.GradientDescentOptimizer(0.1).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(2000):
        sess.run(train_step,feed_dict={x:x_data,y:y_data})
    prediction_value=sess.run(prediction,feed_dict={x:x_data})
# plot the fit
plt.figure()
plt.scatter(x_data,y_data)
plt.plot(x_data,prediction_value,'r-',lw=5)
plt.show()

(Figure: scatter plot of the noisy quadratic data with the network's fitted curve drawn in red.)

2. MNIST logistic regression

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets('MNIST_data',one_hot=True)
batch_size=100
n_batch=mnist.train.num_examples//batch_size
#1. define placeholders
x=tf.placeholder(tf.float32,[None,784])
y=tf.placeholder(tf.float32,[None,10])
#2. network structure (784 inputs, 10 outputs)
w=tf.Variable(tf.zeros([784,10]))
b=tf.Variable(tf.zeros([10]))
prediction=tf.nn.softmax(tf.matmul(x,w)+b)
#3. loss function; optimizer updates the parameters
loss=tf.reduce_mean(tf.square(y-prediction))
train_step=tf.train.GradientDescentOptimizer(0.2).minimize(loss)
#4. define accuracy; evaluate the model on the test set
correct_prediction=tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(20):
        for batch in range(n_batch):
            batch_xs,batch_ys=mnist.train.next_batch(batch_size)
            sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys})
        acc=sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
        print('iter'+str(epoch)+' test acc: '+str(acc))

3. Loss functions:

1. Loss functions --------- classic loss functions -------- cross-entropy: cross-entropy measures the distance between two probability distributions and is one of the most widely used losses for classification problems. The cross-entropy of p as represented by q is:

H(p, q) = -∑x p(x) log q(x)

Softmax turns the raw forward-pass outputs of the network into a probability distribution: the original outputs are used as confidences to generate new outputs, and the new outputs satisfy all the requirements of a probability distribution.
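As a quick illustration, softmax(x)_i = exp(x_i) / ∑_j exp(x_j); a minimal numpy sketch (the logits [2.0, 1.0, 0.1] are made-up values):

import numpy as np
logits=np.array([2.0,1.0,0.1])
probs=np.exp(logits)/np.sum(np.exp(logits))# softmax: exponentiate, then normalize
print(probs,probs.sum())# ~[0.659 0.242 0.099], sums to 1.0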

The cross-entropy function is not symmetric, H(p,q) != H(q,p); it measures how hard it is to express the probability distribution p through the probability distribution q. Since the correct answer is the result we want, when cross-entropy is used as a neural-network loss, p stands for the correct labels and q for the predictions. Cross-entropy measures the distance between two probability distributions: the smaller its value, the closer the two distributions. TensorFlow implementation:

cross_entropy=-tf.reduce_mean(y_*tf.log(tf.clip_by_value(y,1e-10,1.0)))

Here y_ is the correct labels and y the predictions. tf.clip_by_value() clamps values: anything below 1e-10 is replaced with 1e-10 and anything above 1.0 is replaced with 1.0. In TensorFlow, * means elementwise multiplication of entries at the same positions, not matrix multiplication.
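A small sanity check of both behaviors (TF 1.x; the example values are made up):

import tensorflow as tf
v=tf.constant([[0.0,0.5,2.0]])
a=tf.constant([[1.0,2.0],[3.0,4.0]])
b=tf.constant([[5.0,6.0],[7.0,8.0]])
with tf.Session() as sess:
    print(sess.run(tf.clip_by_value(v,1e-10,1.0)))# [[1e-10 0.5 1.0]]
    print(sess.run(a*b))# elementwise product: [[5. 12.] [21. 32.]]
    print(sess.run(tf.matmul(a,b)))# matrix product: [[19. 22.] [43. 50.]]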

Because cross-entropy is usually used together with softmax regression, TensorFlow wraps the two into a single op:

cross_entropy=tf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=y)

This single call yields the cross-entropy after softmax regression has been applied. Note that it returns one cross-entropy value per example, so it is usually wrapped in tf.reduce_mean, as in the CNN section below.

For classification problems where each example has exactly one correct class, TensorFlow provides the tf.nn.sparse_softmax_cross_entropy_with_logits function to speed the computation up further; it takes integer class indices rather than one-hot labels:

cross_entropy=tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(y_,1),logits=y)

2. Loss functions --------- classic loss functions -------- mean squared error (MSE):

MSE(y, y') = (1/n) ∑_{i=1}^{n} (yi - yi')²

where yi is the correct answer for the i-th example in a batch and yi' is the network's prediction. TensorFlow implementation:

mse=tf.reduce_mean(tf.square(y_-y))

3. Loss functions --------- custom loss functions -----

tf.greater(A,B) returns the boolean result of the comparison A>B.

tf.where(C,A,B) (called tf.select in older TensorFlow versions) returns the value from A where C is True and the value from B where C is False.

Both functions operate elementwise; see the sketch below.
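A minimal sketch of a custom loss built from these two ops, assuming a made-up asymmetric cost where over-predicting costs 1 per unit and under-predicting costs 10 per unit:

import tensorflow as tf
loss_more,loss_less=1.0,10.0# hypothetical per-unit costs
y_=tf.constant([[1.0],[2.0],[3.0]])# made-up ground truth
y=tf.constant([[1.5],[1.0],[3.0]])# made-up predictions
loss=tf.reduce_sum(tf.where(tf.greater(y,y_),
                            (y-y_)*loss_more,# predicted too much
                            (y_-y)*loss_less))# predicted too little
with tf.Session() as sess:
    print(sess.run(loss))# 0.5*1 + 1.0*10 + 0 = 10.5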

4. Dropout against overfitting

1. Define keep_prob; 2. apply dropout to each hidden layer.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets('MNIST_data',one_hot=True)
batch_size=100
n_batch=mnist.train.num_examples//batch_size
keep_prob=tf.placeholder(tf.float32)#(1. define keep_prob)
#1. define placeholders
x=tf.placeholder(tf.float32,[None,784])
y=tf.placeholder(tf.float32,[None,10])
#2. network structure (784 inputs, hidden layers of 2000 and 1000, 10 outputs)
w1=tf.Variable(tf.truncated_normal([784,2000],stddev=0.1))
b1=tf.Variable(tf.zeros([2000])+0.1)
l1=tf.nn.tanh(tf.matmul(x,w1)+b1)
l1_drop=tf.nn.dropout(l1,keep_prob)#(2. apply dropout to the layer)

w2=tf.Variable(tf.truncated_normal([2000,1000],stddev=0.1))
b2=tf.Variable(tf.zeros([1000])+0.1)
l2=tf.nn.tanh(tf.matmul(l1_drop,w2)+b2)
l2_drop=tf.nn.dropout(l2,keep_prob)

w3=tf.Variable(tf.truncated_normal([1000,10],stddev=0.1))
b3=tf.Variable(tf.zeros([10])+0.1)
prediction=tf.nn.softmax(tf.matmul(l2_drop,w3)+b3)

#3. loss function; optimizer updates the parameters
loss=tf.reduce_mean(tf.square(y-prediction))
train_step=tf.train.GradientDescentOptimizer(0.2).minimize(loss)
#4. define accuracy; evaluate the model on the test set
correct_prediction=tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(20):
        for batch in range(n_batch):
            batch_xs,batch_ys=mnist.train.next_batch(batch_size)
            sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys,keep_prob:0.7})
        acc=sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0})
        print('iter'+str(epoch)+' test acc: '+str(acc))

5. TensorBoard:

1. Graph structure: 1. define name scopes, 2. define a FileWriter
2. Variable statistics: 1. a summary helper, 2. apply it to the variables, 3. merge all summaries, 4. run the merged op, 5. write the summaries to the FileWriter

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets('MNIST_data',one_hot=True)
batch_size=100
n_batch=mnist.train.num_examples//batch_size
#2.1 summary helper: record mean/stddev/max/min/histogram for a variable
def variable_summaries(var):
    with tf.name_scope('summaries'):
        with tf.name_scope('mean'):
            mean=tf.reduce_mean(var)
            tf.summary.scalar('mean',mean)
        with tf.name_scope('stddev'):
            tf.summary.scalar('stddev',tf.sqrt(tf.reduce_mean(tf.square(var-mean))))
        with tf.name_scope('max'):
            tf.summary.scalar('max',tf.reduce_max(var))
        with tf.name_scope('min'):
            tf.summary.scalar('min',tf.reduce_min(var))
        with tf.name_scope('histogram'):
            tf.summary.histogram('histogram',var)
with tf.name_scope('input'):#(1.1 define name scopes)
    x=tf.placeholder(tf.float32,[None,784],name='x_input')
    y=tf.placeholder(tf.float32,[None,10],name='y_input')
with tf.name_scope('layer'):
    w=tf.Variable(tf.zeros([784,10]),name='w')
    variable_summaries(w)#2.2 apply to the variables
    b=tf.Variable(tf.zeros([10]),name='b')
    variable_summaries(b)
    prediction=tf.nn.softmax(tf.matmul(x,w)+b,name='pred')
    
with tf.name_scope('loss'):
    loss=tf.reduce_mean(tf.square(y-prediction),name='loss')
    variable_summaries(loss)
with tf.name_scope('train'):
    train_step=tf.train.GradientDescentOptimizer(0.2).minimize(loss)
with tf.name_scope('accuracy'):
    with tf.name_scope('correct_pred'):
        correct_prediction=tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))
    with tf.name_scope('accuracy'):
        accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
        variable_summaries(accuracy)
merged=tf.summary.merge_all()#2.3 merge all summaries
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer=tf.summary.FileWriter('logs/',sess.graph)#(1.2 define the FileWriter)
    for epoch in range(20):
        for batch in range(n_batch):
            batch_xs,batch_ys=mnist.train.next_batch(batch_size)
            summary,_=sess.run([merged,train_step],feed_dict={x:batch_xs,y:batch_ys})#2.4 run the merged op
        writer.add_summary(summary,epoch)#2.5 write the summaries to the FileWriter
        acc=sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
        print('iter'+str(epoch)+' test acc: '+str(acc))
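After training finishes, the logs can be viewed by running

tensorboard --logdir=logs/

and opening the URL it prints (http://localhost:6006 by default) to browse the graph and the recorded summaries.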

6. CNN

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets('MNIST_data',one_hot=True)
batch_size=100
n_batch=mnist.train.num_examples//batch_size
keep_prob=tf.placeholder(tf.float32)
def weight_variable(shape):# weight initialization
    initial=tf.truncated_normal(shape,stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):# bias initialization
    initial=tf.constant(0.1,shape=shape)
    return tf.Variable(initial)
def conv2d(x,w):# convolution layer
    return tf.nn.conv2d(x,w,strides=[1,1,1,1],padding='SAME')
def max_pool_2x2(x):# pooling layer
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
# define the input placeholders
x=tf.placeholder(tf.float32,[None,784])
y=tf.placeholder(tf.float32,[None,10])
x_image=tf.reshape(x,[-1,28,28,1])
# layer 1: input 28x28x1, output 14x14x32 (SAME conv keeps 28x28, 2x2 pooling halves it)
w_conv1=weight_variable([5,5,1,32])# 5x5 kernels, 1 input channel, 32 output channels
b_conv1=bias_variable([32])# weights and biases of the first convolutional layer
h_conv1=tf.nn.relu(conv2d(x_image,w_conv1)+b_conv1)
h_pool1=max_pool_2x2(h_conv1)
# layer 2: input 14x14x32, output 7x7x64
w_conv2=weight_variable([5,5,32,64])
b_conv2=bias_variable([64])
h_conv2=tf.nn.relu(conv2d(h_pool1,w_conv2)+b_conv2)
h_pool2=max_pool_2x2(h_conv2)
h_pool2_flat=tf.reshape(h_pool2,[-1,7*7*64])# flatten
# fully connected layers
w_fc1=weight_variable([7*7*64,1024])
b_fc1=bias_variable([1024])
h_fc1=tf.nn.relu(tf.matmul(h_pool2_flat,w_fc1)+b_fc1)
h_fc1_drop=tf.nn.dropout(h_fc1,keep_prob)
w_fc2=weight_variable([1024,10])
b_fc2=bias_variable([10])
prediction=tf.matmul(h_fc1_drop,w_fc2)+b_fc2
# loss, optimizer, accuracy
cross_entropy=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=prediction))
train_step=tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(prediction,1),tf.argmax(y,1)),tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(20):
        for batch in range(n_batch):
            batch_xs,batch_ys=mnist.train.next_batch(batch_size)
            sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys,keep_prob:0.7})
        acc=sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0})
        print('iter '+str(epoch)+' testing accuracy: '+str(acc))

7. RNN

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets('MNIST_data/',one_hot=True)
n_inputs=28
max_time=28
lstm_size=100
n_classes=10
batch_size=50
n_batch=mnist.train.num_examples//batch_size
x=tf.placeholder(tf.float32,[None,784])
y=tf.placeholder(tf.float32,[None,10])
weights=tf.Variable(tf.truncated_normal([lstm_size,n_classes],stddev=0.1))
biases=tf.Variable(tf.constant(0.1,shape=[n_classes]))
def RNN(x,weights,biases):
    inputs=tf.reshape(x,[-1,max_time,n_inputs])# (batch, time steps, features per step)
    lstm_cell= tf.nn.rnn_cell.BasicLSTMCell(lstm_size)
    outputs,final_state=tf.nn.dynamic_rnn(lstm_cell,inputs,dtype=tf.float32)
    # final_state is an LSTMStateTuple (c, h); final_state[1] is the last hidden state h
    results=tf.matmul(final_state[1],weights)+biases
    return results
prediction=RNN(x,weights,biases)
cross_entropy=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction,labels=y))
train_step=tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y,1),tf.argmax(prediction,1)),tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(5):
        for batch in range(n_batch):
            batch_xs,batch_ys=mnist.train.next_batch(batch_size)
            sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys})
        acc=sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
        print('iter '+str(epoch)+' testing accuracy: '+str(acc))

8. Saving a model

1. Create a Saver object; 2. call its save method.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets('MNIST_data',one_hot=True)
batch_size=100
n_batch=mnist.train.num_examples//batch_size
x=tf.placeholder(tf.float32,[None,784])
y=tf.placeholder(tf.float32,[None,10])
w=tf.Variable(tf.zeros([784,10]))
b=tf.Variable(tf.zeros([10]))
prediction=tf.nn.softmax(tf.matmul(x,w)+b)
loss=tf.reduce_mean(tf.square(y-prediction))
train_step=tf.train.GradientDescentOptimizer(0.2).minimize(loss)
correct_prediction=tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
saver=tf.train.Saver()#1. (create the Saver object)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(20):
        for batch in range(n_batch):
            batch_xs,batch_ys=mnist.train.next_batch(batch_size)
            sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys})
        acc=sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
        print('iter'+str(epoch)+' test acc: '+str(acc))
    saver.save(sess,'net/my_net.ckpt')#2. (call the save method)
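For reference, saver.save does not write a single file: under net/ it produces my_net.ckpt.meta (the graph), my_net.ckpt.index and my_net.ckpt.data-00000-of-00001 (the variable values), plus a checkpoint bookkeeping file.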

9. Loading a model

1. Create a Saver object; 2. call its restore method.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets('MNIST_data',one_hot=True)
batch_size=100
n_batch=mnist.train.num_examples//batch_size
x=tf.placeholder(tf.float32,[None,784])
y=tf.placeholder(tf.float32,[None,10])
w=tf.Variable(tf.zeros([784,10]))
b=tf.Variable(tf.zeros([10]))
prediction=tf.nn.softmax(tf.matmul(x,w)+b)
loss=tf.reduce_mean(tf.square(y-prediction))
train_step=tf.train.GradientDescentOptimizer(0.2).minimize(loss)
correct_prediction=tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
saver=tf.train.Saver()#1. (create the Saver object)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels}))# accuracy before restore: untrained variables
    saver.restore(sess,'net/my_net.ckpt')#2. (call the restore method)
    print(sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels}))# accuracy of the restored, trained model

10. Downloading the Inception-v3 model

import tensorflow as tf
import os
import tarfile
import requests
#1. download URL of the model
inception_pretrain_model_url='http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
#2.1 directory for the model files
inception_pretrain_model_dir='inception_model'
if not os.path.exists(inception_pretrain_model_dir):
    os.makedirs(inception_pretrain_model_dir)
#2.2 directory for the graph log
log_dir='inception_log'
if not os.path.exists(log_dir):
    os.makedirs(log_dir)
#3.1 file name and local path of the model archive
filename=inception_pretrain_model_url.split('/')[-1]
filepath=os.path.join(inception_pretrain_model_dir,filename)
#3.2 path of the graph-definition file; classify_image_graph_def.pb is Google's pretrained model inside the archive
inception_graph_def_file=os.path.join(inception_pretrain_model_dir,'classify_image_graph_def.pb')
#4.1 download the model and extract the archive
if not os.path.exists(filepath):
    print('download:',filename)
    r=requests.get(inception_pretrain_model_url,stream=True)
    with open(filepath,'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
print('finish:',filename)
tarfile.open(filepath,'r:gz').extractall(inception_pretrain_model_dir)
#4.2 load the pretrained model and save its graph structure
with tf.Session() as sess:
    # create a graph to hold Google's pretrained model
    with tf.gfile.FastGFile(inception_graph_def_file,'rb') as f:
        graph_def=tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def,name='')
    # save the graph structure
    writer=tf.summary.FileWriter(log_dir,sess.graph)
    writer.close()

11. Classifying images with the Inception network

import tensorflow as tf
import os
import numpy as np
import re
from PIL import Image
import matplotlib.pyplot as plt
class NodeLookup(object):# build a dictionary from numeric class ids to human-readable labels
    def __init__(self):
        label_lookup_path='inception_model/imagenet_2012_challenge_label_map_proto.pbtxt'
        uid_lookup_path='inception_model/imagenet_synset_to_human_label_map.txt'
        self.node_lookup=self.load(label_lookup_path,uid_lookup_path)# parse the two mapping files
    def load(self,label_lookup_path,uid_lookup_path):
        uid_to_human={}# uid -> human-readable label
        proto_as_ascii_lines=tf.gfile.GFile(uid_lookup_path).readlines()
        for line in proto_as_ascii_lines:# parse the first file
            uid=line.strip('\n').split('\t')[0]# uid
            human_string=line.strip('\n').split('\t')[1]# human-readable label
            uid_to_human[uid]=human_string
        node_id_to_uid={}# class id -> uid
        proto_as_ascii=tf.gfile.GFile(label_lookup_path).readlines()# parse the second file
        for line in proto_as_ascii:
            if line.startswith('  target_class:'):
                target_class=int(line.split(':')[1])# class id
            if line.startswith('  target_class_string:'):
                target_class_string=line.split(':')[1]# uid
                node_id_to_uid[target_class]=target_class_string[2:-2]
        node_id_to_name={}# class id -> human-readable label
        for key,val in node_id_to_uid.items():
            name=uid_to_human[val]
            node_id_to_name[key]=name
        return node_id_to_name
    def id_to_string(self,node_id):
        if node_id not in self.node_lookup:
            return ''
        return self.node_lookup[node_id]
# create a graph to hold Google's pretrained model
with tf.gfile.FastGFile('inception_model/classify_image_graph_def.pb','rb') as f:
    graph_def=tf.GraphDef()
    graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def,name='')
with tf.Session() as sess:
    softmax_tensor=sess.graph.get_tensor_by_name('softmax:0')# fetch the model's softmax output tensor by name
    for root,dirs,files in os.walk('images/'):# walk every file under this folder
        for file in files:
            image_data=tf.gfile.FastGFile(os.path.join(root,file),'rb').read()
            predictions=sess.run(softmax_tensor,{'DecodeJpeg/contents:0':image_data})# run the prediction
            predictions=np.squeeze(predictions)# flatten the result to 1-D
            image_path=os.path.join(root,file)
            print(image_path)
            img=Image.open(image_path)
            plt.imshow(img)
            plt.axis('off')
            plt.show()
            top_k=predictions.argsort()[-5:][::-1]
            node_lookup=NodeLookup()
            for node_id in top_k:
                human_string=node_lookup.id_to_string(node_id)
                score=predictions[node_id]
                print('%s (score=%.5f)\n'%(human_string,score))

12. Multi-task: converting image files to TFRecord format

#1. generate captcha images
from captcha.image import ImageCaptcha
import numpy as np
from PIL import Image
import random
import sys
number=['0','1','2','3','4','5','6','7','8','9']
def random_captcha_text(char_set=number,captcha_size=4):
    captcha_text=[]
    for i in range(captcha_size):
        c=random.choice(char_set)
        captcha_text.append(c)
    return captcha_text
def gen_captcha_text_and_image():
    captcha_text=random_captcha_text()
    captcha_text=''.join(captcha_text)# join the character list into one string
    image=ImageCaptcha()
    captcha=image.generate(captcha_text)
    image.write(captcha_text,'captcha/images/'+captcha_text+'.jpg')
num=1000
if __name__=='__main__':
    for i in range(num):
        gen_captcha_text_and_image()
        sys.stdout.write('\r>> Creating image %d/%d'%(i+1,num))
        sys.stdout.flush()
    sys.stdout.write('\n')
    sys.stdout.flush()
    print('done generating')
import os
import sys
import random
import numpy as np
import tensorflow as tf
from PIL import Image
#2. convert the image files to TFRecord format
_NUM_TEST=50
_RANDOM_SEED=0
DATASET_DIR='D:/DeepLearning/Github小项目/0.studay/5.tensorflow/captcha/images/'
TFRECORD_DIR='D:/DeepLearning/Github小项目/0.studay/5.tensorflow/captcha/'
def _dataset_exists(dataset_dir):
    for split_name in ['train','test']:
        output_filename=os.path.join(dataset_dir,split_name+'.tfrecords')
        if not tf.gfile.Exists(output_filename):
            return False
    return True
def _get_filenames_and_classes(dataset_dir):
    photo_filenames=[]
    for filename in os.listdir(dataset_dir):
        path=os.path.join(dataset_dir,filename)
        photo_filenames.append(path)
    return photo_filenames
def int64_feature(values):
    if not isinstance(values,(tuple,list)):
        values=[values]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def bytes_feature(values):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))
def image_to_tfexample(image_data,label0,label1,label2,label3):
    return tf.train.Example(features=tf.train.Features(feature={'image':bytes_feature(image_data),
                                                               'label0':int64_feature(label0),
                                                               'label1':int64_feature(label1),
                                                               'label2':int64_feature(label2),
                                                               'label3':int64_feature(label3)}))
def _convert_dataset(split_name,filenames,dataset_dir):
    assert split_name in ['train','test']
    with tf.Session() as sess:
        output_filename=os.path.join(TFRECORD_DIR,split_name+'.tfrecords')
        with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
            for i,filename in enumerate(filenames):
                try:
                    sys.stdout.write('\r>>Converting image %d/%d'%(i+1,len(filenames)))
                    sys.stdout.flush()
                    image_data=Image.open(filename)
                    image_data=image_data.resize((224,224))
                    image_data=np.array(image_data.convert('L'))# convert to grayscale
                    image_data=image_data.tobytes()
                    labels=filename.split('/')[-1][0:4]
                    num_labels=[]
                    for j in range(4):
                        num_labels.append(int(labels[j]))
                    example=image_to_tfexample(image_data,num_labels[0],num_labels[1],num_labels[2],num_labels[3])
                    tfrecord_writer.write(example.SerializeToString())
                except IOError as e:
                    print('could not read:',filename)
                    print('error:',e)
                    print('skip it \n')
    sys.stdout.write('\n')
    sys.stdout.flush()

if _dataset_exists(TFRECORD_DIR):
    print('tfrecord files already exist')
else:
    photo_filenames=_get_filenames_and_classes(DATASET_DIR)
    random.seed(_RANDOM_SEED)
    random.shuffle(photo_filenames)
    training_filenames=photo_filenames[_NUM_TEST:]
    testing_filenames=photo_filenames[:_NUM_TEST]
    _convert_dataset('train',training_filenames,DATASET_DIR)
    _convert_dataset('test',testing_filenames,DATASET_DIR)
    print('tfrecord files generated')
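For completeness, a minimal read-back sketch using the TF 1.x queue API (the feature names match the writer above; inside a session one would also call tf.train.start_queue_runners before evaluating these tensors):

import tensorflow as tf
filename_queue=tf.train.string_input_producer(['captcha/train.tfrecords'])
reader=tf.TFRecordReader()
_,serialized_example=reader.read(filename_queue)
features=tf.parse_single_example(serialized_example,features={
    'image':tf.FixedLenFeature([],tf.string),
    'label0':tf.FixedLenFeature([],tf.int64),
    'label1':tf.FixedLenFeature([],tf.int64),
    'label2':tf.FixedLenFeature([],tf.int64),
    'label3':tf.FixedLenFeature([],tf.int64)})
image=tf.reshape(tf.decode_raw(features['image'],tf.uint8),[224,224])# raw grayscale bytes back to 224x224
label0=tf.cast(features['label0'],tf.int32)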