import cv2
# re: used for regular-expression matching
import re
import os
# shutil: used for copying, deleting, moving files and unpacking archives
import shutil
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import resnet
from tensorflow.keras.utils import plot_model
from tensorflow.keras.layers import Input,GlobalAvgPool2D,Dense,Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint,TensorBoard,EarlyStopping
import matplotlib.pyplot as plt
# Input pipeline: read the 17flowers dataset from disk and wrap one batch of
# it in a tf.data.Dataset.
# NOTE(review): only the first generator batch (128 images) ever reaches the
# Dataset below — the rest of the directory is ignored. Confirm this is
# intentional (it reads like a from_tensor_slices demo, not a full pipeline).
dataAll = ImageDataGenerator(validation_split=0.3)
genIteratorForTrain = dataAll.flow_from_directory(
    "D:/17flowers", batch_size=128, subset="training", class_mode='sparse')
# next() yields one (images, labels) tuple; images has shape (128, 256, 256, 3).
# Prefer the builtin next() over the deprecated .next() method.
iterator = next(genIteratorForTrain)
# Slice the batch into per-sample elements: each element is an
# (image, label) pair with image shape (256, 256, 3) and a scalar label.
mydataSet = tf.data.Dataset.from_tensor_slices((iterator[0], iterator[1]))
for i in mydataSet.as_numpy_iterator():
    # (256, 256, 3)
    print(i[0].shape)
    break
# Re-batch the per-sample dataset into batches of 32.
mydataSet2 = mydataSet.batch(32)
for i in mydataSet2.as_numpy_iterator():
    # (32, 256, 256, 3)
    print(i[0].shape)
    break
#当dataset为元组形式,mymap接收两大参数,当遍历数据集取出元素时,自动用map进行处理
def mymap(obj, obj2):
    """Pass-through map function for a tuple-structured dataset.

    When a dataset element is a tuple, ``Dataset.map`` unpacks it and calls
    this function with one argument per component (image, label). The prints
    run when the function is traced, showing each component's static shape.
    """
    print("shape:")
    print(obj.shape)   # image component
    print(obj2.shape)  # label component (scalar for sparse labels)
    return obj, obj2
# Apply mymap to the already-batched dataset; because batching happened
# first, each mapped element is a whole batch (the transform itself is an
# identity, with trace-time shape prints).
mydataSet2 = mydataSet2.map(mymap)
# Per-image input shape for the model, e.g. (256, 256, 3). Reuse the batch
# already fetched above instead of consuming (and discarding) another
# 128-image batch from the generator just to read its shape.
shape_in = iterator[0].shape[1:]
# Backbone: the original code built a bare Dense(100) layer here, which
# contradicts both the `resnet` import and this file's own shape comments
# ((None, 7, 7, 2048)). Use the ResNet50 feature extractor that was
# evidently intended. weights=None trains from scratch; pass
# weights='imagenet' to fine-tune pretrained features instead.
model = resnet.ResNet50(include_top=False, weights=None)
# Input layer matching the generator's per-image shape, e.g. (256, 256, 3).
inputs = Input(shape=shape_in)
# Backbone feature maps: (None, 8, 8, 2048) for 256x256 inputs.
x = model(inputs)
# Global average pooling collapses the spatial dims: (None, 2048).
x = GlobalAvgPool2D()(x)
# Regularize the pooled features, THEN project to the 17 flower classes.
# (The original applied Dropout after the logits, which only randomly zeroes
# logits at train time and harms learning; dropout belongs before the
# classifier head.)
x = Dropout(rate=0.5)(x)
outputs = Dense(units=17)(x)
model2 = Model(inputs=inputs, outputs=outputs)
# `lr` is deprecated in TF2 optimizers — use `learning_rate`.
# from_logits=True matches the un-activated Dense output above.
model2.compile(
    optimizer=Adam(learning_rate=0.001),
    loss=SparseCategoricalCrossentropy(from_logits=True),
    metrics=['sparse_categorical_accuracy'],
)
# NOTE(review): validation_data is the same dataset as the training data, so
# validation metrics will not measure generalization — consider building a
# dataset from the generator's subset="validation" split instead.
history = model2.fit(x=mydataSet2, epochs=10, validation_data=mydataSet2)
# (Blog-scrape residue, commented out to keep the file runnable:)
# "tensorflow2: generating a dataset with tf.data.Dataset"
# Latest recommended article published 2023-02-20 14:32:54