Keras DataGenerator with Multi-workers 加速读取训练数据
描述:
对于训练任务,有时数据量很大且不同类型的数据存放在不同的文件夹中,而且每个文件夹内的数据长度不相同。如果想要快速地从多个文件夹中读取训练数据,就需要使用多个worker进行同步读取操作。下面将详细展示能实现多worker快速数据读取的python代码。
代码展示
以我自己目前四分类任务的数据读取来举例:
1. DataGenerator Module
from concurrent.futures import ThreadPoolExecutor
class DataGenerator_MultiWorkers(tf.keras.utils.Sequence):
    """Keras Sequence that builds a class-balanced batch each step, loading
    the samples in parallel with a thread pool.

    Each step draws ``batch_size // n_classes`` random indices from every
    class's dataset, fetches the corresponding samples concurrently through
    ``Dataset.__getitem__``, and merges them with ``collate_fn``.
    """

    def __init__(self, list_IDs, label_IDs, batch_size=128, imgSize=(64, 64),
                 n_channels=3, n_classes=4, num_workers=8, shuffle=True,
                 transform=None, epoch_idx=None, collate_fn=None):
        """Initialization.

        Args:
            list_IDs: per-class lists of training data file paths.
            label_IDs: per-class lists of training label file paths.
            batch_size: total samples per batch (split evenly across classes).
            imgSize: in-plane image size; informational only here.
            n_channels: number of image channels; informational only here.
            n_classes: number of class datasets to sample from.
            num_workers: number of loader threads.
            shuffle: reshuffle each class's path list at epoch end.
            transform: optional augmentation object passed to ``Dataset``.
            epoch_idx: optional epoch counter; stored, not used here.
            collate_fn: callable merging a list of samples into one batch.
                Defaults to ``default_collate_fn``. NOTE: the original code
                used ``collate_fn=default_collate_fn`` directly in the
                signature, which raises NameError when this class is defined
                before that function in the file; it is now resolved lazily.
        """
        self.imgSize = imgSize
        self.batch_size = batch_size
        # NOTE(review): the original inline comments on these two attributes
        # were swapped — ``Dataset`` consumes list_IDs as the data paths and
        # label_IDs as the label paths.
        self.label_IDs = label_IDs  # per-class training label paths
        self.list_IDs = list_IDs    # per-class training data paths
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.n_classes = n_classes
        self.transform = transform
        self.epoch_idx = epoch_idx
        self.num_workers = num_workers  # number of worker threads
        # Dataset performs the actual (possibly augmented) file reads.
        self.Dataset = Dataset(self.list_IDs, self.label_IDs,
                               self.n_classes, self.transform)
        self.collate_fn = collate_fn if collate_fn is not None else default_collate_fn
        self.on_epoch_end()

    def __len__(self):
        """Denotes the number of steps per epoch (fixed, since sampling is
        random with replacement rather than a pass over the data)."""
        return 100

    def __getitem__(self, index):
        """Generate one batch of data.

        ``index`` is the step index in ``[0, len(self))``. model.fit() calls
        steps in shuffled order, but sampling here is random regardless, so
        the value is unused.
        """
        # Draw batch_size // n_classes random indices from each class.
        # (The original used int(batch_size * 0.25), which silently assumed
        # exactly 4 classes; this generalizes while giving the same 32 for
        # the default batch_size=128, n_classes=4.)
        per_class = self.batch_size // self.n_classes
        indexes_classes = [
            np.random.randint(low=0, high=self.subLength[c], size=per_class)
            for c in range(self.n_classes)
        ]
        # Transpose into per-sample index lists: one index per class, so each
        # Dataset fetch reads one item from every class's dataset.
        indexes = [list(per_sample) for per_sample in zip(*indexes_classes)]
        # Fetch samples concurrently; Dataset.__getitem__ (file I/O plus
        # preprocessing) runs on the worker threads.
        with ThreadPoolExecutor(max_workers=self.num_workers) as executor:
            samples = list(executor.map(lambda i: self.Dataset[i], indexes))
        batch_data, batch_label = self.collate_fn(samples)
        return ({'data': batch_data}, [batch_label])

    def on_epoch_end(self):
        """Record each class dataset's length and optionally reshuffle it."""
        # subLength[c] is the length of class c's dataset (lengths differ).
        self.subLength = [len(ids) for ids in self.list_IDs]
        if self.shuffle:
            for ids in self.list_IDs:
                np.random.shuffle(ids)
2. Dataset Module
尽量将需要的一些preprocessing写在Dataset Module里面,因为使用multi-workers时,Dataset Module会在多个worker上同时进行。
class Dataset(object):
    """Loads one multi-class sample: for each class, a NIfTI image (padded
    in-plane) plus its label volume.

    Keep preprocessing inside this class where possible: with multi-worker
    loading, ``__getitem__`` runs concurrently on several worker threads.
    """

    def __init__(self, list_IDs, label_IDs, n_Classes, transform=None, dtype='float32'):
        """Store per-class data/label path lists and the optional transform."""
        self.dtype = dtype
        self.list_IDs = list_IDs    # per-class data file paths
        self.label_IDs = label_IDs  # per-class label file paths
        self.n_classes = n_Classes
        # Backward-compatible alias for the original misspelled attribute.
        self.n_Calsses = n_Classes
        self.transform = transform

    def __getitem__(self, index):
        """Return ``[data_list, label_list]`` for the per-class indices in
        ``index`` (one index per class), applying augmentation if configured.
        """
        data_storage_list = []
        label_storage_list = []
        for c in range(self.n_classes):
            # Pad height and width by (9, 10) each — e.g. 45x45 -> 64x64;
            # the third (depth/channel) axis is left untouched.
            img = np.pad(nibabel.load(self.list_IDs[c][index[c]]).get_fdata(),
                         ((9, 10), (9, 10), (0, 0)), 'constant')
            if self.transform is None:  # no data augmentation configured
                data_storage_list.append(img)
            else:
                # Run one augmentation pass through the Keras generator.
                gen = self.transform.flow(np.expand_dims(img, axis=0))
                data_storage_list.append(np.squeeze(next(gen)))
            label_storage_list.append(
                nibabel.load(self.label_IDs[c][index[c]]).get_fdata())
        return [data_storage_list, label_storage_list]
3. collate_fn Module
def default_collate_fn(samples):
    """Collate per-class sample lists into flat batch arrays.

    Args:
        samples: list of ``[data_list, label_list]`` pairs, each holding one
            item per class (e.g. 32 samples x 4 classes, where the class
            axis marks which dataset an item came from).

    Returns:
        Tuple ``(X, Y)`` with the sample and class axes merged:
        ``X`` of shape ``(num_samples * n_classes, *item_shape)`` (e.g.
        128x64x64x3) and ``Y`` of shape ``(num_samples * n_classes,
        label_dim)`` (e.g. 128x4, i.e. batch_size x n_classes).
    """
    X = np.array([sample[0] for sample in samples])  # (S, C, ...)
    Y = np.array([sample[1] for sample in samples])  # (S, C, label_dim)
    # Merge the first two axes. Using (-1,) + the remaining shape generalizes
    # the original code, which hard-coded exactly 5 data dims / 3 label dims.
    X = X.reshape((-1,) + X.shape[2:])
    Y = Y.reshape((-1,) + Y.shape[2:])
    return X, Y
4. Data Augmentation Module (transform module)
from keras.preprocessing.image import ImageDataGenerator
# Keras' built-in ImageDataGenerator covers simple on-the-fly transforms;
# pass this object to Dataset as its `transform` argument.
datagen = ImageDataGenerator(
    rotation_range=60,        # random rotations up to 60 degrees
    width_shift_range=10,     # random horizontal shift (pixels)
    height_shift_range=10,    # random vertical shift (pixels)
    zoom_range=0.2,           # random zoom in/out by up to 20%
    horizontal_flip=True,
    vertical_flip=True,
    preprocessing_function=None,  # plug a custom transformation here if needed
)