问题描述
训练网络时需要load一个10G左右的h5数据文件,并且包含有image和多类别mask数据…
由于h5文件中的mask并未进行任何预处理,若将mask一张张取出再进行处理是非常占内存的,因此无法使用这种简单的DataLoader方式:
# Naive approach: hand the raw h5py dataset handles straight to TensorDataset.
import h5py
# BUG FIX: the original snippet imported only TensorDataset but then called
# DataLoader, which would raise NameError — import both.
from torch.utils.data import TensorDataset, DataLoader

f = h5py.File("xxxx.h5", 'r')
x_data = f['image']  # lazy h5py handle to the image dataset
y_data = f['mask']   # lazy h5py handle to the raw (unprocessed) mask dataset
# NOTE(review): TensorDataset expects torch tensors, and the masks still need
# per-sample preprocessing — this is exactly why the article rejects this
# simple approach for a ~10 GB file.
train_data = TensorDataset(x_data, y_data)
training_data_loader = DataLoader(dataset=train_data,
                                  num_workers=0,
                                  batch_size=8,
                                  drop_last=True,
                                  shuffle=True)
解决方案
继承 Dataset类 ,重新定义一个可以将数据处理成DataLoader的类
在继承 Dataset 类之后,我们需要重写其中的 __len__ 方法和 __getitem__ 方法,具体可以参考 PyTorch 官方文档中关于自定义 Dataset 的说明
修改如下:
class MyDataset(data.Dataset):
    """Map-style Dataset that reads image/mask pairs lazily from an HDF5 file.

    Samples are pulled from the h5py datasets one index at a time inside
    ``__getitem__``, so the (potentially huge) file is never materialized
    in memory at once; only the requested slice is read from disk.
    """

    def __init__(self, archive, image='image', mask='mask'):
        """Open *archive* read-only and bind the image/mask datasets.

        Parameters:
            archive: path to the .h5 file.
            image:   key of the image dataset inside the file.
            mask:    key of the (unprocessed) mask dataset inside the file.
        """
        # NOTE(review): the handle is opened in the parent process; h5py file
        # handles are generally not safe to share across DataLoader worker
        # processes — confirm num_workers=0 (or open per-worker) before use.
        self.archive = h5py.File(archive, 'r')
        self.data = self.archive[image]
        self.labels = self.archive[mask]

    def __getitem__(self, index):
        """Return ``(image, one_hot_mask)`` for the sample at *index*."""
        image = self.data[index]
        # Masks are stored raw; one-hot encode on the fly so the full
        # preprocessed tensor never has to live in memory.
        mask = self.get_multi_class_labels(self.labels[index])
        return image, mask

    def __len__(self):
        """Number of samples (length of the mask dataset)."""
        return len(self.labels)

    def get_multi_class_labels(self, truth, n_labels=3, labels=(0, 1, 2)):
        """One-hot encode a single-channel mask into ``n_labels`` planes.

        Parameters:
            truth:    label mask; assumed shape (1, H, W) — channel-first
                      single-channel array (only ``truth[0]`` is read).
            n_labels: number of output channels.
            labels:   the label value encoded into each channel; ``None``
                      means channel i encodes label value i.

        Returns:
            int8 array of shape (n_labels, H, W) with 1 where the mask
            equals the channel's label value, else 0.

        Raises:
            ValueError: if ``labels`` is given but its length does not
            match ``n_labels`` (previously an opaque IndexError).
        """
        if labels is None:
            # Hoisted out of the loop: the choice between explicit label
            # values and channel indices is loop-invariant.
            labels = range(n_labels)
        elif len(labels) < n_labels:
            raise ValueError(
                "labels must provide at least n_labels=%d values, got %d"
                % (n_labels, len(labels)))
        new_shape = [n_labels, ] + list(truth.shape[1:])
        y = np.zeros(new_shape, np.int8)
        for label_index in range(n_labels):
            y[label_index, :, :][truth[0, :, :] == labels[label_index]] = 1
        return y

    def close(self):
        """Release the underlying HDF5 file handle."""
        self.archive.close()
然后调用该类即可:
# Instantiate the custom dataset and hand it to DataLoader as usual.
# NOTE(review): num_workers=0 keeps all HDF5 reads in the main process; the
# h5py handle opened in MyDataset.__init__ is generally not safe to share
# across worker processes — confirm before raising num_workers.
train_data = MyDataset('xxxx.h5',image='image',mask='mask')
train_loader = DataLoader(dataset=train_data,
num_workers=0,
batch_size=8,
shuffle=True)