I recently needed to generate a dataset. I started with the .npy format, but saving failed once the file grew past 4 GB, so I switched to HDF5, which handles files well beyond that limit. In the example below, the generated dataset is over 22 GB.
import snntorch as snn
from snntorch.spikevision import spikedata
import numpy as np
from tqdm import tqdm
import h5py
# create datasets
train_ds = spikedata.SHD("dataset/shd", train=True)
test_ds = spikedata.SHD("dataset/shd", train=False)
def save_data(dataset, data_name):
    data = []
    labels = []
    for i in tqdm(range(len(dataset))):
        x = dataset[i][0]  # use the dataset passed in, not the global train_ds
        y = dataset[i][1]
        data.append(x.numpy())
        labels.append(y)
    data = np.stack(data, axis=0)  # one sample per row: (num_samples, num_steps, num_units)
    hf = h5py.File(data_name, "w")
    # create the "data" and "labels" datasets
    hf.create_dataset("data", data=data)
    hf.create_dataset("labels", data=labels)
    hf.close()
save_data(train_ds,"train_data.hdf5")
save_data(test_ds,"test_data.hdf5")
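One caveat with save_data: it buffers every sample in a Python list and only writes at the end, so the full array (here over 22 GB) has to fit in RAM. HDF5 can also be written incrementally into a pre-allocated, chunked dataset. Below is a minimal sketch of that pattern; the gzip compression, one-sample chunking, and the int64 label cast are my own choices rather than part of the original code, and it assumes every sample has the same shape.
def save_data_incremental(dataset, data_name):
    first = dataset[0][0].numpy()  # peek at one sample to get its shape and dtype
    n = len(dataset)
    with h5py.File(data_name, "w") as hf:
        dset = hf.create_dataset(
            "data",
            shape=(n,) + first.shape,
            dtype=first.dtype,
            chunks=(1,) + first.shape,  # one sample per chunk
            compression="gzip",         # optional: trades CPU time for disk space
        )
        labels = np.zeros(n, dtype=np.int64)
        for i in tqdm(range(n)):
            x, y = dataset[i]
            dset[i] = x.numpy()  # written straight to disk instead of kept in memory
            labels[i] = int(y)   # assumes the label can be cast to int
        hf.create_dataset("labels", data=labels)
With one sample per chunk, readers can later pull individual samples without touching the rest of the file.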
def read_data(dataset_name):
    with h5py.File(dataset_name, "r") as f:  # context manager closes the file automatically
        for key in f.keys():
            print(key)  # names of the root-level objects in the HDF5 file - can be groups or datasets
            print(type(f[key]))
            data = np.array(f[key])  # reads the whole dataset into memory as a NumPy array
            print(data)
# test reading
read_data("test_data.hdf5")
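For files this large it is usually better not to load everything with np.array(f[key]). h5py datasets support NumPy-style slicing that reads only the requested rows from disk, so you can inspect shapes and grab a batch cheaply. A small sketch (the batch size of 32 is just an example):
with h5py.File("test_data.hdf5", "r") as f:
    data = f["data"]               # a handle into the file; nothing is loaded yet
    labels = f["labels"]
    print(data.shape, data.dtype)  # metadata only, no data read
    batch = data[:32]              # only these 32 samples are read from disk
    batch_labels = labels[:32]
    print(batch.shape, batch_labels)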