Pytorch 定义自己的数据集
原理请阅读参考文献,这个博客只是记录一下定义流程,便于理解和学习。
素材和代码下载:
链接:https://pan.baidu.com/s/1uF1-uFDBn5aArH56Y4Xkkg
提取码:vrna
参考文献:
Pytorch中正确设计并加载数据集方法
https://ptorch.com/news/215.html
Pytorch学习(三)定义自己的数据集及加载训练
https://blog.csdn.net/sinat_42239797/article/details/90641659
一文读懂Dataset, DataLoader及collate_fn, Sampler等参数
https://blog.csdn.net/qq_40728805/article/details/103929164?ops_request_misc=%25257B%252522request%25255Fid%252522%25253A%252522160827329916780273337687%252522%25252C%252522scm%252522%25253A%25252220140713.130102334…%252522%25257D&request_id=160827329916780273337687&biz_id=0&utm_medium=distribute.pc_search_result.none-task-blog-2alltop_click~default-1-103929164.nonecase&utm_term=collate_fn
创建两个文件夹
在data_train文件夹中创建Annotations_synthesis和JPEGImages_synthesis两个文件夹。
其中在Annotations_synthesis文件夹中放置xml文件:
在JPEGImages_synthesis文件夹中放置对应的图片:
生成txt文件
在data_train文件夹中创建data_distribution.py文件
代码如下:
import os
import xml.etree.ElementTree as ET
def convert_annotation(_xml_path, _list_file, _class):
    """Parse one Pascal-VOC-style XML annotation and append its boxes to a txt line.

    For every <object> whose class name is in ``_class`` and that is not
    flagged as difficult, writes " xmin,ymin,xmax,ymax,cls_id" to
    ``_list_file``, then terminates the line with a newline.

    Args:
        _xml_path: path to the XML annotation file.
        _list_file: an open, writable file-like object (the train.txt handle).
        _class: ordered list of class names; list position is used as cls_id.
    """
    # Context manager guarantees the XML file handle is closed
    # (the original opened it and never closed it).
    with open(_xml_path, encoding='utf-8') as in_file:
        root = ET.parse(in_file).getroot()
    for obj in root.iter('object'):
        difficult = obj.find('difficult').text
        cls = obj.find('name').text
        # Skip classes we do not train on and objects marked difficult.
        if cls not in _class or int(difficult) == 1:
            continue
        cls_id = _class.index(cls)
        xmlbox = obj.find('bndbox')
        b = (int(xmlbox.find('xmin').text), int(xmlbox.find('ymin').text),
             int(xmlbox.find('xmax').text), int(xmlbox.find('ymax').text))
        _list_file.write(" " + ",".join(str(a) for a in b) + ',' + str(cls_id))
    _list_file.write('\n')
if __name__ == '__main__':
    style = '.png'  # image file extension; change to match your dataset
    # Class list: a name's position in this list is its cls_id in train.txt.
    classes = ['combustion_lining', 'fan', 'fan_stator_casing_and_support', 'hp_core_casing', 'hpc_spool',
               'hpc_stage_5', 'mixer', 'nozzle', 'nozzle_cone', 'stand']
    train_img_file_path = './JPEGImages_synthesis'
    train_xml_file_path = './Annotations_synthesis'  # fixed 'tran' typo in the local name
    total_xml = os.listdir(train_xml_file_path)
    # 'with' guarantees train.txt is flushed and closed even if parsing raises
    # (the original only closed it on the success path).
    with open('./train.txt', 'w') as train_txt:
        for i, xml_name in enumerate(total_xml):
            # splitext handles any extension length; the original's [:-4] slice
            # silently corrupts names whose extension is not 4 characters.
            name = os.path.splitext(xml_name)[0]
            train_txt.write('%s/%s%s' % (train_img_file_path, name, style))
            xml_path = os.path.join(train_xml_file_path, xml_name)
            convert_annotation(xml_path, train_txt, classes)
            print('已完成第 ', i, ' 张图片')
运行
data_distribution.py
生成的txt文件如下:
定义自己的数据集
和data_train文件夹同级创建一个ttt.py文件,
代码如下:
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
import numpy as np
import os
import cv2
import torch
class MyDataSet(Dataset):
    """Detection dataset driven by a train.txt index file.

    Each line of the index file has the form produced by data_distribution.py:
        <image path> x1,y1,x2,y2,cls x1,y1,x2,y2,cls ...
    """

    def __init__(self, _tex_path, _dataset_dir):
        """Read the index file into {image path: [[x1, y1, x2, y2, cls], ...]}.

        Args:
            _tex_path: path to the train.txt produced by data_distribution.py.
            _dataset_dir: directory the image paths in the txt are relative to.
        """
        super(MyDataSet, self).__init__()
        truth = {}
        # 'with' closes the handle (the original opened it and never closed it).
        with open(_tex_path, 'r', encoding='utf-8') as f:
            for line in f:
                data = line.split(" ")
                truth[data[0]] = []
                for i in data[1:]:
                    truth[data[0]].append([int(j) for j in i.split(',')])
        self.truth = truth
        self.dataset_dir = _dataset_dir

    def __getitem__(self, index):
        """Return (RGB image resized to 608x608, (60, 5) float64 box array)."""
        img_path = list(self.truth.keys())[index]
        # Fixed-size box buffer; assumes at most 60 objects per image — TODO confirm.
        boxes = np.zeros((60, 5))
        # np.float was removed in NumPy 1.24; the builtin float is the same
        # dtype (float64), so behavior is unchanged on older NumPy too.
        bboxes = np.array(self.truth.get(img_path), dtype=float)
        boxes[0:bboxes.shape[0]] = bboxes
        img_path = os.path.join(self.dataset_dir, img_path)
        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (608, 608))
        return img, boxes

    def __len__(self):
        """Number of indexed images."""
        return len(self.truth.keys())
def collate(batch):
    """Stack a list of (image, boxes) samples into batched tensors.

    Images become a float tensor of shape (B, C, H, W) with pixel values
    scaled into [0, 1]; boxes are stacked unchanged along a new batch axis.
    """
    stacked_imgs = np.concatenate([[sample_img] for sample_img, _ in batch], axis=0)
    stacked_boxes = np.concatenate([[sample_box] for _, sample_box in batch], axis=0)
    # HWC -> CHW, then normalize pixel values into [0, 1].
    img_tensor = torch.from_numpy(stacked_imgs.transpose(0, 3, 1, 2)).div(255.0)
    box_tensor = torch.from_numpy(stacked_boxes)
    return img_tensor, box_tensor
if __name__ == '__main__':
    tex_path = './data_train/train.txt'
    dataset_dir = './data_train'
    dataset = MyDataSet(tex_path, dataset_dir)
    # Sanity-check the Dataset itself on the first ten samples.
    for sample_idx in range(10):
        out_img, out_bboxes = dataset[sample_idx]
        print(out_img.shape)
        print(out_bboxes.shape)
    # Sanity-check the DataLoader with the custom collate function.
    train_loader = DataLoader(dataset, batch_size=3, collate_fn=collate)
    for imgs, bboxes in train_loader:
        print(imgs.shape)
        print(bboxes.shape)
可以调试看看,加深对数据集定义的理解。