# PyTorch data pipeline: load the cifar2 train/test sets from image folders.
import torch
from torch.utils.data import DataLoader
from torchvision import transforms, datasets

# transform=        operates on the sample features
# target_transform= operates on the sample labels
# Raw data layout on disk: (10000, 32, 32, 3).
_to_tensor = transforms.ToTensor()


def _label_to_float(t):
    """Convert an integer class index to a float scalar tensor."""
    return torch.tensor(t).float()


train_data = datasets.ImageFolder("../data/cifar2/train/",
                                  transform=_to_tensor,
                                  target_transform=_label_to_float)
test_data = datasets.ImageFolder("../data/cifar2/test/",
                                 transform=_to_tensor,
                                 target_transform=_label_to_float)

# Rebind to DataLoaders: 100 batches, each shaped (100, 3, 32, 32).
train_data = DataLoader(train_data, batch_size=100, shuffle=True)
test_data = DataLoader(test_data, batch_size=100, shuffle=True)
# Training-loop fragment: iterate epochs, then mini-batches of the loader.
# NOTE(review): `epoch`, `train_data`, `Variable`, and `optimizer` are all
# defined elsewhere — this snippet is not self-contained. The loop body
# appears truncated after optimizer.zero_grad(); the forward pass, loss,
# backward(), and optimizer.step() presumably follow — TODO confirm against
# the full script.
for i in range(epoch):
    for j, (imgs, labels) in enumerate(train_data):
        img = Variable(imgs)      # wrap batch features (legacy pre-0.4 PyTorch API)
        label = Variable(labels)  # wrap batch labels
        optimizer.zero_grad()     # clear gradients accumulated from the previous step
# Load a torchvision built-in dataset (MNIST) through the standard pipeline.
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# MNIST: 60000 training images, each (28, 28).
# Other common built-in datasets: datasets.FashionMNIST, datasets.CIFAR10, ...
mnist_train = datasets.MNIST(root='MNIST_data/',
                             train=True,
                             transform=transforms.ToTensor(),
                             target_transform=lambda t: torch.tensor(t).long(),
                             download=True)
mnist_test = datasets.MNIST(root='MNIST_data/',
                            train=False,
                            transform=transforms.ToTensor(),
                            target_transform=lambda t: torch.tensor(t).long(),
                            download=True)

# DataLoaders: 600 batches, each shaped (100, 1, 28, 28).
train_data = DataLoader(dataset=mnist_train, batch_size=100, shuffle=True)
test_data = DataLoader(dataset=mnist_test, batch_size=100, shuffle=True)

# Usage demo.
# Fixes vs. original: `epochs` was never defined (NameError), and `Variable`
# was used without being imported (NameError). Since PyTorch 0.4 plain
# tensors carry autograd state, so the Variable wrapper is simply dropped.
epochs = 1
for epoch in range(epochs):
    for i, (batch_xs, batch_ys) in enumerate(train_data):
        X = batch_xs  # (100, 1, 28, 28) float tensor
        Y = batch_ys  # (100,) long tensor
# Planes / cars / birds image dataset: load, normalize, split, and convert
# to PyTorch tensors.
import os
import numpy as np
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
from torch.autograd import Variable
import matplotlib.pyplot as plt
from torchvision import transforms
import torchvision.datasets as datasets
import random
from torch.optim.lr_scheduler import *

# Reproducibility.
np.random.seed(0)
torch.manual_seed(0)

learning_rate = 0.01
epochs = 10
train_test = 0.8  # fraction of samples used for training

# data = r'E:\p1code\study\shendu2\0705 week+opencv/'
data = r'../data/data3'
imgdir = '../data/data3'


def loadimg(imgpath):
    """Read one image file and scale pixel values from [0, 255] to [0, 1]."""
    img = plt.imread(imgpath)
    return img / 255.0


imgs = []    # features (x)
labels = []  # targets (y)
for path in os.listdir(imgdir):
    # File names look like '0_6.jpg'; the leading digit is the class label.
    imgs.append(loadimg(imgdir + '/' + path))
    labels.append(int(path[0]))
# labels = utils.to_categorical(labels, 3)

# train_test_split only accepts numpy arrays.
imgs = np.array(imgs)
labels = np.array(labels)

# Fix vs. original: the split ratio was hard-coded to 0.7 while the
# `train_test` config variable (0.8) declared above went unused — use the
# config variable so the declared ratio actually takes effect.
train_x, test_x, train_y, test_y = train_test_split(
    imgs, labels, train_size=train_test, shuffle=True)

# NHWC -> NCHW for PyTorch. NOTE(review): Variable is a legacy no-op wrapper
# since PyTorch 0.4; kept here to preserve the script's existing style.
train_x = Variable(torch.Tensor(train_x.transpose([0, 3, 1, 2])))
train_y = Variable(torch.Tensor(train_y))
test_x = Variable(torch.Tensor(test_x.transpose([0, 3, 1, 2])))
test_y = Variable(torch.Tensor(test_y))