Preface
This section covers PyTorch's built-in data-handling utilities: the Dataset and DataLoader classes in torch.utils.data, what they do and how to use them, and how to subclass Dataset to feed our own data into training.
1. Code
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset

class DDataset(Dataset):
    def __init__(self, filepath):
        # Load the whole CSV into memory; every value is parsed as float32
        xy = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        self.len = xy.shape[0]
        self.x_data = torch.from_numpy(xy[:, :-1])   # first eight columns: features
        self.y_data = torch.from_numpy(xy[:, [-1]])  # ninth (last) column: label, kept 2-D

    def __getitem__(self, index):
        # Called by DataLoader to fetch one sample by index
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        # Tells DataLoader (and len()) how many samples there are
        return self.len
dataset = DDataset('diabetes.csv.gz')
train_loader = DataLoader(dataset=dataset, batch_size=32, shuffle=True, num_workers=2)
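Before training, it can help to check what these pieces actually return. A quick sketch (the shapes assume the diabetes file has eight feature columns and one label column; on Windows, run it under the if __name__ == '__main__': guard because num_workers > 0 spawns worker processes):

    print(len(dataset))        # number of rows, via __len__
    x0, y0 = dataset[0]        # one sample, via __getitem__
    print(x0.shape, y0.shape)  # torch.Size([8]) torch.Size([1])

    xb, yb = next(iter(train_loader))  # one shuffled mini-batch
    print(xb.shape, yb.shape)          # torch.Size([32, 8]) torch.Size([32, 1])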
class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        # Three linear layers shrink the 8 features: 8 -> 6 -> 4 -> 1
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        self.activation = torch.nn.Sigmoid()

    def forward(self, x):
        x = self.activation(self.linear1(x))
        x = self.activation(self.linear2(x))
        x = self.activation(self.linear3(x))  # final sigmoid keeps the output in (0, 1) for BCELoss
        return x
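A dummy forward pass is a cheap way to confirm the layer dimensions line up (the batch size of 4 here is arbitrary):

    model_check = Model()
    dummy = torch.randn(4, 8)        # 4 fake samples with 8 features each
    print(model_check(dummy).shape)  # torch.Size([4, 1]), one probability per sample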
model = Model()
criterion = torch.nn.BCELoss(reduction='mean')  # size_average is deprecated; 'mean' averages the loss over the batch
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
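As a sanity check on what BCELoss(reduction='mean') computes, namely the mean of -[y·log(p) + (1-y)·log(1-p)], here is an illustrative comparison against the formula written out by hand (the probabilities and targets are made up):

    p = torch.tensor([0.9, 0.2])  # made-up predicted probabilities
    y = torch.tensor([1.0, 0.0])  # made-up binary targets
    manual = -(y * torch.log(p) + (1 - y) * torch.log(1 - p)).mean()
    print(torch.allclose(manual, torch.nn.BCELoss(reduction='mean')(p, y)))  # True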
if __name__ == '__main__':  # required on Windows: num_workers > 0 spawns workers that re-import this module
    # Train over the dataset for 100 epochs
    for epoch in range(100):
        # each epoch iterates over shuffled mini-batches
        for i, data in enumerate(train_loader, 0):
            inputs, labels = data
            y_pred = model(inputs)
            loss = criterion(y_pred, labels)
            print(epoch, i, loss.item())
            optimizer.zero_grad()  # clear gradients accumulated from the previous step
            loss.backward()        # backpropagate
            optimizer.step()       # update the weights
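After training, the sigmoid outputs can be turned into class labels by thresholding at 0.5. A minimal sketch of an accuracy check, placed after the training loop (illustrative only: it evaluates on the training data, since this example has no held-out test set):

    with torch.no_grad():              # no gradients needed for evaluation
        probs = model(dataset.x_data)  # probabilities in (0, 1)
        preds = (probs >= 0.5).float() # threshold at 0.5 -> 0/1 labels
        acc = (preds == dataset.y_data).float().mean().item()
        print('training accuracy:', acc)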