# 今天也要加油鸭!冲冲冲😊 ("Keep it up today! Let's go!" — made a comment so the file parses)
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
# Dataset是一个抽象的类,我们可以继承这个类来定义我们自己的类
# 准备数据
class DiabetesDataset(Dataset):  # subclass of torch.utils.data.Dataset
    """Dataset over a delimited numeric file.

    Each row holds the features in every column except the last, and the
    binary label in the last column. The whole file is loaded into memory
    eagerly (fine for a small dataset like diabetes.csv.gz).
    """

    def __init__(self, filepath):
        # Bug fix: `delimiter ==','` was a syntax error; keyword args use `=`.
        xy = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        self.len = xy.shape[0]  # number of samples (rows)
        # Bug fix: features are every column EXCEPT the last -> xy[:, :-1];
        # the original xy[:, -1] selected only the label column.
        self.x_data = torch.from_numpy(xy[:, :-1])
        # [[-1]] keeps the label 2-D with shape (N, 1), matching BCELoss input.
        self.y_data = torch.from_numpy(xy[:, [-1]])

    def __getitem__(self, index):
        # Return one (features, label) pair so DataLoader can index samples.
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        # Number of samples, used by DataLoader to size an epoch.
        return self.len
# Build the dataset instance and wrap it in a DataLoader for mini-batching.
dataset = DiabetesDataset('diabetes.csv.gz')
# Bug fix: the original call was missing the commas between keyword
# arguments, which is a syntax error.
train_loader = DataLoader(dataset=dataset,
                          batch_size=32,   # samples per mini-batch
                          shuffle=True,    # reshuffle every epoch
                          num_workers=2)   # loader subprocesses
# 用类定义模型
class Model(torch.nn.Module):
    """Three-layer fully connected binary classifier: 8 -> 6 -> 4 -> 1.

    A sigmoid follows every linear layer, so the final output lies in
    (0, 1) and can be fed directly to BCELoss.
    """

    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        # One stateless Sigmoid module, reused after each linear layer.
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        # Bug fix: the original lines were garbled ("x = self . self
        # .linear1(x))" with an unbalanced paren and a truncated signature);
        # the intended computation is sigmoid(linear(x)) at every layer.
        x = self.sigmoid(self.linear1(x))
        x = self.sigmoid(self.linear2(x))
        x = self.sigmoid(self.linear3(x))
        return x
model = Model()
# Construct the loss function and the optimizer.
# Bug fix: `size_average=True` was deprecated and has been removed from
# recent PyTorch releases; `reduction='mean'` is the exact equivalent
# (average the per-sample binary cross-entropy over the batch).
criterion = torch.nn.BCELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
if __name__ == '__main__':  # guard: lets DataLoader worker processes re-import safely
    for epoch in range(100):  # training cycle
        for i, data in enumerate(train_loader, 0):  # loop over all mini-batches
            # 1. Prepare data: each batch is a (inputs, labels) pair.
            inputs, labels = data
            # 2. Forward pass and loss.
            y_pred = model(inputs)
            loss = criterion(y_pred, labels)
            # Bug fix: `loss.item` printed the bound method object;
            # it must be CALLED to get the scalar loss value.
            print(epoch, i, loss.item())
            # 3. Backward pass — zero gradients first (they accumulate).
            optimizer.zero_grad()
            loss.backward()
            # 4. Update parameters.
            optimizer.step()