Torch自查使用

数据集Dataset&Dataloader

# Wrap the raw data in a Dataset, then batch and shuffle it with a DataLoader.
# NOTE(review): MyDataset, file, and batch_size must be defined elsewhere — verify.
dataset = MyDataset(file)

# shuffle=True reshuffles the data every epoch (typical for training loaders).
dataloader = DataLoader(dataset, batch_size, shuffle = True)

模型结构 class Model

class MyModel(nn.Module):
    """A minimal feed-forward network: Linear -> Sigmoid -> Linear.

    Args:
        in_dim: number of input features per sample.
        hidden_dim: width of the hidden layer.
        out_dim: number of output features per sample.
    """

    def __init__(self, in_dim=10, hidden_dim=32, out_dim=1):
        # bug fix: was super(MyModule, self).__init__() — MyModule is undefined
        # (NameError); the class is named MyModel. Use the modern zero-arg form.
        super().__init__()
        # bug fix: nn.Linear() requires in_features/out_features; the original
        # calls with no arguments raise TypeError. Dims are now parameters with
        # defaults, so MyModel() still works for callers.
        self.net = nn.Sequential(
            nn.Linear(in_dim, hidden_dim),
            nn.Sigmoid(),
            nn.Linear(hidden_dim, out_dim),
        )

    def forward(self, x):
        """Run the forward pass; x has shape (batch, in_dim)."""
        return self.net(x)



class MyModel(nn.Module):
    """Layer-by-layer variant of the same network: each layer is an attribute.

    Args:
        in_dim: number of input features per sample.
        hidden_dim: width of the hidden layer.
        out_dim: number of output features per sample.
    """

    def __init__(self, in_dim=10, hidden_dim=32, out_dim=1):
        # bug fix: was super(MyModule, self).__init__() — MyModule is undefined.
        super().__init__()
        # bug fix: the original lines ended with commas, so layer1/layer2 were
        # bound to 1-tuples, not modules — forward() would raise and the
        # parameters would never be registered with the module.
        # bug fix: nn.Linear() also needs in/out feature counts.
        self.layer1 = nn.Linear(in_dim, hidden_dim)
        self.layer2 = nn.Sigmoid()
        self.layer3 = nn.Linear(hidden_dim, out_dim)

    def forward(self, x):
        """Run the forward pass; x has shape (batch, in_dim)."""
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        return out

整体流程

# Training
# NOTE(review): assumes MyDataset, file, device and n_epochs are defined elsewhere.
dataset = MyDataset(file)
tr_set = DataLoader(dataset, 16, shuffle=True)
model = MyModel().to(device)  # bug fix: instantiate before .to() (was MyModel.to(device))
criterion = nn.MSELoss()      # bug fix: class is nn.MSELoss (was nn.MSEloss)
optimizer = torch.optim.SGD(model.parameters(), 0.1)  # bug fix: parameters(), not parameter()

for epoch in range(n_epochs):
    model.train()  # enable training-mode behavior (dropout, batch-norm updates)
    for x, y in tr_set:  # bug fix: was `tr.set`, a typo for the loader name
        optimizer.zero_grad()             # clear gradients from the previous step
        x, y = x.to(device), y.to(device)
        pred = model(x)                   # forward pass (bug fix: // is not a Python comment)
        loss = criterion(pred, y)
        loss.backward()                   # back-propagate gradients
        optimizer.step()                  # update parameters


# Validation  (bug fix: // is not a Python comment; use #)
model.eval()  # disable dropout; batch-norm uses running statistics
total_loss = 0
for x, y in dv_set:
    x, y = x.to(device), y.to(device)
    with torch.no_grad():  # skip gradient bookkeeping during evaluation
        pred = model(x)
        loss = criterion(pred, y)
    # loss.item() is the batch mean; multiply by batch size to sum per-sample losses
    total_loss += loss.cpu().item() * len(x)
# bug fix: compute the average once after the loop (it was recomputed every iteration)
avg_loss = total_loss / len(dv_set.dataset)
    

# Inference on the test set  (bug fix: // is not a Python comment; use #)
model.eval()
preds = []
for x in tt_set:  # test loader yields inputs only — no labels
    x = x.to(device)
    with torch.no_grad():  # no gradients needed for prediction
        pred = model(x)
        preds.append(pred.cpu())  # collect predictions on the CPU


# Save only the learned parameters (the state dict), not the whole model object.
torch.save(model.state_dict(),path)

# Restore: load the checkpoint file and copy the parameters into an
# already-constructed model with a matching architecture.
ckpt = torch.load(path)
model.load_state_dict(ckpt)

加载预训练模型

# Load a pretrained model.
# NOTE(review): CNN must be defined/imported elsewhere — not shown in this file.
model=CNN(in_channels=3) #build the model architecture
model.load_state_dict(torch.load('model.pth')) #load the trained parameters
model.cuda() #move the model to the GPU
model.eval() #disable BatchNormalization updates and Dropout

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值