PyTorch basics tutorial


''' by L'''
import torch 
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable  # deprecated since PyTorch 0.4; plain Tensors work directly
import torch.utils.data as data
import torchvision
from collections import OrderedDict

from tensorboardX import SummaryWriter

# MODULE CONTAINER
class NET(nn.Module):
    def __init__(self, xx):
        ''' xx: any nn.Module supplied by the caller (a placeholder slot) '''
        super(NET, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3)
        self.seq = nn.Sequential(OrderedDict([('conv1', xx)]))
        self.lst = nn.ModuleList([xx])  # ModuleList registers sub-module parameters; a plain list would not

        self._weights_init()

    def forward(self, x):
        ''' COMPUTE PROCESS '''
        out = F.relu(self.conv1(x))
        out = self.seq(out)
        for m in self.lst:  # apply each registered sub-module in turn
            out = m(out)

        return out

    # parameter initialization
    def _weights_init(self):
        for mm in self.modules():
            if isinstance(mm, nn.Conv2d):
                nn.init.kaiming_normal_(mm.weight)
                if mm.bias is not None:
                    nn.init.constant_(mm.bias, 0)
            elif isinstance(mm, nn.BatchNorm2d):
                nn.init.constant_(mm.weight, 1)
                nn.init.constant_(mm.bias, 0)

# INIT
def weight_init(mm):
    if isinstance(mm, nn.Conv2d):
        nn.init.xavier_normal_(mm.weight)
        if mm.bias is not None:
            nn.init.constant_(mm.bias, 0)
    elif isinstance(mm, nn.Linear):
        nn.init.normal_(mm.weight, std=0.01)
        nn.init.constant_(mm.bias, 0)


model = NET(nn.Conv2d(64, 64, kernel_size=3, padding=1))  # any 64-channel module fills the xx slot; this choice is illustrative
#model.apply(weight_init)

# SINGLE GPU
model = model.cuda(0)  # or .cuda(device_id)

# WRITER
writer = SummaryWriter()
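# tensorboardX writes event files under ./runs/ by default; view them with:
#   tensorboard --logdir runs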

# MULTI GPU /one machine, multiple GPUs/
model = nn.DataParallel(model.cuda(0), device_ids=[0, 1])  # device_ids[0] must hold the model's parameters
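# Caveat worth noting: DataParallel keeps the real network under model.module, so its
# state_dict keys gain a 'module.' prefix. To save weights loadable without the wrapper:
# torch.save(model.module.state_dict(), 'name.pth')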


# OPTIM
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9) 

# ignored_params = list(map(id, model.fc.parameters()))
# base_params = filter( lambda p: id(p) not in ignored_params, model.parameters())
# optimizer = optim.SGD([{'params': base_params}, {'params': model.fc.parameters(), 'lr': INIT_LR*10}], lr=INIT_LR, momentum=0.9)

scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)  # decay lr 10x every 30 epochs (step_size illustrative)

# CRITERIA /loss/
criterion = nn.CrossEntropyLoss()
# criterion = nn.CrossEntropyLoss(weight=class_weights, reduction='mean')  # class_weights illustrative; 'reduce' is deprecated


# PROFILER /torch.autograd.profiler.profile is a context manager/
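# A hedged usage sketch: dummy_input and its 224x224 shape are illustrative assumptions.
dummy_input = torch.randn(1, 3, 224, 224).cuda()
with torch.autograd.profiler.profile(use_cuda=True) as prof:
    model(dummy_input)
print(prof.key_averages().table(sort_by='cuda_time_total'))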


# DATASET
''' ImageNet normalization statistics: mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225] '''
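# A minimal custom Dataset sketch using the statistics above. CUSTOM_DATASET was only
# referenced, never defined; this class, its `samples` field, and the transform
# pipeline are illustrative assumptions.
from torchvision import transforms

normalize = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

class CUSTOM_DATASET(data.Dataset):
    def __init__(self, samples, transform=normalize):
        self.samples = samples          # e.g. a list of (PIL image, label) pairs
        self.transform = transform

    def __getitem__(self, index):
        img, label = self.samples[index]
        return self.transform(img), label

    def __len__(self):
        return len(self.samples)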

custom_dataset = CUSTOM_DATASET(samples)  # `samples` must be provided by the reader
train_loader = data.DataLoader(dataset=custom_dataset, batch_size=64, shuffle=True)   # batch_size illustrative
test_loader = data.DataLoader(dataset=custom_dataset, batch_size=64, shuffle=False)

# TRAIN EVAL FUNC

def policy_lr(e, optimizer, step=30):
    ''' manual alternative to StepLR: decay lr 10x every `step` epochs (step value illustrative) '''
    if e % step or e == 0:
        return optimizer
        return optimizer
    for param_group in optimizer.param_groups:
        param_group['lr'] *= 0.1
    return optimizer 

def train(e, train_data_loader):
    ''' training process '''
    model.train()

    # optimizer = policy_lr(e, optimizer)  # manual decay; not needed when StepLR is used

    for j, (x, y) in enumerate(train_data_loader):

        x, y = Variable(x), Variable(y)  # Variable is a no-op since PyTorch 0.4
        if torch.cuda.is_available():
            x, y = x.cuda(), y.cuda()

        out = model(x)

        loss = criterion(out, y) 

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()


        # batch accuracy, used below for logging and checkpointing
        acc = (out.max(1)[1] == y).float().mean().item()

        if j % 100 == 0:
            print('epoch/iter {:0>2}/{:0>4} loss: {:.4f}'.format(e, j, loss.item()))

        # checkpoint occasionally (the 0.9 accuracy threshold is illustrative)
        if e % 5 == 0 and acc > 0.9:
            torch.save(model.state_dict(), 'name_{:0>2}.pth'.format(e))

        global_step = e * len(train_data_loader) + j
        writer.add_scalar('train/loss', loss.item(), global_step=global_step)
        writer.add_scalar('train/acc', acc, global_step=global_step)
        writer.add_image('train/input', torchvision.utils.make_grid(x), global_step=global_step)
        writer.add_text('train/log', 'epoch {} iter {}'.format(e, j), global_step=global_step)

def eval(test_data_loader):
    ''' eval process '''
    model.eval()
    correct = 0
    total = 0

    for j, (x, y) in enumerate(test_data_loader):
        if torch.cuda.is_available():
            x, y = x.cuda(), y.cuda()
        with torch.no_grad():
            out = model(x)
        correct += (out.max(1)[1] == y).sum().item()
        total += y.size(0)

    print('eval acc: {:.4f}'.format(correct / total))


if __name__ == '__main__':

    train_loader = data.DataLoader(dataset=custom_dataset, batch_size=64, shuffle=True)
    test_loader = data.DataLoader(dataset=custom_dataset, batch_size=64, shuffle=False)

    epoch = 100  # total epoch count; illustrative

    for e in range(epoch):

        scheduler.step()  # note: since PyTorch 1.1, schedulers should step after optimizer.step()
        writer.add_scalar('train/lr', optimizer.param_groups[0]['lr'], global_step=e)

        train(e, train_loader)
        eval(test_loader)

    writer.export_scalars_to_json('./xx.json')
    writer.close()

    # SAVE RESTORE
    torch.save(model.state_dict(), 'path_{}.pth'.format(e))
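    # RESTORE: a minimal sketch mirroring the save above (map_location is optional;
    # note the 'module.' key prefix caveat mentioned at the DataParallel step)
    state = torch.load('path_{}.pth'.format(e), map_location='cpu')
    model.load_state_dict(state)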