from tensorboardX import SummaryWriter: visualizing training in Python

This post explains how to use the tensorboardX library in Python, focusing on importing the model and using SummaryWriter for logging so that the training process can be visualized. It first outlines how to install tensorboardX, then walks through examples of importing the model and using SummaryWriter to show how they fit into a deep learning project.
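As a quick note (assuming a standard pip-based environment): tensorboardX is usually installed with pip install tensorboardX, and the TensorBoard viewer itself with pip install tensorboard.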

1. Usage

import math
import time

import torch
from tensorboardX import SummaryWriter

# One writer per run; logs are written under summary/<model-name>
# (args, model and NUM_EPOCHS are assumed to come from the surrounding script).
writer = SummaryWriter(log_dir="summary/{}".format(args.model))

def train():
    # Illustrative: log the per-epoch metrics; the "Train/" prefix groups
    # these curves under one section in TensorBoard.
    writer.add_scalar("Train/loss", epoch_loss, epoch)
    writer.add_scalar("Train/ppl", math.pow(2, epoch_loss), epoch)
    writer.add_scalar("Train/bleu", epoch_bleu, epoch)

def evaluate():
    pass  # returns (epoch_loss, epoch_bleu) in the full script

best_epoch_loss = None
for epoch in range(1, NUM_EPOCHS):
    epoch_start_time = time.time()
    train()
    epoch_loss, epoch_bleu = evaluate()
    print('+' * 100)
    print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
          'valid ppl {:6.2f} | bleu {:3.2f} |'.format(epoch, (time.time() - epoch_start_time), epoch_loss,
                                                      math.pow(2, epoch_loss), epoch_bleu))
    print('+' * 100)
    if not best_epoch_loss or epoch_loss < best_epoch_loss:
        # Completing the truncated save block is an assumption based on the
        # usual PyTorch checkpointing pattern.
        with open(args.save, 'wb') as f:
            torch.save(model, f)
        best_epoch_loss = epoch_loss
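
To check that the logging itself works independently of the full training script, here is a minimal, self-contained sketch; the log directory summary/demo and the dummy loss values are made up for illustration.

import math
from tensorboardX import SummaryWriter

writer = SummaryWriter(log_dir="summary/demo")
for epoch in range(1, 11):
    fake_loss = 1.0 / epoch                          # stand-in for a real training loss
    writer.add_scalar("Train/loss", fake_loss, epoch)
    writer.add_scalar("Train/ppl", math.pow(2, fake_loss), epoch)
writer.close()

The resulting curves can then be viewed by running tensorboard --logdir summary and opening the printed URL (by default http://localhost:6006) in a browser.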
A reader comment (posted 05-30) included the following code and asked how to modify it into a VGG16 two-class classification model:

# module.py
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter

class MyModule(nn.Module):
    def __init__(self):
        super(MyModule, self).__init__()
        self.model1 = nn.Sequential(
            nn.Flatten(),
            nn.Linear(3072, 100),
            nn.ReLU(),
            nn.Linear(100, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        x = self.model1(x)
        return x

# training script
import torch
import torchvision
from PIL.Image import Image
from torch.utils.tensorboard import SummaryWriter
from torch import nn, optim
from torch.utils.data import dataloader
from torchvision.transforms import transforms

from module import MyModule

train = torchvision.datasets.CIFAR10(root="../data", train=True, download=True,
                                     transform=transforms.ToTensor())
vgg_model = torchvision.models.vgg16(pretrained=True)
vgg_model.classifier.add_module('add_linear', nn.Linear(1000, 2))
#ToImage = transforms.ToPILImage()
#Image.show(ToImage(train[0][0]))
train_data = dataloader.DataLoader(train, batch_size=128, shuffle=True)
model = MyModule()
#criterion = nn.BCELoss()
epochs = 5
learningRate = 1e-3
# Note: the optimizer is built over MyModule's parameters, but the forward/backward
# pass below runs through vgg_model, so vgg_model's weights are never updated.
optimizer = optim.SGD(model.parameters(), lr=learningRate)
loss = nn.CrossEntropyLoss()
Writer = SummaryWriter(log_dir="Training")

step = 0
for epoch in range(epochs):
    total_loss = 0
    for data, labels in train_data:
        y = vgg_model(data)
        los = loss(y, labels)
        optimizer.zero_grad()
        los.backward()
        optimizer.step()
        Writer.add_scalar("Training", los, step)
        step = step + 1
        if step % 100 == 0:
            print("Training for {0} times".format(step))
        total_loss += los
    print("total_loss is {0}".format(los))

Writer.close()
torch.save(vgg_model, "model_vgg.pth")
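One possible answer, as a sketch rather than a definitive fix: it assumes the simplest route of replacing VGG16's final classifier layer and optimizing vgg_model itself, with MyModule dropped entirely.

import torch
import torchvision
from torch import nn, optim

# pretrained=True mirrors the original snippet; newer torchvision versions
# prefer the weights=... argument instead.
vgg_model = torchvision.models.vgg16(pretrained=True)
# In torchvision's VGG16, classifier[6] is the final Linear(4096, 1000) layer;
# replacing it yields raw two-class logits, which is what nn.CrossEntropyLoss expects.
vgg_model.classifier[6] = nn.Linear(4096, 2)

optimizer = optim.SGD(vgg_model.parameters(), lr=1e-3)  # optimize the network actually being trained
criterion = nn.CrossEntropyLoss()

dummy_batch = torch.randn(4, 3, 32, 32)   # CIFAR10-sized input; the adaptive average-pool layer handles the small spatial size
print(vgg_model(dummy_batch).shape)       # torch.Size([4, 2])

The labels fed to nn.CrossEntropyLoss must then be integer class indices 0 or 1; note that plain CIFAR10 has labels 0 to 9, so a two-class head only makes sense on a dataset (or a filtered subset) whose labels are restricted to two classes.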