201024-5步PyTorchLightning中设置并访问tensorboard

  • 导入工具箱
from pytorch_lightning.loggers import TensorBoardLogger
  • 写入记录
def training_step(self, batch, batch_idx):
    ...  # compute `loss` from the batch here (omitted in this snippet)
    self.log('my_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
  • 创建记录器logger
logger = TensorBoardLogger('tb_logs', name='my_model')
  • 关联训练器trainer
# train
model = LightningMNISTClassifier(outdim=outdim)
trainer = pl.Trainer(gpus=None, max_epochs=2, logger=logger)
trainer.fit(model)
  • 打开tensorboard
tensorboard --logdir ./tb_logs

⚠️:tensorboard命令中路径名称需要与创建logger文件夹路径名称相同

# PyTorch Lightning 1.x logging [Basic Example] (adapted from the Neptune example; this version actually uses TensorBoardLogger)

# Before you start

# Install dependencies

# Step 1: Import Libraries

from pytorch_lightning.loggers.neptune import NeptuneLogger
import os

import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision import transforms

import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger

# Step 2: Define Hyper-Parameters

# Hyper-parameters shared by the optimizer, the DataLoader, and the Trainer below.
PARAMS = {
    'max_epochs': 3,
    'learning_rate': 0.005,
    'batch_size': 256,
}

# Step 3: Define LightningModule and DataLoader

# pl.LightningModule


class LitModel(pl.LightningModule):
    """Minimal single-layer MNIST classifier used in this logging example."""

    def __init__(self):
        super().__init__()
        # One linear layer mapping flattened 28x28 images to 10 class scores.
        self.l1 = torch.nn.Linear(28 * 28, 10)

    def forward(self, x):
        # Flatten each image to a 784-vector, then linear layer + ReLU.
        flattened = x.view(x.size(0), -1)
        return torch.relu(self.l1(flattened))

    def training_step(self, batch, batch_idx):
        inputs, targets = batch
        predictions = self(inputs)
        loss = F.cross_entropy(predictions, targets)
        # Logged value shows up in the attached logger (TensorBoard here).
        self.log('train_loss', loss)
        return loss

    def configure_optimizers(self):
        # Adam with the learning rate taken from the shared PARAMS dict.
        return torch.optim.Adam(self.parameters(), lr=PARAMS['learning_rate'])


# DataLoader
# MNIST training set, downloaded into the working directory on first run.
mnist_train = MNIST(os.getcwd(), download=True, transform=transforms.ToTensor())
train_loader = DataLoader(mnist_train, batch_size=PARAMS['batch_size'])

# Step 4: Create TensorBoardLogger


logger = TensorBoardLogger('tb_logs', name='my_model')


# Step 5: Pass the logger to the Trainer (a TensorBoardLogger in this version)

# Trainer wired to the TensorBoard logger created above; epoch budget
# comes from the shared PARAMS dict.
trainer = pl.Trainer(
    max_epochs=PARAMS['max_epochs'],
    logger=logger,
)

# Step 6: Run experiment

# Instantiate the model and launch training on the MNIST loader;
# training_step logs 'train_loss' to TensorBoard each step.
model = LitModel()

trainer.fit(model, train_loader)

# Explore Results

评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

GuokLiu

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值