TensorBoard visualization for PyTorch

1. Launch:

tensorboard --logdir=dir
Example:
tensorboard --logdir=D:\code\Machine-Learning-Collection\ML\AladdinPerssonPytorch\Basics\dataset\MNIST

Result:

[Screenshot: the TensorBoard dashboard opened in the browser]
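
If the default port 6006 is already taken, you can pass a different one; --port is a standard TensorBoard flag (6007 below is just an example):

tensorboard --logdir=dir --port=6007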

2. Some useful guides:

Guide 1: writer.add_scalar

# First, import the SummaryWriter module
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter  # to print to tensorboard

# define the model class and the rest of the setup,
# and set some hyperparameters
train_dataset = datasets.MNIST(
    root="dataset/", train=True, transform=transforms.ToTensor(), download=True
)
batch_sizes = [32, 256]
learning_rates = [1e-2, 1e-3, 1e-4, 1e-5]
step = 1
# Train Network
for batch_size in batch_sizes:
    for learning_rate in learning_rates:
        # create one writer per hyperparameter combination; each logs to its own directory
        writer = SummaryWriter(
            f"D:\\code\\Machine-Learning-Collection\\ML\\AladdinPerssonPytorch\\Basics\\dataset\\MNIST\\MiniBatchSize {batch_size} LR {learning_rate}"
        )
        # define the loss and optimizer and run the forward/backward pass here
        # (omitted; the full training loop appears in Guide 2 below)
        _, prediction = scores.max(1)
        num_correct = (prediction == targets).sum()
        run_training_acc = float(num_correct) / float(data.shape[0])
        
        writer.add_scalar("training_loss", loss, global_step=step)
        writer.add_scalar("run_training_acc", run_training_acc, global_step=step)
        step += 1

[Screenshot: training_loss and run_training_acc curves, one per batch size / learning rate combination]
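
For reference, here is a minimal self-contained add_scalar sketch. The log directory runs/demo and the dummy loss values are placeholders of mine, not part of the tutorial code:

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("runs/demo")  # hypothetical log dir; launch with --logdir=runs

for step, loss in enumerate([0.9, 0.6, 0.4, 0.3, 0.25]):
    # each call appends one point to the "training_loss" curve at x = step
    writer.add_scalar("training_loss", loss, global_step=step)

writer.close()  # flush pending events to disk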

Guide 2: writer.add_hparams

writer.add_hparams({"key1": value1, "key2": value2}, {"metric1": m1, "metric2": m2})

The second dict holds the metric values (e.g. accuracy and loss) computed while training under the hyperparameters given in the first dict. Note that a Python dict cannot repeat a key, so each metric needs a distinct name:

# Train Network
for batch_size in batch_sizes:
    for learning_rate in learning_rates:
        # reset the metric lists for every (batch_size, lr) combination so the
        # hparams summary below reflects only this run
        losses = []
        accuracies = []
        writer = SummaryWriter(
            f"D:\\code\\Machine-Learning-Collection\\ML\\AladdinPerssonPytorch\\Basics\\dataset\\MNIST\\MiniBatchSize {batch_size} LR {learning_rate}"
        )
        model = CNN(in_channels=in_channels, num_classes=num_classes).to(device)
        model.train()
        train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
        # Loss and optimizer
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        for epoch in range(num_epochs):
            for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
                # Get data to cuda if possible
                data = data.to(device=device)
                targets = targets.to(device=device)

                # forward
                scores = model(data)
                loss = criterion(scores, targets)
                losses.append(loss.item())
                # backward
                optimizer.zero_grad()
                loss.backward()
                # gradient descent or adam step
                optimizer.step()
                # do some training data show
                _, prediction = scores.max(1)
                num_correct = (prediction == targets).sum()
                run_training_acc = float(num_correct) / float(data.shape[0])
                accuracies.append(run_training_acc)
        writer.add_hparams(
            {"lr": learning_rate, "bsize": batch_size},
            {
                "accuracy": sum(accuracies) / len(accuracies),
                "loss": sum(losses) / len(losses),
            },
        )

[Screenshots: the HPARAMS table view and parallel coordinates view comparing the runs]
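
For reference, here is the add_hparams call in isolation. The log directory and the numbers are placeholders of mine:

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("runs/hparams_demo")  # hypothetical log dir
writer.add_hparams(
    {"lr": 1e-3, "bsize": 64},           # hyperparameters of this run
    {"accuracy": 0.97, "loss": 0.08},    # metrics measured under them
)
writer.close()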

Guide 3: writer.add_image and writer.add_histogram

In the MNIST example we set batch_size=64. To see what the images in each batch actually look like, build a grid with torchvision.utils.make_grid(data) and log it with writer.add_image("name", value); it then shows up in TensorBoard's Images tab.
[Screenshot: a grid of MNIST digits from one training batch]
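
A minimal self-contained sketch of this pattern. The log directory, tag name, and the random stand-in batch are placeholders of mine:

import torch
import torchvision
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("runs/images_demo")  # hypothetical log dir
images = torch.randn(64, 1, 28, 28)         # stand-in for a real MNIST batch (N, C, H, W)
img_grid = torchvision.utils.make_grid(images)
writer.add_image("mnist_images", img_grid)
writer.close()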
You can also trace how the fc1 layer's weights change over training with writer.add_histogram; watching these distributions works as a lightweight debugging tool.
[Screenshot: histograms of the fc1 weights across training steps]
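
A minimal sketch of the histogram call. The layer, the tag name, and the simulated weight updates are placeholders of mine:

import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("runs/histogram_demo")  # hypothetical log dir
fc1 = nn.Linear(784, 100)

for step in range(5):
    with torch.no_grad():
        # pretend a training step nudged the weights, then log their distribution
        fc1.weight.add_(0.01 * torch.randn_like(fc1.weight))
    writer.add_histogram("fc1", fc1.weight, global_step=step)

writer.close()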

Guide 4: writer.add_graph(model, data)

"""
An implementation of LeNet CNN architecture.

Programmed by Aladdin Persson <aladdin.persson at hotmail dot com>
*    2020-04-05 Initial coding
*    2022-12-20 Update comments, code revision, checked still works with latest PyTorch version
"""

import torch
import torch.nn as nn  # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions
from torch.utils.tensorboard import SummaryWriter  # to print to tensorboard


class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.relu = nn.ReLU()
        self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
        self.conv1 = nn.Conv2d(
            in_channels=1,
            out_channels=6,
            kernel_size=5,
            stride=1,
            padding=0,
        )
        self.conv2 = nn.Conv2d(
            in_channels=6,
            out_channels=16,
            kernel_size=5,
            stride=1,
            padding=0,
        )
        self.conv3 = nn.Conv2d(
            in_channels=16,
            out_channels=120,
            kernel_size=5,
            stride=1,
            padding=0,
        )
        self.linear1 = nn.Linear(120, 84)
        self.linear2 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.relu(self.conv1(x))
        x = self.pool(x)
        x = self.relu(self.conv2(x))
        x = self.pool(x)
        x = self.relu(
            self.conv3(x)
        )  # num_examples x 120 x 1 x 1 --> num_examples x 120
        x = x.reshape(x.shape[0], -1)
        x = self.relu(self.linear1(x))
        x = self.linear2(x)
        return x


def test_lenet():
    x = torch.randn(64, 1, 32, 32)
    model = LeNet()
    writer = SummaryWriter(
        "D:\\code\\Machine-Learning-Collection\\ML\\AladdinPerssonPytorch\\Basics\\dataset\\MNIST\\lenet5"
    )
    writer.add_graph(model, x)  # log the model graph for TensorBoard's Graphs tab
    writer.close()  # flush so the graph is actually written out
    return model(x)


if __name__ == "__main__":
    out = test_lenet()
    print(out.shape)

[Screenshot: the LeNet graph in TensorBoard's Graphs tab]

Notes

Based on this YouTube tutorial:

https://www.youtube.com/watch?v=ACmuBbuXn20&list=PLhhyoLH6IjfxeoooqP9rhU3HJIAVAJ3Vz&index=17
