【Note】argmax: used, for example, as acc=torch.mean((output.argmax(1)==target.argmax(1)),dtype=torch.float32)

Note:

Main text:

With batch_size = 1:

With batch_size != 1:

The accuracy comes out as 0, 0.5 or 1 (with a batch of 2 only these three fractions are possible). argmax(1) returns, for each row, the index of the column holding the maximum value.
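As a minimal sketch (a hypothetical 2-sample batch, not taken from the run above), this is why the accuracy can only be 0, 0.5 or 1 when the batch holds two samples:

import torch

# hypothetical network scores and one-hot targets for a 2-sample batch
output = torch.tensor([[0.9, 0.1],   # predicted class 0
                       [0.2, 0.8]])  # predicted class 1
target = torch.tensor([[1.0, 0.0],   # true class 0 -> correct
                       [1.0, 0.0]])  # true class 0 -> wrong

correct = output.argmax(1) == target.argmax(1)   # tensor([ True, False])
acc = torch.mean(correct, dtype=torch.float32)   # tensor(0.5000); same as correct.float().mean()
print(acc)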

With batch_size back to its normal value of 100:

import torch
from PIL import Image
import numpy as np
import torch.nn as nn
import os
from torch.utils.data import Dataset, DataLoader


class mydataset(Dataset):
    def __init__(self, path):
        self.path = path
        self.dataset = os.listdir(self.path)
        self.mean = [0.4878, 0.4545, 0.4168]
        self.std = [0.2623, 0.2555, 0.2577]

    def __getitem__(self, index):
        name = self.dataset[index]
        # the class label is the filename prefix before the first "."
        name_list = name.split(".")
        target = int(name_list[0])
        target = torch.tensor(target)
        img = Image.open(os.path.join(self.path, name))
        img = np.array(img) / 255
        # subtract the per-channel mean and divide by the per-channel std
        img = (img - self.mean) / self.std
        # img is float64 here, so cast to float32 and move channels to the front (C, H, W)
        data = torch.tensor(img, dtype=torch.float32).permute(2, 0, 1)
        return data, target

    def __len__(self):
        return len(self.dataset)


class mynetwork(nn.Module):
    def __init__(self):
        super(mynetwork, self).__init__()

        # Sequential: an ordered container that runs the layers in order
        self.line1 = nn.Sequential(
            nn.Linear(3 * 100 * 100, 5120),
            nn.ReLU(),
            nn.Linear(5120, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 2560),
            nn.ReLU(),
            nn.Linear(2560, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 2),
        )

    # parse (v.): to analyse; to break down grammatically
    def forward(self, parse):
        data = torch.reshape(parse, shape=(-1, 3 * 100 * 100))
        return self.line1(data)


class train(object):
    def __init__(self, path):
        self.path = path
        self.test_dataset = mydataset(self.path)
        self.train_dataset = mydataset(self.path)
        self.criterion = torch.nn.MSELoss()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.net = mynetwork().to(self.device)
        self.optimize = torch.optim.Adam(self.net.parameters())

    def dataloader(self, batch):
        train_data_loader = DataLoader(dataset=self.train_dataset, batch_size=batch, shuffle=True)
        test_data_loader = DataLoader(dataset=self.test_dataset, batch_size=batch, shuffle=True)
        return train_data_loader, test_data_loader

    def trainnet(self, batch, epoch):
        train_data_loader, test_data_loader = self.dataloader(batch)
        losses = []
        accuracy = []
        for i in range(epoch):
            for j, (input, target) in enumerate(train_data_loader):
                input = input.to(self.device)
                output = self.net(input)
                # print(target, type(target), target.shape, target.size())
                # convert the integer labels to one-hot rows so they match the 2-unit output (MSELoss needs equal shapes)
                target = torch.zeros(target.size(0), 2).scatter_(1, target.view(-1, 1), 1).to(self.device)
                # print(target, type(target), target.shape, target.size())

                loss = self.criterion(output, target)
                losses.append(loss.item())

                self.optimize.zero_grad()
                loss.backward()
                self.optimize.step()

                if j % 5 == 0:
                    print("epoch{}/{} , iteration{}/{}".format(i, epoch, j, len(train_data_loader)))
                    # fraction of samples whose predicted class index matches the target class index
                    acc = torch.mean((output.argmax(1) == target.argmax(1)), dtype=torch.float32)
                    print(acc, output.argmax(1) == target.argmax(1), type(output.argmax(1) == target.argmax(1)), (output.argmax(1) == target.argmax(1)).shape)
                    input()  # pause here so the printed tensors can be inspected



if __name__ == "__main__":
    path = r"./cat_dog/img"
    t = train(path)
    t.trainnet(100, 10)








Output (batch_size = 100; the accuracy 0.4700 is the mean of the 100 boolean comparisons below):

epoch0/10 , iteration0/120
tensor(0.4700, device='cuda:0') tensor([False, False,  True, False,  True,  True, False,  True, False, False,
        False,  True, False, False, False, False, False,  True,  True,  True,
        False, False, False, False, False, False, False,  True,  True, False,
        False, False,  True, False,  True, False,  True,  True, False, False,
        False, False,  True, False,  True, False, False,  True, False,  True,
         True,  True, False, False,  True,  True, False,  True, False, False,
         True, False, False, False,  True,  True,  True,  True,  True,  True,
         True,  True, False,  True, False, False,  True,  True,  True, False,
        False, False,  True,  True,  True,  True,  True, False,  True,  True,
        False, False, False, False,  True, False,  True, False,  True,  True],
       device='cuda:0') <class 'torch.Tensor'> torch.Size([100])
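The one-hot targets above come from scatter_ on a zero matrix (the torch.zeros(...).scatter_(...) line in trainnet). A minimal standalone sketch of the same trick, using hypothetical labels:

import torch

# hypothetical integer labels for a batch of 4 images (2 classes, as in the model above)
labels = torch.tensor([0, 1, 1, 0])

# write a 1 into column labels[i] of row i of a zero matrix -> one-hot rows
one_hot = torch.zeros(labels.size(0), 2).scatter_(1, labels.view(-1, 1), 1)
print(one_hot)
# tensor([[1., 0.],
#         [0., 1.],
#         [0., 1.],
#         [1., 0.]])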

Function reference:

numpy.ma.core.MaskedArray.argmax(self, axis: Optional[int] = None, fill_value: Any = None, out: Union[None, int, float] = None) -> Any
Returns array of indices of the maximum values along the given axis. Masked values are treated as if they had the value fill_value.
Examples
>>> a = np.arange(6).reshape(2,3)
>>> a.argmax()
5
>>> a.argmax(0)
array([1, 1, 1])
>>> a.argmax(1)
array([2, 2])
Params:
axis – If None, the index is into the flattened array, otherwise along the specified axis
fill_value – Value used to fill in the masked values. If None, the output of maximum_fill_value(self._data) is used instead.
out – Array into which the result can be placed. Its type is preserved and it must be of the right shape to hold the output.
`argmax(self, axis=None, fill_value=None, out=None)` on docs.scipy.org 
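The reference above is for numpy's MaskedArray.argmax; the training code actually calls torch.Tensor.argmax, which behaves the same way along a given dim. A quick check with torch:

import torch

a = torch.arange(6).reshape(2, 3)  # tensor([[0, 1, 2], [3, 4, 5]])
print(a.argmax())        # tensor(5)         -> index into the flattened tensor
print(a.argmax(dim=0))   # tensor([1, 1, 1]) -> for each column, the row index of the max
print(a.argmax(dim=1))   # tensor([2, 2])    -> for each row, the column index of the max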
