PyTorch image classification demo

Size

# torch.Size([3])
[1, 2, 3]
# torch.Size([1, 3])
[[1, 2, 3]]
# torch.Size([3, 1])
[[1], [2], [3]]
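
A quick sanity check of the shapes above (a minimal sketch; it only assumes torch is installed):

import torch

print(torch.tensor([1, 2, 3]).shape)        # torch.Size([3])
print(torch.tensor([[1, 2, 3]]).shape)      # torch.Size([1, 3])
print(torch.tensor([[1], [2], [3]]).shape)  # torch.Size([3, 1])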

Top-1 classification demo:

for i, (inputs, labels) in enumerate(train_loader, 0):
    # inputs.shape = torch.Size([B, C, H, W])
    # labels.shape = torch.Size([B])
    inputs, labels = inputs.to(device), labels.to(device)

    optimizer.zero_grad()
    # outputs.shape = torch.Size([B, num_class])
    outputs = model(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()

    # dim=1 takes the maximum over each row, returning both the max values and their indices
    # predicted.shape = [B]
    # e.g. predicted = tensor([3, 2, 1, 4, 5])
    _, predicted = torch.max(outputs.data, dim=1)
    total = labels.size(0)
    correct = predicted.eq(labels.data).cpu().sum()
	
    # =========================== top-1 via topk ===========================
    # topk keeps the dimension (it does not squeeze)
    # k is the number of top elements to take
    # dim=1 takes the top-k within each row; dim=0 takes the top-k within each column
    # largest=True sorts from largest to smallest, False from smallest to largest
    # sorted=True returns the results in sorted order
    # with k=1 this is equivalent to: _, predicted = torch.max(outputs.data, dim=1, keepdim=True)

    # topk_predicted.shape = [B, 1]
    # topk_predicted = tensor([[3], [2], [1], [4], [5], ...])
    _, topk_predicted = outputs.topk(k=1, dim=1, largest=True, sorted=True)
    # after transposing:
    # topk_predicted.shape = [1, B]
    # topk_predicted = tensor([[3, 2, 1, 4, 5, ...]])
    topk_predicted = topk_predicted.t()
    # correct.shape = [1, B]
    correct = topk_predicted.eq(labels.reshape(1, -1))
    correct = correct[:1].reshape(-1).float().sum(0).cpu()
    # =======================================================================

    if i % 200 == 0:
        print('Acc: {:.4f}'.format(correct / total))
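
The equivalence noted above between topk(k=1) and torch.max(..., keepdim=True) can be checked on a small made-up tensor (hypothetical values, for illustration only):

import torch

outputs = torch.tensor([[0.1, 2.0, 0.3],
                        [1.5, 0.2, 0.1]])
_, max_idx = torch.max(outputs, dim=1, keepdim=True)               # tensor([[1], [0]])
_, topk_idx = outputs.topk(k=1, dim=1, largest=True, sorted=True)  # same indices, shape [2, 1]
print(torch.equal(max_idx, topk_idx))                              # True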

Top-5 classification demo:

def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    maxk = min(max(topk), output.size()[1])
    batch_size = target.size(0)
    # topk keeps the dimension (it does not squeeze)
    # k is the number of top elements; dim=1 takes the top-k within each row, dim=0 within each column
    # largest=True sorts from largest to smallest; sorted=True returns the results in sorted order
    # with maxk=1 this is equivalent to: _, pred = torch.max(output.data, dim=1, keepdim=True)
    # pred.shape = [B, maxk]
    _, pred = output.topk(k=maxk, dim=1, largest=True, sorted=True)
    # pred.shape = [maxk, B]
    pred = pred.t()
    # broadcast target to the same shape as pred before comparing
    correct = pred.eq(target.reshape(1, -1).expand_as(pred))
    return [correct[:min(k, maxk)].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]
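
Example usage of accuracy() on a hand-made batch (hypothetical logits and labels, 4 samples and 6 classes; 2 of the 4 samples are top-1 hits and 3 of the 4 are top-5 hits):

import torch

logits = torch.tensor([[5.0, 1.0, 0.5, 0.4, 0.3, 0.2],   # predicts class 0
                       [0.1, 4.0, 3.0, 0.3, 0.2, 0.5],   # predicts class 1
                       [0.1, 0.2, 0.3, 0.5, 2.0, 0.4],   # predicts class 4
                       [1.0, 0.2, 0.3, 0.4, 0.5, 3.0]])  # predicts class 5
targets = torch.tensor([0, 2, 4, 1])
acc1, acc5 = accuracy(logits, targets, topk=(1, 5))
print(acc1.item(), acc5.item())  # 50.0 75.0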

...
for i, (inputs, labels) in enumerate(train_loader, 0):
    # inputs.shape = torch.Size([B, C, H, W])
    # labels.shape = torch.Size([B])
    inputs, labels = inputs.to(device), labels.to(device)

    optimizer.zero_grad()
    # outputs.shape = torch.Size([B, num_class])
    outputs = model(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()

    # =========================== top-k via accuracy() ===========================
    # acc1 and acc5 are percentages for the current batch
    acc1, acc5 = accuracy(outputs, labels, topk=(1, 5))
    # =============================================================================
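
To report accuracy over a whole loader rather than a single batch, one possible pattern is sketched below (an assumption, not part of the original code: val_loader is a hypothetical evaluation DataLoader, and averaging per-batch percentages matches the overall accuracy when all batches have the same size):

model.eval()
running_acc1, running_acc5, num_batches = 0.0, 0.0, 0
with torch.no_grad():
    for inputs, labels in val_loader:  # val_loader: hypothetical evaluation DataLoader
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = model(inputs)
        acc1, acc5 = accuracy(outputs, labels, topk=(1, 5))
        running_acc1 += acc1.item()
        running_acc5 += acc5.item()
        num_batches += 1
print('Top-1: {:.2f}%  Top-5: {:.2f}%'.format(running_acc1 / num_batches,
                                              running_acc5 / num_batches))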
