PyTorch & NumPy:squeeze() 函数

注:

 

正文:

  • 也就是说axis是指定要删除的维度为1的位置吗,例6中,axis=2,e的shape是(1,10,1),所以就是指定要删除的维度为1的位置是第二个1,所以最后的shape为(1,10)吗?

    回复:对。默认就是删除所有维度值为 1 的维度;如果要删除指定的维度,就需要使用 axis 来指定。axis 就是指定要删除的、维度值为 1 的那个维度;如果该维度的值不是 1,就会报错。
  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
以下是使用PyTorch实现的胶囊网络模型,以及训练和测试代码,包括准确率、损失率和混淆矩阵热图的计算和可视化:

```python
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np


class CapsuleNet(nn.Module):
    """Capsule network: Conv2d feature extractor -> primary capsules -> digit capsules."""

    def __init__(self):
        super(CapsuleNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 256, kernel_size=9)
        self.primary_capsules = PrimaryCapsules()
        self.digit_capsules = DigitCapsules()

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.primary_capsules(x)
        x = self.digit_capsules(x)
        return x


class PrimaryCapsules(nn.Module):
    """First capsule layer: a strided convolution whose output is reshaped
    into 32 capsule vectors per sample and squashed."""

    def __init__(self):
        super(PrimaryCapsules, self).__init__()
        self.conv2 = nn.Conv2d(256, 32 * 8, kernel_size=9, stride=2, padding=0)

    def forward(self, x):
        x = self.conv2(x)
        # Flatten spatial dims into each of the 32 capsules.
        x = x.view(x.size(0), 32, -1)
        x = self.squash(x)
        return x

    def squash(self, x):
        # Non-linear "squash": preserves vector direction, maps length into [0, 1).
        norm = x.norm(dim=-1, keepdim=True)
        x = x / (1 + norm ** 2) * norm
        return x


class DigitCapsules(nn.Module):
    """Output capsule layer (10 digit classes) with dynamic routing."""

    def __init__(self):
        super(DigitCapsules, self).__init__()
        # NOTE(review): W is (10, 32, 16*6*6) = (10, 32, 576); confirm this
        # matches the flattened primary-capsule size for a 28x28 MNIST input
        # (conv1 k=9 -> 20x20, conv2 k=9 s=2 -> 6x6, so 8*6*6 = 288 per capsule).
        self.W = nn.Parameter(torch.randn(10, 32, 16 * 6 * 6))
        self.num_iterations = 3

    def forward(self, x):
        x = x[:, :, None, :]
        u_hat = torch.matmul(self.W, x)
        # Routing logits start at zero; move them to GPU if the input is there.
        b = torch.zeros(u_hat.size(0), 10, 16, 1)
        if x.is_cuda:
            b = b.cuda()
        for i in range(self.num_iterations):
            c = F.softmax(b, dim=1)
            s = (c * u_hat).sum(dim=-1, keepdim=True)
            v = self.squash(s)
            if i < self.num_iterations - 1:
                # Agreement between prediction and output updates the routing logits.
                b = b + (u_hat * v).sum(dim=-1, keepdim=True)
        return v.squeeze()

    def squash(self, x):
        norm = x.norm(dim=-1, keepdim=True)
        x = x / (1 + norm ** 2) * norm
        return x


# Load the MNIST dataset with standard normalization.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])
train_dataset = datasets.MNIST(root='./data', train=True,
                               transform=transform, download=True)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_dataset = datasets.MNIST(root='./data', train=False,
                              transform=transform, download=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=True)

# BUGFIX: the model must exist before the optimizer references
# model.parameters(); the original created the optimizer first,
# which raises NameError at runtime.
model = CapsuleNet()

# Loss function and optimizer.
# NOTE(review): CrossEntropyLoss expects (batch, classes) logits, but the
# model returns the digit-capsule tensor from DigitCapsules.forward —
# confirm the output shape (capsule nets usually classify by capsule
# length, e.g. output.norm(dim=-1)).
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)


def train(model, train_loader, criterion, optimizer, epoch):
    """Train for one epoch; print and return (average loss, accuracy %)."""
    model.train()
    train_loss = 0
    correct = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
    train_loss /= len(train_loader.dataset)
    accuracy = 100. * correct / len(train_loader.dataset)
    print('Epoch: {} Train set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
        epoch, train_loss, correct, len(train_loader.dataset), accuracy))
    return train_loss, accuracy


def test(model, test_loader, criterion):
    """Evaluate on the test set; return (average loss, accuracy %, confusion matrix)."""
    model.eval()
    test_loss = 0
    correct = 0
    y_true = []
    y_pred = []
    with torch.no_grad():
        for data, target in test_loader:
            output = model(data)
            test_loss += criterion(output, target).item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
            y_true.extend(target.tolist())
            y_pred.extend(pred.view(-1).tolist())
    test_loss /= len(test_loader.dataset)
    accuracy = 100. * correct / len(test_loader.dataset)
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
        test_loss, correct, len(test_loader.dataset), accuracy))
    conf_mat = confusion_matrix(y_true, y_pred)
    return test_loss, accuracy, conf_mat


# Train and evaluate for 10 epochs, collecting per-epoch metrics.
train_losses = []
train_accuracies = []
test_losses = []
test_accuracies = []
conf_matrices = []
for epoch in range(1, 11):
    train_loss, train_accuracy = train(model, train_loader, criterion, optimizer, epoch)
    test_loss, test_accuracy, conf_mat = test(model, test_loader, criterion)
    train_losses.append(train_loss)
    train_accuracies.append(train_accuracy)
    test_losses.append(test_loss)
    test_accuracies.append(test_accuracy)
    conf_matrices.append(conf_mat)

# Plot loss curves.
plt.figure()
plt.plot(range(1, 11), train_losses, 'bo-', label='Training loss')
plt.plot(range(1, 11), test_losses, 'ro-', label='Test loss')
plt.legend()
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training and Test Loss')
plt.show()

# Plot accuracy curves.
plt.figure()
plt.plot(range(1, 11), train_accuracies, 'bo-', label='Training accuracy')
plt.plot(range(1, 11), test_accuracies, 'ro-', label='Test accuracy')
plt.legend()
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Training and Test Accuracy')
plt.show()

# Plot a row-normalized confusion-matrix heatmap for each epoch.
for i in range(10):
    conf_matrix = conf_matrices[i]
    conf_matrix = conf_matrix / conf_matrix.sum(axis=1, keepdims=True)
    plt.figure()
    plt.imshow(conf_matrix, cmap='Blues', vmin=0, vmax=1)
    plt.colorbar()
    plt.xticks(range(10))
    plt.yticks(range(10))
    plt.xlabel('Predicted label')
    plt.ylabel('True label')
    plt.title('Confusion Matrix (Epoch {})'.format(i + 1))
    plt.show()
```

上述代码实现了胶囊网络模型的训练和测试,包括准确率、损失率和混淆矩阵热图的计算和可视化。其中,胶囊网络模型使用PyTorch实现,数据集使用了MNIST,并且使用了Adam优化器和交叉熵损失函数。训练过程中,每个epoch都会输出训练集的平均损失和准确率,测试集的平均损失、准确率和混淆矩阵,然后绘制准确率和损失率曲线以及混淆矩阵热图。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值