Tensor for ‘out’ is on CPU, Tensor for argument #1 ‘self’ is on CPU, but expected them to be on GPU (while checking arguments for addmm)
检查发现是Net类中出现了问题。
错误模块代码
class Net(torch.nn.Module):
    """Buggy CNN, kept as-is to illustrate the error quoted above:
    'Tensor for argument #1 self is on CPU, but expected them to be on GPU
    (while checking arguments for addmm)'. The corrected version follows below."""
    def __init__(self):
        super(Net,self).__init__()
        self.conv1 = torch.nn.Conv2d(1,10,kernel_size=5)
        self.conv2 = torch.nn.Conv2d(88,20,kernel_size=5)
        self.inception1 = Inception_A(10) # Inception_A class defined elsewhere; unrelated to this problem
        self.inception2 = Inception_A(20)
        self.mp = torch.nn.MaxPool2d(2)
    def forward(self,x):
        inside_batch = x.size(0)  # batch size, used to flatten below
        x = self.conv1(x)
        x = F.relu(x)
        x = self.mp(x)
        x = self.inception1(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = self.inception2(x)
        x = self.mp(x)
        x = x.view(inside_batch,-1)
        """
        Written this way to avoid computing the actual value of x.size(1):
        defining the fully-connected layer in __init__ would require working
        that number out by hand — and this shortcut is exactly what went wrong.
        """
        # BUG: this Linear is constructed fresh on every forward pass, with its
        # parameters on the CPU — it is never registered as a submodule, so
        # model.cuda() never moves it to the GPU (and it is never trained).
        fc = torch.nn.Linear(x.size(1),10)
        # NOTE(review): `fc` above is a local but `self.fc` is called here; as
        # posted this would raise AttributeError. Presumably the author meant
        # `self.fc = ...` on the previous line, which then produces the
        # CPU/GPU addmm error quoted at the top — TODO confirm.
        x = self.fc(x) # fc means fully-connected
        return x
正确代码
class Net(torch.nn.Module):
    """CNN with two conv layers and two Inception_A blocks.

    Corrected version: the fully-connected layer is declared once in
    __init__ (its input width, 1408, worked out ahead of time), so it is
    registered as a submodule and model.cuda() moves its parameters to the
    GPU together with everything else.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(88, 20, kernel_size=5)
        self.inception1 = Inception_A(10)
        self.inception2 = Inception_A(20)
        self.mp = torch.nn.MaxPool2d(2)
        # The fix: declaring the layer here (instead of inside forward) lets
        # it participate in .cuda()/.parameters(), so the GPU run works and
        # the training loss drops as expected.
        self.fc = torch.nn.Linear(1408, 10)

    def forward(self, x):
        batch_size = x.size(0)
        out = self.mp(F.relu(self.conv1(x)))
        out = self.inception1(out)
        out = self.inception2(F.relu(self.conv2(out)))
        out = self.mp(out)
        out = out.view(batch_size, -1)
        return self.fc(out)