##实现多输入通道互相关运算
import torch
from d2l import torch as d2l
def corr2d_multi_in(X, K):
    """Multi-input-channel 2-D cross-correlation.

    Pairs up each input channel of X with the matching channel of K
    (both assumed 3-D: channels first), runs a single-channel
    cross-correlation per pair via d2l.corr2d, and sums the results
    into one 2-D output.
    """
    per_channel = [d2l.corr2d(x_ch, k_ch) for x_ch, k_ch in zip(X, K)]
    return sum(per_channel)
##验证互相关运算的输出
X = torch.tensor([[[0.0,1.0,2.0],[3.0,4.0,5.0],[6.0,7.0,8.0]],
[[1.0,2.0,3.0],[4.0,5.0,6.0],[7.0,8.0,9.0]]])
K = torch.tensor([[[0.0,1.0],[2.0,3.0]],[[1.0,2.0],[3.0,4.0]]])
print(corr2d_multi_in(X,K))
'''
tensor([[ 56., 72.],
[104., 120.]])
'''
## Cross-correlation with multiple input AND output channels
def corr2d_multi_in_out(X, K):
    """Multi-input, multi-output-channel 2-D cross-correlation.

    X is 3-D (c_i, h, w); K is 4-D (c_o, c_i, kh, kw). For each output
    channel, take its per-channel kernel k, run the multi-input
    cross-correlation of k against the full input X, then stack all
    results along a new leading dimension.
    """
    # BUG FIX: the original passed the full 4-D K to corr2d_multi_in
    # (the loop variable k was unused), which caused
    # "ValueError: too many values to unpack (expected 2)" inside corr2d.
    return torch.stack([corr2d_multi_in(X, k) for k in K], 0)
# Build a 3-output-channel kernel by stacking K, K+1, K+2 along dim 0
K = torch.stack((K,K +1, K + 2),0)
print('k-shape:',K.shape)
# k-shape: torch.Size([3, 2, 2, 2]) — 3 output channels, 2 input channels,
# and the last two dims are the kernel height and width (both 2).
#print(corr2d_multi_in_out(X,K))
# i.e. 3 kernels, each with 2 channels, each channel a 2x2 matrix.
# NOTE(review): the print above was disabled because the original
# corr2d_multi_in_out raised "ValueError: too many values to unpack
# (expected 2)" — it passed the 4-D K instead of the per-channel k.
## 1x1 convolution == fully connected layer across channels
def corr2d_multi_in_out_1x1(X, K):
    """Multi-in, multi-out cross-correlation with 1x1 kernels, as a matmul.

    A 1x1 convolution mixes channels independently at every pixel, which
    is exactly a fully connected layer applied over the channel dimension.
    """
    in_channels, height, width = X.shape
    out_channels = K.shape[0]  # leading dim of K is the output-channel count
    # Flatten spatial dims so each pixel becomes one column.
    flat_input = X.reshape((in_channels, height * width))
    weight = K.reshape((out_channels, in_channels))
    # One matrix multiply replaces the whole convolution.
    flat_output = torch.matmul(weight, flat_input)
    return flat_output.reshape((out_channels, height, width))
# Sanity check: the 1x1 matmul formulation must match the general
# multi-in/multi-out cross-correlation on random data.
X = torch.normal(0, 1, (3, 3, 3))
K = torch.normal(0, 1, (2, 3, 1, 1))
Y1 = corr2d_multi_in_out_1x1(X, K)
Y2 = corr2d_multi_in_out(X, K)
assert float(torch.abs(Y1 - Y2).sum()) < 1e-6
# BUG FIX: the line below was originally a bare, non-comment Chinese note
# ("it keeps raising an error"), which made the entire file a SyntaxError.
# The error it referred to — "ValueError: too many values to unpack
# (expected 2)" — came from corr2d_multi_in_out passing the full 4-D K
# instead of the per-output-channel kernel k.