import torch
from torch import nn
import cv2
from torchvision.transforms import ToTensor, ToPILImage
import numpy as np
class Linear(nn.Module):
    """A fully connected layer built from scratch with explicit parameters.

    Computes ``y = x @ w + b`` for an input ``x`` of shape
    ``(batch, in_features)``, returning ``(batch, out_features)``.
    """

    def __init__(self, in_features, out_features):
        super(Linear, self).__init__()
        # nn.Parameter registers the tensors with the module so that
        # .parameters() and optimizers can see them.
        self.w = nn.Parameter(torch.randn(in_features, out_features))
        self.b = nn.Parameter(torch.randn(out_features))

    def forward(self, x):
        # x: (batch, in_features) @ w: (in_features, out_features)
        x = x.mm(self.w)
        # expand_as acts like broadcasting: b (out_features,) is stretched
        # to x's shape. NOTE: broadcasting only pads missing dims on the left.
        return x + self.b.expand_as(x)
# Smoke-test the custom Linear layer on a random batch.
layer = Linear(4, 3)
# Renamed from `input` to avoid shadowing the `input` builtin.
inputs = torch.rand(8, 4)
output = layer(inputs)
print(output)
class Perception(nn.Module):
    """A two-layer perceptron: Linear -> sigmoid -> Linear.

    Built on the hand-rolled ``Linear`` module defined above.
    """

    def __init__(self, in_features, hidden_features, out_features):
        super(Perception, self).__init__()
        self.layer1 = Linear(in_features, hidden_features)
        self.layer2 = Linear(hidden_features, out_features)

    def forward(self, x):
        # forward() should only compute; no new parameters are defined here.
        x = self.layer1(x)
        x = torch.sigmoid(x)
        return self.layer2(x)
# Load the test image as grayscale (flags=0 -> single channel).
lena = cv2.imread("Lena.png", flags=0)
# cv2.imread returns None instead of raising on a missing/unreadable file;
# fail fast here rather than with a confusing error downstream.
if lena is None:
    raise FileNotFoundError("Lena.png not found or could not be read")
# Add batch and channel dims: (H, W) -> (1, 1, H, W), the NCHW layout
# that nn.Conv2d expects.
lena = lena[np.newaxis, np.newaxis, :, :]
print(lena.shape)
lena = torch.tensor(lena, dtype=torch.float)
# Sharpen-like 3x3 kernel: every tap is -1/9 except the centre, which is 1.
kernel = torch.ones(3, 3) / -9
kernel[1, 1] = 1
conv = nn.Conv2d(1, 1, (3, 3), 1, padding=1, bias=False)
# Assigning to .data swaps the weights in place without autograd tracking.
# NOTE: .data is a torch.Tensor, not a numpy array.
conv.weight.data = kernel.view(1, 1, 3, 3)
out = conv(lena).squeeze().detach().numpy()
class MyModule(nn.Module):
    """Demonstrates submodule registration.

    Submodules held in a plain Python list are NOT registered with the
    module (their parameters are invisible to ``.parameters()``); wrapping
    them in ``nn.ModuleList`` registers them properly.
    """

    def __init__(self):
        super(MyModule, self).__init__()
        # Plain list: these parameters are NOT tracked by .parameters().
        self.list = [nn.Linear(3, 4), nn.ReLU()]
        # BUG FIX: the original assigned the nn.ModuleList class itself;
        # it must be instantiated with the submodules to register them.
        self.module_list = nn.ModuleList([nn.Linear(3, 4), nn.ReLU()])
# NOTE: shapes (4, 2) and (2,) broadcast, but (2, 4) and (2,) do not —
# broadcasting compares dimensions from the trailing end, padding missing
# leading dims with 1.