Deep Learning Code Notes: ViT + MAE + ResNet18 + Dataset Processing, etc.

Note: these are code notes; all of the code has been run by me, and the content is still being improved. If you find any problems, feel free to point them out in the comments.


Dataset Splitting and Processing

For an image dataset, first write the path and label of every sample into a single master text file.

import os

def generate(dir, label, listText):
    files = os.listdir(dir)  # os.listdir() returns the names of the files/folders inside the given directory
    print('****************')
    print('input :', dir)
    print('start...')
    for file in files:  # iterate over the files in the folder
        if os.path.splitext(file)[1] == '.txt':  # skip txt files (os.path.splitext()[1] is the extension)
            continue
        name = os.path.join(dir, file) + ' ' + str(label) + '\n'  # one line per image: "path label\n"
        listText.write(name)  # write the line into the output txt file
    print('done!')
    print('****************')

r_path = '/root/Dataset/imagenet100/'  # root directory of your images
out_path = '/root/Dataset/out/all_data_list.txt'

if __name__ == '__main__':
    folderlist = os.listdir(r_path)  # list the class folders
    listText = open(out_path, 'a+')  # open (create if needed) the txt file; 'a+' opens it for appending
    listText.truncate(0)  # clear any existing content
    label = 0  # class label, one per folder
    for folder in folderlist:  # iterate over the class folders (stray txt/py files under r_path will cause errors later)
        generate(os.path.join(r_path, folder), label, listText)  # arguments: (image root + folder name, label, output file)
        label = label + 1
    listText.close()  # close the txt file

Randomly split the master file into a training set and a test set

import os
import random

# split ratio: train : test = 8 : 2
split_rate = 0.2

class SplitFiles():
    """Split a file by lines"""

    def __init__(self, file_name):
        """Store the name of the source file to split"""
        self.file_name = file_name

    def split_file(self):
        if self.file_name and os.path.exists(self.file_name):
            try:
                with open(self.file_name) as f:  # read the file with a context manager
                    file = f.readlines()
                    count = len(file)
                    eval_index = random.sample(file, k=int(count * split_rate))  # randomly sample k lines for the test set
                    for index, image_path in enumerate(file):
                        if image_path in eval_index:
                            self.write_file('test', image_path)
                        else:
                            self.write_file('train', image_path)

            except IOError as err:
                print(err)
        else:
            print("%s is not a valid file" % self.file_name)

    def get_part_file_name(self, part_name):
        """Build the output file name: the split files are written next to the source file"""
        file_folder = os.path.dirname(self.file_name)  # directory of the source file (without the file name)
        if not os.path.exists(file_folder):  # create the directory if it does not exist
            os.makedirs(file_folder)
        part_file_name = file_folder + "/" + str(part_name) + "_list_1.txt"
        return part_file_name

    def write_file(self, part_num, line):
        """Append one line to the corresponding split file"""
        part_file_name = self.get_part_file_name(part_num)
        try:
            with open(part_file_name, "a") as part_file:
                part_file.writelines(line)
        except IOError as err:
            print(err)

if __name__ == "__main__":
    file = SplitFiles(r'/root/Dataset/out/all_data_list.txt')
    file.split_file()
 

Loading the Dataset

import os
import numpy as np
import torch
from torchvision import transforms
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data import Dataset

# The list file contains one line per image in the form "path label".
# We read the list file, strip the trailing newline from every line, and split on whitespace:
# the first field is the full image path, the second field is the label.
class MyDataset(Dataset):
    def __init__(self, img_path, transform=None):
        super(MyDataset, self).__init__()
        self.root = img_path  # path to the list file (train_list_1.txt / test_list_1.txt)
        with open(self.root, 'r') as f:
            data = f.readlines()

        imgs = []
        labels = []
        for line in data:
            line = line.rstrip()
            word = line.split()
            imgs.append(word[0])    # word[0] is the full image path written by the list generator
            labels.append(word[1])  # word[1] is the label
        self.img = imgs
        self.label = labels
        self.transform = transform

    def __len__(self):
        return len(self.img)

    def __getitem__(self, item):
        img = self.img[item]
        label = self.label[item]
        img = Image.open(img).convert('RGB')

        # at this point img is a PIL.Image and label is a str

        if self.transform is not None:
            img = self.transform(img)

        label = np.array(label).astype(np.int64)  # CrossEntropyLoss expects int64 class indices
        label = torch.from_numpy(label)
        return img, label
 

root_train = r'D:/vscode/data/imagenet100/out/train_list_1.txt'
root_test = r'D:/vscode/data/imagenet100/out/test_list_1.txt'

normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
 
train_transform = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.ToTensor(),
    normalize])
val_transform = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.ToTensor(),
    normalize])

train_dataset = MyDataset(root_train, transform=train_transform)
val_dataset = MyDataset(root_test, transform=val_transform)
 
train_dataloader = DataLoader(dataset=train_dataset,batch_size=16,shuffle=True)
val_dataloader = DataLoader(dataset=val_dataset,batch_size=16,shuffle=True)
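
A quick sanity check of what the loaders produce (an assumed usage example using the transforms defined above, not part of the original notes):

for imgs, labels in train_dataloader:
    print(imgs.shape)    # torch.Size([16, 3, 224, 224])
    print(labels.shape)  # torch.Size([16])
    break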

LeNet

# Use LeNet to classify images (train and test on MNIST)
import time
import torch
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
import torchvision.transforms as transforms

class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        # define the model
        self.features = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=6, kernel_size=(5,5), stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(in_channels=6, out_channels=16, kernel_size=(5,5), stride=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.classifier = nn.Sequential(
            nn.Linear(in_features=400, out_features=120),  # in_features=400 because the feature extractor outputs 16*5*5=400; 120 is a hyperparameter
            nn.ReLU(),
            nn.Linear(in_features=120, out_features=84),
            nn.ReLU(),
            nn.Linear(in_features=84, out_features=10)  # the final output size equals the number of classes: 10 for handwritten digits, 26 for handwritten letters
        )
    def forward(self, x):
        # define the forward pass
        x = self.features(x)  # convolutional layers
        # print(x.shape)
        x = torch.flatten(x, 1)  # flatten the feature maps into a vector before the fully connected layers
        # print(x.shape)
        result = self.classifier(x)  # fully connected layers
        return result
# download / load the dataset
train_dataset = MNIST(root='../data/', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = MNIST(root='../data/', train=False, transform=transforms.ToTensor())
# load the data in batches of 32
batch_size = 32
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
# start time
start_time = time.time()
# create the model
model = LeNet()
#model = torch.load("../save_model/mynn_30.pth", map_location=torch.device('cpu'))
# move the model to the GPU if one is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
# define the loss function
loss_func = nn.CrossEntropyLoss()
loss_list = []  # used to store loss values
# define the optimizer
SGD = optim.SGD(params=model.parameters(), lr=0.001, momentum=0.9)
# train for the specified number of epochs
for i in range(1):
    loss_temp = 0  # running loss, used only for printing
    # j is the batch index; batch_data and batch_label are batches of 32 samples
    for j, (batch_data, batch_label) in enumerate(train_loader):
        # move the batch to the same device as the model
        batch_data, batch_label = batch_data.to(device), batch_label.to(device)
        # clear the gradients
        SGD.zero_grad()
        # forward pass
        prediction = model(batch_data)
        # compute the loss; gradient descent differentiates the loss w.r.t. the learnable parameters it was computed from
        loss = loss_func(prediction, batch_label)
        loss_temp += loss
        # backpropagation: compute the gradients
        loss.backward()
        # update the parameters (gradient descent step)
        SGD.step()
        if (j + 1) % 200 == 0:
            print('epoch %d, batch %d, loss: %.3f' % (i + 1, j + 1, loss_temp / 200))
            loss_temp = 0
# test
correct = 0
for batch_data, batch_label in test_loader:
    batch_data, batch_label = batch_data.to(device), batch_label.to(device)
    prediction = model(batch_data)  # output has 32 rows of 10 numbers: 32 samples, each with scores for classes 0~9
    predicted = torch.max(prediction.data, 1)[1]  # per-row argmax of prediction; the index is the predicted class
    correct += (predicted == batch_label).sum()
print('accuracy: %.2f %%' % (100 * correct / 10000))  # there are 10000 test samples in total

Transformer

import math
import pandas as pd
import torch
from torch import nn
from d2l import torch as d2l

class PositionWiseFFN(nn.Module):
    """Position-wise feed-forward network: an MLP with a single hidden layer"""
    def __init__(self, ffn_num_input, ffn_num_hiddens, ffn_num_outputs,
                 **kwargs):
        super(PositionWiseFFN, self).__init__(**kwargs)
        self.dense1 = nn.Linear(ffn_num_input, ffn_num_hiddens)  # fully connected layer
        self.relu = nn.ReLU()
        self.dense2 = nn.Linear(ffn_num_hiddens, ffn_num_outputs)  # fully connected layer
    def forward(self, X):
        return self.dense2(self.relu(self.dense1(X)))

class AddNorm(nn.Module):
    """Residual connection followed by layer normalization"""
    def __init__(self, normalized_shape, dropout, **kwargs):
        super(AddNorm, self).__init__(**kwargs)
        self.dropout = nn.Dropout(dropout)  # dropout applied to the sublayer output
        self.ln = nn.LayerNorm(normalized_shape)  # normalized_shape: shape of the dimensions to normalize
    def forward(self, X, Y):
        return self.ln(self.dropout(Y) + X)  # Y is the sublayer output; adding X is the residual connection; ln is the layer normalization

class EncoderBlock(nn.Module):
    """Transformer encoder block"""
    def __init__(self, key_size, query_size, value_size, num_hiddens,
                 norm_shape, ffn_num_input, ffn_num_hiddens, num_heads,
                 dropout, use_bias=False, **kwargs):
        super(EncoderBlock, self).__init__(**kwargs)
        self.attention = d2l.MultiHeadAttention(
            key_size, query_size, value_size, num_hiddens, num_heads, dropout,
            use_bias)
        self.addnorm1 = AddNorm(norm_shape, dropout)
        self.ffn = PositionWiseFFN(
            ffn_num_input, ffn_num_hiddens, num_hiddens)
        self.addnorm2 = AddNorm(norm_shape, dropout)
    def forward(self, X, valid_lens):  # valid_lens: tensor of shape (batch_size,), the valid length of each sequence; positions beyond the valid length get weight 0 after the softmax
        Y = self.addnorm1(X, self.attention(X, X, X, valid_lens))
        return self.addnorm2(Y, self.ffn(Y))

class TransformerEncoder(d2l.Encoder):
    """Transformer encoder; the original Transformer stacks six encoder blocks and six decoder blocks"""
    def __init__(self, vocab_size, key_size, query_size, value_size,
                 num_hiddens, norm_shape, ffn_num_input, ffn_num_hiddens,
                 num_heads, num_layers, dropout, use_bias=False, **kwargs):
        super(TransformerEncoder, self).__init__(**kwargs)
        self.num_hiddens = num_hiddens  # hidden size
        self.embedding = nn.Embedding(vocab_size, num_hiddens)  # the embedding turns token ids into dense vectors
        self.pos_encoding = d2l.PositionalEncoding(num_hiddens, dropout)  # positional encoding
        self.blks = nn.Sequential()
        for i in range(num_layers):  # num_layers is the number of encoder blocks
            self.blks.add_module("block"+str(i),  # register each encoder block as a submodule
                EncoderBlock(key_size, query_size, value_size, num_hiddens,
                             norm_shape, ffn_num_input, ffn_num_hiddens,
                             num_heads, dropout, use_bias))
    def forward(self, X, valid_lens, *args):
        # valid_lens: tensor of shape (batch_size,), the valid length of each sequence in the batch;
        # attention scores beyond the valid length are replaced by a very large negative number before the softmax.
        # Since the positional encoding values are between -1 and 1,
        # the embeddings are scaled by the square root of the embedding dimension
        # before being added to the positional encoding.
        X = self.pos_encoding(self.embedding(X) * math.sqrt(self.num_hiddens))
        # record the attention weights of every block (useful for plotting later)
        self.attention_weights = [None] * len(self.blks)
        for i, blk in enumerate(self.blks):  # pass the input through the encoder blocks one by one
            X = blk(X, valid_lens)  # the output of one block becomes the input of the next
            self.attention_weights[
                i] = blk.attention.attention.attention_weights
        return X

class DecoderBlock(nn.Module):
    """The i-th block in the decoder"""
    def __init__(self, key_size, query_size, value_size, num_hiddens,
                 norm_shape, ffn_num_input, ffn_num_hiddens, num_heads,
                 dropout, i, **kwargs):
        super(DecoderBlock, self).__init__(**kwargs)
        self.i = i
        self.attention1 = d2l.MultiHeadAttention(  # the masked multi-head self-attention
            key_size, query_size, value_size, num_hiddens, num_heads, dropout)
        self.addnorm1 = AddNorm(norm_shape, dropout)
        self.attention2 = d2l.MultiHeadAttention(
            key_size, query_size, value_size, num_hiddens, num_heads, dropout)
        self.addnorm2 = AddNorm(norm_shape, dropout)
        self.ffn = PositionWiseFFN(ffn_num_input, ffn_num_hiddens,
                                   num_hiddens)
        self.addnorm3 = AddNorm(norm_shape, dropout)
    def forward(self, X, state):  # state holds three things: the encoder outputs, the encoder valid_lens, and the cached keys/values of every decoder block
        enc_outputs, enc_valid_lens = state[0], state[1]
        # During training all tokens of the output sequence are processed at the same time,
        # so state[2][self.i] is initialized as None.
        # During prediction the output sequence is decoded token by token,
        # so state[2][self.i] contains the representations decoded by the i-th block up to the current time step.
        if state[2][self.i] is None:  # no cached keys/values yet, so the keys/values are X itself
            key_values = X
        else:
            key_values = torch.cat((state[2][self.i], X), axis=1)  # otherwise concatenate the cached outputs with the current input
        state[2][self.i] = key_values
        if self.training:
            batch_size, num_steps, _ = X.shape
            # dec_valid_lens has shape (batch_size, num_steps),
            # where every row is [1, 2, ..., num_steps]
            dec_valid_lens = torch.arange(  # during training, mask out the future positions
                1, num_steps + 1, device=X.device).repeat(batch_size, 1)
        else:
            dec_valid_lens = None
        # self-attention
        X2 = self.attention1(X, key_values, key_values, dec_valid_lens)
        Y = self.addnorm1(X, X2)
        # encoder-decoder attention: the keys and values come from the encoder outputs
        # enc_outputs has shape (batch_size, num_steps, num_hiddens)
        Y2 = self.attention2(Y, enc_outputs, enc_outputs, enc_valid_lens)
        Z = self.addnorm2(Y, Y2)
        return self.addnorm3(Z, self.ffn(Z)), state

class TransformerDecoder(d2l.AttentionDecoder):
    def __init__(self, vocab_size, key_size, query_size, value_size,
                 num_hiddens, norm_shape, ffn_num_input, ffn_num_hiddens,
                 num_heads, num_layers, dropout, **kwargs):
        super(TransformerDecoder, self).__init__(**kwargs)
        self.num_hiddens = num_hiddens
        self.num_layers = num_layers
        self.embedding = nn.Embedding(vocab_size, num_hiddens)
        self.pos_encoding = d2l.PositionalEncoding(num_hiddens, dropout)
        self.blks = nn.Sequential()
        for i in range(num_layers):
            self.blks.add_module("block"+str(i),
                DecoderBlock(key_size, query_size, value_size, num_hiddens,
                             norm_shape, ffn_num_input, ffn_num_hiddens,
                             num_heads, dropout, i))
        self.dense = nn.Linear(num_hiddens, vocab_size)  # final linear projection to the vocabulary
    def init_state(self, enc_outputs, enc_valid_lens, *args):
        return [enc_outputs, enc_valid_lens, [None] * self.num_layers]
    def forward(self, X, state):
        X = self.pos_encoding(self.embedding(X) * math.sqrt(self.num_hiddens))
        self._attention_weights = [[None] * len(self.blks) for _ in range (2)]
        for i, blk in enumerate(self.blks):
            X, state = blk(X, state)
            # decoder self-attention weights
            self._attention_weights[0][
                i] = blk.attention1.attention.attention_weights
            # encoder-decoder attention weights
            self._attention_weights[1][
                i] = blk.attention2.attention.attention_weights
        return self.dense(X), state
    @property
    def attention_weights(self):
        return self._attention_weights

# training
num_hiddens, num_layers, dropout, batch_size, num_steps = 32, 2, 0.1, 64, 10  # attention hidden size, number of encoder/decoder blocks, dropout rate, batch size, sequence length
lr, num_epochs, device = 0.005, 200, d2l.try_gpu()  # learning rate, number of epochs, device
ffn_num_input, ffn_num_hiddens, num_heads = 32, 64, 4  # FFN input size, FFN hidden size, number of attention heads
key_size, query_size, value_size = 32, 32, 32
norm_shape = [32]  # shape of the dimensions to normalize
train_iter, src_vocab, tgt_vocab = d2l.load_data_nmt(batch_size, num_steps)  # load the translation dataset; every minibatch has batch_size sequences, each truncated/padded to num_steps tokens
encoder = TransformerEncoder(
    len(src_vocab), key_size, query_size, value_size, num_hiddens,
    norm_shape, ffn_num_input, ffn_num_hiddens, num_heads,
    num_layers, dropout)
decoder = TransformerDecoder(
    len(tgt_vocab), key_size, query_size, value_size, num_hiddens,
    norm_shape, ffn_num_input, ffn_num_hiddens, num_heads,
    num_layers, dropout)
net = d2l.EncoderDecoder(encoder, decoder)
d2l.train_seq2seq(net, train_iter, lr, num_epochs, tgt_vocab, device)  # sequence-to-sequence training
# prediction
engs = ['go .', "i lost .", 'he\'s calm .', 'i\'m home .']
fras = ['va !', 'j\'ai perdu .', 'il est calme .', 'je suis chez moi .']
for eng, fra in zip(engs, fras):
    translation, dec_attention_weight_seq = d2l.predict_seq2seq(
        net, eng, src_vocab, tgt_vocab, num_steps, device, True)
    print(f'{eng} => {translation}, ',
          f'bleu {d2l.bleu(translation, fra, k=2):.3f}')

Vision Transformer (ViT)

import matplotlib.pyplot as plt
from PIL import Image
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torchvision.transforms import Compose, Resize, ToTensor
from torch import optim
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST      # the images are 28x28
import torchvision.transforms as transforms
import time
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
class PatchEmbedding(nn.Module):
     def __init__(self, in_channels: int = 1, patch_size: int = 14, emb_size: int = 196,img_size:int=28):
         self.patch_size = patch_size
         super().__init__()
         self.projection = nn.Sequential(
             nn.Conv2d(in_channels, emb_size, kernel_size=patch_size, stride=patch_size),
             Rearrange('b e (h) (w) -> b (h w) e'),
         ) # this breaks down the image in s1xs2 patches, and then flat them         
         self.cls_token = nn.Parameter(torch.randn(1,1, emb_size))
         self.positions = nn.Parameter(torch.randn((img_size // patch_size) **2 + 1, emb_size))         
     def forward(self, x: Tensor) -> Tensor:
         b, _, _, _ = x.shape
         x = self.projection(x)
         cls_tokens = repeat(self.cls_token, '() n e -> b n e', b=b)
         x = torch.cat([cls_tokens, x], dim=1) #prepending the cls token
         x += self.positions
         return x
class MultiHeadAttention(nn.Module):
    def __init__(self, emb_size: int = 196, num_heads: int = 4, dropout: float = 0):
        super().__init__()
        self.emb_size = emb_size
        self.num_heads = num_heads
        self.qkv = nn.Linear(emb_size, emb_size * 3)  # queries, keys and values in a single matrix
        self.att_drop = nn.Dropout(dropout)
        self.projection = nn.Linear(emb_size, emb_size)
    def forward(self, x: Tensor, mask: Tensor = None) -> Tensor:
        # split keys, queries and values into num_heads heads:
        # the qkv projection outputs emb_size*3 = 588 values per token, split into 3 (q, k, v) x num_heads x head_dim
        qkv = rearrange(self.qkv(x), "b n (h d qkv) -> (qkv) b h n d", h=self.num_heads, qkv=3)
        queries, keys, values = qkv[0], qkv[1], qkv[2]
        # sum up over the last axis
        energy = torch.einsum('bhqd, bhkd -> bhqk', queries, keys)  # batch, num_heads, query_len, key_len
        if mask is not None:
            fill_value = torch.finfo(torch.float32).min
            energy = energy.masked_fill(~mask, fill_value)
        scaling = self.emb_size ** (1/2)
        att = F.softmax(energy / scaling, dim=-1)  # scale the attention scores before the softmax
        att = self.att_drop(att)
        out = torch.einsum('bhal, bhlv -> bhav ', att, values)  # weighted sum over the value vectors
        out = rearrange(out, "b h n d -> b n (h d)")
        out = self.projection(out)
        return out
class ResidualAdd(nn.Module):
     def __init__(self, fn):
         super().__init__()
         self.fn = fn         
     def forward(self, x, **kwargs):
         res = x
         x = self.fn(x, **kwargs)
         x += res
         return x
class FeedForwardBlock(nn.Sequential):
     def __init__(self, emb_size: int, L: int = 4, drop_p: float = 0.):
         super().__init__(
             nn.Linear(emb_size, L * emb_size),
             nn.GELU(),
             nn.Dropout(drop_p),
             nn.Linear(L * emb_size, emb_size),
         )
class TransformerEncoderBlock(nn.Sequential):
     def __init__(self, emb_size: int = 196, drop_p: float = 0., forward_expansion: int = 4,
                  forward_drop_p: float = 0.,
                  **kwargs):                  
         super().__init__(
             ResidualAdd(nn.Sequential(
                 nn.LayerNorm(emb_size),
                 MultiHeadAttention(emb_size, **kwargs),
                 nn.Dropout(drop_p)
             )),
             ResidualAdd(nn.Sequential(
                 nn.LayerNorm(emb_size),
                 FeedForwardBlock(
                     emb_size, L=forward_expansion, drop_p=forward_drop_p),
                 nn.Dropout(drop_p)
             )
             ))
class TransformerEncoder(nn.Sequential):
     def __init__(self, depth: int = 12, **kwargs):
         super().__init__(*[TransformerEncoderBlock(**kwargs) for _ in range(depth)])
class ClassificationHead(nn.Sequential):
     def __init__(self, emb_size: int = 196, n_classes: int = 10):
         super().__init__(
             Reduce('b n e -> b e', reduction='mean'),
             nn.LayerNorm(emb_size), 
             nn.Linear(emb_size, n_classes))
class ViT(nn.Sequential):
     def __init__(self,     
                 in_channels: int = 1,
                 patch_size: int = 14,
                 emb_size: int = 196,
                 img_size: int = 28,
                 depth: int = 12,
                 n_classes: int = 10,
                 **kwargs):
         super().__init__(
             PatchEmbedding(in_channels, patch_size, emb_size, img_size),
             TransformerEncoder(depth, emb_size=emb_size, **kwargs),
             ClassificationHead(emb_size, n_classes)
         )
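
A quick sanity check of the ViT defined above on a dummy MNIST-sized batch (an assumed usage example, not part of the original notes):

model = ViT()                      # defaults: 1-channel 28x28 input, 10 classes
dummy = torch.randn(8, 1, 28, 28)  # a fake batch of 8 MNIST-sized images
logits = model(dummy)
print(logits.shape)                # torch.Size([8, 10])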

MAE

The MAE code can be downloaded from GitHub; you can also use the annotated version given here, which is pretrained (image reconstruction) and fine-tuned (classification) on the handwritten digit dataset. A pretraining sketch is given after the model code.

from functools import partial
import torch
import torch.nn as nn
from timm.models.vision_transformer import PatchEmbed, Block
from pos_embed import get_2d_sincos_pos_embed
from torchvision.datasets import MNIST
import time
import matplotlib.pyplot as plt
from torch import optim
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
import numpy as np
from util.misc import NativeScalerWithGradNormCount as NativeScaler
class MaskedAutoencoderViT(nn.Module):
    """
    MAE architecture, made up of two parts: the encoder and the decoder.
    Masked Autoencoder with VisionTransformer backbone
    """
    def __init__(self, img_size=28, patch_size=4, in_chans=3,
                 embed_dim=128, depth=24, num_heads=4,
                 decoder_embed_dim=256, decoder_depth=8, decoder_num_heads=12,
                 mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False):
        super().__init__()
        # --------------------------------------------------------------------------
        # MAE encoder specifics
        self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim)
        # internally a convolution whose kernel size and stride both equal patch_size;
        # it extracts one token per patch, and 75% of the tokens are masked out later
        num_patches = self.patch_embed.num_patches
        self.patch_size = patch_size
        self.in_chans = in_chans
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False)  # fixed sin-cos embedding
        self.blocks = nn.ModuleList([
            Block(embed_dim, num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # --------------------------------------------------------------------------
        # --------------------------------------------------------------------------
        # MAE decoder specifics
        self.decoder_embed = nn.Linear(embed_dim, decoder_embed_dim, bias=True)
        # a linear layer is defined here and assigned to decoder_embed; the input is only passed in when decoder_embed is called
        # nn.Parameter marks a tensor as a learnable parameter so that its values are updated during training
        self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim))  # torch.zeros returns a zero tensor of shape (1, 1, decoder_embed_dim)
        self.decoder_pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, decoder_embed_dim), requires_grad=False)  # fixed sin-cos embedding
        self.decoder_blocks = nn.ModuleList([
            Block(decoder_embed_dim, decoder_num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer)
            for i in range(decoder_depth)])
        self.decoder_norm = norm_layer(decoder_embed_dim)
        self.decoder_pred = nn.Linear(decoder_embed_dim, patch_size**2 * in_chans, bias=True)  # decoder to patch; bias=True adds a learnable bias
        # --------------------------------------------------------------------------
        self.norm_pix_loss = norm_pix_loss
        self.initialize_weights()
    def initialize_weights(self):
        # initialization
        # initialize (and freeze) pos_embed by sin-cos embedding
        pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True)
        self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))
        decoder_pos_embed = get_2d_sincos_pos_embed(self.decoder_pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True)
        self.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0))
        # initialize patch_embed like nn.Linear (instead of nn.Conv2d)
        w = self.patch_embed.proj.weight.data
        torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
        # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
        torch.nn.init.normal_(self.cls_token, std=.02)
        torch.nn.init.normal_(self.mask_token, std=.02)
        # initialize nn.Linear and nn.LayerNorm
        self.apply(self._init_weights)
    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            # we use xavier_uniform following official JAX ViT:
            torch.nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def patchify(self, imgs):  # an input of shape 32 x 1 x 28 x 28 is split into patches of shape 32 x 16 x 49
        """
        imgs: (N, 3, H, W)  (32 x 1 x 28 x 28)
        x: (N, L, patch_size**2 *3)
        """
        x = imgs
        if (x.shape[1] == 1):  # if the input has 1 channel, convert it to 3 channels
            x = np.array(x.cpu())
            x = x.transpose((1, 0, 2, 3))              # move the channel axis to the front
            image = np.concatenate((x, x, x), axis=0)  # repeat the single channel three times
            image = image.transpose((1, 0, 2, 3))      # move the axes back
            image = torch.tensor(image)                # convert the numpy array back to a tensor
            x = image
        x = x.cuda()
        imgs = x
        p = self.patch_embed.patch_size[0]
        assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0
        h = w = imgs.shape[2] // p
        x = imgs.reshape(shape=(imgs.shape[0], self.in_chans, h, p, w, p))  # e.g. 32 x 1 x 4 x 7 x 4 x 7
        x = torch.einsum('nchpwq->nhwpqc', x)  # e.g. 32 x 4 x 4 x 7 x 7 x 1
        x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * self.in_chans))  # e.g. 32 x 16 x 49
        return x
    def unpatchify(self, x):  # the inverse of patchify: recover the image from the patch sequence, e.g. a sequence of N x 196 x 768 back to an RGB image of N x 3 x 224 x 224
        """
        x: (N, L, patch_size**2 *3)
        imgs: (N, 3, H, W)
        """
        p = self.patch_embed.patch_size[0]
        h = w = int(x.shape[1]**.5)
        assert h * w == x.shape[1]
        x = x.reshape(shape=(x.shape[0], h, w, p, p, self.in_chans))  # self.in_chans is the number of channels
        x = torch.einsum('nhwpqc->nchpwq', x)
        imgs = x.reshape(shape=(x.shape[0], self.in_chans, h * p, h * p))
        return imgs
    def random_masking(self, x, mask_ratio):
        """
        Performs the random masking. A uniform random noise value is drawn for every patch,
        the noise values are sorted, and only the lowest-noise fraction (1 - mask_ratio) of patches is kept;
        the rest are removed. This shuffling implements random masking of image patches.
        The mask is applied per patch, not per pixel: with 4 patches and a mask ratio of 0.75, three patches are hidden.
        Perform per-sample random masking by per-sample shuffling.
        Per-sample shuffling is done by argsort random noise.
        x: [N, L, D], sequence
        """
        N, L, D = x.shape  # batch, length, dim
        len_keep = int(L * (1 - mask_ratio))  # e.g. 4 * (1 - 0.75) = 1
        noise = torch.rand(N, L, device=x.device)  # noise in [0, 1], one noise value per patch
        # sort noise for each sample
        ids_shuffle = torch.argsort(noise, dim=1)  # ascend: small is keep, large is remove
        ids_restore = torch.argsort(ids_shuffle, dim=1)  # argsort again to get the indices that undo the shuffle
        # keep the first subset
        ids_keep = ids_shuffle[:, :len_keep]  # indices of the patches that are kept
        x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))  # ids_keep is expanded to (N, len_keep, D) before gathering the kept patches
        # generate the binary mask: 0 is keep, 1 is remove
        mask = torch.ones([N, L], device=x.device)
        mask[:, :len_keep] = 0
        # unshuffle to get the binary mask
        mask = torch.gather(mask, dim=1, index=ids_restore)
        return x_masked, mask, ids_restore  # x_masked is the visible subset of x, mask marks the removed patches
    def forward_encoder(self, x, mask_ratio):
        '''
        forward_encoder: a full image is fed into the encoder. First the usual patch embedding is applied
        and the 2D sin-cos positional information is added; then random masking is performed.
        The class token (with its positional embedding) is prepended to the masked tokens,
        the result is passed through the stack of Transformer blocks, and finally layer norm is applied.
        '''
        if (x.shape[1] == 1):  # if the input has 1 channel, convert it to 3 channels
            x = np.array(x.cpu())
            x = x.transpose((1, 0, 2, 3))              # move the channel axis to the front
            image = np.concatenate((x, x, x), axis=0)  # repeat the single channel three times
            image = image.transpose((1, 0, 2, 3))      # move the axes back
            image = torch.tensor(image)                # convert the numpy array back to a tensor
            x = image
        x = x.cuda()
        x = self.patch_embed(x)  # e.g. 32 x 16 x 588: every patch is represented by a vector of length 588
        # add pos embed w/o cls token
        x = x + self.pos_embed[:, 1:, :]  # add the positional information
        # masking: length -> length * mask_ratio
        x, mask, ids_restore = self.random_masking(x, mask_ratio)
        # after masking, x has shape 32 x 4 x 588, mask has shape 32 x 16 and records which patches were removed,
        # ids_restore holds the indices needed to restore the original patch order
        # append cls token
        cls_token = self.cls_token + self.pos_embed[:, :1, :]
        cls_tokens = cls_token.expand(x.shape[0], -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)  # prepend the cls token, result is 32 x 5 x 588
        # apply Transformer blocks
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        return x, mask, ids_restore
    def forward_decoder(self, x, ids_restore):
        '''
        forward_decoder: the decoder takes two inputs, the representations learned by the encoder
        and the restore indices of the masked 3/4 of the patches. mask_token is a learned placeholder token
        (introduced in the spirit of the class token in NLP) that stands in for every masked patch.
        The encoded representations and the mask tokens are concatenated and unshuffled back into the original order,
        the class token is re-attached, the decoder positional embedding is added, and the result goes through
        a shallow stack of Transformer blocks. After layer norm, a fully connected layer maps each token to a
        reconstructed patch, and the class token is dropped.
        '''
        # embed tokens
        x = self.decoder_embed(x)  # project x to the decoder width, output 32 x 5 x 392
        # append mask tokens to sequence
        mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1)
        # mask_tokens is the learned embedding of the masked patches (initialized as zeros of shape 1 x 1 x 392 inside nn.Parameter);
        # repeat() copies it along the given dimensions, one copy per masked patch
        x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1)  # no cls token, shape 32 x 16 x 392: concatenate the visible tokens with the mask tokens
        x_ = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2]))  # unshuffle, shape 32 x 16 x 392
        x = torch.cat([x[:, :1, :], x_], dim=1)  # append cls token, shape 32 x 17 x 392
        x = x + self.decoder_pos_embed  # 32 x 17 x 392, fixed (non-trainable) positional encoding
        # apply Transformer blocks
        for blk in self.decoder_blocks:
            x = blk(x)
        x = self.decoder_norm(x)  # 32 x 17 x 392
        # predictor projection
        x = self.decoder_pred(x)  # linear layer that maps each token to a patch, 32 x 17 x 49
        x = x[:, 1:, :]  # remove cls token, 32 x 16 x 49
        return x
    def forward_loss(self, imgs, pred, mask):
        """
        forward_loss: pred is the reconstructed patch sequence; the original image is converted to the same
        shape with patchify and used as the target. Optionally the target pixels are normalized per patch
        (subtract the mean, divide by the standard deviation). The loss is the mean squared error,
        computed only on the masked (removed) patches.
        imgs: [N, 3, H, W]
        pred: [N, L, p*p*3]
        mask: [N, L], 0 is keep, 1 is remove,
        """
        target = self.patchify(imgs)  # e.g. 32 x 16 x 49
        if self.norm_pix_loss:
            mean = target.mean(dim=-1, keepdim=True)
            var = target.var(dim=-1, keepdim=True)
            target = (target - mean) / (var + 1.e-6)**.5
        loss = (pred - target) ** 2
        loss = loss.mean(dim=-1)  # [N, L], mean squared error per patch
        loss = (loss * mask).sum() / mask.sum()  # mean loss on removed patches
        return loss
    def forward(self, imgs, mask_ratio=0.75):
        latent, mask, ids_restore = self.forward_encoder(imgs, mask_ratio)  # latent has shape [32, 5, 588]
        pred = self.forward_decoder(latent, ids_restore)  # [N, L, p*p*3], here [32, 16, 49]
        loss = self.forward_loss(imgs, pred, mask)
        return loss, pred, mask
def mae_vit_base_patch16_dec512d8b(**kwargs):
    model = MaskedAutoencoderViT(  # image size 28, 3 input channels; patchify must be adapted accordingly
        patch_size=14, embed_dim=588, depth=3, num_heads=6,
        decoder_embed_dim=392, decoder_depth=1, decoder_num_heads=4,
        # embed_dim and decoder_embed_dim differ because a linear layer maps the encoder width to the decoder width:
        # decoder_embed = nn.Linear(embed_dim, decoder_embed_dim)
        mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model
# set recommended archs
mae_vit_large_patch16 = mae_vit_base_patch16_dec512d8b  # decoder: 512 dim, 8 blocks
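
A minimal sketch of the MNIST pretraining (image reconstruction) loop mentioned at the top of this section. It reuses the imports at the top of this block, and it assumes a GPU is available, since patchify and forward_encoder call .cuda() internally; hyperparameter values here are illustrative only:

model = mae_vit_large_patch16().cuda()
optimizer = torch.optim.AdamW(model.parameters(), lr=1.5e-4)
train_loader = DataLoader(
    MNIST(root='../data/', train=True, download=True, transform=transforms.ToTensor()),
    batch_size=32, shuffle=True)
for imgs, _ in train_loader:
    loss, pred, mask = model(imgs, mask_ratio=0.75)  # the model moves the batch to the GPU itself
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()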

ResNet18

import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import time
import numpy as np
import matplotlib.pyplot as plt
import LoadDataset
from torch import optim

# URL of the pretrained ResNet-18 weights published by torchvision
model_urls = {'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth'}

def resnet18(pretrained=False, **kwargs):
    """Build a ResNet-18 model.

    Args:
        pretrained (bool): if True, return a model pretrained on ImageNet
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return model



class ResNet(nn.Module):

    def __init__(self, block, layers, num_classes=100, zero_init_residual=False):
        """Define the structure of the ResNet network.

        Args:
            block (BasicBlock): residual block type (only BasicBlock is needed for ResNet-18)
            layers (list): number of residual blocks in each stage, length 4
            num_classes (int): number of classes
            zero_init_residual (bool): if True, initialize the last BN layer of every residual block to zero,
                so that each residual branch starts from zero and each residual block behaves like an identity mapping.
                According to https://arxiv.org/abs/1706.02677 this improves the model by 0.2~0.3%.
        """
        super(ResNet, self).__init__()
        self.inplanes = 64  # number of input channels of the first residual block
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Stage1 ~ Stage4
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))  # global average pooling
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # parameter initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, BasicBlock):  # only BasicBlock is defined in this snippet (ResNet-18 does not use Bottleneck)
                    nn.init.constant_(m.bn2.weight, 0)

    def forward(self, x):
        x = self.conv1(x)    # input 8x3x224x224, output 8x64x112x112
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)  # output 8x64x56x56
        # each layer below has two blocks; only the first convolution of the first block changes the shape
        x = self.layer1(x)   # shape unchanged
        x = self.layer2(x)   # output 8x128x28x28
        x = self.layer3(x)   # output 8x256x14x14
        x = self.layer4(x)   # output 8x512x7x7

        x = self.avgpool(x)  # output 8x512x1x1
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x

    def _make_layer(self, block, planes, blocks, stride=1):
        """Define one stage of the ResNet.

        Args:
            block (BasicBlock): residual block type
            planes (int): number of output channels of the first convolution in each block
            blocks (int): number of residual blocks in this stage
            stride (int): stride of the first convolution in the first block
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        """定义BasicBlock残差块类
        
        参数:
            inplanes (int): 输入的Feature Map的通道数
            planes (int): 第一个卷积层输出的Feature Map的通道数
            stride (int, optional): 第一个卷积层的步长
            downsample (nn.Sequential, optional): 旁路下采样的操作
        注意:
            残差块输出的Feature Map的通道数是planes*expansion
        """
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x  # input, e.g. 8x64x56x56

        out = self.conv1(x)  # output, e.g. 8x64x56x56
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out
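
A quick shape check for the ResNet-18 defined above (an assumed usage example matching the shape comments in forward):

model = resnet18(num_classes=100)
dummy = torch.randn(8, 3, 224, 224)  # a fake batch of 8 images
out = model(dummy)
print(out.shape)  # torch.Size([8, 100])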

DCGAN

import os
import time
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torchvision import datasets
from torchvision.utils import save_image
from torch.autograd import Variable
from argparse import Namespace

# In the original script these hyperparameters come from argparse; the values below are a minimal stand-in (assumed)
opt = Namespace(n_epochs=200, batch_size=32, lr=0.0002, b1=0.5, b2=0.999,
                latent_dim=100, img_size=32, channels=1, sample_interval=400)
cuda = torch.cuda.is_available()

def weights_init_normal(m):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find("BatchNorm2d") != -1:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)


class Generator(nn.Module):
    def __init__(self):
        super(Generator, self).__init__()

        self.init_size = opt.img_size // 4  # integer division
        self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))

        self.conv_blocks = nn.Sequential(  # input 32x128x8x8
            nn.BatchNorm2d(128),
            nn.Upsample(scale_factor=2),  # upsample the spatial dimensions by 2 -> 32x128x16x16
            nn.Conv2d(128, 128, 3, stride=1, padding=1),  # output 32x128x16x16
            nn.BatchNorm2d(128, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Upsample(scale_factor=2),  # 32x128x32x32
            nn.Conv2d(128, 64, 3, stride=1, padding=1),  # 32x64x32x32
            nn.BatchNorm2d(64, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, opt.channels, 3, stride=1, padding=1),  # output 32x1x32x32
            nn.Tanh(),
        )

    def forward(self, z):
        out = self.l1(z)  # input 32x100; the linear layer outputs 32x8192
        out = out.view(out.shape[0], 128, self.init_size, self.init_size)  # reshape to 32x128x8x8
        img = self.conv_blocks(out)  # output 32x1x32x32
        return img


class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()

        def discriminator_block(in_filters, out_filters, bn=True):
            block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]
            if bn:
                block.append(nn.BatchNorm2d(out_filters, 0.8))
            return block

        self.model = nn.Sequential(
            *discriminator_block(1, 16, bn=False),
            *discriminator_block(16, 32),
            *discriminator_block(32, 64),
            *discriminator_block(64, 128),
        )     

        # The height and width of downsampled image
        ds_size = opt.img_size // 2 ** 4
        self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Sigmoid())

    def forward(self, img):
        out = self.model(img)  # input 32x1x32x32, output 32x128x2x2
        out = out.view(out.shape[0], -1)  # output 32x512
        validity = self.adv_layer(out)  # output 32x1

        return validity

#Loss function
adversarial_loss = torch.nn.BCELoss()

#Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()

if cuda:
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()

#Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
#Configure data loader
os.makedirs("../data/", exist_ok=True)
dataloader = torch.utils.data.DataLoader(
    datasets.MNIST(
        "../../data/mnist",
        train=True,
        download=True,
        transform=transforms.Compose(
            [transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
        ),
    ),
    batch_size=opt.batch_size,
    shuffle=True,
)
#Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))

Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
ctime=time.time()
loss_D=0
loss_G=0
print("start train %d epoch"%(opt.n_epochs))
for epoch in range(opt.n_epochs):
    for i, (imgs, _) in enumerate(dataloader):

        # Adversarial ground truths
        valid = Variable(Tensor(imgs.shape[0], 1).fill_(1.0), requires_grad=False)  # 32x1, all ones
        fake = Variable(Tensor(imgs.shape[0], 1).fill_(0.0), requires_grad=False)   # 32x1, all zeros

        # Configure input
        real_imgs = Variable(imgs.type(Tensor))

        # -----------------
        #  Train Generator
        # -----------------

        optimizer_G.zero_grad()

        # Sample noise as generator input
        z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))
        # the noise input has shape 32 x latent_dim: one latent vector per generated image
        # Generate a batch of images
        gen_imgs = generator(z)

        # Loss measures generator's ability to fool the discriminator
        g_loss = adversarial_loss(discriminator(gen_imgs), valid)
        # the generator is trained with the "real" target (valid), i.e. minimizing this loss pushes
        # discriminator(gen_imgs) towards 1, so the generator learns to fool the discriminator
        g_loss.backward()
        optimizer_G.step()

        # ---------------------
        #  Train Discriminator
        # ---------------------

        optimizer_D.zero_grad()

        # Measure discriminator's ability to classify real from generated samples
        real_loss = adversarial_loss(discriminator(real_imgs), valid)  # classify real images; the discriminator should output "real"
        fake_loss = adversarial_loss(discriminator(gen_imgs.detach()), fake)  # classify generated images; the discriminator should output "fake"
        d_loss = (real_loss + fake_loss) / 2  # d_loss decreases during training

        d_loss.backward()
        optimizer_D.step()
        loss_D=loss_D+d_loss.item()
        loss_G=loss_G+g_loss.item()
        if (i + 1) % 125 == 0:
            #print( "[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"% (epoch, opt.n_epochs, i, len(dataloader), d_loss.item(), g_loss.item()))
            print(
                "[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
                % (epoch, opt.n_epochs, i, len(dataloader), loss_D/125, loss_G/125)
            )
            loss_D=0
            loss_G=0

        batches_done = epoch * len(dataloader) + i
        if batches_done % opt.sample_interval == 0:
            save_image(gen_imgs.data[:25], "outIMG/%d.png" % batches_done, nrow=5, normalize=True)

dtime=time.time()
print("训练用时:"+str(dtime-ctime))

How the forward function works

class A():
    def __init__(self, init_age):
        super().__init__()
        print('My age is:', init_age)
        self.age = init_age

    def __call__(self, added_age):
        res = self.forward(added_age)
        return res

    def forward(self, input_):
        print('the forward function was called')
        return input_ + self.age

print('initializing the object....')
a = A(10)  # initialization only calls __init__
input_param = a(2)  # calling the instance invokes __call__, which in turn calls forward
print("My age now is:", input_param)

The output of the code above is:
initializing the object....
My age is: 10
the forward function was called
My age now is: 12

nn.Module works the same way: calling model(x) invokes __call__, which in turn calls the module's forward method.

Saving and Loading Models

There are two ways to save a model, each with its own way of loading afterwards.
Method 1
torch.save(model.state_dict(), "save_path.pth")  # save only the parameters
model.load_state_dict(torch.load("save_path.pth", map_location='cpu'))  # load the parameters
Method 2
torch.save(model, "save_path.pth")  # save the whole model, including its parameters
model = torch.load("save_path.pth", map_location='cpu')  # load the model
Loading and saving model parameters
load_state_dict(state_dict, strict=True)  # copies parameters and buffers from state_dict into the Module and its submodules
# state_dict is a dict holding the Module's parameters and buffers
state_dict(): returns a dict containing the complete state of the Module instance, including parameters and buffers; the keys are the parameter/buffer names
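
A minimal runnable sketch of both methods, assuming a simple nn.Linear model and the hypothetical file name model.pth:

import torch
from torch import nn

model = nn.Linear(10, 2)

# Method 1: save and load only the state dict
torch.save(model.state_dict(), "model.pth")
model.load_state_dict(torch.load("model.pth", map_location='cpu'))

# Method 2: save and load the whole model object
torch.save(model, "model.pth")
model = torch.load("model.pth", map_location='cpu')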

Notes on the Optimizer

The fixed pattern for using an optimizer during training is as follows (see the sketch after this paragraph):
loss.backward() computes the gradient of the loss with respect to each parameter, optimizer.step() then updates the network's parameters, and optimizer.zero_grad() clears this round's gradients so they do not affect the next update.
Basic attributes: eps: in adaptive optimizers such as Adam, a small constant added to the denominator of the update for numerical stability (it is not a minimum learning rate).
weight_decay: weight decay, equivalent to L2 regularization of the parameters (keeping the model as simple as possible to prevent overfitting); the value can be understood as the coefficient of the regularization term.
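
A minimal sketch of that fixed pattern; the tiny linear model and the dummy batch below are assumptions for illustration:

import torch
from torch import nn, optim

model = nn.Linear(4, 2)
loss_func = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)

data = torch.randn(8, 4)            # a dummy batch of 8 samples
target = torch.randint(0, 2, (8,))  # dummy labels

optimizer.zero_grad()               # clear the gradients from the previous step
output = model(data)                # forward pass
loss = loss_func(output, target)    # compute the loss
loss.backward()                     # backward pass: compute gradients w.r.t. the parameters
optimizer.step()                    # gradient descent step: update the parameters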

Tuning Tips

model.parameters()        # returns all parameters of the model
model.named_parameters()  # returns all parameters of the model together with their names
# the code below trains only the model's head, i.e. the final classification layer
for k, v in model.named_parameters():
    v.requires_grad = False
    if k in ['head.weight', 'head.bias']:
        v.requires_grad = True
for name, value in model.named_parameters():
    print(name, value.requires_grad)  # print which parameters are trainable
params = filter(lambda p: p.requires_grad, model.parameters())  # pass these parameters to the optimizer (see the sketch below)
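
A minimal sketch of the last step; the stand-in model below and its "head" layer name are assumptions for illustration:

from torch import nn, optim

model = nn.Sequential()
model.add_module('backbone', nn.Linear(16, 16))
model.add_module('head', nn.Linear(16, 10))

for k, v in model.named_parameters():
    v.requires_grad = k in ['head.weight', 'head.bias']  # freeze everything except the head

params = filter(lambda p: p.requires_grad, model.parameters())
optimizer = optim.SGD(params, lr=1e-3)  # the optimizer only updates the head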

Setting the number of warmup epochs to between 5% and 20% of the total number of epochs usually works well; a sketch of a linear warmup schedule follows.
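
A minimal sketch of a linear warmup using LambdaLR; the epoch counts and the constant learning rate after warmup are assumptions for illustration:

import torch
from torch import nn, optim

total_epochs, warmup_epochs = 100, 10  # hypothetical: 100 epochs with a 10% warmup
model = nn.Linear(4, 2)
optimizer = optim.SGD(model.parameters(), lr=0.1)

# learning-rate factor: ramp up linearly during warmup, then stay constant
lr_lambda = lambda epoch: (epoch + 1) / warmup_epochs if epoch < warmup_epochs else 1.0
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)

for epoch in range(total_epochs):
    # ... one epoch of training ...
    scheduler.step()  # update the learning rate once per epoch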
