Four-class weather classification in PyTorch, with Dropout and BN layers

import torch
import torch.nn as nn
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.functional as F
import torchvision
import time
import os
import shutil
from torchvision import datasets, transforms
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split

start = time.time()

base_dir = r'D:/pycharmworkspace/ISLR-master/fourweather'
# Create base_dir if it does not exist yet
if not os.path.isdir(base_dir):
    os.mkdir(base_dir)
# Create the train and test directories under base_dir
# (created outside the if above, so they also appear when base_dir already exists)
train_dir = os.path.join(base_dir, 'train')
test_dir = os.path.join(base_dir, 'test')
for d in (train_dir, test_dir):
    if not os.path.isdir(d):
        os.mkdir(d)

species = ['cloudy', 'rain', 'shine', 'sunrise']
# Create one subdirectory per class under train/ and test/
for train_or_test in ['train', 'test']:
    for spc in species:
        class_dir = os.path.join(base_dir, train_or_test, spc)
        if not os.path.isdir(class_dir):
            os.mkdir(class_dir)

# Directory that holds the original, unsorted images
image_dir = r'D:/pycharmworkspace/ISLR-master/4w'
# os.listdir(image_dir) lists the image file names;
# enumerate yields (0, first image), (1, second image), and so on
for i, img in enumerate(os.listdir(image_dir)):
    for spec in species:
        # The class name is part of the file name
        if spec in img:
            s = os.path.join(image_dir, img)
            # Every fifth image (1/5 of the data) goes to the test directory
            if i % 5 == 0:
                d = os.path.join(base_dir, 'test', spec, img)
            # The remaining 4/5 go to the train directory
            else:
                d = os.path.join(base_dir, 'train', spec, img)
            # Copy the image from path s to path d
            shutil.copy(s, d)
#---------------------------- end of the data-moving code ----------------------------
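After the copy loop finishes, a quick sanity check can confirm the roughly 4:1 split per class. This snippet is a minimal sketch, not part of the original post; it only assumes the directory layout built above:

# Sanity check: print how many images ended up in each split/class directory
for train_or_test in ['train', 'test']:
    for spc in species:
        class_dir = os.path.join(base_dir, train_or_test, spc)
        print(train_or_test, spc, len(os.listdir(class_dir)))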

# Put all the transforms, as a list, inside Compose
# (the resize size and normalization values below are assumptions, not the author's exact choices)
transformation = transforms.Compose([transforms.Resize((96, 96)), transforms.ToTensor(),
                                     transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
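With the folder structure above, the datasets and loaders would typically be built with ImageFolder. A minimal sketch, reusing the train_dir/test_dir paths defined earlier; the batch size of 64 is an assumption:

# Build datasets from the class-per-folder layout and wrap them in loaders
# (batch size is an assumption)
train_ds = datasets.ImageFolder(train_dir, transform=transformation)
test_ds = datasets.ImageFolder(test_dir, transform=transformation)
train_dl = DataLoader(train_ds, batch_size=64, shuffle=True)
test_dl = DataLoader(test_ds, batch_size=64)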
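The title promises Dropout and BN layers in the network itself. The following is a minimal sketch of such a model; the channel counts, dropout rate, and the 96x96 input size are assumptions, not the author's exact architecture:

# A small CNN with BatchNorm and Dropout layers
# (all layer sizes are assumptions; the flatten size 64*10*10 matches a 96x96 input)
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, 3)
        self.bn1 = nn.BatchNorm2d(16)
        self.conv2 = nn.Conv2d(16, 32, 3)
        self.bn2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 64, 3)
        self.bn3 = nn.BatchNorm2d(64)
        self.drop = nn.Dropout(0.5)
        self.fc1 = nn.Linear(64 * 10 * 10, 1024)
        self.bn_f1 = nn.BatchNorm1d(1024)
        self.fc2 = nn.Linear(1024, 4)

    def forward(self, x):
        # conv -> BN -> ReLU -> 2x2 max pool, three times: 96 -> 47 -> 22 -> 10
        x = F.max_pool2d(F.relu(self.bn1(self.conv1(x))), 2)
        x = F.max_pool2d(F.relu(self.bn2(self.conv2(x))), 2)
        x = F.max_pool2d(F.relu(self.bn3(self.conv3(x))), 2)
        x = x.view(x.size(0), -1)
        x = self.drop(F.relu(self.bn_f1(self.fc1(x))))
        x = self.fc2(x)  # 4 weather classes
        return x

A usage sketch; the loss and optimizer choices are also assumptions:

model = Net()
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)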