1D ResNet Implementation


Implementing ResNet-18 in 1D

A 1D implementation that can be used for classification tasks; it is validated here on the Iris dataset.
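Since nn.Conv1d expects input of shape (batch, channels, length), each four-feature Iris sample is treated as a one-channel sequence of length 4. A minimal sketch of that reshape (my illustration, mirroring the reshape used in train() below):

import torch

x = torch.randn(32, 4)   # a batch of 32 Iris samples, 4 features each
x = x.reshape(32, 1, 4)  # -> (batch=32, channels=1, length=4) for nn.Conv1d
print(x.shape)           # torch.Size([32, 1, 4])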
ResNet1D.py

from torch.nn import functional as F
from torch import nn
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
from sklearn import datasets
from common import train, test, LoadData, device
import torch
import pandas as pd
class Residual(nn.Module):
    # A 1D residual block: two 3-tap convolutions with batch norm, plus an
    # optional 1x1 convolution that matches channels/stride on the shortcut.
    def __init__(self, input_channels, num_channels,
                 use_1x1conv=False, strides=1):
        super().__init__()
        self.conv1 = nn.Conv1d(input_channels, num_channels,
                               kernel_size=3, padding=1, stride=strides)
        self.conv2 = nn.Conv1d(num_channels, num_channels,
                               kernel_size=3, padding=1)
        if use_1x1conv:
            self.conv3 = nn.Conv1d(input_channels, num_channels,
                                   kernel_size=1, stride=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm1d(num_channels)
        self.bn2 = nn.BatchNorm1d(num_channels)

    def forward(self, X):
        Y = F.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)  # project the shortcut so shapes match
        Y += X  # residual (skip) connection
        return F.relu(Y)


def resnet_block(input_channels, num_channels, num_residuals,
                 first_block=False):
    # One stage of residual blocks; except in the first stage, the first block
    # halves the sequence length (stride 2) and changes the channel count.
    blk = []
    for i in range(num_residuals):
        if i == 0 and not first_block:
            blk.append(Residual(input_channels, num_channels,
                                use_1x1conv=True, strides=2))
        else:
            blk.append(Residual(num_channels, num_channels))
    return blk


# Stem (7-tap convolution + max pooling) followed by four residual stages
# with 2 blocks each -- the classic ResNet-18 layout.
b1 = nn.Sequential(nn.Conv1d(1, 64, kernel_size=7, stride=2, padding=3),
                   nn.BatchNorm1d(64), nn.ReLU(),
                   nn.MaxPool1d(kernel_size=3, stride=2, padding=1))

b2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))
b3 = nn.Sequential(*resnet_block(64, 128, 2))
b4 = nn.Sequential(*resnet_block(128, 256, 2))
b5 = nn.Sequential(*resnet_block(256, 512, 2))

class ResNetModel(nn.Module):
    def __init__(self, input_size, num_classes):
        super(ResNetModel, self).__init__()

        self.res = nn.Sequential(
                        b1, b2, b3, b4, b5,
                        nn.AdaptiveAvgPool1d(1),
                        nn.Flatten(),
                        nn.Linear(512, 512),nn.ReLU(),
                        nn.Linear(512, 64), nn.ReLU(),
                        nn.Linear(64, num_classes)
        )
        # Alternative: a single fully connected layer mapping the backbone
        # output directly to the classification space
        # self.fc = nn.Linear(input_size, num_classes)

    def forward(self, x):
        x = self.res(x)
        return x

if __name__=='__main__':
    # Load the Iris dataset
    iris = datasets.load_iris()
    # Feature data
    X = pd.DataFrame(iris.data)
    # Target data (class labels)
    y = pd.Series(iris.target)
    num_classes = len(y.unique())
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, stratify=y, random_state=50)
    train_data = LoadData(X_train, y_train)
    test_data = LoadData(X_test, y_test)
    X_dimension = len(X_train.columns)  # features per sample (4 for Iris)
    y_dimension = len(y_train.value_counts())  # number of classes (equals num_classes)
    batch_size = 32
    loss_fn = nn.CrossEntropyLoss()
    train_dataloader = DataLoader(train_data, batch_size=batch_size)
    test_dataloader = DataLoader(test_data, batch_size=batch_size)
    epochs = 10
    lr = 0.001
    model = ResNetModel(input_size=X_dimension, num_classes=num_classes)
    model.to(device=device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    losses, iter = train(model, optimizer, loss_fn, epochs, train_dataloader, train_data, X_dimension)
    test(model,test_dataloader, X_dimension, loss_fn)
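A quick shape sanity check (my addition, not part of the original script; it assumes the two files are saved as ResNet1D.py and common.py):

import torch
from ResNet1D import ResNetModel

model = ResNetModel(input_size=4, num_classes=3)
dummy = torch.randn(8, 1, 4)  # (batch, channels, length), as reshaped in train()
out = model(dummy)
print(out.shape)  # expected: torch.Size([8, 3])

Note that BatchNorm1d in training mode requires a batch size greater than 1, so a single-sample dummy input would fail here.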

Shared utilities: common.py

import torch
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, confusion_matrix

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
from torch.utils.data import Dataset

class LoadData(Dataset):
    def __init__(self, X, y):
        self.X = X
        self.y = y
    def __len__(self):
        return len(self.X)
    def __getitem__(self, index):
        X = torch.tensor(self.X.iloc[index])
        y = torch.tensor(self.y.iloc[index])
        return X, y

def train(model, optimizer, loss_fn, epochs, train_dataloader, train_data, X_dimension):
    losses = []
    iter = 0
    model.train()  # ensure BatchNorm layers use batch statistics
    for epoch in range(epochs):
        print(f"epoch {epoch + 1}\n-----------------")
        for i, (X, y) in enumerate(train_dataloader):
            X, y = X.to(device).to(torch.float32), y.to(device).to(torch.float32)
            X = X.reshape(X.shape[0], 1, X_dimension)  # (batch, 1 channel, length)
            y_pred = model(X)
            loss = loss_fn(y_pred, y.long())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i % 100 == 0:
                print(f"loss: {loss.item()}\t[{(i + 1) * len(X)}/{len(train_data)}]")
                iter += 1
                losses.append(loss.item())

    return losses, iter

def test(model, test_dataloader, X_dimension, loss_fn):
    positive = 0
    negative = 0
    y_true_list = []
    y_pred_list = []
    model.eval()  # use BatchNorm running statistics during evaluation
    with torch.no_grad():
        iter = 0
        loss_sum = 0
        for X, y in test_dataloader:
            X, y = X.to(device).to(torch.float32), y.to(device).to(torch.float32)
            X = X.reshape(X.shape[0], 1, X_dimension)
            y_pred = model(X)
            loss = loss_fn(y_pred, y.long())
            loss_sum += loss.item()
            iter += 1
            for item in zip(y_pred, y):
                y_true_list.append(int(item[1].item()))
                pre_y = torch.argmax(item[0])
                y_pred_list.append(int(pre_y.item()))
                if pre_y == item[1]:
                    positive += 1
                else:
                    negative += 1
    acc = positive / (positive + negative)
    avg_loss = loss_sum / iter
    print("avg_loss:", avg_loss)
    print("Accuracy:", acc)
