# ResGCN for time-series forecasting — PyTorch implementation

import torch
import torch.nn as nn
import torch.nn.functional as F

class ResGCNBlock(nn.Module):
    """Residual block of two pointwise (kernel_size=1) Conv1d layers.

    Each branch is Conv1d -> BatchNorm1d -> GELU -> Dropout, applied twice,
    with a residual connection around the whole branch.

    NOTE(review): despite the name, there is no graph convolution here —
    both convolutions are plain 1x1 temporal convolutions.

    Args:
        in_channels: channels of the input tensor (dim 1 of (N, C, L)).
        out_channels: channels produced by the block.
        dropout: dropout probability applied after each activation.
    """

    def __init__(self, in_channels, out_channels, dropout=0.5):
        super(ResGCNBlock, self).__init__()
        self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
        self.bn1 = nn.BatchNorm1d(out_channels)
        self.gelu = nn.GELU()
        self.dropout = nn.Dropout(dropout)
        self.conv2 = nn.Conv1d(out_channels, out_channels, kernel_size=1)
        self.bn2 = nn.BatchNorm1d(out_channels)
        # Fix: the original `out += identity` raised a shape error whenever
        # in_channels != out_channels. Project the shortcut with a 1x1 conv
        # in that case (standard ResNet downsample trick); when the channel
        # counts match, Identity keeps the original behavior exactly.
        if in_channels == out_channels:
            self.shortcut = nn.Identity()
        else:
            self.shortcut = nn.Conv1d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        """Apply the residual branch to ``x`` of shape (N, C_in, L).

        Returns:
            Tensor of shape (N, C_out, L).
        """
        identity = self.shortcut(x)
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.gelu(out)
        out = self.dropout(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.gelu(out)
        out = self.dropout(out)
        # Out-of-place add: safer than `out += identity` w.r.t. autograd
        # if the branch output is ever reused elsewhere.
        out = out + identity
        return out

class ResGCN(nn.Module):
    """Stack of ``ResGCNBlock``s with 1x1 conv input/output projections.

    Input of shape (N, input_dim, L) is projected to ``hidden_dim``
    channels, passed through ``num_blocks`` residual blocks, then
    projected to ``output_dim`` channels; the length L is preserved.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_blocks, dropout=0.5):
        super(ResGCN, self).__init__()
        self.initial_conv = nn.Conv1d(input_dim, hidden_dim, kernel_size=1)
        stack = [
            ResGCNBlock(hidden_dim, hidden_dim, dropout)
            for _ in range(num_blocks)
        ]
        # ModuleList (not Sequential) keeps the original state_dict keys.
        self.blocks = nn.ModuleList(stack)
        self.final_conv = nn.Conv1d(hidden_dim, output_dim, kernel_size=1)

    def forward(self, x):
        """Map (N, input_dim, L) -> (N, output_dim, L)."""
        hidden = self.initial_conv(x)
        for layer in self.blocks:
            hidden = layer(hidden)
        return self.final_conv(hidden)

# ---- Example usage -------------------------------------------------------
input_dim = 5        # number of input features per time step
hidden_dim = 64      # channel width inside the residual blocks
output_dim = 1       # number of predicted features per time step
num_blocks = 3       # residual blocks to stack
dropout = 0.5        # dropout probability

model = ResGCN(
    input_dim=input_dim,
    hidden_dim=hidden_dim,
    output_dim=output_dim,
    num_blocks=num_blocks,
    dropout=dropout,
)

# Synthetic batch laid out as (batch, channels, time).
batch_size = 16
sequence_length = 50
x = torch.rand(batch_size, input_dim, sequence_length)

# One forward pass to sanity-check the output shape.
output = model(x)
print(output.shape)

 

import torch.optim as optim

# Hyperparameters
learning_rate = 0.001
num_epochs = 100

# Loss and optimizer
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Fix: the original drew fresh random targets inside the loop, so every
# epoch optimized toward different noise and the loss could never settle.
# Sample the (synthetic) regression targets once so the demo actually fits.
targets = torch.rand(batch_size, output_dim, sequence_length)

# Training loop (full-batch: the single tensor `x` is the whole dataset)
for epoch in range(num_epochs):
    model.train()
    optimizer.zero_grad()

    outputs = model(x)
    loss = criterion(outputs, targets)

    loss.backward()
    optimizer.step()

    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
# Evaluation. RMSE/MAE are computed with torch ops instead of
# sklearn's mean_squared_error(..., squared=False): the `squared`
# keyword was deprecated in scikit-learn 1.4 and removed in 1.6,
# so the original call raises TypeError on current versions.
model.eval()
with torch.no_grad():
    predictions = model(x)
    # Random stand-in targets, as in the original example.
    targets = torch.rand(batch_size, output_dim, sequence_length)

    rmse = torch.sqrt(F.mse_loss(predictions, targets)).item()
    mae = F.l1_loss(predictions, targets).item()

    print(f'RMSE: {rmse:.4f}, MAE: {mae:.4f}')

  • 3
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

计算机毕设论文

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值