CUDA 1D Convolution Implementation


A simple implementation of one-dimensional (1D) convolution in CUDA. Each thread computes one element of the output array P from the input array N and the convolution mask M.
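Concretely, for an input N of length Width and a mask M of length Mask_Width (odd here), each output element is

	P[i] = sum over j in [0, Mask_Width) of N[i - Mask_Width/2 + j] * M[j]

where any N index outside [0, Width) contributes zero (the "ghost cell" convention). This is exactly what the kernel below computes.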

#include <cuda_runtime.h>
#include <iostream>
using std::cout;
using std::endl;

// 1D convolution kernel: each thread computes one output element P[i].
// Input elements that fall outside the array (ghost cells) count as zero.
__global__ void convolution_1D_basic_kernel(int *N, int *M, int *P,
	int Mask_Width, int Width){
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	if (i >= Width) return;  // guard surplus threads

	int Pvalue = 0;  // accumulator; int to match the output array
	int N_start_point = i - (Mask_Width / 2);  // leftmost input index under the mask
	for (int j = 0; j < Mask_Width; j++){
		if (N_start_point + j >= 0 && N_start_point + j < Width){
			Pvalue += N[N_start_point + j] * M[j];
		}
	}
	P[i] = Pvalue;
}
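As a sanity check, the same computation can be done on the host and compared against the GPU result. A minimal sketch (the function name cpu_convolution_1D is my own, not from the original post):

// Host-side reference: the same zero-padded 1D convolution on the CPU,
// using the identical indexing scheme as the kernel.
void cpu_convolution_1D(const int *N, const int *M, int *P,
	int Mask_Width, int Width)
{
	for (int i = 0; i < Width; i++) {
		int Pvalue = 0;
		int N_start_point = i - (Mask_Width / 2);
		for (int j = 0; j < Mask_Width; j++) {
			if (N_start_point + j >= 0 && N_start_point + j < Width) {
				Pvalue += N[N_start_point + j] * M[j];
			}
		}
		P[i] = Pvalue;
	}
}

The host driver below allocates device buffers, copies the mask and input over, launches one thread per output element, and copies the result back.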

int main()
{
	const int M[5] = { 3, 4, 5, 4, 3 };        // convolution mask
	const int N[7] = { 1, 2, 3, 4, 5, 6, 7 };  // input array
	int Mask_Width = 5;
	int Width = 7;
	int P[7] = { 0 };                          // output array

	int *dev_M = 0;
	int *dev_N = 0;
	int *dev_P = 0;

	// Allocate device memory
	cudaMalloc((void**)&dev_M, Mask_Width * sizeof(int));
	cudaMalloc((void**)&dev_N, Width * sizeof(int));
	cudaMalloc((void**)&dev_P, Width * sizeof(int));

	// Copy mask and input to the device
	cudaMemcpy(dev_M, M, Mask_Width * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_N, N, Width * sizeof(int), cudaMemcpyHostToDevice);

	// One block, one thread per output element
	convolution_1D_basic_kernel<<<1, Width>>>(dev_N, dev_M, dev_P, Mask_Width, Width);

	// Copy the result back and print it
	cudaMemcpy(P, dev_P, Width * sizeof(int), cudaMemcpyDeviceToHost);
	for (int i = 0; i < Width; i++)
	{
		cout << P[i] << " ";
	}
	cout << endl;

	cudaFree(dev_M);
	cudaFree(dev_N);
	cudaFree(dev_P);

	return 0;
}
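With the inputs above, the program prints:

	22 38 57 76 95 90 74

For example, P[0] covers input positions -2..2; the two ghost cells on the left contribute zero, leaving 1*5 + 2*4 + 3*3 = 22.

The launch configuration <<<1, Width>>> only works while Width fits in a single block (at most 1024 threads on current GPUs). For larger inputs, a common pattern is a fixed block size with the grid size rounded up; a sketch under that assumption:

	// Hypothetical launch for large Width: fixed block size, grid rounded up
	// so every output element gets a thread. The kernel's bounds check makes
	// the surplus threads in the last block harmless.
	int threadsPerBlock = 256;
	int blocks = (Width + threadsPerBlock - 1) / threadsPerBlock;
	convolution_1D_basic_kernel<<<blocks, threadsPerBlock>>>(dev_N, dev_M, dev_P, Mask_Width, Width);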


