My CNN

import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
import pandas as pd
import numpy as np

def label_transform(label):
    # Convert one-hot label rows into integer class indices.
    num_example, num_label = label.shape
    label = label.tolist()
    label_new = np.zeros((num_example,), dtype=np.int64)
    for i in range(0, num_example):
        label_new[i] = label[i].index(1.0)
    return label_new
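
# Equivalent vectorised form (a sketch; assumes each row is strictly one-hot):
#   label_new = np.argmax(label, axis=1)
# e.g. np.argmax([[0., 1., 0., 0., 0.]], axis=1) -> array([1])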



def get_data(filename_train, filename_test):
    # Columns 0-4 hold the one-hot label, the remaining columns the features.
    data_train = pd.read_excel(filename_train).values
    data_test = pd.read_excel(filename_test).values
    x_train = data_train[0:500, 5:]
    y_train = data_train[0:500, 0:5]
    x_test = data_test[0:100, 5:]
    y_test = data_test[0:100, 0:5]

    # Reshape each sample into a single-channel 2 x W "image" for the 2D convolutions.
    x_train = x_train.reshape(x_train.shape[0], 1, 2, -1)
    x_test = x_test.reshape(x_test.shape[0], 1, 2, -1)
    y_train = label_transform(y_train)
    y_test = label_transform(y_test)
    return x_train, y_train, x_test, y_test
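
# Quick sanity check (a sketch; shapes depend on how many feature columns the Excel
# files actually contain):
#   x_tr, y_tr, x_te, y_te = get_data('compound_snr_15_train.xlsx', 'compound_snr_15_test.xlsx')
#   print(x_tr.shape, y_tr.shape)   # expected (500, 1, 2, W) and (500,)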

class MyCNN(nn.Module):
    
    def __init__(self):
        super(MyCNN, self).__init__()

        # in_channels: number of input channels
        # padding: (height, width)
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=50, kernel_size=(1,8), stride=1, padding=(0,2)),
            nn.BatchNorm2d(50),
            nn.ReLU(),             
        )

        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=50, out_channels=50, kernel_size=(2,8), stride=1, padding=(0,2)),
            nn.BatchNorm2d(50),
            nn.ReLU(),
        )
        # fully connected layers: flattened conv2 output -> 256 -> 5 classes
        self.mlp1 = nn.Linear(50*122,256)
        self.mlp2 = nn.Linear(256,5)

    def forward(self, x):
        """
        input: N * 3 * image_size * image_size
        output: N * num_classes
        """
        x = self.conv1(x)
        x = self.conv2(x)
        # view(x.size(0), -1): change tensor size from (N ,H , W) to (N, H*W)
        x = self.mlp1(x.view(x.size(0),-1))
        x = self.mlp2(x)
        return x
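
# Shape trace (a sketch, inferred from the 50*122 input size of mlp1; it implies each
# sample carries 256 feature columns, reshaped to 1 x 2 x 128):
#   conv1: (N, 1, 2, 128) -> (N, 50, 2, 125)   # width: 128 + 2*2 - 8 + 1
#   conv2: (N, 50, 2, 125) -> (N, 50, 1, 122)  # height: 2 - 2 + 1, width: 125 + 2*2 - 8 + 1
#   flatten -> (N, 50*122) -> mlp1 -> (N, 256) -> mlp2 -> (N, 5)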

model = MyCNN()
BATCH_SIZE = 100
#print(model)
# Load and preprocess the data
x_train,y_train,x_test,y_test = get_data('compound_snr_15_train.xlsx','compound_snr_15_test.xlsx')
x_train = torch.from_numpy(x_train).type(torch.FloatTensor)
y_train = torch.from_numpy(y_train).type(torch.LongTensor)
x_test = torch.from_numpy(x_test).type(torch.FloatTensor)
y_test = torch.from_numpy(y_test).type(torch.LongTensor)

train_set = Data.TensorDataset(x_train,y_train)
test_set = Data.TensorDataset(x_test,y_test)
train_loader = Data.DataLoader(
            dataset=train_set,
            batch_size=BATCH_SIZE,
            shuffle=True
            )
test_loader = Data.DataLoader(
            dataset=test_set,
            batch_size=BATCH_SIZE,
            shuffle=True
            )


loss_func = nn.CrossEntropyLoss()
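# Note (a sketch of the convention used here): nn.CrossEntropyLoss expects raw,
# unnormalised logits of shape (N, 5) plus integer class indices of shape (N,),
# which is why label_transform converts the one-hot rows to indices and the
# network has no final softmax.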
opt = torch.optim.Adam(model.parameters(),lr=0.001)
loss_count = []
for epoch in range(3):
    print('epoch =', epoch)
    for i, (batch_x, batch_y) in enumerate(train_loader):
        # Forward pass
        out = model(batch_x)            # shape: [BATCH_SIZE, 5]
        # Compute the loss
        loss = loss_func(out, batch_y)
        # Optimise
        opt.zero_grad()                 # clear gradients left over from the previous step
        loss.backward()                 # back-propagate the error
        opt.step()                      # apply the parameter update
        loss_count.append(loss.item())
        print('{}:\t'.format(i), loss.item())

        # Quick accuracy check on one test batch
        with torch.no_grad():
            for test_x, test_y in test_loader:
                out = model(test_x)
                # torch.max(out, 1)[1] is the index (predicted label) of the row-wise maximum
                accuracy = (torch.max(out, 1)[1].numpy() == test_y.numpy())
                print('accuracy:\t', accuracy.mean())
                break
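
# After training, a full pass over the whole test set could look like this
# (a minimal sketch; model.eval() switches BatchNorm to inference mode and
# torch.no_grad() disables gradient tracking):
model.eval()
correct, total = 0, 0
with torch.no_grad():
    for test_x, test_y in test_loader:
        pred = torch.max(model(test_x), 1)[1]
        correct += (pred == test_y).sum().item()
        total += test_y.size(0)
print('final test accuracy: {:.3f}'.format(correct / total))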
"""
plt.figure('PyTorch_CNN_Loss')
plt.plot(loss_count,label='Loss')
plt.legend()
plt.show()
"""

