sofasofa-形状识别:是方还是圆

题目地址:sofasofa-6

卷积神经网络(Pytorch, Python)

跟官方答案keras思路一样,简化了一下,毕竟我太菜了。答案里把训练集30%用作测试集,还画了图,我都省略了。

注意点

torch和numpy的格式不一样,numpy转torch需要torch数组=torch.from_numpy(numpy数组),转化完之后加.float(),因为发现神经网络的参数类型和训练数据一样的,本来是long,weight也会变成Long,然后报错说weight期望的数据类型是long结果竟然是float。文中对应

#将numpy格式转换为torch格式,并且要变成float,不然说weight期望是long,却是float的,报错
   train_x,train_y=torch.from_numpy(train_x).float(),torch.from_numpy(train_y).float()

主要思路

建网络

def build_model():#input: single-channel 40x40 images; output: sigmoid score in (0,1)
    net=nn.Sequential(#pooling sizes below use floor division#data shape (n,channels,height,width)
        nn.Conv2d(in_channels=1,out_channels=8,kernel_size=5),#40-5+1=36
        nn.ReLU(),
        nn.Conv2d(in_channels=8,out_channels=16,kernel_size=3),#36-3+1=34
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=4),#floor(34/4)=8
        nn.Conv2d(16,16,3),#8-3+1=6
        nn.ReLU(),
        nn.MaxPool2d(4),#floor(6/4)=1
        nn.Flatten(),#16 channels x 1 x 1 -> 16 features
        nn.Linear(16,128),
        nn.Dropout(0.5),
        nn.Linear(128,1),
        nn.Sigmoid()
    )
    return net

dataset和dataloader处理

train_x和train_y之前都经过处理了,这里省略,都变成了numpy类型的矩阵。

#将numpy格式转换为torch格式,并且要变成float,不然说weight期望是long,却是float的,报错
    train_x,train_y=torch.from_numpy(train_x).float(),torch.from_numpy(train_y).float()
    #简易的创建一个torch格式的dataset,网上的都是写类创建的,当然这个函数的实现也是写类
    torch_dataset=Data.TensorDataset(train_x,train_y)
    data_loader=Data.DataLoader(
        dataset=torch_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=2,
    )

训练和预测

optimizer=torch.optim.Adam(model.parameters(),lr=0.001)
    # training and testing
    for epoch in range(epochs):
        for step,(b_x,b_y) in enumerate(data_loader):   # 分配 batch data, normalize x when iterate train_loader
            output = model(b_x)               # cnn output
            loss=F.binary_cross_entropy(output,b_y)  # cross entropy loss
            optimizer.zero_grad()           # clear gradients for this training step
            loss.backward()                 # backpropagation, compute gradients
            optimizer.step()                # apply gradients
    
    test=torch.from_numpy(test).float()
    pred=model(test).detach().numpy()#报错后就这么提醒我的,加.detach().numpy()

完整代码

import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as Data

def load_train_test_data(train,test):
    """Shuffle the raw training matrix once, split off labels, reshape both sets.

    train: numpy array whose last column is the 0/1 label.
    test:  numpy array of flat pixel rows (no label column).
    Returns (train_images, labels, test_images) where images have shape
    (n, 1, 40, 40).
    """
    # Shuffle exactly once, BEFORE extracting labels, so rows and labels stay
    # aligned. The original shuffled here and then again inside
    # data_modify_suitable_train(train, True), which silently misaligned
    # images and labels.
    np.random.shuffle(train)
    labels = train[:, -1]

    # Strip the label column ourselves and take the type=False (no-shuffle)
    # path of the helper for both sets.
    data = data_modify_suitable_train(train[:, :-1], False)
    data_test = data_modify_suitable_train(test, False)
    return data, labels, data_test

def data_modify_suitable_train(data_set=None, type=True):
    """Reshape flat 1600-pixel rows into (1, 40, 40) single-channel images.

    type=True:  data_set is the raw training matrix whose LAST column is the
                label; that column is stripped before reshaping.
    type=False: data_set already contains pure pixel rows.
    Returns a numpy array of shape (n, 1, 40, 40).

    NOTE: the original re-shuffled data_set here (type=True path) AFTER the
    caller had already extracted the labels, which misaligned images and
    labels — that shuffle is removed; callers shuffle before splitting.
    """
    if data_set is None:
        # Original fell through to a NameError on `data`; fail loudly instead.
        raise ValueError("data_set must not be None")

    data = data_set[:, :-1] if type else np.asarray(data_set)
    # One reshape per row: flat 1600 -> (channel=1, 40, 40).
    return np.array([np.reshape(row, (1, 40, 40)) for row in data])

def f1(y_true, y_pred):
    """F1 score for binary labels/probabilities.

    Numpy port of the Keras-backend version: the original referenced an
    undefined name `K` (keras.backend was never imported) and raised
    NameError when called. Predictions are clipped to [0, 1] and rounded,
    so probabilities >= 0.5 count as positive.

    Returns a float in [0, 1].
    """
    eps = 1e-7  # keeps every division finite, mirroring K.epsilon()
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)

    true_positives = np.sum(np.round(np.clip(y_true * y_pred, 0, 1)))
    possible_positives = np.sum(np.round(np.clip(y_true, 0, 1)))
    predicted_positives = np.sum(np.round(np.clip(y_pred, 0, 1)))

    recall = true_positives / (possible_positives + eps)
    precision = true_positives / (predicted_positives + eps)
    # eps in the denominator also covers precision == recall == 0, where the
    # original formula divided by zero.
    return 2 * (precision * recall) / (precision + recall + eps)

def build_model():
    """Build the CNN for single-channel 40x40 inputs.

    Three conv stages shrink the spatial size 40 -> 36 -> 34 -> 8 -> 6 -> 1
    (valid convolutions, floor-divided max-pooling), leaving 16 features for
    a small fully-connected head that ends in a Sigmoid, so the output is a
    score in (0, 1) of shape (n, 1).
    """
    layers = [
        nn.Conv2d(1, 8, 5),    # 40 - 5 + 1 = 36
        nn.ReLU(),
        nn.Conv2d(8, 16, 3),   # 36 - 3 + 1 = 34
        nn.ReLU(),
        nn.MaxPool2d(4),       # floor(34 / 4) = 8
        nn.Conv2d(16, 16, 3),  # 8 - 3 + 1 = 6
        nn.ReLU(),
        nn.MaxPool2d(4),       # floor(6 / 4) = 1
        nn.Flatten(),          # 16 x 1 x 1 -> 16 features
        nn.Linear(16, 128),
        nn.Dropout(0.5),
        nn.Linear(128, 1),
        nn.Sigmoid(),
    ]
    return nn.Sequential(*layers)

def train_model(train,test,batch_size=64,epochs=10,model=None):
    """Train the CNN on `train` and return sigmoid scores for `test`.

    train: raw numpy training matrix (last column = label).
    test:  raw numpy test matrix (pixels only).
    model: optional pre-built network; a fresh one is created when None.
    Returns a numpy array of shape (n_test, 1) with values in (0, 1).
    """
    train_x, train_y, test = load_train_test_data(train, test)
    if model is None:
        model = build_model()

    # numpy -> float32 tensors. The target is reshaped to (n, 1) so it
    # matches the network's output shape; binary_cross_entropy raises on a
    # (n,) target vs. a (n, 1) input in current PyTorch.
    train_x = torch.from_numpy(train_x).float()
    train_y = torch.from_numpy(train_y).float().reshape(-1, 1)
    # TensorDataset is the lightweight way to wrap in-memory tensors.
    torch_dataset = Data.TensorDataset(train_x, train_y)
    data_loader = Data.DataLoader(
        dataset=torch_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=2,
    )

    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    model.train()  # enable dropout while optimizing
    for epoch in range(epochs):
        for b_x, b_y in data_loader:
            output = model(b_x)                         # (batch, 1) scores
            loss = F.binary_cross_entropy(output, b_y)
            optimizer.zero_grad()   # clear gradients from the previous step
            loss.backward()         # backpropagate
            optimizer.step()        # apply gradients

    # Inference: the original predicted with dropout still active and
    # gradients tracked; switch to eval mode and disable autograd.
    model.eval()
    with torch.no_grad():
        pred = model(torch.from_numpy(test).float()).numpy()
    return pred

if __name__ == '__main__':
    # Load the raw CSVs and drop the id column before handing the numeric
    # matrices to the trainer.
    train_df, test_df = pd.read_csv('train.csv'), pd.read_csv('test.csv')
    train_arr = np.array(train_df.drop('id', axis=1))
    test_arr = np.array(test_df.drop('id', axis=1))

    scores = train_model(train_arr, test_arr)
    # Threshold the sigmoid scores into hard 0/1 class labels.
    labels = (scores > 0.5).astype(int)
    submit = pd.read_csv('sample_submit.csv')
    submit['y'] = labels
    submit.to_csv('Pytorch_my_CNN_prediction.csv', index=False)
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值