Old code example

 
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
import os
import matplotlib.pyplot as plt
import torchvision.models as models
# This is for the progress bar.
from tqdm import tqdm
import seaborn as sns

import torch.nn.functional as F
import torch.optim as optim




# Subclass PyTorch's Dataset to build a custom dataset
class LcData(Dataset):  # Lc = flow-field data
    def __init__(self, train_in_path, train_labels_path, test_in_path, test_labels_path,
                 mode='train', resize_channel=9, resize_height=40, resize_width=25):
        """
        Args:
            train_in_path / train_labels_path: CSV files with the training inputs and labels.
            test_in_path / test_labels_path: CSV files with the test inputs and labels.
            mode: 'train' loads the training CSVs; any other value loads the test CSVs.
            resize_channel, resize_height, resize_width: target sample shape (C, H, W).
        """

        # Target shape that each sample is reshaped to
        self.resize_channel = resize_channel
        self.resize_height = resize_height
        self.resize_width = resize_width
        
        self.train_in_path=train_in_path
        self.train_labels_path = train_labels_path
        
        self.test_in_path=test_in_path
        self.test_labels_path=test_labels_path
        
        self.mode = mode
        
        # Load the data, split into training and test sets
        if mode == 'train':
            self.data_in = pd.read_csv(self.train_in_path, header=0)  # header=0: the first row is the column header
            self.data_la = pd.read_csv(self.train_labels_path, header=0)
        else:
            self.data_in = pd.read_csv(self.test_in_path, header=0)  # header=0: the first row is the column header
            self.data_la = pd.read_csv(self.test_labels_path, header=0)
        
        # Arrange the data into tensors of the desired shape
        self.inputs_data = np.asarray(self.data_in.iloc[:, 2])
        self.labels_data = np.asarray(self.data_la.iloc[:, 2])
        # Cast to float32 so the tensors match the model's default parameter dtype
        self.inputs = torch.tensor(self.inputs_data, dtype=torch.float32).reshape(-1, resize_channel, resize_height, resize_width)
        self.labels = torch.tensor(self.labels_data, dtype=torch.float32).reshape(-1, resize_height, resize_width)
        # Dataset length = number of samples after reshaping (not the number of CSV rows)
        self.data_len = self.inputs.shape[0]
        
                
    def __getitem__(self, x):
        # Called automatically whenever the dataset is indexed (e.g. by the DataLoader loop)
        inputs = self.inputs[x, :, :, :]
        label = self.labels[x, :, :]
        return (inputs, label)
             
       
    def __len__(self):
        return self.data_len


# In[3]:


# Instantiate the datasets
train_in_path = 'E:/Desktop/train_inputs.csv'
train_labels_path = 'E:/Desktop/train_lables.csv'
test_in_path = 'E:/Desktop/test_inputs.csv'
test_labels_path = 'E:/Desktop/test_labels.csv'
train_dataset = LcData(train_in_path, train_labels_path, test_in_path, test_labels_path, mode='train')
# Any mode other than 'train' loads the test CSVs
test_dataset = LcData(train_in_path, train_labels_path, test_in_path, test_labels_path, mode='valid')


# # Preview part of the prepared data
# print(train_dataset[1])
# print(test_dataset.data_la.head(5))
# print(len(train_dataset))
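# Quick sanity check (a sketch; sample_in/sample_la are illustrative names, and it assumes
# the CSVs store one flattened value per row): each sample should be a (9, 40, 25) float
# input paired with a (40, 25) float label.
sample_in, sample_la = train_dataset[0]
print(len(train_dataset), sample_in.shape, sample_la.shape)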

# In[4]:


# Define the DataLoaders
train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=1)


test_loader = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=1)
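
# One-batch shape check (a sketch, left commented out: on Windows a DataLoader with
# num_workers > 0 should only be iterated under the __main__ guard). With batch_size=1
# it should yield inputs of shape (1, 9, 40, 25) and labels of shape (1, 40, 25);
# xb/yb are just illustrative names.
#for xb, yb in train_loader:
#    print(xb.shape, yb.shape)
#    break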


# In[5]:

'''
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1=torch.nn.Conv2d(9,18,kernel_size=3)
        self.conv2=torch.nn.Conv2d(18,32,kernel_size=(2,3))
        self.pooling=torch.nn.MaxPool2d(2)
        self.fc=torch.nn.Linear(768,1000)

    def forward(self,x):
        x=F.relu(self.pooling(self.conv1(x)))
        x=F.relu(self.pooling(self.conv2(x)))
        x=x.view(1,-1)
        x=self.fc(x)
        x=x.view(100,10)
        return x
'''

# Simple MLP: flatten each (9, 40, 25) input to 9*40*25 = 9000 features
# and regress the 40*25 = 1000 values of the label field
net = nn.Sequential(nn.Flatten(),
                    nn.Linear(9 * 40 * 25, 256),
                    nn.ReLU(),
                    nn.Linear(256, 40 * 25))

model = net  # nn.Sequential already returns a model instance
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
criterion = torch.nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
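
# Forward-pass shape check (a sketch; dummy is an illustrative name): a zero batch of
# shape (1, 9, 40, 25) should come out as (1, 1000), matching a flattened 40x25 label field.
dummy = torch.zeros(1, 9, 40, 25, device=device)
print(model(dummy).shape)  # expected: torch.Size([1, 1000])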


# In[7]:


def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 1):
        inputs, labels = data     # inputs holds the features, labels the target field
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()

        outputs = model(inputs)
        # print(outputs.shape)    # debug: inspect the output shape

        # Flatten the (B, 40, 25) labels to (B, 1000) so they match the network output
        loss = criterion(outputs, labels.flatten(1))
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if batch_idx % 2 == 0:
            print('[%d,%5d] loss:%.4f' % (epoch + 1, batch_idx, running_loss))
            running_loss = 0.0
'''
def predict():
    predict_list = []
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            predict_list.append(outputs)
    return predict_list
'''

# In[ ]:


if __name__ == '__main__':
    epoch_list = []
    loss_list = []

    for epoch in range(10):
        train(epoch)
        #epoch_list.append(epoch)
        #loss_list.append(loss)

    #plt.plot(epoch_list, loss_list)
    #plt.ylabel('loss')
    #plt.xlabel('epoch')
    #plt.show()
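
    # Rough post-training evaluation (a sketch reusing the objects defined above): run the
    # test set without gradients and compare predictions, reshaped back to the (40, 25)
    # field, against the labels.
    model.eval()
    with torch.no_grad():
        test_loss = 0.0
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            preds = model(images).reshape(labels.shape)
            test_loss += criterion(preds, labels).item()
        print('mean test MSE: %.4f' % (test_loss / len(test_loader)))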

 

 
