Why has my loss stopped decreasing, and how can I get it to go down?

import numpy as np
import pandas as pd
import torch
from torch import nn
from torch.utils.data import TensorDataset, Dataset, DataLoader, random_split
import torch.utils.data as Data
from torch.optim import lr_scheduler
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error

import matplotlib.pyplot as plt



ct = 1e12 * 375e-9 / 3e8   # unit-conversion factor, only used in the commented-out lb scalings below


data = pd.read_csv("i.csv")
data_x = data.iloc[:, 0:43]   # first 43 columns are the network inputs



data_x = np.array(data_x)
x = torch.FloatTensor(data_x)
print(x)

data_y = data.iloc[:, 43:84]   # remaining 41 columns are the targets
data_y = np.array(data_y)
y = torch.FloatTensor(data_y)
print(y)
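One common reason for a plateauing loss is unscaled inputs: in the hand-built sample rows near the end of this script, the second column is around 7-9 while every other feature lies in [0.2, 0.6], so the first linear layer sees very different scales per feature. MinMaxScaler is imported above but never applied; a minimal sketch (fit on the full matrix for brevity; fitting on the training split only would be stricter):

# Sketch: rescale each input column to [0, 1] before building the tensor.
scaler = MinMaxScaler()
x = torch.FloatTensor(scaler.fit_transform(data_x))

If adopted, the hand-built validation rows (the `a` tensors near the end) would need scaler.transform() applied to them as well.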




# The network we use
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(43, 50)
        self.fc2 = nn.Linear(50, 150)
        self.fc8 = nn.Linear(150, 50)
        self.fc9 = nn.Linear(50, 41)
        self.dropout = nn.Dropout(p=0.6)  # defined but never applied in forward()
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.fc8(x)
        x = self.sigmoid(x)  # a sigmoid this deep can saturate and shrink gradients
        x = self.fc9(x)
        return x

net=Net()
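If the loss stalls early, one suspect is the sigmoid between fc8 and fc9: once its pre-activations grow large it saturates and passes almost no gradient back. A minimal all-ReLU variant to compare against (NetRelu is a hypothetical name, a sketch rather than a recommended architecture):

# Sketch: same layer sizes, ReLU throughout instead of the mid-network sigmoid.
class NetRelu(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(43, 50)
        self.fc2 = nn.Linear(50, 150)
        self.fc8 = nn.Linear(150, 50)
        self.fc9 = nn.Linear(50, 41)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        x = self.relu(self.fc8(x))
        return self.fc9(x)

Swapping net = Net() for net = NetRelu() is then a one-line change for an A/B comparison.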





from sklearn.model_selection import train_test_split

# Note: random_split and train_test_split both produce an 80/20 split here;
# only the train_test_split result is actually used by the loaders below.
data_TD = TensorDataset(x, y)
n_train = int(len(data_TD) * 0.8)
n_val = len(data_TD) - n_train
train_dataset, test_dataset = random_split(data_TD, [n_train, n_val])
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)



train_data = Data.TensorDataset(X_train, y_train)
test_data = Data.TensorDataset(X_test, y_test)
train_loader = DataLoader(dataset=train_data,
                          batch_size=int(n_train / 2),  # two large batches per epoch
                          shuffle=False,                # batches identical every epoch
                          num_workers=0)

test_loader = DataLoader(dataset=test_data,
                         batch_size=int(n_val / 2),
                         shuffle=False,
                         num_workers=0)
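Two more things worth checking are visible in the loaders above: shuffle=False means every epoch sees the same two batches in the same order, and a batch size of half the split gives only two optimizer steps per epoch. A sketch of a training loader that shuffles and takes many smaller steps (batch_size=32 is an arbitrary illustrative value), which could replace the one above:

# Sketch: reshuffle each epoch and take many smaller gradient steps.
train_loader = DataLoader(dataset=train_data,
                          batch_size=32,   # illustrative, not tuned
                          shuffle=True,
                          num_workers=0)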








optimizer = torch.optim.Adam(net.parameters(), lr=0.001)

scheduler1 = lr_scheduler.StepLR(optimizer, step_size=1000, gamma=0.1)
loss_func = torch.nn.MSELoss()  # mean squared error

train_loss_all = []
test_loss_all = []
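Note that scheduler1 above is never stepped (the call is commented out in the loop below), so the learning rate stays at 0.001 for all 8500 epochs. If the intent is to cut the LR once progress stalls, ReduceLROnPlateau is the usual tool; a sketch with arbitrary, untuned values:

# Sketch: halve the LR after 100 epochs without test-loss improvement.
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                           factor=0.5, patience=100)
# then, once per epoch after the epoch's test loss is known:
# scheduler.step(test_loss_all[-1])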


epochs = 8500
for epoch in range(epochs):
    train_loss = 0
    test_loss = 0
    train_num = 0
    test_num = 0
    net.train()
    for xb, yb in train_loader:
        prediction = net(xb)
        loss = loss_func(prediction, yb)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item() * xb.size(0)
        train_num += xb.size(0)
    # scheduler1.step()  # scheduler is defined above but never stepped

    net.eval()
    with torch.no_grad():  # no gradients needed during evaluation
        for xb, yb in test_loader:
            prediction = net(xb)
            loss = loss_func(prediction, yb)
            test_loss += loss.item() * xb.size(0)
            test_num += xb.size(0)

    train_loss_all.append(train_loss / train_num)
    test_loss_all.append(test_loss / test_num)
    if epoch % 10 == 0:
        print('{} Train Loss: {:.4f}'.format(epoch, train_loss_all[-1]))
        print('{} Test Loss: {:.4f}'.format(epoch, test_loss_all[-1]))




plt.plot(range(len(train_loss_all)), train_loss_all)
plt.xlabel("epochs")
plt.ylabel("train_loss")
plt.ylim(0, 0.1)
plt.show()

plt.plot(range(len(test_loss_all)), test_loss_all)
plt.xlabel("epochs")
plt.ylabel("test_loss")
plt.ylim(0, 0.1)
plt.show()
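To judge over- or underfitting it is easier to see both curves on one set of axes:

# Sketch: overlay the two loss curves for direct comparison.
plt.plot(train_loss_all, label="train loss")
plt.plot(test_loss_all, label="test loss")
plt.xlabel("epochs")
plt.ylabel("MSE loss")
plt.legend()
plt.show()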


torch.save(net.state_dict(), 'net_parms.pkl')
net.eval()
# Reloading: since only the state_dict was saved, load it back with
# net.load_state_dict(torch.load('net_parms.pkl'))
# (torch.load('net.pkl') would only work if the whole model object had been saved.)

# Sanity check against a training-set sample
a=torch.tensor([[0.28,9,0.2,0.21,0.22,0.23,0.24,0.25,0.26,0.27,0.28,0.29,0.3,0.31,0.32,0.33,0.34,0.35,0.36,0.37,0.38,0.39,0.4,0.41,0.42,0.43,0.44,0.45,0.46,0.47,0.48,0.49,0.5,0.51,0.52,0.53,0.54,0.55,0.56,0.57,0.58,0.59,0.6]])
lb=[0.99974,1.0002,1.0002,0.99919,0.99805,0.99731,0.98953,0.88293,1.3405E-16,2.8765E-17,3.2841E-17,1.3143E-16,2.2757E-15,0.00000000000030198,-0.00000000044563,-0.0000043372,0.99962,1.0002,0.99933,0.99919,0.99903,0.99881,0.99867,0.99859,0.99846,0.9985,0.99501,0.99638,0.73392,0.038623,0.00000000000009119,1.9015E-16,6.2448E-18,1.9267E-18,5.1691E-18,1.5637E-16,0.00000000000012239,0.000000049522,0.9945,0.99756,0.98833]
#lb = np.array(lb)*ct
b=torch.FloatTensor(lb)
pre_a=net(a)

# a has 43 input columns and the net has 41 outputs, so size(1)-2 gives the x-axis length
plt.scatter(np.arange(a.size(1)-2), pre_a.detach().numpy().squeeze(), s=3, c="b", label="prediction")
plt.scatter(np.arange(a.size(1)-2), b.numpy(), s=3, c="r", label="original")
plt.legend()

plt.show()


# Sanity check against a test-set sample
a=torch.tensor([[0.35,7,0.2,0.21,0.22,0.23,0.24,0.25,0.26,0.27,0.28,0.29,0.3,0.31,0.32,0.33,0.34,0.35,0.36,0.37,0.38,0.39,0.4,0.41,0.42,0.43,0.44,0.45,0.46,0.47,0.48,0.49,0.5,0.51,0.52,0.53,0.54,0.55,0.56,0.57,0.58,0.59,0.6]])
lb=[1.0004,1,0.99978,0.9988,0.99554,0.99433,0.99113,0.98465,0.98003,-0.00000000043991,-0.0000000042174,-0.0000011082,-0.82648,1.0008,1.0007,1.0005,1.0003,1.0003,1.0003,0.99983,0.99975,0.99904,0.99878,0.99832,0.99723,0.99736,0.98964,0.92768,0.0038562,0.000000000000031216,1.1232E-16,4.2977E-18,8.8676E-19,8.5645E-19,0.000000000011323,0.0051247,0.015349,0.046698,0.98918,0.98608,0.97964]
#lb = np.array(lb)*ct
b=torch.FloatTensor(lb)
pre_a=net(a)

plt.scatter(np.arange(a.size(1)-2), pre_a.detach().numpy().squeeze(), s=3, c="b", label="prediction")
plt.scatter(np.arange(a.size(1)-2), b.numpy(), s=3, c="r", label="original")
plt.legend()

plt.show()
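mean_absolute_error is imported at the top but never used; as a final summary it can quantify the fit on the held-out split:

# Sketch: report MAE over the whole held-out test split.
net.eval()
with torch.no_grad():
    test_pred = net(X_test)
print("test MAE:", mean_absolute_error(y_test.numpy(), test_pred.numpy()))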