Python: multivariate time-series forecasting with an LSTM, predicting ocean salinity (S) and temperature (T). The values I predict come out empty, and I need a detailed walkthrough of the prediction process. Data source: http://mds.nmdis.org.cn/pages/dataViewDetail.html?dataSetId=18
import os
import pandas as pd

path = r"C:\Users\Asus\Desktop\毕设\data\new"  # directory containing the CSV files
# os.listdir() returns a list of the file/folder names inside `path`
FileNames = os.listdir(path)
df = []
for fn in FileNames:
    fullfilename = os.path.join(path, fn)
    df.append(pd.read_csv(fullfilename, encoding='utf-8', index_col=None))
data = pd.concat(df, ignore_index=True)
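Before going further it is worth confirming the CSVs were actually read; if `path` is wrong or a file fails to parse, everything downstream quietly produces nothing. A quick sanity check (a minimal sketch, prints only):
# Sanity check: confirm files were found and rows were loaded
print(len(FileNames), "files found,", data.shape[0], "rows loaded")
assert data.shape[0] > 0, "no rows loaded - check `path` and the CSV format"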
data = data[['Station', 'date', 'lat', 'lon', 'SampleDepth', 'T', 'S']]
data["date"] = pd.to_datetime(data["date"], format='%m/%d/%Y')
# Keep only stations with at least 100 records
SAT = data['Station'].value_counts()
n_SAT = pd.DataFrame({'counts': SAT})  # robust to the pandas >= 2.0 rename of the value_counts column
n_SAT = n_SAT[n_SAT['counts'] >= 100]
n_SAT['Station'] = n_SAT.index
n_SAT
n_data = []
for i in n_SAT['Station']:
    n_data.append(data.loc[data["Station"] == i])
data0 = pd.concat(n_data)
# Combine lat/lon into a single 'location' key
data0['location'] = data0.apply(lambda x: str(x['lat']) + " " + str(x['lon']), axis=1)
# Prepare the data: average duplicate measurements per (date, location)
data0 = data0.sort_values(by='date')
b = data0.groupby(['date', 'location']).mean(numeric_only=True)  # avoid TypeError on non-numeric columns in newer pandas
# Unpack the (date, location) MultiIndex back into plain columns
b['indexs'] = b.index
ls = list(b['indexs'])
bd = []  # date strings
bl = []  # location strings
for i in range(0, len(ls)):
    d = ls[i]  # a (Timestamp, 'lat lon') tuple
    for j in range(0, len(d)):
        if j == 0:
            n = str(d[j])
            bd.append(n.split(' ')[0])  # keep only the YYYY-MM-DD part of the timestamp
        else:
            bl.append(d[j])
b['location'] = bl
b['date'] = bd
T_data = b[['date', 'lat', 'lon', 'SampleDepth', 'T']]
T_data = T_data.set_index('date')
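Observational ocean profiles often contain missing T values; NaNs survive MinMaxScaler, turn the training loss into NaN, and make every prediction NaN, which can look like an "empty" result. A minimal guard, assuming rows with missing values can simply be dropped:
# Guard against NaNs: they would make the loss and all predictions NaN
print("NaNs per column:\n", T_data.isna().sum())
T_data = T_data.dropna()
print("rows after dropna:", len(T_data))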
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from sklearn.preprocessing import MinMaxScaler
# Note: keras.wrappers.scikit_learn has been removed from recent Keras releases;
# on newer installs use `from scikeras.wrappers import KerasRegressor` instead.
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import GridSearchCV
# Split into training and test sets (hold out the last 20% for testing);
# use the computed split instead of a hard-coded row count like 5828
test_split = round(len(T_data) * 0.20)
df_for_training = T_data[:-test_split]
df_for_testing = T_data[-test_split:]
print(df_for_training.shape)
print(df_for_testing.shape)
# Scale features to [0, 1] with MinMaxScaler; fit on the training set only to avoid leakage
scaler = MinMaxScaler(feature_range=(0, 1))
df_for_training_scaled = scaler.fit_transform(df_for_training)
df_for_testing_scaled = scaler.transform(df_for_testing)
df_for_training_scaled
def createXY(dataset, n_past):
    # X: the previous n_past rows of all features; y: the current T value.
    # T is column index 3 of T_data (lat, lon, SampleDepth, T) -- using
    # column 0 would make the model predict scaled latitude instead.
    dataX = []
    dataY = []
    for i in range(n_past, len(dataset)):
        dataX.append(dataset[i - n_past:i, 0:dataset.shape[1]])
        dataY.append(dataset[i, 3])
    return np.array(dataX), np.array(dataY)
trainX,trainY=createXY(df_for_training_scaled,30)
testX,testY=createXY(df_for_testing_scaled,30)
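The most likely cause of the empty prediction is right here: createXY only emits samples when len(dataset) > n_past, so if the test set has 30 rows or fewer, testX comes back with zero samples and model.predict(testX) later returns an empty array. Checking the shapes immediately makes that failure visible:
# An empty testX here means predict() will return an empty array later
print("trainX:", trainX.shape, "trainY:", trainY.shape)
print("testX:", testX.shape, "testY:", testY.shape)
assert testX.shape[0] > 0, "test set has <= 30 rows, so no windows were created"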
def build_model(optimizer):
    grid_model = Sequential()
    grid_model.add(LSTM(50, return_sequences=True, input_shape=(30, 4)))  # 30 timesteps x 4 features
    grid_model.add(LSTM(50))
    grid_model.add(Dropout(0.2))
    grid_model.add(Dense(1, activation='linear'))
    grid_model.compile(loss='mse', optimizer=optimizer)
    return grid_model
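If the KerasRegressor wrapper below errors out on a newer Keras install (it was removed upstream), the same network can be trained directly without the grid search. A minimal fallback sketch, left commented out, assuming epochs=45 and batch_size=20 from the search space below:
# Fallback: train the model directly, bypassing the scikit-learn wrapper
# model = build_model('adam')
# history = model.fit(trainX, trainY, epochs=45, batch_size=20,
#                     validation_data=(testX, testY), verbose=1)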
grid_model = KerasRegressor(build_fn=build_model,verbose=1,validation_data=(testX,testY))
parameters = {'batch_size' : [18,20],
'epochs' : [45,50],
'optimizer' : ['adam'] }
grid_search = GridSearchCV(estimator = grid_model,
param_grid = parameters,
cv = 2)
grid_search = grid_search.fit(trainX,trainY)
# Retrieve the best hyperparameters found by the grid search
grid_search.best_params_
my_model = grid_search.best_estimator_.model
# Run the trained model on the test set
prediction = my_model.predict(testX)
print("prediction\n", prediction)
print("\nPrediction Shape-", prediction.shape)