Python 3.8 environment

pip install tensorflow==2.6.2 Keras==2.6.0 protobuf==3.20.1 scikit-learn torch==1.7.0+cpu torchvision==0.8.1+cpu torchaudio===0.7.0 numpy==1.20.3 gensim==4.2.0 pillow pandas jieba tqdm matplotlib -i https://pypi.tuna.tsinghua.edu.cn/simple --trusted-host pypi.tuna.tsinghua.edu.cn

 

For the full step-by-step tutorial, see:
https://blog.csdn.net/mqdlff_python/article/details/135490706

For Python 3.8, install package by package:
pip install tensorflow==2.6.2 Keras==2.6.0 -i https://pypi.tuna.tsinghua.edu.cn/simple --trusted-host pypi.tuna.tsinghua.edu.cn
pip install protobuf==3.20.1 -i https://pypi.tuna.tsinghua.edu.cn/simple --trusted-host pypi.tuna.tsinghua.edu.cn
pip install scikit-learn -i https://pypi.tuna.tsinghua.edu.cn/simple --trusted-host pypi.tuna.tsinghua.edu.cn
pip install torch==1.7.0+cpu torchvision==0.8.1+cpu torchaudio===0.7.0 -f https://download.pytorch.org/whl/torch_stable.html 
pip install transformers==4.13.0 -i https://pypi.tuna.tsinghua.edu.cn/simple 
pip install --upgrade nni --ignore-installed -i https://pypi.tuna.tsinghua.edu.cn/simple 
pip install easydict -i https://pypi.tuna.tsinghua.edu.cn/simple --trusted-host pypi.tuna.tsinghua.edu.cn
pip install numpy==1.20.3 -i https://pypi.tuna.tsinghua.edu.cn/simple
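
As a quick sanity check after installing (a minimal sketch: it only prints the versions of the packages pinned above and assumes they import cleanly):

# Verify that the pinned versions from the install commands above are active.
import tensorflow as tf
import keras
import torch
import numpy as np
import gensim
import sklearn

print("tensorflow:", tf.__version__)     # expected 2.6.2
print("keras:", keras.__version__)       # expected 2.6.0
print("torch:", torch.__version__)       # expected 1.7.0+cpu
print("numpy:", np.__version__)          # expected 1.20.3
print("gensim:", gensim.__version__)     # expected 4.2.0
print("sklearn:", sklearn.__version__)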

# The section below is optional reference material --------------------------------------------------------------------------------------------------------------------------------------------
    # model = Word2Vec(LineSentence(open('word2vec_txt.txt', 'r', encoding='utf-8')), sg=0, vector_size=64, window=8, min_count=2, workers=4)
    # Save the trained model
    # model.save('word2vec.model')
    # Load the word vectors from the saved model (recommended)

    model_vec = gensim.models.Word2Vec.load('word2vec.model')
    dic = model_vec.wv.index_to_key
    # print(dic)
    print(len(dic))                                   # vocabulary size
    print(model_vec.wv['痔疮'])                        # vector of a single word
    print(model_vec.wv.most_similar('痔疮', topn=2))   # the two most similar words
    print(query_list[:10])                            # query_list is defined elsewhere in the surrounding script
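
As a follow-up to the loading snippet above, a minimal sketch (word2vec.model is the file saved above; the jieba tokenisation and the example query string are assumptions for illustration) of turning a raw query into per-token vectors for a downstream model:

# Tokenise a query with jieba, then look up each in-vocabulary token in the Word2Vec model.
import jieba
import numpy as np
import gensim

model_vec = gensim.models.Word2Vec.load('word2vec.model')
tokens = jieba.lcut('痔疮怎么治疗')  # example query text
vectors = [model_vec.wv[t] for t in tokens if t in model_vec.wv.key_to_index]
print(np.array(vectors).shape)  # (number of in-vocabulary tokens, vector_size)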
This applies not only to the RMSprop optimizer; Adam behaves the same way.

To use the Adam optimizer, write
optimizer = adam_v2.Adam(learning_rate=1e-4)
instead of
optimizer = Adam(lr=1e-4)

To use the RMSprop optimizer, write
optimizer = rmsprop_v2.RMSprop(learning_rate=1e-4)
instead of
optimizer = rmsprop(lr=1e-4) or optimizer = RMSprop(lr=1e-4)
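
A minimal compile sketch tying the two fixes together (assuming Keras 2.6, where the v2 optimizer modules can be imported from keras.optimizers; check against your installed version):

# Build a tiny placeholder model and compile it with the v2 optimizer classes.
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import adam_v2, rmsprop_v2

model = Sequential([Dense(1, input_shape=(4,))])
optimizer = adam_v2.Adam(learning_rate=1e-4)           # instead of Adam(lr=1e-4)
# optimizer = rmsprop_v2.RMSprop(learning_rate=1e-4)   # instead of RMSprop(lr=1e-4)
model.compile(loss='mean_absolute_error', optimizer=optimizer)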
Source: CSDN blog post by 晓亮. (CC 4.0 BY-SA); original link: https://blog.csdn.net/m0_51816252/article/details/126714517


The code below runs as-is -----------------
# -*- coding: utf-8 -*-
# Import libraries (environment installed via the pip commands above, e.g. pip install numpy==1.20.3 -i https://pypi.tuna.tsinghua.edu.cn/simple)
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import mean_squared_error  # evaluation metric
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM, GRU
from keras import optimizers
import keras
import tensorflow as tf
#  metrics: mse, rmse, mae, mape
#  optimizers: adam, sgd

plt.rcParams['font.sans-serif'] = ['SimHei']  # use SimHei so any Chinese text in figures renders correctly
plt.rcParams['axes.unicode_minus'] = False    # render the minus sign properly with a non-default font
import warnings
warnings.filterwarnings("ignore")  # suppress warnings that do not affect execution
from data_read import data_process
train_x, train_y = data_process()
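# Note (added for clarity): data_process() comes from the accompanying data_read.py, which is
# not shown here. From the way its outputs are used below, train_x is assumed to be a 3-D
# array of shape (samples, sequence length, features per step) and train_y a 2-D array of
# shape (samples, output steps).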
# sequence length (number of time steps per sample)
int_sequence_len = train_x.shape[1]
# number of features per time step
int_a = train_x.shape[2]

# number of output steps to predict
out_len = train_y.shape[1]

# split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(np.array(train_x), np.array(train_y), test_size=0.2, random_state=1)

print(x_train.shape)
print(len(x_train), len(x_test))  # 1243 311
x_train = x_train.reshape(len(x_train), int_sequence_len, int_a)  # 3-D data: (samples, sequence length, features per step)
y_train = y_train.reshape(len(y_train), out_len)

print(x_train.shape)
print(y_train.shape)
x_test = x_test.reshape(len(x_test), int_sequence_len, int_a)
y_test = y_test.reshape(len(y_test), out_len)

print(x_test.shape)
print(y_test.shape)


def create_model_1():
    model = keras.models.Sequential([
        keras.layers.LSTM(100, activation='relu', input_shape=(int_sequence_len, int_a)),
        # input_shape = (sequence length, features per step) must match the 3-D
        # training data, e.g. (1, 9) for data shaped (1243, 1, 9)
        keras.layers.Dense(128, activation='relu'),  # fully connected layer
        keras.layers.Dense(64),                      # fully connected layer
        keras.layers.Dense(out_len)                  # output layer
    ])
    model.compile(loss='mean_absolute_error', optimizer='Adam')  # regression loss and optimizer (Adam or SGD)
    return model

model1 = create_model_1()
model1.summary()
history = model1.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=100, batch_size=32, shuffle=True)
# validate on the held-out test split so the val_loss curve plotted below really is a test-loss curve
model1.save_weights('lstmmoxing')  # save the model weights

training_loss = history.history['loss']
test_loss = history.history['val_loss']
# epoch indices for the x-axis
epoch_count = range(1, len(training_loss) + 1)
# plot the loss history
plt.plot(epoch_count, training_loss, 'r--')
plt.plot(epoch_count, test_loss, 'b-')
plt.legend(['Training Loss', 'Test Loss'])
plt.title("train loss and test loss")
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()


from sklearn.metrics import mean_squared_error   # mean squared error
from sklearn.metrics import mean_absolute_error  # mean absolute error
from sklearn.metrics import r2_score             # R squared

# Evaluation
# Re-create the model architecture defined above
model_jiazai_1 = create_model_1()
# Load the saved weights
model_jiazai_1.load_weights('lstmmoxing')

y1_pred_lstm = model_jiazai_1.predict(x_test)

y_true = []
y_pred = []
for i in range(len(y1_pred_lstm)):
    print("true:", y_test[i])
    y_true.extend(y_test[i])
    print("pred:", y1_pred_lstm[i])
    y_pred.extend(y1_pred_lstm[i])
    print("-----------------------")

print(mean_squared_error(y_true, y_pred))
print(mean_absolute_error(y_true, y_pred))
print(r2_score(y_true, y_pred))
print(len(y_true),len(y_pred))
# plot predictions against ground truth
len_ = [i for i in range(len(y_true))]
plt.xlabel('Sample index', fontsize=8)
plt.ylabel('Value', fontsize=8)
plt.plot(len_, y_true, label='y_true', color="blue")
plt.plot(len_, y_pred, label='y_pred', color="yellow")
plt.title("Weather prediction: true vs. predicted")
plt.legend()
plt.show()
plt.clf()

# # Export the results to Excel
# name = ['true value', 'predicted value']
# test = pd.DataFrame(columns=name, data=result)
# test.to_excel('result_天气.xlsx')
# # writes the ... Excel sheet
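
If you want the commented-out export above to actually run, here is a minimal sketch using the y_true / y_pred lists built in the evaluation loop (the column names are simply translated, the output file name is kept from the original, and writing .xlsx also needs openpyxl, which is not in the install list above):

# Build a two-column DataFrame from the evaluation results and write it to Excel.
result = pd.DataFrame({'true value': y_true, 'predicted value': y_pred})
result.to_excel('result_天气.xlsx', index=False)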
