TensorFlow Dataset usage (4): convert CSV files to a dataset and then to TFRecord files, read and parse the TFRecord files, and train a model

"""@author: khoing@contact: Khoing@126.com@time: 2019/12/16 14:36@file: tf_data_generate_tfrecord.py"""import matplotlib as mpl # Matplotlib 是 Python 的绘图库。 它可与 NumPy 一起使用import matplotlib.p...
摘要由CSDN通过智能技术生成
"""
@author: khoing
@contact: Khoing@126.com
@time: 2019/12/16 14:36
@file: tf_data_generate_tfrecord.py
"""


import matplotlib as mpl  # Matplotlib is Python's plotting library; it works together with NumPy

import matplotlib.pyplot as plt  # pyplot interface for data visualization

# %matplotlib inline  # In Jupyter Notebook / Jupyter QtConsole this magic makes plot() calls (or a Figure object typed at the prompt) render automatically and embeds the figure in the notebook.

import numpy as np  # numerical computing library for storing and processing large arrays and matrices

import sklearn  # widely used machine-learning toolkit wrapping common methods such as regression, dimensionality reduction, classification and clustering

import pandas as pd  # data-analysis library for Python
import os  # operating-system interface for working with files and directories
import sys  # functions related to the Python interpreter and its environment
import time
import tensorflow as tf

from tensorflow import keras

##################################################################################################
# Select which GPU is visible to TensorFlow
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

##################################################################################################

print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)

"""output:
    2.0.0
    sys.version_info(major=3, minor=7, micro=4, releaselevel='final', serial=0)
    matplotlib 3.1.1
    numpy 1.16.5
    pandas 0.25.3
    sklearn 0.21.3
    tensorflow 2.0.0
    tensorflow_core.keras 2.2.4-tf
"""

##################################################################################################
source_dir = "./generate_csv/"
print(os.listdir(source_dir))
"""output:
    ['test_00.csv', 'test_01.csv', 'test_02.csv', 'test_03.csv', 'test_04.csv', 'test_05.csv', 'test_06.csv', 'test_07.csv', 'test_08.csv', 'test_09.csv', 
    'train_00.csv', 'train_01.csv', 'train_02.csv', 'train_03.csv', 'train_04.csv', 'train_05.csv', 'train_06.csv', 'train_07.csv', 'train_08.csv', 'train_09.csv', 'train_10.csv', 'train_11.csv', 'train_12.csv', 'train_13.csv', 'train_14.csv', 'train_15.csv', 'train_16.csv', 'train_17.csv', 'train_18.csv', 'train_19.csv',
    'valid_00.csv', 'valid_01.csv', 'valid_02.csv', 'valid_03.csv', 'valid_04.csv', 'valid_05.csv', 'valid_06.csv', 'valid_07.csv', 'valid_08.csv', 'valid_09.csv'
    ]
"""
def get_filenames_by_prefix(source_dir, prefix_name):
    results = []
    all_files = os.listdir(source_dir)
    for filename in all_files:
        if filename.startswith(prefix_name):
            results.append(os.path.join(source_dir, filename))

    return results

train_filenames = get_filenames_by_prefix(source_dir, "train")
valid_filenames = get_filenames_by_prefix(source_dir, "valid")
test_filenames = get_filenames_by_prefix(source_dir, "test")

import pprint
pprint.pprint(train_filenames)
pprint.pprint(valid_filenames)
pprint.pprint(test_filenames)
"""output:
    ['./generate_csv/train_00.csv',
     './generate_csv/train_01.csv',
     './generate_csv/train_02.csv',
     './generate_csv/train_03.csv',
     './generate_csv/train_04.csv',
     './generate_csv/train_05.csv',
     './generate_csv/train_06.csv',
     './generate_csv/train_07.csv',
     './generate_csv/train_08.csv',
     './generate_csv/train_09.csv',
     './generate_csv/train_10.csv',
     './generate_csv/train_11.csv',
     './generate_csv/train_12.csv',
     './generate_csv/train_13.csv',
     './generate_csv/train_14.csv',
     './generate_csv/train_15.csv',
     './generate_csv/train_16.csv',
     './generate_csv/train_17.csv',
     './generate_csv/train_18.csv',
     './generate_csv/train_19.csv']
     
    ['./generate_csv/valid_00.csv',
     './generate_csv/valid_01.csv',
     './generate_csv/valid_02.csv',
     './generate_csv/valid_03.csv',
     './generate_csv/valid_04.csv',
     './generate_csv/valid_05.csv',
     './generate_csv/valid_06.csv',
     './generate_csv/valid_07.csv',
     './generate_csv/valid_08.csv',
     './generate_csv/valid_09.csv']
     
    ['./generate_csv/test_00.csv',
     './generate_csv/test_01.csv',
     './generate_csv/test_02.csv',
     './generate_csv/test_03.csv',
     './generate_csv/test_04.csv',
     './generate_csv/test_05.csv',
     './generate_csv/test_06.csv',
     './generate_csv/test_07.csv',
     './generate_csv/test_08.csv',
     './generate_csv/test_09.csv']

"""

##################################################################################################
# Read the training, validation, and test sets from the CSV files

def parse_csv_line(line, n_fields = 9):
    # each of the 9 CSV columns defaults to NaN when it is missing
    defs = [tf.constant(np.nan)] * n_fields
    parsed_fields = tf.io.decode_csv(line, record_defaults=defs)
    x = tf.stack(parsed_fields[0:-1])  # first 8 columns are the features
    y = tf.stack(parsed_fields[-1:])   # last column is the label
    return x, y
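
# A minimal sketch of how these CSV files can be assembled into tf.data datasets; the
# hyperparameter defaults below (n_readers, batch_size, n_parse_threads, shuffle_buffer_size)
# are illustrative assumptions rather than tuned settings.
def csv_reader_dataset(filenames, n_readers=5, batch_size=32,
                       n_parse_threads=5, shuffle_buffer_size=10000):
    dataset = tf.data.Dataset.list_files(filenames)  # dataset of file paths
    dataset = dataset.repeat()  # repeat indefinitely; steps per epoch are bounded later
    dataset = dataset.interleave(
        # skip(1) drops the header row of each CSV file (assumed present)
        lambda filename: tf.data.TextLineDataset(filename).skip(1),
        cycle_length=n_readers)
    dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.map(parse_csv_line,
                          num_parallel_calls=n_parse_threads)
    dataset = dataset.batch(batch_size)
    return dataset

batch_size = 32
train_set = csv_reader_dataset(train_filenames, batch_size=batch_size)
valid_set = csv_reader_dataset(valid_filenames, batch_size=batch_size)
test_set = csv_reader_dataset(test_filenames, batch_size=batch_size)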
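
##################################################################################################
# Write the datasets out as TFRecord files, as the title describes. This is a sketch of one
# possible implementation: the feature key names ("input_features", "label"), the shard naming
# pattern, the output directory "generate_tfrecords" and the shard/step counts are assumptions.

def serialize_example(x, y):
    """Convert a single (x, y) pair into a serialized tf.train.Example."""
    input_features = tf.train.FloatList(value=x.numpy())
    label = tf.train.FloatList(value=y.numpy())
    features = tf.train.Features(
        feature={
            "input_features": tf.train.Feature(float_list=input_features),
            "label": tf.train.Feature(float_list=label),
        })
    example = tf.train.Example(features=features)
    return example.SerializeToString()

def csv_dataset_to_tfrecords(base_filename, dataset, n_shards,
                             steps_per_shard, compression_type=None):
    """Take n_shards * steps_per_shard batches from `dataset` and spread them over
    n_shards TFRecord files; returns the list of file paths written."""
    options = tf.io.TFRecordOptions(compression_type=compression_type)
    all_filenames = []
    batch_iter = iter(dataset)  # one iterator, so successive shards get successive batches
    for shard_id in range(n_shards):
        filename_fullpath = '{}_{:05d}-of-{:05d}'.format(base_filename, shard_id, n_shards)
        with tf.io.TFRecordWriter(filename_fullpath, options) as writer:
            for _ in range(steps_per_shard):
                x_batch, y_batch = next(batch_iter)
                for x_example, y_example in zip(x_batch, y_batch):
                    writer.write(serialize_example(x_example, y_example))
        all_filenames.append(filename_fullpath)
    return all_filenames

output_dir = "generate_tfrecords"
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# steps_per_shard should be roughly (number_of_rows // batch_size // n_shards);
# the values below are placeholders, not derived from the actual CSV row counts.
n_shards = 20
train_tfrecord_filenames = csv_dataset_to_tfrecords(
    os.path.join(output_dir, "train"), train_set, n_shards, steps_per_shard=20)
valid_tfrecord_filenames = csv_dataset_to_tfrecords(
    os.path.join(output_dir, "valid"), valid_set, n_shards, steps_per_shard=5)
test_tfrecord_filenames = csv_dataset_to_tfrecords(
    os.path.join(output_dir, "test"), test_set, n_shards, steps_per_shard=5)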
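
##################################################################################################
# Read the TFRecord files back into tf.data datasets. Again a sketch: the feature spec must
# mirror the keys and shapes used in serialize_example above (8 input features, 1 label).

expected_features = {
    "input_features": tf.io.FixedLenFeature([8], dtype=tf.float32),
    "label": tf.io.FixedLenFeature([1], dtype=tf.float32),
}

def parse_example(serialized_example):
    example = tf.io.parse_single_example(serialized_example, expected_features)
    return example["input_features"], example["label"]

def tfrecords_reader_dataset(filenames, n_readers=5, batch_size=32,
                             n_parse_threads=5, shuffle_buffer_size=10000):
    dataset = tf.data.Dataset.list_files(filenames)
    dataset = dataset.repeat()
    dataset = dataset.interleave(
        lambda filename: tf.data.TFRecordDataset(filename),
        cycle_length=n_readers)
    dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.map(parse_example,
                          num_parallel_calls=n_parse_threads)
    dataset = dataset.batch(batch_size)
    return dataset

tfrecords_train_set = tfrecords_reader_dataset(train_tfrecord_filenames, batch_size=batch_size)
tfrecords_valid_set = tfrecords_reader_dataset(valid_tfrecord_filenames, batch_size=batch_size)
tfrecords_test_set = tfrecords_reader_dataset(test_tfrecord_filenames, batch_size=batch_size)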
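
##################################################################################################
# Finally, train a model on the TFRecord datasets ("train and generate a model" in the title).
# A minimal regression sketch assuming 8 input features and 1 target, as parse_csv_line implies;
# the layer sizes, optimizer, epoch count and steps values are assumptions, not tuned settings.

model = keras.models.Sequential([
    keras.layers.Dense(30, activation="relu", input_shape=[8]),
    keras.layers.Dense(1),
])
model.compile(loss="mean_squared_error", optimizer="sgd")

# Because the datasets repeat forever, steps_per_epoch / validation_steps must be given;
# in practice each would be roughly (number_of_rows // batch_size) for its split.
history = model.fit(tfrecords_train_set,
                    validation_data=tfrecords_valid_set,
                    steps_per_epoch=300,
                    validation_steps=100,
                    epochs=10)

model.evaluate(tfrecords_test_set, steps=100)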