Keras workflow for processing CSV data

This post shows how to build a random forest model with Python's Scikit-Learn library and how to preprocess the data, including standardization and one-hot encoding. It then builds a neural network with TensorFlow and Keras, using Dropout layers and an early-stopping strategy to prevent overfitting and improve model performance.

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")  # needed below; missing from the original snippet
train_data.head()

from sklearn.ensemble import RandomForestClassifier

y = train_data["Survived"]

features = ["Pclass", "Sex", "SibSp", "Parch", "Ticket", "Fare", "Cabin"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
# get_dummies on high-cardinality columns like Ticket and Cabin produces
# different columns for train and test, so align the test frame to the
# training columns (unseen columns become 0) and fill the one missing
# Fare value in test.csv with a simple 0 imputation.
X, X_test = X.align(X_test, join="left", axis=1, fill_value=0)
X_test = X_test.fillna(0)

model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)

output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('submission.csv', index=False)
print("Your submission was successfully saved!")
import pandas as pd
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import make_column_transformer
from sklearn.model_selection import GroupShuffleSplit

from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import callbacks

spotify = pd.read_csv('../input/dl-course-data/spotify.csv')

# dropna() defaults to axis=0: drop rows (top to bottom) that contain NaN;
# axis=1 would drop columns (left to right) instead
X = spotify.copy().dropna()

y = X.pop('track_popularity')    # remove the target column from X and return it as a Series
artists = X['track_artist']      # grouping key: keep each artist's tracks in a single split

features_num = ['danceability', 'energy', 'key', 'loudness', 'mode',
                'speechiness', 'acousticness', 'instrumentalness',
                'liveness', 'valence', 'tempo', 'duration_ms']
features_cat = ['playlist_genre']

preprocessor = make_column_transformer(
    (StandardScaler(), features_num),   # standardize: zero mean, unit variance per feature,
                                        # so large-scale features don't dominate the prediction
    (OneHotEncoder(), features_cat),    # turn each categorical value into its own 0/1 column,
                                        # a common preprocessing step in feature engineering
)
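To see what the two transformers do, here is a tiny self-contained sketch on made-up data (the toy frame and its columns are illustrative, not from the Spotify dataset):

import pandas as pd
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder

toy = pd.DataFrame({'tempo': [100.0, 120.0, 140.0],
                    'genre': ['pop', 'rock', 'pop']})
demo = make_column_transformer((StandardScaler(), ['tempo']),
                               (OneHotEncoder(), ['genre']))
print(demo.fit_transform(toy))
# tempo becomes [-1.22, 0, 1.22] (zero mean, unit variance);
# genre becomes one 0/1 column per category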

def group_split(X, y, group, train_size=0.75):
    # Unlike LeavePGroupsOut, GroupShuffleSplit generates a user-chosen number
    # of random splits, each holding out a user-chosen fraction of the groups,
    # so tracks by the same artist never land in both train and test.
    splitter = GroupShuffleSplit(train_size=train_size)

    # splitter.split() returns a generator; next() pulls its first (and here only) split
    train, test = next(splitter.split(X, y, groups=group))
    return (X.iloc[train], X.iloc[test], y.iloc[train], y.iloc[test])

X_train, X_valid, y_train, y_valid = group_split(X, y, artists)
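A quick sanity check, not in the original notebook, that the grouped split really keeps every artist on one side only:

# no artist should appear in both splits
train_artists = set(X_train['track_artist'])
valid_artists = set(X_valid['track_artist'])
print("overlapping artists:", len(train_artists & valid_artists))  # expect 0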

# fit_transform on the training data, then only transform the validation data,
# so the centering, scaling, and encoder categories come from the training split alone
X_train = preprocessor.fit_transform(X_train)
X_valid = preprocessor.transform(X_valid)
y_train = y_train / 100   # popularity runs 0-100; rescale the target to [0, 1]
y_valid = y_valid / 100

input_shape = [X_train.shape[1]]   # width of the preprocessed feature matrix, needed by the model below


# Add two 30% dropout layers, one after the 128-unit layer and one after the 64-unit layer
model = keras.Sequential([
    layers.Dense(128, activation='relu', input_shape=input_shape),
    layers.Dropout(0.3),
    layers.Dense(64, activation='relu'),
    layers.Dropout(0.3),
    layers.Dense(1)
])
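Dropout only zeroes units while training; at inference Keras passes inputs through unchanged. A small standalone check of that behavior (illustrative, not part of the notebook):

import numpy as np
import tensorflow as tf

drop = tf.keras.layers.Dropout(0.3)
x = np.ones((1, 8), dtype="float32")
print(drop(x, training=False).numpy())  # inference: all ones, unchanged
print(drop(x, training=True).numpy())   # training: ~30% of units zeroed, survivors scaled by 1/0.7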


model.compile(
    optimizer='adam',
    loss='mae',
)
history = model.fit(
    X_train, y_train,
    validation_data=(X_valid, y_valid),
    batch_size=512,
    epochs=50,
    verbose=0,  # 0: no log output; 1: progress bar; 2: one line per epoch
)
history_df = pd.DataFrame(history.history)
history_df.loc[:, ['loss', 'val_loss']].plot()
print("Minimum Validation Loss: {:0.4f}".format(history_df['val_loss'].min()))

import pandas as pd

concrete = pd.read_csv('../input/dl-course-data/concrete.csv')
df = concrete.copy()

df_train = df.sample(frac=0.7, random_state=0)
df_valid = df.drop(df_train.index)

X_train = df_train.drop('CompressiveStrength', axis=1)
X_valid = df_valid.drop('CompressiveStrength', axis=1)
y_train = df_train['CompressiveStrength']
y_valid = df_valid['CompressiveStrength']

input_shape = [X_train.shape[1]]
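The concrete snippet stops right after computing input_shape. A minimal sketch of a network that could consume it, mirroring the Dense/ReLU pattern used elsewhere in this post (the layer widths are an assumption, not from the original code):

from tensorflow import keras
from tensorflow.keras import layers

model = keras.Sequential([
    layers.Dense(512, activation='relu', input_shape=input_shape),  # assumed width
    layers.Dense(512, activation='relu'),
    layers.Dense(1),
])
model.compile(optimizer='adam', loss='mae')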

import pandas as pd
from IPython.display import display

red_wine = pd.read_csv('../input/dl-course-data/red-wine.csv')

# Create training and validation splits
# frac: fraction of rows to sample; random_state seeds the sampler so the split is reproducible
df_train = red_wine.sample(frac=0.7, random_state=0)
# the rows not drawn into df_train become the validation set
df_valid = red_wine.drop(df_train.index)
display(df_train.head(4))

# Scale to [0, 1]
# axis=0: take the max/min down each column, i.e. one value per feature
max_ = df_train.max(axis=0)
min_ = df_train.min(axis=0)
df_train = (df_train - min_) / (max_ - min_)
df_valid = (df_valid - min_) / (max_ - min_)

# Split features and target
X_train = df_train.drop('quality', axis=1)
X_valid = df_valid.drop('quality', axis=1)
y_train = df_train['quality']
y_valid = df_valid['quality']

from tensorflow import keras
from tensorflow.keras import layers, callbacks

# If validation loss hasn't improved by at least 0.001 over the previous 20
# epochs, stop training and keep the best model found so far. It can be hard
# to tell whether a rise in validation loss is overfitting or just random
# batch variation; these parameters let us set limits on when to stop.
early_stopping = callbacks.EarlyStopping(
    min_delta=0.001,  # minimum amount of change to count as an improvement
    patience=20,      # how many epochs to wait before stopping
    restore_best_weights=True,
)
# Define the network
model = keras.Sequential([
    layers.Dense(512, activation='relu', input_shape=[11]),
    layers.Dense(512, activation='relu'),
    layers.Dense(512, activation='relu'),
    layers.Dense(1),
])
# Compile the model: attach the optimizer and the loss function
model.compile(
    optimizer='adam',  # optimizer
    loss='mae',        # loss function
)



# Callbacks in this list are invoked at the appropriate points during training

history = model.fit(
    X_train, y_train,
    validation_data=(X_valid, y_valid),
    batch_size=256,
    epochs=500,
    callbacks=[early_stopping], # put your callbacks in a list
    verbose=0,  # turn off training log
)

history_df = pd.DataFrame(history.history)
history_df.loc[:, ['loss', 'val_loss']].plot();
print("Minimum validation loss: {}".format(history_df['val_loss'].min()))