# For classification problems, a cross-entropy loss is more effective than MSE;
# 'binary_crossentropy' computes the two-class (binary) cross-entropy.
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Load the dataset; it has no header row and column 15 (the last one) is the label.
data = pd.read_csv('credit-a.csv', header=None)

# Features: the first 15 columns. Labels: last column, remapped from
# {-1, 1} to {0, 1} so they match the sigmoid output of the network.
x = data.iloc[:, :-1]
y = data.iloc[:, -1].replace(-1, 0)

# Model: two small ReLU hidden layers and a single sigmoid unit for
# binary classification (15 input features -> probability of class 1).
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(4, input_shape=(15,), activation='relu'))
model.add(tf.keras.layers.Dense(4, activation='relu'))  # hidden layer
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))

# Compile with binary cross-entropy; 'acc' tracks training accuracy.
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['acc'])

# Train; history.history holds the per-epoch 'loss' and 'acc' series.
history = model.fit(x, y, epochs=100)

# Plot training accuracy over epochs.
plt.plot(history.epoch, history.history.get('acc'))
plt.show()
# For classification problems, a cross-entropy loss is more effective.
# NOTE(review): blog-scrape metadata, not code — "Latest recommended article published 2022-12-07 22:51:26".