Implementing Linear Discriminant Analysis (LDA) with Keras

Import the required modules and limit GPU memory usage
import os
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import adam_v2

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow as tf

gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)  # allocate GPU memory on demand
tf.config.experimental.set_virtual_device_configuration(
    gpus[0],
    [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)])  # cap the GPU at 1024 MB
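On a machine with no visible GPU, gpus is an empty list and gpus[0] raises an IndexError. A minimal guard, as a sketch (my addition, not part of the original script):

if gpus:  # only touch GPU settings when a GPU is actually visible
    tf.config.experimental.set_memory_growth(gpus[0], True)
    # (the memory_limit configuration above would go in this branch as well)
else:
    print("No GPU visible, running on CPU")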
Generate the two classes of data
# Generate the training set, 100 samples per class
def get_train_data(data_size=100):
    data_label = np.zeros((2 * data_size, 1))
    # class 1
    x1 = np.reshape(np.random.normal(1, 0.6, data_size), (data_size, 1))
    y1 = np.reshape(np.random.normal(1, 0.8, data_size), (data_size, 1))
    data_train = np.concatenate((x1, y1), axis=1)
    data_label[0:data_size, :] = 0
    # class 2
    x2 = np.reshape(np.random.normal(-1, 0.3, data_size), (data_size, 1))
    y2 = np.reshape(np.random.normal(-1, 0.5, data_size), (data_size, 1))
    data_train = np.concatenate((data_train, np.concatenate((x2, y2), axis=1)), axis=0)
    data_label[data_size:2 * data_size, :] = 1
    return data_train, data_label, x1, y1, x2, y2


# Generate the test set, 10 samples per class

def get_test_data(data_size=10):
    testdata_label = np.zeros((2 * data_size, 1))
    # class 1
    x1 = np.reshape(np.random.normal(1, 0.6, data_size), (data_size, 1))
    y1 = np.reshape(np.random.normal(1, 0.8, data_size), (data_size, 1))
    data_test = np.concatenate((x1, y1), axis=1)
    testdata_label[0:data_size, :] = 0

    # class 2
    x2 = np.reshape(np.random.normal(-1, 0.3, data_size), (data_size, 1))
    y2 = np.reshape(np.random.normal(-1, 0.5, data_size), (data_size, 1))
    data_test = np.concatenate((data_test, np.concatenate((x2, y2), axis=1)), axis=0)
    testdata_label[data_size:2 * data_size, :] = 1
    return data_test, testdata_label, x1, y1, x2, y2


(data_train, data_label, t_x1, t_y1, t_x2, t_y2) = get_train_data()
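With the default data_size=100 the arrays should come out as (200, 2) features and (200, 1) labels; a quick sanity check (my addition):

print(data_train.shape, data_label.shape)  # expected: (200, 2) (200, 1)
print(np.unique(data_label))               # expected: [0. 1.]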
Define and train the model
np.random.seed(1671)  # seed NumPy (call before get_train_data() if reproducible data is needed)
model = Sequential()
model.add(Dense(1, input_shape=(2,), activation='sigmoid'))
model.summary()
adam = adam_v2.Adam(learning_rate=0.5)  # learning_rate replaces the deprecated lr argument
model.compile(optimizer=adam, loss='mse', metrics=['accuracy'])
h = model.fit(data_train, data_label, verbose=1, batch_size=5, epochs=100, validation_split=0.2)
Test-set accuracy
(data_test, testdata_label, x1, y1, x2, y2) = get_test_data()
loss, score = model.evaluate(data_test, testdata_label, verbose=1)
print("prediction is: ", score)
print(h.history.keys())
Inspect the loss and accuracy curves during training
acc = h.history['accuracy']
val_acc = h.history['val_accuracy']
loss = h.history['loss']
val_loss = h.history['val_loss']
# loss curves
plt.figure()
plt.subplot(1, 2, 1)
plt.plot(loss, 'r', label='Training loss')
plt.plot(val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend()
# accuracy curves
plt.subplot(1, 2, 2)
plt.plot(acc, 'r', label='Training acc')
plt.plot(val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend()
Print the linear decision boundary learned by the model
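The single sigmoid unit computes sigma(w1*x + w2*y + b), so the class boundary is where this output equals 0.5, i.e. where its argument is zero. A short derivation (notation mine):

\[
\sigma(w_1 x + w_2 y + b) = 0.5
\;\Longleftrightarrow\;
w_1 x + w_2 y + b = 0
\;\Longleftrightarrow\;
y = -\frac{w_1 x + b}{w_2}
\]

This is the line drawn in the code below.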
w, b = model.layers[0].get_weights()  # w has shape (2, 1), b has shape (1,)
# decision boundary: w1*x + w2*y + b = 0  ->  y = -(w1*x + b) / w2
print('decision boundary: y =', -w[0][0] / w[1][0], '* x +', -b[0] / w[1][0])

x = np.arange(-1, 1, 0.1)
y = -(w[0][0] * x + b[0]) / w[1][0]

plt.figure()
# draw the boundary together with the training and test points
plt.plot(x, y)
plt.scatter(x1, y1, marker="o", label="data_test_class1")
plt.scatter(x2, y2, marker="o", label="data_test_class2")
plt.scatter(t_x1, t_y1, marker="o", label="data_train_class1")
plt.scatter(t_x2, t_y2, marker="o", label="data_train_class2")
plt.legend()
plt.show()
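For comparison with the classical closed-form LDA that the title refers to, the same arrays can be passed to scikit-learn's LinearDiscriminantAnalysis; a sketch assuming scikit-learn is installed (not part of the original post):

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

lda = LinearDiscriminantAnalysis()
lda.fit(data_train, data_label.ravel())  # closed-form fit, no gradient descent
print("sklearn LDA test accuracy:", lda.score(data_test, testdata_label.ravel()))
print("sklearn LDA boundary:", lda.coef_, lda.intercept_)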

Training process:
[figure: training log]
Test-set accuracy:
[figure: evaluation output]
Classification result:
[figure: scatter plot of both classes with the learned boundary line]
