Linear Discriminant Functions: Implementing the Batch Perceptron Algorithm

This post covers implementations of the batch perceptron algorithm, the Ho-Kashyap algorithm, and the MSE multi-class extension.

readData.py

import pandas as pd
import numpy as np


class readData(object):

    def __init__(self, io='../数据集.xlsx'):
        """
        io: path to the data set (Excel format)
        """
        df = pd.read_excel(io)  # use the io argument instead of a hard-coded path
        all_data = df.values  # all data: features + labels
        self.data = all_data[:, 0:2]  # feature columns
        self.label = all_data[:, 2]  # label column
        self.trn_dataA = []
        self.trn_dataB = []
        self.classA = 0
        self.classB = 0
        # print(np.max(all_data), np.min(all_data))
        # print(self.data)
        # print(self.label)

    def get_train_data(self, classA=1, classB=2):
        self.classA = classA
        self.classB = classB
        trn_dataA = np.array([self.data[i] for i in range(len(self.data)) if self.label[i] == classA])
        trn_dataB = np.array([self.data[i] for i in range(len(self.data)) if self.label[i] == classB])
        # print(trn_dataA, trn_dataB)
        """取classA为正类,classB为负类,并进行增广"""
        X1 = np.hstack((np.ones((trn_dataA.shape[0], 1)), trn_dataA))  # 每行都插入1
        X2 = -1 * np.hstack((np.ones((trn_dataB.shape[0], 1)), trn_dataB))  # 每行插入1后取负值(负类
        X = np.vstack((X1, X2))  # 得到增广矩阵

        self.trn_dataA = trn_dataA
        self.trn_dataB = trn_dataB
        return X

    def getMSEdata(self):
        '''Use the first eight samples of each class as the training set and the last two as the test set'''
        trn_x = np.array([self.data[i:i + 8] for i in [0, 10, 20, 30]])
        trn_y = np.array([self.label[i:i + 8] for i in [0, 10, 20, 30]])
        tst_x = np.array([self.data[i + 8:i + 10] for i in [0, 10, 20, 30]])
        tst_y = np.array([self.label[i + 8:i + 10] for i in [0, 10, 20, 30]])
        trn_x = trn_x.reshape((len(trn_x[0]) * len(trn_x[:, 0])), 2)
        trn_y = trn_y.reshape(-1).astype(int) - 1  # shift labels from 1-4 to 0-3 so they can index a one-hot matrix
        tst_x = tst_x.reshape((len(tst_x[0]) * len(tst_x[:, 0])), 2)
        tst_y = tst_y.reshape(-1).astype(int) - 1  # shift labels from 1-4 to 0-3

        train_label = np.eye(4)[trn_y]  # 32x4 one-hot label matrix
        test_label = np.eye(4)[tst_y]  # 8x4 one-hot label matrix

        train_data = np.hstack((trn_x, np.ones((trn_x.shape[0], 1))))  # augment
        test_data = np.hstack((tst_x, np.ones((tst_x.shape[0], 1))))

        train_data = train_data.T  # transpose so each column is one sample
        train_label = train_label.T
        test_data = test_data.T
        test_label = test_label.T

        # print(train_data.shape, train_label.shape)
        # print(test_data.shape, test_label.shape)

        return train_data, train_label, test_data, test_label
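
The sign flip applied to class B in get_train_data is the usual "sample normalization" trick for two-class linear discriminants: after negating the augmented negative-class samples, the single condition $w^T y > 0$ over all rows of X replaces the two class-specific inequalities. The toy snippet below (with made-up samples, not data from 数据集.xlsx) is only a sketch of that convention, which both the perceptron and the Ho-Kashyap code rely on.

# Sketch of the sign-normalization convention, using made-up samples
import numpy as np

A = np.array([[1.0, 2.0]])               # one toy sample of the positive class
B = np.array([[-1.0, -2.0]])             # one toy sample of the negative class
X1 = np.hstack((np.ones((1, 1)), A))     # augmented:  [ 1,  1,  2]
X2 = -np.hstack((np.ones((1, 1)), B))    # augmented and negated: [-1,  1,  2]
X = np.vstack((X1, X2))
w = np.array([0.0, 1.0, 1.0])            # any w with X @ w > 0 separates the classes
print(X @ w)                             # [3. 3.] -> both positive, so w separates A and B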

Perceptron.py

import numpy as np
from itertools import count


class Perceptron(object):
    """
    感知器:用于二分类
    w:权向量 初始权向量取0矩阵
    ita:更新的η
    """

    def __init__(self, x, ita=1):
        """
        x:增广矩阵
        w:初始权向量(全0)
        """
        self.x = x
        # self.w = np.zeros((3, 1), dtype='float32')
        self.w = np.full(self.x.shape[1], 0.0) #初始权向量为全0矩阵
        self.ita = ita
        self.step = 0

    def train(self):
        X = self.x
        w = self.w.T
        ita = self.ita
        step = 0
        for _ in count():
            y_mis = np.where(np.dot(X, w) <= 0)[0]  # indices of misclassified samples (w.T y <= 0)
            step += 1
            # print('update %2d, number of misclassified samples: %2d' % (step, len(y_mis)))
            # print('indices of misclassified samples:', y_mis)
            if len(y_mis) > 0:  # there are still misclassified samples
                w += ita * (np.sum(X[y_mis, :], axis=0))  # batch update with the sum of misclassified samples
            else:
                break
        self.step = step
        self.w = w
        return w
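
For reference, train() implements the standard batch perceptron rule: with $\mathcal{Y}_k$ the set of augmented, sign-normalized samples misclassified by $w_k$ (those with $w_k^T y \le 0$), the update is

$w_{k+1} = w_k + \eta \sum_{y \in \mathcal{Y}_k} y.$

For linearly separable data the perceptron convergence theorem guarantees the loop terminates after finitely many updates; for non-separable data it would run forever, which is one motivation for the Ho-Kashyap procedure below.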

HoKashyap.py 

import numpy as np


class HoKashyap(object):
    """
    Ho-Kashyap算法
    w:权向量 初始权向量取0矩阵
    ita:更新的η
    """

    def __init__(self, x, ita=0.1, kmax=1000):
        """
        x:增广矩阵
        kmax:最大迭代此处
        ita:更新权重系数
        """
        self.x = x
        self.w = np.random.randn(3, 1)  # 随机生成浮点数 randn正态分布 randint随机整数
        # random用法可见https://www.cnblogs.com/DOMLX/p/9751471.html
        self.b = np.random.rand(self.x.shape[0], 1)*2  # 生成[0,1)之间的随机浮点数
        self.ita = ita
        self.kmax = kmax  # 最大迭代次数
        self.step = 0
        self.acc = 0  # 分类准确率

    def train(self):
        X = self.x
        w = self.w
        b = self.b
        ita = self.ita
        step = 0
        kmax = self.kmax
        acc = 0
        for i in range(kmax):
            e = np.dot(X, w) - b  # error vector e = Xw - b
            e_ = (1 / 2) * (e + abs(e))  # positive part of e
            ita_ = ita / (i + 1)  # decaying learning rate
            if max(abs(e)) <= min(b):  # stop once every |e_i| is below the smallest margin
                print(e)
                break
            else:
                b += 2 * ita_ * e_
                w = np.dot(np.linalg.pinv(X), b)  # pseudo-inverse of the augmented matrix
            step += 1
            acc = (np.sum(np.dot(X, w) > 0)) / X.shape[0]
            # print('update %2d, classification accuracy %f' % (step, acc))
        if step == kmax and acc != 1:
            print('Maximum number of iterations reached: the classes are not linearly separable!')
            print('Final classification accuracy:', acc)
        else:
            print('%2d updates in total, classification accuracy %f' % (step, acc))
        self.acc = acc
        self.step = step
        self.w = w
        self.b = b

        return w, b
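
Each pass of train() is the standard Ho-Kashyap update. With the error $e_k = X w_k - b_k$ and its positive part $e_k^+ = \tfrac{1}{2}(e_k + |e_k|)$, the margin and weight vectors are updated as

$b_{k+1} = b_k + 2\eta_k\, e_k^+, \qquad w_{k+1} = X^{\dagger} b_{k+1},$

where $X^{\dagger}$ is the pseudo-inverse of the augmented sample matrix and $\eta_k = \eta/(k+1)$ is the decaying learning rate used in the code. If the data are linearly separable, the error eventually passes the stopping test and the loop exits early; otherwise the loop runs for kmax iterations, which the code reports as linearly non-separable.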

MSEfunc.py

import numpy as np


class MSEfunc(object):

    def __init__(self, train_x, train_y, test_x, test_y):
        self.train_x = train_x
        self.train_y = train_y
        self.test_x = test_x
        self.test_y = test_y

    def train(self):
        lam = 1e-9
        train_data = self.train_x
        train_label = self.train_y
        test_data = self.test_x
        test_label = self.test_y
        # Invert the Gram matrix of the training data; add a small positive value on the
        # diagonal to guard against rank deficiency
        temp = np.linalg.inv(np.dot(train_data, train_data.T) + lam * np.eye(train_data.shape[0]))
        # Question: why not use the pseudo-inverse to handle the singularity instead?
        W = np.dot(np.dot(temp, train_data), train_label.T)  # 3x4 weight matrix
        # print(test_label)
        y_pred = np.dot(W.T, test_data)  # the class whose discriminant w.T x is largest is the prediction
        index = np.argmax(y_pred, axis=0)  # index of the largest score for each test sample
        # print(index)
        y_pred = np.eye(y_pred.shape[0])[index].T  # convert predictions to one-hot vectors

        wrong_num = np.sum(np.abs(y_pred - test_label)) / 2  # each misclassified sample contributes two differing entries
        acc = (1 - wrong_num / test_label.shape[1]) * 100  # accuracy over the number of test samples
        result_table = np.array(index + 1)  # predicted class labels (back to 1-4)

        return result_table, acc
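
The weight matrix computed in train() is the ridge-regularized least-squares (MSE) solution: with the augmented training samples stored column-wise in $X$ (3×32) and the one-hot labels column-wise in $Y$ (4×32),

$W = (X X^{T} + \lambda I)^{-1} X Y^{T},$

and a test sample $x$ is assigned to the class with the largest entry of $W^{T} x$. When $X X^{T}$ is invertible, setting $\lambda = 0$ recovers the usual pseudo-inverse solution, which answers the question in the comment: the pseudo-inverse would work too, and the tiny $\lambda$ is just another way to guard against singularity.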

main.py

from Perceptron import Perceptron
from readData import readData
from MSEfunc import MSEfunc
import matplotlib.pyplot as plt
import numpy as np
from HoKashyap import HoKashyap

if __name__ == "__main__":
    """
    感知器二分类并绘图
    # """
    # data = readData()
    # X = data.get_train_data(1, 2)
    # perceptron = Perceptron(X, 0.1)
    # w = perceptron.train()
    # step = perceptron.step
    # print('Now classifying classes w%d and w%d' % (data.classA, data.classB))
    # print('Number of update iterations:', step)
    # print('Final weight vector:', w)
    #
    # dataA = data.trn_dataA
    # dataB = data.trn_dataB
    # x1 = np.array([-10, 10])
    # x2 = -1 * (w[1] * x1 + w[0]) / w[2]
    # plt.scatter(dataA[:, 0], dataA[:, 1], color='blue', marker='o', label='w%d' % data.classA)
    # plt.scatter(dataB[:, 0], dataB[:, 1], color='red', marker='x', label='w%d' % data.classB)
    # plt.plot([x1[0], x1[1]], [x2[0], x2[1]], 'black')
    # plt.xlabel(r'$x_1$')
    # plt.ylabel(r'$x_2$')
    # plt.legend(loc='best')
    # plt.title('Classified Data')
    # plt.show()

    """
    Iterate with the Ho-Kashyap algorithm and plot the result
    """
    # data = readData()
    # X = data.get_train_data(2, 4)
    #
    # print('Now classifying classes w%d and w%d' % (data.classA, data.classB))
    # HoK = HoKashyap(X, 0.1, 5000)
    # w, b = HoK.train()
    # acc = HoK.acc
    # error = 1 - acc
    # dataA = data.trn_dataA
    # dataB = data.trn_dataB
    # x1 = np.array([-7, 7])
    # x2 = -1 * (w[1] * x1 + w[0]) / w[2]
    #
    # plt.scatter(dataA[:, 0], dataA[:, 1], color='blue', marker='o', label='w%d' % data.classA)
    # plt.scatter(dataB[:, 0], dataB[:, 1], color='red', marker='x', label='w%d' % data.classB)
    # plt.plot([x1[0], x1[1]], [x2[0], x2[1]], 'black')
    # plt.xlabel(r'$x_1$')
    # plt.ylabel(r'$x_2$')
    # plt.text(0, 0, 'Accuracy: %.3f' % acc, color='red')
    # plt.text(0, -1, 'Error: %.3f' % error, color='green')
    # plt.legend(loc='best')
    # plt.title('Classified Data')
    # plt.show()

    """
    MSE多类扩展方法
    """
    data = readData()
    train_x, tran_y, test_x, test_y = data.getMSEdata()
    MSE = MSEfunc(train_x, tran_y, test_x, test_y)
    # MSE.train()
    result_tabel, acc = MSE.train()
    print('预测得到的类别标签:', result_tabel)
    print('分类准确率为%.2f' % acc)
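
The scripts above expect 数据集.xlsx one directory up: based on how readData slices df.values, the sheet holds two feature columns followed by a class label in {1, 2, 3, 4}, with ten samples per class stored contiguously. If the original file is unavailable, a stand-in with the same layout can be generated as in the sketch below; the class centers and spread are made up purely for exercising the code paths, and writing .xlsx via pandas requires an Excel engine such as openpyxl.

# Minimal sketch for generating a stand-in 数据集.xlsx (assumed layout: two feature
# columns plus a 1-4 class label, 10 samples per class, classes stored contiguously)
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
means = [(-3, -3), (3, 3), (-3, 3), (3, -3)]  # made-up class centers
rows = []
for label, center in enumerate(means, start=1):
    pts = rng.normal(loc=center, scale=1.0, size=(10, 2))  # 10 samples per class
    for x1, x2 in pts:
        rows.append((x1, x2, label))

pd.DataFrame(rows, columns=['x1', 'x2', 'label']).to_excel('../数据集.xlsx', index=False)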
