rbf核函数_机器学习-向量机模型-西瓜书代码(SVM)-RBF核函数

d9f22774b7c6040a34d2def5e1fbd91d.png

以下代码是本人在学习西瓜书时花费两个礼拜根据原理进行原创,若需转载请咨询本人,谢谢!

自我研究模拟

附上离散类别截图

d8b3aa33756b034f7e5c615e79e77898.png
数据截图

f271432ad8208025c25b4a989670b3d7.png
结果截图
Svm_config.py
"""
 Filename: Svm_config
 Author: kdd_zyx
 Description: 机器学习 - 支持向量机
 Datas:kdd - 随机划分
 Start: 2018.10.27
 End: 2018.10.29
"""
 
import time as t
import numpy as np
import random as r
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import cProfile as cPf
 
start = 1   # head index of each data row (not referenced in the code shown here — TODO confirm use)
end = -1    # tail index: position of the class label (last column) in each split row
 
# str->float
# str->float
def flt(pro_data):
    """Convert an iterable of numeric strings to a list of floats.

    Args:
        pro_data: iterable of values accepted by float() (e.g. '1.5').

    Returns:
        list[float]: converted values, in input order.

    Raises:
        ValueError: if any element cannot be parsed as a float.
    """
    # Comprehension replaces the original append loop — same result, idiomatic.
    return [float(data) for data in pro_data]
 
 
# 计算核结果
def Rbf_kernel(X, A, Ktup):
    m = np.shape(X)[0]
    K = np.mat(np.zeros((m, 1)))
 
    if Ktup[0] == 'rbf':
        for i in range(m):
            row_k = X[i, :] - A
            K[i]= row_k * row_k.T
 
        K = np.exp(K / (-2 * Ktup[1] ** 2))
 
    else:
        print('!rbf')
        pass
 
    return K
 
# 引入训练集
def Lead_dataset(text_addr, text_splt):
    try:
        pro_dataset = []
        end_dataset = []
        f = open(text_addr, 'r', encoding='UTF-8')
        for line in f.readlines():
            data = line.strip().replace('water', '1').replace('forest', '-1').split(text_splt)
            pro_dataset.append(flt(data[ :end]))
            end_dataset.append(float(data[end]))
 
        return pro_dataset, end_dataset
 
    except Exception as e:
        print('Error:', e)
        Lead_dataset(text_addr, text_splt)
Svm.py
"""
 Filename: Svm
 Author: kdd_zyx
 Description: 机器学习 - 支持向量机
 Datas:kdd - 随机划分
 Start: 2018.10.27
 End:
"""
 
from Svm_config import *
 
# 优化数据存储结构
class dataset:
    def __init__(self, pro_dataset, end_dataset, C, tol, Ktup):
        self.X = np.mat(pro_dataset)  # 特征数据集
        self.Y = np.mat(end_dataset).T  # 类别数据集
        self.C = C  # 错误惩罚参数
        self.tol = tol  # Ei的误差容忍度
        self.m = np.shape(self.Y)[0]  # 数据行数
        self.alphas = np.mat(np.zeros((self.m, 1)))  # 拉格朗日乘子集
        self.b = 0  # 初始化设为0
        self.Ei_cache = np.mat(np.zeros((self.m, 2)))  # Ei(预测类别误差)缓存集(加快运算速度)
        self.Ktup = Ktup # 核函数类型,核方差参数
        self.K = np.mat(np.zeros((self.m, self.m)))
        self.Cacl_kernel() # 核函数的计算结果集
 
    def Cacl_kernel(self):
        # 核函数的计算结果集
        for i in range(self.m):
            self.K[:, i] = Rbf_kernel(self.X, self.X[i, :], self.Ktup)
 
 
class Svm:
    """Support vector machine trained with Platt-style SMO on an RBF kernel.

    Wraps a `dataset` container (features, labels, hyper-parameters, kernel
    cache) and optimizes the Lagrange multipliers `alphas` and the bias `b`.
    """

    def __init__(self, pro_dataset, end_dataset):
        # Hard cap on outer SMO sweeps so training always terminates.
        self.max_iter = 10000
        # Inputs: feature set, label set, error penalty C, Ei tolerance, (kernel type, kernel sigma).
        self.Set = dataset(pro_dataset, end_dataset, 200, 0.5, ('rbf', 1.3))

    def Show_sv(self):
        """3D scatter-plot of all samples, with support vectors re-drawn in black.

        Only works for 2-feature data; z is the column sum of each sample's
        kernel values.
        """
        if self.Set.X.shape[1] != 2:
            print('this just is 2 dimention figure!')
            return

        # Plot every sample, colored by class label.
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        for i in range(self.Set.m):
            if self.Set.Y[i] == 1:
                ax.scatter(self.Set.X[i, 0], self.Set.X[i, 1], self.Set.K[:, i].sum(), c='red')
            elif self.Set.Y[i] == -1:
                ax.scatter(self.Set.X[i, 0], self.Set.X[i, 1], self.Set.K[:, i].sum(), c='blue')

        # Re-plot the support vectors (alpha > 0) in black on top.
        index_sV = np.nonzero(self.Set.alphas.A > 0)[0]
        for i in index_sV:
            ax.scatter(self.Set.X[i, 0], self.Set.X[i, 1], self.Set.K[:, i].sum(), c='black')

        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')

        plt.show()

    def Verify(self):
        """Evaluate the trained model on the training set and print accuracy.

        Uses only the support vectors (sV_* attributes set by Svm_Create):
        f(x) = sum_sv(alpha * y * K(sv, x)) + b, compared to the true label
        by sign.
        """
        verify_num = 0

        for i in range(self.Set.m):
            K_eval = Rbf_kernel(self.sV_dat_set, self.Set.X[i, :], self.Set.Ktup)
            end_data = np.dot(K_eval.T, np.multiply(self.sV_alp_set, self.sV_end_set)) + self.Set.b
            if np.sign(end_data) == np.sign(self.Set.Y[i]):
                verify_num += 1
        print("the training true rate is:", (verify_num / self.Set.m))  # print the training accuracy

    def Svm_Create(self):
        """Train the model: run SMO, extract support vectors, verify, plot."""
        self.Smo()
        self.sV_row = np.nonzero(self.Set.alphas)[0] # row indices of the support vectors
        self.sV_dat_set = self.Set.X[self.sV_row] # support-vector feature rows
        self.sV_end_set = self.Set.Y[self.sV_row] # support-vector labels
        self.sV_alp_set = self.Set.alphas[self.sV_row] # support-vector multipliers
        print("there are {} Support Vectors!".format(np.shape(self.sV_end_set)[0]))  # report how many support vectors were found
        self.Verify()
        self.Show_sv()

    def Smo(self):
        '''
        Outer SMO loop, alternating full-set sweeps with sweeps over the
        non-bound alphas (0 < alpha < C). Terminates when:
        1. the iteration count reaches max_iter, or
        2. a full-set sweep changes no alpha (i.e. the whole data set
           satisfies the KKT conditions within tolerance).
        Optimizes in place: self.Set.alphas and self.Set.b.
        :return: None (alphas and b are updated on self.Set)
        '''

        _iter = 0 # outer-loop iteration counter
        alpha_changed = 0
        entire_set = True # alternates between full-set and non-bound sweeps

        while _iter < self.max_iter and (alpha_changed > 0 or entire_set):
            alpha_changed = 0

            if entire_set:
                for i in range(self.Set.m):
                    alpha_changed += self.J_loop(i)

                print("Fullset :", str(alpha_changed))
                # Hand the next sweep over to the non-bound search.
                entire_set = False
            else:
                # Only alphas strictly inside (0, C) are candidates here.
                m = np.nonzero(np.multiply(self.Set.alphas > 0 , self.Set.alphas < self.Set.C))[0]
                for i in m:
                    alpha_changed += self.J_loop(i)

                print("Not Fullset :", str(alpha_changed))
                if alpha_changed == 0:
                    # Non-bound alphas are done for now; hand control back to the full-set sweep.
                    entire_set = True

            _iter += 1

        print("Smo() iteration number:" + str(_iter))


    def J_loop(self, i):
        """Attempt one SMO step with sample i as the first multiplier.

        Returns 1 if the pair (alpha_i, alpha_j) was updated, else 0.
        """
        Ei = self.Cacl_Ek(i)
        yiEi = self.Set.Y[i] * Ei # y[i]*E_i = y[i]*f(i) - y[i]^2 = y[i]*f(i) - 1

        if (yiEi < - self.Set.tol and self.Set.alphas[i] < self.Set.C) or (yiEi > self.Set.tol and self.Set.alphas[i] > 0): # is the KKT condition violated?
            j, Ej = self.J_maxE(i, Ei)

            # Save the old ai, aj before updating.
            alpha_i_old = self.Set.alphas[i].copy()
            alpha_j_old = self.Set.alphas[j].copy()

            # Box bounds [L, H] for the new alpha_j.
            if self.Set.Y[i] != self.Set.Y[j]:
                L = max(0, alpha_j_old - alpha_i_old)
                H = min(self.Set.C, self.Set.C + alpha_j_old - alpha_i_old)
            else:
                L = max(0, alpha_j_old + alpha_i_old - self.Set.C)
                H = min(self.Set.C, alpha_j_old + alpha_i_old)

            if L == H:
                return 0

            Tc = self.Set.K[i, i] + self.Set.K[j, j] - 2.0 * self.Set.K[i, j]

            if Tc <= 0: # Tc = |K_x1 - K_x2| ** 2.0 <= 0 — degenerate step, skip
                return 0

            # Update alpha_j along the unconstrained optimum, then clip.
            self.Set.alphas[j] += self.Set.Y[j] * (Ei - Ej) / Tc
            self.Set.alphas[j] = self.Clip_aj(self.Set.alphas[j], L, H)
            # Refresh the cached Ej.
            self.UpdataEk(j)

            new_old_alphja_j = self.Set.alphas[j] - alpha_j_old

            if np.abs(new_old_alphja_j) < self.Set.tol:
                return 0

            # Update alpha_i by the opposite (label-weighted) amount.
            self.Set.alphas[i] += self.Set.Y[i] * self.Set.Y[j] * (- new_old_alphja_j)
            # Refresh the cached Ei.
            self.UpdataEk(i)

            # Recompute the bias b from whichever alpha is non-bound.
            new_old_alphja_i = self.Set.alphas[i] - alpha_i_old
            bi = -Ei - self.Set.Y[i] * self.Set.K[i, i] * new_old_alphja_i - self.Set.Y[j] * self.Set.K[j, i] * new_old_alphja_j + self.Set.b
            bj = -Ej - self.Set.Y[i] * self.Set.K[i, j] * new_old_alphja_i - self.Set.Y[j] * self.Set.K[j, j] * new_old_alphja_j + self.Set.b

            if 0 < self.Set.alphas[i] < self.Set.C:
                self.Set.b = bi
            elif 0 < self.Set.alphas[j] < self.Set.C:
                self.Set.b = bj
            else:
                self.Set.b = (bi + bj) / 2.0

            return 1

        else:
            return 0

    def UpdataEk(self, k):
        """Recompute Ek for sample k and mark its cache entry valid."""
        Ek = self.Cacl_Ek(k)
        self.Set.Ei_cache[k] = [1, Ek]

    @staticmethod
    def Clip_aj(aj, L, H):
        """Clip aj into the box [L, H]."""
        if aj > H:
            return H

        elif aj < L:
            return L

        else:
            return aj

    def J_maxE(self, i, Ei):
        """Pick the second index j for the SMO pair.

        Chooses the cached-valid j maximizing |Ei - Ej| (largest step);
        falls back to a random j when no other cache entry is valid yet.
        Returns (j, Ej).
        """
        maxj = 0
        maxEj = 0
        maxEi_Ej = 0
        self.Set.Ei_cache[i] = [1, Ei]
        vaild_list = np.nonzero(self.Set.Ei_cache[:, 0])[0]

        if len(vaild_list) > 1:
            for j in vaild_list:
                if j != i:
                    Ej = self.Cacl_Ek(j)
                    Ei_Ej = np.abs(Ei - Ej)

                    if Ei_Ej > maxEi_Ej: # keep the j with the largest gap from i
                        maxEi_Ej = Ei_Ej
                        maxj = j

            maxEj = self.Cacl_Ek(maxj)
        else:
            maxj = self.J_rand(i)
            maxEj = self.Cacl_Ek(maxj)

        return maxj, maxEj

    def J_rand(self, i):
        """Return a uniformly random index j != i in [0, m)."""
        j = i
        while j == i:
            j = int(r.uniform(0, self.Set.m))

        return j

    def Cacl_Ek(self, k):
        """Prediction error on sample k: f(k) - y[k], using the kernel cache."""
        u_k = np.dot(np.multiply(self.Set.alphas, self.Set.Y).T, self.Set.K[:, k])
        Ek = float(u_k + self.Set.b - self.Set.Y[k])

        return Ek
 
def main():
    """Entry point: load the data set, train the SVM, print elapsed time."""
    started = t.time()

    features, labels = Lead_dataset('text_data.txt', '   ')
    model = Svm(features, labels)
    model.Svm_Create()

    finished = t.time()
    print("Time:", finished - started)

if __name__ == '__main__':
    main()
  • 0
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值