Least Squares Support Vector Machine (LS-SVM)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.svm import SVC
from time import time


class lssvm:
    def __init__(self):
        # self.model = 'svm'          # 'svm', 'lssvm', 'twsvm'
        # self.method = 'ovo'         # Multi-class method. 'ovo': One vs One, 'ova': One vs All
        # Kernel settings
        self.kernel_type = 'rbf'   # 'linear', 'poly', 'rbf', 'erbf', 'tanh', 'lspline'
        self.b = 1                  # constant multiplier for hyperbolic tangent
        self.c = 1                  # constant sum for polynomial, tangent and linear splines
        self.d = 2                  # polynomial power
        self.sigma = 0.5            # RBF and ERBF sigma

        # SVM settings
        self.C = 0.01                 # soft margin
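        # Note: in the linear system assembled in lssvm_fit, C enters only as
        # the ridge term C**-1 * I added to the kernel matrix diagonal, which
        # is one reason LS-SVM tends to be less sensitive to C than a
        # soft-margin SVM.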

        #
        self.alpha = None           # Lagrange multiplier (SVM and LSSVM)
        self.SV = None              # Support vectors
        self.bias = 1               # bias
        # Kernel types
        # linear : linear
        # poly   : polynomial
        # rbf    : radial basis Gaussian function (RBF)
        # erbf   : radial basis exponential
        # tanh   : hyperbolic tangent
        # lspline: linear splines
        return

    def kernel(self, x1, x2):
        # Input parameters
        #   t = kernel type
        #       linear:     linear
        #       poly:       polynomial
        #       rbf:        radial basis Gaussian function (RBF)
        #       erbf:       radial basis exponential
        #       tanh:       hyperbolic tangent
        #       lspline:    linear splines
        #   b:      constant multiplier for hyperbolic tangent
        #   c:      constant sum for polynomial, tangent and linear splines
        #   d:      polynomial and linear splines power indicator
        #   sigma:  free parameter for RBF and exponential basis
        # One problem with the polynomial kernel is that it may suffer
        # from numerical instability: when x1.T @ x2 + c < 1,
        # k(x1, x2) = (x1.T @ x2 + c)**d tends to zero with increasing d,
        # whereas when x1.T @ x2 + c > 1 it tends to infinity.
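        # For example, with c = 1 and d = 10 (illustrative numbers, not from
        # the original post): x1.T @ x2 = 0.5 gives (0.5 + 1)**10 ≈ 57.7,
        # while x1.T @ x2 = -0.6 gives (0.4)**10 ≈ 1.0e-4.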
        #
        # Promote 1-D inputs to column vectors so each row is one sample
        if x1.ndim == 1:
            x1 = x1[:, None]

        if x2.ndim == 1:
            x2 = x2[:, None]

        m1 = x1.shape[0]
        m2 = x2.shape[0]
        k = np.zeros((m1, m2))

        t = self.kernel_type
        b = self.b
        c = self.c
        d = self.d
        sigma = self.sigma
        for i in range(m1):
            for j in range(m2):

                # Linear kernel
                if t == 'linear':
                    k[i, j] = x1[i] @ x2[j]
                # Polynomial kernel
                elif t == 'poly':
                    k[i, j] = (x1[i] @ x2[j] + c) ** d
                # Radial basis Gaussian function (RBF)
                elif t == 'rbf':
                    k[i, j] = np.exp(-(x1[i] - x2[j]) @ (x1[i] - x2[j]) / (2 * sigma ** 2))
                # Radial basis exponential function (uses the norm so the
                # result is a scalar for multi-dimensional inputs)
                elif t == 'erbf':
                    k[i, j] = np.exp(-np.linalg.norm(x1[i] - x2[j]) / (2 * sigma ** 2))
                # Hyperbolic tangent
                elif t == 'tanh':
                    k[i, j] = np.tanh(b * (x1[i] @ x2[j]) + c)
                # Linear splines (element-wise formula summed over feature
                # dimensions; primarily intended for 1-D features)
                elif t == 'lspline':
                    u = np.minimum(x1[i], x2[j])
                    k[i, j] = np.sum(c + x1[i] * x2[j] + x1[i] * x2[j] * u
                                     + 1 / 2 * (x1[i] + x2[j]) * u ** d)

        return k
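    # Shape convention: for x1 of shape (m1, p) and x2 of shape (m2, p),
    # self.kernel(x1, x2) returns the (m1, m2) Gram matrix with
    # k[i, j] = k(x1[i], x2[j]).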


    def lssvm_predict(self, X_test, X_train, y_train):
        """
        Predict LS-SVM values.

        Input
            X_test:         test set. Numeric normalized values or categorical values
                            encoded as numeric.
            X_train:        train set. Numeric normalized values or categorical values
                            encoded as numeric.
            y_train:        train labels, y={-1, 1} or y={0, 1}. Vector if binary,
                            one-hot encoded if multiclass.
        Output
            y_hat:          predicted values
        """

        y_train = np.where(y_train == 0, -1, y_train)

        # sklearn's rbf_kernel computes exp(-gamma * ||x - y||**2), so
        # gamma = 1 / (2 * sigma**2) matches the sigma convention of self.kernel.
        # y_hat = np.sign(np.sum(self.alpha * y_train * self.kernel(X_train, X_test), axis=0,
        #                        keepdims=True) + self.bias).T
        y_hat = np.sign(np.sum(self.alpha * y_train * rbf_kernel(X_train, X_test, gamma=1 / (2 * self.sigma ** 2)),
                               axis=0, keepdims=True) + self.bias).T

        y_hat = np.where(y_hat == -1, 0, y_hat)

        return y_hat

    def lssvm_fit(self, X_train, y_train):
        """
        Train LS-SVM model. Parameters defined in the svm object. Trained values
        stored in the svm object.

        Input
            X_train:        train set. Numeric normalized values or categorical
                            values encoded as numeric.
            y_train:        train labels, y={-1, 1} or y={0, 1}. Vector if binary,
                            one-hot encoded if multiclass.
        SVM parameter
            C:              SVM soft margin
        Kernel parameters
            t:              kernel type: 'linear', 'poly', 'rbf', 'erbf','tanh',
                            'lspline'
            b:              constant multiplier for hyperbolic tangent
            c:              constant sum for polynomial, hyperbolic tangent and
                            linear splines
            d:              polynomial and linear splines power indicator
            sigma:          free parameter for RBF and exponential base
        Output
            Trained model stored in svm object.
        """

        y_train = np.where(y_train == 0, -1, y_train)

        N = X_train.shape[0]
        # y_train = y_train[:,None]
        # nc = y_train.shape[1]
        # K = self.kernel(X_train, X_train)
        # gamma = 1 / (2 * sigma**2) matches the sigma convention of self.kernel
        K = rbf_kernel(X_train, X_train, gamma=1 / (2 * self.sigma ** 2))

        # 3. Compute omega (must be float: an integer matrix would truncate
        # the kernel values in (0, 1] to zero)
        omega = np.zeros((N, N))
        for k in range(K.shape[0]):
            for l in range(K.shape[1]):
                omega[k, l] = y_train[k] * y_train[l] * K[k, l]

        # 4. Build Matrix A and vector b
        I = np.eye(omega.shape[0])
        ZZCI = omega + self.C ** -1 * I

        # 4.1 Build matrix A
        A11 = np.zeros((1, 1))  # Element A(1,1)
        A1 = np.hstack((A11, -y_train.T))  # Row 1
        A2 = np.hstack((y_train, ZZCI))  # Row 2

        # Build matrix A
        A = np.vstack((A1, A2))

        # 4.2 Output vector b
        b = np.vstack((np.zeros((1, 1)), np.ones((N, 1))))

        # 5. Solve the linear equation Ax = b
        x = np.linalg.solve(A, b)

        self.bias = x[0]
        self.alpha = x[1:]

        return

if __name__ == '__main__':
    X,y = datasets.make_blobs(n_samples=5000, n_features=2,
                              centers=2, cluster_std=[2.0,2.0], random_state=12)
    plt.scatter(X[:,0], X[:,1], c=y)
    plt.show()
    # X, y = datasets.load_breast_cancer(return_X_y=True)
    print("nDim=",X.shape[1])
    X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.2,random_state=100)
    y_train = y_train[:,None]

    s_time = time()
    model = lssvm()
    model.lssvm_fit(X_train=X_train,y_train=y_train)
    y_hat = model.lssvm_predict(X_test=X_test,X_train=X_train,y_train=y_train)
    Acc = accuracy_score(y_test, y_hat)
    e_time = time()
    print("精度=",Acc)
    print("耗时=",e_time - s_time)

    s_time = time()
    model = SVC(kernel='rbf')
    model.fit(X_train, y_train.ravel())  # ravel(): y_train was reshaped to a column vector above
    y_hat = model.predict(X_test)
    Acc = accuracy_score(y_test, y_hat)
    e_time = time()
    print("精度=",Acc)
    print("耗时=",e_time - s_time)

For some reason the LS-SVM here never manages to outperform the standard SVM; on some datasets its classification accuracy stays well below SVC no matter how the parameters are tuned. One plausible contributing factor is that LS-SVM trades the hinge loss for a squared error with equality constraints, so the solution is not sparse (every training point receives a nonzero alpha) and outliers pull on the decision boundary more than they do under the hinge loss. A quick way to inspect the sparsity difference is sketched below.

Also, LS-SVM appears much less sensitive to the tolerance coefficient C than the standard SVM, which is consistent with C entering the linear system only as the diagonal ridge term C**-1 * I.
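
A minimal sketch of that sparsity check, reusing the lssvm class and the train split from the script above (the 1e-8 cutoff for "nonzero" is an arbitrary choice, not from the original post):

ls = lssvm()
ls.lssvm_fit(X_train=X_train, y_train=y_train)
# LS-SVM: virtually every training point keeps a non-negligible multiplier
print("LS-SVM nonzero alphas:", int(np.sum(np.abs(ls.alpha) > 1e-8)), "of", len(ls.alpha))

svc = SVC(kernel='rbf')
svc.fit(X_train, y_train.ravel())
# SVC: only the sparse set of support vectors is retained
print("SVC support vectors:", int(svc.n_support_.sum()), "of", X_train.shape[0])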
