Python: Linear SVM (LinearSVM)
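
Both classes below minimize the soft-margin SVM objective with plain (sub-)gradient steps; writing the update out once makes the code easier to follow (this short derivation is a reading aid, not part of the original listing):

$$
\min_{w,b}\ \frac{1}{2}\lVert w\rVert^{2}+C\sum_{i}\max\bigl(0,\,1-y_{i}\,(w\cdot x_{i}+b)\bigr)
$$

With learning rate $\eta$, the regularization term always contributes the decay $w \leftarrow (1-\eta)\,w$ (the line self._w *= 1 - lr), and a sample $i$ that violates the margin, i.e. $1-y_{i}(w\cdot x_{i}+b)>0$, additionally contributes

$$
w \leftarrow w+\eta\,C\,y_{i}\,x_{i},\qquad b \leftarrow b+\eta\,C\,y_{i}
$$

which is exactly the delta = lr * C * y[idx] update applied in both classes below.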

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold

"""极大梯度下降法"""
class LinearSVM_maxGD():
    def __init__(self):
        self._w = self._b = None

    def fit(self, X, y, C=1, lr=0.01, epoch=10000):
        X = np.asarray(X, dtype=float)
        y = np.asarray(y, dtype=float)
        self._w = np.zeros(X.shape[1])
        self._b = 0.

        for _ in range(epoch):
            # Weight decay coming from the 1/2 * ||w||^2 regularization term.
            self._w *= 1 - lr
            err = 1 - y * self.predict(X, raw=True)
            idx = np.argmax(err)
            # If even the worst sample satisfies y * (w·x + b) >= 1, only the
            # weight-decay step above is applied; otherwise both w and b are
            # updated with that sample's hinge-loss sub-gradient.
            if err[idx] <= 0:
                continue
            delta = lr * C * y[idx]
            self._w += delta * X[idx]
            self._b += delta


    def predict(self, X, raw=False):
        X = np.asarray(X, dtype=float)
        y_pred = X.dot(self._w) + self._b
        if raw:
            return y_pred
        return np.sign(y_pred).astype(float)


"""Mini-batch gradient descent"""
class LinearSVM_MBGD():
    def __init__(self):
        self._w = self._b = None

    def fit(self, X, y, C=1, lr=0.01, batch_size=128, epoch=10000):
        X = np.asarray(X, dtype=float)
        y = np.asarray(y, dtype=float)
        batch_size = min(batch_size, len(y))
        self._w = np.zeros(X.shape[1])
        self._b = 0.

        for _ in range(epoch):
            # Weight decay coming from the 1/2 * ||w||^2 regularization term.
            self._w *= 1 - lr
            err = 1 - y * self.predict(X, raw=True)
            # Indices of the batch_size samples with the largest hinge loss,
            # ordered from worst to least.
            batch = np.argsort(err)[-batch_size:][::-1]
            err = err[batch]
            if err[0] <= 0:
                continue
            # Keep only the samples that actually violate the margin.
            mask = err > 0
            batch = batch[mask]
            delta = lr * C * y[batch]
            self._w += np.mean(delta[..., None] * X[batch], axis=0)
            self._b += np.mean(delta)


    def predict(self, X, raw=False):
        X = np.asarray(X, dtype=float)
        y_pred = X.dot(self._w) + self._b
        if raw:
            return y_pred
        return np.sign(y_pred).astype(float)
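
# Note: LinearSVM_maxGD updates with only the single worst margin violator per
# epoch, while LinearSVM_MBGD averages the same update over the batch_size
# worst violators, which tends to give a smoother step per epoch.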



if __name__ == '__main__':
    X,y = datasets.make_blobs(n_samples=1000, n_features=2, centers=2, cluster_std=[3.0,3.0],random_state=13)
    y[y==0] = -1
    plt.scatter(X[:,0],X[:,1],c=y)
    plt.show()
    Acc_List = []
    SKF = StratifiedKFold(n_splits=5, shuffle=True)
    for train_idx, test_idx in SKF.split(X=X,y=y):
        X_train = X[train_idx]
        y_train = y[train_idx]
        X_test = X[test_idx]
        y_test = y[test_idx]
        model = LinearSVM_MBGD()
        model.fit(X=X_train, y=y_train)
        y_pred = model.predict(X=X_test)
        Acc_List.append(accuracy_score(y_true=y_test, y_pred=y_pred))
    print("平均精度={}".format(np.mean(Acc_List)))
    print("标准差={}".format(np.std(Acc_List)))
