Implementing the SVM Algorithm in Python

This post shows how to implement a support vector machine (SVM) for a binary classification task in Python. The dataset is downloaded from GitHub and read with the `getdata` function, and its class distribution can be plotted. An `SVM` class then trains the model with the simplified SMO algorithm and provides a `drawresult` method that draws the decision boundary and highlights the support vectors. Finally, the model is fitted to the dataset and the result is plotted.

Dataset download:
https://github.com/Jack-Cherish/Machine-Learning/blob/master/SVM/testSet.txt
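
For reference, the simplified SMO loop in the code below implements the standard update rules; this is only a restatement, in equation form, of the formulas already sketched in the code comments:

```latex
E_i = \sum_{j=1}^{m} \alpha_j y_j K(x_i, x_j) + b - y_i \\
\eta = K(x_i, x_i) + K(x_j, x_j) - 2 K(x_i, x_j) \\
\alpha_j \leftarrow \operatorname{clip}\!\left(\alpha_j + \frac{y_j (E_i - E_j)}{\eta},\; [L, H]\right) \\
\alpha_i \leftarrow \alpha_i + y_i y_j \left(\alpha_j^{\mathrm{old}} - \alpha_j^{\mathrm{new}}\right)
```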

# coding=UTF-8
import random
import matplotlib.pyplot as plt
import numpy as np


def getdata(filename):
    x = []
    y = []
    with open(filename) as file:
        for line in file:
            lineArr = line.strip().split('\t')
            x.append([float(lineArr[0]), float(lineArr[1])])  # feature values
            y.append(float(lineArr[2]))  # class label
    return x, y


def drawdata(x, y):
    plus = []
    minus = []
    for i in range(len(x)):
        if y[i] > 0:
            plus.append(x[i])
        else:
            minus.append(x[i])
    plus_matrix = np.array(plus)
    minus_matrix = np.array(minus)
    plt.scatter(np.transpose(plus_matrix)[0], np.transpose(plus_matrix)[1])
    plt.scatter(np.transpose(minus_matrix)[0], np.transpose(minus_matrix)[1])
    plt.show()


class SVM(object):
    """
    参数:
        x: 数据
        y: 标签
        c: 松弛变量
        toler: 容错率
        n_iter: 最大迭代次数
    """

    def __init__(self, c=0.6, toler=0.001, n_iter=40):
        self.c = c
        self.toler = toler
        self.n_iter = n_iter
        self.alphas = np.array([])
        self.w_ = []
        self.b = 0

    def fit(self, x, y):
        # convert inputs to numpy matrices
        x_matrix = np.mat(x)
        y_matrix = np.mat(y).transpose()  # column vector of labels
        # compute b and the alphas with the simplified SMO algorithm
        self.b, self.alphas = self.smosimple(x_matrix, y_matrix, self.c, self.toler)
        # recover the weight vector: w = sum_i alpha_i * y_i * x_i
        self.w_ = np.zeros((x_matrix.shape[1], 1))
        for i in range(self.alphas.shape[0]):
            self.w_ += np.multiply(self.alphas[i] * y_matrix[i], x_matrix[i, :].T)
        return self

    # simplified SMO: the second alpha is chosen at random rather than
    # with the full Platt heuristics
    def smosimple(self, x_matrix, y_matrix, C, toler):
        # initialize the bias term b
        b = 0
        # dimensions of the data matrix (m rows, n columns)
        m, n = np.shape(x_matrix)
        # initialize all Lagrange multipliers alpha to zero
        alphas = np.mat(np.zeros((m, 1)))
        count = 0
        # alphas, y_matrix and x_matrix are all numpy matrices
        while count < self.n_iter:
            alphaschanged = 0
            for i in range(m):
                # step 1: compute the prediction error E_i
                # Ei = (sum_j[alpha_j * y_j * K(xi, xj)] + b) - yi; error = prediction - true label
                fi = float(np.multiply(alphas, y_matrix).T * self.kernel(x_matrix[i, :], x_matrix)) + b
                Ei = fi - float(y_matrix[i])

                # optimize alpha_i only if it violates the KKT conditions
                # beyond the tolerance toler
                if ((y_matrix[i] * Ei < -toler) and (alphas[i] < C)) or (
                        (y_matrix[i] * Ei > toler) and (alphas[i] > 0)):
                    # randomly pick a second multiplier alpha_j to optimize jointly with alpha_i
                    j = self.selectJrand(i, m)
                    fj = float(np.multiply(alphas, y_matrix).T * self.kernel(x_matrix[j, :], x_matrix)) + b
                    Ej = fj - float(y_matrix[j])
                    alphaIold = alphas[i].copy()
                    alphaJold = alphas[j].copy()  # copy so the old values survive the update
                    # step 2: compute the bounds L and H for alpha_j
                    if y_matrix[i] != y_matrix[j]:
                        L = max(0, alphas[j] - alphas[i])
                        H = min(C, C + alphas[j] - alphas[i])
                    else:
                        L = max(0, alphas[j] + alphas[i] - C)
                        H = min(C, alphas[j] + alphas[i])
                    if L == H:
                        continue
                    # step 3: compute eta (the curvature along the constraint direction)
                    # eta = K11 + K22 - 2*K12
                    eta = (self.kernel(x_matrix[i, :], x_matrix[i, :])
                           + self.kernel(x_matrix[j, :], x_matrix[j, :])
                           - 2.0 * self.kernel(x_matrix[i, :], x_matrix[j, :]))
                    if eta <= 0:
                        continue
                    # step 4: update alpha_j
                    alphas[j] += y_matrix[j] * (Ei - Ej) / eta
                    # step 5: clip alpha_j into [L, H]
                    alphas[j] = self.clipper(alphas[j], H, L)
                    # step 6: update alpha_i in the opposite direction
                    alphas[i] += y_matrix[i] * y_matrix[j] * (alphaJold - alphas[j])
                    # step 7: update b1, b2 and the bias b
                    b1 = (- Ei
                          - y_matrix[i] * self.kernel(x_matrix[i, :], x_matrix[i, :]) * (alphas[i] - alphaIold)
                          - y_matrix[j] * self.kernel(x_matrix[j, :], x_matrix[i, :]) * (alphas[j] - alphaJold)
                          + b)
                    b2 = (- Ej
                          - y_matrix[i] * self.kernel(x_matrix[i, :], x_matrix[j, :]) * (alphas[i] - alphaIold)
                          - y_matrix[j] * self.kernel(x_matrix[j, :], x_matrix[j, :]) * (alphas[j] - alphaJold)
                          + b)
                    if (0 < alphas[i]) and (C > alphas[i]):
                        b = b1
                    elif (0 < alphas[j]) and (C > alphas[j]):
                        b = b2
                    else:
                        b = (b1 + b2) / 2.0
                    alphaschanged += 1
            # count a pass only if no alpha pair was updated; reset the
            # counter whenever an update occurs, so training stops after
            # n_iter consecutive passes without any change
            if alphaschanged == 0:
                count += 1
            else:
                count = 0
        return b, alphas

    # linear kernel: returns the inner products between row xi and the rows of xj
    def kernel(self, xi, xj):
        return xj * xi.T

    # randomly pick an index j in [0, m) that differs from i
    def selectJrand(self, i, m):
        while True:
            j = int(random.uniform(0, m))
            if j != i:
                return j

    # clip alpha so that it stays within the interval [L, H]
    def clipper(self, alphas, H, L):
        if alphas > H:
            return H
        elif L <= alphas <= H:
            return alphas
        elif alphas < L:
            return L

    def drawresult(self, x, y):
        # x = np.mat(x)
        # y = np.mat(y).transpose()
        plus = []
        minus = []
        for i in range(len(x)):
            if y[i] > 0:
                plus.append(x[i])
            else:
                minus.append(x[i])
        plus_matrix = np.array(plus)
        minus_matrix = np.array(minus)
        plt.scatter(np.transpose(plus_matrix)[0], np.transpose(plus_matrix)[1], s=30, alpha=0.7)
        plt.scatter(np.transpose(minus_matrix)[0], np.transpose(minus_matrix)[1], s=30, alpha=0.7)

        # decision boundary: a1*x + a2*y + b = 0; solve for y at the smallest and largest x
        x1 = max(x)[0]
        x2 = min(x)[0]
        a1, a2 = self.w_
        b = float(self.b)
        a1 = float(a1[0])
        a2 = float(a2[0])
        y1 = (-b - a1 * x1) / a2
        y2 = (-b - a1 * x2) / a2
        plt.plot([x1, x2], [y1, y2])

        # circle the support vectors (samples with non-zero alpha)
        for i, alpha in enumerate(self.alphas):
            if abs(alpha) > 0:
                X, Y = x[i]
                plt.scatter([X], [Y], s=150, c='none', alpha=0.7, linewidth=1.5, edgecolor='red')
        plt.show()


if __name__ == '__main__':
    filename = "../data/SVMTest.txt"
    x, y = getdata(filename)
    # drawdata(x, y)
    svm = SVM()
    svm.fit(x, y)
    # print(svm.w_)
    svm.drawresult(x, y)
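
The `SVM` class above only fits and plots; a minimal sketch of a prediction step, assuming the linear decision rule sign(w·x + b) that `drawresult` already relies on, could look like this (the sample points in the usage comment are made up for illustration):

```python
import numpy as np


def predict(svm, x_new):
    """Classify new samples with the linear rule sign(w . x + b).

    Assumes `svm` has already been fitted, so that `svm.w_` holds the
    weight vector (shape n x 1) and `svm.b` the bias learned by SMO.
    """
    x_new = np.asarray(x_new, dtype=float)                     # shape (k, n)
    scores = x_new @ np.asarray(svm.w_).ravel() + float(svm.b)  # w . x + b
    return np.where(scores >= 0, 1.0, -1.0)

# Example usage (after svm.fit(x, y)):
# labels = predict(svm, [[3.5, 2.1], [7.0, -2.0]])
```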

Below is an additional example of classifying the iris dataset with an SVM in Python (via scikit-learn).

First, import the necessary libraries:

```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
```

Then load the iris dataset and preprocess the data:

```python
# Load iris dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target

# Split dataset into training set and testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# Scale data
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)
```

Next, define the SVM classifier and train the model:

```python
from sklearn.svm import SVC

# Create SVM classifier
svm = SVC(kernel='linear', C=1.0, random_state=0)

# Train SVM classifier
svm.fit(X_train_std, y_train)
```

Finally, evaluate the model and visualize the classification result:

```python
from sklearn.metrics import accuracy_score

# Predict class labels using the testing set
y_pred = svm.predict(X_test_std)

# Calculate accuracy score
accuracy = accuracy_score(y_test, y_pred)
print('Accuracy:', accuracy)

# Visualize classification results
from utils import plot_decision_regions

X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X=X_combined_std, y=y_combined, classifier=svm, test_idx=range(len(y_test)))
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.show()
```

Here `utils.py` provides the helper function `plot_decision_regions` for drawing decision regions; it can be found on GitHub. Running the full code gives:

```
Accuracy: 0.9777777777777777
```

[Figure: decision regions of the trained SVM on the standardized iris features]
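
The `plot_decision_regions` helper itself is not shown in the post. A minimal sketch of such a function is given below; it is an assumption about what the referenced `utils.py` roughly does, and it expects two-dimensional feature data (so the iris example above would need to restrict itself to two features, e.g. `iris.data[:, [2, 3]]`, for the mesh plot to apply):

```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap


def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    """Plot the decision regions of a fitted classifier for 2-D features."""
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # build a mesh over the feature plane and classify every grid point
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # plot the samples of each class
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, c=colors[idx], marker=markers[idx], label=cl)

    # circle the samples whose indices are marked as test points
    if test_idx is not None:
        X_test = X[list(test_idx), :]
        plt.scatter(X_test[:, 0], X_test[:, 1], c='none', edgecolor='black',
                    alpha=1.0, linewidth=1, marker='o', s=100, label='test set')
```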