统计学习方法_支持向量机SVM实现

由于在MNIST上运行SVM耗时过久,所以这里使用了伪造的数据集,并使用线性核和多项式核进行实验。

#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import time
import random
import logging

import pandas as pd

from generate_dataset import *

class SVM(object):
	'''
	Support vector machine trained with a simplified SMO algorithm,
	following "Statistical Learning Methods" (Li Hang), chapter 7.
	Formula numbers in comments refer to that book.
	'''

	def __init__(self, kernel='linear', epsilon=0.001):
		'''
		kernel  -- 'linear' (plain inner product) or 'poly' (cubic polynomial kernel)
		epsilon -- tolerance used when checking the KKT conditions (p. 129)
		'''
		self.kernel = kernel
		self.epsilon = epsilon

	def init_parameters(self, features, labels):
		'''
		Initialize all training state from the data set.
		features -- list of samples, each a list of numbers
		labels   -- list of +1 / -1 labels, same length as features
		'''
		self.X = features
		self.Y = labels

		self.b = 0.0
		self.n = len(features[0])    # dimensions per sample
		self.N = len(features)       # number of samples
		self.alpha = [0.0] * self.N  # Lagrange multipliers
		self.C = 1000                # penalty parameter
		self.Max_Interation = 5000   # cap on SMO iterations
		# Error cache E[i] = g(x_i) - y_i; built last since _E reads alpha and b.
		self.E = [self._E(i) for i in range(self.N)]

	def K(self, x1, x2):
		'''
		Kernel function. 'linear' is the plain inner product;
		'poly' is the cubic polynomial kernel (x1 . x2 + 1) ** 3.
		Raises ValueError for an unknown kernel name (the original
		silently returned None, which crashed later with a TypeError).
		'''
		dot = sum(x1[k] * x2[k] for k in range(self.n))
		if self.kernel == 'linear':
			return dot
		elif self.kernel == 'poly':
			return (dot + 1) ** 3
		raise ValueError('unknown kernel: %s' % self.kernel)

	def G(self, i):
		'''
		Decision function g(x_i), formula (7.104).
		'''
		result = self.b
		# BUGFIX: original iterated `range(N)` with an undefined global N.
		for j in range(self.N):
			result += self.alpha[j] * self.Y[j] * self.K(self.X[i], self.X[j])
		return result

	def _E(self, i):
		'''
		Prediction error E_i = g(x_i) - y_i, formula (7.105).
		'''
		return self.G(i) - self.Y[i]

	def satisfy_KKT(self, i):
		'''
		Check whether alpha[i] satisfies the KKT conditions within epsilon,
		formulas (7.111)-(7.113).
		'''
		ygx = self.Y[i] * self.G(i)
		if abs(self.alpha[i]) < self.epsilon:             # alpha == 0 -> y*g >= 1, (7.111)
			return ygx >= 1
		elif abs(self.alpha[i] - self.C) < self.epsilon:  # alpha == C -> y*g <= 1, (7.113)
			return ygx <= 1
		else:                                             # 0 < alpha < C -> y*g == 1, (7.112)
			return abs(ygx - 1) < self.epsilon

	def select_two_parameters(self):
		'''
		Select the working pair per section 7.4.2: alpha1 is a KKT violator
		(non-bound support vectors are scanned first), alpha2 maximizes |E1 - E2|.
		Returns (i1, i2), or None when every alpha satisfies the KKT
		conditions, i.e. training has converged.
		'''
		index_list = list(range(self.N))

		# Non-bound support vectors (0 < alpha < C) first, then the rest.
		non_bound = [i for i in index_list if 0 < self.alpha[i] < self.C]
		rest = list(set(index_list) - set(non_bound))
		i1_list = non_bound + rest

		for i in i1_list:
			if self.satisfy_KKT(i):
				continue  # alpha1 must violate the KKT conditions

			E1 = self.E[i]
			# (best |E1 - E2| so far, candidate index for alpha2)
			max_parameters = (0, 0)
			for j in index_list:
				if i == j:
					continue
				E2 = self.E[j]
				if abs(E1 - E2) > max_parameters[0]:
					max_parameters = (abs(E1 - E2), j)
			return i, max_parameters[1]

		# BUGFIX: made convergence explicit; the original fell through and
		# implicitly returned None, which train() then crashed unpacking.
		return None

	def train(self, features, labels):
		'''
		Train by SMO: repeatedly pick a working pair (alpha1, alpha2),
		optimize them analytically, then update b and the error cache.
		Stops early once every multiplier satisfies the KKT conditions.
		'''
		self.init_parameters(features, labels)

		for _ in range(self.Max_Interation):
			selected = self.select_two_parameters()
			if selected is None:
				break  # converged: no KKT violator remains
			i1, i2 = selected

			# Bounds [L, H] for the new alpha2 (p. 126).
			if self.Y[i1] != self.Y[i2]:
				L = max(0, self.alpha[i2] - self.alpha[i1])
				H = min(self.C, self.C + self.alpha[i2] - self.alpha[i1])
			else:
				# BUGFIX: original used alpha2 - alpha1 - C; the equal-label
				# case requires alpha2 + alpha1 - C.
				L = max(0, self.alpha[i2] + self.alpha[i1] - self.C)
				H = min(self.C, self.alpha[i2] + self.alpha[i1])

			E1 = self.E[i1]
			E2 = self.E[i2]

			# eta = K11 + K22 - 2*K12, formula (7.107).
			# BUGFIX: original dropped the factor 2 on K12.
			eta = (self.K(self.X[i1], self.X[i1])
				   + self.K(self.X[i2], self.X[i2])
				   - 2 * self.K(self.X[i1], self.X[i2]))
			if eta <= 0:
				# Degenerate pair (e.g. duplicate points): the quadratic has no
				# positive curvature. BUGFIX: original divided by eta unguarded.
				continue

			# Unclipped optimum (7.106), then clip into [L, H] (7.108).
			alpha2_new_unc = self.alpha[i2] + self.Y[i2] * (E1 - E2) / eta
			alpha2_new = min(max(alpha2_new_unc, L), H)

			# alpha1 from the equality constraint (7.109).
			alpha1_new = self.alpha[i1] + self.Y[i1] * self.Y[i2] * (self.alpha[i2] - alpha2_new)

			# Threshold update via b1/b2, formulas (7.115) and (7.116).
			b1_new = (-E1
					  - self.Y[i1] * self.K(self.X[i1], self.X[i1]) * (alpha1_new - self.alpha[i1])
					  - self.Y[i2] * self.K(self.X[i2], self.X[i1]) * (alpha2_new - self.alpha[i2])
					  + self.b)
			b2_new = (-E2
					  - self.Y[i1] * self.K(self.X[i1], self.X[i2]) * (alpha1_new - self.alpha[i1])
					  - self.Y[i2] * self.K(self.X[i2], self.X[i2]) * (alpha2_new - self.alpha[i2])
					  + self.b)

			if 0 < alpha1_new < self.C:
				b_new = b1_new
			elif 0 < alpha2_new < self.C:
				b_new = b2_new
			else:
				b_new = (b1_new + b2_new) / 2

			self.alpha[i1] = alpha1_new
			self.alpha[i2] = alpha2_new
			self.b = b_new

			# Refresh cached errors for the two updated multipliers, (7.117);
			# needs the new b, so this runs after the assignments above.
			self.E[i1] = self._E(i1)
			self.E[i2] = self._E(i2)

	def predict_one(self, feature):
		'''
		Predict the label (+1 or -1) of a single sample.
		'''
		result = self.b
		for i in range(self.N):
			result += self.alpha[i] * self.Y[i] * self.K(feature, self.X[i])
		return 1 if result > 0 else -1

	def predict(self, features):
		'''
		Predict labels for a list of samples; returns a list of +1/-1.
		'''
		return [self.predict_one(feature) for feature in features]

if __name__ == '__main__':
	# Enable debug-level logging for the whole run.
	logger = logging.getLogger()
	logger.setLevel(logging.DEBUG)

	print('Start reading data:')
	t_start = time.time()

	# 2/3 of the generated samples go to training, 1/3 to testing.
	train_features, train_labels, test_features, test_labels = generate_dataset(2000, visualization=False)

	t_read = time.time()
	print('read data cost %f seconds' % (t_read - t_start))

	print('Start training:')
	svm = SVM('poly')
	svm.train(train_features, train_labels)
	t_train = time.time()
	print('training cost %f seconds' % (t_train - t_read))

	print('Start predicting:')
	test_predict = svm.predict(test_features)
	t_predict = time.time()
	print('predicting cost %f seconds' % (t_predict - t_train))

	# Fraction of test samples whose predicted label matches the truth.
	correct = sum(1 for truth, guess in zip(test_labels, test_predict) if truth == guess)
	accuracy = correct / len(test_labels)
	print('The accuracy is %f!' % accuracy)

'''
output:(linear)
Start reading data:
read data cost 0.013078 seconds
Start training:
training cost 4.556223 seconds
Start predicting:
predicting cost 3.825693 seconds
The accuracy is 0.887387!
'''

'''
output:(poly)
Start reading data:
read data cost 0.013761 seconds
Start training:
training cost 4.902359 seconds
Start predicting:
predicting cost 4.038038 seconds
The accuracy is 0.875375!
'''

 

  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值