Support Vector Machine (SVM) in Python

【Overview】The support vector machine (SVM) was introduced by Vapnik and Chervonenkis in 1964. It developed rapidly in the 1990s, giving rise to a series of improved and extended algorithms, and has been widely applied in areas such as face recognition, text classification, handwritten character recognition, and bioinformatics.

【Core Code】

import numpy as np


class SMO(object):
    """Binary SVM classifier trained with a simplified SMO algorithm (linear kernel)."""

    def __init__(self, C=100, toler=0.001, maxIter=10000):
        self.C = C              # soft-margin penalty parameter
        self.tol = toler        # KKT-violation tolerance
        self.maxIter = maxIter  # cap on outer-loop passes

    def fit(self, X, y):
        self.X, self.y = X, y
        self.n_samples = len(X)
        self.alphas = np.zeros(self.n_samples, dtype=float)  # Lagrange multipliers
        self.b = 0.                                          # bias term
        self.Error = np.zeros_like(self.alphas)              # cached errors E_i = f(x_i) - y_i
        self.iterNum = 0

        iterNum = 0
        examineAll = True  # first pass examines every sample
        alphaChanged = 0
        while iterNum < self.maxIter and (alphaChanged > 0 or examineAll):
            alphaChanged = 0
            if examineAll:
                # full pass over all samples
                for i in range(len(self.X)):
                    alphaChanged += self._innerLoop(i)
                iterNum += 1
                examineAll = False
            else:
                # pass over non-bound samples only (0 < alpha < C)
                nonBoundInd = np.nonzero((self.alphas > 0) * (self.alphas < self.C))[0]
                for i in nonBoundInd:
                    alphaChanged += self._innerLoop(i)
                iterNum += 1
                if alphaChanged == 0:
                    examineAll = True
        self.iterNum = iterNum
        return self

    def _innerLoop(self, i):
        Ei = self.updateError(i)
        # proceed only if alpha_i violates the KKT conditions beyond the tolerance
        if (((Ei * self.y[i] < -self.tol) and (self.alphas[i] < self.C)) or
                ((Ei * self.y[i] > self.tol) and (self.alphas[i] > 0))):
            j = self.selectJ(i)
            Ej = self.Error[j]
            alphaIold, alphaJold = self.alphas[i], self.alphas[j]
            # bounds L, H keep the pair on the constraint line alpha_i*y_i + alpha_j*y_j = const
            if self.y[i] != self.y[j]:
                L = max(0, alphaIold - alphaJold)
                H = min(self.C, self.C + alphaIold - alphaJold)
            else:
                L = max(0, alphaJold + alphaIold - self.C)
                H = min(self.C, alphaJold + alphaIold)
            if H == L:
                return 0
            Kii, Kij, Kjj = (self.K(self.X[i], self.X[i]),
                             self.K(self.X[i], self.X[j]),
                             self.K(self.X[j], self.X[j]))
            eta = Kii + Kjj - 2 * Kij  # curvature of the objective along the constraint
            if eta <= 0:
                return 0
            # unconstrained optimum for alpha_i, then clip to [L, H]
            self.alphas[i] += self.y[i] * (Ej - Ei) / eta
            if self.alphas[i] <= L:
                self.alphas[i] = L
            elif self.alphas[i] >= H:
                self.alphas[i] = H
            if np.abs(self.alphas[i] - alphaIold) < 1.e-10:
                return 0
            # adjust alpha_j so the linear constraint still holds
            self.alphas[j] += self.y[j] * self.y[i] * (alphaIold - self.alphas[i])
            # recompute the bias from whichever multiplier ends up strictly inside (0, C)
            b0 = (self.b - Ej - self.y[j] * Kjj * (self.alphas[j] - alphaJold) -
                  self.y[i] * Kij * (self.alphas[i] - alphaIold))
            b1 = (self.b - Ei - self.y[j] * Kij * (self.alphas[j] - alphaJold) -
                  self.y[i] * Kii * (self.alphas[i] - alphaIold))
            if 0 < self.alphas[j] < self.C:
                self.b = b0
            elif 0 < self.alphas[i] < self.C:
                self.b = b1
            else:
                self.b = (b0 + b1) / 2
            return 1
        else:
            return 0

    def selectJ(self, i):
        # second-choice heuristic: pick the j that maximises |E_j - E_i|
        j = 0
        maxDeltaE = -1.
        priorIndices = np.nonzero(self.Error)[0]
        if len(priorIndices) > 1:
            for k in priorIndices:
                if k == i:
                    continue
                Ek = self.updateError(k)
                deltaE = np.abs(Ek - self.Error[i])
                if deltaE > maxDeltaE:
                    j, maxDeltaE = k, deltaE
            return j
        else:
            # no usable error cache yet: fall back to a random j != i
            j = np.random.choice([k for k in range(self.n_samples) if k != i])
            self.updateError(j)
            return j

    def updateError(self, i):
        # E_i = f(x_i) - y_i, where f(x) = sum_j alpha_j * y_j * K(x_j, x) + b
        fxi = np.sum(self.alphas * self.y *
                     np.array([self.K(self.X[i], self.X[j])
                               for j in range(self.n_samples)])) + self.b
        self.Error[i] = fxi - self.y[i]
        return self.Error[i]

    def K(self, Xi, Xj):
        # linear kernel
        return np.sum(Xi * Xj)
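
    # --- Not in the original post: an optional RBF-kernel sketch. ---
    # Swapping self.K for a Gaussian kernel such as the method below would give a
    # nonlinear SVM; the name K_rbf and the default gamma are illustrative assumptions.
    def K_rbf(self, Xi, Xj, gamma=0.5):
        return np.exp(-gamma * np.sum((Xi - Xj) ** 2))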

    def predict(self, testX):
        num = len(testX)
        y_pred = np.ones(num, dtype=int)
        for i in range(num):
            # decision value f(x) = sum_j alpha_j * y_j * K(x_j, x) + b
            fxi = np.sum(self.alphas * self.y *
                         np.array([self.K(testX[i], self.X[j])
                                   for j in range(self.n_samples)])) + self.b
            if fxi < 0:
                y_pred[i] = -1
        return y_pred
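
The snippet below is a minimal usage sketch, not part of the original post: it fits the SMO class above on a small, randomly generated, linearly separable toy dataset (an assumption made here for illustration) and reports the training accuracy. Labels must be +1/-1, which is what fit and predict expect, and the decision value is f(x) = sum_i alphas[i] * y[i] * K(X[i], x) + b.

import numpy as np

# Toy data: two Gaussian blobs, labels in {+1, -1}.
rng = np.random.RandomState(0)
X_pos = rng.randn(20, 2) + np.array([2.0, 2.0])
X_neg = rng.randn(20, 2) + np.array([-2.0, -2.0])
X = np.vstack([X_pos, X_neg])
y = np.array([1] * 20 + [-1] * 20)

clf = SMO(C=100, toler=0.001, maxIter=10000).fit(X, y)
y_pred = clf.predict(X)
print("training accuracy:", np.mean(y_pred == y))
print("support vectors:", np.sum(clf.alphas > 1e-6))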
