主要原理:
f(x) = sign(w*x + b)
损失函数 $L(w, b) = -\sum_{x_{i} \in M} y_{i}(w \cdot x_{i} + b)$,其中 $M$ 为误分类点的集合
随机梯度下降法 Stochastic Gradient Descent
随机抽取一个误分类点使其梯度下降。
$w = w + \eta y_{i}x_{i}$
$b = b + \eta y_{i}$
当实例点被误分类,即位于分离超平面的错误一侧,则调整w, b的值,使分离超平面向该误分类点的一侧移动,直至误分类点被正确分类
拿出iris数据集中两个分类的数据和[sepal length,sepal width]作为特征
方法一:Perceptron
导入所需要的库
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
%matplotlib inline
加载数据
# Load the iris dataset into a DataFrame for inspection.
iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['label'] = iris.target
# Shorter, convenient column names.
df.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']
df.label.value_counts()
# Keep the first 100 rows (classes 0 and 1): first two measurements
# as the feature matrix, the label column as the target.
data = np.array(df.iloc[:100, [0, 1, -1]])
X, y = data[:, :-1], data[:, -1]
# Recode the targets as +1 / -1 for the perceptron.
y = np.where(y == 1, 1, -1)
Perceptron
# Linearly separable binary classification.
# Model: f(x) = sign(w·x + b), trained by stochastic gradient descent.
class Model:
    def __init__(self):
        """Set up hyperparameters; weights are created lazily in fit().

        The original version sized ``w`` from the module-level global
        ``data``, which silently coupled the class to this script's
        globals. Deferring initialization to fit() removes that coupling.
        """
        self.w = None      # weight vector; allocated on first fit()
        self.b = 0         # bias term
        self.l_rate = 0.1  # learning rate (eta)

    def sign(self, x, w, b):
        """Return the raw decision value w·x + b (not yet thresholded)."""
        return np.dot(x, w) + b

    def fit(self, X_train, y_train):
        """Train with SGD: repeatedly pick the first misclassified point
        and nudge (w, b) toward it until no point is misclassified.

        Note: terminates only if the data is linearly separable.
        """
        if self.w is None:
            self.w = np.ones(len(X_train[0]), dtype=np.float32)
        converged = False
        while not converged:
            converged = True
            for i in range(len(X_train)):
                # <= 0 (not < 0): a point exactly on the hyperplane is
                # still misclassified and must trigger an update.
                if y_train[i] * self.sign(X_train[i], self.w, self.b) <= 0:
                    self.w = self.w + self.l_rate * np.dot(X_train[i], y_train[i])
                    self.b = self.b + self.l_rate * y_train[i]
                    converged = False
                    break
        return 'Perceptron Model!'

    def score(self, X_test=None, y_test=None):
        """Return the accuracy on (X_test, y_test).

        Returns None when called with no arguments, matching the
        original stub's behavior for existing callers.
        """
        if X_test is None or y_test is None:
            return None
        preds = np.sign(np.dot(X_test, self.w) + self.b)
        return float(np.mean(preds == y_test))
# Train the hand-written perceptron on the two-feature iris subset (X, y).
perceptron = Model()
perceptron.fit(X, y)
画图
# Visualize the learned separating line together with both classes.
fig = plt.figure()
x_points = np.linspace(4, 7, 10)
# On the boundary w0*x + w1*y + b = 0, so y = -(b + w0*x) / w1.
y_ = -(perceptron.b + perceptron.w[0] * x_points) / perceptron.w[1]
plt.plot(x_points, y_)
pos = y == 1
neg = y == -1
right = plt.scatter(X[pos, 0], X[pos, 1])
false = plt.scatter(X[neg, 0], X[neg, 1], marker='^')
plt.xlabel('sepal length')
plt.ylabel('sepal width')
plt.legend([right, false], ['0', '1'])
plt.show()
结果:
方法二:使用scikit-learn实现Perceptron
from sklearn.linear_model import Perceptron
from sklearn.metrics import f1_score, classification_report
# Fit scikit-learn's Perceptron on the same two-feature data.
clf = Perceptron(random_state=10)
clf.fit(X, y)
# Weights assigned to the features.
print(clf.coef_)
# Intercept: constant in the decision function.
print(clf.intercept_)
predictions = clf.predict(X)
print(classification_report(y, predictions))
# Decision boundary: coef0*x + coef1*y + intercept = 0.
# (The original referenced the misspelled, undefined name `x_ponits`
# before `x_points` was assigned, raising NameError at runtime.)
x_points = np.linspace(4, 7, 10)
y_ = -(clf.coef_[0][0] * x_points + clf.intercept_) / clf.coef_[0][1]
print(x_points)
print(y_)
# Plot the separating line and the two training classes
# (first 50 rows are class 0, next 50 are class 1).
plt.plot(x_points, y_)
plt.plot(data[:50, 0], data[:50, 1], 'bo', color='blue', label='0')
plt.plot(data[50:100, 0], data[50:100, 1], 'bo', color='orange', label='1')
plt.xlabel('sepal length')
plt.ylabel('sepal width')
plt.legend()