目录
一、LDA算法代码实现
1、python编程实现
(1)引入相关库
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Column-wise mean; input is a numpy matrix/array where rows are samples
# and columns are features.
def meanX(data):
    """Return the per-feature (column-wise) mean of the sample matrix."""
    return np.mean(data, axis=0)
(2)LDA算法实现
# Within-class scatter contribution s_i for a single class.
def compute_si(xi):
    """Return the scatter matrix of the samples in *xi* about their class mean.

    Expects a numpy matrix so that '*' below is matrix multiplication
    (each term is the outer product of one centered row with itself).
    """
    center = np.mean(xi, axis=0)  # column-wise class mean
    scatter = 0
    for k in range(xi.shape[0]):
        diff = xi[k, :] - center
        scatter = scatter + diff.T * diff  # outer product of the centered row
    return scatter
# Between-class scatter matrix Sb (unweighted: both classes contribute
# equally, which matches the usual weighted formula when n1 == n2, as in
# the demo data below).
def compute_Sb(x1, x2):
    """Return the between-class scatter matrix for two sample matrices.

    x1, x2: numpy matrices (rows = samples) so '*' is matrix multiplication.
    Fix: removed the leftover debug print of the stacked sample matrix.
    """
    dataX = np.vstack((x1, x2))   # all samples stacked together
    u1 = np.mean(x1, axis=0)      # class-1 mean
    u2 = np.mean(x2, axis=0)      # class-2 mean
    u = np.mean(dataX, axis=0)    # overall mean
    # Sum of outer products of (overall mean - class mean) over both classes.
    Sb = (u - u1).T * (u - u1) + (u - u2).T * (u - u2)
    return Sb
def LDA(x1, x2):
    """Return the LDA projection vector w for two classes of samples.

    x1, x2: numpy matrices (rows = samples).  w is the eigenvector of
    Sw^-1 * Sb with the largest eigenvalue.
    """
    # Within-class scatter Sw = s1 + s2.  The common normalization
    # Sw = (n1*s1 + n2*s2) / (n1 + n2) only rescales w, so it is omitted.
    Sw = compute_si(x1) + compute_si(x2)
    # Between-class scatter Sb (see compute_Sb for the formula used).
    Sb = compute_Sb(x1, x2)
    # Solve the eigenproblem of Sw^-1 * Sb; the dominant eigenvector is
    # the discriminant direction.
    eig_value, vec = np.linalg.eig(np.mat(Sw).I * Sb)
    order = np.argsort(-eig_value)  # indices sorted by decreasing eigenvalue
    top = order[:1]                 # index of the largest eigenvalue
    w = vec[:, top]                 # its eigenvector: the projection direction
    return w
(3)构造数据集
def createDataSet(seed=None):
    """Create two well-separated 2-D random sample clouds as numpy matrices.

    seed: optional int for reproducible draws (new, backward-compatible
    parameter; None keeps the original non-deterministic behavior).
    Returns (X1, X2): 8x2 matrices with class A entries in [15, 20) and
    class B entries in [2, 7).
    """
    rng = np.random.RandomState(seed)
    X1 = np.mat(rng.random_sample((8, 2)) * 5 + 15)  # class A
    X2 = np.mat(rng.random_sample((8, 2)) * 5 + 2)   # class B
    return X1, X2
# Build the two random classes and show the raw samples.
x1, x2 = createDataSet()
print(x1,x2)
(4)LDA训练
# Train: compute the LDA projection direction for the two classes.
w = LDA(x1, x2)
print("w:",w)
# Plot helper: *group* is a 2 x N matrix whose row 0 holds x coordinates
# and row 1 holds y coordinates.
def plotFig(group):
    """Show a scatter plot of the column points in *group* on a 0..30 range."""
    fig = plt.figure()
    plt.ylim(0, 30)
    plt.xlim(0, 30)
    axes = fig.add_subplot(111)
    axes.scatter(group[0, :].tolist(), group[1, :].tolist())
    plt.show()
# Plot all samples: stack both classes as columns of one 2 x 16 matrix.
plotFig(np.hstack((x1.T, x2.T)))
(5)实例测试
# Classify a single test point: project it, relative to the midpoint of
# the two class means, onto w; the sign of g picks the class.
test2 = np.mat([2, 8])
# Bug fix: the decision threshold is the midpoint of the class means,
# 0.5*(u1 + u2), not half their difference 0.5*(u1 - u2).
g = np.dot(w.T, test2.T - 0.5 * (meanX(x1)+meanX(x2)).T)
print("Output: ", g )
2、sklearn 库实现
#coding=utf-8
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import datasets
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import numpy as np
def main():
    """Fit sklearn LDA on the iris data, report the model, and plot the
    2-D projection together with the LDA decision regions."""
    iris = datasets.load_iris()  # classic classification dataset
    # Handle the data uniformly through pandas.
    data = pd.DataFrame(iris.data, columns=iris.feature_names)
    data['class'] = iris.target
    # Optionally keep only two classes:
    # data = data[data['class']!=2]
    # All four features are used; the LDA transform below reduces to 2-D.
    X = data[data.columns.drop('class')]
    Y = data['class']
    # Split into train / test sets.
    X_train, X_test, Y_train, Y_test =train_test_split(X, Y)
    lda = LinearDiscriminantAnalysis(n_components=2)
    lda.fit(X_train, Y_train)
    # Report the fitted model.
    print (lda.means_) # per-class centroids
    print (lda.score(X_test, Y_test)) # score = classification accuracy
    print (lda.scalings_) # projection vectors mapping features to the 2-D space
    X_2d = lda.transform(X) # now 2-D: X_2d = np.dot(X - lda.xbar_, lda.scalings_)
    # Visualize the 2-D data.
    # Decision regions: refit on the projected data so predict() works in 2-D
    # (note this overwrites the 4-feature model printed above).
    lda.fit(X_2d,Y)
    h = 0.02  # mesh step size
    x_min, x_max = X_2d[:, 0].min() - 1, X_2d[:, 0].max() + 1
    y_min, y_max = X_2d[:, 1].min() - 1, X_2d[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # Predict a class for every mesh point and draw filled regions.
    Z = lda.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    # Overlay the original points, one scatter (and legend entry) per class.
    class1_x = X_2d[Y==0,0]
    class1_y = X_2d[Y==0,1]
    l1 = plt.scatter(class1_x,class1_y,color='b',label=iris.target_names[0])
    class1_x = X_2d[Y==1,0]
    class1_y = X_2d[Y==1,1]
    l2 = plt.scatter(class1_x,class1_y,color='y',label=iris.target_names[1])
    class1_x = X_2d[Y==2,0]
    class1_y = X_2d[Y==2,1]
    l3 = plt.scatter(class1_x,class1_y,color='r',label=iris.target_names[2])
    plt.legend(handles = [l1, l2, l3], loc = 'best')
    plt.grid(True)
    plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
二、月亮数据集算法可视化
1、使用线性LDA对月亮数据集分类
#基于线性LDA算法对月亮数据集进行分类
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from mpl_toolkits.mplot3d import Axes3D
# Two-class Fisher LDA: find the direction w that best separates the two
# classes and project every sample onto it.
def LDA(X, y):
    """Fisher LDA for two classes labeled 0/1 in *y*.

    Returns (X1_new, X2_new, y1_new, y2_new): the 1-D projections of each
    class onto the discriminant direction w, plus relabeled targets 1 and 2.
    """
    # Partition the samples by their label.
    X1 = np.array([sample for sample, label in zip(X, y) if label == 0])
    X2 = np.array([sample for sample, label in zip(X, y) if label == 1])
    # Class centroids.
    mju1 = np.mean(X1, axis=0)
    mju2 = np.mean(X2, axis=0)
    # Within-class scatter Sw = sum of the per-class scatter matrices.
    Sw = np.dot((X1 - mju1).T, X1 - mju1) + np.dot((X2 - mju2).T, X2 - mju2)
    # Discriminant direction w = Sw^-1 (mju1 - mju2), as a column vector.
    w = np.dot(np.mat(Sw).I, (mju1 - mju2).reshape((len(mju1), 1)))
    # Project each class onto w (helper inlined: it was just a dot product).
    X1_new = np.dot(X1, w)
    X2_new = np.dot(X2, w)
    y1_new = [1] * len(X1)
    y2_new = [2] * len(X2)
    return X1_new, X2_new, y1_new, y2_new
# Project samples x onto direction w (plain matrix product).
def func(x, w):
    """Return the projection of *x* onto *w*."""
    projected = np.dot(x, w)
    return projected
if '__main__' == __name__:
    # Build the two-moon data, run LDA on it, and show the raw points
    # colored by their true class.
    X, y = make_moons(n_samples=100, noise=0.15, random_state=42)
    X1_new, X2_new, y1_new, y2_new = LDA(X, y)
    plt.scatter(X[:, 0], X[:, 1], marker='o', c=y)
    plt.show()
2、使用k-means对月亮数据集聚类
# -*- coding:utf-8 -*-
from sklearn.datasets import make_moons
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
# Load the two-moon data (demonstrates that k-means cannot cluster it
# well: the two moons are not convex clusters).
X,y = make_moons(n_samples=200,random_state=0,noise=0.05)
print(X.shape)
print(y.shape)
plt.scatter(X[:,0],X[:,1])
# Fit k-means with two clusters and predict a cluster for every sample.
kmeans = KMeans(n_clusters=2)
kmeans.fit(X)
y_pred = kmeans.predict(X)
cluster_center = kmeans.cluster_centers_
# Points colored by predicted cluster; cluster centers drawn as triangles.
plt.scatter(X[:,0],X[:,1],c=y_pred)
plt.scatter(cluster_center[:,0],cluster_center[:,1],marker='^',linewidth=4)
plt.show()
3、使用SVM对月亮数据集分类
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.datasets import make_moons
# Load the two-moon dataset.
X,y = make_moons(n_samples=200,random_state=0,noise=0.05)
h = .02 # step size of the decision-region mesh
# Create one SVM per kernel and fit each on the data.
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y) # linear kernel
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y) # RBF kernel
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y) # polynomial kernel
lin_svc = svm.LinearSVC(C=C).fit(X, y) # linear kernel (LinearSVC)
# Build the mesh used to draw the decision regions.
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# Subplot titles, in the order the classifiers are plotted below.
titles = ['SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
    # Draw the decision boundary; each predicted region gets its own color.
    plt.subplot(2, 2, i + 1) # 2x2 grid of subplots; current one is i+1
    plt.subplots_adjust(wspace=0.4, hspace=0.4) # spacing between subplots
    # Predict a class for every mesh point (paired xx/yy coordinates).
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Plot the predictions as filled contours over the mesh.
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
    # Overlay the training points, colored by true class.
    # NOTE(review): the axis labels below look left over from an iris
    # example; this data comes from make_moons — confirm intent.
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title(titles[i])
plt.show()
三、总结
本次实验主要使我理解了LDA、k-means以及SVM算法的原理思路以及各自的优缺点,以及其实现方法,过程还是较为顺利。