import numpy as np
import matplotlib.pyplot as plt
# XOR-style toy dataset: class 0 at (1,1)/(2,2), class 1 at (2,1)/(1,2).
# Deliberately NOT linearly separable, to stress-test the classifiers below.
X = np.array([[1, 1], [2, 1], [1, 2], [2, 2]])
y = np.array([0, 1, 1, 0])  # kept 1-D, shape (4,), as sklearn expects
plt.scatter(X[:, 0], X[:, 1], c=y)
plt.show()
X.shape, y.shape  # (4, 2) and (4,)
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC,LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
# A small zoo of classifiers to compare decision boundaries on the XOR data.
lr = LogisticRegression(C=100)   # large C => weak L2 regularization
svc = SVC()                      # kernel SVM (RBF by default; can separate XOR)
lsvc = LinearSVC()               # linear SVM (cannot separate XOR)
rf = RandomForestClassifier()
mlp = MLPClassifier()
# ### Plotting a decision boundary
# (reference: https://blog.csdn.net/qinhanmin2010/article/details/65692760)
# Idea: color every point of a dense grid by the classifier's prediction.
#   1. choose plot limits
#   2. meshgrid -> grid coordinate arrays
#   3. ravel + np.c_ -> one (x1, x2) row per grid point
#   4. contourf for the filled class regions, scatter for the data points
h = 0.01  # grid step size
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1  # feature x1 -> xx axis
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1  # feature x2 -> yy axis
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
print(xx.shape, yy.shape)
# Pick which model to visualize and train it.
# sklearn's .fit returns the estimator itself, so binding and fitting
# collapse into a single statement.
clf = svc.fit(X, y)
def get_Z(pred_func, grid_x=None, grid_y=None):
    """Evaluate a prediction function on a mesh grid.

    Args:
        pred_func: callable taking an (n_points, 2) array of (x1, x2) pairs
            and returning one prediction per row (e.g. ``clf.predict``).
        grid_x, grid_y: mesh-grid coordinate arrays of identical shape.
            Default to the module-level ``xx``/``yy`` built above, so the
            original call sites keep working unchanged.

    Returns:
        ndarray with the same shape as ``grid_x``, holding the predictions.
    """
    if grid_x is None:
        grid_x = xx
    if grid_y is None:
        grid_y = yy
    # Flatten both coordinate arrays and stack them column-wise: every grid
    # point becomes one (x1, x2) row fed to the classifier.
    Z = pred_func(np.c_[grid_x.ravel(), grid_y.ravel()])
    print(Z.shape)
    # Restore the grid shape so contourf can consume it directly.
    return Z.reshape(grid_x.shape)
# Pass the bound method directly — equivalent to the identity lambda.
Z = get_Z(clf.predict)
plt.contourf(xx, yy, Z)                  # filled class regions
plt.scatter(X[:, 0], X[:, 1], c=y)       # data points on top
plt.show()
# --- Logistic regression implemented in PyTorch ---
# (the original line here was bare text, which is a SyntaxError in Python)
import torch
from torch.autograd import Variable  # NOTE: deprecated since PyTorch 0.4; plain Tensors track gradients now
# Convert the numpy data to float tensors. The target is reshaped to (4, 1)
# so it matches the model's output shape — BCELoss requires input and target
# to have the same shape.
x_data = torch.tensor(X, dtype=torch.float32)
y_data = torch.tensor(y, dtype=torch.float32).reshape(-1, 1)
class Model(torch.nn.Module):
    """Logistic regression: one 2->1 linear layer squashed by a sigmoid."""

    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(2, 1)  # two input features, one logit
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        # Returns the probability of the positive class, shape (batch, 1).
        return self.sigmoid(self.linear(x))
model = Model()
criterion = torch.nn.BCELoss()  # binary cross-entropy; expects probabilities in (0, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# (removed the unused, misspelled `E_opoch = 100` constant — the training
# loop below hard-codes its epoch count and never read it)
# --- Training loop ---
for epoch in range(100):
    y_pred = model(x_data)  # forward pass: probabilities, shape (N, 1)
    # Reshape the target defensively so BCELoss sees matching (N, 1) shapes
    # even if y_data was created 1-D.
    loss = criterion(y_pred, y_data.reshape(-1, 1))
    if epoch % 10 == 0:
        # .item() replaces loss.data[0], which was removed in PyTorch >= 0.4.
        print(epoch, loss.item())
    optimizer.zero_grad()  # clear gradients accumulated from the previous step
    loss.backward()        # backpropagate
    optimizer.step()       # SGD parameter update

# --- Plot the learned decision boundary ---
# Moved OUT of the loop: the original code built and showed a full figure on
# every epoch, almost certainly a notebook indentation accident.
with torch.no_grad():  # inference only; no autograd graph needed
    # Flatten the grid into (x1, x2) rows and run them through the model,
    # then restore the grid shape, e.g. torch.Size([300, 300]).
    grid = torch.tensor(np.c_[xx.ravel(), yy.ravel()], dtype=torch.float32)
    Z = model(grid).reshape(xx.shape)
Z = (Z > 0.5).float().numpy()  # threshold probabilities into hard 0/1 classes
plt.contourf(xx, yy, Z)
plt.scatter(X[:, 0], X[:, 1], c=y)
plt.show()
# NOTE: Variable used to wrap a Tensor (data in .data, gradient in .grad) so it
# could act as a node in the autograd graph; since PyTorch 0.4 plain Tensors do
# this themselves and Variable is deprecated.