# Binary classification problem
"""
Solve a binary classification problem using logistic regression.

Fits sklearn's LogisticRegression on 8 hand-made 2-D sample points,
predicts the class of two new points, then renders the learned decision
regions with pcolormesh underneath a scatter of the training samples.
"""
import numpy as np
import matplotlib.pyplot as mp
import sklearn.linear_model as lm

# Training samples: 8 points in a 2-D feature space.
x = np.array([
    [3, 1],
    [2, 5],
    [1, 8],
    [6, 4],
    [5, 2],
    [3, 5],
    [4, 7],
    [4, -1]])
# Binary class label (0 or 1) for each sample above.
y = np.array([0, 1, 1, 0, 0, 1, 1, 0])

# liblinear suits small dense datasets; C=1 is the default L2 strength.
model = lm.LogisticRegression(solver="liblinear", C=1)
model.fit(x, y)
result = model.predict([[3, 9], [6, 1]])
print(result)

# Build a dense n*n grid spanning the sample range (padded by 1 on every
# side) and classify every grid point to visualize the decision boundary.
left, right = x[:, 0].min() - 1, x[:, 0].max() + 1
bottom, top = x[:, 1].min() - 1, x[:, 1].max() + 1
n = 500
grid_x, grid_y = np.meshgrid(np.linspace(left, right, n),
                             np.linspace(bottom, top, n))
mesh_x = np.column_stack((grid_x.ravel(), grid_y.ravel()))
mesh_z = model.predict(mesh_x)
grid_z = mesh_z.reshape(grid_x.shape)

mp.figure("LR classification", facecolor="lightgray")
mp.title("LR classification", fontsize=16)
# zorder=3 keeps the sample points drawn above the pcolormesh regions.
mp.scatter(x[:, 0], x[:, 1], c=y,
           cmap="jet", label="sample points", s=70, zorder=3)
mp.grid(linestyle=":")
mp.legend()
mp.pcolormesh(grid_x, grid_y, grid_z, cmap="gray")
mp.tight_layout()
mp.show()
# Multi-class classification problem
"""
Solve a multi-class (3-class) classification problem with logistic
regression.

Fits sklearn's LogisticRegression on 9 hand-made 2-D sample points
(three per class), predicts two new points, then renders the learned
decision regions with pcolormesh underneath a scatter of the samples.
"""
# NOTE: imports are repeated so this half of the file runs standalone.
import numpy as np
import matplotlib.pyplot as mp
import sklearn.linear_model as lm

# Training samples: three clusters of three 2-D points each.
x = np.array([
    [4, 7],
    [3.5, 8],
    [3.1, 6.2],
    [0.5, 1],
    [1, 2],
    [1.2, 1.9],
    [6, 2],
    [5.7, 1.5],
    [5.4, 2.2]])
# Class labels 0, 1, 2 — one per cluster above.
y = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])

# C=100 weakens regularization for a tighter fit on this tiny dataset;
# liblinear handles multi-class via one-vs-rest under multi_class="auto".
model = lm.LogisticRegression(solver="liblinear", C=100, multi_class="auto")
model.fit(x, y)
result = model.predict([[3, 9], [6, 1]])
print(result)

# Build a dense n*n grid spanning the sample range (padded by 1 on every
# side) and classify every grid point to visualize the decision regions.
left, right = x[:, 0].min() - 1, x[:, 0].max() + 1
bottom, top = x[:, 1].min() - 1, x[:, 1].max() + 1
n = 500
grid_x, grid_y = np.meshgrid(np.linspace(left, right, n),
                             np.linspace(bottom, top, n))
mesh_x = np.column_stack((grid_x.ravel(), grid_y.ravel()))
mesh_z = model.predict(mesh_x)
grid_z = mesh_z.reshape(grid_x.shape)

mp.figure("LR classification", facecolor="lightgray")
mp.title("LR classification", fontsize=16)
# zorder=3 keeps the sample points drawn above the pcolormesh regions.
mp.scatter(x[:, 0], x[:, 1], c=y, cmap="jet", label="sample points",
           s=70, zorder=3)
mp.grid(linestyle=":")
mp.legend()
mp.pcolormesh(grid_x, grid_y, grid_z, cmap="gray")
mp.tight_layout()
mp.show()