# Iris flower classification in Python (one-vs-rest logistic regression)

import numpy as np

from sklearn.datasets import load_iris   # load the iris dataset (150 samples, 4 features, 3 classes)
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']  # font that can render CJK glyphs in figure text
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign rendering correct with that font
iris=load_iris()
# print(iris)
train_x=iris.data    # feature matrix; reshaped to (150, 3) design matrix by data_deal below
# print(train_x)
train_y=iris.target  # integer class labels 0/1/2
# print(train_y)

def data_deal(x, y):
    """Standardize features and build the 3-column design matrix.

    Each feature column is z-score normalized, then the four features are
    collapsed into two combined features (x1+x2 and x3+x4) with a leading
    bias column of ones.

    Fix: operate on a copy — the original `x -= mean(...)` mutated the
    caller's array in place (here, the module-level train_x).

    Args:
        x: feature matrix of shape (n, 4).
        y: label vector of shape (n,).

    Returns:
        (X, Y): X of shape (n, 3) = [1, x1+x2, x3+x4]; Y as an (n, 1) column.
    """
    x = np.asarray(x, dtype=float).copy()  # do not mutate the caller's data
    x -= np.mean(x, axis=0)
    x /= np.std(x, axis=0)
    # Combine the standardized features pairwise and prepend the bias column.
    x = np.c_[np.ones(len(x)), x[:, 0] + x[:, 1], x[:, 2] + x[:, 3]]
    y = np.c_[y]  # (n,) -> (n, 1) column vector
    return x, y
# Build the design matrix once, then one-vs-rest 0/1 label columns:
# for each of the three binary problems, the "target" class is 0 and the
# other two classes are 1 (samples are grouped 50/50/50 by class).
[X, Y] = data_deal(train_x, train_y)
# print(X, Y)

X1 = X  # same features for every binary sub-problem
X2 = X
X3 = X

_zeros = np.zeros(50)
_ones = np.ones(50)
# 0,1,1 -> setosa vs rest
Y1 = np.concatenate([_zeros, _ones, _ones]).reshape(150, 1)
# print(Y1)
# 1,0,1 -> versicolor vs rest
Y2 = np.concatenate([_ones, _zeros, _ones]).reshape(150, 1)
# print(Y2)
# 1,1,0 -> virginica vs rest
Y3 = np.concatenate([_ones, _ones, _zeros]).reshape(150, 1)

def model(x, theta):
    """Logistic-regression hypothesis: elementwise sigmoid of x @ theta."""
    score = np.dot(x, theta)
    return 1.0 / (1.0 + np.exp(-score))
def loss_fn(h, y):
    """Mean binary cross-entropy between predictions and 0/1 targets.

    Fix: average over len(y) — the batch actually passed in — instead of
    the module-level global `Y`, so the function works for any batch size.
    The 1e-5 epsilon keeps log() away from zero.

    Args:
        h: predicted probabilities, shape (m, 1).
        y: binary targets, shape (m, 1).

    Returns:
        Scalar mean cross-entropy loss.
    """
    m = len(y)  # was len(Y): silently depended on a global
    loss = -(1 / m) * np.sum(y * np.log(h + 1e-5) + (1 - y) * np.log(1 - h + 1e-5))
    return loss
def grad_fall(x, y, iterations=20000, lr=0.02):
    """Fit logistic-regression weights with batch gradient descent.

    Fixes:
      * `m` was hard-coded to 100 although each training set here has
        len(y) == 150 samples, which mis-scaled the gradient; use the
        actual batch size.
      * the loss-history array was overwritten by a scalar each iteration;
        record it per step. The return value is still the final loss, as
        the original effectively returned.

    Args:
        x: design matrix, shape (m, 3).
        y: 0/1 targets, shape (m, 1).
        iterations: number of gradient steps.
        lr: learning rate.

    Returns:
        (final_loss, theta): last loss value and the (3, 1) weight vector.
    """
    theta = np.ones((3, 1))
    m = len(y)  # actual sample count (was hard-coded 100)
    loss_history = np.zeros(iterations)
    for i in range(iterations):
        h = model(x, theta)
        loss_history[i] = loss_fn(h, y)
        theta_grad = (1 / m) * np.dot(x.T, h - y)
        theta -= lr * theta_grad
    final_loss = loss_history[-1] if iterations else float("nan")
    return final_loss, theta
def plot(x, y, theta):
    """Compute slope/intercept of the decision boundary z = theta·x = 0.

    Solves theta0 + theta1*f1 + theta2*f2 = 0 for f2 at the min and max of
    the first combined feature, then converts the two boundary points to
    slope/intercept form. Despite its name, this function only computes the
    line (the plotting code was dead/commented out and has been removed);
    `y` is unused but kept for interface compatibility.

    Fix: removed the unused local `x_line` and the commented-out plt calls.

    Args:
        x: design matrix; column 1 holds the first combined feature.
        y: labels (unused).
        theta: (3, 1) weight vector.

    Returns:
        (k, b): slope and intercept, each a length-1 ndarray (a consequence
        of theta's column shape, matching the original return values).
    """
    min_x1, max_x1 = np.min(x[:, 1]), np.max(x[:, 1])
    # Boundary f2 at the two extreme f1 values: f2 = -(t0 + t1*f1) / t2
    min_x1_x2 = -(theta[0] + theta[1] * min_x1) / theta[2]
    max_x1_x2 = -(theta[0] + theta[1] * max_x1) / theta[2]
    # Two points -> slope and intercept of the line through them.
    k = (max_x1_x2 - min_x1_x2) / (max_x1 - min_x1)
    b = min_x1_x2 - k * min_x1
    return k, b
# Train one binary classifier per one-vs-rest split and extract the
# decision-boundary line (slope k, intercept b) for the two outer classes.
loss_end,theta1 =grad_fall(X1,Y1)  # setosa-vs-rest classifier
k1,b1=plot(X1, Y1, theta1)
print(k1,b1,loss_end)
loss_end,theta2 =grad_fall(X2,Y2)  # middle class; its boundary is not drawn below
# k2,b2=plot(X2,Y2,theta2,'训练集')
# print(k2,b2)
loss_end,theta3=grad_fall(X3,Y3)  # virginica-vs-rest classifier
k3,b3=plot(X3,Y3,theta3)
print(k3,b3)
def plot_end(x, y, k1, b1, k3, b3, title):
    """Scatter the three iris classes and overlay the two boundary lines.

    x columns 1 and 2 are the combined features; y holds labels 0/1/2.
    (k1, b1) is the setosa-vs-rest line, (k3, b3) the virginica-vs-rest one.
    """
    plt.title(title)
    # One scatter layer per class, selected by boolean mask on the labels.
    for label, name in ((0, 'setosa'), (1, 'versicolor'), (2, 'virginica')):
        mask = y.flatten() == label
        plt.scatter(x[mask, 1], x[mask, 2], label=name)
    grid = np.linspace(-3, 3, 450)
    plt.plot(grid, k1 * grid + b1, 'r')  # setosa boundary
    plt.plot(grid, k3 * grid + b3, 'g')  # virginica boundary
    plt.legend()
    plt.show()
plot_end(X,Y,k1,b1,k3,b3,'训练集')
  • 1
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值