One-vs-One multi-class classification (pairwise linear classifiers on the Iris data set)

from sklearn.datasets import load_iris
from sklearn.cross_validation import train_test_split
import numpy as np

def bGradDescent(x, y):
    """Fit a 4-feature linear model with per-sample gradient descent (LMS rule).

    Note: the update uses the raw linear output (no sigmoid), i.e. this is
    least-mean-squares regression onto the 0/1 labels; the caller applies
    `sigmoid` only at prediction time.

    Parameters
    ----------
    x : sequence of length-4 feature vectors
    y : sequence of numeric (0/1) labels, same length as x

    Returns
    -------
    numpy.ndarray of shape (4,) : the learned weight vector.
    """
    alpha = 0.01          # learning rate
    max_epochs = 150      # same hard budget as the original `iter > 150` stop
    tol = 1e-6            # early-stop threshold on the change in squared error
    w = np.random.rand(4)
    prev_err = float("inf")
    # The original computed the sum of squared errors every epoch but never
    # used it, and shadowed the builtin `iter`; here the error drives an
    # early stop while the epoch budget is preserved.
    for _ in range(max_epochs + 1):
        # One pass over the training data, updating after every sample.
        for i in range(len(x)):
            h = w[0] * x[i][0] + w[1] * x[i][1] + w[2] * x[i][2] + w[3] * x[i][3]
            for j in range(4):
                w[j] = w[j] + alpha * (y[i] - h) * x[i][j]
        # Sum of squared errors over the whole training set.
        err = 0.0
        for i in range(len(x)):
            pred = w[0] * x[i][0] + w[1] * x[i][1] + w[2] * x[i][2] + w[3] * x[i][3]
            err = err + (y[i] - pred) ** 2
        if abs(prev_err - err) < tol:
            break
        prev_err = err
    return w
    
def stochasticGradientDescent(x, y, alpha=0.01, n_iter=140):
    """Stochastic gradient descent for a 4-feature linear least-squares model.

    Fixes over the original (which could never run):
    - `alpha` was an undefined global (NameError) -> now a parameter.
    - `random` was never imported -> imported locally.
    - the sample pool was the undefined global `data` seeded with 0..3, so at
      most the first four rows were ever used -> a uniformly random row index
      over the whole data set is drawn each step.
    - the unused `x_train = x.transpose()` was removed.

    Parameters
    ----------
    x : array-like of shape (n_samples, 4) feature matrix
    y : array-like of shape (n_samples,) numeric targets
    alpha : float, learning rate (default 0.01)
    n_iter : int, number of single-sample update steps (default 140,
        matching the original loop bound)

    Returns
    -------
    numpy.ndarray of shape (4,) : the learned parameter vector.
    """
    import random
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    theta = np.random.rand(4)
    for _ in range(n_iter):
        hypothesis = np.dot(x, theta)
        # Residuals of the current model on every sample.
        loss = hypothesis - y
        # Pick one random sample for this update step.
        i = random.randrange(len(x))
        # Gradient of the squared error at that single sample.
        gradient = loss[i] * x[i]
        theta = theta - alpha * gradient
    return theta

def sigmoid(x):
    """Logistic function: map any real input into the open interval (0, 1)."""
    denom = 1 + np.exp(-x)
    return 1 / denom
  
if __name__=="__main__":
 
    # One-vs-one (OvO) multi-class classification on the Iris data set:
    # train one binary linear classifier per pair of classes
    # (0 vs 1, 1 vs 2, 0 vs 2), then label each test sample by majority
    # vote over the three classifiers.
    data = load_iris()   
    X = data.data  
    y = data.target    
    features = data.feature_names  
    targets = data.target_names  
    # NOTE(review): the default split of the 150 Iris samples is 112 train /
    # 38 test; the hard-coded 112 and 38 below depend on that default.
    X_train, X_test_raw, y_train, y_test = train_test_split(X,y)#112,38    
    weight1 = []    # one 4-element weight vector per pairwise classifier
    ih=[]           # predicted class label for each test sample
    # Classifiers 0 and 1: class i gets label 0, class i+1 gets label 1;
    # samples from the third class are excluded from that training set.
    for i in range(2):   
        labelMat1 = []  
        dataMat1=[]
        for j in range(112): 
            if y_train[j] == i:  
                labelMat1.append(0) 
                dataMat1.append(X_train[j]) 
            elif y_train[j]==i+1:  
                labelMat1.append(1)  
                dataMat1.append(X_train[j]) 
        weight1.append(bGradDescent(dataMat1,labelMat1))
    # Classifier 2: class 0 (label 0) vs class 2 (label 1).
    labelMat1 = []  
    dataMat1=[]    
    for j in range(112):
        if y_train[j] == 2:  
            labelMat1.append(1)  
            dataMat1.append(X_train[j]) 
        elif y_train[j]==0:  
            labelMat1.append(0)  
            dataMat1.append(X_train[j]) 
    weight1.append(bGradDescent(dataMat1,labelMat1))   
    # Classify each of the 38 test samples by majority vote.
    for j in range(38):  
        voteResult =[0,0,0]  # vote tallies for classes 0, 1, 2
        h=np.zeros(3)        # sigmoid score from each pairwise classifier
        for i in range(3):  
            # Linear score = dot(test sample, classifier weights).
            ob=X_test_raw[j][0]*weight1[i][0]+X_test_raw[j][1]*weight1[i][1]+X_test_raw[j][2]*weight1[i][2]+X_test_raw[j][3]*weight1[i][3]
            h[i]=sigmoid(ob)
        # Classifier jj (jj = 0, 1) separates class jj from class jj+1:
        # a score above 0.5 votes for class jj+1, otherwise class jj.
        # (The `<= 1` guard is always true since sigmoid never exceeds 1.)
        for jj in range(2):
            if h[jj] > 0.5 and h[jj] <= 1: 
                    voteResult[jj+1]+=1
            else:
                    voteResult[jj]+=1
        # Classifier 2 separates class 0 (score <= 0.5) from class 2 (> 0.5).
        if h[2] > 0.5 and h[2] <= 1: 
                    voteResult[2]+=1
        else:
                    voteResult[0]+=1
        # Predicted class = the one with the most votes (ties -> lowest index).
        ih.append(voteResult.index(max(voteResult))  )
    # Report accuracy (1 - error rate) over the 38-sample test split.
    error = 0.0  
    for j in range(38):  
        if ih[j] != y_test[j]:  
            error = error +1      
    pro = 1 - error / 38
    print (pro)  

 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值