Neural Network Multi-Class Classification

This article covers the principles of neural networks together with a from-scratch implementation of the algorithm.

Backpropagation is currently the most widely used and most effective algorithm for training artificial neural networks (ANNs). Its core idea: feed a training example into the ANN's input layer, pass it through the hidden layers, and produce a result at the output layer; this is the forward pass. Because the network's output differs from the true value, we compute the error between the estimate and the target and propagate that error backward from the output layer through the hidden layers toward the input layer. During this backward pass, the error signal is used to adjust each weight. The two passes are iterated until the network converges.
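To make the two passes concrete, here is a minimal, self-contained sketch of a single training step for a tiny network with two inputs, one sigmoid output unit, and no hidden layer (all values are illustrative; the full implementation below generalizes this to multiple layers with bias terms):

import numpy as np

x = np.array([0.5, -0.2])   # one training example
t = 1.0                     # its target value
w = np.array([0.1, 0.3])    # illustrative initial weights
eta = 0.2                   # learning rate

# forward pass: weighted sum followed by the sigmoid activation
o = 1 / (1 + np.exp(-np.dot(w, x)))

# backward pass: output error scaled by the sigmoid derivative o*(1-o)
delta = (t - o) * o * (1 - o)

# gradient step: move each weight along its input times the error signal
w += eta * delta * x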

Code

#!/usr/bin/env python
#-*-coding:utf-8-*-
# a from-scratch neural network implementation
 
import numpy as np
# hyperbolic tangent activation
def tanh(x):
    return np.tanh(x)
# derivative of tanh: 1 - tanh(x)^2
def tanh_deriv(x):
    return 1.0-np.tanh(x)*np.tanh(x)
# logistic (sigmoid) activation
def logistic(x):
    return 1/(1+np.exp(-x))
# derivative of the logistic: f(x)*(1 - f(x))
def logistic_derivative(x):
    return logistic(x)*(1-logistic(x))
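# A quick sanity check (not required by the network itself): each analytic
# derivative above should agree with a central finite difference. The probe
# point, step size and tolerance are illustrative choices.
_x = 0.3
assert abs(tanh_deriv(_x) - (tanh(_x + 1e-6) - tanh(_x - 1e-6)) / 2e-6) < 1e-6
assert abs(logistic_derivative(_x) - (logistic(_x + 1e-6) - logistic(_x - 1e-6)) / 2e-6) < 1e-6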
 
class NeuralNetwork:
    def __init__(self,layers,activation='tanh'):
        if activation == 'logistic':
            self.activation=logistic
            self.activation_deriv=logistic_derivative
        elif activation == 'tanh':
            self.activation=tanh
            self.activation_deriv=tanh_deriv
        else:
            raise ValueError("activation must be 'logistic' or 'tanh'")
        self.weights=[]
        # initialize weights randomly in [-0.25, 0.25); every layer except the
        # output carries an extra bias unit, hence the +1 on the dimensions
        for i in range(1,len(layers)-1):
            self.weights.append((2*np.random.random((layers[i-1]+1,layers[i]+1))-1)*0.25)
        self.weights.append((2*np.random.random((layers[-2]+1,layers[-1]))-1)*0.25)
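    # For example (illustrative): NeuralNetwork([2,32,33,3]) builds three
    # weight matrices of shapes (3, 33), (33, 34) and (34, 3) -- input plus
    # bias into the first hidden layer, hidden-to-hidden, then hidden plus
    # bias into the 3-unit output layer.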
 
    def fit(self,X,y,learning_rate=0.2,epochs=10000):
        # online (stochastic) training: each epoch draws one random instance
        X=np.atleast_2d(X)
        #X=self.normalize(X)
        # append a column of ones so the bias is learned as one more weight
        temp=np.ones([X.shape[0],X.shape[1]+1])
        temp[:,0:-1]=X
        X=temp
        y=np.array(y)

        for k in range(epochs):
            # draw one training row at random
            i=np.random.randint(X.shape[0])
            a=[X[i]]
            # forward pass: keep every layer's activations
            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l],self.weights[l])))
            error=y[i]-a[-1]  # error at the output layer
            # output layer: Err_j = O_j(1-O_j)(T_j-O_j)
            deltas=[error*self.activation_deriv(a[-1])]
            # backward pass: propagate the deltas through the hidden layers
            for l in range(len(a)-2,0,-1):
                deltas.append(deltas[-1].dot(self.weights[l].T)*self.activation_deriv(a[l]))
            deltas.reverse()
            # gradient step on every weight matrix
            for j in range(len(self.weights)):
                layer=np.atleast_2d(a[j])
                delta=np.atleast_2d(deltas[j])
                self.weights[j]+=learning_rate*layer.T.dot(delta)
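    # Minimal usage sketch (illustrative values): learning XOR with one hidden layer.
    #   nn = NeuralNetwork([2, 2, 1])
    #   nn.fit([[0,0],[0,1],[1,0],[1,1]], [0, 1, 1, 0])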
 
    # single-sample prediction: returns the index of the largest output unit
    def predict(self,x):
        x=np.array(x)
        # append the bias input, then run the forward pass
        temp=np.ones(x.shape[0]+1)
        temp[0:-1]=x
        a=temp
        for l in range(0,len(self.weights)):
            a=self.activation(np.dot(a,self.weights[l]))
        return np.argmax(a)

    # batch multi-class prediction (it also covers the single-sample case,
    # so in general this is the method to call)
    def predict2(self,x):
        x=np.array(x)
        #x = np.mat(self.normalize(x))
        x=np.mat(x)

        sample_cnt = x.shape[0]
        ret_list = []
        for i in range(0,sample_cnt):
            cur_sample = x[i]
            # append the bias input, then run the forward pass
            temp=np.ones(cur_sample.shape[1]+1)
            temp[0:-1]=cur_sample
            a=temp
            for l in range(0,len(self.weights)):
                a=self.activation(np.dot(a,self.weights[l]))
            ret_list.append(np.argmax(a))
        return ret_list
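    # Illustrative call: for a 2-input, 3-class net, predict2 takes an (n, 2)
    # array and returns a list of n class indices, e.g.
    #   net.predict2(np.array([[1, 1], [9, 9]]))   # -> something like [0, 2]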

    # z-score standardization (kept for reference; currently unused)
    def normalize(self, feature):
        feature_normalized = np.copy(feature).astype(float)
        feature_mean = np.mean(feature, 0)
        feature_deviation = np.std(feature, 0)
        if feature.shape[0] > 1:
            feature_normalized -= feature_mean
        # guard against division by zero for constant columns
        feature_deviation[feature_deviation == 0] = 1
        feature_normalized /= feature_deviation
        return feature_normalized

net = NeuralNetwork([2,32,33,3])
# sample input x (used only by the commented-out tests below; note its three
# columns would require a net built with 3 inputs)
x = np.array([[0,0,1],
              [0,1,1],
              [1,0,1],
              [1,1,1],
              [0,0,1]])

# sample target y
y = np.array([[0],
              [1],
              [1],
              [1],
              [0]])

#########################protect###########################
# Note: what follows are commented-out test snippets
# input x
# x = np.array([[0,0,1],
#               [0,1,1],
#               [1,0,1],
#               [1,1,1],
#               [0,0,1]])

# # input y
# y = np.array([[0],
#               [1],
#               [1],
#               [1],
#               [0]])
# x = [[1, 1], [1, 2], [2, 1], [3, 3], [4, 4], [5, 5], [7, 7], [7, 8], [8, 7], [8, 8]]
# #y = [0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
# #y = [[0],[0],[0],[1],[1],[1],[2],[2],[2],[2]]
# y = np.array([[1,0,0],[1,0,0],[1,0,0],[0,1,0],[0,1,0],[0,1,0],[0,0,1],[0,0,1],[0,0,1],[0,0,1]])

# net.fit(x,y,0.05,10000)

# # out = np.argmax(net.predict([1,0,1]))


# print(net.predict2(np.array([[1, 1], [1, 2], [2, 1], [3, 3], [4, 4], [5, 5]])))

# #print(net.predict2(np.array([[1, 1], [1, 2], [2, 1], [3, 3], [4, 4], [5, 5], [7, 7], [7, 8], [8, 7], [8, 8]])))

# print(net.predict2(np.array([[9,9]])))
#########################protect###########################


## dataset generation
# x_min, x_max, y_min, y_max: rectangular region the points are drawn from
# targetval: target value (currently unused)
# valcnt: number of samples
# sortcnt: number of classes
# sortid: class id (1-based)
def producedata(x_min, x_max, y_min, y_max, targetval, valcnt, sortcnt, sortid):
    x = np.random.randint(x_min, x_max, valcnt)
    y = np.random.randint(y_min, y_max, valcnt)
    array = np.array([[k, v] for k, v in zip(x, y)])

    # two-thirds train, one-third test
    train = array[0:valcnt*2//3]
    test = array[valcnt*2//3:]

    # build the one-hot label vector for this class
    zero_t = np.full((1,sortcnt),0)
    zero_t = zero_t.reshape(sortcnt,)
    zero_t[sortid-1] = 1

    relist = []
    for i in range(1,valcnt+1):
        relist.append(zero_t)
    relist = np.array(relist)

    y_train = relist[0:valcnt*2//3]
    y_test = relist[valcnt*2//3:]
    return train, test, y_train, y_test
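# Illustrative shapes: producedata(2,20,3,15,0,200,3,1) yields 133 training
# points, 67 test points, and matching one-hot label rows such as [1, 0, 0].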

train1, test1, y_train1, y_test1 = producedata(2,20,3,15,0,200,3,1)
train2, test2, y_train2, y_test2 = producedata(17,25,1,2,1,200,3,2)
train3, test3, y_train3, y_test3 = producedata(26,30,3,15,2,200,3,3)

train = np.vstack((train1,train2,train3))
test = np.vstack((test1,test2,test3))

y_train = np.vstack((y_train1,y_train2,y_train3))
y_test = np.vstack((y_test1,y_test2,y_test3))

net.fit(train,y_train,0.0005,10000)
result = net.predict2(test)
print(result)
print(y_test)

# a prediction is correct when the predicted index hits the 1 in the one-hot row
correct = 0
for i in range(0,len(result)):
    j = result[i]
    if y_test[i][j] == 1:
        correct += 1
print("acc:{}%".format(correct/len(result)*100))
