all_np_bp - neural network basics: implementing logical AND and OR

import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import Normalizer
# Define the dataset: the four Boolean input pairs and their one-hot class labels.
x_data = np.array([[0, 0],
                   [0, 1],
                   [1, 0],
                   [1, 1]])
y_data = np.array([[0, 1],
                   [1, 0],
                   [1, 0],
                   [0, 1]])
learning_rate = 0.9
training_epochs = 1000


def softmax(z):
    # Row-wise softmax over a 2-D array: exp(z) / sum(exp(z)) per row,
    # shifted by the row maximum for numerical stability.
    z = np.array(z, dtype=float)
    z = z - np.max(z, axis=1, keepdims=True)
    e = np.exp(z)
    return e / np.sum(e, axis=1, keepdims=True)
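
# A quick sanity check with arbitrary test values: every row of the softmax output
# should be a valid probability distribution (strictly positive, summing to 1).
_probs = softmax(np.array([[1.0, 2.0], [-3.0, 0.5]]))
assert np.allclose(np.sum(_probs, axis=1), 1.0) and (_probs > 0).all()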


def zero_to_one(x):
    # Z-score standardization (zero mean, unit variance); not used in the training loop below.
    x = np.array(x, dtype=float)
    return (x - np.mean(x)) / (np.std(x) + 1e-8)


def g(z, dda=True):
    # Sigmoid activation; with dda=False the argument is taken to be the activation a = g(z)
    # and the sigmoid derivative a * (1 - a) is returned.
    if dda:
        return 1 / (1 + np.exp(-z))
    else:
        return z * (1 - z)
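
# A small numerical check of the derivative convention: g(a, dda=False) with a = g(z)
# should match the central-difference slope of the sigmoid at z (z and eps chosen arbitrarily).
_z, _eps = 0.3, 1e-6
_slope = (g(_z + _eps) - g(_z - _eps)) / (2 * _eps)
assert np.isclose(g(g(_z), dda=False), _slope, atol=1e-6)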

def cost(h, y):
    # Mean categorical cross-entropy between the predicted distribution h and one-hot labels y.
    return -np.mean(np.sum(y * np.log(h), axis=1))



def score(h, y):
    # Classification accuracy: fraction of rows whose predicted class (argmax) matches the label.
    return np.mean(np.equal(np.argmax(h, 1), np.argmax(y, 1)).astype(float))
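
# A worked example on a tiny hand-picked batch: the first row is confidently correct,
# the second is wrong, so the accuracy is 0.5 and the cross-entropy is dominated by row 2.
_h = np.array([[0.9, 0.1],
               [0.8, 0.2]])
_y = np.array([[1, 0],
               [0, 1]])
print('example cost:', cost(_h, _y), 'example accuracy:', score(_h, _y))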

def qian(x, W1, W2, b1, b2):
    # Forward pass: sigmoid hidden layer followed by a softmax output layer.
    z1 = np.matmul(x, W1) + b1
    a1 = g(z1)
    z2 = np.matmul(a1, W2) + b2
    a2 = softmax(z2)
    return a1, a2
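
# Shape check on the dataset defined above, using throwaway all-zero weights:
# the hidden activation a1 should be (4, 3) and the output a2 should be (4, 2).
_a1, _a2 = qian(x_data, np.zeros((2, 3)), np.zeros((3, 2)), np.zeros(3), np.zeros(2))
assert _a1.shape == (4, 3) and _a2.shape == (4, 2)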

def BP(x, a1, a2, y, W1, W2, b1, b2):
    # Backpropagation for a softmax output with cross-entropy loss: the output error is a2 - y.
    # Gradients are averaged over the batch of m samples, then one gradient-descent step is taken.
    m = x.shape[0]
    dz2 = a2 - y
    dW2 = np.matmul(a1.T, dz2) / m
    db2 = np.mean(dz2, axis=0)

    # Hidden layer: backpropagate through W2 and the sigmoid derivative a1 * (1 - a1).
    dz1 = np.matmul(dz2, W2.T) * g(a1, dda=False)
    dW1 = np.matmul(x.T, dz1) / m
    db1 = np.mean(dz1, axis=0)

    W2 = W2 - dW2 * learning_rate
    b2 = b2 - db2 * learning_rate
    W1 = W1 - dW1 * learning_rate
    b1 = b1 - db1 * learning_rate
    return W1, W2, b1, b2
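
# Finite-difference spot check of the gradient formula used in BP: recompute
# dW2 = a1.T @ (a2 - y) / m for one entry and compare it with a numeric central
# difference of cost(). The _gc_* weights are small random throwaway values local to this check.
_rng = np.random.RandomState(0)
_gc_W1 = 0.5 * _rng.randn(2, 3)
_gc_b1 = 0.1 * _rng.randn(3)
_gc_W2 = 0.5 * _rng.randn(3, 2)
_gc_b2 = 0.1 * _rng.randn(2)
_gc_a1, _gc_a2 = qian(x_data, _gc_W1, _gc_W2, _gc_b1, _gc_b2)
_analytic = (np.matmul(_gc_a1.T, _gc_a2 - y_data) / x_data.shape[0])[0, 0]
_eps = 1e-5
_W2_plus = _gc_W2.copy()
_W2_plus[0, 0] += _eps
_W2_minus = _gc_W2.copy()
_W2_minus[0, 0] -= _eps
_numeric = (cost(qian(x_data, _gc_W1, _W2_plus, _gc_b1, _gc_b2)[1], y_data)
            - cost(qian(x_data, _gc_W1, _W2_minus, _gc_b1, _gc_b2)[1], y_data)) / (2 * _eps)
assert np.isclose(_analytic, _numeric, atol=1e-6)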

# Per-sample (row) normalization using sklearn's Normalizer; not used in the training loop.
def zheng(h):
    model_Nor = Normalizer(norm='l1', copy=True)
    # norm: which norm to scale each row by ('l1', 'l2', or 'max' for the infinity norm)
    x = model_Nor.fit_transform(h)
    return x

# Initialize weights and biases uniformly in [-1, 1).
np.random.seed(22)
W1 = 2 * np.random.rand(2, 3) - 1
b1 = 2 * np.random.rand(3) - 1
W2 = 2 * np.random.rand(3, 2) - 1
b2 = 2 * np.random.rand(2) - 1

list_cost = []
for i in range(training_epochs):
    # Forward pass, loss, then one backpropagation/update step on the full batch.
    a1, a2 = qian(x_data, W1, W2, b1, b2)
    loss = cost(a2, y_data)
    W1, W2, b1, b2 = BP(x_data, a1, a2, y_data, W1, W2, b1, b2)
    if i != 0:
        # The epoch-0 loss is not recorded for the plot.
        list_cost.append(loss)
    if i % 100 == 0:
        acc = score(a2, y_data)
        print('epoch', i, 'loss', loss, 'accuracy', acc)

# Plot the training loss curve.
plt.plot(list_cost)
plt.show()
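
# After training, compare predictions with the labels: argmax over the output
# probabilities gives the predicted class for each input pair.
_, final_probs = qian(x_data, W1, W2, b1, b2)
print('inputs:\n', x_data)
print('predicted classes:', np.argmax(final_probs, axis=1))
print('target classes:   ', np.argmax(y_data, axis=1))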