import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import Normalizer
import random
# Dataset: the four 2-bit input patterns (truth-table inputs).
x_data = np.array([
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1],
])
# One-hot targets; column 0 fires for inputs [0,1] and [1,0]
# (XOR-true pattern — TODO confirm intended logic vs. the AND/OR title).
y_data = np.array([
    [0, 1],
    [1, 0],
    [1, 0],
    [0, 1],
])
# NOTE(review): both names are misspelled ("learning_rate", "all_training_epoch")
# but are referenced by BP() and the training loop below — kept unchanged.
leraing_rate = 0.9
all_traing_eop = 1000
def sofmax(list01):
    """Row-wise softmax of a 2-D array-like.

    Fix: the original used ``np.log`` instead of ``np.exp`` (dividing each
    row's logs by the sum of logs) — that is not a softmax: it produces
    NaN/-inf for non-positive inputs and *inverts* the ranking for inputs
    in (0, 1).  This version is the standard numerically-stable softmax.

    Parameters
    ----------
    list01 : 2-D array-like, one sample per row.

    Returns
    -------
    np.ndarray of the same shape; each row is non-negative and sums to 1,
    preserving the ordering of the inputs.
    """
    z = np.asarray(list01, dtype=float)
    # Subtract the per-row max before exponentiating to avoid overflow;
    # softmax is invariant to a constant shift per row.
    shifted = z - np.max(z, axis=1, keepdims=True)
    e = np.exp(shifted)
    return e / np.sum(e, axis=1, keepdims=True)
def zero_to_one(list1):
    """Standardize an array to zero mean and unit variance (z-score).

    Fix: the original computed ``x_fang = list1 - mean**2`` — an elementwise
    expression, not a variance — and divided by it elementwise, producing
    meaningless values.  The body's intent (mean/variance standardization)
    is implemented properly here.

    NOTE(review): the name suggests min-max scaling into [0, 1]; the body
    attempted z-scoring, which is what is kept — confirm intended semantics.
    Currently unused in this file (its call site is commented out).

    Parameters
    ----------
    list1 : array-like of numbers.

    Returns
    -------
    np.ndarray with mean 0 and std 1 (all zeros for constant input).
    """
    arr = np.asarray(list1, dtype=float)
    mean = np.mean(arr)
    std = np.std(arr)
    if std == 0:
        # Constant input: avoid division by zero, return centered zeros.
        return arr - mean
    return (arr - mean) / std
def g(z, dda=True):
    """Sigmoid activation, or its derivative from an activated value.

    With ``dda=True`` (default): the logistic function 1 / (1 + e^-z).
    With ``dda=False``: z is assumed to already be a sigmoid *output* a,
    and the derivative is returned as a * (1 - a).
    """
    if not dda:
        # Derivative expressed in terms of the activation itself.
        return z * (1 - z)
    return 1.0 / (1.0 + np.exp(-z))
def cost(h, y):
    """Mean categorical cross-entropy between predictions h and one-hot y.

    h and y are (n_samples, n_classes) arrays; each row's loss is
    -sum(y * log(h)), averaged over rows.
    """
    per_sample = np.sum(y * np.log(h), axis=1)
    return -np.mean(per_sample)
def score(h, y):
    """Classification accuracy: fraction of rows whose predicted argmax
    matches the label's argmax."""
    pred = np.argmax(h, axis=1)
    truth = np.argmax(y, axis=1)
    return np.mean((pred == truth).astype(float))
def qian(x, W1, W2, b1, b2):
    """Forward pass: x -> sigmoid hidden layer -> sigmoid output -> sofmax.

    Returns the pair (a1, a2): hidden-layer activations and the final
    sofmax-normalized output (a1 is needed later by BP()).
    """
    hidden = g(np.matmul(x, W1) + b1)
    raw_out = g(np.matmul(hidden, W2) + b2)
    return hidden, sofmax(raw_out)
def BP(x, a1, a2, y, W1, W2, b1, b2):
    """One gradient-descent step of backpropagation; returns the updated
    parameters (W1, W2, b1, b2).

    Fixes vs. the original:
      * output delta is ``a2 - y`` (the cross-entropy/softmax gradient), so
        subtracting ``leraing_rate * grad`` *descends* the loss — the
        original used ``y - a2`` with a subtraction, i.e. gradient ascent;
      * ``db2`` is averaged per output unit (``axis=0``), consistent with
        ``db1``, instead of being collapsed to a single scalar.

    NOTE(review): ``a2 - y`` is the exact delta only for a pure softmax+CE
    output; qian() also applies a sigmoid before sofmax, so this remains
    the same approximation the original made.  The weight gradients are
    summed over samples while the bias gradients are means — preserved
    from the original scaling.  Reads module-level global ``leraing_rate``.
    """
    dz2 = a2 - y                                   # output-layer delta
    dW2 = np.matmul(a1.T, dz2)
    db2 = np.mean(dz2, axis=0)
    # Backpropagate through W2 and the sigmoid hidden layer
    # (g(..., dda=False) expects the *activation* a1, which it gets).
    dz1 = np.matmul(dz2, W2.T) * g(a1, dda=False)
    dW1 = np.matmul(x.T, dz1)
    db1 = np.mean(dz1, axis=0)
    # Gradient-descent updates.
    W2 = W2 - leraing_rate * dW2
    b2 = b2 - leraing_rate * db2
    W1 = W1 - leraing_rate * dW1
    b1 = b1 - leraing_rate * db1
    return W1, W2, b1, b2
# Row normalization (unused in this file)
def zheng(h):
    """L1-normalize each row of h with sklearn's Normalizer, so absolute
    values in every row sum to 1.

    (norm options: 'l1', 'l2', or 'max' for the infinity norm.)
    """
    normalizer = Normalizer(norm='l1', copy=True)
    return normalizer.fit_transform(h)
# Reproducible parameter init: uniform samples in [-1, 1).
np.random.seed(22)
W1 = 2 * np.random.rand(2, 3) - 1   # input(2) -> hidden(3) weights
b1 = 2 * np.random.rand(3) - 1      # hidden biases
W2 = 2 * np.random.rand(3, 2) - 1   # hidden(3) -> output(2) weights
b2 = 2 * np.random.rand(2) - 1      # output biases
# Training loop: forward pass, loss, backprop update; report accuracy
# every 100 epochs, then plot the loss curve.
list_cost = []
for epoch in range(all_traing_eop):
    a1, a2 = qian(x_data, W1, W2, b1, b2)
    loss = cost(a2, y_data)
    W1, W2, b1, b2 = BP(x_data, a1, a2, y_data, W1, W2, b1, b2)
    if epoch != 0:
        # Skip the untrained epoch-0 loss so it doesn't dominate the plot.
        list_cost.append(loss)
    if epoch % 100 == 0:
        acc = score(a2, y_data)
        print(loss, acc)
plt.plot(list_cost)
plt.show()
# all_np_bp — neural-network basics: implementing logical AND / OR
# (blog footer residue, kept as a comment: latest recommended article
#  posted 2022-10-18 18:55:32)