A Brief Study Summary of Artificial Neural Networks (ANN)

I. Activation Functions

import numpy as np

# Step function
def step_function(x):
    return np.array(x > 0, dtype=int)  # np.int is removed in recent NumPy; use int

# Sigmoid
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# ReLU
def relu(x):
    return np.maximum(0, x)

# Softmax
def softmax(a):
    c = np.max(a)
    exp_a = np.exp(a - c)               # subtract the max to prevent overflow
    sum_exp_a = np.sum(exp_a)
    y = exp_a / sum_exp_a
    return y

[Figure: the step function and the sigmoid function]
[Figure: the ReLU function]
The softmax function outputs real numbers between 0.0 and 1.0, and its output values sum to 1, so it presents the result of a multi-class classification problem in the form of probabilities.
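A quick check of both properties, reusing the softmax defined above (the input vector is just an arbitrary example):

#softmax demo
a = np.array([0.3, 2.9, 4.0])
y = softmax(a)
print(y)          # approx. [0.018 0.245 0.737] -- every value lies in (0, 1)
print(np.sum(y))  # 1.0 -- the outputs form a probability distribution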

II. Gradient Descent

A visual explanation of gradient-descent methods (momentum, AdaGrad, RMSProp, Adam).
For details, see the machine-learning notes; a minimal sketch of the four update rules follows.
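The sketch below applies each rule to one parameter array. The hyperparameter defaults (learning rates, decay factors) are common conventions assumed here, not values from this note.

#Gradient-descent variants (sketch)
import numpy as np

def momentum_step(w, grad, v, lr=0.01, mu=0.9):
    # Momentum: a velocity term smooths the descent direction
    v = mu * v - lr * grad
    return w + v, v

def adagrad_step(w, grad, h, lr=0.01, eps=1e-7):
    # AdaGrad: accumulated squared gradients shrink each parameter's step
    h = h + grad * grad
    return w - lr * grad / (np.sqrt(h) + eps), h

def rmsprop_step(w, grad, h, lr=0.01, rho=0.99, eps=1e-7):
    # RMSProp: AdaGrad with an exponential moving average instead of a full sum
    h = rho * h + (1 - rho) * grad * grad
    return w - lr * grad / (np.sqrt(h) + eps), h

def adam_step(w, grad, m, v, t, lr=0.001, b1=0.9, b2=0.999, eps=1e-8):
    # Adam: momentum-style first moment plus RMSProp-style second moment,
    # both bias-corrected for the first few steps (t counts from 1)
    m = b1 * m + (1 - b1) * grad
    v = b2 * v + (1 - b2) * grad * grad
    m_hat = m / (1 - b1 ** t)
    v_hat = v / (1 - b2 ** t)
    return w - lr * m_hat / (np.sqrt(v_hat) + eps), m, v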

III. Some Networks

[Figure: classification of network types]

1. MLP (Multilayer Perceptron)

A multilayer perceptron (typically three layers) can solve the linearly inseparable problems that a single layer cannot; the principle is that the hidden layer maps the original problem into a linearly separable one.
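The classic case is XOR: no single line separates its four input points, but two hidden units remap them so that the output unit can. A minimal sketch using one well-known choice of hand-picked gate weights (NAND, OR, AND); the specific values are illustrative assumptions, not part of the original note:

#XOR with a two-layer perceptron (sketch)
def unit(x1, x2, w1, w2, b):
    # one perceptron unit with a step activation
    return int(w1 * x1 + w2 * x2 + b > 0)

def xor(x1, x2):
    s1 = unit(x1, x2, -0.5, -0.5, 0.7)   # NAND
    s2 = unit(x1, x2, 0.5, 0.5, -0.2)    # OR
    return unit(s1, s2, 0.5, 0.5, -0.7)  # AND of the hidden outputs

for p in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    print(p, xor(*p))  # prints 0, 1, 1, 0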

#MLP: understanding the network structure by hand-coding backpropagation
import numpy as np
import matplotlib.pyplot as plt

a = np.array([0.05, 0.1])  # inputs a1, a2
w1 = np.array([[0.15, 0.25], [0.2, 0.3]])  # weights from a1 to b1, b2 and from a2 to b1, b2
w2 = np.array([[0.4, 0.5], [0.45, 0.55]])  # weights from b1 to c1, c2 and from b2 to c1, c2
target = np.array([0.01, 0.99])
d1 = 0.35   # bias into the hidden layer
d2 = 0.6    # bias into the output layer
beta = 0.5  # learning rate

# Part 1: forward propagation

# Net input of the hidden layer: netb1, netb2
netb = np.dot(a, w1) + d1
# Output of the hidden layer (sigmoid): outb1, outb2
m = 1.0 / (1.0 + np.exp(-netb))
# Net input of the output layer: netc1, netc2
netc = np.dot(m, w2) + d2
# Output of the output layer: outc1, outc2
n = 1.0 / (1.0 + np.exp(-netc))

# Part 2: backpropagation
count = 0  # iteration counter
E = []     # error history
# Gradient descent
while True:
    count += 1

    # Partial derivatives of the total error w.r.t. w1 (input-to-hidden weights).
    # The delta of output unit k is -(target[k] - n[k]) * n[k] * (1 - n[k]);
    # hidden unit j collects those deltas through the weights w2[j][k].
    pd1 = (-(target[0] - n[0]) * n[0] * (1 - n[0]) * w2[0][0] - (target[1] - n[1]) * n[1] * (1 - n[1]) *
           w2[0][1]) * m[0] * (1 - m[0]) * a[0]
    pd2 = (-(target[0] - n[0]) * n[0] * (1 - n[0]) * w2[0][0] - (target[1] - n[1]) * n[1] * (1 - n[1]) *
           w2[0][1]) * m[0] * (1 - m[0]) * a[1]
    pd3 = (-(target[0] - n[0]) * n[0] * (1 - n[0]) * w2[1][0] - (target[1] - n[1]) * n[1] * (1 - n[1]) *
           w2[1][1]) * m[1] * (1 - m[1]) * a[0]
    pd4 = (-(target[0] - n[0]) * n[0] * (1 - n[0]) * w2[1][0] - (target[1] - n[1]) * n[1] * (1 - n[1]) *
           w2[1][1]) * m[1] * (1 - m[1]) * a[1]
    w1[0][0] = w1[0][0] - beta * pd1
    w1[1][0] = w1[1][0] - beta * pd2
    w1[0][1] = w1[0][1] - beta * pd3
    w1[1][1] = w1[1][1] - beta * pd4

    # Partial derivatives of the total error w.r.t. w2 (hidden-to-output weights)
    pd5 = -(target[0] - n[0]) * n[0] * (1 - n[0]) * m[0]
    pd6 = -(target[0] - n[0]) * n[0] * (1 - n[0]) * m[1]
    pd7 = -(target[1] - n[1]) * n[1] * (1 - n[1]) * m[0]
    pd8 = -(target[1] - n[1]) * n[1] * (1 - n[1]) * m[1]
    w2[0][0] = w2[0][0] - beta * pd5
    w2[1][0] = w2[1][0] - beta * pd6
    w2[0][1] = w2[0][1] - beta * pd7
    w2[1][1] = w2[1][1] - beta * pd8

    # Forward pass with the updated weights
    netb = np.dot(a, w1) + d1
    m = 1.0 / (1.0 + np.exp(-netb))
    netc = np.dot(m, w2) + d2
    n = 1.0 / (1.0 + np.exp(-netc))

    # Total error: half the sum of squared errors
    e = np.sum((target - n) ** 2) / 2
    E.append(e)
    # Stop once the error is small enough
    if e < 0.0000001:
        break
print(count)
print(e)
print(n)
plt.plot(range(len(E)), E, label='error')
plt.legend()
plt.xlabel('iteration')
plt.ylabel('error')
plt.show()

2. RBFNN (Radial Basis Function Neural Network)

  1. Theoretical basis: linear separability in three dimensions (classification); samples that are inseparable in the original space can become separable once projected into a higher-dimensional space.

Its hidden layer consists of radial-basis neurons. The net input of a radial-basis neuron is a distance function (e.g., the Euclidean distance) multiplied by a bias; that is, a radial basis function serves as the activation function.
The RBFs act as the basis of the hidden units and span the hidden-layer space: the input vector is mapped into this hidden space directly (i.e., without weighted connections), projecting the low-dimensional sample points into a higher-dimensional space.

[Figure: the Gaussian function] The Gaussian kernel is the usual choice of radial basis, φ(x) = exp(−‖x − μ‖² / (2σ²)), which is exactly what the code below implements.

#RBFNN
import numpy as np

# Load the sample data (circle dataset)
x = np.loadtxt("Experiments-2022-4-24/circle data/X.txt")
y = np.loadtxt("Experiments-2022-4-24/circle data/y.txt")
mu = [0.1, 0.2, 0.3, 0.4]               # centers of the hidden (RBF) units
sigma = [0.5, 0.6, 0.7, 0.8]            # widths of the hidden units
w = np.array([0.01, 0.02, 0.03, 0.04])  # hidden-to-output weights


# Gaussian radial basis: G[i][j] = exp(-||x_i - mu_j||^2 / (2 * sigma_j^2))
def gauss(x, mu, sigma):
    G = np.ones((len(x), len(mu)))
    for i in range(len(x)):
        for j in range(len(mu)):
            G[i][j] = np.exp(-(np.linalg.norm(x[i] - mu[j]) ** 2) / (2 * (sigma[j] ** 2)))
    return G


# Per-sample errors and the total loss (half the sum of squared errors)
def error_loss(y, w, G):
    y_ = []
    error = [0] * len(y)
    for i in range(len(G)):
        y_.append(np.dot(w.T, G[i]))
    for j in range(len(y)):
        error[j] = y[j] - y_[j]
    loss = np.sum(np.square(error)) / 2
    return [error, loss]


# One gradient-descent step on w, mu and sigma
def update(error, w, mu, sigma, G, x, alpha):
    s = len(w)
    grad_w = [0] * s
    grad_mu = [0] * s
    grad_sigma = [0] * s
    n = len(error)
    for i in range(n):
        for j in range(s):
            grad_w[j] += -error[i] * G[i][j]
            grad_mu[j] += -(w[j] / np.square(sigma[j])) * error[i] * G[i][j] * (x[i] - mu[j])
            grad_sigma[j] += -(w[j] / np.power(sigma[j], 3)) * error[i] * G[i][j] * ((np.linalg.norm(x[i] - mu[j])) ** 2)
    for k in range(s):
        # descend along the gradient (the minus signs above are part of dL/dparam)
        w[k] -= alpha * grad_w[k]
        mu[k] -= alpha * grad_mu[k]
        sigma[k] -= alpha * grad_sigma[k]
    return [w, mu, sigma]


G = gauss(x, mu, sigma)
error, loss = error_loss(y, w, G)
# update mutates w, mu and sigma in place, so call it once and unpack
w_new, mu_new, sigma_new = update(error, w, mu, sigma, G, x, 0.1)
print("w:" + str(w_new))
print("mu:" + str(mu_new))
print("sigma:" + str(sigma_new))

Similarities and differences between MLP and RBFNN

[Figures: MLP and RBF network structures]

  1. In an RBFNN the input-to-hidden weights are fixed at 1, and the two networks use different activation functions.
  2. MLP and RBFNN are both feed-forward networks with the same overall structure. Because multilayer feed-forward networks are usually trained with the error backpropagation (BP) algorithm, and both of the networks mentioned here use it, they are also commonly called BP networks.

3. HNN (Hopfield Neural Network)

[Figure: Hopfield network structure] A Hopfield network is a recurrent network used as an associative memory: stored patterns become stable states, and a corrupted input converges back to the nearest stored pattern, as the demo below shows.

'''
Hopfield Improved Algorithm
'''

import numpy as np

################################### Global Parameters ###################################
# Data Type
uintType = np.uint8
floatType = np.float32
################################### Global Parameters ###################################

# Hopfield Class
class HOP(object):
    def __init__(self, N):
        # Bit Dimension
        self.N = N
        # Weight Matrix
        self.W = np.zeros((N, N), dtype = floatType)

    # Calculate Kronecker Square Product of [factor] itself OR use np.kron()
    def kroneckerSquareProduct(self, factor):
        ksProduct = np.zeros((self.N, self.N), dtype = floatType)

        # Calculate
        for i in range(0, self.N):
            ksProduct[i] = factor[i] * factor

        return ksProduct

    # Training a single stableState once a time, mainly to train [W]
    def trainOnce(self, inputArray):
        # Learn with normalization
        mean = float(inputArray.sum()) / inputArray.shape[0]
        self.W = self.W + self.kroneckerSquareProduct(inputArray - mean) / (self.N * self.N) / mean / (1 - mean)

        # Erase diagonal self-weight
        index = range(0, self.N)
        self.W[index, index] = 0.

    # Overall training function
    def hopTrain(self, stableStateList):
        # Preprocess List to Array type
        stableState = np.asarray(stableStateList, dtype = uintType)

        # Exception
        if np.amin(stableState) < 0 or np.amax(stableState) > 1:
            print('Vector Range ERROR!')
            return

        # Train
        if len(stableState.shape) == 1 and stableState.shape[0] == self.N:
            print('stableState count: 1')
            self.trainOnce(stableState)
        elif len(stableState.shape) == 2 and stableState.shape[1] == self.N:
            print('stableState count: ' + str(stableState.shape[0]))
            for i in range(0, stableState.shape[0]):
                self.trainOnce(stableState[i])
        else:
            print('SS Dimension ERROR! Training Aborted.')
            return
        print('Hopfield Training Complete.')

    # Run HOP to output
    def hopRun(self, inputList):
        # Preprocess List to Array type
        inputArray = np.asarray(inputList, dtype = floatType)

        # Exception
        if len(inputArray.shape) != 1 or inputArray.shape[0] != self.N:
            print('Input Dimension ERROR! Running Aborted.')
            return

        # Run
        matrix = np.tile(inputArray, (self.N, 1))
        matrix = self.W * matrix
        outputArray = matrix.sum(1)

        # Normalize
        m = float(np.amin(outputArray))
        M = float(np.amax(outputArray))
        outputArray = (outputArray - m) / (M - m)

        # Binary
        ''' \SWITCH/ : 1/-1 OR 1/0
        outputArray[outputArray < 0.5] = -1.
        ''' # \Division/
        outputArray[outputArray < 0.5] = 0.
        # ''' # \END/
        outputArray[outputArray > 0] = 1.

        return np.asarray(outputArray, dtype = uintType)

    # Reset HOP to initialized state
    def hopReset(self):
        # Weight Matrix RESET
        self.W = np.zeros((self.N, self.N), dtype = floatType)

# Utility routine for printing the input vector: [NperGroup] numbers each piece
def printFormat(vector, NperGroup):
    string = ''
    for index in range(len(vector)):
        if index % NperGroup == 0:
            ''' \SWITCH/ : Single-Row OR Multi-Row
            string += ' '
            ''' # \Division/
            string += '\n'
            # ''' # \END/

        # ''' \SWITCH/ : Image-Matrix OR Raw-String
        if str(vector[index]) == '0':
            string += '    '
        elif str(vector[index]) == '1':
            string += ' * '
        else:
            string += str(vector[index])
        ''' # \Division/
        string += str(vector[index])
        # ''' # \END/
    string += '\n'
    print(string)

# DEMO of Hopfield Net
def HOP_demo():
    ten = [0, 0, 1, 0, 0,
           0, 0, 1, 0, 0,
           1, 1, 1, 1, 1,
           0, 0, 1, 0, 0,
           0, 0, 1, 0, 0]
    X = [1, 0, 0, 0, 1,
         0, 1, 0, 1, 0,
         0, 0, 1, 0, 0,
         0, 1, 0, 1, 0,
         1, 0, 0, 0, 1]

    hop = HOP(5 * 5)
    hop.hopTrain([ten, X])

    half_ten = [0, 0, 0, 0, 0,
                 0, 0, 0, 0, 0,
                 1, 1, 1, 1, 1,
                 0, 0, 0, 0, 0,
                 0, 0, 0, 0, 0]
    print('Half-Ten:')
    printFormat(half_ten, 5)
    result = hop.hopRun(half_ten)
    print('Recovered:')
    printFormat(result, 5)

    half_X = [1, 0, 0, 0, 0,
                0, 0, 0, 0, 0,
                0, 0, 1, 0, 0,
                0, 0, 0, 0, 0,
                0, 0, 0, 0, 0]
    print('Half-X:')
    printFormat(half_X, 5)
    result = hop.hopRun(half_X)
    print('Recovered:')
    printFormat(result, 5)

##########################
if __name__ == '__main__':
    HOP_demo()

4. CNN (Convolutional Neural Network)

A convolutional neural network consists of convolutional layers, pooling layers, and fully connected layers.
More details to come.
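Until the detailed write-up is added, here is a minimal NumPy sketch of the two core operations, convolution and max pooling; the toy image and kernel values are arbitrary examples:

#Convolution + pooling (sketch)
import numpy as np

def conv2d(x, k):
    # valid 2-D convolution (cross-correlation, as CNN libraries compute it)
    H, W = x.shape
    kh, kw = k.shape
    out = np.zeros((H - kh + 1, W - kw + 1))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            out[i, j] = np.sum(x[i:i + kh, j:j + kw] * k)
    return out

def max_pool(x, size=2):
    # non-overlapping max pooling
    H, W = x.shape
    out = np.zeros((H // size, W // size))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            out[i, j] = np.max(x[i * size:(i + 1) * size, j * size:(j + 1) * size])
    return out

x = np.arange(16, dtype=float).reshape(4, 4)  # toy 4x4 "image"
k = np.array([[1., 0.], [0., -1.]])           # toy 2x2 kernel
print(max_pool(conv2d(x, k)))                 # conv -> 3x3 map, pool -> 1x1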
