# 用 C 语言实现神经网络需要几步？

1684人阅读 评论(2)

GitChat 作者：沧夜

【不要错过文末彩蛋】

### 二、从几何开始

$f(x_1,\dots,x_n)$

$\nabla f(x_1,\dots,x_n)=\left(\frac{\partial f}{\partial x_1},\dots,\frac{\partial f}{\partial x_n}\right)$

$\frac{\partial f}{\partial x_i}=\sum_{k=1}^{N}\frac{\partial f}{\partial z_k}\frac{\partial z_k}{\partial x_i}$

$\nabla_x f=\nabla_z f\,A$

$A_{ji}=\frac{\partial z_j}{\partial x_i}$

$Ax=b$

$z=Ax$

$\sum_k\frac{\partial f}{\partial z_k}\frac{\partial z_k}{\partial x_i}=\frac{\partial f}{\partial z_k}\frac{\partial z_k}{\partial x_i}$（爱因斯坦求和约定：重复指标默认求和）

### 再补充一些群的概念

$v=[\dot{x}_1,\dots,\dot{x}_n]$

$\dot{x}_1=\frac{dx_1}{dt}$

$\dot{x}_i=\frac{dx_i}{dt}=\frac{\partial x_i}{\partial z_j}\frac{dz_j}{dt}=\frac{\partial x_i}{\partial z_j}\dot{z}_j$

$v_x=Bv_z$

$|v|^2=v_iv_i$

$|v|^2=g_{ij}v_iv_j$

$d=\sqrt[p]{|v_1-v_2|^p}$

$|v|^2=v_1v_1-v_2v_2-v_3v_3-v_4v_4$

### 三、张量和矩阵

$v=v_ie_i$

$A=[v_1,\dots,v_m]$

$A_{n\times n}=E\Lambda E^{-1}$

$B^TB=A_{n\times n}$

$A_{n\times n}=E\Lambda E^{-1}$

$m_i=Bv_i$

$m_i\cdot m_j=v_i^TB^TBv_j=v_i^TAv_j=v_i^T\lambda_jv_j=\lambda_j\delta_{ij}$

$Av=\lambda v$

$\mu_i=\frac{Bv_i}{|Bv_i|}=\frac{Bv_i}{\sqrt{\lambda_i}}$

$Bv_i=\sqrt{\lambda_i}\mu_i$

$BV=U\Lambda$

$B=U\Lambda V^T$（奇异值分解）

$S_x=\frac{1}{n-1}XX^T=\frac{1}{n-1}U\Lambda V^TV\Lambda U^T$

### 四、函数分析

$Ax=b$

$f(x)=x^TAx-2b^Tx$

$\nabla f(x)=Ax-b=0$

$f(x)=f(x_0)+a_1\nabla f(x_0)\,dx+a_2\,dx^TH\,dx+o(dx^3)$

$x_{t+1}=x_t-\lambda\nabla f(x)$

$dx=x_{t+1}-x_t=-H^{-1}\nabla f$

$g=f+\frac{\partial f_i}{\partial x_j}(x-x_t)_j=f+J(x-x_t)$

$dx=x_{t+1}-x_t=-(J^TJ)^{-1}J^Tf(x)$

$\varepsilon(w)=|d-y|=\sqrt{\sum_i(d_i-y_i)^2}$

$\nabla_w\varepsilon=\frac{\partial\varepsilon}{\partial w^{[1]}}+\dots+\frac{\partial\varepsilon}{\partial w^{[N]}}$

$\frac{\partial\varepsilon}{\partial w^{[N]}}=f'(y^{[N-1]}w^{[N]})(d-y^{[N]})^T\,y^{[N-1]}=\delta^{[N]}y^{[N-1]}$

$\frac{\partial\varepsilon}{\partial w^{[N-1]}}=f'(y^{[N-2]}w^{[N-1]})\,w^{[N]}\left[f'(y^{[N-1]}w^{[N]})(d-y^{[N]})^T\right]y^{[N-2]}=\delta^{[N-1]}y^{[N-2]}$

$\delta^{[N-1]}=f'(x)\left[w^{[N]}\delta^{[N]}\right]$

### 五、实现


"""
@author: Cangye@hotmail.com
"""

import numpy as np

class BPAlg():
    """Fully-connected feed-forward network trained with back-propagation.

    Layers use the logistic (sigmoid) activation. Weights and biases are
    initialized uniformly in [0, 1). Training is online: one gradient step
    per sample.
    """

    def sigmoid(self, x):
        """Logistic activation: 1 / (1 + exp(-x))."""
        return 1 / (1 + np.exp(-x))

    def d_sigmoid(self, x):
        """Derivative of the sigmoid, evaluated at pre-activation x."""
        return np.exp(-x) / (1 + np.exp(-x)) ** 2

    # Backward-compatible alias for the original (misspelled) method name.
    d_sigmiod = d_sigmoid

    def __init__(self, shape):
        """Allocate weights, biases, and per-layer work buffers.

        shape: sequence of layer widths, e.g. [2, 3, 1] for a 2-input,
               3-hidden, 1-output network.
        """
        self.shape = shape
        self.layer = len(shape)
        self.W = []    # W[i]: weight matrix of layer i, shape (shape[i], shape[i+1])
        self.b = []    # b[i]: bias vector of layer i, shape (shape[i+1],)
        self.e = []    # per-layer error buffer (only the last entry is used)
        self.y = []    # per-layer activations cached by forward()
        self.dW = []   # weight-update directions filled by back_forward()
        self.v = []    # pre-activation buffer (kept for interface compatibility)
        self.db = []   # bias-update directions filled by back_forward()
        self.d_sigmoid_v = []  # f'(v) cached by forward() for back-propagation
        for itrn in range(self.layer - 1):
            self.W.append(np.random.random([shape[itrn], shape[itrn + 1]]))
            self.dW.append(np.random.random([shape[itrn], shape[itrn + 1]]))
            self.b.append(np.random.random([shape[itrn + 1]]))
            self.db.append(np.random.random([shape[itrn + 1]]))
        for itr in shape:
            self.e.append(np.random.random([itr]))
            self.y.append(np.random.random([itr]))
            self.v.append(np.random.random([itr]))
            self.d_sigmoid_v.append(np.ones([itr]))

    def forward(self, data):
        """Forward propagation; caches activations and returns the output layer.

        Bug fix vs. the published listing: the original referenced an
        undefined name `temp_vb`; the bias term self.b[itrn] is now added
        to the affine pre-activation as intended.
        """
        self.y[0][:] = data
        temp_y = data
        for itrn in range(self.layer - 1):
            # Affine pre-activation: v = y W + b.
            temp_v = np.dot(temp_y, self.W[itrn]) + self.b[itrn]
            temp_y = self.sigmoid(temp_v)
            self.y[itrn + 1][:] = temp_y
            # Cache f'(v); back_forward() multiplies deltas by it.
            self.d_sigmoid_v[itrn + 1][:] = self.d_sigmoid(temp_v)
        return self.y[-1]

    def back_forward(self, dest):
        """Back propagation: fill self.dW / self.db from the cached forward pass.

        dest: target vector for the most recent forward() input. The
        directions are built from (dest - y), i.e. they already point
        toward decreasing squared error, so updates are additive.
        """
        self.e[self.layer - 1] = dest - self.y[self.layer - 1]
        # Output-layer delta: (d - y) * f'(v).
        temp_delta = self.e[self.layer - 1] * self.d_sigmoid_v[self.layer - 1]
        temp_delta = np.reshape(temp_delta, [-1, 1])
        self.dW[self.layer - 2][:] = np.dot(
            np.reshape(self.y[self.layer - 2], [-1, 1]), np.transpose(temp_delta))
        self.db[self.layer - 2][:] = np.transpose(temp_delta)
        for itrn in range(self.layer - 2, 0, -1):
            # Propagate delta through the weights of the layer above,
            # then gate with the cached sigmoid derivative.
            sigma_temp_delta = np.dot(self.W[itrn], temp_delta)
            temp_delta = sigma_temp_delta * np.reshape(self.d_sigmoid_v[itrn], [-1, 1])
            self.dW[itrn - 1][:] = np.dot(
                np.reshape(self.y[itrn - 1], [-1, 1]), np.transpose(temp_delta))
            self.db[itrn - 1][:] = np.transpose(temp_delta)

    def data_feed(self, data, dest, eta):
        """Online training pass: one forward/backward/update step per sample.

        data: sequence of input vectors (each of length shape[0]).
        dest: sequence of matching target vectors (each of length shape[-1]).
        eta:  learning rate.
        """
        for sample, target in zip(data, dest):
            self.forward(sample)
            self.back_forward(target)
            # dW/db already point down the error surface (built from dest - y),
            # hence the additive update.
            for itrn in range(self.layer - 1):
                self.W[itrn][:] = self.W[itrn] + eta * self.dW[itrn]
                self.b[itrn][:] = self.b[itrn] + eta * self.db[itrn]

### 彩蛋

《高效学习，快速变现：不走弯路的五大学习策略》

Chat简介：

1
0

* 以上用户言论只代表其个人观点，不代表CSDN网站的观点或立场
个人资料
• 访问：114681次
• 积分：1947
• 等级：
• 排名：千里之外
• 原创：72篇
• 转载：0篇
• 译文：0篇
• 评论：155条
文章存档
阅读排行
最新评论