A Simple Deep Learning Implementation from Scratch (3/3): Forward and Backward Propagation



Topological sort. (Python 3.9+ ships a topological sorter in the standard library's graphlib module; a comparison sketch follows the hand-rolled version below.)

import numpy as np
# A graph as an adjacency map: each key lists its successors.
simple_graph = {
    'a': [1, 2],
    'b': [2, 3]
}
list(simple_graph.keys())

['a', 'b']

from functools import reduce
list_a = [1,2,3]
list_b = [2,3,4]
set(list_a) - set(list_b)

{1}

reduce(lambda a, b: a + b, list(simple_graph.values()))

[1, 2, 2, 3]


import random

def topologic(graph):
    """Kahn-style topological sort; note that it consumes (mutates) `graph`."""
    sorted_nodes = []

    while graph:
        all_nodes_have_inputs = reduce(lambda a, b: a + b, list(graph.values()))
        all_nodes_have_outputs = list(graph.keys())

        # Nodes that appear only as keys (never in anyone's successor list)
        # have no incoming edges, so they are safe to emit next.
        all_nodes_only_have_outputs_no_inputs = set(all_nodes_have_outputs) - set(all_nodes_have_inputs)

        if all_nodes_only_have_outputs_no_inputs:
            node = random.choice(list(all_nodes_only_have_outputs_no_inputs))
            sorted_nodes.append(node)

            # When only one key remains, its successors are pure sinks with no
            # entries of their own; flush them to the output as well.
            if len(graph) == 1:
                sorted_nodes += graph[node]

            graph.pop(node)

            # Remove the emitted node from every remaining successor list.
            for _, links in graph.items():
                if node in links: links.remove(node)
        else:
            raise ValueError('graph has a cycle; topological sort is impossible')
    return sorted_nodes
x, k, b, linear, sigmoid, y, loss = 'x', 'k', 'b', 'linear', 'sigmoid', 'y', 'loss'

test_graph = {
    x:[linear],
    k:[linear],
    b:[linear],
    linear:[sigmoid],
    sigmoid:[loss],
    y:[loss],
}
topologic(test_graph)

['x', 'b', 'k', 'y', 'linear', 'sigmoid', 'loss']
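
For comparison, the same ordering via the standard-library sorter mentioned at the top (a sketch; graphlib expects a predecessor map, {node: the nodes it depends on}, which is the reverse of the successor maps used throughout this post):

from graphlib import TopologicalSorter  # Python 3.9+

# test_graph maps node -> successors, so the edges are written inverted here.
inverted = {
    'linear': {'x', 'k', 'b'},
    'sigmoid': {'linear'},
    'loss': {'sigmoid', 'y'},
}
print(list(TopologicalSorter(inverted).static_order()))
# e.g. ['x', 'k', 'b', 'y', 'linear', 'sigmoid', 'loss'] (ties may appear in any order)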


Verifying that the topological order works end to end

class Node:
    def __init__(self, inputs=None, name=None, is_trainable=False):
        # Avoid a mutable default argument: each Node gets its own input list.
        self.inputs = inputs if inputs is not None else []
        self.outputs = []
        self.name = name
        self.value = None
        self.is_trainable = is_trainable

        self.gradients = dict()

        # Registering with our inputs wires up their `outputs` automatically.
        for node in self.inputs:
            node.outputs.append(self)

    def __repr__(self):
        return 'Node: {}'.format(self.name)

    def forward(self):
        print('I am {}; no human assigned me, I computed my value: {}'.format(self.name, self.value))

    def backward(self):
        for n in self.inputs:
            print('gradient of {} with respect to {} collected'.format(self.name, n.name))
        

# The wiring we are aiming for (illustration only; `outputs` is not a
# constructor argument, it is filled in automatically by Node.__init__):
# node_x       -> node_linear
# node_y       -> node_loss
# node_k       -> node_linear
# node_b       -> node_linear
# node_linear  -> node_sigmoid
# node_sigmoid -> node_loss
# node_loss    (no outputs)
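
A runnable version of that wiring, as a quick smoke test of the plumbing (my sketch, not from the original post; these plain Nodes only print, they compute nothing yet):

node_x = Node(name='x')
node_y = Node(name='y')
node_k = Node(name='k')
node_b = Node(name='b')
node_linear = Node(inputs=[node_x, node_k, node_b], name='linear')
node_sigmoid = Node(inputs=[node_linear], name='sigmoid')
node_loss = Node(inputs=[node_sigmoid, node_y], name='loss')

order = [node_x, node_y, node_k, node_b, node_linear, node_sigmoid, node_loss]
for n in order:            # forward sweep, in topological order
    n.forward()
for n in reversed(order):  # backward sweep, in reverse order
    n.backward()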


class Linear(Node):
    def __init__(self, x, k, b, name=None):
        Node.__init__(self, inputs=[x, k, b], name=name)

    def __repr__(self):
        return 'Linear: {}'.format(self.name)

    def forward(self):
        x, k, b = self.inputs[0], self.inputs[1], self.inputs[2]
        self.value = k.value * x.value + b.value
        print('I am {}; no human assigned me, I computed my value: {}'.format(self.name, self.value))

    def backward(self):
        x, k, b = self.inputs[0], self.inputs[1], self.inputs[2]

        # Chain rule: the incoming gradient (stored by the downstream node)
        # times the local partial derivatives of linear = k*x + b.
        self.gradients[self.inputs[0]] = self.outputs[0].gradients[self] * k.value  # d linear / d x = k
        self.gradients[self.inputs[1]] = self.outputs[0].gradients[self] * x.value  # d linear / d k = x
        self.gradients[self.inputs[2]] = self.outputs[0].gradients[self] * 1        # d linear / d b = 1

        print('self.gradients[self.inputs[0]] {}'.format(self.gradients[self.inputs[0]]))
        print('self.gradients[self.inputs[1]] {}'.format(self.gradients[self.inputs[1]]))
        print('self.gradients[self.inputs[2]] {}'.format(self.gradients[self.inputs[2]]))
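
Written out, those three gradient lines are just the chain rule applied to linear = k*x + b:

∂loss/∂x = ∂loss/∂linear * ∂linear/∂x = ∂loss/∂linear * k
∂loss/∂k = ∂loss/∂linear * ∂linear/∂k = ∂loss/∂linear * x
∂loss/∂b = ∂loss/∂linear * ∂linear/∂b = ∂loss/∂linear * 1

where ∂loss/∂linear is exactly self.outputs[0].gradients[self], the value the downstream sigmoid node stored for this node.
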
class Sigmoid(Node):
    def __init__(self, x, name=None):
        Node.__init__(self, inputs=[x], name=name)

    def _sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def __repr__(self):
        return 'Sigmoid: {}'.format(self.name)

    def forward(self):
        x = self.inputs[0]
        self.value = self._sigmoid(x.value)
        print('I am {}; no human assigned me, I computed my value: {}'.format(self.name, self.value))

    def backward(self):
        x = self.inputs[0]
        # d sigmoid / d x = sigmoid(x) * (1 - sigmoid(x))
        self.gradients[self.inputs[0]] = self.outputs[0].gradients[self] * self._sigmoid(x.value) * (1 - self._sigmoid(x.value))

        print('self.gradients[self.inputs[0]] {}'.format(self.gradients[self.inputs[0]]))
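
Where does sigmoid * (1 - sigmoid) come from? Differentiating σ(x) = 1 / (1 + e^(-x)) directly:

σ'(x) = e^(-x) / (1 + e^(-x))² = σ(x) * (1 - σ(x))

so backward can reuse _sigmoid instead of needing a separate derivative function.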
        
class Loss(Node):
    def __init__(self, y, yhat, name=None):
        Node.__init__(self, inputs=[y, yhat], name=name)

    def __repr__(self):
        return 'Loss: {}'.format(self.name)

    def forward(self):
        y = self.inputs[0]
        yhat = self.inputs[1]

        # Mean squared error (a plain squared error in the scalar case used here).
        self.value = np.mean((y.value - yhat.value) ** 2)
        print('I am {}; no human assigned me, I computed my value: {}'.format(self.name, self.value))

    def backward(self):
        y = self.inputs[0]
        yhat = self.inputs[1]

        # d/dy (y - yhat)^2 = 2 (y - yhat);  d/dyhat (y - yhat)^2 = -2 (y - yhat)
        self.gradients[self.inputs[0]] = 2 * np.mean(y.value - yhat.value)
        self.gradients[self.inputs[1]] = -2 * np.mean(y.value - yhat.value)

        print("self.gradients[self.inputs[0]] {}".format(self.gradients[self.inputs[0]]))
        print("self.gradients[self.inputs[1]] {}".format(self.gradients[self.inputs[1]]))
class Placeholder(Node):
    def __init__(self, name=None, is_trainable=False):
        Node.__init__(self, name=name, is_trainable=is_trainable)

    def __repr__(self):
        return 'Placeholder: {}'.format(self.name)

    def forward(self):
        # Placeholders do not compute anything; their value comes from feed_dict.
        print('I am {}; I was assigned the value {}'.format(self.name, self.value))

    def backward(self):
        # A placeholder's gradient is whatever its consumer computed for it.
        self.gradients[self] = self.outputs[0].gradients[self]
node_x = Placeholder(name = 'x',is_trainable=False)
node_y = Placeholder(name = 'y',is_trainable=False)
node_k = Placeholder(name = 'k',is_trainable=True)
node_b = Placeholder(name = 'b',is_trainable=True)
node_linear = Linear(node_x,node_k,node_b,name = 'linear')
node_sigmoid = Sigmoid(x= node_linear,name = 'sigmoid')
node_loss = Loss(y = node_sigmoid, yhat = node_y,name = 'loss')


feed_dict = {
    node_x:3,
    node_y:random.random(),
    node_b:random.random(),
    node_k:0.38
}
from collections import defaultdict

def convert_feed_dict_to_graph(feed_dict):
    """BFS from the placeholders, collecting a {node: successors} map."""
    need_expand = [n for n in feed_dict]
    computing_graph = defaultdict(list)

    while need_expand:
        n = need_expand.pop(0)

        if n in computing_graph: continue

        if isinstance(n, Placeholder): n.value = feed_dict[n]

        for m in n.outputs:
            computing_graph[n].append(m)
            need_expand.append(m)

    return computing_graph
sorted_nodes = topologic(convert_feed_dict_to_graph(feed_dict))
sorted_nodes

[Placeholder: k,
 Placeholder: y,
 Placeholder: b,
 Placeholder: x,
 Linear: linear,
 Sigmoid: sigmoid,
 Loss: loss]

(Because topologic picks with random.choice among the ready nodes, the relative order of the four placeholders can differ from run to run; any such order is a valid topological sort.)

def forward(graph_sorted_nodes):
    # Forward pass: visit nodes in topological order.
    for node in graph_sorted_nodes:
        node.forward()

def backward(graph_sorted_nodes):
    # Backward pass: visit nodes in reverse topological order.
    for node in graph_sorted_nodes[::-1]:
        print('I am {}'.format(node.name))
        node.backward()

def run_one_epoch(graph_nodes):
    forward(graph_nodes)
    backward(graph_nodes)

def optimize(graph_nodes, learning_rate=1e-3):
    # Gradient-descent update: value <- value - learning_rate * gradient.
    for node in graph_nodes:
        if node.is_trainable:
            node.value = node.value - node.gradients[node] * learning_rate
            cmp = 'large' if node.gradients[node] > 0 else 'small'
            print("{} value is too {}; I need to update myself".format(node.name, cmp))

One full pass: forward, backward, and a parameter update:

run_one_epoch(sorted_nodes)
optimize(sorted_nodes)

I am k; I was assigned the value 0.4175584031981763
I am y; I was assigned the value 0.8894536740531678
I am b; I was assigned the value 0.676804258303691
I am x; I was assigned the value 3
I am linear; no human assigned me, I computed my value: 1.92947946789822
I am sigmoid; no human assigned me, I computed my value: 0.8731917936547509
I am loss; no human assigned me, I computed my value: 0.00026444875409241636
I am loss
self.gradients[self.inputs[0]] -0.032523760796833834
self.gradients[self.inputs[1]] 0.032523760796833834
I am sigmoid
self.gradients[self.inputs[0]] -0.0036012872501172298
I am linear
self.gradients[self.inputs[0]] -0.0015037477536169017
self.gradients[self.inputs[1]] -0.010803861750351688
self.gradients[self.inputs[2]] -0.0036012872501172298
I am x
I am b
I am y
I am k
k value is too small; I need to update myself
b value is too small; I need to update myself
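
As a sanity check (my addition, not part of the original run), the analytic gradient for k can be compared against a finite-difference estimate; the two numbers should agree to several decimal places:

# Finite-difference check of d loss / d k, with assumed values for x, b, y.
def loss_of_k(kv, xv=3.0, bv=0.5, yv=0.7):
    pred = 1 / (1 + np.exp(-(kv * xv + bv)))
    return (pred - yv) ** 2

eps = 1e-6
k0 = 0.38
numeric = (loss_of_k(k0 + eps) - loss_of_k(k0 - eps)) / (2 * eps)

# Analytic: 2 * (pred - y) * pred * (1 - pred) * x, per the chain rule above.
pred = 1 / (1 + np.exp(-(k0 * 3.0 + 0.5)))
analytic = 2 * (pred - 0.7) * pred * (1 - pred) * 3.0

print(numeric, analytic)  # should match to roughly 1e-8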


Summary

With every node's backward pass in place, training is just a matter of running forward and backward over the sorted graph for a number of epochs, applying the gradient-descent update after each pass. A sketch of that loop follows.
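
A minimal training loop along those lines (a sketch; the epoch count and learning rate are arbitrary choices, and the prints inside the nodes make it verbose):

for epoch in range(100):
    run_one_epoch(sorted_nodes)
    optimize(sorted_nodes, learning_rate=1e-1)

# node_loss.value holds the loss from the most recent forward pass.
print('final loss:', node_loss.value)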
