1. Hand-Written Neural Network (TensorFlow Version)

Here, xs plays the same role as the default bboxes in SSD, and ys corresponds to the ground-truth boxes and class labels fed into SSD.

import tensorflow as tf
import numpy as np

# Add a layer
def add_layer(inputs, in_size, out_size, activation_function=None):
    # add one more layer and return the output of this layer
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))    # weights drawn from a normal distribution
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)             # small positive init, recommended over all zeros
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

# 1. Training data
# Make up some real data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]     # [:, np.newaxis] turns shape (300,) into (300, 1)
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# 2. Define placeholder nodes to receive the data
# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])

# 3. Define the network layers: a hidden layer and an output layer
# add hidden layer: input is xs, with 10 neurons in the hidden layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# add output layer: input is the hidden layer l1, the output layer predicts 1 value
prediction = add_layer(l1, 10, 1, activation_function=None)

# 4. Define the loss expression
# the error between prediction and real data
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))   # reduction_indices: the axes to collapse (here axis 1, summing per sample)

# 5. Choose an optimizer to minimize the loss
# this line defines how to reduce loss; the learning rate is 0.1
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)


# important step: initialize all variables
sess = tf.Session()
# nothing defined above actually runs until sess.run is called
sess.run(tf.global_variables_initializer())    # initialize_all_variables() is the deprecated pre-0.12 name

# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())

# Iterate 1000 training steps; sess.run drives the optimizer
for i in range(1000):
    # train_step and loss are ops defined on placeholders, so values must be fed in here
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # to see the step improvement
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
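
To visualize the fit, a minimal plotting sketch (matplotlib is an extra dependency, not part of the original script):

import matplotlib.pyplot as plt

pred = sess.run(prediction, feed_dict={xs: x_data})      # network output after training
plt.scatter(x_data, y_data, s=5, label='noisy data')     # the training points
plt.plot(x_data, pred, 'r-', lw=2, label='network fit')  # the learned curve
plt.legend()
plt.show()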

For classification problems:

# use cross entropy as the loss function
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
                                              reduction_indices=[1]))       # loss
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
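
Note that tf.log(prediction) is only meaningful when prediction is a probability distribution, i.e. the output layer uses softmax. A minimal sketch of such a classification head, reusing add_layer from above (the 784/10 sizes are illustrative, e.g. MNIST, and not from the original post):

xs_cls = tf.placeholder(tf.float32, [None, 784])    # e.g. flattened 28x28 images (illustrative)
ys_cls = tf.placeholder(tf.float32, [None, 10])     # one-hot class labels (illustrative)
# softmax turns raw layer outputs into per-class probabilities
pred_cls = add_layer(xs_cls, 784, 10, activation_function=tf.nn.softmax)
ce_loss = tf.reduce_mean(-tf.reduce_sum(ys_cls * tf.log(pred_cls),
                                        reduction_indices=[1]))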

When using TensorBoard, wrap each part of the graph in a with tf.name_scope() block:

import tensorflow as tf


def add_layer(inputs, in_size, out_size, activation_function=None):
    # add one more layer and return the output of this layer
    # difference: an outer name scope 'layer' wrapping the smaller components
    with tf.name_scope('layer'):
        # difference: the individual components
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.add(tf.matmul(inputs, Weights), biases)
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        return outputs


# define placeholder for inputs to network
# difference: an 'inputs' scope containing x and y
with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')
    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')

# add hidden layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# add output layer
prediction = add_layer(l1, 10, 1, activation_function=None)

# the error between prediction and real data
# difference: a name scope for the loss
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                        reduction_indices=[1]))

# difference: a name scope for the training step
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

sess = tf.Session()

# difference: sess.graph writes the whole graph definition into a file under "logs/"
# then open a terminal, cd to the directory that contains "logs/", and run: tensorboard --logdir=logs
# this prints an address; open it in a browser and look under the GRAPHS tab
writer = tf.summary.FileWriter("logs/", sess.graph)    # tf.train.SummaryWriter is the deprecated pre-0.12 name
# important step
sess.run(tf.global_variables_initializer())

The usage of tf.reduce_sum is as follows:

# 'x' is [[1, 1, 1],
#         [1, 1, 1]]

tf.reduce_sum(x) ==> 6                              # sum of all elements
tf.reduce_sum(x, 0) ==> [2, 2, 2]                   # sum down the columns (axis 0)
tf.reduce_sum(x, 1) ==> [3, 3]                      # sum across the rows (axis 1)
tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]  # as above, keeping the reduced axis
tf.reduce_sum(x, [0, 1]) ==> 6                      # sum over both axes

Similarly, tf.reduce_mean computes the mean of a tensor along a specified axis (one dimension of the tensor); it is mainly used for dimensionality reduction or for computing the average of a tensor (e.g. an image).
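
For example, mirroring the reduce_sum examples above:

# 'x' is [[1., 1.],
#         [2., 2.]]

tf.reduce_mean(x) ==> 1.5             # mean of all elements
tf.reduce_mean(x, 0) ==> [1.5, 1.5]   # mean down the columns (axis 0)
tf.reduce_mean(x, 1) ==> [1., 2.]     # mean across the rows (axis 1)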

 

Implementing an if statement in tf:

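A minimal sketch, assuming the intended construct is tf.cond, TensorFlow's graph-level conditional (both branches are built into the graph; only the selected one runs):

import tensorflow as tf

x = tf.constant(2)
y = tf.constant(5)
# tf.cond selects a branch at run time based on the predicate
result = tf.cond(tf.less(x, y),
                 lambda: tf.add(x, y),         # branch taken when x < y
                 lambda: tf.subtract(x, y))    # branch taken otherwise

with tf.Session() as sess:
    print(sess.run(result))    # 7, since 2 < 5
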
2. Hand-Written Gradient Descent

import numpy as np

x_data = np.linspace(0, 1, 10)
y_label = np.full(x_data.shape, 1)              # constant 1 column, acting as the bias feature

input_data = np.array([y_label, x_data]).T      # each row is [1, x], so w = [w0, w1] fits y = w0 + w1*x
noise = np.random.normal(0, 1, x_data.shape)
output_data = np.square(x_data) + noise         # noisy quadratic targets, shape (len(x_data),)
w = np.random.randn(2)

alpha = 0.01                                    # learning rate
err_prev = np.zeros(2)
epsilon = 1e-5                                  # convergence threshold
for i in range(100):                            # number of epochs
    for j in range(len(x_data)):                # update once per sample (SGD)
        diff = np.dot(w, input_data[j]) - output_data[j]    # multiply elementwise and sum, i.e. the prediction error
        w = w - alpha * diff * input_data[j]                # gradient of a layer's w = error * input from the previous layer * (n/m)
    # if np.linalg.norm(w - err_prev) < epsilon:
    #     break
    # else:
    #     err_prev = w
print('Gradient descent result:')
print('weights w: [%f, %f]' % (w[0], w[1]))
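
As a sanity check, the same linear fit has a closed-form least-squares solution (a verification sketch, not part of the original routine); the SGD weights should approach it as training continues:

# Closed-form least-squares fit of the same model y = w0 + w1*x.
w_ls, *_ = np.linalg.lstsq(input_data, output_data, rcond=None)
print('least-squares w: [%f, %f]' % (w_ls[0], w_ls[1]))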




3. Hand-Written Convolution

import cv2
import numpy as np


filename = '1.jpg'
img = cv2.imread(filename)
b, g, r = cv2.split(img)                    # OpenCV loads images in BGR order

data = np.array(r)
kernel = np.array([[1, 1, 1],
                   [1, 1, 1],
                   [1, 1, 1]])              # box (low-pass) filter; divide by 9 to normalize

def conv(pic, kernel):
    w, h = pic.shape
    kh, kw = kernel.shape
    res = []                                # build the result one row at a time
    for i in range(w - kh + 1):
        tmp = []
        for j in range(h - kw + 1):
            src = pic[i:i + kh, j:j + kw]                 # extract the ROI under the kernel
            tmp.append(np.sum(np.multiply(src, kernel)))  # elementwise multiply, then sum
        res.append(tmp)
    return np.array(res)

print(conv(data, kernel).shape)
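
As a cross-check, OpenCV's built-in cv2.filter2D computes the same correlation; it pads the image borders by default, so only the interior region is comparable to the "valid" output of the hand-written version:

# Crop one pixel off each border of the filter2D result to align it with
# the (w-2, h-2) output of conv() above.
ref = cv2.filter2D(data.astype(np.float32), -1, kernel.astype(np.float32))
print(np.allclose(conv(data, kernel), ref[1:-1, 1:-1]))    # expected: True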

 

4. Hand-Written NMS (Non-Maximum Suppression)

import numpy as np

def py_cpu_nms(dets, thresh):
    """Pure Python NMS baseline."""
    x1 = dets[:, 0]                 # result: [  30.   50.  210.  430.]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)   # result: [ 36381.  36081.  -5064.   2511.]
    # res holds the box indices sorted by score in descending order
    res = scores.argsort()[::-1]            # result: [0 1 2 3]

    keep = []
    while res.size > 0:
        i = res[0]                  # res is re-filtered on every iteration
        keep.append(i)
        # intersect res[0] (the current best box) with every remaining box res[1:]
        xx1 = np.maximum(x1[i], x1[res[1:]])
        yy1 = np.maximum(y1[i], y1[res[1:]])
        xx2 = np.minimum(x2[i], x2[res[1:]])
        yy2 = np.minimum(y2[i], y2[res[1:]])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[res[1:]] - inter)   # IoU = intersection / union
        # drop anchor boxes (or sliding windows) whose overlap with the kept box exceeds the threshold
        inds = np.where(ovr <= thresh)[0]   # result over the 3 iterations: [1 2], [0], []
        # np.where returns the positions (within res[1:]) that satisfy the condition

        # shift by +1 because inds indexes res[1:], not res
        res = res[inds + 1]                 # result over the 3 iterations: [2 3], [3], []
        print(res)

    return keep
 
# test
if __name__ == "__main__":
    dets = np.array([[30, 20, 230, 200, 1],
                     [50, 50, 260, 220, 0.9],
                     [210, 30, 420, 5, 0.8],
                     [430, 280, 460, 360, 0.7]])
    thresh = 0.1
    keep_dets = py_cpu_nms(dets, thresh)
    print(keep_dets)    # [0, 2, 3]
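
Note that the third box [210, 30, 420, 5, 0.8] has y2 < y1, which is why its area comes out negative above; box 1 is suppressed by box 0 (their IoU is about 0.61, far above thresh = 0.1), so the kept indices are [0, 2, 3].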
 

An example of how np.where behaves:

import numpy as np
a = np.array([[0, 1, 2],
              [0, 2, 4],
              [0, 9, 6]])
b = np.where(a < 4)
print(b)    # (array([0, 0, 0, 1, 1, 2]), array([0, 1, 2, 0, 1, 0]))