Computer Vision and Deep Learning

tf+cv HOG+SVM行人检测。
机器学习需要:
1.通过视频获取样本收集,用视频分解成图片的方式进行样本收集
2.特征,待检测目标的特征。Haar+adaboost(强分类器,弱分类器,node节点)人脸识别。HAAR特征=白色-黑色=整个区域权重-黑色权重=(p1-p2-p3+p4)*w
3.分类器训练
4.训练出来的预测和检验:训练出来的特征是否有效

视频分解成图片

import tensorflow as tf
import cv2

# Decompose a video into individual JPEG frames (image1.jpg .. image10.jpg).
cap = cv2.VideoCapture("1.mp4")
isOpened = cap.isOpened()  # bug fix: isOpened is a method and must be called
print(isOpened)
fps = cap.get(cv2.CAP_PROP_FPS)  # frame rate of the source video
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(fps, width, height)
i = 0
while isOpened:  # bug fix: `!` is not Python, and the loop must run WHILE the capture is open
    if i == 10:  # grab at most 10 frames
        break
    else:
        i = i + 1
    (flag, frame) = cap.read()  # flag: whether the read succeeded; frame: the image data
    fileName = 'image' + str(i) + '.jpg'
    print(fileName)
    if flag == True:  # bug fix: missing colon
        # bug fix: IMWRITE_JPEG_QUALITY uses an underscore, not a minus sign
        cv2.imwrite(fileName, frame, [cv2.IMWRITE_JPEG_QUALITY, 100])
print('end')  # bug fix: curly “smart quotes” are a SyntaxError

图片合成视频

import cv2

# Compose image1.jpg .. image10.jpg back into a video.
img = cv2.imread('image1.jpg')
imgInfo = img.shape  # (height, width, channels)
size = (imgInfo[1], imgInfo[0])  # VideoWriter wants (width, height) — comment fixed
print(size)
# -1 lets the user pick a codec; 5 fps; frame size must match the images.
videoWrite = cv2.VideoWriter('2.mp4', -1, 5, size)
# Encoding/decoding is needed both to split and to re-assemble video.
for i in range(1, 11):
    fileName = 'image' + str(i) + '.jpg'  # bug fix: the original string concatenation was mis-quoted
    img = cv2.imread(fileName)
    videoWrite.write(img)  # bug fix: variable was written VideoWrite (wrong case -> NameError)
print('end')  # bug fix: curly quotes are a SyntaxError

*haar特征
像素经过运算之后得到某一个结果。具体值,向量,矩阵,多维矩阵
如何利用特征区分目标和分类? 目标和非目标
如何得到判决?机器学习
什么是特征,如何利用特征进行判决,如何得到判决
Haar特征在cv中得到的三种类型,14个特征
将白色区域的像素之和减去黑色区域的像素之和。特征就是某一个区域中的像素运算
模板滑动,缩放

adaboost分类器的结构,计算过程,xml文件结构
两级分类器 T1,T2 haar>T1 & haar>T2 ,一般分类器有15至20个。
adaboost训练:
1.初始化权值分布,要全部相等 2.遍历判别阈值 3.权重分布 4.权值分布update*

import cv2
import numpy as np

# 1. load two cascade xml files (face, eyes)  2. load jpg  3. convert to gray  4. detect
# bug fix: the class is CascadeClassifier (mis-spelled Classfier) and the
# curly “smart quotes” around the paths were a SyntaxError.
face = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
eye = cv2.CascadeClassifier("haarcascade_eye.xml")

# 2. load jpg
img = cv2.imread('face.jpg')
cv2.imshow('src', img)

# 3. Haar cascades operate on grayscale data.
# bug fix: cv2.imread returns BGR, so the correct conversion code is BGR2GRAY.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# 4. detect: gray image, scale factor 1.3, minNeighbors 5
# bug fix: the result was never assigned, so `faces` below raised NameError.
faces = face.detectMultiScale(gray, 1.3, 5)
print('face=', len(faces))  # number of faces detected
# draw a rectangle around every detected face
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)  # last argument is the line width
    roi_face = gray[y:y + h, x:x + w]  # bug fix: slicing needs brackets, not parentheses
    roi_color = img[y:y + h, x:x + w]  # bug fix: comma, not semicolon, between slices
    eyes = eye.detectMultiScale(roi_face)  # the argument must be a grayscale image
    print("eye=", len(eyes))  # bug fix: label typo "eye+"
    for (e_x, e_y, e_w, e_h) in eyes:
        cv2.rectangle(roi_color, (e_x, e_y), (e_x + e_w, e_y + e_h), (0, 255, 0), 2)
cv2.imshow("dst", img)
cv2.waitKey(0)

Hand Writing

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./mnist/", one_hot=True)

import tensorflow as tf

# Parameters
learning_rate = 0.001   # Adam step size
training_epochs = 30    # full passes over the training set
batch_size = 100        # examples per gradient step
display_step = 1        # print the loss every N epochs

# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 512 # 2nd layer number of features
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)

# tf Graph input: batch dimension left as None so any batch size works
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])


# Create model
def multilayer_perceptron(x, weights, biases):
    """Two-hidden-layer MLP: (affine + ReLU) twice, then a linear output layer.

    Args:
        x: input batch of shape [None, n_input].
        weights: dict with keys 'h1', 'h2', 'out' mapping to weight Variables.
        biases: dict with keys 'b1', 'b2', 'out' mapping to bias Variables.

    Returns:
        Unscaled logits of shape [None, n_classes] — the softmax is applied
        later inside the cross-entropy loss, so the output stays linear here.
    """
    hidden_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
    hidden_2 = tf.nn.relu(tf.add(tf.matmul(hidden_1, weights['h2']), biases['b2']))
    # A third hidden layer or a dropout layer (e.g. tf.nn.dropout(hidden_2, 0.75))
    # could be inserted here for experimentation.
    return tf.matmul(hidden_2, weights['out']) + biases['out']

# Store layers weight & biases, initialized from a standard normal distribution.
weights = {
    #you can change the layer sizes via n_hidden_1 / n_hidden_2 above
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    #'h3': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    #'b3': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# Construct model: `pred` holds the unscaled logits
pred = multilayer_perceptron(x, weights, biases)

# Define loss (softmax cross-entropy over logits, averaged over the batch)
# and the Adam optimizer that minimizes it.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Initializing the variables (must run before any Variable is read)
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                          y: batch_y})
            # Compute average loss across the epoch
            avg_cost += c / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", \
                "{:.9f}".format(avg_cost))
    print("Optimization Finished!")

    # Test model: a prediction is correct when the argmax of the logits
    # matches the argmax of the one-hot label.
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy as the mean of the 0/1 correctness indicators
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值