python实现运动检测

# -*- coding=GBK -*-
import cv2 as cv
import time
import datetime

# Open the camera to grab frames.
cap = cv.VideoCapture(0)  # 0 is the device id; use other values when several cameras are attached
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
size = width, height
print(repr(size))  # print the capture resolution
fps = 5  # target rate for the motion-detection loop (see the sleep throttle below)
pre_frame = None  # first processed frame becomes the static background for differencing

def is_inside(o, i):
    """Return True when rectangle *o* lies strictly inside rectangle *i*.

    Both arguments are (x, y, w, h) boxes as produced by
    HOGDescriptor.detectMultiScale.
    """
    inner_x, inner_y, inner_w, inner_h = o
    outer_x, outer_y, outer_w, outer_h = i
    starts_inside = inner_x > outer_x and inner_y > outer_y
    ends_inside = (inner_x + inner_w < outer_x + outer_w
                   and inner_y + inner_h < outer_y + outer_h)
    return starts_inside and ends_inside
def draw_person(image, person):
    """Draw a 2px magenta bounding box for one detected person on *image*."""
    x, y, w, h = person
    top_left = (x, y)
    bottom_right = (x + w, y + h)
    cv.rectangle(image, top_left, bottom_right, (255, 0, 255), 2)
# The HOG person detector is expensive to construct; build it once instead of
# once per frame as the original code did.
hog = cv.HOGDescriptor()
hog.setSVMDetector(cv.HOGDescriptor_getDefaultPeopleDetector())
while True:
    start = time.time()
    # cap.read() -> (ret, frame); ret is False when no frame could be grabbed.
    ret, frame = cap.read()
    if not ret:
        # Check ret BEFORE touching frame: the original flipped first and
        # would crash on a None frame when the camera failed.
        print("failed to read frame from camera")
        break
    # Mirror the image horizontally. flip code: 0 = vertical, >0 = horizontal,
    # <0 = both axes.
    frame = cv.flip(frame, 1)
    end = time.time()
    # Grayscale copy for the motion-difference computation.
    gray_cap = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # Throttle the loop to roughly `fps` iterations per second.
    seconds = end - start
    if seconds < 1.0 / fps:
        time.sleep(1.0 / fps - seconds)
    gray_cap = cv.resize(gray_cap, (480, 480))
    # Gaussian blur suppresses sensor noise before differencing.
    gray_cap = cv.GaussianBlur(gray_cap, (21, 21), 0)
    if pre_frame is None:
        # No background yet: keep the current frame as the static background.
        pre_frame = gray_cap
    else:
        # Per-pixel absolute difference against the background frame.
        img_delta = cv.absdiff(pre_frame, gray_cap)
        # Binarise: pixels that changed by more than 30 become white (255).
        thresh = cv.threshold(img_delta, 30, 255, cv.THRESH_BINARY)[1]
        # Dilate to close small holes in the motion mask.
        thresh = cv.dilate(thresh, None, iterations=2)
        # NOTE(review): img_delta/thresh are computed but never used to gate
        # the (expensive) HOG detection below — confirm this is intended.
        found, _weights = hog.detectMultiScale(frame)
        # Keep only detections that are not fully contained in another one.
        # The original attached the append to the inner `if`'s else branch,
        # appending each box once per non-containing peer (duplicates); the
        # for/else form appends each surviving box exactly once.
        found_filtered = []
        for ri, r in enumerate(found):
            for qi, q in enumerate(found):
                if ri != qi and is_inside(r, q):
                    break
            else:
                found_filtered.append(r)
        for person in found_filtered:
            draw_person(frame, person)
        text = "Occupied"
        cv.putText(frame, "Room Status: {}".format(text), (10, 20),
                   cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                   (10, frame.shape[0] - 10), cv.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
        # Show the current frame and poll for a key press.
        cv.imshow("Security Feed", frame)
        key = cv.waitKey(1)
        # Quit when 'q' is pressed.
        if key == ord("q"):
            break

# release() frees the camera device

cap.release()

# destroyAllWindows() closes every OpenCV image window

cv.destroyAllWindows()


人脸识别

# -*- coding=UTF-8 -*-
import cv2
import os
import face_recognition
import threading,time
import numpy as np
cap = cv2.VideoCapture(0)
# NOTE(review): absolute reference-photo paths — adjust per machine.
old = '/home/bj/PycharmProjects/facefound/drivers/god.jpg'
new = '/home/bj/PycharmProjects/facefound/newdrivers/aa.jpg'
# def add_Driver():
#     print("welcome to here ,new driver")
#     print("please input your name,and look at the camare,it will take photos after 3s:")
#     i = input(":")
#     ret, frame = cap.read()
#     cv2.imshow('cap', frame)
#     output_path = os.path.join(new, "%s.jpg" % i)
#     cv2.imwrite(output_path, frame)
#     return i

# load oldface and read oldface_recognition
# old_path = os.path.join(old, "god.jpg")
old_image = face_recognition.load_image_file(old)
# [0] raises IndexError when no face is found — assumes the photo shows one face.
old_encoding = face_recognition.face_encodings(old_image)[0]
# load newface and read newface_recognition
#new_path = os.path.join(new, "%s.jpg" % i)
new_image = face_recognition.load_image_file(new)
# Same single-face assumption as above.
new_encoding = face_recognition.face_encodings(new_image)[0]
# Arrays of known face encodings and their display names (parallel lists).
Know_face_encodings=[
    old_encoding,
    new_encoding
]
Know_face_names=[
    "god",
    "newdriver"
]
# Initialize the per-frame working variables.
face_locations=[]
face_encodings=[]
face_names=[]
process_this_frame = True
while True:
    ret, frame = cap.read()
    if not ret:
        # Camera returned no frame; stop instead of crashing in resize below.
        break
    # Shrink to 1/4 size so face detection runs faster.
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # face_recognition expects RGB; OpenCV delivers BGR, so reverse the channels.
    rgb_small_frame = small_frame[:, :, ::-1]
    if process_this_frame:
        # Locate every face and compute its encoding for comparison.
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            # Compare against the known faces and pick the closest match.
            matchs = face_recognition.compare_faces(Know_face_encodings, face_encoding)
            name = "Unknown"
            face_distance = face_recognition.face_distance(Know_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distance)
            if matchs[best_match_index]:
                name = Know_face_names[best_match_index]
            face_names.append(name)
    # BUG FIX: the original toggled this flag (and drew/displayed) INSIDE the
    # `if process_this_frame:` block, so after the first frame the flag stayed
    # False forever and nothing was ever processed or shown again. The toggle
    # and display must run every iteration.
    process_this_frame = not process_this_frame
    # Draw on the full-size frame; locations were found at 1/4 scale.
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Box around the face.
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 225), 2)
        # Filled label strip with the name below the face.
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    cv2.imshow('cap', frame)
    flag = cv2.waitKey(1)
    if flag == 27:  # Esc quits
        break

人脸识别2.0

# -*- coding=UTF-8 -*-
import cv2
import os
import face_recognition
import re
import threading,time
import numpy as np
cap = cv2.VideoCapture(0)
old = 'drivers'
new = 'newdrivers'
# def add_Driver():
#     print("welcome to here ,new driver")
#     print("please input your name,and look at the camare,it will take photos after 3s:")
#     i = input(":")
#     ret, frame = cap.read()
#     cv2.imshow('cap', frame)
#     output_path = os.path.join(new, "%s.jpg" % i)
#     cv2.imwrite(output_path, frame)
#     return i
# add_Driver()
# load oldface and read oldface_recognition
# old_path = os.path.join(old, "god.jpg")
# NOTE(review): absolute path to the known-drivers photo folder — per machine.
path='/home/bj/PycharmProjects/facefound/drivers'
filelist = os.listdir(path)
drivers_Num = len(filelist)  # number of registered driver photos
# old_image = face_recognition.load_image_file('god.jpg')
# old_encoding = face_recognition.face_encodings(old_image)[0]
# # load newface and read newface_recognition
# #new_path = os.path.join(new, "%s.jpg" % i)
# new_image = face_recognition.load_image_file('wangqin.jpg')
# new_encoding = face_recognition.face_encodings(new_image)[0]
# Parallel lists: encoding i belongs to name i.
Know_face_encodings = []
Know_face_names = []
# Per-frame working variables.
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
# Build the known-face database from every photo in the drivers folder.
for i in filelist:
    # filelist was produced by os.listdir(path), so join against `path` rather
    # than the relative 'drivers' — the original only worked when the CWD
    # happened to be the project root.
    img_path = os.path.join(path, i)
    image = face_recognition.load_image_file(img_path)
    encodings = face_recognition.face_encodings(image)
    if not encodings:
        # Skip photos with no detectable face instead of crashing on [0].
        print("no face found in %s, skipping" % img_path)
        continue
    Know_face_encodings.append(encodings[0])
    # NOTE(review): the raw file name (with extension) is used as the
    # display name — confirm that is intended.
    Know_face_names.append(i)
#Initialize some variables
while True:
    ret, frame = cap.read()
    if not ret:
        # No frame grabbed; stop instead of crashing in resize below.
        break
    # Quarter-size frame speeds up detection considerably.
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    # face_recognition expects RGB; OpenCV delivers BGR.
    rgb_small_frame = small_frame[:, :, ::-1]

    if process_this_frame:
        # Detect and encode every face in the (small) frame.
        # NOTE(review): model="cnn" is very slow without a GPU build of dlib.
        face_locations = face_recognition.face_locations(rgb_small_frame, model="cnn")
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            # Compare with every known face; accept the closest one if it matched.
            matchs = face_recognition.compare_faces(Know_face_encodings, face_encoding)
            name = "Unknown"
            face_distances = face_recognition.face_distance(Know_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matchs[best_match_index]:
                name = Know_face_names[best_match_index]
            face_names.append(name)
    # Only every other frame is processed; the alternate frames reuse the
    # previous detections to keep the display responsive.
    process_this_frame = not process_this_frame

    # Scale the 1/4-size locations back up and draw on the full frame.
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Box around the face (padded beyond the detected rectangle).
        cv2.rectangle(frame, (left - 10, top - 100), (right + 10, bottom + 50), (0, 0, 225), 2)
        # Filled label strip with the name below the face.
        cv2.rectangle(frame, (left - 10, bottom + 15), (right + 10, bottom + 50), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom + 44), font, 1.0, (255, 255, 255), 1)
    cv2.imshow('cap', frame)
    flag = cv2.waitKey(1)
    if flag == 27:  # Esc quits
        break

人脸识别+加入人脸库

# -*- coding=UTF-8 -*-
import cv2
import os
import face_recognition
import numpy as np
import sys
cap = cv2.VideoCapture(0)
# NOTE(review): despite the name, `new` points at the existing drivers folder
# — both enrollment snapshots and known photos live here. Confirm intended.
new = '/home/bj/PycharmProjects/facefound/drivers'
# Parallel lists: encoding i belongs to name i.
Know_face_encodings = []
Know_face_names = []
# Per-frame working variables.
face_locations = []
face_encodings = []
face_names = []
filelist = os.listdir(new)
process_this_frame = True
def add_Driver():
    """Capture one camera frame, save it as <new>/<name>.jpg, and register
    the face found in it.

    Side effects: writes the snapshot to disk and appends the encoding/name
    to the module-level Know_face_encodings / Know_face_names lists.
    """
    print("welcome to here ,new driver")
    print("please input your name,and look at the camare,it will take photos after 3s:")
    i = input(":")
    ret, frame = cap.read()
    if not ret:
        # Nothing captured — bail out instead of writing a broken image.
        print("could not capture a frame from the camera")
        return
    cv2.imshow('cap', frame)
    output_path = os.path.join(new, "%s.jpg" % i)
    cv2.imwrite(output_path, frame)
    out = face_recognition.load_image_file(output_path)
    out_encodings = face_recognition.face_encodings(out)
    if not out_encodings:
        # No face in the snapshot: don't register a bogus encoding (the
        # original crashed here with IndexError on [0]).
        print("no face detected in the captured photo, please try again")
        return
    Know_face_encodings.append(out_encodings[0])
    Know_face_names.append(i)

# Build the known-face database from every photo in the drivers folder.
for i in filelist:
    # filelist was produced by os.listdir(new), so join against `new` rather
    # than the relative 'drivers' — the original only worked from the
    # project root.
    img_path = os.path.join(new, i)
    image = face_recognition.load_image_file(img_path)
    encodings = face_recognition.face_encodings(image)
    if not encodings:
        # Skip photos with no detectable face instead of crashing on [0].
        print("no face found in %s, skipping" % img_path)
        continue
    Know_face_encodings.append(encodings[0])
    # File name (with extension) doubles as the display name.
    Know_face_names.append(i)
#Initialize some variables
while True:
    ret, frame = cap.read()
    if not ret:
        # Camera gave no frame; quit instead of crashing in resize below.
        break
    # Poll the keyboard once per iteration; checked at the bottom of the loop.
    flag = cv2.waitKey(1)
    # Quarter-size frame speeds up detection considerably.
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    # face_recognition expects RGB; OpenCV delivers BGR.
    rgb_small_frame = small_frame[:, :, ::-1]
    if process_this_frame:
        # Detect and encode every face in the (small) frame.
        # NOTE(review): model="cnn" is very slow without a GPU build of dlib.
        face_locations = face_recognition.face_locations(rgb_small_frame, model="cnn")
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            # Compare with every known face; accept the closest one if matched.
            matchs = face_recognition.compare_faces(Know_face_encodings, face_encoding)
            name = "Unknown"
            face_distances = face_recognition.face_distance(Know_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            print(best_match_index)
            if matchs[best_match_index]:
                name = Know_face_names[best_match_index]
            if best_match_index == 0:
                # NOTE(review): a blocking console prompt inside the video
                # loop freezes the display until answered, and it triggers
                # whenever the *closest* known face is index 0 (not only for
                # unknown faces) — confirm both behaviours are intended.
                while 1:
                    print("do you want to join in?")
                    a = input()
                    if a == 'y':
                        add_Driver()
                        break
                    if a == 'n':
                        break

            face_names.append(name)
    # Process only every other frame to keep the display responsive.
    process_this_frame = not process_this_frame

    # Scale the 1/4-size locations back up and draw on the full frame.
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Box around the face (padded beyond the detected rectangle).
        cv2.rectangle(frame, (left - 10, top - 100), (right + 10, bottom + 50), (0, 0, 225), 2)
        # Filled label strip with the name below the face.
        cv2.rectangle(frame, (left - 10, bottom + 15), (right + 10, bottom + 50), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom + 44), font, 1.0, (255, 255, 255), 1)
    cv2.imshow('cap', frame)
    if flag == 27:  # Esc quits
        break
# -*-coding:utf-8-*-
import cv2
import os
import numpy as np
from PIL import Image
path = 'drivers'

# LBPH recognizer: radius=1, neighbors=8, grid 3x3, confidence threshold 50.
recog = cv2.face.LBPHFaceRecognizer_create(1, 8, 3, 3, 50)

# Haar-cascade frontal-face detector used to crop faces out of the photos.
# NOTE(review): absolute path to the cascade XML — adjust per machine.
face_detector = cv2.CascadeClassifier(r'/home/bj/PycharmProjects/FACE/haarcascade_frontalface_default.xml')
def get_images(path):
    """Collect grayscale face crops and integer labels from every image in *path*.

    Returns (face_samples, ids): face_samples is a list of uint8 face arrays,
    ids is the parallel list of integer labels (the image's index within the
    folder), suitable for cv2.face recognizer training.
    """
    image_paths = [os.path.join(path, f) for f in os.listdir(path)]
    face_samples = []
    ids = []
    for label, image_path in enumerate(image_paths):
        # Load as 8-bit grayscale for the Haar detector / LBPH trainer.
        img = Image.open(image_path).convert('L')
        img_np = np.array(img, 'uint8')
        faces = face_detector.detectMultiScale(img_np)
        for (x, y, w, h) in faces:
            face_samples.append(img_np[y:y + h, x:x + w])
            # Original appended the *builtin function* `id` here, which would
            # have crashed recognizer training; use an integer label instead.
            ids.append(label)
    # Original returned inside the loop, so only the first image was processed;
    # the return belongs after the loop.
    return face_samples, ids
# Debug marker showing the script reached the training step.
print('thhhhh')
faces, ids = get_images(path)
# NOTE(review): training/saving is commented out — presumably because
# get_images returned unusable labels; re-enable once ids are verified.
# print(faces,ids)
# recog.train(faces, np.array(ids))
# recog.save('trainner/trainner.yml')

  • 0
    点赞
  • 18
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值