Pedestrian and Vehicle Detection/Counting & Face Recognition

(Unfinished)
Face recognition and counting 1

from PIL import Image
import face_recognition
import cv2
# Load the jpg file into a numpy array
pic = "2.jpg"
image = face_recognition.load_image_file(pic)  # RGB array for face_recognition
img = cv2.imread(pic)  # BGR array for OpenCV drawing
# Find all the faces in the image using the default HOG-based model.
# This method is fairly accurate, but not as accurate as the CNN model and not GPU accelerated.
# See also: find_faces_in_picture_cnn.py
face_locations = face_recognition.face_locations(image)

print("I found {} face(s) in this photograph.".format(len(face_locations)))






for face_location in face_locations:

    # Print the location of each face in this image
    top, right, bottom, left = face_location
    print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))
    img = cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)  # draw a rectangle around the face
    cv2.imshow("show", img)

    # You can access the actual face itself like this:
    face_image = image[top:bottom, left:right]
    pil_image = Image.fromarray(face_image)
    cv2.waitKey(0)  # pause after each face is drawn
    # pil_image.show()
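
The comments above mention the CNN model; a minimal sketch of the same count using it ("cnn" is more accurate than the default HOG model but much slower on CPU, and GPU acceleration assumes dlib was built with CUDA):

import face_recognition

image = face_recognition.load_image_file("2.jpg")
# model="cnn" selects the pretrained CNN detector bundled with face_recognition
face_locations = face_recognition.face_locations(image, model="cnn")
print("I found {} face(s) in this photograph.".format(len(face_locations)))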

Face recognition and counting 2

import cv2

img = cv2.imread("2.jpg")
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
faces = face_cascade.detectMultiScale(gray, 1.04, 5)  # returns an array of face rectangles
# faces = face_cascade.detectMultiScale(gray, 1.03, 2, cv2.CASCADE_SCALE_IMAGE, (50, 50), (100, 100))
# arguments: image, scaleFactor, minNeighbors, flags, minSize, maxSize
i = 0
for (x, y, w, h) in faces:
    img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)  # draw rectangle
    i = i + 1  # count faces
# cv2.imwrite('d:\\test.jpg', img)
img = cv2.resize(img, (1200, 800))
print(i)
cv2.imshow("show", img)
cv2.waitKey(0)
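
For reference, the same detectMultiScale call written with keyword arguments makes the tuning knobs explicit (the minSize value below is a hypothetical addition, not taken from the call above):

faces = face_cascade.detectMultiScale(
    gray,
    scaleFactor=1.04,  # image pyramid step; closer to 1.0 = finer but slower search
    minNeighbors=5,    # overlapping detections required before a box is kept
    minSize=(30, 30),  # hypothetical lower bound on face size, in pixels
)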

Vehicle and pedestrian detection 1

import cv2
import dlib
import time
import math

carCascade = cv2.CascadeClassifier('myhaar.xml')
video = cv2.VideoCapture(r'cars.mp4')

WIDTH = 1280
HEIGHT = 720


def estimateSpeed(location1, location2):
    # Euclidean pixel distance between the two box origins
    d_pixels = math.sqrt(math.pow(location2[0] - location1[0], 2) + math.pow(location2[1] - location1[1], 2))
    # ppm = location2[2] / carWidht
    ppm = 8.8  # pixels per meter; hard-coded calibration for this camera view
    d_meters = d_pixels / ppm
    # print("d_pixels=" + str(d_pixels), "d_meters=" + str(d_meters))
    fps = 18  # assumed frames per second between the two sampled locations
    speed = d_meters * fps * 3.6  # meters per frame * frames per second = m/s; * 3.6 -> km/h
    return speed
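
# Sanity check on the units (the 44 px displacement is hypothetical, not from
# the video): 44 px / 8.8 px-per-meter = 5 m per sampling interval; at the
# assumed 18 fps that is 90 m/s = 324 km/h. The result is only meaningful if
# ppm and fps match how often locations are actually sampled:
# estimateSpeed([0, 0, 0, 0], [44, 0, 0, 0])  # -> 324.0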


def trackMultipleObjects():
    rectangleColor = (0, 255, 0)
    frameCounter = 0
    currentCarID = 0
    fps = 0
    js = 0  # number of trackers removed so far
    zs = 0  # number of currently active trackers

    carTracker = {}
    carNumbers = {}
    carLocation1 = {}
    carLocation2 = {}
    speed = [None] * 1000

    # Write output to video file (the actual out.write call is commented out below)
    out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (WIDTH, HEIGHT))

    while True:
        start_time = time.time()
        rc, image = video.read()
        if image is None:
            break

        image = cv2.resize(image, (WIDTH, HEIGHT))
        resultImage = image.copy()

        frameCounter = frameCounter + 1

        carIDtoDelete = []

        for carID in carTracker.keys():
            trackingQuality = carTracker[carID].update(image)

            if trackingQuality < 7:
                carIDtoDelete.append(carID)

        for carID in carIDtoDelete:
            print('Removing carID ' + str(carID) + ' from list of trackers.')
            print('Removing carID ' + str(carID) + ' previous location.')
            print('Removing carID ' + str(carID) + ' current location.')
            carTracker.pop(carID, None)
            carLocation1.pop(carID, None)
            carLocation2.pop(carID, None)
            js = js + 1  # one more tracker removed


        if not (frameCounter % 10):
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            cars = carCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=13, flags=18, minSize=(24, 24))

            for (_x, _y, _w, _h) in cars:
                x = int(_x)
                y = int(_y)
                w = int(_w)
                h = int(_h)

                x_bar = x + 0.5 * w
                y_bar = y + 0.5 * h

                matchCarID = None

                for carID in carTracker.keys():
                    trackedPosition = carTracker[carID].get_position()

                    t_x = int(trackedPosition.left())
                    t_y = int(trackedPosition.top())
                    t_w = int(trackedPosition.width())
                    t_h = int(trackedPosition.height())

                    t_x_bar = t_x + 0.5 * t_w
                    t_y_bar = t_y + 0.5 * t_h

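                    # a detection matches an existing tracker when each box's
                    # center lies inside the other box (mutual containment)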
                    if ((t_x <= x_bar <= (t_x + t_w)) and (t_y <= y_bar <= (t_y + t_h)) and (
                            x <= t_x_bar <= (x + w)) and (y <= t_y_bar <= (y + h))):
                        matchCarID = carID

                if matchCarID is None:
                    print('Creating new tracker ' + str(currentCarID))

                    tracker = dlib.correlation_tracker()
                    tracker.start_track(image, dlib.rectangle(x, y, x + w, y + h))

                    carTracker[currentCarID] = tracker
                    carLocation1[currentCarID] = [x, y, w, h]
                    currentCarID = currentCarID + 1
                    zs = currentCarID - js  # active trackers = created so far - removed

        # cv2.line(resultImage,(0,480),(1280,480),(255,0,0),5)

        for carID in carTracker.keys():
            trackedPosition = carTracker[carID].get_position()

            t_x = int(trackedPosition.left())
            t_y = int(trackedPosition.top())
            t_w = int(trackedPosition.width())
            t_h = int(trackedPosition.height())

            cv2.rectangle(resultImage, (t_x, t_y), (t_x + t_w, t_y + t_h), rectangleColor, 4)
            center_x = t_x + t_w // 2
            center_y = t_y + t_h // 2
            # rects.append((t_x, t_y, t_w, t_h))
            cv2.circle(resultImage, (center_x, center_y), 8, (0, 255, 255), 1)
            # speed estimation
            carLocation2[carID] = [t_x, t_y, t_w, t_h]

        end_time = time.time()

        if not (end_time == start_time):
            fps = 1.0 / (end_time - start_time)

        # cv2.putText(resultImage, 'FPS: ' + str(int(fps)), (620, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        for i in carLocation1.keys():
            if frameCounter % 1 == 0:  # always true; change 1 to N to sample every N frames
                [x1, y1, w1, h1] = carLocation1[i]
                [x2, y2, w2, h2] = carLocation2[i]

                # print 'previous location: ' + str(carLocation1[i]) + ', current location: ' + str(carLocation2[i])
                carLocation1[i] = [x2, y2, w2, h2]

                # print 'new previous location: ' + str(carLocation1[i])
                if [x1, y1, w1, h1] != [x2, y2, w2, h2]:
                    if (speed[i] is None or speed[i] == 0) and 275 <= y1 <= 285:
                        speed[i] = estimateSpeed([x1, y1, w1, h1], [x2, y2, w2, h2])

                    # if y1 > 275 and y1 < 285:
                    if speed[i] is not None and y1 >= 180:
                        cv2.putText(resultImage, str(int(speed[i])) + " km/hr", (int(x1 + w1 / 2), int(y1 - 5)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
                info = [
                    ("tracker", zs)
                ]
                for (idx, (k, v)) in enumerate(info):
                    text = "{}: {}".format(k, v)
                    cv2.putText(resultImage, text, (670, HEIGHT - ((idx * 20) + 20)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)
                # print ('CarID ' + str(i) + ': speed is ' + str("%.2f" % round(speed[i], 0)) + ' km/h.\n')

                # else:
                #	cv2.putText(resultImage, "Far Object", (int(x1 + w1/2), int(y1)),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

                # print ('CarID ' + str(i) + ' Location1: ' + str(carLocation1[i]) + ' Location2: ' + str(carLocation2[i]) + ' speed is ' + str("%.2f" % round(speed[i], 0)) + ' km/h.\n')
        cv2.imshow('result', resultImage)

        # Write the frame into the file 'output.avi'
        # out.write(resultImage)

        if cv2.waitKey(33) == 27:
            break

    video.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    trackMultipleObjects()
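
Both video scripts assume myhaar.xml and the input video sit next to the script. cv2.CascadeClassifier loads silently even when the file is missing, so a small guard (a sketch reusing the file names above) fails fast instead of detecting nothing:

import os
import cv2

for asset in ('myhaar.xml', 'cars.mp4'):
    if not os.path.exists(asset):
        raise FileNotFoundError('missing required asset: ' + asset)

carCascade = cv2.CascadeClassifier('myhaar.xml')
# an empty cascade means the XML file existed but did not parse as a cascade
assert not carCascade.empty(), 'myhaar.xml did not load as a cascade'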

Pedestrian detection and counting 2

import cv2
import dlib
import time
import numpy as np
# from imutils.object_detection import non_max_suppression
import math

hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
carCascade = cv2.CascadeClassifier('myhaar.xml')
video = cv2.VideoCapture('zuqiu.flv')


def estimateSpeed(location1, location2):
    d_pixels = math.sqrt(math.pow(location2[0] - location1[0], 2) + math.pow(location2[1] - location1[1], 2))
    # ppm = location2[2] / carWidht
    ppm = 8.8
    d_meters = d_pixels / ppm
    # print("d_pixels=" + str(d_pixels), "d_meters=" + str(d_meters))
    fps = 18
    speed = d_meters * fps * 3.6
    return speed


def trackMultipleObjects():
    out = None
    counter_right = []
    counter_moto = []
    counter_wrong = []
    counter_wrong_detect = []
    rects = []
    js = 0  # number of trackers removed so far
    zs = 0  # number of currently active trackers

    line_pass_left = np.array([[115, 200], [115, 320]])
    line_pass_right = np.array([[930, 200], [930, 320]])
    mask_people = np.array([line_pass_left[0], line_pass_left[1], line_pass_right[1], line_pass_right[0]])

    # line_down = [(0, 360), (800, 360)]
    # rectangleColor = (0, 255, 0)
    frameCounter = 0
    currentCarID = 0
    fps = 0

    carTracker = {}
    carNumbers = {}
    carLocation1 = {}
    carLocation2 = {}
    speed = [None] * 1000

    HEIGHT = 720
    WIDTH = 1280
    # EXIT_COLOR = (66, 183, 42)
    # Write output to video file
    out = cv2.VideoWriter('project_output_haar_and_svm1.avi', cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), 10,
                          (WIDTH, HEIGHT))

    while True:
        start_time = time.time()
        rc, image = video.read()

        if image is None:
            break

        image = cv2.resize(image, (WIDTH, HEIGHT))
        resultImage = image.copy()
        frameCounter = frameCounter + 1

        carIDtoDelete = []

        for carID in carTracker.keys():
            trackingQuality = carTracker[carID].update(image)

            if trackingQuality < 7:
                carIDtoDelete.append(carID)

        for carID in carIDtoDelete:
            print('Removing carID ' + str(carID) + ' from list of trackers.')
            print('Removing carID ' + str(carID) + ' previous location.')
            print('Removing carID ' + str(carID) + ' current location.')
            carTracker.pop(carID, None)
            carLocation1.pop(carID, None)
            carLocation2.pop(carID, None)

            js = js + 1  # one more tracker removed

        # detecting
        if not (frameCounter % 10):
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
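            # winStride and padding trade detection density for speed; scale is
            # the pyramid step (closer to 1.0 = finer but slower search)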
            (rects, weights) = hog.detectMultiScale(gray, winStride=(4, 4),
                                                    padding=(8, 8), scale=1.01)

            for (_x, _y, _w, _h) in rects:
                x = int(_x)
                y = int(_y)
                w = int(_w)
                h = int(_h)

                x_bar = x + 0.5 * w
                y_bar = y + 0.5 * h

                matchCarID = None

                for carID in carTracker.keys():
                    trackedPosition = carTracker[carID].get_position()

                    t_x = int(trackedPosition.left())
                    t_y = int(trackedPosition.top())
                    t_w = int(trackedPosition.width())
                    t_h = int(trackedPosition.height())

                    t_x_bar = t_x + 0.5 * t_w
                    t_y_bar = t_y + 0.5 * t_h

                    if ((t_x <= x_bar <= (t_x + t_w)) and (t_y <= y_bar <= (t_y + t_h)) and (
                            x <= t_x_bar <= (x + w)) and (y <= t_y_bar <= (y + h))):
                        matchCarID = carID
                # no existing tracker matched this detection: create a new one
                if matchCarID is None:
                    print('Creating new tracker ' + str(currentCarID))

                    tracker = dlib.correlation_tracker()
                    tracker.start_track(image, dlib.rectangle(x, y, x + w, y + h))

                    carTracker[currentCarID] = tracker
                    carLocation1[currentCarID] = [x, y, w, h]

                    currentCarID = currentCarID + 1
                    zs = currentCarID - js  # active trackers = created so far - removed
                    print(zs)

        # cv2.line(resultImage, line_pass_right[0], line_pass_right[1], (255, 255, 0), 5)
        # cv2.line(resultImage, line_pass_left[0], line_pass_left[1], (255, 255, 0), 5)
        # cv2.line(resultImage, line_down[0], line_down[1], (255, 0, 0), 5)
        #
        # tracking

        for carID in carTracker.keys():
            trackedPosition = carTracker[carID].get_position()

            t_x = int(trackedPosition.left())
            t_y = int(trackedPosition.top())
            t_w = int(trackedPosition.width())
            t_h = int(trackedPosition.height())
            center_x = t_x + t_w // 2
            center_y = t_y + t_h // 2
            # rects.append((t_x, t_y, t_w, t_h))
            cv2.circle(resultImage, (center_x, center_y), 5, (0, 255, 255), 3)
            cv2.rectangle(resultImage, (t_x, t_y), (t_x + t_w, t_y + t_h), (0, 255, 0), 4)
            # ------------------------------------------------------------------
            # Disabled line-crossing counter, kept for reference (it depends on
            # line_down, which is commented out above):
            #
            # if ((line_pass_left[0][0] <= center_x <= line_pass_left[1][0] + 40) and (line_pass_left[0][1] <= center_y <= line_pass_left[1][1])) or ((line_pass_right[0][0] - 10 <= center_x <= line_pass_right[1][0]) and (line_pass_right[0][1] <= center_y <= line_pass_right[1][1])):
            #     counter_right.append(carID)
            #     cv2.putText(resultImage, "Pedestrian", (center_x - 10, center_y - 10),
            #                 cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
            # elif (line_down[0][0] <= center_x <= line_down[1][0]) and (line_down[0][1] - 20 <= center_y <= line_down[0][1] + 20):
            #     counter_moto.append(carID)
            #     cv2.putText(resultImage, "moto", (center_x - 10, center_y - 10),
            #                 cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 250, 250), 2)
            # else:
            #     counter_wrong.append(carID)
            #     cv2.putText(resultImage, "Pedestrian", (center_x - 10, center_y - 10),
            #                 cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            #
            # info = [
            #     ("walking in right place", len(set(counter_right))),
            #     ("moto", len(set(counter_moto))),
            #     ("walking in wrong place", len(set(counter_wrong))),
            # ]
            # for (idx, (k, v)) in enumerate(info):
            #     text = "{}: {} people".format(k, v)
            #     cv2.putText(resultImage, text, (670, HEIGHT - ((idx * 20) + 20)),
            #                 cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)
            # ------------------------------------------------------------------

            carLocation2[carID] = [t_x, t_y, t_w, t_h]  # save current position for speed estimation
        end_time = time.time()

        if not (end_time == start_time):
            fps = 1.0 / (end_time - start_time)

        # cv2.putText(resultImage, 'FPS: ' + str(int(fps)), (620, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        for i in carLocation1.keys():
            if frameCounter % 1 == 0:  # always true; change 1 to N to sample every N frames
                [x1, y1, w1, h1] = carLocation1[i]
                [x2, y2, w2, h2] = carLocation2[i]

                # print 'previous location: ' + str(carLocation1[i]) + ', current location: ' + str(carLocation2[i])
                carLocation1[i] = [x2, y2, w2, h2]

                # print 'new previous location: ' + str(carLocation1[i])
                if [x1, y1, w1, h1] != [x2, y2, w2, h2]:
                    if speed[i] is None or speed[i] == 0:
                        speed[i] = estimateSpeed([x1, y1, w1, h1], [x2, y2, w2, h2])

                    # if y1 > 275 and y1 < 285:
                    if (33 <= speed[i] <= 45) and y1 >= 100:
                        cv2.circle(resultImage, (x1 + w1 // 2, y1 + h1 // 2), 4, (0, 255, 255), -1)
                        cv2.putText(resultImage, "P:" + str(int(speed[i])) + " km/hr", (int(x1 + w1 / 2), int(y1 - 5)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
                        counter_right.append(i)
                    elif speed[i] > 45 and y1 >= 100:  # faster tracks get labelled as motos
                        cv2.circle(resultImage, (x1 + w1 // 2, y1 + h1 // 2), 4, (0, 255, 255), -1)
                        cv2.putText(resultImage, "Moto" + str(int(speed[i])) + "km/hr",
                                    (int(x1 + w1 / 2), int(y1 - 5)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 2)
                        counter_moto.append(i)

                info = [
                    ("walking in right place", len(set(counter_right))),
                    ("moto", len(set(counter_moto))),
                    ("tracker", zs)
                ]
                for (idx, (k, v)) in enumerate(info):
                    text = "{}: {} people".format(k, v)
                    cv2.putText(resultImage, text, (670, HEIGHT - ((idx * 20) + 20)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)
                # print ('CarID ' + str(i) + ': speed is ' + str("%.2f" % round(speed[i], 0)) + ' km/h.\n')

                # else:
                #	cv2.putText(resultImage, "Far Object", (int(x1 + w1/2), int(y1)),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

                # print ('CarID ' + str(i) + ' Location1: ' + str(carLocation1[i]) + ' Location2: ' + str(carLocation2[i]) + ' speed is ' + str("%.2f" % round(speed[i], 0)) + ' km/h.\n')

        cv2.imshow('result', resultImage)

        # Write the frame into the output file
        out.write(resultImage)

        if cv2.waitKey(33) == 27:
            break
    # release the video writer and the video file pointers
    if out is not None:
        out.release()
    video.release()

    cv2.destroyAllWindows()


if __name__ == '__main__':
    trackMultipleObjects()

Adapted from Kewei_LI_ECN's code.
