# USAGE
#   python motion_detector.py
#   python motion_detector.py --video videos/example_01.mp4
#   python motion_detector.py --video Gawain-2066.wmv

# import the necessary packages
from imutils.video import VideoStream
from imutils import contours

import argparse
import datetime
import time

import cv2
import imutils
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500,
                help="minimum area size")
args = vars(ap.parse_args())

# if the video argument is None, then we are reading from the webcam
if args.get("video", None) is None:
    vs = VideoStream(src=0).start()
    time.sleep(2.0)  # let the camera sensor warm up
# otherwise, we are reading from a video file
else:
    # BUG FIX: open the file the user supplied on the command line
    # instead of a hard-coded "Gawain-2066.wmv"
    vs = cv2.VideoCapture(args["video"])
# initialize the first (reference) frame in the video stream and open the
# log file that records: frame number <TAB> status <TAB> count
f = open('test1.txt', 'w')
firstFrame = None

# seek to frame 800 to grab a reference/background frame
# (frames 4848 / 4860 were noted as blank alternatives)
# NOTE(review): this seek + tuple-unpacking read assumes the VideoCapture
# (file) branch; VideoStream (webcam) has no .set() — confirm intended use
vs.set(cv2.CAP_PROP_POS_FRAMES, 800)
number = 800

# read() returns (grabbed_flag, frame); grabbed is True on success
grabbed, raw = vs.read()
frame = imutils.resize(raw, width=800)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (9, 9), 0)

# crop to the region of interest and keep it as the reference frame
imageArea = gray[250:, 250:680]
firstFrame = imageArea
cv2.imshow('frame', firstFrame)
cv2.imwrite('original.jpg', firstFrame)
cv2.waitKey(0)
count = 1

# rewind to the reference frame before entering the detection loop
vs.set(cv2.CAP_PROP_POS_FRAMES, number)
# loop over the frames of the video
while True:
    # grab the current frame; VideoStream.read() returns the frame directly,
    # while VideoCapture.read() returns a (grabbed, frame) tuple
    frame = vs.read()
    frame = frame if args.get("video", None) is None else frame[1]
    text = "Unprinted"
    font = (0, 255, 0)  # BGR color for the status text (green = idle)

    # if the frame could not be grabbed, we have reached the end of the video
    if frame is None:
        break

    # NOTE(review): a 10 s pause per frame looks like a debugging artifact —
    # confirm whether it is intentional before removing
    time.sleep(10)

    # resize, crop the region of interest, grayscale, and blur — the same
    # preprocessing applied to the reference frame (plus a dilation)
    frame = imutils.resize(frame, width=800)
    frame = frame[250:, 250:680]
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (9, 9), 0)
    gray = cv2.dilate(gray, None, iterations=2)

    # compute the absolute difference between the current frame and the
    # reference frame, then threshold it to a binary motion mask
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 20, 255, cv2.THRESH_BINARY)[1]

    # dilate the thresholded image to fill in holes, then find contours
    # on the thresholded image, sorted left-to-right
    thresh = cv2.dilate(thresh, None, iterations=2)
    cv2.imshow("thresh", thresh)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    (cnts, _) = contours.sort_contours(cnts)
    flag = False
    number += 1

    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < args["min_area"]:
            continue
        # only annotate the first sufficiently large contour
        if flag:
            continue
        # BUG FIX: was `flag == True`, a no-op comparison — the guard above
        # never fired, so every large contour was drawn instead of one
        flag = True
        # compute the bounding box for the contour, draw it on the frame,
        # and update the status text/color
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Printing"
        font = (0, 0, 255)  # red = motion detected
        count = 0

    # draw the status text and timestamp on the frame
    cv2.putText(frame, "Print Status: {}".format(text), (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, font, 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                (0, 0, 255), 1)
    print(text)

    # log one line per frame: frame number <TAB> status <TAB> count
    f.write(str(number))
    f.write('\t')
    f.write(text)
    f.write('\t')
    f.write(str(count))
    f.write('\n')
    count = 1

    # show the frame and record if the user presses a key
    time.sleep(0.1)
    cv2.imshow("Security Feed", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key is pressed, break from the loop
    if key == ord("q"):
        break
# cleanup: stop the webcam stream (or release the video file), close any
# open windows, and close the log file
if args.get("video", None) is None:
    vs.stop()
else:
    vs.release()
cv2.destroyAllWindows()
f.close()