13. Corner Detection (角点检测)
# coding: utf-8
# In[7]:
#lesson13
# In[18]:
import cv2
import numpy as np

# lesson13: Shi-Tomasi corner detection on a sample image.
# Load the image and build a float32 grayscale copy, as required by
# cv2.goodFeaturesToTrack.
img = cv2.imread('opencv-corner-detection-sample.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)

# Detect up to 50 corners with quality level 0.01 and a minimum
# Euclidean distance of 10 px between returned corners.
corners = cv2.goodFeaturesToTrack(gray, 50, 0.01, 10)
# np.int0 was removed in NumPy 2.0; np.intp is the equivalent alias.
corners = np.intp(corners)

# Mark each detected corner with a filled circle of radius 5.
for corner in corners:
    x, y = corner.ravel()
    # Plain Python ints: newer OpenCV releases reject numpy scalars here.
    cv2.circle(img, (int(x), int(y)), 5, 255, -1)

cv2.imwrite('lesson13-corner.png', img)
其中,输入图像为 opencv-corner-detection-sample.jpg,
输出结果为 lesson13-corner.png。
14. feature matching-brute force( 特征匹配-暴力算法)
# coding: utf-8
# In[1]:
#lesson14
# In[3]:
import cv2
import numpy as np
import matplotlib.pyplot as plt


def _load_gray(path):
    """Read *path* as a grayscale image, failing loudly if it is missing."""
    image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    if image is None:
        # cv2.imread returns None silently on a missing/unreadable file;
        # without this check the script crashes later with a cryptic error.
        raise FileNotFoundError(path)
    return image


def _save_and_show(matches_img, filename):
    """Write the match visualisation to disk and display it with pyplot."""
    cv2.imwrite(filename, matches_img)
    plt.imshow(matches_img)
    plt.show()


# lesson14: brute-force ORB feature matching (template vs. scene image).
img1 = _load_gray('opencv-feature-matching-template.jpg')
img2 = _load_gray('opencv-feature-matching-image.jpg')

# ORB keypoints + binary descriptors for both images.
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

# Hamming distance is the correct norm for ORB's binary descriptors;
# crossCheck keeps only mutual best matches. Sort best-first.
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)

# Visualise the best 30 and the best 10 matches.
img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:30], None, flags=2)
_save_and_show(img3, 'lesson14-result1.jpg')

img4 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:10], None, flags=2)
_save_and_show(img4, 'lesson14-result2.jpg')
其中,输入
opencv-feature-matching-template.jpg
opencv-feature-matching-image.jpg
输出结果
lesson14-result1.jpg
lesson14-result2.jpg
15. MOG background reduction
# coding: utf-8
# In[1]:
#lesson15
# In[13]:
import cv2
import numpy as np

# lesson15: MOG2 background subtraction on a walking-people video.
cap = cv2.VideoCapture('people-walking.mp4')
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    # cap.read() returns ret=False (frame=None) once the video ends; the
    # original loop never checked it and crashed inside fgbg.apply(None).
    if not ret:
        break
    fgmask = fgbg.apply(frame)
    cv2.imshow('original', frame)
    cv2.imshow('fg', fgmask)
    # Allow early exit with the 'q' key.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
运行结果显示在 'original'(原始帧)与 'fg'(前景掩模)两个窗口中。
16. Haar Cascade Object Detection — Face &amp; Eye(人脸与眼睛检测)
# coding: utf-8
# In[1]:
#lesson16
# In[1]:
import cv2
import numpy as np
import matplotlib.pyplot as plt

# lesson16: real-time face and eye detection on webcam frames using the
# pre-trained Haar cascades shipped with OpenCV.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

cap = cv2.VideoCapture(0)
while True:
    ret, img = cap.read()
    # Skip frames the camera failed to deliver. The original tested
    # `ret is True` — an identity check; the truthiness test is the
    # robust, idiomatic form.
    if not ret:
        continue
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Detect faces on the full grayscale frame, then look for eyes only
    # inside each face's region of interest (cheaper and fewer false hits).
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        for (ex, ey, ew, eh) in eye_cascade.detectMultiScale(roi_gray):
            # Drawing on roi_color draws on the original frame (view).
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)

    cv2.imshow('img', img)
    # Allow early exit with the 'q' key.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()