Learning OpenCV

OpenCV - simple hands-on code practice

import cv2
print("Package imported")

img = cv2.imread("img/test.jpg")

# The first argument is the window name, the second is the image to display
cv2.imshow("output",img)

# 0 means wait indefinitely; any other value is a delay in milliseconds
cv2.waitKey(0)
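Note that cv2.imread does not raise an error when the file cannot be read; it simply returns None, and the later imshow then fails. A minimal defensive version of the snippet above (same test image path) might look like this sketch:

import cv2

img = cv2.imread("img/test.jpg")
if img is None:
    # imread returns None on a missing or unreadable file
    raise FileNotFoundError("img/test.jpg could not be read")

cv2.imshow("output", img)
cv2.waitKey(0)
cv2.destroyAllWindows()  # close the window once a key has been pressed
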
import cv2

cap = cv2.VideoCapture("img/20210318.mp4")

while True:
    success, img = cap.read()
    cv2.imshow("Vidio",img)

    # cv2.waitKey returns the code of the key currently pressed, or -1 if no key is pressed; & 0xFF keeps only the low 8 bits so the comparison with ord('q') works reliably
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
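cap.read() returns success=False once the video ends (or a frame cannot be grabbed), and imshow then fails on the empty frame; it is also good practice to release the capture and close the windows afterwards. A sketch of the same playback loop with those additions:

import cv2

cap = cv2.VideoCapture("img/20210318.mp4")

while True:
    success, img = cap.read()
    if not success:  # end of file or read error
        break
    cv2.imshow("Video", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()            # free the video file / camera handle
cv2.destroyAllWindows()
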
import cv2

cap = cv2.VideoCapture(0)

# Property 3 is the frame width and 4 the frame height; property 10 sets the brightness
cap.set(3,640)
cap.set(4,640)

while True:
    success, img = cap.read()
    cv2.imshow("Vidio",img)

    # cv2.waitKey returns the code of the key currently pressed, or -1 if no key is pressed; & 0xFF keeps only the low 8 bits so the comparison with ord('q') works reliably
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
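The magic numbers 3, 4 and 10 passed to cap.set are just property IDs; OpenCV provides named constants for them, which reads more clearly. A minimal sketch:

import cv2

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)   # same as cap.set(3, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)  # same as cap.set(4, 480)
cap.set(cv2.CAP_PROP_BRIGHTNESS, 150)    # same as cap.set(10, 150)

success, img = cap.read()
if success:
    cv2.imshow("Video", img)
    cv2.waitKey(0)
cap.release()
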
import cv2
import numpy as np

img = cv2.imread("img/test.jpg")

# Create a 5x5 kernel of ones with dtype uint8
kernel = np.ones((5,5),np.uint8)

# Convert the image to grayscale
imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

# Gaussian blur; the second argument is the kernel size and the third is sigmaX, the Gaussian standard deviation in the X direction
imgBlur = cv2.GaussianBlur(imgGray,(9,9),10)

# Canny edge detection; the second and third arguments are the hysteresis thresholds: the lower they are, the more edges are kept
imgCanny = cv2.Canny(img, 100, 200)

# Dilation; the second argument is the kernel, iterations is how many times it is applied and controls how thick the edges become
imgDialation = cv2.dilate(imgCanny,kernel,iterations=2)

# Erosion is the opposite of dilation: it thins the edges back down
imgEroded = cv2.erode(imgDialation,kernel,iterations=2)

cv2.imshow("Gray Image", imgGray)
cv2.imshow("Blur Image", imgBlur)
cv2.imshow("Canny1 Image", imgCanny)
cv2.imshow("Dialation Image", imgDialation)
cv2.imshow("Eroded Image", imgEroded)
cv2.waitKey(0)
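Dilation followed by erosion, as above, is the morphological "closing" operation; OpenCV also exposes it directly through cv2.morphologyEx, which can replace the two separate calls. A minimal sketch with the same kernel:

import cv2
import numpy as np

img = cv2.imread("img/test.jpg")
kernel = np.ones((5, 5), np.uint8)

imgCanny = cv2.Canny(img, 100, 200)
# MORPH_CLOSE = dilate then erode; MORPH_OPEN = erode then dilate
imgClosed = cv2.morphologyEx(imgCanny, cv2.MORPH_CLOSE, kernel, iterations=2)

cv2.imshow("Closed Image", imgClosed)
cv2.waitKey(0)
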
import cv2
import numpy as np

img = cv2.imread("img/test.jpg")
print(img.shape)

# Resize the image; the size is given as (width, height)
imgResize = cv2.resize(img,(300,200))

# Crop by NumPy slicing; the slice is [y1:y2, x1:x2] over rows and columns, the channel axis is left untouched
imgCropped = img[0:500,0:500]

cv2.imshow("img",img)
cv2.imshow("imgResize",imgResize)
cv2.imshow("imgCropped",imgCropped)

cv2.waitKey(0)
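Because cv2.resize takes (width, height) while img.shape reports (height, width, channels), it is easy to distort the image. Resizing by a scale factor keeps the aspect ratio; a minimal sketch:

import cv2

img = cv2.imread("img/test.jpg")
h, w = img.shape[:2]  # shape is (height, width, channels)

scale = 0.5
imgHalf = cv2.resize(img, (int(w * scale), int(h * scale)))
# equivalently: cv2.resize(img, None, fx=scale, fy=scale)

cv2.imshow("imgHalf", imgHalf)
cv2.waitKey(0)
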
import cv2
import numpy as np

# Create a three-channel (BGR) matrix of zeros as a blank color image
img = np.zeros((512,512,3),np.uint8)
print(img)

# Within this region, set the first (blue) channel to 255 and the other two channels to 0
#img[200:300,100:300] = 255,0,0

# Draw a line from the point in the second argument to the point in the third, using the color in the fourth argument and the thickness in the fifth
# The origin is the top-left corner; points are given as (x, y), i.e. column first and then row, the opposite order of what img.shape returns
cv2.line(img,(0,0),(300,300),(0,255,0),3)

# Draw a rectangle; the second and third arguments are opposite corners, and passing cv2.FILLED as the last argument fills it with the color
cv2.rectangle(img,(0,0),(250,350),(0,0,255),2)

# Draw a circle: center, radius, color, thickness
cv2.circle(img,(400,50),30,(255,0,0),5)

# Put text on the image; the fourth argument is the font face and the fifth is the font scale
cv2.putText(img,"OPENCV",(300,100),cv2.FONT_HERSHEY_COMPLEX,1,(0,150,0),1)

cv2.imshow("Image",img)

cv2.waitKey(0)
import cv2
import numpy as np

img = cv2.imread("img/test.jpg")

width,height = 250,350
pts1 = np.float32([[111,219],[287,118],[154,482],[352,440]])
pts2 = np.float32([[0,0],[width,0],[0,height],[width,height]])

# Map the four points in pts1 onto the corresponding four points in pts2
matrix = cv2.getPerspectiveTransform(pts1,pts2)

# The third argument is the size of the output image
imgOutput = cv2.warpPerspective(img,matrix,(width,height))

cv2.imshow("Image",img)
cv2.imshow("ImgOutput",imgOutput)

cv2.waitKey(0)
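If the warped result comes out mirrored or rotated, the usual cause is that the order of the points in pts1 does not match pts2 (top-left, top-right, bottom-left, bottom-right). A quick way to check is to number the pts1 points on the source image, as in this sketch:

import cv2
import numpy as np

img = cv2.imread("img/test.jpg")
pts1 = np.float32([[111,219],[287,118],[154,482],[352,440]])

for i, (x, y) in enumerate(pts1):
    # Mark each source point with its index so the ordering can be verified visually
    cv2.circle(img, (int(x), int(y)), 8, (0, 0, 255), cv2.FILLED)
    cv2.putText(img, str(i), (int(x) + 10, int(y)), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)

cv2.imshow("pts1 order", img)
cv2.waitKey(0)
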
import cv2
import numpy as np

img = cv2.imread("img/test.jpg")

# Use NumPy to stack the image horizontally with itself
imghor = np.hstack((img,img))

# Stack vertically
imgVer = np.vstack((img,img))

cv2.imshow("Imahor",imghor)
cv2.imshow("ImaVer",imgVer)

cv2.waitKey(0)
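np.hstack only works when the images share the same height and number of channels (and np.vstack needs matching widths), so a grayscale image has to be converted back to BGR before stacking it next to a color one. Sketch:

import cv2
import numpy as np

img = cv2.imread("img/test.jpg")
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Give the gray image three channels again so the shapes match
imgGray3 = cv2.cvtColor(imgGray, cv2.COLOR_GRAY2BGR)
imgHor = np.hstack((img, imgGray3))

cv2.imshow("ImgHor", imgHor)
cv2.waitKey(0)
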
import cv2
import numpy as np

def empty(a):
    pass

# Create a new OpenCV window named TrackBars
cv2.namedWindow("TrackBars")

# Set the window size
cv2.resizeWindow("TrackBars",640,240)

# Create a trackbar: the first argument is its name, the second the window to attach it to, the third and fourth the initial and maximum values, and the last the callback fired when it moves
cv2.createTrackbar("Hue Min","TrackBars",0,179,empty)
cv2.createTrackbar("Hue Max","TrackBars",179,179,empty)
cv2.createTrackbar("Sat Min","TrackBars",0,255,empty)
cv2.createTrackbar("Sat Max","TrackBars",255,255,empty)
cv2.createTrackbar("Val Min","TrackBars",0,255,empty)
cv2.createTrackbar("Val Max","TrackBars",255,255,empty)

while True:

    img = cv2.imread("img/test.jpg")

    # Like the grayscale conversion above, but here the image is converted to the HSV color space
    imgHSV = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)

    # Read the current value of the trackbar named by the first argument in the window named by the second
    h_min = cv2.getTrackbarPos("Hue Min","TrackBars")
    h_max = cv2.getTrackbarPos("Hue Max","TrackBars")
    s_min = cv2.getTrackbarPos("Sat Min","TrackBars")
    s_max = cv2.getTrackbarPos("Sat Max","TrackBars")
    v_min = cv2.getTrackbarPos("Val Min","TrackBars")
    v_max = cv2.getTrackbarPos("Val Max","TrackBars")

    # Build the lower and upper HSV bounds used to filter the image
    lower = np.array([h_min,s_min,v_min])
    upper = np.array([h_max,s_max,v_max])
    mask = cv2.inRange(imgHSV,lower,upper)

    # Keep only the pixels of img where the mask is white
    imgResult = cv2.bitwise_and(img,img,mask=mask)

    cv2.imshow("Image",img)
    cv2.imshow("ImgHSV",imgHSV)
    cv2.imshow("Mask",mask)
    cv2.imshow("imgResult",imgResult)

    cv2.waitKey(1)
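The trackbars are only needed while tuning; once good slider values have been found they can be hard-coded and the mask reused directly. A minimal sketch (the HSV bounds below are placeholder values, not tuned for img/test.jpg):

import cv2
import numpy as np

img = cv2.imread("img/test.jpg")
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

# Placeholder range: replace with the values read off the trackbars above
lower = np.array([0, 100, 100])
upper = np.array([10, 255, 255])

mask = cv2.inRange(imgHSV, lower, upper)
imgResult = cv2.bitwise_and(img, img, mask=mask)

cv2.imshow("imgResult", imgResult)
cv2.waitKey(0)
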
import cv2
import numpy as np

# Return a single image that combines everything in imgArray; scale is the resize factor
def stackImages(scale,imgArray):
    rows = len(imgArray)
    cols = len(imgArray[0])
    rowsAvailable = isinstance(imgArray[0], list)
    width = imgArray[0][0].shape[1]
    height = imgArray[0][0].shape[0]
    if rowsAvailable:
        for x in range ( 0, rows):
            for y in range(0, cols):
                if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
                else:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)
                if len(imgArray[x][y].shape) == 2: imgArray[x][y]= cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)
        imageBlank = np.zeros((height, width, 3), np.uint8)
        hor = [imageBlank]*rows
        hor_con = [imageBlank]*rows
        for x in range(0, rows):
            hor[x] = np.hstack(imgArray[x])
        ver = np.vstack(hor)
    else:
        for x in range(0, rows):
            if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
                imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
            else:
                imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)
            if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
        hor= np.hstack(imgArray)
        ver = hor
    return ver

# Outline the geometric shapes found in the edge image, classify them, and draw the results onto imgContour
def getCountours(img):
    # findContours locates contours in the edge image; the second argument is the retrieval mode (RETR_EXTERNAL keeps only the outer contours); the results are stored in countours
    # See https://blog.csdn.net/hjxu2016/article/details/77833336/ for details
    countours,hierarchy = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    for cnt in countours:
        # Area enclosed by the contour
        area = cv2.contourArea(cnt)
        print(area)
        # drawContours: the first argument is the destination image, the second the contour, -1 means draw all of it, and the last argument is the line thickness
        if area > 500:
            cv2.drawContours(imgContour, cnt, -1, (255, 0, 0), 3)
            # Perimeter of the contour; True means the contour is closed
            peri = cv2.arcLength(cnt,True)
            # print(peri)

            # Approximate the contour as a polygon; the number of vertices hints at the shape (3 for a triangle, 4 for a rectangle, many for a circle); the vertex coordinates for this contour are returned
            approx= cv2.approxPolyDP(cnt,0.02*peri,True)
            print(approx)
            objCor = len(approx)

            # Bounding rectangle of the vertices: top-left corner (x, y) plus width and height; cnt could also be passed here instead of approx
            x, y, w, h = cv2.boundingRect(approx)

            ObjeactType = "None"
            if objCor == 3:
                ObjeactType = "Tri"

            # Draw the bounding rectangle and label the shape
            cv2.rectangle(imgContour,(x,y),(x+w,y+h),(0,255,0),2)
            cv2.putText(imgContour,ObjeactType,
                        (x+(w//2)-10,y+(h//2)-10),cv2.FONT_HERSHEY_COMPLEX,1,
                        (0,255,0),1)


path = "img/test.jpg"
img = cv2.imread(path)
imgContour = img.copy()

imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray,(7,7),1)
imgCanny = cv2.Canny(imgBlur,50,50)
getCountours(imgCanny)

imgBlack = np.zeros_like(img)
imgStack = stackImages(0.6,([img,imgGray,imgBlur],
                            [imgCanny,imgContour,imgBlack]))

cv2.imshow("imgStack",imgStack)

cv2.waitKey(0)
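The shape labelling above only handles triangles (objCor == 3). The same vertex count, combined with the bounding-box aspect ratio, extends naturally to squares, rectangles and circles; a sketch of such a helper (classifyShape is a hypothetical name and the thresholds are illustrative):

def classifyShape(objCor, w, h):
    # objCor = number of polygon vertices, (w, h) = bounding box size
    if objCor == 3:
        return "Tri"
    if objCor == 4:
        aspRatio = w / float(h)
        # Close to 1:1 is treated as a square, otherwise a general rectangle
        return "Square" if 0.95 < aspRatio < 1.05 else "Rect"
    if objCor > 4:
        return "Circle"
    return "None"

# e.g. inside getCountours: ObjeactType = classifyShape(objCor, w, h)
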
import cv2

# Face detection: load the Haar cascade classifier
faceCascade = cv2.CascadeClassifier("Resources/haarcascade_frontalface_default.xml")
img = cv2.imread("Resources/lena.png")

imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

# 1.1 is the scale factor (how much the image is shrunk at each detection scale); 4 is minNeighbors, the minimum number of neighboring detections needed to keep a result
# https://www.cnblogs.com/lyx2018/p/7073025.html
faces = faceCascade.detectMultiScale(imgGray,1.1,4)

for (x,y,w,h) in faces:
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

cv2.imshow("img",img)
cv2.waitKey(0)
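The same cascade works frame by frame on a webcam stream, reusing the capture loop from the earlier examples. A sketch (same cascade file path assumed):

import cv2

faceCascade = cv2.CascadeClassifier("Resources/haarcascade_frontalface_default.xml")
cap = cv2.VideoCapture(0)

while True:
    success, img = cap.read()
    if not success:
        break
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(imgGray, 1.1, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow("img", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()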

import cv2
import numpy as np
import time


##################
widthImg = 640
heightImg = 480
#######################
frameWidth = 640
frameHeight = 480


cap = cv2.VideoCapture(0)
cap.set(3,frameWidth)
cap.set(4,frameHeight)
cap.set(10,150)


def perProcessing(img):
    imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray,(5,5),1)
    imgCanny = cv2.Canny(imgBlur,200,200)
    kernel = np.ones((5,5))
    imgDial = cv2.dilate(imgCanny,kernel,iterations=2)
    imgThres = cv2.erode(imgDial,kernel,iterations=1)

    return imgThres


def getCountours(img):
    biggest = np.array([])
    maxArea = 0
    contours, hierarchy = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area>5000:
            # cv2.drawContours(imgCount,cnt,-1,(255,0,0,3))
            peri = cv2.arcLength(cnt,True)
            # Approximate the contour as a polygon to find its corner points
            approx = cv2.approxPolyDP(cnt,0.02*peri,True)
            if area > maxArea and len(approx) == 4:
                biggest = approx
                maxArea = area
    cv2.drawContours(img, biggest, -1, (255, 0, 0), 20)
    return biggest


def reorder(myPoints):
    myPoints = myPoints.reshape((4, 2))
    myPointsNew = np.zeros((4, 1, 2), np.int32)
    add =myPoints.sum(1)
    # print("add",add)

    myPointsNew[0] = myPoints[np.argmin(add)]
    myPointsNew[3] = myPoints[np.argmax(add)]
    # print("NewPoints",myPointsNew)
    diff = np.diff(myPoints,axis=1)
    myPointsNew[1] = myPoints[np.argmin(diff)]
    myPointsNew[2] = myPoints[np.argmax(diff)]
    return myPointsNew


def getWarp(img, biggest):
    biggest = reorder(biggest)
    print(biggest)
    pts1 = np.float32(biggest)
    pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    imgOutPut = cv2.warpPerspective(img, matrix, (widthImg, heightImg))

    imgCropped = imgOutPut[20:imgOutPut.shape[0]-20, 20:imgOutPut.shape[1]-20]
    imgCropped = cv2.resize(imgCropped,(widthImg,heightImg))

    return imgCropped


# img = cv2.imread("Resources/paper.jpg")
while True:
    success,img = cap.read()
    img = cv2.resize(img, (widthImg, heightImg))
    imgCount = img.copy()

    imgThres = perProcessing(img)
    biggest = getCountours(imgThres)
    if biggest.size != 0:
        imgWarped = getWarp(img, biggest)
        cv2.imshow("result", imgWarped)
    else:
        # No four-corner document found in this frame; show the raw frame instead
        cv2.imshow("result", img)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
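The reorder trick relies on the fact that, for the four corners of the sheet, x + y is smallest at the top-left and largest at the bottom-right, while y - x is smallest at the top-right and largest at the bottom-left. A tiny NumPy check on a hypothetical set of corners:

import numpy as np

# Corners of a hypothetical detected sheet, in arbitrary order
pts = np.array([[50, 300], [40, 60], [320, 290], [300, 50]])

add = pts.sum(axis=1)        # x + y for each corner
diff = np.diff(pts, axis=1)  # y - x for each corner

print(pts[np.argmin(add)])   # [ 40  60]  -> top-left
print(pts[np.argmax(add)])   # [320 290]  -> bottom-right
print(pts[np.argmin(diff)])  # [300  50]  -> top-right
print(pts[np.argmax(diff)])  # [ 50 300]  -> bottom-left
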
import cv2
import numpy as np
########################################
nPlateCascade = cv2.CascadeClassifier("Resources/haarcascade_russian_plate_number.xml")
minArea = 500
color = (255, 0, 255)
########################################
cap = cv2.VideoCapture(0)
cap.set(3,640)
cap.set(4,480)
cap.set(10,130)
count = 0

while True:
    success, img = cap.read()
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    numberPlates = nPlateCascade.detectMultiScale(imgGray, 1.1 ,4)
    for (x, y, w, h) in numberPlates:
        area = w*h
        if area > minArea:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            cv2.putText(img, "Number Plate", (x, y-5),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, color, 2)
            imgRoi = img[y:y+h, x:x+w]
            cv2.imshow("ROI",imgRoi)

    cv2.imshow("Result",img)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        # Save the cropped plate image; the first argument of imwrite is the output path
        cv2.imwrite("Resources/Scanned/NoPlate_" + str(count) + ".jpg", imgRoi)
        cv2.rectangle(img, (0, 200), (640, 300), (0, 255, 0), cv2.FILLED)
        cv2.putText(img, "Scan Saved", (150, 265), cv2.FONT_HERSHEY_COMPLEX_SMALL,
                    2, (0, 0, 255), 2)
        cv2.imshow("Result", img)
        cv2.waitKey(500)
        count += 1
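One more caveat: cv2.imwrite returns False instead of raising an error when the output folder does not exist, so scans silently fail to save unless Resources/Scanned is created first. A small helper sketch (saveScan is a hypothetical name, not part of OpenCV):

import os
import cv2

def saveScan(imgRoi, count, folder="Resources/Scanned"):
    # Create the output folder on first use
    os.makedirs(folder, exist_ok=True)
    ok = cv2.imwrite(folder + "/NoPlate_" + str(count) + ".jpg", imgRoi)
    if not ok:
        print("Failed to save scan", count)
    return ok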