Some thoughts on OpenCV under Python (my first post)

#!/usr/bin/env python
# Target track program (legacy OpenCV "cv" bindings, Python 2)
import cv   # legacy bindings (also available as cv2.cv in OpenCV 2.x)
import cv2


class Target:

    def __init__(self, vfile):
        #self.capture = cv.CaptureFromCAM(0)
        # please change the name of the video here:
        #self.capture = cv.CaptureFromFile('SH-3802-3-20150211120000.mp4')
        self.capture = cv.CaptureFromFile(vfile)
        cv.NamedWindow("Target", 1)

    def run(self, outputfold):
        # Capture the first frame to get the frame size
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)
        color_image = cv.CreateImage(frame_size, 8, 3)
        grey_image = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 3)

        first = True
        kn = 0
        while True:
            kn += 1
            # frame width and height (computed but not used below)
            closest_to_left = frame_size[0]
            closest_to_right = frame_size[1]

            color_image = cv.QueryFrame(self.capture)
            if color_image is None:
                break

            # Only process one frame out of every 25
            if kn % 25 != 0:
                continue

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                # Update the running-average background model
                cv.RunningAvg(color_image, moving_average, 0.020, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Take the absolute difference between the current frame and the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 18)
            cv.Erode(grey_image, grey_image, None, 10)

            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
            points = []

            ki = 0
            while contour:
                ki += 1
                bound_rect = cv.BoundingRect(list(contour))
                contour = contour.h_next()

                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)

                # kn % 25 == 0 always holds here because of the sampling above
                if kn % 25 == 0 and bound_rect[2] > 40 and bound_rect[3] > 40:
                    #cv2.imwrite("out/" + str(kn) + ".jpg", frame)
                    print pt1, pt2, pt2[0] - pt1[0], pt2[1] - pt1[1], pt1[1]
                    #print pt1[0] > 840 and pt1[0] < 1100 and pt1[1] < 60
                    if pt1[0] > 840 and pt1[0] < 1100 and pt1[1] < 80 and pt1[1] > 55 and pt2[0] > 890 and pt2[0] < 1140 and pt2[1] > 100 and pt2[1] < 120:
                        print "find number"
                    else:
                        crop_img = color_image[pt1[1]:pt2[1], pt1[0]:pt2[0]]
                        #crop_img = color_image[200:400, 100:300]
                        # please change the image output location here:
                        cv.SaveImage(outputfold + str(kn) + "_" + str(ki) + ".jpg", crop_img)

                cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(0, 255, 0), 1)

            if len(points):
                center_point = reduce(lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)
                cv.Circle(color_image, center_point, 40, cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
                cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 10, cv.CV_RGB(255, 100, 0), 1)

            cv.ShowImage("Target", color_image)

            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break


if __name__ == "__main__":
    t = Target("../img/SH1.mp4")
    t.run("../outobj/")
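
For comparison only, and not part of the original program: below is a minimal sketch of roughly the same running-average motion detector written against the newer cv2 API. The function name detect_motion is made up, the paths and thresholds are simply carried over from the code above, and the [-2] index on findContours is only there because different OpenCV versions return a different number of values from that call.

import cv2
import numpy as np


def detect_motion(vfile, outputfold):
    cap = cv2.VideoCapture(vfile)
    avg = None                       # running-average background, float32
    kn = 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        kn += 1
        if kn % 25 != 0:             # same one-frame-in-25 sampling as above
            continue

        frame = cv2.GaussianBlur(frame, (3, 3), 0)
        if avg is None:
            avg = np.float32(frame)  # seed the background with the first sampled frame
        else:
            cv2.accumulateWeighted(frame, avg, 0.020)

        background = cv2.convertScaleAbs(avg)
        diff = cv2.absdiff(frame, background)
        grey = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
        _, mask = cv2.threshold(grey, 70, 255, cv2.THRESH_BINARY)
        mask = cv2.dilate(mask, None, iterations=18)
        mask = cv2.erode(mask, None, iterations=10)

        # findContours returns 2 or 3 values depending on the OpenCV version,
        # so take the second-to-last element to stay portable.
        contours = cv2.findContours(mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)[-2]
        for ki, c in enumerate(contours):
            x, y, w, h = cv2.boundingRect(c)
            if w > 40 and h > 40:
                crop = frame[y:y + h, x:x + w]
                cv2.imwrite(outputfold + "%d_%d.jpg" % (kn, ki), crop)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)

        cv2.imshow("Target", frame)
        if cv2.waitKey(7) & 0xFF == 27:   # stop on ESC
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    detect_motion("../img/SH1.mp4", "../outobj/")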



I haven't yet figured out how to paste code in a tidier way, so for now I've copied the whole thing above. The parts that matter are the three passages I had marked in red:

The first passage concerns the return value of cv.FindContours. According to the OpenCV docs, in Python FindContours returns exactly one thing: a contour.

This contour is a data type of its own (that is my guess, anyway).

Next comes bound_rect = cv.BoundingRect(list(contour)).

Here list(contour) is passed as the argument to cv.BoundingRect, and from its definition we know that cvBoundingRect computes the outermost upright ("up-right") bounding rectangle of a point set.


update=0, contour ~ CvContour*: the bounding rectangle is not recomputed; it is taken directly from the rect field of the contour header.


How should that sentence be understood? From

                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])

it looks like the four elements of bound_rect correspond to x, y, width, height, matching

cvPoint( rect.x, rect.y ), cvPoint( rect.x + rect.width, rect.y + rect.height )

I then changed the program logic based on that reading, and the results confirmed it. From now on I should basically be able to handle this kind of thing.
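
Not from the original program, just a quick check: the newer cv2.boundingRect returns the same kind of (x, y, width, height) tuple as the legacy cv.BoundingRect, so a hand-made point set is enough to see how pt1 and pt2 come out.

import numpy as np
import cv2

# Three hand-made points; the tightest upright box around them starts at
# (10, 5) and has to reach x = 30 and y = 40.
pts = np.array([[10, 20], [30, 5], [25, 40]], dtype=np.int32)
x, y, w, h = cv2.boundingRect(pts)

pt1 = (x, y)          # top-left corner, i.e. cvPoint(rect.x, rect.y)
pt2 = (x + w, y + h)  # bottom-right corner, i.e. cvPoint(rect.x + rect.width, rect.y + rect.height)

# pt1 == (10, 5) and pt2 == (31, 41): width and height count pixels inclusively,
# so w = 30 - 10 + 1 = 21 and h = 40 - 5 + 1 = 36.
print(pt1, pt2)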



This is the first time I have written a post like this; I don't even keep a personal blog normally, so it took some doing. Still, I want to write down my impressions and my current understanding of things I don't know well yet. Corrections and advice are very welcome.

