Processing Video with Optical Flow

This article collects typical usage examples of the cv2.calcOpticalFlowFarneback method in Python. If you have been wondering what cv2.calcOpticalFlowFarneback does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore other usage examples from the cv2 module.

Sixteen code examples of cv2.calcOpticalFlowFarneback are shown below, ordered by popularity by default.
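Before the examples, a minimal sketch of the basic call pattern may be useful (the frame file names below are placeholders, not files from any of the cited projects). cv2.calcOpticalFlowFarneback takes two single-channel 8-bit frames and returns a dense H x W x 2 float32 field whose last axis holds the per-pixel (dx, dy) displacement from the first frame to the second:

import cv2
import numpy as np

# Placeholder input frames; any two consecutive grayscale frames will do.
prev_gray = cv2.imread('frame_000.png', cv2.IMREAD_GRAYSCALE)
next_gray = cv2.imread('frame_001.png', cv2.IMREAD_GRAYSCALE)

# Positional parameters: prev, next, flow, pyr_scale, levels, winsize,
# iterations, poly_n, poly_sigma, flags. The values below are the ones
# most of the examples in this article use.
flow = cv2.calcOpticalFlowFarneback(prev_gray, next_gray, None,
                                    0.5, 3, 15, 3, 5, 1.2, 0)

dx, dy = flow[..., 0], flow[..., 1]   # per-pixel displacement components
mag, ang = cv2.cartToPolar(dx, dy)    # polar form, convenient for visualization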

Example 1: compute_dense_optical_flow

Required import: import cv2

Or: from cv2 import calcOpticalFlowFarneback

def compute_dense_optical_flow(prev_image, current_image):
    old_shape = current_image.shape
    prev_image_gray = cv2.cvtColor(prev_image, cv2.COLOR_BGR2GRAY)
    current_image_gray = cv2.cvtColor(current_image, cv2.COLOR_BGR2GRAY)
    assert current_image.shape == old_shape
    hsv = np.zeros_like(prev_image)
    hsv[..., 1] = 255
    flow = None
    flow = cv2.calcOpticalFlowFarneback(prev=prev_image_gray,
                                        next=current_image_gray, flow=flow,
                                        pyr_scale=0.8, levels=15, winsize=5,
                                        iterations=10, poly_n=5, poly_sigma=0,
                                        flags=10)
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

Author: ferreirafabio, Project: video2tfrecord, Lines: 20, Source: video2tfrecord.py

Example 2: compute_opticalflow

Required import: import cv2

Or: from cv2 import calcOpticalFlowFarneback

def compute_opticalflow(prev_image, cur_image, args):
    prev_gray = cv2.cvtColor(prev_image, cv2.COLOR_RGB2GRAY)
    cur_gray = cv2.cvtColor(cur_image, cv2.COLOR_RGB2GRAY)
    pyr_scale = args.pyr_scale
    pyr_levels = args.pyr_levels
    winsize = args.winsize
    iterations = args.iterations
    poly_n = args.poly_n
    poly_sigma = args.poly_sigma
    flow = cv2.calcOpticalFlowFarneback(prev_gray, cur_gray, flow=None,
                                        pyr_scale=pyr_scale,
                                        levels=pyr_levels,
                                        iterations=iterations,
                                        winsize=winsize,
                                        poly_n=poly_n,
                                        poly_sigma=poly_sigma,
                                        flags=0)
    return flow

Author: linjieyangsc, Project: video_seg, Lines: 20, Source: image_util.py

Example 3: run_farneback

Required import: import cv2

Or: from cv2 import calcOpticalFlowFarneback

def run_farneback(frames):
    try:
        return cv2.calcOpticalFlowFarneback(
            frames[0], frames[1],
            # options, defaults
            None,                            # output
            0.5,                             # pyr_scale, 0.5
            10,                              # levels, 3
            min(frames[0].shape[:2]) // 5,   # winsize, 15
            10,                              # iterations, 3
            7,                               # poly_n, 5
            1.5,                             # poly_sigma, 1.2
            cv2.OPTFLOW_FARNEBACK_GAUSSIAN,  # flags, 0
        )
    except cv2.error:
        return None

Author: facebookresearch, Project: DetectAndTrack, Lines: 18, Source: tracking_engine.py

Example 4: denseOpticalFlow

Required import: import cv2

Or: from cv2 import calcOpticalFlowFarneback

def denseOpticalFlow():
    # use 0 for webcam capturing
    # cap = cv2.VideoCapture(0)
    cap = cv2.VideoCapture('test/Pedestrian overpass.mp4')
    ret, frame1 = cap.read()
    prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame1)
    hsv[..., 1] = 255

    while(1):
        ret, frame2 = cap.read()
        next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)

        # print(np.sum(mag[100:300, 100:300]))
        if np.sum(mag) > 100000:
            print('motion detected')

        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        cv2.imshow('frame2', bgr)

        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
        elif k == ord('s'):
            cv2.imwrite('opticalfb.png', frame2)
            cv2.imwrite('opticalhsv.png', bgr)
        prvs = next

    cap.release()
    cv2.destroyAllWindows()

Author: sahibdhanjal, Project: Mask-RCNN-Pedestrian-Detection, Lines: 38, Source: opticalFlow.py

Example 5: extract_optical_flow

Required import: import cv2

Or: from cv2 import calcOpticalFlowFarneback

def extract_optical_flow(fn, times, frames=8, scale_factor=1.0):
    cap = cv2.VideoCapture(fn)
    n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    outputs = []
    if n_frames < frames * 2:
        return outputs

    def resize(im):
        if scale_factor != 1.0:
            new_size = (int(im.shape[1] * scale_factor), int(im.shape[0] * scale_factor))
            return cv2.resize(im, new_size, interpolation=cv2.INTER_LINEAR)
        else:
            return im

    for t in times:
        cap.set(cv2.CAP_PROP_POS_FRAMES, min(t * n_frames, n_frames - 1 - frames))
        ret, frame0 = cap.read()
        im0 = resize(cv2.cvtColor(frame0, cv2.COLOR_BGR2GRAY))
        mags = []
        middle_frame = frame0
        for f in range(frames - 1):
            ret, frame1 = cap.read()
            if f == frames // 2:
                middle_frame = frame1
            im1 = resize(cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY))
            flow = cv2.calcOpticalFlowFarneback(im0, im1,
                                                None, 0.5, 3, 15, 3, 5, 1.2, 0)
            mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
            mags.append(mag)
            im0 = im1
        mag = np.sum(mags, 0)
        mag = mag.clip(min=0)
        norm_mag = (mag - mag.min()) / (mag.max() - mag.min() + 1e-5)
        x = middle_frame[..., ::-1].astype(np.float32) / 255
        outputs.append((x, norm_mag))
    return outputs

Author: gustavla, Project: self-supervision, Lines: 38, Source: video_avi_flow_saliency.py

Example 6: extract_optical_flow

Required import: import cv2

Or: from cv2 import calcOpticalFlowFarneback

def extract_optical_flow(fn, n_frames=34):
    img = dd.image.load(fn)
    if img.shape != (128 * 34, 128, 3):
        return []
    frames = np.array_split(img, 34, axis=0)
    grayscale_frames = [fr.mean(-1) for fr in frames]
    mags = []
    skip_frames = np.random.randint(34 - n_frames + 1)
    middle_frame = frames[np.random.randint(skip_frames, skip_frames + n_frames)]
    im0 = grayscale_frames[skip_frames]
    for f in range(1 + skip_frames, 1 + skip_frames + n_frames - 1):
        im1 = grayscale_frames[f]
        flow = cv2.calcOpticalFlowFarneback(im0, im1,
                                            None,                      # flow
                                            0.5,                       # pyr_scale
                                            3,                         # levels
                                            np.random.randint(3, 20),  # winsize
                                            3,                         # iterations
                                            5,                         # poly_n
                                            1.2,                       # poly_sigma
                                            0)                         # flags
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        mags.append(mag)
        im0 = im1
    mag = np.sum(mags, 0)
    mag = mag.clip(min=0)
    # norm_mag = np.tanh(mag * 10000)
    norm_mag = (mag - mag.min()) / (mag.max() - mag.min() + 1e-5)
    outputs = []
    outputs.append((middle_frame, norm_mag))
    return outputs

Author: gustavla, Project: self-supervision, Lines: 34, Source: video_jpeg_rolls_flow_saliency.py

Example 7: MFMGetFM

Required import: import cv2

Or: from cv2 import calcOpticalFlowFarneback

def MFMGetFM(self, src):
    # convert scale
    I8U = np.uint8(255 * src)
    cv2.waitKey(10)
    # calculating optical flows
    if self.prev_frame is not None:
        farne_pyr_scale = pySaliencyMapDefs.farne_pyr_scale
        farne_levels = pySaliencyMapDefs.farne_levels
        farne_winsize = pySaliencyMapDefs.farne_winsize
        farne_iterations = pySaliencyMapDefs.farne_iterations
        farne_poly_n = pySaliencyMapDefs.farne_poly_n
        farne_poly_sigma = pySaliencyMapDefs.farne_poly_sigma
        farne_flags = pySaliencyMapDefs.farne_flags
        flow = cv2.calcOpticalFlowFarneback(
            prev=self.prev_frame,
            next=I8U,
            pyr_scale=farne_pyr_scale,
            levels=farne_levels,
            winsize=farne_winsize,
            iterations=farne_iterations,
            poly_n=farne_poly_n,
            poly_sigma=farne_poly_sigma,
            flags=farne_flags,
            flow=None
        )
        flowx = flow[..., 0]
        flowy = flow[..., 1]
    else:
        flowx = np.zeros(I8U.shape)
        flowy = np.zeros(I8U.shape)
    # create Gaussian pyramids
    dst_x = self.FMGaussianPyrCSD(flowx)
    dst_y = self.FMGaussianPyrCSD(flowy)
    # update the current frame
    self.prev_frame = np.uint8(I8U)
    # return
    return dst_x, dst_y

# conspicuity maps
# standard range normalization

Author: tyarkoni, Project: pliers, Lines: 42, Source: pySaliencyMap.py

Example 8: optic_flow

Required import: import cv2

Or: from cv2 import calcOpticalFlowFarneback

def optic_flow(mov, tmpl, nflows):
    """ optic flow computation using farneback """
    window = int(1 / 0.2)  # window size
    nframes, Ly, Lx = mov.shape
    mov = mov.astype(np.float32)
    mov = np.reshape(mov[:int(np.floor(nframes / window) * window), :, :],
                     (-1, window, Ly, Lx)).mean(axis=1)
    mov = mov[np.random.permutation(mov.shape[0])[:min(nflows, mov.shape[0])], :, :]

    pyr_scale = .5
    levels = 3
    winsize = 100
    iterations = 15
    poly_n = 5
    poly_sigma = 1.2 / 5
    flags = 0

    nframes, Ly, Lx = mov.shape
    norms = np.zeros((nframes,))
    flows = np.zeros((nframes, Ly, Lx, 2))

    for n in range(nframes):
        flow = cv2.calcOpticalFlowFarneback(
            tmpl, mov[n, :, :], None, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags)
        flows[n, :, :, :] = flow
        norms[n] = ((flow**2).sum()) ** 0.5

    return flows, norms

Author: MouseLand, Project: suite2p, Lines: 32, Source: metrics.py

Example 9: get_direction

Required import: import cv2

Or: from cv2 import calcOpticalFlowFarneback

def get_direction(self, frame1, frame2, show=False):
    frame1 = cv2.resize(frame1, (self.width, self.height))
    frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    frame2 = cv2.resize(frame2, (self.width, self.height))
    frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

    flow = cv2.calcOpticalFlowFarneback(frame1[self.height_start:self.height_end],
                                        frame2[self.height_start:self.height_end],
                                        None, 0.5, 3, 15, 1, 5, 1.2, 0)
    flow_avg = np.median(flow, axis=(0, 1))  # [x, y]
    move_x = -1 * flow_avg[0]
    move_y = -1 * flow_avg[1]

    if show:
        hsv = np.zeros((self.height_end - self.height_start, self.width, 3))
        hsv[..., 1] = 255
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        bgr = cv2.cvtColor(np.array(hsv).astype(np.uint8), cv2.COLOR_HSV2BGR)
        cv2.imshow('opt_flow', bgr)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            print('User Interrupted')
            exit(1)

    return move_x, move_y

Author: YoongiKim, Project: Walk-Assistant, Lines: 29, Source: opt_flow.py

Example 10: _calc_optical_flow

Required import: import cv2

Or: from cv2 import calcOpticalFlowFarneback

def _calc_optical_flow(prev, next_):
    flow = cv2.calcOpticalFlowFarneback(prev, next_, flow=None, pyr_scale=0.5, levels=3, winsize=15,
                                        iterations=3, poly_n=5, poly_sigma=1.2, flags=0)
    return flow

Author: woodfrog, Project: ActionRecognition, Lines: 6, Source: OF_utils.py

Example 11: dense_flow

Required import: import cv2

Or: from cv2 import calcOpticalFlowFarneback

def dense_flow(image):
    global prvs
    # 'prvs' and 'hsv' are module-level variables initialized outside this function
    next = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    prvs = next
    return image

Author: charlielito, Project: snapchat-filters-opencv, Lines: 15, Source: dense_optflow.py

Example 12: next

Required import: import cv2

Or: from cv2 import calcOpticalFlowFarneback

def next(self, arImage: np.array) -> np.array:
    # first?
    if self.arPrev.shape == (1, 1): return self.first(arImage)

    # get image in black&white
    arCurrent = cv2.cvtColor(arImage, cv2.COLOR_BGR2GRAY)

    if self.sAlgorithm == "tvl1":
        arFlow = self.oTVL1.calc(self.arPrev, arCurrent, None)
    elif self.sAlgorithm == "farnback":
        arFlow = cv2.calcOpticalFlowFarneback(self.arPrev, arCurrent, flow=None,
            pyr_scale=0.5, levels=1, winsize=15, iterations=2, poly_n=5, poly_sigma=1.1, flags=0)
    else: raise ValueError("Unknown optical flow type")

    # only 2 dims
    arFlow = arFlow[:, :, 0:2]

    # truncate to +/-15.0, then rescale to [-1.0, 1.0]
    arFlow[arFlow > self.fBound] = self.fBound
    arFlow[arFlow < -self.fBound] = -self.fBound
    arFlow = arFlow / self.fBound

    if self.bThirdChannel:
        # add third empty channel
        arFlow = np.concatenate((arFlow, self.arZeros), axis=2)

    self.arPrev = arCurrent
    return arFlow

Author: FrederikSchorr, Project: sign-language, Lines: 32, Source: opticalflow.py

Example 13: extract_optical_flow

Required import: import cv2

Or: from cv2 import calcOpticalFlowFarneback

def extract_optical_flow(fn, times, frames=8, scale_factor=1.0):
    cap = cv2.VideoCapture(fn)
    if not cap.isOpened():
        return []
    n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    outputs = []
    if n_frames < frames * 2:
        return outputs

    def resize(im):
        if scale_factor != 1.0:
            new_size = (int(im.shape[1] * scale_factor), int(im.shape[0] * scale_factor))
            return cv2.resize(im, new_size, interpolation=cv2.INTER_LINEAR)
        else:
            return im

    for t in times:
        cap.set(cv2.CAP_PROP_POS_FRAMES, min(t * n_frames, n_frames - 1 - frames))
        ret, frame0 = cap.read()
        im0 = resize(cv2.cvtColor(frame0, cv2.COLOR_BGR2GRAY))
        mags = []
        middle_frame = frame0
        flows = []
        for f in range(frames - 1):
            ret, frame1 = cap.read()
            if f == frames // 2:
                middle_frame = frame1
            im1 = resize(cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY))
            flow = cv2.calcOpticalFlowFarneback(im0, im1,
                                                None,
                                                0.5,                      # pyr_scale
                                                8,                        # levels
                                                int(40 * scale_factor),   # winsize
                                                10,                       # iterations
                                                5,                        # poly_n
                                                1.1,                      # poly_sigma
                                                cv2.OPTFLOW_FARNEBACK_GAUSSIAN)
            # mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
            # mags.append(mag)
            flows.append(flow)
            im0 = im1
        flow = (np.mean(flows, 0) / 100).clip(-1, 1)
        # flow = np.mean(flows, 0)
        # flow /= (flow.mean() * 5 + 1e-5)
        # flow = flow.clip(-1, 1)
        # flows = flows / (np.mean(flows, 0, keepdims=True) + 1e-5)
        x = middle_frame[..., ::-1].astype(np.float32) / 255
        outputs.append((x, flow))
    return outputs

Author: gustavla, Project: self-supervision, Lines: 52, Source: video_avi_flow.py

Example 14: run_parameter

Required import: import cv2

Or: from cv2 import calcOpticalFlowFarneback

def run_parameter(config_item):
    prev_img = cv2.imread(config_item["files"]["prevImg"])
    curr_img = cv2.imread(config_item["files"]["currImg"])
    flow_method = config_item["parameter"]["flow_method"]
    estimate_base = config_item["files"]["estimatepath"] + "/"
    if os.path.exists(estimate_base) == False:
        os.makedirs(estimate_base)
    if os.path.exists(config_item["files"]["estflow"]):
        return
    # compute optical flow
    if flow_method.find("dual") >= 0:
        dual_proc = cv2.DualTVL1OpticalFlow_create(config_item["parameter"]["tau"],
                                                   config_item["parameter"]["lambda"],
                                                   config_item["parameter"]["theta"],
                                                   config_item["parameter"]["nscales"],
                                                   config_item["parameter"]["warps"])
        est_flow = np.zeros(shape=(prev_img.shape[0], prev_img.shape[1], 2), dtype=np.float32)
        dual_proc.calc(cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY), cv2.cvtColor(curr_img, cv2.COLOR_BGR2GRAY), est_flow)
    elif flow_method.find("farneback") >= 0:
        est_flow = cv2.calcOpticalFlowFarneback(cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY),
                                                cv2.cvtColor(curr_img, cv2.COLOR_BGR2GRAY),
                                                None, 0.5, 3, 15, 3, 5, 1.2, 0)
    elif flow_method.find("plk") >= 0:
        prev_pts = list()
        for r in range(prev_img.shape[0]):
            for c in range(prev_img.shape[1]):
                prev_pts.append((c, r))
        prev_pts = np.array(prev_pts, dtype=np.float32)
        curr_pts, st, err = cv2.calcOpticalFlowPyrLK(cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY),
                                                     cv2.cvtColor(curr_img, cv2.COLOR_BGR2GRAY),
                                                     prev_pts, None,
                                                     winSize=(21, 21), maxLevel=3,
                                                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.001))
        est_flow = np.zeros(shape=(prev_img.shape[0], prev_img.shape[1], 2), dtype=np.float32)
        n = 0
        flow_pts = curr_pts - prev_pts
        for r in range(prev_img.shape[0]):
            for c in range(prev_img.shape[1]):
                est_flow[r, c, :] = flow_pts[n, :]
                n = n + 1
    # here alternative optical flow methods can be applied
    else:
        raise ValueError("flow method has not been implemented")

    ut.writeFlowFile(config_item["files"]["estflow"], est_flow)
    ut.drawFlowField(config_item["files"]["estflow"][:-3] + "png", est_flow)
    print("Done -> ", config_item["files"]["estflow"])

Author: tsenst, Project: CrowdFlow, Lines: 52, Source: opticalflow_estimate.py

Example 15: processFrame

Required import: import cv2

Or: from cv2 import calcOpticalFlowFarneback

def processFrame(self, frame, distance=None, timestep=1):
    '''
    Processes one image frame, returning summed X,Y flow and frame.
    Optional inputs are:
      distance - distance in meters to image (focal length) for returning flow in meters per second
      timestep - time step in seconds for returning flow in meters per second
    '''
    frame2 = cv2.resize(frame, self.size)
    gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    xsum, ysum = 0, 0
    xvel, yvel = 0, 0
    flow = None
    if not self.prev_gray is None:
        flow = cv2.calcOpticalFlowFarneback(self.prev_gray, gray, flow, pyr_scale=0.5, levels=5,
                                            winsize=13, iterations=10, poly_n=5, poly_sigma=1.1, flags=0)
        for y in range(0, flow.shape[0], self.move_step):
            for x in range(0, flow.shape[1], self.move_step):
                fx, fy = flow[y, x]
                xsum += fx
                ysum += fy
                cv2.line(frame2, (x, y), (int(x + fx), int(y + fy)), self.mv_color_bgr)
                cv2.circle(frame2, (x, y), 1, self.mv_color_bgr, -1)
        # Default to system time if no timestep
        curr_time = time.time()
        if not timestep:
            timestep = (curr_time - self.prev_time) if self.prev_time else 1
        self.prev_time = curr_time
        xvel = self._get_velocity(flow, xsum, flow.shape[1], distance, timestep)
        yvel = self._get_velocity(flow, ysum, flow.shape[0], distance, timestep)
    self.prev_gray = gray
    if self.window_name:
        cv2.imshow(self.window_name, frame2)
        if cv2.waitKey(1) & 0x000000FF == 27:  # ESC
            return None
    # Return x,y velocities and new image with flow lines
    return xvel, yvel, frame2

Author: simondlevy, Project: OpenCV-Python-Hacks, Lines: 55, Source: init.py

Example 16: MFMGetFM

Required import: import cv2

Or: from cv2 import calcOpticalFlowFarneback

def MFMGetFM(self, src):
    # Convert scale
    I8U = np.uint8(255 * src)
    # cv2.waitKey(10)
    # Calculating optical flows
    if self.prev_frame is not None:
        farne_pyr_scale = pySaliencyMapDefs.farne_pyr_scale
        farne_levels = pySaliencyMapDefs.farne_levels
        farne_winsize = pySaliencyMapDefs.farne_winsize
        farne_iterations = pySaliencyMapDefs.farne_iterations
        farne_poly_n = pySaliencyMapDefs.farne_poly_n
        farne_poly_sigma = pySaliencyMapDefs.farne_poly_sigma
        farne_flags = pySaliencyMapDefs.farne_flags
        flow = cv2.calcOpticalFlowFarneback(
            prev=self.prev_frame,
            next=I8U,
            pyr_scale=farne_pyr_scale,
            levels=farne_levels,
            winsize=farne_winsize,
            iterations=farne_iterations,
            poly_n=farne_poly_n,
            poly_sigma=farne_poly_sigma,
            flags=farne_flags,
            flow=None
        )
        flowx = flow[..., 0]
        flowy = flow[..., 1]
    else:
        flowx = np.zeros(I8U.shape)
        flowy = np.zeros(I8U.shape)
    # Create Gaussian pyramids
    dst_x = self.FMGaussianPyrCSD(flowx)
    dst_y = self.FMGaussianPyrCSD(flowy)
    # Update the current frame
    self.prev_frame = np.uint8(I8U)
    return dst_x, dst_y

# Conspicuity maps
# Standard range normalization

Author: aalto-ui, Project: aim, Lines: 45, Source: pySaliencyMap.py

Original article:
http://www.xinbiancheng.cn/python3/python-method-cv2.calcOpticalFlowFarneback.html

Optical flow visualization (full script):
https://github.com/sahibdhanjal/Mask-RCNN-Pedestrian-Detection/blob/master/opticalFlow.py
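Several of the examples above (and the linked opticalFlow.py script) visualize the flow field with the same HSV trick: hue encodes flow direction, value encodes flow magnitude, and saturation is held at maximum. Below is a small sketch of that pattern factored into a reusable helper; the name flow_to_bgr is ours, not from any of the cited projects.

import cv2
import numpy as np

def flow_to_bgr(flow):
    # hue <- flow angle, value <- normalized flow magnitude, saturation fixed at 255
    h, w = flow.shape[:2]
    hsv = np.zeros((h, w, 3), dtype=np.uint8)
    hsv[..., 1] = 255
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)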
