【AI创造营】和我一起使用paddlepaddle来做物理小实验吧!!!

【AI创造营】

快来和我一起使用paddle来做个物理小实验吧!接下来,我们将在paddle的基础上来完成物体运动轨迹的绘制,运动时间的分析,平均速度、加速度的求解。

老师再也不用担心我的学习了!!!

做该项目的心得:其实这是个挺小的项目的,构想的实现过程也不复杂,但实际操作的时候其实还是很有一些出入的,实践起来还是会有一些小小的问题。所以要勤于实验。同时,要注意目标任务的分解,“微分”的这种思想其实还是很重要,很实用的。

【灵感来源及实现思路】

该项目的灵感来源于高中物理实验测量自由落体运动的加速度以及测量小车的加速度。感谢,我上过高中,并选择了理科。

【实现思路:】划重点大佬们略过

我将以文本的形式从“高层到低层”,也就是从构想到实现的过程。过程可是很重要的哦,因为从过程中我们可以学到很多技巧、积累一些经验。在这个浮躁的社会,希望大家能安静下来,好好体会一下生活。好吧,扯远了。

拉回来,如果你是计算机,你会怎么测量物体的加速度???哦?好像说反了…反正,很简单,识别出物体,记录相同时间相邻位移之差,然后再计算。对,没错,就是这样。OK,接下来再深入一下:怎么识别出物体???怎么记录相同时间相邻位移之差???识别物体很简单了,就用paddlehub封装好的目标检测模型呗!!!而记录相邻位移之差,关键要记录位移与判断相同的时间。回想一下高中物理实验是怎么做的,是不是隐隐想到了好像有一个打点计时器?对的吧?这个打点计时器就是用来“固定时间”。那我们应该用什么来测量时间呢?当然是——“帧”了!!!

OK,大致思路有了,接下来:请看代码——根据面向对象的思想:

哦豁,——,好了,不扯了,认真点。下面的几个函数的功能都有标注(根据思路,将目标任务分解!!!然后封装成函数!!!),个人认为可读性应该,也不是特别的差,望诸君好好阅读。

另外,总结一下这个小项目(需要改进的地方):加速度过大,无法识别。(可能原因:没有目标跟踪算法。);由于,个人能力有限,算法计算的结果准确度不是特别高且适用场景不是很丰富。(望诸君提出宝贵意见以修正算法。)

#获取视频的总时间以及每帧的时间

import cv2

def get_total_time(vedio_path):
    """Return timing info for a video file.

    Args:
        vedio_path: path to the video file (name kept for caller compatibility).

    Returns:
        (total_second, cost_eachfra, fraNum): total duration in seconds,
        seconds per frame, and the frame count reported by OpenCV
        (a float, as cv2 returns it).

    Raises:
        IOError: if the video cannot be opened.  The original printed a
        message and implicitly returned None, which made callers crash
        later with a TypeError on tuple unpacking.
    """
    cap = cv2.VideoCapture(vedio_path)
    if not cap.isOpened():
        print('---can not open the vedio_path----')
        raise IOError('cannot open video: {}'.format(vedio_path))
    rate = cap.get(cv2.CAP_PROP_FPS)            # frame rate (was magic number 5)
    fraNum = cap.get(cv2.CAP_PROP_FRAME_COUNT)  # frame count (was magic number 7)
    print(fraNum)
    cap.release()  # the original leaked the capture handle
    total_second = fraNum / rate
    cost_eachfra = total_second / fraNum  # equivalent to 1 / rate
    return total_second, cost_eachfra, fraNum

# Demo run: probe the sample video and report its timing figures.
total_second, cost_eachfra, fraNum = get_total_time('VID_20210307_154439.mp4')
print(total_second, cost_eachfra, fraNum)
93.0
3.1059777777777775 0.033397610513739545 93.0
#获取物体的中心点

def get_center_point(result, input_object):
    """Collect the (x, y) center of every detection labelled *input_object*.

    Args:
        result: PaddleHub detection output — a list of dicts, each holding
            a 'data' list of boxes with 'label'/'left'/'right'/'top'/'bottom'.
        input_object: label string to keep (e.g. 'bottle').

    Returns:
        (center_points, img_object_h): the list of center tuples and the
        pixel height of the LAST matching box (0 when nothing matched).
    """
    centers = []
    last_height = 0
    for block in result:
        for detection in block['data']:
            if detection['label'] != input_object:
                continue
            left, right = detection['left'], detection['right']
            top, bottom = detection['top'], detection['bottom']
            mid_x = (right - left) / 2 + left
            mid_y = (bottom - top) / 2 + top
            last_height = bottom - top
            centers.append((mid_x, mid_y))
    return centers, last_height
# Mini demo: cv2 drawing wants integer coordinates, so truncate a
# float coordinate pair to ints.
point = (5.66, 666.0)
x, y = point
x, y = int(x), int(y)
point = (x, y)
print(point)
(5, 666)
#物体运动分析图像
import cv2
import matplotlib.pylab as plt
import numpy as np

def analysis_movement(img_size, points):
    """Draw the tracked center points as small circles on a blank canvas.

    Args:
        img_size: (height, width, channels) shape for the canvas,
            typically ``img.shape`` of a video frame.
        points: iterable of (x, y) centers; floats are truncated to ints.

    Returns:
        A uint8 image with one circle of color (0, 255, 0) per point.
    """
    # FIX: np.zeros defaults to float64; matplotlib treats float images as
    # values in [0, 1], so circles drawn with 255 did not display as
    # intended.  A uint8 canvas keeps the conventional 0-255 range.
    img = np.zeros(img_size, dtype=np.uint8)
    for point in points:
        x, y = point
        center = (int(x), int(y))  # cv2.circle requires integer coordinates
        cv2.circle(img, center, 2, (0, 255, 0))
    return img

# Test the plot helper: four nearly-vertical points on a 480x680 canvas.
img = analysis_movement(( 480, 680, 3), [(100, 2), (100, 3), (100, 4), (100, 5)])
plt.imshow(img)

这张图片为测试函数的图片

#测试目标检测模型
import paddlehub as hub
import cv2
import matplotlib.pyplot as plt

# Load the pretrained YOLOv3 detector from PaddleHub and show the raw image.
object_detector = hub.Module(name="yolov3_resnet34_coco2017")
img = cv2.imread('image/IMG_20210307_154944.jpg')
print(img.shape)
plt.imshow(img)
plt.show()
# Denoise: grayscale -> Gaussian blur -> back to 3 channels
# (presumably the detector expects a 3-channel input — confirm).
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.GaussianBlur(img, (3, 3), 0)
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
result = object_detector.object_detection(images=[img])
# Extract the centers of all 'bottle' detections (empty list if none found).
center_point, img_object_h = get_center_point(result, 'bottle')
print(center_point)
print('-----分割线-----')
print(result)
[2021-03-07 15:52:49,863] [    INFO] - Installing yolov3_resnet34_coco2017 module
[2021-03-07 15:52:49,929] [    INFO] - Module yolov3_resnet34_coco2017 already installed in /home/aistudio/.paddlehub/modules/yolov3_resnet34_coco2017


(4096, 3072, 3)

测试图片!

[]
-----分割线-----
[{'data': [{'label': 'person', 'confidence': 0.614029049873352, 'left': 1317.4725341796875, 'top': 19.609725952148438, 'right': 1656.1409912109375, 'bottom': 447.143798828125}, {'label': 'cup', 'confidence': 0.9585285782814026, 'left': 1183.792236328125, 'top': 1196.179443359375, 'right': 1654.283447265625, 'bottom': 1846.680908203125}, {'label': 'laptop', 'confidence': 0.9871315360069275, 'left': 1817.885009765625, 'top': 315.3946533203125, 'right': 3061.878173828125, 'bottom': 2501.64794921875}], 'save_path': 'detection_result/image_numpy_0.jpg'}]
#vidio test
import cv2

# Per-frame detection smoke test over the sample video.
vedio_path = 'VID_20210307_154439.mp4'
input_object = 'bottle'
cap = cv2.VideoCapture(vedio_path)

while 1:
    ret, img = cap.read()

    if ret == True:
        img_size = img.shape
        # Denoise, then restore 3 channels for the detector.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.GaussianBlur(img, (5, 5), 1)
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        result = object_detector.object_detection(images=[img])
        center_points, img_object_h = get_center_point(result, input_object)
        print(center_points)
    else:
        # FIX: the pasted source was garbled here (duplicated `else:` and a
        # stray `enter_points)` fragment — a syntax error) and the intended
        # `continue` would spin forever once the video ends.  Stop instead.
        break
#处理center_points获得加速度
def processing_data(center_points, cost_eachfra, variable_distance, rate):
    """Estimate the average acceleration from sampled center points.

    Args:
        center_points: list of (x, y) centers, one per frame in which the
            object moved.  NOTE: truncated IN PLACE to a whole multiple of
            ``variable_distance`` (this mutation matches the original).
        cost_eachfra: seconds per video frame.
        variable_distance: number of frames spanned by each displacement
            sample (the "ticker-tape" interval).
        rate: metres per pixel (real object height / pixel height).

    Returns:
        Mean acceleration in m/s^2 over all displacement differences.

    Raises:
        ValueError: when there are too few points to form even one
            displacement difference (the original crashed with an
            unhelpful ZeroDivisionError in this case).
    """
    recall_change_pixel = []  # vertical pixel displacement per sample window
    recall_changed = []       # difference between consecutive displacements
    accelebration_list = []
    changed_move_time = cost_eachfra * variable_distance  # seconds per window
    len_points = len(center_points)
    print(len_points)
    times = int(len_points / variable_distance)
    new_len = times * variable_distance
    print(new_len)
    # Drop trailing points that do not fill a whole window — in place,
    # exactly as the original pop() loop did.
    if len_points != new_len:
        del center_points[new_len:]
    flag = 0
    print('center_points:')
    print(center_points)
    for point in center_points:
        if flag == 0:
            _, center_point_s = point  # window start: only y is used
        flag += 1
        if flag == variable_distance:
            _, center_point_f = point  # window end
            recall_change_pixel.append(center_point_s - center_point_f)
            flag = 0
    print('recall_change_pixel is:{}'.format(recall_change_pixel))
    lag = 0
    for change_pixel in recall_change_pixel:
        if lag == 0:
            distance_b = change_pixel
        lag = lag + 1
        if lag == 2:
            recall_changed.append(distance_b - change_pixel)
            lag = 0
    print('recall_changed is:{}'.format(recall_changed))
    for recall in recall_changed:
        # a = delta_d / t^2, converted from pixels to metres via `rate`.
        accelebration = (recall * rate) / (changed_move_time * changed_move_time)
        accelebration_list.append(accelebration)
    print('accelebration_list is {}:'.format(accelebration_list))
    if not accelebration_list:
        raise ValueError('too few center points to estimate acceleration')
    return sum(accelebration_list) / len(accelebration_list)
# Demo: input() always returns str, so the length must be cast to float
# before it can be used in arithmetic.
len_object = input('please input object length:')
print('\n')
print(type(len_object))
len_object = float(len_object)
print('\n')
print(len_object)
please input object length:

<class 'str'>


0.095
import paddlehub as hub
import cv2

# ---- configuration (interactive prompts replaced by constants) ----
#len_object = input('please input object length:')
#len_object = float(len_object)
len_object = 0.045            # real object height in metres
#variable_distance = input('please input variable_distance:')  #40
#variable_distance = int(variable_distance)
variable_distance = 10        # frames per displacement window
#vedio_path = input('please input vedio_path:')
vedio_path = 'VID_20210307_154439.mp4'
#input_object = input('please input the object name:') #'bottle'
input_object = 'bottle'       # COCO label to track

object_detector = hub.Module(name = 'yolov3_resnet34_coco2017')
cap = cv2.VideoCapture(vedio_path)
# Total video time and the duration of each frame.
print('获取视频的时间和每帧的时间')
total_second, cost_eachfra, fraNum = get_total_time(vedio_path)

location = []          # sliding window of the last two frames' detections
recall_points = []     # detections recorded while the object was moving
recall_points_new = []
move_fra = 0           # number of frames in which the object moved
img_size = (224, 224, 3)
rate = 0               # metres per pixel; calibrated once move_fra reaches 2
count = 0

while 1:
    ret, img = cap.read()
    count += 1
    if ret == True:
        img_size = img.shape
        result = object_detector.object_detection(images=[img])
        center_points, img_object_h = get_center_point(result, input_object)  # object centers
        if move_fra == 2:
            # Calibrate the pixel->metre scale from the known object height.
            rate = len_object / img_object_h
        location.append(center_points)
        print('location is:{}'.format(location))
        print('recall_points is :{}'.format(recall_points))
        if len(location) == 2:
            if location[0] != location[1]:
                # The object moved between the two frames: record both.
                move_fra += 1
                recall_points.append(location[0])
                recall_points.append(center_points)
                location.remove(location[0])
            else:
                location.remove(location[0])
                #print("---object is not move---")
        else:
            #print('---location is not enough---')
            continue
    else:
        # FIX: this branch used `continue`, which skipped the frame-count
        # check below and looped forever once the video stopped yielding
        # frames.  Stop reading instead.
        break
    print('正在处理第{}帧......'.format(count))
    # fraNum is a float frame count from cv2; >= avoids an exact-match miss.
    if count >= fraNum:
        break
# Image size of the last processed frame.
print('图像大小:')
print(img_size)
# Total movement time: one frame duration per "moving" frame.
print('计算运动时间')
move_time = move_fra * cost_eachfra
# Average speed from the first and last recorded centers (vertical axis).
print('求平均速度')
all_ = len(recall_points)
x, y = recall_points[0][0]
x_, y_ = recall_points[all_ - 1][0]
aver_speed = ((y_ - y) * rate) / move_time
# Flatten recall_points to a plain list of (x, y) tuples.
for point in recall_points:
    recall_points_new.append(point[0])
# Average acceleration.
print('获得加速度')
aver_accelebration = processing_data(recall_points_new, cost_eachfra, variable_distance, rate)
# Draw the trajectory.
print('分析物体的运动')
move_img = analysis_movement(img_size, recall_points_new)
plt.imshow(move_img)
print(' aver_speed is :{} m/s \n aver_acecelebration is :{} m/s*2\n total_move_time is :{} s \n'.format(aver_speed, aver_accelebration, move_time))

测试结果:

运动轨迹及相关数据
视频中的检测结果:
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述

评论 3
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值