Simple YOLOv8 code examples

import torch
print(torch.__version__)
print('CUDA available: ' + str(torch.cuda.is_available()))
print('cuDNN version: ' + str(torch.backends.cudnn.version()))
a = torch.cuda.FloatTensor(2).zero_()  # legacy CUDA tensor constructor; raises if PyTorch was built without CUDA
print('Tensor a = ' + str(a))
b = torch.randn(2).cuda()
print('Tensor b = ' + str(b))
c = a + b
print('Tensor c = ' + str(c))
import torchvision
print(torchvision.__version__)

xk@ubuntu:~/Desktop/code$ python3 test_cuda_code.py
2.1.2
CUDA available: False
cuDNN version: None
Traceback (most recent call last):
  File "test_cuda_code.py", line 5, in <module>
    a = torch.cuda.FloatTensor(2).zero_()
TypeError: type torch.cuda.FloatTensor not available. Torch not compiled with CUDA enabled.
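
The traceback means the installed PyTorch wheel is CPU-only; it does not necessarily mean the machine lacks a GPU. Either install a CUDA-enabled build from pytorch.org, or guard the CUDA-specific lines so the test degrades gracefully. A minimal sketch of the guarded version:

import torch

print(torch.__version__)
print('CUDA available:', torch.cuda.is_available())
print('cuDNN version:', torch.backends.cudnn.version())

if torch.cuda.is_available():
    a = torch.zeros(2, device='cuda')   # modern replacement for torch.cuda.FloatTensor
    b = torch.randn(2, device='cuda')
    print('Tensor c =', a + b)
else:
    print('Skipping the CUDA tensor test: this PyTorch build has no CUDA support.')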

import os
import requests
import cv2
from ultralytics import YOLO

# Create directories for the downloaded images and the detection results
os.makedirs('images', exist_ok=True)
os.makedirs('results', exist_ok=True)

# Sample image URLs from the COCO val2017 set
image_urls = [
    'http://images.cocodataset.org/val2017/000000039769.jpg',
    'http://images.cocodataset.org/val2017/000000001268.jpg',
    'http://images.cocodataset.org/val2017/000000005357.jpg',
    'http://images.cocodataset.org/val2017/000000009584.jpg',
    'http://images.cocodataset.org/val2017/000000029203.jpg',
    'http://images.cocodataset.org/val2017/000000033281.jpg',
    'http://images.cocodataset.org/val2017/000000036777.jpg',
    'http://images.cocodataset.org/val2017/000000046983.jpg',
    'http://images.cocodataset.org/val2017/000000057870.jpg',
    'http://images.cocodataset.org/val2017/000000062808.jpg'
]

# Download the images
for i, url in enumerate(image_urls):
    response = requests.get(url)
    response.raise_for_status()  # fail fast on HTTP errors instead of saving an error page as a .jpg
    with open(f'images/image_{i}.jpg', 'wb') as f:
        f.write(response.content)

print("Images downloaded.")

# Load the YOLOv8 model
model = YOLO('yolov8n.pt')  # pretrained YOLOv8n (nano) weights, auto-downloaded on first use

# Run detection on each image and save the result
for i in range(10):
    img_path = f'images/image_{i}.jpg'
    result = model(img_path)[0]
    result_plotted = result.plot()  # draw boxes and labels (returns a BGR numpy array)

    # Save the annotated image
    result_img_path = f'results/result_{i}.jpg'
    cv2.imwrite(result_img_path, result_plotted)
    print(f'Result saved to {result_img_path}')

print("Recognition and saving completed.")

import os
import requests
import zipfile
import cv2
from ultralytics import YOLO

# Create directories for the dataset and the detection results
os.makedirs('coco', exist_ok=True)
os.makedirs('results', exist_ok=True)

# COCO val2017 archive URL (5000 images)
coco_url = 'http://images.cocodataset.org/zips/val2017.zip'

# Download the COCO dataset
coco_zip_path = 'coco/val2017.zip'
if not os.path.exists(coco_zip_path):
    print("Downloading COCO dataset...")
    response = requests.get(coco_url)
    with open(coco_zip_path, 'wb') as f:
        f.write(response.content)
    print("Download completed.")

# Extract the COCO dataset (skip if already extracted)
if not os.path.exists('coco/val2017'):
    with zipfile.ZipFile(coco_zip_path, 'r') as zip_ref:
        zip_ref.extractall('coco')
print("COCO dataset extracted.")

# Load the YOLOv8 model
model = YOLO('yolov8n.pt')  # pretrained YOLOv8n (nano) weights

# Collect all image files
image_dir = 'coco/val2017'
image_files = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith('.jpg')]

# Run detection on each image and save the result
for i, img_path in enumerate(image_files):
    result = model(img_path)[0]
    result_plotted = result.plot()  # draw boxes and labels

    # Save the annotated image
    result_img_path = f'results/result_{i}.jpg'
    cv2.imwrite(result_img_path, result_plotted)
    print(f'Result saved to {result_img_path}')

print("Recognition and saving completed.")
 

# GPU version

import os
import requests
import zipfile
import cv2
import torch
from ultralytics import YOLO

# Create directories for the dataset and the detection results
os.makedirs('coco', exist_ok=True)
os.makedirs('results', exist_ok=True)

# COCO val2017 archive URL
coco_url = 'http://images.cocodataset.org/zips/val2017.zip'

# Download the COCO dataset
coco_zip_path = 'coco/val2017.zip'
if not os.path.exists(coco_zip_path):
    print("Downloading COCO dataset...")
    response = requests.get(coco_url)
    with open(coco_zip_path, 'wb') as f:
        f.write(response.content)
    print("Download completed.")

# Extract the COCO dataset (skip if already extracted)
if not os.path.exists('coco/val2017'):
    with zipfile.ZipFile(coco_zip_path, 'r') as zip_ref:
        zip_ref.extractall('coco')
print("COCO dataset extracted.")

# Load the YOLOv8 model
model = YOLO('yolov8n.pt')  # pretrained YOLOv8n (nano) weights

# Check whether CUDA is available and move the model to the right device
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)

# Collect all image files
image_dir = 'coco/val2017'
image_files = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith('.jpg')]

# Run detection on each image and save the result
for i, img_path in enumerate(image_files):
    print(f'Processing {img_path}...')
    result = model(img_path)[0]
    result_plotted = result.plot()  # draw boxes and labels

    # Save the annotated image
    result_img_path = f'results/result_{i}.jpg'
    cv2.imwrite(result_img_path, result_plotted)
    print(f'Result saved to {result_img_path}')

print("Recognition and saving completed.")
 

# Fully-GPU version (manual preprocessing)

import os
import requests
import zipfile
import cv2
import torch
from ultralytics import YOLO

# Create directories for the dataset and the detection results
os.makedirs('coco', exist_ok=True)
os.makedirs('results', exist_ok=True)

# COCO val2017 archive URL
coco_url = 'http://images.cocodataset.org/zips/val2017.zip'

# Download the COCO dataset
coco_zip_path = 'coco/val2017.zip'
if not os.path.exists(coco_zip_path):
    print("Downloading COCO dataset...")
    response = requests.get(coco_url)
    with open(coco_zip_path, 'wb') as f:
        f.write(response.content)
    print("Download completed.")

# Extract the COCO dataset (skip if already extracted)
if not os.path.exists('coco/val2017'):
    with zipfile.ZipFile(coco_zip_path, 'r') as zip_ref:
        zip_ref.extractall('coco')
print("COCO dataset extracted.")

# Load the YOLOv8 model
model = YOLO('yolov8n.pt')  # pretrained YOLOv8n (nano) weights

# Check whether CUDA is available and move the model to the right device
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)

# Collect all image files
image_dir = 'coco/val2017'
image_files = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith('.jpg')]

# Run detection on each image and save the result
for i, img_path in enumerate(image_files):
    print(f'Processing {img_path}...')
    
    # Read the image and resize it to the model input size
    img = cv2.imread(img_path)
    resized_img = cv2.resize(img, (640, 640))  # resize to 640x640

    # Ultralytics expects tensor input as float32 RGB in [0, 1], shape (B, 3, H, W),
    # so convert BGR->RGB and normalize instead of feeding raw 0-255 values
    rgb_img = cv2.cvtColor(resized_img, cv2.COLOR_BGR2RGB)
    img_tensor = torch.from_numpy(rgb_img).to(device).float() / 255.0

    # Reshape HWC -> CHW and add a batch dimension
    img_tensor = img_tensor.permute(2, 0, 1).unsqueeze(0)

    # Run inference
    result = model(img_tensor)[0]
    result_plotted = result.plot()  # draw boxes and labels

    # Save the annotated image
    result_img_path = f'results/result_{i}.jpg'
    cv2.imwrite(result_img_path, result_plotted)
    print(f'Result saved to {result_img_path}')

print("Recognition and saving completed.")

# choose one rtmpose model
mim download mmpose --config rtmpose-m_8xb64-270e_coco-wholebody-256x192 --dest .

git clone https://github.com/open-mmlab/mmdeploy.git

python3 mmdeploy/tools/deploy.py \
    mmdeploy/configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py \
    mmpose/rtmpose-m_8xb64-270e_coco-wholebody-256x192.py \
    mmpose/rtmpose-m_simcc-coco-wholebody_pt-aic-coco_270e-256x192-cd5e845c_20230123.pth \
    mmdeploy/demo/resources/human-pose.jpg \
    --work-dir mmdeploy_model/mmpose/ort \
    --device cuda \
    --dump-info
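
deploy.py writes the converted model into --work-dir; in my experience mmdeploy names the exported network end2end.onnx, but check the directory yourself (that filename is an assumption). A minimal sketch for loading it with onnxruntime:

import onnxruntime as ort

# 'end2end.onnx' is mmdeploy's usual output name -- verify it in mmdeploy_model/mmpose/ort
sess = ort.InferenceSession('mmdeploy_model/mmpose/ort/end2end.onnx',
                            providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
print([inp.name for inp in sess.get_inputs()])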

pip3 install -r requirements.txt

python main.py \
    {ONNX_FILE} \
    {IMAGE_FILE} \
    --device {DEVICE} \
    --save-path {SAVE_PATH}

import cv2

from rtmlib import Wholebody, draw_skeleton

device = 'cpu'  # cpu, cuda, mps
backend = 'onnxruntime'  # opencv, onnxruntime, openvino
img = cv2.imread('./demo.jpg')

openpose_skeleton = False  # True for openpose-style, False for mmpose-style

wholebody = Wholebody(to_openpose=openpose_skeleton,
                      mode='balanced',  # 'performance', 'lightweight', 'balanced'. Default: 'balanced'
                      backend=backend, device=device)

keypoints, scores = wholebody(img)

# visualize
img_show = img.copy()

# if you want a black background instead of the original image:
# img_show = np.zeros(img_show.shape, dtype=np.uint8)  # requires: import numpy as np

img_show = draw_skeleton(img_show, keypoints, scores, kpt_thr=0.5)


cv2.imshow('img', img_show)
cv2.waitKey()
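
rtmlib returns plain numpy arrays; as I understand it, keypoints has shape (num_persons, num_keypoints, 2) and scores has shape (num_persons, num_keypoints), which a quick print confirms:

print(keypoints.shape, scores.shape)  # e.g. (1, 133, 2) and (1, 133): one person, 133 whole-body keypoints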

# rtmlib video

import cv2
import numpy as np
import time
from rtmlib import Wholebody, draw_skeleton

device = 'cpu'  # cpu, cuda, mps
backend = 'onnxruntime'  # opencv, onnxruntime, openvino

openpose_skeleton = False  # True for openpose-style, False for mmpose-style

wholebody = Wholebody(to_openpose=openpose_skeleton,
                      mode='balanced',  # 'performance', 'lightweight', 'balanced'. Default: 'balanced'
                      backend=backend, device=device)

# Open the input video
video_path = 'input_video.mp4'  # path to the input video file
cap = cv2.VideoCapture(video_path)

# Read the video properties
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS)
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

# Set up the video writer
fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # or 'XVID', 'MJPG', 'X264', etc.
out = cv2.VideoWriter('output_video.mp4', fourcc, fps, (width, height))

# Track processing time and frame count
start_time = time.time()
processed_frames = 0

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    # Run inference on this frame
    keypoints, scores = wholebody(frame)

    # Visualize
    frame_show = frame.copy()
    frame_show = draw_skeleton(frame_show, keypoints, scores, kpt_thr=0.5)

    # Write the annotated frame to the output video
    out.write(frame_show)

    # Count the processed frame
    processed_frames += 1

    # Show a live preview (optional; requires a display)
    cv2.imshow('Frame', frame_show)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Compute the total processing time
end_time = time.time()
total_time = end_time - start_time
fps_processed = processed_frames / total_time

# Print a summary
print(f"Total frames: {total_frames}")
print(f"Processed frames: {processed_frames}")
print(f"Total processing time: {total_time:.2f} seconds")
print(f"Average FPS: {fps_processed:.2f}")

# Release the reader and the writer
cap.release()
out.release()
cv2.destroyAllWindows()
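
For long videos, a progress bar is friendlier than the imshow preview (which also needs a display). A minimal sketch of the same loop with tqdm, an extra dependency not used elsewhere in this post:

from tqdm import tqdm

pbar = tqdm(total=total_frames, unit='frame')
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    keypoints, scores = wholebody(frame)
    out.write(draw_skeleton(frame.copy(), keypoints, scores, kpt_thr=0.5))
    pbar.update(1)
pbar.close()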
 

# rtmlib video, GPU version

import cv2
import numpy as np
import time
from rtmlib import Wholebody, draw_skeleton
import torch

device = 'cuda' if torch.cuda.is_available() else 'cpu'
backend = 'onnxruntime'  # 'opencv', 'onnxruntime', 'openvino'

openpose_skeleton = False  # True for openpose-style, False for mmpose-style

wholebody = Wholebody(to_openpose=openpose_skeleton,
                      mode='balanced',  # 'performance', 'lightweight', 'balanced'
                      backend=backend, device=device)

# Open the input video
video_path = 'input_video.mp4'  # path to the input video file
cap = cv2.VideoCapture(video_path)

# Read the video properties
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS)
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

# Set up the video writer
fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # or 'XVID', 'MJPG', 'X264', etc.
out = cv2.VideoWriter('output_video.mp4', fourcc, fps, (width, height))

# Track processing time and frame count
start_time = time.time()
processed_frames = 0

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    # rtmlib's ONNX backends take numpy BGR images directly; wrapping the frame in a
    # torch tensor does not work here. GPU execution is selected via device='cuda'
    # above and requires the onnxruntime-gpu package.
    keypoints, scores = wholebody(frame)

    frame_show = frame.copy()
    frame_show = draw_skeleton(frame_show, keypoints, scores, kpt_thr=0.5)

    # Write the annotated frame to the output video
    out.write(frame_show)

    # Count the processed frame
    processed_frames += 1

    # Print progress every ten frames
    if processed_frames % 10 == 0:
        print(f'Processed frames: {processed_frames}/{total_frames}')

# Compute the total processing time
end_time = time.time()
total_time = end_time - start_time
fps_processed = processed_frames / total_time

# Print a summary
print(f"Total frames: {total_frames}")
print(f"Processed frames: {processed_frames}")
print(f"Total processing time: {total_time:.2f} seconds")
print(f"Average FPS: {fps_processed:.2f}")

# Release the reader and the writer
cap.release()
out.release()
cv2.destroyAllWindows()
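
Whether device='cuda' actually runs on the GPU depends on the onnxruntime build; a quick check with the standard onnxruntime API:

import onnxruntime as ort

# CUDAExecutionProvider must appear in this list for GPU inference;
# if only CPUExecutionProvider shows up, install onnxruntime-gpu
print(ort.get_available_providers())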
 
