yolov5_obb: A Collection of Errors and Fixes

For yolov5_obb, the CUDA and PyTorch versions should ideally match what the author specifies, and the PyTorch version must not exceed 1.10; sticking to the officially documented versions avoids most of the errors below, so I strongly recommend following the official setup,
which also requires: CUDA Driver Version ≥ CUDA Toolkit Version (runtime version) = torch.version.cuda
Official instructions: https://github.com/hukaixuan19970627/yolov5_obb/blob/master/docs/install.md
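
A quick sanity check from Python (a minimal sketch) that the versions actually line up:

import torch

print(torch.__version__)          # per the official docs, 1.10 or lower is safest
print(torch.version.cuda)         # must match the toolkit version reported by nvcc -V
print(torch.cuda.is_available())  # True only if the driver is new enough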


However, my CUDA driver/toolkit version is 11.7 and my PyTorch is 2.0, so before everything worked the errors came one after another. Below are some of the problems I ran into (only a subset; I forgot the rest once they were fixed). My environment: Python 3.9, Ubuntu 20.04.


I originally planned to use Windows 10 but was scared off by the number of errors there.
A record of errors from installing yolov5_obb on Windows 10: https://blog.csdn.net/qq_51882416/article/details/124326766?spm=1001.2014.3001.5502

1. fatal error: THC/THC.h: No such file or directory

Cause: newer PyTorch releases removed THC/THC.h and its family of functions; most of them have modern replacements.
Fix: update the headers in poly_nms_cuda.cu and replace the deprecated functions with their new equivalents (THCCeilDiv → at::ceil_div, THCudaMalloc/THCudaFree → c10::cuda::CUDACachingAllocator::raw_alloc/raw_delete, THCudaCheck → AT_CUDA_CHECK).
Reference: https://zhuanlan.zhihu.com/p/592049655
File location (shown as a screenshot in the original post; in this repo it should be utils/nms_rotated/src/poly_nms_cuda.cu):
Replace the contents of the original poly_nms_cuda.cu with the code below:

#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>

#include <ATen/cuda/ThrustAllocator.h>
// #include <THC/THC.h>
// #include <THC/THCDeviceUtils.cuh>
#include "ATen/cuda/DeviceUtils.cuh"
#include "ATen/ceil_div.h"
// for c10::cuda::CUDACachingAllocator::raw_alloc / raw_delete used below
// (often pulled in transitively, but the explicit include is safer)
#include <c10/cuda/CUDACachingAllocator.h>

#include <vector>
#include <iostream>

// #define CUDA_CHECK(condition) \
//   /* Code block avoids redefinition of cudaError_t error */ \
//   do { \
//     cudaError_t error = condition; \
//     if (error != cudaSuccess) { \
//       std::cout << cudaGetErrorString(error) << std::endl; \
//     } \
//   } while (0)

// #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;


#define maxn 10
const double eps=1E-8;

__device__ inline int sig(float d){
    return(d>eps)-(d<-eps);
}

__device__ inline int point_eq(const float2 a, const float2 b) {
    return sig(a.x - b.x) == 0 && sig(a.y - b.y)==0;
}

__device__ inline void point_swap(float2 *a, float2 *b) {
    float2 temp = *a;
    *a = *b;
    *b = temp;
}

__device__ inline void point_reverse(float2 *first, float2* last)
{
    while ((first!=last)&&(first!=--last)) {
        point_swap (first,last);
        ++first;
    }
}

__device__ inline float cross(float2 o,float2 a,float2 b){  // cross product
    return(a.x-o.x)*(b.y-o.y)-(b.x-o.x)*(a.y-o.y);
}
__device__ inline float area(float2* ps,int n){
    ps[n]=ps[0];
    float res=0;
    for(int i=0;i<n;i++){
        res+=ps[i].x*ps[i+1].y-ps[i].y*ps[i+1].x;
    }
    return res/2.0;
}
__device__ inline int lineCross(float2 a,float2 b,float2 c,float2 d,float2&p){
    float s1,s2;
    s1=cross(a,b,c);
    s2=cross(a,b,d);
    if(sig(s1)==0&&sig(s2)==0) return 2;
    if(sig(s2-s1)==0) return 0;
    p.x=(c.x*s2-d.x*s1)/(s2-s1);
    p.y=(c.y*s2-d.y*s1)/(s2-s1);
    return 1;
}

__device__ inline void polygon_cut(float2*p,int&n,float2 a,float2 b, float2* pp){

    int m=0;p[n]=p[0];
    for(int i=0;i<n;i++){
        if(sig(cross(a,b,p[i]))>0) pp[m++]=p[i];
        if(sig(cross(a,b,p[i]))!=sig(cross(a,b,p[i+1])))
            lineCross(a,b,p[i],p[i+1],pp[m++]);
    }
    n=0;
    for(int i=0;i<m;i++)
        if(!i||!(point_eq(pp[i], pp[i-1])))
            p[n++]=pp[i];
    // while(n>1&&p[n-1]==p[0])n--;
    while(n>1&&point_eq(p[n-1], p[0]))n--;
}

//--------------------------------------------//
// signed intersection area of triangles oab and ocd, where o is the origin
__device__ inline float intersectArea(float2 a,float2 b,float2 c,float2 d){
    float2 o = make_float2(0,0);
    int s1=sig(cross(o,a,b));
    int s2=sig(cross(o,c,d));
    if(s1==0||s2==0)return 0.0;// degenerate case, area is 0
    // if(s1==-1) swap(a,b);
    // if(s2==-1) swap(c,d);
    if (s1 == -1) point_swap(&a, &b);
    if (s2 == -1) point_swap(&c, &d);
    float2 p[10]={o,a,b};
    int n=3;
    float2 pp[maxn];
    polygon_cut(p,n,o,c,pp);
    polygon_cut(p,n,c,d,pp);
    polygon_cut(p,n,d,o,pp);
    float res=fabs(area(p,n));
    if(s1*s2==-1) res=-res;
    return res;
}
// intersection area of two polygons
__device__ inline float intersectArea(float2*ps1,int n1,float2*ps2,int n2){
    if(area(ps1,n1)<0) point_reverse(ps1,ps1+n1);
    if(area(ps2,n2)<0) point_reverse(ps2,ps2+n2);
    ps1[n1]=ps1[0];
    ps2[n2]=ps2[0];
    float res=0;
    for(int i=0;i<n1;i++){
        for(int j=0;j<n2;j++){
            res+=intersectArea(ps1[i],ps1[i+1],ps2[j],ps2[j+1]);
        }
    }
    return res; // assume res is positive!
}

// TODO: this could be optimized by first computing the IoU of the two horizontal bounding boxes (hbbs)
__device__ inline float devPolyIoU(float const * const p, float const * const q) {
    float2 ps1[maxn], ps2[maxn];
    int n1 = 4;
    int n2 = 4;
    for (int i = 0; i < 4; i++) {
        ps1[i].x = p[i * 2];
        ps1[i].y = p[i * 2 + 1];

        ps2[i].x = q[i * 2];
        ps2[i].y = q[i * 2 + 1];
    }
    float inter_area = intersectArea(ps1, n1, ps2, n2);
    float union_area = fabs(area(ps1, n1)) + fabs(area(ps2, n2)) - inter_area;
    float iou = 0;
    if (union_area == 0) {
        iou = (inter_area + 1) / (union_area + 1);
    } else {
        iou = inter_area / union_area;
    }
    return iou;
}

__global__ void poly_nms_kernel(const int n_polys, const float nms_overlap_thresh,
                            const float *dev_polys, unsigned long long *dev_mask) {
    const int row_start = blockIdx.y;
    const int col_start = blockIdx.x;

    const int row_size =
            min(n_polys - row_start * threadsPerBlock, threadsPerBlock);
    const int cols_size =
            min(n_polys - col_start * threadsPerBlock, threadsPerBlock);

    __shared__ float block_polys[threadsPerBlock * 9];
    if (threadIdx.x < cols_size) {
        block_polys[threadIdx.x * 9 + 0] =
            dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 0];
        block_polys[threadIdx.x * 9 + 1] =
            dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 1];
        block_polys[threadIdx.x * 9 + 2] =
            dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 2];
        block_polys[threadIdx.x * 9 + 3] =
            dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 3];
        block_polys[threadIdx.x * 9 + 4] =
            dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 4];
        block_polys[threadIdx.x * 9 + 5] =
            dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 5];
        block_polys[threadIdx.x * 9 + 6] =
            dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 6];
        block_polys[threadIdx.x * 9 + 7] =
            dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 7];
        block_polys[threadIdx.x * 9 + 8] =
            dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 8];
    }
    __syncthreads();

    if (threadIdx.x < row_size) {
        const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
        const float *cur_box = dev_polys + cur_box_idx * 9;
        int i = 0;
        unsigned long long t = 0;
        int start = 0;
        if (row_start == col_start) {
            start = threadIdx.x + 1;
        }
        for (i = start; i < cols_size; i++) {
            if (devPolyIoU(cur_box, block_polys + i * 9) > nms_overlap_thresh) {
                t |= 1ULL << i;
            }
        }

        // const int col_blocks = THCCeilDiv(n_polys, threadsPerBlock);
        const int col_blocks =  at::ceil_div(n_polys, threadsPerBlock);

        dev_mask[cur_box_idx * col_blocks + col_start] = t;
    }
}

// boxes is a N x 9 tensor
at::Tensor poly_nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) {

    at::DeviceGuard guard(boxes.device());

    using scalar_t = float;
    AT_ASSERTM(boxes.device().is_cuda(), "boxes must be a CUDA tensor");
    auto scores = boxes.select(1, 8);
    auto order_t = std::get<1>(scores.sort(0, /*descending=*/true));
    auto boxes_sorted = boxes.index_select(0, order_t);

    int boxes_num = boxes.size(0);

    // const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
    const int col_blocks =  at::ceil_div(boxes_num, threadsPerBlock);

    scalar_t* boxes_dev = boxes_sorted.data_ptr<scalar_t>();

    // THCState *state = at::globalContext().lazyInitCUDA();
    
    unsigned long long* mask_dev = NULL;

    // mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
    mask_dev = (unsigned long long*) c10::cuda::CUDACachingAllocator::raw_alloc(boxes_num * col_blocks * sizeof(unsigned long long));

    // dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
    //             THCCeilDiv(boxes_num, threadsPerBlock));
    dim3 blocks(at::ceil_div(boxes_num, threadsPerBlock), at::ceil_div(boxes_num, threadsPerBlock));
    dim3 threads(threadsPerBlock);

    poly_nms_kernel<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(boxes_num,
                                        nms_overlap_thresh,
                                        boxes_dev,
                                        mask_dev);
    
    std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
    // THCudaCheck(cudaMemcpyAsync(
	// 		    &mask_host[0],
    //                         mask_dev,
    //                         sizeof(unsigned long long) * boxes_num * col_blocks,
    //                         cudaMemcpyDeviceToHost,
	// 		    at::cuda::getCurrentCUDAStream()
	// 		    ));

    AT_CUDA_CHECK(cudaMemcpyAsync(
                &mask_host[0],
                mask_dev,
                sizeof(unsigned long long) * boxes_num * col_blocks,
                cudaMemcpyDeviceToHost,
                at::cuda::getCurrentCUDAStream()
	));
    
    std::vector<unsigned long long> remv(col_blocks);
    memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);

    at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
    int64_t* keep_out = keep.data_ptr<int64_t>();

    int num_to_keep = 0;
    for (int i = 0; i < boxes_num; i++) {
        int nblock = i / threadsPerBlock;
        int inblock = i % threadsPerBlock;

        if (!(remv[nblock] & (1ULL << inblock))) {
            keep_out[num_to_keep++] = i;
            unsigned long long *p = &mask_host[0] + i * col_blocks;
            for (int j = nblock; j < col_blocks; j++) {
                remv[j] |= p[j];
            }
        }
    }

    // THCudaFree(state, mask_dev);
    c10::cuda::CUDACachingAllocator::raw_delete(mask_dev);

    return order_t.index({
        keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
          order_t.device(), keep.scalar_type())});
}
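
After replacing the file, the CUDA extension has to be recompiled for the change to take effect; per the repo's install.md this is done from the yolov5_obb root with python setup.py develop (or pip install -v -e .).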

2. error: command '/usr/local/cuda/bin/nvcc' failed with exit code 1

#include <cusparse.h>
^~~~~~~~~~~~
compilation terminated.
error: command '/usr/local/cuda/bin/nvcc' failed with exit code 1

Fix:
First confirm that CUDA itself is installed correctly:

nvcc -V

If it is, set CUDA_HOME in the shell (append the line to ~/.bashrc to make it persistent):

export CUDA_HOME=/usr/local/cuda
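
To confirm the variable is actually visible before rebuilding, a quick check (a sketch; run it in the same shell where you exported CUDA_HOME):

import os, shutil

print(os.environ.get("CUDA_HOME"))  # expect /usr/local/cuda
print(shutil.which("nvcc"))         # expect /usr/local/cuda/bin/nvcc if PATH is set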

3. RuntimeError: result type Float can't be cast to the desired output type long int

Cause:
during the loss computation a Float value ends up where a long integer is expected, so the dtypes have to be aligned explicitly.
Details: https://blog.csdn.net/Thebest_jack/article/details/125649451
https://blog.csdn.net/baidu_39629638/article/details/128025583
Fix:
modify two places in loss.py under utils (the locations are shown as screenshots in the original post).
The first change:

 anchors, shape = self.anchors[i], p[i].shape 

The second change:

indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1)))  # image, anchor, grid

The point is to clamp the grid indices against the integer tensor shape (plain Python ints) rather than the float gain tensor, so the two dtypes line up; a minimal repro of the failure mode follows.
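
A minimal standalone sketch of this failure mode (not from the repo): clamping a long tensor in place with a float bound raises the same error, while a plain int bound, as in the fix, does not:

import torch

gj = torch.tensor([0, 5, 9])   # long grid indices, like gij in build_targets
bound = torch.tensor(7.0)      # float bound, like the old gain[...] - 1
# gj.clamp_(0, bound)          # RuntimeError: result type Float can't be cast to ... Long
gj.clamp_(0, 7)                # fine: int bound, tensor stays long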

4. An error that seemed to say the driver was missing, even though it was installed

First check whether the driver and toolkit are actually installed:

nvcc -V
nvidia-smi

If not, reinstall them.
If everything is installed and the error persists, add the following lines right after the imports at the top of train.py:

print(torch.cuda.is_available())  # should print True
torch.cuda.current_device()       # forces CUDA initialization early
torch.cuda._initialized = True    # private-attribute workaround from the original post

5. RuntimeError: indices should be either on cpu or on the same device as the indexed tensor (cpu)

I had checked my code: the model and the data had both been moved to the GPU, printing them confirmed it, and GPU memory was clearly in use at runtime, yet the error persisted.
Fix: find the line the traceback points at. Mine (shown as a screenshot in the original post) was:

Before:

ori_inds = torch.arange(dets_th.size(0)) # 0 ~ n-1

The index tensor is created on the CPU, so move it onto the GPU.
After:

ori_inds = torch.arange(dets_th.size(0)).to('cuda') # 0 ~ n-1

Other occurrences of this error can be fixed the same way; a device-agnostic variant is sketched below.
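
A variant that avoids hard-coding 'cuda' (a sketch, assuming dets_th is the tensor that ori_inds will index into):

# create the index tensor directly on the same device as the data it indexes
ori_inds = torch.arange(dets_th.size(0), device=dets_th.device)  # 0 ~ n-1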

6. Bug when trying to save txt bboxes with detect.py --save-txt

https://github.com/hukaixuan19970627/yolov5_obb/issues/375

/content/yolov5_obb
detect: weights=['/content/sample_data/best.pt'], source=../datasets/sample/valid/images, imgsz=[640, 640], conf_thres=0.5, iou_thres=0.4, max_det=1000, device=0, view_img=False, save_txt=True, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=True, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=2, hide_labels=False, hide_conf=False, half=False, dnn=False
YOLOv5 🚀 c7409ff torch 1.10.1+cu113 CUDA:0 (Tesla P100-PCIE-16GB, 16281MiB)

Fusing layers... 
Model Summary: 290 layers, 21584355 parameters, 0 gradients
Traceback (most recent call last):
  File "detect.py", line 251, in <module>
    main(opt)
  File "detect.py", line 246, in main
    run(**vars(opt))
  File "/usr/local/lib/python3.7/dist-packages/torch/autograd/grad_mode.py", line 28, in decorate_context
    return func(*args, **kwargs)
  File "detect.py", line 159, in run
    poly = poly.tolist()
AttributeError: 'list' object has no attribute 'tolist'

From that issue: "I am working with just one class, so the below workaround worked for me for saving labels with detect.py."

The original code in detect.py:

 poly = poly.tolist() 
 line = (cls, *poly, conf) if save_conf else (cls, *poly)  # label format 
 with open(txt_path + '.txt', 'a') as f: 
     f.write(('%g ' * len(line)).rstrip() % line + '\n') 

Change it to:

# poly = poly.tolist()
# line = (cls, *poly, conf) if save_conf else (cls, *poly)  # label format
poly_value = [x.item() for x in poly]  # each vertex coordinate as a plain Python float
line = (cls, *poly_value, conf) if save_conf else (*poly_value,)  # label format

with open(txt_path + '.txt', 'a') as f:
    # ' screws 0' hard-codes the single class name plus the DOTA-style difficulty flag
    f.write(('%g ' * len(line)).rstrip() % line + ' screws 0' + '\n')
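
If you have more than one class, a sketch that looks the class name up instead of hard-coding it (assuming names is the class-name list already available in detect.py):

poly_value = [x.item() for x in poly]
line = (*poly_value, conf) if save_conf else (*poly_value,)
with open(txt_path + '.txt', 'a') as f:
    # DOTA-style line: x1 y1 x2 y2 x3 y3 x4 y4 [conf] class_name difficult
    f.write(('%g ' * len(line)).rstrip() % line + f' {names[int(cls)]} 0' + '\n')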