Ultra Fast Lane Detection: converting the lane-detection project to a CPU version for a demo

Lane detection with Ultra Fast Lane Detection plus lane keeping. The original code requires a GPU; the changes below make it run on the CPU (see the code for the details).
If you need the full project code, please follow me and leave a comment.
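
Before running either version, you can confirm whether the machine actually has a usable GPU. This quick check is my own addition, not part of the original post:

import torch

print(torch.__version__)
print(torch.cuda.is_available())  # False on a CPU-only machine, where the .cuda() calls below would fail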
The original code:

import cv2
from model.model import parsingNet
from utils.common import merge_config
from utils.dist_utils import dist_print
import torch
import scipy.special
import numpy as np
import torchvision.transforms as transforms
from data.dataset import LaneTestDataset
from data.constant import culane_row_anchor, tusimple_row_anchor
from PIL import Image


test_img = 'tmp/1.jpg'

if __name__ == "__main__":
    torch.backends.cudnn.benchmark = True
    args, cfg = merge_config()
    dist_print('start testing...')
    assert cfg.backbone in ['18', '34', '50', '101', '152', '50next', '101next', '50wide', '101wide']

    if cfg.dataset == 'CULane':
        cls_num_per_lane = 18
    elif cfg.dataset == 'Tusimple':
        cls_num_per_lane = 56
    else:
        raise NotImplementedError

    net = parsingNet(pretrained=False, backbone=cfg.backbone, cls_dim=(cfg.griding_num + 1, cls_num_per_lane, 4),
                     use_aux=False).cuda()  # we dont need auxiliary segmentation in testing
    state_dict = torch.load(cfg.test_model, map_location='cpu')['model']
    compatible_state_dict = {}
    for k, v in state_dict.items():
        if 'module.' in k:
            compatible_state_dict[k[7:]] = v
        else:
            compatible_state_dict[k] = v

    net.load_state_dict(compatible_state_dict, strict=False)
    net.eval()

    img_transforms = transforms.Compose([
        # transforms.CenterCrop((590,1640)),
        transforms.Resize((288, 800)),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    frame = cv2.imread(test_img)
    print('Input image size:', frame.shape)
    # oir_shape = (frame_ori.shape[1],frame_ori.shape[0])
    # frame = cv2.resize(frame_ori, (1280, 720), interpolation=cv2.INTER_LINEAR)
    img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img_ = Image.fromarray(img)
    imgs = img_transforms(img_)
    imgs = imgs.unsqueeze(0)
    imgs = imgs.cuda()
    with torch.no_grad():
        out = net(imgs)

    col_sample = np.linspace(0, 800 - 1, cfg.griding_num)
    col_sample_w = col_sample[1] - col_sample[0]

    out_j = out[0].data.cpu().numpy()
    out_j = out_j[:, ::-1, :]
    prob = scipy.special.softmax(out_j[:-1, :, :], axis=0)
    idx = np.arange(cfg.griding_num) + 1
    idx = idx.reshape(-1, 1, 1)
    loc = np.sum(prob * idx, axis=0)
    out_j = np.argmax(out_j, axis=0)
    loc[out_j == cfg.griding_num] = 0
    out_j = loc

    for i in range(out_j.shape[1]):
        if np.sum(out_j[:, i] != 0) > 2:
            for k in range(out_j.shape[0]):
                if out_j[k, i] > 0:
                    ppp = (int(out_j[k, i] * col_sample_w * frame.shape[1] / 800) - 1, int(frame.shape[0] * (tusimple_row_anchor[56-1-k]/288)) - 1 )
                    cv2.circle(frame, ppp, 5, (0, 255, 0), -1)
    # frame = cv2.resize(frame,oir_shape,interpolation=cv2.INTER_LINEAR)
    print('Output image size:', frame.shape)
    cv2.imshow('result',frame)
    cv2.waitKey(0)

The modified code:

import cv2
from model.model import parsingNet
from utils.common import merge_config
from utils.dist_utils import dist_print
import torch
import scipy.special as ssp
import numpy as np
import torchvision.transforms as transforms
from data.dataset import LaneTestDataset
from data.constant import culane_row_anchor, tusimple_row_anchor
from PIL import Image


test_img = 'tmp/1.jpg'

if __name__ == "__main__":
    torch.backends.cudnn.benchmark = True
    args, cfg = merge_config()
    dist_print('start testing...')
    assert cfg.backbone in ['18', '34', '50', '101', '152', '50next', '101next', '50wide', '101wide']

    if cfg.dataset == 'CULane':
        cls_num_per_lane = 18
    elif cfg.dataset == 'Tusimple':
        cls_num_per_lane = 56
    else:
        raise NotImplementedError

    net = parsingNet(pretrained=False, backbone=cfg.backbone, cls_dim=(cfg.griding_num + 1, cls_num_per_lane, 4),
                     use_aux=False)  # we dont need auxiliary segmentation in testing
    state_dict = torch.load(cfg.test_model, map_location='cpu')['model']
    compatible_state_dict = {}
    for k, v in state_dict.items():
        if 'module.' in k:
            compatible_state_dict[k[7:]] = v
        else:
            compatible_state_dict[k] = v

    net.load_state_dict(compatible_state_dict, strict=False)
    net.eval()

    img_transforms = transforms.Compose([
        # transforms.CenterCrop((590,1640)),
        transforms.Resize((288, 800)),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    frame = cv2.imread(test_img)
    print('Input image size:', frame.shape)
    # oir_shape = (frame_ori.shape[1],frame_ori.shape[0])
    # frame = cv2.resize(frame_ori, (1280, 720), interpolation=cv2.INTER_LINEAR)
    img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img_ = Image.fromarray(img)
    imgs = img_transforms(img_)
    imgs = imgs.unsqueeze(0)
    # imgs = imgs.cuda()  # CPU version: do not move the tensor to the GPU
    with torch.no_grad():
        out = net(imgs)

    col_sample = np.linspace(0, 800 - 1, cfg.griding_num)
    col_sample_w = col_sample[1] - col_sample[0]

    out_j = out[0].data.cpu().numpy()
    out_j = out_j[:, ::-1, :]
    prob = ssp.softmax(out_j[:-1, :, :], axis=0)
    idx = np.arange(cfg.griding_num) + 1
    idx = idx.reshape(-1, 1, 1)
    loc = np.sum(prob * idx, axis=0)
    out_j = np.argmax(out_j, axis=0)
    loc[out_j == cfg.griding_num] = 0
    out_j = loc

    for i in range(out_j.shape[1]):
        if np.sum(out_j[:, i] != 0) > 2:
            for k in range(out_j.shape[0]):
                if out_j[k, i] > 0:
                    ppp = (int(out_j[k, i] * col_sample_w * frame.shape[1] / 800) - 1, int(frame.shape[0] * (tusimple_row_anchor[56-1-k]/288)) - 1 )
                    cv2.circle(frame, ppp, 5, (0, 255, 0), -1)
    # frame = cv2.resize(frame,oir_shape,interpolation=cv2.INTER_LINEAR)
    print('Output image size:', frame.shape)
    cv2.imshow('result',frame)
    cv2.waitKey(0)

The two changes relative to the original script: remove the trailing .cuda() from the net = parsingNet(...) construction, and comment out the imgs = imgs.cuda() line so the input tensor stays on the CPU. The checkpoint is already loaded with torch.load(cfg.test_model, map_location='cpu'), so nothing else needs to change.
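
If you want a single script that uses the GPU when one is available and falls back to the CPU otherwise, a device-agnostic variant is also possible. The sketch below is my own addition (not from the original post) and reuses parsingNet, cfg, and imgs from the listing above:

import torch

# Pick the device at runtime: CUDA if available, otherwise CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Build the network on the chosen device instead of hard-coding .cuda().
net = parsingNet(pretrained=False, backbone=cfg.backbone,
                 cls_dim=(cfg.griding_num + 1, cls_num_per_lane, 4),
                 use_aux=False).to(device)

# map_location=device loads the checkpoint tensors onto the same device as the model;
# then build compatible_state_dict and call net.load_state_dict as in the listing above.
state_dict = torch.load(cfg.test_model, map_location=device)['model']

# Move the input batch to the same device before inference.
imgs = imgs.to(device)
with torch.no_grad():
    out = net(imgs)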

Error: cannot import name 'softmax' from 'scipy.special'
This means the installed SciPy is too old: scipy.special.softmax was only added in SciPy 1.2.0, so the package needs to be upgraded.
Check the installed version first:

import scipy
print(scipy.__version__)
# prints 1.1.0 on this machine

Solution: upgrade SciPy to the latest version:

pip install scipy -U

After upgrading, the installed version is 1.7.3 and the import works.
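
Alternatively, if upgrading SciPy is not convenient, the same result can be computed with plain NumPy. This is a minimal sketch I added (the helper name np_softmax is my own); it mirrors the ssp.softmax(out_j[:-1, :, :], axis=0) call in the script:

import numpy as np

def np_softmax(x, axis=None):
    # Numerically stable softmax: subtract the max before exponentiating.
    x_max = np.max(x, axis=axis, keepdims=True)
    e = np.exp(x - x_max)
    return e / np.sum(e, axis=axis, keepdims=True)

# Sanity check on an array with the same layout as out_j[:-1, :, :]
# (grid cells, row anchors, lanes): every column sums to 1 along axis=0.
x = np.random.rand(100, 56, 4)
print(np.allclose(np_softmax(x, axis=0).sum(axis=0), 1.0))  # True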
