Deploying a YOLOv5 Model with Qt + OpenVINO

1. OpenVINO Overview

OpenVINO is a deep learning toolkit that Intel developed for its own hardware platforms. It bundles an inference library, a model optimizer, and a range of other features related to deploying deep learning models.

The OpenVINO™ toolkit is a comprehensive toolkit for quickly developing applications and solutions that address a variety of tasks, including emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, and more. Based on the latest generations of artificial neural networks, including convolutional neural networks (CNNs) as well as recurrent and attention-based networks, it scales computer vision and non-vision workloads across Intel® hardware, maximizing performance. It accelerates applications with high-performance AI and deep learning inference from the edge to the cloud.

Installation:

https://nickhuang1996.blog.csdn.net/article/details/81385008

2. Qt Configuration

After installation, add the following to the project's .pro file:

OPENVINO_ABS_PATH=$$quote(C:/Program Files (x86)/Intel/openvino_2021.4.752)
OPENVINOPATH=$$OPENVINO_ABS_PATH/inference_engine
OPENVINO_LIB=$$OPENVINOPATH/lib/intel64
OPENVINO_INC=$$OPENVINOPATH/include

INCLUDEPATH += $$OPENVINO_INC

CONFIG(debug, debug|release) {
LIBS += -L$$OPENVINO_LIB/Debug
LIBS += -linference_engined \
        -linference_engine_c_apid \
        -linference_engine_transformationsd
} else {
LIBS += -L$$OPENVINO_LIB/Release
LIBS += -linference_engine \
        -linference_engine_c_api \
        -linference_engine_transformations
}


OPENCVPATH=$$OPENVINO_ABS_PATH/opencv
OPENCV_LIB=$$OPENCVPATH/lib
OPENCV_INC=$$OPENCVPATH/include
INCLUDEPATH += $$OPENCV_INC

CONFIG(debug, debug|release) {
LIBS += -L$$OPENCV_LIB
LIBS += -lopencv_core453d \
        -lopencv_dnn453d \
        -lopencv_ml453d \
        -lopencv_highgui453d \
        -lopencv_imgcodecs453d \
        -lopencv_imgproc453d \
        -lopencv_video453d \
        -lopencv_videoio453d
}else{
LIBS += -L$$OPENCV_LIB
LIBS += -lopencv_core453 \
        -lopencv_dnn453 \
        -lopencv_ml453 \
        -lopencv_highgui453 \
        -lopencv_imgcodecs453 \
        -lopencv_imgproc453 \
        -lopencv_video453 \
        -lopencv_videoio453
}
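With these paths in place, a quick way to confirm that the OpenVINO headers and import libraries are actually being picked up is to build a small console test against the same inference_engine API used in the plugin below. This is a minimal sketch, assuming the 2021.4 C++ API; it simply prints the linked Inference Engine build and the devices OpenVINO can see:

#include <inference_engine.hpp>
#include <iostream>
#include <string>

int main()
{
    // Report the Inference Engine build the application linked against
    const InferenceEngine::Version *ver = InferenceEngine::GetInferenceEngineVersion();
    std::cout << "Inference Engine build: " << ver->buildNumber << std::endl;

    // List the devices (CPU, GPU, ...) visible to this OpenVINO installation
    InferenceEngine::Core ie;
    for (const std::string &device : ie.GetAvailableDevices())
    {
        std::cout << "available device: " << device << std::endl;
    }
    return 0;
}

If this builds and runs, both the include path and the library path in the .pro file are correct.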

3. YOLOv5 Deployment Code
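The plugin is split into a header, Yolo5Plugin.h, which declares the plugin class against the host application's DefectInterface, and the corresponding implementation file; both are listed below.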

#ifndef YOLO5PLUGIN_H
#define YOLO5PLUGIN_H

#include "DefectInterface.h"
#include <inference_engine.hpp>
#include <opencv2/dnn/dnn.hpp>
#include <opencv2/opencv.hpp>

using namespace InferenceEngine;
using namespace cv;

class QIcon;

struct T_Yolov5sYamlData
{
    QVector<QString> names;
    QVector<bool> shows;
};

class Yolo5Plugin : public QObject, public DefectInterface
{
    /* Note: QObject must be inherited here; inheriting it in the parent class causes a compile error */
    Q_OBJECT
#if QT_VERSION >= 0x050000
    Q_PLUGIN_METADATA(IID IPlugin_iid FILE "Yolo5Plugin.json")
#endif // QT_VERSION >= 0x050000
    Q_INTERFACES(DefectInterface)

public:
    typedef struct
    {
        float prob;
        int label;
        QRectF rect;
    } T_Object;

    Yolo5Plugin();
    ~Yolo5Plugin();
    PluginSpec::E_TypeFlag typeFlag() const;
    QIcon icon() const;

    int init(const T_DefectSpec &spec);
    bool isInitialized() const;
    int run(const T_DefectSpec &spec, T_DefectResult &result);
    DefectInterface *newInstance();

private:
    double sigmoid(double x);

    std::vector<int> get_anchors(int net_grid);

    bool parse_yolov5(const Blob::Ptr &blob, int net_grid,
                      float cof_threshold, int item_size,
                      std::vector<cv::Rect> &o_rect,
                      std::vector<float> &o_rect_cof,
                      std::vector<float> &o_labels);

    void resize(const cv::Mat &graySrc, cv::Mat &dst, double &scale);

    bool processFrame(const cv::Mat &gray, int item_size, std::vector<T_Object> &detected_objects);

private:
    std::string _input_name;
    ExecutableNetwork _network;
    OutputsDataMap _outputinfo;
    T_Yolov5sYamlData _yolov5sYamlData;
    bool m_isInitialized;
    QString m_modelPath;
};

#endif // YOLO5PLUGIN_H
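The implementation file: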

#include "Yolo5Plugin.h"
#include <memory>
#include <iostream>
#include <fstream>
#include <istream>
#include <string>
#include <vector>
#include <mutex>
#include <thread>
#include <QPointF>
#include <QRect>
#include <QSizeF>
#include <QTextCodec>
#include <QDebug>
#include <QIcon>
#include <QDir>
#include <QTime>
#include <omp.h>

#include "yaml-cpp/yaml.h"

using namespace std;

static std::mutex mtx;
static omp_lock_t lock_use;
static omp_lock_t s_lock;

#define IMG_LEN 640
#define IMG_LEN_F  640.0f

//https://github.com/fb029ed/yolov5_cpp_openvino/blob/master/cvmart_competition/openvino_cpp_code/my_detector.cpp

bool isFileExist(const QString &fullFilePath)
{
    QFileInfo fileInfo(fullFilePath);
    return fileInfo.exists();
}

bool isDirExist(const QString &fullPath)
{
    if(fullPath.isEmpty())
    {
        return false;
    }
    QDir dir(fullPath);
    return dir.exists();
}

bool parseClass(const string &path, T_Yolov5sYamlData &data)
{
    data.names.clear();
    data.shows.clear();
    try
    {
        YAML::Node tRoot = YAML::LoadFile(path);
        YAML::Node tNames = tRoot["Names"];
        for(int i = 0; i < tNames.size(); ++i)
        {
            std::string name = tNames[i].as<std::string>();
            data.names.append(QString::fromStdString(name));
        }
        if(tRoot["Shows"])
        {
            for(int i = 0; i < tRoot["Shows"].size(); ++i)
            {
                bool isShow = tRoot["Shows"][i].as<bool>();
                data.shows.append(isShow);
            }
        }
    }
    catch (YAML::Exception &e)
    {
        const char *err_msg = e.what();
        qDebug() << "exception caught:" << err_msg;
        return false;
    }
    catch (...)
    {
        qDebug() << "exception caught:  unknown";
        return false;
    }
    return true;
}
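
/* parseClass() above expects yolov5s.yaml to contain a "Names" sequence with one entry per
   class and an optional "Shows" sequence of booleans that controls whether a class is
   reported. An illustrative example (the class names are placeholders, not from the
   original model):

   Names:
     - scratch
     - dent
   Shows:
     - true
     - false
*/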

Yolo5Plugin::Yolo5Plugin()
    : m_isInitialized(false), m_modelPath("")
{

}

Yolo5Plugin::~Yolo5Plugin()
{

}

PluginSpec::E_TypeFlag Yolo5Plugin::typeFlag() const
{
    return PluginSpec::E_TypeFlag::Defect;
}

QIcon Yolo5Plugin::icon() const
{
    return QIcon(":/Yolo5Plugin.png");
}

int Yolo5Plugin::init(const T_DefectSpec &spec)
{
    qDebug() << spec.arguments;
    /* Reload the model only if the path argument changed; otherwise skip re-initialization */
    QString dirPath = spec.arguments.value("-path").toString();
    if(m_modelPath == dirPath)
    {
        qDebug() << "Initialized";
        return 0;
    }
    m_modelPath = dirPath;
    m_errmsg = "";

    if(!isDirExist(dirPath))
    {
        m_errmsg = "The model path does not exist";
        return -1;
    }
    /* Model file paths */
    QTextCodec *code = QTextCodec::codecForName("GB2312");
    std::string modelPath = code->fromUnicode(dirPath).data();
    string xml_path = modelPath + "/yolov5s.xml";
    string bin_path = modelPath + "/yolov5s.bin";
    string class_path = modelPath + "/yolov5s.yaml";
    if(!isFileExist(QString::fromStdString(xml_path)))
    {
        m_errmsg = "yolov5s.xml does not exist";
        return -1;
    }
    if(!isFileExist(QString::fromStdString(bin_path)))
    {
        m_errmsg = "yolov5s.bin does not exist";
        return -1;
    }
    if(!isFileExist(QString::fromStdString(class_path)))
    {
        m_errmsg = "yolov5s.yaml does not exist";
        return -1;
    }
//    qDebug() << "xml_path:" << QString::fromStdString(xml_path);

    bool b = parseClass(class_path, _yolov5sYamlData);
    if(!b)
    {
        m_errmsg = "yolov5s.yaml parsing failed";
        return -1;
    }
    /* Initialize the inference engine */
    Core ie;
    CNNNetwork network = ie.ReadNetwork(xml_path, bin_path);
    network.setBatchSize(1);

    /* Input configuration */
    InputInfo::Ptr input_info = network.getInputsInfo().begin()->second;
    _input_name = network.getInputsInfo().begin()->first;
    input_info->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
    input_info->setPrecision(Precision::FP32);
    input_info->getInputData()->setLayout(Layout::NCHW);

    ICNNNetwork::InputShapes inputShapes = network.getInputShapes();
    SizeVector &inSizeVector = inputShapes.begin()->second;
    network.reshape(inputShapes);

    /* Output configuration */
    _outputinfo = OutputsDataMap(network.getOutputsInfo());
    for (auto &output : _outputinfo)
    {
        output.second->setPrecision(Precision::FP32);
    }
    /* Load the network onto the device */
    _network = ie.LoadNetwork(network, "CPU");

    m_isInitialized = true;

    return 0;
}

bool Yolo5Plugin::isInitialized() const
{
    return m_isInitialized;
}

int Yolo5Plugin::run(const T_DefectSpec &spec, T_DefectResult &result)
{
    if(!isInitialized())
    {
        m_errmsg = "uninitialized";
        return -1;
    }
    try
    {
        /* Parse arguments */
        double confidence_thr = spec.arguments.value("-confidenceThr").toString().toDouble();
        int split_col_num = spec.arguments.value("-split_col_num").toString().toInt();
        int split_row_num = spec.arguments.value("-split_row_num").toString().toInt();

        /* Crop the full image to the ROI */
        int maxW = spec.frameParam.width;
        int maxH = spec.frameParam.height;
        Mat grayMat(maxH, maxW, CV_8UC1, spec.pImage);
        if(!spec.rect.isEmpty())
        {
            maxW = spec.rect.width();
            maxH = spec.rect.height();
            grayMat = grayMat(cv::Rect(spec.rect.x(), spec.rect.y(),
                                       spec.rect.width(), spec.rect.height()));
        }
        /* Split the image into tiles */
        vector<Mat> tiles;
        vector<int> baseXs;
        vector<int> baseYs;
        int subWidth  =  maxW / split_col_num;
        int subHeight =  maxH / split_row_num;
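        /* Note: the integer division above drops any remainder, so pixels in the rightmost
           maxW % split_col_num columns and bottom maxH % split_row_num rows fall outside
           the tiles and are not inspected. */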
        for(int i = 0; i < split_row_num; i++)
        {
            for(int j = 0; j < split_col_num; ++j)
            {
                int baseX = j * subWidth;
                int baseY = i * subHeight;
                Rect tile_rect = cv::Rect(baseX, baseY, subWidth, subHeight);
                tiles.push_back(grayMat(tile_rect));
                baseXs.push_back(baseX);
                baseYs.push_back(baseY);
            }
        }
        auto calFunction = [ & ](int i, const Mat gray, int baseX, int baseY)
        {
            int item_size = _yolov5sYamlData.names.size() + 5;   // number of classes + 5 (x, y, w, h, objectness)
            std::vector<Yolo5Plugin::T_Object> detected_objects;
            processFrame(gray, item_size,  detected_objects);
            mtx.lock();
            for(int i = 0; i < detected_objects.size(); ++i)
            {
                int label = detected_objects.at(i).label;
                float prob = detected_objects.at(i).prob;
                if(_yolov5sYamlData.shows.value(label, true))
                {
                    QRectF box        = detected_objects.at(i).rect;
                    QString name      = _yolov5sYamlData.names.value(label, "None");
                    /* Confidence threshold */
                    if(prob > confidence_thr)
                    {
                        /* Merge results back into full-image coordinates */
                        double rectX = box.x() + baseX;
                        double rectY = box.y() + baseY;
                        double w = box.width();
                        double h = box.height();
                        if(!spec.rect.isEmpty())
                        {
                            result.boxVec.push_back(QRectF(rectX + spec.rect.x(), rectY + spec.rect.y(), w, h));
                        }
                        else
                        {
                            result.boxVec.push_back(QRectF(rectX, rectY, w, h));
                        }
                        result.nameVec.push_back(name);
                        result.confidenceVec.push_back(prob);
                    }
                }
            }
            mtx.unlock();
        };
        /* Multi-threaded detection over the tiles */
        vector<std::thread> threads;
        for (int i = 0; i < tiles.size(); i++)
        {
            threads.push_back(std::thread(calFunction, i, tiles[i], baseXs.at(i), baseYs.at(i)));
        }
        for (int i = 0; i < threads.size(); i++)
        {
            threads[i].join();
        }
    }
    catch(cv::Exception &e )
    {
        const char *err_msg = e.what();
        m_errmsg = QString(err_msg);
        qDebug() << "exception caught:" << err_msg;
        return -1;
    }
    catch(...)
    {
        m_errmsg = "exception caught:  unknown";
        qDebug() << "exception caught:  unknown";
        return -1;
    }
    return 0;
}

DefectInterface *Yolo5Plugin::newInstance()
{
    return new Yolo5Plugin();
}

double Yolo5Plugin::sigmoid(double x)
{
    return (1 / (1 + exp(-x)));
}
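
/* Default YOLOv5 COCO anchors grouped by output grid size: the 80x80 grid corresponds to
   stride 8, 40x40 to stride 16 and 20x20 to stride 32 for a 640x640 input. */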

std::vector<int> Yolo5Plugin::get_anchors(int net_grid)
{
    vector<int> anchors;
    int a80[6] = { 10, 13, 16, 30, 33, 23 };
    int a40[6] = { 30, 61, 62, 45, 59, 119 };
    int a20[6] = { 116, 90, 156, 198, 373, 326 };
    if(net_grid == 80)
    {
        anchors.assign(a80, a80 + 6);
    }
    else if(net_grid == 40)
    {
        anchors.assign(a40, a40 + 6);
    }
    else if(net_grid == 20)
    {
        anchors.assign(a20, a20 + 6);
    }
    return anchors;
}

bool Yolo5Plugin::parse_yolov5(const Blob::Ptr &blob, int net_grid,
                               float cof_threshold, int item_size,
                               std::vector<Rect> &o_rect,
                               std::vector<float> &o_rect_cof,
                               std::vector<float> &o_labels)
{
    vector<int> anchors = get_anchors(net_grid);
    LockedMemory<const void> blobMapped = as<MemoryBlob>(blob)->rmap();
    const float *output_blob = blobMapped.as<float *>();

    size_t gi =  net_grid * item_size;
    size_t ggi = net_grid * gi;
    size_t anchor_n = 3;
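
    /* The output blob is laid out as [anchor][grid_y][grid_x][item], where each item holds
       x, y, w, h, objectness and then one score per class (item_size = classes + 5). */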

//    omp_set_num_threads(8);
//    #pragma omp parallel for
    for (int n = 0; n < anchor_n; ++n)
    {
        for (int i = 0; i < net_grid; ++i)
        {
            for (int j = 0; j < net_grid; ++j)
            {
                double box_prob = output_blob[n * ggi + i * gi + j * item_size + 4];
                box_prob = sigmoid(box_prob);
                /* If the box (objectness) confidence fails the threshold, the combined confidence cannot pass either */
                if (box_prob < cof_threshold)
                {
                    continue;
                }
                /* The outputs are center-point coordinates; convert them to corner coordinates */
                double x = output_blob[n * ggi + i * gi + j * item_size + 0];
                double y = output_blob[n * ggi + i * gi + j * item_size + 1];
                double w = output_blob[n * ggi + i * gi + j * item_size + 2];
                double h = output_blob[n * ggi + i * gi + j * item_size + 3];

                double max_prob = 0;
                int idx = 0;
                for (int t = 5; t < item_size; ++t)
                {
                    double tp = output_blob[n * ggi + i * gi + j * item_size + t];
                    tp = sigmoid(tp);
                    if (tp > max_prob)
                    {
                        max_prob = tp;
                        idx = t - 5;
                    }
                }
                double cof = box_prob * max_prob;
                if (cof < cof_threshold)
                {
                    continue;
                }
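
                /* YOLOv5 box decoding: the sigmoid-squashed offsets are scaled by 2, shifted by
                   the grid cell and multiplied by the stride (640 / net_grid); width and height
                   are (2*sigmoid)^2 times the matching anchor dimensions. */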

                x = (sigmoid(x) * 2 - 0.5 + j) * 640.0f / net_grid;
                y = (sigmoid(y) * 2 - 0.5 + i) * 640.0f / net_grid;
                w = pow(sigmoid(w) * 2, 2) * anchors[2 * n];
                h = pow(sigmoid(h) * 2, 2) * anchors[2 * n + 1];
                double r_x = x - w / 2;
                double r_y = y - h / 2;
                Rect rect = Rect(int(r_x), int(r_y), int(w), int(h));
                o_rect.push_back(rect);
                o_rect_cof.push_back(cof);
                o_labels.push_back(idx);
            }
        }
    }
    return true;
}

void Yolo5Plugin::resize(const Mat &graySrc, Mat &dst, double &scale)
{
    Mat src = graySrc;
    cvtColor(src, src, COLOR_GRAY2RGB);
    int width = src.cols;
    int height = src.rows;
    scale = min(640.0 / width, 640.0 / height);
    int w = round(width * scale);
    int h = round(height * scale);
    Mat resize_img;
    cv::resize(src, resize_img, Size(w, h));  // 640*X or X*640
    int top = 0, bottom = 0, left = 0, right = 0;
    if (w > h)
    {
        top = (w - h) / 2;
        bottom = (w - h) - top;
    }
    else if (h > w)
    {
        left = (h - w) / 2;
        right = (h - w) - left;
    }

    copyMakeBorder(resize_img, resize_img, top, bottom, left, right, BORDER_CONSTANT, Scalar(114, 114, 114));  // 640*640
    cvtColor(resize_img, resize_img, cv::COLOR_BGR2RGB);
    dst = resize_img;
}

bool Yolo5Plugin::processFrame(const Mat &gray, int item_size,
                               std::vector<Yolo5Plugin::T_Object> &detected_objects)
{
    /* Resize to 640x640 with letterbox padding and convert the color channels */
    Mat src = gray;
    cvtColor(src, src, COLOR_GRAY2RGB);
    int width = src.cols;
    int height = src.rows;
    double scale = min(640.0 / width, 640.0 / height);
    int w = round(width * scale);
    int h = round(height * scale);
    Mat resize_img;
    cv::resize(src, resize_img, Size(w, h));  // 640*X or X*640
    int top = 0, bottom = 0, left = 0, right = 0;
    if (w > h)
    {
        top = (w - h) / 2;
        bottom = (w - h) - top;
    }
    else if (h > w)
    {
        left = (h - w) / 2;
        right = (h - w) - left;
    }

    copyMakeBorder(resize_img, resize_img, top, bottom, left, right, BORDER_CONSTANT, Scalar(114, 114, 114));  // 640*640
    cvtColor(resize_img, resize_img, cv::COLOR_BGR2RGB);

    /* Create the inference request and fill the input blob */
    InferRequest infer_request = _network.CreateInferRequest();
    Blob::Ptr frameBlob = infer_request.GetBlob(_input_name);

    size_t img_size = IMG_LEN * IMG_LEN;
    InferenceEngine::LockedMemory<void> blobMapped = InferenceEngine::as<InferenceEngine::MemoryBlob>(frameBlob)->wmap();
    float *blob_data = blobMapped.as<float *>();
//    omp_set_num_threads(4);
//    #pragma omp parallel for
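    /* Copy the letterboxed image into the input blob: HWC uint8 pixels become
       NCHW float32 values normalized to [0, 1]. */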
    for (int row = 0; row < IMG_LEN; row++)
    {
        for (int col = 0; col < IMG_LEN; col++)
        {
            for (int ch = 0; ch < 3; ch++)
            {
                blob_data[img_size * ch + row * IMG_LEN + col] = float(resize_img.at<Vec3b>(row, col)[ch]) / 255.0f;
            }
        }
    }
    /* Run inference */
    infer_request.Infer();

    /* Collect the inference outputs */
    int net_grids[3] = {80, 40, 20};
    vector<Blob::Ptr> blobs;
    for (auto &output : _outputinfo)
    {
        auto output_name = output.first;
        Blob::Ptr blob = infer_request.GetBlob(output_name);
        blobs.push_back(blob);
    }

    float cof_threshold = 0.25;

    vector<Rect> o_rect;
    vector<float> o_rect_cof;
    vector<int> labels;

    QTime globalTime2;
    globalTime2.start();
    for(int i = 0; i < blobs.size(); ++i)
    {
        if(i < 3)
        {
            vector<Rect> o_rect_temp;
            vector<float> o_rect_cof_temp;
            vector<float> o_labels_temp;
            parse_yolov5(blobs[i], net_grids[i], cof_threshold, item_size,
                         o_rect_temp, o_rect_cof_temp, o_labels_temp);
            o_rect.insert(o_rect.end(), o_rect_temp.begin(), o_rect_temp.end());
            o_rect_cof.insert(o_rect_cof.end(), o_rect_cof_temp.begin(), o_rect_cof_temp.end());
            labels.insert(labels.end(), o_labels_temp.begin(), o_labels_temp.end());
        }
    }

    /* NMS removes redundant boxes for the same object */
    float nms_area_threshold = 0.45;
    vector<int> final_id;
    dnn::NMSBoxes(o_rect, o_rect_cof, cof_threshold, nms_area_threshold, final_id);

    int num = final_id.size();
    for (int i = 0; i < num; ++i)
    {
        int label = labels[final_id[i]];
        Rect resize_rect = o_rect[final_id[i]];
        int find_col = int(max(resize_rect.x - left, 0) / scale);
        int find_row = int(max(resize_rect.y - top, 0) / scale);
        int find_w = int(resize_rect.width / scale);
        int find_h = int(resize_rect.height / scale);
        QRectF rawrect(find_col, find_row, find_w, find_h);
        float cof = o_rect_cof[final_id[i]];
        detected_objects.push_back(T_Object{cof, label, rawrect});
    }
    return true;
}

#if QT_VERSION < 0x050000
    Q_EXPORT_PLUGIN2(Yolo5Plugin, Yolo5Plugin)
#endif // QT_VERSION < 0x050000
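
For completeness, a host application would typically load this plugin at runtime through QPluginLoader. The sketch below is illustrative only: the plugin file name and the surrounding host wiring are assumptions, not part of the sources above; qobject_cast works because the plugin declares Q_INTERFACES(DefectInterface).

#include <QPluginLoader>
#include <QDebug>
#include "DefectInterface.h"

/* Hypothetical host-side loader; the DLL path is an assumption. */
DefectInterface *loadYolo5Plugin(const QString &pluginPath)
{
    QPluginLoader loader(pluginPath);   // e.g. "plugins/Yolo5Plugin.dll"
    QObject *obj = loader.instance();   // loads the library and creates the root object
    if (!obj)
    {
        qDebug() << "Plugin load failed:" << loader.errorString();
        return nullptr;
    }
    return qobject_cast<DefectInterface *>(obj);
}

The returned DefectInterface pointer can then be driven through init() and run() exactly as the plugin implements them.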
