// AMD platform: hardware-accelerated video decoding with FFmpeg + VAAPI

#include "YfcfPlayer.h"
#include "err_code.hpp"
extern "C"
{
    #include "libavutil/opt.h"
    #include "libavutil/imgutils.h"
}

// Construct an idle player: no FFmpeg contexts yet, default queue limits,
// all run/log flags cleared. Playback starts only via strtPlay().
YfcfPlayer::YfcfPlayer()
{
    m_hw_device_ctx = nullptr;
    m_decoder_ctx = nullptr;
    mW = 0;
    mH = 0;
    mMaxLen4BufEnc = MAX_BUF_LEN_DEFAULT;
    mMaxLen4BufDec = MAX_BUF_LEN_DEFAULT;
    mShouldEnd = false;
    mLogSt4Dec = false;
    mLogSt4Enc = false;
}

// Destructor is intentionally empty.
// NOTE(review): worker threads are NOT joined here — callers must invoke
// stopPlay() before destroying the object, otherwise mTidReader/mTidDecoder
// may outlive this instance.
YfcfPlayer::~YfcfPlayer()
{
}
static void reader_(void* arg){
    YfcfPlayer* p = (YfcfPlayer*)arg;
    p->doRead();

}
static void decoder_(void* arg){
    YfcfPlayer* p = (YfcfPlayer*)arg;
    p->doDecode();
}

static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
    const enum AVPixelFormat *pix_fmts)
{
    const enum AVPixelFormat *p;

    for (p = pix_fmts; *p != -1; p++) {
        if (*p == AV_PIX_FMT_VAAPI)
            return *p;
    }

    LOGE(TAG, "Failed to get HW surface format.\n");
    return AV_PIX_FMT_NONE;
}
// Start playback of _url. useGpuBuf selects the output path: true keeps
// decoded frames on the GPU (mBufDec), false downloads them to CPU memory.
// Arms the run flags and launches the reader thread; always returns 0.
int32_t YfcfPlayer::strtPlay(const string& _url,bool useGpuBuf){
    mUrl = _url;
    mShouldEnd = false;
    mSwitch = true;
    mSwitch4Dec = true;
    mUseGpuBUf = useGpuBuf;
    mTidReader = thread(reader_, this);
    return 0;
}

int32_t YfcfPlayer::doRead(){
rst:
    int32_t ret = 0;
    int32_t lVideoIdx = -1;
    AVCodec *decoder = NULL;
    AVPacket* lAvPack = NULL;
    enum AVHWDeviceType type = AV_HWDEVICE_TYPE_VAAPI;
    AVDictionary *lDic = NULL;  
    AVFormatContext *input_ctx = NULL;   
    std::unique_lock<mutex> lLock(mLock4BufEnc,std::defer_lock);      
    av_dict_set(&lDic, "protocol_whitelist", "udp,rtp,file", 0); 
    ret = avformat_open_input(&input_ctx, mUrl.c_str(), NULL, &lDic) ;
    if( ret != 0) {
        LOGE(TAG, "Cannot open input file %s %d\n", mUrl.c_str(),ret);
        ret = -1;
        goto fail;
    }
    if(avformat_find_stream_info(input_ctx, NULL) < 0) {
        LOGE(TAG, "Cannot find input stream information.\n");
        ret = -2;
        goto fail;
    }

    /* find the video stream information */
    lVideoIdx = av_find_best_stream(input_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, (const AVCodec **)&decoder, 0);
    if (lVideoIdx < 0) {
        LOGE(TAG,"Cannot find a video stream in the input file\n");
        ret = -3;
        goto fail;
    }
    mVdTimebase = input_ctx->streams[lVideoIdx]->time_base;
    if(input_ctx->streams[lVideoIdx]->avg_frame_rate.num==0||
        input_ctx->streams[lVideoIdx]->avg_frame_rate.den==0){
        LOGE(TAG,"invalid fps\n");
        ret = -3;
        goto fail;
    }
    mFps = input_ctx->streams[lVideoIdx]->avg_frame_rate.num/input_ctx->streams[lVideoIdx]->avg_frame_rate.den;
   
    for (size_t i = 0;; i++) {
        const AVCodecHWConfig *config = avcodec_get_hw_config(decoder, i);
        if (!config) {
            LOGE(TAG, "Decoder %s does not support device type %s.\n",
                decoder->name, av_hwdevice_get_type_name(AV_HWDEVICE_TYPE_VAAPI));
            ret = -4;
            goto fail;
        }
        if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
            config->device_type == AV_HWDEVICE_TYPE_VAAPI) {
            m_hw_pix_fmt = config->pix_fmt;
            break;
        }
    }
    if (!(m_decoder_ctx = avcodec_alloc_context3(decoder))){
        LOGE(TAG,"fail to alloc dec ctx");
        ret = -5;
        goto fail;
    }
    ret = avcodec_parameters_to_context(m_decoder_ctx, input_ctx->streams[lVideoIdx]->codecpar);
    if (ret < 0)
    {
        LOGE(TAG,"vcodec_parameters_to_context fail %d");
        goto fail;
    }
    //填入回调函数 通过这个函数 编解码器能够知道显卡支持的像素格式
    m_decoder_ctx->get_format = get_hw_format;
    mW = m_decoder_ctx->width;
    mH = m_decoder_ctx->height;

    ret = av_hwdevice_ctx_create(&m_hw_device_ctx,AV_HWDEVICE_TYPE_VAAPI,
        NULL, NULL, 0);
    if (ret< 0) {
        LOGE(TAG, "Failed to create specified HW device.\n");
        goto fail;
    }
    //绑定编解码器上下文和硬件设备信息上下文
    m_decoder_ctx->hw_device_ctx = av_buffer_ref(m_hw_device_ctx);
   //绑定完成后 打开编解码器
    if ((ret = avcodec_open2(m_decoder_ctx, decoder, NULL)) < 0) {
        LOGE(TAG, "Failed to open codec for stream #%u\n", lVideoIdx);
        goto fail;
    }
   
    mTidDecoder = thread(decoder_,this);
   
    while (mSwitch)
    {
        lAvPack = av_packet_alloc();
        if(UNLIKELY(lAvPack==NULL)){
            LOGE(TAG,"this should never happen");
        }
        ret = av_read_frame(input_ctx, lAvPack);
        if(ret!=0){
            av_packet_free(&lAvPack);
            LOGW(TAG,"av_read_frame err %d %d %d %d",ret,AVERROR(EINVAL),AVERROR(ENOMEM),AVERROR_EOF);
            break;
        }
        if (lVideoIdx != lAvPack->stream_index){
            av_packet_free(&lAvPack);
            continue;
        }
        lLock.lock();
        if(mBufEnc.size()>mMaxLen4BufEnc){
            if(mLogSt4Enc == false){
                LOGW(TAG,"BUF FULL");
                mLogSt4Enc = true;
            }
            mCond4BufEnc.wait(lLock);
        }else{
            if(mLogSt4Enc == true){
                LOGW(TAG,"BUF FULL drained");
                mLogSt4Enc = false;
            }
        }
        mBufEnc.push_back(lAvPack);
      
        lLock.unlock();
        mCond4BufEnc.notify_one();
    }
    if(ret != AVERROR_EOF){
        mSwitch4Dec = false;
    }
    mSwitch = false;
    mCond4BufEnc.notify_one();
    
    LOGI(TAG,"reader will end");
    mTidDecoder.join();
    
fail:
    avcodec_free_context(&m_decoder_ctx);
    avformat_close_input(&input_ctx);
    av_buffer_unref(&m_hw_device_ctx);
    LOGI(TAG,"reader end");
    if(!mShouldEnd){
        mSwitch4Dec = mSwitch = true;
        goto rst;
    }
        
    return ret;
}

#include "MiscUtils.h"
/// Decoder-thread loop: pops compressed packets from mBufEnc, feeds them to
/// the VAAPI decoder, and publishes output to the consumer either as GPU-side
/// AVFrames (mBufDec, when mUseGpuBUf) or as NV12 buffers copied to system
/// memory (mBufDecoded). Runs until mSwitch4Dec drops or a decode error
/// occurs; on exit it drains and frees both queues. Returns the last FFmpeg
/// status code (0 on clean stop).
int32_t YfcfPlayer::doDecode(){
    int32_t ret = 0;
    AVPacket* lpAvPack = NULL;
    AVFrame* lpAvFrame_dec = NULL;  // frame as produced by the decoder (VAAPI surface)
    AVFrame* lpAvFrame_cpu = NULL;  // system-memory copy (CPU path only)
    AVFrame* lpAvFrame_tmp = NULL;  // whichever of the two holds the usable pixels
    std::unique_lock<mutex> lLock(mLock4BufEnc,std::defer_lock);
    std::unique_lock<mutex> lLock4Dec(mLock4BufDec,std::defer_lock);
    uint8_t *buffer = NULL;         // contiguous NV12 buffer handed to mBufDecoded
    int size = 0;

   
    while (mSwitch4Dec)
    {
       
        lLock.lock();
        
        // Consumer side of mBufEnc: wait for a packet, or quit once the
        // reader has stopped (mSwitch false) and the queue is empty.
        if(mBufEnc.size()==0){
            if(mSwitch == false){
                lLock.unlock();
                break;
            }
            
            mCond4BufEnc.wait(lLock);
            lLock.unlock();
            continue;
        }
        lpAvPack = mBufEnc.front();
       
        mBufEnc.pop_front();
        
        lLock.unlock();
        mCond4BufEnc.notify_one();  // wake the reader if it was blocked on a full queue
       
        uint64_t lastT = 0;
        uint64_t curT = 0;
        lastT= bag_get_boot_time();
        // Submit the packet. NOTE(review): on EAGAIN this busy-spins
        // resubmitting the same packet without draining pending frames first —
        // verify the decoder cannot stall here.
        while(true){
            
            ret = avcodec_send_packet(m_decoder_ctx, lpAvPack);
            if (ret == AVERROR(EAGAIN)) {
                
                continue;
            }else if (ret == 0 )
            { 
                break;
            }
            LOGE(TAG,"DEC ERR %d %d %d %d",ret,AVERROR(EINVAL),AVERROR(ENOMEM),AVERROR_EOF);
            goto err;
        }
        av_packet_free(&lpAvPack);
        
        lpAvPack = NULL;
        lpAvFrame_dec = av_frame_alloc();
        if(mUseGpuBUf==false)
            lpAvFrame_cpu = av_frame_alloc();
        if(lpAvFrame_dec == NULL ){
            LOGE(TAG,"NO MEM");
            goto err;
        }
        if(mUseGpuBUf==false && lpAvFrame_cpu == NULL){
            LOGE(TAG,"NO MEM");
            goto err;
        }
        // Single receive per packet: any non-zero return (e.g. EAGAIN while
        // the decoder buffers input) just moves on to the next packet.
        ret = avcodec_receive_frame(m_decoder_ctx,lpAvFrame_dec);
        if(ret != 0 ){
            av_frame_free(&lpAvFrame_dec);
            av_frame_free(&lpAvFrame_cpu);
            continue;
        }
        
        if (lpAvFrame_dec->format == m_hw_pix_fmt) {
            if(mUseGpuBUf){
                // GPU path: hand the VAAPI frame itself to the consumer,
                // dropping it when the decoded-frame queue is over its cap.
                lLock4Dec.lock();
                if(mBufDec.size()>mMaxLen4BufDec){
                    if(mLogSt4Dec == false){
                        LOGW(TAG,"buf_decoded full %p %d,drop frame",this,mBufDec.size());
                        mLogSt4Dec = true;
                    }
                    av_frame_free(&lpAvFrame_dec);
            
                } else{
                    mBufDec.push_back(lpAvFrame_dec);
                    if(mLogSt4Dec == true){
                        LOGW(TAG,"buf_decoded full %p %d,drained",this,mBufDec.size());
                        mLogSt4Dec = false;
                    }
                }
                lLock4Dec.unlock();
                mCond4BufDec.notify_one();
                lpAvFrame_dec = NULL;  // ownership moved to the queue (or already freed)
                continue;
            }
            // CPU path: download the VAAPI surface into system memory.
            if ((ret = av_hwframe_transfer_data(lpAvFrame_cpu, lpAvFrame_dec, 0)) < 0) {
                LOGE(TAG,"Error transferring the data to system memory");
                goto err;
            }
            
            lpAvFrame_tmp = lpAvFrame_cpu;
        }
        else{
            // A software-format frame from the hardware decoder is unexpected.
            LOGE(TAG,"this should never happen");
            lpAvFrame_tmp = lpAvFrame_dec;
        }
            
        

        // Pack the frame into one contiguous NV12 allocation for the consumer.
        // NOTE(review): assumes the transferred frame really is NV12 — confirm
        // av_hwframe_transfer_data's output format on this platform.
        size = av_image_get_buffer_size(AV_PIX_FMT_NV12,lpAvFrame_tmp->width,lpAvFrame_tmp->height, 1);
        buffer = (uint8_t*)av_malloc(size);
        if(buffer == NULL){
            LOGE(TAG, "Can not alloc buffer\n");
            goto err;
        }
        ret = av_image_copy_to_buffer(buffer, size,
                                      (const uint8_t *const *)lpAvFrame_tmp->data,
                                      (const int *)lpAvFrame_tmp->linesize, AV_PIX_FMT_NV12,
                                     lpAvFrame_tmp->width, lpAvFrame_tmp->height, 1);
        if (ret < 0)
        {
            LOGE(TAG, "Can not copy image to buffer\n");
            goto err;
        }
        // Publish the CPU buffer, dropping it when the queue is over its cap.
        lLock4Dec.lock();
        if(mBufDecoded.size()>mMaxLen4BufDec){
            if(mLogSt4Dec == false){
                LOGW(TAG,"buf_decoded full %p %d,drop frame",this,mBufDecoded.size());
                mLogSt4Dec = true;
            }
            av_freep(&buffer);
            
        }else{
            mBufDecoded.push_back(buffer);
            if(mLogSt4Dec == true){
                mLogSt4Dec = false;
                LOGW(TAG,"buf_decoded full %p %d,drained",this,mBufDecoded.size());
            }
        }
        lLock4Dec.unlock();
        mCond4BufDec.notify_one();
        buffer = nullptr;  // the queue (or av_freep above) now owns the allocation

        av_frame_free(&lpAvFrame_cpu);
        av_frame_free(&lpAvFrame_dec);
        curT = bag_get_boot_time();
        // Log iterations that took unusually long (units per bag_get_boot_time).
        if(curT-lastT>25000)
            LOGI(TAG,"DEC CONSUMED %" PRIu64" %" PRIu64 " %" PRIu64,lastT,curT,curT-lastT );
        
    }
    
err:
    // Shutdown: stop the reader, then drain and free everything still queued.
    // NOTE(review): lpAvFrame_cpu is not released on this path — possible leak
    // when jumping here between its allocation and the frees above.
    LOGI(TAG,"dec will end");
    mSwitch = false;
    mCond4BufEnc.notify_one();
    lLock.lock();
    while (mBufEnc.size()>0)
    {
        AVPacket* p = mBufEnc.front();
        mBufEnc.pop_front();
        av_packet_free(&p);
        
    }
    lLock.unlock();

    lLock4Dec.lock();
    if(mUseGpuBUf==false){
        while (mBufDecoded.size()>0)
        {
            uint8_t* p = mBufDecoded.front();
            mBufDecoded.pop_front();
            av_freep(&p);
        
        }
    }else{
        while (mBufDec.size()>0)
        {
            AVFrame* p = mBufDec.front();
            mBufDec.pop_front();
            av_frame_free(&p);
        
        }
    }
    lLock4Dec.unlock();
    // Release whatever the main loop still held when it jumped here.
    if(lpAvPack)
        av_packet_free(&lpAvPack);
   
    if(lpAvFrame_dec)
        av_frame_free(&lpAvFrame_dec);

    if(buffer){
        av_freep(&buffer);
        
    }
    LOGI(TAG,"dec end");
    return ret;
}
bool YfcfPlayer::getSwitch(){
    return mSwitch;
}
void* YfcfPlayer::getOneFrame(){
    std::unique_lock<mutex> lLock4Dec(mLock4BufDec,std::defer_lock);
    void* f;
    lLock4Dec.lock();
    if(mUseGpuBUf==false){
        if(mBufDecoded.size()==0){
         
            mCond4BufDec.wait(lLock4Dec);
            if(mBufDecoded.size()==0){
                lLock4Dec.unlock();
                return nullptr;
            }
        }
      
        f = mBufDecoded.front();
        mBufDecoded.pop_front();
    }else{
        if(mBufDec.size()==0){
         
            mCond4BufDec.wait(lLock4Dec);
            if(mBufDec.size()==0){
                lLock4Dec.unlock();
                return nullptr;
            }
        }
      
        f = mBufDec.front();
        mBufDec.pop_front();
    }
    lLock4Dec.unlock();
    return f;

}

void YfcfPlayer::stopPlay(){
    mShouldEnd = true;
    mSwitch4Dec = false;
    mSwitch = false;
    mCond4BufEnc.notify_all();
    mCond4BufDec.notify_all();
    mTidReader.join();
}

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 2
    评论
使用FFmpegVAAPI播放视频需要以下步骤: 1. 初始化FFmpegVAAPI环境: ```c #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> #include <unistd.h> #include <fcntl.h> #include <pthread.h> #include <va/va.h> #include <libavformat/avformat.h> #include <libavcodec/avcodec.h> // VAAPI static VADisplay va_dpy; static VAConfigID va_config; static VAContextID va_context; static VASurfaceID va_surfaces[10]; static int va_buffers[10]; static int va_buffers_num; ``` ```c // 初始化VAAPI static bool va_init(AVCodecContext *p_codec_ctx, int num_surfaces) { VAStatus va_status; VASurfaceAttrib va_surface_attrib[2]; VAConfigAttrib va_config_attrib; int major_version, minor_version; int num_entries; VAEntrypoint *entrypoints; VAProfile *profiles; va_dpy = vaGetDisplayDRM(0); va_status = vaInitialize(va_dpy, &major_version, &minor_version); if (va_status != VA_STATUS_SUCCESS) { fprintf(stderr, "vaInitialize failed: %d\n", va_status); return false; } num_entries = vaMaxNumEntrypoints(va_dpy); entrypoints = malloc(num_entries * sizeof(*entrypoints)); vaQueryConfigEntrypoints(va_dpy, VAProfileH264High, entrypoints, &num_entries); num_entries = vaMaxNumProfiles(va_dpy); profiles = malloc(num_entries * sizeof(*profiles)); vaQueryConfigProfiles(va_dpy, profiles, &num_entries); va_status = vaCreateConfig(va_dpy, VAProfileH264High, VAEntrypointVLD, NULL, 0, &va_config); if (va_status != VA_STATUS_SUCCESS) { fprintf(stderr, "vaCreateConfig failed: %d\n", va_status); return false; } va_surface_attrib[0].type = VASurfaceAttribPixelFormat; va_surface_attrib[0].value.type = VAGenericValueTypeInteger; va_surface_attrib[0].flags = VA_SURFACE_ATTRIB_SETTABLE; va_surface_attrib[0].value.value.i = VA_FOURCC('N', 'V', '1', '2'); va_surface_attrib[1].type = VASurfaceAttribMemoryType; va_surface_attrib[1].value.type = VAGenericValueTypeInteger; va_surface_attrib[1].flags = VA_SURFACE_ATTRIB_SETTABLE; va_surface_attrib[1].value.value.i = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME; va_status = 
vaCreateSurfaces(va_dpy, VA_RT_FORMAT_YUV420, p_codec_ctx->width, p_codec_ctx->height, va_surfaces, num_surfaces, va_surface_attrib, 2); if (va_status != VA_STATUS_SUCCESS) { fprintf(stderr, "vaCreateSurfaces failed: %d\n", va_status); return false; } va_config_attrib.type = VAConfigAttribRTFormat; vaQueryConfigAttributes(va_dpy, va_config, &va_config_attrib, 1); if ((va_config_attrib.value & VA_RT_FORMAT_YUV420) == 0) { fprintf(stderr, "RT format not supported\n"); return false; } va_buffers_num = vaMaxNumBufferSlots(va_dpy); va_status = vaCreateContext(va_dpy, va_config, p_codec_ctx->width, p_codec_ctx->height, VA_PROGRESSIVE, va_surfaces, num_surfaces, &va_context); if (va_status != VA_STATUS_SUCCESS) { fprintf(stderr, "vaCreateContext failed: %d\n", va_status); return false; } free(entrypoints); free(profiles); return true; } ``` 2. 解码视频并将解码后的帧渲染到VAAPI surface上: ```c // 解码视频并将解码后的帧渲染到VAAPI surface上 int decode_and_render(AVCodecContext *p_codec_ctx, AVPacket *p_pkt, AVFrame **pp_frame, int *pi_frame_available) { int i_ret; AVFrame *p_frame; VAStatus va_status; VABufferID va_buffers[3]; VAEncPictureParameterBufferH264 va_pic_param; VAEncSliceParameterBufferH264 va_slice_param; int i_surface_index; int i; *pi_frame_available = 0; i_ret = avcodec_send_packet(p_codec_ctx, p_pkt); if (i_ret < 0) { fprintf(stderr, "avcodec_send_packet failed\n"); return i_ret; } while (1) { p_frame = av_frame_alloc(); if (!p_frame) { fprintf(stderr, "av_frame_alloc failed\n"); return -1; } i_ret = avcodec_receive_frame(p_codec_ctx, p_frame); if (i_ret == AVERROR(EAGAIN)) { av_frame_free(&p_frame); break; } if (i_ret < 0) { fprintf(stderr, "avcodec_receive_frame failed\n"); av_frame_free(&p_frame); return i_ret; } for (i = 0; i < va_buffers_num; i++) { if (va_buffers[i] == VA_INVALID_ID) { va_buffers[i] = vaCreateBuffer(va_dpy, va_context, VAEncPictureParameterBufferType, sizeof(va_pic_param), 1, &va_pic_param, NULL); break; } } if (i == va_buffers_num) { fprintf(stderr, "no free 
picture parameter buffers available\n"); av_frame_free(&p_frame); return -1; } memset(&va_pic_param, 0, sizeof(va_pic_param)); va_pic_param.CurrPic.picture_id = va_surfaces[i_surface_index]; va_pic_param.CurrPic.TopFieldOrderCnt = p_frame->coded_picture_number; va_pic_param.CurrPic.BottomFieldOrderCnt = p_frame->coded_picture_number; va_pic_param.ReferenceFrames[0].picture_id = VA_INVALID_SURFACE; va_pic_param.ReferenceFrames[0].flags = VA_PICTURE_H264_SHORT_TERM_REFERENCE; va_pic_param.picture_width_in_mbs_minus1 = (p_codec_ctx->width + 15) / 16 - 1; va_pic_param.picture_height_in_mbs_minus1 = (p_codec_ctx->height + 15) / 16 - 1; va_pic_param.bits_per_pixel = 0x20; va_pic_param.num_slice_groups_minus1 = 0; va_pic_param.slice_group_map_type = VA_SLICE_GROUP_MAP_TYPE_INTERLEAVED; va_pic_param.num_ref_idx_l0_active_minus1 = 0; va_pic_param.num_ref_idx_l1_active_minus1 = 0; va_pic_param.chroma_qp_index_offset = 0; va_pic_param.second_chroma_qp_index_offset = 0; va_pic_param.pic_init_qp_minus26 = p_codec_ctx->qmin; va_pic_param.num_ref_frames = 1; va_pic_param.frame_num = p_frame->coded_picture_number; va_pic_param.frametype = VA_FRAME_PICTURE; va_status = vaMapBuffer(va_dpy, va_buffers[i], (void **)&va_pic_param); if (va_status != VA_STATUS_SUCCESS) { fprintf(stderr, "vaMapBuffer failed: %d\n", va_status); av_frame_free(&p_frame); return -1; } i_surface_index = (i_surface_index + 1) % num_surfaces; for (i = 0; i < va_buffers_num; i++) { if (va_buffers[i] == VA_INVALID_ID) { va_buffers[i] = vaCreateBuffer(va_dpy, va_context, VAEncSliceParameterBufferType, sizeof(va_slice_param), 1, &va_slice_param, NULL); break; } } if (i == va_buffers_num) { fprintf(stderr, "no free slice parameter buffers available\n"); av_frame_free(&p_frame); return -1; } memset(&va_slice_param, 0, sizeof(va_slice_param)); va_slice_param.slice_data_size = p_frame->pkt_size; va_slice_param.slice_data_offset = 0; va_slice_param.slice_type = VA_SLICE_TYPE_I; va_slice_param.pic_parameter_set_id = 0; 
va_slice_param.slice_group_change_cycle = 0; va_slice_param.num_macroblocks = (p_codec_ctx->height / 16) * (p_codec_ctx->width / 16); va_slice_param.disable_deblocking_filter_idc = 0; va_slice_param.slice_alpha_c0_offset_div2 = 0; va_slice_param.slice_beta_offset_div2 = 0; va_status = vaMapBuffer(va_dpy, va_buffers[i], (void **)&va_slice_param); if (va_status != VA_STATUS_SUCCESS) { fprintf(stderr, "vaMapBuffer failed: %d\n", va_status); av_frame_free(&p_frame); return -1; } i_surface_index = (i_surface_index + 1) % num_surfaces; for (i = 0; i < va_buffers_num; i++) { if (va_buffers[i] == VA_INVALID_ID) { va_buffers[i] = vaCreateBuffer(va_dpy, va_context, VAEncCodedBufferType, p_frame->pkt_size, 1, NULL, NULL); break; } } if (i == va_buffers_num) { fprintf(stderr, "no free coded buffers available\n"); av_frame_free(&p_frame); return -1; } va_status = vaMapBuffer(va_dpy, va_buffers[i], (void **)&va_buffers[i]); if (va_status != VA_STATUS_SUCCESS) { fprintf(stderr, "vaMapBuffer failed: %d\n", va_status); av_frame_free(&p_frame); return -1; } if (vaBeginPicture(va_dpy, va_context, va_surfaces[i_surface_index]) != VA_STATUS_SUCCESS) { fprintf(stderr, "vaBeginPicture failed\n"); av_frame_free(&p_frame); return -1; } if (vaRenderPicture(va_dpy, va_context, va_buffers, 3) != VA_STATUS_SUCCESS) { fprintf(stderr, "vaRenderPicture failed\n"); av_frame_free(&p_frame); return -1; } if (vaEndPicture(va_dpy, va_context) != VA_STATUS_SUCCESS) { fprintf(stderr, "vaEndPicture failed\n"); av_frame_free(&p_frame); return -1; } av_frame_free(&p_frame); *pi_frame_available = 1; *pp_frame = p_frame; } return 0; } ``` 3. 使用SDL或其他图形库将VAAPI surface上的帧渲染到屏幕上。 完整的代码示例可以参考以下链接:https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/hw_decode.c

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

无v邪

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值