FFMpeg-10、完成两个mp4文件的视频拼接输出yuv及mp4

效果图
在这里插入图片描述

主要是完成解码两个mp4文件将其视频帧进行尺寸转换后进行拼接输出到YUV文件中,
其实后续还可以完成将拼接后的视频帧进行编码输出mp4文件的形式。并且代码进行封装将解码步骤封装成一个类。

完成输出mp4文件,存在诸多问题
1、视频播放很卡顿,不确定是帧匹配的时候锁加多了导致编码的时候太慢了,还是解码原视频帧的时候拼接存在一个等待时间导致的
2、声音部分尚未加入,后续可以进一步优化。
在这里插入图片描述

代码实践;
解码一个类EncodeVideoFrame、创建两个对象就可以解码两个文件了
编码一个类DecodeVideoFrame,就是将拼接好的帧放入队列中,从里面去取然后编码
main函数中进行视频帧拼接,并开启解码和编码以及帧的存储

EncodeVideoFrame

头文件
#pragma once
#include <iostream>
#include <queue>
#include <mutex>
#include <thread>
using namespace std;
// Forward declarations of FFmpeg C types. They are structs, not classes:
// the original `class` tags trigger MSVC warning C4099 and can break
// name mangling/linkage on that compiler. (Duplicate AVCodecContext
// declaration removed.)
struct AVFrame;
struct AVFormatContext;
struct AVCodecContext;
struct AVCodec;

/*
 Decodes one media file's video frames on a worker thread, rescales each
 frame to 1280x720 YUV420P and queues it for the stitching stage.
 NOTE(review): despite the name, this class performs DECODING.
*/
class EncodeVideoFrame
{
public:
	EncodeVideoFrame();
	~EncodeVideoFrame();
	// Opens `path`, sets up the video/audio decoders and spawns the thread.
	void startEncode();
	// Thread body: demux + decode + rescale, pushing frames into the queues.
	void doEncode();

	string path;                          // input file path; set before startEncode()
	bool m_bFlagStart = false;            // set once the first frame is queued
	mutex m_muxVideo1;                    // guards the two queues and m_bFlagStart
	std::thread	 *m_pVideoThread = NULL;  // decode worker (never joined/deleted here)
	queue<AVFrame *> m_qAVFrameVideo1;    // rescaled 1280x720 YUV420P frames
	queue<uint8_t *> m_qAVFrameBuffer1;   // matching pixel buffers (freed by consumer)
private:
	AVFormatContext *ic = NULL;   // demuxer context
	AVCodecContext *vc = NULL;    // video decoder context
	AVCodec *acodec = NULL;       // audio decoder
	AVCodecContext *ac = NULL;    // audio decoder context
	int videoStream;              // index of best video stream
	int audioStream;              // index of best audio stream
};

cpp文件
#include "EncodeVideoFrame.h"
#include<windows.h>
#ifdef __cplusplus
extern "C"
{
#endif // !__cplusplus
#include "libavutil/opt.h"
#include <libavutil/imgutils.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include "libavutil/time.h"
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include "libswresample/swresample.h"//包含头文件
#include <libavutil/imgutils.h>
#ifdef __cplusplus
}
#endif // !__cplusplus

// Print a human-readable description of an FFmpeg error code, wait for a
// key press, and return -1 so callers can propagate the failure.
static int XError(int errNum)
{
	char msg[1024] = { 0 };
	// Translate the FFmpeg error code into text.
	av_strerror(errNum, msg, sizeof(msg));
	cout << msg << endl;
	getchar();
	return -1;
}

// Default constructor: all members are initialized by in-class initializers.
EncodeVideoFrame::EncodeVideoFrame()
{
}


// NOTE(review): the demuxer/decoder contexts (ic/vc/ac) and the worker
// thread are never released or joined here — known leak in this sample.
EncodeVideoFrame::~EncodeVideoFrame()
{
}
void LocalVideoThread(void *param)
{
	EncodeVideoFrame *pMan = (EncodeVideoFrame *)param;
	if (pMan)
		pMan->doEncode();
}
void EncodeVideoFrame::doEncode()
{
	AVPacket *pkt = av_packet_alloc();
	AVFrame *frame2 = av_frame_alloc();
	AVFrame *m_pFrameEncodeYUV1 = NULL;
	uint8_t *m_VideoEncodebuffer1 = NULL;
	struct SwsContext *m_pAddVideoConvert_ctx1 = NULL;
	int m_iOutWidth = 1280;
	int m_iOutHeight = 720;
	for (;;)
	{
		int re = av_read_frame(ic, pkt);
		if (re != 0)
		{
			break;
		}
		AVCodecContext *cc = 0;
		if (pkt->stream_index == videoStream)
		{
			cc = vc;
		}
		if (pkt->stream_index == audioStream)
		{
			cc = ac;
		}

		///解码视频
		//发送packet到解码线程  send传NULL后调用多次receive取出所有缓冲帧
		re = avcodec_send_packet(cc, pkt);
		if (re != 0)
		{
			XError(re);
			continue;
		}
		av_packet_unref(pkt);
		long long startTime = av_gettime();
		for (;;)
		{
			//从线程中获取解码接口,一次send可能对应多次receive
			re = avcodec_receive_frame(cc, frame2);
			if (re != 0) {
				break;
			}
			//视频
			if (cc == vc)
			{

				m_pFrameEncodeYUV1 = av_frame_alloc();
				m_VideoEncodebuffer1 = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, m_iOutWidth, m_iOutHeight));
				avpicture_fill((AVPicture *)m_pFrameEncodeYUV1, m_VideoEncodebuffer1, AV_PIX_FMT_YUV420P, m_iOutWidth, m_iOutHeight);
				m_pFrameEncodeYUV1->width = m_iOutWidth;
				m_pFrameEncodeYUV1->height = m_iOutHeight;
				m_pFrameEncodeYUV1->format = AV_PIX_FMT_YUV420P;

				if (!m_pAddVideoConvert_ctx1)
				{
					m_pAddVideoConvert_ctx1 = sws_getContext(frame2->width, frame2->height,
						AV_PIX_FMT_YUV420P, m_iOutWidth, m_iOutHeight, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
				}
				sws_scale(m_pAddVideoConvert_ctx1, frame2->data, frame2->linesize, 0, frame2->height, m_pFrameEncodeYUV1->data, m_pFrameEncodeYUV1->linesize);

				//自测
				//int y_size = frame2->width*frame2->height;
				//fwrite(frame2->data[0], 1, y_size, yuv_file2);    //Y   
				//fwrite(frame2->data[1], 1, y_size / 4, yuv_file2);  //U  
				//fwrite(frame2->data[2], 1, y_size / 4, yuv_file2);  //V  
				//Sleep(20);

				m_muxVideo1.lock();
				m_qAVFrameVideo1.push(m_pFrameEncodeYUV1);
				m_qAVFrameBuffer1.push(m_VideoEncodebuffer1);
				m_bFlagStart = true;
				m_muxVideo1.unlock();
				Sleep(200);
			}
			else if (cc == ac)//音频
			{
				//cout << "Audio" << endl;
			}
		}
	}
	cout << "m_qAVFrameVideo end" << endl;
}
// Opens the input file given by `path`, creates and opens the video and
// audio decoders (8 worker threads each) and spawns the demux/decode thread.
// Reports errors via XError()/cout and returns without starting the thread.
void EncodeVideoFrame::startEncode()
{
	AVDictionary *opts = NULL;
	int re = avformat_open_input(&ic, path.c_str(), 0, &opts);
	if (re != 0)
	{
		XError(re);
		return;
	}
	re = avformat_find_stream_info(ic, 0);
	if (re < 0)
	{
		// FIX: result was previously ignored; stream info may be unusable.
		XError(re);
		return;
	}
	videoStream = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
	audioStream = av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
	if (videoStream < 0 || audioStream < 0)
	{
		// FIX: negative indices were previously used to index ic->streams[],
		// crashing on files lacking a video or an audio stream.
		cout << "can't find a video and an audio stream" << endl;
		getchar();
		return;
	}

	AVCodec *vcodec = avcodec_find_decoder(ic->streams[videoStream]->codecpar->codec_id);
	if (!vcodec)
	{
		cout << "can't find the codec id " << ic->streams[videoStream]->codecpar->codec_id;
		getchar();
		return;
	}
	cout << "find the AVCodec " << ic->streams[videoStream]->codecpar->codec_id << endl;
	vc = avcodec_alloc_context3(vcodec);
	// Copy the stream parameters into the decoder context.
	avcodec_parameters_to_context(vc, ic->streams[videoStream]->codecpar);
	vc->thread_count = 8;  // 8-thread decoding
	re = avcodec_open2(vc, 0, 0);
	if (re != 0)
	{
		XError(re);
		getchar();
		return;
	}
	// Audio decoder setup (decoded audio is currently discarded in doEncode).
	acodec = avcodec_find_decoder(ic->streams[audioStream]->codecpar->codec_id);
	if (!acodec)
	{
		cout << "can't find the codec id " << ic->streams[audioStream]->codecpar->codec_id;
		getchar();
		return;
	}
	cout << "find the AVCodec " << ic->streams[audioStream]->codecpar->codec_id << endl;
	ac = avcodec_alloc_context3(acodec);
	avcodec_parameters_to_context(ac, ic->streams[audioStream]->codecpar);
	ac->thread_count = 8;  // 8-thread decoding
	re = avcodec_open2(ac, 0, 0);
	if (re != 0)
	{
		XError(re);
		return;
	}
	cout << "audio avcodec_open2 success!" << endl;

	m_pVideoThread = new std::thread(LocalVideoThread, this);

	return;
}

DecodeVideoFrame编码类

头文件
#pragma once
#include <iostream>
#include <queue>
#include <mutex>
#include <thread>
using namespace std;
// Forward declarations of FFmpeg C types. They are structs, not classes:
// the original `class` tags trigger MSVC warning C4099 and can break
// name mangling/linkage on that compiler. (Duplicate AVCodecContext
// declaration removed.)
struct AVFrame;
struct AVFormatContext;
struct AVCodecContext;
struct AVCodec;
struct AVStream;
/*
 Encodes stitched 2560x720 frames popped from its queue into an H.264 mp4
 file ("111.mp4") on a worker thread.
 NOTE(review): despite the name, this class performs ENCODING.
*/
class DecodeVideoFrame
{
public:
	DecodeVideoFrame();
	~DecodeVideoFrame();
	bool m_bencode_video = false;     // set by producer once frames are queued
	bool m_bencodeEnd_video = false;  // set by producer to stop the encode loop
	// Sets up muxer + H.264 encoder, writes the header, spawns the thread.
	int initVideo();
	// Thread body: pop stitched frames, rescale to 1280x720, encode, mux.
	void DoDecodeVideo();
	std::thread	 *m_pVideoDecodeThread = NULL;  // encode worker
	mutex m_muxDecodeVideo;                     // guards the queues and flags
	queue<AVFrame *> m_qAVFrameDecodeVideo;     // stitched input frames
	queue<uint8_t *> m_qAVFrameDecodeBuffer;    // matching pixel buffers
private:
	AVFormatContext	*m_pFormatCtx = NULL;       // output muxer context
	AVCodecContext	*m_pVideoCodecCtx = NULL;   // H.264 encoder context
	AVCodec			*m_pVideoCodec = NULL;      // H.264 encoder

	AVStream		*m_pVideo_st = NULL;        // output video stream
	int64_t			m_first_vid_time1 = -1;     // wall-clock time of current frame
	int64_t			m_first_vid_time2 = -1;     // wall-clock time of first frame
	AVFrame *m_pFrameDecodeYUV2 = NULL;         // reusable 1280x720 encode frame
	uint8_t * m_VideoDecodebuffer = NULL;       // its pixel buffer
	struct SwsContext *m_pAddVideoConvert_ctx1 = NULL;  // 2560x720 -> 1280x720
	int m_iOutWidth = 1280;                     // encoded output width
	int m_iOutHeight = 720;                     // encoded output height
	std::string		m_strRecordPath = "111.mp4";  // output file path
};

cpp文件
#include "DecodeVideoFrame.h"
#include<windows.h>
#ifdef __cplusplus
extern "C"
{
#endif // !__cplusplus
#include "libavutil/opt.h"
#include <libavutil/imgutils.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include "libavutil/time.h"
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include "libswresample/swresample.h"//包含头文件
#include <libavutil/imgutils.h>
#ifdef __cplusplus
}
#endif // !__cplusplus

// Default constructor: all members are initialized by in-class initializers.
DecodeVideoFrame::DecodeVideoFrame()
{
}


// NOTE(review): the muxer/encoder contexts, the reusable frame/buffer and
// the worker thread are never released or joined — known leak in this sample.
DecodeVideoFrame::~DecodeVideoFrame()
{
}
void VideoEncodeThread(void *param)
{
	DecodeVideoFrame *pMan = (DecodeVideoFrame *)param;
	if (pMan)
		pMan->DoDecodeVideo();
}
// Initializes the mp4 muxer and H.264 encoder (1280x720 @ 25fps, 2 Mbit/s,
// no B-frames), writes the container header and starts the encode worker
// thread. Returns true (non-zero) on success, false (0) on any failure.
int DecodeVideoFrame::initVideo()
{
	av_register_all();
	avcodec_register_all();
	AVCodecID		m_video_codec_id = AV_CODEC_ID_H264;
	avformat_alloc_output_context2(&m_pFormatCtx, NULL, NULL, m_strRecordPath.c_str());
	m_pVideoCodec = avcodec_find_encoder(m_video_codec_id);
	if (!m_pVideoCodec)
	{
		return false;
	}

	m_pVideoCodecCtx = avcodec_alloc_context3(m_pVideoCodec);
	m_pVideoCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
	m_pVideoCodecCtx->codec_id = m_pVideoCodec->id;
	m_pVideoCodecCtx->width = m_iOutWidth;// 1280;
	m_pVideoCodecCtx->height = m_iOutHeight;// 720;
	m_pVideoCodecCtx->time_base.num = 1;
	m_pVideoCodecCtx->time_base.den = 25;  // 25 fps
	m_pVideoCodecCtx->bit_rate = 2000000;
	m_pVideoCodecCtx->gop_size = 60;
	m_pVideoCodecCtx->max_b_frames = 0;    // no B-frames keeps pts == dts
	m_pVideoCodecCtx->b_frame_strategy = 0;

	AVDictionary *param = NULL;
	if (m_video_codec_id == AV_CODEC_ID_H264)
	{
		m_pVideoCodecCtx->qmin = 10;
		m_pVideoCodecCtx->qmax = 51;

		// x264 options: fastest preset, minimal latency.
		av_dict_set(&param, "preset", "ultrafast", 0);
		av_dict_set(&param, "tune", "zerolatency", 0);
	}

	if (avcodec_open2(m_pVideoCodecCtx, m_pVideoCodec, &param) < 0)
	{
		return false;
	}

	m_pVideo_st = avformat_new_stream(m_pFormatCtx, m_pVideoCodec);
	if (m_pVideo_st == NULL)
	{
		return false;
	}

	avcodec_parameters_from_context(m_pVideo_st->codecpar, m_pVideoCodecCtx);

	m_pVideo_st->time_base.num = 1;
	m_pVideo_st->time_base.den = 25;
	m_pVideo_st->codec = m_pVideoCodecCtx;  // deprecated field, still set for old muxer paths

	if (avio_open(&m_pFormatCtx->pb, m_strRecordPath.c_str(), AVIO_FLAG_READ_WRITE) < 0)
	{
		return false;
	}
	// Reset the timestamp baseline so a second run does not inherit stale values.
	m_first_vid_time1 = -1;
	m_first_vid_time2 = -1;

	// Reusable frame + buffer that the encode thread scales into.
	m_pFrameDecodeYUV2 = av_frame_alloc();
	m_VideoDecodebuffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, m_iOutWidth, m_iOutHeight));
	avpicture_fill((AVPicture *)m_pFrameDecodeYUV2, m_VideoDecodebuffer, AV_PIX_FMT_YUV420P, m_iOutWidth, m_iOutHeight);
	m_pFrameDecodeYUV2->width = m_iOutWidth;
	m_pFrameDecodeYUV2->height = m_iOutHeight;
	m_pFrameDecodeYUV2->format = AV_PIX_FMT_YUV420P;

	AVDictionary* opt = NULL;
	if (avformat_write_header(m_pFormatCtx, &opt) < 0)
	{
		return false;
	}

	m_pVideoDecodeThread = new std::thread(VideoEncodeThread, this);
	// FIX: the original fell off the end of a non-void function (undefined
	// behavior); report success explicitly.
	return true;
}

// Encode worker loop: pops stitched frames from the queue, downscales them
// to 1280x720, encodes to H.264 and muxes into the mp4. Exits (and writes
// the container trailer) when m_bencodeEnd_video is set by the producer.
void DecodeVideoFrame::DoDecodeVideo()
{
	int ret;
	AVFrame *frameTemp = NULL;
	uint8_t	*Video1bufferTemp = NULL;
	AVPacket enc_pkt;
	while (1)
	{
		m_first_vid_time1 = av_gettime();  // wall-clock time used as pts base
		m_muxDecodeVideo.lock();
		if (m_bencodeEnd_video)
		{
			// FIX: the original broke out of the loop while still HOLDING
			// the mutex, leaving it locked forever for the producer.
			m_muxDecodeVideo.unlock();
			break;
		}
		if (m_bencode_video == false || m_qAVFrameDecodeVideo.size() == 0)
		{
			m_muxDecodeVideo.unlock();
			Sleep(1000);  // nothing queued yet; poll again
			continue;
		}

		frameTemp = m_qAVFrameDecodeVideo.front();
		Video1bufferTemp = m_qAVFrameDecodeBuffer.front();
		m_qAVFrameDecodeVideo.pop();
		m_qAVFrameDecodeBuffer.pop();
		m_muxDecodeVideo.unlock();

		if (!m_pAddVideoConvert_ctx1)
		{
			// Lazily create the scaler on the first frame (2560x720 source).
			m_pAddVideoConvert_ctx1 = sws_getContext(frameTemp->width, frameTemp->height,
				AV_PIX_FMT_YUV420P, m_iOutWidth, m_iOutHeight, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
		}
		sws_scale(m_pAddVideoConvert_ctx1, frameTemp->data, frameTemp->linesize, 0, frameTemp->height, m_pFrameDecodeYUV2->data, m_pFrameDecodeYUV2->linesize);
		// The producer transferred ownership; release the consumed frame.
		av_frame_free(&frameTemp);
		av_free(Video1bufferTemp);

		// Canonical packet setup order: init first, then mark data/size so
		// the encoder allocates the output buffer itself.
		av_init_packet(&enc_pkt);
		enc_pkt.data = NULL;
		enc_pkt.size = 0;
		int enc_got_frame = 0;
		ret = avcodec_encode_video2(m_pVideoCodecCtx, &enc_pkt, m_pFrameDecodeYUV2, &enc_got_frame);

		if (enc_got_frame == 1)
		{
			if (m_first_vid_time2 == -1)
				m_first_vid_time2 = m_first_vid_time1;  // remember first-frame time

			enc_pkt.stream_index = m_pVideo_st->index;
			// pts/dts: elapsed wall-clock time rescaled to the stream time base.
			enc_pkt.dts = enc_pkt.pts = (int64_t)(m_pVideo_st->time_base.den * (m_first_vid_time1 - m_first_vid_time2) / AV_TIME_BASE);

			//m_muxFormat.lock(); needed when audio and video are muxed together
			ret = av_interleaved_write_frame(m_pFormatCtx, &enc_pkt);
			//m_muxFormat.unlock();
			// av_free_packet releases the packet whether the write succeeded
			// or not (the original also did a redundant unref on failure).
			av_free_packet(&enc_pkt);
		}
		else
		{
			av_free_packet(&enc_pkt);
		}
	}
	if (m_pFormatCtx != NULL)
	{
		av_write_trailer(m_pFormatCtx);  // finalize the mp4 index
	}
	Sleep(200);
	return;
}

main函数

#include <iostream>
#include <mutex>
#include<windows.h>
#ifdef __cplusplus
extern "C"
{
#endif // !__cplusplus
#include "libavutil/opt.h"
#include <libavutil/imgutils.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include "libavutil/time.h"
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include "libswresample/swresample.h"//包含头文件
#include <libavutil/imgutils.h>
#ifdef __cplusplus
}
#endif // !__cplusplus
#pragma comment(lib,"avformat.lib")//添加库文件,也可以在属性处添加
#pragma comment(lib,"avutil.lib")
#pragma comment(lib,"avcodec.lib")
#pragma comment(lib,"swresample.lib")
#pragma comment(lib,"swscale.lib")
#include <thread>
#include <queue>
#include "EncodeVideoFrame.h"
#include "DecodeVideoFrame.h"
using namespace std;
#define _CRT_SECURE_NO_WARNINGS;
// Print an FFmpeg error code as readable text, pause for a key press and
// return -1. (Duplicate of the static helper in EncodeVideoFrame.cpp.)
int XError(int errNum)
{
	char msg[1024] = { 0 };
	av_strerror(errNum, msg, sizeof(msg));
	cout << msg << endl;
	getchar();
	return -1;
}
// Convert an AVRational to double; yields 0.0 when either field is zero
// (guards against division by zero on unset stream rationals).
static double r2d(AVRational r)
{
	if (r.num == 0 || r.den == 0)
		return 0.;
	return (double)r.num / (double)r.den;
}
FILE *yuv_file3 = fopen("D:\\yuv_file13.yuv", "wb");

// Entry point: starts the encoder object and two decoder objects, then
// stitches pairs of 1280x720 frames side by side into 2560x720 frames and
// queues them for encoding. All coordination is Sleep()-based polling.
int main(int argc, char *argv[])
{
	av_register_all();
	avformat_network_init();
	avcodec_register_all();
	// Encoder: consumes stitched frames, writes 111.mp4 on its own thread.
	DecodeVideoFrame decodeVideo;
	decodeVideo.initVideo();
	//m_pVideo1Thread = new std::thread(LocalVideoThread);
	//m_pVideo2Thread = new std::thread(LocalVideo2Thread);
	// Decoder #1: supplies the left half of the stitched frame.
	EncodeVideoFrame encodeVideo1;
	encodeVideo1.path = "01.mp4";
	encodeVideo1.startEncode();
	Sleep(200);
	// Decoder #2: supplies the right half.
	EncodeVideoFrame encodeVideo2;
	encodeVideo2.path = "lane.avi";
	encodeVideo2.startEncode();
	//Sleep(200);
	
	// Stitched output: two 1280x720 frames concatenated horizontally.
	int m_iOutWidth = 2560;
	int m_iOutHeight = 720;
	
	uint8_t *tempBuff1 = NULL;
	uint8_t *tempBuff2 = NULL;
	// Poll until each decoder queue holds at least one decoded frame.
	cout << "Audio0" << endl;
	for (; ; )
	{
		encodeVideo1.m_muxVideo1.lock();
		if (encodeVideo1.m_bFlagStart == false)
		{
			encodeVideo1.m_muxVideo1.unlock();
			Sleep(200);
			continue;
		}
		encodeVideo1.m_muxVideo1.unlock();
		break;
	}
	cout << "Audio1" << endl;
	for (; ; )
	{
		encodeVideo2.m_muxVideo1.lock();
		if (encodeVideo2.m_bFlagStart == false)
		{
			encodeVideo2.m_muxVideo1.unlock();
			Sleep(200);
			
			continue;
		}
		encodeVideo2.m_muxVideo1.unlock();
		break;
	}
	cout << "Audio2" << endl;
	// Main stitch loop: pop one frame from each decoder, concatenate them
	// side by side and hand the result to the encoder queue.
	for (;;)
	{
		cout << "Audio3" << endl;

		Sleep(200);
		// NOTE(review): both decoder mutexes are held here; each decode
		// thread only ever takes its own, so this order cannot deadlock.
		encodeVideo1.m_muxVideo1.lock();
		encodeVideo2.m_muxVideo1.lock();
		if (encodeVideo1.m_qAVFrameVideo1.size() == 0 || encodeVideo2.m_qAVFrameVideo1.size() == 0)//either queue empty means one input file has ended, so stitching stops
		{
			encodeVideo2.m_muxVideo1.unlock();
			encodeVideo1.m_muxVideo1.unlock();
			// Signal the encode thread to finish and write the trailer.
			decodeVideo.m_muxDecodeVideo.lock();
			decodeVideo.m_bencodeEnd_video = true;
			decodeVideo.m_muxDecodeVideo.unlock();
			break;
		}

		// Pop the right-half frame; ownership transfers to this loop.
		tempBuff2 = encodeVideo2.m_qAVFrameBuffer1.front();
		AVFrame * frame2 = encodeVideo2.m_qAVFrameVideo1.front();
		encodeVideo2.m_qAVFrameBuffer1.pop();
		encodeVideo2.m_qAVFrameVideo1.pop();
		encodeVideo2.m_muxVideo1.unlock();

		// Pop the left-half frame.
		tempBuff1 = encodeVideo1.m_qAVFrameBuffer1.front();
		AVFrame * frame = encodeVideo1.m_qAVFrameVideo1.front();
		encodeVideo1.m_qAVFrameBuffer1.pop();
		encodeVideo1.m_qAVFrameVideo1.pop();
		encodeVideo1.m_muxVideo1.unlock();

		// Allocate the 2560x720 destination frame for this pair.
		AVFrame *m_pFrameEncodeYUV2 = av_frame_alloc();
		uint8_t * m_VideoEncodebuffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, m_iOutWidth, m_iOutHeight));
		avpicture_fill((AVPicture *)m_pFrameEncodeYUV2, m_VideoEncodebuffer, AV_PIX_FMT_YUV420P, m_iOutWidth, m_iOutHeight);
		m_pFrameEncodeYUV2->width = m_iOutWidth;
		m_pFrameEncodeYUV2->height = m_iOutHeight;
		m_pFrameEncodeYUV2->format = AV_PIX_FMT_YUV420P;

		cout << "Audio4" << endl;
		// Y plane: each output row is the left frame's row then the right's.
		// NOTE(review): assumes linesize == width for all three frames; this
		// holds because every buffer here was laid out via avpicture_fill.
		for (int i = 0; i < m_iOutHeight; i++)
		{
			memcpy(m_pFrameEncodeYUV2->data[0] + (i)*m_iOutWidth, frame->data[0] + i*frame->width, frame->width);
			memcpy(m_pFrameEncodeYUV2->data[0] + (i)*m_iOutWidth + frame->width, frame2->data[0] + i*frame2->width, frame2->width);
			
		}
		// U/V planes in YUV420P are half width and half height.
		int iHeightHlaf = m_iOutHeight / 2;
		int iWidthHlaf = m_iOutWidth / 2;
		int iWidth1Hlaf = frame->width / 2;
		int iWidth2Hlaf = frame2->width / 2;
		for (int i = 0; i < iHeightHlaf; i++)
		{
			memcpy(m_pFrameEncodeYUV2->data[1] + (i)*(iWidthHlaf), frame->data[1] + i*(iWidth1Hlaf), (iWidth1Hlaf));
			memcpy(m_pFrameEncodeYUV2->data[1] + (i)*(iWidthHlaf)+(iWidth1Hlaf), frame2->data[1] + i*(iWidth2Hlaf), (iWidth2Hlaf));

			memcpy(m_pFrameEncodeYUV2->data[2] + (i)*(iWidthHlaf), frame->data[2] + i*(iWidth1Hlaf), (iWidth1Hlaf));
			memcpy(m_pFrameEncodeYUV2->data[2] + (i)*(iWidthHlaf)+(iWidth1Hlaf), frame2->data[2] + i*(iWidth2Hlaf), (iWidth2Hlaf));
		}
		// Source frames are consumed; free them and their buffers.
		av_frame_free(&frame);
		av_frame_free(&frame2);
		av_free(tempBuff1);
		av_free(tempBuff2);
		cout << "Audio6" << endl;

		// Hand the stitched frame to the encode thread (it takes ownership).
		decodeVideo.m_muxDecodeVideo.lock();
		decodeVideo.m_qAVFrameDecodeVideo.push(m_pFrameEncodeYUV2);
		decodeVideo.m_qAVFrameDecodeBuffer.push(m_VideoEncodebuffer);
		decodeVideo.m_bencode_video = true;
		decodeVideo.m_muxDecodeVideo.unlock();
		//int y_size = m_pFrameEncodeYUV2->width*m_pFrameEncodeYUV2->height;
		//fwrite(m_pFrameEncodeYUV2->data[0], 1, y_size, yuv_file3);    //Y   
		//fwrite(m_pFrameEncodeYUV2->data[1], 1, y_size / 4, yuv_file3);  //U  
		//fwrite(m_pFrameEncodeYUV2->data[2], 1, y_size / 4, yuv_file3);  //V 	
	}
	//fclose(yuv_file3);
	//m_pVideoThread = new std::thread(VideoReadThread);

	system("pause");
	return 0;
}

  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值