1 avcodec_find_decoder — finding a decoder
Call avcodec_register_all() before using it (a minimal usage sketch follows the two declarations below).
/**
* Register all the codecs, parsers and bitstream filters which were enabled at
* configuration time. If you do not call this function you can select exactly
* which formats you want to support, by using the individual registration
* functions.
*
* @see avcodec_register
* @see av_register_codec_parser
* @see av_register_bitstream_filter
*/
void avcodec_register_all(void);
/**
* Find a registered decoder with a matching codec ID.
*
* @param id AVCodecID of the requested decoder
* @return A decoder if one was found, NULL otherwise.
*/
AVCodec *avcodec_find_decoder(enum AVCodecID id);
/**
* Find a registered decoder with the specified name.
*
* @param name name of the requested decoder
* @return A decoder if one was found, NULL otherwise.
*/
AVCodec *avcodec_find_decoder_by_name(const char *name);
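A minimal usage sketch (not from the original notes): look up a decoder by codec ID and by name. AV_CODEC_ID_H264 and the name "h264" are just assumed examples.
// Assumed example; on FFmpeg versions that still require it, register codecs first.
avcodec_register_all();

AVCodec *dec = avcodec_find_decoder(AV_CODEC_ID_H264);   // by codec ID
if (!dec) { /* no H.264 decoder compiled in */ }

AVCodec *dec2 = avcodec_find_decoder_by_name("h264");    // by name
if (!dec2) { /* no decoder registered under this name */ }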
2 AVCodecContext — the codec context
/**
* main external API structure.
* New fields can be added to the end with minor version bumps.
* Removal, reordering and changes to existing fields require a major
* version bump.
* You can use AVOptions (av_opt* / av_set/get*()) to access these fields from user
* applications.
* The name string for AVOptions options matches the associated command line
* parameter name and can be found in libavcodec/options_table.h
* The AVOption/command line parameter names differ in some cases from the C
* structure field names for historic reasons or brevity.
* sizeof(AVCodecContext) must not be used outside libav*.
*/
typedef struct AVCodecContext {
........
/**
* thread count
* is used to decide how many independent tasks should be passed to execute()
* - encoding: Set by user.
* - decoding: Set by user.
*/
int thread_count;
/**
* This is the fundamental unit of time (in seconds) in terms
* of which frame timestamps are represented. For fixed-fps content,
* timebase should be 1/framerate and timestamp increments should be
* identically 1.
* This often, but not always is the inverse of the frame rate or field rate
* for video. 1/time_base is not the average frame rate if the frame rate is not
* constant.
*
* Like containers, elementary streams also can store timestamps, 1/time_base
* is the unit in which these timestamps are specified.
* As example of such codec time base see ISO/IEC 14496-2:2001(E)
* vop_time_increment_resolution and fixed_vop_rate
* (fixed_vop_rate == 0 implies that it is different from the framerate)
*
* - encoding: MUST be set by user.
* - decoding: the use of this field for decoding is deprecated.
* Use framerate instead.
*/
AVRational time_base;
........
}
/**
* Allocate an AVCodecContext and set its fields to default values. The
* resulting struct should be freed with avcodec_free_context().
*
* @param codec if non-NULL, allocate private data and initialize defaults
* for the given codec. It is illegal to then call avcodec_open2()
* with a different codec.
* If NULL, then the codec-specific defaults won't be initialized,
* which may result in suboptimal default settings (this is
* important mainly for encoders, e.g. libx264).
*
* @return An AVCodecContext filled with default values or NULL on failure.
*/
AVCodecContext *avcodec_alloc_context3(const AVCodec *codec);
/**
* Free the codec context and everything associated with it and write NULL to
* the provided pointer.
*/
void avcodec_free_context(AVCodecContext **avctx);
/**
* Initialize the AVCodecContext to use the given AVCodec. Prior to using this
* function the context has to be allocated with avcodec_alloc_context3().
*
* The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
* avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
* retrieving a codec.
*
* @warning This function is not thread safe!
*
* @note Always call this function before using decoding routines (such as
* @ref avcodec_receive_frame()).
*
* @code
* avcodec_register_all();
* av_dict_set(&opts, "b", "2.5M", 0);
* codec = avcodec_find_decoder(AV_CODEC_ID_H264);
* if (!codec)
* exit(1);
*
* context = avcodec_alloc_context3(codec);
*
* if (avcodec_open2(context, codec, opts) < 0)
* exit(1);
* @endcode
*
* @param avctx The context to initialize.
* @param codec The codec to open this context for. If a non-NULL codec has been
* previously passed to avcodec_alloc_context3() or
* avcodec_get_context_defaults3() for this context, then this parameter MUST be
* either NULL or equal to the previously passed codec.
* @param options A dictionary filled with AVCodecContext and codec-private options.
* On return this object will be filled with options that were not found.
*
* @return zero on success, a negative value on error
* @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(),
* av_dict_set(), av_opt_find().
*/
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);
Filling the context from the demuxer (see the sketch after the declaration below)
/**
* Fill the codec context based on the values from the supplied codec
* parameters. Any allocated fields in codec that have a corresponding field in
* par are freed and replaced with duplicates of the corresponding field in par.
* Fields in codec that do not have a counterpart in par are not touched.
*
* @return >= 0 on success, a negative AVERROR code on failure.
*/
int avcodec_parameters_to_context(AVCodecContext *codec,
const AVCodecParameters *par);
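A hedged sketch of the usual demuxer-to-decoder flow these declarations support: allocate a context for the stream's codec, copy the stream's AVCodecParameters into it with avcodec_parameters_to_context(), then open it. fmt_ctx and stream_index are assumed to come from avformat_open_input()/av_find_best_stream(); error handling is reduced to placeholders.
// Assumes fmt_ctx (AVFormatContext*) and stream_index were obtained from the demuxer.
AVCodecParameters *par = fmt_ctx->streams[stream_index]->codecpar;

AVCodec *dec = avcodec_find_decoder(par->codec_id);
if (!dec) { /* no decoder for this codec_id */ }

AVCodecContext *dec_ctx = avcodec_alloc_context3(dec);
if (!dec_ctx) { /* out of memory */ }

// Copy width/height/sample_rate/extradata... from the stream into the context.
if (avcodec_parameters_to_context(dec_ctx, par) < 0) { /* handle error */ }

dec_ctx->thread_count = 8;                 // optional: multi-threaded decoding

if (avcodec_open2(dec_ctx, dec, NULL) < 0) { /* handle error */ }

// ... decode ...

avcodec_free_context(&dec_ctx);            // frees the context and sets dec_ctx to NULL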
3 AVFrame — holds decoded data
AVPacket: stores pre-decode data (encoded data: H264/AAC, etc.).
AVFrame: stores decoded data, i.e. raw uncompressed data (video: YUV/RGB pixel data; audio: PCM samples).
Copying an AVFrame outright takes on the order of milliseconds, so in practice frames are passed around by reference (see the sketch after the declarations below).
/**
* Allocate an AVFrame and set its fields to default values. The resulting
* struct must be freed using av_frame_free().
*
* @return An AVFrame filled with default values or NULL on failure.
*
* @note this only allocates the AVFrame itself, not the data buffers. Those
* must be allocated through other means, e.g. with av_frame_get_buffer() or
* manually.
*/
AVFrame *av_frame_alloc(void);
/**
* Free the frame and any dynamically allocated objects in it,
* e.g. extended_data. If the frame is reference counted, it will be
* unreferenced first.
*
* @param frame frame to be freed. The pointer will be set to NULL.
*/
void av_frame_free(AVFrame **frame);
/**
* Set up a new reference to the data described by the source frame.
*
* Copy frame properties from src to dst and create a new reference for each
* AVBufferRef from src.
*
* If src is not reference counted, new buffers are allocated and the data is
* copied.
*
* @warning: dst MUST have been either unreferenced with av_frame_unref(dst),
* or newly allocated with av_frame_alloc() before calling this
* function, or undefined behavior will occur.
*
* @return 0 on success, a negative AVERROR on error
*/
int av_frame_ref(AVFrame *dst, const AVFrame *src);
/**
* Create a new frame that references the same data as src.
*
* This is a shortcut for av_frame_alloc()+av_frame_ref().
*
* @return newly created AVFrame on success, NULL on error.
*/
AVFrame *av_frame_clone(const AVFrame *src);
/**
* Unreference all the buffers referenced by frame and reset the frame fields.
*/
void av_frame_unref(AVFrame *frame);
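A small sketch (assumption: src already holds a decoded, reference-counted frame) of why "copy by reference" is cheap: av_frame_ref() and av_frame_clone() only add a reference to the underlying buffers, while av_frame_unref()/av_frame_free() drop it.
// Assume 'src' holds a decoded frame (e.g. filled by avcodec_receive_frame()).
AVFrame *dst = av_frame_alloc();
if (av_frame_ref(dst, src) >= 0) {     // dst now shares src's buffers, no pixel copy
    // ... use dst ...
    av_frame_unref(dst);               // drop the reference, keep the shell for reuse
}

AVFrame *clone = av_frame_clone(src);  // shortcut for av_frame_alloc() + av_frame_ref()
// ... use clone ...
av_frame_free(&clone);                 // unref + free; the pointer is set to NULL
av_frame_free(&dst);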
Key fields of AVFrame:
typedef struct AVFrame {
#define AV_NUM_DATA_POINTERS 8
/**
* pointer to the picture/channel planes.
* This might be different from the first allocated byte
*
* Some decoders access areas outside 0,0 - width,height, please
* see avcodec_align_dimensions2(). Some filters and swscale can read
* up to 16 bytes beyond the planes, if these filters are to be used,
* then 16 extra bytes must be allocated.
*
* NOTE: Except for hwaccel formats, pointers not needed by the format
* MUST be set to NULL.
*/
uint8_t *data[AV_NUM_DATA_POINTERS];
/**
* For video, size in bytes of each picture line.
* For audio, size in bytes of each plane.
*
* For audio, only linesize[0] may be set. For planar audio, each channel
* plane must be the same size.
*
* For video the linesizes should be multiples of the CPUs alignment
* preference, this is 16 or 32 for modern desktop CPUs.
* Some code requires such alignment other code can be slower without
* correct alignment, for yet other it makes no difference.
*
* @note The linesize may be larger than the size of usable data -- there
* may be extra padding present for performance reasons.
*/
int linesize[AV_NUM_DATA_POINTERS];
/**
* @name Video dimensions
* Video frames only. The coded dimensions (in pixels) of the video frame,
* i.e. the size of the rectangle that contains some well-defined values.
*
* @note The part of the frame intended for display/presentation is further
* restricted by the @ref cropping "Cropping rectangle".
* @{
*/
int width, height;
/**
* @}
*/
/**
* number of audio samples (per channel) described by this frame
*/
int nb_samples;
/**
* Presentation timestamp in time_base units (time when frame should be shown to user).
*/
int64_t pts;
/**
* DTS copied from the AVPacket that triggered returning this frame. (if frame threading isn't used)
* This is also the Presentation time of this AVFrame calculated from
* only AVPacket.dts values without pts values.
*/
int64_t pkt_dts;
/**
* Sample rate of the audio data.
*/
int sample_rate;
/**
* Channel layout of the audio data.
*/
uint64_t channel_layout;
/**
* format of the frame, -1 if unknown or unset
* Values correspond to enum AVPixelFormat for video frames,
* enum AVSampleFormat for audio)
*/
int format;
linesize:
linesize exists for byte alignment: each line may be padded, so the buffer implied by linesize can be slightly larger than the tight YUV size computed from width × height.
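A sketch of what this means in practice (assuming a decoded YUV420P frame named frame): when copying a plane into a tightly packed buffer, advance the source pointer by linesize but copy only width bytes per row, because linesize may include alignment padding.
// Copy the Y plane of a YUV420P frame into a tightly packed buffer (1 byte per pixel).
// frame->linesize[0] may be larger than frame->width because of alignment padding.
uint8_t *tight_y = (uint8_t *)malloc(frame->width * frame->height);
for (int row = 0; row < frame->height; row++) {
    memcpy(tight_y + row * frame->width,              // destination rows are exactly 'width' bytes
           frame->data[0] + row * frame->linesize[0], // source rows are 'linesize[0]' bytes apart
           frame->width);
}
free(tight_y);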
Decoding
Decoding runs on background (worker) threads.
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt); — submits the AVPacket to the decoder's internal queue on the worker threads. The decoder takes its own reference to the packet data (reference count +1), so the caller may clean up avpkt right after the call; if the packet is not reference-counted, the data is copied, which has some overhead.
avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame); — fetches one decoded frame from that queue; the frame returned may belong to an earlier packet. At the start of decoding the first few receive calls may fail because the threads are still filling their buffers. It is not a strict one-send/one-receive pairing: receive drains a queue that may already hold several decoded frames (see the loop sketched after the two declarations below).
/**
* Supply raw packet data as input to a decoder.
*
* Internally, this call will copy relevant AVCodecContext fields, which can
* influence decoding per-packet, and apply them when the packet is actually
* decoded. (For example AVCodecContext.skip_frame, which might direct the
* decoder to drop the frame contained by the packet sent with this function.)
*
* @warning The input buffer, avpkt->data must be AV_INPUT_BUFFER_PADDING_SIZE
* larger than the actual read bytes because some optimized bitstream
* readers read 32 or 64 bits at once and could read over the end.
*
* @warning Do not mix this API with the legacy API (like avcodec_decode_video2())
* on the same AVCodecContext. It will return unexpected results now
* or in future libavcodec versions.
*
* @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
* before packets may be fed to the decoder.
*
* @param avctx codec context
* @param[in] avpkt The input AVPacket. Usually, this will be a single video
* frame, or several complete audio frames.
* Ownership of the packet remains with the caller, and the
* decoder will not write to the packet. The decoder may create
* a reference to the packet data (or copy it if the packet is
* not reference-counted).
* Unlike with older APIs, the packet is always fully consumed,
* and if it contains multiple frames (e.g. some audio codecs),
* will require you to call avcodec_receive_frame() multiple
* times afterwards before you can send a new packet.
* It can be NULL (or an AVPacket with data set to NULL and
* size set to 0); in this case, it is considered a flush
* packet, which signals the end of the stream. Sending the
* first flush packet will return success. Subsequent ones are
* unnecessary and will return AVERROR_EOF. If the decoder
* still has frames buffered, it will return them after sending
* a flush packet.
*
* @return 0 on success, otherwise negative error code:
* AVERROR(EAGAIN): input is not accepted in the current state - user
* must read output with avcodec_receive_frame() (once
* all output is read, the packet should be resent, and
* the call will not fail with EAGAIN).
* AVERROR_EOF: the decoder has been flushed, and no new packets can
* be sent to it (also returned if more than 1 flush
* packet is sent)
* AVERROR(EINVAL): codec not opened, it is an encoder, or requires flush
* AVERROR(ENOMEM): failed to add packet to internal queue, or similar
* other errors: legitimate decoding errors
*/
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt);
/**
* Return decoded output data from a decoder.
*
* @param avctx codec context
* @param frame This will be set to a reference-counted video or audio
* frame (depending on the decoder type) allocated by the
* decoder. Note that the function will always call
* av_frame_unref(frame) before doing anything else.
*
* @return
* 0: success, a frame was returned
* AVERROR(EAGAIN): output is not available in this state - user must try
* to send new input
* AVERROR_EOF: the decoder has been fully flushed, and there will be
* no more output frames
* AVERROR(EINVAL): codec not opened, or it is an encoder
* other negative values: legitimate decoding errors
*/
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame);
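Putting the two calls together, a hedged sketch of the canonical receive loop (dec_ctx, pkt and frame are assumed to exist; pkt comes from av_read_frame()): one send can produce zero or more frames, EAGAIN just means "feed more input", and sending NULL at end of stream flushes the decoder's buffered frames.
int ret = avcodec_send_packet(dec_ctx, pkt);   // pkt == NULL at end of stream starts flushing
if (pkt) av_packet_unref(pkt);                 // the decoder keeps its own reference
if (ret < 0 && ret != AVERROR(EAGAIN)) { /* handle error */ }

for (;;) {
    ret = avcodec_receive_frame(dec_ctx, frame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break;                                 // need more input, or fully flushed
    if (ret < 0) { /* decoding error */ break; }

    // ... use frame->data / frame->linesize / frame->pts ...
    av_frame_unref(frame);
}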
Video pixel-format and size conversion (libswscale)
Advantage: the API is simple. Drawback: the conversion is CPU-expensive.
Hardware decoding (and scaling) is recommended where possible.
/**
* Allocate and return an SwsContext. You need it to perform
* scaling/conversion operations using sws_scale().
*
* @param srcW the width of the source image
* @param srcH the height of the source image
* @param srcFormat the source image format
* @param dstW the width of the destination image
* @param dstH the height of the destination image
* @param dstFormat the destination image format
* @param flags specify which algorithm and options to use for rescaling
* @param param extra parameters to tune the used scaler
* For SWS_BICUBIC param[0] and [1] tune the shape of the basis
* function, param[0] tunes f(1) and param[1] f´(1)
* For SWS_GAUSS param[0] tunes the exponent and thus cutoff
* frequency
* For SWS_LANCZOS param[0] tunes the width of the window function
* @return a pointer to an allocated context, or NULL in case of error
* @note this function is to be removed after a saner alternative is
* written
*/
struct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,
int dstW, int dstH, enum AVPixelFormat dstFormat,
int flags, SwsFilter *srcFilter,
SwsFilter *dstFilter, const double *param);
//Recommended when handling a single video stream: if the existing context is still usable it is returned as-is; otherwise it is freed and a new one is created with the new parameters.
//The first argument may be NULL; a context is returned, which can then be passed back in on later calls.
/**
* Check if context can be reused, otherwise reallocate a new one.
*
* If context is NULL, just calls sws_getContext() to get a new
* context. Otherwise, checks if the parameters are the ones already
* saved in context. If that is the case, returns the current
* context. Otherwise, frees context and gets a new context with
* the new parameters.
*
* Be warned that srcFilter and dstFilter are not checked, they
* are assumed to remain the same.
*/
struct SwsContext *sws_getCachedContext(struct SwsContext *context,
int srcW, int srcH, enum AVPixelFormat srcFormat,
int dstW, int dstH, enum AVPixelFormat dstFormat,
int flags, SwsFilter *srcFilter,
SwsFilter *dstFilter, const double *param);
/**
* Scale the image slice in srcSlice and put the resulting scaled
* slice in the image in dst. A slice is a sequence of consecutive
* rows in an image.
*
* Slices have to be provided in sequential order, either in
* top-bottom or bottom-top order. If slices are provided in
* non-sequential order the behavior of the function is undefined.
*
* @param c the scaling context previously created with
* sws_getContext()
* @param srcSlice the array containing the pointers to the planes of
* the source slice (the input data arrays)
* @param srcStride the array containing the strides for each plane of
* the source image (the per-plane line sizes)
* @param srcSliceY the position in the source image of the slice to
* process, that is the number (counted starting from
* zero) in the image of the first row of the slice
* @param srcSliceH the height of the source slice, that is the number
* of rows in the slice (the image height)
* @param dst the array containing the pointers to the planes of
* the destination image
* @param dstStride the array containing the strides for each plane of
* the destination image
* @return the height of the output slice
*/
int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[],
const int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *const dst[], const int dstStride[]);
//Note that sws_freeContext() takes a SwsContext* (not a pointer-to-pointer), so it cannot clear the caller's variable; it is best to set the pointer to NULL manually after freeing (see the lifecycle sketch after the declaration below).
/**
* Free the swscaler context swsContext.
* If swsContext is NULL, then does nothing.
*/
void sws_freeContext(struct SwsContext *swsContext);
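A short lifecycle sketch (assumptions: a single video stream and a decoded frame named frame) tying the three calls above together; since sws_freeContext() takes a plain pointer, the caller clears the variable manually. The full demo program below uses the same pattern inside its decode loop.
// needs: extern "C" { #include <libswscale/swscale.h> }
SwsContext *vctx = NULL;                              // kept across frames and reused
uint8_t *rgba = (uint8_t *)malloc(frame->width * frame->height * 4);

vctx = sws_getCachedContext(vctx,                     // NULL on the first call, reused afterwards
    frame->width, frame->height, (AVPixelFormat)frame->format,
    frame->width, frame->height, AV_PIX_FMT_RGBA,
    SWS_BILINEAR, NULL, NULL, NULL);
if (vctx)
{
    uint8_t *dst[4] = { rgba, NULL, NULL, NULL };     // RGBA is packed: only plane 0 is used
    int dst_linesize[4] = { frame->width * 4, 0, 0, 0 };
    sws_scale(vctx, frame->data, frame->linesize, 0, frame->height, dst, dst_linesize);
}

sws_freeContext(vctx);                                // frees the context only...
vctx = NULL;                                          // ...so clear the pointer manually
free(rgba);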
#include <iostream>
#include <thread>
extern "C" {
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"
}
using namespace std;
#pragma comment(lib,"avformat.lib")
#pragma comment(lib,"avutil.lib")
#pragma comment(lib,"avcodec.lib")
#pragma comment(lib,"swscale.lib")
static double r2d(AVRational r)
{
return r.den == 0 ? 0 : (double)r.num / (double)r.den;
}
void XSleep(int ms)
{
//c++ 11
chrono::milliseconds du(ms);
this_thread::sleep_for(du);
}
int main(int argc, char *argv[])
{
cout << "Test Demux FFmpeg.club" << endl;
const char *path = "v1080.mp4";
//initialize the muxing/demuxing library
av_register_all();
//initialize the network library (enables opening rtsp/rtmp/http streams)
avformat_network_init();
//register all decoders
avcodec_register_all();
//option dictionary
AVDictionary *opts = NULL;
//open rtsp streams over tcp
av_dict_set(&opts, "rtsp_transport", "tcp", 0);
//maximum network delay
av_dict_set(&opts, "max_delay", "500", 0);
//demuxing context
AVFormatContext *ic = NULL;
int re = avformat_open_input(
&ic,
path,
0, // 0 means auto-detect the demuxer
&opts // options, e.g. rtsp transport/delay settings
);
if (re != 0)
{
char buf[1024] = { 0 };
av_strerror(re, buf, sizeof(buf) - 1);
cout << "open " << path << " failed! :" << buf << endl;
getchar();
return -1;
}
cout << "open " << path << " success! " << endl;
//read stream info
re = avformat_find_stream_info(ic, 0);
//total duration in milliseconds
int totalMs = ic->duration / (AV_TIME_BASE / 1000);
cout << "totalMs = " << totalMs << endl;
//print detailed stream information
av_dump_format(ic, 0, path, 0);
//audio/video stream indices, used to tell packets apart when reading
int videoStream = 0;
int audioStream = 1;
//get audio/video stream info (by iterating here, or via a helper function)
for (int i = 0; i < ic->nb_streams; i++)
{
AVStream *as = ic->streams[i];
cout << "codec_id = " << as->codecpar->codec_id << endl;
cout << "format = " << as->codecpar->format << endl;
//audio stream: AVMEDIA_TYPE_AUDIO
if (as->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
{
audioStream = i;
cout << i << " audio stream info" << endl;
cout << "sample_rate = " << as->codecpar->sample_rate << endl;
//AVSampleFormat;
cout << "channels = " << as->codecpar->channels << endl;
//samples per frame (per channel)
cout << "frame_size = " << as->codecpar->frame_size << endl;
//1024 * 2 * 2 = 4096 bytes per frame; audio frames per second = sample_rate / frame_size
}
//video stream: AVMEDIA_TYPE_VIDEO
else if (as->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
{
videoStream = i;
cout << i << " video stream info" << endl;
cout << "width=" << as->codecpar->width << endl;
cout << "height=" << as->codecpar->height << endl;
//frame rate (fps), converted from an AVRational
cout << "video fps = " << r2d(as->avg_frame_rate) << endl;
}
}
//pick the best video stream
videoStream = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
//
///open the video decoder
///find the video decoder
AVCodec *vcodec = avcodec_find_decoder(ic->streams[videoStream]->codecpar->codec_id);
if (!vcodec)
{
cout << "can't find the codec id " << ic->streams[videoStream]->codecpar->codec_id;
getchar();
return -1;
}
cout << "find the AVCodec " << ic->streams[videoStream]->codecpar->codec_id << endl;
AVCodecContext *vc = avcodec_alloc_context3(vcodec);
///copy the stream parameters into the decoder context
avcodec_parameters_to_context(vc, ic->streams[videoStream]->codecpar);
//decode with 8 threads
vc->thread_count = 8;
///open the decoder context
re = avcodec_open2(vc, 0, 0);
if (re != 0)
{
char buf[1024] = { 0 };
av_strerror(re, buf, sizeof(buf) - 1);
cout << "avcodec_open2 failed! :" << buf << endl;
getchar();
return -1;
}
cout << "video avcodec_open2 success!" << endl;
//
///open the audio decoder
AVCodec *acodec = avcodec_find_decoder(ic->streams[audioStream]->codecpar->codec_id);
if (!acodec)
{
cout << "can't find the codec id " << ic->streams[audioStream]->codecpar->codec_id;
getchar();
return -1;
}
cout << "find the AVCodec " << ic->streams[audioStream]->codecpar->codec_id << endl;
///allocate the decoder context
AVCodecContext *ac = avcodec_alloc_context3(acodec);
///copy the stream parameters into the decoder context
avcodec_parameters_to_context(ac, ic->streams[audioStream]->codecpar);
//decode with 8 threads
ac->thread_count = 8;
///open the decoder context
re = avcodec_open2(ac, 0, 0);
if (re != 0)
{
char buf[1024] = { 0 };
av_strerror(re, buf, sizeof(buf) - 1);
cout << "avcodec_open2 failed! :" << buf << endl;
getchar();
return -1;
}
cout << "audio avcodec_open2 success!" << endl;
///ic->streams[videoStream]
//allocate and initialize an AVPacket
AVPacket *pkt = av_packet_alloc();
AVFrame *frame = av_frame_alloc();
//pixel format / size conversion context
SwsContext *vctx = NULL;
unsigned char *rgb = NULL;
for (;;)
{
int re = av_read_frame(ic, pkt);
if (re != 0)
{
//loop playback: seek back and keep reading
cout << "==============================end==============================" << endl;
int ms = 3000; //target position at 3 seconds, converted using the stream time base (a rational)
long long pos = (double)ms / 1000.0 / r2d(ic->streams[videoStream]->time_base);
av_seek_frame(ic, videoStream, pos, AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_FRAME);
continue;
}
cout << "pkt->size = " << pkt->size << endl;
//presentation time
cout << "pkt->pts = " << pkt->pts << endl;
//converted to milliseconds, convenient for A/V sync
cout << "pkt->pts ms = " << pkt->pts * (r2d(ic->streams[pkt->stream_index]->time_base) * 1000) << endl;
//decoding time
cout << "pkt->dts = " << pkt->dts << endl;
AVCodecContext *cc = 0;
if (pkt->stream_index == videoStream)
{
cout << "video packet" << endl;
cc = vc;
}
if (pkt->stream_index == audioStream)
{
cout << "audio packet" << endl;
cc = ac;
}
///decode
//send the packet to the decoding threads; after sending NULL, call receive repeatedly to drain all buffered frames
re = avcodec_send_packet(cc, pkt);
//unreference the packet: ref count -1, the buffer is freed when it reaches 0
av_packet_unref(pkt);
if (re != 0)
{
char buf[1024] = { 0 };
av_strerror(re, buf, sizeof(buf) - 1);
cout << "avcodec_send_packet failed! :" << buf << endl;
continue;
}
for (;;)
{
//fetch decoded frames from the threads; one send may correspond to several receives
re = avcodec_receive_frame(cc, frame);
if (re != 0) break;
cout << "recv frame " << frame->format << " " << frame->linesize[0] << endl;
//video frame
if (cc == vc)
{
vctx = sws_getCachedContext(
vctx, //pass NULL to create a new context
frame->width, frame->height, //input width and height
(AVPixelFormat)frame->format, //input pixel format, e.g. YUV420P
frame->width, frame->height, //output width and height
AV_PIX_FMT_RGBA, //output pixel format RGBA
SWS_BILINEAR, //scaling algorithm
0, 0, 0);
//if (vctx)
// cout << "pixel format/size conversion context created or reused successfully!" << endl;
//else
// cout << "failed to create or reuse the pixel format/size conversion context!" << endl;
if (vctx)
{
if (!rgb) rgb = new unsigned char[frame->width*frame->height * 4];//AV_PIX_FMT_RGBA is 4 bytes per pixel; buffer for the rgba output
uint8_t *data[2] = { 0 };//data[0] holds the rgba output, data[1] = NULL marks the end of the plane list
data[0] = rgb;
int lines[2] = { 0 };//size in bytes of each output image line
lines[0] = frame->width * 4;
re = sws_scale(vctx,
frame->data, //input data planes
frame->linesize, //input line sizes
0,
frame->height, //input height
data, //output data planes
lines //output line sizes
);
cout << "sws_scale = " << re << endl;
}
}
}
//XSleep(500);
}
av_frame_free(&frame);
av_packet_free(&pkt);
if (ic)
{
//close the demuxing context; avformat_close_input also sets ic to NULL
avformat_close_input(&ic);
}
getchar();
return 0;
}
Audio resampling
https://blog.csdn.net/LIJIWEI0611/article/details/111500850
AAC decodes to AV_SAMPLE_FMT_FLTP (32-bit, 4-byte float, planar); sound cards do not play that directly, so it has to be converted to 16-bit samples.
If the resampling also changes the sample rate, it is hard to guarantee correctness.
Resources: https://download.csdn.net/download/LIJIWEI0611/18350915
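The notes above only link to an external article, so here is a hedged sketch (not from the source) of converting AAC's planar float output (AV_SAMPLE_FMT_FLTP) to packed 16-bit PCM (AV_SAMPLE_FMT_S16) with libswresample, keeping the sample rate unchanged; the stereo layout and the name out_buf are illustrative assumptions.
// needs: extern "C" { #include <libswresample/swresample.h> } and libavutil for av_malloc
// Assumed: 'frame' is a decoded AAC frame (AV_SAMPLE_FMT_FLTP, stereo).
SwrContext *swr = swr_alloc_set_opts(NULL,
    AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, frame->sample_rate,   // output: packed 16-bit
    AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_FLTP, frame->sample_rate,  // input: planar float
    0, NULL);
swr_init(swr);

// Output buffer: nb_samples * 2 channels * 2 bytes per S16 sample, interleaved.
uint8_t *out_buf = (uint8_t *)av_malloc(frame->nb_samples * 2 * 2);
uint8_t *out[1] = { out_buf };

int out_samples = swr_convert(swr,
    out, frame->nb_samples,                              // destination
    (const uint8_t **)frame->data, frame->nb_samples);   // source planes (left and right)
// out_samples * 2 channels * 2 bytes of interleaved S16 PCM are now in out_buf.

av_free(out_buf);
swr_free(&swr);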