We know that MediaPlayerInterface is the key bridging interface in the Android media framework: the players that ship with Android are all derived from it. While working on FLAC support earlier we already looked at the Vorbis (Ogg) player code, but that was audio only, with no video involved. Below is a brief introduction to the most complex of them, PVPlayer:
class PVPlayer : public MediaPlayerInterface
{
public:
    PVPlayer();
    virtual ~PVPlayer();

    virtual status_t initCheck();                                           // 1
    virtual status_t setDataSource(const char *url);                        // 2
    virtual status_t setDataSource(int fd, int64_t offset, int64_t length); // 2
    virtual status_t setVideoSurface(const sp<ISurface>& surface);          // 3
    virtual status_t prepare();                                             // 4
    virtual status_t prepareAsync();                                        // 5
    virtual status_t start();
    virtual status_t stop();                                                // 6
    virtual status_t pause();                                               // 6
    virtual bool     isPlaying();
    virtual status_t seekTo(int msec);
    virtual status_t getCurrentPosition(int *msec);
    virtual status_t getDuration(int *msec);
    virtual status_t reset();
    virtual status_t setLooping(int loop);
    virtual player_type playerType() { return PV_PLAYER; }

    // make available to PlayerDriver
    void sendEvent(int msg, int ext1=0, int ext2=0) { MediaPlayerBase::sendEvent(msg, ext1, ext2); }

private:
    static void do_nothing(status_t s, void *cookie, bool cancelled) { }
    static void run_init(status_t s, void *cookie, bool cancelled);
    static void run_set_video_surface(status_t s, void *cookie, bool cancelled);
    static void run_set_audio_output(status_t s, void *cookie, bool cancelled);
    static void run_prepare(status_t s, void *cookie, bool cancelled);

    PlayerDriver* mPlayerDriver;
    char *        mDataSourcePath;
    bool          mIsDataSourceSet;
    sp<ISurface>  mSurface;
    int           mSharedFd;
    status_t      mInit;
    int           mDuration;

#ifdef MAX_OPENCORE_INSTANCES
    static volatile int32_t sNumInstances;
#endif
};
1. virtual status_t initCheck(); // This function is called when the player object is created, and can do some basic initialization work, as shown here:
static sp<MediaPlayerBase> createPlayer(player_type playerType, void* cookie,
        notify_callback_f notifyFunc)
{
    sp<MediaPlayerBase> p;
    switch (playerType) {
        case PV_PLAYER:
            LOGV(" create PVPlayer");
            p = new PVPlayer();
            break;
        case SONIVOX_PLAYER:
            LOGV(" create MidiFile");
            p = new MidiFile();
            break;
        case VORBIS_PLAYER:
            LOGV(" create VorbisPlayer");
            p = new VorbisPlayer();
            break;
    }
    if (p != NULL) {
        if (p->initCheck() == NO_ERROR) {
            p->setNotifyCallback(cookie, notifyFunc);
        } else {
            p.clear();
        }
    }
    if (p == NULL) {
        LOGE("Failed to create player object");
    }
    return p;
}
Notice that setNotifyCallback() is also called here, right after initCheck() succeeds. A sketch of how the base class pairs it with sendEvent() follows.
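For reference, here is a minimal sketch of how MediaPlayerBase plausibly pairs setNotifyCallback() with the sendEvent() helper that PVPlayer re-exports in its class declaration above. Treat it as a simplified reconstruction, not the verbatim framework header:

#include <cstddef>  // for NULL

// Simplified reconstruction of the callback plumbing in MediaPlayerBase.
typedef void (*notify_callback_f)(void* cookie, int msg, int ext1, int ext2);

class MediaPlayerBase {
public:
    MediaPlayerBase() : mCookie(NULL), mNotify(NULL) {}

    // Stored by createPlayer() right after initCheck() succeeds.
    void setNotifyCallback(void* cookie, notify_callback_f notifyFunc) {
        mCookie = cookie;
        mNotify = notifyFunc;
    }

    // Players report events (prepared, errors, video size changes, ...)
    // back through the stored callback.
    void sendEvent(int msg, int ext1 = 0, int ext2 = 0) {
        if (mNotify != NULL)
            mNotify(mCookie, msg, ext1, ext2);
    }

private:
    void*             mCookie;  // opaque pointer back to the Client
    notify_callback_f mNotify;  // the Client's notify function
};

This is why the video MIO can later call mPvPlayer->sendEvent(MEDIA_SET_VIDEO_SIZE, ...): the event travels through this stored callback up to MediaPlayerService::Client and from there to the application.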
2. There are two setDataSource() variants: one opens a URL/file and plays from the beginning, the other takes an already-open file descriptor and plays a segment starting at a given offset.
status_t MediaPlayerService::Client::setDataSource(const char *url)
{
    LOGV("setDataSource(%s)", url);
    if (url == NULL)
        return UNKNOWN_ERROR;

    if (strncmp(url, "content://", 10) == 0) {
        // get a filedescriptor for the content Uri and
        // pass it to the setDataSource(fd) method
        String16 url16(url);
        int fd = android::openContentProviderFile(url16); // resolve the content URI to a file descriptor
        if (fd < 0)
        {
            LOGE("Couldn't open fd for %s", url);
            return UNKNOWN_ERROR;
        }
        setDataSource(fd, 0, 0x7fffffffffLL); // this sets mStatus
        close(fd);
        return mStatus;
    } else {
        player_type playerType = getPlayerType(url);
        LOGV("player type = %d", playerType);

        // create the right type of player
        sp<MediaPlayerBase> p = createPlayer(playerType);
        if (p == NULL) return NO_INIT;

        if (!p->hardwareOutput()) {
            mAudioOutput = new AudioOutput();
            static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
        }

        // now set data source
        LOGV(" setDataSource");
        mStatus = p->setDataSource(url);
        if (mStatus == NO_ERROR) mPlayer = p;
        return mStatus;
    }
}
status_t MediaPlayerService::Client::setDataSource(int fd, int64_t offset, int64_t length)
{
    LOGV("setDataSource fd=%d, offset=%lld, length=%lld", fd, offset, length);
    struct stat sb;
    int ret = fstat(fd, &sb);
    if (ret != 0) {
        LOGE("fstat(%d) failed: %d, %s", fd, ret, strerror(errno));
        return UNKNOWN_ERROR;
    }

    LOGV("st_dev  = %llu", sb.st_dev);
    LOGV("st_mode = %u", sb.st_mode);
    LOGV("st_uid  = %lu", sb.st_uid);
    LOGV("st_gid  = %lu", sb.st_gid);
    LOGV("st_size = %llu", sb.st_size);

    if (offset >= sb.st_size) {
        LOGE("offset error");
        ::close(fd);
        return UNKNOWN_ERROR;
    }
    if (offset + length > sb.st_size) {
        length = sb.st_size - offset;
        LOGV("calculated length = %lld", length);
    }

    player_type playerType = getPlayerType(fd, offset, length);
    LOGV("player type = %d", playerType);

    // create the right type of player
    sp<MediaPlayerBase> p = createPlayer(playerType);
    if (p == NULL) return NO_INIT;

    if (!p->hardwareOutput()) {
        mAudioOutput = new AudioOutput();
        static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
    }

    // now set data source
    mStatus = p->setDataSource(fd, offset, length);
    if (mStatus == NO_ERROR) mPlayer = p;
    return mStatus;
}
3. The client side simply forwards the surface to the player:
status_t MediaPlayerService::Client::setVideoSurface(const sp<ISurface>& surface)
{
    LOGV("[%d] setVideoSurface(%p)", mConnId, surface.get());
    sp<MediaPlayerBase> p = getPlayer();
    return p->setVideoSurface(surface);
}
As you can see, the framework does not actually do the rendering for you; in many players the p->setVideoSurface(surface) implementation is effectively empty. This function merely hands you the sp<ISurface> from the upper layer; what you draw on it is entirely your own business. If you use it, you get video; if you don't, you don't. In other words, the display work has to be done by the player itself, as the sketch below illustrates.
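As an illustration, consider a hypothetical AudioOnlyPlayer (not a class from the source): an audio-only player can accept the surface and simply never touch it:

// Hypothetical audio-only player: it accepts the surface but never draws.
status_t AudioOnlyPlayer::setVideoSurface(const sp<ISurface>& surface)
{
    // Nothing to render; the surface can be ignored entirely.
    return OK;
}

PVPlayer, by contrast, stores the surface in mSurface and later hands it down to its video MIO during prepare(), as we will see below.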
4, 5. Both assume the data source has already been set; they perform the remaining preparation before playback. One is synchronous, the other asynchronous.
The related sp<IMemory> MediaPlayerService::decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, ...) follows the same sequence: prepare, wait for prepare to complete, start, wait for start/playback to complete, and then the decoded data is available. The interface functions above contain little of substance, so let's look at how PVPlayer actually implements things. The key is its private functions:
static void do_nothing(status_t s, void *cookie, bool cancelled) { }
static void run_init(status_t s, void *cookie, bool cancelled);
static void run_set_video_surface(status_t s, void *cookie, bool cancelled);
static void run_set_audio_output(status_t s, void *cookie, bool cancelled);
static void run_prepare(status_t s, void *cookie, bool cancelled);
and its private members:
PlayerDriver* mPlayerDriver;   // the PV playback engine/driver
char *        mDataSourcePath; // the data source (URL or sharedfd string)
bool          mIsDataSourceSet; // flag: has the data source been set?
sp<ISurface>  mSurface;        // the display surface
int           mSharedFd;       // presumably the shared file descriptor
status_t      mInit;           // a status flag (result of initialization)
int           mDuration;       // playback duration of the file
Now let's look at the implementation:
// ----------------------------------------------------------------------------
// implement the Packet Video player
// ----------------------------------------------------------------------------
PVPlayer::PVPlayer()
{
    LOGV("PVPlayer constructor");
    mDataSourcePath = NULL;
    mSharedFd = -1;
    mIsDataSourceSet = false;
    mDuration = -1;
    mPlayerDriver = NULL;

    LOGV("construct PlayerDriver");
    mPlayerDriver = new PlayerDriver(this);
    LOGV("send PLAYER_SETUP");
    mInit = mPlayerDriver->enqueueCommand(new PlayerSetup(0,0)); // put a setup command on the driver's queue
}
status_t PVPlayer::initCheck()
{
    return mInit;
}
There is hardly any work here; the initialization could in fact have been done in this function instead.
PVPlayer::~PVPlayer()
{
    LOGV("PVPlayer destructor");
    if (mPlayerDriver != NULL) {
        PlayerQuit quit = PlayerQuit(0,0); // send a quit command
        mPlayerDriver->enqueueCommand(&quit); // will wait on mSyncSem, signaled by player thread
    }
    free(mDataSourcePath);
    if (mSharedFd >= 0) { // close the shared file descriptor if one is open
        close(mSharedFd);
    }
}
status_t PVPlayer::setDataSource(const char *url)
{
    LOGV("setDataSource(%s)", url);
    if (mSharedFd >= 0) {
        close(mSharedFd);
        mSharedFd = -1;
    }
    free(mDataSourcePath);
    mDataSourcePath = NULL;

    // Don't let somebody trick us in to reading some random block of memory
    if (strncmp("sharedfd://", url, 11) == 0)
        return android::UNKNOWN_ERROR;

    mDataSourcePath = strdup(url);
    return OK;
}
This function does nothing but record the URL in mDataSourcePath, so how does OpenCore ever learn about it? The answer is in prepare(), below, which enqueues a PlayerSetDataSource command carrying this path.
status_t PVPlayer::setDataSource(int fd, int64_t offset, int64_t length) {
    // This is all a big hack to allow PV to play from a file descriptor.
    // Eventually we'll fix PV to use a file descriptor directly instead
    // of using mmap().
    LOGV("setDataSource(%d, %lld, %lld)", fd, offset, length);
    if (mSharedFd >= 0) {
        close(mSharedFd);
        mSharedFd = -1;
    }
    free(mDataSourcePath);
    mDataSourcePath = NULL;

    char buf[80];
    mSharedFd = dup(fd);
    sprintf(buf, "sharedfd://%d:%lld:%lld", mSharedFd, offset, length);
    mDataSourcePath = strdup(buf);
    return OK;
}
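The fd, offset, and length are smuggled to PV inside the "sharedfd://fd:offset:length" string. As a hedged sketch of the receiving side (the real parsing happens inside OpenCore's sharedfd data source, with its own names), the string can be decoded like this:

#include <cstdio>
#include <stdint.h>

// Hypothetical sketch: decode the "sharedfd://fd:offset:length" string
// produced by the sprintf() above. The real parser lives inside PV.
static bool parseSharedFd(const char* url, int* fd,
                          int64_t* offset, int64_t* length)
{
    long long off = 0, len = 0;   // %lld matches the encoding format
    if (sscanf(url, "sharedfd://%d:%lld:%lld", fd, &off, &len) != 3)
        return false;
    *offset = off;
    *length = len;
    return true;
}

Because the descriptor was dup()'d, PV can mmap() or read it independently of the caller's copy, which is exactly the hack the comment at the top of the function describes.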
Next:
status_t PVPlayer::setVideoSurface(const sp<ISurface>& surface)
{
    LOGV("setVideoSurface(%p)", surface.get());
    mSurface = surface;
    return OK;
}
Then comes prepare(). Since setDataSource() did hardly anything, this is where things get busy:
status_t PVPlayer::prepare()
{
    status_t ret;

    // We need to differentiate the two valid use cases for prepare():
    // 1. new PVPlayer/reset()->setDataSource()->prepare()
    // 2. new PVPlayer/reset()->setDataSource()->prepare()/prepareAsync()
    //    ->start()->...->stop()->prepare()
    // If data source has already been set previously, no need to run
    // a sequence of commands and only the PLAYER_PREPARE command needs
    // to be run.
    if (!mIsDataSourceSet) { // first check whether the source still needs to be set
        // set data source: if so, first send the set-data-source command
        LOGV("prepare");
        LOGV(" data source = %s", mDataSourcePath);
        ret = mPlayerDriver->enqueueCommand(new PlayerSetDataSource(mDataSourcePath,0,0));
        if (ret != OK)
            return ret;

        // init: then the init command
        LOGV(" init");
        ret = mPlayerDriver->enqueueCommand(new PlayerInit(0,0));
        if (ret != OK)
            return ret;

        // set video surface, if there is one
        if (mSurface != NULL) {
            LOGV(" set video surface");
            ret = mPlayerDriver->enqueueCommand(new PlayerSetVideoSurface(mSurface,0,0));
            if (ret != OK)
                return ret;
        }

        // set audio output
        // If we ever need to expose selectable audio output setup, this can be broken
        // out. In the meantime, however, system audio routing APIs should suffice.
        LOGV(" set audio sink");
        ret = mPlayerDriver->enqueueCommand(new PlayerSetAudioSink(mAudioSink,0,0));
        if (ret != OK)
            return ret;

        // New data source has been set successfully.
        mIsDataSourceSet = true;
    }

    // prepare: only after the whole sequence succeeds is the prepare command sent
    LOGV(" prepare");
    return mPlayerDriver->enqueueCommand(new PlayerPrepare(0,0));
}
The asynchronous version involves callbacks:
status_t PVPlayer::prepareAsync()
{
    LOGV("prepareAsync");
    status_t ret = OK;

    if (!mIsDataSourceSet) { // If data source has NOT been set.
        // Set our data source as cached in setDataSource() above.
        LOGV(" data source = %s", mDataSourcePath);
        ret = mPlayerDriver->enqueueCommand(new PlayerSetDataSource(mDataSourcePath,run_init,this));
        // run_init is registered as a completion callback here: it says what
        // to do once the set-data-source command has finished
        mIsDataSourceSet = true;
    } else { // If data source has been already set.
        // No need to run a sequence of commands.
        // The only command needed to run is PLAYER_PREPARE.
        ret = mPlayerDriver->enqueueCommand(new PlayerPrepare(do_nothing, NULL));
    }
    return ret;
}
The init callback:
void PVPlayer::run_init(status_t s, void *cookie, bool cancelled)
{
    LOGV("run_init s=%d, cancelled=%d", s, cancelled);
    if (s == NO_ERROR && !cancelled) {
        PVPlayer *p = (PVPlayer*)cookie;
        p->mPlayerDriver->enqueueCommand(new PlayerInit(run_set_video_surface, cookie));
    } // note that once init completes there is a next step: run_set_video_surface
}
void PVPlayer::run_set_video_surface(status_t s, void *cookie, bool cancelled)
{
    LOGV("run_set_video_surface s=%d, cancelled=%d", s, cancelled);
    if (s == NO_ERROR && !cancelled) {
        // If we don't have a video surface, just skip to the next step.
        PVPlayer *p = (PVPlayer*)cookie;
        if (p->mSurface == NULL) {
            run_set_audio_output(s, cookie, false);
        } else {
            // after setting the video surface, run_set_audio_output comes next
            p->mPlayerDriver->enqueueCommand(new PlayerSetVideoSurface(p->mSurface, run_set_audio_output, cookie));
        }
    }
}
void PVPlayer::run_set_audio_output(status_t s, void *cookie, bool cancelled)
{
    LOGV("run_set_audio_output s=%d, cancelled=%d", s, cancelled);
    if (s == NO_ERROR && !cancelled) {
        PVPlayer *p = (PVPlayer*)cookie;
        p->mPlayerDriver->enqueueCommand(new PlayerSetAudioSink(p->mAudioSink, run_prepare, cookie));
    }
}
void PVPlayer::run_prepare(status_t s, void *cookie, bool cancelled)
{
    LOGV("run_prepare s=%d, cancelled=%d", s, cancelled);
    if (s == NO_ERROR && !cancelled) {
        PVPlayer *p = (PVPlayer*)cookie;
        p->mPlayerDriver->enqueueCommand(new PlayerPrepare(do_nothing,0));
    }
}
Only at the very end does do_nothing run; the work that prepare() does synchronously has been split into a chain of steps: PlayerSetDataSource -> run_init -> PlayerInit -> run_set_video_surface -> PlayerSetVideoSurface -> run_set_audio_output -> PlayerSetAudioSink -> run_prepare -> PlayerPrepare -> do_nothing.
The remaining methods are all quite simple: each just sends one more command. The real work happens inside PlayerDriver.
Now let's take a quick look at PlayerDriver, the component that actually implements playback. First, it is a manager: it manages the whole OpenCore framework, down to the output MIOs at the end of the chain. Second, it is asynchronous: it maintains a command queue (a minimal sketch of that pattern follows). Although there is a lot of code in it, the structure is clear, so we won't list it all; the core playback functionality is wrapped behind an interface called PVPlayerInterface.
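Before diving in, here is a minimal, self-contained sketch of that command-queue pattern (hypothetical names throughout; the real PlayerDriver runs its commands on an OSCL scheduler thread). A command that carries a completion callback is asynchronous; a command without one blocks the caller until the player thread signals completion, which matches the "will wait on mSyncSem, signaled by player thread" comment we saw in the destructor:

#include <deque>
#include <mutex>
#include <condition_variable>

typedef void (*command_cb)(int status, void* cookie, bool cancelled);

struct Command {
    command_cb cb;      // non-NULL => asynchronous command
    void*      cookie;  // handed back to the callback (e.g. the PVPlayer)
    int        status;  // -1 == still pending
};

class DriverSketch {
public:
    // Called on the API thread (e.g. from PVPlayer::prepare()).
    int enqueueCommand(Command* c) {
        std::unique_lock<std::mutex> lock(mLock);
        c->status = -1;
        mQueue.push_back(c);   // the player thread's loop (omitted) picks it up
        mWake.notify_one();
        if (c->cb != NULL)
            return 0;          // async: return now, the callback fires later
        mDone.wait(lock, [c] { return c->status != -1; });
        return c->status;      // sync: block until the command completes
    }

    // Called on the player thread once a command has been executed.
    void commandCompleted(Command* c, int status, bool cancelled) {
        {
            std::lock_guard<std::mutex> lock(mLock);
            c->status = status;
        }
        if (c->cb != NULL)
            c->cb(status, c->cookie, cancelled); // async: run the next step
        else
            mDone.notify_all();                  // sync: release the caller
    }

private:
    std::deque<Command*>    mQueue;
    std::mutex              mLock;
    std::condition_variable mWake;  // wakes the player thread
    std::condition_variable mDone;  // releases synchronous callers
};

Seen through this lens, prepare() is a series of synchronous enqueueCommand() calls, while prepareAsync() enqueues one command whose callback enqueues the next.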
Let's start with video display. When the driver handles the set-video-surface command, it does the following:
// if no device-specific MIO was created, use the generic one
if (mio == NULL) {
    LOGW("Using generic video MIO");
    mio = new AndroidSurfaceOutput();
}

// initialize the MIO parameters
status_t ret = mio->set(mPvPlayer, command->surface(), mEmulation);
if (ret != NO_ERROR) {
    LOGE("Video MIO set failed");
    commandFailed(command);
    delete mio;
    return;
}
mVideoOutputMIO = mio;

mVideoNode = PVMediaOutputNodeFactory::CreateMediaOutputNode(mVideoOutputMIO);
mVideoSink = new PVPlayerDataSinkPVMFNode;
((PVPlayerDataSinkPVMFNode *)mVideoSink)->SetDataSinkNode(mVideoNode);
((PVPlayerDataSinkPVMFNode *)mVideoSink)->SetDataSinkFormatType(PVMF_YUV420);
OSCL_TRY(error, mPlayer->AddDataSink(*mVideoSink, command));
Here mPlayer is a PVPlayerInterface member. In OpenCore everything ultimately gets wrapped into a node; an MIO can belong to a node, and the MIO is the part that talks to the hardware. These lines first create an AndroidSurfaceOutput MIO and configure its basic attributes via set(); then an output node is created from the MIO with CreateMediaOutputNode(). Once that node exists, it is added to the data path as a data sink, with its format type set to YUV420. With the output node in the pipeline, the next question is: how does the MIO actually work?
After the MIO is created, this function is called on it:
status_t AndroidSurfaceOutput::set(PVPlayer* pvPlayer, const sp<ISurface>& surface, bool emulation)
{
    mPvPlayer = pvPlayer;
    mSurface = surface;
    mEmulation = emulation;
    return NO_ERROR;
}
This sets the video-output MIO's most important attributes: the PVPlayer and the surface; the last parameter presumably indicates whether we are running on the emulator.
Now let's look at the MIO itself. We have covered MIOs before (though perhaps not on this blog). One is generally derived from these interfaces:
public OsclTimerObject, public PvmiMIOControl,
public PvmiMediaTransfer, public PvmiCapabilityAndConfig
But as a video-output MIO, this one has a few functions of its own:
// For frame buffer
virtual bool initCheck();
virtual PVMFStatus writeFrameBuf(uint8* aData, uint32 aDataLen, const PvmiMediaXferHeader& data_header_info);
virtual void postLastFrame();
virtual void closeFrameBuf();
bool GetVideoSize(int *w, int *h);
Let's analyze them one by one.
First, initCheck(). When is this function called? It is invoked whenever the video parameters may have changed, and it (re)initializes the frame buffers only when they actually did:
// create a frame buffer for software codecs
OSCL_EXPORT_REF bool AndroidSurfaceOutput::initCheck()
{
    // initialize only when we have all the required parameters:
    // if no video-related parameter has changed, there is nothing
    // to do and we return immediately
    if (!checkVideoParameterFlags())
        return mInitialized;

    // release resources if previously initialized
    closeFrameBuf();

    // reset flags in case display format changes in the middle of a stream
    resetVideoParameterFlags();

    // copy parameters in case we need to adjust them
    // (both the video frame size and the display size; this is also how
    // e.g. landscape mode can be handled)
    int displayWidth = iVideoDisplayWidth;
    int displayHeight = iVideoDisplayHeight;
    int frameWidth = iVideoWidth;
    int frameHeight = iVideoHeight;
    int frameSize;

    // RGB-565 frames are 2 bytes/pixel; the '(x + 1) & -2' idiom
    // rounds each dimension up to an even number
    displayWidth = (displayWidth + 1) & -2;
    displayHeight = (displayHeight + 1) & -2;
    frameWidth = (frameWidth + 1) & -2;
    frameHeight = (frameHeight + 1) & -2;
    frameSize = frameWidth * frameHeight * 2;

    // create frame buffer heap and register with surfaceflinger:
    // allocate room for kBufferCount frames
    mFrameHeap = new MemoryHeapBase(frameSize * kBufferCount);
    if (mFrameHeap->heapID() < 0) {
        LOGE("Error creating frame buffer heap");
        return false;
    }

    // describe the buffers: pixel format plus display and frame dimensions
    ISurface::BufferHeap buffers(displayWidth, displayHeight,
            frameWidth, frameHeight, PIXEL_FORMAT_RGB_565, mFrameHeap);
    // then register the buffers with the surface
    mSurface->registerBuffers(buffers);

    // create frame buffers:
    // mFrameBuffers[i] holds the offset of frame i inside the heap
    for (int i = 0; i < kBufferCount; i++) {
        mFrameBuffers[i] = i * frameSize;
    }

    // initialize software color converter
    iColorConverter = ColorConvert16::NewL();
    iColorConverter->Init(displayWidth, displayHeight, frameWidth, displayWidth, displayHeight, displayWidth, CCROTATE_NONE);
    iColorConverter->SetMemHeight(frameHeight);
    iColorConverter->SetMode(1);

    LOGV("video = %d x %d", displayWidth, displayHeight);
    LOGV("frame = %d x %d", frameWidth, frameHeight);
    LOGV("frame #bytes = %d", frameSize);

    // register frame buffers with SurfaceFlinger
    mFrameBufferIndex = 0;
    mInitialized = true;
    mPvPlayer->sendEvent(MEDIA_SET_VIDEO_SIZE, iVideoDisplayWidth, iVideoDisplayHeight);
    return mInitialized;
}
With that, the frame buffers are set up. As a purely illustrative example: for a 320 x 240 stream, frameSize = 320 * 240 * 2 = 153600 bytes, so assuming kBufferCount is 2 the heap is 307200 bytes, and mFrameBuffers[] holds the offsets 0 and 153600.
The second function: when is writeFrameBuf() used? From our earlier look at MIOs we know that MIO data transfer goes through a queue, and that messages and data can travel in the same queue. In the MIO's writeAsync() function there is this case:
case PVMI_MEDIAXFER_FMT_TYPE_DATA :
    switch(aFormatIndex)
    {
        case PVMI_MEDIAXFER_FMT_INDEX_FMT_SPECIFIC_INFO:
            ……………………
        case PVMI_MEDIAXFER_FMT_INDEX_DATA:
            // data contains the media bitstream.

            // Verify the state
            if (iState != STATE_STARTED)
            {
                PVLOGGER_LOGMSG(PVLOGMSG_INST_REL, iLogger, PVLOGMSG_ERR,
                        (0,"AndroidSurfaceOutput::writeAsync: Error - Invalid state"));
                status = PVMFErrInvalidState;
            }
            else
            {
                //printf("V WriteAsync { seq=%d, ts=%d }\n", data_header_info.seq_num, data_header_info.timestamp);

                // Call playback to send data to IVA for Color Convert
                status = writeFrameBuf(aData, aDataLen, data_header_info);

                PVLOGGER_LOGMSG(PVLOGMSG_INST_REL, iLogger, PVLOGMSG_ERR,
                        (0,"AndroidSurfaceOutput::writeAsync: Playback Progress - frame %d",iFrameNumber++));
            }
            break;
This is where our screen-drawing function gets called, i.e., the moment a frame of data arrives. Note that audio/video synchronization has essentially already been handled in the node framework; any data that reaches this point is meant to be displayed now, so we don't need to worry about A/V sync here.
OSCL_EXPORT_REF PVMFStatus AndroidSurfaceOutput::writeFrameBuf(uint8* aData, uint32 aDataLen, const PvmiMediaXferHeader& data_header_info)
{
    if (mSurface == 0) return PVMFFailure;

    if (++mFrameBufferIndex == kBufferCount) mFrameBufferIndex = 0;
    iColorConverter->Convert(aData, static_cast<uint8*>(mFrameHeap->base()) + mFrameBuffers[mFrameBufferIndex]);

    // post to SurfaceFlinger
    mSurface->postBuffer(mFrameBuffers[mFrameBufferIndex]);
    return PVMFSuccess;
}
This function is simple: it advances to the next buffer in the ring, color-converts the incoming frame into the format the display supports, and then calls postBuffer() to show it. Cycling through kBufferCount buffers means the frame being written is never the one currently on screen.
Now look at the next function:
// post the last video frame to refresh screen after pause
void AndroidSurfaceOutput::postLastFrame()
{
    mSurface->postBuffer(mFrameBuffers[mFrameBufferIndex]);
}
It re-posts the current buffer to the screen. It can be called on pause, so the video keeps showing the last frame instead of going blank.
OSCL_EXPORT_REF void AndroidSurfaceOutput::closeFrameBuf()
{
    LOGV("closeFrameBuf");
    if (!mInitialized) return;
    mInitialized = false;

    if (mSurface.get()) {
        LOGV("unregisterBuffers");
        mSurface->unregisterBuffers();
        mSurface.clear();
    }

    // free frame buffers
    LOGV("free frame buffers");
    for (int i = 0; i < kBufferCount; i++) {
        mFrameBuffers[i] = 0;
    }

    // free heaps
    LOGV("free mFrameHeap");
    mFrameHeap.clear();

    // free color converter
    if (iColorConverter != 0)
    {
        LOGV("free color converter");
        delete iColorConverter;
        iColorConverter = 0;
    }
}
As mentioned earlier, this is a simple cleanup operation: unregister the buffers, release the heap, and delete the color converter.
OSCL_EXPORT_REF bool AndroidSurfaceOutput::GetVideoSize(int *w, int *h) {
    *w = iVideoDisplayWidth;
    *h = iVideoDisplayHeight;
    return iVideoDisplayWidth != 0 && iVideoDisplayHeight != 0;
}
It returns the width and height (and whether they are known yet).