最近应领导蛋疼的需求对binder和fifo做一次测试对比,并给出测试数据,所以简单写了fifo和binder的测试对比程序,拿到了对比结果;
由于有个项目需要进程间传输数据,之前在网上查看了一些资料,了解了一些binder的实现原理,觉得binder传输效率应该是比FIFO高的,于是就用binder实现了;但是领导对binder一无所知,觉得binder不靠谱,要求我在项目实现中FIFO和binder共用,FIFO传输数据流,binder做FIFO传输的同步控制,由于是在linux平台上用C实现,binder那一套移植过来本来就费事儿,一听就觉得反感,完全是多此一举,没事找事儿,再说怎么也轮不到这两个共用,binder大材小用,冗余又麻烦;个木脑壳,给他解释也没用,说什么要拿测试对比数据说话,怄上气了,最后带着情绪做了套测试,但却得到了一些吃惊的结果:
测试环境:linux平台,
以下是测试代码,比较粗糙;
FIFO测试代码:(注:FIFO代码可以直接编译运行)
client.c:
/*fifl_read.c*/
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include<sys/time.h>
#include<unistd.h>
#define FIFO "/tmp/myfifo"
#define READ_DATA_LEN 4096
/*
 * FIFO throughput test - reader side.
 * Creates /tmp/myfifo, opens the read end non-blocking, then reads
 * 1 GiB in 4 KiB chunks, printing wall-clock timestamps before and
 * after the transfer loop so throughput can be computed.
 *
 * Fixes vs. the original:
 *  - mkfifo() takes a permission mode, not O_CREAT|O_EXCL flag bits.
 *  - read() returns -1 on EAGAIN; only positive counts are accumulated
 *    (the original subtracted 1 from nNum on every empty poll).
 *  - time_t/suseconds_t are printed with %ld, not %d (UB on LP64).
 *  - pause() before unlink() made the cleanup unreachable; removed.
 */
int main(int argc, char **argv)
{
    char buf_r[READ_DATA_LEN];
    int nIndex = 0;
    int fd;
    ssize_t nread;
    int nNum = 0;             /* total bytes received so far */
    struct timeval tv;
    struct timezone tz;

    /* Create the named FIFO with rw permissions; EEXIST is fine. */
    if ((mkfifo(FIFO, 0666) < 0) && (errno != EEXIST))
        printf("cannot create fifoserver\n");
    printf("Preparing for reading bytes...\n");
    memset(buf_r, 0, sizeof(buf_r));
    /* Open the read end non-blocking so we never wait for a writer. */
    fd = open(FIFO, O_RDONLY | O_NONBLOCK);
    if (fd == -1)
    {
        perror("open");
        exit(1);
    }
    sleep(5);                 /* give the writer process time to attach */
    gettimeofday(&tv, &tz);
    printf("nIndex is %d, nNum = %d, tv_sec;TEST0 %ld\n", nIndex, nNum, (long)tv.tv_sec);
    printf("nIndex is %d, nNum = %d, tv_usec;TEST0 %ld\n", nIndex, nNum, (long)tv.tv_usec);
    for (nIndex = 0; nNum < 1024 * 1024 * 1024; nIndex++)
    {
        memset(buf_r, 0, sizeof(buf_r));
        nread = read(fd, buf_r, READ_DATA_LEN);
        /* EAGAIN (no data yet) returns -1: do not count it. */
        if (nread > 0)
            nNum += nread;
    }
    gettimeofday(&tv, &tz);
    printf("nIndex is %d, nNum = %d, tv_sec;TEST1 %ld\n", nIndex, nNum, (long)tv.tv_sec);
    printf("nIndex is %d, nNum = %d, tv_usec;TEST1 %ld\n", nIndex, nNum, (long)tv.tv_usec);
    close(fd);
    unlink(FIFO);
    return 0;
}
server.c:
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define FIFO "/tmp/myfifo"
#define WRITE_DATA_LEN 4096
/*
 * FIFO throughput test - writer side.
 * Opens /tmp/myfifo non-blocking for writing and pushes 4 KiB buffers
 * of 0x01 bytes until 1 GiB has been accepted by the pipe, matching
 * the 1 GiB the reader consumes.
 *
 * Fixes vs. the original:
 *  - exits when open() fails instead of writing to fd == -1 forever.
 *  - the loop is bounded by bytes written; while(nIndex++) ran until
 *    signed-int overflow (UB) and the final printf was unreachable.
 *  - only successful write() counts are accumulated (EAGAIN == -1).
 */
int main(int argc, char **argv)
{
    int nIndex = 0;
    int fd;
    char w_buf[WRITE_DATA_LEN];
    ssize_t nwrite;
    int nNum = 0;             /* total bytes accepted by the pipe */

    /* O_WRONLY|O_NONBLOCK fails with ENXIO when no reader exists yet. */
    fd = open(FIFO, O_WRONLY | O_NONBLOCK);
    if (fd == -1)
    {
        printf("open error; no reading process\n");
        exit(1);
    }
    if (argc == 1)
        printf("Please send something\n");
    memset(w_buf, 1, sizeof(w_buf));
    for (nIndex = 0; nNum < 1024 * 1024 * 1024; nIndex++)
    {
        nwrite = write(fd, w_buf, WRITE_DATA_LEN);
        /* -1 with EAGAIN means the pipe is full: just retry. */
        if (nwrite > 0)
            nNum += nwrite;
    }
    printf("Write FIFO nIndex is %d\n", nIndex);
    close(fd);
    return 0;
}
Binder测试代码:(注:binder代码只有部分关键点,无法直接编译运行)
client.c
#include <unistd.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#include "Tspushermanager/TspusherManager.h"
#define READ_DATA_LEN 4096
/*
 * Binder throughput test - client side.
 * Connects to the Tspusher binder service, opens a pusher handle, then
 * pulls 1 GiB through binder in 4 KiB Read() transactions, printing
 * timestamps before and after the transfer loop.
 *
 * Fixes vs. the original:
 *  - time_t/suseconds_t printed with %ld, not %d (UB on LP64).
 *  - negative Read() returns are no longer folded into the byte count.
 *  - removed the unused local nWriteFile.
 */
int main(int argc, char **argv)
{
    int nRet = 0;
    int nIndex = 0;
    int nNum = 0;             /* total bytes received so far */
    void *pvTspusher = 0;
    // NOTE(review): raw new with no matching delete - leaks unless
    // connect() takes ownership; confirm against TspusherManager's API.
    TspusherManager *mp = new TspusherManager();
    char acBuf[READ_DATA_LEN];
    struct timeval tvBefore;
    struct timeval tvAfter;
    struct timezone tz;

    gettimeofday(&tvBefore, &tz);
    printf("---[%s.%d]---before connect, time: %lld(us)\n", __FUNCTION__, __LINE__,
           (int64_t)(tvBefore.tv_sec) * 1000 * 1000 + tvBefore.tv_usec);
    sp<TspusherManager> manager = mp->connect();
    gettimeofday(&tvAfter, &tz);
    printf("---[%s.%d]---connect cost: %ld(us)\n", __FUNCTION__, __LINE__,
           (long)((tvAfter.tv_sec - tvBefore.tv_sec) * 1000 * 1000 + (tvAfter.tv_usec - tvBefore.tv_usec)));
    pvTspusher = manager->Open();
    printf("---[%s.%d]---pvTspusher: %p\n", __FUNCTION__, __LINE__, pvTspusher);
    if (pvTspusher == NULL)
        return -1;

    gettimeofday(&tvBefore, &tz);
    printf("nIndex is %d, nNum = %d, tv_sec;TEST0 %ld!\n", nIndex, nNum, (long)tvBefore.tv_sec);
    printf("nIndex is %d, nNum = %d, tv_usec;TEST0 %ld!\n", nIndex, nNum, (long)tvBefore.tv_usec);
    for (nIndex = 0; nNum < 1024 * 1024 * 1024; nIndex++)
    {
        memset(acBuf, 0, sizeof(acBuf));
        nRet = manager->Read(pvTspusher, acBuf, sizeof(acBuf));
        /* An error return (-1) must not shrink the byte count. */
        if (nRet > 0)
            nNum += nRet;
    }
    gettimeofday(&tvAfter, &tz);
    printf("nIndex is %d, nNum = %d, tv_sec;TEST1 %ld!\n", nIndex, nNum, (long)tvAfter.tv_sec);
    printf("nIndex is %d, nNum = %d, tv_usec;TEST1 %ld!\n", nIndex, nNum, (long)tvAfter.tv_usec);

    manager->Close(pvTspusher);
    return 0;
}
server.c
#include <string.h>
#include <stdio.h>
#include <fcntl.h>
#include <binder/ProcessState.h>
#include <binder/IPCThreadState.h>
#include "TspusherManagerService.h"
#define APP_NAME "Sample Server"
#define WRITE_DATA_LEN 4096
static char s_acData[WRITE_DATA_LEN];
/*
 * Binder throughput test - server entry point.
 * Registers the Tspusher service, then hands every available thread
 * (including this one) to the binder thread pool to serve the client's
 * Read() transactions. Never returns in normal operation.
 */
int main(int argc, char **argv)
{
printf("---[%s.%d]---\n", __FUNCTION__, __LINE__);
/* Register the Tspusher binder service (project helper, defined elsewhere). */
TspusherManagerServiceInit();
printf("---[%s.%d]---\n", __FUNCTION__, __LINE__);
/* Spin up binder worker threads for incoming transactions. */
ProcessState::self()->startThreadPool();
printf("---[%s.%d]---\n", __FUNCTION__, __LINE__);
/* Donate the main thread to the binder pool; this call blocks. */
IPCThreadState::self()->joinThreadPool();
return 0;
}
/*
 * Stub open: primes the static payload buffer with 0x01 bytes and hands
 * back a non-NULL dummy handle so the client's NULL check passes. The
 * open parameters are not needed by this test stub.
 */
void *Open(CSDVBTSPUSHEROpenParam_S *psParam)
{
    (void)psParam;
    memset(s_acData, 1, sizeof s_acData);
    return (void *)1;
}
/*
 * Stub read: fills the caller's buffer with 0x01 bytes and reports the
 * requested length as "read", simulating a data source for the
 * throughput test.
 *
 * Fix vs. the original: the fill is bounded by nLen. The original did
 * memset(pcBuffer, 1, WRITE_DATA_LEN), overflowing pcBuffer whenever
 * the caller asked for fewer than WRITE_DATA_LEN bytes.
 *
 * pvTsPusher: opaque handle from Open() (unused by this stub).
 * Returns the number of bytes written into pcBuffer (nLen), or 0 when
 * nLen is not positive.
 */
int Read(void *pvTsPusher, char *pcBuffer, int nLen)
{
    (void)pvTsPusher;
    if (nLen <= 0)
        return 0;
    memset(pcBuffer, 1, (size_t)nLen);
    return nLen;
}
/*
 * Stub close: there is nothing to release in this test stub, so the
 * handle is ignored and success (0) is always reported.
 */
int Close(void *pvTsPusher)
{
    (void)pvTsPusher;
    return 0;
}
对比结果:
读取1G bytes,Binder每次读取数据为4k bytes,FIFO每次读写的数据都是4k bytes,管道单工模式,总
耗时统计:
fifo: 2s左右,速率约为500M bytes每秒
1.8s 496086次
1.55s 354749次
1.4s 306684次
1.52s 340999次
注:单工模式读取次数应该为(262144 * 2)次,由于读写进程是手动启动,读写进程有先后启动关系,不是同时开始对管道操作,所以得到的测试数据无法准确体现读取(262144 * 2)次,得到的结果也是预估出来的,应该没什么大的误差。
binder: 31s左右,速率约为32M bytes每秒
strcpy()
31.5s 262144次
31.0s 262144次
30.9s 262144次
30.9s 262144次
memset()
24s左右 速率为50M bytes每秒
24.9s 262144次
24.2s 262144次
24.1s 262144次
24.1s 262144次
24.2s 262144次
注:strcpy()和memset()表示Binder的server传数据时调用的函数,由于不相信Binder与FIFO这么大的差距,怀疑是这个地方太耗时,所以。。。
总结:这个结果让我大吃一惊,都不愿意相信FIFO会是Binder的10倍,抱着怀疑的态度,重复测了好多次,结果还是这样,虽然Binder满足我项目上的需求,但还是对于网上资料给出的Binder效率比FIFO高有太多不可思议,想不通,如果哪位有更好的结果或解释,请告知我一下。