Linux IPC: Inter-Process Communication with a Named Pipe (FIFO) Example

        This article walks through a simple named pipe (FIFO) example and then traces how the pipe is actually implemented underneath.

1. The Example

Writer: fifo_write.c

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <string.h>

#define FIFO_PATH	"/tmp/myfifo"

int main(void){
	int pipe_fd = -1;
	unsigned char buff[] = "hello world";

	/* Create the FIFO if it does not already exist. */
	if (access(FIFO_PATH, F_OK) == -1){
		printf("FIFO file does not exist, creating it\n");
		if(mkfifo(FIFO_PATH, 0777) < 0){
			perror("mkfifo");
			exit(EXIT_FAILURE);
		}
	}

	/* Opening the write end blocks until a reader opens the FIFO. */
	pipe_fd = open(FIFO_PATH, O_WRONLY);
	if(pipe_fd < 0){
		perror("open");
		printf("open file failed! %d\n", pipe_fd);
		return -1;
	}

	/* Send a message once per second; sizeof(buff) includes the trailing '\0'. */
	while(1){
		if(write(pipe_fd, buff, sizeof(buff)) < 0){
			printf("write error!\n");
			close(pipe_fd);
			return -1;
		}else{
			printf("send to fifo success!\n");
		}
		sleep(1);
	}
	close(pipe_fd);

	return 0;
}

Reader: fifo_read.c

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>

#define FIFO_PATH	"/tmp/myfifo"

int main(void){
	int fd;
	unsigned char buff[50];

	/* Create the FIFO if it does not already exist. */
	if (access(FIFO_PATH, F_OK) == -1){
		printf("FIFO file does not exist, creating it\n");
		if(mkfifo(FIFO_PATH, 0777) < 0){
			perror("mkfifo");
			exit(EXIT_FAILURE);
		}
	}

	/* O_NONBLOCK: the open succeeds even if no writer has opened the FIFO yet. */
	if((fd = open(FIFO_PATH, O_RDONLY|O_NONBLOCK, 0)) < 0){
		perror("open");
		return -1;
	}

	while(1){
		memset(buff, 0x00, sizeof(buff));
		/* Read at most sizeof(buff)-1 bytes so buff stays NUL-terminated for %s.
		 * With O_NONBLOCK, read() returns 0 until a writer connects. */
		if(read(fd, buff, sizeof(buff) - 1) < 0){
			printf("read from fifo failed!\n");
			continue;
		}
		printf("receive:%s\n", buff);
		sleep(1);
	}
	close(fd);
	return 0;
}

Sample run:

Writer:

send to fifo success!
send to fifo success!
send to fifo success!
send to fifo success!
send to fifo success!
send to fifo success!
send to fifo success!
send to fifo success!
send to fifo success!

Reader (the empty receive: lines at the start are expected: the FIFO was opened with O_NONBLOCK, so read() returns 0 until the writer connects, and the zeroed buffer prints as an empty string):

FIFO file does not exist, creating it
receive:
receive:
receive:
receive:
receive:
receive:
receive:
receive:hello world
receive:hello world
receive:hello world
receive:hello world
receive:hello world
receive:hello world
receive:hello world
receive:hello world
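
        A quick way to confirm what mkfifo() actually created is to ask the kernel for the file type. The small check below is an illustrative sketch (not part of the two programs above): it calls stat() on /tmp/myfifo and tests the type bits with S_ISFIFO. Running ls -l /tmp/myfifo shows the same thing as a leading 'p' in the mode string.

#include <stdio.h>
#include <sys/stat.h>

int main(void){
	struct stat st;

	/* st.st_mode carries the file-type bits the kernel stored in inode->i_mode. */
	if(stat("/tmp/myfifo", &st) < 0){
		perror("stat");
		return -1;
	}
	printf("/tmp/myfifo is %sa FIFO\n", S_ISFIFO(st.st_mode) ? "" : "not ");
	return 0;
}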

2. Further Reading

        With the named pipe example above in hand, why does it have to work this way? As we know, different Linux processes run in separate address spaces, so two processes cannot talk to each other directly; they need a "ladder" to bridge the gap. To find that ladder, let's start with the implementation of mkfifo and see how Linux builds this special file.

        To see what mkfifo does, we need to look at the glibc source. First, its definition:

       glibc/sysdeps/posix/mkfifo.c

int
mkfifo (const char *path, mode_t mode)
{
  dev_t dev = 0;
  return __xmknod (_MKNOD_VER, path, mode | S_IFIFO, &dev);
}
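
        Before following the call chain further: as the code above shows, mkfifo() just forwards to the mknod machinery with the S_IFIFO type bit set, so you could create the same FIFO by hand with mknod(). A minimal sketch, assuming /tmp/myfifo2 is a path that is free to use:

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

int main(void){
	/* Equivalent to mkfifo("/tmp/myfifo2", 0666): the S_IFIFO type bit asks the
	 * kernel for a FIFO special file, and the dev argument is ignored for FIFOs. */
	if(mknod("/tmp/myfifo2", S_IFIFO | 0666, 0) < 0){
		perror("mknod");
		return -1;
	}
	return 0;
}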

        So mkfifo() ultimately calls __xmknod to create the node. Let's look at how __xmknod is implemented:

int
__xmknod (int vers, const char *path, mode_t mode, dev_t *dev)
{
  unsigned long long int k_dev;
  if (vers != _MKNOD_VER)
    return INLINE_SYSCALL_ERROR_RETURN_VALUE (EINVAL);
  /* We must convert the value to dev_t type used by the kernel.  */
  k_dev =  (*dev) & ((1ULL << 32) - 1);
  if (k_dev != *dev)
    return INLINE_SYSCALL_ERROR_RETURN_VALUE (EINVAL);
  return INLINE_SYSCALL (mknod, 3, path, mode, (unsigned int) k_dev);
}

        Ultimately the mknod system call is issued via INLINE_SYSCALL (mknod, 3, path, mode, (unsigned int) k_dev). On the kernel side, the mknod syscall goes through vfs_mknod() in fs/namei.c; the simplified mknod helper below, from linux/security/inode.c, captures the essential steps for our purposes: allocate an inode and bind it to the dentry.

static int mknod(struct inode *dir, struct dentry *dentry,
			 int mode, dev_t dev)
{
	struct inode *inode;
	int error = -EPERM;

	if (dentry->d_inode)
		return -EEXIST;

	inode = get_inode(dir->i_sb, mode, dev);
	if (inode) {
		d_instantiate(dentry, inode);
		dget(dentry);
		error = 0;
	}
	return error;
}

        It first checks whether the dentry already has an inode and returns -EEXIST if it does. This is a good place to point out that every file in Linux hangs off an inode: files are tied to the rest of the filesystem through this structure, and it is well worth digging into on its own. Next, let's look at get_inode():

static struct inode *get_inode(struct super_block *sb, int mode, dev_t dev)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = 0;
		inode->i_gid = 0;
		inode->i_blocks = 0;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_fop = &default_file_ops;
			break;
		case S_IFDIR:
			inode->i_op = &simple_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		}
	}
	return inode;
}

        Here we can see the inode being initialized, including its timestamps (i_atime/i_mtime/i_ctime); the times you see when inspecting a file from the command line or via the stat API are written here. Then look at the handling of mode: S_IFREG is a regular file and S_IFDIR is a directory, and for both of these inode->i_fop (and i_op) is assigned the generic file operations. The FIFO we are creating is neither, so it falls into the default branch. To keep the scope manageable, we follow only the FIFO special file node, i.e. init_special_inode():

        

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &def_fifo_fops;
	else if (S_ISSOCK(mode))
		inode->i_fop = &bad_sock_fops;
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o)\n",
		       mode);
}
EXPORT_SYMBOL(init_special_inode);

        At long last, here is the FIFO file-operations pointer, def_fifo_fops. Along the way we also see the other three special file types: character devices, block devices, and sockets. Our focus is def_fifo_fops:

const struct file_operations def_fifo_fops = {
	.open		= fifo_open,	/* will set read_ or write_pipefifo_fops */
};

        Next, the fifo_open() implementation:

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	int ret;

	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;
	if (!pipe) {
		ret = -ENOMEM;
		pipe = alloc_pipe_info(inode);
		if (!pipe)
			goto err_nocleanup;
		inode->i_pipe = pipe;
	}
	filp->f_version = 0;

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case 1:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		filp->f_op = &read_pipefifo_fops;
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(inode);

		if (!pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress POLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else 
			{
				wait_for_partner(inode, &pipe->w_counter);
				if(signal_pending(current))
					goto err_rd;
			}
		}
		break;
	
	case 2:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		filp->f_op = &write_pipefifo_fops;
		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(inode);

		if (!pipe->readers) {
			wait_for_partner(inode, &pipe->r_counter);
			if (signal_pending(current))
				goto err_wr;
		}
		break;
	
	case 3:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */
		filp->f_op = &rdwr_pipefifo_fops;

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(inode);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	mutex_unlock(&inode->i_mutex);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	if (!pipe->readers && !pipe->writers)
		free_pipe_info(inode);

err_nocleanup:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

        This fifo_open() is exactly what backs the open() calls in fifo_read.c and fifo_write.c above. Recall the call sites:

pipe_fd = open(FIFO_PATH, O_WRONLY);             /* writer: open the FIFO for writing */
fd = open(FIFO_PATH, O_RDONLY|O_NONBLOCK, 0);    /* reader: open the FIFO for reading, non-blocking */

        The open() calls specify the access mode, and that maps directly onto the fifo_open() implementation above. It first fetches the pipe member from the inode (looking at the inode source, you will find that the character-device, block-device, and pipe pointers share a union inside struct inode). Depending on the access mode passed to open(), the O_RDONLY, O_WRONLY, and O_RDWR branches mainly manipulate the following members:

filp->f_op
pipe->r_counter
pipe->readers
pipe->w_counter
pipe->writers

        For a read-only open (O_RDONLY), r_counter and readers are both incremented; if this is the first reader (readers was 0 before the increment), wake_up_partner(inode) wakes any writer blocked waiting for a reader. If there are no writers yet and O_NONBLOCK was not specified, the open blocks in wait_for_partner() until w_counter changes. Similarly, for a write-only open (O_WRONLY): if O_NONBLOCK is set and no reader exists, the open fails immediately with -ENXIO; otherwise w_counter and writers are incremented (again waking a waiting partner if this is the first writer), and if no reader exists the open blocks waiting for r_counter to change.
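
        This asymmetry is easy to observe from user space. The sketch below is illustrative only; it assumes /tmp/myfifo already exists and that no other process currently has it open. The non-blocking write open fails with ENXIO because there is no reader, while the non-blocking read open succeeds even though there is no writer:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int main(void){
	/* No reader exists yet, so a non-blocking write open fails with ENXIO. */
	int wfd = open("/tmp/myfifo", O_WRONLY | O_NONBLOCK);
	printf("write end: %d (%s)\n", wfd, wfd < 0 ? strerror(errno) : "ok");

	/* POSIX allows a non-blocking read open to succeed with no writer present. */
	int rfd = open("/tmp/myfifo", O_RDONLY | O_NONBLOCK);
	printf("read end:  %d (%s)\n", rfd, rfd < 0 ? strerror(errno) : "ok");

	if(wfd >= 0) close(wfd);
	if(rfd >= 0) close(rfd);
	return 0;
}

        If you start the reader from section 1 first and then repeat the write-only open, it succeeds, because pipe->readers is no longer zero.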

        Now look at the three file-operations tables: read_pipefifo_fops, write_pipefifo_fops, and rdwr_pipefifo_fops.

const struct file_operations read_pipefifo_fops = {
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= pipe_read,
	.write		= bad_pipe_w,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.open		= pipe_read_open,
	.release	= pipe_read_release,
	.fasync		= pipe_read_fasync,
};

const struct file_operations write_pipefifo_fops = {
	.llseek		= no_llseek,
	.read		= bad_pipe_r,
	.write		= do_sync_write,
	.aio_write	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.open		= pipe_write_open,
	.release	= pipe_write_release,
	.fasync		= pipe_write_fasync,
};

const struct file_operations rdwr_pipefifo_fops = {
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= pipe_read,
	.write		= do_sync_write,
	.aio_write	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.open		= pipe_rdwr_open,
	.release	= pipe_rdwr_release,
	.fasync		= pipe_rdwr_fasync,
};

        For the read-only table, .read is do_sync_read; for the write-only table, .write is do_sync_write; the read-write table has both. In every case the real work is done by pipe_read() and pipe_write(), which do_sync_read()/do_sync_write() reach through the .aio_read/.aio_write entries.

        pipe_read() copies data from kernel space to user space via pipe_iov_copy_to_user(), and pipe_write() copies data from user space into kernel space via pipe_iov_copy_from_user(). That completes the data path from one process, through the kernel, and back out to the other process: the kernel itself is the "ladder" the two processes use to communicate.
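
        As a closing aside, the .poll entry (pipe_poll) in the tables above is what lets a reader sleep until data arrives instead of polling once per second like fifo_read.c does. A minimal poll()-driven reader, sketched under the same assumptions as the earlier examples (the FIFO already exists at /tmp/myfifo):

#include <stdio.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void){
	char buff[50];
	int fd = open("/tmp/myfifo", O_RDONLY | O_NONBLOCK);
	if(fd < 0){
		perror("open");
		return -1;
	}

	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	while(1){
		/* Sleep in the kernel (this ends up in pipe_poll) until data or hang-up. */
		if(poll(&pfd, 1, -1) < 0){
			perror("poll");
			break;
		}
		if(pfd.revents & POLLIN){
			ssize_t n = read(fd, buff, sizeof(buff) - 1);
			if(n > 0){
				buff[n] = '\0';
				printf("receive:%s\n", buff);
			}
		}
		if(pfd.revents & POLLHUP)	/* the writer closed its end */
			break;
	}
	close(fd);
	return 0;
}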

        
