A Brief Look at the write Path in ramfs
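
The whole path below is driven by an ordinary write(2) from user space. A minimal demo to trigger it (the /mnt/ramfs mount point and file name are assumptions; any file on a ramfs mount will do):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* assumes: mount -t ramfs none /mnt/ramfs */
	int fd = open("/mnt/ramfs/demo", O_CREAT | O_WRONLY | O_TRUNC, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	const char msg[] = "hello ramfs";
	/* write(2) -> sys_write -> vfs_write, the function analysed below */
	ssize_t n = write(fd, msg, sizeof(msg) - 1);
	if (n < 0)
		perror("write");
	else
		printf("wrote %zd bytes\n", n);
	close(fd);
	return 0;
}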

ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)	// a fairly high-level API entry point
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;
		ret = security_file_permission(file, MAY_WRITE);
		if (!ret) {
			if (file->f_op->write)
				ret = file->f_op->write(file, buf, count, pos);
			else
				ret = do_sync_write(file, buf, count, pos);
			if (ret > 0) {
				fsnotify_modify(file->f_path.dentry);
				add_wchar(current, ret);
			}
			inc_syscw(current);
		}
	}

	return ret;
}

    Let's trace the write path as ramfs takes it. We'll skip past the permission and sanity checks at the top for now and go straight to ret = file->f_op->write(file, buf, count, pos);. For a ramfs file, this calls do_sync_write.
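
For reference, here is the ramfs file_operations table of this kernel generation (fs/ramfs/file-mmu.c, quoted from memory, so treat it as a sketch). It wires .write to do_sync_write and .aio_write to generic_file_aio_write, which is why the call lands where it does:

const struct file_operations ramfs_file_operations = {
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.fsync		= simple_sync_file,
	.sendfile	= generic_file_sendfile,
	.llseek		= generic_file_llseek,
};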

ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;

	for (;;) {
		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);	// dispatch back through f_op: for ramfs this is generic_file_aio_write
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

There is nothing fancy about init_sync_kiocb(); its key effect is kiocb.ki_filp = filp. The other piece of setup is packing the user buffer into the one-element iov. Now for the important part: filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos). Since we are assuming ramfs here, this function is generic_file_aio_write.
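
A user-space aside that may make the (iov, nr_segs) signature less abstract: writev(2) submits several segments in one call, and do_sync_write() above is simply the single-segment case wrapped in a one-element iovec. A runnable illustration:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char a[] = "hello ";
	char b[] = "iovec\n";
	/* two segments -> nr_segs == 2 in the kernel-side aio_write path */
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = strlen(a) },
		{ .iov_base = b, .iov_len = strlen(b) },
	};
	if (writev(STDOUT_FILENO, iov, 2) < 0)
		perror("writev");
	return 0;
}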

ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;	// take a look at mapping
			// background: fs/inode.c:alloc_inode() sets mapping = &inode->i_data and mapping->host = inode
	struct inode *inode = mapping->host;
	ssize_t ret;

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
			&iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		ssize_t err;

		err = sync_page_range(inode, mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
} 

mapping points at &inode->i_data, and mapping->host = inode. Flag this here; it comes up again later, and things get foggy without it.
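
The relevant lines from fs/inode.c:alloc_inode(), heavily abridged (a sketch; the surrounding code varies by version):

	struct address_space * const mapping = &inode->i_data;

	mapping->host = inode;		/* mapping->host points back at the inode */
	inode->i_mapping = mapping;	/* file->f_mapping is copied from i_mapping at open time */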

Then we enter ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos); its parameters mirror the ones above.

static ssize_t
__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space * mapping = file->f_mapping;
	size_t ocount;		/* original count */
	size_t count;		/* after file limit checks */
	struct inode 	*inode = mapping->host;
	unsigned long	seg;
	loff_t		pos;
	ssize_t		written;
	ssize_t		err;

	ocount = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		ocount += iv->iov_len;
		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		nr_segs = seg;
		ocount -= iv->iov_len;	/* This segment is no good */
		break;
	}

	count = ocount;
	pos = *ppos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;
	written = 0;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = remove_suid(file->f_path.dentry);
	if (err)
		goto out;

	file_update_time(file);	// update the file timestamps

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (unlikely(file->f_flags & O_DIRECT)) {
		loff_t endbyte;
		ssize_t written_buffered;

		written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
							ppos, count, ocount);
		if (written < 0 || written == count)
			goto out;
		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		pos += written;
		count -= written;
		written_buffered = generic_file_buffered_write(iocb, iov,
						nr_segs, pos, ppos, count,
						written);
		/*
		 * If generic_file_buffered_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero.  Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}

		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + written_buffered - written - 1;
		err = do_sync_file_range(file, pos, endbyte,
					 SYNC_FILE_RANGE_WAIT_BEFORE|
					 SYNC_FILE_RANGE_WRITE|
					 SYNC_FILE_RANGE_WAIT_AFTER);
		if (err == 0) {
			written = written_buffered;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_CACHE_SHIFT,
						 endbyte >> PAGE_CACHE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		written = generic_file_buffered_write(iocb, iov, nr_segs,
				pos, ppos, count, written);
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
In the common case O_DIRECT is not set, so the branch taken is generic_file_buffered_write(iocb, iov, nr_segs, pos, ppos, count, written);.

Looking at its arguments: iocb is still the same iocb, and iov and nr_segs are passed through unchanged. Provided the write does not run into the file size limits, pos == *ppos and count == iov->iov_len; written is 0, since this is not an O_DIRECT write. On into the next function.
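
Before reading the copy loop, it helps to trace the per-page arithmetic (index, offset, bytes) it performs. A worked example, assuming PAGE_CACHE_SIZE == 4096 (so PAGE_CACHE_SHIFT == 12):

/*
 * Writing count = 5000 bytes at pos = 6000:
 *
 *   iteration 1: index  = 6000 >> 12  = 1
 *                offset = 6000 & 4095 = 1904
 *                bytes  = min(4096 - 1904, 5000) = 2192
 *   iteration 2: pos = 6000 + 2192 = 8192, count = 2808
 *                index  = 2, offset = 0
 *                bytes  = min(4096, 2808) = 2808
 *
 * So the write touches file pages 1 and 2, and the loop
 * exits when count reaches 0.
 */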

ssize_t
generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos, loff_t *ppos,
		size_t count, ssize_t written)
{
	struct file *file = iocb->ki_filp;
	struct address_space * mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode 	*inode = mapping->host;
	long		status = 0;
	struct page	*page;
	struct page	*cached_page = NULL;
	size_t		bytes;
	struct pagevec	lru_pvec;
	const struct iovec *cur_iov = iov; /* current iovec */
	size_t		iov_base = 0;	   /* offset in the current iovec */
	char __user	*buf;

	pagevec_init(&lru_pvec, 0);

	/*
	 * handle partial DIO write.  Adjust cur_iov if needed.
	 */
	if (likely(nr_segs == 1))
		buf = iov->iov_base + written;	// pointer to the data still to be written
	else {
		filemap_set_next_iovec(&cur_iov, &iov_base, written);
		buf = cur_iov->iov_base + iov_base;
	}

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;

		offset = (pos & (PAGE_CACHE_SIZE -1));	/* offset within the current page, i.e. how far into the last touched page we are */
		index = pos >> PAGE_CACHE_SHIFT;	/* page index: which page of the file pos falls in */
		bytes = PAGE_CACHE_SIZE - offset;	/* free bytes remaining in that page */

		/* Limit the size of the copy to the caller's write size */
		bytes = min(bytes, count);   // take the smaller of the page's free space and the bytes left to write

		/* We only need to worry about prefaulting when writes are from
		 * user-space.  NFSd uses vfs_writev with several non-aligned
		 * segments in the vector, and limiting to one segment a time is
		 * a noticeable performance hit for re-write
		 */
		if (!segment_eq(get_fs(), KERNEL_DS)) {
			/*
			 * Limit the size of the copy to that of the current
			 * segment, because fault_in_pages_readable() doesn't
			 * know how to walk segments.
			 */
			bytes = min(bytes, cur_iov->iov_len - iov_base);

			/*
			 * Bring in the user page that we will copy from
			 * _first_.  Otherwise there's a nasty deadlock on
			 * copying from the same page as we're writing to,
			 * without it being marked up-to-date.
			 */
			fault_in_pages_readable(buf, bytes);
		}
		// look up the page for this index in the page cache; if absent, create it
		page = __grab_cache_page(mapping, index, &cached_page, &lru_pvec);	// get the page
							// on first creation, cached_page supplies the page and it is batched onto the LRU list
		if (!page) {
			status = -ENOMEM;
			break;
		}

		if (unlikely(bytes == 0)) {
			status = 0;
			copied = 0;
			goto zero_length_segment;
		}

		status = a_ops->prepare_write(file, page, offset, offset+bytes);
		if (unlikely(status)) {
			loff_t isize = i_size_read(inode);

			if (status != AOP_TRUNCATED_PAGE)
				unlock_page(page);
			page_cache_release(page);
			if (status == AOP_TRUNCATED_PAGE)
				continue;
			/*
			 * prepare_write() may have instantiated a few blocks
			 * outside i_size.  Trim these off again.
			 */
			if (pos + bytes > isize)
				vmtruncate(inode, isize);
			break;
		}
		if (likely(nr_segs == 1))
			copied = filemap_copy_from_user(page, offset,	// copy the user data into the page
							buf, bytes);
		else
			copied = filemap_copy_from_user_iovec(page, offset,
						cur_iov, iov_base, bytes);
		flush_dcache_page(page);
		// call commit_write to complete this chunk of the write
		status = a_ops->commit_write(file, page, offset, offset+bytes);	
		// in ramfs this marks the page dirty and updates inode->i_size
		if (status == AOP_TRUNCATED_PAGE) {
			page_cache_release(page);
			continue;
		}
zero_length_segment:
		if (likely(copied >= 0)) {
			if (!status)
				status = copied;

			if (status >= 0) {
				written += status;
				count -= status;	// subtract what was just written
				pos += status;
				buf += status;
				if (unlikely(nr_segs > 1)) {
					filemap_set_next_iovec(&cur_iov,
							&iov_base, status);
					if (count)
						buf = cur_iov->iov_base +
							iov_base;
				} else {
					iov_base += status;
				}
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		unlock_page(page);
		mark_page_accessed(page);
		page_cache_release(page);
		if (status < 0)
			break;
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (count);
	*ppos = pos;

	if (cached_page)
		page_cache_release(cached_page);

	/*
	 * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC
	 */
	if (likely(status >= 0)) {
		if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
			if (!a_ops->writepage || !is_sync_kiocb(iocb))
				status = generic_osync_inode(inode, mapping,
						OSYNC_METADATA|OSYNC_DATA);
		}
  	}
	
	/*
	 * If we get here for O_DIRECT writes then we must have fallen through
	 * to buffered writes (block instantiation inside i_size).  So we sync
	 * the file data here, to try to honour O_DIRECT expectations.
	 */
	if (unlikely(file->f_flags & O_DIRECT) && written)
		status = filemap_write_and_wait(mapping);

	pagevec_lru_add(&lru_pvec);
	return written ? written : status;
}

page = __grab_cache_page(mapping, index, &cached_page, &lru_pvec); fetches the page we need to write into. Pages hang off mapping->page_tree, a radix tree that maps an index straight to its page; informally, you can picture it as an array-like lookup structure. Also note that when a brand-new page is created here, it is batched as lru_pvec->pages[0] = page.
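
For completeness, __grab_cache_page() of this era (mm/filemap.c) looks roughly like this: try find_lock_page() first, and on a miss allocate a page, insert it into the radix tree via add_to_page_cache(), and batch it onto lru_pvec:

static struct page *__grab_cache_page(struct address_space *mapping,
			unsigned long index, struct page **cached_page,
			struct pagevec *lru_pvec)
{
	int err;
	struct page *page;
repeat:
	page = find_lock_page(mapping, index);	/* radix-tree lookup by index */
	if (!page) {
		if (!*cached_page) {
			*cached_page = page_cache_alloc(mapping);
			if (!*cached_page)
				return NULL;
		}
		err = add_to_page_cache(*cached_page, mapping,
					index, GFP_KERNEL);
		if (err == -EEXIST)
			goto repeat;	/* raced with another inserter: retry the lookup */
		if (err == 0) {
			page = *cached_page;
			page_cache_get(page);
			if (!pagevec_add(lru_pvec, page))
				__pagevec_lru_add(lru_pvec);	/* batch full: flush it */
			*cached_page = NULL;
		}
	}
	return page;
}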

status = a_ops->prepare_write(file, page, offset, offset+bytes); In ramfs this is simple_prepare_write, which just does the setup for the copy: map the page and zero the regions that are not about to be written.
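
simple_prepare_write() itself is short; a sketch of it as it appears in fs/libfs.c of kernels from this era:

int simple_prepare_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	if (!PageUptodate(page)) {
		if (to - from != PAGE_CACHE_SIZE) {
			void *kaddr = kmap_atomic(page, KM_USER0);
			/* zero the parts of the page the write will not cover */
			memset(kaddr, 0, from);
			memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
	}
	return 0;
}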

copied = filemap_copy_from_user(page, offset, buf, bytes); page is the destination page, offset the offset within it, buf the user data to write, and bytes its length. The function copies the data into the range (offset, offset+bytes) of the page's mapped address.
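
Roughly, filemap_copy_from_user() (mm/filemap.h in this era; a sketch, and some versions use the _nocache copy variants) maps the page and copies from the user buffer, with an atomic fast path and a sleeping slow path:

static inline size_t
filemap_copy_from_user(struct page *page, unsigned long offset,
			const char __user *buf, unsigned bytes)
{
	char *kaddr;
	int left;

	/* fast path: atomic kmap; the copy may fault and come up short */
	kaddr = kmap_atomic(page, KM_USER0);
	left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
	kunmap_atomic(kaddr, KM_USER0);

	if (left != 0) {
		/* slow path: regular kmap, allowed to sleep on the fault */
		kaddr = kmap(page);
		left = __copy_from_user(kaddr + offset, buf, bytes);
		kunmap(page);
	}
	return bytes - left;	/* number of bytes actually copied */
}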

status = a_ops->commit_write(file, page, offset, offset+bytes); In ramfs, this grows inode->i_size if the write extended past the old end of the data, and marks the page dirty.
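
That is simple_commit_write() (fs/libfs.c, same era); a sketch, doing exactly the two things just described:

int simple_commit_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (pos > inode->i_size)	/* write went past EOF: grow the file */
		i_size_write(inode, pos);
	set_page_dirty(page);		/* ramfs pages simply stay dirty in the page cache */
	return 0;
}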

pagevec_lru_add(&lru_pvec); if any new pages were allocated above, this adds them to the zone's LRU list (zone->lru).
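
An abridged sketch of the underlying __pagevec_lru_add() (mm/swap.c); note the real code holds zone->lru_lock across runs of pages belonging to the same zone rather than locking per page as simplified here:

void __pagevec_lru_add(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		SetPageLRU(page);
		add_page_to_inactive_list(zone, page);	/* onto zone->lru */
		spin_unlock_irq(&zone->lru_lock);
	}
	release_pages(pvec->pages, pvec->nr, pvec->cold);	/* drop the batch refs */
	pagevec_reinit(pvec);
}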

The written value returned at the end is the total number of bytes written.


Back in generic_file_aio_write(), if the file was opened O_SYNC (or the inode is marked sync), sync_page_range() flushes the range just written; the byte count then propagates back up through do_sync_write() and vfs_write() to the caller.

