问题
多线程录制摄像头视频文件(每个16MB),录制完成后关闭文件描述符,并调用sync系统调用进行落盘。在多线程情况下,TF卡空闲容量小于1.5GB后,会大概率出现线程在sync中卡住,线程进入D(不可中断睡眠)状态。
线程卡住后的堆栈信息:
[<c00756ec>] sleep_on_page+0x8/0x10
[<c007551c>] wait_on_page_bit+0xb4/0xbc
[<c0075648>] filemap_fdatawait_range+0xd4/0x130
[<c00756dc>] filemap_fdatawait+0x38/0x40
[<c00c0744>] sync_inodes_sb+0x108/0x13c
[<c00a3da8>] iterate_supers+0xa4/0xec
[<c00c43ac>] sys_sync+0x34/0x9c
[<c0012e40>] ret_fast_syscall+0x0/0x30
[<ffffffff>] 0xffffffff
内核
问题内核版本:linux-3.10.y(下面代码在linux-4.1.27中分析)
代码
Linux2.6.18内核版本后,改为了使用SYSCALL_DEFINEx来定义系统调用,但本质上还是sys_xxx的模式。后面的x表示参数个数,如sync系统调用:
SYSCALL_DEFINE0(sync) Sync everything
SYSCALL_DEFINE1(syncfs, int, fd) sync a single super
SYSCALL_DEFINE1(fsync, unsigned int, fd)
SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
unsigned int, flags)
sync系统调用函数实现:fs/sync.c
/*
 * sys_sync() — quoted verbatim from fs/sync.c (linux-4.1.27).
 * Kicks off writeback of all dirty data, then waits for it on every
 * mounted super_block and block device.
 */
SYSCALL_DEFINE0(sync)
{
int nowait = 0, wait = 1;
/* wake the flusher threads to start asynchronous writeback */
wakeup_flusher_threads(0, WB_REASON_SYNC);
iterate_supers(sync_inodes_one_sb, NULL); /* hung path per the stack trace goes through here */
iterate_supers(sync_fs_one_sb, &nowait);
iterate_supers(sync_fs_one_sb, &wait);
iterate_bdevs(fdatawrite_one_bdev, NULL);
iterate_bdevs(fdatawait_one_bdev, NULL);
if (unlikely(laptop_mode))
laptop_sync_completion();
return 0;
}
根据堆栈信息,继续分析函数调用:
/* Per-superblock callback used by sys_sync() via iterate_supers(). */
static void sync_inodes_one_sb(struct super_block *sb, void *arg)
{
if (!(sb->s_flags & MS_RDONLY)) /* read-only mounts have no dirty data, skip sync */
sync_inodes_sb(sb);
}
传入参数是super_block,代表了VFS层的文件系统,可以操作到其下所有的具体文件系统:
/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
DECLARE_COMPLETION_ONSTACK(done);
struct wb_writeback_work work = {
.sb = sb,
.sync_mode = WB_SYNC_ALL,
.nr_pages = LONG_MAX,
.range_cyclic = 0,
.done = &done,
.reason = WB_REASON_SYNC,
.for_sync = 1,
};
/* Nothing to do? */
if (sb->s_bdi == &noop_backing_dev_info)
return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));
bdi_queue_work(sb->s_bdi, &work);
wait_for_completion(&done);
wait_sb_inodes(sb); /* block until writeback of every dirty inode on this sb completes */
}
wait_sb_inodes在超级块的s_inodes链表中遍历所有仍有缓存页的inode(inode代表文件元数据),并通过filemap_fdatawait(mapping)阻塞等待每个inode对应的脏页写回磁盘完成:
/*
 * Walk sb->s_inodes and, for every inode that still has pages in its
 * address_space, block until writeback of those pages completes.
 */
static void wait_sb_inodes(struct super_block *sb)
{
struct inode *inode, *old_inode = NULL;
/*
 * We need to be protected against the filesystem going from
 * r/o to r/w or vice versa.
 */
WARN_ON(!rwsem_is_locked(&sb->s_umount));
spin_lock(&inode_sb_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { /* scan every inode of this sb */
struct address_space *mapping = inode->i_mapping; /* this inode's page cache mapping */
spin_lock(&inode->i_lock);
if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
(mapping->nrpages == 0)) {
spin_unlock(&inode->i_lock);
continue;
}
__iget(inode);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_sb_list_lock);
iput(old_inode);
old_inode = inode;
filemap_fdatawait(mapping); /* wait (blocking) for this mapping's writeback to finish */
cond_resched();
spin_lock(&inode_sb_list_lock);
}
spin_unlock(&inode_sb_list_lock);
iput(old_inode);
}
继续分析,获取文件数据的大小i_size:
/*
 * Wait for writeback of the mapping's pages in [0, i_size - 1].
 * A zero-size file has no pages to wait on.
 */
int filemap_fdatawait(struct address_space *mapping)
{
loff_t i_size = i_size_read(mapping->host);
if (i_size == 0)
return 0;
return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
查找文件范围内处于写回(writeback)状态的page,并逐个阻塞等待其写入磁盘完成。
/*
 * Walk pages tagged PAGECACHE_TAG_WRITEBACK in [start_byte, end_byte]
 * and block until each page's PG_writeback bit clears; returns -EIO if
 * any page recorded a write error.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
loff_t end_byte)
{
pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
struct pagevec pvec;
int nr_pages;
int ret2, ret = 0;
if (end_byte < start_byte)
goto out;
pagevec_init(&pvec, 0);
while ((index <= end) &&
(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
PAGECACHE_TAG_WRITEBACK,
min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
unsigned i;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
/* until radix tree lookup accepts end_index */
if (page->index > end)
continue;
wait_on_page_writeback(page); /* may block indefinitely — the observed hang point */
if (TestClearPageError(page))
ret = -EIO;
}
pagevec_release(&pvec);
cond_resched();
}
out:
ret2 = filemap_check_errors(mapping);
if (!ret)
ret = ret2;
return ret;
}
/*
 * Wait for a page to complete writeback
 * (uninterruptible sleep via wait_on_page_bit below).
 */
static inline void wait_on_page_writeback(struct page *page)
{
if (PageWriteback(page))
wait_on_page_bit(page, PG_writeback);
}
/* Sleep in TASK_UNINTERRUPTIBLE (D state) until bit_nr clears in page->flags. */
void wait_on_page_bit(struct page *page, int bit_nr)
{
DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
if (test_bit(bit_nr, &page->flags))
__wait_on_bit(page_waitqueue(page), &wait, bit_wait_io,
TASK_UNINTERRUPTIBLE); /* no timeout here: only a cleared bit ends the wait */
}
/*
 * Generic bit-wait loop: re-check the bit and invoke 'action' (which
 * sleeps) until the bit clears or action returns nonzero.
 */
int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
wait_bit_action_f *action, unsigned mode)
{
int ret = 0;
do {
prepare_to_wait(wq, &q->wait, mode);
if (test_bit(q->key.bit_nr, q->key.flags))
ret = (*action)(&q->key);
} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
finish_wait(wq, &q->wait);
return ret;
}
3.10内核的action是sleep_on_page,4.1内核是bit_wait_io,
/* linux-3.10 wait action: plain io_schedule(), no timeout and no signal check. */
static int sleep_on_page(void *word)
{
io_schedule();
return 0;
}
/* linux-4.1 wait action: still io_schedule(), but aborts the wait on a pending signal. */
__sched int bit_wait_io(struct wait_bit_key *word)
{
if (signal_pending_state(current->state, current))
return 1;
io_schedule();
return 0;
}
内核3.10的等待不带超时,那如果卡住会怎样呢?死等吗?【】
请求把page内存数据写到磁盘,加入请求队列后,什么情况下会无限期地卡住?【】
/*
 * linux-3.10 io_schedule(): account iowait, flush this task's plugged
 * block requests, then schedule() with no timeout — the sleeper can
 * only be woken by writeback completion.
 */
void __sched io_schedule(void)
{
struct rq *rq = raw_rq();
delayacct_blkio_start();
atomic_inc(&rq->nr_iowait);
blk_flush_plug(current);
current->in_iowait = 1;
schedule();
current->in_iowait = 0;
atomic_dec(&rq->nr_iowait);
delayacct_blkio_end();
}
有带超时函数却没有使用
/*
 * linux-3.10 timeout variant: same bookkeeping as io_schedule() but
 * uses schedule_timeout(). It exists yet is not used by sleep_on_page().
 */
long __sched io_schedule_timeout(long timeout)
{
struct rq *rq = raw_rq();
long ret;
delayacct_blkio_start();
atomic_inc(&rq->nr_iowait);
blk_flush_plug(current);
current->in_iowait = 1;
ret = schedule_timeout(timeout);
current->in_iowait = 0;
atomic_dec(&rq->nr_iowait);
delayacct_blkio_end();
return ret;
}
内核4.1是带超时的返回的,
/* linux-4.1: io_schedule() is just the timeout variant with MAX_SCHEDULE_TIMEOUT. */
static inline void io_schedule(void)
{
io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}
/*
 * linux-4.1 io_schedule_timeout(): like the 3.10 version but preserves
 * the caller's in_iowait flag and returns schedule_timeout()'s result.
 */
long __sched io_schedule_timeout(long timeout)
{
int old_iowait = current->in_iowait;
struct rq *rq;
long ret;
current->in_iowait = 1;
blk_schedule_flush_plug(current);
delayacct_blkio_start();
rq = raw_rq();
atomic_inc(&rq->nr_iowait);
ret = schedule_timeout(timeout);
current->in_iowait = old_iowait;
atomic_dec(&rq->nr_iowait);
delayacct_blkio_end();
return ret;
}
测试
1、加入io_schedule_timeout机制是否还会卡住?【也会卡住】
修改内核3.10代码:
./mm/filemap.c +179
#include <linux/jiffies.h>
static int sleep_on_page(void *word)
{
- io_schedule();
+ io_schedule_timeout(msecs_to_jiffies(20000));//20s
return 0;
}
编译成功,进行测试【出现问题时间变久,但最后还是会出现;理论不可行,最多能规避但没有找到原因不能实质性解决问题】
2、使用fsync(fd)而不用sync【也会卡住】
晚上测试明早看结果【也会卡住】
3、查看sync后进入不可中断的睡眠状态为何没有条件来唤醒【】
4、修改I/O调度器模式:
默认是deadline,修改为cfq【测试2小时删除88次后sync卡住】
/sys/block/sdb/queue/scheduler
修改为noop模式进行测试【】
进展
Linux 3.10.y多线程写SD卡后sync会卡住,目前发现drivers/scsi/sd.c文件中sd_prep_fn函数在组装scsi cmd时,写的物理sector位置触及到SD卡边界,代码如下:
/*
* Some SD card readers can't handle multi-sector accesses which touch
* the last one or two hardware sectors. Split accesses as needed.
*/
threshold = get_capacity(disk) - SD_LAST_BUGGY_SECTORS *
(sdp->sector_size / 512);
if (unlikely(sdp->last_sector_bug && block + this_count > threshold)) {
printk(KERN_ERR "eason %s,%d,--%d,%llu,%llu\n",__FUNCTION__, __LINE__,this_count,(unsigned long long)block,(unsigned long long)threshold);
if (block < threshold) {
/* Access up to the threshold but not beyond */
this_count = threshold - block;
} else {
/* Access only a single hardware sector */
this_count = sdp->sector_size / 512;
printk(KERN_ERR "this_count=%d\n",this_count);
}
}
物理位置最后的8个sector(4KB)会拆分成每个sector下发到scsi cmd,但不知为何会导致线程下刷page卡住,没有写进磁盘设备还是写成功后中断没有合并返回,具体原因待查【卡住时8个单独的sector是写成功的】。不知为何,格式化时写到边界的8个sector是可以返回的,多进程同时写测试边界的8个sector就不返回了吗?
bio=14102864 sectors,1762858 page 7220666368 bytes
in=0
1=1762858
2=1762858
3=0
4=1762858
5=0
6=0
7=0
run=2925699072 byte,714282 page,2925699072 + 4294967296(2^32) = 7220666368
irq=60946
out=60946
eason sd_prep_fn,953,--8,15523832,15523824
eason scsi_io_completion,836,good_bytes=512
eason sd_prep_fn,953,--7,15523833,15523824
eason scsi_io_completion,836,good_bytes=512
eason sd_prep_fn,953,--6,15523834,15523824
eason scsi_io_completion,836,good_bytes=512
eason sd_prep_fn,953,--5,15523835,15523824
eason scsi_io_completion,836,good_bytes=512
eason sd_prep_fn,953,--4,15523836,15523824
eason scsi_io_completion,836,good_bytes=512
eason sd_prep_fn,953,--3,15523837,15523824
eason scsi_io_completion,836,good_bytes=512
eason sd_prep_fn,953,--2,15523838,15523824
eason scsi_io_completion,836,good_bytes=512
eason sd_prep_fn,953,--1,15523839,15523824
eason scsi_io_completion,836,good_bytes=512
查看问题SD卡的sector容量和地址范围如下:
Disk /dev/sdb: 7948 MB, 7948206080 bytes
255 heads, 63 sectors/track, 966 cylinders, total 15523840 sectors == 7948206080 bytes
Units = sectors of 1 * 512 = 512 bytes
Device Boot Start End Blocks Id System
/dev/sdb1 8192 15523839 7757824 c Win95 FAT32 (LBA)
Partition 1 has different physical/logical endings:
phys=(965, 254, 63) logical=(966, 80, 10)
end 15523839刚好到容量范围15523840的边界,所以会在格式化和卡写完时大概率写到边界触发卡住问题。重新在板子上面进行分区:
Disk /dev/sdb: 7948 MB, 7948206080 bytes
255 heads, 63 sectors/track, 966 cylinders, total 15523840 sectors
Units = sectors of 1 * 512 = 512 bytes
Device Boot Start End Blocks Id System
/dev/sdb1 63 15518789 7759363+ 83 Linux
可以看到end的位置已经变化,距离容量边界相差5051个sector,所以任何情况下不会写到边界,在后面测试过程中没有出现问题。我们的SD卡只有1个分区,且是卡厂家出厂的默认值,我们只是把默认的fat32重新格式化了ext4,分区没有动过。
拿到电脑上进行分区和格式化测试如下:
在Linux上格式化后信息:
删除分区重新新建分区:全部按照默认值操作,应该可以设置保留
建立新分区时的默认值:可以看到使用了全部的柱面。
Disk /dev/sdb: 7948 MB, 7948206080 bytes
255 heads, 63 sectors/track, 966 cylinders, total 15523840 sectors
Units = sectors of 1 * 512 = 512 bytes
Device Boot Start End Blocks Id System
/dev/sdb1 * 2048 15523839 7760896 b Win95 FAT32
Partition 1 has different physical/logical endings:
phys=(965, 254, 63) logical=(966, 80, 10)
SD卡出厂默认信息:
可总结出:分区总扇区数 + 起始扇区号 = 设备总扇区数,但是在Linux上面分区时末尾有预留,就不会到访问到边界。问题情况了解了,继续查问题原因:
为啥访问到SD卡物理边界就会导致写文件卡死??
原因
不知为何,格式化时写到边界的8个sector是可以返回的,多进程同时写测试边界的8个sector就不返回了吗?
高版本内核3.18.108没有这个问题,为啥?已经解决此问题了吗
具体原因见:Linux ext4文件系统多线程写文件sync卡住分析