zram 源码分析

不同内核版本的 zram 源码可能不一致,以下分析仅供参考。
zram 提供一种基于内存的block 设备。写到zram中的内存页是经过压缩的,默认是使用lzo压缩。用户空间可以通过sysfs来控制zram的配置.
要使用zram必须按下面几步来操作。
1:装载模块。通过modprobe zram num_devices=4.这样就会创建四个block device :/dev/zram{0,1,2,3}
2:设定size 大小。通过echo $((50*1024*1024)) > /sys/block/zram0/disksize.如果不设定大小的话,默认是ram size的25%。如果zram0 中已经包含数据,则不能通过上述命令改变disksize。需要通过reset(echo 1 > /sys/block/zram0/reset)命令后,才能改变disksize.
3:激活
mkswap /dev/zram0  .制作swap 
swapon /dev/zram0  .使能swap
可以通过cat下面这些来查询zram的状态
/sys/block/zram<id>/
disksize
num_reads
num_writes
invalid_io
notify_free
discard
zero_pages
orig_data_size
compr_data_size
mem_used_total
关掉zram
swapoff /dev/zram0


可以看一下zram 源码下的makefile
zram-y := zcomp.o zram_drv.o


obj-$(CONFIG_ZRAM) += zram.o
可知要enable zram 只要在kconfig中定义config_zram=y即可
zram 总共有两个文件zcomp.c 和 zram_drv.c组成
/*
 * Module init: register the zram-control class, reserve a block major
 * number, then create the initial `num_devices` zram devices.
 * Returns 0 on success or a negative errno on failure.
 */
static int __init zram_init(void)
{
int ret;


/* /sys/class/zram-control: lets user space hot-add/remove devices */
ret = class_register(&zram_control_class);
if (ret) {
pr_err("Unable to register zram-control class\n");
return ret;
}


/* major == 0 asks the block layer to pick any free major number */
zram_major = register_blkdev(0, "zram");
if (zram_major <= 0) {
pr_err("Unable to get major number\n");
class_unregister(&zram_control_class);
return -EBUSY;
}


/* create the module-parameter-requested devices; the mutex
 * serializes idr allocation in zram_add() */
while (num_devices != 0) {
mutex_lock(&zram_index_mutex);
ret = zram_add();
mutex_unlock(&zram_index_mutex);
if (ret < 0)
goto out_error;
num_devices--;
}


return 0;


out_error:
/* tears down every device created so far plus major and class */
destroy_devices();
return ret;
}
注册一个zram_control_class,可以在user space 增加或者删除zram
zram_major = register_blkdev(0, "zram"); 注册一个block device。后面会通过zram_add 在zram_major下面增加子设备.
/*
 * Allocate and register one zram block device.
 * On success returns the new device id (>= 0); on failure unwinds all
 * partial allocations via the goto ladder and returns a negative errno.
 */
static int zram_add(void)
{
struct zram *zram;
struct request_queue *queue;
int ret, device_id;


zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
if (!zram)
return -ENOMEM;


/* idr hands back the lowest free id; it doubles as the minor number */
ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
if (ret < 0)
goto out_free_dev;
device_id = ret;


init_rwsem(&zram->init_lock);


queue = blk_alloc_queue(GFP_KERNEL);
if (!queue) {
pr_err("Error allocating disk queue for device %d\n",
device_id);
ret = -ENOMEM;
goto out_free_idr;
}


/* bio-based driver: bypass the request layer, handle bios directly */
blk_queue_make_request(queue, zram_make_request);


/* gendisk structure */
zram->disk = alloc_disk(1);
if (!zram->disk) {
pr_err("Error allocating disk structure for device %d\n",
device_id);
ret = -ENOMEM;
goto out_free_queue;
}


zram->disk->major = zram_major;
zram->disk->first_minor = device_id;
zram->disk->fops = &zram_devops;
zram->disk->queue = queue;
zram->disk->queue->queuedata = zram;
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);


/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
set_capacity(zram->disk, 0);
/* zram devices sort of resembles non-rotational disks */
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
/*
 * To ensure that we always get PAGE_SIZE aligned
 * and n*PAGE_SIZED sized I/O requests.
 */
blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
blk_queue_logical_block_size(zram->disk->queue,
ZRAM_LOGICAL_BLOCK_SIZE);
blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
/*
 * zram_bio_discard() will clear all logical blocks if logical block
 * size is identical with physical block size(PAGE_SIZE). But if it is
 * different, we will skip discarding some parts of logical blocks in
 * the part of the request range which isn't aligned to physical block
 * size.  So we can't ensure that all discarded logical blocks are
 * zeroed.
 */
if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
zram->disk->queue->limits.discard_zeroes_data = 1;
else
zram->disk->queue->limits.discard_zeroes_data = 0;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);


/* device becomes visible to user space from here on */
add_disk(zram->disk);


/* attributes such as disksize, reset, mem_used_total ... */
ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
&zram_disk_attr_group);
if (ret < 0) {
pr_err("Error creating sysfs group for device %d\n",
device_id);
goto out_free_disk;
}
strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
/* meta is allocated lazily when disksize is set via sysfs */
zram->meta = NULL;


pr_info("Added device: %s\n", zram->disk->disk_name);
return device_id;


out_free_disk:
del_gendisk(zram->disk);
put_disk(zram->disk);
out_free_queue:
blk_cleanup_queue(queue);
out_free_idr:
idr_remove(&zram_index_idr, device_id);
out_free_dev:
kfree(zram);
return ret;
}
static const char *default_compressor = "lzo"; 说明默认是用lzo 压缩的。你也可以换成lz4等.
在zcomp.c中总共支持以下几种压缩方式。
/*
 * Compression algorithms selectable through
 * /sys/block/zram<id>/comp_algorithm. "lzo" is always built in; the
 * others are available only when the matching crypto Kconfig option
 * is enabled. The array is NULL-terminated.
 */
static const char * const backends[] = {
"lzo",
#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
"lz4",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_DEFLATE)
"deflate",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC)
"lz4hc",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_842)
"842",
#endif
NULL
};
/*
 * Service a read of the (possibly partial) page at @index into the
 * page referenced by @bvec. Unallocated or ZRAM_ZERO slots read back
 * as zeroes; otherwise the stored object is decompressed.
 * Returns 0 on success or a negative errno.
 */
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 u32 index, int offset)
{
int ret;
struct page *page;
unsigned char *user_mem, *uncmem = NULL;
struct zram_meta *meta = zram->meta;
page = bvec->bv_page;


/* no handle or zero flag: slot was never written / is all-zero */
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
if (unlikely(!meta->table[index].handle) ||
zram_test_flag(meta, index, ZRAM_ZERO)) {
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
handle_zero_page(bvec);
return 0;
}
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);


if (is_partial_io(bvec))
/* Use  a temporary buffer to decompress the page */
uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);


user_mem = kmap_atomic(page);
if (!is_partial_io(bvec))
/* full-page read: decompress straight into the caller's page */
uncmem = user_mem;


if (!uncmem) {
pr_err("Unable to allocate temp memory\n");
ret = -ENOMEM;
goto out_cleanup;
}


ret = zram_decompress_page(zram, uncmem, index);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret))
goto out_cleanup;


/* partial read: copy just the requested slice out of the buffer */
if (is_partial_io(bvec))
memcpy(user_mem + bvec->bv_offset, uncmem + offset,
bvec->bv_len);


flush_dcache_page(page);
ret = 0;
out_cleanup:
kunmap_atomic(user_mem);
if (is_partial_io(bvec))
kfree(uncmem);
return ret;
}
zram_bvec_read 内部调用 zram_decompress_page 来解压内容。
/*
 * Service a write of the (possibly partial) page in @bvec to slot
 * @index: compress the page, store it in the zsmalloc pool and update
 * the slot table and statistics. All-zero pages are recorded with the
 * ZRAM_ZERO flag instead of being stored.
 * Returns 0 on success or a negative errno.
 */
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
  int offset)
{
int ret = 0;
unsigned int clen;
unsigned long handle = 0;
struct page *page;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
struct zram_meta *meta = zram->meta;
struct zcomp_strm *zstrm = NULL;
unsigned long alloced_pages;


page = bvec->bv_page;
if (is_partial_io(bvec)) {
/*
 * This is a partial IO. We need to read the full page
 * before to write the changes.
 */
uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
if (!uncmem) {
ret = -ENOMEM;
goto out;
}
ret = zram_decompress_page(zram, uncmem, index);
if (ret)
goto out;
}


/* re-entered when the slow-path zs_malloc succeeds (see below) */
compress_again:
user_mem = kmap_atomic(page);
if (is_partial_io(bvec)) {
/* merge the new slice into the previously read-back page */
memcpy(uncmem + offset, user_mem + bvec->bv_offset,
      bvec->bv_len);
kunmap_atomic(user_mem);
user_mem = NULL;
} else {
uncmem = user_mem;
}


/* all-zero pages are not stored; only a flag is recorded */
if (page_zero_filled(uncmem)) {
if (user_mem)
kunmap_atomic(user_mem);
/* Free memory associated with this sector now. */
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
zram_free_page(zram, index);
zram_set_flag(meta, index, ZRAM_ZERO);
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);


atomic64_inc(&zram->stats.zero_pages);
ret = 0;
goto out;
}


/* grab a per-cpu compression stream (disables preemption) */
zstrm = zcomp_stream_get(zram->comp);
ret = zcomp_compress(zstrm, uncmem, &clen);
if (!is_partial_io(bvec)) {
kunmap_atomic(user_mem);
user_mem = NULL;
uncmem = NULL;
}


if (unlikely(ret)) {
pr_err("Compression failed! err=%d\n", ret);
goto out;
}


src = zstrm->buffer;
/* poorly compressible page: store it uncompressed instead */
if (unlikely(clen > max_zpage_size)) {
clen = PAGE_SIZE;
if (is_partial_io(bvec))
src = uncmem;
}


/*
 * handle allocation has 2 paths:
 * a) fast path is executed with preemption disabled (for
 *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
 *  since we can't sleep;
 * b) slow path enables preemption and attempts to allocate
 *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
 *  put per-cpu compression stream and, thus, to re-do
 *  the compression once handle is allocated.
 *
 * if we have a 'non-null' handle here then we are coming
 * from the slow path and handle has already been allocated.
 */
if (!handle)
handle = zs_malloc(meta->mem_pool, clen,
__GFP_KSWAPD_RECLAIM |
__GFP_NOWARN |
__GFP_HIGHMEM |
__GFP_MOVABLE);
if (!handle) {
zcomp_stream_put(zram->comp);
zstrm = NULL;


atomic64_inc(&zram->stats.writestall);


/* slow path: may sleep in direct reclaim, so the stream was
 * released above and compression must be redone */
handle = zs_malloc(meta->mem_pool, clen,
GFP_NOIO | __GFP_HIGHMEM |
__GFP_MOVABLE);
if (handle)
goto compress_again;


pr_err("Error allocating memory for compressed page: %u, size=%u\n",
index, clen);
ret = -ENOMEM;
goto out;
}


/* enforce the optional mem_limit set via sysfs */
alloced_pages = zs_get_total_pages(meta->mem_pool);
update_used_max(zram, alloced_pages);


if (zram->limit_pages && alloced_pages > zram->limit_pages) {
zs_free(meta->mem_pool, handle);
ret = -ENOMEM;
goto out;
}


cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);


/* uncompressed full page: copy directly from the source page */
if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
src = kmap_atomic(page);
copy_page(cmem, src);
kunmap_atomic(src);
} else {
memcpy(cmem, src, clen);
}


zcomp_stream_put(zram->comp);
zstrm = NULL;
zs_unmap_object(meta->mem_pool, handle);


/*
 * Free memory associated with this sector
 * before overwriting unused sectors.
 */
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
zram_free_page(zram, index);


meta->table[index].handle = handle;
zram_set_obj_size(meta, index, clen);
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);


/* Update stats */
atomic64_add(clen, &zram->stats.compr_data_size);
atomic64_inc(&zram->stats.pages_stored);
out:
if (zstrm)
zcomp_stream_put(zram->comp);
if (is_partial_io(bvec))
kfree(uncmem);
return ret;
}
zram_bvec_write 内部调用 zcomp_compress 来压缩内容。

  • 0
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值