barebox study notes, part 4 -- Memory management

1. Memory mapping
//System call provided by the Linux kernel:
@include/linux/mman.h
void *mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off);
//Interface provided by barebox:
void *  memmap(int fd, int flags)

//Comparing the two: the Linux call maps a region of memory specified by
//(address, length); the starting offset into the file is given by off, and the
//mapping length is already fixed by len.

//In barebox, memmap() specifies neither a memory address/length nor a starting
//offset into the file, so a single call always maps the entire file.
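
//A minimal usage sketch (assumptions: a devfs node such as /dev/mem exists, its
//driver implements memmap, and PROT_READ is the mapping flag expected here):
int fd = open("/dev/mem", O_RDONLY);
void *map;

if (fd >= 0) {
        map = memmap(fd, PROT_READ);
        if (map != (void *)-1)          //memmap() returns (void *)-1 on failure
                printf("0x%08x\n", *(uint32_t *)map);
        close(fd);
}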

@fs/fs.c
void *  memmap(int fd, int flags)
{
        struct device_d *dev;
        struct fs_driver_d *fsdrv;
        FILE *f = &files[fd];   //fetch the FILE structure for fd from the files[MAX_FILES] array
        void *retp = (void *)-1;
        int ret;

        if (check_fd(fd))
                return retp;
        
        //FILE::dev was already initialized when open() was called.
        dev = f->dev;

        //From dev, get the fs_driver_d instance of the filesystem this device belongs to, i.e. the fs_driver_d structure implemented by your filesystem.
        fsdrv = dev_to_fs_driver(dev);

        //Call the memmap() implemented by that filesystem, i.e. your fs_driver_d::memmap().
        if (fsdrv->memmap)
                ret = fsdrv->memmap(dev, f, &retp, flags);
        else
                ret = -EINVAL;

        if (ret)
                errno = -ret;

        return retp; //retp is what memmap() hands back through its third argument; for devfs it is the map pointer filled in by devfs_memmap().
}
EXPORT_SYMBOL(memmap);


static struct fs_driver_d devfs_driver = {
        ...
        .memmap    = devfs_memmap,
        ...        
};

//For barebox's devfs this ends up in cdev->ops->memmap() of the cdev embedded
//in the device, i.e. the memmap() implemented by the device's driver.
static int devfs_memmap(struct device_d *_dev, FILE *f, void **map, int flags)
+-- ret = cdev->ops->memmap(cdev, map, flags);
+-- *map = (void *)((unsigned long)*map + (unsigned long)cdev->offset);
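//Worked example (made-up numbers): if the driver maps the parent device at
//0x40000000 and the cdev is a partition starting 1MiB into it
//(cdev->offset == 0x100000), the pointer handed back through *map is
//0x40000000 + 0x100000 = 0x40100000.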


//Every device embeds a struct cdev
struct cdev {
        struct file_operations *ops;
        ...
};

//The cdev embedded in a device is usually set up in the driver's xxx_probe() function.
static int mem_probe(struct device_d *dev)
{
        struct cdev *cdev;

        cdev = xzalloc(sizeof (*cdev));
        dev->priv = cdev;

        cdev->name = (char*)dev->resource[0].name;
        cdev->size = (unsigned long)resource_size(&dev->resource[0]);
        cdev->ops = &memops;  //cdev->ops is initialized here.
        cdev->dev = dev;

        devfs_create(cdev);

        return 0;
}

static struct driver_d mem_drv = {
        .name  = "mem",
        .probe = mem_probe,
};

//Init function of the mem device, called during system initialization.
static int mem_init(void)
{
        rw_buf = malloc(RW_BUF_SIZE);
        if(!rw_buf) {
                printf("%s: Out of memory\n", __FUNCTION__);
                return -1;
        }

        add_mem_device("mem", 0, ~0, IORESOURCE_MEM_WRITEABLE);
      //+-- add_generic_device("mem", DEVICE_ID_DYNAMIC, name, start, size, IORESOURCE_MEM | flags, NULL);
      //    +-- res = xzalloc(sizeof(struct resource));
      //    +-- res[0].name = xstrdup(resname);
      //    +-- res[0].start = start;
      //    +-- res[0].end = start + size - 1;
      //    +-- res[0].flags = flags;
      //    +-- add_generic_device_res(devname, id, res, 1, pdata);
      //        +-- dev = alloc_device(devname, id, pdata);
      //        +-- dev->resource = res;
      //        +-- dev->num_resources = nb;
      //        +-- register_device(dev);         //the mem device is registered here
        register_driver(&mem_drv);                //the mem driver is registered here... probe() gets called.

        return 0;
}
device_initcall(mem_init);


//Taking the mem device as an example, here is the memops function table used to initialize cdev->ops:
@commands/mem.c
static struct file_operations memops = {
        .read  = mem_read,
        .write = mem_write,
        .memmap = generic_memmap_rw,
        .lseek = dev_lseek_default,
};
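
//Other memory-backed drivers can reuse the same generic helpers. A hypothetical
//read-only device might wire up its ops like this (romops and the exact choice
//of helpers are made up for illustration):
static struct file_operations romops = {
        .memmap = generic_memmap_ro,   //read-only mapping, shown just below
        .lseek  = dev_lseek_default,
};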

int generic_memmap_ro(struct cdev *cdev, void **map, int flags)
int generic_memmap_rw(struct cdev *cdev, void **map, int flags)
+-- *map = dev_get_mem_region(cdev->dev, 0);
    +-- res = dev_get_resource(dev, num);
            //+-- for (i = 0; i < dev->num_resources; i++) {
            //        struct resource *res = &dev->resource[i];
            //        if (resource_type(res) == IORESOURCE_MEM) {
            //            if (n == num)
            //                return res;
            //            n++;
            //        }
            //    }
    +-- return (void __force *)res->start;



//For the mem device, the *map pointer is actually obtained from a struct resource. First, its definition:
//Resources are tree-like, allowing nesting etc..
struct resource {
        resource_size_t start;
        resource_size_t end;
        const char *name;
        unsigned long flags;
        struct resource *parent;
        struct list_head children;
        struct list_head sibling;
};
//Judging from the comment and the definition, a resource mainly tracks address-range information and the tree structure formed between different resources.


//Now let's see how a resource is used, starting with how it gets initialized:
static inline struct device_d *
add_mem_device(const char *name, resource_size_t start, resource_size_t size, unsigned int flags)
+-- add_generic_device("mem", DEVICE_ID_DYNAMIC, name, start, size, IORESOURCE_MEM | flags, NULL);
    +-- res = xzalloc(sizeof(struct resource)); //the resource is created and initialized when the device is created.
    +-- res[0].name = xstrdup(resname);
    +-- res[0].start = start;
    +-- res[0].end = start + size - 1;
    +-- res[0].flags = flags;
    +-- add_generic_device_res(devname, id, res, 1, pdata); 
        +-- dev = alloc_device(devname, id, pdata);
        +-- dev->resource = res;
        +-- dev->num_resources = nb;
        +-- register_device(dev);        //the device is registered
//Typically a device is added through some add_xxx_device() helper, which in turn calls
//add_generic_device(). That function builds a resource from its arguments and then calls
//add_generic_device_res() to register a device that owns resources. The resources here
//seem to refer specifically to memory regions (physical memory).
//add_xxx_device()
//+-- add_generic_device()
//    +-- resource init/alloc...
//    +-- add_generic_device_res()
//        +-- device_d init/alloc...
//        +-- register_device()
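
//Following this pattern, a hypothetical add_xxx_device() helper for a memory-mapped
//peripheral might look like this (device name and the 4KiB region size are made up):
static inline struct device_d *add_myuart_device(int id, resource_size_t start)
{
        return add_generic_device("myuart", id, NULL, start, 0x1000,
                                  IORESOURCE_MEM, NULL);
}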






2. Memory allocation -- malloc
// Request a region from the registered sdram
struct resource *
request_sdram_region(const char *name, resource_size_t start, resource_size_t size)
{
        struct memory_bank *bank;

        for_each_memory_bank(bank) {
                struct resource *res;
                res = request_region(bank->res, name, start, start + size - 1);
                if (res)
                        return res;
        }
        return NULL;
}
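
//A typical (hypothetical) use: reserve a chunk of SDRAM so that nothing else
//gets placed on top of it. The name, address and size below are made up:
struct resource *res;

res = request_sdram_region("framebuffer", 0x80000000, 0x00100000);
if (!res)
        printf("framebuffer region already claimed\n");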



LIST_HEAD(memory_banks);
void barebox_add_memory_bank(const char *name, resource_size_t start, resource_size_t size)
{
        struct memory_bank *bank = xzalloc(sizeof(*bank));
        struct device_d *dev;

        bank->res = request_iomem_region(name, start, start + size - 1);

        BUG_ON(!bank->res);

        dev = add_mem_device(name, start, size, IORESOURCE_MEM_WRITEABLE);

        bank->dev = dev;
        bank->start = start;
        bank->size = size;

        list_add_tail(&bank->list, &memory_banks);
}
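
//A board file registers its RAM with something like the following sketch (bank
//name, base address and size are assumptions):
static int my_board_mem_init(void)
{
        barebox_add_memory_bank("ram0", 0x80000000, 64 * 1024 * 1024);
        return 0;
}
mem_initcall(my_board_mem_init);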


//barebox defines iomem_resource, a struct resource instance covering the whole address space [0x00000000,0xffffffff], as the root of the resource tree.
// The root resource for the whole io space 
struct resource iomem_resource = {
        .start = 0,
        .end = 0xffffffff,
};

//The interface for requesting a memory resource [start,end] within this address space is request_iomem_region()
//request a region inside the io space
struct resource *
request_iomem_region(const char *name, resource_size_t start, resource_size_t end)
    //carve the [start,end] part out of iomem_resource and return it as a resource named by the string name.
+-- request_region(&iomem_resource, name, start, end);
        //check whether the requested region conflicts with an already-requested one.
    +-- list_for_each_entry(r, &parent->children, sibling) {
                if (end < r->start)
                        goto ok;
                if (start > r->end)
                        continue;
                debug("%s: 0x%08x:0x%08x conflicts with 0x%08x:0x%08x\n",
                                __func__, start, end, r->start, r->end);
                return NULL;
        }
    +-- ok:
    +-- debug("%s ok: 0x%08x:0x%08x\n", __func__, start, end);
        //create a resource instance describing exactly [start,end] and link it into the parent's children list
    +-- new = xzalloc(sizeof(*new));
    +-- init_resource(new, name);
    +-- new->start = start;
    +-- new->end = end;
    +-- new->parent = parent;
        //insert the new resource in front of r (list_add_tail() adds before its second argument), which keeps parent->children sorted by address
    +-- list_add_tail(&new->sibling, &r->sibling);
    
    +-- return new;
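
//The conflict check in action (made-up addresses): the first request succeeds,
//the second overlaps it and therefore returns NULL.
struct resource *a, *b;

a = request_iomem_region("uart", 0x10000000, 0x10000fff);  //ok
b = request_iomem_region("dup",  0x10000800, 0x10001fff);  //overlaps a -> NULL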

//The iomem_resource resource describes the whole address space [0x00000000,0xffffffff], and iomem_resource.children
//heads the list of resource structures for the individual sub-regions of that space. Those sub-region resources are
//linked together through their resource->sibling members, and each one's resource->parent points back to the root
//resource iomem_resource.
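
//A sketch of walking that list (this mirrors what barebox's iomem shell command
//prints; the loop below is an illustration, not code from the tree):
struct resource *r;

list_for_each_entry(r, &iomem_resource.children, sibling)
        printf("0x%08x:0x%08x %s\n", r->start, r->end, r->name);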


static int iomem_init(void)
{
        init_resource(&iomem_resource, "iomem");
      //+-- INIT_LIST_HEAD(&res->children);
      //+-- res->parent = NULL;
      //+-- res->name = xstrdup(name);  //iomem_resource.name is set to "iomem"
        return 0;
}
postcore_initcall(iomem_init);




@arch/arm/lib/arm.c
static int arm_mem_malloc_init(void)
{
        mem_malloc_init((void *)MALLOC_BASE, (void *)(MALLOC_BASE + MALLOC_SIZE - 1));
        return 0;
}
core_initcall(arm_mem_malloc_init);

//In mem_malloc_init(), malloc_start and malloc_brk are set to MALLOC_BASE,
//and malloc_end is set to (MALLOC_BASE + MALLOC_SIZE - 1).
void mem_malloc_init(void *start, void *end)
{
        malloc_start = (unsigned long)start;
        malloc_end = (unsigned long)end;
        malloc_brk = malloc_start;
#ifdef CONFIG_MALLOC_TLSF
        tlsf_mem_pool = tlsf_create(start, end - start + 1);
#endif
}

#ifndef __SANDBOX__
static int mem_malloc_resource(void)
{
        /*
         * Normally it's a bug when one of these fails,
         * but we have some setups where some of these
         * regions are outside of sdram in which case
         * the following fails.
         */
        //request a region of physical memory [malloc_start,malloc_end] to serve dynamic allocation and freeing
        request_sdram_region("malloc space",
                        malloc_start,
                        malloc_end - malloc_start + 1);
        request_sdram_region("barebox",
                        (unsigned long)&_stext,
                        (unsigned long)&_etext -
                        (unsigned long)&_stext + 1);
        request_sdram_region("bss",
                        (unsigned long)&__bss_start,
                        (unsigned long)&__bss_stop -
                        (unsigned long)&__bss_start + 1);
#ifdef STACK_BASE
        request_sdram_region("stack", STACK_BASE, STACK_SIZE);
#endif
        return 0;
}
coredevice_initcall(mem_malloc_resource);
#endif



//Move the malloc_brk pointer by increment (within the range [malloc_start,malloc_end])
//and return its position before the move.
static void *sbrk_no_zero(ptrdiff_t increment)
{
        unsigned long old = malloc_brk;
        unsigned long new = old + increment;
        if ((new < malloc_start) || (new > malloc_end))
                return NULL;
        malloc_brk = new;
        return (void *)old;
}
//Zero the memory covered by the increment and return a pointer to that memory.
void *sbrk(ptrdiff_t increment)
{
        void *old = sbrk_no_zero(increment);

        /* Only clear increment, if valid address was returned */
        if (old != NULL)
                memset(old, 0, increment);

        return old;
}
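
//A worked illustration of the semantics, assuming a fresh heap
//(malloc_brk == malloc_start):
void *p0 = sbrk(64);  //returns the old break (malloc_start), the 64 bytes zeroed
void *p1 = sbrk(64);  //returns malloc_start + 64
void *p2 = sbrk(0);   //increment 0: just reads the current break position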




   Bins

    The bins, `av_' are an array of pairs of pointers serving as the
    heads of (initially empty) doubly-linked lists of chunks, laid out
    in a way so that each pair can be treated as if it were in a
    malloc_chunk. (This way, the fd/bk offsets for linking bin heads
    and chunks are the same).


    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
    8 bytes apart. Larger bins are approximately logarithmically
    spaced. (See the table below.) The `av_' array is never mentioned
    directly in the code, but instead via bin access macros.


    Bin layout:

    64 bins of size       8
    32 bins of size      64
    16 bins of size     512
     8 bins of size    4096
     4 bins of size   32768
     2 bins of size  262144
     1 bin  of size what's left

    There is actually a little bit of slop in the numbers in bin_index
    for the sake of speed. This makes no difference elsewhere.
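
    As a worked example (based on the classic dlmalloc 2.6.x bin_index macro;
    treat the exact constants as an assumption): chunks below 512 bytes are
    indexed by size >> 3, so a 200-byte chunk lands in bin 200 >> 3 = 25, while
    larger requests fall into the logarithmically spaced bins listed above.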


    The special chunks `top' and `last_remainder' get their own bins,
    (this is implemented via yet more trickery with the av_ array),
    although `top' is never properly linked to its bin since it is
    always handled specially.

