mtd_info

    The data structure that describes an MTD raw device is struct mtd_info, which defines a large amount of MTD data and the operation functions for it. Each MTD raw device has one mtd_info structure (the master), whose priv pointer points to a map_info structure. Each partition is in turn treated as an mtd_info of its own: with two MTD raw devices and three partitions on each, the system holds six mtd_info structures, all recorded in mtd_table. mtd_table (defined in mtdcore.c) is the list of all registered MTD raw devices.
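As a rough sketch of how these pieces fit together, the fragment below shows a map-layer NOR driver whose probe produces the master mtd_info (its priv pointing back at the map_info) and whose partition table becomes one mtd_info per entry. The names my_map and my_parts and all addresses are invented for illustration, and mtd_device_register() is the modern registration call; older kernels called add_mtd_partitions() directly.

#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static struct map_info my_map = {	/* hypothetical 8 MiB NOR chip */
	.name      = "my-nor",
	.size      = 0x800000,
	.bankwidth = 2,
	.phys      = 0x10000000,
};

static const struct mtd_partition my_parts[] = {	/* three partitions */
	{ .name = "u-boot", .offset = 0,        .size = 0x40000 },
	{ .name = "kernel", .offset = 0x40000,  .size = 0x300000 },
	{ .name = "rootfs", .offset = 0x340000, .size = MTDPART_SIZ_FULL },
};

static int __init my_flash_init(void)
{
	struct mtd_info *master;

	my_map.virt = ioremap(my_map.phys, my_map.size);
	if (!my_map.virt)
		return -ENOMEM;
	simple_map_init(&my_map);

	/* The CFI probe returns the master mtd_info; master->priv
	 * refers back to my_map. */
	master = do_map_probe("cfi_probe", &my_map);
	if (!master) {
		iounmap(my_map.virt);
		return -ENXIO;
	}

	/* Each entry of my_parts is registered as its own mtd_info. */
	return mtd_device_register(master, my_parts, ARRAY_SIZE(my_parts));
}
module_init(my_flash_init);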

mtd.h (include/linux/mtd/mtd.h; this version of the struct, with directly callable function pointers, comes from an older kernel)

struct mtd_info {
	u_char type;
	uint32_t flags;
	uint64_t size;	 // Total size of the MTD

	/* "Major" erase size for the device. Naïve users may take this
	 * to be the only erase size available, or may use the more detailed
	 * information below if they desire
	 */
	uint32_t erasesize;
	/* Minimal writable flash unit size. In case of NOR flash it is 1 (even
	 * though individual bits can be cleared), in case of NAND flash it is
	 * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR
	 * it is of ECC block size, etc. It is illegal to have writesize = 0.
	 * Any driver registering a struct mtd_info must ensure a writesize of
	 * 1 or larger.
	 */
	uint32_t writesize;

	uint32_t oobsize;   // Amount of OOB data per block (e.g. 16)
	uint32_t oobavail;  // Available OOB bytes per block

	/*
	 * If erasesize is a power of 2 then the shift is stored in
	 * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize.
	 */
	unsigned int erasesize_shift;
	unsigned int writesize_shift;
	/* Masks based on erasesize_shift and writesize_shift */
	unsigned int erasesize_mask;
	unsigned int writesize_mask;

	// Kernel-only stuff starts here.
	const char *name;
	int index;

	/* ecc layout structure pointer - read only ! */
	struct nand_ecclayout *ecclayout;

	/* Data for variable erase regions. If numeraseregions is zero,
	 * it means that the whole device has erasesize as given above.
	 */
	int numeraseregions;
	struct mtd_erase_region_info *eraseregions;

	/*
	 * Erase is an asynchronous operation.  Device drivers are supposed
	 * to call instr->callback() whenever the operation completes, even
	 * if it completes with a failure.
	 * Callers are supposed to pass a callback function and wait for it
	 * to be called before writing to the block.
	 */
	int (*erase) (struct mtd_info *mtd, struct erase_info *instr);

	/* This stuff for eXecute-In-Place */
	/* phys is optional and may be set to NULL */
	int (*point) (struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, void **virt, resource_size_t *phys);

	/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
	void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len);

	/* Allow NOMMU mmap() to directly map the device (if not NULL)
	 * - return the address to which the offset maps
	 * - return -ENOSYS to indicate refusal to do the mapping
	 */
	unsigned long (*get_unmapped_area) (struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags);

	/* Backing device capabilities for this device
	 * - provides mmap capabilities
	 */
	struct backing_dev_info *backing_dev_info;


	int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
	int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);

	/* In blackbox flight recorder like scenarios we want to make successful
	   writes in interrupt context. panic_write() is only intended to be
	   called when it's known the kernel is about to panic and we need the
	   write to succeed. Since the kernel is not going to be running for much
	   longer, this function can break locks and delay to ensure the write
	   succeeds (but not sleep). */

	int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);

	int (*read_oob) (struct mtd_info *mtd, loff_t from,
			 struct mtd_oob_ops *ops);
	int (*write_oob) (struct mtd_info *mtd, loff_t to,
			 struct mtd_oob_ops *ops);

	/*
	 * Methods to access the protection register area, present in some
	 * flash devices. The user data is one time programmable but the
	 * factory data is read only.
	 */
	int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
	int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
	int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
	int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
	int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
	int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len);

	/* kvec-based read/write methods.
	   NB: The 'count' parameter is the number of _vectors_, each of
	   which contains an (ofs, len) tuple.
	*/
	int (*writev) (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen);

	/* Sync */
	void (*sync) (struct mtd_info *mtd);

	/* Chip-supported device locking */
	int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);

	/* Power Management functions */
	int (*suspend) (struct mtd_info *mtd);
	void (*resume) (struct mtd_info *mtd);

	/* Bad block management functions */
	int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
	int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);

	struct notifier_block reboot_notifier;  /* default mode before reboot */

	/* ECC status information */
	struct mtd_ecc_stats ecc_stats;
	/* Subpage shift (NAND) */
	int subpage_sft;

	void *priv;

	struct module *owner;
	struct device dev;
	int usecount;

	/* If the driver is something smart, like UBI, it may need to maintain
	 * its own reference counting. The below functions are only for driver.
	 * The driver may register its callbacks. These callbacks are not
	 * supposed to be called by MTD users */
	int (*get_device) (struct mtd_info *mtd);
	void (*put_device) (struct mtd_info *mtd);
};
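A minimal, hedged usage sketch against this old-style interface (a kernel of roughly the 2.6 era is assumed; the device number 0, buffer size, and helper names are illustrative). The first helper calls the read hook directly; the second shows the classic wait-for-callback pattern that the asynchronous erase comment above calls for:

#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/wait.h>

/* Read the first 512 bytes of MTD device 0 via the direct hook. */
static int dump_first_sector(void)
{
	struct mtd_info *mtd = get_mtd_device(NULL, 0);
	u_char buf[512];
	size_t retlen;
	int ret;

	if (!mtd || IS_ERR(mtd))	/* NULL or ERR_PTR, depending on kernel */
		return -ENODEV;

	ret = mtd->read(mtd, 0, sizeof(buf), &retlen, buf);
	if (!ret && retlen != sizeof(buf))
		ret = -EIO;		/* short read */

	put_mtd_device(mtd);
	return ret;
}

/* The erase callback just wakes whoever is waiting in instr->priv. */
static void erase_done(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

/* Erase the first erase block and wait for the driver's callback. */
static int erase_first_block(struct mtd_info *mtd)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct erase_info instr = {
		.mtd      = mtd,
		.addr     = 0,
		.len      = mtd->erasesize,
		.callback = erase_done,
		.priv     = (u_long)&wq,
	};
	int ret = mtd->erase(mtd, &instr);

	if (ret)
		return ret;
	wait_event(wq, instr.state == MTD_ERASE_DONE ||
		       instr.state == MTD_ERASE_FAILED);
	return instr.state == MTD_ERASE_DONE ? 0 : -EIO;
}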


 

mtdpart.c (drivers/mtd/mtdpart.c from a newer kernel; this copy carries vendor additions guarded by ENCRYPT_ROOTFS_HEADER and CONFIG_SKIP_SQUASHFS_BAD_BLOCK)

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
#include <linux/of.h>

#include "mtdcore.h"

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

#ifdef ENCRYPT_ROOTFS_HEADER
#define ENCRYPT_ROOTFS_HEADER_SIZE (512)
extern long long rootfs_start;
extern int decrypt_rootfs_header_flg;
extern unsigned char decrypt_rootfs_header_buffer[ENCRYPT_ROOTFS_HEADER_SIZE];
#endif

#ifdef CONFIG_SKIP_SQUASHFS_BAD_BLOCK
#define MAX_PARTITION_MAPPING 4

struct part_map {
	struct mtd_info *part_mtd;	/* Mapping partition mtd */
	unsigned *map_table;		/* Mapping from logic block to phys block */
	unsigned nBlock;		/* Logic block number */
};

static struct part_map *part_mapping[MAX_PARTITION_MAPPING];
static int part_mapping_count = -1;

loff_t ajust_offset(struct mtd_info *mtd, loff_t from)
{
	unsigned logic_b, phys_b;
	unsigned index;

	if (part_mapping_count <= 0)
		return from;

	for (index = 0; index < MAX_PARTITION_MAPPING; index++) {
		if (!part_mapping[index] || part_mapping[index]->part_mtd != mtd)
			continue;

		/* remap from logic block to physical block */
		logic_b = from >> mtd->erasesize_shift;
		if (logic_b < part_mapping[index]->nBlock) {
			phys_b = part_mapping[index]->map_table[logic_b];
			from = phys_b << mtd->erasesize_shift |
			       (from & (mtd->erasesize - 1));
			break;
		}
	}
	return from;
}

static int part_create_partition_mapping(struct mtd_info *part_mtd)
{
	struct part_map *map_part;
	int index;
	unsigned offset;
	int logical_b, phys_b;

	if (!part_mtd || part_mtd->type == MTD_NORFLASH) {
		printk("null mtd or it is no nand chip!");
		return -1;
	}

	if (part_mapping_count < 0) {
		/* Init the part mapping table when this function called first time */
		memset(part_mapping, 0,
		       sizeof(struct part_map *) * MAX_PARTITION_MAPPING);
		part_mapping_count = 0;
	}

	for (index = 0; index < MAX_PARTITION_MAPPING; index++) {
		if (part_mapping[index] == NULL)
			break;
	}
	if (index >= MAX_PARTITION_MAPPING) {
		printk("partition mapping is full!");
		return -1;
	}

	map_part = kmalloc(sizeof(struct part_map), GFP_KERNEL);
	if (!map_part) {
		printk("memory allocation error while creating partitions mapping for %s\n",
		       part_mtd->name);
		return -1;
	}

	map_part->map_table = kmalloc(sizeof(unsigned) *
				      (part_mtd->size >> part_mtd->erasesize_shift),
				      GFP_KERNEL);
	if (!map_part->map_table) {
		printk("memory allocation error while creating partitions mapping for %s\n",
		       part_mtd->name);
		kfree(map_part);
		return -1;
	}
	memset(map_part->map_table, 0xFF,
	       sizeof(unsigned) * (part_mtd->size >> part_mtd->erasesize_shift));

	/* Create partition mapping table */
	logical_b = 0;
	for (offset = 0; offset < part_mtd->size; offset += part_mtd->erasesize) {
		if (mtd_block_isbad(part_mtd, offset))
			continue;
		phys_b = offset >> part_mtd->erasesize_shift;
		map_part->map_table[logical_b] = phys_b;
		logical_b++;
	}
	map_part->nBlock = logical_b;
	map_part->part_mtd = part_mtd;

	part_mapping[index] = map_part;
	part_mapping_count++;
	return 0;
}

static void part_del_partition_mapping(struct mtd_info *part_mtd)
{
	int index;
	struct part_map *map_part;

	if (part_mapping_count <= 0)
		return;

	for (index = 0; index < MAX_PARTITION_MAPPING; index++) {
		map_part = part_mapping[index];
		if (!map_part || map_part->part_mtd != part_mtd)
			continue;

		kfree(map_part->map_table);
		kfree(map_part);
		part_mapping[index] = NULL;
		part_mapping_count--;
	}
}

static int part_is_squashfs(struct mtd_info *part_mtd)
{
	u_char buf[16];
	size_t retlen;
	unsigned offset;

	if (!part_mtd || part_mtd->type == MTD_NORFLASH)
		return 0;

#ifdef ENCRYPT_ROOTFS_HEADER
	if (decrypt_rootfs_header_flg &&
	    mtd_get_master_ofs(part_mtd, 0) == rootfs_start)
		return !memcmp(decrypt_rootfs_header_buffer, "hsqs", 4);
#endif

	for (offset = 0; offset < part_mtd->erasesize * 2;
	     offset += part_mtd->erasesize) {
		if (mtd_block_isbad(part_mtd, offset))
			continue;
		mtd_read(part_mtd, offset, 16, &retlen, buf);
		if (!memcmp(buf, "hsqs", 4))
			return 1;
	}
	return 0;
}
#endif

static inline void free_partition(struct mtd_info *mtd)
{
	kfree(mtd->name);
	kfree(mtd);
}

static struct mtd_info *allocate_partition(struct mtd_info *parent,
					   const struct mtd_partition *part,
					   int partno, uint64_t cur_offset)
{
	struct mtd_info *master = mtd_get_master(parent);
	int wr_alignment = (parent->flags & MTD_NO_ERASE) ?
			   master->writesize : master->erasesize;
	u64 parent_size = mtd_is_partition(parent) ?
			  parent->part.size : parent->size;
	struct mtd_info *child;
	u32 remainder;
	char *name;
	u64 tmp;

	/* allocate the partition structure */
	child = kzalloc(sizeof(*child), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !child) {
		printk(KERN_ERR "memory allocation error while creating partitions for \"%s\"\n",
		       parent->name);
		kfree(name);
		kfree(child);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	child->type = parent->type;
	child->part.flags = parent->flags & ~part->mask_flags;
	child->part.flags |= part->add_flags;
	child->flags = child->part.flags;
	child->part.size = part->size;
	child->writesize = parent->writesize;
	child->writebufsize = parent->writebufsize;
	child->oobsize = parent->oobsize;
	child->oobavail = parent->oobavail;
	child->subpage_sft = parent->subpage_sft;

	child->name = name;
	child->owner = parent->owner;

	/* NOTE: Historically, we didn't arrange MTDs as a tree out of
	 * concern for showing the same data in multiple partitions.
	 * However, it is very useful to have the master node present,
	 * so the MTD_PARTITIONED_MASTER option allows that. The master
	 * will have device nodes etc only if this is set, so make the
	 * parent conditional on that option. Note, this is a way to
	 * distinguish between the parent and its partitions in sysfs.
	 */
	child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) ||
			    mtd_is_partition(parent) ?
			    &parent->dev : parent->dev.parent;
	child->dev.of_node = part->of_node;
	child->parent = parent;
	child->part.offset = part->offset;
	INIT_LIST_HEAD(&child->partitions);

	if (child->part.offset == MTDPART_OFS_APPEND)
		child->part.offset = cur_offset;
	if (child->part.offset == MTDPART_OFS_NXTBLK) {
		tmp = cur_offset;
		child->part.offset = cur_offset;
		remainder = do_div(tmp, wr_alignment);
		if (remainder) {
			child->part.offset += wr_alignment - remainder;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset,
			       child->part.offset);
		}
	}
	if (child->part.offset == MTDPART_OFS_RETAIN) {
		child->part.offset = cur_offset;
		if (parent_size - child->part.offset >= child->part.size) {
			child->part.size = parent_size - child->part.offset -
					   child->part.size;
		} else {
			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
			       part->name, parent_size - child->part.offset,
			       child->part.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (child->part.size == MTDPART_SIZ_FULL)
		child->part.size = parent_size - child->part.offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n",
	       child->part.offset,
	       child->part.offset + child->part.size, child->name);

	/* let's do some sanity checks */
	if (child->part.offset >= parent_size) {
		/* let's register it anyway to preserve ordering */
		child->part.offset = 0;
		child->part.size = 0;

		/* Initialize ->erasesize to make add_mtd_device() happy. */
		child->erasesize = parent->erasesize;

		printk(KERN_ERR "mtd: partition \"%s\" is out of reach -- disabled\n",
		       part->name);
		goto out_register;
	}
	if (child->part.offset + child->part.size > parent->size) {
		child->part.size = parent_size - child->part.offset;
		printk(KERN_WARNING "mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
		       part->name, parent->name, child->part.size);
	}

	if (parent->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = parent->numeraseregions;
		u64 end = child->part.offset + child->part.size;
		struct mtd_erase_region_info *regions = parent->eraseregions;

		/* Find the first erase regions which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= child->part.offset;
		     i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (child->erasesize < regions[i].erasesize)
				child->erasesize = regions[i].erasesize;
		}
		BUG_ON(child->erasesize == 0);
	} else {
		/* Single erase size */
		child->erasesize = master->erasesize;
	}

	/*
	 * Child erasesize might differ from the parent one if the parent
	 * exposes several regions with different erasesize. Adjust
	 * wr_alignment accordingly.
	 */
	if (!(child->flags & MTD_NO_ERASE))
		wr_alignment = child->erasesize;

	tmp = mtd_get_master_ofs(child, 0);
	remainder = do_div(tmp, wr_alignment);
	if ((child->flags & MTD_WRITEABLE) && remainder) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		child->flags &= ~MTD_WRITEABLE;
		// printk(KERN_WARNING "mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
		//        part->name);
	}

	tmp = mtd_get_master_ofs(child, 0) + child->part.size;
	remainder = do_div(tmp, wr_alignment);
	if ((child->flags & MTD_WRITEABLE) && remainder) {
		child->flags &= ~MTD_WRITEABLE;
		// printk(KERN_WARNING "mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
		//        part->name);
	}

	child->size = child->part.size;
	child->ecc_step_size = parent->ecc_step_size;
	child->ecc_strength = parent->ecc_strength;
	child->bitflip_threshold = parent->bitflip_threshold;

#ifndef CONFIG_MTD_NAND_FASTBOOT
	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < child->part.size) {
			if (mtd_block_isreserved(child, offs))
				child->ecc_stats.bbtblocks++;
			else if (mtd_block_isbad(child, offs))
				child->ecc_stats.badblocks++;
			offs += child->erasesize;
		}
	}
#endif

out_register:
	return child;
}

static ssize_t mtd_partition_offset_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lld\n", mtd->part.offset);
}
static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);

static const struct attribute *mtd_partition_attrs[] = {
	&dev_attr_offset.attr,
	NULL
};

static int mtd_add_partition_attrs(struct mtd_info *new)
{
	int ret = sysfs_create_files(&new->dev.kobj, mtd_partition_attrs);

	if (ret)
		printk(KERN_WARNING
		       "mtd: failed to create partition attrs, err=%d\n", ret);
	return ret;
}

int mtd_add_partition(struct mtd_info *parent, const char *name,
		      long long offset, long long length)
{
	struct mtd_info *master = mtd_get_master(parent);
	u64 parent_size = mtd_is_partition(parent) ?
			  parent->part.size : parent->size;
	struct mtd_partition part;
	struct mtd_info *child;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND || offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = parent_size - offset;

	if (length <= 0)
		return -EINVAL;

	memset(&part, 0, sizeof(part));
	part.name = name;
	part.size = length;
	part.offset = offset;

	child = allocate_partition(parent, &part, -1, offset);
	if (IS_ERR(child))
		return PTR_ERR(child);

	mutex_lock(&master->master.partitions_lock);
	list_add_tail(&child->part.node, &parent->partitions);
	mutex_unlock(&master->master.partitions_lock);

	ret = add_mtd_device(child);
	if (ret)
		goto err_remove_part;

	mtd_add_partition_attrs(child);

	return 0;

err_remove_part:
	mutex_lock(&master->master.partitions_lock);
	list_del(&child->part.node);
	mutex_unlock(&master->master.partitions_lock);

	free_partition(child);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);

/**
 * __mtd_del_partition - delete MTD partition
 *
 * @priv: MTD structure to be deleted
 *
 * This function must be called with the partitions mutex locked.
 */
static int __mtd_del_partition(struct mtd_info *mtd)
{
	struct mtd_info *child, *next;
	int err;

	list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
		err = __mtd_del_partition(child);
		if (err)
			return err;
	}

	sysfs_remove_files(&mtd->dev.kobj, mtd_partition_attrs);

	err = del_mtd_device(mtd);
	if (err)
		return err;

	list_del(&mtd->part.node);
	free_partition(mtd);

	return 0;
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given MTD object, recursively.
 */
static int __del_mtd_partitions(struct mtd_info *mtd)
{
	struct mtd_info *child, *next;
	LIST_HEAD(tmp_list);
	int ret, err = 0;

	list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
		if (mtd_has_partitions(child))
			__del_mtd_partitions(child);
#ifdef CONFIG_SKIP_SQUASHFS_BAD_BLOCK
		part_del_partition_mapping(child);
#endif
		pr_info("Deleting %s MTD partition\n", child->name);
		ret = del_mtd_device(child);
		if (ret < 0) {
			pr_err("Error when deleting partition \"%s\" (%d)\n",
			       child->name, ret);
			err = ret;
			continue;
		}

		list_del(&child->part.node);
		free_partition(child);
	}

	return err;
}

int del_mtd_partitions(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	pr_info("Deleting MTD partitions on \"%s\":\n", mtd->name);

	mutex_lock(&master->master.partitions_lock);
	ret = __del_mtd_partitions(mtd);
	mutex_unlock(&master->master.partitions_lock);

	return ret;
}

int mtd_del_partition(struct mtd_info *mtd, int partno)
{
	struct mtd_info *child, *master = mtd_get_master(mtd);
	int ret = -EINVAL;

	mutex_lock(&master->master.partitions_lock);
	list_for_each_entry(child, &mtd->partitions, part.node) {
		if (child->index == partno) {
			ret = __mtd_del_partition(child);
			break;
		}
	}
	mutex_unlock(&master->master.partitions_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);

/*
 * This function, given a parent MTD object and a partition table, creates
 * and registers the child MTD objects which are bound to the parent according
 * to the partition definitions.
 *
 * For historical reasons, this function's caller only registers the parent
 * if the MTD_PARTITIONED_MASTER config option is set.
 */
int add_mtd_partitions(struct mtd_info *parent,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_info *child, *master = mtd_get_master(parent);
	uint64_t cur_offset = 0;
	int i, ret;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n",
	       nbparts, parent->name);

	for (i = 0; i < nbparts; i++) {
		child = allocate_partition(parent, parts + i, i, cur_offset);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			goto err_del_partitions;
		}

		mutex_lock(&master->master.partitions_lock);
		list_add_tail(&child->part.node, &parent->partitions);
		mutex_unlock(&master->master.partitions_lock);

		ret = add_mtd_device(child);
		if (ret) {
			mutex_lock(&master->master.partitions_lock);
			list_del(&child->part.node);
			mutex_unlock(&master->master.partitions_lock);

			free_partition(child);
			goto err_del_partitions;
		}

		mtd_add_partition_attrs(child);

		/* Look for subpartitions */
		parse_mtd_partitions(child, parts[i].types, NULL);

#ifdef CONFIG_SKIP_SQUASHFS_BAD_BLOCK
		if (part_is_squashfs(child)) {
			printk("%s is squashfs\n", child->name);
			part_create_partition_mapping(child);
		}
#endif

		cur_offset = child->part.offset + child->part.size;
	}

	return 0;

err_del_partitions:
	del_mtd_partitions(master);

	return ret;
}

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *mtd_part_parser_get(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
{
	module_put(p->owner);
}

/*
 * Many partition parsers just expected the core to kfree() all their data in
 * one chunk. Do that by default.
 */
static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
					    int nr_parts)
{
	kfree(pparts);
}

int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
{
	p->owner = owner;

	if (!p->cleanup)
		p->cleanup = &mtd_part_parser_cleanup_default;

	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/* Check DT only when looking for subpartitions. */
static const char * const default_subpartition_types[] = {
	"ofpart",
	NULL
};

static int mtd_part_do_parse(struct mtd_part_parser *parser,
			     struct mtd_info *master,
			     struct mtd_partitions *pparts,
			     struct mtd_part_parser_data *data)
{
	int ret;

	ret = (*parser->parse_fn)(master, &pparts->parts, data);
	pr_debug("%s: parser %s: %i\n",
		 master->name, parser->name, ret);
	if (ret <= 0)
		return ret;

	pr_notice("%d %s partitions found on MTD device %s\n",
		  ret, parser->name, master->name);

	pparts->nr_parts = ret;
	pparts->parser = parser;

	return ret;
}

/**
 * mtd_part_get_compatible_parser - find MTD parser by a compatible string
 *
 * @compat: compatible string describing partitions in a device tree
 *
 * MTD parsers can specify supported partitions by providing a table of
 * compatibility strings. This function finds a parser that advertises support
 * for a passed value of "compatible".
 */
static struct mtd_part_parser *mtd_part_get_compatible_parser(const char *compat)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list) {
		const struct of_device_id *matches;

		matches = p->of_match_table;
		if (!matches)
			continue;

		for (; matches->compatible[0]; matches++) {
			if (!strcmp(matches->compatible, compat) &&
			    try_module_get(p->owner)) {
				ret = p;
				break;
			}
		}

		if (ret)
			break;
	}

	spin_unlock(&part_parser_lock);

	return ret;
}

static int mtd_part_of_parse(struct mtd_info *master,
			     struct mtd_partitions *pparts)
{
	struct mtd_part_parser *parser;
	struct device_node *np;
	struct property *prop;
	const char *compat;
	const char *fixed = "fixed-partitions";
	int ret, err = 0;

	np = mtd_get_of_node(master);
	if (mtd_is_partition(master))
		of_node_get(np);
	else
		np = of_get_child_by_name(np, "partitions");

	of_property_for_each_string(np, "compatible", prop, compat) {
		parser = mtd_part_get_compatible_parser(compat);
		if (!parser)
			continue;
		ret = mtd_part_do_parse(parser, master, pparts, NULL);
		if (ret > 0) {
			of_node_put(np);
			return ret;
		}
		mtd_part_parser_put(parser);
		if (ret < 0 && !err)
			err = ret;
	}
	of_node_put(np);

	/*
	 * For backward compatibility we have to try the "fixed-partitions"
	 * parser. It supports old DT format with partitions specified as a
	 * direct subnodes of a flash device DT node without any compatibility
	 * specified we could match.
	 */
	parser = mtd_part_parser_get(fixed);
	if (!parser && !request_module("%s", fixed))
		parser = mtd_part_parser_get(fixed);
	if (parser) {
		ret = mtd_part_do_parse(parser, master, pparts, NULL);
		if (ret > 0)
			return ret;
		mtd_part_parser_put(parser);
		if (ret < 0 && !err)
			err = ret;
	}

	return err;
}

/**
 * parse_mtd_partitions - parse and register MTD partitions
 *
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @data: MTD partition parser-specific data
 *
 * This function tries to find & register partitions on MTD device @master. It
 * uses MTD partition parsers, specified in @types. However, if @types is %NULL,
 * then the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there are more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o number of found partitions otherwise
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_part_parser_data *data)
{
	struct mtd_partitions pparts = { };
	struct mtd_part_parser *parser;
	int ret, err = 0;
	int i = 0;

	if (!types)
		types = mtd_is_partition(master) ? default_subpartition_types :
			default_mtd_part_types;

	for ( ; *types; types++) {
		/*
		 * ofpart is a special type that means OF partitioning info
		 * should be used. It requires a bit different logic so it is
		 * handled in a separated function.
		 */
		if (!strcmp(*types, "ofpart")) {
			ret = mtd_part_of_parse(master, &pparts);
		} else {
			pr_debug("%s: parsing partitions %s\n", master->name,
				 *types);
			parser = mtd_part_parser_get(*types);
			if (!parser && !request_module("%s", *types))
				parser = mtd_part_parser_get(*types);
			pr_debug("%s: got parser %s\n", master->name,
				 parser ? parser->name : NULL);
			if (!parser)
				continue;
			ret = mtd_part_do_parse(parser, master, &pparts, data);
			if (ret <= 0)
				mtd_part_parser_put(parser);
		}
		/* Found partitions! */
		if (ret > 0) {
			for (i = 0; i < pparts.nr_parts; i++) {
				printk(KERN_WARNING "partitions[%2d] = "
				       "{.offset = 0x%.8x, .size = 0x%.8x (%7u KiB), .name = %-*s }\n",
				       i, (unsigned int)(pparts.parts[i].offset),
				       (unsigned int)(pparts.parts[i].size),
				       (unsigned int)(pparts.parts[i].size / 1024),
				       13, pparts.parts[i].name);
			}
			printk(KERN_WARNING "Mtd parts default");
			err = add_mtd_partitions(master, pparts.parts,
						 pparts.nr_parts);
			mtd_part_parser_cleanup(&pparts);
			return err ? err : pparts.nr_parts;
		}
		/*
		 * Stash the first error we see; only report it if no parser
		 * succeeds
		 */
		if (ret < 0 && !err)
			err = ret;
	}
	return err;
}

void mtd_part_parser_cleanup(struct mtd_partitions *parts)
{
	const struct mtd_part_parser *parser;

	if (!parts)
		return;

	parser = parts->parser;
	if (parser) {
		if (parser->cleanup)
			parser->cleanup(parts->parts, parts->nr_parts);

		mtd_part_parser_put(parser);
	}
}

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);

	return master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
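For reference, the exported mtd_add_partition()/mtd_del_partition() pair above is the same path the BLKPG ioctl uses to create and delete partitions at run time. A hedged sketch of calling it from other kernel code (the partition name, offset, and length are made up):

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Carve a 1 MiB "scratch" partition out of an existing MTD device.
 * The name and offsets here are illustrative only. */
static int add_scratch_partition(struct mtd_info *parent)
{
	int ret;

	ret = mtd_add_partition(parent, "scratch",
				0x100000,	/* offset: 1 MiB in */
				0x100000);	/* length: 1 MiB    */
	if (ret)
		pr_err("mtd: adding scratch partition failed (%d)\n", ret);
	return ret;
}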
mtd.h (the newer kernel's struct mtd_info, for comparison; the operation hooks are now underscore-prefixed and must be reached through the mtd_*() wrappers)

struct mtd_info {
	u_char type;
	uint32_t flags;
	uint64_t size;	 // Total size of the MTD

	/* "Major" erase size for the device. Naïve users may take this
	 * to be the only erase size available, or may use the more detailed
	 * information below if they desire
	 */
	uint32_t erasesize;
	/* Minimal writable flash unit size. In case of NOR flash it is 1 (even
	 * though individual bits can be cleared), in case of NAND flash it is
	 * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR
	 * it is of ECC block size, etc. It is illegal to have writesize = 0.
	 * Any driver registering a struct mtd_info must ensure a writesize of
	 * 1 or larger.
	 */
	uint32_t writesize;

	/*
	 * Size of the write buffer used by the MTD. MTD devices having a write
	 * buffer can write multiple writesize chunks at a time. E.g. while
	 * writing 4 * writesize bytes to a device with 2 * writesize bytes
	 * buffer the MTD driver can (but doesn't have to) do 2 writesize
	 * operations, but not 4. Currently, all NANDs have writebufsize
	 * equivalent to writesize (NAND page size). Some NOR flashes do have
	 * writebufsize greater than writesize.
	 */
	uint32_t writebufsize;

	uint32_t oobsize;   // Amount of OOB data per block (e.g. 16)
	uint32_t oobavail;  // Available OOB bytes per block

	/*
	 * If erasesize is a power of 2 then the shift is stored in
	 * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize.
	 */
	unsigned int erasesize_shift;
	unsigned int writesize_shift;
	/* Masks based on erasesize_shift and writesize_shift */
	unsigned int erasesize_mask;
	unsigned int writesize_mask;

	/*
	 * read ops return -EUCLEAN if max number of bitflips corrected on any
	 * one region comprising an ecc step equals or exceeds this value.
	 * Settable by driver, else defaults to ecc_strength. User can override
	 * in sysfs. N.B. The meaning of the -EUCLEAN return code has changed;
	 * see Documentation/ABI/testing/sysfs-class-mtd for more detail.
	 */
	unsigned int bitflip_threshold;

	/* Kernel-only stuff starts here. */
	const char *name;
	int index;

	/* OOB layout description */
	const struct mtd_ooblayout_ops *ooblayout;

	/* NAND pairing scheme, only provided for MLC/TLC NANDs */
	const struct mtd_pairing_scheme *pairing;

	/* the ecc step size. */
	unsigned int ecc_step_size;

	/* max number of correctible bit errors per ecc step */
	unsigned int ecc_strength;

	/* Data for variable erase regions. If numeraseregions is zero,
	 * it means that the whole device has erasesize as given above.
	 */
	int numeraseregions;
	struct mtd_erase_region_info *eraseregions;

	/*
	 * Do not call via these pointers, use corresponding mtd_*()
	 * wrappers instead.
	 */
	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
	int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
		       size_t *retlen, void **virt, resource_size_t *phys);
	int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
	int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
		      size_t *retlen, u_char *buf);
	int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
		       size_t *retlen, const u_char *buf);
	int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
			     size_t *retlen, const u_char *buf);
	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
			  struct mtd_oob_ops *ops);
	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
			   struct mtd_oob_ops *ops);
	int (*_get_fact_prot_info) (struct mtd_info *mtd, size_t len,
				    size_t *retlen, struct otp_info *buf);
	int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf);
	int (*_get_user_prot_info) (struct mtd_info *mtd, size_t len,
				    size_t *retlen, struct otp_info *buf);
	int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf);
	int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
				     size_t len, size_t *retlen, u_char *buf);
	int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len);
	int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen);
	void (*_sync) (struct mtd_info *mtd);
	int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_max_bad_blocks) (struct mtd_info *mtd, loff_t ofs, size_t len);
	int (*_suspend) (struct mtd_info *mtd);
	void (*_resume) (struct mtd_info *mtd);
	void (*_reboot) (struct mtd_info *mtd);
	/*
	 * If the driver is something smart, like UBI, it may need to maintain
	 * its own reference counting. The below functions are only for driver.
	 */
	int (*_get_device) (struct mtd_info *mtd);
	void (*_put_device) (struct mtd_info *mtd);

	/*
	 * flag indicates a panic write, low level drivers can take appropriate
	 * action if required to ensure writes go through
	 */
	bool oops_panic_write;

	struct notifier_block reboot_notifier;  /* default mode before reboot */

	/* ECC status information */
	struct mtd_ecc_stats ecc_stats;
	/* Subpage shift (NAND) */
	int subpage_sft;

	void *priv;

	struct module *owner;
	struct device dev;
	int usecount;
	struct mtd_debug_info dbg;
	struct nvmem_device *nvmem;

	/*
	 * Parent device from the MTD partition point of view.
	 *
	 * MTD masters do not have any parent, MTD partitions do. The parent
	 * MTD device can itself be a partition.
	 */
	struct mtd_info *parent;

	/* List of partitions attached to this MTD device */
	struct list_head partitions;

	union {
		struct mtd_part part;
		struct mtd_master master;
	};
};
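Because the underscore-prefixed hooks are not to be called directly, in-kernel users now go through the mtd_*() wrappers, which validate the request and fold ECC results into the return code. A minimal sketch (device number 0 and the buffer size are arbitrary):

#include <linux/err.h>
#include <linux/mtd/mtd.h>

static int read_via_wrapper(void)
{
	struct mtd_info *mtd = get_mtd_device(NULL, 0);
	u_char buf[64];
	size_t retlen;
	int ret;

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	/* mtd_read() checks the range, handles -EUCLEAN bitflip
	 * reporting, and dispatches to mtd->_read internally. */
	ret = mtd_read(mtd, 0, sizeof(buf), &retlen, buf);
	if (mtd_is_bitflip(ret))
		ret = 0;	/* data was corrected and is usable */

	put_mtd_device(mtd);
	return ret;
}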