/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);
/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *master;
	uint64_t offset;
	struct list_head list;
};
/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 */
#define PART(x)  ((struct mtd_part *)(x))
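/*
 * The cast above works because the embedded mtd is the first member of
 * struct mtd_part, so a pointer to it is also a pointer to the containing
 * structure.  A minimal sketch of an equivalent helper (illustrative only,
 * not defined in this file) written with container_of():
 *
 *	static inline struct mtd_part *mtd_to_part(struct mtd_info *mtd)
 *	{
 *		return container_of(mtd, struct mtd_part, mtd);
 *	}
 */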
/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */
static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->master->ecc_stats;

	if (from >= mtd->size)
		len = 0;
	else if (from + len > mtd->size)
		len = mtd->size - from;
	res = part->master->read(part->master, from + part->offset,
				 len, retlen, buf);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
	}
	return res;
}
static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = PART(mtd);
	if (from >= mtd->size)
		len = 0;
	else if (from + len > mtd->size)
		len = mtd->size - from;
	return mtd_point(part->master, from + part->offset, len, retlen,
			 virt, phys);
}
static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = PART(mtd);

	mtd_unpoint(part->master, from + part->offset, len);
}
static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	struct mtd_part *part = PART(mtd);

	offset += part->offset;
	return mtd_get_unmapped_area(part->master, len, offset, flags);
}
static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		if (ops->mode == MTD_OPS_AUTO_OOB)
			len = mtd->oobavail;
		else
			len = mtd->oobsize;
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = part->master->read_oob(part->master, from + part->offset, ops);
	if (res) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}
static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->read_user_prot_reg(part->master, from,
					len, retlen, buf);
}
static int part_get_user_prot_info(struct mtd_info *mtd,
		struct otp_info *buf, size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->get_user_prot_info(part->master, buf, len);
}
static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->read_fact_prot_reg(part->master, from,
					len, retlen, buf);
}
static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->get_fact_prot_info(part->master, buf, len);
}
static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (to >= mtd->size)
		len = 0;
	else if (to + len > mtd->size)
		len = mtd->size - to;
	return part->master->write(part->master, to + part->offset,
				   len, retlen, buf);
}
static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (to >= mtd->size)
		len = 0;
	else if (to + len > mtd->size)
		len = mtd->size - to;
	return part->master->panic_write(part->master, to + part->offset,
					 len, retlen, buf);
}
static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->master->write_oob(part->master, to + part->offset, ops);
}
static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->write_user_prot_reg(part->master, from,
					len, retlen, buf);
}
static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->lock_user_prot_reg(part->master, from, len);
}
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return part->master->writev(part->master, vecs, count,
				    to + part->offset, retlen);
}
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (instr->addr >= mtd->size)
		return -EINVAL;
	instr->addr += part->offset;
	ret = mtd_erase(part->master, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}
void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);
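/*
 * Illustrative sketch (not part of this file): a flash driver that has just
 * finished an erase request reports completion through mtd_erase_callback()
 * rather than calling instr->callback() directly, so the partition offsets
 * applied in part_erase() are undone before the caller sees the result:
 *
 *	instr->state = MTD_ERASE_DONE;
 *	mtd_erase_callback(instr);
 */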
static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->lock(part->master, ofs + part->offset, len);
}
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->unlock(part->master, ofs + part->offset, len);
}
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->is_locked(part->master, ofs + part->offset, len);
}
static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->sync(part->master);
}
static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	return part->master->suspend(part->master);
}
static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->resume(part->master);
}
static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	if (ofs >= mtd->size)
		return -EINVAL;
	ofs += part->offset;
	return part->master->block_isbad(part->master, ofs);
}
static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (ofs >= mtd->size)
		return -EINVAL;
	ofs += part->offset;
	res = part->master->block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}
static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */
int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0) {
				err = ret;
				continue;
			}
			list_del(&slave->list);
			free_partition(slave);
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}
static struct mtd_part *allocate_partition(struct mtd_info *master,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	struct mtd_part *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}
	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.writebufsize = master->writebufsize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = name;
	slave->mtd.owner = master->owner;
	slave->mtd.backing_dev_info = master->backing_dev_info;

	/* NOTE:  we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->mtd.dev.parent = master->dev.parent;
	slave->mtd.read = part_read;
	slave->mtd.write = part_write;

	if (master->panic_write)
		slave->mtd.panic_write = part_panic_write;

	if (master->point && master->unpoint) {
		slave->mtd.point = part_point;
		slave->mtd.unpoint = part_unpoint;
	}

	if (master->get_unmapped_area)
		slave->mtd.get_unmapped_area = part_get_unmapped_area;
	if (master->read_oob)
		slave->mtd.read_oob = part_read_oob;
	if (master->write_oob)
		slave->mtd.write_oob = part_write_oob;
	if (master->read_user_prot_reg)
		slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
	if (master->read_fact_prot_reg)
		slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->write_user_prot_reg)
		slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
	if (master->lock_user_prot_reg)
		slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->get_user_prot_info)
		slave->mtd.get_user_prot_info = part_get_user_prot_info;
	if (master->get_fact_prot_info)
		slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
	if (master->sync)
		slave->mtd.sync = part_sync;
	if (!partno && !master->dev.class && master->suspend && master->resume) {
		slave->mtd.suspend = part_suspend;
		slave->mtd.resume = part_resume;
	}
	if (master->writev)
		slave->mtd.writev = part_writev;
	if (master->lock)
		slave->mtd.lock = part_lock;
	if (master->unlock)
		slave->mtd.unlock = part_unlock;
	if (master->is_locked)
		slave->mtd.is_locked = part_is_locked;
	if (master->block_isbad)
		slave->mtd.block_isbad = part_block_isbad;
	if (master->block_markbad)
		slave->mtd.block_markbad = part_block_markbad;
	slave->mtd.erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;
	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (master->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = master->size - slave->offset
							- slave->mtd.size;
		} else {
			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, master->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize)
				slave->mtd.erasesize = regions[i].erasesize;
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}
	slave->mtd.ecclayout = master->ecclayout;
	if (master->block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (master->block_isbad(master,
						offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}
int mtd_add_partition(struct mtd_info *master, char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *p, *new;
	uint64_t start, end;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = master->size - offset;
	if (length <= 0)
		return -EINVAL;

	part.name = name;
	part.size = length;
	part.offset = offset;
	part.mask_flags = 0;
	part.ecclayout = NULL;

	new = allocate_partition(master, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	start = offset;
	end = offset + length;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(p, &mtd_partitions, list)
		if (p->master == master) {
			if ((start >= p->offset) &&
			    (start < (p->offset + p->mtd.size)))
				goto err_inv;
			if ((end >= p->offset) &&
			    (end < (p->offset + p->mtd.size)))
				goto err_inv;
		}

	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(&new->mtd);

	return ret;
err_inv:
	mutex_unlock(&mtd_partitions_mutex);
	free_partition(new);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
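/*
 * Illustrative sketch (not used in this file): carving a runtime partition
 * out of an already-registered master device.  The name, offset and length
 * below are hypothetical and must fit within, and not overlap, the existing
 * layout of the master device:
 *
 *	int err = mtd_add_partition(master, "user-data", 0x100000, 0x400000);
 *	if (err)
 *		pr_err("cannot add partition: %d\n", err);
 */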
int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->master == master) &&
		    (slave->mtd.index == partno)) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0)
				break;

			list_del(&slave->list);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */
int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave))
			return PTR_ERR(slave);

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}
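/*
 * Illustrative sketch (hypothetical names and sizes): a board or map driver
 * typically describes its flash layout with a static table and hands it to
 * add_mtd_partitions() for the master device it probed.  Masking
 * MTD_WRITEABLE in mask_flags forces that partition read-only:
 *
 *	static struct mtd_partition board_parts[] = {
 *		{ .name = "bootloader", .offset = 0, .size = 0x40000,
 *		  .mask_flags = MTD_WRITEABLE },
 *		{ .name = "kernel", .offset = MTDPART_OFS_APPEND,
 *		  .size = 0x400000 },
 *		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	add_mtd_partitions(master, board_parts, ARRAY_SIZE(board_parts));
 */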
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);
static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);
	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}
	spin_unlock(&part_parser_lock);
	return ret;
}
#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
int register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_mtd_parser);
int deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
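/*
 * Illustrative sketch of a minimal partition parser (all names hypothetical).
 * The parse callback allocates an array of mtd_partition entries, fills it
 * in, stores it through *pparts and returns the number of partitions found;
 * the caller owns and eventually frees the array:
 *
 *	static int example_parse_fn(struct mtd_info *master,
 *				    struct mtd_partition **pparts,
 *				    struct mtd_part_parser_data *data)
 *	{
 *		struct mtd_partition *parts;
 *
 *		parts = kzalloc(2 * sizeof(*parts), GFP_KERNEL);
 *		if (!parts)
 *			return -ENOMEM;
 *		parts[0].name = "boot";
 *		parts[0].offset = 0;
 *		parts[0].size = master->erasesize;
 *		parts[1].name = "data";
 *		parts[1].offset = MTDPART_OFS_APPEND;
 *		parts[1].size = MTDPART_SIZ_FULL;
 *		*pparts = parts;
 *		return 2;
 *	}
 *
 *	static struct mtd_part_parser example_parser = {
 *		.owner = THIS_MODULE,
 *		.parse_fn = example_parse_fn,
 *		.name = "example",
 *	};
 *
 *	register_mtd_parser(&example_parser);
 */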
/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char *default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};
/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: array of partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses MTD
 * partition parsers, specified in @types. However, if @types is %NULL, then
 * the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero if no partitions were found
 * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects.
 */
int parse_mtd_partitions(struct mtd_info *master, const char **types,
			 struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser)
			continue;
		ret = (*parser->parse_fn)(master, pparts, data);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
		}
		put_partition_parser(parser);
	}
	return ret;
}
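/*
 * Illustrative sketch (hypothetical caller): letting the default parsers
 * probe a master device and registering whatever they find, falling back to
 * registering the whole device when no partition table turns up:
 *
 *	struct mtd_partition *parts;
 *	int nr = parse_mtd_partitions(master, NULL, &parts, NULL);
 *
 *	if (nr > 0) {
 *		add_mtd_partitions(master, parts, nr);
 *		kfree(parts);
 *	} else {
 *		add_mtd_device(master);
 *	}
 */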
int mtd_is_partition(struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);
	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);