2 * Character-device access to raw MTD devices.
6 #include <linux/device.h>
10 #include <linux/init.h>
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/sched.h>
15 #include <linux/smp_lock.h>
16 #include <linux/backing-dev.h>
17 #include <linux/compat.h>
18 #include <linux/mount.h>
20 #include <linux/mtd/mtd.h>
21 #include <linux/mtd/map.h>
22 #include <linux/mtd/compatmac.h>
24 #include <asm/uaccess.h>
/* Magic number identifying the internal pseudo-filesystem that provides a
 * backing inode per MTD device (see mtd_inodefs_type / init_mtdchar below). */
26 #define MTD_INODE_FS_MAGIC 0x11307854
/* Mount of that pseudo-filesystem; created once in init_mtdchar() and used
 * by mtd_open() (iget_locked) and mtdchar_notify_remove() (ilookup). */
27 static struct vfsmount *mtd_inode_mnt __read_mostly;
30 * Data structure to hold the pointer to the mtd device as well
31 * as mode information for various use cases.
33 struct mtd_file_info {
/* Per-open access mode: normal, OTP factory/user, or raw — switched on in
 * mtd_read()/mtd_write() and changed via the OTPSELECT/MTDFILEMODE ioctls. */
36 enum mtd_file_modes mode;
/*
 * llseek handler for /dev/mtdN: reposition within the flat device space.
 * NOTE(review): only a fragment is visible here — the switch on 'orig'
 * (SEEK_SET/CUR/END) and the failure return are elided from this excerpt.
 */
39 static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
41 struct mtd_file_info *mfi = file->private_data;
42 struct mtd_info *mtd = mfi->mtd;
/* SEEK_CUR: requested offset is relative to the current file position. */
48 offset += file->f_pos;
/* Accept only positions inside the device; '<= mtd->size' deliberately
 * allows seeking exactly to end-of-device. */
57 if (offset >= 0 && offset <= mtd->size)
58 return file->f_pos = offset;
/*
 * open() handler for the raw MTD character device.
 *
 * The minor number encodes both device index and access node: minor >> 1
 * is the MTD device number, and the low bit marks the read-only node —
 * so an odd minor refuses FMODE_WRITE opens.  Each device gets a backing
 * inode from the internal mtd_inodefs mount so the file's address space
 * uses the device's backing_dev_info.
 * NOTE(review): fragment — error returns, iget failure handling, and the
 * put_mtd_device() unwind paths are elided from this excerpt.
 */
65 static int mtd_open(struct inode *inode, struct file *file)
67 int minor = iminor(inode);
68 int devnum = minor >> 1;
71 struct mtd_file_info *mfi;
72 struct inode *mtd_ino;
74 DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
76 /* You can't open the RO devices RW */
77 if ((file->f_mode & FMODE_WRITE) && (minor & 1))
81 mtd = get_mtd_device(NULL, devnum);
/* A placeholder device (e.g. a hole in a partition table) is not openable. */
88 if (mtd->type == MTD_ABSENT) {
/* One inode per device, keyed by devnum, on the internal pseudo-fs. */
94 mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
/* First opener initializes the freshly-allocated inode. */
100 if (mtd_ino->i_state & I_NEW) {
101 mtd_ino->i_private = mtd;
102 mtd_ino->i_mode = S_IFCHR;
/* Route page-cache writeback decisions through the MTD device's BDI. */
103 mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
104 unlock_new_inode(mtd_ino);
106 file->f_mapping = mtd_ino->i_mapping;
108 /* You can't open it RW if it's not a writeable device */
109 if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
/* Per-open state: mode defaults to MTD_MODE_NORMAL via kzalloc. */
116 mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
125 file->private_data = mfi;
132 /*====================================================================*/
/*
 * release() handler: flush the device if the file was writable, then drop
 * the per-open state.  NOTE(review): the put_mtd_device()/kfree(mfi)
 * teardown lines are elided from this excerpt.
 */
134 static int mtd_close(struct inode *inode, struct file *file)
136 struct mtd_file_info *mfi = file->private_data;
137 struct mtd_info *mtd = mfi->mtd;
139 DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
141 /* Only sync if opened RW */
142 if ((file->f_mode & FMODE_WRITE) && mtd->sync)
148 file->private_data = NULL;
154 /* FIXME: This _really_ needs to die. In 2.5, we should lock the
155 userspace buffer down and use it directly with readv/writev.
/* Upper bound (128 KiB) on the kernel bounce buffer used by mtd_read()
 * and mtd_write(); larger transfers are chunked in a loop. */
157 #define MAX_KMALLOC_SIZE 0x20000
/*
 * read() handler: copy device data to userspace through a bounce buffer,
 * chunked at MAX_KMALLOC_SIZE, dispatching on the per-open file mode
 * (OTP factory, OTP user, raw with OOB, or normal read).
 * NOTE(review): fragment — the surrounding while-loop, switch head,
 * break statements, kfree, and error returns are elided from this excerpt.
 */
159 static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
161 struct mtd_file_info *mfi = file->private_data;
162 struct mtd_info *mtd = mfi->mtd;
164 size_t total_retlen=0;
169 DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
/* Clamp the request so it cannot run past end-of-device. */
171 if (*ppos + count > mtd->size)
172 count = mtd->size - *ppos;
177 /* FIXME: Use kiovec in 2.5 to lock down the user's buffers
178 and pass them directly to the MTD functions */
/* Bounce buffer: at most MAX_KMALLOC_SIZE; smaller requests allocate
 * exactly what they need. */
180 if (count > MAX_KMALLOC_SIZE)
181 kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL)
183 kbuf=kmalloc(count, GFP_KERNEL);
/* Per-iteration transfer length, capped at the bounce-buffer size. */
190 if (count > MAX_KMALLOC_SIZE)
191 len = MAX_KMALLOC_SIZE;
/* Dispatch on mfi->mode (switch head elided in this excerpt). */
196 case MTD_MODE_OTP_FACTORY:
197 ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
199 case MTD_MODE_OTP_USER:
200 ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
/* MTD_MODE_RAW: read main area without ECC via the OOB interface. */
204 struct mtd_oob_ops ops;
206 ops.mode = MTD_OOB_RAW;
211 ret = mtd->read_oob(mtd, *ppos, &ops);
/* MTD_MODE_NORMAL: plain ECC-corrected read. */
216 ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
218 /* Nand returns -EBADMSG on ecc errors, but it returns
219 * the data. For our userspace tools it is important
220 * to dump areas with ecc errors !
221 * For kernel internal usage it also might return -EUCLEAN
222 * to signal the caller that a bitflip has occurred and has
223 * been corrected by the ECC algorithm.
224 * Userspace software which accesses NAND this way
225 * must be aware of the fact that it deals with NAND
/* Treat corrected (-EUCLEAN) and uncorrectable (-EBADMSG) ECC results as
 * success so userspace still receives the (possibly damaged) data. */
227 if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
229 if (copy_to_user(buf, kbuf, retlen)) {
234 total_retlen += retlen;
/*
 * write() handler: mirror of mtd_read() — copy userspace data into a
 * bounce buffer (chunked at MAX_KMALLOC_SIZE) and dispatch on the
 * per-open file mode.
 * NOTE(review): fragment — the while-loop, switch head, breaks, kfree,
 * and error paths are elided from this excerpt.
 */
252 static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
254 struct mtd_file_info *mfi = file->private_data;
255 struct mtd_info *mtd = mfi->mtd;
258 size_t total_retlen=0;
262 DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");
/* Writing exactly at end-of-device: no space left (return elided here). */
264 if (*ppos == mtd->size)
/* Clamp the request so it cannot run past end-of-device. */
267 if (*ppos + count > mtd->size)
268 count = mtd->size - *ppos;
273 if (count > MAX_KMALLOC_SIZE)
274 kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
276 kbuf=kmalloc(count, GFP_KERNEL);
283 if (count > MAX_KMALLOC_SIZE)
284 len = MAX_KMALLOC_SIZE;
288 if (copy_from_user(kbuf, buf, len)) {
/* Factory OTP is read-only; the rejection between these two case labels
 * is elided in this excerpt — presumably returns -EROFS; confirm. */
294 case MTD_MODE_OTP_FACTORY:
297 case MTD_MODE_OTP_USER:
298 if (!mtd->write_user_prot_reg) {
302 ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
/* MTD_MODE_RAW: write main area without ECC via the OOB interface. */
307 struct mtd_oob_ops ops;
309 ops.mode = MTD_OOB_RAW;
314 ret = mtd->write_oob(mtd, *ppos, &ops);
/* MTD_MODE_NORMAL: plain write through the device's write hook. */
320 ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
324 total_retlen += retlen;
338 /*======================================================================
340 IOCTL calls for getting device parameters.
342 ======================================================================*/
/* Erase completion callback: the MEMERASE ioctl stashes a wait-queue head
 * in erase_info->priv and sleeps; this wakes the waiting ioctl thread. */
343 static void mtdchar_erase_callback (struct erase_info *instr)
345 wake_up((wait_queue_head_t *)instr->priv);
348 #ifdef CONFIG_HAVE_MTD_OTP
/*
 * Switch the per-open file mode into one of the OTP (one-time-programmable)
 * regions, verifying the device implements the corresponding access hook.
 * NOTE(review): the MTD_OTP_USER branch tests read_fact_prot_reg, but the
 * user-mode read path (mtd_read) uses read_user_prot_reg — this looks like
 * the wrong hook being checked; confirm against upstream before changing.
 */
349 static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
351 struct mtd_info *mtd = mfi->mtd;
355 case MTD_OTP_FACTORY:
356 if (!mtd->read_fact_prot_reg)
359 mfi->mode = MTD_MODE_OTP_FACTORY;
362 if (!mtd->read_fact_prot_reg)
365 mfi->mode = MTD_MODE_OTP_USER;
/* Stub when OTP support is not compiled in: always unsupported. */
375 # define otp_select_filemode(f,m) -EOPNOTSUPP
/*
 * Shared helper for the MEMWRITEOOB / MEMWRITEOOB64 ioctls: write 'length'
 * bytes of out-of-band data from user pointer 'ptr' at device offset
 * 'start', and report the byte count written back through 'retp'.
 * Requires the file to be open for write and the device to support
 * write_oob (the capability check is elided from this excerpt).
 */
378 static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
379 uint64_t start, uint32_t length, void __user *ptr,
380 uint32_t __user *retp)
382 struct mtd_oob_ops ops;
386 if (!(file->f_mode & FMODE_WRITE))
395 ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
/* Split 'start' into page-aligned base + offset within the OOB area.
 * Assumes oobsize is a power of two — TODO confirm for all devices. */
401 ops.ooboffs = start & (mtd->oobsize - 1);
403 ops.mode = MTD_OOB_PLACE;
/* An unaligned write must fit inside a single OOB region. */
405 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
/* Copy the user's OOB payload into a kernel buffer in one step. */
408 ops.oobbuf = memdup_user(ptr, length);
409 if (IS_ERR(ops.oobbuf))
410 return PTR_ERR(ops.oobbuf);
412 start &= ~((uint64_t)mtd->oobsize - 1);
413 ret = mtd->write_oob(mtd, start, &ops);
/* oobretlen is size_t; refuse to silently truncate into the u32 reply. */
415 if (ops.oobretlen > 0xFFFFFFFFU)
417 retlen = ops.oobretlen;
418 if (copy_to_user(retp, &retlen, sizeof(length)))
/*
 * Shared helper for the MEMREADOOB / MEMREADOOB64 ioctls: read 'length'
 * bytes of out-of-band data at device offset 'start' into user pointer
 * 'ptr', reporting the byte count through 'retp'.
 * NOTE(review): fragment — capability/length validation and the kfree of
 * the bounce buffer are elided from this excerpt.
 */
425 static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
426 uint32_t length, void __user *ptr, uint32_t __user *retp)
428 struct mtd_oob_ops ops;
437 ret = access_ok(VERIFY_WRITE, ptr,
438 length) ? 0 : -EFAULT;
/* Page-aligned base + offset split; assumes oobsize is a power of two. */
443 ops.ooboffs = start & (mtd->oobsize - 1);
445 ops.mode = MTD_OOB_PLACE;
447 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
450 ops.oobbuf = kmalloc(length, GFP_KERNEL);
454 start &= ~((uint64_t)mtd->oobsize - 1);
455 ret = mtd->read_oob(mtd, start, &ops);
/* On success, hand back the count first, then the data itself. */
457 if (put_user(ops.oobretlen, retp))
459 else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
/*
 * ioctl() handler for /dev/mtdN — one big switch over the MEM*/OTP* command
 * set.  Direction bits encoded in 'cmd' are pre-validated with access_ok
 * before dispatch.
 * NOTE(review): heavily elided fragment — the switch head, most case
 * labels, breaks, and error returns are missing from this excerpt; the
 * comments below label the cases that are visible.
 */
467 static int mtd_ioctl(struct inode *inode, struct file *file,
468 u_int cmd, u_long arg)
470 struct mtd_file_info *mfi = file->private_data;
471 struct mtd_info *mtd = mfi->mtd;
472 void __user *argp = (void __user *)arg;
475 struct mtd_info_user info;
477 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
/* The ioctl number encodes the argument size; verify the user pointer is
 * readable and/or writable for that many bytes up front. */
479 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
481 if (!access_ok(VERIFY_READ, argp, size))
485 if (!access_ok(VERIFY_WRITE, argp, size))
/* Report how many distinct erase regions the device has. */
490 case MEMGETREGIONCOUNT:
491 if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
/* Fill in offset/erasesize/numblocks for the region the user indexed. */
495 case MEMGETREGIONINFO:
498 struct mtd_erase_region_info *kr;
499 struct region_info_user __user *ur = argp;
501 if (get_user(ur_idx, &(ur->regionindex)))
504 kr = &(mtd->eraseregions[ur_idx]);
506 if (put_user(kr->offset, &(ur->offset))
507 || put_user(kr->erasesize, &(ur->erasesize))
508 || put_user(kr->numblocks, &(ur->numblocks)))
/* MEMGETINFO: copy out the basic device geometry. */
515 info.type = mtd->type;
516 info.flags = mtd->flags;
517 info.size = mtd->size;
518 info.erasesize = mtd->erasesize;
519 info.writesize = mtd->writesize;
520 info.oobsize = mtd->oobsize;
521 /* The below fields are obsolete */
524 if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
/* MEMERASE / MEMERASE64: synchronous block erase.  The 64-bit variant
 * takes erase_info_user64 for devices larger than 4 GiB. */
531 struct erase_info *erase;
533 if(!(file->f_mode & FMODE_WRITE))
536 erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
540 wait_queue_head_t waitq;
541 DECLARE_WAITQUEUE(wait, current);
543 init_waitqueue_head(&waitq);
545 if (cmd == MEMERASE64) {
546 struct erase_info_user64 einfo64;
548 if (copy_from_user(&einfo64, argp,
549 sizeof(struct erase_info_user64))) {
553 erase->addr = einfo64.start;
554 erase->len = einfo64.length;
556 struct erase_info_user einfo32;
558 if (copy_from_user(&einfo32, argp,
559 sizeof(struct erase_info_user))) {
563 erase->addr = einfo32.start;
564 erase->len = einfo32.length;
/* Completion is signalled through mtdchar_erase_callback(), which wakes
 * the wait queue stashed in erase->priv. */
567 erase->callback = mtdchar_erase_callback;
568 erase->priv = (unsigned long)&waitq;
571 FIXME: Allow INTERRUPTIBLE. Which means
572 not having the wait_queue head on the stack.
574 If the wq_head is on the stack, and we
575 leave because we got interrupted, then the
576 wq_head is no longer there when the
577 callback routine tries to wake us up.
579 ret = mtd->erase(mtd, erase);
/* Sleep uninterruptibly until the driver reports DONE or FAILED; the
 * state re-check after add_wait_queue avoids a lost-wakeup race. */
581 set_current_state(TASK_UNINTERRUPTIBLE);
582 add_wait_queue(&waitq, &wait);
583 if (erase->state != MTD_ERASE_DONE &&
584 erase->state != MTD_ERASE_FAILED)
586 remove_wait_queue(&waitq, &wait);
587 set_current_state(TASK_RUNNING);
589 ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
/* MEMWRITEOOB (legacy 32-bit offsets). */
598 struct mtd_oob_buf buf;
599 struct mtd_oob_buf __user *buf_user = argp;
601 /* NOTE: writes return length to buf_user->length */
602 if (copy_from_user(&buf, argp, sizeof(buf)))
605 ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
606 buf.ptr, &buf_user->length);
/* MEMREADOOB (legacy): quirkily returns the length via buf_user->start. */
612 struct mtd_oob_buf buf;
613 struct mtd_oob_buf __user *buf_user = argp;
615 /* NOTE: writes return length to buf_user->start */
616 if (copy_from_user(&buf, argp, sizeof(buf)))
619 ret = mtd_do_readoob(mtd, buf.start, buf.length,
620 buf.ptr, &buf_user->start);
/* MEMWRITEOOB64: 64-bit-clean OOB write. */
626 struct mtd_oob_buf64 buf;
627 struct mtd_oob_buf64 __user *buf_user = argp;
629 if (copy_from_user(&buf, argp, sizeof(buf)))
632 ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
633 (void __user *)(uintptr_t)buf.usr_ptr,
/* MEMREADOOB64: 64-bit-clean OOB read. */
640 struct mtd_oob_buf64 buf;
641 struct mtd_oob_buf64 __user *buf_user = argp;
643 if (copy_from_user(&buf, argp, sizeof(buf)))
646 ret = mtd_do_readoob(mtd, buf.start, buf.length,
647 (void __user *)(uintptr_t)buf.usr_ptr,
/* MEMLOCK: write-protect a range (if the driver supports it). */
654 struct erase_info_user einfo;
656 if (copy_from_user(&einfo, argp, sizeof(einfo)))
662 ret = mtd->lock(mtd, einfo.start, einfo.length);
/* MEMUNLOCK: remove write protection from a range. */
668 struct erase_info_user einfo;
670 if (copy_from_user(&einfo, argp, sizeof(einfo)))
676 ret = mtd->unlock(mtd, einfo.start, einfo.length);
/* MEMISLOCKED: query lock status of a range. */
682 struct erase_info_user einfo;
684 if (copy_from_user(&einfo, argp, sizeof(einfo)))
690 ret = mtd->is_locked(mtd, einfo.start, einfo.length);
694 /* Legacy interface */
/* MEMGETOOBSEL: translate the modern ecclayout into the obsolete
 * nand_oobinfo structure for old userspace. */
697 struct nand_oobinfo oi;
701 if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
704 oi.useecc = MTD_NANDECC_AUTOPLACE;
705 memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
706 memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
708 oi.eccbytes = mtd->ecclayout->eccbytes;
710 if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
/* MEMGETBADBLOCK: ask the driver whether the block at 'offs' is bad. */
719 if (copy_from_user(&offs, argp, sizeof(loff_t)))
721 if (!mtd->block_isbad)
724 return mtd->block_isbad(mtd, offs);
/* MEMSETBADBLOCK: mark the block at 'offs' bad. */
732 if (copy_from_user(&offs, argp, sizeof(loff_t)))
734 if (!mtd->block_markbad)
737 return mtd->block_markbad(mtd, offs);
741 #ifdef CONFIG_HAVE_MTD_OTP
/* OTPSELECT: reset to normal mode, then try to enter the requested OTP
 * mode via otp_select_filemode(). */
745 if (copy_from_user(&mode, argp, sizeof(int)))
748 mfi->mode = MTD_MODE_NORMAL;
750 ret = otp_select_filemode(mfi, mode);
756 case OTPGETREGIONCOUNT:
757 case OTPGETREGIONINFO:
/* One-page scratch buffer for the driver's OTP region table. */
759 struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
764 case MTD_MODE_OTP_FACTORY:
765 if (mtd->get_fact_prot_info)
766 ret = mtd->get_fact_prot_info(mtd, buf, 4096);
768 case MTD_MODE_OTP_USER:
769 if (mtd->get_user_prot_info)
770 ret = mtd->get_user_prot_info(mtd, buf, 4096);
/* On success 'ret' holds the table size in bytes: either report the
 * entry count or copy the raw table out. */
776 if (cmd == OTPGETREGIONCOUNT) {
777 int nbr = ret / sizeof(struct otp_info);
778 ret = copy_to_user(argp, &nbr, sizeof(int));
780 ret = copy_to_user(argp, buf, ret);
/* OTPLOCK: permanently lock a user-OTP region (only valid in OTP_USER
 * mode). */
790 struct otp_info oinfo;
792 if (mfi->mode != MTD_MODE_OTP_USER)
794 if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
796 if (!mtd->lock_user_prot_reg)
798 ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
/* ECCGETLAYOUT: copy out the full (deprecated) nand_ecclayout. */
808 if (copy_to_user(argp, mtd->ecclayout,
809 sizeof(struct nand_ecclayout)))
/* ECCGETSTATS: copy out corrected/failed/badblock counters. */
816 if (copy_to_user(argp, &mtd->ecc_stats,
817 sizeof(struct mtd_ecc_stats)))
/* MTDFILEMODE: switch this open file's access mode. */
827 case MTD_MODE_OTP_FACTORY:
828 case MTD_MODE_OTP_USER:
829 ret = otp_select_filemode(mfi, arg);
/* Raw mode requires both OOB hooks. */
833 if (!mtd->read_oob || !mtd->write_oob)
837 case MTD_MODE_NORMAL:
/* 32-bit-userspace layout of struct mtd_oob_buf: the pointer member is a
 * compat_caddr_t, so the ioctl numbers differ and need explicit
 * translation in mtd_compat_ioctl(). */
855 struct mtd_oob_buf32 {
858 compat_caddr_t ptr; /* unsigned char* */
861 #define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
862 #define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
/*
 * compat_ioctl handler for 32-bit userspace on a 64-bit kernel: translate
 * the two OOB commands whose struct layout differs (pointer width), then
 * fall through to the native mtd_ioctl() for everything else.
 * NOTE(review): fragment — the switch head, lock_kernel/unlock_kernel or
 * equivalent serialization, and case breaks are elided from this excerpt.
 */
864 static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
867 struct inode *inode = file->f_path.dentry->d_inode;
868 struct mtd_file_info *mfi = file->private_data;
869 struct mtd_info *mtd = mfi->mtd;
/* compat_ptr() widens the 32-bit user pointer correctly. */
870 void __user *argp = compat_ptr(arg);
/* MEMWRITEOOB32 -> mtd_do_writeoob with a widened data pointer. */
878 struct mtd_oob_buf32 buf;
879 struct mtd_oob_buf32 __user *buf_user = argp;
881 if (copy_from_user(&buf, argp, sizeof(buf)))
884 ret = mtd_do_writeoob(file, mtd, buf.start,
885 buf.length, compat_ptr(buf.ptr),
/* MEMREADOOB32 -> mtd_do_readoob; like the native legacy command, the
 * result length is written to buf->start. */
892 struct mtd_oob_buf32 buf;
893 struct mtd_oob_buf32 __user *buf_user = argp;
895 /* NOTE: writes return length to buf->start */
896 if (copy_from_user(&buf, argp, sizeof(buf)))
899 ret = mtd_do_readoob(mtd, buf.start,
900 buf.length, compat_ptr(buf.ptr),
/* All other commands are layout-compatible: delegate to mtd_ioctl(). */
905 ret = mtd_ioctl(inode, file, cmd, (unsigned long)argp);
913 #endif /* CONFIG_COMPAT */
916 * try to determine where a shared mapping can be made
917 * - only supported for NOMMU at the moment (MMU can't doesn't copy private
/*
 * NOMMU get_unmapped_area hook: ask the MTD driver where a direct mapping
 * of [pgoff, pgoff+len) can be placed, after bounds-checking the request
 * against the device size.  Returns -ENOSYS when the driver cannot map
 * directly.
 */
921 static unsigned long mtd_get_unmapped_area(struct file *file,
927 struct mtd_file_info *mfi = file->private_data;
928 struct mtd_info *mtd = mfi->mtd;
930 if (mtd->get_unmapped_area) {
931 unsigned long offset;
934 return (unsigned long) -EINVAL;
/* Request must lie entirely within the device. */
936 if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
937 return (unsigned long) -EINVAL;
939 offset = pgoff << PAGE_SHIFT;
940 if (offset > mtd->size - len)
941 return (unsigned long) -EINVAL;
/* Let the driver pick the address for this direct mapping. */
943 return mtd->get_unmapped_area(mtd, len, offset, flags);
946 /* can't map directly */
947 return (unsigned long) -ENOSYS;
952 * set up a mapping for shared memory segments
/*
 * mmap() handler.  On MMU systems, memory-backed devices (MTD_RAM/MTD_ROM)
 * are mapped directly with io_remap_pfn_range(); everything else falls
 * through to the final return, which allows only shared mappings.
 * NOTE(review): fragment — the #ifdef CONFIG_MMU guards, the 'start'
 * initialization from the map, and several returns are elided; 'map' comes
 * from mtd->priv and is presumably only valid for map-based drivers —
 * confirm before relying on it.
 */
954 static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
957 struct mtd_file_info *mfi = file->private_data;
958 struct mtd_info *mtd = mfi->mtd;
959 struct map_info *map = mtd->priv;
964 if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
965 off = vma->vm_pgoff << PAGE_SHIFT;
/* Round the mappable window up to whole pages. */
967 len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
/* Reject mappings that would run past the device window. */
969 if ((vma->vm_end - vma->vm_start + off) > len)
973 vma->vm_pgoff = off >> PAGE_SHIFT;
974 vma->vm_flags |= VM_IO | VM_RESERVED;
976 #ifdef pgprot_noncached
/* O_DSYNC opens and mappings beyond RAM get uncached page protection. */
977 if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
978 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
980 if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
981 vma->vm_end - vma->vm_start,
/* NOMMU (or unmapped-type) path: shared mappings succeed, private fail. */
989 return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
/* File operations for the MTD character device (entries for llseek,
 * read, write, ioctl, open, and mmap are elided from this excerpt). */
993 static const struct file_operations mtd_fops = {
994 .owner = THIS_MODULE,
1000 .compat_ioctl = mtd_compat_ioctl,
1003 .release = mtd_close,
1006 .get_unmapped_area = mtd_get_unmapped_area,
/* Superblock constructor for the internal "mtd_inodefs" pseudo-filesystem:
 * a pure in-kernel fs used only to allocate backing inodes per device. */
1010 static int mtd_inodefs_get_sb(struct file_system_type *fs_type, int flags,
1011 const char *dev_name, void *data,
1012 struct vfsmount *mnt)
1014 return get_sb_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC,
/* The internal pseudo-filesystem type, registered/mounted in
 * init_mtdchar() and torn down in cleanup_mtdchar(). */
1018 static struct file_system_type mtd_inodefs_type = {
1019 .name = "mtd_inodefs",
1020 .get_sb = mtd_inodefs_get_sb,
1021 .kill_sb = kill_anon_super,
/* MTD-core add notification: nothing to do — the backing inode is created
 * lazily on first open (body elided in this excerpt is empty or trivial). */
1024 static void mtdchar_notify_add(struct mtd_info *mtd)
/* MTD-core remove notification: drop this device's backing inode from the
 * internal pseudo-fs, if one was ever created. */
1028 static void mtdchar_notify_remove(struct mtd_info *mtd)
1030 struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);
1033 /* Destroy the inode if it exists */
/* Zeroing i_nlink makes the subsequent iput (elided here) evict it. */
1034 mtd_ino->i_nlink = 0;
/* Hooks registered with the MTD core in init_mtdchar(). */
1039 static struct mtd_notifier mtdchar_notifier = {
1040 .add = mtdchar_notify_add,
1041 .remove = mtdchar_notify_remove,
/*
 * Module init: register the character-device major, register and mount
 * the internal mtd_inodefs pseudo-filesystem, then register with the MTD
 * core for add/remove notifications.  Unwinds in reverse order on error.
 */
1044 static int __init init_mtdchar(void)
/* Two minors per device (RW and RO nodes), hence the full minor range. */
1048 ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
1051 pr_notice("Can't allocate major number %d for "
1052 "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
1056 ret = register_filesystem(&mtd_inodefs_type);
1058 pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
1059 goto err_unregister_chdev;
1062 mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
1063 if (IS_ERR(mtd_inode_mnt)) {
1064 ret = PTR_ERR(mtd_inode_mnt);
1065 pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
1066 goto err_unregister_filesystem;
1068 register_mtd_user(&mtdchar_notifier);
/* Error unwind (goto-cleanup): undo in reverse order of acquisition. */
1072 err_unregister_filesystem:
1073 unregister_filesystem(&mtd_inodefs_type);
1074 err_unregister_chdev:
1075 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
/* Module exit: exact reverse of init_mtdchar() — unhook from the MTD core,
 * drop the pseudo-fs mount, unregister the fs type and the chrdev major. */
1079 static void __exit cleanup_mtdchar(void)
1081 unregister_mtd_user(&mtdchar_notifier);
1082 mntput(mtd_inode_mnt);
1083 unregister_filesystem(&mtd_inodefs_type);
1084 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1087 module_init(init_mtdchar);
1088 module_exit(cleanup_mtdchar);
1090 MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
1092 MODULE_LICENSE("GPL");
1093 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
1094 MODULE_DESCRIPTION("Direct character-device access to MTD devices");
/* NOTE(review): duplicate of the MODULE_ALIAS_CHARDEV_MAJOR above
 * (original line 1090) — one of the two can be removed. */
1095 MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);