/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif
/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about or through
	 * a file pointer that was marked O_SYNC will be done non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
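/*
 * Illustrative sketch (not part of this driver): user space typically
 * selects the uncached path by opening /dev/mem with O_SYNC before
 * mmap()ing a device range; the physical address below is made up.
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	void *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0xfe000000);
 */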
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif
#ifdef CONFIG_NONPROMISC_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif
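/*
 * Note: with CONFIG_NONPROMISC_DEVMEM the architecture's
 * devmem_is_allowed() is consulted for every page frame in the
 * requested range, so a single disallowed page rejects the whole
 * /dev/mem access; without the option every range is permitted.
 */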
void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}
/*
 * This function reads the *physical* memory. The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;
		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, count))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		if (copy_to_user(buf, ptr, sz)) {
			unxlate_dev_mem_ptr(p, ptr);
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;
		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			unxlate_dev_mem_ptr(p, ptr);
			if (written)
				break;
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
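/*
 * Illustrative sketch (not part of this driver): the file offset of
 * /dev/mem is the physical address, so dumping one page at physical
 * address 0x1000 from user space looks like:
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	char page[4096];
 *	lseek(fd, 0x1000, SEEK_SET);
 *	read(fd, page, sizeof(page));
 */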
int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif
#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif
void __attribute__((weak))
map_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
	/* nothing. architectures can override. */
}

void __attribute__((weak))
unmap_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
	/* nothing. architectures can override. */
}

static void mmap_mem_open(struct vm_area_struct *vma)
{
	map_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
			vma->vm_page_prot);
}

static void mmap_mem_close(struct vm_area_struct *vma)
{
	unmap_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
			vma->vm_page_prot);
}

static struct vm_operations_struct mmap_mem_ops = {
	.open  = mmap_mem_open,
	.close = mmap_mem_close
};
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		unmap_devmem(vma->vm_pgoff, size, vma->vm_page_prot);
		return -EAGAIN;
	}
	return 0;
}
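/*
 * Note: mmap_mem() runs all of its policy checks (address validity,
 * no-MMU rules, CONFIG_NONPROMISC_DEVMEM, architecture protections)
 * before handing the actual work to remap_pfn_range(); mmap_mem_ops
 * exists so the map_devmem()/unmap_devmem() tracking stays balanced
 * when the VMA is duplicated or torn down.
 */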
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif
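/*
 * Note: /dev/oldmem is read by kdump user-space tools running in the
 * capture kernel; copy_oldmem_page() fetches one page at a time from
 * the memory image of the crashed kernel, up to saved_max_pfn.
 */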
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count) tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;
			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}
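/*
 * Note: read_kmem() works in two phases: kernel low memory is copied
 * out directly through xlate_dev_kmem_ptr(), while vmalloc space is
 * staged through a bounce page filled by vread(), which takes the
 * vmlist_lock rwlock and so must not be handed a user pointer.
 */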
static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;

		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;
		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {
		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void*)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					if (wrote + virtr)
						break;
					free_page((unsigned long)kbuf);
					return -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}
#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user * tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif
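/*
 * Illustrative sketch (not part of this driver): each byte offset of
 * /dev/port is one I/O port, so user space can poke the x86 POST
 * diagnostic port 0x80 like this:
 *
 *	int fd = open("/dev/port", O_WRONLY);
 *	unsigned char v = 0x55;
 *	lseek(fd, 0x80, SEEK_SET);
 *	write(fd, &v, 1);
 */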
static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe,struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}
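/*
 * Note: read_zero() clears the user buffer in at most page-sized
 * chunks and calls cond_resched() between them, so a huge read of
 * /dev/zero cannot monopolize the CPU.
 */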
static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}
static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}
/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}
/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
		case 0:
			file->f_pos = offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		case 1:
			file->f_pos += offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		default:
			ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}
static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem
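/*
 * Note: the aliases above exist because several devices share
 * behaviour: /dev/zero and /dev/full seek like /dev/null, writing
 * /dev/zero discards data like /dev/null, reading /dev/full returns
 * zeroes, and opening mem/kmem/oldmem requires CAP_SYS_RAWIO exactly
 * like /dev/port.
 */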
static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.splice_write	= splice_write_null,
};
#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif
static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};
/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
	.capabilities	= BDI_CAP_MAP_COPY,
};
static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};
#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif
static ssize_t kmsg_write(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}
static const struct file_operations kmsg_fops = {
	.write =	kmsg_write,
};
static int memory_open(struct inode * inode, struct file * filp)
{
	switch (iminor(inode)) {
		case 1:
			filp->f_op = &mem_fops;
			filp->f_mapping->backing_dev_info =
				&directly_mappable_cdev_bdi;
			break;
		case 2:
			filp->f_op = &kmem_fops;
			filp->f_mapping->backing_dev_info =
				&directly_mappable_cdev_bdi;
			break;
		case 3:
			filp->f_op = &null_fops;
			break;
#ifdef CONFIG_DEVPORT
		case 4:
			filp->f_op = &port_fops;
			break;
#endif
		case 5:
			filp->f_mapping->backing_dev_info = &zero_bdi;
			filp->f_op = &zero_fops;
			break;
		case 7:
			filp->f_op = &full_fops;
			break;
		case 8:
			filp->f_op = &random_fops;
			break;
		case 9:
			filp->f_op = &urandom_fops;
			break;
		case 11:
			filp->f_op = &kmsg_fops;
			break;
#ifdef CONFIG_CRASH_DUMP
		case 12:
			filp->f_op = &oldmem_fops;
			break;
#endif
		default:
			return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		return filp->f_op->open(inode,filp);
	return 0;
}
static const struct file_operations memory_fops = {
	.open		= memory_open,	/* just a selector for the real open */
};
static const struct {
	unsigned int		minor;
	char			*name;
	umode_t			mode;
	const struct file_operations	*fops;
} devlist[] = { /* list of minor devices */
	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
	{3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#ifdef CONFIG_DEVPORT
	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
	{7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
	{8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
	{11, "kmsg",   S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
	{12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};
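/*
 * Note: minors 6 and 10 are deliberately absent from this table, and
 * the random (8) and urandom (9) file_operations come from the random
 * driver rather than this file.
 */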
static struct class *mem_class;
static int __init chr_dev_init(void)
{
	int i;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++)
		device_create(mem_class, NULL,
			      MKDEV(MEM_MAJOR, devlist[i].minor),
			      devlist[i].name);

	return 0;
}

fs_initcall(chr_dev_init);
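/*
 * Note: chr_dev_init() runs at fs_initcall time: it registers char
 * major MEM_MAJOR (1) with memory_open() as the minor-number selector
 * and creates a "mem" class device for each devlist entry so user
 * space (udev) can populate the corresponding /dev nodes.
 */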