vmcore: support mmap() on /proc/vmcore
author     HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>
           Fri, 7 Jun 2013 00:07:50 +0000 (10:07 +1000)
committer  Stephen Rothwell <sfr@canb.auug.org.au>
           Fri, 7 Jun 2013 05:42:13 +0000 (15:42 +1000)
This patch introduces mmap_vmcore().

Don't permit writable or executable mappings, even via mprotect(), because
this mmap() is aimed at reading crash dump memory.  A non-writable mapping
is also a requirement of remap_pfn_range() when mapping linear pages onto
non-consecutive physical pages; see is_cow_mapping().
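
For illustration, a minimal userspace sketch (not part of this patch; the
reader program is an assumption) of how a dump tool might map /proc/vmcore
read-only.  Requesting PROT_WRITE or PROT_EXEC fails with EPERM, and a
later mprotect() upgrade is refused because mmap_vmcore() clears
VM_MAYWRITE and VM_MAYEXEC:

	/* Hypothetical dump reader, for illustration only. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/proc/vmcore", O_RDONLY);
		if (fd < 0) {
			perror("open /proc/vmcore");
			return 1;
		}

		/* PROT_READ only; PROT_WRITE or PROT_EXEC => EPERM. */
		char *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* The mapping begins with the ELF header of the dump. */
		printf("ELF magic present: %s\n",
		       memcmp(p, "\x7f" "ELF", 4) == 0 ? "yes" : "no");

		munmap(p, 4096);
		close(fd);
		return 0;
	}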

Set the VM_MIXEDMAP flag so that memory can be remapped by remap_pfn_range()
and by remap_vmalloc_range_partial() at the same time for a single vma.
With this flag, do_munmap() can correctly clean up a vma that was partially
remapped by the two functions in the abnormal case.  See zap_pte_range(),
vm_normal_page() and their comments for details.

On x86-32 PAE kernels, mmap() supports at most 16TB of memory.  This
limitation comes from the fact that the third argument of remap_pfn_range(),
pfn, is an unsigned long, which is only 32 bits wide on x86-32.
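
The 16TB figure follows directly from the widths involved; a small
standalone sketch of the arithmetic (assuming 4KiB pages, i.e.
PAGE_SHIFT == 12):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/*
		 * On x86-32, remap_pfn_range()'s pfn argument is a 32-bit
		 * unsigned long.  With 4KiB pages, the largest mappable
		 * range is 2^32 pfns * 2^12 bytes/page = 2^44 bytes.
		 */
		uint64_t max_bytes = (uint64_t)1 << (32 + 12);

		printf("max mappable: %llu TiB\n",
		       (unsigned long long)(max_bytes >> 40)); /* 16 */
		return 0;
	}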

Signed-off-by: HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Atsushi Kumagai <kumagai-atsushi@mxc.nes.nec.co.jp>
Cc: Lisa Mitchell <lisa.mitchell@hp.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/proc/vmcore.c

index 8ec6483689857377ab1ae4f12df72c87a869b5ba..423796979640b75d782fc989191a80dae672e2b1 100644
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/crash_dump.h>
 #include <linux/list.h>
+#include <linux/vmalloc.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include "internal.h"
@@ -194,9 +195,94 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
        return acc;
 }
 
+static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
+{
+       size_t size = vma->vm_end - vma->vm_start;
+       u64 start, end, len, tsz;
+       struct vmcore *m;
+
+       start = (u64)vma->vm_pgoff << PAGE_SHIFT;
+       end = start + size;
+
+       if (size > vmcore_size || end > vmcore_size)
+               return -EINVAL;
+
+       if (vma->vm_flags & (VM_WRITE | VM_EXEC))
+               return -EPERM;
+
+       vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
+       vma->vm_flags |= VM_MIXEDMAP;
+
+       len = 0;
+
+       if (start < elfcorebuf_sz) {
+               u64 pfn;
+
+               tsz = elfcorebuf_sz - start;
+               if (size < tsz)
+                       tsz = size;
+               pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
+               if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
+                                   vma->vm_page_prot))
+                       return -EAGAIN;
+               size -= tsz;
+               start += tsz;
+               len += tsz;
+
+               if (size == 0)
+                       return 0;
+       }
+
+       if (start < elfcorebuf_sz + elfnotes_sz) {
+               void *kaddr;
+
+               tsz = elfcorebuf_sz + elfnotes_sz - start;
+               if (size < tsz)
+                       tsz = size;
+               kaddr = elfnotes_buf + start - elfcorebuf_sz;
+               if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
+                                               kaddr, tsz)) {
+                       do_munmap(vma->vm_mm, vma->vm_start, len);
+                       return -EAGAIN;
+               }
+               size -= tsz;
+               start += tsz;
+               len += tsz;
+
+               if (size == 0)
+                       return 0;
+       }
+
+       list_for_each_entry(m, &vmcore_list, list) {
+               if (start < m->offset + m->size) {
+                       u64 paddr = 0;
+
+                       tsz = m->offset + m->size - start;
+                       if (size < tsz)
+                               tsz = size;
+                       paddr = m->paddr + start - m->offset;
+                       if (remap_pfn_range(vma, vma->vm_start + len,
+                                           paddr >> PAGE_SHIFT, tsz,
+                                           vma->vm_page_prot)) {
+                               do_munmap(vma->vm_mm, vma->vm_start, len);
+                               return -EAGAIN;
+                       }
+                       size -= tsz;
+                       start += tsz;
+                       len += tsz;
+
+                       if (size == 0)
+                               return 0;
+               }
+       }
+
+       return 0;
+}
+
 static const struct file_operations proc_vmcore_operations = {
        .read           = read_vmcore,
        .llseek         = default_llseek,
+       .mmap           = mmap_vmcore,
 };
 
 static struct vmcore* __init get_new_element(void)