diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8e05a11155c9208e6daf17644168929105e433da..3130c343088fd823f211f9079f666ac67d1671e9 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -767,3 +767,56 @@ EXPORT_SYMBOL(remap_vmalloc_range);
 void  __attribute__((weak)) vmalloc_sync_all(void)
 {
 }
+
+
+static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+{
+       /* apply_to_page_range() does all the hard work. */
+       return 0;
+}
+
+/**
+ *     alloc_vm_area - allocate a range of kernel address space
+ *     @size:          size of the area
+ *     @returns:       NULL on failure, vm_struct on success
+ *
+ *     This function reserves a range of kernel address space and
+ *     allocates the page tables needed to map that range; no actual
+ *     mappings are created.  If kernel page tables are not shared
+ *     between processes, the new page table entries are synced into
+ *     the page tables of every process.
+ */
+struct vm_struct *alloc_vm_area(size_t size)
+{
+       struct vm_struct *area;
+
+       area = get_vm_area(size, VM_IOREMAP);
+       if (area == NULL)
+               return NULL;
+
+       /*
+        * This ensures that page tables are constructed for this region
+        * of kernel virtual address space and mapped into init_mm.
+        */
+       if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
+                               area->size, f, NULL)) {
+               free_vm_area(area);
+               return NULL;
+       }
+
+       /* Make sure the page tables are also constructed in the
+        * kernel mappings of every process. */
+       vmalloc_sync_all();
+
+       return area;
+}
+EXPORT_SYMBOL_GPL(alloc_vm_area);
+
+void free_vm_area(struct vm_struct *area)
+{
+       struct vm_struct *ret;
+       ret = remove_vm_area(area->addr);
+       BUG_ON(ret != area);
+       kfree(area);
+}
+EXPORT_SYMBOL_GPL(free_vm_area);
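
For orientation only (not part of the patch): a minimal sketch of how a caller might use the interface added above. It assumes a hypothetical module that reserves one page of kernel address space, leaves installation of the actual PTEs to some external agent such as a hypervisor, and later drops the reservation; the names example_reserve(), example_release() and foreign_area are invented for illustration.

#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>

static struct vm_struct *foreign_area;

static int example_reserve(void)
{
        /* Reserve PAGE_SIZE of kernel address space; the page tables
         * covering it are built, but nothing is mapped yet. */
        foreign_area = alloc_vm_area(PAGE_SIZE);
        if (!foreign_area)
                return -ENOMEM;

        /*
         * The range at foreign_area->addr now has page tables in init_mm
         * (and, after vmalloc_sync_all(), in every process's kernel
         * mappings).  The caller is expected to install the PTEs itself,
         * e.g. by asking a hypervisor to map foreign memory here.
         */
        return 0;
}

static void example_release(void)
{
        /* Any PTEs installed in the area must already be cleared. */
        free_vm_area(foreign_area);
        foreign_area = NULL;
}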