git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - mm/vmalloc.c
mm: allow GFP_{FS,IO} for page_cache_read page cache allocation
[karo-tx-linux.git] / mm / vmalloc.c
index 8e3c9c5a3042b6149ff783bb1dfc0c0e08dc4046..58ceeb107960b367f88bebcbb067ded860b1d010 100644 (file)
@@ -441,8 +441,7 @@ nocache:
                if (list_is_last(&first->list, &vmap_area_list))
                        goto found;
 
-               first = list_entry(first->list.next,
-                               struct vmap_area, list);
+               first = list_next_entry(first, list);
        }
 
 found:
@@ -1477,13 +1476,10 @@ static void __vunmap(const void *addr, int deallocate_pages)
                        struct page *page = area->pages[i];
 
                        BUG_ON(!page);
-                       __free_page(page);
+                       __free_kmem_pages(page, 0);
                }
 
-               if (area->flags & VM_VPAGES)
-                       vfree(area->pages);
-               else
-                       kfree(area->pages);
+               kvfree(area->pages);
        }
 
        kfree(area);
@@ -1593,7 +1589,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        if (array_size > PAGE_SIZE) {
                pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
                                PAGE_KERNEL, node, area->caller);
-               area->flags |= VM_VPAGES;
        } else {
                pages = kmalloc_node(array_size, nested_gfp, node);
        }
@@ -1608,9 +1603,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                struct page *page;
 
                if (node == NUMA_NO_NODE)
-                       page = alloc_page(alloc_mask);
+                       page = alloc_kmem_pages(alloc_mask, order);
                else
-                       page = alloc_pages_node(node, alloc_mask, order);
+                       page = alloc_kmem_pages_node(node, alloc_mask, order);
 
                if (unlikely(!page)) {
                        /* Successfully allocated i pages, free them in __vunmap() */
@@ -2559,10 +2554,10 @@ static void *s_start(struct seq_file *m, loff_t *pos)
        struct vmap_area *va;
 
        spin_lock(&vmap_area_lock);
-       va = list_entry((&vmap_area_list)->next, typeof(*va), list);
+       va = list_first_entry(&vmap_area_list, typeof(*va), list);
        while (n > 0 && &va->list != &vmap_area_list) {
                n--;
-               va = list_entry(va->list.next, typeof(*va), list);
+               va = list_next_entry(va, list);
        }
        if (!n && &va->list != &vmap_area_list)
                return va;
@@ -2576,7 +2571,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
        struct vmap_area *va = p, *next;
 
        ++*pos;
-       next = list_entry(va->list.next, typeof(*va), list);
+       next = list_next_entry(va, list);
        if (&next->list != &vmap_area_list)
                return next;
 
@@ -2651,7 +2646,7 @@ static int s_show(struct seq_file *m, void *p)
        if (v->flags & VM_USERMAP)
                seq_puts(m, " user");
 
-       if (v->flags & VM_VPAGES)
+       if (is_vmalloc_addr(v->pages))
                seq_puts(m, " vpages");
 
        show_numa_info(m, v);