git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
hugetlb-use-mmu_gather-instead-of-a-temporary-linked-list-for-accumulating-pages...
author: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Thu, 3 May 2012 05:43:35 +0000 (15:43 +1000)
committer: Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 3 May 2012 05:46:23 +0000 (15:46 +1000)
Further cleanup of the above patch

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h

index 4b3f38542a3c703949ef506461a2c1c6562129fa..c21e136e8623035d10d317bc77e71792627dfd30 100644 (file)
@@ -100,7 +100,6 @@ static inline unsigned long hugetlb_total_pages(void)
 #define follow_huge_addr(mm, addr, write)      ERR_PTR(-EINVAL)
 #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
 #define hugetlb_prefault(mapping, vma)         ({ BUG(); 0; })
-#define unmap_hugepage_range(vma, start, end, page)    BUG()
 static inline void hugetlb_report_meminfo(struct seq_file *m)
 {
 }
@@ -114,7 +113,11 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
 #define hugetlb_fault(mm, vma, addr, flags)    ({ BUG(); 0; })
 #define huge_pte_offset(mm, address)   0
-#define dequeue_hwpoisoned_huge_page(page)     0
+static inline int dequeue_hwpoisoned_huge_page(struct page *page)
+{
+       return 0;
+}
+
 static inline void copy_huge_page(struct page *dst, struct page *src)
 {
 }
@@ -125,6 +128,7 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
 {
+       BUG();
 }
 
 #endif /* !CONFIG_HUGETLB_PAGE */