diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 401d104d2bb6e12c08e70700eebad915e72c7c2a..c6ecf87ddaf627503ab90f618a455635f8a0f615 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -280,6 +280,23 @@ void prep_compound_page(struct page *page, unsigned long order)
        }
 }
 
+#ifdef CONFIG_HUGETLBFS
+void prep_compound_gigantic_page(struct page *page, unsigned long order)
+{
+       int i;
+       int nr_pages = 1 << order;
+       struct page *p = page + 1;
+
+       set_compound_page_dtor(page, free_compound_page);
+       set_compound_order(page, order);
+       __SetPageHead(page);
+       for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+               __SetPageTail(p);
+               p->first_page = page;
+       }
+}
+#endif
+
 static void destroy_compound_page(struct page *page, unsigned long order)
 {
        int i;
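
The new prep_compound_gigantic_page() mirrors prep_compound_page(), but walks the tail pages with mem_map_next() (mm/internal.h) rather than plain pointer arithmetic, because a gigantic page spans more than one MAX_ORDER block and its struct page array need not be contiguous across those boundaries. Below is a self-contained userspace model of that walk; CHUNK, struct fake_page and next_tail() are illustrative stand-ins for MAX_ORDER_NR_PAGES, struct page and mem_map_next(), not kernel API.

#include <stdio.h>

#define CHUNK 4				/* models MAX_ORDER_NR_PAGES */

struct fake_page { int tail; };

/* Two separate arrays: the "mem_map" chunks are deliberately not contiguous. */
static struct fake_page chunk0[CHUNK], chunk1[CHUNK];
static struct fake_page *section_base[] = { chunk0, chunk1 };

static struct fake_page *next_tail(struct fake_page *iter, unsigned long i)
{
	if ((i & (CHUNK - 1)) == 0) {	/* crossed a chunk boundary */
		if (i / CHUNK >= sizeof(section_base) / sizeof(section_base[0]))
			return NULL;	/* models the pfn_valid() check */
		return &section_base[i / CHUNK][0];
	}
	return iter + 1;		/* still inside one chunk: plain p + 1 */
}

int main(void)
{
	struct fake_page *p = &chunk0[1];	/* models page + 1 */
	unsigned long i, nr = 2 * CHUNK;	/* a "gigantic" page of 8 subpages */

	for (i = 1; i < nr; i++, p = next_tail(p, i))
		p->tail = 1;			/* models __SetPageTail(p) */

	printf("last subpage marked tail? %d\n", chunk1[CHUNK - 1].tail);
	return 0;
}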
@@ -694,6 +711,9 @@ static int move_freepages(struct zone *zone,
 #endif
 
        for (page = start_page; page <= end_page;) {
+               /* Make sure we are not inadvertently changing nodes */
+               VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
+
                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
                        continue;
@@ -826,7 +846,7 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order, 
                        unsigned long count, struct list_head *list,
-                       int migratetype)
+                       int migratetype, int cold)
 {
        int i;
        
@@ -845,7 +865,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                 * merge IO requests if the physical pages are ordered
                 * properly.
                 */
-               list_add(&page->lru, list);
+               if (likely(cold == 0))
+                       list_add(&page->lru, list);
+               else
+                       list_add_tail(&page->lru, list);
                set_page_private(page, migratetype);
                list = &page->lru;
        }
@@ -1048,7 +1071,8 @@ again:
                local_irq_save(flags);
                if (!pcp->count) {
                        pcp->count = rmqueue_bulk(zone, 0,
-                                       pcp->batch, &pcp->list, migratetype);
+                                       pcp->batch, &pcp->list,
+                                       migratetype, cold);
                        if (unlikely(!pcp->count))
                                goto failed;
                }
@@ -1067,7 +1091,8 @@ again:
                /* Allocate more to the pcp list if necessary */
                if (unlikely(&page->lru == &pcp->list)) {
                        pcp->count += rmqueue_bulk(zone, 0,
-                                       pcp->batch, &pcp->list, migratetype);
+                                       pcp->batch, &pcp->list,
+                                       migratetype, cold);
                        page = list_entry(pcp->list.next, struct page, lru);
                }
 
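
rmqueue_bulk() now receives the caller's cold flag: hot pages keep going to the head of the per-cpu list with list_add(), while cold pages are appended with list_add_tail(). Since the allocation path hands out pages from the head of that list, cache-hot pages are reused first and a cold bulk refill does not push them back. The toy program below models that ordering with a minimal doubly-linked list; it is a sketch, not <linux/list.h>.

#include <stdio.h>

struct node {
	int id;
	struct node *prev, *next;
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_add(struct node *n, struct node *head)	/* hot: at head */
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_add_tail(struct node *n, struct node *head)	/* cold: at tail */
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

int main(void)
{
	struct node head, hot = { .id = 1 }, cold = { .id = 2 };

	list_init(&head);
	list_add(&hot, &head);
	list_add_tail(&cold, &head);
	/* Consumers take head.next first, so the hot entry is handed out first. */
	printf("first handed out: %d\n", head.next->id);
	return 0;
}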
@@ -2516,6 +2541,10 @@ static void setup_zone_migrate_reserve(struct zone *zone)
                        continue;
                page = pfn_to_page(pfn);
 
+               /* Watch out for overlapping nodes */
+               if (page_to_nid(page) != zone_to_nid(zone))
+                       continue;
+
                /* Blocks with reserved pages will never free, skip them. */
                if (PageReserved(page))
                        continue;
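
This check, like the VM_BUG_ON() added to move_freepages() above, guards against layouts where nodes overlap within a pageblock: a pfn inside the block may resolve to a different node than the zone being scanned, and such pages must be skipped rather than reserved or moved. A small userspace model of the per-pfn node check follows; pfn_to_nid_model() is a toy lookup, not page_to_nid().

#include <stdio.h>

#define PAGEBLOCK_NR 8UL

static int pfn_to_nid_model(unsigned long pfn)
{
	return pfn < 0x1004 ? 0 : 1;	/* node boundary inside the block */
}

int main(void)
{
	unsigned long start = 0x1000, pfn;
	int zone_nid = 0;

	for (pfn = start; pfn < start + PAGEBLOCK_NR; pfn++) {
		if (pfn_to_nid_model(pfn) != zone_nid) {
			printf("pfn %#lx belongs to another node, skip\n", pfn);
			continue;
		}
		printf("pfn %#lx ok\n", pfn);
	}
	return 0;
}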
@@ -2740,7 +2769,7 @@ bad:
                if (dzone == zone)
                        break;
                kfree(zone_pcp(dzone, cpu));
-               zone_pcp(dzone, cpu) = NULL;
+               zone_pcp(dzone, cpu) = &boot_pageset[cpu];
        }
        return -ENOMEM;
 }
@@ -2755,7 +2784,7 @@ static inline void free_zone_pagesets(int cpu)
                /* Free per_cpu_pageset if it is slab allocated */
                if (pset != &boot_pageset[cpu])
                        kfree(pset);
-               zone_pcp(zone, cpu) = NULL;
+               zone_pcp(zone, cpu) = &boot_pageset[cpu];
        }
 }
 
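
Both the allocation-failure path above (the bad: label) and free_zone_pagesets() now reset zone_pcp() to the static boot_pageset for that CPU instead of leaving a NULL pointer behind, so code that later dereferences the per-cpu pageset finds a valid, if minimal, structure. The userspace sketch below shows the same "fall back to a static default, never NULL" pattern; struct pcp_stub and the function names are illustrative, not kernel API.

#include <stdio.h>
#include <stdlib.h>

struct pcp_stub { int count; };

static struct pcp_stub boot_pageset[1];		/* always-valid fallback */
static struct pcp_stub *zone_pcp = boot_pageset;	/* never left as NULL */

static int upgrade_pageset(void)
{
	struct pcp_stub *p = malloc(sizeof(*p));

	if (!p) {
		zone_pcp = boot_pageset;	/* fall back, do not leave NULL */
		return -1;
	}
	p->count = 0;
	zone_pcp = p;
	return 0;
}

static void downgrade_pageset(void)
{
	if (zone_pcp != boot_pageset)
		free(zone_pcp);
	zone_pcp = boot_pageset;		/* same rule on teardown */
}

int main(void)
{
	upgrade_pageset();
	downgrade_pageset();
	printf("count after teardown: %d\n", zone_pcp->count);	/* safe dereference */
	return 0;
}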
@@ -2925,7 +2954,7 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
  * was used and there are no special requirements, this is a convenient
  * alternative
  */
-int __meminit early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
        int i;
 
@@ -2936,10 +2965,33 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
                if (start_pfn <= pfn && pfn < end_pfn)
                        return early_node_map[i].nid;
        }
+       /* This is a memory hole */
+       return -1;
+}
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
 
+int __meminit early_pfn_to_nid(unsigned long pfn)
+{
+       int nid;
+
+       nid = __early_pfn_to_nid(pfn);
+       if (nid >= 0)
+               return nid;
+       /* just returns 0 */
        return 0;
 }
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+#ifdef CONFIG_NODES_SPAN_OTHER_NODES
+bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
+{
+       int nid;
+
+       nid = __early_pfn_to_nid(pfn);
+       if (nid >= 0 && nid != node)
+               return false;
+       return true;
+}
+#endif
 
 /* Basic iterator support to walk early_node_map[] */
 #define for_each_active_range_index_in_nid(i, nid) \
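
early_pfn_to_nid() is split here: __early_pfn_to_nid() does the early_node_map[] lookup and now reports a pfn that falls into a memory hole as -1, early_pfn_to_nid() keeps the old behaviour of defaulting to node 0, and the new early_pfn_in_nid() lets memmap initialisation skip pfns that really belong to another node while still claiming hole pfns for the node being set up. The following is a self-contained userspace model of that split; node_map[] and the model_* helpers are toy stand-ins, not the kernel's early_node_map[].

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start_pfn, end_pfn; int nid; };

static const struct range node_map[] = {
	{ 0x00000, 0x10000, 0 },
	{ 0x20000, 0x30000, 1 },	/* pfns 0x10000-0x1ffff are a hole */
};

static int model_early_pfn_to_nid(unsigned long pfn)	/* __early_pfn_to_nid */
{
	unsigned int i;

	for (i = 0; i < sizeof(node_map) / sizeof(node_map[0]); i++)
		if (node_map[i].start_pfn <= pfn && pfn < node_map[i].end_pfn)
			return node_map[i].nid;
	return -1;					/* memory hole */
}

static bool model_pfn_in_nid(unsigned long pfn, int node)	/* early_pfn_in_nid */
{
	int nid = model_early_pfn_to_nid(pfn);

	return nid < 0 || nid == node;		/* holes stay on 'node' */
}

int main(void)
{
	printf("0x25000 -> nid %d\n", model_early_pfn_to_nid(0x25000));	/* 1 */
	printf("hole claimed by node 0? %d\n", model_pfn_in_nid(0x18000, 0));	/* 1 */
	return 0;
}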
@@ -4064,7 +4116,7 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data contig_page_data = { .bdata = &bootmem_node_data[0] };
+struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
 EXPORT_SYMBOL(contig_page_data);
 #endif
 
@@ -4200,7 +4252,7 @@ void setup_per_zone_pages_min(void)
        for_each_zone(zone) {
                u64 tmp;
 
-               spin_lock_irqsave(&zone->lru_lock, flags);
+               spin_lock_irqsave(&zone->lock, flags);
                tmp = (u64)pages_min * zone->present_pages;
                do_div(tmp, lowmem_pages);
                if (is_highmem(zone)) {
@@ -4232,7 +4284,7 @@ void setup_per_zone_pages_min(void)
                zone->pages_low   = zone->pages_min + (tmp >> 2);
                zone->pages_high  = zone->pages_min + (tmp >> 1);
                setup_zone_migrate_reserve(zone);
-               spin_unlock_irqrestore(&zone->lru_lock, flags);
+               spin_unlock_irqrestore(&zone->lock, flags);
        }
 
        /* update totalreserve_pages */
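
Two things happen in setup_per_zone_pages_min(): the watermarks are now updated under zone->lock, the spinlock that actually protects the free lists these values gate, rather than zone->lru_lock; and each zone's share of pages_min is scaled by its fraction of lowmem, with pages_low and pages_high derived as min + 25% and min + 50% of that share. The arithmetic for a non-highmem zone is sketched below in plain userspace C, with a 64-bit multiply/divide standing in for do_div().

#include <stdint.h>
#include <stdio.h>

struct wmarks { unsigned long min, low, high; };

static struct wmarks zone_watermarks(unsigned long pages_min,
				     unsigned long zone_present,
				     unsigned long lowmem_pages)
{
	/* tmp = pages_min * zone->present_pages / lowmem_pages */
	uint64_t tmp = (uint64_t)pages_min * zone_present / lowmem_pages;
	struct wmarks w;

	w.min  = (unsigned long)tmp;			/* this zone's share of pages_min */
	w.low  = w.min + ((unsigned long)tmp >> 2);	/* min + 25% */
	w.high = w.min + ((unsigned long)tmp >> 1);	/* min + 50% */
	return w;
}

int main(void)
{
	struct wmarks w = zone_watermarks(1024, 262144, 524288);

	printf("min=%lu low=%lu high=%lu\n", w.min, w.low, w.high);
	return 0;
}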
@@ -4362,6 +4414,8 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
        if (!write || (ret == -EINVAL))
                return ret;
        for_each_zone(zone) {
+               if (!populated_zone(zone))
+                       continue;
                for_each_online_cpu(cpu) {
                        unsigned long  high;
                        high = zone->present_pages / percpu_pagelist_fraction;
@@ -4437,7 +4491,7 @@ void *__init alloc_large_system_hash(const char *tablename,
        do {
                size = bucketsize << log2qty;
                if (flags & HASH_EARLY)
-                       table = alloc_bootmem(size);
+                       table = alloc_bootmem_nopanic(size);
                else if (hashdist)
                        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
                else {
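
alloc_large_system_hash() already sits in a do/while loop that halves the table size and retries when an allocation fails; switching to alloc_bootmem_nopanic() lets an early (HASH_EARLY) failure reach that retry path instead of panicking inside the bootmem allocator. The sketch below reproduces the retry pattern in userspace; try_alloc() is a stand-in for the real allocators and its failure threshold is invented for illustration.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

static void *try_alloc(size_t size)
{
	/* Pretend anything above 1 MiB fails, to exercise the fallback path. */
	return size > (1UL << 20) ? NULL : malloc(size);
}

int main(void)
{
	unsigned long bucketsize = 64, log2qty = 20;	/* 64 MiB requested */
	unsigned long size;
	void *table = NULL;

	do {
		size = bucketsize << log2qty;
		table = try_alloc(size);	/* non-panicking: NULL on failure */
	} while (!table && size > PAGE_SIZE && --log2qty);

	printf("got %lu bytes (log2qty=%lu)\n", table ? size : 0, log2qty);
	free(table);
	return 0;
}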