static void bad_page(struct page *page)
{
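+ /*
+  * printk only honours a log level given at the start of a line, so
+  * each continuation line below carries its own KERN_EMERG prefix to
+  * keep the whole report at emergency loglevel.
+  */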
printk(KERN_EMERG "Bad page state in process '%s'\n"
- "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
- "Trying to fix it up, but a reboot is needed\n"
- "Backtrace:\n",
+ KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
+ KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
+ KERN_EMERG "Backtrace:\n",
current->comm, page, (int)(2*sizeof(unsigned long)),
(unsigned long)page->flags, page->mapping,
page_mapcount(page), page_count(page));
int reserved = 0;
arch_free_page(page, order);
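+ /*
+  * With mutex debugging, warn if any held lock sits inside the range
+  * being freed; page_address() is only meaningful for lowmem, so
+  * highmem pages are skipped.
+  */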
+ if (!PageHighMem(page))
+ mutex_debug_check_no_locks_freed(page_address(page),
+ PAGE_SIZE<<order);
#ifndef CONFIG_MMU
for (i = 1 ; i < (1 << order) ; ++i)
mark = (*z)->pages_high;
if (!zone_watermark_ok(*z, order, mark,
classzone_idx, alloc_flags))
- continue;
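+ /*
+  * Below the watermark: when zone_reclaim_mode is enabled, try to
+  * reclaim pages within this zone before falling back to the next
+  * (possibly off-node) zone.
+  */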
+ if (!zone_reclaim_mode ||
+ !zone_reclaim(*z, gfp_mask, order))
+ continue;
}
page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
*
* The caller may dip into page reserves a bit more if the caller
* cannot run direct reclaim, or if the caller has realtime scheduling
- * policy.
+ * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
+ * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
*/
alloc_flags = ALLOC_WMARK_MIN;
if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
prev_node = local_node;
nodes_clear(used_mask);
while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
+ int distance = node_distance(local_node, node);
+
+ /*
+ * If another node is sufficiently far away then it is better
+ * to reclaim pages in a zone before going off node.
+ */
+ if (distance > RECLAIM_DISTANCE)
+ zone_reclaim_mode = 1;
+
/*
* We don't want to pressure a particular node, so add a
* penalty to the first node in the same distance group to
* make the selection round-robin.
*/
- if (node_distance(local_node, node) !=
- node_distance(local_node, prev_node))
+
+ if (distance != node_distance(local_node, prev_node))
node_load[node] += load;
prev_node = node;
load--;
* up by free_all_bootmem() once the early boot process is
* done. Non-atomic initialization, single-pass.
*/
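+/*
+ * __meminit rather than __devinit: with memory hotplug these
+ * initialisation paths can be needed again after boot, so they are
+ * discarded only when CONFIG_MEMORY_HOTPLUG is not configured.
+ */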
-void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
+void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn)
{
struct page *page;
unsigned long end_pfn = start_pfn + size;
unsigned long pfn;
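+ /*
+  * The mem_map need not be virtually contiguous (e.g. with sparsemem),
+  * so look the page up via pfn_to_page() on every iteration rather
+  * than advancing a struct page pointer.
+  */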
- for (pfn = start_pfn; pfn < end_pfn; pfn++, page++) {
+ for (pfn = start_pfn; pfn < end_pfn; pfn++) {
if (!early_pfn_valid(pfn))
continue;
page = pfn_to_page(pfn);
memmap_init_zone((size), (nid), (zone), (start_pfn))
#endif
-static int __devinit zone_batchsize(struct zone *zone)
+static int __meminit zone_batchsize(struct zone *zone)
{
int batch;
* Dynamically allocate memory for the
* per cpu pageset array in struct zone.
*/
-static int __devinit process_zones(int cpu)
+static int __meminit process_zones(int cpu)
{
struct zone *zone, *dzone;
}
}
-static int __devinit pageset_cpuup_callback(struct notifier_block *nfb,
+static int __meminit pageset_cpuup_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
#endif
-static __devinit
+static __meminit
void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
int i;
init_waitqueue_head(zone->wait_table + i);
}
-static __devinit void zone_pcp_init(struct zone *zone)
+static __meminit void zone_pcp_init(struct zone *zone)
{
int cpu;
unsigned long batch = zone_batchsize(zone);
zone->name, zone->present_pages, batch);
}
-static __devinit void init_currently_empty_zone(struct zone *zone,
+static __meminit void init_currently_empty_zone(struct zone *zone,
unsigned long zone_start_pfn, unsigned long size)
{
struct pglist_data *pgdat = zone->zone_pgdat;