X-Git-Url: https://git.karo-electronics.de/?a=blobdiff_plain;f=mm%2Fvmscan.c;h=135bf8ca96ee60ac78783caac94fa30f2bfabfc8;hb=254ce8dc882f8d69e5d49ed4807c94a61976fb15;hp=a740778f688da277d35eb0d15a60168257dbe42e;hpb=2e66fc41169c90d93b7811caf7e7822de6aa2259;p=mv-sheeva.git

diff --git a/mm/vmscan.c b/mm/vmscan.c
index a740778f688..135bf8ca96e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -70,7 +70,7 @@ struct scan_control {
 	unsigned int priority;
 
 	/* This context's GFP mask */
-	unsigned int gfp_mask;
+	gfp_t gfp_mask;
 
 	int may_writepage;
 
@@ -186,7 +186,7 @@ EXPORT_SYMBOL(remove_shrinker);
  *
  * Returns the number of slab objects which we shrunk.
  */
-static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
+static int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 			unsigned long lru_pages)
 {
 	struct shrinker *shrinker;
@@ -417,7 +417,9 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 		 * Anonymous process memory has backing store?
 		 * Try to allocate it some swap space here.
 		 */
-		if (PageAnon(page) && !PageSwapCache(page) && sc->may_swap) {
+		if (PageAnon(page) && !PageSwapCache(page)) {
+			if (!sc->may_swap)
+				goto keep_locked;
 			if (!add_to_swap(page))
 				goto activate_locked;
 		}
@@ -511,14 +513,15 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 		 * PageDirty _after_ making sure that the page is freeable and
 		 * not in use by anybody. 	(pagecache + us == 2)
 		 */
-		if (page_count(page) != 2 || PageDirty(page)) {
-			write_unlock_irq(&mapping->tree_lock);
-			goto keep_locked;
-		}
+		if (unlikely(page_count(page) != 2))
+			goto cannot_free;
+		smp_rmb();
+		if (unlikely(PageDirty(page)))
+			goto cannot_free;
 
 #ifdef CONFIG_SWAP
 		if (PageSwapCache(page)) {
-			swp_entry_t swap = { .val = page->private };
+			swp_entry_t swap = { .val = page_private(page) };
 			__delete_from_swap_cache(page);
 			write_unlock_irq(&mapping->tree_lock);
 			swap_free(swap);
@@ -538,6 +541,10 @@ free_it:
 			__pagevec_release_nonlru(&freed_pvec);
 		continue;
 
+cannot_free:
+		write_unlock_irq(&mapping->tree_lock);
+		goto keep_locked;
+
 activate_locked:
 		SetPageActive(page);
 		pgactivate++;
@@ -921,7 +928,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
+int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 {
 	int priority;
 	int ret = 0;
@@ -1258,9 +1265,9 @@ void wakeup_kswapd(struct zone *zone, int order)
 		pgdat->kswapd_max_order = order;
 	if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
 		return;
-	if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
+	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
-	wake_up_interruptible(&zone->zone_pgdat->kswapd_wait);
+	wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
 #ifdef CONFIG_PM
@@ -1333,7 +1340,7 @@ module_init(kswapd_init)
 /*
  * Try to free up some pages from this zone through reclaim.
  */
-int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
+int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 {
	struct scan_control sc;
	int nr_pages = 1 << order;
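
Notes on the gfp_t hunks (scan_control, shrink_slab, try_to_free_pages,
zone_reclaim): these convert raw "unsigned int" GFP masks to the gfp_t
typedef, a sparse-annotated integer type. Under "make C=1" the checker
treats gfp_t as a distinct restricted type, so mixing an allocation mask
with an ordinary integer is reported, while a normal gcc build is
byte-for-byte unaffected. The stand-alone file below is a hypothetical
reduction of that pattern; shrink_slab_demo is an invented name, and the
0xd0 value for GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS in kernels
of this vintage) is illustrative, not taken from the patch.

/* gfp_sketch.c - minimal reduction of the gfp_t typing pattern. */
#ifdef __CHECKER__			/* defined when sparse runs */
# define __bitwise	__attribute__((bitwise))
# define __force	__attribute__((force))
#else
# define __bitwise			/* plain compile: no-ops */
# define __force
#endif

typedef unsigned int __bitwise gfp_t;

#define GFP_KERNEL ((__force gfp_t)0xd0u)	/* illustrative value */

/* Mirrors the signature change made to shrink_slab() above. */
static int shrink_slab_demo(unsigned long scanned, gfp_t gfp_mask,
			    unsigned long lru_pages)
{
	(void)scanned; (void)gfp_mask; (void)lru_pages;
	return 0;
}

int main(void)
{
	shrink_slab_demo(0, GFP_KERNEL, 0);	/* ok */
	/* shrink_slab_demo(0, 0xd0u, 0);	   sparse: incorrect type */
	return 0;
}

Compiled with gcc the annotations vanish and behavior is identical;
run through sparse, the commented-out call passing a bare 0xd0u would
be flagged, which is exactly the class of mistake the conversion in
this diff is meant to catch.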
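
Notes on the reordered freeability test in shrink_list(): the old code
tested page_count() and PageDirty() in one expression; the new code
splits them and inserts smp_rmb(). With mapping->tree_lock held, a
page_count() of exactly 2 means only the page cache and this caller
reference the page, and the read barrier keeps the PageDirty() re-check
ordered after the reference-count check, so a user that dirtied the
page before dropping its last reference is still observed. Below is a
minimal user-space sketch of the same check-then-recheck shape, with
C11 atomics as stand-ins; struct page_sketch, can_free, and the acquire
fence standing in for smp_rmb() are all hypothetical, not kernel API.

/* cannot_free_sketch.c - toy model of the refcount/dirty ordering. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct page_sketch {
	atomic_int  count;		/* stand-in for page_count() */
	atomic_bool dirty;		/* stand-in for PageDirty()  */
};

static bool can_free(struct page_sketch *p)
{
	/* "pagecache + us == 2": no third party holds a reference. */
	if (atomic_load_explicit(&p->count, memory_order_relaxed) != 2)
		return false;			/* cannot_free */
	/* Rough analogue of smp_rmb(): the dirty re-check must not be
	 * reordered before the reference-count check. */
	atomic_thread_fence(memory_order_acquire);
	if (atomic_load_explicit(&p->dirty, memory_order_relaxed))
		return false;			/* cannot_free */
	return true;				/* safe to detach and free */
}

int main(void)
{
	struct page_sketch p;
	atomic_init(&p.count, 2);
	atomic_init(&p.dirty, false);
	printf("freeable: %s\n", can_free(&p) ? "yes" : "no");
	return 0;
}

The cannot_free label added by the diff serves the same purpose as the
two early returns here: both failure paths funnel through one exit that
drops the tree_lock before keeping the page locked on the list.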