X-Git-Url: https://git.karo-electronics.de/?a=blobdiff_plain;f=mm%2Fswap.c;h=91b7e2026f696718d3a51b49d40dd660ea308047;hb=674539115cc88473f623581e1d53c0e2ecef2179;hp=ee6d71ccfa56fe77173bc7806797b5fc01ec1112;hpb=5367f2d67c7d0bf1faae90e6e7b4e2ac3c9b5e0f;p=mv-sheeva.git

diff --git a/mm/swap.c b/mm/swap.c
index ee6d71ccfa5..91b7e2026f6 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,19 +34,22 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
-void put_page(struct page *page)
+static void put_compound_page(struct page *page)
 {
-	if (unlikely(PageCompound(page))) {
-		page = (struct page *)page_private(page);
-		if (put_page_testzero(page)) {
-			void (*dtor)(struct page *page);
+	page = (struct page *)page_private(page);
+	if (put_page_testzero(page)) {
+		void (*dtor)(struct page *page);
 
-			dtor = (void (*)(struct page *))page[1].mapping;
-			(*dtor)(page);
-		}
-		return;
+		dtor = (void (*)(struct page *))page[1].lru.next;
+		(*dtor)(page);
 	}
-	if (put_page_testzero(page))
+}
+
+void put_page(struct page *page)
+{
+	if (unlikely(PageCompound(page)))
+		put_compound_page(page);
+	else if (put_page_testzero(page))
 		__page_cache_release(page);
 }
 EXPORT_SYMBOL(put_page);
@@ -174,25 +177,50 @@ void lru_add_drain(void)
 	put_cpu();
 }
 
+#ifdef CONFIG_NUMA
+static void lru_add_drain_per_cpu(void *dummy)
+{
+	lru_add_drain();
+}
+
+/*
+ * Returns 0 for success
+ */
+int lru_add_drain_all(void)
+{
+	return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+}
+
+#else
+
+/*
+ * Returns 0 for success
+ */
+int lru_add_drain_all(void)
+{
+	lru_add_drain();
+	return 0;
+}
+#endif
+
 /*
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs.  But it gets used by networking.
  */
 void fastcall __page_cache_release(struct page *page)
 {
-	unsigned long flags;
-	struct zone *zone = page_zone(page);
+	if (PageLRU(page)) {
+		unsigned long flags;
+		struct zone *zone = page_zone(page);
 
-	spin_lock_irqsave(&zone->lru_lock, flags);
-	if (TestClearPageLRU(page))
+		spin_lock_irqsave(&zone->lru_lock, flags);
+		BUG_ON(!PageLRU(page));
+		__ClearPageLRU(page);
 		del_page_from_lru(zone, page);
-	if (page_count(page) != 0)
-		page = NULL;
-	spin_unlock_irqrestore(&zone->lru_lock, flags);
-	if (page)
-		free_hot_page(page);
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	}
+	free_hot_page(page);
 }
-
 EXPORT_SYMBOL(__page_cache_release);
 
 /*
@@ -216,28 +244,40 @@ void release_pages(struct page **pages, int nr, int cold)
 	pagevec_init(&pages_to_free, cold);
 	for (i = 0; i < nr; i++) {
 		struct page *page = pages[i];
-		struct zone *pagezone;
+
+		if (unlikely(PageCompound(page))) {
+			if (zone) {
+				spin_unlock_irq(&zone->lru_lock);
+				zone = NULL;
+			}
+			put_compound_page(page);
+			continue;
+		}
 
 		if (!put_page_testzero(page))
 			continue;
 
-		pagezone = page_zone(page);
-		if (pagezone != zone) {
-			if (zone)
-				spin_unlock_irq(&zone->lru_lock);
-			zone = pagezone;
-			spin_lock_irq(&zone->lru_lock);
-		}
-		if (TestClearPageLRU(page))
+		if (PageLRU(page)) {
+			struct zone *pagezone = page_zone(page);
+			if (pagezone != zone) {
+				if (zone)
+					spin_unlock_irq(&zone->lru_lock);
+				zone = pagezone;
+				spin_lock_irq(&zone->lru_lock);
+			}
+			BUG_ON(!PageLRU(page));
+			__ClearPageLRU(page);
 			del_page_from_lru(zone, page);
-		if (page_count(page) == 0) {
-			if (!pagevec_add(&pages_to_free, page)) {
+		}
+
+		if (!pagevec_add(&pages_to_free, page)) {
+			if (zone) {
 				spin_unlock_irq(&zone->lru_lock);
-				__pagevec_free(&pages_to_free);
-				pagevec_reinit(&pages_to_free);
-				zone = NULL;	/* No lock is held */
+				zone = NULL;
 			}
-		}
+			__pagevec_free(&pages_to_free);
+			pagevec_reinit(&pages_to_free);
+		}
 	}
 	if (zone)
 		spin_unlock_irq(&zone->lru_lock);
@@ -305,8 +345,8 @@ void __pagevec_lru_add(struct pagevec *pvec)
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
-		if (TestSetPageLRU(page))
-			BUG();
+		BUG_ON(PageLRU(page));
+		SetPageLRU(page);
 		add_page_to_inactive_list(zone, page);
 	}
 	if (zone)
@@ -332,10 +372,10 @@ void __pagevec_lru_add_active(struct pagevec *pvec)
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
-		if (TestSetPageLRU(page))
-			BUG();
-		if (TestSetPageActive(page))
-			BUG();
+		BUG_ON(PageLRU(page));
+		SetPageLRU(page);
+		BUG_ON(PageActive(page));
+		SetPageActive(page);
 		add_page_to_active_list(zone, page);
 	}
 	if (zone)
@@ -355,7 +395,8 @@ void pagevec_strip(struct pagevec *pvec)
 		struct page *page = pvec->pages[i];
 
 		if (PagePrivate(page) && !TestSetPageLocked(page)) {
-			try_to_release_page(page, 0);
+			if (PagePrivate(page))
+				try_to_release_page(page, 0);
 			unlock_page(page);
 		}
 	}
@@ -384,6 +425,8 @@ unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
 	return pagevec_count(pvec);
 }
 
+EXPORT_SYMBOL(pagevec_lookup);
+
 unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
 		pgoff_t *index, int tag, unsigned nr_pages)
 {
@@ -449,13 +492,34 @@ void percpu_counter_mod(struct percpu_counter *fbc, long amount)
 	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
 		spin_lock(&fbc->lock);
 		fbc->count += count;
+		*pcount = 0;
 		spin_unlock(&fbc->lock);
-		count = 0;
+	} else {
+		*pcount = count;
 	}
-	*pcount = count;
 	put_cpu();
 }
 EXPORT_SYMBOL(percpu_counter_mod);
+
+/*
+ * Add up all the per-cpu counts, return the result.  This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+long percpu_counter_sum(struct percpu_counter *fbc)
+{
+	long ret;
+	int cpu;
+
+	spin_lock(&fbc->lock);
+	ret = fbc->count;
+	for_each_cpu(cpu) {
+		long *pcount = per_cpu_ptr(fbc->counters, cpu);
+		ret += *pcount;
+	}
+	spin_unlock(&fbc->lock);
+	return ret < 0 ? 0 : ret;
+}
+EXPORT_SYMBOL(percpu_counter_sum);
 #endif
 
 /*
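
The percpu_counter hunk at the end of this diff is a batching trade-off: percpu_counter_mod() keeps a small per-CPU residue and only folds it into the shared fbc->count, under fbc->lock, once it reaches FBC_BATCH, while the new percpu_counter_sum() pays for an exact answer by walking every CPU's residue under the lock. The following is a minimal userspace sketch of that same scheme, not kernel code; toy_counter, toy_counter_mod, toy_counter_sum, TOY_BATCH and TOY_NCPUS are invented names, and a pthread mutex stands in for the kernel spinlock.

/* toy_counter.c - hypothetical userspace sketch of the batching idea behind
 * percpu_counter_mod()/percpu_counter_sum(); not the kernel API. */
#include <pthread.h>
#include <stdio.h>

#define TOY_NCPUS 4	/* stand-in for the number of CPUs */
#define TOY_BATCH 32	/* stand-in for FBC_BATCH */

struct toy_counter {
	pthread_mutex_t lock;
	long count;		/* shared, approximate total */
	long local[TOY_NCPUS];	/* per-"CPU" residue */
};

/* Fold the local residue into the shared count only when it grows large,
 * mirroring the batching in percpu_counter_mod(). */
static void toy_counter_mod(struct toy_counter *c, int cpu, long amount)
{
	long count = c->local[cpu] + amount;

	if (count >= TOY_BATCH || count <= -TOY_BATCH) {
		pthread_mutex_lock(&c->lock);
		c->count += count;
		c->local[cpu] = 0;	/* cleared under the lock, as in the hunk */
		pthread_mutex_unlock(&c->lock);
	} else {
		c->local[cpu] = count;
	}
}

/* Exact but slow total: add every per-CPU residue under the lock, and clamp
 * to zero, like percpu_counter_sum(). */
static long toy_counter_sum(struct toy_counter *c)
{
	long ret;
	int cpu;

	pthread_mutex_lock(&c->lock);
	ret = c->count;
	for (cpu = 0; cpu < TOY_NCPUS; cpu++)
		ret += c->local[cpu];
	pthread_mutex_unlock(&c->lock);
	return ret < 0 ? 0 : ret;
}

int main(void)
{
	struct toy_counter c = { .lock = PTHREAD_MUTEX_INITIALIZER };

	toy_counter_mod(&c, 0, 10);	/* stays in the per-CPU residue */
	toy_counter_mod(&c, 1, 40);	/* exceeds TOY_BATCH, folded in */
	printf("approximate: %ld, exact: %ld\n", c.count, toy_counter_sum(&c));
	return 0;
}

Clearing the residue while still holding the lock, which is what moving *pcount = 0 inside the locked region does in the hunk, keeps a concurrent percpu_counter_sum() from seeing both the folded amount and the not-yet-cleared residue and double-counting it.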
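
The release_pages() hunk above also shows a reusable locking shape: zone->lru_lock is taken lazily, only once a page actually needs LRU manipulation, kept across consecutive pages that belong to the same zone, switched when the zone changes, and dropped entirely before put_compound_page(), which must not run under it. A hypothetical, generic userspace sketch of that pattern follows; struct owner, struct item, process_batch() and the needs_* flags are invented names, with a pthread mutex standing in for the zone spinlock.

/* lock_batching.c - hypothetical sketch of the lock-batching shape used by
 * release_pages(): hold the current owner's lock across consecutive items,
 * switch it only when the owner changes, and drop it before any step that
 * must run unlocked. */
#include <pthread.h>
#include <stddef.h>

struct owner {
	pthread_mutex_t lock;
	/* per-owner state (the zone's LRU lists in the kernel case) */
};

struct item {
	struct owner *owner;
	int needs_owner_work;		/* analogous to PageLRU()      */
	int needs_unlocked_work;	/* analogous to PageCompound() */
};

static void do_owner_work(struct item *it)    { (void)it; /* e.g. del_page_from_lru() */ }
static void do_unlocked_work(struct item *it) { (void)it; /* e.g. put_compound_page() */ }

static void process_batch(struct item **items, int nr)
{
	struct owner *held = NULL;	/* whose lock we currently hold */
	int i;

	for (i = 0; i < nr; i++) {
		struct item *it = items[i];

		if (it->needs_unlocked_work) {
			if (held) {	/* drop the lock before unlocked work */
				pthread_mutex_unlock(&held->lock);
				held = NULL;
			}
			do_unlocked_work(it);
			continue;
		}

		if (it->needs_owner_work) {
			if (it->owner != held) {	/* switch only on owner change */
				if (held)
					pthread_mutex_unlock(&held->lock);
				held = it->owner;
				pthread_mutex_lock(&held->lock);
			}
			do_owner_work(it);
		}
	}
	if (held)
		pthread_mutex_unlock(&held->lock);
}

int main(void)
{
	struct owner a = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct item x = { .owner = &a, .needs_owner_work = 1 };
	struct item *batch[] = { &x };

	process_batch(batch, 1);
	return 0;
}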