memcg: clean up existing move charge code

diff --git a/mm/swapfile.c b/mm/swapfile.c
index b1cd120607230b0770c35e0823d6389782ae734a..21b56945c5d22e81b756381481e18937e2ce3269 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -667,10 +667,10 @@ int try_to_free_swap(struct page *page)
         * original page might be freed under memory pressure, then
         * later read back in from swap, now with the wrong data.
         *
-        * Hibernation clears bits from gfp_allowed_mask to prevent
-        * memory reclaim from writing to disk, so check that here.
+        * Hibernation suspends storage while it is writing the image
+        * to disk, so check that here.
         */
-       if (!(gfp_allowed_mask & __GFP_IO))
+       if (pm_suspended_storage())
                return 0;
 
        delete_from_swap_cache(page);
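
Note on the hunk above: the old test inferred "hibernation is writing the image" from the __GFP_IO bit that the PM core clears in gfp_allowed_mask; the new code asks a dedicated helper instead, keeping the policy in one place. Roughly the shape of that helper, sketched from the same gfp_allowed_mask idea (the authoritative definition lives in include/linux/gfp.h):

static inline bool pm_suspended_storage(void)
{
	/* Storage counts as suspended unless both I/O and filesystem
	 * operations are currently allowed by the PM core. */
	if ((gfp_allowed_mask & (__GFP_FS | __GFP_IO)) == (__GFP_FS | __GFP_IO))
		return false;
	return true;
}
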
@@ -847,12 +847,13 @@ unsigned int count_swap_pages(int type, int free)
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, swp_entry_t entry, struct page *page)
 {
-       struct mem_cgroup *ptr;
+       struct mem_cgroup *memcg;
        spinlock_t *ptl;
        pte_t *pte;
        int ret = 1;
 
-       if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) {
+       if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
+                                        GFP_KERNEL, &memcg)) {
                ret = -ENOMEM;
                goto out_nolock;
        }
@@ -860,7 +861,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
                if (ret > 0)
-                       mem_cgroup_cancel_charge_swapin(ptr);
+                       mem_cgroup_cancel_charge_swapin(memcg);
                ret = 0;
                goto out;
        }
@@ -871,7 +872,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
        set_pte_at(vma->vm_mm, addr, pte,
                   pte_mkold(mk_pte(page, vma->vm_page_prot)));
        page_add_anon_rmap(page, vma, addr);
-       mem_cgroup_commit_charge_swapin(page, ptr);
+       mem_cgroup_commit_charge_swapin(page, memcg);
        swap_free(entry);
        /*
         * Move the page to the active list so it is not
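
Note: the ptr -> memcg rename in this hunk is cosmetic, but the surrounding calls are worth spelling out, since they form a try/commit/cancel protocol: the charge is reserved before taking the pte lock, then either committed (the swap entry was still mapped as expected) or cancelled (another thread got there first). A condensed sketch of the pattern, with locking elided and pte_still_matches standing in for the pte_same() check above:

	struct mem_cgroup *memcg;

	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
		return -ENOMEM;			/* charge refused up front */

	if (!pte_still_matches)			/* raced: entry already handled */
		mem_cgroup_cancel_charge_swapin(memcg);
	else
		mem_cgroup_commit_charge_swapin(page, memcg);
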
@@ -931,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
-               if (unlikely(pmd_trans_huge(*pmd)))
-                       continue;
-               if (pmd_none_or_clear_bad(pmd))
+               if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                        continue;
                ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
                if (ret)
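
Note: pmd_none_or_trans_huge_or_clear_bad() folds the two earlier tests into one helper that samples the pmd a single time. That matters because a transparent huge pmd can be split under us here (mmap_sem is only held for read), so testing pmd_trans_huge() and then pmd_none_or_clear_bad() against two separate reads could trip the pmd_bad() report on a transiently unstable value. Roughly the helper's shape (a sketch; the real one lives in the asm-generic pgtable header):

static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = *pmd;	/* one snapshot for all three checks */

	barrier();
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
		return 1;	/* nothing to walk at the pte level */
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
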
@@ -2104,7 +2103,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                        p->flags |= SWP_SOLIDSTATE;
                        p->cluster_next = 1 + (random32() % p->highest_bit);
                }
-               if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
+               if ((swap_flags & SWAP_FLAG_DISCARD) && discard_swap(p) == 0)
                        p->flags |= SWP_DISCARDABLE;
        }
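
Note: the reordering above relies on && short-circuiting left to right. Before, discard_swap(p), which issues discards over the whole swap area and can be slow, ran on every swapon of a nonrotational device, and the flag was only consulted afterwards; now the flag gates the call:

	/* discard_swap(p) is only attempted when userspace asked for it */
	if ((swap_flags & SWAP_FLAG_DISCARD) && discard_swap(p) == 0)
		p->flags |= SWP_DISCARDABLE;
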
 
@@ -2288,58 +2287,6 @@ int swapcache_prepare(swp_entry_t entry)
        return __swap_duplicate(entry, SWAP_HAS_CACHE);
 }
 
-/*
- * swap_lock prevents swap_map being freed. Don't grab an extra
- * reference on the swaphandle, it doesn't matter if it becomes unused.
- */
-int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
-{
-       struct swap_info_struct *si;
-       int our_page_cluster = page_cluster;
-       pgoff_t target, toff;
-       pgoff_t base, end;
-       int nr_pages = 0;
-
-       if (!our_page_cluster)  /* no readahead */
-               return 0;
-
-       si = swap_info[swp_type(entry)];
-       target = swp_offset(entry);
-       base = (target >> our_page_cluster) << our_page_cluster;
-       end = base + (1 << our_page_cluster);
-       if (!base)              /* first page is swap header */
-               base++;
-
-       spin_lock(&swap_lock);
-       if (end > si->max)      /* don't go beyond end of map */
-               end = si->max;
-
-       /* Count contiguous allocated slots above our target */
-       for (toff = target; ++toff < end; nr_pages++) {
-               /* Don't read in free or bad pages */
-               if (!si->swap_map[toff])
-                       break;
-               if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
-                       break;
-       }
-       /* Count contiguous allocated slots below our target */
-       for (toff = target; --toff >= base; nr_pages++) {
-               /* Don't read in free or bad pages */
-               if (!si->swap_map[toff])
-                       break;
-               if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
-                       break;
-       }
-       spin_unlock(&swap_lock);
-
-       /*
-        * Indicate starting offset, and return number of pages to get:
-        * if only 1, say 0, since there's then no readahead to be done.
-        */
-       *offset = ++toff;
-       return nr_pages? ++nr_pages: 0;
-}
-
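
Note on the block removed above: valid_swaphandles() computed the swap readahead window, rounding the faulting offset down to a 2^page_cluster boundary and counting contiguous in-use slots on either side of the target; with this helper gone, the window logic presumably moves to the swapin readahead path itself. The alignment arithmetic it used, as a standalone model (target and page_cluster as in the removed code):

	pgoff_t base = (target >> page_cluster) << page_cluster;
	pgoff_t end  = base + (1UL << page_cluster);	/* exclusive bound */

	if (!base)
		base++;		/* slot 0 holds the swap header; never read it */
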
 /*
  * add_swap_count_continuation - called when a swap count is duplicated
  * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
@@ -2426,9 +2373,9 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
                if (!(count & COUNT_CONTINUED))
                        goto out;
 
-               map = kmap_atomic(list_page, KM_USER0) + offset;
+               map = kmap_atomic(list_page) + offset;
                count = *map;
-               kunmap_atomic(map, KM_USER0);
+               kunmap_atomic(map);
 
                /*
                 * If this continuation count now has some space in it,
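
Note: this and every remaining kmap/kunmap hunk below are the same mechanical conversion; the KM_USER0 slot argument disappeared when atomic kmaps became stack-based per CPU, so callers no longer pick a slot but must strictly nest map/unmap pairs. The new pairing, with page, offset and val as placeholders:

	unsigned char *map;

	map = kmap_atomic(page);	/* was: kmap_atomic(page, KM_USER0) */
	map[offset] = val;
	kunmap_atomic(map);		/* was: kunmap_atomic(map, KM_USER0) */
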
@@ -2471,7 +2418,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
 
        offset &= ~PAGE_MASK;
        page = list_entry(head->lru.next, struct page, lru);
-       map = kmap_atomic(page, KM_USER0) + offset;
+       map = kmap_atomic(page) + offset;
 
        if (count == SWAP_MAP_MAX)      /* initial increment from swap_map */
                goto init_map;          /* jump over SWAP_CONT_MAX checks */
@@ -2481,26 +2428,26 @@ static bool swap_count_continued(struct swap_info_struct *si,
                 * Think of how you add 1 to 999
                 */
                while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                        page = list_entry(page->lru.next, struct page, lru);
                        BUG_ON(page == head);
-                       map = kmap_atomic(page, KM_USER0) + offset;
+                       map = kmap_atomic(page) + offset;
                }
                if (*map == SWAP_CONT_MAX) {
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                        page = list_entry(page->lru.next, struct page, lru);
                        if (page == head)
                                return false;   /* add count continuation */
-                       map = kmap_atomic(page, KM_USER0) + offset;
+                       map = kmap_atomic(page) + offset;
 init_map:              *map = 0;               /* we didn't zero the page */
                }
                *map += 1;
-               kunmap_atomic(map, KM_USER0);
+               kunmap_atomic(map);
                page = list_entry(page->lru.prev, struct page, lru);
                while (page != head) {
-                       map = kmap_atomic(page, KM_USER0) + offset;
+                       map = kmap_atomic(page) + offset;
                        *map = COUNT_CONTINUED;
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                        page = list_entry(page->lru.prev, struct page, lru);
                }
                return true;                    /* incremented */
@@ -2511,22 +2458,22 @@ init_map:               *map = 0;               /* we didn't zero the page */
                 */
                BUG_ON(count != COUNT_CONTINUED);
                while (*map == COUNT_CONTINUED) {
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                        page = list_entry(page->lru.next, struct page, lru);
                        BUG_ON(page == head);
-                       map = kmap_atomic(page, KM_USER0) + offset;
+                       map = kmap_atomic(page) + offset;
                }
                BUG_ON(*map == 0);
                *map -= 1;
                if (*map == 0)
                        count = 0;
-               kunmap_atomic(map, KM_USER0);
+               kunmap_atomic(map);
                page = list_entry(page->lru.prev, struct page, lru);
                while (page != head) {
-                       map = kmap_atomic(page, KM_USER0) + offset;
+                       map = kmap_atomic(page) + offset;
                        *map = SWAP_CONT_MAX | count;
                        count = COUNT_CONTINUED;
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                        page = list_entry(page->lru.prev, struct page, lru);
                }
                return count == COUNT_CONTINUED;
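
Note: as the "add 1 to 999" comment hints, the continuation pages behave like the digits of a fixed-base counter, least significant page first: incrementing propagates a carry upward until a digit has room, and decrementing borrows the same way. A hypothetical standalone model of the increment (DIGIT_MAX standing in for SWAP_CONT_MAX):

#include <stdbool.h>

#define DIGIT_MAX 0x7f		/* stand-in for SWAP_CONT_MAX */

static bool count_increment(unsigned char *digit, int ndigits)
{
	for (int i = 0; i < ndigits; i++) {
		if (digit[i] < DIGIT_MAX) {
			digit[i]++;	/* room in this digit: done */
			return true;
		}
		digit[i] = 0;		/* 999 -> 000, carry onward */
	}
	return false;	/* overflowed: a new continuation page is needed */
}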