pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret)
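The two separate tests above are folded into one helper that looks at the pmd entry a single time. As a rough, self-contained sketch of the idea (pmd_t and the predicates below are stand-in stubs for illustration, not the kernel's asm-generic implementation), the entry is checked for none, transparent-huge and bad in that order, and only a genuinely bad entry is cleared:

    #include <stdio.h>
    #include <stdbool.h>

    /* Stand-in pmd type and predicates; the real ones are arch-specific. */
    typedef struct { unsigned long val; } pmd_t;
    #define HUGE_BIT 0x1UL
    #define BAD_BIT  0x2UL

    static bool pmd_none_x(pmd_t p)       { return p.val == 0; }
    static bool pmd_trans_huge_x(pmd_t p) { return p.val & HUGE_BIT; }
    /* Pretend a huge entry also trips the naive "bad" test, the way a THP pmd
     * can look bad to a check that only expects regular page-table pointers. */
    static bool pmd_bad_x(pmd_t p)        { return p.val & (HUGE_BIT | BAD_BIT); }
    static void pmd_clear_bad_x(pmd_t *p) { p->val = 0; puts("  cleared entry"); }

    /* One read of the entry, checks ordered none -> huge -> bad, clear only when bad. */
    static int none_or_trans_huge_or_clear_bad(pmd_t *pmd)
    {
        pmd_t v = *pmd;                 /* single read of the entry */

        if (pmd_none_x(v) || pmd_trans_huge_x(v))
            return 1;                   /* skip: nothing to walk at the pte level */
        if (pmd_bad_x(v)) {
            pmd_clear_bad_x(pmd);       /* only a genuinely bad entry is cleared */
            return 1;
        }
        return 0;                       /* ordinary page-table pmd: walk it */
    }

    int main(void)
    {
        pmd_t huge = { HUGE_BIT }, bad = { BAD_BIT }, normal = { 0x1000 };

        printf("huge:   skip=%d\n", none_or_trans_huge_or_clear_bad(&huge));   /* skipped, not cleared */
        printf("bad:    skip=%d\n", none_or_trans_huge_or_clear_bad(&bad));    /* cleared */
        printf("normal: skip=%d\n", none_or_trans_huge_or_clear_bad(&normal)); /* walked */
        return 0;
    }

In the old code each test dereferenced *pmd on its own; folding them into one helper bases the whole decision on a single read of the entry, with the huge-pmd case handled before the bad-pmd clearing can touch it.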
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ BUG_ON(!current->mm);
+
pathname = getname(specialfile);
err = PTR_ERR(pathname);
if (IS_ERR(pathname))
spin_unlock(&swap_lock);
goto out_dput;
}
- if (!security_vm_enough_memory(p->pages))
+ if (!security_vm_enough_memory_mm(current->mm, p->pages))
vm_unacct_memory(p->pages);
else {
err = -ENOMEM;
p->flags |= SWP_SOLIDSTATE;
p->cluster_next = 1 + (random32() % p->highest_bit);
}
- if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
+ if ((swap_flags & SWAP_FLAG_DISCARD) && discard_swap(p) == 0)
p->flags |= SWP_DISCARDABLE;
}
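Reordering the operands leans on C's short-circuiting &&: the cheap flag test now guards the call, so discard_swap(), which issues discards against the device, only runs when the caller actually asked for SWAP_FLAG_DISCARD. A minimal stand-alone illustration (discard_swap() here is a stub that just counts calls, and the flag value is only for the demo):

    #include <stdio.h>

    #define SWAP_FLAG_DISCARD 0x10000   /* demo value only */

    static int discard_calls;

    /* Stub for the expensive device-wide discard; just counts invocations. */
    static int discard_swap(void)
    {
        discard_calls++;
        return 0;                       /* pretend the discard succeeded */
    }

    static void maybe_enable_discard(int swap_flags)
    {
        /* New order: the cheap flag test guards the expensive call. */
        if ((swap_flags & SWAP_FLAG_DISCARD) && discard_swap() == 0)
            puts("SWP_DISCARDABLE set");
        else
            puts("discard not requested or not supported");
    }

    int main(void)
    {
        maybe_enable_discard(0);                    /* flag clear: discard_swap() never runs */
        maybe_enable_discard(SWAP_FLAG_DISCARD);    /* flag set: discard_swap() runs once */
        printf("discard_swap() called %d time(s)\n", discard_calls);
        return 0;
    }

With the old operand order, swapon would probe the device for discard support even when the user never requested it.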
return __swap_duplicate(entry, SWAP_HAS_CACHE);
}
-/*
- * swap_lock prevents swap_map being freed. Don't grab an extra
- * reference on the swaphandle, it doesn't matter if it becomes unused.
- */
-int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
-{
- struct swap_info_struct *si;
- int our_page_cluster = page_cluster;
- pgoff_t target, toff;
- pgoff_t base, end;
- int nr_pages = 0;
-
- if (!our_page_cluster) /* no readahead */
- return 0;
-
- si = swap_info[swp_type(entry)];
- target = swp_offset(entry);
- base = (target >> our_page_cluster) << our_page_cluster;
- end = base + (1 << our_page_cluster);
- if (!base) /* first page is swap header */
- base++;
-
- spin_lock(&swap_lock);
- if (end > si->max) /* don't go beyond end of map */
- end = si->max;
-
- /* Count contiguous allocated slots above our target */
- for (toff = target; ++toff < end; nr_pages++) {
- /* Don't read in free or bad pages */
- if (!si->swap_map[toff])
- break;
- if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
- break;
- }
- /* Count contiguous allocated slots below our target */
- for (toff = target; --toff >= base; nr_pages++) {
- /* Don't read in free or bad pages */
- if (!si->swap_map[toff])
- break;
- if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
- break;
- }
- spin_unlock(&swap_lock);
-
- /*
- * Indicate starting offset, and return number of pages to get:
- * if only 1, say 0, since there's then no readahead to be done.
- */
- *offset = ++toff;
- return nr_pages? ++nr_pages: 0;
-}
-
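For reference, the window that the removed valid_swaphandles() computed is just the 2^page_cluster-aligned block of swap slots around the target offset, clamped so that slot 0 (the swap header) and slots past the end of the map are excluded. A small stand-alone rendering of that arithmetic, with example numbers standing in for the real swap_info fields:

    #include <stdio.h>

    /* Reproduce the window arithmetic from the removed valid_swaphandles():
     * base..end is the 2^page_cluster-aligned block containing target. */
    static void swap_readahead_window(unsigned long target, int page_cluster,
                                      unsigned long max)
    {
        unsigned long base = (target >> page_cluster) << page_cluster;
        unsigned long end  = base + (1UL << page_cluster);

        if (!base)          /* slot 0 holds the swap header, never read it */
            base++;
        if (end > max)      /* don't go beyond the end of the map */
            end = max;

        printf("target %lu -> window [%lu, %lu)\n", target, base, end);
    }

    int main(void)
    {
        /* Example values only; /proc/sys/vm/page-cluster defaults to 3 (8-page clusters). */
        swap_readahead_window(100, 3, 1024);    /* -> [96, 104) */
        swap_readahead_window(2, 3, 1024);      /* -> [1, 8): skips the header slot */
        swap_readahead_window(1022, 3, 1020);   /* -> [1016, 1020): clamped at max */
        return 0;
    }

The function then trimmed the window further at the first free or bad slot on either side of the target, which is exactly the hole-skipping behaviour the replacement readahead path takes over.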
/*
* add_swap_count_continuation - called when a swap count is duplicated
* beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
if (!(count & COUNT_CONTINUED))
goto out;
- map = kmap_atomic(list_page, KM_USER0) + offset;
+ map = kmap_atomic(list_page) + offset;
count = *map;
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
/*
* If this continuation count now has some space in it,
offset &= ~PAGE_MASK;
page = list_entry(head->lru.next, struct page, lru);
- map = kmap_atomic(page, KM_USER0) + offset;
+ map = kmap_atomic(page) + offset;
if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
goto init_map; /* jump over SWAP_CONT_MAX checks */
* Think of how you add 1 to 999
*/
while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
page = list_entry(page->lru.next, struct page, lru);
BUG_ON(page == head);
- map = kmap_atomic(page, KM_USER0) + offset;
+ map = kmap_atomic(page) + offset;
}
if (*map == SWAP_CONT_MAX) {
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
page = list_entry(page->lru.next, struct page, lru);
if (page == head)
return false; /* add count continuation */
- map = kmap_atomic(page, KM_USER0) + offset;
+ map = kmap_atomic(page) + offset;
init_map: *map = 0; /* we didn't zero the page */
}
*map += 1;
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
page = list_entry(page->lru.prev, struct page, lru);
while (page != head) {
- map = kmap_atomic(page, KM_USER0) + offset;
+ map = kmap_atomic(page) + offset;
*map = COUNT_CONTINUED;
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
page = list_entry(page->lru.prev, struct page, lru);
}
return true; /* incremented */
*/
BUG_ON(count != COUNT_CONTINUED);
while (*map == COUNT_CONTINUED) {
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
page = list_entry(page->lru.next, struct page, lru);
BUG_ON(page == head);
- map = kmap_atomic(page, KM_USER0) + offset;
+ map = kmap_atomic(page) + offset;
}
BUG_ON(*map == 0);
*map -= 1;
if (*map == 0)
count = 0;
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
page = list_entry(page->lru.prev, struct page, lru);
while (page != head) {
- map = kmap_atomic(page, KM_USER0) + offset;
+ map = kmap_atomic(page) + offset;
*map = SWAP_CONT_MAX | count;
count = COUNT_CONTINUED;
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
page = list_entry(page->lru.prev, struct page, lru);
}
return count == COUNT_CONTINUED;
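Both branches above implement the multi-digit arithmetic that the "Think of how you add 1 to 999" comment alludes to: each continuation page holds one more "digit" of the count, incrementing carries through saturated digits, and decrementing borrows back down. A toy model over a plain array (the digit width mirrors SWAP_CONT_MAX, but the layout is simplified and the COUNT_CONTINUED flag handling is left out):

    #include <stdio.h>

    #define NDIGITS   4
    #define DIGIT_MAX 0x7f  /* same idea as SWAP_CONT_MAX: a saturated "digit" */

    /* digit[0] is the least significant "page"; higher indices are further
     * continuation pages.  Incrementing past a saturated digit carries into
     * the next one, exactly like adding 1 to 999 in decimal. */
    static void count_inc(unsigned char *digit)
    {
        int i = 0;

        while (digit[i] == DIGIT_MAX) { /* 999... -> 000... plus a carry */
            digit[i] = 0;
            i++;
        }
        digit[i] += 1;
    }

    static void count_dec(unsigned char *digit)
    {
        int i = 0;

        while (digit[i] == 0) {         /* 000... -> 999... minus a borrow */
            digit[i] = DIGIT_MAX;
            i++;
        }
        digit[i] -= 1;
    }

    int main(void)
    {
        unsigned char d[NDIGITS] = { DIGIT_MAX, DIGIT_MAX, 0, 0 };

        count_inc(d);   /* carries through both saturated digits */
        printf("after inc: %02x %02x %02x %02x\n", d[3], d[2], d[1], d[0]);  /* 00 01 00 00 */
        count_dec(d);   /* borrows back down */
        printf("after dec: %02x %02x %02x %02x\n", d[3], d[2], d[1], d[0]);  /* 00 00 7f 7f */
        return 0;
    }

The kernel version does the same walk over the list of continuation pages hanging off the swap_map page, using kmap_atomic() to touch one "digit" at a time.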