After commit v2.6.36-5896-gd065bd8 ("mm: retry page fault when blocking
on disk transfer") we usually wait in page faults without mmap_sem held,
so all swap-token logic was broken: it relied on
rwsem_is_locked(&mm->mmap_sem) as a sign of in-progress page faults.

Fix this by adding to the mm_struct an atomic counter of in-progress
page faults and making the swap-token logic check it instead.
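
Each fault path below brackets the region that may block with mmap_sem
dropped between activate_swap_token() and deactivate_swap_token().
Below is a minimal standalone sketch of the same pattern using C11
atomics; the struct and its owns_token flag are illustrative stand-ins,
not the kernel code:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct mm {
            bool owns_token;           /* models has_swap_token(mm) */
            atomic_int active_faults;  /* models mm->active_swap_token */
    };

    static bool activate_swap_token(struct mm *mm)
    {
            if (mm->owns_token) {
                    atomic_fetch_add(&mm->active_faults, 1);
                    return true;
            }
            return false;
    }

    static void deactivate_swap_token(struct mm *mm, bool token)
    {
            if (token)
                    atomic_fetch_sub(&mm->active_faults, 1);
    }

    static bool has_active_swap_token(struct mm *mm)
    {
            return mm->owns_token && atomic_load(&mm->active_faults) != 0;
    }

    int main(void)
    {
            struct mm mm = { .owns_token = true };
            bool token = activate_swap_token(&mm);  /* fault may block here */

            printf("during fault: %d\n", has_active_swap_token(&mm)); /* 1 */
            deactivate_swap_token(&mm, token);      /* fault completed */
            printf("after fault:  %d\n", has_active_swap_token(&mm)); /* 0 */
            return 0;
    }

Reclaim (page_referenced_one) then treats a page as referenced whenever
has_active_swap_token() is true for its mm, replacing the old
rwsem_is_locked() check.
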
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
unsigned int faultstamp;
unsigned int token_priority;
unsigned int last_interval;
+ atomic_t active_swap_token; /* faults in flight while mm owns the swap token */
unsigned long flags; /* Must use atomic bitops to access the bits */
__put_swap_token(mm);
}
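+/* mm owns the swap token and has page faults in flight */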
+static inline bool has_active_swap_token(struct mm_struct *mm)
+{
+ return has_swap_token(mm) && atomic_read(&mm->active_swap_token);
+}
+
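+/* Bump the in-flight fault counter only while mm owns the swap token;
+ * the returned bool tells deactivate_swap_token() whether to drop it. */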
+static inline bool activate_swap_token(struct mm_struct *mm)
+{
+ if (has_swap_token(mm)) {
+ atomic_inc(&mm->active_swap_token);
+ return true;
+ }
+ return false;
+}
+
+static inline void deactivate_swap_token(struct mm_struct *mm, bool swap_token)
+{
+ if (swap_token)
+ atomic_dec(&mm->active_swap_token);
+}
+
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
extern void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
return 0;
}
+static inline bool has_active_swap_token(struct mm_struct *mm)
+{
+ return false;
+}
+
+static inline bool activate_swap_token(struct mm_struct *mm)
+{
+ return false;
+}
+
+static inline void deactivate_swap_token(struct mm_struct *mm, bool swap_token)
+{
+}
+
static inline void disable_swap_token(struct mem_cgroup *memcg)
{
}
/* Initializing for Swap token stuff */
mm->token_priority = 0;
mm->last_interval = 0;
+ atomic_set(&mm->active_swap_token, 0);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
mm->pmd_huge_pte = NULL;
struct mem_cgroup *ptr;
int exclusive = 0;
int ret = 0;
+ bool swap_token;
if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
goto out;
goto out_release;
}
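+ /* lock_page_or_retry() may drop mmap_sem and sleep on page I/O */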
+ swap_token = activate_swap_token(mm);
+
locked = lock_page_or_retry(page, mm, flags);
+
+ deactivate_swap_token(mm, swap_token);
+
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
if (!locked) {
ret |= VM_FAULT_RETRY;
struct vm_fault vmf;
int ret;
int page_mkwrite = 0;
+ bool swap_token;
/*
* If we do COW later, allocate page before taking lock_page()
} else
cow_page = NULL;
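+ /* The ->fault() handler below may drop mmap_sem while waiting on I/O */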
+ swap_token = activate_swap_token(mm);
+
vmf.virtual_address = (void __user *)(address & PAGE_MASK);
vmf.pgoff = pgoff;
vmf.flags = flags;
}
+ deactivate_swap_token(mm, swap_token);
+
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
/*
return ret;
unwritable_page:
+ deactivate_swap_token(mm, swap_token);
page_cache_release(page);
return ret;
uncharge_out:
+ deactivate_swap_token(mm, swap_token);
/* fs's fault handler get error */
if (cow_page) {
mem_cgroup_uncharge_page(cow_page);
/* Pretend the page is referenced if the task has the
swap token and is in the middle of a page fault. */
- if (mm != current->mm && has_swap_token(mm) &&
- rwsem_is_locked(&mm->mmap_sem))
+ if (mm != current->mm && has_active_swap_token(mm))
referenced++;
(*mapcount)--;