thp: move khugepaged_mutex out of khugepaged
author Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Fri, 28 Sep 2012 00:19:16 +0000 (10:19 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Tue, 9 Oct 2012 03:12:18 +0000 (14:12 +1100)
Currently, khugepaged_mutex is used in a complicated and hard-to-follow way.
In fact, it only serializes start_khugepaged and khugepaged, for these
reasons:

- khugepaged_thread is shared between them
- the thp disable path (echo never > transparent_hugepage/enabled) is
  nonblocking, so we need to protect khugepaged_thread to get a stable
  running state

These can be avoided by:

- using the lock only to serialize thread creation and teardown
- making the thp disable path block until the thread has exited

With that, khugepaged_thread is fully controlled by start_khugepaged, and
khugepaged no longer needs the lock.
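
For illustration, a minimal, self-contained sketch of the pattern this patch
moves to: the mutex serializes only the callers that create or stop the worker
thread, while the worker itself simply loops until kthread_should_stop()
returns true.  The names (toy_lock, toy_thread, toy_worker, toy_set_enabled)
and the placeholder sleep are hypothetical and are not taken from
mm/huge_memory.c.

	#include <linux/kthread.h>
	#include <linux/mutex.h>
	#include <linux/err.h>
	#include <linux/delay.h>

	static struct task_struct *toy_thread;	/* plays the role of khugepaged_thread */
	static DEFINE_MUTEX(toy_lock);		/* plays the role of khugepaged_mutex */

	/* Worker: needs no lock, it only polls its own stop flag. */
	static int toy_worker(void *unused)
	{
		while (!kthread_should_stop()) {
			/* one scan pass would go here */
			msleep_interruptible(1000);
		}
		return 0;
	}

	/* Caller side: creation and teardown are serialized by the mutex alone. */
	static int toy_set_enabled(bool enabled)
	{
		int err = 0;

		mutex_lock(&toy_lock);
		if (enabled && !toy_thread) {
			toy_thread = kthread_run(toy_worker, NULL, "toy_worker");
			if (IS_ERR(toy_thread)) {
				err = PTR_ERR(toy_thread);
				toy_thread = NULL;
			}
		} else if (!enabled && toy_thread) {
			kthread_stop(toy_thread);	/* waits for toy_worker() to return */
			toy_thread = NULL;
		}
		mutex_unlock(&toy_lock);

		return err;
	}

Because kthread_stop() does not return until the worker has exited, the
disable path blocks, which is what lets the worker run without ever taking
the lock itself.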

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index d089f9308baa2688f939a9d422fb1e957abb9f65..e08305d1ac865b5b9b91a7ee9c5987f9312c134c 100644
@@ -140,9 +140,6 @@ static int start_khugepaged(void)
 {
        int err = 0;
        if (khugepaged_enabled()) {
-               int wakeup;
-
-               mutex_lock(&khugepaged_mutex);
                if (!khugepaged_thread)
                        khugepaged_thread = kthread_run(khugepaged, NULL,
                                                        "khugepaged");
@@ -152,15 +149,17 @@ static int start_khugepaged(void)
                        err = PTR_ERR(khugepaged_thread);
                        khugepaged_thread = NULL;
                }
-               wakeup = !list_empty(&khugepaged_scan.mm_head);
-               mutex_unlock(&khugepaged_mutex);
-               if (wakeup)
+
+               if (!list_empty(&khugepaged_scan.mm_head))
                        wake_up_interruptible(&khugepaged_wait);
 
                set_recommended_min_free_kbytes();
-       } else
+       } else if (khugepaged_thread) {
                /* wakeup to exit */
                wake_up_interruptible(&khugepaged_wait);
+               kthread_stop(khugepaged_thread);
+               khugepaged_thread = NULL;
+       }
 
        return err;
 }
@@ -222,7 +221,12 @@ static ssize_t enabled_store(struct kobject *kobj,
                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
 
        if (ret > 0) {
-               int err = start_khugepaged();
+               int err;
+
+               mutex_lock(&khugepaged_mutex);
+               err = start_khugepaged();
+               mutex_unlock(&khugepaged_mutex);
+
                if (err)
                        ret = err;
        }
@@ -2393,20 +2397,10 @@ static int khugepaged(void *none)
        set_freezable();
        set_user_nice(current, 19);
 
-       /* serialize with start_khugepaged() */
-       mutex_lock(&khugepaged_mutex);
-
-       for (;;) {
-               mutex_unlock(&khugepaged_mutex);
+       while (!kthread_should_stop()) {
                VM_BUG_ON(khugepaged_thread != current);
                khugepaged_loop();
                VM_BUG_ON(khugepaged_thread != current);
-
-               mutex_lock(&khugepaged_mutex);
-               if (!khugepaged_enabled())
-                       break;
-               if (unlikely(kthread_should_stop()))
-                       break;
        }
 
        spin_lock(&khugepaged_mm_lock);
@@ -2415,10 +2409,6 @@ static int khugepaged(void *none)
        if (mm_slot)
                collect_mm_slot(mm_slot);
        spin_unlock(&khugepaged_mm_lock);
-
-       khugepaged_thread = NULL;
-       mutex_unlock(&khugepaged_mutex);
-
        return 0;
 }