sched/numa: Fix comments
author Peter Zijlstra <peterz@infradead.org>
Mon, 7 Oct 2013 10:28:41 +0000 (11:28 +0100)
committer Ingo Molnar <mingo@kernel.org>
Wed, 9 Oct 2013 10:39:30 +0000 (12:39 +0200)
Fix an 80 column violation and a PTE vs PMD reference.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1381141781-10992-4-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/fair.c
mm/huge_memory.c

index 2b89cd244b0d75e2e08b2b3d0450d2c8d1f5319a..817cd7bfd517a250bffe55af9a1c9b8451f66e8d 100644
@@ -988,10 +988,10 @@ void task_numa_work(struct callback_head *work)
 
 out:
        /*
-        * It is possible to reach the end of the VMA list but the last few VMAs are
-        * not guaranteed to the vma_migratable. If they are not, we would find the
-        * !migratable VMA on the next scan but not reset the scanner to the start
-        * so check it now.
+        * It is possible to reach the end of the VMA list but the last few
+        * VMAs are not guaranteed to the vma_migratable. If they are not, we
+        * would find the !migratable VMA on the next scan but not reset the
+        * scanner to the start so check it now.
         */
        if (vma)
                mm->numa_scan_offset = start;
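
The reflowed comment describes the scan-resume logic at the end of task_numa_work(): if the walk ran out of budget mid-list, the offset is saved so the next pass resumes there, but if it fell off the end past a run of non-migratable VMAs the scanner must be reset now rather than on the next pass. Below is a minimal userspace sketch of that exit test; every name in it (fake_vma, fake_mm, scan_some, the 4096-byte "page") is an invented stand-in for illustration, not the kernel's own code.

/*
 * Hypothetical sketch of the scan-offset handling the comment above
 * describes.  The point is the exit test: a walk that stopped mid-list
 * (vma != NULL) saves its offset, while a walk that fell off the end --
 * possibly past a tail of non-migratable VMAs -- resets the scanner so
 * the next pass starts from the beginning.
 */
#include <stddef.h>
#include <stdio.h>

struct fake_vma {
	unsigned long vm_start, vm_end;
	int migratable;			/* stand-in for vma_migratable() */
	struct fake_vma *vm_next;
};

struct fake_mm {
	struct fake_vma *mmap;		/* head of the VMA list */
	unsigned long numa_scan_offset;	/* where the next scan resumes */
};

static void scan_some(struct fake_mm *mm, long budget)
{
	unsigned long start = mm->numa_scan_offset;
	struct fake_vma *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_end <= start || !vma->migratable)
			continue;
		while (start < vma->vm_end) {
			if (start < vma->vm_start)
				start = vma->vm_start;
			start += 4096;		/* pretend one page was scanned */
			if (--budget <= 0)
				goto out;
		}
	}
out:
	if (vma)			/* stopped mid-list: resume here */
		mm->numa_scan_offset = start;
	else				/* hit the end: restart from 0 */
		mm->numa_scan_offset = 0;
}

int main(void)
{
	struct fake_vma tail = { 8192, 12288, 0, NULL };	/* !migratable tail */
	struct fake_vma head = { 0, 8192, 1, &tail };
	struct fake_mm mm = { &head, 0 };

	scan_some(&mm, 1);	/* budget exhausted mid-list: offset saved */
	printf("offset after partial pass: %lu\n", mm.numa_scan_offset);
	scan_some(&mm, 16);	/* walks past the !migratable tail: offset reset */
	printf("offset after full pass:    %lu\n", mm.numa_scan_offset);
	return 0;
}
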
index 7489884682d84a6b5840fef19e90234076fd374e..19dbb08c64a5f7886b9d0968fc974a0078396478 100644
@@ -1305,7 +1305,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        spin_unlock(&mm->page_table_lock);
        lock_page(page);
 
-       /* Confirm the PTE did not while locked */
+       /* Confirm the PMD did not change while page_table_lock was released */
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(pmd, *pmdp))) {
                unlock_page(page);
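
The corrected comment names the pattern the surrounding code in do_huge_pmd_numa_page() relies on: page_table_lock was dropped so lock_page() could sleep, so after retaking the lock the handler must confirm with pmd_same() that the PMD was not changed in the meantime before acting on its earlier snapshot. The following is a hedged userspace sketch of that relock-and-revalidate idea, with a pthread mutex and a plain value standing in for page_table_lock and the PMD; all names here are hypothetical, not kernel interfaces.

/*
 * Sketch of the revalidation pattern the new comment describes: a lock is
 * dropped so a sleeping operation can run, and the protected value must be
 * re-checked once the lock is retaken.  table_lock, pmd_val_shared and
 * handle_fault() are invented stand-ins for illustration only.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long pmd_val_shared = 0x1000;	/* stand-in for *pmdp */

static bool handle_fault(void)
{
	unsigned long pmd;

	pthread_mutex_lock(&table_lock);
	pmd = pmd_val_shared;			/* snapshot taken under the lock */
	pthread_mutex_unlock(&table_lock);

	/* ...sleeping work goes here, e.g. the lock_page() in the hunk above... */

	pthread_mutex_lock(&table_lock);
	if (pmd != pmd_val_shared) {		/* the "pmd_same" check failed */
		pthread_mutex_unlock(&table_lock);
		return false;			/* someone changed it: back out */
	}
	/* the snapshot is still valid: safe to proceed under the lock */
	pthread_mutex_unlock(&table_lock);
	return true;
}

int main(void)
{
	printf("fault handled: %s\n", handle_fault() ? "yes" : "raced");
	return 0;
}
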