git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
mm, highmem: make flush_all_zero_pkmaps() return index of last flushed entry
authorJoonsoo Kim <js1304@gmail.com>
Fri, 9 Nov 2012 03:04:07 +0000 (14:04 +1100)
committerStephen Rothwell <sfr@canb.auug.org.au>
Mon, 12 Nov 2012 04:16:58 +0000 (15:16 +1100)
In the current code, after flush_all_zero_pkmaps() is invoked we re-iterate
over all pkmaps.  This can be optimized if flush_all_zero_pkmaps() returns
the index of a flushed entry.  With this index, we can immediately map a
highmem page to the virtual address represented by that index.  So change
the return type of flush_all_zero_pkmaps() to return the index of the last
flushed entry.

Additionally, update last_pkmap_nr to this index.  Every entry below this
index is certain to be occupied by another mapping, so updating
last_pkmap_nr to this index is a reasonable optimization.

Signed-off-by: Joonsoo Kim <js1304@gmail.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/highmem.h
mm/highmem.c

index ef788b5b4a3504b069818e33145b2b3e578235fb..0683869d6a70e9c7964be3aa37f513e1a7797cbb 100644 (file)
@@ -32,6 +32,7 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
 
 #ifdef CONFIG_HIGHMEM
 #include <asm/highmem.h>
+#define PKMAP_INDEX_INVAL (-1)
 
 /* declarations for linux/mm/highmem.c */
 unsigned int nr_free_highpages(void);
index c7f28cd08f1634230488e2daad0f2087be72817c..264a00a9ccf328b60cfe25befaac5adafe1dcb28 100644 (file)
@@ -107,10 +107,10 @@ struct page *kmap_to_page(void *vaddr)
 }
 EXPORT_SYMBOL(kmap_to_page);
 
-static void flush_all_zero_pkmaps(void)
+static int flush_all_zero_pkmaps(void)
 {
        int i;
-       int need_flush = 0;
+       int index = PKMAP_INDEX_INVAL;
 
        flush_cache_kmaps();
 
@@ -142,10 +142,12 @@ static void flush_all_zero_pkmaps(void)
                          &pkmap_page_table[i]);
 
                set_page_address(page, NULL);
-               need_flush = 1;
+               index = i;
        }
-       if (need_flush)
+       if (index != PKMAP_INDEX_INVAL)
                flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
+
+       return index;
 }
 
 /**
@@ -161,6 +163,7 @@ void kmap_flush_unused(void)
 static inline unsigned long map_new_virtual(struct page *page)
 {
        unsigned long vaddr;
+       int index = PKMAP_INDEX_INVAL;
        int count;
 
 start:
@@ -169,40 +172,45 @@ start:
        for (;;) {
                last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
                if (!last_pkmap_nr) {
-                       flush_all_zero_pkmaps();
-                       count = LAST_PKMAP;
+                       index = flush_all_zero_pkmaps();
+                       if (index != PKMAP_INDEX_INVAL)
+                               break; /* Found a usable entry */
                }
-               if (!pkmap_count[last_pkmap_nr])
+               if (!pkmap_count[last_pkmap_nr]) {
+                       index = last_pkmap_nr;
                        break;  /* Found a usable entry */
-               if (--count)
-                       continue;
-
-               /*
-                * Sleep for somebody else to unmap their entries
-                */
-               {
-                       DECLARE_WAITQUEUE(wait, current);
-
-                       __set_current_state(TASK_UNINTERRUPTIBLE);
-                       add_wait_queue(&pkmap_map_wait, &wait);
-                       unlock_kmap();
-                       schedule();
-                       remove_wait_queue(&pkmap_map_wait, &wait);
-                       lock_kmap();
-
-                       /* Somebody else might have mapped it while we slept */
-                       if (page_address(page))
-                               return (unsigned long)page_address(page);
-
-                       /* Re-start */
-                       goto start;
                }
+               if (--count == 0)
+                       break;
        }
-       vaddr = PKMAP_ADDR(last_pkmap_nr);
+
+       /*
+        * Sleep for somebody else to unmap their entries
+        */
+       if (index == PKMAP_INDEX_INVAL) {
+               DECLARE_WAITQUEUE(wait, current);
+
+               __set_current_state(TASK_UNINTERRUPTIBLE);
+               add_wait_queue(&pkmap_map_wait, &wait);
+               unlock_kmap();
+               schedule();
+               remove_wait_queue(&pkmap_map_wait, &wait);
+               lock_kmap();
+
+               /* Somebody else might have mapped it while we slept */
+               vaddr = (unsigned long)page_address(page);
+               if (vaddr)
+                       return vaddr;
+
+               /* Re-start */
+               goto start;
+       }
+
+       vaddr = PKMAP_ADDR(index);
        set_pte_at(&init_mm, vaddr,
-                  &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
+                  &(pkmap_page_table[index]), mk_pte(page, kmap_prot));
 
-       pkmap_count[last_pkmap_nr] = 1;
+       pkmap_count[index] = 1;
        set_page_address(page, (void *)vaddr);
 
        return vaddr;
@@ -324,7 +332,6 @@ struct page_address_map {
        void *virtual;
        struct list_head list;
 };
-
 static struct page_address_map page_address_maps[LAST_PKMAP];
 
 /*