From fed042b4689fad2f11b513dfa845bbbead5e24b7 Mon Sep 17 00:00:00 2001
From: Joonsoo Kim
Date: Fri, 9 Nov 2012 14:04:08 +1100
Subject: [PATCH] mm-highmem-makes-flush_all_zero_pkmaps-return-index-of-last-flushed-entry-v2

In the current code, after flush_all_zero_pkmaps() is invoked, we
re-iterate over all pkmaps to find a free entry.  This can be optimized
if flush_all_zero_pkmaps() returns the index of the first flushed entry:
with that index, we can immediately map a highmem page to the virtual
address it represents.  So change the return type of
flush_all_zero_pkmaps() and return the index of the first flushed entry.
Additionally, update last_pkmap_nr to this index.  Every entry below
this index is certainly occupied by another mapping, so updating
last_pkmap_nr to this index is a reasonable optimization.

Signed-off-by: Joonsoo Kim
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Minchan Kim
Signed-off-by: Andrew Morton
---
 include/linux/highmem.h |  2 +-
 mm/highmem.c            | 24 +++++++++++++++---------
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 0683869d6a70..97ad208d91cc 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -32,7 +32,7 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
 #ifdef CONFIG_HIGHMEM
 #include <asm/highmem.h>
 
-#define PKMAP_INDEX_INVAL (-1)
+#define PKMAP_INVALID_INDEX (LAST_PKMAP)
 
 /* declarations for linux/mm/highmem.c */
 unsigned int nr_free_highpages(void);
diff --git a/mm/highmem.c b/mm/highmem.c
index 264a00a9ccf3..91e80022aaad 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -107,10 +107,10 @@ struct page *kmap_to_page(void *vaddr)
 }
 EXPORT_SYMBOL(kmap_to_page);
 
-static int flush_all_zero_pkmaps(void)
+static unsigned int flush_all_zero_pkmaps(void)
 {
 	int i;
-	int index = PKMAP_INDEX_INVAL;
+	unsigned int index = PKMAP_INVALID_INDEX;
 
 	flush_cache_kmaps();
 
@@ -142,9 +142,10 @@ static int flush_all_zero_pkmaps(void)
 			  &pkmap_page_table[i]);
 
 		set_page_address(page, NULL);
-		index = i;
+		if (index == PKMAP_INVALID_INDEX)
+			index = i;
 	}
-	if (index != PKMAP_INDEX_INVAL)
+	if (index != PKMAP_INVALID_INDEX)
 		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
 
 	return index;
@@ -155,15 +156,19 @@ static int flush_all_zero_pkmaps(void)
  */
 void kmap_flush_unused(void)
 {
+	unsigned int index;
+
 	lock_kmap();
-	flush_all_zero_pkmaps();
+	index = flush_all_zero_pkmaps();
+	if (index != PKMAP_INVALID_INDEX && (index < last_pkmap_nr))
+		last_pkmap_nr = index;
 	unlock_kmap();
 }
 
 static inline unsigned long map_new_virtual(struct page *page)
 {
 	unsigned long vaddr;
-	int index = PKMAP_INDEX_INVAL;
+	unsigned int index = PKMAP_INVALID_INDEX;
 	int count;
 
 start:
@@ -173,8 +178,7 @@ start:
 		last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
 		if (!last_pkmap_nr) {
 			index = flush_all_zero_pkmaps();
-			if (index != PKMAP_INDEX_INVAL)
-				break; /* Found a usable entry */
+			break;
 		}
 		if (!pkmap_count[last_pkmap_nr]) {
 			index = last_pkmap_nr;
@@ -187,7 +191,7 @@ start:
 	/*
 	 * Sleep for somebody else to unmap their entries
 	 */
-	if (index == PKMAP_INDEX_INVAL) {
+	if (index == PKMAP_INVALID_INDEX) {
 		DECLARE_WAITQUEUE(wait, current);
 
 		__set_current_state(TASK_UNINTERRUPTIBLE);
@@ -211,6 +215,7 @@ start:
 		   &(pkmap_page_table[index]), mk_pte(page, kmap_prot));
 
 	pkmap_count[index] = 1;
+	last_pkmap_nr = index;
 	set_page_address(page, (void *)vaddr);
 
 	return vaddr;
@@ -332,6 +337,7 @@ struct page_address_map {
 	void *virtual;
 	struct list_head list;
 };
+
 static struct page_address_map page_address_maps[LAST_PKMAP];
 
 /*
-- 
2.39.5
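
For reference, the effect of the returned index can be modeled in user
space.  The sketch below is a simplified stand-in, not the kernel code:
there are no page tables, TLB flushes, or waitqueues, and
get_free_entry()/put_entry() are hypothetical substitutes for
map_new_virtual()/kunmap_high().  It only demonstrates how the index
returned by flush_all_zero_pkmaps() hands the caller a usable slot
without a second scan of pkmap_count[]:

#include <stdio.h>

#define LAST_PKMAP		16
#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
#define PKMAP_INVALID_INDEX	(LAST_PKMAP)

static unsigned int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;

/* Drop every entry whose count fell to 1 (mapped but unused) and
 * return the index of the first entry that was freed. */
static unsigned int flush_all_zero_pkmaps(void)
{
	unsigned int index = PKMAP_INVALID_INDEX;
	int i;

	for (i = 0; i < LAST_PKMAP; i++) {
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;		/* entry is reusable now */
		if (index == PKMAP_INVALID_INDEX)
			index = i;		/* remember the first freed slot */
	}
	return index;
}

/* Find a free slot.  Because the flush reports the first freed index,
 * a successful flush yields a usable entry directly instead of forcing
 * the caller to re-iterate over all pkmaps. */
static unsigned int get_free_entry(void)
{
	unsigned int index = PKMAP_INVALID_INDEX;
	int count = LAST_PKMAP;

	do {
		last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
		if (!last_pkmap_nr) {
			index = flush_all_zero_pkmaps();
			break;
		}
		if (!pkmap_count[last_pkmap_nr]) {
			index = last_pkmap_nr;
			break;
		}
	} while (--count);

	if (index != PKMAP_INVALID_INDEX) {
		pkmap_count[index] = 2;		/* mapped + one user */
		last_pkmap_nr = index;		/* entries below are in use */
	}
	return index;
}

/* Release an entry: the count drops back to 1, "mapped but unused". */
static void put_entry(unsigned int i)
{
	pkmap_count[i]--;
}

int main(void)
{
	unsigned int e[LAST_PKMAP - 1];
	int i;

	/* Use up every scannable slot, then release them all again. */
	for (i = 0; i < LAST_PKMAP - 1; i++)
		e[i] = get_free_entry();
	for (i = 0; i < LAST_PKMAP - 1; i++)
		put_entry(e[i]);

	/* The next request wraps, flushes the stale entries, and is
	 * handed the first flushed slot back without a re-scan. */
	printf("got entry %u\n", get_free_entry());
	return 0;
}

Running the model prints "got entry 1": the wrap-around triggers the
flush, and the returned index is mapped immediately, which is exactly
the re-scan the patch eliminates in map_new_virtual().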