Merge branch 'master' into tk71
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 1fbdb55bfd1bd34a480ea0e45b7f3a39a2934c6a..807c0573abbe82533a884f87ea7ea243051a228d 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -36,18 +36,17 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
        unsigned int idx;
        unsigned long vaddr;
        void *kmap;
+       int type;
 
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
 
-       debug_kmap_atomic(type);
-
 #ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * There is no cache coherency issue when non VIVT, so force the
@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
        if (kmap)
                return kmap;
 
+       type = kmap_atomic_idx_push();
+
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
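The enum km_type argument is gone: instead of the caller naming a fixed slot, the slot index now comes off a small per-CPU stack via kmap_atomic_idx_push(). Those helpers live in the generic highmem headers, not in this file; the sketch below is a simplified model of the idea (names and omitted debug checks are illustrative, not verbatim):

    /*
     * Simplified model of the per-CPU kmap slot stack assumed by
     * kmap_atomic_idx_push()/kmap_atomic_idx_pop(); the real helpers
     * live in the generic highmem code.
     */
    static DEFINE_PER_CPU(int, kmap_idx);

    static inline int kmap_atomic_idx_push(void)
    {
    	return __get_cpu_var(kmap_idx)++;	/* claim the next free slot */
    }

    static inline int kmap_atomic_idx(void)
    {
    	return __get_cpu_var(kmap_idx) - 1;	/* peek at the top slot */
    }

    static inline void kmap_atomic_idx_pop(void)
    {
    	__get_cpu_var(kmap_idx)--;		/* release the top slot */
    }

Because pushes and pops are strictly nested and pagefaults are disabled across the mapping, an interrupt that takes its own atomic kmap simply uses the next slot up and releases it before returning.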
@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
        return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
+       int idx, type;
 
        if (kvaddr >= (void *)FIXADDR_START) {
+               type = kmap_atomic_idx();
+               idx = type + KM_TYPE_NR * smp_processor_id();
+
                if (cache_is_vivt())
                        __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -97,21 +101,23 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 #else
                (void) idx;  /* to kill a warning */
 #endif
+               kmap_atomic_idx_pop();
        } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
                /* this address was obtained through kmap_high_get() */
                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
        }
        pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-       unsigned int idx;
        unsigned long vaddr;
+       int idx, type;
 
        pagefault_disable();
 
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
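For callers the conversion is mechanical: the generic kmap_atomic()/kunmap_atomic() wrappers forward to the __-prefixed functions above, and the old enum km_type argument (KM_USER0 and friends) simply disappears. A hypothetical caller, before and after:

    /* Hypothetical helper; buf and len are assumed set up by the caller. */
    static void copy_from_page(struct page *page, void *buf, size_t len)
    {
    	void *src = kmap_atomic(page);	/* was: kmap_atomic(page, KM_USER0) */
    	memcpy(buf, src, len);
    	kunmap_atomic(src);		/* was: kunmap_atomic(src, KM_USER0) */
    }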
@@ -134,90 +140,3 @@ struct page *kmap_atomic_to_page(const void *ptr)
        pte = TOP_PTE(vaddr);
        return pte_page(*pte);
 }
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-
-#include <linux/percpu.h>
-
-/*
- * The VIVT cache of a highmem page is always flushed before the page
- * is unmapped. Hence unmapped highmem pages need no cache maintenance
- * in that case.
- *
- * However unmapped pages may still be cached with a VIPT cache, and
- * it is not possible to perform cache maintenance on them using physical
- * addresses unfortunately.  So we have no choice but to set up a temporary
- * virtual mapping for that purpose.
- *
- * Yet this VIPT cache maintenance may be triggered from DMA support
- * functions which are possibly called from interrupt context. As we don't
- * want to keep interrupt disabled all the time when such maintenance is
- * taking place, we therefore allow for some reentrancy by preserving and
- * restoring the previous fixmap entry before the interrupted context is
- * resumed.  If the reentrancy depth is 0 then there is no need to restore
- * the previous fixmap, and leaving the current one in place allow it to
- * be reused the next time without a TLB flush (common with DMA).
- */
-
-static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
-
-void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
-{
-       unsigned int idx, cpu;
-       int *depth;
-       unsigned long vaddr, flags;
-       pte_t pte, *ptep;
-
-       if (!in_interrupt())
-               preempt_disable();
-
-       cpu = smp_processor_id();
-       depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-
-       idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-       ptep = TOP_PTE(vaddr);
-       pte = mk_pte(page, kmap_prot);
-
-       raw_local_irq_save(flags);
-       (*depth)++;
-       if (pte_val(*ptep) == pte_val(pte)) {
-               *saved_pte = pte;
-       } else {
-               *saved_pte = *ptep;
-               set_pte_ext(ptep, pte, 0);
-               local_flush_tlb_kernel_page(vaddr);
-       }
-       raw_local_irq_restore(flags);
-
-       return (void *)vaddr;
-}
-
-void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
-{
-       unsigned int idx, cpu = smp_processor_id();
-       int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-       unsigned long vaddr, flags;
-       pte_t pte, *ptep;
-
-       idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-       ptep = TOP_PTE(vaddr);
-       pte = mk_pte(page, kmap_prot);
-
-       BUG_ON(pte_val(*ptep) != pte_val(pte));
-       BUG_ON(*depth <= 0);
-
-       raw_local_irq_save(flags);
-       (*depth)--;
-       if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
-               set_pte_ext(ptep, saved_pte, 0);
-               local_flush_tlb_kernel_page(vaddr);
-       }
-       raw_local_irq_restore(flags);
-
-       if (!in_interrupt())
-               preempt_enable();
-}
-
-#endif  /* CONFIG_CPU_CACHE_VIPT */
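The deleted kmap_high_l1_vipt()/kunmap_high_l1_vipt() pair, with its hand-rolled per-CPU depth counter and saved-pte dance, existed only so that VIPT cache maintenance could reenter from interrupt context. Under the stack-based scheme above, plain atomic kmaps already nest safely, which presumably is what lets this machinery go away. A hypothetical illustration of the nesting:

    /* Nested atomic kmaps: each map pushes a slot, each unmap pops one,
     * so an interrupted context's mapping is never clobbered. */
    static void copy_highpage_sketch(struct page *dst, struct page *src)
    {
    	void *d = kmap_atomic(dst);
    	void *s = kmap_atomic(src);	/* nested map takes the next slot up */

    	copy_page(d, s);
    	kunmap_atomic(s);		/* pops mirror the pushes, innermost first */
    	kunmap_atomic(d);
    }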