diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
index 857d4173f9c69358209eaf8793b6b64f4f0cfd89..e7450bdbe83a9380264fc149c4831b587226cd36 100644
--- a/arch/powerpc/mm/highmem.c
+++ b/arch/powerpc/mm/highmem.c
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-       unsigned int idx;
        unsigned long vaddr;
+       int idx, type;
 
        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
 
-       debug_kmap_atomic(type);
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -52,26 +52,35 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+       int type;
 
        if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
                pagefault_enable();
                return;
        }
 
-       BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+       type = kmap_atomic_idx();
 
-       /*
-        * force other mappings to Oops if they'll try to access
-        * this pte without first remap it
-        */
-       pte_clear(&init_mm, vaddr, kmap_pte-idx);
-       local_flush_tlb_page(NULL, vaddr);
+#ifdef CONFIG_DEBUG_HIGHMEM
+       {
+               unsigned int idx;
+
+               idx = type + KM_TYPE_NR * smp_processor_id();
+               BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+               /*
+		 * force other mappings to Oops if they try to access
+		 * this pte without remapping it first
+                */
+               pte_clear(&init_mm, vaddr, kmap_pte-idx);
+               local_flush_tlb_page(NULL, vaddr);
+       }
 #endif
+
+       kmap_atomic_idx_pop();
        pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
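
Taken together, the two hunks move powerpc over to the stack-based atomic kmap API: kmap_atomic_prot() now obtains its fixmap slot from kmap_atomic_idx_push() instead of an enum km_type passed by the caller, and __kunmap_atomic() releases the slot with kmap_atomic_idx_pop() before re-enabling pagefaults. A minimal caller sketch follows; it is illustrative only, not part of this patch, and assumes powerpc's default kmap_prot protection together with a hypothetical helper name:

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Illustrative sketch only -- not part of this patch.  The helper name
 * is hypothetical; kmap_atomic_prot(), __kunmap_atomic() and kmap_prot
 * are the symbols touched or assumed by the hunks above.
 */
static void zero_highpage_example(struct page *page)
{
	/* no KM_* slot argument: the slot is pushed internally */
	void *vaddr = kmap_atomic_prot(page, kmap_prot);

	memset(vaddr, 0, PAGE_SIZE);

	/* pops the slot and re-enables pagefaults */
	__kunmap_atomic(vaddr);
}

Because the slot index is kept on a per-CPU stack, nested atomic mappings must be released in the reverse order in which they were taken.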