ARM: Add caller information to ioremap
author Russell King <rmk+kernel@arm.linux.org.uk>
Fri, 18 Dec 2009 11:10:03 +0000 (11:10 +0000)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Mon, 15 Feb 2010 21:39:11 +0000 (21:39 +0000)
This allows the procfs vmallocinfo file to show who created the ioremap
regions.  Note: __builtin_return_address(0) doesn't do what's expected
if it's used in an inline function, so we leave __arm_ioremap callers
in such places alone.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
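
For illustration only (an editor's sketch, not part of the commit): the standalone user-space snippet below shows the pattern the patch relies on. The names fake_ioremap, fake_ioremap_caller and recorded_caller are invented for this example; in the kernel the caller pointer is handed to get_vm_area_caller() and later printed by /proc/vmallocinfo.

/*
 * Illustration only -- not kernel code.  fake_ioremap, fake_ioremap_caller
 * and recorded_caller stand in for __arm_ioremap(), __arm_ioremap_caller()
 * and the pointer that get_vm_area_caller() records for /proc/vmallocinfo.
 */
#include <stdio.h>
#include <stddef.h>

static void *recorded_caller;	/* stands in for the vm_struct's caller field */

/* _caller variant: trusts the return address its (non-inline) caller passes in */
static void *fake_ioremap_caller(unsigned long phys, size_t size, void *caller)
{
	recorded_caller = caller;	/* what get_vm_area_caller() would record */
	(void)phys;
	(void)size;
	return NULL;			/* the mapping itself is out of scope here */
}

/*
 * Non-inline wrapper: __builtin_return_address(0) evaluates to an address
 * inside whoever called fake_ioremap(), which is the value worth logging.
 * If this wrapper were inlined, the "level 0" return address would belong
 * to whatever frame the body got inlined into -- the reason the patch
 * leaves __arm_ioremap() calls made from inline functions alone.
 */
__attribute__((noinline))
static void *fake_ioremap(unsigned long phys, size_t size)
{
	return fake_ioremap_caller(phys, size, __builtin_return_address(0));
}

int main(void)
{
	fake_ioremap(0xdead0000UL, 4096);
	printf("mapping created from %p (main is at %p)\n",
	       recorded_caller, (void *)main);
	return 0;
}

Built with gcc, the printed caller address falls inside main(); in the kernel case this is the address /proc/vmallocinfo resolves to a symbol in its caller column.
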
arch/arm/include/asm/io.h
arch/arm/mach-davinci/io.c
arch/arm/mach-iop13xx/io.c
arch/arm/mach-msm/io.c
arch/arm/mm/ioremap.c
arch/arm/mm/nommu.c
arch/arm/plat-iop/io.c
arch/arm/plat-omap/io.c

diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d2a59cfc30ce3f790ccc3b01a781f4fdf75c8baf..c980156f32638f8f031b93e730d0c2e8396aec40 100644
@@ -69,9 +69,16 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 /*
  * __arm_ioremap takes CPU physical address.
  * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page
+ * The _caller variety takes a __builtin_return_address(0) value for
+ * /proc/vmallocinfo to use - and should only be used in non-inline functions.
  */
-extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
+       size_t, unsigned int, void *);
+extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+       void *);
+
+extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
 extern void __iounmap(volatile void __iomem *addr);
 
 /*
diff --git a/arch/arm/mach-davinci/io.c b/arch/arm/mach-davinci/io.c
index 49912b48b1b0f7a26c2b65729686687f84bb460a..a1c0b6b99edf03e1fa4ad6847797995a8a349d0f 100644
@@ -24,7 +24,7 @@ void __iomem *davinci_ioremap(unsigned long p, size_t size, unsigned int type)
        if (BETWEEN(p, IO_PHYS, IO_SIZE))
                return XLATE(p, IO_PHYS, IO_VIRT);
 
-       return __arm_ioremap(p, size, type);
+       return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(davinci_ioremap);
 
diff --git a/arch/arm/mach-iop13xx/io.c b/arch/arm/mach-iop13xx/io.c
index 52958099781400702fa380324148c842eefaa823..48642e66c5663007129db7bec7cd909e6749fa49 100644
@@ -61,9 +61,9 @@ void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size,
                                 (cookie - IOP13XX_PCIE_LOWER_MEM_RA));
                break;
        case IOP13XX_PBI_LOWER_MEM_RA ... IOP13XX_PBI_UPPER_MEM_RA:
-               retval = __arm_ioremap(IOP13XX_PBI_LOWER_MEM_PA +
+               retval = __arm_ioremap_caller(IOP13XX_PBI_LOWER_MEM_PA +
                                       (cookie - IOP13XX_PBI_LOWER_MEM_RA),
-                                      size, mtype);
+                                      size, mtype, __builtin_return_address(0));
                break;
        case IOP13XX_PCIE_LOWER_IO_PA ... IOP13XX_PCIE_UPPER_IO_PA:
                retval = (void *) IOP13XX_PCIE_IO_PHYS_TO_VIRT(cookie);
@@ -75,7 +75,8 @@ void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size,
                retval = (void *) IOP13XX_PMMR_PHYS_TO_VIRT(cookie);
                break;
        default:
-               retval = __arm_ioremap(cookie, size, mtype);
+               retval = __arm_ioremap_caller(cookie, size, mtype,
+                               __builtin_return_address(0));
        }
 
        return retval;
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 1c5e7dac086f1892d9a6c831f8d5cf99dee90453..05f96b780aa6a811584fa3d25cfc4e2ce5ce6e0f 100644
@@ -76,5 +76,6 @@ __msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
                        mtype = MT_DEVICE_NONSHARED;
        }
 
-       return __arm_ioremap(phys_addr, size, mtype);
+       return __arm_ioremap_caller(phys_addr, size, mtype,
+               __builtin_return_address(0));
 }
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0ab75c60f7cfdf4745cc1f9c48d87db61cb93918..28c8b950ef04e84f6d0893712fb22c5394808ee6 100644
@@ -139,8 +139,8 @@ void __check_kvm_seq(struct mm_struct *mm)
  * which requires the new ioremap'd region to be referenced, the CPU will
  * reference the _old_ region.
  *
- * Note that get_vm_area() allocates a guard 4K page, so we need to mask
- * the size back to 1MB aligned or we will overflow in the loop below.
+ * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
+ * mask the size back to 1MB aligned or we will overflow in the loop below.
  */
 static void unmap_area_sections(unsigned long virt, unsigned long size)
 {
@@ -254,22 +254,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 }
 #endif
 
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- *
- * 'flags' are the extra L_PTE_ flags that you want to specify for this
- * mapping.  See <asm/pgtable.h> for more information.
- */
-void __iomem *
-__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
-                 unsigned int mtype)
+void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+       unsigned long offset, size_t size, unsigned int mtype, void *caller)
 {
        const struct mem_type *type;
        int err;
@@ -291,7 +277,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
         */
        size = PAGE_ALIGN(offset + size);
 
-       area = get_vm_area(size, VM_IOREMAP);
+       area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;
@@ -318,10 +304,9 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
 }
-EXPORT_SYMBOL(__arm_ioremap_pfn);
 
-void __iomem *
-__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+       unsigned int mtype, void *caller)
 {
        unsigned long last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
@@ -334,7 +319,33 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
        if (!size || last_addr < phys_addr)
                return NULL;
 
-       return __arm_ioremap_pfn(pfn, offset, size, mtype);
+       return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+                       caller);
+}
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *
+__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+                 unsigned int mtype)
+{
+       return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+                       __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__arm_ioremap_pfn);
+
+void __iomem *
+__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+{
+       return __arm_ioremap_caller(phys_addr, size, mtype,
+                       __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__arm_ioremap);
 
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 374a8311bc84b0eeadaa37a14004ee53f499ab67..9bfeb6b9509ad3480bd33a664e51b4f94311bfbb 100644
@@ -74,6 +74,12 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
 
+void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
+                          size_t size, unsigned int mtype, void *caller)
+{
+       return __arm_ioremap_pfn(pfn, offset, size, mtype);
+}
+
 void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
                            unsigned int mtype)
 {
@@ -81,6 +87,12 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
 }
 EXPORT_SYMBOL(__arm_ioremap);
 
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+                                  unsigned int mtype, void *caller)
+{
+       return __arm_ioremap(phys_addr, size, mtype);
+}
+
 void __iounmap(volatile void __iomem *addr)
 {
 }
diff --git a/arch/arm/plat-iop/io.c b/arch/arm/plat-iop/io.c
index ed0bbece0d6164d7613a4908cd0f9972986990c5..e15bc17db90b0fc845cf44c0f27dd9e4cec5e51a 100644
@@ -34,7 +34,8 @@ void * __iomem __iop3xx_ioremap(unsigned long cookie, size_t size,
                retval = (void *) IOP3XX_PMMR_PHYS_TO_VIRT(cookie);
                break;
        default:
-               retval = __arm_ioremap(cookie, size, mtype);
+               retval = __arm_ioremap_caller(cookie, size, mtype,
+                               __builtin_return_address(0));
        }
 
        return retval;
diff --git a/arch/arm/plat-omap/io.c b/arch/arm/plat-omap/io.c
index 0cfd54f519c4012eb0eb6fe518bf19ce0d3ebf42..4cbd4fb3232c86bcc50e27f05246e9577f9daa17 100644
@@ -128,7 +128,7 @@ void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type)
                        return XLATE(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_VIRT);
        }
 #endif
-       return __arm_ioremap(p, size, type);
+       return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(omap_ioremap);