/*
* __arm_ioremap takes CPU physical address.
* __arm_ioremap_pfn takes a Page Frame Number and an offset into that page
+ * The _caller variety takes a __builtin_return_address(0) value for
+ * /proc/vmallocinfo to use - and should only be used in non-inline functions.
*/
-extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
+ size_t, unsigned int, void *);
+extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+ void *);
+
+extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
extern void __iounmap(volatile void __iomem *addr);
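/*
 * Illustrative sketch, not part of this patch: the conversion pattern used
 * throughout this series. A platform ioremap wrapper (the hypothetical
 * my_plat_ioremap() below) must stay non-inline and pass its own
 * __builtin_return_address(0), so that /proc/vmallocinfo attributes the
 * mapping to the wrapper's caller rather than to the wrapper itself.
 */
void __iomem *my_plat_ioremap(unsigned long p, size_t size, unsigned int mtype)
{
	return __arm_ioremap_caller(p, size, mtype,
				    __builtin_return_address(0));
}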
/*
if (BETWEEN(p, IO_PHYS, IO_SIZE))
return XLATE(p, IO_PHYS, IO_VIRT);
- return __arm_ioremap(p, size, type);
+ return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
}
EXPORT_SYMBOL(davinci_ioremap);
(cookie - IOP13XX_PCIE_LOWER_MEM_RA));
break;
case IOP13XX_PBI_LOWER_MEM_RA ... IOP13XX_PBI_UPPER_MEM_RA:
- retval = __arm_ioremap(IOP13XX_PBI_LOWER_MEM_PA +
+ retval = __arm_ioremap_caller(IOP13XX_PBI_LOWER_MEM_PA +
(cookie - IOP13XX_PBI_LOWER_MEM_RA),
- size, mtype);
+ size, mtype, __builtin_return_address(0));
break;
case IOP13XX_PCIE_LOWER_IO_PA ... IOP13XX_PCIE_UPPER_IO_PA:
retval = (void *) IOP13XX_PCIE_IO_PHYS_TO_VIRT(cookie);
retval = (void *) IOP13XX_PMMR_PHYS_TO_VIRT(cookie);
break;
default:
- retval = __arm_ioremap(cookie, size, mtype);
+ retval = __arm_ioremap_caller(cookie, size, mtype,
+ __builtin_return_address(0));
}
return retval;
mtype = MT_DEVICE_NONSHARED;
}
- return __arm_ioremap(phys_addr, size, mtype);
+ return __arm_ioremap_caller(phys_addr, size, mtype,
+ __builtin_return_address(0));
}
* which requires the new ioremap'd region to be referenced, the CPU will
* reference the _old_ region.
*
- * Note that get_vm_area() allocates a guard 4K page, so we need to mask
- * the size back to 1MB aligned or we will overflow in the loop below.
+ * Note that get_vm_area_caller() allocates an extra 4K guard page, so we
+ * need to mask the size back to a 1MB boundary or we will overflow in the
+ * loop below.
*/
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
}
#endif
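/*
 * Illustrative note, not part of this patch: the 1MB masking referred to
 * above is the loop bound computed at the top of unmap_area_sections(),
 * roughly:
 *
 *	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
 *
 * Without the mask, the extra 4K guard page would push 'end' past the last
 * section mapping and the teardown loop would walk off the end.
 */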
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- *
- * 'flags' are the extra L_PTE_ flags that you want to specify for this
- * mapping. See <asm/pgtable.h> for more information.
- */
-void __iomem *
-__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
- unsigned int mtype)
+void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn,
+ unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
const struct mem_type *type;
int err;
*/
size = PAGE_ALIGN(offset + size);
- area = get_vm_area(size, VM_IOREMAP);
+ area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (!area)
return NULL;
addr = (unsigned long)area->addr;
flush_cache_vmap(addr, addr + size);
return (void __iomem *) (offset + addr);
}
-EXPORT_SYMBOL(__arm_ioremap_pfn);
-void __iomem *
-__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
unsigned long last_addr;
unsigned long offset = phys_addr & ~PAGE_MASK;
if (!size || last_addr < phys_addr)
return NULL;
- return __arm_ioremap_pfn(pfn, offset, size, mtype);
+ return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+ caller);
+}
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *
+__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+ unsigned int mtype)
+{
+ return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__arm_ioremap_pfn);
+
+void __iomem *
+__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+{
+ return __arm_ioremap_caller(phys_addr, size, mtype,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
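/*
 * Illustrative note, not part of this patch: __arm_ioremap_pfn() and
 * __arm_ioremap() remain the exported entry points and now capture
 * __builtin_return_address(0) themselves before delegating to the _caller
 * variants. get_vm_area_caller() records that pointer in the vm_struct,
 * which /proc/vmallocinfo resolves to a symbol name, so each mapping is
 * attributed to the function that created it.
 */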
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
+void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
+ size_t size, unsigned int mtype, void *caller)
+{
+ return __arm_ioremap_pfn(pfn, offset, size, mtype);
+}
+
void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
unsigned int mtype)
{
}
EXPORT_SYMBOL(__arm_ioremap);
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
+{
+ return __arm_ioremap(phys_addr, size, mtype);
+}
+
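/*
 * Illustrative note, not part of this patch: on !MMU builds there is no
 * vmalloc area and hence no vm_struct in which to record a caller, so the
 * _caller stubs above simply drop the extra argument and defer to the
 * identity-mapping versions.
 */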
void __iounmap(volatile void __iomem *addr)
{
}
retval = (void *) IOP3XX_PMMR_PHYS_TO_VIRT(cookie);
break;
default:
- retval = __arm_ioremap(cookie, size, mtype);
+ retval = __arm_ioremap_caller(cookie, size, mtype,
+ __builtin_return_address(0));
}
return retval;
return XLATE(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_VIRT);
}
#endif
- return __arm_ioremap(p, size, type);
+ return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
}
EXPORT_SYMBOL(omap_ioremap);