},
};
+/*
+ * For all ioremap requests of statically mapped regions, intercept ioremap and
+ * return virtual address from the iodesc table.
+ */
+void __iomem *exynos4_ioremap(unsigned long phy, size_t size, unsigned int type)
+{
+ struct map_desc *desc = exynos4_iodesc;
+ unsigned int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(exynos4_iodesc); idx++, desc++)
+ if (desc->pfn == __phys_to_pfn(phy) && desc->type == type)
+ return (void __iomem *)desc->virtual;
+
+ return __arm_ioremap(phy, size, type);
+}
+
static void exynos4_idle(void)
{
if (!need_resched())
#define __mem_pci(a) (a)
#define IO_SPACE_LIMIT (0xFFFFFFFF)
+#define __arch_ioremap exynos4_ioremap
+#define __arch_iounmap __iounmap
+
+void __iomem *exynos4_ioremap(unsigned long phy, size_t size,
+ unsigned int type);
#endif /* __ASM_ARM_ARCH_IO_H */