/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space. One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once. PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped. (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include "mm.h"
int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
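/*
 * Usage sketch (added for illustration, not part of the original file):
 * map a single device page into a virtual address the caller has already
 * reserved.  DEV_VIRT and DEV_PHYS are hypothetical platform constants.
 *
 *	if (ioremap_page(DEV_VIRT, DEV_PHYS, get_mem_type(MT_DEVICE)))
 *		pr_err("failed to map device page\n");
 *	else
 *		writel(0x1, (void __iomem *)DEV_VIRT);
 */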
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}
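/*
 * Note (added commentary): the loop above behaves like the read side of a
 * sequence counter - it copies the kernel's vmalloc-area pgd entries into
 * @mm and retries if init_mm.context.kvm_seq changed mid-copy.
 * unmap_area_sections() below bumps the sequence each time it tears down
 * a section mapping, so stale mms can be caught and resynced.
 */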
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
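/*
 * Worked example (added for illustration): a SZ_1M request becomes a
 * SZ_1M + PAGE_SIZE vm area once the guard page is added; masking with
 * ~(SZ_1M - 1) brings the span back to SZ_1M, so the pmd-clearing loop
 * below stops at the end of the real mapping rather than walking into
 * the guard page.
 */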
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}
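/*
 * Note (added commentary): on classic (non-LPAE) ARM a Linux pmd entry
 * covers two 1MB hardware sections, which is why the loop above writes
 * pmd[0] and pmd[1] with consecutive 1MB sections and then advances the
 * pointer by two.
 */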
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
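		/*
		 * Worked example (added for illustration, assuming 4K pages
		 * so PAGE_SHIFT == 12): pfn 0x110000 is physical address
		 * 0x1_1000_0000; pfn >> 20 == 0x1, so bits [35:32] of the
		 * physical address are placed in bits [23:20] of the
		 * descriptor, as the supersection format requires.
		 */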
		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;
	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size - 1 > area->phys_addr + area->size - 1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);
	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	     cpu_is_xsc3()) && pfn >= 0x100000 &&
	    !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;
void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return arch_ioremap_caller(phys_addr, size, mtype,
		__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
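/*
 * Usage sketch (added for illustration; DEV_PHYS is a hypothetical
 * platform constant): a driver maps a 4K device window, pokes a register
 * and tears the mapping down again.
 *
 *	void __iomem *regs = __arm_ioremap(DEV_PHYS, SZ_4K, MT_DEVICE);
 *
 *	if (regs) {
 *		writel(0x1, regs + 0x10);
 *		__arm_iounmap(regs);
 *	}
 */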
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY;
	else
		mtype = MT_MEMORY_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
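/*
 * Usage sketch (added for illustration; sram_phys and my_reprogram_fn are
 * made-up names): the typical caller maps SRAM executable, copies a
 * routine into it with fncpy() from <asm/fncpy.h>, and calls it through
 * the pointer fncpy() returns.
 *
 *	void __iomem *sram = __arm_ioremap_exec(sram_phys, SZ_4K, false);
 *	void (*fn)(void);
 *
 *	if (sram) {
 *		fn = fncpy(sram, &my_reprogram_fn, SZ_4K);
 *		fn();
 *	}
 */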
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if (vm->addr > addr)
			break;
		if (!(vm->flags & VM_IOREMAP))
			continue;
		/* If this is a static mapping we must leave it alone */
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
			read_unlock(&vmlist_lock);
			return;
		}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if ((vm->addr == addr) &&
		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
			unmap_area_sections((unsigned long)vm->addr, vm->size);
			break;
		}
#endif
	}
	read_unlock(&vmlist_lock);

	vunmap(addr);
}
void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
void __arm_iounmap(volatile void __iomem *io_addr)
{
	arch_iounmap(io_addr);
}
EXPORT_SYMBOL(__arm_iounmap);