/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			goto err_free_memtype;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

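/*
 * Example: a minimal usage sketch of ioremap_nocache(), assuming a PCI
 * driver with a device pdev; the MY_CTRL_REG offset is hypothetical.
 * The mapping must be balanced by iounmap():
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + MY_CTRL_REG);
 *	(void)readl(regs + MY_CTRL_REG);
 *	iounmap(regs);
 */
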
/**
 * ioremap_wc - map memory into CPU space write combined
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

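/*
 * Example: an illustrative sketch of the intended ioremap_wc() use
 * case, mapping a prefetchable framebuffer BAR so that consecutive
 * pixel writes may be combined; fb_start and fb_len are hypothetical.
 *
 *	void __iomem *fb;
 *
 *	fb = ioremap_wc(fb_start, fb_len);
 *	if (!fb)
 *		return -ENOMEM;
 *	memset_io(fb, 0, fb_len);
 *	iounmap(fb);
 */
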
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}

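/*
 * Example: a sketch of how a /dev/mem-style read path pairs these
 * helpers, one page at a time; p, buf, sz and rc are hypothetical
 * and error handling is abbreviated.
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		rc = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */
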
static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
			FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   phys_addr_t phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

void __init fixup_early_ioremap(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i]) {
			WARN_ON(1);
			break;
		}
	}

	early_ioremap_init();
}

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
		count);
	printk(KERN_WARNING
		"please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) not found slot\n",
			 (u64)phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
		       (u64)phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}

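/*
 * Example: an illustrative sketch of early_memremap() use during boot,
 * before the regular ioremap() machinery is up; pa_data stands in for
 * the physical address of a firmware-provided setup_data record, and
 * the mapping is released again with early_iounmap():
 *
 *	struct setup_data *data;
 *
 *	data = (struct setup_data *)early_memremap(pa_data, sizeof(*data));
 *	if (data) {
 *		... consume data->type, data->len ...
 *		early_iounmap((void __iomem *)data, sizeof(*data));
 *	}
 */
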
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
			 addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
			 addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}