/*
 * arch/parisc/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
 * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
 */

#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/module.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

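/*
 * Fill one PTE table: install page-sized translations for the physical
 * range starting at phys_addr, using the protection bits in flags, until
 * either size bytes are mapped or the end of this PMD entry is reached.
 */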
static inline void
remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
               unsigned long phys_addr, unsigned long flags)
{
        unsigned long end, pfn;
        pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
                                   _PAGE_ACCESSED | flags);

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        BUG_ON(address >= end);

        pfn = phys_addr >> PAGE_SHIFT;
        do {
                BUG_ON(!pte_none(*pte));
                set_pte(pte, pfn_pte(pfn, pgprot));
                address += PAGE_SIZE;
                pfn++;
                pte++;
        } while (address && (address < end));
}

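/*
 * Fill the PMD entries covering [address, address + size) within one PGD
 * entry, allocating PTE tables as needed and handing each one to
 * remap_area_pte().
 */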
static inline int
remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
               unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        BUG_ON(address >= end);

        phys_addr -= address;
        do {
                pte_t *pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address,
                               address + phys_addr, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));

        return 0;
}

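/*
 * Top level of the mapping walk: for each PGD entry spanned by the kernel
 * virtual range [address, address + size), allocate the intermediate PUD
 * and PMD levels in init_mm and map the corresponding physical pages.
 * Returns 0 on success or -ENOMEM if an allocation fails.
 */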
static int
remap_area_pages(unsigned long address, unsigned long phys_addr,
                 unsigned long size, unsigned long flags)
{
        pgd_t *dir;
        int error = 0;
        unsigned long end = address + size;

        BUG_ON(address >= end);

        phys_addr -= address;
        dir = pgd_offset_k(address);
        flush_cache_all();
        do {
                pud_t *pud;
                pmd_t *pmd;

                error = -ENOMEM;
                pud = pud_alloc(&init_mm, dir, address);
                if (!pud)
                        break;
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        break;
                if (remap_area_pmd(pmd, address, end - address,
                                   phys_addr + address, flags))
                        break;

                error = 0;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_all();

        return error;
}

/*
 * Generic mapping function (not visible outside):
 *
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        void __iomem *addr;
        struct vm_struct *area;
        unsigned long offset, last_addr;

#ifdef CONFIG_EISA
        unsigned long end = phys_addr + size - 1;
        /* Support EISA addresses */
        if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
            (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
                phys_addr |= F_EXTEND(0xfc000000);
                flags |= _PAGE_NO_CACHE;
        }
#endif

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (phys_addr < virt_to_phys(high_memory)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);

                for (page = virt_to_page(t_addr);
                     page <= virt_to_page(t_end); page++) {
                        if (!PageReserved(page))
                                return NULL;
                }
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;

        addr = (void __iomem *) area->addr;
        if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
                vunmap(addr);
                return NULL;
        }

        return (void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);

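/*
 * Release a mapping created by __ioremap().  Only addresses that lie in
 * the vmalloc/ioremap region (above high_memory) are freed; anything else
 * is silently ignored.
 */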
void iounmap(const volatile void __iomem *addr)
{
        if (addr > high_memory)
                vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
}
EXPORT_SYMBOL(iounmap);
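
/*
 * Usage sketch (illustrative only): a caller maps a stretch of device
 * address space uncached and later releases it.  The physical address,
 * size and function names below are hypothetical, not taken from real
 * hardware, so the example is kept out of the build.
 */
#if 0
static void __iomem *example_regs;

static int example_map_regs(void)
{
        /* Map 4kB of (hypothetical) device register space uncached. */
        example_regs = __ioremap(0xf8001000UL, PAGE_SIZE, _PAGE_NO_CACHE);
        if (!example_regs)
                return -ENOMEM;
        return 0;
}

static void example_unmap_regs(void)
{
        iounmap(example_regs);
        example_regs = NULL;
}
#endif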