/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	*level = 2;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	*level = 3;

	return pte_offset_kernel(pmd, address);
}
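
/*
 * Note on the level convention used above: level == 2 means the address
 * is covered by a large (2M/4M) page and the returned pointer is really
 * the PMD entry; level == 3 means a regular 4K PTE. A hypothetical
 * caller probing a kernel mapping would do something like:
 *
 *	int level;
 *	pte_t *kpte = lookup_address(addr, &level);
 *
 *	if (kpte && level == 2)
 *		...			(addr lives in a large page mapping)
 */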

static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long flags;

	set_pte_atomic(kpte, pte);	/* change init_mm */
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	for (page = pgd_list; page; page = (struct page *)page->index) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;

		pgd = (pgd_t *)page_address(page) + pgd_index(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		set_pte_atomic((pte_t *)pmd, pte);
	}
	spin_unlock_irqrestore(&pgd_lock, flags);
}
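
/*
 * The pgd_list walk above matters only when the kernel PMDs are not
 * shared between processes (PAE with !SHARED_KERNEL_PMD): then every
 * pgd carries its own copy of the kernel mappings, and the new PMD
 * entry has to be propagated into each of them. set_pte_atomic() is
 * used because a PAE entry is 64 bits wide and must never be visible
 * to other CPUs in a half-updated state.
 */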

static int
split_large_page(pte_t *kpte, unsigned long address, pgprot_t ref_prot)
{
	int i, level;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;

	base = alloc_pages(GFP_KERNEL, 0);
	if (!base)
		return -ENOMEM;

	down_write(&init_mm.mmap_sem);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up pagetable:
	 */
	set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	up_write(&init_mm.mmap_sem);

	/* If the race check fired, free the unused page table again: */
	if (base)
		__free_pages(base, 0);

	return 0;
}
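
/*
 * Worked example for the split above (non-PAE, 4M pages, so
 * PTRS_PER_PTE == 1024): for a page at virtual 0xc0101000 and the usual
 * PAGE_OFFSET of 0xc0000000, __pa() gives 0x101000 and masking with
 * LARGE_PAGE_MASK gives the 4M-aligned physical base 0x0. The new page
 * table is filled with 1024 4K entries covering 0x0-0x3fffff, all with
 * ref_prot, and only afterwards is the PMD switched over - other CPUs
 * see either the old large mapping or the complete new page table.
 */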

static int __change_page_attr(struct page *page, pgprot_t prot)
{
	pgprot_t ref_prot = PAGE_KERNEL;
	struct page *kpte_page;
	unsigned long address;
	pgprot_t oldprot;
	pte_t *kpte;
	int level;
	int err;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	oldprot = pte_pgprot(*kpte);
	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	/*
	 * Better fail early if someone sets the kernel text to NX.
	 * Does not cover __inittext.
	 */
	BUG_ON(address >= (unsigned long)&_text &&
	       address < (unsigned long)&_etext &&
	       (pgprot_val(prot) & _PAGE_NX));

	/* Text below _etext must stay executable: */
	if ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
		ref_prot = PAGE_KERNEL_EXEC;

	ref_prot = canon_pgprot(ref_prot);
	prot = canon_pgprot(prot);

	if (level == 3) {
		set_pte_atomic(kpte, mk_pte(page, prot));
	} else {
		err = split_large_page(kpte, address, ref_prot);
		if (err)
			return err;
		goto repeat;
	}

	return 0;
}
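
/*
 * Note the retry above: when the address is still covered by a large
 * page (level != 3), the mapping is first split with the conservative
 * ref_prot and then looked up again, so the actual attribute change is
 * always applied to a 4K PTE.
 */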

/*
 * Change the page attributes of a page in the kernel linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int err = 0, i;

	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr(page, prot);
		if (err)
			break;
	}

	return err;
}
EXPORT_SYMBOL(change_page_attr);
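
/*
 * Typical use (a sketch - PAGE_KERNEL_NOCACHE is the standard uncached
 * kernel protection on i386): a driver that needs an uncached view of a
 * RAM page would do
 *
 *	change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();
 *
 * and later restore write-back caching with
 *
 *	change_page_attr(page, 1, PAGE_KERNEL);
 *	global_flush_tlb();
 */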

int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot)
{
	int i;
	unsigned long pfn = (__pa(addr) >> PAGE_SHIFT);

	for (i = 0; i < numpages; i++) {
		if (!pfn_valid(pfn + i)) {
			break;
		} else {
			/* Every valid page must already be mapped: */
			int level;
			pte_t *pte = lookup_address(addr + i*PAGE_SIZE, &level);
			BUG_ON(pte && pte_none(*pte));
		}
	}

	return change_page_attr(virt_to_page(addr), i, prot);
}

static void flush_kernel_map(void *arg)
{
	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);
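
/*
 * With CONFIG_DEBUG_PAGEALLOC the page allocator uses the function
 * below to unmap pages from the linear mapping when they are freed
 * (enable == 0) and to map them back on allocation (enable == 1), so
 * use-after-free bugs fault immediately instead of silently corrupting
 * memory.
 */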

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable)
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);

	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time:
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

	/*
	 * We should perform an IPI and flush all TLBs,
	 * but that can deadlock -> flush only the current CPU:
	 */
	__flush_tlb_all();
}
#endif