/*
 * arch/x86/mm/pageattr_32.c
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

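/*
 * Walk the kernel pagetables for @address and return a pointer to the
 * PTE mapping it, or NULL if there is no mapping. *level is set to 2
 * when the address is covered by a large (PMD-level) page, in which
 * case the returned pointer is really the pmd entry, and to 3 for a
 * regular 4K PTE.
 */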
pte_t *lookup_address(unsigned long address, int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;
        *level = 2;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        *level = 3;

        return pte_offset_kernel(pmd, address);
}

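/*
 * Set a PMD/PTE entry in the kernel pagetables. When the kernel pmds
 * are shared between all pagetables (SHARED_KERNEL_PMD), updating
 * init_mm is enough; otherwise every pgd on the pgd_list carries its
 * own copy of the kernel entries and must be updated as well.
 */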
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        unsigned long flags;
        struct page *page;

        /* change init_mm */
        set_pte_atomic(kpte, pte);
        if (SHARED_KERNEL_PMD)
                return;

        spin_lock_irqsave(&pgd_lock, flags);
        for (page = pgd_list; page; page = (struct page *)page->index) {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;

                pgd = (pgd_t *)page_address(page) + pgd_index(address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                set_pte_atomic((pte_t *)pmd, pte);
        }
        spin_unlock_irqrestore(&pgd_lock, flags);
}

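/*
 * Split one large (2M/4M) kernel mapping into PTRS_PER_PTE small 4K
 * mappings with protection @ref_prot, then install the new pagetable
 * page in place of the large-page entry.
 */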
static int
split_large_page(pte_t *kpte, unsigned long address, pgprot_t ref_prot)
{
        int i, level;
        unsigned long addr;
        pte_t *pbase, *tmp;
        struct page *base;

        base = alloc_pages(GFP_KERNEL, 0);
        if (!base)
                return -ENOMEM;

        down_write(&init_mm.mmap_sem);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte) {
                WARN_ON_ONCE(1);
                goto out_unlock;
        }

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        paravirt_alloc_pt(&init_mm, page_to_pfn(base));

        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

        /*
         * Install the new, split up pagetable:
         */
        set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
        base = NULL;

out_unlock:
        up_write(&init_mm.mmap_sem);

        if (base)
                __free_pages(base, 0);

        return 0;
}

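/*
 * Change the attributes of a single kernel page to @prot, splitting
 * the covering large page first if necessary.
 */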
static int __change_page_attr(struct page *page, pgprot_t prot)
{
        pgprot_t ref_prot = PAGE_KERNEL;
        struct page *kpte_page;
        unsigned long address;
        int level, err = 0;
        pgprot_t oldprot;
        pte_t *kpte;

        BUG_ON(PageHighMem(page));
        address = (unsigned long)page_address(page);

repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return -EINVAL;

        oldprot = pte_pgprot(*kpte);
        kpte_page = virt_to_page(kpte);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));

        /*
         * Better fail early if someone sets the kernel text to NX.
         * Does not cover __inittext.
         */
        BUG_ON(address >= (unsigned long)&_text &&
                address < (unsigned long)&_etext &&
               (pgprot_val(prot) & _PAGE_NX));

        if ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
                ref_prot = PAGE_KERNEL_EXEC;

        ref_prot = canon_pgprot(ref_prot);
        prot = canon_pgprot(prot);

        if (level == 3) {
                set_pte_atomic(kpte, mk_pte(page, prot));
        } else {
                err = split_large_page(kpte, address, ref_prot);
                if (!err)
                        goto repeat;
        }
        return err;
}

/*
 * Change the page attributes of a page in the kernel linear mapping.
 *
 * This should be used when a page is mapped with a caching policy
 * other than write-back somewhere - some CPUs do not like it when
 * mappings with different caching policies exist. This changes the
 * page attributes of the kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings
 * elsewhere. This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
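 *
 * Illustrative sketch (not part of this file; "addr" stands for any
 * valid lowmem kernel virtual address, and PAGE_KERNEL_RO is just one
 * possible target protection):
 *
 *      struct page *pg = virt_to_page(addr);
 *
 *      change_page_attr(pg, 1, PAGE_KERNEL_RO);
 *      global_flush_tlb();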
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        int err = 0, i;

        for (i = 0; i < numpages; i++, page++) {
                err = __change_page_attr(page, prot);
                if (err)
                        break;
        }

        return err;
}
EXPORT_SYMBOL(change_page_attr);

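/*
 * Address-based variant of change_page_attr(): sanity-check that the
 * pfns behind @addr are valid and mapped, then change the attributes
 * of the corresponding struct pages.
 */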
int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot)
{
        int i;
        unsigned long pfn = (__pa(addr) >> PAGE_SHIFT);

        for (i = 0; i < numpages; i++) {
                if (!pfn_valid(pfn + i)) {
                        WARN_ON_ONCE(1);
                        break;
                } else {
                        int level;
                        pte_t *pte = lookup_address(addr + i*PAGE_SIZE, &level);
                        BUG_ON(pte && pte_none(*pte));
                }
        }

        return change_page_attr(virt_to_page(addr), i, prot);
}

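/*
 * Per-CPU flush callback, run on every CPU via on_each_cpu() from
 * global_flush_tlb() below.
 */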
static void flush_kernel_map(void *arg)
{
        /*
         * Flush everything to work around an erratum in early Athlons
         * regarding large-page flushing.
         */
        __flush_tlb_all();

        if (boot_cpu_data.x86_model >= 4)
                wbinvd();
}

void global_flush_tlb(void)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);

#ifdef CONFIG_DEBUG_PAGEALLOC
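/*
 * For CONFIG_DEBUG_PAGEALLOC: map or unmap whole pages in the kernel
 * linear mapping so that use-after-free bugs fault immediately.
 */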
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * The return value is ignored - the calls cannot fail,
         * large pages are disabled at boot time:
         */
        change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

        /*
         * We should perform an IPI and flush all TLBs, but
         * that can deadlock -> flush only the current CPU:
         */
        __flush_tlb_all();
}
#endif