arch/s390/mm/hugetlbpage.c
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>

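/*
 * Huge ptes for hugetlbfs mappings are stored in segment table (pmd)
 * entries on s390, so the pte software bits have to be translated to
 * their pmd counterparts and back.  The tables below document the
 * encoding used by the two conversion helpers.
 */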
static inline pmd_t __pte_to_pmd(pte_t pte)
{
        int none, young, prot;
        pmd_t pmd;

        /*
         * Convert encoding               pte bits        pmd bits
         *                              .IR...wrdytp    ..R...I...y.
         * empty                        .10...000000 -> ..0...1...0.
         * prot-none, clean, old        .11...000001 -> ..0...1...1.
         * prot-none, clean, young      .11...000101 -> ..1...1...1.
         * prot-none, dirty, old        .10...001001 -> ..0...1...1.
         * prot-none, dirty, young      .10...001101 -> ..1...1...1.
         * read-only, clean, old        .11...010001 -> ..1...1...0.
         * read-only, clean, young      .01...010101 -> ..1...0...1.
         * read-only, dirty, old        .11...011001 -> ..1...1...0.
         * read-only, dirty, young      .01...011101 -> ..1...0...1.
         * read-write, clean, old       .11...110001 -> ..0...1...0.
         * read-write, clean, young     .01...110101 -> ..0...0...1.
         * read-write, dirty, old       .10...111001 -> ..0...1...0.
         * read-write, dirty, young     .00...111101 -> ..0...0...1.
         * Huge ptes are dirty by definition, a clean pte is made dirty
         * by the conversion.
         */
        if (pte_present(pte)) {
                pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
                if (pte_val(pte) & _PAGE_INVALID)
                        pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
                none = (pte_val(pte) & _PAGE_PRESENT) &&
                        !(pte_val(pte) & _PAGE_READ) &&
                        !(pte_val(pte) & _PAGE_WRITE);
                prot = (pte_val(pte) & _PAGE_PROTECT) &&
                        !(pte_val(pte) & _PAGE_WRITE);
                young = pte_val(pte) & _PAGE_YOUNG;
                if (none || young)
                        pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
                if (prot || (none && young))
                        pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
        } else
                pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
        return pmd;
}

static inline pte_t __pmd_to_pte(pmd_t pmd)
{
        pte_t pte;

        /*
         * Convert encoding       pmd bits        pte bits
         *                      ..R...I...y.    .IR...wrdytp
         * empty                ..0...1...0. -> .10...000000
         * prot-none, old       ..0...1...1. -> .10...001001
         * prot-none, young     ..1...1...1. -> .10...001101
         * read-only, old       ..1...1...0. -> .11...011001
         * read-only, young     ..1...0...1. -> .01...011101
         * read-write, old      ..0...1...0. -> .10...111001
         * read-write, young    ..0...0...1. -> .00...111101
         * Huge ptes are dirty by definition
         */
        if (pmd_present(pmd)) {
                pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
                        (pmd_val(pmd) & PAGE_MASK);
                if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
                        pte_val(pte) |= _PAGE_INVALID;
                if (pmd_prot_none(pmd)) {
                        if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
                                pte_val(pte) |= _PAGE_YOUNG;
                } else {
                        pte_val(pte) |= _PAGE_READ;
                        if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
                                pte_val(pte) |= _PAGE_PROTECT;
                        else
                                pte_val(pte) |= _PAGE_WRITE;
                        if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)
                                pte_val(pte) |= _PAGE_YOUNG;
                }
        } else
                pte_val(pte) = _PAGE_INVALID;
        return pte;
}

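/*
 * Store a huge pte: convert it to segment table (pmd) format first.
 * Without hardware large-page support (MACHINE_HAS_HPAGE) the segment
 * entry is made to point at the page table pre-allocated by
 * arch_prepare_hugepage() and saved in page[1].index.
 */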
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte)
{
        pmd_t pmd;

        pmd = __pte_to_pmd(pte);
        if (!MACHINE_HAS_HPAGE) {
                pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
                pmd_val(pmd) |= pte_page(pte)[1].index;
        } else
                pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
        *(pmd_t *) ptep = pmd;
}

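/*
 * Read a huge pte: without hardware large-page support the segment entry
 * holds a page table origin, so replace it with the contents of the first
 * pte (the address of the huge page) before converting back to pte format.
 */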
pte_t huge_ptep_get(pte_t *ptep)
{
        unsigned long origin;
        pmd_t pmd;

        pmd = *(pmd_t *) ptep;
        if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
                origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
                pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
                pmd_val(pmd) |= *(unsigned long *) origin;
        }
        return __pmd_to_pte(pmd);
}

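/*
 * Return the current huge pte, then flush the segment entry from the TLBs
 * and mark it empty.
 */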
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                              unsigned long addr, pte_t *ptep)
{
        pmd_t *pmdp = (pmd_t *) ptep;
        pte_t pte = huge_ptep_get(ptep);

        pmdp_flush_direct(mm, addr, pmdp);
        pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
        return pte;
}

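/*
 * Without hardware large-page support huge pages are emulated: allocate a
 * page table that maps the huge page with ordinary ptes and remember it in
 * page[1].index for set_huge_pte_at() and arch_release_hugepage().
 */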
int arch_prepare_hugepage(struct page *page)
{
        unsigned long addr = page_to_phys(page);
        pte_t pte;
        pte_t *ptep;
        int i;

        if (MACHINE_HAS_HPAGE)
                return 0;

        ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
        if (!ptep)
                return -ENOMEM;

        pte_val(pte) = addr;
        for (i = 0; i < PTRS_PER_PTE; i++) {
                set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
                pte_val(pte) += PAGE_SIZE;
        }
        page[1].index = (unsigned long) ptep;
        return 0;
}

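/*
 * Free the page table that arch_prepare_hugepage() allocated for software
 * huge page emulation.
 */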
void arch_release_hugepage(struct page *page)
{
        pte_t *ptep;

        if (MACHINE_HAS_HPAGE)
                return;

        ptep = (pte_t *) page[1].index;
        if (!ptep)
                return;
        clear_table((unsigned long *) ptep, _PAGE_INVALID,
                    PTRS_PER_PTE * sizeof(pte_t));
        page_table_free(&init_mm, (unsigned long *) ptep);
        page[1].index = 0;
}

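/*
 * Huge pages are mapped at segment (pmd) level, so a "huge pte" is simply
 * the pmd entry: walk and, if necessary, allocate down to the pmd and
 * return it cast to a pte pointer.
 */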
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp = NULL;

        pgdp = pgd_offset(mm, addr);
        pudp = pud_alloc(mm, pgdp, addr);
        if (pudp)
                pmdp = pmd_alloc(mm, pudp, addr);
        return (pte_t *) pmdp;
}

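/*
 * Find the segment table (pmd) entry for an address, or NULL if the
 * upper-level tables are not populated.
 */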
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp = NULL;

        pgdp = pgd_offset(mm, addr);
        if (pgd_present(*pgdp)) {
                pudp = pud_offset(pgdp, addr);
                if (pud_present(*pudp))
                        pmdp = pmd_offset(pudp, addr);
        }
        return (pte_t *) pmdp;
}

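/* Huge pmd sharing is not implemented on s390. */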
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

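/*
 * Not used on s390: returning -EINVAL makes the generic code fall back to
 * the normal page table walk.
 */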
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
{
        return ERR_PTR(-EINVAL);
}

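/*
 * pmd_huge() only reports hardware large-page mappings; with software
 * emulation the segment entry points to a page table and is not considered
 * huge.
 */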
int pmd_huge(pmd_t pmd)
{
        if (!MACHINE_HAS_HPAGE)
                return 0;

        return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
}

int pud_huge(pud_t pud)
{
        return 0;
}

int pmd_huge_support(void)
{
        return 1;
}

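/*
 * Return the struct page for the base page within a hardware huge page
 * mapping that contains the given address.
 */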
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmdp, int write)
{
        struct page *page;

        if (!MACHINE_HAS_HPAGE)
                return NULL;

        page = pmd_page(*pmdp);
        if (page)
                page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}