arch/s390/mm/vmem.c
/*
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

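/* Serializes updates to the kernel 1:1 mapping and the memory segment list. */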
static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
        struct list_head list;
        unsigned long start;
        unsigned long size;
};

static LIST_HEAD(mem_segs);

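/*
 * Allocate pages from the page allocator once the slab allocator is up;
 * fall back to the bootmem allocator during early boot (hence __ref).
 */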
static void __ref *vmem_alloc_pages(unsigned int order)
{
        if (slab_is_available())
                return (void *)__get_free_pages(GFP_KERNEL, order);
        return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

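/*
 * Region-third (pud) and segment (pmd) tables occupy four pages (16 KB),
 * hence the order-2 allocations and the PAGE_SIZE * 4 clear_table() below.
 */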
static inline pud_t *vmem_pud_alloc(void)
{
        pud_t *pud = NULL;

#ifdef CONFIG_64BIT
        pud = vmem_alloc_pages(2);
        if (!pud)
                return NULL;
        clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
        return pud;
}

static inline pmd_t *vmem_pmd_alloc(void)
{
        pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
        pmd = vmem_alloc_pages(2);
        if (!pmd)
                return NULL;
        clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
        return pmd;
}

static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
        pte_t *pte;

        if (slab_is_available())
                pte = (pte_t *) page_table_alloc(&init_mm, address);
        else
                pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
        if (!pte)
                return NULL;
        clear_table((unsigned long *) pte, _PAGE_INVALID,
                    PTRS_PER_PTE * sizeof(pte_t));
        return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
        unsigned long end = start + size;
        unsigned long address = start;
        pgd_t *pg_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        int ret = -ENOMEM;

        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        pu_dir = vmem_pud_alloc();
                        if (!pu_dir)
                                goto out;
                        pgd_populate(&init_mm, pg_dir, pu_dir);
                }
                pu_dir = pud_offset(pg_dir, address);
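                /*
                 * With EDAT2 the range can be mapped with 2 GB frames at the
                 * region-third level if it is suitably aligned and large
                 * enough. Large frames are not used with DEBUG_PAGEALLOC so
                 * that single pages can still be unmapped.
                 */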
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
                if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
                    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
                        pud_val(*pu_dir) = __pa(address) |
                                _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
                                (ro ? _REGION_ENTRY_PROTECT : 0);
                        address += PUD_SIZE;
                        continue;
                }
#endif
                if (pud_none(*pu_dir)) {
                        pm_dir = vmem_pmd_alloc();
                        if (!pm_dir)
                                goto out;
                        pud_populate(&init_mm, pu_dir, pm_dir);
                }
                pm_dir = pmd_offset(pu_dir, address);
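                /*
                 * With EDAT1 the range can be mapped with 1 MB frames at the
                 * segment level under the same alignment and size conditions.
                 */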
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
                if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
                    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
                        pmd_val(*pm_dir) = __pa(address) |
                                _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
                                _SEGMENT_ENTRY_YOUNG |
                                (ro ? _SEGMENT_ENTRY_PROTECT : 0);
                        address += PMD_SIZE;
                        continue;
                }
#endif
                if (pmd_none(*pm_dir)) {
                        pt_dir = vmem_pte_alloc(address);
                        if (!pt_dir)
                                goto out;
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                }

                pt_dir = pte_offset_kernel(pm_dir, address);
                pte_val(*pt_dir) = __pa(address) |
                        pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
                address += PAGE_SIZE;
        }
        ret = 0;
out:
        return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
        unsigned long end = start + size;
        unsigned long address = start;
        pgd_t *pg_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        pte_t  pte;

        pte_val(pte) = _PAGE_INVALID;
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        address += PGDIR_SIZE;
                        continue;
                }
                pu_dir = pud_offset(pg_dir, address);
                if (pud_none(*pu_dir)) {
                        address += PUD_SIZE;
                        continue;
                }
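                /*
                 * Large (EDAT) mappings are removed by clearing the whole
                 * pud or pmd entry.
                 */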
                if (pud_large(*pu_dir)) {
                        pud_clear(pu_dir);
                        address += PUD_SIZE;
                        continue;
                }
                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
                        address += PMD_SIZE;
                        continue;
                }
                if (pmd_large(*pm_dir)) {
                        pmd_clear(pm_dir);
                        address += PMD_SIZE;
                        continue;
                }
                pt_dir = pte_offset_kernel(pm_dir, address);
                *pt_dir = pte;
                address += PAGE_SIZE;
        }
        flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long address = start;
        pgd_t *pg_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        int ret = -ENOMEM;

        for (address = start; address < end;) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        pu_dir = vmem_pud_alloc();
                        if (!pu_dir)
                                goto out;
                        pgd_populate(&init_mm, pg_dir, pu_dir);
                }

                pu_dir = pud_offset(pg_dir, address);
                if (pud_none(*pu_dir)) {
                        pm_dir = vmem_pmd_alloc();
                        if (!pm_dir)
                                goto out;
                        pud_populate(&init_mm, pu_dir, pm_dir);
                }

                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
#ifdef CONFIG_64BIT
                        /*
                         * Use 1 MB frames for the vmemmap if available. Large
                         * frames are always used, even if they end up only
                         * partially filled; otherwise we would also need page
                         * tables, since vmemmap_populate() is called for each
                         * section separately.
                         */
                        if (MACHINE_HAS_EDAT1) {
                                void *new_page;

                                new_page = vmemmap_alloc_block(PMD_SIZE, node);
                                if (!new_page)
                                        goto out;
                                pmd_val(*pm_dir) = __pa(new_page) |
                                        _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
                                        _SEGMENT_ENTRY_CO;
                                address = (address + PMD_SIZE) & PMD_MASK;
                                continue;
                        }
#endif
                        pt_dir = vmem_pte_alloc(address);
                        if (!pt_dir)
                                goto out;
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                } else if (pmd_large(*pm_dir)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }

                pt_dir = pte_offset_kernel(pm_dir, address);
                if (pte_none(*pt_dir)) {
                        unsigned long new_page;

                        new_page = __pa(vmem_alloc_pages(0));
                        if (!new_page)
                                goto out;
                        /* new_page already holds a physical address */
                        pte_val(*pt_dir) = new_page | pgprot_val(PAGE_KERNEL);
                }
                address += PAGE_SIZE;
        }
        memset((void *)start, 0, end - start);
        ret = 0;
out:
        return ret;
}

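/* Freeing of the vmemmap backing store is not implemented. */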
void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
        struct memory_segment *tmp;

        if (seg->start + seg->size > VMEM_MAX_PHYS ||
            seg->start + seg->size < seg->start)
                return -ERANGE;

        list_for_each_entry(tmp, &mem_segs, list) {
                if (seg->start >= tmp->start + tmp->size)
                        continue;
                if (seg->start + seg->size <= tmp->start)
                        continue;
                return -ENOSPC;
        }
        list_add(&seg->list, &mem_segs);
        return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
        list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
        remove_memory_segment(seg);
        vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
        struct memory_segment *seg;
        int ret;

        mutex_lock(&vmem_mutex);

        ret = -ENOENT;
        list_for_each_entry(seg, &mem_segs, list) {
                if (seg->start == start && seg->size == size)
                        break;
        }

        if (seg->start != start || seg->size != size)
                goto out;

        ret = 0;
        __remove_shared_memory(seg);
        kfree(seg);
out:
        mutex_unlock(&vmem_mutex);
        return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
        struct memory_segment *seg;
        int ret;

        mutex_lock(&vmem_mutex);
        ret = -ENOMEM;
        seg = kzalloc(sizeof(*seg), GFP_KERNEL);
        if (!seg)
                goto out;
        seg->start = start;
        seg->size = size;

        ret = insert_memory_segment(seg);
        if (ret)
                goto out_free;

        ret = vmem_add_mem(start, size, 0);
        if (ret)
                goto out_remove;
        goto out;

out_remove:
        __remove_shared_memory(seg);
out_free:
        kfree(seg);
out:
        mutex_unlock(&vmem_mutex);
        return ret;
}

/*
 * Map the whole physical memory into virtual memory (identity mapping).
 * Enough space is reserved in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged later.
 */
void __init vmem_map_init(void)
{
        unsigned long ro_start, ro_end;
        unsigned long start, end;
        int i;

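        /*
         * Kernel text and read-only data, from _stext up to _eshared, are
         * mapped read-only; memory chunks are split around that range.
         */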
        ro_start = PFN_ALIGN((unsigned long)&_stext);
        ro_end = (unsigned long)&_eshared & PAGE_MASK;
        for (i = 0; i < MEMORY_CHUNKS; i++) {
                if (!memory_chunk[i].size)
                        continue;
                start = memory_chunk[i].addr;
                end = memory_chunk[i].addr + memory_chunk[i].size;
                if (start >= ro_end || end <= ro_start)
                        vmem_add_mem(start, end - start, 0);
                else if (start >= ro_start && end <= ro_end)
                        vmem_add_mem(start, end - start, 1);
                else if (start >= ro_start) {
                        vmem_add_mem(start, ro_end - start, 1);
                        vmem_add_mem(ro_end, end - ro_end, 0);
                } else if (end < ro_end) {
                        vmem_add_mem(start, ro_start - start, 0);
                        vmem_add_mem(ro_start, end - ro_start, 1);
                } else {
                        vmem_add_mem(start, ro_start - start, 0);
                        vmem_add_mem(ro_start, ro_end - ro_start, 1);
                        vmem_add_mem(ro_end, end - ro_end, 0);
                }
        }
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
        struct memory_segment *seg;
        int i;

        mutex_lock(&vmem_mutex);
        for (i = 0; i < MEMORY_CHUNKS; i++) {
                if (!memory_chunk[i].size)
                        continue;
                seg = kzalloc(sizeof(*seg), GFP_KERNEL);
                if (!seg)
                        panic("Out of memory...\n");
                seg->start = memory_chunk[i].addr;
                seg->size = memory_chunk[i].size;
                insert_memory_segment(seg);
        }
        mutex_unlock(&vmem_mutex);
        return 0;
}

core_initcall(vmem_convert_memory_chunk);