/*
 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "linux/rbtree.h"
#include "linux/slab.h"
#include "linux/vmalloc.h"
#include "linux/bootmem.h"
#include "linux/module.h"
#include "linux/pfn.h"
#include "asm/types.h"
#include "asm/pgtable.h"
#include "kern_util.h"
#include "as-layout.h"
#include "user_util.h"
#include "mode_kern.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"
#include "kern.h"
#include "init.h"

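/*
 * Each phys_desc records one page of UML "physical" memory whose backing has
 * been substituted: instead of the physmem file, the page is mapped from some
 * other descriptor (a block device backing file, going by the panic message
 * in remove_mapping() below).  Descriptors are keyed by virtual address in
 * the phys_mappings rbtree and also chained, per file descriptor, onto a
 * desc_mapping's page list.
 */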
struct phys_desc {
        struct rb_node rb;
        int fd;
        __u64 offset;
        void *virt;
        unsigned long phys;
        struct list_head list;
};

static struct rb_root phys_mappings = RB_ROOT;

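/*
 * Find the rbtree link for @virt.  If a descriptor for @virt exists, the
 * returned pointer refers to its node; otherwise it refers to the empty slot
 * where such a node would be linked in.  Callers check *result for NULL to
 * tell the two cases apart.
 */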
static struct rb_node **find_rb(void *virt)
{
        struct rb_node **n = &phys_mappings.rb_node;
        struct phys_desc *d;

        while(*n != NULL){
                d = rb_entry(*n, struct phys_desc, rb);
                if(d->virt == virt)
                        return n;

                if(d->virt > virt)
                        n = &(*n)->rb_left;
                else
                        n = &(*n)->rb_right;
        }

        return n;
}

static struct phys_desc *find_phys_mapping(void *virt)
{
        struct rb_node **n = find_rb(virt);

        if(*n == NULL)
                return NULL;

        return rb_entry(*n, struct phys_desc, rb);
}

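/*
 * Insert @desc into phys_mappings, keyed by its virtual address.  The walk is
 * done here (rather than through find_rb()) so that the parent node is known
 * when linking; a duplicate address is treated as a fatal bug.
 */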
static void insert_phys_mapping(struct phys_desc *desc)
{
        struct rb_node **n = &phys_mappings.rb_node, *parent = NULL;
        struct phys_desc *d;

        while(*n != NULL){
                parent = *n;
                d = rb_entry(*n, struct phys_desc, rb);
                if(d->virt == desc->virt)
                        panic("Physical remapping for %p already present",
                              desc->virt);
                if(d->virt > desc->virt)
                        n = &(*n)->rb_left;
                else
                        n = &(*n)->rb_right;
        }

        rb_link_node(&desc->rb, parent, n);
        rb_insert_color(&desc->rb, &phys_mappings);
}

LIST_HEAD(descriptor_mappings);

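/*
 * One desc_mapping exists for each host file descriptor that currently backs
 * substituted pages.  It hangs off descriptor_mappings and collects its
 * phys_desc pages so that physmem_forget_descriptor() can undo them all when
 * the descriptor is forgotten.
 */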
struct desc_mapping {
        int fd;
        struct list_head list;
        struct list_head pages;
};

static struct desc_mapping *find_mapping(int fd)
{
        struct desc_mapping *desc;
        struct list_head *ele;

        list_for_each(ele, &descriptor_mappings){
                desc = list_entry(ele, struct desc_mapping, list);
                if(desc->fd == fd)
                        return desc;
        }

        return NULL;
}

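/*
 * Return the desc_mapping for @fd, creating and registering one if it
 * doesn't exist yet.  The allocation uses GFP_ATOMIC, presumably because
 * callers can't sleep here; returns NULL if the allocation fails.
 */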
static struct desc_mapping *descriptor_mapping(int fd)
{
        struct desc_mapping *desc;

        desc = find_mapping(fd);
        if(desc != NULL)
                return desc;

        desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
        if(desc == NULL)
                return NULL;

        *desc = ((struct desc_mapping)
                { .fd =         fd,
                  .list =       LIST_HEAD_INIT(desc->list),
                  .pages =      LIST_HEAD_INIT(desc->pages) });
        list_add(&desc->list, &descriptor_mappings);

        return desc;
}

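/*
 * Replace the page backing @virt: map PAGE_SIZE bytes at @offset of @fd over
 * it (writably if @w is set) and record the substitution so that
 * phys_mapping() and the teardown paths know about it.  Returns 0 on success
 * or a negative errno from the mapping attempt.
 */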
int physmem_subst_mapping(void *virt, int fd, __u64 offset, int w)
{
        struct desc_mapping *fd_maps;
        struct phys_desc *desc;
        unsigned long phys;
        int err;

        fd_maps = descriptor_mapping(fd);
        if(fd_maps == NULL)
                return -ENOMEM;

        phys = __pa(virt);
        desc = find_phys_mapping(virt);
        if(desc != NULL)
                panic("Address 0x%p is already substituted\n", virt);

        err = -ENOMEM;
        desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
        if(desc == NULL)
                goto out;

        *desc = ((struct phys_desc)
                { .fd =                 fd,
                  .offset =             offset,
                  .virt =               virt,
                  .phys =               __pa(virt),
                  .list =               LIST_HEAD_INIT(desc->list) });
        insert_phys_mapping(desc);

        list_add(&desc->list, &fd_maps->pages);

        virt = (void *) ((unsigned long) virt & PAGE_MASK);
        err = os_map_memory(virt, fd, offset, PAGE_SIZE, 1, w, 0);
        if(!err)
                goto out;

        /* The map failed - back out the tree and page list insertions before
         * freeing the descriptor.
         */
        rb_erase(&desc->rb, &phys_mappings);
        list_del(&desc->list);
        kfree(desc);
 out:
        return err;
}

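/* Host fd backing UML's "physical" memory file; created in setup_physmem(). */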
static int physmem_fd = -1;

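/*
 * Drop a substitution: unlink and free the descriptor, then map the normal
 * physmem page back over the virtual address.  Failure to restore the
 * mapping is fatal.
 */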
static void remove_mapping(struct phys_desc *desc)
{
        void *virt = desc->virt;
        int err;

        rb_erase(&desc->rb, &phys_mappings);
        list_del(&desc->list);
        kfree(desc);

        err = os_map_memory(virt, physmem_fd, __pa(virt), PAGE_SIZE, 1, 1, 0);
        if(err)
                panic("Failed to unmap block device page from physical memory, "
                      "errno = %d", -err);
}

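/*
 * Remove any substitution covering @virt (rounded down to a page boundary).
 * Returns 1 if a mapping was removed, 0 if there was none.
 */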
int physmem_remove_mapping(void *virt)
{
        struct phys_desc *desc;

        virt = (void *) ((unsigned long) virt & PAGE_MASK);
        desc = find_phys_mapping(virt);
        if(desc == NULL)
                return 0;

        remove_mapping(desc);
        return 1;
}

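/*
 * Tear down every substitution backed by @fd.  Each page is remapped to
 * normal physmem and then refilled by reading the old contents back from
 * @fd, presumably so the data remains intact once the descriptor goes away.
 */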
void physmem_forget_descriptor(int fd)
{
        struct desc_mapping *desc;
        struct phys_desc *page;
        struct list_head *ele, *next;
        __u64 offset;
        void *addr;
        int err;

        desc = find_mapping(fd);
        if(desc == NULL)
                return;

        list_for_each_safe(ele, next, &desc->pages){
                page = list_entry(ele, struct phys_desc, list);
                offset = page->offset;
                addr = page->virt;
                remove_mapping(page);
                err = os_seek_file(fd, offset);
                if(err)
                        panic("physmem_forget_descriptor - failed to seek "
                              "to %lld in fd %d, error = %d\n",
                              offset, fd, -err);
                err = os_read_file(fd, addr, PAGE_SIZE);
                if(err < 0)
                        panic("physmem_forget_descriptor - failed to read "
                              "from fd %d to 0x%p, error = %d\n",
                              fd, addr, -err);
        }

        list_del(&desc->list);
        kfree(desc);
}

EXPORT_SYMBOL(physmem_forget_descriptor);
EXPORT_SYMBOL(physmem_remove_mapping);
EXPORT_SYMBOL(physmem_subst_mapping);

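/*
 * Called when pages are freed; any substitutions covering the freed pages
 * are removed so they revert to ordinary physmem backing.
 */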
void arch_free_page(struct page *page, int order)
{
        void *virt;
        int i;

        for(i = 0; i < (1 << order); i++){
                virt = __va(page_to_phys(page + i));
                physmem_remove_mapping(virt);
        }
}

int is_remapped(void *virt)
{
        struct phys_desc *desc = find_phys_mapping(virt);

        return desc != NULL;
}

/* Changed during early boot */
unsigned long high_physmem;

extern unsigned long long physmem_size;

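/*
 * Allocate and initialize the struct page array covering physical memory,
 * iomem and highmem.  Uses kmalloc/vmalloc once the slab is up (kmalloc_ok),
 * bootmem otherwise.  Every page starts out reserved, and max_mapnr is set
 * to the total page count.
 */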
int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
{
        struct page *p, *map;
        unsigned long phys_len, phys_pages, highmem_len, highmem_pages;
        unsigned long iomem_len, iomem_pages, total_len, total_pages;
        int i;

        phys_pages = physmem >> PAGE_SHIFT;
        phys_len = phys_pages * sizeof(struct page);

        iomem_pages = iomem >> PAGE_SHIFT;
        iomem_len = iomem_pages * sizeof(struct page);

        highmem_pages = highmem >> PAGE_SHIFT;
        highmem_len = highmem_pages * sizeof(struct page);

        total_pages = phys_pages + iomem_pages + highmem_pages;
        total_len = phys_len + iomem_len + highmem_len;

        if(kmalloc_ok){
                map = kmalloc(total_len, GFP_KERNEL);
                if(map == NULL)
                        map = vmalloc(total_len);
        }
        else map = alloc_bootmem_low_pages(total_len);

        if(map == NULL)
                return -ENOMEM;

        for(i = 0; i < total_pages; i++){
                p = &map[i];
                memset(p, 0, sizeof(struct page));
                SetPageReserved(p);
                INIT_LIST_HEAD(&p->lru);
        }

        max_mapnr = total_pages;
        return 0;
}

/* Changed during early boot */
static unsigned long kmem_top = 0;

unsigned long get_kmem_end(void)
{
        if(kmem_top == 0)
                kmem_top = CHOOSE_MODE(kmem_end_tt, kmem_end_skas);
        return kmem_top;
}

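/*
 * Map @len bytes of physical memory at @phys to the virtual address @virt
 * with the given protections, using whatever descriptor and offset
 * phys_mapping() reports for @phys.  Any failure is fatal.
 */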
void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
                int r, int w, int x)
{
        __u64 offset;
        int fd, err;

        fd = phys_mapping(phys, &offset);
        err = os_map_memory((void *) virt, fd, offset, len, r, w, x);
        if(err) {
                if(err == -ENOMEM)
                        printk("try increasing the host's "
                               "/proc/sys/vm/max_map_count to <physical "
                               "memory size>/4096\n");
                panic("map_memory(0x%lx, %d, 0x%llx, %ld, %d, %d, %d) failed, "
                      "err = %d\n", virt, fd, offset, len, r, w, x, err);
        }
}

extern int __syscall_stub_start;

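/*
 * Early-boot setup of the physical memory file: create it, map the part
 * above uml_reserved over the UML address space, write out the syscall stub
 * page (which userspace processes will map from this file), and hand the
 * remaining pages to the bootmem allocator.
 */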
void setup_physmem(unsigned long start, unsigned long reserve_end,
                   unsigned long len, unsigned long long highmem)
{
        unsigned long reserve = reserve_end - start;
        int pfn = PFN_UP(__pa(reserve_end));
        int delta = (len - reserve) >> PAGE_SHIFT;
        int err, offset, bootmap_size;

        physmem_fd = create_mem_file(len + highmem);

        offset = uml_reserved - uml_physmem;
        err = os_map_memory((void *) uml_reserved, physmem_fd, offset,
                            len - offset, 1, 1, 0);
        if(err < 0){
                os_print_error(err, "Mapping memory");
                exit(1);
        }

        /* Special kludge - This page will be mapped into userspace processes
         * from physmem_fd, so it needs to be written out there.
         */
        os_seek_file(physmem_fd, __pa(&__syscall_stub_start));
        os_write_file(physmem_fd, &__syscall_stub_start, PAGE_SIZE);

        bootmap_size = init_bootmem(pfn, pfn + delta);
        free_bootmem(__pa(reserve_end) + bootmap_size,
                     len - bootmap_size - reserve);
}

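/*
 * Translate a physical address into the host descriptor and offset that back
 * it: a substituted page's descriptor, the physmem file, an iomem region's
 * file, or the physmem file again for highmem.  Returns -1 if nothing backs
 * the address, in which case *offset_out is left untouched.
 */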
int phys_mapping(unsigned long phys, __u64 *offset_out)
{
        struct phys_desc *desc = find_phys_mapping(__va(phys & PAGE_MASK));
        int fd = -1;

        if(desc != NULL){
                fd = desc->fd;
                *offset_out = desc->offset;
        }
        else if(phys < physmem_size){
                fd = physmem_fd;
                *offset_out = phys;
        }
        else if(phys < __pa(end_iomem)){
                struct iomem_region *region = iomem_regions;

                while(region != NULL){
                        if((phys >= region->phys) &&
                           (phys < region->phys + region->size)){
                                fd = region->fd;
                                *offset_out = phys - region->phys;
                                break;
                        }
                        region = region->next;
                }
        }
        else if(phys < __pa(end_iomem) + highmem){
                fd = physmem_fd;
                *offset_out = phys - iomem_size;
        }

        return fd;
}

static int __init uml_mem_setup(char *line, int *add)
{
        char *retptr;
        physmem_size = memparse(line,&retptr);
        return 0;
}
__uml_setup("mem=", uml_mem_setup,
"mem=<Amount of desired ram>\n"
"    This controls how much \"physical\" memory the kernel allocates\n"
"    for the system. The size is specified as a number followed by\n"
"    one of 'k', 'K', 'm', 'M', which have the obvious meanings.\n"
"    This is not related to the amount of memory in the host.  It can\n"
"    be more, and the excess, if it's ever used, will just be swapped out.\n"
"       Example: mem=64M\n\n"
);

extern int __init parse_iomem(char *str, int *add);

__uml_setup("iomem=", parse_iomem,
"iomem=<name>,<file>\n"
"    Configure <file> as an IO memory region named <name>.\n\n"
);

/*
 * This list is constructed in parse_iomem and addresses filled in in
 * setup_iomem, both of which run during early boot.  Afterwards, it's
 * unchanged.
 */
struct iomem_region *iomem_regions = NULL;

/* Initialized in parse_iomem */
int iomem_size = 0;

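/*
 * Look up the iomem region registered under @driver and return its virtual
 * address, storing the size in *len_out.  Returns 0 if no such region exists.
 */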
unsigned long find_iomem(char *driver, unsigned long *len_out)
{
        struct iomem_region *region = iomem_regions;

        while(region != NULL){
                if(!strcmp(region->driver, driver)){
                        *len_out = region->size;
                        return region->virt;
                }

                region = region->next;
        }

        return 0;
}

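/*
 * Map each registered iomem region into the address space just above
 * high_physmem, recording its virtual and physical addresses in the region
 * structure.  Runs as an initcall.
 */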
int setup_iomem(void)
{
        struct iomem_region *region = iomem_regions;
        unsigned long iomem_start = high_physmem + PAGE_SIZE;
        int err;

        while(region != NULL){
                err = os_map_memory((void *) iomem_start, region->fd, 0,
                                    region->size, 1, 1, 0);
                if(err)
                        printk("Mapping iomem region for driver '%s' failed, "
                               "errno = %d\n", region->driver, -err);
                else {
                        region->virt = iomem_start;
                        region->phys = __pa(region->virt);
                }

                iomem_start += region->size + PAGE_SIZE;
                region = region->next;
        }

        return 0;
}

__initcall(setup_iomem);