/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <sysdep/tls.h>
static inline int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
	return syscall(__NR_modify_ldt, func, ptr, bytecount);
}
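
/*
 * write_ldt_entry() pushes one descriptor change into the host process
 * backing this mm: the descriptor is copied into the stub data page and
 * modify_ldt() is then run inside the target address space via the
 * syscall stub.
 */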
static long write_ldt_entry(struct mm_id *mm_idp, int func,
			    struct user_desc *desc, void **addr, int done)
	res = syscall_stub_data(mm_idp, (unsigned long *)desc,
				(sizeof(*desc) + sizeof(long) - 1) &
		unsigned long args[] = { func,
					 (unsigned long)stub_addr,
		res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
/*
 * In skas mode, we hold our own LDT data in UML.
 * Thus, the code implementing sys_modify_ldt_skas
 * is very similar to (and mostly stolen from) sys_modify_ldt
 * in arch/i386/kernel/ldt.c
 * The routines copied and modified in part are:
 * - sys_modify_ldt_skas
 */
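
/*
 * read_ldt() copies UML's shadow LDT back to user space: small tables
 * live in the inline ldt->u.entries array, larger ones in the per-page
 * buffers in ldt->u.pages.
 */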
static int read_ldt(void __user *ptr, unsigned long bytecount)
	uml_ldt_t *ldt = &current->mm->context.arch.ldt;
	if (!ldt->entry_count)
	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
	mutex_lock(&ldt->lock);
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
		size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
		if (copy_to_user(ptr, ldt->u.entries, size))
	for (i = 0; i < ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
		if (copy_to_user(ptr, ldt->u.pages[i], size)) {
	mutex_unlock(&ldt->lock);
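	/* If anything is left to report, zero the remainder of the user buffer. */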
	if (bytecount == 0 || err == -EFAULT)
	if (clear_user(ptr, bytecount))
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
	if (bytecount > 5*LDT_ENTRY_SIZE)
		bytecount = 5*LDT_ENTRY_SIZE;
	/*
	 * UML doesn't support lcall7 and lcall27.
	 * So, we don't really have a default LDT, but emulate
	 * an empty LDT of the common host default LDT size.
	 */
	if (clear_user(ptr, bytecount))
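
/*
 * write_ldt() implements the modify_ldt() write calls (func 1 and 0x11):
 * the descriptor is validated, installed on the host via write_ldt_entry(),
 * and then mirrored into UML's shadow table so later reads don't have to
 * ask the host.
 */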
static int write_ldt(void __user *ptr, unsigned long bytecount, int func)
	uml_ldt_t *ldt = &current->mm->context.arch.ldt;
	struct mm_id *mm_idp = &current->mm->context.id;
	struct user_desc ldt_info;
	struct ldt_entry entry0, *ldt_p;
	if (bytecount != sizeof(ldt_info))
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
	if (ldt_info.entry_number >= LDT_ENTRIES)
	if (ldt_info.contents == 3) {
		if (ldt_info.seg_not_present == 0)
	mutex_lock(&ldt->lock);
	err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
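	/*
	 * The host entry is written first; only then is the shadow table
	 * updated, growing from the inline array to page-sized tables once
	 * entry_number passes LDT_DIRECT_ENTRIES.
	 */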
	if (ldt_info.entry_number >= ldt->entry_count &&
	    ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
		for (i = ldt->entry_count/LDT_ENTRIES_PER_PAGE;
		     i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
				memcpy(&entry0, ldt->u.entries,
			ldt->u.pages[i] = (struct ldt_entry *)
				__get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!ldt->u.pages[i]) {
				/* Undo the change in host */
				memset(&ldt_info, 0, sizeof(ldt_info));
				write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
				memcpy(ldt->u.pages[0], &entry0,
				memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
				       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
			ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
	if (ldt->entry_count <= ldt_info.entry_number)
		ldt->entry_count = ldt_info.entry_number + 1;
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
		ldt_p = ldt->u.entries + ldt_info.entry_number;
		ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
			ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;
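	/*
	 * A zero base and limit (with func == 1, or an LDT_empty descriptor)
	 * clears the shadow slot; otherwise the encoded a/b words are stored.
	 */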
	if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
	    (func == 1 || LDT_empty(&ldt_info))) {
			ldt_info.useable = 0;
		ldt_p->a = LDT_entry_a(&ldt_info);
		ldt_p->b = LDT_entry_b(&ldt_info);
	mutex_unlock(&ldt->lock);
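
/*
 * do_modify_ldt_skas() dispatches on func: 0 and 2 are the read calls,
 * 1 and 0x11 are the write calls.
 */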
static long do_modify_ldt_skas(int func, void __user *ptr,
			       unsigned long bytecount)
		ret = read_ldt(ptr, bytecount);
		ret = write_ldt(ptr, bytecount, func);
		ret = read_default_ldt(ptr, bytecount);

static DEFINE_SPINLOCK(host_ldt_lock);
static short dummy_list[9] = {0, -1};
static short *host_ldt_entries = NULL;
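
/*
 * ldt_get_host_info() reads the host's LDT once and records which slots
 * are in use in host_ldt_entries, so init_new_ldt() can clear the entries
 * a new address space inherits from the host.
 */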
static void ldt_get_host_info(void)
	struct ldt_entry *ldt;
	int i, size, k, order;
	spin_lock(&host_ldt_lock);
	if (host_ldt_entries != NULL) {
		spin_unlock(&host_ldt_lock);
	host_ldt_entries = dummy_list+1;
	spin_unlock(&host_ldt_lock);
	for (i = LDT_PAGES_MAX-1, order = 0; i; i >>= 1, order++)
	ldt = (struct ldt_entry *)
		__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
		printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
	ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
		printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
		/* default_ldt is active, simply write an empty entry 0 */
		host_ldt_entries = dummy_list;
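	/* Count the non-empty host slots and build the list of their indices. */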
	for (i = 0, size = 0; i < ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
	if (size < ARRAY_SIZE(dummy_list))
		host_ldt_entries = dummy_list;
		size = (size + 1) * sizeof(dummy_list[0]);
		tmp = kmalloc(size, GFP_KERNEL);
			printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
		host_ldt_entries = tmp;
	for (i = 0, k = 0; i < ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			host_ldt_entries[k++] = i;
	host_ldt_entries[k] = -1;
	free_pages((unsigned long)ldt, order);
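
/*
 * init_new_ldt() sets up the shadow LDT for a new mm. Without a parent mm
 * to copy from, the LDT entries inherited from the host are cleared;
 * otherwise the parent's shadow table is duplicated.
 */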
long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
	struct user_desc desc;
	mutex_init(&new_mm->arch.ldt.lock);
	memset(&desc, 0, sizeof(desc));
	/*
	 * Now we try to retrieve info about the LDT we inherited from
	 * the host. All LDT entries found will be reset in the
	 * following loop.
	 */
	for (num_p = host_ldt_entries; *num_p != -1; num_p++) {
		desc.entry_number = *num_p;
		err = write_ldt_entry(&new_mm->id, 1, &desc,
				      &addr, *(num_p + 1) == -1);
	new_mm->arch.ldt.entry_count = 0;
	/*
	 * Our local LDT is used to supply the data for
	 * modify_ldt(READLDT) when PTRACE_LDT isn't available,
	 * i.e. when we have to use the stub for modify_ldt, which
	 * can't handle the big read buffer of up to 64 kB.
	 */
	mutex_lock(&from_mm->arch.ldt.lock);
	if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
		memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
		       sizeof(new_mm->arch.ldt.u.entries));
		i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
			page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
			new_mm->arch.ldt.u.pages[i] =
				(struct ldt_entry *) page;
			memcpy(new_mm->arch.ldt.u.pages[i],
			       from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
	new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
	mutex_unlock(&from_mm->arch.ldt.lock);
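
/*
 * free_ldt() releases the page-sized shadow tables, if the LDT ever grew
 * past LDT_DIRECT_ENTRIES.
 */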
void free_ldt(struct mm_context *mm)
	if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
		i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
			free_page((long) mm->arch.ldt.u.pages[i]);
	mm->arch.ldt.entry_count = 0;
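
/* sys_modify_ldt(): UML's modify_ldt system call simply uses the skas path. */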
int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
	return do_modify_ldt_skas(func, ptr, bytecount);
}