/* Support for MMIO probes.
 * Borrows much code from kprobes.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/errno.h>
#include <asm/debugreg.h>
#include <linux/mmiotrace.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)
struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long page; /* location of the fault page */
	pteval_t old_presence; /* page presence prior to arming */
	bool armed;

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU) and post_kmmio_handler().
	 * Protected by kmmio_lock, when linked into kmmio_page_table.
	 */
	int count;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};
struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);
static struct list_head *kmmio_page_list(unsigned long page)
{
	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);
/*
 * This is basically a dynamic stabbing problem: given a point (the faulting
 * address), find all intervals (probes) that contain it. We could use the
 * existing prio tree code. Possibly better implementations:
 * "The Interval Skip List: A Data Structure for Finding All Intervals That
 * Overlap a Point" (might be simple)
 * "Space Efficient Dynamic Stabbing with Fast Queries" - Mikkel Thorup
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;

	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}
/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
	struct list_head *head;
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	head = kmmio_page_list(page);
	list_for_each_entry_rcu(f, head, list) {
		if (f->page == page)
			return f;
	}
	return NULL;
}
static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
	pmdval_t v = pmd_val(*pmd);

	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else	/* presume this has been called with clear==true previously */
		v |= *old;
	set_pmd(pmd, __pmd(v));
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
	pteval_t v = pte_val(*pte);

	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else	/* presume this has been called with clear==true previously */
		v |= *old;
	set_pte_atomic(pte, __pte(v));
}

static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
	unsigned int level;
	pte_t *pte = lookup_address(f->page, &level);

	if (!pte) {
		pr_err("no pte for page 0x%08lx\n", f->page);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence);
		break;
	case PG_LEVEL_4K:
		clear_pte_presence(pte, clear, &f->old_presence);
		break;
	default:
		pr_err("unexpected page level 0x%x.\n", level);
		return -1;
	}

	__flush_tlb_one(f->page);
	return 0;
}
/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. RCU read lock is assumed held, so the struct
 * will not disappear unexpectedly. Furthermore, the caller must guarantee
 * that double arming the same virtual address (page) cannot occur.
 *
 * Double disarming on the other hand is allowed, and may occur when a fault
 * and mmiotrace shutdown happen simultaneously.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;

	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
	if (f->armed) {
		pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
			   f->page, f->count, !!f->old_presence);
	}
	ret = clear_page_presence(f, true);
	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
		  f->page);
	f->armed = true;
	return ret;
}
/* Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret = clear_page_presence(f, false);

	WARN_ONCE(ret < 0,
			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
	f->armed = false;
}
/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could already be executing
 * inside a kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry as the page fault is taken through an
 * interrupt gate, and they remain disabled throughout this function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0; /* default to fault not handled */

	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run. We also hold the RCU read lock over single
	 * stepping to avoid looking up the probe and kmmio_fault_page
	 * again.
	 */
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(addr);
	if (!faultpage) {
		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		if (addr == ctx->addr) {
			/*
			 * A second fault on the same page means some other
			 * condition needs handling by do_page_fault(); the
			 * page really not being present is the most common.
			 */
			pr_debug("secondary hit for 0x%08lx CPU %d.\n",
				 addr, smp_processor_id());

			if (!faultpage->old_presence)
				pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
					addr, smp_processor_id());
		} else {
			/*
			 * Prevent overwriting an already in-flight context.
			 * This should not happen; let's hope disarming at
			 * least prevents a panic.
			 */
			pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
				 smp_processor_id(), addr);
			pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
			disarm_kmmio_fault_page(faultpage);
		}
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(addr);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = addr;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Now we set present bit in PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage);

	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */

	put_cpu_var(kmmio_ctx);
	return 1; /* fault handled */

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate
 * and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active) {
		/*
		 * Debug traps without an active context are due to either
		 * something external causing them (e.g. using a debugger
		 * while mmio tracing is enabled), or erroneous behaviour.
		 */
		pr_warning("unexpected debug trap on CPU %d.\n",
			   smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	/* Prevent racing against release_kmmio_fault_page(). */
	spin_lock(&kmmio_lock);
	if (ctx->fpage->count)
		arm_kmmio_fault_page(ctx->fpage);
	spin_unlock(&kmmio_lock);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}
/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->page = page;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->page));

	return 0;
}
/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long page,
				struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		f->release_next = *release_list;
		*release_list = f;
	}
}
/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. The events may result only from programming
 * mistakes by accessing addresses before the beginning or past the end of a
 * mapping.
 */
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(p->addr + size))
			pr_err("Unable to set page fault.\n");
		size += PAGE_SIZE;
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);
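
/*
 * Illustrative sketch (not part of the original file, compiled out on
 * purpose): a minimal client of register_kmmio_probe(). The names my_pre,
 * my_post, my_probe and my_trace_setup are hypothetical; struct kmmio_probe
 * and the handler signatures come from <linux/mmiotrace.h>.
 */
#if 0
static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
		   unsigned long addr)
{
	/* Called before the faulting MMIO access is single-stepped. */
	pr_info("MMIO hit at 0x%08lx\n", addr);
}

static void my_post(struct kmmio_probe *p, unsigned long condition,
		    struct pt_regs *regs)
{
	/* Called after the access has completed under single-stepping. */
}

static struct kmmio_probe my_probe;

static int my_trace_setup(void __iomem *mem, unsigned long len)
{
	my_probe.addr = (unsigned long)mem; /* page-unaligned is allowed */
	my_probe.len = len;
	my_probe.pre_handler = my_pre;
	my_probe.post_handler = my_post;
	return register_kmmio_probe(&my_probe);
}
#endif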
static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *f = dr->release_list;

	while (f) {
		struct kmmio_fault_page *next = f->release_next;
		BUG_ON(f->count);
		kfree(f);
		f = next;
	}
	kfree(dr);
}
static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr =
		container_of(head, struct kmmio_delayed_release, rcu);
	struct kmmio_fault_page *f = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (f) {
		if (!f->count) {
			list_del_rcu(&f->list);
			prevp = &f->release_next;
		} else {
			/* A probe was re-added meanwhile; keep the page. */
			*prevp = f->release_next;
		}
		f = f->release_next;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);

	/* This is the real RCU destroy call. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}
/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * you may actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, after another RCU
 *    grace period.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(p->addr + size, &release_list);
		size += PAGE_SIZE;
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;

	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table,
	 * because a probe hit might be in flight on another CPU. The
	 * pages are collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. RCU grace period sounds like a
	 * good choice.
	 *
	 * If we removed the pages too early, the kmmio page fault handler
	 * might not find the respective kmmio_fault_page and decide it's
	 * not a kmmio fault, when it actually is. This would lead to
	 * madness.
	 */
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);
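
/*
 * Illustrative sketch (not part of the original file, compiled out on
 * purpose): safe teardown for the hypothetical my_probe registered in the
 * example above. As the comment preceding unregister_kmmio_probe() requires,
 * an RCU grace period must pass before the struct may be reused or freed.
 */
#if 0
static void my_trace_teardown(void)
{
	unregister_kmmio_probe(&my_probe);
	/* Wait until in-flight pre/post handler callbacks have finished. */
	synchronize_rcu();
	/* Only now may my_probe be freed or reused. */
}
#endif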
static int
kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
{
	struct die_args *arg = args;
	unsigned long *dr6_p = (unsigned long *)ERR_PTR(arg->err);

	if (val == DIE_DEBUG && (*dr6_p & DR_STEP))
		if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
			/*
			 * Reset the BS bit in dr6 (pointed by args->err) to
			 * denote completion of processing.
			 */
			*dr6_p &= ~DR_STEP;
			return NOTIFY_STOP;
		}

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};
int kmmio_init(void)
{
	int i;

	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);

	return register_die_notifier(&nb_die);
}
void kmmio_cleanup(void)
{
	int i;

	unregister_die_notifier(&nb_die);
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) {
		WARN_ONCE(!list_empty(&kmmio_page_table[i]),
			KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");
	}
}