/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *      Srikar Dronamraju
 *      Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>      /* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>         /* anon_vma_prepare */
#include <linux/mmu_notifier.h> /* set_pte_at_notify */
#include <linux/swap.h>         /* try_to_free_swap */
#include <linux/ptrace.h>       /* user_enable_single_step */
#include <linux/kdebug.h>       /* notifier mechanism */
#include "../../mm/internal.h"  /* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE                 (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS            UINSNS_PER_PAGE
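
/*
 * Worked example (illustrative): with 4096-byte pages and the x86
 * UPROBE_XOL_SLOT_BYTES of 128, UINSNS_PER_PAGE = 4096 / 128 = 32, so a
 * single XOL page holds 32 out-of-line slots, one of which is reserved
 * for the uretprobe trampoline (see __create_xol_area()).
 */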

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * Allows us to skip uprobe_mmap() if there are no uprobe events active
 * at this time. Probably a fine-grained per-inode count would be better?
 */
#define no_uprobe_events()      RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);       /* serialize rbtree access */

#define UPROBES_HASH_SZ 13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)    (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

static struct percpu_rw_semaphore dup_mmap_sem;

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN        0

struct uprobe {
        struct rb_node          rb_node;        /* node in the rb tree */
        atomic_t                ref;
        struct rw_semaphore     register_rwsem;
        struct rw_semaphore     consumer_rwsem;
        struct list_head        pending_list;
        struct uprobe_consumer  *consumers;
        struct inode            *inode;         /* Also hold a ref to inode */
        loff_t                  offset;
        unsigned long           flags;

        /*
         * The generic code assumes that it has two members of unknown type
         * owned by the arch-specific code:
         *
         *      insn -  copy_insn() saves the original instruction here for
         *              arch_uprobe_analyze_insn().
         *
         *      ixol -  potentially modified instruction to execute out of
         *              line, copied to xol_area by xol_get_insn_slot().
         */
        struct arch_uprobe      arch;
};

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, a thread contends for a slot. It frees the
 * slot after singlestep. Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
        wait_queue_head_t       wq;             /* if all slots are busy */
        atomic_t                slot_count;     /* number of in-use slots */
        unsigned long           *bitmap;        /* 0 = free slot */
        struct page             *pages[2];

        /*
         * We keep the vma's vm_start rather than a pointer to the vma
         * itself.  The probed process or a naughty kernel module could make
         * the vma go away, and we must handle that reasonably gracefully.
         */
        unsigned long           vaddr;          /* Page(s) of instruction slots */
};

/*
 * valid_vma: Verify if the specified vma is an executable vma.
 * Relax restrictions while unregistering: vm_flags might have
 * changed after the breakpoint was inserted.
 *      - is_register: indicates if we are in register context.
 *      - Return true if the specified virtual address is in an
 *        executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
        vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

        if (is_register)
                flags |= VM_WRITE;

        return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}
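
/*
 * Example (illustrative): a typical private r-xp mapping of an ELF text
 * segment has VM_MAYEXEC set and VM_WRITE, VM_MAYSHARE and VM_HUGETLB
 * clear, so (vm_flags & flags) == VM_MAYEXEC holds and the vma is
 * accepted. A writable mapping fails the test while registering, since
 * is_register adds VM_WRITE to the mask.
 */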

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
        return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
        return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
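
/*
 * Worked example (illustrative numbers): a vma with vm_start = 0x400000
 * and vm_pgoff = 2 maps the file from offset 0x2000 (with 4K pages).
 * Then offset_to_vaddr(vma, 0x2f00) = 0x400000 + 0x2f00 - 0x2000 =
 * 0x400f00, and vaddr_to_offset(vma, 0x400f00) = 0x2000 + 0xf00 =
 * 0x2f00; the two helpers are inverses of each other.
 */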

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @page:     the COWed page we are replacing with @kpage
 * @kpage:    the modified page that replaces @page
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
                                struct page *page, struct page *kpage)
{
        struct mm_struct *mm = vma->vm_mm;
        spinlock_t *ptl;
        pte_t *ptep;
        int err;
        /* For mmu_notifiers */
        const unsigned long mmun_start = addr;
        const unsigned long mmun_end   = addr + PAGE_SIZE;
        struct mem_cgroup *memcg;

        err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg);
        if (err)
                return err;

        /* For try_to_free_swap() and munlock_vma_page() below */
        lock_page(page);

        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        err = -EAGAIN;
        ptep = page_check_address(page, mm, addr, &ptl, 0);
        if (!ptep) {
                /* drop the charge taken above; @kpage was not installed */
                mem_cgroup_cancel_charge(kpage, memcg);
                goto unlock;
        }

        get_page(kpage);
        page_add_new_anon_rmap(kpage, vma, addr);
        mem_cgroup_commit_charge(kpage, memcg, false);
        lru_cache_add_active_or_unevictable(kpage, vma);

        if (!PageAnon(page)) {
                dec_mm_counter(mm, MM_FILEPAGES);
                inc_mm_counter(mm, MM_ANONPAGES);
        }

        flush_cache_page(vma, addr, pte_pfn(*ptep));
        ptep_clear_flush_notify(vma, addr, ptep);
        set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

        page_remove_rmap(page);
        if (!page_mapped(page))
                try_to_free_swap(page);
        pte_unmap_unlock(ptep, ptl);

        if (vma->vm_flags & VM_LOCKED)
                munlock_vma_page(page);
        put_page(page);

        err = 0;
 unlock:
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
        unlock_page(page);
        return err;
}

/**
 * is_swbp_insn - check if the instruction is a breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
        return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if the instruction is a trap variant.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a trap instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
        return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
        void *kaddr = kmap_atomic(page);
        memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
        kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
        void *kaddr = kmap_atomic(page);
        memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
        kunmap_atomic(kaddr);
}

static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
        uprobe_opcode_t old_opcode;
        bool is_swbp;

        /*
         * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
         * We do not check if it is any other 'trap variant' which could
         * be a conditional trap instruction such as the one powerpc supports.
         *
         * The logic is that we do not care if the underlying instruction
         * is a trap variant; uprobes always win over any other (gdb)
         * breakpoint.
         */
        copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
        is_swbp = is_swbp_insn(&old_opcode);

        if (is_swbp_insn(new_opcode)) {
                if (is_swbp)            /* register: already installed? */
                        return 0;
        } else {
                if (!is_swbp)           /* unregister: was it changed by us? */
                        return 0;
        }

        return 1;
}
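
/*
 * Summary of verify_opcode() results (illustrative):
 *
 *      new_opcode      old insn        return  meaning
 *      ----------      --------        ------  -------
 *      swbp            swbp            0       register: already installed
 *      swbp            original        1       register: go ahead, write it
 *      original        swbp            1       unregister: restore original
 *      original        original        0       unregister: nothing to undo
 */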

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable-length instructions and the
 * breakpoint instruction is not of the smallest length supported by that
 * architecture, then we need to modify is_trap_at_addr() and
 * uprobe_write_opcode() accordingly. This would never be a problem for archs
 * that have fixed-length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
                        uprobe_opcode_t opcode)
{
        struct page *old_page, *new_page;
        struct vm_area_struct *vma;
        int ret;

retry:
        /* Read the page with vaddr into memory */
        ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
        if (ret <= 0)
                return ret;

        ret = verify_opcode(old_page, vaddr, &opcode);
        if (ret <= 0)
                goto put_old;

        ret = anon_vma_prepare(vma);
        if (ret)
                goto put_old;

        ret = -ENOMEM;
        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
        if (!new_page)
                goto put_old;

        __SetPageUptodate(new_page);
        copy_highpage(new_page, old_page);
        copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

        ret = __replace_page(vma, vaddr, old_page, new_page);
        page_cache_release(new_page);
put_old:
        put_page(old_page);

        if (unlikely(ret == -EAGAIN))
                goto retry;
        return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
        return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
        return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
}

static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
        atomic_inc(&uprobe->ref);
        return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
        if (atomic_dec_and_test(&uprobe->ref))
                kfree(uprobe);
}

static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
        if (l->inode < r->inode)
                return -1;

        if (l->inode > r->inode)
                return 1;

        if (l->offset < r->offset)
                return -1;

        if (l->offset > r->offset)
                return 1;

        return 0;
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
        struct uprobe u = { .inode = inode, .offset = offset };
        struct rb_node *n = uprobes_tree.rb_node;
        struct uprobe *uprobe;
        int match;

        while (n) {
                uprobe = rb_entry(n, struct uprobe, rb_node);
                match = match_uprobe(&u, uprobe);
                if (!match)
                        return get_uprobe(uprobe);

                if (match < 0)
                        n = n->rb_left;
                else
                        n = n->rb_right;
        }
        return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
        struct uprobe *uprobe;

        spin_lock(&uprobes_treelock);
        uprobe = __find_uprobe(inode, offset);
        spin_unlock(&uprobes_treelock);

        return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
        struct rb_node **p = &uprobes_tree.rb_node;
        struct rb_node *parent = NULL;
        struct uprobe *u;
        int match;

        while (*p) {
                parent = *p;
                u = rb_entry(parent, struct uprobe, rb_node);
                match = match_uprobe(uprobe, u);
                if (!match)
                        return get_uprobe(u);

                if (match < 0)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }

        u = NULL;
        rb_link_node(&uprobe->rb_node, parent, p);
        rb_insert_color(&uprobe->rb_node, &uprobes_tree);
        /* get access + creation ref */
        atomic_set(&uprobe->ref, 2);

        return u;
}

/*
 * Acquire uprobes_treelock.
 * If a matching uprobe already exists in the rbtree,
 *      increment its access refcount and return the matching uprobe.
 *
 * If there is no matching uprobe, insert the new uprobe in the rbtree,
 *      get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
        struct uprobe *u;

        spin_lock(&uprobes_treelock);
        u = __insert_uprobe(uprobe);
        spin_unlock(&uprobes_treelock);

        return u;
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
        struct uprobe *uprobe, *cur_uprobe;

        uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
        if (!uprobe)
                return NULL;

        uprobe->inode = igrab(inode);
        uprobe->offset = offset;
        init_rwsem(&uprobe->register_rwsem);
        init_rwsem(&uprobe->consumer_rwsem);

        /* add to uprobes_tree, sorted on inode:offset */
        cur_uprobe = insert_uprobe(uprobe);
        /* a uprobe exists for this inode:offset combination */
        if (cur_uprobe) {
                kfree(uprobe);
                uprobe = cur_uprobe;
                iput(inode);
        }

        return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
        down_write(&uprobe->consumer_rwsem);
        uc->next = uprobe->consumers;
        uprobe->consumers = uc;
        up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc was deleted successfully,
 * false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
        struct uprobe_consumer **con;
        bool ret = false;

        down_write(&uprobe->consumer_rwsem);
        for (con = &uprobe->consumers; *con; con = &(*con)->next) {
                if (*con == uc) {
                        *con = uc->next;
                        ret = true;
                        break;
                }
        }
        up_write(&uprobe->consumer_rwsem);

        return ret;
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
                        void *insn, int nbytes, loff_t offset)
{
        struct page *page;
        /*
         * Ensure that the page that has the original instruction is populated
         * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
         * see uprobe_register().
         */
        if (mapping->a_ops->readpage)
                page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp);
        else
                page = shmem_read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT);
        if (IS_ERR(page))
                return PTR_ERR(page);

        copy_from_page(page, offset, insn, nbytes);
        page_cache_release(page);

        return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
        struct address_space *mapping = uprobe->inode->i_mapping;
        loff_t offs = uprobe->offset;
        void *insn = &uprobe->arch.insn;
        int size = sizeof(uprobe->arch.insn);
        int len, err = -EIO;

        /* Copy only available bytes, -EIO if nothing was read */
        do {
                if (offs >= i_size_read(uprobe->inode))
                        break;

                len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
                err = __copy_insn(mapping, filp, insn, len, offs);
                if (err)
                        break;

                insn += len;
                offs += len;
                size -= len;
        } while (size);

        return err;
}
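
/*
 * Worked example (illustrative): with 4K pages, an instruction that
 * starts at file offset 0xffc with sizeof(arch.insn) == 16 is copied in
 * two steps: the first iteration reads len = 4096 - 0xffc = 4 bytes
 * from the first page, the second reads the remaining 12 bytes from
 * the start of the next page.
 */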

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
                                struct mm_struct *mm, unsigned long vaddr)
{
        int ret = 0;

        if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
                return ret;

        /* TODO: move this into _register, until then we abuse this sem. */
        down_write(&uprobe->consumer_rwsem);
        if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
                goto out;

        ret = copy_insn(uprobe, file);
        if (ret)
                goto out;

        ret = -ENOTSUPP;
        if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
                goto out;

        ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
        if (ret)
                goto out;

        /* uprobe_write_opcode() assumes we don't cross page boundary */
        BUG_ON((uprobe->offset & ~PAGE_MASK) +
                        UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

        smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
        set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
        up_write(&uprobe->consumer_rwsem);

        return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc,
                                   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
        return !uc->filter || uc->filter(uc, ctx, mm);
}

static bool filter_chain(struct uprobe *uprobe,
                         enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
        struct uprobe_consumer *uc;
        bool ret = false;

        down_read(&uprobe->consumer_rwsem);
        for (uc = uprobe->consumers; uc; uc = uc->next) {
                ret = consumer_filter(uc, ctx, mm);
                if (ret)
                        break;
        }
        up_read(&uprobe->consumer_rwsem);

        return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long vaddr)
{
        bool first_uprobe;
        int ret;

        ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
        if (ret)
                return ret;

        /*
         * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
         * the task can hit this breakpoint right after __replace_page().
         */
        first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
        if (first_uprobe)
                set_bit(MMF_HAS_UPROBES, &mm->flags);

        ret = set_swbp(&uprobe->arch, mm, vaddr);
        if (!ret)
                clear_bit(MMF_RECALC_UPROBES, &mm->flags);
        else if (first_uprobe)
                clear_bit(MMF_HAS_UPROBES, &mm->flags);

        return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
        set_bit(MMF_RECALC_UPROBES, &mm->flags);
        return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
        return !RB_EMPTY_NODE(&uprobe->rb_node);
}
/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
        if (WARN_ON(!uprobe_is_active(uprobe)))
                return;

        spin_lock(&uprobes_treelock);
        rb_erase(&uprobe->rb_node, &uprobes_tree);
        spin_unlock(&uprobes_treelock);
        RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
        iput(uprobe->inode);
        put_uprobe(uprobe);
}

struct map_info {
        struct map_info *next;
        struct mm_struct *mm;
        unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
        struct map_info *next = info->next;
        kfree(info);
        return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
        unsigned long pgoff = offset >> PAGE_SHIFT;
        struct vm_area_struct *vma;
        struct map_info *curr = NULL;
        struct map_info *prev = NULL;
        struct map_info *info;
        int more = 0;

 again:
        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                if (!valid_vma(vma, is_register))
                        continue;

                if (!prev && !more) {
                        /*
                         * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
                         * reclaim. This is optimistic, no harm done if it fails.
                         */
                        prev = kmalloc(sizeof(struct map_info),
                                        GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
                        if (prev)
                                prev->next = NULL;
                }
                if (!prev) {
                        more++;
                        continue;
                }

                if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
                        continue;

                info = prev;
                prev = prev->next;
                info->next = curr;
                curr = info;

                info->mm = vma->vm_mm;
                info->vaddr = offset_to_vaddr(vma, offset);
        }
        i_mmap_unlock_read(mapping);

        if (!more)
                goto out;

        prev = curr;
        while (curr) {
                mmput(curr->mm);
                curr = curr->next;
        }

        do {
                info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
                if (!info) {
                        curr = ERR_PTR(-ENOMEM);
                        goto out;
                }
                info->next = prev;
                prev = info;
        } while (--more);

        goto again;
 out:
        while (prev)
                prev = free_map_info(prev);
        return curr;
}

static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
        bool is_register = !!new;
        struct map_info *info;
        int err = 0;

        percpu_down_write(&dup_mmap_sem);
        info = build_map_info(uprobe->inode->i_mapping,
                                        uprobe->offset, is_register);
        if (IS_ERR(info)) {
                err = PTR_ERR(info);
                goto out;
        }

        while (info) {
                struct mm_struct *mm = info->mm;
                struct vm_area_struct *vma;

                if (err && is_register)
                        goto free;

                down_write(&mm->mmap_sem);
                vma = find_vma(mm, info->vaddr);
                if (!vma || !valid_vma(vma, is_register) ||
                    file_inode(vma->vm_file) != uprobe->inode)
                        goto unlock;

                if (vma->vm_start > info->vaddr ||
                    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
                        goto unlock;

                if (is_register) {
                        /* consult only the "caller", new consumer. */
                        if (consumer_filter(new,
                                        UPROBE_FILTER_REGISTER, mm))
                                err = install_breakpoint(uprobe, mm, vma, info->vaddr);
                } else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
                        if (!filter_chain(uprobe,
                                        UPROBE_FILTER_UNREGISTER, mm))
                                err |= remove_breakpoint(uprobe, mm, info->vaddr);
                }

 unlock:
                up_write(&mm->mmap_sem);
 free:
                mmput(mm);
                info = free_map_info(info);
        }
 out:
        percpu_up_write(&dup_mmap_sem);
        return err;
}

static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
        consumer_add(uprobe, uc);
        return register_for_each_vma(uprobe, uc);
}

static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
        int err;

        if (WARN_ON(!consumer_del(uprobe, uc)))
                return;

        err = register_for_each_vma(uprobe, NULL);
        /* TODO: can't unregister? schedule a worker thread */
        if (!uprobe->consumers && !err)
                delete_uprobe(uprobe);
}

/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e., the first consumer for a @inode:@offset
 * tuple).  The creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. The creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters.
 *
 * Return errno if it cannot successfully install probes,
 * else return 0 (success).
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
        struct uprobe *uprobe;
        int ret;

        /* Uprobe must have at least one set consumer */
        if (!uc->handler && !uc->ret_handler)
                return -EINVAL;

        /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
        if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
                return -EIO;
        /* Racy, just to catch the obvious mistakes */
        if (offset > i_size_read(inode))
                return -EINVAL;

 retry:
        uprobe = alloc_uprobe(inode, offset);
        if (!uprobe)
                return -ENOMEM;
        /*
         * We can race with uprobe_unregister()->delete_uprobe().
         * Check uprobe_is_active() and retry if it is false.
         */
        down_write(&uprobe->register_rwsem);
        ret = -EAGAIN;
        if (likely(uprobe_is_active(uprobe))) {
                ret = __uprobe_register(uprobe, uc);
                if (ret)
                        __uprobe_unregister(uprobe, uc);
        }
        up_write(&uprobe->register_rwsem);
        put_uprobe(uprobe);

        if (unlikely(ret == -EAGAIN))
                goto retry;
        return ret;
}
EXPORT_SYMBOL_GPL(uprobe_register);
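
/*
 * Usage sketch (illustrative only; the handler name and the 0x4a0
 * offset are made up):
 *
 *      static int example_handler(struct uprobe_consumer *self,
 *                                 struct pt_regs *regs)
 *      {
 *              return 0;       (0 keeps the probe; UPROBE_HANDLER_REMOVE
 *                               would ask the core to remove it)
 *      }
 *
 *      static struct uprobe_consumer example_uc = {
 *              .handler = example_handler,
 *      };
 *
 *      The inode is typically obtained by resolving the probed binary's
 *      pathname, e.g. via kern_path():
 *
 *      ret = uprobe_register(inode, 0x4a0, &example_uc);
 *      ...
 *      uprobe_unregister(inode, 0x4a0, &example_uc);
 */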

/*
 * uprobe_apply - add or remove the breakpoints of an already registered probe.
 * @inode: the file in which the probe is placed.
 * @offset: offset from the start of the file.
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 */
int uprobe_apply(struct inode *inode, loff_t offset,
                        struct uprobe_consumer *uc, bool add)
{
        struct uprobe *uprobe;
        struct uprobe_consumer *con;
        int ret = -ENOENT;

        uprobe = find_uprobe(inode, offset);
        if (WARN_ON(!uprobe))
                return ret;

        down_write(&uprobe->register_rwsem);
        for (con = uprobe->consumers; con && con != uc ; con = con->next)
                ;
        if (con)
                ret = register_for_each_vma(uprobe, add ? uc : NULL);
        up_write(&uprobe->register_rwsem);
        put_uprobe(uprobe);

        return ret;
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
        struct uprobe *uprobe;

        uprobe = find_uprobe(inode, offset);
        if (WARN_ON(!uprobe))
                return;

        down_write(&uprobe->register_rwsem);
        __uprobe_unregister(uprobe, uc);
        up_write(&uprobe->register_rwsem);
        put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        int err = 0;

        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                unsigned long vaddr;
                loff_t offset;

                if (!valid_vma(vma, false) ||
                    file_inode(vma->vm_file) != uprobe->inode)
                        continue;

                offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
                if (uprobe->offset < offset ||
                    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
                        continue;

                vaddr = offset_to_vaddr(vma, uprobe->offset);
                err |= remove_breakpoint(uprobe, mm, vaddr);
        }
        up_read(&mm->mmap_sem);

        return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
        struct rb_node *n = uprobes_tree.rb_node;

        while (n) {
                struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

                if (inode < u->inode) {
                        n = n->rb_left;
                } else if (inode > u->inode) {
                        n = n->rb_right;
                } else {
                        if (max < u->offset)
                                n = n->rb_left;
                        else if (min > u->offset)
                                n = n->rb_right;
                        else
                                break;
                }
        }

        return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
                                struct vm_area_struct *vma,
                                unsigned long start, unsigned long end,
                                struct list_head *head)
{
        loff_t min, max;
        struct rb_node *n, *t;
        struct uprobe *u;

        INIT_LIST_HEAD(head);
        min = vaddr_to_offset(vma, start);
        max = min + (end - start) - 1;

        spin_lock(&uprobes_treelock);
        n = find_node_in_range(inode, min, max);
        if (n) {
                for (t = n; t; t = rb_prev(t)) {
                        u = rb_entry(t, struct uprobe, rb_node);
                        if (u->inode != inode || u->offset < min)
                                break;
                        list_add(&u->pending_list, head);
                        get_uprobe(u);
                }
                for (t = n; (t = rb_next(t)); ) {
                        u = rb_entry(t, struct uprobe, rb_node);
                        if (u->inode != inode || u->offset > max)
                                break;
                        list_add(&u->pending_list, head);
                        get_uprobe(u);
                }
        }
        spin_unlock(&uprobes_treelock);
}
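
/*
 * Example (illustrative offsets): with probes at offsets 0x100, 0x180
 * and 0x200 on the same inode, and a vma range translating to
 * [min, max] = [0x150, 0x210], find_node_in_range() lands on some node
 * in range, say 0x180; the rb_prev() walk then stops at 0x100 (below
 * min) and the rb_next() walk adds 0x200 and stops, so the list ends
 * up holding the probes at 0x180 and 0x200.
 */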

/*
 * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
        struct list_head tmp_list;
        struct uprobe *uprobe, *u;
        struct inode *inode;

        if (no_uprobe_events() || !valid_vma(vma, true))
                return 0;

        inode = file_inode(vma->vm_file);
        if (!inode)
                return 0;

        mutex_lock(uprobes_mmap_hash(inode));
        build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
        /*
         * We can race with uprobe_unregister(), this uprobe can be already
         * removed. But in this case filter_chain() must return false, all
         * consumers have gone away.
         */
        list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
                if (!fatal_signal_pending(current) &&
                    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
                        unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
                        install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
                }
                put_uprobe(uprobe);
        }
        mutex_unlock(uprobes_mmap_hash(inode));

        return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        loff_t min, max;
        struct inode *inode;
        struct rb_node *n;

        inode = file_inode(vma->vm_file);

        min = vaddr_to_offset(vma, start);
        max = min + (end - start) - 1;

        spin_lock(&uprobes_treelock);
        n = find_node_in_range(inode, min, max);
        spin_unlock(&uprobes_treelock);

        return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        if (no_uprobe_events() || !valid_vma(vma, false))
                return;

        if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
                return;

        if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
             test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
                return;

        if (vma_has_uprobes(vma, start, end))
                set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
        int ret = -EALREADY;

        down_write(&mm->mmap_sem);
        if (mm->uprobes_state.xol_area)
                goto fail;

        if (!area->vaddr) {
                /* Try to map as high as possible, this is only a hint. */
                area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
                                                PAGE_SIZE, 0, 0);
                if (area->vaddr & ~PAGE_MASK) {
                        ret = area->vaddr;
                        goto fail;
                }
        }

        ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
                                VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, area->pages);
        if (ret)
                goto fail;

        smp_wmb();      /* pairs with get_xol_area() */
        mm->uprobes_state.xol_area = area;
 fail:
        up_write(&mm->mmap_sem);

        return ret;
}

static struct xol_area *__create_xol_area(unsigned long vaddr)
{
        struct mm_struct *mm = current->mm;
        uprobe_opcode_t insn = UPROBE_SWBP_INSN;
        struct xol_area *area;

        area = kmalloc(sizeof(*area), GFP_KERNEL);
        if (unlikely(!area))
                goto out;

        area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
        if (!area->bitmap)
                goto free_area;

        area->pages[0] = alloc_page(GFP_HIGHUSER);
        if (!area->pages[0])
                goto free_bitmap;
        area->pages[1] = NULL;

        area->vaddr = vaddr;
        init_waitqueue_head(&area->wq);
        /* Reserve the 1st slot for get_trampoline_vaddr() */
        set_bit(0, area->bitmap);
        atomic_set(&area->slot_count, 1);
        copy_to_page(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);

        if (!xol_add_vma(mm, area))
                return area;

        __free_page(area->pages[0]);
 free_bitmap:
        kfree(area->bitmap);
 free_area:
        kfree(area);
 out:
        return NULL;
}

/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
        struct mm_struct *mm = current->mm;
        struct xol_area *area;

        if (!mm->uprobes_state.xol_area)
                __create_xol_area(0);

        area = mm->uprobes_state.xol_area;
        smp_read_barrier_depends();     /* pairs with wmb in xol_add_vma() */
        return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
        struct xol_area *area = mm->uprobes_state.xol_area;

        if (!area)
                return;

        put_page(area->pages[0]);
        kfree(area->bitmap);
        kfree(area);
}

void uprobe_start_dup_mmap(void)
{
        percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
        percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
        newmm->uprobes_state.xol_area = NULL;

        if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
                set_bit(MMF_HAS_UPROBES, &newmm->flags);
                /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
                set_bit(MMF_RECALC_UPROBES, &newmm->flags);
        }
}

/*
 * xol_take_insn_slot - search for a free slot.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
        unsigned long slot_addr;
        int slot_nr;

        do {
                slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
                if (slot_nr < UINSNS_PER_PAGE) {
                        if (!test_and_set_bit(slot_nr, area->bitmap))
                                break;

                        slot_nr = UINSNS_PER_PAGE;
                        continue;
                }
                wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
        } while (slot_nr >= UINSNS_PER_PAGE);

        slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
        atomic_inc(&area->slot_count);

        return slot_addr;
}

/*
 * xol_get_insn_slot - allocate a slot for xol.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
{
        struct xol_area *area;
        unsigned long xol_vaddr;

        area = get_xol_area();
        if (!area)
                return 0;

        xol_vaddr = xol_take_insn_slot(area);
        if (unlikely(!xol_vaddr))
                return 0;

        arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
                              &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));

        return xol_vaddr;
}

/*
 * xol_free_insn_slot - if the slot was earlier allocated by
 * xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
        struct xol_area *area;
        unsigned long vma_end;
        unsigned long slot_addr;

        if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
                return;

        slot_addr = tsk->utask->xol_vaddr;
        if (unlikely(!slot_addr))
                return;

        area = tsk->mm->uprobes_state.xol_area;
        vma_end = area->vaddr + PAGE_SIZE;
        if (area->vaddr <= slot_addr && slot_addr < vma_end) {
                unsigned long offset;
                int slot_nr;

                offset = slot_addr - area->vaddr;
                slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
                if (slot_nr >= UINSNS_PER_PAGE)
                        return;

                clear_bit(slot_nr, area->bitmap);
                atomic_dec(&area->slot_count);
                if (waitqueue_active(&area->wq))
                        wake_up(&area->wq);

                tsk->utask->xol_vaddr = 0;
        }
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
                                  void *src, unsigned long len)
{
        /* Initialize the slot */
        copy_to_page(page, vaddr, src, len);

        /*
         * We probably need flush_icache_user_range() but it needs vma.
         * This should work on most architectures by default. If an
         * architecture needs to do something different it can define
         * its own version of the function.
         */
        flush_dcache_page(page);
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
        return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}
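
/*
 * Example (x86): UPROBE_SWBP_INSN is the one-byte int3 (0xcc) and
 * UPROBE_SWBP_INSN_SIZE is 1, so after the trap regs->ip points one
 * byte past the breakpoint and the probed address is ip - 1.
 */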

unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
        struct uprobe_task *utask = current->utask;

        if (unlikely(utask && utask->active_uprobe))
                return utask->vaddr;

        return instruction_pointer(regs);
}

static struct return_instance *free_ret_instance(struct return_instance *ri)
{
        struct return_instance *next = ri->next;
        put_uprobe(ri->uprobe);
        kfree(ri);
        return next;
}

/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
        struct uprobe_task *utask = t->utask;
        struct return_instance *ri;

        if (!utask)
                return;

        if (utask->active_uprobe)
                put_uprobe(utask->active_uprobe);

        ri = utask->return_instances;
        while (ri)
                ri = free_ret_instance(ri);

        xol_free_insn_slot(t);
        kfree(utask);
        t->utask = NULL;
}

/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *get_utask(void)
{
        if (!current->utask)
                current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
        return current->utask;
}

static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
{
        struct uprobe_task *n_utask;
        struct return_instance **p, *o, *n;

        n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
        if (!n_utask)
                return -ENOMEM;
        t->utask = n_utask;

        p = &n_utask->return_instances;
        for (o = o_utask->return_instances; o; o = o->next) {
                n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
                if (!n)
                        return -ENOMEM;

                *n = *o;
                get_uprobe(n->uprobe);
                n->next = NULL;

                *p = n;
                p = &n->next;
                n_utask->depth++;
        }

        return 0;
}

static void uprobe_warn(struct task_struct *t, const char *msg)
{
        pr_warn("uprobe: %s:%d failed to %s\n",
                        current->comm, current->pid, msg);
}

static void dup_xol_work(struct callback_head *work)
{
        if (current->flags & PF_EXITING)
                return;

        if (!__create_xol_area(current->utask->dup_xol_addr))
                uprobe_warn(current, "dup xol area");
}

/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
        struct uprobe_task *utask = current->utask;
        struct mm_struct *mm = current->mm;
        struct xol_area *area;

        t->utask = NULL;

        if (!utask || !utask->return_instances)
                return;

        if (mm == t->mm && !(flags & CLONE_VFORK))
                return;

        if (dup_utask(t, utask))
                return uprobe_warn(t, "dup ret instances");

        /* The task can fork() after dup_xol_work() fails */
        area = mm->uprobes_state.xol_area;
        if (!area)
                return uprobe_warn(t, "dup xol area");

        if (mm == t->mm)
                return;

        t->utask->dup_xol_addr = area->vaddr;
        init_task_work(&t->utask->dup_xol_work, dup_xol_work);
        task_work_add(t, &t->utask->dup_xol_work, true);
}

/*
 * The current area->vaddr notion assumes the trampoline address is
 * always equal to area->vaddr.
 *
 * Returns -1 in case the xol_area is not allocated.
 */
static unsigned long get_trampoline_vaddr(void)
{
        struct xol_area *area;
        unsigned long trampoline_vaddr = -1;

        area = current->mm->uprobes_state.xol_area;
        smp_read_barrier_depends();
        if (area)
                trampoline_vaddr = area->vaddr;

        return trampoline_vaddr;
}

static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
                                        struct pt_regs *regs)
{
        struct return_instance *ri = utask->return_instances;
        enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;

        while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
                ri = free_ret_instance(ri);
                utask->depth--;
        }
        utask->return_instances = ri;
}

static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
{
        struct return_instance *ri;
        struct uprobe_task *utask;
        unsigned long orig_ret_vaddr, trampoline_vaddr;
        bool chained;

        if (!get_xol_area())
                return;

        utask = get_utask();
        if (!utask)
                return;

        if (utask->depth >= MAX_URETPROBE_DEPTH) {
                printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
                                " nestedness limit pid/tgid=%d/%d\n",
                                current->pid, current->tgid);
                return;
        }

        ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
        if (!ri)
                return;

        trampoline_vaddr = get_trampoline_vaddr();
        orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
        if (orig_ret_vaddr == -1)
                goto fail;

        /* drop the entries invalidated by longjmp() */
        chained = (orig_ret_vaddr == trampoline_vaddr);
        cleanup_return_instances(utask, chained, regs);

        /*
         * We don't want to keep the trampoline address on the stack; rather,
         * keep the original return address of the first caller through all
         * the subsequent instances. This also makes breakpoint unwrapping
         * easier.
         */
        if (chained) {
                if (!utask->return_instances) {
                        /*
                         * This situation is not possible. Likely we have an
                         * attack from user-space.
                         */
                        uprobe_warn(current, "handle tail call");
                        goto fail;
                }
                orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
        }

        ri->uprobe = get_uprobe(uprobe);
        ri->func = instruction_pointer(regs);
        ri->stack = user_stack_pointer(regs);
        ri->orig_ret_vaddr = orig_ret_vaddr;
        ri->chained = chained;

        utask->depth++;
        ri->next = utask->return_instances;
        utask->return_instances = ri;

        return;
 fail:
        kfree(ri);
}
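
/*
 * Chained example (illustrative): with uretprobes on both f() and g(),
 * where f() calls g(), hijacking g()'s return address finds it already
 * pointing at the trampoline (f()'s hijack put it there). That instance
 * is marked chained and inherits orig_ret_vaddr from the instance on
 * top of the stack, so the original caller's return address is carried
 * through the whole chain.
 */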

/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
        struct uprobe_task *utask;
        unsigned long xol_vaddr;
        int err;

        utask = get_utask();
        if (!utask)
                return -ENOMEM;

        xol_vaddr = xol_get_insn_slot(uprobe);
        if (!xol_vaddr)
                return -ENOMEM;

        utask->xol_vaddr = xol_vaddr;
        utask->vaddr = bp_vaddr;

        err = arch_uprobe_pre_xol(&uprobe->arch, regs);
        if (unlikely(err)) {
                xol_free_insn_slot(current);
                return err;
        }

        utask->active_uprobe = uprobe;
        utask->state = UTASK_SSTEP;
        return 0;
}
1623
1624 /*
1625  * If we are single-stepping, ensure this thread does not receive any
1626  * non-fatal signal until the single-step completes.  When the xol insn
1627  * itself triggers a signal, restart the original insn even if the task is
1628  * already SIGKILL'ed (since the coredump should report the correct ip).
1629  * This matters even more if the task has a handler for SIGSEGV/etc: the
1630  * _same_ instruction would be restarted after return from the signal
1631  * handler, so SSTEP could never finish in that case.
1632  */
1633 bool uprobe_deny_signal(void)
1634 {
1635         struct task_struct *t = current;
1636         struct uprobe_task *utask = t->utask;
1637
1638         if (likely(!utask || !utask->active_uprobe))
1639                 return false;
1640
1641         WARN_ON_ONCE(utask->state != UTASK_SSTEP);
1642
1643         if (signal_pending(t)) {
1644                 spin_lock_irq(&t->sighand->siglock);
1645                 clear_tsk_thread_flag(t, TIF_SIGPENDING);
1646                 spin_unlock_irq(&t->sighand->siglock);
1647
1648                 if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
1649                         utask->state = UTASK_SSTEP_TRAPPED;
1650                         set_tsk_thread_flag(t, TIF_UPROBE);
1651                 }
1652         }
1653
1654         return true;
1655 }
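
/*
 * A distilled user-space sketch of the decision above; the helper and its
 * flag parameters are hypothetical, introduced only to show when a pending
 * signal stays deferred versus when the step is aborted and the original
 * insn retried.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool deny_signal(bool single_stepping, bool pending, bool fatal,
                        bool xol_trapped, bool *restart_insn)
{
        *restart_insn = false;
        if (!single_stepping)
                return false;                   /* deliver signals normally */
        if (pending && (fatal || xol_trapped))
                *restart_insn = true;           /* abort xol, re-run original insn */
        return true;                            /* keep the signal deferred */
}

int main(void)
{
        bool restart;

        printf("deny=%d ", deny_signal(true, true, true, false, &restart));
        printf("restart=%d\n", restart);        /* 1: SIGKILL aborts the step */
        return 0;
}
#endif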
1656
1657 static void mmf_recalc_uprobes(struct mm_struct *mm)
1658 {
1659         struct vm_area_struct *vma;
1660
1661         for (vma = mm->mmap; vma; vma = vma->vm_next) {
1662                 if (!valid_vma(vma, false))
1663                         continue;
1664                 /*
1665                  * This is not strictly accurate: we can race with
1666                  * uprobe_unregister() and see an already-removed
1667                  * uprobe if delete_uprobe() was not yet called.
1668                  * Or this uprobe may have been filtered out.
1669                  */
1670                 if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
1671                         return;
1672         }
1673
1674         clear_bit(MMF_HAS_UPROBES, &mm->flags);
1675 }
1676
1677 static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
1678 {
1679         struct page *page;
1680         uprobe_opcode_t opcode;
1681         int result;
1682
1683         pagefault_disable();
1684         result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
1685                                                         sizeof(opcode));
1686         pagefault_enable();
1687
1688         if (likely(result == 0))
1689                 goto out;
1690
1691         result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
1692         if (result < 0)
1693                 return result;
1694
1695         copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
1696         put_page(page);
1697  out:
1698         /* This needs to return true for any variant of the trap insn */
1699         return is_trap_insn(&opcode);
1700 }
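
/*
 * A user-space analogue (sketch) of the check above: peek one byte of
 * another process's text and compare it with the breakpoint opcode.
 * Assumes x86, where UPROBE_SWBP_INSN is the one-byte int3 (0xcc);
 * has_trap() is a hypothetical name.
 */
#if 0
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/uio.h>

static int has_trap(pid_t pid, unsigned long vaddr)
{
        unsigned char opcode;
        struct iovec local  = { .iov_base = &opcode, .iov_len = 1 };
        struct iovec remote = { .iov_base = (void *)vaddr, .iov_len = 1 };

        if (process_vm_readv(pid, &local, 1, &remote, 1, 0) != 1)
                return -1;                      /* unmapped or not permitted */
        return opcode == 0xcc;                  /* x86 int3 */
}

int main(int argc, char **argv)
{
        if (argc == 3)
                printf("%d\n", has_trap(atoi(argv[1]),
                                        strtoul(argv[2], NULL, 0)));
        return 0;
}
#endif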
1701
1702 static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
1703 {
1704         struct mm_struct *mm = current->mm;
1705         struct uprobe *uprobe = NULL;
1706         struct vm_area_struct *vma;
1707
1708         down_read(&mm->mmap_sem);
1709         vma = find_vma(mm, bp_vaddr);
1710         if (vma && vma->vm_start <= bp_vaddr) {
1711                 if (valid_vma(vma, false)) {
1712                         struct inode *inode = file_inode(vma->vm_file);
1713                         loff_t offset = vaddr_to_offset(vma, bp_vaddr);
1714
1715                         uprobe = find_uprobe(inode, offset);
1716                 }
1717
1718                 if (!uprobe)
1719                         *is_swbp = is_trap_at_addr(mm, bp_vaddr);
1720         } else {
1721                 *is_swbp = -EFAULT;
1722         }
1723
1724         if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
1725                 mmf_recalc_uprobes(mm);
1726         up_read(&mm->mmap_sem);
1727
1728         return uprobe;
1729 }
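
/*
 * Sketch of the vaddr -> file-offset translation performed above via
 * vaddr_to_offset(): a uprobe is keyed by (inode, offset), so the fault
 * address must be mapped back to an offset within the probed file.
 * struct vma_lite is a hypothetical stand-in for vm_area_struct.
 */
#if 0
struct vma_lite {
        unsigned long vm_start;         /* first address of the mapping */
        unsigned long vm_pgoff;         /* file offset of vm_start, in pages */
};

static unsigned long long vaddr_to_off(const struct vma_lite *vma,
                                       unsigned long vaddr,
                                       unsigned int page_shift)
{
        return ((unsigned long long)vma->vm_pgoff << page_shift) +
               (vaddr - vma->vm_start);
}
#endif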
1730
1731 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
1732 {
1733         struct uprobe_consumer *uc;
1734         int remove = UPROBE_HANDLER_REMOVE;
1735         bool need_prep = false; /* prepare return uprobe, when needed */
1736
1737         down_read(&uprobe->register_rwsem);
1738         for (uc = uprobe->consumers; uc; uc = uc->next) {
1739                 int rc = 0;
1740
1741                 if (uc->handler) {
1742                         rc = uc->handler(uc, regs);
1743                         WARN(rc & ~UPROBE_HANDLER_MASK,
1744                                 "bad rc=0x%x from %pf()\n", rc, uc->handler);
1745                 }
1746
1747                 if (uc->ret_handler)
1748                         need_prep = true;
1749
1750                 remove &= rc;
1751         }
1752
1753         if (need_prep && !remove)
1754                 prepare_uretprobe(uprobe, regs); /* put bp at return */
1755
1756         if (remove && uprobe->consumers) {
1757                 WARN_ON(!uprobe_is_active(uprobe));
1758                 unapply_uprobe(uprobe, current->mm);
1759         }
1760         up_read(&uprobe->register_rwsem);
1761 }
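
/*
 * The "remove &= rc" accumulation above removes the probe only if every
 * consumer returned UPROBE_HANDLER_REMOVE; a single dissenter keeps it.
 * A tiny stand-alone demonstration with mocked return codes:
 */
#if 0
#include <stdio.h>

#define UPROBE_HANDLER_REMOVE   1       /* as in include/linux/uprobes.h */

int main(void)
{
        int rcs[] = { UPROBE_HANDLER_REMOVE, 0, UPROBE_HANDLER_REMOVE };
        int remove = UPROBE_HANDLER_REMOVE;
        unsigned int i;

        for (i = 0; i < sizeof(rcs) / sizeof(rcs[0]); i++)
                remove &= rcs[i];

        printf("remove=%d\n", remove);  /* 0: the probe stays installed */
        return 0;
}
#endif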
1762
1763 static void
1764 handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
1765 {
1766         struct uprobe *uprobe = ri->uprobe;
1767         struct uprobe_consumer *uc;
1768
1769         down_read(&uprobe->register_rwsem);
1770         for (uc = uprobe->consumers; uc; uc = uc->next) {
1771                 if (uc->ret_handler)
1772                         uc->ret_handler(uc, ri->func, regs);
1773         }
1774         up_read(&uprobe->register_rwsem);
1775 }
1776
1777 static struct return_instance *find_next_ret_chain(struct return_instance *ri)
1778 {
1779         bool chained;
1780
1781         do {
1782                 chained = ri->chained;
1783                 ri = ri->next;  /* can't be NULL if chained */
1784         } while (chained);
1785
1786         return ri;
1787 }
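
/*
 * The walk above, restated on the hypothetical struct frame from the
 * sketch after prepare_uretprobe(): skip every frame marked chained;
 * the first non-chained hop lands on the head of the next call chain
 * (or NULL at the bottom of the shadow stack).
 */
#if 0
static struct frame *next_chain(struct frame *f)
{
        int chained;

        do {
                chained = f->chained;
                f = f->next;            /* never NULL while chained is set */
        } while (chained);

        return f;
}
#endif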
1788
1789 static void handle_trampoline(struct pt_regs *regs)
1790 {
1791         struct uprobe_task *utask;
1792         struct return_instance *ri, *next;
1793         bool valid;
1794
1795         utask = current->utask;
1796         if (!utask)
1797                 goto sigill;
1798
1799         ri = utask->return_instances;
1800         if (!ri)
1801                 goto sigill;
1802
1803         do {
1804                 /*
1805                  * We should throw out the frames invalidated by longjmp().
1806                  * If this chain is valid, then the next one should be alive
1807                  * or NULL; the latter case means that nobody but ri->func
1808                  * could hit this trampoline on return. TODO: sigaltstack().
1809                  */
1810                 next = find_next_ret_chain(ri);
1811                 valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
1812
1813                 instruction_pointer_set(regs, ri->orig_ret_vaddr);
1814                 do {
1815                         if (valid)
1816                                 handle_uretprobe_chain(ri, regs);
1817                         ri = free_ret_instance(ri);
1818                         utask->depth--;
1819                 } while (ri != next);
1820         } while (!valid);
1821
1822         utask->return_instances = ri;
1823         return;
1824
1825  sigill:
1826         uprobe_warn(current, "handle uretprobe, sending SIGILL.");
1827         force_sig_info(SIGILL, SEND_SIG_FORCED, current);
1828
1829 }
1830
1831 bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
1832 {
1833         return false;
1834 }
1835
1836 bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
1837                                         struct pt_regs *regs)
1838 {
1839         return true;
1840 }
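
/*
 * For reference, an arch can override this weak default by comparing
 * stack pointers, so that frames abandoned by longjmp() are reported
 * dead.  The sketch below mirrors the x86 implementation
 * (arch/x86/kernel/uprobes.c), reproduced from memory; verify against
 * the tree before relying on it.
 */
#if 0
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
                                struct pt_regs *regs)
{
        if (ctx == RP_CHECK_CALL) /* sp was just decremented by "call" insn */
                return regs->sp < ret->stack;

        return regs->sp <= ret->stack;
}
#endif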
1841
1842 /*
1843  * Run handler and ask thread to singlestep.
1844  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
1845  */
1846 static void handle_swbp(struct pt_regs *regs)
1847 {
1848         struct uprobe *uprobe;
1849         unsigned long bp_vaddr;
1850         int uninitialized_var(is_swbp);
1851
1852         bp_vaddr = uprobe_get_swbp_addr(regs);
1853         if (bp_vaddr == get_trampoline_vaddr())
1854                 return handle_trampoline(regs);
1855
1856         uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
1857         if (!uprobe) {
1858                 if (is_swbp > 0) {
1859                         /* No matching uprobe; signal SIGTRAP. */
1860                         send_sig(SIGTRAP, current, 0);
1861                 } else {
1862                         /*
1863                          * Either we raced with uprobe_unregister() or we can't
1864                          * access this memory. The latter is only possible if
1865                          * another thread plays with our ->mm. In both cases
1866                          * we can simply restart. If this vma was unmapped we
1867                          * can pretend this insn was not executed yet and get
1868                          * the (correct) SIGSEGV after restart.
1869                          */
1870                         instruction_pointer_set(regs, bp_vaddr);
1871                 }
1872                 return;
1873         }
1874
1875         /* change it in advance for ->handler() and restart */
1876         instruction_pointer_set(regs, bp_vaddr);
1877
1878         /*
1879          * TODO: move copy_insn/etc into _register and remove this hack.
1880          * After we hit the bp, _unregister + _register can install a
1881          * new, not-yet-analyzed uprobe at the same address; restart.
1882          */
1883         smp_rmb(); /* pairs with wmb() in install_breakpoint() */
1884         if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
1885                 goto out;
1886
1887         /* Tracing handlers use ->utask to communicate with fetch methods */
1888         if (!get_utask())
1889                 goto out;
1890
1891         if (arch_uprobe_ignore(&uprobe->arch, regs))
1892                 goto out;
1893
1894         handler_chain(uprobe, regs);
1895
1896         if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
1897                 goto out;
1898
1899         if (!pre_ssout(uprobe, regs, bp_vaddr))
1900                 return;
1901
1902         /* arch_uprobe_skip_sstep() succeeded, or restart if we can't single-step */
1903 out:
1904         put_uprobe(uprobe);
1905 }
1906
1907 /*
1908  * Perform required fix-ups and disable singlestep.
1909  * Allow pending signals to take effect.
1910  */
1911 static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
1912 {
1913         struct uprobe *uprobe;
1914         int err = 0;
1915
1916         uprobe = utask->active_uprobe;
1917         if (utask->state == UTASK_SSTEP_ACK)
1918                 err = arch_uprobe_post_xol(&uprobe->arch, regs);
1919         else if (utask->state == UTASK_SSTEP_TRAPPED)
1920                 arch_uprobe_abort_xol(&uprobe->arch, regs);
1921         else
1922                 WARN_ON_ONCE(1);
1923
1924         put_uprobe(uprobe);
1925         utask->active_uprobe = NULL;
1926         utask->state = UTASK_RUNNING;
1927         xol_free_insn_slot(current);
1928
1929         spin_lock_irq(&current->sighand->siglock);
1930         recalc_sigpending(); /* see uprobe_deny_signal() */
1931         spin_unlock_irq(&current->sighand->siglock);
1932
1933         if (unlikely(err)) {
1934                 uprobe_warn(current, "execute the probed insn, sending SIGILL.");
1935                 force_sig_info(SIGILL, SEND_SIG_FORCED, current);
1936         }
1937 }
1938
1939 /*
1940  * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
1941  * allows the thread to return from interrupt. After that handle_swbp()
1942  * sets utask->active_uprobe.
1943  *
1944  * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
1945  * and allows the thread to return from interrupt.
1946  *
1947  * While returning to userspace, thread notices the TIF_UPROBE flag and calls
1948  * uprobe_notify_resume().
1949  */
1950 void uprobe_notify_resume(struct pt_regs *regs)
1951 {
1952         struct uprobe_task *utask;
1953
1954         clear_thread_flag(TIF_UPROBE);
1955
1956         utask = current->utask;
1957         if (utask && utask->active_uprobe)
1958                 handle_singlestep(utask, regs);
1959         else
1960                 handle_swbp(regs);
1961 }
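
/*
 * A distilled user-space sketch of the dispatch above: with an active
 * uprobe the thread is finishing (or aborting) a single-step, otherwise
 * it just hit a breakpoint.  Names below are hypothetical except for the
 * UTASK_* states, which mirror this file.
 */
#if 0
#include <stdio.h>

enum utask_state { UTASK_RUNNING, UTASK_SSTEP, UTASK_SSTEP_ACK,
                   UTASK_SSTEP_TRAPPED };

static const char *on_resume(int have_active_uprobe, enum utask_state s)
{
        if (!have_active_uprobe)
                return "handle_swbp: run handlers, maybe start a single-step";
        if (s == UTASK_SSTEP_ACK)
                return "handle_singlestep: post_xol fixups, resume";
        if (s == UTASK_SSTEP_TRAPPED)
                return "handle_singlestep: abort_xol, retry the original insn";
        return "unexpected state";
}

int main(void)
{
        puts(on_resume(1, UTASK_SSTEP_ACK));
        puts(on_resume(0, UTASK_RUNNING));
        return 0;
}
#endif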
1962
1963 /*
1964  * uprobe_pre_sstep_notifier gets called from interrupt context as part of
1965  * the notifier mechanism. Set the TIF_UPROBE flag and indicate a breakpoint hit.
1966  */
1967 int uprobe_pre_sstep_notifier(struct pt_regs *regs)
1968 {
1969         if (!current->mm)
1970                 return 0;
1971
1972         if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
1973             (!current->utask || !current->utask->return_instances))
1974                 return 0;
1975
1976         set_thread_flag(TIF_UPROBE);
1977         return 1;
1978 }
1979
1980 /*
1981  * uprobe_post_sstep_notifier gets called in interrupt context as part of the
1982  * notifier mechanism. Set the TIF_UPROBE flag and indicate single-step completion.
1983  */
1984 int uprobe_post_sstep_notifier(struct pt_regs *regs)
1985 {
1986         struct uprobe_task *utask = current->utask;
1987
1988         if (!current->mm || !utask || !utask->active_uprobe)
1989                 /* task is currently not uprobed */
1990                 return 0;
1991
1992         utask->state = UTASK_SSTEP_ACK;
1993         set_thread_flag(TIF_UPROBE);
1994         return 1;
1995 }
1996
1997 static struct notifier_block uprobe_exception_nb = {
1998         .notifier_call          = arch_uprobe_exception_notify,
1999         .priority               = INT_MAX-1,    /* notified after kprobes, kgdb */
2000 };
2001
2002 static int __init init_uprobes(void)
2003 {
2004         int i;
2005
2006         for (i = 0; i < UPROBES_HASH_SZ; i++)
2007                 mutex_init(&uprobes_mmap_mutex[i]);
2008
2009         if (percpu_init_rwsem(&dup_mmap_sem))
2010                 return -ENOMEM;
2011
2012         return register_die_notifier(&uprobe_exception_nb);
2013 }
2014 __initcall(init_uprobes);
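
/*
 * Usage sketch: from user space this machinery is normally driven through
 * the uprobe tracer (see Documentation/trace/uprobetracer.txt) rather than
 * called directly.  Appending "p:EVENT PATH:OFFSET" to uprobe_events plants
 * a probe at that file offset; the path, event name and offset below are
 * placeholders, and the tracefs mount point may differ on your system.
 */
#if 0
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/debug/tracing/uprobe_events", "a");

        if (!f) {
                perror("uprobe_events");
                return 1;
        }
        fprintf(f, "p:my_probe /bin/bash:0x4245c0\n");
        fclose(f);
        return 0;
}
#endif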