#define pr_fmt(fmt) "kcov: " fmt

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task at a time is allowed)
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	/* The lock protects mode, size, area and t. */
	struct task_struct *t;

 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
void __sanitizer_cov_trace_pc(void)
	struct task_struct *t;
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	if (!t || in_interrupt())
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides a load-acquire
		 * with respect to interrupts; it pairs with the
		 * barrier()/WRITE_ONCE() in kcov_ioctl_locked().
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			WRITE_ONCE(area[0], pos);
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
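
/*
 * A minimal user-space sketch (not part of this file) of how the buffer
 * filled above is meant to be read once it has been mmapped: word 0 holds
 * the number of PCs recorded so far, and the PCs themselves follow in
 * words 1..n. "cover" is a hypothetical name for the pointer returned by
 * mmap() on the kcov file descriptor.
 *
 *	unsigned long *cover;	// hypothetical: pointer returned by mmap()
 *	unsigned long i, n;
 *
 *	n = cover[0];
 *	for (i = 0; i < n; i++)
 *		printf("0x%lx\n", cover[i + 1]);
 */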

static void kcov_get(struct kcov *kcov)
	atomic_inc(&kcov->rc);

static void kcov_put(struct kcov *kcov)
	if (atomic_dec_and_test(&kcov->rc)) {

void kcov_task_init(struct task_struct *t)

void kcov_task_exit(struct task_struct *t)
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
	/* Just to not leave dangling references behind. */
	spin_unlock(&kcov->lock);

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == 0 || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
	spin_unlock(&kcov->lock);

static int kcov_open(struct inode *inode, struct file *filep)
	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	atomic_set(&kcov->rc, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);

static int kcov_close(struct inode *inode, struct file *filep)
	kcov_put(filep->private_data);

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
	struct task_struct *t;
	case KCOV_INIT_TRACE:
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 * Size must be at least 2 to hold the current position and one PC.
		if (arg < 2 || arg > INT_MAX)
		kcov->mode = KCOV_MODE_TRACE;
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only at
		 * task exit or voluntarily by KCOV_DISABLE. After that it
		 * can be enabled for another task.
		if (arg != 0 || kcov->mode == 0 || kcov->area == NULL)
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		/* Disable coverage for the current task. */
		if (arg != 0 || current->kcov != kcov)
		if (WARN_ON(kcov->t != t))

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);

static const struct file_operations kcov_fops = {
	.unlocked_ioctl	= kcov_ioctl,
	.release	= kcov_close,

static int __init kcov_init(void)
	if (!debugfs_create_file("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");

device_initcall(kcov_init);
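
/*
 * For reference, a minimal user-space sketch of the intended sequence from
 * the descriptor comment above: open the debugfs file, KCOV_INIT_TRACE,
 * mmap(), KCOV_ENABLE, run the syscall of interest, then KCOV_DISABLE.
 * This assumes debugfs is mounted at /sys/kernel/debug and that the KCOV_*
 * ioctl numbers come from the accompanying uapi header; COVER_SIZE is an
 * arbitrary buffer size (in unsigned longs, at least 2) chosen by the caller.
 *
 *	#define COVER_SIZE (64 << 10)
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	if (fd == -1)
 *		err(1, "open");
 *	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
 *		err(1, "KCOV_INIT_TRACE");
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	if (cover == MAP_FAILED)
 *		err(1, "mmap");
 *	if (ioctl(fd, KCOV_ENABLE, 0))
 *		err(1, "KCOV_ENABLE");
 *	cover[0] = 0;
 *	read(-1, NULL, 0);		// the syscall whose coverage we want
 *	// cover[0] PCs are now available in cover[1..cover[0]]
 *	if (ioctl(fd, KCOV_DISABLE, 0))
 *		err(1, "KCOV_DISABLE");
 *	close(fd);
 */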