/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <trace/block.h>
#include <linux/uaccess.h>

#include "trace_output.h"
static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;
/* Select an alternative, minimalistic output format rather than the original one */
#define TRACE_BLK_OPT_CLASSIC   0x1

static struct tracer_opt blk_tracer_opts[] = {
        /* The minimalistic output is disabled by default */
        { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
        { }
};

static struct tracer_flags blk_tracer_flags = {
        .val  = 0,
        .opts = blk_tracer_opts,
};
/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);
/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
                       const void *data, size_t len)
{
        struct blk_io_trace *t;

        t = relay_reserve(bt->rchan, sizeof(*t) + len);
        if (t) {
                const int cpu = smp_processor_id();

                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->time = ktime_to_ns(ktime_get());
                t->device = bt->dev;
                t->action = action;
                t->pid = pid;
                t->cpu = cpu;
                t->pdu_len = len;
                memcpy((void *) t + sizeof(*t), data, len);
        }
}
/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
        tsk->btrace_seq = blktrace_seq;
        trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}
static void trace_note_time(struct blk_trace *bt)
{
        struct timespec now;
        unsigned long flags;
        u32 words[2];

        getnstimeofday(&now);
        words[0] = now.tv_sec;
        words[1] = now.tv_nsec;

        local_irq_save(flags);
        trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
        local_irq_restore(flags);
}
void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
        int n;
        va_list args;
        unsigned long flags;
        char *buf;

        if (blk_tracer_enabled) {
                va_start(args, fmt);
                ftrace_vprintk(fmt, args);
                va_end(args);
                return;
        }

        if (!bt->msg_data)
                return;

        va_start(args, fmt);
        local_irq_save(flags);
        buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
        n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
        va_end(args);

        trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
                         pid_t pid)
{
        if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
                return 1;
        if (sector < bt->start_lba || sector > bt->end_lba)
                return 1;
        if (bt->pid && pid != bt->pid)
                return 1;

        return 0;
}
/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
                                 BLK_TC_ACT(BLK_TC_WRITE) };

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
          (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
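
/*
 * Illustration (a sketch of the bit arithmetic, not extra functionality):
 * MASK_TC_BIT(rw, META) isolates bit BIO_RW_META of @rw and shifts it so
 * it lands on the BLK_TC_META bit of the action's category field, i.e. at
 * position ilog2(BLK_TC_META) + BLK_TC_SHIFT. Both shift amounts are
 * compile-time constants, so each MASK_TC_BIT() use folds down to a single
 * mask-and-shift of @rw.
 */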
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                            int rw, u32 what, int error, int pdu_len,
                            void *pdu_data)
{
        struct task_struct *tsk = current;
        struct ring_buffer_event *event = NULL;
        struct blk_io_trace *t;
        unsigned long flags = 0;
        unsigned long *sequence;
        pid_t pid;
        int cpu, pc = 0;

        if (unlikely(bt->trace_state != Blktrace_running &&
                     !blk_tracer_enabled))
                return;

        what |= ddir_act[rw & WRITE];
        what |= MASK_TC_BIT(rw, BARRIER);
        what |= MASK_TC_BIT(rw, SYNCIO);
        what |= MASK_TC_BIT(rw, AHEAD);
        what |= MASK_TC_BIT(rw, META);
        what |= MASK_TC_BIT(rw, DISCARD);

        pid = tsk->pid;
        if (unlikely(act_log_check(bt, what, sector, pid)))
                return;
        cpu = raw_smp_processor_id();

        if (blk_tracer_enabled) {
                tracing_record_cmdline(current);

                pc = preempt_count();
                event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
                                                  sizeof(*t) + pdu_len,
                                                  0, pc);
                if (!event)
                        return;
                t = ring_buffer_event_data(event);
                goto record_it;
        }

        /*
         * A word about the locking here - we disable interrupts to reserve
         * some space in the relay per-cpu buffer, to prevent an irq
         * from coming in and stepping on our toes.
         */
        local_irq_save(flags);

        if (unlikely(tsk->btrace_seq != blktrace_seq))
                trace_note_tsk(bt, tsk);

        t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
        if (t) {
                sequence = per_cpu_ptr(bt->sequence, cpu);

                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->sequence = ++(*sequence);
                t->time = ktime_to_ns(ktime_get());
record_it:
                /*
                 * These two are not needed in ftrace as they are in the
                 * generic trace_entry, filled by tracing_generic_entry_update,
                 * but for the trace_event->bin() synthesizer benefit we do it
                 * here too.
                 */
                t->cpu = cpu;
                t->pid = pid;

                t->sector = sector;
                t->bytes = bytes;
                t->action = what;
                t->device = bt->dev;
                t->error = error;
                t->pdu_len = pdu_len;

                if (pdu_len)
                        memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

                if (blk_tracer_enabled) {
                        trace_buffer_unlock_commit(blk_tr, event, 0, pc);
                        return;
                }
        }

        local_irq_restore(flags);
}
static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);

static void blk_trace_cleanup(struct blk_trace *bt)
{
        debugfs_remove(bt->msg_file);
        debugfs_remove(bt->dropped_file);
        relay_close(bt->rchan);
        free_percpu(bt->sequence);
        free_percpu(bt->msg_data);
        kfree(bt);
        if (atomic_dec_and_test(&blk_probes_ref))
                blk_unregister_tracepoints();
}
int blk_trace_remove(struct request_queue *q)
{
        struct blk_trace *bt;

        bt = xchg(&q->blk_trace, NULL);
        if (!bt)
                return -EINVAL;

        if (bt->trace_state == Blktrace_setup ||
            bt->trace_state == Blktrace_stopped)
                blk_trace_cleanup(bt);

        return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);
static int blk_dropped_open(struct inode *inode, struct file *filp)
{
        filp->private_data = inode->i_private;

        return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
                                size_t count, loff_t *ppos)
{
        struct blk_trace *bt = filp->private_data;
        char buf[16];

        snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

        return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
        .owner =        THIS_MODULE,
        .open =         blk_dropped_open,
        .read =         blk_dropped_read,
};
static int blk_msg_open(struct inode *inode, struct file *filp)
{
        filp->private_data = inode->i_private;

        return 0;
}

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
                             size_t count, loff_t *ppos)
{
        char *msg;
        struct blk_trace *bt;

        if (count > BLK_TN_MAX_MSG)
                return -EINVAL;

        /* Allocate one extra byte so the message is always NUL-terminated */
        msg = kmalloc(count + 1, GFP_KERNEL);
        if (msg == NULL)
                return -ENOMEM;

        if (copy_from_user(msg, buffer, count)) {
                kfree(msg);
                return -EFAULT;
        }

        msg[count] = '\0';
        bt = filp->private_data;
        __trace_note_message(bt, "%s", msg);
        kfree(msg);

        return count;
}

static const struct file_operations blk_msg_fops = {
        .owner =        THIS_MODULE,
        .open =         blk_msg_open,
        .write =        blk_msg_write,
};
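
/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug and a trace
 * has been set up on sda): user space can inject a marker into the trace
 * stream with
 *
 *      echo "some annotation" > /sys/kernel/debug/block/sda/msg
 *
 * which shows up as a BLK_TN_MESSAGE note in the blktrace output.
 */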
/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
                                     void *prev_subbuf, size_t prev_padding)
{
        struct blk_trace *bt;

        if (!relay_buf_full(buf))
                return 1;

        bt = buf->chan->private_data;
        atomic_inc(&bt->dropped);
        return 0;
}
static int blk_remove_buf_file_callback(struct dentry *dentry)
{
        struct dentry *parent = dentry->d_parent;
        debugfs_remove(dentry);

        /*
         * This will fail for all but the last file, but that is ok: what we
         * care about is the top level buts->name directory going away, when
         * the last trace file is gone. Then we don't have to rmdir() that
         * manually on trace stop, so it nicely solves the issue with
         * force killing of running traces.
         */

        debugfs_remove(parent);
        return 0;
}
static struct dentry *blk_create_buf_file_callback(const char *filename,
                                                   struct dentry *parent,
                                                   int mode,
                                                   struct rchan_buf *buf,
                                                   int *is_global)
{
        return debugfs_create_file(filename, mode, parent, buf,
                                   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
        .subbuf_start           = blk_subbuf_start_callback,
        .create_buf_file        = blk_create_buf_file_callback,
        .remove_buf_file        = blk_remove_buf_file_callback,
};
/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                       struct blk_user_trace_setup *buts)
{
        struct blk_trace *old_bt, *bt = NULL;
        struct dentry *dir = NULL;
        int ret, i;

        if (!buts->buf_size || !buts->buf_nr)
                return -EINVAL;

        strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
        buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

        /*
         * Some device names have larger paths - convert the slashes
         * to underscores for this to work as expected.
         */
        for (i = 0; i < strlen(buts->name); i++)
                if (buts->name[i] == '/')
                        buts->name[i] = '_';

        ret = -ENOMEM;
        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
        if (!bt)
                goto err;

        bt->sequence = alloc_percpu(unsigned long);
        if (!bt->sequence)
                goto err;

        bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
        if (!bt->msg_data)
                goto err;

        ret = -ENOENT;

        mutex_lock(&blk_tree_mutex);
        if (!blk_tree_root) {
                blk_tree_root = debugfs_create_dir("block", NULL);
                if (!blk_tree_root) {
                        mutex_unlock(&blk_tree_mutex);
                        goto err;
                }
        }
        mutex_unlock(&blk_tree_mutex);

        dir = debugfs_create_dir(buts->name, blk_tree_root);
        if (!dir)
                goto err;

        bt->dir = dir;
        bt->dev = dev;
        atomic_set(&bt->dropped, 0);

        ret = -EIO;
        bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
                                               &blk_dropped_fops);
        if (!bt->dropped_file)
                goto err;

        bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
        if (!bt->msg_file)
                goto err;

        bt->rchan = relay_open("trace", dir, buts->buf_size,
                               buts->buf_nr, &blk_relay_callbacks, bt);
        if (!bt->rchan)
                goto err;

        bt->act_mask = buts->act_mask;
        if (!bt->act_mask)
                bt->act_mask = (u16) -1;

        bt->start_lba = buts->start_lba;
        bt->end_lba = buts->end_lba;
        if (!bt->end_lba)
                bt->end_lba = -1ULL;

        bt->pid = buts->pid;
        bt->trace_state = Blktrace_setup;

        ret = -EBUSY;
        old_bt = xchg(&q->blk_trace, bt);
        if (old_bt) {
                (void) xchg(&q->blk_trace, old_bt);
                goto err;
        }

        if (atomic_add_return(1, &blk_probes_ref) == 1)
                blk_register_tracepoints();

        return 0;
err:
        if (bt) {
                if (bt->msg_file)
                        debugfs_remove(bt->msg_file);
                if (bt->dropped_file)
                        debugfs_remove(bt->dropped_file);
                free_percpu(bt->sequence);
                free_percpu(bt->msg_data);
                if (bt->rchan)
                        relay_close(bt->rchan);
                kfree(bt);
        }
        return ret;
}
int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                    char __user *arg)
{
        struct blk_user_trace_setup buts;
        int ret;

        ret = copy_from_user(&buts, arg, sizeof(buts));
        if (ret)
                return -EFAULT;

        ret = do_blk_trace_setup(q, name, dev, &buts);
        if (ret)
                return ret;

        if (copy_to_user(arg, &buts, sizeof(buts)))
                return -EFAULT;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
int blk_trace_startstop(struct request_queue *q, int start)
{
        int ret;
        struct blk_trace *bt = q->blk_trace;

        if (bt == NULL)
                return -EINVAL;

        /*
         * For starting a trace, we can transition from a setup or stopped
         * trace. For stopping a trace, the state must be running.
         */
        ret = -EINVAL;
        if (start) {
                if (bt->trace_state == Blktrace_setup ||
                    bt->trace_state == Blktrace_stopped) {
                        blktrace_seq++;
                        smp_mb();
                        bt->trace_state = Blktrace_running;

                        trace_note_time(bt);
                        ret = 0;
                }
        } else {
                if (bt->trace_state == Blktrace_running) {
                        bt->trace_state = Blktrace_stopped;
                        relay_flush(bt->rchan);
                        ret = 0;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);
/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:       the block device
 * @cmd:        the ioctl cmd
 * @arg:        the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
        struct request_queue *q;
        int ret, start = 0;
        char b[BDEVNAME_SIZE];

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        mutex_lock(&bdev->bd_mutex);

        switch (cmd) {
        case BLKTRACESETUP:
                bdevname(bdev, b);
                ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
                break;
        case BLKTRACESTART:
                start = 1;
                /* fall through */
        case BLKTRACESTOP:
                ret = blk_trace_startstop(q, start);
                break;
        case BLKTRACETEARDOWN:
                ret = blk_trace_remove(q);
                break;
        default:
                ret = -ENOTTY;
                break;
        }

        mutex_unlock(&bdev->bd_mutex);
        return ret;
}
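
/*
 * User-space sketch of the ioctl sequence (error handling omitted; the
 * buffer sizes are arbitrary example values, and the struct comes from
 * <linux/blktrace_api.h>):
 *
 *      struct blk_user_trace_setup buts = {
 *              .buf_size = 512 * 1024,
 *              .buf_nr   = 4,
 *      };
 *      int fd = open("/dev/sda", O_RDONLY);
 *
 *      ioctl(fd, BLKTRACESETUP, &buts);
 *      ioctl(fd, BLKTRACESTART);
 *      ... consume the per-cpu relay "trace" files under
 *          /sys/kernel/debug/block/sda ...
 *      ioctl(fd, BLKTRACESTOP);
 *      ioctl(fd, BLKTRACETEARDOWN);
 */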
/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
        if (q->blk_trace) {
                blk_trace_startstop(q, 0);
                blk_trace_remove(q);
        }
}
/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:          queue the io is for
 * @rq:         the source request
 * @what:       the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
                             u32 what)
{
        struct blk_trace *bt = q->blk_trace;
        int rw = rq->cmd_flags & 0x03;

        if (likely(!bt))
                return;

        if (blk_discard_rq(rq))
                rw |= (1 << BIO_RW_DISCARD);

        if (blk_pc_request(rq)) {
                what |= BLK_TC_ACT(BLK_TC_PC);
                __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
                                sizeof(rq->cmd), rq->cmd);
        } else {
                what |= BLK_TC_ACT(BLK_TC_FS);
                __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
                                rw, what, rq->errors, 0, NULL);
        }
}
static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(struct request_queue *q,
                                     struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(struct request_queue *q,
                                      struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}
/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:          queue the io is for
 * @bio:        the source bio
 * @what:       the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
                              u32 what)
{
        struct blk_trace *bt = q->blk_trace;

        if (likely(!bt))
                return;

        __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
                        !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}
static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
}

static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
}

static void blk_add_trace_bio_backmerge(struct request_queue *q,
                                        struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
}

static void blk_add_trace_bio_frontmerge(struct request_queue *q,
                                         struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}

static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
}
static void blk_add_trace_getrq(struct request_queue *q,
                                struct bio *bio, int rw)
{
        if (bio)
                blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
        else {
                struct blk_trace *bt = q->blk_trace;

                if (bt)
                        __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ,
                                        0, 0, NULL);
        }
}

static void blk_add_trace_sleeprq(struct request_queue *q,
                                  struct bio *bio, int rw)
{
        if (bio)
                blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
        else {
                struct blk_trace *bt = q->blk_trace;

                if (bt)
                        __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
                                        0, 0, NULL);
        }
}
static void blk_add_trace_plug(struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt)
                __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}
static void blk_add_trace_unplug_io(struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
                                sizeof(rpdu), &rpdu);
        }
}

static void blk_add_trace_unplug_timer(struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
                                sizeof(rpdu), &rpdu);
        }
}
static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
                                unsigned int pdu)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
                                BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
                                sizeof(rpdu), &rpdu);
        }
}
/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:          queue the io is for
 * @bio:        the source bio
 * @dev:        target device
 * @from:       source sector
 * @to:         target sector
 *
 * Description:
 *     Device mapper or raid target sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
                                dev_t dev, sector_t from, sector_t to)
{
        struct blk_trace *bt = q->blk_trace;
        struct blk_io_trace_remap r;

        if (likely(!bt))
                return;

        r.device = cpu_to_be32(dev);
        r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
        r.sector = cpu_to_be64(to);

        __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
                        !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:          queue the io is for
 * @rq:         io request
 * @data:       driver-specific data
 * @len:        length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
                         struct request *rq,
                         void *data, size_t len)
{
        struct blk_trace *bt = q->blk_trace;

        if (likely(!bt))
                return;

        if (blk_pc_request(rq))
                __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
                                rq->errors, len, data);
        else
                __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
                                0, BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
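
/*
 * Example (hypothetical driver code; "my_drv_stats" is illustrative only):
 * a low-level driver can attach its own per-request payload to the trace
 * stream, e.g.
 *
 *      struct my_drv_stats st = { ... };
 *      blk_add_driver_data(q, rq, &st, sizeof(st));
 *
 * The payload is opaque to blktrace; user space tooling must know how to
 * decode it.
 */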
static void blk_register_tracepoints(void)
{
        int ret;

        ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
        WARN_ON(ret);
        ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
        WARN_ON(ret);
        ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
        WARN_ON(ret);
        ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
        WARN_ON(ret);
        ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
        WARN_ON(ret);
        ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
        WARN_ON(ret);
        ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
        WARN_ON(ret);
        ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
        WARN_ON(ret);
        ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
        WARN_ON(ret);
        ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
        WARN_ON(ret);
        ret = register_trace_block_getrq(blk_add_trace_getrq);
        WARN_ON(ret);
        ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
        WARN_ON(ret);
        ret = register_trace_block_plug(blk_add_trace_plug);
        WARN_ON(ret);
        ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
        WARN_ON(ret);
        ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
        WARN_ON(ret);
        ret = register_trace_block_split(blk_add_trace_split);
        WARN_ON(ret);
        ret = register_trace_block_remap(blk_add_trace_remap);
        WARN_ON(ret);
}
static void blk_unregister_tracepoints(void)
{
        unregister_trace_block_remap(blk_add_trace_remap);
        unregister_trace_block_split(blk_add_trace_split);
        unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
        unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
        unregister_trace_block_plug(blk_add_trace_plug);
        unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
        unregister_trace_block_getrq(blk_add_trace_getrq);
        unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
        unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
        unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
        unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
        unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
        unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
        unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
        unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
        unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
        unregister_trace_block_rq_abort(blk_add_trace_rq_abort);

        tracepoint_synchronize_unregister();
}
/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
        int i = 0;
        int tc = t->action >> BLK_TC_SHIFT;

        if (tc & BLK_TC_DISCARD)
                rwbs[i++] = 'D';
        else if (tc & BLK_TC_WRITE)
                rwbs[i++] = 'W';
        else if (t->bytes)
                rwbs[i++] = 'R';
        else
                rwbs[i++] = 'N';

        if (tc & BLK_TC_AHEAD)
                rwbs[i++] = 'A';
        if (tc & BLK_TC_BARRIER)
                rwbs[i++] = 'B';
        if (tc & BLK_TC_SYNC)
                rwbs[i++] = 'S';
        if (tc & BLK_TC_META)
                rwbs[i++] = 'M';

        rwbs[i] = '\0';
}
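
/*
 * Example: a synchronous write renders as "WS", a readahead read as "RA",
 * and a trace with no data transfer as "N". The string is at most five
 * characters plus the terminating NUL, hence the rwbs[6] buffers below.
 */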
static inline const struct blk_io_trace *
te_blk_io_trace(const struct trace_entry *ent)
{
        return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent) + 1;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent)
{
        const __u64 *val = pdu_start(ent);
        return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
                          struct blk_io_trace_remap *r)
{
        const struct blk_io_trace_remap *__r = pdu_start(ent);
        __u64 sector = __r->sector;

        r->device = be32_to_cpu(__r->device);
        r->device_from = be32_to_cpu(__r->device_from);
        r->sector = be64_to_cpu(sector);
}
static int blk_log_action_iter(struct trace_iterator *iter, const char *act)
{
        char rwbs[6];
        unsigned long long ts = ns2usecs(iter->ts);
        unsigned long usec_rem = do_div(ts, USEC_PER_SEC);
        unsigned secs = (unsigned long)ts;
        const struct trace_entry *ent = iter->ent;
        const struct blk_io_trace *t = (const struct blk_io_trace *)ent;

        fill_rwbs(rwbs, t);

        return trace_seq_printf(&iter->seq,
                                "%3d,%-3d %2d %5d.%06lu %5u %2s %3s ",
                                MAJOR(t->device), MINOR(t->device), iter->cpu,
                                secs, usec_rem, ent->pid, act, rwbs);
}

static int blk_log_action_seq(struct trace_seq *s, const struct blk_io_trace *t,
                              const char *act)
{
        char rwbs[6];

        fill_rwbs(rwbs, t);
        return trace_seq_printf(s, "%3d,%-3d %2s %3s ",
                                MAJOR(t->device), MINOR(t->device), act, rwbs);
}
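
/*
 * Together with the blk_log_*() helpers below, these produce output lines
 * such as (illustrative values):
 *
 *        8,0    Q  WS 3141592 + 8 [bash]
 *
 * i.e. major,minor of the device, the action code, the rwbs flag string,
 * then the per-action payload.
 */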
static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        if (t_sec(ent))
                return trace_seq_printf(s, "%llu + %u [%s]\n",
                                        t_sector(ent), t_sec(ent), cmd);
        return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_with_error(struct trace_seq *s,
                              const struct trace_entry *ent)
{
        if (t_sec(ent))
                return trace_seq_printf(s, "%llu + %u [%d]\n", t_sector(ent),
                                        t_sec(ent), t_error(ent));
        return trace_seq_printf(s, "%llu [%d]\n", t_sector(ent), t_error(ent));
}
static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
        struct blk_io_trace_remap r = { .device = 0, };

        get_pdu_remap(ent, &r);
        return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
                                t_sector(ent),
                                t_sec(ent), MAJOR(r.device), MINOR(r.device),
                                (unsigned long long)r.sector);
}
static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
}

static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
                                get_pdu_int(ent), cmd);
}
/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
        if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
                return;
        seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
                    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
        if (atomic_add_return(1, &blk_probes_ref) == 1)
                blk_register_tracepoints();
        trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
}

static int blk_tracer_init(struct trace_array *tr)
{
        blk_tr = tr;
        blk_tracer_start(tr);
        blk_tracer_enabled = true;
        return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
        trace_flags |= TRACE_ITER_CONTEXT_INFO;
        if (atomic_dec_and_test(&blk_probes_ref))
                blk_unregister_tracepoints();
}

static void blk_tracer_reset(struct trace_array *tr)
{
        if (!atomic_read(&blk_probes_ref))
                return;

        blk_tracer_enabled = false;
        blk_tracer_stop(tr);
}
static const struct {
        const char *act[2];
        int        (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] = {
        [__BLK_TA_QUEUE]        = {{ "Q",  "queue" },        blk_log_generic },
        [__BLK_TA_BACKMERGE]    = {{ "M",  "backmerge" },    blk_log_generic },
        [__BLK_TA_FRONTMERGE]   = {{ "F",  "frontmerge" },   blk_log_generic },
        [__BLK_TA_GETRQ]        = {{ "G",  "getrq" },        blk_log_generic },
        [__BLK_TA_SLEEPRQ]      = {{ "S",  "sleeprq" },      blk_log_generic },
        [__BLK_TA_REQUEUE]      = {{ "R",  "requeue" },      blk_log_with_error },
        [__BLK_TA_ISSUE]        = {{ "D",  "issue" },        blk_log_generic },
        [__BLK_TA_COMPLETE]     = {{ "C",  "complete" },     blk_log_with_error },
        [__BLK_TA_PLUG]         = {{ "P",  "plug" },         blk_log_plug },
        [__BLK_TA_UNPLUG_IO]    = {{ "U",  "unplug_io" },    blk_log_unplug },
        [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
        [__BLK_TA_INSERT]       = {{ "I",  "insert" },       blk_log_generic },
        [__BLK_TA_SPLIT]        = {{ "X",  "split" },        blk_log_split },
        [__BLK_TA_BOUNCE]       = {{ "B",  "bounce" },       blk_log_generic },
        [__BLK_TA_REMAP]        = {{ "A",  "remap" },        blk_log_remap },
};
static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
                                               int flags)
{
        struct trace_seq *s = &iter->seq;
        const struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
        const u16 what = t->action & ((1 << BLK_TC_SHIFT) - 1);
        int ret;

        if (!trace_print_context(iter))
                return TRACE_TYPE_PARTIAL_LINE;

        if (unlikely(what == 0 || what > ARRAY_SIZE(what2act)))
                ret = trace_seq_printf(s, "Bad pc action %x\n", what);
        else {
                const bool long_act = !!(trace_flags & TRACE_ITER_VERBOSE);
                ret = blk_log_action_seq(s, t, what2act[what].act[long_act]);
                if (ret)
                        ret = what2act[what].print(s, iter->ent);
        }

        return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}
static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
        const int offset = offsetof(struct blk_io_trace, sector);
        struct blk_io_trace old = {
                .magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
                .time  = iter->ts,
        };

        if (!trace_seq_putmem(s, &old, offset))
                return 0;
        return trace_seq_putmem(s, &t->sector,
                                sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
{
        return blk_trace_synthesize_old_trace(iter) ?
                        TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
        const struct blk_io_trace *t;
        u16 what;
        int ret;

        if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
                return TRACE_TYPE_UNHANDLED;

        t = (const struct blk_io_trace *)iter->ent;
        what = t->action & ((1 << BLK_TC_SHIFT) - 1);

        if (unlikely(what == 0 || what > ARRAY_SIZE(what2act)))
                ret = trace_seq_printf(&iter->seq, "Bad pc action %x\n", what);
        else {
                const bool long_act = !!(trace_flags & TRACE_ITER_VERBOSE);
                ret = blk_log_action_iter(iter, what2act[what].act[long_act]);
                if (ret)
                        ret = what2act[what].print(&iter->seq, iter->ent);
        }

        return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}
static struct tracer blk_tracer __read_mostly = {
        .name           = "blk",
        .init           = blk_tracer_init,
        .reset          = blk_tracer_reset,
        .start          = blk_tracer_start,
        .stop           = blk_tracer_stop,
        .print_header   = blk_tracer_print_header,
        .print_line     = blk_tracer_print_line,
        .flags          = &blk_tracer_flags,
};

static struct trace_event trace_blk_event = {
        .type           = TRACE_BLK,
        .trace          = blk_trace_event_print,
        .binary         = blk_trace_event_print_binary,
};
static int __init init_blk_tracer(void)
{
        if (!register_ftrace_event(&trace_blk_event)) {
                pr_warning("Warning: could not register block events\n");
                return 1;
        }

        if (register_tracer(&blk_tracer) != 0) {
                pr_warning("Warning: could not register the block tracer\n");
                unregister_ftrace_event(&trace_blk_event);
                return 1;
        }

        return 0;
}

device_initcall(init_blk_tracer);
static int blk_trace_remove_queue(struct request_queue *q)
{
        struct blk_trace *bt;

        bt = xchg(&q->blk_trace, NULL);
        if (bt == NULL)
                return -EINVAL;

        kfree(bt);
        return 0;
}
/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q, dev_t dev)
{
        struct blk_trace *old_bt, *bt = NULL;

        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
        if (!bt)
                return -ENOMEM;

        bt->dev = dev;
        bt->act_mask = (u16)-1;
        bt->end_lba = -1ULL;
        bt->trace_state = Blktrace_running;

        old_bt = xchg(&q->blk_trace, bt);
        if (old_bt != NULL) {
                (void)xchg(&q->blk_trace, old_bt);
                kfree(bt);
                return -EBUSY;
        }

        return 0;
}
/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
        DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
                    sysfs_blk_trace_attr_show, \
                    sysfs_blk_trace_attr_store)
static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
        &dev_attr_enable.attr,
        &dev_attr_act_mask.attr,
        &dev_attr_pid.attr,
        &dev_attr_start_lba.attr,
        &dev_attr_end_lba.attr,
        NULL
};

struct attribute_group blk_trace_attr_group = {
        .name  = "trace",
        .attrs = blk_trace_attrs,
};
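
/*
 * With the attribute group named "trace", these files appear per block
 * device (and per partition) under sysfs, e.g. (paths assume a disk
 * named sda):
 *
 *      /sys/block/sda/trace/enable
 *      /sys/block/sda/trace/act_mask
 *      /sys/block/sda/trace/pid
 *      /sys/block/sda/trace/start_lba
 *      /sys/block/sda/trace/end_lba
 */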
static const struct {
        int mask;
        const char *str;
} mask_maps[] = {
        { BLK_TC_READ,          "read"          },
        { BLK_TC_WRITE,         "write"         },
        { BLK_TC_BARRIER,       "barrier"       },
        { BLK_TC_SYNC,          "sync"          },
        { BLK_TC_QUEUE,         "queue"         },
        { BLK_TC_REQUEUE,       "requeue"       },
        { BLK_TC_ISSUE,         "issue"         },
        { BLK_TC_COMPLETE,      "complete"      },
        { BLK_TC_FS,            "fs"            },
        { BLK_TC_PC,            "pc"            },
        { BLK_TC_AHEAD,         "ahead"         },
        { BLK_TC_META,          "meta"          },
        { BLK_TC_DISCARD,       "discard"       },
        { BLK_TC_DRV_DATA,      "drv_data"      },
};
static int blk_trace_str2mask(const char *str)
{
        int i;
        int mask = 0;
        char *s, *token;

        s = kstrdup(str, GFP_KERNEL);
        if (s == NULL)
                return -ENOMEM;
        s = strstrip(s);

        while (1) {
                token = strsep(&s, ",");
                if (token == NULL)
                        break;

                if (*token == '\0')
                        continue;

                for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
                        if (strcasecmp(token, mask_maps[i].str) == 0) {
                                mask |= mask_maps[i].mask;
                                break;
                        }
                }

                if (i == ARRAY_SIZE(mask_maps)) {
                        mask = -EINVAL;
                        break;
                }
        }
        kfree(s);

        return mask;
}
static ssize_t blk_trace_mask2str(char *buf, int mask)
{
        int i;
        char *p = buf;

        for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
                if (mask & mask_maps[i].mask) {
                        p += sprintf(p, "%s%s",
                                     (p == buf) ? "" : ",", mask_maps[i].str);
                }
        }
        *p++ = '\n';

        return p - buf;
}
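
/*
 * Example: a mask of (BLK_TC_READ | BLK_TC_WRITE | BLK_TC_FS) renders as
 * "read,write,fs\n"; writing the same comma-separated list back through
 * blk_trace_str2mask() reconstructs the mask.
 */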
static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
        if (bdev->bd_disk == NULL)
                return NULL;

        return bdev_get_queue(bdev);
}
static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct hd_struct *p = dev_to_part(dev);
        struct request_queue *q;
        struct block_device *bdev;
        ssize_t ret = -ENXIO;

        lock_kernel();
        bdev = bdget(part_devt(p));
        if (bdev == NULL)
                goto out_unlock_kernel;

        q = blk_trace_get_queue(bdev);
        if (q == NULL)
                goto out_bdput;

        mutex_lock(&bdev->bd_mutex);

        if (attr == &dev_attr_enable) {
                ret = sprintf(buf, "%u\n", !!q->blk_trace);
                goto out_unlock_bdev;
        }

        if (q->blk_trace == NULL)
                ret = sprintf(buf, "disabled\n");
        else if (attr == &dev_attr_act_mask)
                ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
        else if (attr == &dev_attr_pid)
                ret = sprintf(buf, "%u\n", q->blk_trace->pid);
        else if (attr == &dev_attr_start_lba)
                ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
        else if (attr == &dev_attr_end_lba)
                ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
        mutex_unlock(&bdev->bd_mutex);
out_bdput:
        bdput(bdev);
out_unlock_kernel:
        unlock_kernel();
        return ret;
}
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count)
{
        struct block_device *bdev;
        struct request_queue *q;
        struct hd_struct *p;
        u64 value;
        ssize_t ret = -EINVAL;

        if (count == 0)
                goto out;

        if (attr == &dev_attr_act_mask) {
                if (sscanf(buf, "%llx", &value) != 1) {
                        /* Assume it is a list of trace category names */
                        ret = blk_trace_str2mask(buf);
                        if (ret < 0)
                                goto out;
                        value = ret;
                }
        } else if (sscanf(buf, "%llu", &value) != 1)
                goto out;

        ret = -ENXIO;

        lock_kernel();
        p = dev_to_part(dev);
        bdev = bdget(part_devt(p));
        if (bdev == NULL)
                goto out_unlock_kernel;

        q = blk_trace_get_queue(bdev);
        if (q == NULL)
                goto out_bdput;

        mutex_lock(&bdev->bd_mutex);

        if (attr == &dev_attr_enable) {
                if (value)
                        ret = blk_trace_setup_queue(q, bdev->bd_dev);
                else
                        ret = blk_trace_remove_queue(q);
                goto out_unlock_bdev;
        }

        ret = 0;
        if (q->blk_trace == NULL)
                ret = blk_trace_setup_queue(q, bdev->bd_dev);

        if (ret == 0) {
                if (attr == &dev_attr_act_mask)
                        q->blk_trace->act_mask = value;
                else if (attr == &dev_attr_pid)
                        q->blk_trace->pid = value;
                else if (attr == &dev_attr_start_lba)
                        q->blk_trace->start_lba = value;
                else if (attr == &dev_attr_end_lba)
                        q->blk_trace->end_lba = value;
        }

out_unlock_bdev:
        mutex_unlock(&bdev->bd_mutex);
out_bdput:
        bdput(bdev);
out_unlock_kernel:
        unlock_kernel();
out:
        return ret ? ret : count;
}