#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
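/*
 * A nullb_cmd tracks one in-flight command; depending on queue_mode it is
 * backed by a bio, a legacy request, or a blk-mq request (via the tag set's
 * per-command payload).
 */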
struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};
struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
};
static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
struct completion_queue {
	struct llist_head list;
	struct hrtimer timer;
};

/*
 * These are per-cpu for now, they will need to be configured by the
 * complete_queues parameter and appropriately mapped.
 */
static DEFINE_PER_CPU(struct completion_queue, completion_queues);

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};
static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;
module_param(queue_mode, int, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");
static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static int irqmode = NULL_IRQ_SOFTIRQ;
module_param(irqmode, int, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static int completion_nsec = 10000;
module_param(completion_nsec, int, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
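/*
 * Example load (a sketch; assumes the driver is built as the null_blk module):
 *   modprobe null_blk queue_mode=2 irqmode=1 completion_nsec=10000 nr_devices=2
 */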
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}
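/*
 * Tag allocation: scan the per-queue bitmap for a free bit and claim it with
 * test_and_set_bit_lock(); returns -1U when the queue is full.
 */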
static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}
static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}
static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		return cmd;
	}

	return NULL;
}
static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}
static void end_cmd(struct nullb_cmd *cmd)
{
	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio, 0);
		break;
	}

	free_cmd(cmd);
}
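/*
 * Timer-mode completion: commands are queued on a per-cpu llist and completed
 * in batches from the hrtimer callback below.
 */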
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct completion_queue *cq;
	struct llist_node *entry;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	while ((entry = llist_del_all(&cq->list)) != NULL) {
		entry = llist_reverse_order(entry);
		do {
			cmd = container_of(entry, struct nullb_cmd, ll_list);
			entry = entry->next;
			end_cmd(cmd);
		} while (entry);
	}

	return HRTIMER_NORESTART;
}
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());

	cmd->ll_list.next = NULL;
	if (llist_add(&cmd->ll_list, &cq->list)) {
		ktime_t kt = ktime_set(0, completion_nsec);

		hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL);
	}

	put_cpu();
}
static void null_softirq_done_fn(struct request *rq)
{
	if (queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}
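/* Map the submitting CPU onto one of the device's queues. */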
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}
static void null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
}
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}

	return BLKPREP_DEFER;
}
static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}
static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
			 bool last)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->rq = rq;
	cmd->nq = hctx->driver_data;

	blk_mq_start_request(rq);

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}
static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}
static struct blk_mq_ops null_mq_ops = {
	.queue_rq = null_queue_rq,
	.map_queue = blk_mq_map_queue,
	.init_hctx = null_init_hctx,
	.complete = null_softirq_done_fn,
};
static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	put_disk(nullb->disk);
	kfree(nullb);
}
static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner = THIS_MODULE,
	.open = null_open,
	.release = null_release,
};
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}
static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}
static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}
static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
				GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}
static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}
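/*
 * Create one null block device: allocate the nullb, set up its queues for the
 * selected queue_mode, then register the gendisk.
 */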
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		rv = -ENOMEM;
		goto out_cleanup_blk_queue;
	}

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT;
	disk->major = null_major;
	disk->first_minor = nullb->index;
	disk->fops = &null_fops;
	disk->private_data = nullb;
	disk->queue = nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;

out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}
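/*
 * Module init: validate parameters, set up the per-cpu completion queues,
 * register the block major and create nr_devices devices.
 */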
static int __init null_init(void)
{
	unsigned int i;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.",
				nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	/* Initialize a separate list for each CPU for issuing softirqs */
	for_each_possible_cpu(i) {
		struct completion_queue *cq = &per_cpu(completion_queues, i);

		init_llist_head(&cq->list);

		if (irqmode != NULL_IRQ_TIMER)
			continue;

		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cq->timer.function = null_cmd_timer_expired;
	}
	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	for (i = 0; i < nr_devices; i++) {
		if (null_add_dev()) {
			unregister_blkdev(null_major, "nullb");
			return -EINVAL;
		}
	}

	pr_info("null: module loaded\n");
	return 0;
}
static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);
}
module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");