/* drivers/block/null_blk.c */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

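/*
 * Per-I/O command state. For blk-mq this lives in the request pdu (sized
 * via cmd_size in null_init_tag_set()); the bio and legacy request modes
 * allocate commands from a per-queue array instead.
 */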
struct nullb_cmd {
        struct list_head list;
        struct llist_node ll_list;
        struct call_single_data csd;
        struct request *rq;
        struct bio *bio;
        unsigned int tag;
        struct nullb_queue *nq;
        struct hrtimer timer;
};

struct nullb_queue {
        unsigned long *tag_map;
        wait_queue_head_t wait;
        unsigned int queue_depth;

        struct nullb_cmd *cmds;
};

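/*
 * Per-device state. A device registers either a gendisk or, with
 * use_lightnvm, an nvm_dev; __tag_set is used when the tag set is not
 * shared between devices.
 */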
struct nullb {
        struct list_head list;
        unsigned int index;
        struct request_queue *q;
        struct gendisk *disk;
        struct nvm_dev *ndev;
        struct blk_mq_tag_set *tag_set;
        struct blk_mq_tag_set __tag_set;
        struct hrtimer timer;
        unsigned int queue_depth;
        spinlock_t lock;

        struct nullb_queue *queues;
        unsigned int nr_queues;
        char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;
static struct blk_mq_tag_set tag_set;

enum {
        NULL_IRQ_NONE           = 0,
        NULL_IRQ_SOFTIRQ        = 1,
        NULL_IRQ_TIMER          = 2,
};

enum {
        NULL_Q_BIO              = 0,
        NULL_Q_RQ               = 1,
        NULL_Q_MQ               = 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
        int ret, new_val;

        ret = kstrtoint(str, 10, &new_val);
        if (ret)
                return -EINVAL;

        if (new_val < min || new_val > max)
                return -EINVAL;

        *val = new_val;
        return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
        return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
        .set    = null_set_queue_mode,
        .get    = param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static bool blocking;
module_param(blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, S_IRUGO);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
        return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
                                        NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
        .set    = null_set_irqmode,
        .get    = param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long completion_nsec = 10000;
module_param(completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

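/*
 * Example (a hypothetical invocation; the parameter names are the ones
 * defined above):
 *
 *   modprobe null_blk queue_mode=2 submit_queues=4 hw_queue_depth=128 \
 *            irqmode=2 completion_nsec=100000
 *
 * creates /dev/nullb0 as a blk-mq device with four hardware queues that
 * completes each request from an hrtimer after 100us.
 */

/*
 * Simple bitmap tag allocator, used only by the bio and legacy request
 * paths; blk-mq manages its own tags.
 */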
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
        clear_bit_unlock(tag, nq->tag_map);

        if (waitqueue_active(&nq->wait))
                wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
        unsigned int tag;

        do {
                tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
                if (tag >= nq->queue_depth)
                        return -1U;
        } while (test_and_set_bit_lock(tag, nq->tag_map));

        return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
        put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

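/*
 * __alloc_cmd() tries to grab a tag once; alloc_cmd() can additionally
 * sleep on nq->wait until put_tag() frees one.
 */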
static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
        struct nullb_cmd *cmd;
        unsigned int tag;

        tag = get_tag(nq);
        if (tag != -1U) {
                cmd = &nq->cmds[tag];
                cmd->tag = tag;
                cmd->nq = nq;
                if (irqmode == NULL_IRQ_TIMER) {
                        hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
                                     HRTIMER_MODE_REL);
                        cmd->timer.function = null_cmd_timer_expired;
                }
                return cmd;
        }

        return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
        struct nullb_cmd *cmd;
        DEFINE_WAIT(wait);

        cmd = __alloc_cmd(nq);
        if (cmd || !can_wait)
                return cmd;

        do {
                prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
                cmd = __alloc_cmd(nq);
                if (cmd)
                        break;

                io_schedule();
        } while (1);

        finish_wait(&nq->wait, &wait);
        return cmd;
}

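/*
 * Complete a command according to the active queue_mode. For the legacy
 * request path the queue may have been stopped by null_rq_prep_fn() when
 * tags ran out, so restart it now that a tag is being freed.
 */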
static void end_cmd(struct nullb_cmd *cmd)
{
        struct request_queue *q = NULL;

        if (cmd->rq)
                q = cmd->rq->q;

        switch (queue_mode) {
        case NULL_Q_MQ:
                blk_mq_end_request(cmd->rq, BLK_STS_OK);
                return;
        case NULL_Q_RQ:
                INIT_LIST_HEAD(&cmd->rq->queuelist);
                blk_end_request_all(cmd->rq, BLK_STS_OK);
                break;
        case NULL_Q_BIO:
                bio_endio(cmd->bio);
                break;
        }

        free_cmd(cmd);

        /* Restart queue if needed, as we are freeing a tag */
        if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
                unsigned long flags;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue_async(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
        end_cmd(container_of(timer, struct nullb_cmd, timer));

        return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
        ktime_t kt = completion_nsec;

        hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
        if (queue_mode == NULL_Q_MQ)
                end_cmd(blk_mq_rq_to_pdu(rq));
        else
                end_cmd(rq->special);
}

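/*
 * Commands complete inline (NULL_IRQ_NONE), via the block softirq
 * (NULL_IRQ_SOFTIRQ), or after completion_nsec from an hrtimer
 * (NULL_IRQ_TIMER) to emulate device latency.
 */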
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
        /* Complete IO by inline, softirq or timer */
        switch (irqmode) {
        case NULL_IRQ_SOFTIRQ:
                switch (queue_mode) {
                case NULL_Q_MQ:
                        blk_mq_complete_request(cmd->rq);
                        break;
                case NULL_Q_RQ:
                        blk_complete_request(cmd->rq);
                        break;
                case NULL_Q_BIO:
                        /*
                         * XXX: no proper submitting cpu information available.
                         */
                        end_cmd(cmd);
                        break;
                }
                break;
        case NULL_IRQ_NONE:
                end_cmd(cmd);
                break;
        case NULL_IRQ_TIMER:
                null_cmd_end_timer(cmd);
                break;
        }
}

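/*
 * Map the submitting CPU to a queue by splitting the CPU id space evenly:
 * with nr_cpu_ids == 8 and two queues, CPUs 0-3 use queue 0 and CPUs 4-7
 * use queue 1.
 */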
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
        int index = 0;

        if (nullb->nr_queues != 1)
                index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

        return &nullb->queues[index];
}

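/*
 * I/O entry points: null_queue_bio() for the bio mode, the prep_rq/
 * request_fn pair for the legacy request mode, and null_queue_rq() for
 * blk-mq.
 */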
static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
        struct nullb *nullb = q->queuedata;
        struct nullb_queue *nq = nullb_to_queue(nullb);
        struct nullb_cmd *cmd;

        cmd = alloc_cmd(nq, 1);
        cmd->bio = bio;

        null_handle_cmd(cmd);
        return BLK_QC_T_NONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
        struct nullb *nullb = q->queuedata;
        struct nullb_queue *nq = nullb_to_queue(nullb);
        struct nullb_cmd *cmd;

        cmd = alloc_cmd(nq, 0);
        if (cmd) {
                cmd->rq = req;
                req->special = cmd;
                return BLKPREP_OK;
        }
        blk_stop_queue(q);

        return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                struct nullb_cmd *cmd = rq->special;

                spin_unlock_irq(q->queue_lock);
                null_handle_cmd(cmd);
                spin_lock_irq(q->queue_lock);
        }
}

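/*
 * blk-mq ->queue_rq handler; the command state lives in the request pdu.
 * BLK_MQ_F_BLOCKING (the "blocking" parameter) allows sleeping here.
 */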
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
                         const struct blk_mq_queue_data *bd)
{
        struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

        might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

        if (irqmode == NULL_IRQ_TIMER) {
                hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                cmd->timer.function = null_cmd_timer_expired;
        }
        cmd->rq = bd->rq;
        cmd->nq = hctx->driver_data;

        blk_mq_start_request(bd->rq);

        null_handle_cmd(cmd);
        return BLK_STS_OK;
}

static const struct blk_mq_ops null_mq_ops = {
        .queue_rq       = null_queue_rq,
        .complete       = null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
        kfree(nq->tag_map);
        kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
        int i;

        for (i = 0; i < nullb->nr_queues; i++)
                cleanup_queue(&nullb->queues[i]);

        kfree(nullb->queues);
}

#ifdef CONFIG_NVM

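/*
 * LightNVM (open-channel SSD) emulation: the device registers as an
 * nvm_dev instead of a gendisk and services nvm_rq requests through the
 * same request queue.
 */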
static void null_lnvm_end_io(struct request *rq, blk_status_t status)
{
        struct nvm_rq *rqd = rq->end_io_data;

        /* XXX: lightnvm core seems to expect NVM_RSP_* values here.. */
        rqd->error = status ? -EIO : 0;
        nvm_end_io(rqd);

        blk_put_request(rq);
}

static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct request *rq;
        struct bio *bio = rqd->bio;

        rq = blk_mq_alloc_request(q,
                op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return -ENOMEM;

        blk_init_request_from_bio(rq, bio);

        rq->end_io_data = rqd;

        blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

        return 0;
}

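/*
 * Report an identity/geometry derived from the module parameters. Worked
 * example with the defaults (gb=250, bs forced to 4096 for LightNVM):
 * 250 GiB / 4096 = 65,536,000 pages; >> 8 gives 256,000 blocks of 256
 * pages each; (256,000 >> 16) + 1 = 4 LUNs of 64,000 blocks, one channel,
 * one plane.
 */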
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
        sector_t size = (sector_t)gb * 1024 * 1024 * 1024ULL;
        sector_t blksize;
        struct nvm_id_group *grp;

        id->ver_id = 0x1;
        id->vmnt = 0;
        id->cap = 0x2;
        id->dom = 0x1;

        id->ppaf.blk_offset = 0;
        id->ppaf.blk_len = 16;
        id->ppaf.pg_offset = 16;
        id->ppaf.pg_len = 16;
        id->ppaf.sect_offset = 32;
        id->ppaf.sect_len = 8;
        id->ppaf.pln_offset = 40;
        id->ppaf.pln_len = 8;
        id->ppaf.lun_offset = 48;
        id->ppaf.lun_len = 8;
        id->ppaf.ch_offset = 56;
        id->ppaf.ch_len = 8;

        sector_div(size, bs); /* convert size to pages */
        size >>= 8; /* convert size to number of blocks (256 pages per block) */
        grp = &id->grp;
        grp->mtype = 0;
        grp->fmtype = 0;
        grp->num_ch = 1;
        grp->num_pg = 256;
        blksize = size;
        size >>= 16;
        grp->num_lun = size + 1;
        sector_div(blksize, grp->num_lun);
        grp->num_blk = blksize;
        grp->num_pln = 1;

        grp->fpg_sz = bs;
        grp->csecs = bs;
        grp->trdt = 25000;
        grp->trdm = 25000;
        grp->tprt = 500000;
        grp->tprm = 500000;
        grp->tbet = 1500000;
        grp->tbem = 1500000;
        grp->mpos = 0x010101; /* single plane rwe */
        grp->cpar = hw_queue_depth;

        return 0;
}

static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
        mempool_t *virtmem_pool;

        virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
        if (!virtmem_pool) {
                pr_err("null_blk: Unable to create virtual memory pool\n");
                return NULL;
        }

        return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
        mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                                gfp_t mem_flags, dma_addr_t *dma_handler)
{
        return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
                                                        dma_addr_t dma_handler)
{
        mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
        .identity               = null_lnvm_id,
        .submit_io              = null_lnvm_submit_io,

        .create_dma_pool        = null_lnvm_create_dma_pool,
        .destroy_dma_pool       = null_lnvm_destroy_dma_pool,
        .dev_dma_alloc          = null_lnvm_dev_dma_alloc,
        .dev_dma_free           = null_lnvm_dev_dma_free,

        /* Simulate nvme protocol restriction */
        .max_phys_sect          = 64,
};

static int null_nvm_register(struct nullb *nullb)
{
        struct nvm_dev *dev;
        int rv;

        dev = nvm_alloc_dev(0);
        if (!dev)
                return -ENOMEM;

        dev->q = nullb->q;
        memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
        dev->ops = &null_lnvm_dev_ops;

        rv = nvm_register(dev);
        if (rv) {
                kfree(dev);
                return rv;
        }
        nullb->ndev = dev;
        return 0;
}

static void null_nvm_unregister(struct nullb *nullb)
{
        nvm_unregister(nullb->ndev);
}
#else
static int null_nvm_register(struct nullb *nullb)
{
        pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
        return -EINVAL;
}
static void null_nvm_unregister(struct nullb *nullb) {}
#endif /* CONFIG_NVM */

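/*
 * Tear down a device: unregister the disk or nvm_dev, drain and free the
 * queue, and release a private (non-shared) tag set.
 */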
static void null_del_dev(struct nullb *nullb)
{
        list_del_init(&nullb->list);

        if (use_lightnvm)
                null_nvm_unregister(nullb);
        else
                del_gendisk(nullb->disk);
        blk_cleanup_queue(nullb->q);
        if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
                blk_mq_free_tag_set(nullb->tag_set);
        if (!use_lightnvm)
                put_disk(nullb->disk);
        cleanup_queues(nullb);
        kfree(nullb);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
        return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
        .owner =        THIS_MODULE,
        .open =         null_open,
        .release =      null_release,
};

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
        BUG_ON(!nullb);
        BUG_ON(!nq);

        init_waitqueue_head(&nq->wait);
        nq->queue_depth = nullb->queue_depth;
}

static void null_init_queues(struct nullb *nullb)
{
        struct request_queue *q = nullb->q;
        struct blk_mq_hw_ctx *hctx;
        struct nullb_queue *nq;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx->nr_ctx || !hctx->tags)
                        continue;
                nq = &nullb->queues[i];
                hctx->driver_data = nq;
                null_init_queue(nullb, nq);
                nullb->nr_queues++;
        }
}

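/*
 * Allocate the per-queue command array and its tag bitmap; the bitmap is
 * rounded up to a whole number of longs for the bitops used by get_tag().
 */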
static int setup_commands(struct nullb_queue *nq)
{
        struct nullb_cmd *cmd;
        int i, tag_size;

        nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
        if (!nq->cmds)
                return -ENOMEM;

        tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
        nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
        if (!nq->tag_map) {
                kfree(nq->cmds);
                return -ENOMEM;
        }

        for (i = 0; i < nq->queue_depth; i++) {
                cmd = &nq->cmds[i];
                INIT_LIST_HEAD(&cmd->list);
                cmd->ll_list.next = NULL;
                cmd->tag = -1U;
        }

        return 0;
}

static int setup_queues(struct nullb *nullb)
{
        nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
                                                                GFP_KERNEL);
        if (!nullb->queues)
                return -ENOMEM;

        nullb->nr_queues = 0;
        nullb->queue_depth = hw_queue_depth;

        return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
        struct nullb_queue *nq;
        int i, ret = 0;

        for (i = 0; i < submit_queues; i++) {
                nq = &nullb->queues[i];

                null_init_queue(nullb, nq);

                ret = setup_commands(nq);
                if (ret)
                        return ret;
                nullb->nr_queues++;
        }
        return 0;
}

static int null_gendisk_register(struct nullb *nullb)
{
        struct gendisk *disk;
        sector_t size;

        disk = nullb->disk = alloc_disk_node(1, home_node);
        if (!disk)
                return -ENOMEM;
        size = (sector_t)gb * 1024 * 1024 * 1024ULL;
        set_capacity(disk, size >> 9);

        disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
        disk->major             = null_major;
        disk->first_minor       = nullb->index;
        disk->fops              = &null_fops;
        disk->private_data      = nullb;
        disk->queue             = nullb->q;
        strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

        add_disk(disk);
        return 0;
}

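/*
 * Populate and allocate a blk-mq tag set; used both for the per-device
 * __tag_set and for the module-wide set when shared_tags is enabled.
 */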
static int null_init_tag_set(struct blk_mq_tag_set *set)
{
        set->ops = &null_mq_ops;
        set->nr_hw_queues = submit_queues;
        set->queue_depth = hw_queue_depth;
        set->numa_node = home_node;
        set->cmd_size   = sizeof(struct nullb_cmd);
        set->flags = BLK_MQ_F_SHOULD_MERGE;
        set->driver_data = NULL;

        if (blocking)
                set->flags |= BLK_MQ_F_BLOCKING;

        return blk_mq_alloc_tag_set(set);
}

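/*
 * Create one device: allocate the nullb, set up its request queue for the
 * selected queue_mode, then register it as a gendisk or LightNVM device.
 */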
static int null_add_dev(void)
{
        struct nullb *nullb;
        int rv;

        nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
        if (!nullb) {
                rv = -ENOMEM;
                goto out;
        }

        spin_lock_init(&nullb->lock);

        if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
                submit_queues = nr_online_nodes;

        rv = setup_queues(nullb);
        if (rv)
                goto out_free_nullb;

        if (queue_mode == NULL_Q_MQ) {
                if (shared_tags) {
                        nullb->tag_set = &tag_set;
                        rv = 0;
                } else {
                        nullb->tag_set = &nullb->__tag_set;
                        rv = null_init_tag_set(nullb->tag_set);
                }

                if (rv)
                        goto out_cleanup_queues;

                nullb->q = blk_mq_init_queue(nullb->tag_set);
                if (IS_ERR(nullb->q)) {
                        rv = -ENOMEM;
                        goto out_cleanup_tags;
                }
                null_init_queues(nullb);
        } else if (queue_mode == NULL_Q_BIO) {
                nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
                if (!nullb->q) {
                        rv = -ENOMEM;
                        goto out_cleanup_queues;
                }
                blk_queue_make_request(nullb->q, null_queue_bio);
                rv = init_driver_queues(nullb);
                if (rv)
                        goto out_cleanup_blk_queue;
        } else {
                nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
                if (!nullb->q) {
                        rv = -ENOMEM;
                        goto out_cleanup_queues;
                }
                blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
                blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
                rv = init_driver_queues(nullb);
                if (rv)
                        goto out_cleanup_blk_queue;
        }

        nullb->q->queuedata = nullb;
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

        mutex_lock(&lock);
        nullb->index = nullb_indexes++;
        mutex_unlock(&lock);

        blk_queue_logical_block_size(nullb->q, bs);
        blk_queue_physical_block_size(nullb->q, bs);

        sprintf(nullb->disk_name, "nullb%d", nullb->index);

        if (use_lightnvm)
                rv = null_nvm_register(nullb);
        else
                rv = null_gendisk_register(nullb);

        if (rv)
                goto out_cleanup_blk_queue;

        mutex_lock(&lock);
        list_add_tail(&nullb->list, &nullb_list);
        mutex_unlock(&lock);

        return 0;
out_cleanup_blk_queue:
        blk_cleanup_queue(nullb->q);
out_cleanup_tags:
        if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
                blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
        cleanup_queues(nullb);
out_free_nullb:
        kfree(nullb);
out:
        return rv;
}

static int __init null_init(void)
{
        int ret = 0;
        unsigned int i;
        struct nullb *nullb;

        if (bs > PAGE_SIZE) {
                pr_warn("null_blk: invalid block size\n");
                pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE);
                bs = PAGE_SIZE;
        }

        if (use_lightnvm && bs != 4096) {
                pr_warn("null_blk: LightNVM only supports 4k block size\n");
                pr_warn("null_blk: defaulting block size to 4k\n");
                bs = 4096;
        }

        if (use_lightnvm && queue_mode != NULL_Q_MQ) {
                pr_warn("null_blk: LightNVM only supported for blk-mq\n");
                pr_warn("null_blk: defaulting queue mode to blk-mq\n");
                queue_mode = NULL_Q_MQ;
        }

        if (queue_mode == NULL_Q_MQ && shared_tags)
                null_init_tag_set(&tag_set);

        if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
                if (submit_queues < nr_online_nodes) {
                        pr_warn("null_blk: submit_queues param is set to %u.\n",
                                                        nr_online_nodes);
                        submit_queues = nr_online_nodes;
                }
        } else if (submit_queues > nr_cpu_ids)
                submit_queues = nr_cpu_ids;
        else if (!submit_queues)
                submit_queues = 1;

        mutex_init(&lock);

        null_major = register_blkdev(0, "nullb");
        if (null_major < 0)
                return null_major;

        if (use_lightnvm) {
                ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
                                                                0, 0, NULL);
                if (!ppa_cache) {
                        pr_err("null_blk: unable to create ppa cache\n");
                        ret = -ENOMEM;
                        goto err_ppa;
                }
        }

        for (i = 0; i < nr_devices; i++) {
                ret = null_add_dev();
                if (ret)
                        goto err_dev;
        }

        pr_info("null_blk: module loaded\n");
        return 0;

err_dev:
        while (!list_empty(&nullb_list)) {
                nullb = list_entry(nullb_list.next, struct nullb, list);
                null_del_dev(nullb);
        }
        kmem_cache_destroy(ppa_cache);
err_ppa:
        unregister_blkdev(null_major, "nullb");
        return ret;
}

static void __exit null_exit(void)
{
        struct nullb *nullb;

        unregister_blkdev(null_major, "nullb");

        mutex_lock(&lock);
        while (!list_empty(&nullb_list)) {
                nullb = list_entry(nullb_list.next, struct nullb, list);
                null_del_dev(nullb);
        }
        mutex_unlock(&lock);

        if (queue_mode == NULL_Q_MQ && shared_tags)
                blk_mq_free_tag_set(&tag_set);

        kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");