drivers/lightnvm/core.c (blob at commit "lightnvm: make nvm_map_* return void")
1 /*
2  * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
3  * Initial release: Matias Bjorling <m@bjorling.me>
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version
7  * 2 as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; see the file COPYING.  If not, write to
16  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
17  * USA.
18  *
19  */
20
21 #include <linux/list.h>
22 #include <linux/types.h>
23 #include <linux/sem.h>
24 #include <linux/bitmap.h>
25 #include <linux/moduleparam.h>
26 #include <linux/miscdevice.h>
27 #include <linux/lightnvm.h>
28 #include <linux/sched/sysctl.h>
29
30 static LIST_HEAD(nvm_tgt_types);
31 static DECLARE_RWSEM(nvm_tgtt_lock);
32 static LIST_HEAD(nvm_devices);
33 static DECLARE_RWSEM(nvm_lock);
34
35 /* Map between virtual and physical channel and lun */
36 struct nvm_ch_map {
37         int ch_off;
38         int nr_luns;
39         int *lun_offs;
40 };
41
42 struct nvm_dev_map {
43         struct nvm_ch_map *chnls;
44         int nr_chnls;
45 };
46
47 struct nvm_area {
48         struct list_head list;
49         sector_t begin;
50         sector_t end;   /* end is exclusive */
51 };
52
53 enum {
54         TRANS_TGT_TO_DEV =      0x0,
55         TRANS_DEV_TO_TGT =      0x1,
56 };
57
58 static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
59 {
60         struct nvm_target *tgt;
61
62         list_for_each_entry(tgt, &dev->targets, list)
63                 if (!strcmp(name, tgt->disk->disk_name))
64                         return tgt;
65
66         return NULL;
67 }
68
69 static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
70 {
71         int i;
72
73         for (i = lun_begin; i <= lun_end; i++) {
74                 if (test_and_set_bit(i, dev->lun_map)) {
75                         pr_err("nvm: lun %d already allocated\n", i);
76                         goto err;
77                 }
78         }
79
80         return 0;
81 err:
82         while (--i >= lun_begin)
83                 clear_bit(i, dev->lun_map);
84
85         return -EBUSY;
86 }
87
88 static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
89                                  int lun_end)
90 {
91         int i;
92
93         for (i = lun_begin; i <= lun_end; i++)
94                 WARN_ON(!test_and_clear_bit(i, dev->lun_map));
95 }
96
97 static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev)
98 {
99         struct nvm_dev *dev = tgt_dev->parent;
100         struct nvm_dev_map *dev_map = tgt_dev->map;
101         int i, j;
102
103         for (i = 0; i < dev_map->nr_chnls; i++) {
104                 struct nvm_ch_map *ch_map = &dev_map->chnls[i];
105                 int *lun_offs = ch_map->lun_offs;
106                 int ch = i + ch_map->ch_off;
107
108                 for (j = 0; j < ch_map->nr_luns; j++) {
109                         int lun = j + lun_offs[j];
110                         int lunid = (ch * dev->geo.luns_per_chnl) + lun;
111
112                         WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
113                 }
114
115                 kfree(ch_map->lun_offs);
116         }
117
118         kfree(dev_map->chnls);
119         kfree(dev_map);
120
121         kfree(tgt_dev->luns);
122         kfree(tgt_dev);
123 }
124
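/*
 * nvm_create_tgt_dev() carves the parent device's luns [lun_begin, lun_end]
 * into a virtual geometry that starts at channel 0, lun 0. For each channel
 * it records the channel/lun offsets used later by nvm_map_to_dev() and, in
 * the device's reverse map (dev->rmap), the offsets used by nvm_map_to_tgt().
 * luns_per_chnl is reported as -1 when the luns are not evenly spread over
 * the channels (lun_balanced == 0).
 */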
125 static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
126                                               int lun_begin, int lun_end)
127 {
128         struct nvm_tgt_dev *tgt_dev = NULL;
129         struct nvm_dev_map *dev_rmap = dev->rmap;
130         struct nvm_dev_map *dev_map;
131         struct ppa_addr *luns;
132         int nr_luns = lun_end - lun_begin + 1;
133         int luns_left = nr_luns;
134         int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
135         int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
136         int bch = lun_begin / dev->geo.luns_per_chnl;
137         int blun = lun_begin % dev->geo.luns_per_chnl;
138         int lunid = 0;
139         int lun_balanced = 1;
140         int prev_nr_luns;
141         int i, j;
142
143         nr_chnls = nr_luns / dev->geo.luns_per_chnl;
144         nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;
145
146         dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
147         if (!dev_map)
148                 goto err_dev;
149
150         dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
151                                                                 GFP_KERNEL);
152         if (!dev_map->chnls)
153                 goto err_chnls;
154
155         luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
156         if (!luns)
157                 goto err_luns;
158
159         prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
160                                         dev->geo.luns_per_chnl : luns_left;
161         for (i = 0; i < nr_chnls; i++) {
162                 struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
163                 int *lun_roffs = ch_rmap->lun_offs;
164                 struct nvm_ch_map *ch_map = &dev_map->chnls[i];
165                 int *lun_offs;
166                 int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
167                                         dev->geo.luns_per_chnl : luns_left;
168
169                 if (lun_balanced && prev_nr_luns != luns_in_chnl)
170                         lun_balanced = 0;
171
172                 ch_map->ch_off = ch_rmap->ch_off = bch;
173                 ch_map->nr_luns = luns_in_chnl;
174
175                 lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
176                 if (!lun_offs)
177                         goto err_ch;
178
179                 for (j = 0; j < luns_in_chnl; j++) {
180                         luns[lunid].ppa = 0;
181                         luns[lunid].g.ch = i;
182                         luns[lunid++].g.lun = j;
183
184                         lun_offs[j] = blun;
185                         lun_roffs[j + blun] = blun;
186                 }
187
188                 ch_map->lun_offs = lun_offs;
189
190                 /* when starting a new channel, lun offset is reset */
191                 blun = 0;
192                 luns_left -= luns_in_chnl;
193         }
194
195         dev_map->nr_chnls = nr_chnls;
196
197         tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
198         if (!tgt_dev)
199                 goto err_ch;
200
201         memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
202         /* Target device only owns a portion of the physical device */
203         tgt_dev->geo.nr_chnls = nr_chnls;
204         tgt_dev->geo.nr_luns = nr_luns;
205         tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
206         tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
207         tgt_dev->q = dev->q;
208         tgt_dev->map = dev_map;
209         tgt_dev->luns = luns;
210         memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));
211
212         tgt_dev->parent = dev;
213
214         return tgt_dev;
215 err_ch:
216         while (--i >= 0)
217                 kfree(dev_map->chnls[i].lun_offs);
218         kfree(luns);
219 err_luns:
220         kfree(dev_map->chnls);
221 err_chnls:
222         kfree(dev_map);
223 err_dev:
224         return tgt_dev;
225 }
226
227 static const struct block_device_operations nvm_fops = {
228         .owner          = THIS_MODULE,
229 };
230
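/*
 * nvm_create_tgt() wires a new target instance together: look up the target
 * type, reserve the requested lun range, build the target's mapping with
 * nvm_create_tgt_dev(), set up a bio-based request queue and gendisk, call
 * the target type's init() hook, and finally add the disk and put the target
 * on dev->targets.
 */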
231 static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
232 {
233         struct nvm_ioctl_create_simple *s = &create->conf.s;
234         struct request_queue *tqueue;
235         struct gendisk *tdisk;
236         struct nvm_tgt_type *tt;
237         struct nvm_target *t;
238         struct nvm_tgt_dev *tgt_dev;
239         void *targetdata;
240
241         tt = nvm_find_target_type(create->tgttype, 1);
242         if (!tt) {
243                 pr_err("nvm: target type %s not found\n", create->tgttype);
244                 return -EINVAL;
245         }
246
247         mutex_lock(&dev->mlock);
248         t = nvm_find_target(dev, create->tgtname);
249         if (t) {
250                 pr_err("nvm: target name already exists.\n");
251                 mutex_unlock(&dev->mlock);
252                 return -EINVAL;
253         }
254         mutex_unlock(&dev->mlock);
255
256         if (nvm_reserve_luns(dev, s->lun_begin, s->lun_end))
257                 return -ENOMEM;
258
259         t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
260         if (!t)
261                 goto err_reserve;
262
263         tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
264         if (!tgt_dev) {
265                 pr_err("nvm: could not create target device\n");
266                 goto err_t;
267         }
268
269         tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
270         if (!tqueue)
271                 goto err_dev;
272         blk_queue_make_request(tqueue, tt->make_rq);
273
274         tdisk = alloc_disk(0);
275         if (!tdisk)
276                 goto err_queue;
277
278         sprintf(tdisk->disk_name, "%s", create->tgtname);
279         tdisk->flags = GENHD_FL_EXT_DEVT;
280         tdisk->major = 0;
281         tdisk->first_minor = 0;
282         tdisk->fops = &nvm_fops;
283         tdisk->queue = tqueue;
284
285         targetdata = tt->init(tgt_dev, tdisk);
286         if (IS_ERR(targetdata))
287                 goto err_init;
288
289         tdisk->private_data = targetdata;
290         tqueue->queuedata = targetdata;
291
292         blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
293
294         set_capacity(tdisk, tt->capacity(targetdata));
295         add_disk(tdisk);
296
297         t->type = tt;
298         t->disk = tdisk;
299         t->dev = tgt_dev;
300
301         mutex_lock(&dev->mlock);
302         list_add_tail(&t->list, &dev->targets);
303         mutex_unlock(&dev->mlock);
304
305         return 0;
306 err_init:
307         put_disk(tdisk);
308 err_queue:
309         blk_cleanup_queue(tqueue);
310 err_dev:
311         kfree(tgt_dev);
312 err_t:
313         kfree(t);
314 err_reserve:
315         nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
316         return -ENOMEM;
317 }
318
319 static void __nvm_remove_target(struct nvm_target *t)
320 {
321         struct nvm_tgt_type *tt = t->type;
322         struct gendisk *tdisk = t->disk;
323         struct request_queue *q = tdisk->queue;
324
325         del_gendisk(tdisk);
326         blk_cleanup_queue(q);
327
328         if (tt->exit)
329                 tt->exit(tdisk->private_data);
330
331         nvm_remove_tgt_dev(t->dev);
332         put_disk(tdisk);
333
334         list_del(&t->list);
335         kfree(t);
336 }
337
338 /**
339  * nvm_remove_tgt - Removes a target from the media manager
340  * @dev:        device
341  * @remove:     ioctl structure with target name to remove.
342  *
343  * Returns:
344  * 0: on success
345  * 1: on not found
346  * <0: on error
347  */
348 static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
349 {
350         struct nvm_target *t;
351
352         mutex_lock(&dev->mlock);
353         t = nvm_find_target(dev, remove->tgtname);
354         if (!t) {
355                 mutex_unlock(&dev->mlock);
356                 return 1;
357         }
358         __nvm_remove_target(t);
359         mutex_unlock(&dev->mlock);
360
361         return 0;
362 }
363
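/*
 * nvm_register_map() allocates the device-wide reverse map (dev->rmap): one
 * nvm_ch_map per physical channel with every channel and lun offset set to
 * -1, until a target claims the corresponding luns in nvm_create_tgt_dev().
 */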
364 static int nvm_register_map(struct nvm_dev *dev)
365 {
366         struct nvm_dev_map *rmap;
367         int i, j;
368
369         rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
370         if (!rmap)
371                 goto err_rmap;
372
373         rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
374                                                                 GFP_KERNEL);
375         if (!rmap->chnls)
376                 goto err_chnls;
377
378         for (i = 0; i < dev->geo.nr_chnls; i++) {
379                 struct nvm_ch_map *ch_rmap;
380                 int *lun_roffs;
381                 int luns_in_chnl = dev->geo.luns_per_chnl;
382
383                 ch_rmap = &rmap->chnls[i];
384
385                 ch_rmap->ch_off = -1;
386                 ch_rmap->nr_luns = luns_in_chnl;
387
388                 lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
389                 if (!lun_roffs)
390                         goto err_ch;
391
392                 for (j = 0; j < luns_in_chnl; j++)
393                         lun_roffs[j] = -1;
394
395                 ch_rmap->lun_offs = lun_roffs;
396         }
397
398         dev->rmap = rmap;
399
400         return 0;
401 err_ch:
402         while (--i >= 0)
403                 kfree(rmap->chnls[i].lun_offs);
404 err_chnls:
405         kfree(rmap);
406 err_rmap:
407         return -ENOMEM;
408 }
409
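/*
 * Translation between the target's virtual addresses and the parent device's
 * physical addresses: nvm_map_to_dev() adds the channel and lun offsets
 * stored in the target's map (target -> device), nvm_map_to_tgt() subtracts
 * the offsets stored in the device's reverse map (device -> target).
 */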
410 static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
411 {
412         struct nvm_dev_map *dev_map = tgt_dev->map;
413         struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
414         int lun_off = ch_map->lun_offs[p->g.lun];
415
416         p->g.ch += ch_map->ch_off;
417         p->g.lun += lun_off;
418 }
419
420 static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
421 {
422         struct nvm_dev *dev = tgt_dev->parent;
423         struct nvm_dev_map *dev_rmap = dev->rmap;
424         struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
425         int lun_roff = ch_rmap->lun_offs[p->g.lun];
426
427         p->g.ch -= ch_rmap->ch_off;
428         p->g.lun -= lun_roff;
429 }
430
431 static void nvm_trans_rq(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
432                         int flag)
433 {
434         int i;
435
436         if (rqd->nr_ppas == 1) {
437                 if (flag == TRANS_TGT_TO_DEV)
438                         nvm_map_to_dev(tgt_dev, &rqd->ppa_addr);
439                 else
440                         nvm_map_to_tgt(tgt_dev, &rqd->ppa_addr);
441                 return;
442         }
443
444         for (i = 0; i < rqd->nr_ppas; i++) {
445                 if (flag == TRANS_TGT_TO_DEV)
446                         nvm_map_to_dev(tgt_dev, &rqd->ppa_list[i]);
447                 else
448                         nvm_map_to_tgt(tgt_dev, &rqd->ppa_list[i]);
449         }
450 }
451
452 static struct ppa_addr nvm_trans_ppa(struct nvm_tgt_dev *tgt_dev,
453                                      struct ppa_addr p, int dir)
454 {
455         struct ppa_addr ppa = p;
456
457         if (dir == TRANS_TGT_TO_DEV)
458                 nvm_map_to_dev(tgt_dev, &ppa);
459         else
460                 nvm_map_to_tgt(tgt_dev, &ppa);
461
462         return ppa;
463 }
464
465 void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
466                      int len)
467 {
468         struct nvm_geo *geo = &dev->geo;
469         struct nvm_dev_map *dev_rmap = dev->rmap;
470         u64 i;
471
472         for (i = 0; i < len; i++) {
473                 struct nvm_ch_map *ch_rmap;
474                 int *lun_roffs;
475                 struct ppa_addr gaddr;
476                 u64 pba = le64_to_cpu(entries[i]);
477                 int off;
478                 u64 diff;
479
480                 if (!pba)
481                         continue;
482
483                 gaddr = linear_to_generic_addr(geo, pba);
484                 ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
485                 lun_roffs = ch_rmap->lun_offs;
486
487                 off = gaddr.g.ch * geo->luns_per_chnl + gaddr.g.lun;
488
489                 diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
490                                 (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
491
492                 entries[i] -= cpu_to_le64(diff);
493         }
494 }
495 EXPORT_SYMBOL(nvm_part_to_tgt);
496
497 struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
498 {
499         struct nvm_tgt_type *tmp, *tt = NULL;
500
501         if (lock)
502                 down_write(&nvm_tgtt_lock);
503
504         list_for_each_entry(tmp, &nvm_tgt_types, list)
505                 if (!strcmp(name, tmp->name)) {
506                         tt = tmp;
507                         break;
508                 }
509
510         if (lock)
511                 up_write(&nvm_tgtt_lock);
512         return tt;
513 }
514 EXPORT_SYMBOL(nvm_find_target_type);
515
516 int nvm_register_tgt_type(struct nvm_tgt_type *tt)
517 {
518         int ret = 0;
519
520         down_write(&nvm_tgtt_lock);
521         if (nvm_find_target_type(tt->name, 0))
522                 ret = -EEXIST;
523         else
524                 list_add(&tt->list, &nvm_tgt_types);
525         up_write(&nvm_tgtt_lock);
526
527         return ret;
528 }
529 EXPORT_SYMBOL(nvm_register_tgt_type);
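
/*
 * Minimal usage sketch for a hypothetical target type "mytgt" (illustrative
 * only; only the nvm_tgt_type fields that this file actually dereferences
 * are shown):
 *
 *   static struct nvm_tgt_type tt_mytgt = {
 *           .name     = "mytgt",
 *           .version  = {1, 0, 0},
 *           .init     = mytgt_init,        returns the target private data
 *           .exit     = mytgt_exit,
 *           .capacity = mytgt_capacity,    sectors passed to set_capacity()
 *           .make_rq  = mytgt_make_rq,     bound to the queue in nvm_create_tgt()
 *           .end_io   = mytgt_end_io,      called from nvm_end_io()
 *   };
 *
 *   nvm_register_tgt_type(&tt_mytgt) returns -EEXIST if the name is taken.
 */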
530
531 void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
532 {
533         if (!tt)
534                 return;
535
536         down_write(&nvm_tgtt_lock);
537         list_del(&tt->list);
538         up_write(&nvm_tgtt_lock);
539 }
540 EXPORT_SYMBOL(nvm_unregister_tgt_type);
541
542 void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
543                                                         dma_addr_t *dma_handler)
544 {
545         return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
546                                                                 dma_handler);
547 }
548 EXPORT_SYMBOL(nvm_dev_dma_alloc);
549
550 void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
551 {
552         dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
553 }
554 EXPORT_SYMBOL(nvm_dev_dma_free);
555
556 static struct nvm_dev *nvm_find_nvm_dev(const char *name)
557 {
558         struct nvm_dev *dev;
559
560         list_for_each_entry(dev, &nvm_devices, devices)
561                 if (!strcmp(name, dev->name))
562                         return dev;
563
564         return NULL;
565 }
566
567 static void nvm_tgt_generic_to_addr_mode(struct nvm_tgt_dev *tgt_dev,
568                                          struct nvm_rq *rqd)
569 {
570         struct nvm_dev *dev = tgt_dev->parent;
571         int i;
572
573         if (rqd->nr_ppas > 1) {
574                 for (i = 0; i < rqd->nr_ppas; i++) {
575                         rqd->ppa_list[i] = nvm_trans_ppa(tgt_dev,
576                                         rqd->ppa_list[i], TRANS_TGT_TO_DEV);
577                         rqd->ppa_list[i] = generic_to_dev_addr(dev,
578                                                         rqd->ppa_list[i]);
579                 }
580         } else {
581                 rqd->ppa_addr = nvm_trans_ppa(tgt_dev, rqd->ppa_addr,
582                                                 TRANS_TGT_TO_DEV);
583                 rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
584         }
585 }
586
587 int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
588                        int nr_ppas, int type)
589 {
590         struct nvm_dev *dev = tgt_dev->parent;
591         struct nvm_rq rqd;
592         int ret;
593
594         if (nr_ppas > dev->ops->max_phys_sect) {
595                 pr_err("nvm: unable to update all blocks atomically\n");
596                 return -EINVAL;
597         }
598
599         memset(&rqd, 0, sizeof(struct nvm_rq));
600
601         nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
602         nvm_tgt_generic_to_addr_mode(tgt_dev, &rqd);
603
604         ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
605         nvm_free_rqd_ppalist(dev, &rqd);
606         if (ret) {
607                 pr_err("nvm: failed bb mark\n");
608                 return -EINVAL;
609         }
610
611         return 0;
612 }
613 EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
614
615 int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
616 {
617         struct nvm_dev *dev = tgt_dev->parent;
618
619         return dev->ops->max_phys_sect;
620 }
621 EXPORT_SYMBOL(nvm_max_phys_sects);
622
623 int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
624 {
625         struct nvm_dev *dev = tgt_dev->parent;
626
627         if (!dev->ops->submit_io)
628                 return -ENODEV;
629
630         /* Convert address space */
631         nvm_generic_to_addr_mode(dev, rqd);
632
633         rqd->dev = tgt_dev;
634         return dev->ops->submit_io(dev, rqd);
635 }
636 EXPORT_SYMBOL(nvm_submit_io);
637
638 int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, int flags)
639 {
640         struct nvm_dev *dev = tgt_dev->parent;
641         struct nvm_rq rqd;
642         int ret;
643
644         if (!dev->ops->erase_block)
645                 return 0;
646
647         nvm_map_to_dev(tgt_dev, ppas);
648
649         memset(&rqd, 0, sizeof(struct nvm_rq));
650
651         ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, 1, 1);
652         if (ret)
653                 return ret;
654
655         nvm_generic_to_addr_mode(dev, &rqd);
656
657         rqd.flags = flags;
658
659         ret = dev->ops->erase_block(dev, &rqd);
660
661         nvm_free_rqd_ppalist(dev, &rqd);
662
663         return ret;
664 }
665 EXPORT_SYMBOL(nvm_erase_blk);
666
667 int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
668                     nvm_l2p_update_fn *update_l2p, void *priv)
669 {
670         struct nvm_dev *dev = tgt_dev->parent;
671
672         if (!dev->ops->get_l2p_tbl)
673                 return 0;
674
675         return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
676 }
677 EXPORT_SYMBOL(nvm_get_l2p_tbl);
678
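/*
 * nvm_get_area() reserves a contiguous sector range on the device for the
 * caller (len appears to be in 512-byte units, matching the >> 9 used for
 * max_sectors). It walks dev->area_list, which is kept sorted by start
 * sector, takes the first gap large enough, records it as a struct nvm_area
 * and returns the start sector through *lba.
 */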
679 int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
680 {
681         struct nvm_dev *dev = tgt_dev->parent;
682         struct nvm_geo *geo = &dev->geo;
683         struct nvm_area *area, *prev, *next;
684         sector_t begin = 0;
685         sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
686
687         if (len > max_sectors)
688                 return -EINVAL;
689
690         area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
691         if (!area)
692                 return -ENOMEM;
693
694         prev = NULL;
695
696         spin_lock(&dev->lock);
697         list_for_each_entry(next, &dev->area_list, list) {
698                 if (begin + len > next->begin) {
699                         begin = next->end;
700                         prev = next;
701                         continue;
702                 }
703                 break;
704         }
705
706         if ((begin + len) > max_sectors) {
707                 spin_unlock(&dev->lock);
708                 kfree(area);
709                 return -EINVAL;
710         }
711
712         area->begin = *lba = begin;
713         area->end = begin + len;
714
715         if (prev) /* insert into sorted order */
716                 list_add(&area->list, &prev->list);
717         else
718                 list_add(&area->list, &dev->area_list);
719         spin_unlock(&dev->lock);
720
721         return 0;
722 }
723 EXPORT_SYMBOL(nvm_get_area);
724
725 void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
726 {
727         struct nvm_dev *dev = tgt_dev->parent;
728         struct nvm_area *area;
729
730         spin_lock(&dev->lock);
731         list_for_each_entry(area, &dev->area_list, list) {
732                 if (area->begin != begin)
733                         continue;
734
735                 list_del(&area->list);
736                 spin_unlock(&dev->lock);
737                 kfree(area);
738                 return;
739         }
740         spin_unlock(&dev->lock);
741 }
742 EXPORT_SYMBOL(nvm_put_area);
743
744 void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
745 {
746         int i;
747
748         if (rqd->nr_ppas > 1) {
749                 for (i = 0; i < rqd->nr_ppas; i++)
750                         rqd->ppa_list[i] = dev_to_generic_addr(dev,
751                                                         rqd->ppa_list[i]);
752         } else {
753                 rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
754         }
755 }
756 EXPORT_SYMBOL(nvm_addr_to_generic_mode);
757
758 void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
759 {
760         int i;
761
762         if (rqd->nr_ppas > 1) {
763                 for (i = 0; i < rqd->nr_ppas; i++)
764                         rqd->ppa_list[i] = generic_to_dev_addr(dev,
765                                                         rqd->ppa_list[i]);
766         } else {
767                 rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
768         }
769 }
770 EXPORT_SYMBOL(nvm_generic_to_addr_mode);
771
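/*
 * Sketch of the vblk expansion done by nvm_set_rqd_ppalist() (illustrative
 * values): with plane_mode == 2 and nr_ppas == 2, the caller's addresses
 * {A, B} are replicated once per plane and stored plane-major:
 *
 *   rqd->ppa_list = { A(pl 0), B(pl 0), A(pl 1), B(pl 1) }
 *
 * and rqd->nr_ppas becomes nr_ppas * plane_cnt == 4. With vblk == 0 the
 * addresses are copied through unchanged.
 */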
772 int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
773                         const struct ppa_addr *ppas, int nr_ppas, int vblk)
774 {
775         struct nvm_geo *geo = &dev->geo;
776         int i, plane_cnt, pl_idx;
777         struct ppa_addr ppa;
778
779         if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
780                 rqd->nr_ppas = nr_ppas;
781                 rqd->ppa_addr = ppas[0];
782
783                 return 0;
784         }
785
786         rqd->nr_ppas = nr_ppas;
787         rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
788         if (!rqd->ppa_list) {
789                 pr_err("nvm: failed to allocate dma memory\n");
790                 return -ENOMEM;
791         }
792
793         if (!vblk) {
794                 for (i = 0; i < nr_ppas; i++)
795                         rqd->ppa_list[i] = ppas[i];
796         } else {
797                 plane_cnt = geo->plane_mode;
798                 rqd->nr_ppas *= plane_cnt;
799
800                 for (i = 0; i < nr_ppas; i++) {
801                         for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
802                                 ppa = ppas[i];
803                                 ppa.g.pl = pl_idx;
804                                 rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
805                         }
806                 }
807         }
808
809         return 0;
810 }
811 EXPORT_SYMBOL(nvm_set_rqd_ppalist);
812
813 void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
814 {
815         if (!rqd->ppa_list)
816                 return;
817
818         nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
819 }
820 EXPORT_SYMBOL(nvm_free_rqd_ppalist);
821
822 void nvm_end_io(struct nvm_rq *rqd, int error)
823 {
824         struct nvm_tgt_dev *tgt_dev = rqd->dev;
825         struct nvm_tgt_instance *ins = rqd->ins;
826
827         /* Convert address space */
828         if (tgt_dev)
829                 nvm_trans_rq(tgt_dev, rqd, TRANS_DEV_TO_TGT);
830
831         rqd->error = error;
832         ins->tt->end_io(rqd);
833 }
834 EXPORT_SYMBOL(nvm_end_io);
835
836 /*
837  * folds a bad block list from its plane representation to its virtual
838  * block representation. The fold is done in place and reduced size is
839  * returned.
840  *
841  * If any of the block's planes is marked bad or grown bad, the virtual block
842  * is marked bad. Otherwise, the first plane's state acts as the block state.
843  */
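/*
 * Worked example with made-up values, assuming plane_mode == 2 and
 * blks_per_lun == 2 (so nr_blks == 4 on entry):
 *
 *   blks[] = { 0x0, NVM_BLK_T_GRWN_BAD,   0x0, 0x0 }
 *              (block 0, planes 0-1)      (block 1, planes 0-1)
 *
 * Block 0 has a grown-bad plane, so its folded state becomes
 * NVM_BLK_T_GRWN_BAD; block 1 keeps the state of its first plane (0x0).
 * The function rewrites the first blks_per_lun entries in place:
 *
 *   blks[] = { NVM_BLK_T_GRWN_BAD, 0x0, ... }
 *
 * and returns blks_per_lun (2).
 */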
844 int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
845 {
846         struct nvm_geo *geo = &dev->geo;
847         int blk, offset, pl, blktype;
848
849         if (nr_blks != geo->blks_per_lun * geo->plane_mode)
850                 return -EINVAL;
851
852         for (blk = 0; blk < geo->blks_per_lun; blk++) {
853                 offset = blk * geo->plane_mode;
854                 blktype = blks[offset];
855
856                 /* Bad blocks on any planes take precedence over other types */
857                 for (pl = 0; pl < geo->plane_mode; pl++) {
858                         if (blks[offset + pl] &
859                                         (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
860                                 blktype = blks[offset + pl];
861                                 break;
862                         }
863                 }
864
865                 blks[blk] = blktype;
866         }
867
868         return geo->blks_per_lun;
869 }
870 EXPORT_SYMBOL(nvm_bb_tbl_fold);
871
872 int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
873                        u8 *blks)
874 {
875         struct nvm_dev *dev = tgt_dev->parent;
876
877         ppa = nvm_trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV);
878         ppa = generic_to_dev_addr(dev, ppa);
879
880         return dev->ops->get_bb_tbl(dev, ppa, blks);
881 }
882 EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
883
884 static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
885 {
886         struct nvm_geo *geo = &dev->geo;
887         int i;
888
889         dev->lps_per_blk = geo->pgs_per_blk;
890         dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
891         if (!dev->lptbl)
892                 return -ENOMEM;
893
894         /* Just a linear array */
895         for (i = 0; i < dev->lps_per_blk; i++)
896                 dev->lptbl[i] = i;
897
898         return 0;
899 }
900
901 static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
902 {
903         int i, p;
904         struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;
905
906         if (!mlc->num_pairs)
907                 return 0;
908
909         dev->lps_per_blk = mlc->num_pairs;
910         dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
911         if (!dev->lptbl)
912                 return -ENOMEM;
913
914         /* The lower page table encoding consists of a list of bytes, where
915          * each byte holds a lower and an upper half-byte. The first half-byte
916          * holds the initial increment value; every half-byte after it is an
917          * offset that is added to the previously computed value.
918          */
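        /*
         * Decoding illustration with made-up pair bytes (not from any real
         * device): pairs[0] = 0x21, pairs[1] = 0x03 gives
         *   lptbl[0] = 0x21 & 0xF                 = 1
         *   lptbl[1] = 1 + ((0x21 & 0xF0) >> 4)   = 3
         *   lptbl[2] = 3 + (0x03 & 0xF)           = 6
         *   lptbl[3] = 6 + ((0x03 & 0xF0) >> 4)   = 6
         */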
919         dev->lptbl[0] = mlc->pairs[0] & 0xF;
920         for (i = 1; i < dev->lps_per_blk; i++) {
921                 p = mlc->pairs[i >> 1];
922                 if (i & 0x1) /* upper */
923                         dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
924                 else /* lower */
925                         dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
926         }
927
928         return 0;
929 }
930
931 static int nvm_core_init(struct nvm_dev *dev)
932 {
933         struct nvm_id *id = &dev->identity;
934         struct nvm_id_group *grp = &id->groups[0];
935         struct nvm_geo *geo = &dev->geo;
936         int ret;
937
938         /* Whole device values */
939         geo->nr_chnls = grp->num_ch;
940         geo->luns_per_chnl = grp->num_lun;
941
942         /* Generic device values */
943         geo->pgs_per_blk = grp->num_pg;
944         geo->blks_per_lun = grp->num_blk;
945         geo->nr_planes = grp->num_pln;
946         geo->fpg_size = grp->fpg_sz;
947         geo->pfpg_size = grp->fpg_sz * grp->num_pln;
948         geo->sec_size = grp->csecs;
949         geo->oob_size = grp->sos;
950         geo->sec_per_pg = grp->fpg_sz / grp->csecs;
951         geo->mccap = grp->mccap;
952         memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
953
954         geo->plane_mode = NVM_PLANE_SINGLE;
955         geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;
956
957         if (grp->mpos & 0x020202)
958                 geo->plane_mode = NVM_PLANE_DOUBLE;
959         if (grp->mpos & 0x040404)
960                 geo->plane_mode = NVM_PLANE_QUAD;
961
962         if (grp->mtype != 0) {
963                 pr_err("nvm: memory type not supported\n");
964                 return -EINVAL;
965         }
966
967         /* calculated values */
968         geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
969         geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
970         geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
971         geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;
972
973         dev->total_secs = geo->nr_luns * geo->sec_per_lun;
974         dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
975                                         sizeof(unsigned long), GFP_KERNEL);
976         if (!dev->lun_map)
977                 return -ENOMEM;
978
979         switch (grp->fmtype) {
980         case NVM_ID_FMTYPE_SLC:
981                 if (nvm_init_slc_tbl(dev, grp)) {
982                         ret = -ENOMEM;
983                         goto err_fmtype;
984                 }
985                 break;
986         case NVM_ID_FMTYPE_MLC:
987                 if (nvm_init_mlc_tbl(dev, grp)) {
988                         ret = -ENOMEM;
989                         goto err_fmtype;
990                 }
991                 break;
992         default:
993                 pr_err("nvm: flash type not supported\n");
994                 ret = -EINVAL;
995                 goto err_fmtype;
996         }
997
998         INIT_LIST_HEAD(&dev->area_list);
999         INIT_LIST_HEAD(&dev->targets);
1000         mutex_init(&dev->mlock);
1001         spin_lock_init(&dev->lock);
1002
1003         ret = nvm_register_map(dev);
1004         if (ret)
1005                 goto err_fmtype;
1006
1007         blk_queue_logical_block_size(dev->q, geo->sec_size);
1008         return 0;
1009 err_fmtype:
1010         kfree(dev->lun_map);
1011         return ret;
1012 }
1013
1014 void nvm_free(struct nvm_dev *dev)
1015 {
1016         if (!dev)
1017                 return;
1018
1019         if (dev->dma_pool)
1020                 dev->ops->destroy_dma_pool(dev->dma_pool);
1021
1022         kfree(dev->rmap);
1023         kfree(dev->lptbl);
1024         kfree(dev->lun_map);
1025         kfree(dev);
1026 }
1027
1028 static int nvm_init(struct nvm_dev *dev)
1029 {
1030         struct nvm_geo *geo = &dev->geo;
1031         int ret = -EINVAL;
1032
1033         if (dev->ops->identity(dev, &dev->identity)) {
1034                 pr_err("nvm: device could not be identified\n");
1035                 goto err;
1036         }
1037
1038         pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
1039                         dev->identity.ver_id, dev->identity.vmnt,
1040                                                         dev->identity.cgrps);
1041
1042         if (dev->identity.ver_id != 1) {
1043                 pr_err("nvm: device not supported by kernel.\n");
1044                 goto err;
1045         }
1046
1047         if (dev->identity.cgrps != 1) {
1048                 pr_err("nvm: only one group configuration supported.\n");
1049                 goto err;
1050         }
1051
1052         ret = nvm_core_init(dev);
1053         if (ret) {
1054                 pr_err("nvm: could not initialize core structures.\n");
1055                 goto err;
1056         }
1057
1058         pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
1059                         dev->name, geo->sec_per_pg, geo->nr_planes,
1060                         geo->pgs_per_blk, geo->blks_per_lun,
1061                         geo->nr_luns, geo->nr_chnls);
1062         return 0;
1063 err:
1064         pr_err("nvm: failed to initialize nvm\n");
1065         return ret;
1066 }
1067
1068 struct nvm_dev *nvm_alloc_dev(int node)
1069 {
1070         return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
1071 }
1072 EXPORT_SYMBOL(nvm_alloc_dev);
1073
1074 int nvm_register(struct nvm_dev *dev)
1075 {
1076         int ret;
1077
1078         if (!dev->q || !dev->ops)
1079                 return -EINVAL;
1080
1081         if (dev->ops->max_phys_sect > 256) {
1082                 pr_info("nvm: max sectors supported is 256.\n");
1083                 return -EINVAL;
1084         }
1085
1086         if (dev->ops->max_phys_sect > 1) {
1087                 dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
1088                 if (!dev->dma_pool) {
1089                         pr_err("nvm: could not create dma pool\n");
1090                         return -ENOMEM;
1091                 }
1092         }
1093
1094         ret = nvm_init(dev);
1095         if (ret)
1096                 goto err_init;
1097
1098         /* register device with a supported media manager */
1099         down_write(&nvm_lock);
1100         list_add(&dev->devices, &nvm_devices);
1101         up_write(&nvm_lock);
1102
1103         return 0;
1104 err_init:
1105         dev->ops->destroy_dma_pool(dev->dma_pool);
1106         return ret;
1107 }
1108 EXPORT_SYMBOL(nvm_register);
1109
1110 void nvm_unregister(struct nvm_dev *dev)
1111 {
1112         struct nvm_target *t, *tmp;
1113
1114         mutex_lock(&dev->mlock);
1115         list_for_each_entry_safe(t, tmp, &dev->targets, list) {
1116                 if (t->dev->parent != dev)
1117                         continue;
1118                 __nvm_remove_target(t);
1119         }
1120         mutex_unlock(&dev->mlock);
1121
1122         down_write(&nvm_lock);
1123         list_del(&dev->devices);
1124         up_write(&nvm_lock);
1125
1126         nvm_free(dev);
1127 }
1128 EXPORT_SYMBOL(nvm_unregister);
1129
1130 static int __nvm_configure_create(struct nvm_ioctl_create *create)
1131 {
1132         struct nvm_dev *dev;
1133         struct nvm_ioctl_create_simple *s;
1134
1135         down_write(&nvm_lock);
1136         dev = nvm_find_nvm_dev(create->dev);
1137         up_write(&nvm_lock);
1138
1139         if (!dev) {
1140                 pr_err("nvm: device not found\n");
1141                 return -EINVAL;
1142         }
1143
1144         if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
1145                 pr_err("nvm: config type not valid\n");
1146                 return -EINVAL;
1147         }
1148         s = &create->conf.s;
1149
1150         if (s->lun_begin > s->lun_end || s->lun_end > dev->geo.nr_luns) {
1151                 pr_err("nvm: lun out of bound (%u:%u > %u)\n",
1152                         s->lun_begin, s->lun_end, dev->geo.nr_luns);
1153                 return -EINVAL;
1154         }
1155
1156         return nvm_create_tgt(dev, create);
1157 }
1158
1159 static long nvm_ioctl_info(struct file *file, void __user *arg)
1160 {
1161         struct nvm_ioctl_info *info;
1162         struct nvm_tgt_type *tt;
1163         int tgt_iter = 0;
1164
1165         if (!capable(CAP_SYS_ADMIN))
1166                 return -EPERM;
1167
1168         info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
1169         if (IS_ERR(info))
1170                 return PTR_ERR(info);
1171
1172         info->version[0] = NVM_VERSION_MAJOR;
1173         info->version[1] = NVM_VERSION_MINOR;
1174         info->version[2] = NVM_VERSION_PATCH;
1175
1176         down_write(&nvm_lock);
1177         list_for_each_entry(tt, &nvm_tgt_types, list) {
1178                 struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
1179
1180                 tgt->version[0] = tt->version[0];
1181                 tgt->version[1] = tt->version[1];
1182                 tgt->version[2] = tt->version[2];
1183                 strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
1184
1185                 tgt_iter++;
1186         }
1187
1188         info->tgtsize = tgt_iter;
1189         up_write(&nvm_lock);
1190
1191         if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
1192                 kfree(info);
1193                 return -EFAULT;
1194         }
1195
1196         kfree(info);
1197         return 0;
1198 }
1199
1200 static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
1201 {
1202         struct nvm_ioctl_get_devices *devices;
1203         struct nvm_dev *dev;
1204         int i = 0;
1205
1206         if (!capable(CAP_SYS_ADMIN))
1207                 return -EPERM;
1208
1209         devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
1210         if (!devices)
1211                 return -ENOMEM;
1212
1213         down_write(&nvm_lock);
1214         list_for_each_entry(dev, &nvm_devices, devices) {
1215                 struct nvm_ioctl_device_info *info = &devices->info[i];
1216
1217                 sprintf(info->devname, "%s", dev->name);
1218
1219                 /* kept for compatibility */
1220                 info->bmversion[0] = 1;
1221                 info->bmversion[1] = 0;
1222                 info->bmversion[2] = 0;
1223                 sprintf(info->bmname, "%s", "gennvm");
1224                 i++;
1225
1226                 if (i > 31) {
1227                         pr_err("nvm: max 31 devices can be reported.\n");
1228                         break;
1229                 }
1230         }
1231         up_write(&nvm_lock);
1232
1233         devices->nr_devices = i;
1234
1235         if (copy_to_user(arg, devices,
1236                          sizeof(struct nvm_ioctl_get_devices))) {
1237                 kfree(devices);
1238                 return -EFAULT;
1239         }
1240
1241         kfree(devices);
1242         return 0;
1243 }
1244
1245 static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
1246 {
1247         struct nvm_ioctl_create create;
1248
1249         if (!capable(CAP_SYS_ADMIN))
1250                 return -EPERM;
1251
1252         if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
1253                 return -EFAULT;
1254
1255         create.dev[DISK_NAME_LEN - 1] = '\0';
1256         create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
1257         create.tgtname[DISK_NAME_LEN - 1] = '\0';
1258
1259         if (create.flags != 0) {
1260                 pr_err("nvm: no flags supported\n");
1261                 return -EINVAL;
1262         }
1263
1264         return __nvm_configure_create(&create);
1265 }
1266
1267 static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
1268 {
1269         struct nvm_ioctl_remove remove;
1270         struct nvm_dev *dev;
1271         int ret = 0;
1272
1273         if (!capable(CAP_SYS_ADMIN))
1274                 return -EPERM;
1275
1276         if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
1277                 return -EFAULT;
1278
1279         remove.tgtname[DISK_NAME_LEN - 1] = '\0';
1280
1281         if (remove.flags != 0) {
1282                 pr_err("nvm: no flags supported\n");
1283                 return -EINVAL;
1284         }
1285
1286         list_for_each_entry(dev, &nvm_devices, devices) {
1287                 ret = nvm_remove_tgt(dev, &remove);
1288                 if (!ret)
1289                         break;
1290         }
1291
1292         return ret;
1293 }
1294
1295 /* kept for compatibility reasons */
1296 static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
1297 {
1298         struct nvm_ioctl_dev_init init;
1299
1300         if (!capable(CAP_SYS_ADMIN))
1301                 return -EPERM;
1302
1303         if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
1304                 return -EFAULT;
1305
1306         if (init.flags != 0) {
1307                 pr_err("nvm: no flags supported\n");
1308                 return -EINVAL;
1309         }
1310
1311         return 0;
1312 }
1313
1314 /* Kept for compatibility reasons */
1315 static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1316 {
1317         struct nvm_ioctl_dev_factory fact;
1318
1319         if (!capable(CAP_SYS_ADMIN))
1320                 return -EPERM;
1321
1322         if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
1323                 return -EFAULT;
1324
1325         fact.dev[DISK_NAME_LEN - 1] = '\0';
1326
1327         if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
1328                 return -EINVAL;
1329
1330         return 0;
1331 }
1332
1333 static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
1334 {
1335         void __user *argp = (void __user *)arg;
1336
1337         switch (cmd) {
1338         case NVM_INFO:
1339                 return nvm_ioctl_info(file, argp);
1340         case NVM_GET_DEVICES:
1341                 return nvm_ioctl_get_devices(file, argp);
1342         case NVM_DEV_CREATE:
1343                 return nvm_ioctl_dev_create(file, argp);
1344         case NVM_DEV_REMOVE:
1345                 return nvm_ioctl_dev_remove(file, argp);
1346         case NVM_DEV_INIT:
1347                 return nvm_ioctl_dev_init(file, argp);
1348         case NVM_DEV_FACTORY:
1349                 return nvm_ioctl_dev_factory(file, argp);
1350         }
1351         return 0;
1352 }
1353
1354 static const struct file_operations _ctl_fops = {
1355         .open = nonseekable_open,
1356         .unlocked_ioctl = nvm_ctl_ioctl,
1357         .owner = THIS_MODULE,
1358         .llseek  = noop_llseek,
1359 };
1360
1361 static struct miscdevice _nvm_misc = {
1362         .minor          = MISC_DYNAMIC_MINOR,
1363         .name           = "lightnvm",
1364         .nodename       = "lightnvm/control",
1365         .fops           = &_ctl_fops,
1366 };
1367 builtin_misc_device(_nvm_misc);