/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>

enum nvme_nvm_admin_opcode {
        nvme_nvm_admin_identity         = 0xe2,
        nvme_nvm_admin_get_l2p_tbl      = 0xea,
        nvme_nvm_admin_get_bb_tbl       = 0xf2,
        nvme_nvm_admin_set_bb_tbl       = 0xf1,
};

struct nvme_nvm_hb_rw {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd2;
        __le64                  metadata;
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  slba;
};

struct nvme_nvm_ph_rw {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd2;
        __le64                  metadata;
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_identity {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le32                  chnl_off;
        __u32                   rsvd11[5];
};

struct nvme_nvm_l2ptbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __le32                  cdw2[4];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  slba;
        __le32                  nlb;
        __le16                  cdw14[6];
};

struct nvme_nvm_getbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __u32                   rsvd4[4];
};

struct nvme_nvm_setbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __le64                  rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  nlb;
        __u8                    value;
        __u8                    rsvd3;
        __u32                   rsvd4[3];
};

struct nvme_nvm_erase_blk {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_command {
        union {
                struct nvme_common_command common;
                struct nvme_nvm_identity identity;
                struct nvme_nvm_hb_rw hb_rw;
                struct nvme_nvm_ph_rw ph_rw;
                struct nvme_nvm_l2ptbl l2p;
                struct nvme_nvm_getbbtbl get_bb;
                struct nvme_nvm_setbbtbl set_bb;
                struct nvme_nvm_erase_blk erase;
        };
};

struct nvme_nvm_completion {
        __le64  result;         /* Used by LightNVM to return ppa completions */
        __le16  sq_head;        /* how much of this queue may be reclaimed */
        __le16  sq_id;          /* submission queue that generated this entry */
        __u16   command_id;     /* of the command which completed */
        __le16  status;         /* did the command fail, and if so, why? */
};

#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
        __u16                   num_pairs;
        __u8                    pairs[NVME_NVM_LP_MLC_PAIRS];
};

struct nvme_nvm_lp_tbl {
        __u8                    id[8];
        struct nvme_nvm_lp_mlc  mlc;
};

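/*
 * Per-group geometry descriptor returned in the device identity data:
 * channel/LUN/plane/block/page counts, page and sector sizes, media timing
 * parameters and, for MLC media, the page pairing table.
 */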
struct nvme_nvm_id_group {
        __u8                    mtype;
        __u8                    fmtype;
        __le16                  res16;
        __u8                    num_ch;
        __u8                    num_lun;
        __u8                    num_pln;
        __u8                    rsvd1;
        __le16                  num_blk;
        __le16                  num_pg;
        __le16                  fpg_sz;
        __le16                  csecs;
        __le16                  sos;
        __le16                  rsvd2;
        __le32                  trdt;
        __le32                  trdm;
        __le32                  tprt;
        __le32                  tprm;
        __le32                  tbet;
        __le32                  tbem;
        __le32                  mpos;
        __le32                  mccap;
        __le16                  cpar;
        __u8                    reserved[10];
        struct nvme_nvm_lp_tbl lptbl;
} __packed;

struct nvme_nvm_addr_format {
        __u8                    ch_offset;
        __u8                    ch_len;
        __u8                    lun_offset;
        __u8                    lun_len;
        __u8                    pln_offset;
        __u8                    pln_len;
        __u8                    blk_offset;
        __u8                    blk_len;
        __u8                    pg_offset;
        __u8                    pg_len;
        __u8                    sect_offset;
        __u8                    sect_len;
        __u8                    res[4];
} __packed;

struct nvme_nvm_id {
        __u8                    ver_id;
        __u8                    vmnt;
        __u8                    cgrps;
        __u8                    res;
        __le32                  cap;
        __le32                  dom;
        struct nvme_nvm_addr_format ppaf;
        __u8                    resv[228];
        struct nvme_nvm_id_group groups[4];
} __packed;

struct nvme_nvm_bb_tbl {
        __u8    tblid[4];
        __le16  verid;
        __le16  revid;
        __le32  rvsd1;
        __le32  tblks;
        __le32  tfact;
        __le32  tgrown;
        __le32  tdresv;
        __le32  thresv;
        __le32  rsvd2[8];
        __u8    blk[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
        BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}

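/*
 * Copy up to four media groups from the device-specific identify data into
 * the generic lightnvm structures, converting little-endian fields to CPU
 * byte order. The MLC pair table is only copied for MLC media and must not
 * exceed NVME_NVM_LP_MLC_PAIRS entries.
 */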
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
        struct nvme_nvm_id_group *src;
        struct nvm_id_group *dst;
        int i, end;

        end = min_t(u32, 4, nvm_id->cgrps);

        for (i = 0; i < end; i++) {
                src = &nvme_nvm_id->groups[i];
                dst = &nvm_id->groups[i];

                dst->mtype = src->mtype;
                dst->fmtype = src->fmtype;
                dst->num_ch = src->num_ch;
                dst->num_lun = src->num_lun;
                dst->num_pln = src->num_pln;

                dst->num_pg = le16_to_cpu(src->num_pg);
                dst->num_blk = le16_to_cpu(src->num_blk);
                dst->fpg_sz = le16_to_cpu(src->fpg_sz);
                dst->csecs = le16_to_cpu(src->csecs);
                dst->sos = le16_to_cpu(src->sos);

                dst->trdt = le32_to_cpu(src->trdt);
                dst->trdm = le32_to_cpu(src->trdm);
                dst->tprt = le32_to_cpu(src->tprt);
                dst->tprm = le32_to_cpu(src->tprm);
                dst->tbet = le32_to_cpu(src->tbet);
                dst->tbem = le32_to_cpu(src->tbem);
                dst->mpos = le32_to_cpu(src->mpos);
                dst->mccap = le32_to_cpu(src->mccap);

                dst->cpar = le16_to_cpu(src->cpar);

                if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
                        memcpy(dst->lptbl.id, src->lptbl.id, 8);
                        dst->lptbl.mlc.num_pairs =
                                        le16_to_cpu(src->lptbl.mlc.num_pairs);

                        if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
                                pr_err("nvm: number of MLC pairs not supported\n");
                                return -EINVAL;
                        }

                        memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
                                                dst->lptbl.mlc.num_pairs);
                }
        }

        return 0;
}

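/*
 * Issue the vendor-specific identity admin command and translate the result
 * into the nvm_id structure consumed by the lightnvm core.
 */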
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_id *nvme_nvm_id;
        struct nvme_nvm_command c = {};
        int ret;

        c.identity.opcode = nvme_nvm_admin_identity;
        c.identity.nsid = cpu_to_le32(ns->ns_id);
        c.identity.chnl_off = 0;

        nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
        if (!nvme_nvm_id)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                nvme_nvm_id, sizeof(struct nvme_nvm_id));
        if (ret) {
                ret = -EIO;
                goto out;
        }

        nvm_id->ver_id = nvme_nvm_id->ver_id;
        nvm_id->vmnt = nvme_nvm_id->vmnt;
        nvm_id->cgrps = nvme_nvm_id->cgrps;
        nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
        nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
        memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
                                        sizeof(struct nvme_nvm_addr_format));

        ret = init_grps(nvm_id, nvme_nvm_id);
out:
        kfree(nvme_nvm_id);
        return ret;
}

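/*
 * Fetch the logical-to-physical table in chunks no larger than the admin
 * queue's maximum transfer size, invoking update_l2p() once per chunk.
 */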
static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
                                nvm_l2p_update_fn *update_l2p, void *priv)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
        u32 nlb_pr_rq = len / sizeof(u64);
        u64 cmd_slba = slba;
        void *entries;
        int ret = 0;

        c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
        c.l2p.nsid = cpu_to_le32(ns->ns_id);
        entries = kmalloc(len, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        while (nlb) {
                u32 cmd_nlb = min(nlb_pr_rq, nlb);

                c.l2p.slba = cpu_to_le64(cmd_slba);
                c.l2p.nlb = cpu_to_le32(cmd_nlb);

                ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
                                (struct nvme_command *)&c, entries, len);
                if (ret) {
                        dev_err(ns->ctrl->dev, "L2P table transfer failed (%d)\n",
                                                                        ret);
                        ret = -EIO;
                        goto out;
                }

                if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
                        ret = -EINTR;
                        goto out;
                }

                cmd_slba += cmd_nlb;
                nlb -= cmd_nlb;
        }

out:
        kfree(entries);
        return ret;
}

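/*
 * Fold the per-plane bad block table reported by the device into one entry
 * per block; a factory or grown bad marking on any plane marks the whole
 * block.
 */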
static void nvme_nvm_bb_tbl_fold(struct nvm_dev *nvmdev,
                                                int nr_dst_blks, u8 *dst_blks,
                                                int nr_src_blks, u8 *src_blks)
{
        int blk, offset, pl, blktype;

        for (blk = 0; blk < nr_dst_blks; blk++) {
                offset = blk * nvmdev->plane_mode;
                blktype = src_blks[offset];

                /* Bad blocks on any planes take precedence over other types */
                for (pl = 0; pl < nvmdev->plane_mode; pl++) {
                        if (src_blks[offset + pl] &
                                        (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
                                blktype = src_blks[offset + pl];
                                break;
                        }
                }

                dst_blks[blk] = blktype;
        }
}

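/*
 * Fetch the bad block table for the LUN addressed by @ppa, validate its
 * header, fold it to one entry per block and pass the result to the caller
 * through update_bbtbl().
 */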
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
                                int nr_dst_blks, nvm_bb_update_fn *update_bbtbl,
                                void *priv)
{
        struct request_queue *q = nvmdev->q;
        struct nvme_ns *ns = q->queuedata;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_command c = {};
        struct nvme_nvm_bb_tbl *bb_tbl;
        u8 *dst_blks = NULL;
        int nr_src_blks = nr_dst_blks * nvmdev->plane_mode;
        int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_src_blks;
        int ret = 0;

        c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
        c.get_bb.nsid = cpu_to_le32(ns->ns_id);
        c.get_bb.spba = cpu_to_le64(ppa.ppa);

        bb_tbl = kzalloc(tblsz, GFP_KERNEL);
        if (!bb_tbl)
                return -ENOMEM;

        dst_blks = kzalloc(nr_dst_blks, GFP_KERNEL);
        if (!dst_blks) {
                ret = -ENOMEM;
                goto out;
        }

        ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
                                                                bb_tbl, tblsz);
        if (ret) {
                dev_err(ctrl->dev, "get bad block table failed (%d)\n", ret);
                ret = -EIO;
                goto out;
        }

        if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
                bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
                dev_err(ctrl->dev, "bbt format mismatch\n");
                ret = -EINVAL;
                goto out;
        }

        if (le16_to_cpu(bb_tbl->verid) != 1) {
                ret = -EINVAL;
                dev_err(ctrl->dev, "bbt version not supported\n");
                goto out;
        }

        if (le32_to_cpu(bb_tbl->tblks) != nr_src_blks) {
                ret = -EINVAL;
                dev_err(ctrl->dev, "bbt unexpected blocks returned (%u!=%u)\n",
                                le32_to_cpu(bb_tbl->tblks), nr_src_blks);
                goto out;
        }

        nvme_nvm_bb_tbl_fold(nvmdev, nr_dst_blks, dst_blks,
                                                nr_src_blks, bb_tbl->blk);

        ppa = dev_to_generic_addr(nvmdev, ppa);
        ret = update_bbtbl(ppa, nr_dst_blks, dst_blks, priv);

out:
        kfree(dst_blks);
        kfree(bb_tbl);
        return ret;
}

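/* Mark the blocks addressed by @rqd with the given block type on the device. */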
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
                                                                int type)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        int ret = 0;

        c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
        c.set_bb.nsid = cpu_to_le32(ns->ns_id);
        c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
        c.set_bb.value = type;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                                                NULL, 0);
        if (ret)
                dev_err(ns->ctrl->dev, "set bad block table failed (%d)\n", ret);
        return ret;
}

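/*
 * Translate a lightnvm request into an NVMe command. Hybrid reads and
 * writes additionally carry the logical block address taken from the bio.
 */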
static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
                                struct nvme_ns *ns, struct nvme_nvm_command *c)
{
        c->ph_rw.opcode = rqd->opcode;
        c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
        c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c->ph_rw.control = cpu_to_le16(rqd->flags);
        c->ph_rw.length = cpu_to_le16(rqd->nr_pages - 1);

        if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
                c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
                                                rqd->bio->bi_iter.bi_sector));
}

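/*
 * Completion path: forward the per-ppa completion status (when the device
 * provides one) and the error code to the lightnvm core, then free the
 * private command and the request.
 */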
static void nvme_nvm_end_io(struct request *rq, int error)
{
        struct nvm_rq *rqd = rq->end_io_data;
        struct nvme_nvm_completion *cqe = rq->special;

        if (cqe)
                rqd->ppa_status = le64_to_cpu(cqe->result);

        nvm_end_io(rqd, error);

        kfree(rq->cmd);
        blk_mq_free_request(rq);
}

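/*
 * Wrap the caller's bio in a driver-private request and submit it
 * asynchronously; nvme_nvm_end_io() completes it.
 */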
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct nvme_ns *ns = q->queuedata;
        struct request *rq;
        struct bio *bio = rqd->bio;
        struct nvme_nvm_command *cmd;

        rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
        if (IS_ERR(rq))
                return -ENOMEM;

        cmd = kzalloc(sizeof(struct nvme_nvm_command) +
                                sizeof(struct nvme_nvm_completion), GFP_KERNEL);
        if (!cmd) {
                blk_mq_free_request(rq);
                return -ENOMEM;
        }

        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->ioprio = bio_prio(bio);

        if (bio_has_data(bio))
                rq->nr_phys_segments = bio_phys_segments(q, bio);

        rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;

        nvme_nvm_rqtocmd(rq, rqd, ns, cmd);

        rq->cmd = (unsigned char *)cmd;
        rq->cmd_len = sizeof(struct nvme_nvm_command);
        rq->special = cmd + 1;

        rq->end_io_data = rqd;

        blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

        return 0;
}

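/* Synchronously erase the physical block(s) addressed by @rqd. */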
static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct nvme_ns *ns = q->queuedata;
        struct nvme_nvm_command c = {};

        c.erase.opcode = NVM_OP_ERASE;
        c.erase.nsid = cpu_to_le32(ns->ns_id);
        c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c.erase.length = cpu_to_le16(rqd->nr_pages - 1);

        return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}

static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;

        return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
        struct dma_pool *dma_pool = pool;

        dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                                    gfp_t mem_flags, dma_addr_t *dma_handler)
{
        return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *ppa_list,
                                                        dma_addr_t dma_handler)
{
        dma_pool_free(pool, ppa_list, dma_handler);
}

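/* Device operations exported to the lightnvm core. */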
static struct nvm_dev_ops nvme_nvm_dev_ops = {
        .identity               = nvme_nvm_identity,

        .get_l2p_tbl            = nvme_nvm_get_l2p_tbl,

        .get_bb_tbl             = nvme_nvm_get_bb_tbl,
        .set_bb_tbl             = nvme_nvm_set_bb_tbl,

        .submit_io              = nvme_nvm_submit_io,
        .erase_block            = nvme_nvm_erase_block,

        .create_dma_pool        = nvme_nvm_create_dma_pool,
        .destroy_dma_pool       = nvme_nvm_destroy_dma_pool,
        .dev_dma_alloc          = nvme_nvm_dev_dma_alloc,
        .dev_dma_free           = nvme_nvm_dev_dma_free,

        .max_phys_sect          = 64,
};

int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
        return nvm_register(q, disk_name, &nvme_nvm_dev_ops);
}

void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
{
        nvm_unregister(disk_name);
}

/* move to shared place when used in multiple places. */
#define PCI_VENDOR_ID_CNEX 0x1d1d
#define PCI_DEVICE_ID_CNEX_WL 0x2807
#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f

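/*
 * Whitelist of controllers known to implement the LightNVM command set,
 * matched on PCI ID plus a vendor-specific bit in the namespace data.
 */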
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
        struct nvme_ctrl *ctrl = ns->ctrl;
        /* XXX: this is poking into PCI structures from generic code! */
        struct pci_dev *pdev = to_pci_dev(ctrl->dev);

        /* QEMU NVMe simulator - PCI ID + Vendor specific bit */
        if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
                                pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&
                                                        id->vs[0] == 0x1)
                return 1;

        /* CNEX Labs - PCI ID + Vendor specific bit */
        if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
                                pdev->device == PCI_DEVICE_ID_CNEX_WL &&
                                                        id->vs[0] == 0x1)
                return 1;

        return 0;
}