/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>

enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_l2p_tbl	= 0xea,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};

struct nvme_nvm_hb_rw {
	/* ... 64-byte hybrid read/write command, fields elided ... */
};

struct nvme_nvm_ph_rw {
	/* ... 64-byte physical read/write command, fields elided ... */
};

struct nvme_nvm_identity {
	/* ... 64-byte identity admin command, fields elided ... */
};

struct nvme_nvm_l2ptbl {
	/* ... 64-byte get L2P table admin command, fields elided ... */
};

struct nvme_nvm_getbbtbl {
	/* ... 64-byte get bad block table admin command, fields elided ... */
};

struct nvme_nvm_setbbtbl {
	/* ... 64-byte set bad block table admin command, fields elided ... */
};

struct nvme_nvm_erase_blk {
	/* ... 64-byte erase command, fields elided ... */
};

struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_hb_rw hb_rw;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_l2ptbl l2p;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
		struct nvme_nvm_erase_blk erase;
	};
};

#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
	__le16			num_pairs;
	__u8			pairs[NVME_NVM_LP_MLC_PAIRS];
};

struct nvme_nvm_lp_tbl {
	__u8			id[8];
	struct nvme_nvm_lp_mlc	mlc;
};

struct nvme_nvm_id_group {
	/* ... media type, geometry and timing fields elided ... */
	struct nvme_nvm_lp_tbl lptbl;
} __packed;

struct nvme_nvm_addr_format {
	/* ... bit offset/length pairs for each PPA component, elided ... */
} __packed;

struct nvme_nvm_id {
	__u8			ver_id;
	__u8			vmnt;
	__u8			cgrps;
	__u8			res;
	__le32			cap;
	__le32			dom;
	struct nvme_nvm_addr_format ppaf;
	__u8			resv[228];	/* pads the header to 256 bytes */
	struct nvme_nvm_id_group groups[4];
} __packed;

struct nvme_nvm_bb_tbl {
	__u8	tblid[4];
	__le16	verid;
	/* ... counters and reserved fields elided ... */
	__le32	tblks;
	__u8	blk[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}

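/*
 * Unpack the per-group configuration from the identify payload into the
 * generic lightnvm representation, converting little-endian fields to
 * host byte order along the way.
 */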
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
	struct nvme_nvm_id_group *src;
	struct nvm_id_group *dst;
	int i, end;

	end = min_t(u32, 4, nvm_id->cgrps);

	for (i = 0; i < end; i++) {
		src = &nvme_nvm_id->groups[i];
		dst = &nvm_id->groups[i];

		dst->mtype = src->mtype;
		dst->fmtype = src->fmtype;
		dst->num_ch = src->num_ch;
		dst->num_lun = src->num_lun;
		dst->num_pln = src->num_pln;

		dst->num_pg = le16_to_cpu(src->num_pg);
		dst->num_blk = le16_to_cpu(src->num_blk);
		dst->fpg_sz = le16_to_cpu(src->fpg_sz);
		dst->csecs = le16_to_cpu(src->csecs);
		dst->sos = le16_to_cpu(src->sos);

		dst->trdt = le32_to_cpu(src->trdt);
		dst->trdm = le32_to_cpu(src->trdm);
		dst->tprt = le32_to_cpu(src->tprt);
		dst->tprm = le32_to_cpu(src->tprm);
		dst->tbet = le32_to_cpu(src->tbet);
		dst->tbem = le32_to_cpu(src->tbem);
		dst->mpos = le32_to_cpu(src->mpos);
		dst->mccap = le32_to_cpu(src->mccap);

		dst->cpar = le16_to_cpu(src->cpar);

		if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
			memcpy(dst->lptbl.id, src->lptbl.id, 8);
			dst->lptbl.mlc.num_pairs =
					le16_to_cpu(src->lptbl.mlc.num_pairs);

			if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
				pr_err("nvm: number of MLC pairs not supported\n");
				return -EINVAL;
			}

			memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
						dst->lptbl.mlc.num_pairs);
		}
	}

	return 0;
}

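/*
 * Issue the vendor-specific identity admin command and translate the 4k
 * identify page into the generic struct nvm_id.
 */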
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_id *nvme_nvm_id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->ns_id);
	c.identity.chnl_off = 0;

	nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
	if (!nvme_nvm_id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
				nvme_nvm_id, sizeof(struct nvme_nvm_id));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	nvm_id->ver_id = nvme_nvm_id->ver_id;
	nvm_id->vmnt = nvme_nvm_id->vmnt;
	nvm_id->cgrps = nvme_nvm_id->cgrps;
	nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
	nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
	memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
					sizeof(struct nvme_nvm_addr_format));

	ret = init_grps(nvm_id, nvme_nvm_id);
out:
	kfree(nvme_nvm_id);
	return ret;
}

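/*
 * Read back the device's logical-to-physical table in chunks bounded by
 * the admin queue's maximum transfer size, handing each chunk to the
 * caller's update_l2p callback.
 */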
static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
				nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
	u32 nlb_pr_rq = len / sizeof(u64);
	u64 cmd_slba = slba;
	void *entries;
	int ret = 0;

	c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
	c.l2p.nsid = cpu_to_le32(ns->ns_id);
	entries = kmalloc(len, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	while (nlb) {
		u32 cmd_nlb = min(nlb_pr_rq, nlb);
		u64 elba = cmd_slba + cmd_nlb;

		c.l2p.slba = cpu_to_le64(cmd_slba);
		c.l2p.nlb = cpu_to_le32(cmd_nlb);

		ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
				(struct nvme_command *)&c, entries, len);
		if (ret) {
			dev_err(ns->ctrl->device,
				"L2P table transfer failed (%d)\n", ret);
			ret = -EIO;
			goto out;
		}

		if (unlikely(elba > nvmdev->total_secs)) {
			pr_err("nvm: L2P data from device is out of bounds!\n");
			ret = -EINVAL;
			goto out;
		}

		/* Transform physical address to target address space */
		nvmdev->mt->part_to_tgt(nvmdev, entries, cmd_nlb);

		if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
			ret = -EINTR;
			goto out;
		}

		cmd_slba += cmd_nlb;
		nlb -= cmd_nlb;
	}

out:
	kfree(entries);
	return ret;
}

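/*
 * Fetch the bad block table for the LUN addressed by ppa and copy the
 * per-block state bytes into blks, after sanity-checking the "BBLT"
 * signature, table version, and block count.
 */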
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
								u8 *blks)
{
	struct request_queue *q = nvmdev->q;
	struct nvm_geo *geo = &nvmdev->geo;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	int nr_blks = geo->blks_per_lun * geo->plane_mode;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);

	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
								bb_tbl, tblsz);
	if (ret) {
		dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}

	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(ctrl->device, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}

	if (le16_to_cpu(bb_tbl->verid) != 1) {
		ret = -EINVAL;
		dev_err(ctrl->device, "bbt version not supported\n");
		goto out;
	}

	if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
		ret = -EINVAL;
		dev_err(ctrl->device,
			"bbt unexpected blocks returned (%u!=%u)",
			le32_to_cpu(bb_tbl->tblks), nr_blks);
		goto out;
	}

	memcpy(blks, bb_tbl->blk, geo->blks_per_lun * geo->plane_mode);
out:
	kfree(bb_tbl);
	return ret;
}

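/* Update the on-device bad block state for nr_ppas blocks starting at
 * ppas->ppa.
 */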
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
							int nr_ppas, int type)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->ns_id);
	c.set_bb.spba = cpu_to_le64(ppas->ppa);
	c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
								NULL, 0);
	if (ret)
		dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
									ret);
	return ret;
}

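/*
 * Translate a lightnvm request into the vendor-specific NVMe command.
 * Hybrid read/write commands additionally carry the logical block address
 * taken from the bio.
 */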
static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
				struct nvme_ns *ns, struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);

	if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
		c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
					rqd->bio->bi_iter.bi_sector));
}

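/* Completion path: propagate the per-PPA status back to lightnvm. */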
static void nvme_nvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;

	rqd->ppa_status = nvme_req(rq)->result.u64;
	nvm_end_io(rqd, error);

	kfree(nvme_req(rq)->cmd);
	blk_mq_free_request(rq);
}

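/*
 * Build and queue an asynchronous I/O request. The command buffer is freed
 * together with the request in nvme_nvm_end_io().
 */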
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;
	struct bio *bio = rqd->bio;
	struct nvme_nvm_command *cmd;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
	if (IS_ERR(rq)) {
		kfree(cmd);
		return -ENOMEM;
	}
	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

	rq->ioprio = bio_prio(bio);
	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}

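/* Synchronously erase the block(s) addressed by rqd->ppa_addr. */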
static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_nvm_command c = {};

	c.erase.opcode = NVM_OP_ERASE;
	c.erase.nsid = cpu_to_le32(ns->ns_id);
	c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c.erase.length = cpu_to_le16(rqd->nr_ppas - 1);
	c.erase.control = cpu_to_le16(rqd->flags);

	return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}

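/*
 * Thin wrappers around the DMA pool API so lightnvm targets can allocate
 * DMA-able buffers (e.g. the metadata list carried in rqd->dma_meta_list,
 * see nvme_nvm_rqtocmd()) without touching the NVMe controller directly.
 */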
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;

	return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				    gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
				  dma_addr_t dma_handler)
{
	dma_pool_free(pool, addr, dma_handler);
}

static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_l2p_tbl		= nvme_nvm_get_l2p_tbl,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.submit_io		= nvme_nvm_submit_io,
	.erase_block		= nvme_nvm_erase_block,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,

	/* .max_phys_sect initializer elided (read by sysfs code below) */
};

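/*
 * Hook an open-channel namespace into the lightnvm subsystem. The core
 * driver is expected to call this once nvme_nvm_ns_supported() has matched
 * the device (see below).
 */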
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
	struct request_queue *q = ns->queue;
	struct nvm_dev *dev;

	dev = nvm_alloc_dev(node);
	if (!dev)
		return -ENOMEM;

	dev->q = q;
	memcpy(dev->name, disk_name, DISK_NAME_LEN);
	dev->ops = &nvme_nvm_dev_ops;
	dev->private_data = ns;
	ns->ndev = dev;

	return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
	nvm_unregister(ns->ndev);
}

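/*
 * Single show() routine shared by all of the read-only sysfs attributes
 * below; it dispatches on the attribute name.
 */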
static ssize_t nvm_dev_attr_show(struct device *dev,
				 struct device_attribute *dattr, char *page)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvm_dev *ndev = ns->ndev;
	struct nvm_id *id;
	struct nvm_id_group *grp;
	struct attribute *attr;

	if (!ndev)
		return 0;

	id = &ndev->identity;
	grp = &id->groups[0];
	attr = &dattr->attr;

	if (strcmp(attr->name, "version") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
	} else if (strcmp(attr->name, "vendor_opcode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
	} else if (strcmp(attr->name, "capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
	} else if (strcmp(attr->name, "device_mode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
	} else if (strcmp(attr->name, "media_manager") == 0) {
		if (!ndev->mt)
			return scnprintf(page, PAGE_SIZE, "%s\n", "none");
		return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name);
	} else if (strcmp(attr->name, "ppa_format") == 0) {
		return scnprintf(page, PAGE_SIZE,
			"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			id->ppaf.ch_offset, id->ppaf.ch_len,
			id->ppaf.lun_offset, id->ppaf.lun_len,
			id->ppaf.pln_offset, id->ppaf.pln_len,
			id->ppaf.blk_offset, id->ppaf.blk_len,
			id->ppaf.pg_offset, id->ppaf.pg_len,
			id->ppaf.sect_offset, id->ppaf.sect_len);
	} else if (strcmp(attr->name, "media_type") == 0) {	/* u8 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
	} else if (strcmp(attr->name, "flash_media_type") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
	} else if (strcmp(attr->name, "num_channels") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
	} else if (strcmp(attr->name, "num_luns") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
	} else if (strcmp(attr->name, "num_planes") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
	} else if (strcmp(attr->name, "num_blocks") == 0) {	/* u16 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
	} else if (strcmp(attr->name, "num_pages") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
	} else if (strcmp(attr->name, "page_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
	} else if (strcmp(attr->name, "hw_sector_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
	} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
	} else if (strcmp(attr->name, "read_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
	} else if (strcmp(attr->name, "read_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
	} else if (strcmp(attr->name, "prog_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
	} else if (strcmp(attr->name, "prog_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
	} else if (strcmp(attr->name, "erase_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
	} else if (strcmp(attr->name, "erase_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
	} else if (strcmp(attr->name, "multiplane_modes") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
	} else if (strcmp(attr->name, "media_capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
	} else if (strcmp(attr->name, "max_phys_secs") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n",
				ndev->ops->max_phys_sect);
	} else {
		return scnprintf(page,
				 PAGE_SIZE,
				 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
				 attr->name);
	}
}

#define NVM_DEV_ATTR_RO(_name)						\
	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)

static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(vendor_opcode);
static NVM_DEV_ATTR_RO(capabilities);
static NVM_DEV_ATTR_RO(device_mode);
static NVM_DEV_ATTR_RO(ppa_format);
static NVM_DEV_ATTR_RO(media_manager);

static NVM_DEV_ATTR_RO(media_type);
static NVM_DEV_ATTR_RO(flash_media_type);
static NVM_DEV_ATTR_RO(num_channels);
static NVM_DEV_ATTR_RO(num_luns);
static NVM_DEV_ATTR_RO(num_planes);
static NVM_DEV_ATTR_RO(num_blocks);
static NVM_DEV_ATTR_RO(num_pages);
static NVM_DEV_ATTR_RO(page_size);
static NVM_DEV_ATTR_RO(hw_sector_size);
static NVM_DEV_ATTR_RO(oob_sector_size);
static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);
static NVM_DEV_ATTR_RO(prog_typ);
static NVM_DEV_ATTR_RO(prog_max);
static NVM_DEV_ATTR_RO(erase_typ);
static NVM_DEV_ATTR_RO(erase_max);
static NVM_DEV_ATTR_RO(multiplane_modes);
static NVM_DEV_ATTR_RO(media_capabilities);
static NVM_DEV_ATTR_RO(max_phys_secs);

static struct attribute *nvm_dev_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_vendor_opcode.attr,
	&dev_attr_capabilities.attr,
	&dev_attr_device_mode.attr,
	&dev_attr_media_manager.attr,

	&dev_attr_ppa_format.attr,
	&dev_attr_media_type.attr,
	&dev_attr_flash_media_type.attr,
	&dev_attr_num_channels.attr,
	&dev_attr_num_luns.attr,
	&dev_attr_num_planes.attr,
	&dev_attr_num_blocks.attr,
	&dev_attr_num_pages.attr,
	&dev_attr_page_size.attr,
	&dev_attr_hw_sector_size.attr,
	&dev_attr_oob_sector_size.attr,
	&dev_attr_read_typ.attr,
	&dev_attr_read_max.attr,
	&dev_attr_prog_typ.attr,
	&dev_attr_prog_max.attr,
	&dev_attr_erase_typ.attr,
	&dev_attr_erase_max.attr,
	&dev_attr_multiplane_modes.attr,
	&dev_attr_media_capabilities.attr,
	&dev_attr_max_phys_secs.attr,

	NULL,
};

static const struct attribute_group nvm_dev_attr_group = {
	.name		= "lightnvm",
	.attrs		= nvm_dev_attrs,
};

int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}

void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
	sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}

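/*
 * With the group registered, the identify data is readable from userspace
 * under the namespace's gendisk, e.g. (illustrative path; the disk name
 * depends on the system):
 *
 *   cat /sys/block/nvme0n1/lightnvm/version
 */
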
/* move to shared place when used in multiple places. */
#define PCI_VENDOR_ID_CNEX 0x1d1d
#define PCI_DEVICE_ID_CNEX_WL 0x2807
#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f

int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	/* XXX: this is poking into PCI structures from generic code! */
	struct pci_dev *pdev = to_pci_dev(ctrl->dev);

	/* QEMU NVMe simulator - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
				pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&
							id->vs[0] == 0x1)
		return 1;

	/* CNEX Labs - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
				pdev->device == PCI_DEVICE_ID_CNEX_WL &&
							id->vs[0] == 0x1)
		return 1;

	return 0;
}