/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>

#include "nvmet.h"

static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 *
 * The full list of resources to be protected by this semaphore is:
 *
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures, the write lock should be
 * held; when reading (populating the discovery log page or checking a
 * host-subsystem link), the read lock is held to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}
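
/*
 * Deliver queued asynchronous events: each pending AEN is matched with an
 * outstanding Asynchronous Event Request command and completed with the
 * encoded event result.
 */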
static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}
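
/*
 * Queue an asynchronous event for the controller and kick the work item
 * that pairs it with an outstanding AER command.
 */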
static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
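
/*
 * Enable a port: look up the transport ops for the port's transport type,
 * autoloading the transport module via request_module() if it has not been
 * registered yet, then hand the port to the transport's add_port() callback.
 * Called with nvmet_config_sem held for writing.
 */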
int nvmet_enable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}
	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	port->enabled = true;
	return 0;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}
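
/*
 * Keep-alive handling: if the host does not reset the keep-alive timer
 * within the negotiated KATO interval, the expiry handler treats it as a
 * fatal controller error and tears the controller down.
 */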
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}
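
/*
 * Namespace lookup: __nvmet_find_namespace() walks the subsystem's
 * RCU-protected namespace list; nvmet_find_namespace() additionally takes a
 * percpu reference that the caller drops with nvmet_put_namespace().
 */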
static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}
	return NULL;
}

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (!list_empty(&ns->dev_link))
		goto out_unlock;

	ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
			NULL);
	if (IS_ERR(ns->bdev)) {
		pr_err("nvmet: failed to open block device %s: (%ld)\n",
			ns->device_path, PTR_ERR(ns->bdev));
		ret = PTR_ERR(ns->bdev);
		ns->bdev = NULL;
		goto out_unlock;
	}

	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_blkdev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the
	 * implementation of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_blkdev_put:
	blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
	ns->bdev = NULL;
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (list_empty(&ns->dev_link)) {
		mutex_unlock(&subsys->lock);
		return;
	}
	list_del_rcu(&ns->dev_link);
	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespace from the lookup list, we
	 * can kill the percpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	if (ns->bdev)
		blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	return ns;
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (status)
		nvmet_set_status(req, status);

	/* XXX: need to fill in something useful for sq_head */
	req->rsp->sq_head = 0;
	if (likely(req->sq)) /* may happen during early failure */
		req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
	percpu_ref_kill(&sq->ref);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (sq->ctrl) {
		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->rsp->status = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/* either variant of SGLs is fine, as we don't support metadata */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
		     (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any Non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);
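
/*
 * Helpers to extract the individual fields of the Controller Configuration
 * (CC) register; the bit positions follow the NVMe specification.
 */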
static inline bool nvmet_cc_en(u32 cc)
{
	return cc & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> 4) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> 7) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> 11) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> 14) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> 16) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> 20) & 0xf;
}
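
/*
 * Transition the controller to ready: CC values with unsupported queue entry
 * sizes, memory page size, arbitration mechanism or command set selection
 * set CSTS.CFS; otherwise CSTS.RDY is set.
 */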
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);

	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);

	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}
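
/*
 * Look up an existing controller by cntlid for an I/O queue Connect command
 * and take a reference on it; the host NQN must match the one the controller
 * was created with.
 */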
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}

static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	struct nvmet_host_link *p;

	if (subsys->allow_any_host)
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
		const char *hostnqn)
{
	struct nvmet_subsys_link *s;

	list_for_each_entry(s, &req->port->subsystems, entry) {
		if (__nvmet_host_allowed(s->subsys, hostnqn))
			return true;
	}

	return false;
}

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->type == NVME_NQN_DISC)
		return nvmet_host_discovery_allowed(req, hostnqn);
	else
		return __nvmet_host_allowed(subsys, hostnqn);
}
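
/*
 * Allocate a new controller for an admin queue Connect command: verify that
 * the subsystem exists and the host is allowed to connect, then set up the
 * controller structure, queue arrays, cntlid and keep-alive timer.
 */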
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status;
	int ret;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	ret = ida_simple_get(&cntlid_ida,
			NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;
	if (ctrl->subsys->type == NVME_NQN_DISC) {
		/* Don't accept keep-alive timeout for discovery controllers */
		if (kato) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			goto out_free_sqs;
		}

		/*
		 * Discovery controllers use some arbitrary high value in
		 * order to clean up stale discovery sessions.
		 *
		 * From the latest base diff RC:
		 * "The Keep Alive command is not supported by
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes). If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 */
		ctrl->kato = NVMET_DISC_KATO;
	} else {
		/* keep-alive timeout in seconds */
		ctrl->kato = DIV_ROUND_UP(kato, 1000);
	}
	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	nvmet_stop_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
	nvmet_subsys_put(subsys);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
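
/*
 * Resolve a subsystem NQN to a subsystem reachable through the given port
 * and take a reference on it.  The well-known discovery subsystem NQN is
 * matched first; otherwise only subsystems linked to the port are considered.
 */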
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
			NVMF_NQN_SIZE)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return NULL;

	subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return NULL;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return NULL;
	}

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	kfree(subsys->subsysnqn);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error;

	error = nvmet_init_discovery();
	if (error)
		goto out;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");