2 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
3 * Copyright (C) 2014 Red Hat, Inc.
4 * Copyright (C) 2015 Arrikto, Inc.
5 * Copyright (C) 2017 Chinamobile, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 #include <linux/spinlock.h>
22 #include <linux/module.h>
23 #include <linux/idr.h>
24 #include <linux/kernel.h>
25 #include <linux/timer.h>
26 #include <linux/parser.h>
27 #include <linux/vmalloc.h>
28 #include <linux/uio_driver.h>
29 #include <linux/radix-tree.h>
30 #include <linux/stringify.h>
31 #include <linux/bitops.h>
32 #include <linux/highmem.h>
33 #include <linux/configfs.h>
34 #include <linux/mutex.h>
35 #include <linux/kthread.h>
36 #include <net/genetlink.h>
37 #include <scsi/scsi_common.h>
38 #include <scsi/scsi_proto.h>
39 #include <target/target_core_base.h>
40 #include <target/target_core_fabric.h>
41 #include <target/target_core_backend.h>
43 #include <linux/target_core_user.h>
46 * Define a shared-memory interface for LIO to pass SCSI commands and
47 * data to userspace for processing. This makes it possible to support
48 * backends that are too complex to implement in-kernel.
50 * It uses the UIO framework to do a lot of the device-creation and
51 * introspection work for us.
53 * See the .h file for how the ring is laid out. Note that while the
54 * command ring is defined, the particulars of the data area are
55 * not. Offset values in the command entry point to other locations
56 * internal to the mmap()ed area. There is separate space outside the
57 * command ring for data buffers. This leaves maximum flexibility for
58 * moving buffer allocations, or even page flipping or other
59 * allocation techniques, without altering the command ring layout.
62 * The user process must be assumed to be malicious. There's no way to
63 * prevent it from breaking the command ring protocol if it wants, but in
64 * order to prevent other issues we must only ever read *data* from
65 * the shared memory area, not offsets or sizes. This applies to
66 * command ring entries as well as the mailbox. Extra code needed for
67 * this may have a 'UAM' comment.
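 *
 * As a rough illustration (not part of this driver), a userspace handler
 * that has mmap()ed the UIO device at 'map' could walk an entry's iovecs
 * like this ('ent_off' and 'handle()' are hypothetical):
 *
 *	struct tcmu_cmd_entry *ent = map + mb->cmdr_off + ent_off;
 *	for (i = 0; i < ent->req.iov_cnt; i++)
 *		handle(map + (size_t)ent->req.iov[i].iov_base,
 *		       ent->req.iov[i].iov_len);
 *
 * Note that iov_base holds an offset from the start of the mapping, not
 * a pointer.
 */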
70 #define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
72 /* For the cmd area, the size is fixed at 8MB */
73 #define CMDR_SIZE (8 * 1024 * 1024)
76 * For the data area, the block size is PAGE_SIZE and
77 * the total size is 256K * PAGE_SIZE.
79 #define DATA_BLOCK_SIZE PAGE_SIZE
80 #define DATA_BLOCK_BITS (256 * 1024)
81 #define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)
82 #define DATA_BLOCK_INIT_BITS 128
84 /* The total size of the ring is 8M + 256K * PAGE_SIZE */
85 #define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
87 /* Default maximum number of global data blocks (512K * PAGE_SIZE) */
88 #define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024)
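/*
 * With 4K pages (an assumption; PAGE_SIZE varies by arch) this works out to
 * an 8MB cmd ring plus an up-to-1GB data area per device, with data blocks
 * across all devices capped at 2GB. Data blocks are only allocated on demand.
 */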
90 static struct device *tcmu_root_device;
96 #define TCMU_CONFIG_LEN 256
99 struct list_head node;
101 struct se_device se_dev;
106 #define TCMU_DEV_BIT_OPEN 0
107 #define TCMU_DEV_BIT_BROKEN 1
110 struct uio_info uio_info;
114 struct tcmu_mailbox *mb_addr;
117 u32 cmdr_last_cleaned;
118 /* Offset of data area from start of mb */
119 /* Must add data_off and mb_addr to get the address */
123 wait_queue_head_t wait_cmdr;
124 struct mutex cmdr_lock;
129 DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
130 struct radix_tree_root data_blocks;
133 spinlock_t commands_lock;
135 struct timer_list timeout;
136 unsigned int cmd_time_out;
138 char dev_config[TCMU_CONFIG_LEN];
141 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
143 #define CMDR_OFF sizeof(struct tcmu_mailbox)
146 struct se_cmd *se_cmd;
147 struct tcmu_dev *tcmu_dev;
151 /* Can't use se_cmd when cleaning up expired cmds, because if the
152 cmd has already been completed then accessing se_cmd is off limits */
157 unsigned long deadline;
159 #define TCMU_CMD_BIT_EXPIRED 0
163 static struct task_struct *unmap_thread;
164 static wait_queue_head_t unmap_wait;
165 static DEFINE_MUTEX(root_udev_mutex);
166 static LIST_HEAD(root_udev);
168 static atomic_t global_db_count = ATOMIC_INIT(0);
170 static struct kmem_cache *tcmu_cmd_cache;
172 /* multicast group */
173 enum tcmu_multicast_groups {
177 static const struct genl_multicast_group tcmu_mcgrps[] = {
178 [TCMU_MCGRP_CONFIG] = { .name = "config", },
181 /* Our generic netlink family */
182 static struct genl_family tcmu_genl_family __ro_after_init = {
183 .module = THIS_MODULE,
187 .maxattr = TCMU_ATTR_MAX,
188 .mcgrps = tcmu_mcgrps,
189 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
193 #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
194 #define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
195 #define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
196 #define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
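/*
 * dbi_cur is a cursor into tcmu_cmd->dbi[]: set_dbi appends a data block
 * index while space is being reserved, and get_dbi replays the indexes in
 * the same order when data is scattered to or gathered from the blocks.
 */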
198 static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
200 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
203 for (i = 0; i < len; i++)
204 clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
207 static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
208 struct tcmu_cmd *tcmu_cmd)
213 dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
214 if (dbi == udev->dbi_thresh)
217 page = radix_tree_lookup(&udev->data_blocks, dbi);
220 if (atomic_add_return(1, &global_db_count) >
221 TCMU_GLOBAL_MAX_BLOCKS) {
222 atomic_dec(&global_db_count);
226 /* try to get a new page from the mm */
227 page = alloc_page(GFP_KERNEL);
231 ret = radix_tree_insert(&udev->data_blocks, dbi, page);
239 if (dbi > udev->dbi_max)
242 set_bit(dbi, udev->data_bitmap);
243 tcmu_cmd_set_dbi(tcmu_cmd, dbi);
248 static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
249 struct tcmu_cmd *tcmu_cmd)
253 udev->waiting_global = false;
255 for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
256 if (!tcmu_get_empty_block(udev, tcmu_cmd))
262 udev->waiting_global = true;
263 /* Try to wake up the unmap thread */
264 wake_up(&unmap_wait);
268 static inline struct page *
269 tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
271 return radix_tree_lookup(&udev->data_blocks, dbi);
274 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
276 kfree(tcmu_cmd->dbi);
277 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
280 static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
282 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
283 size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
285 if (se_cmd->se_cmd_flags & SCF_BIDI) {
286 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
287 data_length += round_up(se_cmd->t_bidi_data_sg->length,
294 static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
296 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
298 return data_length / DATA_BLOCK_SIZE;
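/*
 * Note: tcmu_cmd_get_data_length() rounds up to DATA_BLOCK_SIZE, so the
 * division above is exact.
 */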
301 static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
303 struct se_device *se_dev = se_cmd->se_dev;
304 struct tcmu_dev *udev = TCMU_DEV(se_dev);
305 struct tcmu_cmd *tcmu_cmd;
308 tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
312 tcmu_cmd->se_cmd = se_cmd;
313 tcmu_cmd->tcmu_dev = udev;
314 if (udev->cmd_time_out)
315 tcmu_cmd->deadline = jiffies +
316 msecs_to_jiffies(udev->cmd_time_out);
318 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
319 tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
320 tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
322 if (!tcmu_cmd->dbi) {
323 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
327 idr_preload(GFP_KERNEL);
328 spin_lock_irq(&udev->commands_lock);
329 cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
330 USHRT_MAX, GFP_NOWAIT);
331 spin_unlock_irq(&udev->commands_lock);
335 tcmu_free_cmd(tcmu_cmd);
338 tcmu_cmd->cmd_id = cmd_id;
343 static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
345 unsigned long offset = offset_in_page(vaddr);
347 size = round_up(size+offset, PAGE_SIZE);
351 flush_dcache_page(virt_to_page(vaddr));
357 * Some ring helper functions. We don't assume size is a power of 2 so
358 * we can't use circ_buf.h.
360 static inline size_t spc_used(size_t head, size_t tail, size_t size)
362 int diff = head - tail;
370 static inline size_t spc_free(size_t head, size_t tail, size_t size)
372 /* Keep 1 byte unused or we can't tell full from empty */
373 return (size - spc_used(head, tail, size) - 1);
376 static inline size_t head_to_end(size_t head, size_t size)
381 static inline void new_iov(struct iovec **iov, int *iov_cnt,
382 struct tcmu_dev *udev)
391 memset(iovec, 0, sizeof(struct iovec));
394 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
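/*
 * Advance a ring index by 'used' bytes, wrapping at 'size'. The
 * smp_store_release() orders the preceding entry/data writes before the
 * index update that publishes them to the other side of the ring.
 */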
396 /* offset is relative to mb_addr */
397 static inline size_t get_block_offset_user(struct tcmu_dev *dev,
398 int dbi, int remaining)
400 return dev->data_off + dbi * DATA_BLOCK_SIZE +
401 DATA_BLOCK_SIZE - remaining;
404 static inline size_t iov_tail(struct tcmu_dev *udev, struct iovec *iov)
406 return (size_t)iov->iov_base + iov->iov_len;
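/*
 * iov_tail(): the userspace offset just past the end of this iovec, used by
 * scatter_data_area() to decide whether the next data chunk is contiguous
 * and can be merged into the same iovec.
 */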
409 static int scatter_data_area(struct tcmu_dev *udev,
410 struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
411 unsigned int data_nents, struct iovec **iov,
412 int *iov_cnt, bool copy_data)
415 int block_remaining = 0;
416 void *from, *to = NULL;
417 size_t copy_bytes, to_offset, offset;
418 struct scatterlist *sg;
421 for_each_sg(data_sg, sg, data_nents, i) {
422 int sg_remaining = sg->length;
423 from = kmap_atomic(sg_page(sg)) + sg->offset;
424 while (sg_remaining > 0) {
425 if (block_remaining == 0) {
429 block_remaining = DATA_BLOCK_SIZE;
430 dbi = tcmu_cmd_get_dbi(tcmu_cmd);
431 page = tcmu_get_block_page(udev, dbi);
432 to = kmap_atomic(page);
435 copy_bytes = min_t(size_t, sg_remaining,
437 to_offset = get_block_offset_user(udev, dbi,
439 offset = DATA_BLOCK_SIZE - block_remaining;
440 to = (void *)(unsigned long)to + offset;
443 to_offset == iov_tail(udev, *iov)) {
444 (*iov)->iov_len += copy_bytes;
446 new_iov(iov, iov_cnt, udev);
447 (*iov)->iov_base = (void __user *)to_offset;
448 (*iov)->iov_len = copy_bytes;
451 memcpy(to, from + sg->length - sg_remaining,
453 tcmu_flush_dcache_range(to, copy_bytes);
455 sg_remaining -= copy_bytes;
456 block_remaining -= copy_bytes;
458 kunmap_atomic(from - sg->offset);
466 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
469 struct se_cmd *se_cmd = cmd->se_cmd;
471 int block_remaining = 0;
472 void *from = NULL, *to;
473 size_t copy_bytes, offset;
474 struct scatterlist *sg, *data_sg;
476 unsigned int data_nents;
480 data_sg = se_cmd->t_data_sg;
481 data_nents = se_cmd->t_data_nents;
485 * For the bidi case, the first 'count' blocks hold the Data-Out
486 * buffer; they must be skipped before gathering the Data-In
487 * buffer.
489 count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
491 data_sg = se_cmd->t_bidi_data_sg;
492 data_nents = se_cmd->t_bidi_data_nents;
495 tcmu_cmd_set_dbi_cur(cmd, count);
497 for_each_sg(data_sg, sg, data_nents, i) {
498 int sg_remaining = sg->length;
499 to = kmap_atomic(sg_page(sg)) + sg->offset;
500 while (sg_remaining > 0) {
501 if (block_remaining == 0) {
505 block_remaining = DATA_BLOCK_SIZE;
506 dbi = tcmu_cmd_get_dbi(cmd);
507 page = tcmu_get_block_page(udev, dbi);
508 from = kmap_atomic(page);
510 copy_bytes = min_t(size_t, sg_remaining,
512 offset = DATA_BLOCK_SIZE - block_remaining;
513 from = (void *)(unsigned long)from + offset;
514 tcmu_flush_dcache_range(from, copy_bytes);
515 memcpy(to + sg->length - sg_remaining, from,
518 sg_remaining -= copy_bytes;
519 block_remaining -= copy_bytes;
521 kunmap_atomic(to - sg->offset);
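/*
 * spc_bitmap_free(): bytes of data area still free below the current
 * threshold, i.e. the number of unset bits below 'thresh' times the
 * block size.
 */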
527 static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
529 return DATA_BLOCK_SIZE * (thresh - bitmap_weight(bitmap, thresh));
533 * We can't queue a command until we have space available on the cmd ring *and*
534 * space available in the data area.
536 * Called with ring lock held.
538 static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
539 size_t cmd_size, size_t data_needed)
541 struct tcmu_mailbox *mb = udev->mb_addr;
542 uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
544 size_t space, cmd_needed;
547 tcmu_flush_dcache_range(mb, sizeof(*mb));
549 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
552 * If the end-of-ring space is too small for the cmd then we need space for
553 * a PAD entry plus the original cmd, since cmds are internally contiguous.
555 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
556 cmd_needed = cmd_size;
558 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
560 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
561 if (space < cmd_needed) {
562 pr_debug("no cmd space: %u %u %u\n", cmd_head,
563 udev->cmdr_last_cleaned, udev->cmdr_size);
567 /* check for, and reserve, the data blocks needed */
568 space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
569 if (space < data_needed) {
570 unsigned long blocks_left = DATA_BLOCK_BITS - udev->dbi_thresh;
573 if (blocks_left < blocks_needed) {
574 pr_debug("no data space: only %lu available, but asked for %zu\n",
575 blocks_left * DATA_BLOCK_SIZE,
580 /* Try to expand the thresh */
581 if (!udev->dbi_thresh) {
582 /* From idle state */
583 uint32_t init_thresh = DATA_BLOCK_INIT_BITS;
585 udev->dbi_thresh = max(blocks_needed, init_thresh);
588 * Grow the data area by max(blocks needed,
589 * dbi_thresh / 2), but limited to the max
590 * DATA_BLOCK_BITS size.
592 grow = max(blocks_needed, udev->dbi_thresh / 2);
593 udev->dbi_thresh += grow;
594 if (udev->dbi_thresh > DATA_BLOCK_BITS)
595 udev->dbi_thresh = DATA_BLOCK_BITS;
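/*
 * dbi_thresh caps how far into the data bitmap this device may allocate
 * blocks. It starts at 0 (idle), grows on demand as above, and is shrunk
 * again by unmap_thread_fn() when blocks are reclaimed.
 */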
599 if (!tcmu_get_empty_blocks(udev, cmd))
605 static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
607 return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
608 sizeof(struct tcmu_cmd_entry));
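/*
 * The max() above guarantees the entry is always large enough for the
 * response side of the union (including the sense buffer), even when only
 * a few iovecs are needed.
 */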
611 static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
612 size_t base_command_size)
614 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
617 command_size = base_command_size +
618 round_up(scsi_command_size(se_cmd->t_task_cdb),
621 WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
626 static sense_reason_t
627 tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
629 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
630 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
631 size_t base_command_size, command_size;
632 struct tcmu_mailbox *mb;
633 struct tcmu_cmd_entry *entry;
638 bool copy_to_data_area;
639 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
641 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
642 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
645 * Must be a certain minimum size for response sense info, but
646 * also may be larger if the iov array is large.
648 * We prepare as many iovs as possible for potential use here,
649 * because it is expensive to tell exactly how many regions are free
650 * in the bitmap & global data pool; the size calculated here is
651 * only used for the space checks.
653 * The size will be recalculated later as actually needed, to save cmd area memory.
656 base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
657 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
659 mutex_lock(&udev->cmdr_lock);
662 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
663 if ((command_size > (udev->cmdr_size / 2)) ||
664 data_length > udev->data_size) {
665 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
666 "cmd ring/data area\n", command_size, data_length,
667 udev->cmdr_size, udev->data_size);
668 mutex_unlock(&udev->cmdr_lock);
669 return TCM_INVALID_CDB_FIELD;
672 while (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
676 prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);
678 pr_debug("sleeping for ring space\n");
679 mutex_unlock(&udev->cmdr_lock);
680 if (udev->cmd_time_out)
681 ret = schedule_timeout(
682 msecs_to_jiffies(udev->cmd_time_out));
684 ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
685 finish_wait(&udev->wait_cmdr, &__wait);
687 pr_warn("tcmu: command timed out\n");
688 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
691 mutex_lock(&udev->cmdr_lock);
693 /* We dropped cmdr_lock, cmd_head is stale */
694 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
697 /* Insert a PAD if end-of-ring space is too small */
698 if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
699 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
701 entry = (void *) mb + CMDR_OFF + cmd_head;
702 tcmu_flush_dcache_range(entry, sizeof(*entry));
703 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
704 tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
705 entry->hdr.cmd_id = 0; /* not used for PAD */
706 entry->hdr.kflags = 0;
707 entry->hdr.uflags = 0;
709 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
711 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
712 WARN_ON(cmd_head != 0);
715 entry = (void *) mb + CMDR_OFF + cmd_head;
716 tcmu_flush_dcache_range(entry, sizeof(*entry));
717 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
718 entry->hdr.cmd_id = tcmu_cmd->cmd_id;
719 entry->hdr.kflags = 0;
720 entry->hdr.uflags = 0;
722 /* Handle allocating space from the data area */
723 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
724 iov = &entry->req.iov[0];
726 copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
727 || se_cmd->se_cmd_flags & SCF_BIDI);
728 ret = scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
729 se_cmd->t_data_nents, &iov, &iov_cnt,
732 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
733 mutex_unlock(&udev->cmdr_lock);
735 pr_err("tcmu: alloc and scatter data failed\n");
736 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
738 entry->req.iov_cnt = iov_cnt;
739 entry->req.iov_dif_cnt = 0;
741 /* Handle BIDI commands */
742 if (se_cmd->se_cmd_flags & SCF_BIDI) {
745 ret = scatter_data_area(udev, tcmu_cmd,
746 se_cmd->t_bidi_data_sg,
747 se_cmd->t_bidi_data_nents,
748 &iov, &iov_cnt, false);
750 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
751 mutex_unlock(&udev->cmdr_lock);
753 pr_err("tcmu: alloc and scatter bidi data failed\n");
754 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
756 entry->req.iov_bidi_cnt = iov_cnt;
760 * Recalculate the command's base size and total size according
761 * to the actual needs.
763 base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
764 entry->req.iov_bidi_cnt);
765 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
767 tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
769 /* All offsets relative to mb_addr, not start of entry! */
770 cdb_off = CMDR_OFF + cmd_head + base_command_size;
771 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
772 entry->req.cdb_off = cdb_off;
773 tcmu_flush_dcache_range(entry, sizeof(*entry));
775 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
776 tcmu_flush_dcache_range(mb, sizeof(*mb));
777 mutex_unlock(&udev->cmdr_lock);
779 /* TODO: only if FLUSH and FUA? */
780 uio_event_notify(&udev->uio_info);
782 if (udev->cmd_time_out)
783 mod_timer(&udev->timeout, round_jiffies_up(jiffies +
784 msecs_to_jiffies(udev->cmd_time_out)));
789 static sense_reason_t
790 tcmu_queue_cmd(struct se_cmd *se_cmd)
792 struct se_device *se_dev = se_cmd->se_dev;
793 struct tcmu_dev *udev = TCMU_DEV(se_dev);
794 struct tcmu_cmd *tcmu_cmd;
797 tcmu_cmd = tcmu_alloc_cmd(se_cmd);
799 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
801 ret = tcmu_queue_cmd_ring(tcmu_cmd);
802 if (ret != TCM_NO_SENSE) {
803 pr_err("TCMU: Could not queue command\n");
804 spin_lock_irq(&udev->commands_lock);
805 idr_remove(&udev->commands, tcmu_cmd->cmd_id);
806 spin_unlock_irq(&udev->commands_lock);
808 tcmu_free_cmd(tcmu_cmd);
814 static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
816 struct se_cmd *se_cmd = cmd->se_cmd;
817 struct tcmu_dev *udev = cmd->tcmu_dev;
820 * The cmd was already completed by the timeout handler; just reclaim
821 * its data area space and free the cmd.
823 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
826 tcmu_cmd_reset_dbi_cur(cmd);
828 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
829 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
831 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
832 } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
833 memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
834 se_cmd->scsi_sense_length);
835 } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
836 /* Get the Data-In buffer before cleanup */
837 gather_data_area(udev, cmd, true);
838 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
839 gather_data_area(udev, cmd, false);
840 } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
842 } else if (se_cmd->data_direction != DMA_NONE) {
843 pr_warn("TCMU: data direction was %d!\n",
844 se_cmd->data_direction);
847 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
851 tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
855 static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
857 struct tcmu_mailbox *mb;
860 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
861 pr_err("ring broken, not handling completions\n");
866 tcmu_flush_dcache_range(mb, sizeof(*mb));
868 while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {
870 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
871 struct tcmu_cmd *cmd;
873 tcmu_flush_dcache_range(entry, sizeof(*entry));
875 if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
876 UPDATE_HEAD(udev->cmdr_last_cleaned,
877 tcmu_hdr_get_len(entry->hdr.len_op),
881 WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
883 spin_lock(&udev->commands_lock);
884 cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
885 spin_unlock(&udev->commands_lock);
888 pr_err("cmd_id not found, ring is broken\n");
889 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
893 tcmu_handle_completion(cmd, entry);
895 UPDATE_HEAD(udev->cmdr_last_cleaned,
896 tcmu_hdr_get_len(entry->hdr.len_op),
902 if (mb->cmd_tail == mb->cmd_head)
903 del_timer(&udev->timeout); /* no more pending cmds */
905 wake_up(&udev->wait_cmdr);
910 static int tcmu_check_expired_cmd(int id, void *p, void *data)
912 struct tcmu_cmd *cmd = p;
914 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
917 if (!time_after(jiffies, cmd->deadline))
920 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
921 target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
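/*
 * The expired cmd stays in the commands idr and keeps its data blocks;
 * both are reclaimed when userspace eventually completes the entry
 * (tcmu_handle_completion()) or when the device is freed.
 */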
927 static void tcmu_device_timedout(unsigned long data)
929 struct tcmu_dev *udev = (struct tcmu_dev *)data;
932 spin_lock_irqsave(&udev->commands_lock, flags);
933 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
934 spin_unlock_irqrestore(&udev->commands_lock, flags);
936 /* Try to wake up the unmap thread */
937 wake_up(&unmap_wait);
940 * We don't need to wake up threads on wait_cmdr since they have their own timeout.
945 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
947 struct tcmu_hba *tcmu_hba;
949 tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
953 tcmu_hba->host_id = host_id;
954 hba->hba_ptr = tcmu_hba;
959 static void tcmu_detach_hba(struct se_hba *hba)
965 static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
967 struct tcmu_dev *udev;
969 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
973 udev->name = kstrdup(name, GFP_KERNEL);
980 udev->cmd_time_out = TCMU_TIME_OUT;
982 init_waitqueue_head(&udev->wait_cmdr);
983 mutex_init(&udev->cmdr_lock);
985 idr_init(&udev->commands);
986 spin_lock_init(&udev->commands_lock);
988 setup_timer(&udev->timeout, tcmu_device_timedout,
989 (unsigned long)udev);
991 return &udev->se_dev;
994 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
996 struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);
998 mutex_lock(&tcmu_dev->cmdr_lock);
999 tcmu_handle_completions(tcmu_dev);
1000 mutex_unlock(&tcmu_dev->cmdr_lock);
1006 * mmap code from uio.c. Copied here because we want to hook mmap()
1007 * and this stuff must come along.
1009 static int tcmu_find_mem_index(struct vm_area_struct *vma)
1011 struct tcmu_dev *udev = vma->vm_private_data;
1012 struct uio_info *info = &udev->uio_info;
1014 if (vma->vm_pgoff < MAX_UIO_MAPS) {
1015 if (info->mem[vma->vm_pgoff].size == 0)
1017 return (int)vma->vm_pgoff;
1022 static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
1027 mutex_lock(&udev->cmdr_lock);
1028 page = tcmu_get_block_page(udev, dbi);
1030 mutex_unlock(&udev->cmdr_lock);
1035 * Normally we should not get here: this happens only when userspace
1036 * has touched blocks that lie outside the cmd's data iov[]. Map a page
1037 * for it anyway, but warn, since this is most likely a userspace bug.
1040 pr_warn("Block(%u) outside the cmd's iov[] has been touched!\n", dbi);
1041 pr_warn("This is most likely a userspace bug, please check the handler!\n");
1043 if (dbi >= udev->dbi_thresh) {
1044 /* Extend udev->dbi_thresh to dbi + 1 */
1045 udev->dbi_thresh = dbi + 1;
1046 udev->dbi_max = dbi;
1049 page = radix_tree_lookup(&udev->data_blocks, dbi);
1051 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1053 mutex_unlock(&udev->cmdr_lock);
1057 ret = radix_tree_insert(&udev->data_blocks, dbi, page);
1059 mutex_unlock(&udev->cmdr_lock);
1065 * Since this case is rare in the page fault path, we allow
1066 * global_db_count to exceed TCMU_GLOBAL_MAX_BLOCKS here
1067 * rather than failing the fault.
1069 atomic_inc(&global_db_count);
1071 mutex_unlock(&udev->cmdr_lock);
1076 static int tcmu_vma_fault(struct vm_fault *vmf)
1078 struct tcmu_dev *udev = vmf->vma->vm_private_data;
1079 struct uio_info *info = &udev->uio_info;
1081 unsigned long offset;
1084 int mi = tcmu_find_mem_index(vmf->vma);
1086 return VM_FAULT_SIGBUS;
1089 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
 * to select mem[N].
1092 offset = (vmf->pgoff - mi) << PAGE_SHIFT;
1094 if (offset < udev->data_off) {
1095 /* For the vmalloc()ed cmd area pages */
1096 addr = (void *)(unsigned long)info->mem[mi].addr + offset;
1097 page = vmalloc_to_page(addr);
1101 /* For the dynamically growing data area pages */
1102 dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
1103 page = tcmu_try_get_block_page(udev, dbi);
1105 return VM_FAULT_NOPAGE;
1113 static const struct vm_operations_struct tcmu_vm_ops = {
1114 .fault = tcmu_vma_fault,
1117 static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
1119 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1121 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1122 vma->vm_ops = &tcmu_vm_ops;
1124 vma->vm_private_data = udev;
1126 /* Ensure the mmap is exactly the right size */
1127 if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
1133 static int tcmu_open(struct uio_info *info, struct inode *inode)
1135 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1137 /* O_EXCL not supported for char devs, so fake it? */
1138 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
1141 udev->inode = inode;
1148 static int tcmu_release(struct uio_info *info, struct inode *inode)
1150 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1152 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
1154 pr_debug("close\n");
1159 static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor)
1161 struct sk_buff *skb;
1165 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1169 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
1173 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name);
1177 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor);
1181 genlmsg_end(skb, msg_header);
1183 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
1184 TCMU_MCGRP_CONFIG, GFP_KERNEL);
1186 /* We don't care if no one is listening */
1196 static int tcmu_configure_device(struct se_device *dev)
1198 struct tcmu_dev *udev = TCMU_DEV(dev);
1199 struct tcmu_hba *hba = udev->hba->hba_ptr;
1200 struct uio_info *info;
1201 struct tcmu_mailbox *mb;
1207 info = &udev->uio_info;
1209 size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
1211 size += 1; /* for \0 */
1212 str = kmalloc(size, GFP_KERNEL);
1216 used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
1218 if (udev->dev_config[0])
1219 snprintf(str + used, size - used, "/%s", udev->dev_config);
1223 udev->mb_addr = vzalloc(CMDR_SIZE);
1224 if (!udev->mb_addr) {
1229 /* mailbox fits in first part of CMDR space */
1230 udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
1231 udev->data_off = CMDR_SIZE;
1232 udev->data_size = DATA_SIZE;
1233 udev->dbi_thresh = 0; /* Default in Idle state */
1234 udev->waiting_global = false;
1236 /* Initialise the mailbox of the ring buffer */
1238 mb->version = TCMU_MAILBOX_VERSION;
1239 mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
1240 mb->cmdr_off = CMDR_OFF;
1241 mb->cmdr_size = udev->cmdr_size;
1243 WARN_ON(!PAGE_ALIGNED(udev->data_off));
1244 WARN_ON(udev->data_size % PAGE_SIZE);
1245 WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
1247 INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
1249 info->version = __stringify(TCMU_MAILBOX_VERSION);
1251 info->mem[0].name = "tcm-user command & data buffer";
1252 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
1253 info->mem[0].size = TCMU_RING_SIZE;
1254 info->mem[0].memtype = UIO_MEM_NONE;
1256 info->irqcontrol = tcmu_irqcontrol;
1257 info->irq = UIO_IRQ_CUSTOM;
1259 info->mmap = tcmu_mmap;
1260 info->open = tcmu_open;
1261 info->release = tcmu_release;
1263 ret = uio_register_device(tcmu_root_device, info);
1267 /* User can set hw_block_size before enabling the device */
1268 if (dev->dev_attrib.hw_block_size == 0)
1269 dev->dev_attrib.hw_block_size = 512;
1270 /* Other attributes can be configured in userspace */
1271 if (!dev->dev_attrib.hw_max_sectors)
1272 dev->dev_attrib.hw_max_sectors = 128;
1273 dev->dev_attrib.hw_queue_depth = 128;
1275 ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
1276 udev->uio_info.uio_dev->minor);
1280 mutex_lock(&root_udev_mutex);
1281 list_add(&udev->node, &root_udev);
1282 mutex_unlock(&root_udev_mutex);
1287 uio_unregister_device(&udev->uio_info);
1289 vfree(udev->mb_addr);
1296 static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
1298 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1299 kmem_cache_free(tcmu_cmd_cache, cmd);
1305 static void tcmu_dev_call_rcu(struct rcu_head *p)
1307 struct se_device *dev = container_of(p, struct se_device, rcu_head);
1308 struct tcmu_dev *udev = TCMU_DEV(dev);
1313 static bool tcmu_dev_configured(struct tcmu_dev *udev)
1315 return udev->uio_info.uio_dev ? true : false;
1318 static void tcmu_blocks_release(struct tcmu_dev *udev)
1323 /* Try to release all block pages */
1324 mutex_lock(&udev->cmdr_lock);
1325 for (i = 0; i <= udev->dbi_max; i++) {
1326 page = radix_tree_delete(&udev->data_blocks, i);
1329 atomic_dec(&global_db_count);
1332 mutex_unlock(&udev->cmdr_lock);
1335 static void tcmu_free_device(struct se_device *dev)
1337 struct tcmu_dev *udev = TCMU_DEV(dev);
1338 struct tcmu_cmd *cmd;
1339 bool all_expired = true;
1342 del_timer_sync(&udev->timeout);
1344 mutex_lock(&root_udev_mutex);
1345 list_del(&udev->node);
1346 mutex_unlock(&root_udev_mutex);
1348 vfree(udev->mb_addr);
1350 /* Upper layer should drain all requests before calling this */
1351 spin_lock_irq(&udev->commands_lock);
1352 idr_for_each_entry(&udev->commands, cmd, i) {
1353 if (tcmu_check_and_free_pending_cmd(cmd) != 0)
1354 all_expired = false;
1356 idr_destroy(&udev->commands);
1357 spin_unlock_irq(&udev->commands_lock);
1358 WARN_ON(!all_expired);
1360 tcmu_blocks_release(udev);
1362 if (tcmu_dev_configured(udev)) {
1363 tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
1364 udev->uio_info.uio_dev->minor);
1366 uio_unregister_device(&udev->uio_info);
1367 kfree(udev->uio_info.name);
1370 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
1374 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
1378 static match_table_t tokens = {
1379 {Opt_dev_config, "dev_config=%s"},
1380 {Opt_dev_size, "dev_size=%u"},
1381 {Opt_hw_block_size, "hw_block_size=%u"},
1382 {Opt_hw_max_sectors, "hw_max_sectors=%u"},
1386 static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
1388 unsigned long tmp_ul;
1392 arg_p = match_strdup(arg);
1396 ret = kstrtoul(arg_p, 0, &tmp_ul);
1399 pr_err("kstrtoul() failed for dev attrib\n");
1403 pr_err("dev attrib must be nonzero\n");
1406 *dev_attrib = tmp_ul;
1410 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
1411 const char *page, ssize_t count)
1413 struct tcmu_dev *udev = TCMU_DEV(dev);
1414 char *orig, *ptr, *opts, *arg_p;
1415 substring_t args[MAX_OPT_ARGS];
1418 opts = kstrdup(page, GFP_KERNEL);
1424 while ((ptr = strsep(&opts, ",\n")) != NULL) {
1428 token = match_token(ptr, tokens, args);
1430 case Opt_dev_config:
1431 if (match_strlcpy(udev->dev_config, &args[0],
1432 TCMU_CONFIG_LEN) == 0) {
1436 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
1439 arg_p = match_strdup(&args[0]);
1444 ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
1447 pr_err("kstrtoul() failed for dev_size=\n");
1449 case Opt_hw_block_size:
1450 ret = tcmu_set_dev_attrib(&args[0],
1451 &(dev->dev_attrib.hw_block_size));
1453 case Opt_hw_max_sectors:
1454 ret = tcmu_set_dev_attrib(&args[0],
1455 &(dev->dev_attrib.hw_max_sectors));
1466 return (!ret) ? count : ret;
1469 static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
1471 struct tcmu_dev *udev = TCMU_DEV(dev);
1474 bl = sprintf(b + bl, "Config: %s ",
1475 udev->dev_config[0] ? udev->dev_config : "NULL");
1476 bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);
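/*
 * READ CAPACITY expects the last addressable LBA rather than the block
 * count, which is why one block is subtracted from dev_size below.
 */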
1481 static sector_t tcmu_get_blocks(struct se_device *dev)
1483 struct tcmu_dev *udev = TCMU_DEV(dev);
1485 return div_u64(udev->dev_size - dev->dev_attrib.block_size,
1486 dev->dev_attrib.block_size);
1489 static sense_reason_t
1490 tcmu_parse_cdb(struct se_cmd *cmd)
1492 return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
1495 static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
1497 struct se_dev_attrib *da = container_of(to_config_group(item),
1498 struct se_dev_attrib, da_group);
1499 struct tcmu_dev *udev = container_of(da->da_dev,
1500 struct tcmu_dev, se_dev);
1502 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
1505 static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
1508 struct se_dev_attrib *da = container_of(to_config_group(item),
1509 struct se_dev_attrib, da_group);
1510 struct tcmu_dev *udev = container_of(da->da_dev,
1511 struct tcmu_dev, se_dev);
1515 if (da->da_dev->export_count) {
1516 pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
1520 ret = kstrtou32(page, 0, &val);
1524 udev->cmd_time_out = val * MSEC_PER_SEC;
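/*
 * The configfs value is given in seconds; it is stored internally in
 * milliseconds and converted to jiffies when the timers are armed.
 */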
1527 CONFIGFS_ATTR(tcmu_, cmd_time_out);
1529 static struct configfs_attribute **tcmu_attrs;
1531 static struct target_backend_ops tcmu_ops = {
1533 .owner = THIS_MODULE,
1534 .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
1535 .attach_hba = tcmu_attach_hba,
1536 .detach_hba = tcmu_detach_hba,
1537 .alloc_device = tcmu_alloc_device,
1538 .configure_device = tcmu_configure_device,
1539 .free_device = tcmu_free_device,
1540 .parse_cdb = tcmu_parse_cdb,
1541 .set_configfs_dev_params = tcmu_set_configfs_dev_params,
1542 .show_configfs_dev_params = tcmu_show_configfs_dev_params,
1543 .get_device_type = sbc_get_device_type,
1544 .get_blocks = tcmu_get_blocks,
1545 .tb_dev_attrib_attrs = NULL,
1548 static int unmap_thread_fn(void *data)
1550 struct tcmu_dev *udev;
1552 uint32_t start, end, block;
1557 DEFINE_WAIT(__wait);
1559 prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
1561 finish_wait(&unmap_wait, &__wait);
1563 if (kthread_should_stop())
1566 mutex_lock(&root_udev_mutex);
1567 list_for_each_entry(udev, &root_udev, node) {
1568 mutex_lock(&udev->cmdr_lock);
1570 /* Try to complete the finished commands first */
1571 tcmu_handle_completions(udev);
1573 /* Skip udevs that are waiting for the global pool or are idle */
1574 if (udev->waiting_global || !udev->dbi_thresh) {
1575 mutex_unlock(&udev->cmdr_lock);
1579 end = udev->dbi_max + 1;
1580 block = find_last_bit(udev->data_bitmap, end);
1581 if (block == udev->dbi_max) {
1583 * The last bit is dbi_max, so there is
1584 * no need to shrink any blocks.
1586 mutex_unlock(&udev->cmdr_lock);
1588 } else if (block == end) {
1589 /* The current udev goes back to the idle state */
1590 udev->dbi_thresh = start = 0;
1593 udev->dbi_thresh = start = block + 1;
1594 udev->dbi_max = block;
1597 /* Unmap the data area from off to the end */
1598 off = udev->data_off + start * DATA_BLOCK_SIZE;
1599 unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
1601 /* Release the block pages */
1602 for (i = start; i < end; i++) {
1603 page = radix_tree_delete(&udev->data_blocks, i);
1606 atomic_dec(&global_db_count);
1609 mutex_unlock(&udev->cmdr_lock);
1613 * Try to wake up the udevs that are waiting
1614 * for the global data pool.
1616 list_for_each_entry(udev, &root_udev, node) {
1617 if (udev->waiting_global)
1618 wake_up(&udev->wait_cmdr);
1620 mutex_unlock(&root_udev_mutex);
1626 static int __init tcmu_module_init(void)
1628 int ret, i, len = 0;
1630 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
1632 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
1633 sizeof(struct tcmu_cmd),
1634 __alignof__(struct tcmu_cmd),
1636 if (!tcmu_cmd_cache)
1639 tcmu_root_device = root_device_register("tcm_user");
1640 if (IS_ERR(tcmu_root_device)) {
1641 ret = PTR_ERR(tcmu_root_device);
1642 goto out_free_cache;
1645 ret = genl_register_family(&tcmu_genl_family);
1647 goto out_unreg_device;
1650 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
1651 len += sizeof(struct configfs_attribute *);
1653 len += sizeof(struct configfs_attribute *) * 2;
1655 tcmu_attrs = kzalloc(len, GFP_KERNEL);
1658 goto out_unreg_genl;
1661 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
1662 tcmu_attrs[i] = passthrough_attrib_attrs[i];
1664 tcmu_attrs[i] = &tcmu_attr_cmd_time_out;
1665 tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
1667 ret = transport_backend_register(&tcmu_ops);
1671 init_waitqueue_head(&unmap_wait);
1672 unmap_thread = kthread_run(unmap_thread_fn, NULL, "tcmu_unmap");
1673 if (IS_ERR(unmap_thread)) {
1674 ret = PTR_ERR(unmap_thread);
1675 goto out_unreg_transport;
1680 out_unreg_transport:
1681 target_backend_unregister(&tcmu_ops);
1685 genl_unregister_family(&tcmu_genl_family);
1687 root_device_unregister(tcmu_root_device);
1689 kmem_cache_destroy(tcmu_cmd_cache);
1694 static void __exit tcmu_module_exit(void)
1696 kthread_stop(unmap_thread);
1697 target_backend_unregister(&tcmu_ops);
1699 genl_unregister_family(&tcmu_genl_family);
1700 root_device_unregister(tcmu_root_device);
1701 kmem_cache_destroy(tcmu_cmd_cache);
1704 MODULE_DESCRIPTION("TCM USER subsystem plugin");
1705 MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
1706 MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
1707 MODULE_LICENSE("GPL");
1709 module_init(tcmu_module_init);
1710 module_exit(tcmu_module_exit);