/*******************************************************************************
 * Filename:  target_core_alua.c
 *
 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
 *
 * (c) Copyright 2009-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_ua.h"

static sense_reason_t core_alua_check_transition(int state, int valid,
						 int *primary);
static int core_alua_set_tg_pt_secondary_state(
		struct se_lun *lun, int explicit, int offline);

static char *core_alua_dump_state(int state);

static void __target_attach_tg_pt_gp(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *tg_pt_gp);

static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;

static DEFINE_SPINLOCK(lu_gps_lock);
static LIST_HEAD(lu_gps_list);

struct t10_alua_lu_gp *default_lu_gp;
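
/*
 * default_lu_gp is the logical unit group with ID 0 created during target
 * core init; core_setup_alua() below associates every new se_device with
 * it until configfs moves the device into an explicitly created LU group.
 */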

/*
 * REPORT REFERRALS
 *
 * See sbc3r35 section 5.23
 */
sense_reason_t
target_emulate_report_referrals(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_alua_lba_map *map;
	struct t10_alua_lba_map_member *map_mem;
	unsigned char *buf;
	u32 rd_len = 0, off;

	if (cmd->data_length < 4) {
		pr_warn("REPORT REFERRALS allocation length %u too"
			" small\n", cmd->data_length);
		return TCM_INVALID_CDB_FIELD;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	off = 4;
	spin_lock(&dev->t10_alua.lba_map_lock);
	if (list_empty(&dev->t10_alua.lba_map_list)) {
		spin_unlock(&dev->t10_alua.lba_map_lock);
		transport_kunmap_data_sg(cmd);

		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
			    lba_map_list) {
		int desc_num = off + 3;
		int pg_num;

		off += 4;
		rd_len += 4;
		if (cmd->data_length > off)
			put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
		off += 8;
		rd_len += 8;
		if (cmd->data_length > off)
			put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
		off += 8;
		rd_len += 8;
		pg_num = 0;
		list_for_each_entry(map_mem, &map->lba_map_mem_list,
				    lba_map_mem_list) {
			int alua_state = map_mem->lba_map_mem_alua_state;
			int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;

			if (cmd->data_length > off)
				buf[off] = alua_state & 0x0f;
			off += 2;
			if (cmd->data_length > off)
				buf[off] = (alua_pg_id >> 8) & 0xff;
			off++;
			if (cmd->data_length > off)
				buf[off] = (alua_pg_id & 0xff);
			off++;
			rd_len += 4;
			pg_num++;
		}
		if (cmd->data_length > desc_num)
			buf[desc_num] = pg_num;
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);

	/*
	 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
	 */
	put_unaligned_be16(rd_len, &buf[2]);
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}
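
/*
 * Note that every write into the payload above is guarded by a
 * "cmd->data_length > off" check, so a short ALLOCATION LENGTH still
 * yields a safely truncated response while rd_len reports the full
 * descriptor length to the initiator.
 */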

/*
 * REPORT_TARGET_PORT_GROUPS
 *
 * See spc4r17 section 6.27
 */
sense_reason_t
target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct se_lun *lun;
	unsigned char *buf;
	u32 rd_len = 0, off;
	int ext_hdr = (cmd->t_task_cdb[1] & 0x20);

	/*
	 * Skip over RESERVED area to first Target port group descriptor
	 * depending on the PARAMETER DATA FORMAT type..
	 */
	if (ext_hdr != 0)
		off = 8;
	else
		off = 4;

	if (cmd->data_length < off) {
		pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
			" small for %s header\n", cmd->data_length,
			(ext_hdr) ? "extended" : "normal");
		return TCM_INVALID_CDB_FIELD;
	}
	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		/*
		 * Check if the Target port group and Target port descriptor list
		 * based on tg_pt_gp_members count will fit into the response payload.
		 * Otherwise, bump rd_len to let the initiator know we have exceeded
		 * the allocation length and the response is truncated.
		 */
		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
		     cmd->data_length) {
			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
			continue;
		}
		/*
		 * PREF: Preferred target port bit, determine if this
		 * bit should be set for port group.
		 */
		if (tg_pt_gp->tg_pt_gp_pref)
			buf[off] = 0x80;
		/*
		 * Set the ASYMMETRIC ACCESS State
		 */
		buf[off++] |= (atomic_read(
			&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
		/*
		 * Set supported ASYMMETRIC ACCESS State bits
		 */
		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
		/*
		 * TARGET PORT GROUP
		 */
		buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
		buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);

		off++; /* Skip over Reserved */
		/*
		 * STATUS CODE
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
		/*
		 * Vendor Specific field
		 */
		buf[off++] = 0x00;
		/*
		 * TARGET PORT COUNT
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
		rd_len += 8;

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
				lun_tg_pt_gp_link) {
			/*
			 * Start Target Port descriptor format
			 *
			 * See spc4r17 section 6.2.7 Table 247
			 */
			off += 2; /* Skip over Obsolete */
			/*
			 * Set RELATIVE TARGET PORT IDENTIFIER
			 */
			buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
			buf[off++] = (lun->lun_rtpi & 0xff);
			rd_len += 4;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
	}
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	/*
	 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
	 */
	put_unaligned_be32(rd_len, &buf[0]);

	/*
	 * Fill in the Extended header parameter data format if requested
	 */
	if (ext_hdr != 0) {
		buf[4] = 0x10;
		/*
		 * Set the implicit transition time (in seconds) for the application
		 * client to use as a base for its transition timeout value.
		 *
		 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
		 * this CDB was received upon to determine this value individually
		 * for the ALUA target port group.
		 */
		spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock);
		tg_pt_gp = cmd->se_lun->lun_tg_pt_gp;
		if (tg_pt_gp)
			buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
		spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock);
	}
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}
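
/*
 * Note that ext_hdr above reflects the PARAMETER DATA FORMAT field in
 * byte 1 of the CDB: a non-zero value selects the 8 byte extended
 * header, in which buf[5] carries the implicit transition time filled
 * in above (see spc4r17 section 6.27).
 */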

/*
 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 *
 * See spc4r17 section 6.35
 */
sense_reason_t
target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_lun *l_lun = cmd->se_lun;
	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
	unsigned char *buf;
	unsigned char *ptr;
	sense_reason_t rc = TCM_NO_SENSE;
	u32 len = 4; /* Skip over RESERVED area in header */
	int alua_access_state, primary = 0, valid_states;
	u16 tg_pt_id, rtpi;

	if (cmd->data_length < 4) {
		pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
			" small\n", cmd->data_length);
		return TCM_INVALID_PARAMETER_LIST;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
	 * for the local tg_pt_gp.
	 */
	spin_lock(&l_lun->lun_tg_pt_gp_lock);
	l_tg_pt_gp = l_lun->lun_tg_pt_gp;
	if (!l_tg_pt_gp) {
		spin_unlock(&l_lun->lun_tg_pt_gp_lock);
		pr_err("Unable to access l_lun->tg_pt_gp\n");
		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
		goto out;
	}

	if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
		spin_unlock(&l_lun->lun_tg_pt_gp_lock);
		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
				" while TPGS_EXPLICIT_ALUA is disabled\n");
		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
		goto out;
	}
	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
	spin_unlock(&l_lun->lun_tg_pt_gp_lock);

	ptr = &buf[4]; /* Skip over RESERVED area in header */

	while (len < cmd->data_length) {
		bool found = false;

		alua_access_state = (ptr[0] & 0x0f);
		/*
		 * Check the received ALUA access state, and determine if
		 * the state is a primary or secondary target port asymmetric
		 * access state.
		 */
		rc = core_alua_check_transition(alua_access_state,
						valid_states, &primary);
		if (rc) {
			/*
			 * If the SET TARGET PORT GROUPS attempts to establish
			 * an invalid combination of target port asymmetric
			 * access states or attempts to establish an
			 * unsupported target port asymmetric access state,
			 * then the command shall be terminated with CHECK
			 * CONDITION status, with the sense key set to ILLEGAL
			 * REQUEST, and the additional sense code set to INVALID
			 * FIELD IN PARAMETER LIST.
			 */
			goto out;
		}

		/*
		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
		 * specifies a primary target port asymmetric access state,
		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
		 * a primary target port group for which the primary target
		 * port asymmetric access state shall be changed. If the
		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
		 * port asymmetric access state, then the TARGET PORT GROUP OR
		 * TARGET PORT field specifies the relative target port
		 * identifier (see 3.1.120) of the target port for which the
		 * secondary target port asymmetric access state shall be
		 * changed.
		 */
		if (primary) {
			tg_pt_id = get_unaligned_be16(ptr + 2);
			/*
			 * Locate the matching target port group ID from
			 * the global tg_pt_gp list
			 */
			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
			list_for_each_entry(tg_pt_gp,
					&dev->t10_alua.tg_pt_gps_list,
					tg_pt_gp_list) {
				if (!tg_pt_gp->tg_pt_gp_valid_id)
					continue;

				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
					continue;

				atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);

				spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

				if (!core_alua_do_port_transition(tg_pt_gp,
						dev, l_lun, nacl,
						alua_access_state, 1))
					found = true;

				spin_lock(&dev->t10_alua.tg_pt_gps_lock);
				atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
				break;
			}
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
		} else {
			struct se_lun *lun;

			/*
			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
			 * the Target Port in question for the incoming
			 * SET_TARGET_PORT_GROUPS op.
			 */
			rtpi = get_unaligned_be16(ptr + 2);
			/*
			 * Locate the matching relative target port identifier
			 * for the struct se_device storage object.
			 */
			spin_lock(&dev->se_port_lock);
			list_for_each_entry(lun, &dev->dev_sep_list,
					lun_dev_link) {
				if (lun->lun_rtpi != rtpi)
					continue;

				spin_unlock(&dev->se_port_lock);

				if (!core_alua_set_tg_pt_secondary_state(
						lun, 1, 1))
					found = true;

				spin_lock(&dev->se_port_lock);
				break;
			}
			spin_unlock(&dev->se_port_lock);
		}

		if (!found) {
			rc = TCM_INVALID_PARAMETER_LIST;
			goto out;
		}

		ptr += 4;
		len += 4;
	}

out:
	transport_kunmap_data_sg(cmd);
	if (!rc)
		target_complete_cmd(cmd, GOOD);
	return rc;
}
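
/*
 * Each SET TARGET PORT GROUPS descriptor in the parameter list is four
 * bytes: the ASYMMETRIC ACCESS STATE in byte 0 and the TARGET PORT GROUP
 * OR TARGET PORT field in bytes 2-3, hence the ptr += 4 / len += 4 walk
 * above.
 */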

static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
{
	/*
	 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
	 * The ALUA additional sense code qualifier (ASCQ) is determined
	 * by the ALUA primary or secondary access state..
	 */
	pr_debug("[%s]: ALUA TG Port not available, "
		"SenseKey: NOT_READY, ASC/ASCQ: "
		"0x04/0x%02x\n",
		cmd->se_tfo->get_fabric_name(), alua_ascq);

	cmd->scsi_asc = 0x04;
	cmd->scsi_ascq = alua_ascq;
}

static inline void core_alua_state_nonoptimized(
	struct se_cmd *cmd,
	unsigned char *cdb,
	int nonop_delay_msecs)
{
	/*
	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
	 * later to determine if processing of this cmd needs to be
	 * temporarily delayed for the Active/NonOptimized primary access state.
	 */
	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
	cmd->alua_nonop_delay = nonop_delay_msecs;
}
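
/*
 * The delay itself is applied later in core_alua_check_nonop_delay(),
 * which fabric modules call from process context (see below).
 */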

static inline int core_alua_state_lba_dependent(
	struct se_cmd *cmd,
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_device *dev = cmd->se_dev;
	u64 segment_size, segment_mult, sectors, lba;

	/* Only need to check for cdb actually containing LBAs */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
		return 0;

	spin_lock(&dev->t10_alua.lba_map_lock);
	segment_size = dev->t10_alua.lba_map_segment_size;
	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
	sectors = cmd->data_length / dev->dev_attrib.block_size;

	lba = cmd->t_task_lba;
	while (lba < cmd->t_task_lba + sectors) {
		struct t10_alua_lba_map *cur_map = NULL, *map;
		struct t10_alua_lba_map_member *map_mem;

		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
				    lba_map_list) {
			u64 start_lba, last_lba;
			u64 first_lba = map->lba_map_first_lba;

			if (segment_mult) {
				u64 tmp = lba;
				start_lba = do_div(tmp, segment_size * segment_mult);

				last_lba = first_lba + segment_size - 1;
				if (start_lba >= first_lba &&
				    start_lba <= last_lba) {
					cur_map = map;
					break;
				}
			} else {
				last_lba = map->lba_map_last_lba;
				if (lba >= first_lba && lba <= last_lba) {
					cur_map = map;
					break;
				}
			}
			lba = last_lba + 1;
		}
		if (!cur_map) {
			spin_unlock(&dev->t10_alua.lba_map_lock);
			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
			return 1;
		}
		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
				    lba_map_mem_list) {
			if (map_mem->lba_map_mem_alua_pg_id !=
			    tg_pt_gp->tg_pt_gp_id)
				continue;
			switch (map_mem->lba_map_mem_alua_state) {
			case ALUA_ACCESS_STATE_STANDBY:
				spin_unlock(&dev->t10_alua.lba_map_lock);
				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
				return 1;
			case ALUA_ACCESS_STATE_UNAVAILABLE:
				spin_unlock(&dev->t10_alua.lba_map_lock);
				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
				return 1;
			default:
				break;
			}
		}
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	return 0;
}
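
/*
 * Referrals segment math used above: with a non-zero segment multiplier
 * the LBA space repeats every (segment_size * segment_mult) blocks, so
 * do_div() reduces the starting LBA into that window before it is
 * compared against a map entry; without a multiplier the map entries
 * carry absolute LBA ranges.
 */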

static inline int core_alua_state_standby(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
	 * spc4r17 section 5.9.2.4.4
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case LOG_SELECT:
	case LOG_SENSE:
	case MODE_SELECT:
	case MODE_SENSE:
	case REPORT_LUNS:
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
	case READ_CAPACITY:
		return 0;
	case SERVICE_ACTION_IN_16:
		switch (cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			return 0;
		default:
			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
			return 1;
		}
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
			return 1;
		}
	case MAINTENANCE_OUT:
		switch (cdb[1]) {
		case MO_SET_TARGET_PGS:
			return 0;
		default:
			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
			return 1;
		}
	case REQUEST_SENSE:
	case PERSISTENT_RESERVE_IN:
	case PERSISTENT_RESERVE_OUT:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
		return 1;
	}

	return 0;
}

static inline int core_alua_state_unavailable(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
	 * spc4r17 section 5.9.2.4.5
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
		return 0;
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
			return 1;
		}
	case MAINTENANCE_OUT:
		switch (cdb[1]) {
		case MO_SET_TARGET_PGS:
			return 0;
		default:
			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
			return 1;
		}
	case REQUEST_SENSE:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
		return 1;
	}

	return 0;
}

static inline int core_alua_state_transition(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
	 * spc4r17 section 5.9.2.5
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
		return 0;
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
			return 1;
		}
	case REQUEST_SENSE:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
		return 1;
	}

	return 0;
}

/*
 * Return TCM_CHECK_CONDITION_NOT_READY to signal that the LUN is not
 * accessible in the current access state, 0 to signal success, and
 * TCM_INVALID_CDB_FIELD to signal failure on an unknown access state.
 */
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_lun *lun = cmd->se_lun;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int out_alua_state, nonop_delay_msecs;

	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
		return 0;
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	/*
	 * First, check for a struct se_port specific secondary ALUA target port
	 * access state: OFFLINE
	 */
	if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
		pr_debug("ALUA: Got secondary offline status for local"
				" target port\n");
		set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
		return TCM_CHECK_CONDITION_NOT_READY;
	}

	if (!lun->lun_tg_pt_gp)
		return 0;

	spin_lock(&lun->lun_tg_pt_gp_lock);
	tg_pt_gp = lun->lun_tg_pt_gp;
	out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;

	// XXX: keeps using tg_pt_gp without reference after unlock
	spin_unlock(&lun->lun_tg_pt_gp_lock);
	/*
	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
	 * statement so the compiler knows explicitly to check this case first.
	 * For the Optimized ALUA access state case, we want to process the
	 * incoming fabric cmd ASAP..
	 */
	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
		return 0;

	switch (out_alua_state) {
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
		break;
	case ALUA_ACCESS_STATE_STANDBY:
		if (core_alua_state_standby(cmd, cdb))
			return TCM_CHECK_CONDITION_NOT_READY;
		break;
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		if (core_alua_state_unavailable(cmd, cdb))
			return TCM_CHECK_CONDITION_NOT_READY;
		break;
	case ALUA_ACCESS_STATE_TRANSITION:
		if (core_alua_state_transition(cmd, cdb))
			return TCM_CHECK_CONDITION_NOT_READY;
		break;
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
			return TCM_CHECK_CONDITION_NOT_READY;
		break;
	/*
	 * OFFLINE is a secondary ALUA target port group access state, that is
	 * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
	 */
	case ALUA_ACCESS_STATE_OFFLINE:
	default:
		pr_err("Unknown ALUA access state: 0x%02x\n",
				out_alua_state);
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

/*
 * Check implicit and explicit ALUA state change request.
 */
static sense_reason_t
core_alua_check_transition(int state, int valid, int *primary)
{
	/*
	 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
	 * defined as primary target port asymmetric access states.
	 */
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
		if (!(valid & ALUA_AO_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		if (!(valid & ALUA_AN_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_STANDBY:
		if (!(valid & ALUA_S_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		if (!(valid & ALUA_U_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		if (!(valid & ALUA_LBD_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_OFFLINE:
		/*
		 * OFFLINE state is defined as a secondary target port
		 * asymmetric access state.
		 */
		if (!(valid & ALUA_O_SUP))
			goto not_supported;
		*primary = 0;
		break;
	case ALUA_ACCESS_STATE_TRANSITION:
		/*
		 * Transitioning is set internally, and
		 * cannot be selected manually.
		 */
		goto not_supported;
	default:
		pr_err("Unknown ALUA access state: 0x%02x\n", state);
		return TCM_INVALID_PARAMETER_LIST;
	}

	return 0;

not_supported:
	pr_err("ALUA access state %s not supported\n",
	       core_alua_dump_state(state));
	return TCM_INVALID_PARAMETER_LIST;
}

static char *core_alua_dump_state(int state)
{
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
		return "Active/Optimized";
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		return "Active/NonOptimized";
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		return "LBA Dependent";
	case ALUA_ACCESS_STATE_STANDBY:
		return "Standby";
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		return "Unavailable";
	case ALUA_ACCESS_STATE_OFFLINE:
		return "Offline";
	case ALUA_ACCESS_STATE_TRANSITION:
		return "Transitioning";
	default:
		return "Unknown";
	}

	return NULL;
}

char *core_alua_dump_status(int status)
{
	switch (status) {
	case ALUA_STATUS_NONE:
		return "None";
	case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
		return "Altered by Explicit STPG";
	case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
		return "Altered by Implicit ALUA";
	default:
		return "Unknown";
	}

	return NULL;
}

/*
 * Used by fabric modules to determine when we need to delay processing
 * for the Active/NonOptimized paths..
 */
int core_alua_check_nonop_delay(
	struct se_cmd *cmd)
{
	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
		return 0;
	if (in_interrupt())
		return 0;
	/*
	 * The ALUA Active/NonOptimized access state delay can be disabled
	 * via configfs with a value of zero
	 */
	if (!cmd->alua_nonop_delay)
		return 0;
	/*
	 * struct se_cmd->alua_nonop_delay gets set by a target port group
	 * defined interval in core_alua_state_nonoptimized()
	 */
	msleep_interruptible(cmd->alua_nonop_delay);
	return 0;
}
EXPORT_SYMBOL(core_alua_check_nonop_delay);
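
/*
 * Sketch of the intended call pattern (hypothetical fabric driver, not
 * taken from this file): after target core has accepted a command, the
 * fabric calls core_alua_check_nonop_delay(cmd) from process context so
 * that a configured tg_pt_gp_nonop_delay_msecs throttles I/O on
 * Active/NonOptimized paths before the response is queued.
 */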

static int core_alua_write_tpg_metadata(
	const char *path,
	unsigned char *md_buf,
	u32 md_buf_len)
{
	struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
	int ret;

	if (IS_ERR(file)) {
		pr_err("filp_open(%s) for ALUA metadata failed\n", path);
		return -ENODEV;
	}
	ret = kernel_write(file, md_buf, md_buf_len, 0);
	if (ret < 0)
		pr_err("Error writing ALUA metadata file: %s\n", path);
	fput(file);
	return (ret < 0) ? -EIO : 0;
}
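
/*
 * The metadata written above is a small "key=value\n" text blob (see the
 * snprintf() formats in the callers below) stored under /var/target/alua/,
 * which allows the saved port group state to be restored from user space.
 */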

/*
 * Called with tg_pt_gp->tg_pt_gp_md_mutex held
 */
static int core_alua_update_tpg_primary_metadata(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	unsigned char *md_buf;
	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
	char path[ALUA_METADATA_PATH_LEN];
	int len, rc;

	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
	if (!md_buf) {
		pr_err("Unable to allocate buf for ALUA metadata\n");
		return -ENOMEM;
	}

	memset(path, 0, ALUA_METADATA_PATH_LEN);

	len = snprintf(md_buf, ALUA_MD_BUF_LEN,
			"tg_pt_gp_id=%hu\n"
			"alua_access_state=0x%02x\n"
			"alua_access_status=0x%02x\n",
			tg_pt_gp->tg_pt_gp_id,
			tg_pt_gp->tg_pt_gp_alua_pending_state,
			tg_pt_gp->tg_pt_gp_alua_access_status);

	snprintf(path, ALUA_METADATA_PATH_LEN,
		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));

	rc = core_alua_write_tpg_metadata(path, md_buf, len);
	kfree(md_buf);
	return rc;
}

static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	struct se_dev_entry *se_deve;
	struct se_lun *lun;
	struct se_lun_acl *lacl;
	bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
			 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
				lun_tg_pt_gp_link) {
		/*
		 * After an implicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition for the initiator port associated with every I_T
		 * nexus with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED.
		 *
		 * After an explicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED for the initiator port associated with
		 * every I_T nexus other than the I_T nexus on which the SET
		 * TARGET PORT GROUPS command was received.
		 */
		atomic_inc_mb(&lun->lun_active);
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

		spin_lock_bh(&lun->lun_deve_lock);
		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
			lacl = rcu_dereference_check(se_deve->se_lun_acl,
					lockdep_is_held(&lun->lun_deve_lock));

			/*
			 * se_deve->se_lun_acl pointer may be NULL for an
			 * entry created without explicit Node+MappedLUN ACLs
			 */
			if (!lacl)
				continue;

			if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
			     ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
			    (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
			    (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
			    (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
			    (tg_pt_gp->tg_pt_gp_alua_lun == lun))
				continue;

			core_scsi3_ua_allocate(lacl->se_lun_nacl,
				se_deve->mapped_lun, 0x2A,
				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
		}
		spin_unlock_bh(&lun->lun_deve_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		atomic_dec_mb(&lun->lun_active);
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	/*
	 * Update the ALUA metadata buf that has been allocated in
	 * core_alua_do_port_transition(), this metadata will be written
	 * to struct file.
	 *
	 * Note that there is the case where we do not want to update the
	 * metadata when the saved metadata is being parsed in userspace
	 * when setting the existing port access state and access status.
	 *
	 * Also note that the failure to write out the ALUA metadata to
	 * struct file does NOT affect the actual ALUA transition.
	 */
	if (tg_pt_gp->tg_pt_gp_write_metadata) {
		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
		core_alua_update_tpg_primary_metadata(tg_pt_gp);
		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
	}
	/*
	 * Set the current primary ALUA access state to the requested new state
	 */
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
		   tg_pt_gp->tg_pt_gp_alua_pending_state);

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" from primary access state %s to %s\n", (explicit) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id,
		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	if (tg_pt_gp->tg_pt_gp_transition_complete)
		complete(tg_pt_gp->tg_pt_gp_transition_complete);
}
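
/*
 * The atomic_dec() of tg_pt_gp_ref_cnt above drops the reference taken
 * in core_alua_do_transition_tg_pt() when the work item was queued, and
 * the optional completion wakes a caller waiting synchronously on the
 * transition.
 */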

static int core_alua_do_transition_tg_pt(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	int new_state,
	int explicit)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	DECLARE_COMPLETION_ONSTACK(wait);

	/* Nothing to be done here */
	if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
		return 0;

	if (new_state == ALUA_ACCESS_STATE_TRANSITION)
		return -EAGAIN;

	/*
	 * Flush any pending transitions
	 */
	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
	    atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
	    ALUA_ACCESS_STATE_TRANSITION) {
		/* Just in case */
		tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
		flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
		wait_for_completion(&wait);
		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
		return 0;
	}

	/*
	 * Save the old primary ALUA access state, and set the current state
	 * to ALUA_ACCESS_STATE_TRANSITION.
	 */
	tg_pt_gp->tg_pt_gp_alua_previous_state =
		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
	tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;

	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
			ALUA_ACCESS_STATE_TRANSITION);
	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

	/*
	 * Check for the optional ALUA primary state transition delay
	 */
	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

	/*
	 * Take a reference for workqueue item
	 */
	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
		unsigned long transition_tmo;

		transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
				   &tg_pt_gp->tg_pt_gp_transition_work,
				   transition_tmo);
	} else {
		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
				   &tg_pt_gp->tg_pt_gp_transition_work, 0);
		wait_for_completion(&wait);
		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
	}

	return 0;
}
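
/*
 * Queueing policy above: an implicit transition with a non-zero
 * tg_pt_gp_implicit_trans_secs leaves the group in the Transitioning
 * state until the delayed work fires, while every other case queues the
 * work immediately and blocks on the on-stack completion until the new
 * state has been committed.
 */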

int core_alua_do_port_transition(
	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
	struct se_device *l_dev,
	struct se_lun *l_lun,
	struct se_node_acl *l_nacl,
	int new_state,
	int explicit)
{
	struct se_device *dev;
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int primary, valid_states, rc = 0;

	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
	if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
		return -EINVAL;

	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
	lu_gp = local_lu_gp_mem->lu_gp;
	atomic_inc(&lu_gp->lu_gp_ref_cnt);
	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
	/*
	 * For storage objects that are members of the 'default_lu_gp',
	 * we only do transition on the passed *l_tg_pt_gp, and not
	 * on all of the matching target port groups IDs in default_lu_gp.
	 */
	if (!lu_gp->lu_gp_id) {
		/*
		 * core_alua_do_transition_tg_pt() will always return
		 * success.
		 */
		l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
		l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
		rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
						   new_state, explicit);
		atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
		return rc;
	}
	/*
	 * For all other LU groups aside from 'default_lu_gp', walk all of
	 * the associated storage objects looking for a matching target port
	 * group ID from the local target port group.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
				lu_gp_mem_list) {

		dev = lu_gp_mem->lu_gp_mem_dev;
		atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
		spin_unlock(&lu_gp->lu_gp_lock);

		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
		list_for_each_entry(tg_pt_gp,
				&dev->t10_alua.tg_pt_gps_list,
				tg_pt_gp_list) {

			if (!tg_pt_gp->tg_pt_gp_valid_id)
				continue;
			/*
			 * If the target port group asymmetric access state
			 * is changed for any target port group accessible via
			 * a logical unit within a LU group, the target port
			 * group asymmetric access states for the same target
			 * port group accessible via other logical units in
			 * that LU group will also change.
			 */
			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
				continue;

			if (l_tg_pt_gp == tg_pt_gp) {
				tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
				tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
			} else {
				tg_pt_gp->tg_pt_gp_alua_lun = NULL;
				tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
			}
			atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
			/*
			 * core_alua_do_transition_tg_pt() will always return
			 * success.
			 */
			rc = core_alua_do_transition_tg_pt(tg_pt_gp,
					new_state, explicit);

			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
			atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
			if (rc)
				break;
		}
		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

		spin_lock(&lu_gp->lu_gp_lock);
		atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	if (!rc) {
		pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
			 " Group IDs: %hu %s transition to primary state: %s\n",
			 config_item_name(&lu_gp->lu_gp_group.cg_item),
			 l_tg_pt_gp->tg_pt_gp_id,
			 (explicit) ? "explicit" : "implicit",
			 core_alua_dump_state(new_state));
	}

	atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
	return rc;
}

static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
{
	struct se_portal_group *se_tpg = lun->lun_tpg;
	unsigned char *md_buf;
	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
	int len, rc;

	mutex_lock(&lun->lun_tg_pt_md_mutex);

	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
	if (!md_buf) {
		pr_err("Unable to allocate buf for ALUA metadata\n");
		rc = -ENOMEM;
		goto out_unlock;
	}

	memset(path, 0, ALUA_METADATA_PATH_LEN);
	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);

	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
			se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));

	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
			"alua_tg_pt_status=0x%02x\n",
			atomic_read(&lun->lun_tg_pt_secondary_offline),
			lun->lun_tg_pt_secondary_stat);

	snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
			se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
			lun->unpacked_lun);

	rc = core_alua_write_tpg_metadata(path, md_buf, len);
	kfree(md_buf);

out_unlock:
	mutex_unlock(&lun->lun_tg_pt_md_mutex);
	return rc;
}

static int core_alua_set_tg_pt_secondary_state(
	struct se_lun *lun,
	int explicit,
	int offline)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int trans_delay_msecs;

	spin_lock(&lun->lun_tg_pt_gp_lock);
	tg_pt_gp = lun->lun_tg_pt_gp;
	if (!tg_pt_gp) {
		spin_unlock(&lun->lun_tg_pt_gp_lock);
		pr_err("Unable to complete secondary state"
				" transition\n");
		return -EINVAL;
	}
	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
	/*
	 * Set the secondary ALUA target port access state to OFFLINE
	 * or release the previously secondary state for struct se_lun
	 */
	if (offline)
		atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
	else
		atomic_set(&lun->lun_tg_pt_secondary_offline, 0);

	lun->lun_tg_pt_secondary_stat = (explicit) ?
			ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
			ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" to secondary access state: %s\n", (explicit) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");

	spin_unlock(&lun->lun_tg_pt_gp_lock);
	/*
	 * Do the optional transition delay after we set the secondary
	 * ALUA access state.
	 */
	if (trans_delay_msecs != 0)
		msleep_interruptible(trans_delay_msecs);
	/*
	 * See if we need to update the ALUA fabric port metadata for
	 * secondary state and status
	 */
	if (lun->lun_tg_pt_secondary_write_md)
		core_alua_update_tpg_secondary_metadata(lun);

	return 0;
}
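
/*
 * Unlike the primary access state, which lives in the tg_pt_gp and is
 * shared by all member ports, the secondary OFFLINE state set above is
 * tracked per struct se_lun via lun_tg_pt_secondary_offline and
 * lun_tg_pt_secondary_stat.
 */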

struct t10_alua_lba_map *
core_alua_allocate_lba_map(struct list_head *list,
			   u64 first_lba, u64 last_lba)
{
	struct t10_alua_lba_map *lba_map;

	lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
	if (!lba_map) {
		pr_err("Unable to allocate struct t10_alua_lba_map\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
	lba_map->lba_map_first_lba = first_lba;
	lba_map->lba_map_last_lba = last_lba;

	list_add_tail(&lba_map->lba_map_list, list);
	return lba_map;
}

int
core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
			       int pg_id, int state)
{
	struct t10_alua_lba_map_member *lba_map_mem;

	list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
			    lba_map_mem_list) {
		if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
			pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
			return -EINVAL;
		}
	}

	lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
	if (!lba_map_mem) {
		pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
		return -ENOMEM;
	}
	lba_map_mem->lba_map_mem_alua_state = state;
	lba_map_mem->lba_map_mem_alua_pg_id = pg_id;

	list_add_tail(&lba_map_mem->lba_map_mem_list,
		      &lba_map->lba_map_mem_list);
	return 0;
}

void
core_alua_free_lba_map(struct list_head *lba_list)
{
	struct t10_alua_lba_map *lba_map, *lba_map_tmp;
	struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;

	list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
				 lba_map_list) {
		list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
					 &lba_map->lba_map_mem_list,
					 lba_map_mem_list) {
			list_del(&lba_map_mem->lba_map_mem_list);
			kmem_cache_free(t10_alua_lba_map_mem_cache,
					lba_map_mem);
		}
		list_del(&lba_map->lba_map_list);
		kmem_cache_free(t10_alua_lba_map_cache, lba_map);
	}
}

void
core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
		      int segment_size, int segment_mult)
{
	struct list_head old_lba_map_list;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int activate = 0, supported;

	INIT_LIST_HEAD(&old_lba_map_list);
	spin_lock(&dev->t10_alua.lba_map_lock);
	dev->t10_alua.lba_map_segment_size = segment_size;
	dev->t10_alua.lba_map_segment_multiplier = segment_mult;
	list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
	if (lba_map_list) {
		list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
		activate = 1;
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
			    tg_pt_gp_list) {

		if (!tg_pt_gp->tg_pt_gp_valid_id)
			continue;
		supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
		if (activate)
			supported |= ALUA_LBD_SUP;
		else
			supported &= ~ALUA_LBD_SUP;
		tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
	}
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	core_alua_free_lba_map(&old_lba_map_list);
}

struct t10_alua_lu_gp *
core_alua_allocate_lu_gp(const char *name, int def_group)
{
	struct t10_alua_lu_gp *lu_gp;

	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
	if (!lu_gp) {
		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
	spin_lock_init(&lu_gp->lu_gp_lock);
	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);

	if (def_group) {
		lu_gp->lu_gp_id = alua_lu_gps_counter++;
		lu_gp->lu_gp_valid_id = 1;
		alua_lu_gps_count++;
	}

	return lu_gp;
}

int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
{
	struct t10_alua_lu_gp *lu_gp_tmp;
	u16 lu_gp_id_tmp;

	/*
	 * The lu_gp->lu_gp_id may only be set once..
	 */
	if (lu_gp->lu_gp_valid_id) {
		pr_warn("ALUA LU Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&lu_gps_lock);
	if (alua_lu_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_lu_gps_count:"
				" 0x0000ffff reached\n");
		spin_unlock(&lu_gps_lock);
		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
		return -ENOSPC;
	}
again:
	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
				alua_lu_gps_counter++;

	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
			if (!lu_gp_id)
				goto again;

			pr_warn("ALUA Logical Unit Group ID: %hu"
				" already exists, ignoring request\n",
				lu_gp_id);
			spin_unlock(&lu_gps_lock);
			return -EINVAL;
		}
	}

	lu_gp->lu_gp_id = lu_gp_id_tmp;
	lu_gp->lu_gp_valid_id = 1;
	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
	alua_lu_gps_count++;
	spin_unlock(&lu_gps_lock);

	return 0;
}
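
/*
 * Passing lu_gp_id == 0 above requests automatic ID assignment: the
 * current counter value is taken and, on collision with an existing
 * group, the "again:" path retries with the next counter value.
 */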

static struct t10_alua_lu_gp_member *
core_alua_allocate_lu_gp_mem(struct se_device *dev)
{
	struct t10_alua_lu_gp_member *lu_gp_mem;

	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
	if (!lu_gp_mem) {
		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);

	lu_gp_mem->lu_gp_mem_dev = dev;
	dev->dev_alua_lu_gp_mem = lu_gp_mem;

	return lu_gp_mem;
}

void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
{
	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;

	/*
	 * Once we have reached this point, config_item_put() has
	 * already been called from target_core_alua_drop_lu_gp().
	 *
	 * Here, we remove the *lu_gp from the global list so that
	 * no associations can be made while we are releasing
	 * struct t10_alua_lu_gp.
	 */
	spin_lock(&lu_gps_lock);
	list_del(&lu_gp->lu_gp_node);
	alua_lu_gps_count--;
	spin_unlock(&lu_gps_lock);

	/*
	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
	 * released with core_alua_put_lu_gp_from_name()
	 */
	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
		cpu_relax();

	/*
	 * Release reference to struct t10_alua_lu_gp * from all associated
	 * struct se_device.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		/*
		 * lu_gp_mem is associated with a single
		 * struct se_device->dev_alua_lu_gp_mem, and is released when
		 * struct se_device is released via core_alua_free_lu_gp_mem().
		 *
		 * If the passed lu_gp does NOT match the default_lu_gp, assume
		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
		 */
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		if (lu_gp != default_lu_gp)
			__core_alua_attach_lu_gp_mem(lu_gp_mem,
					default_lu_gp);
		else
			lu_gp_mem->lu_gp = NULL;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		spin_lock(&lu_gp->lu_gp_lock);
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
}

void core_alua_free_lu_gp_mem(struct se_device *dev)
{
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return;

	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
		cpu_relax();

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		spin_lock(&lu_gp->lu_gp_lock);
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		lu_gp_mem->lu_gp = NULL;
	}
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
}

struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
{
	struct t10_alua_lu_gp *lu_gp;
	struct config_item *ci;

	spin_lock(&lu_gps_lock);
	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
		if (!lu_gp->lu_gp_valid_id)
			continue;
		ci = &lu_gp->lu_gp_group.cg_item;
		if (!strcmp(config_item_name(ci), name)) {
			atomic_inc(&lu_gp->lu_gp_ref_cnt);
			spin_unlock(&lu_gps_lock);
			return lu_gp;
		}
	}
	spin_unlock(&lu_gps_lock);

	return NULL;
}

void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gps_lock);
	atomic_dec(&lu_gp->lu_gp_ref_cnt);
	spin_unlock(&lu_gps_lock);
}

/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */
void __core_alua_attach_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	lu_gp_mem->lu_gp = lu_gp;
	lu_gp_mem->lu_gp_assoc = 1;
	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
	lu_gp->lu_gp_members++;
	spin_unlock(&lu_gp->lu_gp_lock);
}

/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */
void __core_alua_drop_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	list_del(&lu_gp_mem->lu_gp_mem_list);
	lu_gp_mem->lu_gp = NULL;
	lu_gp_mem->lu_gp_assoc = 0;
	lu_gp->lu_gp_members--;
	spin_unlock(&lu_gp->lu_gp_lock);
}

struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
		const char *name, int def_group)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
	if (!tg_pt_gp) {
		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
		return NULL;
	}
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
	INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
			  core_alua_do_transition_tg_pt_work);
	tg_pt_gp->tg_pt_gp_dev = dev;
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
		   ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
	/*
	 * Enable both explicit and implicit ALUA support by default
	 */
	tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
	/*
	 * Set the default Active/NonOptimized Delay in milliseconds
	 */
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;

	/*
	 * Enable all supported states
	 */
	tg_pt_gp->tg_pt_gp_alua_supported_states =
	    ALUA_T_SUP | ALUA_O_SUP |
	    ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;

	if (def_group) {
		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
		tg_pt_gp->tg_pt_gp_id =
				dev->t10_alua.alua_tg_pt_gps_counter++;
		tg_pt_gp->tg_pt_gp_valid_id = 1;
		dev->t10_alua.alua_tg_pt_gps_count++;
		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
			      &dev->t10_alua.tg_pt_gps_list);
		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	}

	return tg_pt_gp;
}

int core_alua_set_tg_pt_gp_id(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	u16 tg_pt_gp_id)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
	u16 tg_pt_gp_id_tmp;

	/*
	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
	 */
	if (tg_pt_gp->tg_pt_gp_valid_id) {
		pr_warn("ALUA TG PT Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
			" 0x0000ffff reached\n");
		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
		return -ENOSPC;
	}
again:
	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
			dev->t10_alua.alua_tg_pt_gps_counter++;

	list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
			if (!tg_pt_gp_id)
				goto again;

			pr_err("ALUA Target Port Group ID: %hu already"
				" exists, ignoring request\n", tg_pt_gp_id);
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
			return -EINVAL;
		}
	}

	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
	tg_pt_gp->tg_pt_gp_valid_id = 1;
	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
			&dev->t10_alua.tg_pt_gps_list);
	dev->t10_alua.alua_tg_pt_gps_count++;
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	return 0;
}

void core_alua_free_tg_pt_gp(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	struct se_lun *lun, *next;

	/*
	 * Once we have reached this point, config_item_put() has already
	 * been called from target_core_alua_drop_tg_pt_gp().
	 *
	 * Here we remove *tg_pt_gp from the global list so that
	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
	 */
	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	list_del(&tg_pt_gp->tg_pt_gp_list);
	dev->t10_alua.alua_tg_pt_gps_counter--;
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);

	/*
	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
	 * core_alua_get_tg_pt_gp_by_name() in
	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
	 * to be released with core_alua_put_tg_pt_gp_from_name().
	 */
	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
		cpu_relax();

	/*
	 * Release reference to struct t10_alua_tg_pt_gp from all associated
	 * LUNs
	 */
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry_safe(lun, next,
			&tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
		list_del_init(&lun->lun_tg_pt_gp_link);
		tg_pt_gp->tg_pt_gp_members--;

		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
		/*
		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
		 * assume we want to re-associate a given tg_pt_gp_mem with
		 * default_tg_pt_gp.
		 */
		spin_lock(&lun->lun_tg_pt_gp_lock);
		if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
			__target_attach_tg_pt_gp(lun,
					dev->t10_alua.default_tg_pt_gp);
		} else
			lun->lun_tg_pt_gp = NULL;
		spin_unlock(&lun->lun_tg_pt_gp_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
}

static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
		struct se_device *dev, const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_item *ci;

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		if (!tg_pt_gp->tg_pt_gp_valid_id)
			continue;
		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		if (!strcmp(config_item_name(ci), name)) {
			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
			return tg_pt_gp;
		}
	}
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	return NULL;
}

static void core_alua_put_tg_pt_gp_from_name(
		struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
}

static void __target_attach_tg_pt_gp(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	assert_spin_locked(&lun->lun_tg_pt_gp_lock);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	lun->lun_tg_pt_gp = tg_pt_gp;
	list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
	tg_pt_gp->tg_pt_gp_members++;
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}

void target_attach_tg_pt_gp(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	spin_lock(&lun->lun_tg_pt_gp_lock);
	__target_attach_tg_pt_gp(lun, tg_pt_gp);
	spin_unlock(&lun->lun_tg_pt_gp_lock);
}

static void __target_detach_tg_pt_gp(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	assert_spin_locked(&lun->lun_tg_pt_gp_lock);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_del_init(&lun->lun_tg_pt_gp_link);
	tg_pt_gp->tg_pt_gp_members--;
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	lun->lun_tg_pt_gp = NULL;
}

void target_detach_tg_pt_gp(struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	spin_lock(&lun->lun_tg_pt_gp_lock);
	tg_pt_gp = lun->lun_tg_pt_gp;
	if (tg_pt_gp)
		__target_detach_tg_pt_gp(lun, tg_pt_gp);
	spin_unlock(&lun->lun_tg_pt_gp_lock);
}
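
/*
 * Lock ordering for the attach/detach helpers above: the caller holds
 * lun->lun_tg_pt_gp_lock (asserted in the __ variants), and the helpers
 * then nest tg_pt_gp->tg_pt_gp_lock inside it.
 */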

ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
{
	struct config_item *tg_pt_ci;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	ssize_t len = 0;

	spin_lock(&lun->lun_tg_pt_gp_lock);
	tg_pt_gp = lun->lun_tg_pt_gp;
	if (tg_pt_gp) {
		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
			" %hu\nTG Port Primary Access State: %s\nTG Port "
			"Primary Access Status: %s\nTG Port Secondary Access"
			" State: %s\nTG Port Secondary Access Status: %s\n",
			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
			core_alua_dump_state(atomic_read(
					&tg_pt_gp->tg_pt_gp_alua_access_state)),
			core_alua_dump_status(
				tg_pt_gp->tg_pt_gp_alua_access_status),
			atomic_read(&lun->lun_tg_pt_secondary_offline) ?
			"Offline" : "None",
			core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
	}
	spin_unlock(&lun->lun_tg_pt_gp_lock);

	return len;
}

ssize_t core_alua_store_tg_pt_gp_info(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_device *dev = lun->lun_se_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
	unsigned char buf[TG_PT_GROUP_NAME_BUF];
	int move = 0;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		return -ENODEV;

	if (count > TG_PT_GROUP_NAME_BUF) {
		pr_err("ALUA Target Port Group alias too large!\n");
		return -EINVAL;
	}
	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
	memcpy(buf, page, count);
	/*
	 * Any ALUA target port group alias besides "NULL" means we will be
	 * making a new group association.
	 */
	if (strcmp(strstrip(buf), "NULL")) {
		/*
		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
		 * struct t10_alua_tg_pt_gp.  This reference is released with
		 * core_alua_put_tg_pt_gp_from_name() below.
		 */
		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
					strstrip(buf));
		if (!tg_pt_gp_new)
			return -ENODEV;
	}

	spin_lock(&lun->lun_tg_pt_gp_lock);
	tg_pt_gp = lun->lun_tg_pt_gp;
	if (tg_pt_gp) {
		/*
		 * Clearing an existing tg_pt_gp association, and replacing
		 * with the default_tg_pt_gp.
		 */
		if (!tg_pt_gp_new) {
			pr_debug("Target_Core_ConfigFS: Moving"
				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
				" alua/%s, ID: %hu back to"
				" default_tg_pt_gp\n",
				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
				tpg->se_tpg_tfo->tpg_get_tag(tpg),
				config_item_name(&lun->lun_group.cg_item),
				config_item_name(
					&tg_pt_gp->tg_pt_gp_group.cg_item),
				tg_pt_gp->tg_pt_gp_id);

			__target_detach_tg_pt_gp(lun, tg_pt_gp);
			__target_attach_tg_pt_gp(lun,
					dev->t10_alua.default_tg_pt_gp);
			spin_unlock(&lun->lun_tg_pt_gp_lock);

			return count;
		}
		__target_detach_tg_pt_gp(lun, tg_pt_gp);
		move = 1;
	}

	__target_attach_tg_pt_gp(lun, tg_pt_gp_new);
	spin_unlock(&lun->lun_tg_pt_gp_lock);
	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		config_item_name(&lun->lun_group.cg_item),
		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
		tg_pt_gp_new->tg_pt_gp_id);

	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
	return count;
}
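
/*
 * Writing the string "NULL" to this configfs attribute drops an explicit
 * group association above, and the LUN falls back to
 * dev->t10_alua.default_tg_pt_gp.
 */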

ssize_t core_alua_show_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
		return sprintf(page, "Implicit and Explicit\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
		return sprintf(page, "Implicit\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
		return sprintf(page, "Explicit\n");
	else
		return sprintf(page, "None\n");
}

ssize_t core_alua_store_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_access_type\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
		pr_err("Illegal value for alua_access_type:"
				" %lu\n", tmp);
		return -EINVAL;
	}
	if (tmp == 3)
		tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
	else if (tmp == 2)
		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
	else if (tmp == 1)
		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
	else
		tg_pt_gp->tg_pt_gp_alua_access_type = 0;

	return count;
}

ssize_t core_alua_show_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
}

ssize_t core_alua_store_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract nonop_delay_msecs\n");
		return ret;
	}
	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_NONOP_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;

	return count;
}

ssize_t core_alua_show_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
}

ssize_t core_alua_store_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract trans_delay_msecs\n");
		return ret;
	}
	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
		pr_err("Passed trans_delay_msecs: %lu, exceeds"
			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_TRANS_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;

	return count;
}

ssize_t core_alua_show_implicit_trans_secs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
}

ssize_t core_alua_store_implicit_trans_secs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract implicit_trans_secs\n");
		return ret;
	}
	if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
		pr_err("Passed implicit_trans_secs: %lu, exceeds"
			" ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
			ALUA_MAX_IMPLICIT_TRANS_SECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;

	return count;
}

ssize_t core_alua_show_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
}

ssize_t core_alua_store_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract preferred ALUA value\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_pref = (int)tmp;

	return count;
}

ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
	return sprintf(page, "%d\n",
		atomic_read(&lun->lun_tg_pt_secondary_offline));
}

ssize_t core_alua_store_offline_bit(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	struct se_device *dev = lun->lun_se_dev;
	unsigned long tmp;
	int ret;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		return -ENODEV;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_offline value\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
				tmp);
		return -EINVAL;
	}

	ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
	if (ret < 0)
		return -EINVAL;

	return count;
}

ssize_t core_alua_show_secondary_status(
	struct se_lun *lun,
	char *page)
{
	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
}

ssize_t core_alua_store_secondary_status(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_status\n");
		return ret;
	}
	if ((tmp != ALUA_STATUS_NONE) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
				tmp);
		return -EINVAL;
	}
	lun->lun_tg_pt_secondary_stat = (int)tmp;

	return count;
}

ssize_t core_alua_show_secondary_write_metadata(
	struct se_lun *lun,
	char *page)
{
	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
}

ssize_t core_alua_store_secondary_write_metadata(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_write_md\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_write_md:"
				" %lu\n", tmp);
		return -EINVAL;
	}
	lun->lun_tg_pt_secondary_write_md = (int)tmp;

	return count;
}

int core_setup_alua(struct se_device *dev)
{
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		struct t10_alua_lu_gp_member *lu_gp_mem;

		/*
		 * Associate this struct se_device with the default ALUA
		 * LUN Group.
		 */
		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
		if (IS_ERR(lu_gp_mem))
			return PTR_ERR(lu_gp_mem);

		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		__core_alua_attach_lu_gp_mem(lu_gp_mem,
				default_lu_gp);
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		pr_debug("%s: Adding to default ALUA LU Group:"
			" core/alua/lu_gps/default_lu_gp\n",
			dev->transport->name);
	}

	return 0;
}