/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_pr.h"

extern struct se_device *g_lun0_dev;

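/*
 * Global list of all registered portal groups, protected by tpg_lock.
 * Entries are added in core_tpg_register() and removed in
 * core_tpg_deregister().
 */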
static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*      __core_tpg_get_initiator_node_acl():
 *
 *      tpg->acl_node_lock must be held by the caller.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname))
                        return acl;
        }

        return NULL;
}

/*      core_tpg_get_initiator_node_acl():
 *
 *      Look up an initiator node ACL by name, taking tpg->acl_node_lock.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        spin_lock_irq(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        spin_unlock_irq(&tpg->acl_node_lock);

        return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

/*      core_tpg_add_node_to_devs():
 *
 *      Create demo-mode MappedLUNs for @acl from all active LUNs in @tpg.
 */
void core_tpg_add_node_to_devs(
        struct se_node_acl *acl,
        struct se_portal_group *tpg)
{
        u32 lun_access = 0;
        struct se_lun *lun;
        struct se_device *dev;

        mutex_lock(&tpg->tpg_lun_mutex);
        hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
                if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
                        continue;

                dev = lun->lun_se_dev;
                /*
                 * By default the LIO-Target $FABRIC_MOD has
                 * demo_mode_write_protect enabled, i.e. READ_ONLY access.
                 */
                if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
                        lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        /*
                         * In the default read-only demo mode, allow R/W
                         * only for non-disk devices (e.g. optical drives).
                         */
                        if (dev->transport->get_device_type(dev) == TYPE_DISK)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                }

                pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
                        " access for LUN in Demo Mode\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                        (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
                        "READ-WRITE" : "READ-ONLY");

                core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                                 lun_access, acl, tpg);
                /*
                 * Check to see if there are any existing persistent reservation
                 * APTPL pre-registrations that need to be enabled for this
                 * dynamic LUN ACL now.
                 */
                core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
                                                    lun->unpacked_lun);
        }
        mutex_unlock(&tpg->tpg_lun_mutex);
}

/*      core_set_queue_depth_for_node():
 *
 *      Ensure the ACL has a sane (non-zero) queue depth.
 */
static int core_set_queue_depth_for_node(
        struct se_portal_group *tpg,
        struct se_node_acl *acl)
{
        if (!acl->queue_depth) {
                pr_err("Queue depth for %s Initiator Node: %s is 0, "
                        "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        acl->initiatorname);
                acl->queue_depth = 1;
        }

        return 0;
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
                const unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
                        GFP_KERNEL);
        if (!acl)
                return NULL;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        INIT_HLIST_HEAD(&acl->lun_entry_hlist);
        kref_init(&acl->acl_kref);
        init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->nacl_sess_lock);
        mutex_init(&acl->lun_entry_mutex);
        atomic_set(&acl->acl_pr_ref_count, 0);
        if (tpg->se_tpg_tfo->tpg_get_default_depth)
                acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
        else
                acl->queue_depth = 1;
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

        tpg->se_tpg_tfo->set_default_node_attributes(acl);

        if (core_set_queue_depth_for_node(tpg, acl) < 0)
                goto out_free_acl;

        return acl;

out_free_acl:
        kfree(acl);
        return NULL;
}

static void target_add_node_acl(struct se_node_acl *acl)
{
        struct se_portal_group *tpg = acl->se_tpg;

        spin_lock_irq(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_irq(&tpg->acl_node_lock);

        pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n",
                tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg),
                acl->dynamic_node_acl ? "DYNAMIC" : "",
                acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(),
                acl->initiatorname);
}

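/*
 * Called at fabric session login: return an existing explicit ACL for
 * @initiatorname, or, when the fabric allows demo mode, create a dynamic
 * ACL (optionally with demo-mode MappedLUNs) on the fly.
 */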
struct se_node_acl *core_tpg_check_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl)
                return acl;

        if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
                return NULL;

        acl = target_alloc_node_acl(tpg, initiatorname);
        if (!acl)
                return NULL;
        acl->dynamic_node_acl = 1;

        /*
         * Here we only create demo-mode MappedLUNs from the active
         * TPG LUNs, unless the fabric explicitly requests login-only
         * demo mode via tpg_check_demo_mode_login_only() == 1.
         */
        if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
            (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
                core_tpg_add_node_to_devs(acl, tpg);

        target_add_node_acl(acl);
        return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
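/*
 * Example (sketch only; a hypothetical fabric login path, where se_tpg
 * and iqn_buf are assumed locals):
 *
 *      struct se_node_acl *nacl;
 *
 *      nacl = core_tpg_check_initiator_node_acl(se_tpg, iqn_buf);
 *      if (!nacl)
 *              return -EPERM;  (no explicit ACL and demo mode disabled)
 */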
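/*
 * Busy-wait until all SPC-3 persistent reservation references to this
 * se_node_acl have been dropped (acl_pr_ref_count reaches zero).
 */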
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->acl_pr_ref_count) != 0)
                cpu_relax();
}
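/*
 * Called from configfs to create an explicit NodeACL.  If a dynamic ACL
 * for @initiatorname already exists, it is converted to an explicit one
 * in place.
 */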
struct se_node_acl *core_tpg_add_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        spin_lock_irq(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (acl->dynamic_node_acl) {
                        acl->dynamic_node_acl = 0;
                        pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
                                " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                                tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
                        spin_unlock_irq(&tpg->acl_node_lock);
                        return acl;
                }

                pr_err("ACL entry for %s Initiator"
                        " Node %s already exists for TPG %u, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock_irq(&tpg->acl_node_lock);
                return ERR_PTR(-EEXIST);
        }
        spin_unlock_irq(&tpg->acl_node_lock);

        acl = target_alloc_node_acl(tpg, initiatorname);
        if (!acl)
                return ERR_PTR(-ENOMEM);

        target_add_node_acl(acl);
        return acl;
}
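/*
 * Called from configfs to delete a NodeACL: the ACL is unlinked from the
 * TPG, any remaining sessions for it are shut down, and the ACL memory is
 * freed once the final reference is dropped.
 */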
void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
        struct se_portal_group *tpg = acl->se_tpg;
        LIST_HEAD(sess_list);
        struct se_session *sess, *sess_tmp;
        unsigned long flags;
        int rc;

        spin_lock_irq(&tpg->acl_node_lock);
        if (acl->dynamic_node_acl)
                acl->dynamic_node_acl = 0;
        list_del(&acl->acl_list);
        tpg->num_node_acls--;
        spin_unlock_irq(&tpg->acl_node_lock);

        spin_lock_irqsave(&acl->nacl_sess_lock, flags);
        acl->acl_stop = 1;

        list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
                                sess_acl_list) {
                if (sess->sess_tearing_down != 0)
                        continue;

                target_get_session(sess);
                list_move(&sess->sess_acl_list, &sess_list);
        }
        spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

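        /*
         * For each moved session: the first target_put_session() balances
         * the target_get_session() taken above.  When shutdown_session()
         * returns non-zero the session must be released by this context,
         * so an additional reference is dropped.
         */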
        list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
                list_del(&sess->sess_acl_list);

                rc = tpg->se_tpg_tfo->shutdown_session(sess);
                target_put_session(sess);
                if (!rc)
                        continue;
                target_put_session(sess);
        }
        target_put_nacl(acl);
        /*
         * Wait for the last target_put_nacl() to complete in
         * target_complete_nacl() for the active fabric session
         * transport_deregister_session() callbacks.
         */
        wait_for_completion(&acl->acl_free_comp);

        core_tpg_wait_for_nacl_pr_ref(acl);
        core_free_device_list_for_node(acl, tpg);

        pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

        kfree(acl);
}

/*      core_tpg_set_initiator_node_queue_depth():
 *
 *      Change the queue depth for an existing NodeACL, optionally forcing
 *      reinstatement of any active sessions for that initiator.
 */
int core_tpg_set_initiator_node_queue_depth(
        struct se_portal_group *tpg,
        unsigned char *initiatorname,
        u32 queue_depth,
        int force)
{
        struct se_session *sess, *init_sess = NULL;
        struct se_node_acl *acl;
        unsigned long flags;
        int dynamic_acl = 0;

        spin_lock_irq(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!acl) {
                pr_err("Access Control List entry for %s Initiator"
                        " Node %s does not exist for TPG %hu, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock_irq(&tpg->acl_node_lock);
                return -ENODEV;
        }
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
        spin_unlock_irq(&tpg->acl_node_lock);

        spin_lock_irqsave(&tpg->session_lock, flags);
        list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;

                if (!force) {
                        pr_err("Unable to change queue depth for %s"
                                " Initiator Node: %s while session is"
                                " operational.  To forcefully change the queue"
                                " depth and force session reinstatement"
                                " use the \"force=1\" parameter.\n",
                                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
                        spin_unlock_irqrestore(&tpg->session_lock, flags);

                        spin_lock_irq(&tpg->acl_node_lock);
                        if (dynamic_acl)
                                acl->dynamic_node_acl = 1;
                        spin_unlock_irq(&tpg->acl_node_lock);
                        return -EEXIST;
                }
                /*
                 * Determine if the session needs to be closed by our context.
                 */
                if (!tpg->se_tpg_tfo->shutdown_session(sess))
                        continue;

                init_sess = sess;
                break;
        }

        /*
         * User has requested to change the queue depth for an Initiator Node.
         * Change the value in the Node's struct se_node_acl, and call
         * core_set_queue_depth_for_node() to set the requested queue depth.
         *
         * Finally call tpg->se_tpg_tfo->close_session() to force session
         * reinstatement to occur if there is an active session for the
         * $FABRIC_MOD Initiator Node in question.
         */
        acl->queue_depth = queue_depth;

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                spin_unlock_irqrestore(&tpg->session_lock, flags);
                /*
                 * Force session reinstatement if
                 * core_set_queue_depth_for_node() failed, because we assume
                 * the $FABRIC_MOD has already set the session reinstatement
                 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
                 */
                if (init_sess)
                        tpg->se_tpg_tfo->close_session(init_sess);

                spin_lock_irq(&tpg->acl_node_lock);
                if (dynamic_acl)
                        acl->dynamic_node_acl = 1;
                spin_unlock_irq(&tpg->acl_node_lock);
                return -EINVAL;
        }
        spin_unlock_irqrestore(&tpg->session_lock, flags);
        /*
         * If the $FABRIC_MOD session for the Initiator Node ACL exists,
         * forcefully shutdown the $FABRIC_MOD session/nexus.
         */
        if (init_sess)
                tpg->se_tpg_tfo->close_session(init_sess);

        pr_debug("Successfully changed queue depth to: %d for Initiator"
                " Node: %s on %s Target Portal Group: %u\n", queue_depth,
                initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg));

        spin_lock_irq(&tpg->acl_node_lock);
        if (dynamic_acl)
                acl->dynamic_node_acl = 1;
        spin_unlock_irq(&tpg->acl_node_lock);

        return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
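/*
 * Example (sketch only; a hypothetical caller, where se_tpg and name are
 * assumed locals): forcing a new queue depth of 64 for an initiator and
 * allowing session reinstatement:
 *
 *      ret = core_tpg_set_initiator_node_queue_depth(se_tpg, name, 64, 1);
 *      if (ret < 0)
 *              return ret;
 */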

/*      core_tpg_set_initiator_node_tag():
 *
 *      Initiator nodeacl tags are not used internally, but may be used by
 *      userspace to emulate aliases or groups.
 *      Returns the length of the newly-set tag, 0 when the tag is cleared
 *      via "NULL", or -EINVAL when the tag would overflow.
 */
int core_tpg_set_initiator_node_tag(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        const char *new_tag)
{
        if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
                return -EINVAL;

        if (!strncmp("NULL", new_tag, 4)) {
                acl->acl_tag[0] = '\0';
                return 0;
        }

        return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

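/*
 * percpu_ref release callback for se_lun->lun_ref: fires once the last
 * reference is dropped after percpu_ref_kill(), and wakes the waiter in
 * transport_clear_lun_ref() via lun_ref_comp.
 */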
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
        struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

        complete(&lun->lun_ref_comp);
}

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
        /* Set in core_dev_setup_virtual_lun0() */
        struct se_device *dev = g_lun0_dev;
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;
        u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
        int ret;

        lun->unpacked_lun = 0;
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&lun->lun_acl_list);
        spin_lock_init(&lun->lun_acl_lock);
        spin_lock_init(&lun->lun_sep_lock);
        init_completion(&lun->lun_ref_comp);

        ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
        if (ret < 0)
                return ret;

        return 0;
}

int core_tpg_register(
        const struct target_core_fabric_ops *tfo,
        struct se_wwn *se_wwn,
        struct se_portal_group *se_tpg,
        int proto_id)
{
        INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
        se_tpg->proto_id = proto_id;
        se_tpg->se_tpg_tfo = tfo;
        se_tpg->se_tpg_wwn = se_wwn;
        atomic_set(&se_tpg->tpg_pr_ref_count, 0);
        INIT_LIST_HEAD(&se_tpg->acl_node_list);
        INIT_LIST_HEAD(&se_tpg->se_tpg_node);
        INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
        spin_lock_init(&se_tpg->acl_node_lock);
        spin_lock_init(&se_tpg->session_lock);
        mutex_init(&se_tpg->tpg_lun_mutex);

        if (se_tpg->proto_id >= 0) {
                if (core_tpg_setup_virtual_lun0(se_tpg) < 0)
                        return -ENOMEM;
        }

        spin_lock_bh(&tpg_lock);
        list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
        spin_unlock_bh(&tpg_lock);

        pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
                 "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
                tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
                se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_register);
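/*
 * Example (sketch only; a hypothetical fabric TPG creation path, where
 * my_tpg is an assumed fabric-private structure embedding a
 * struct se_portal_group, and my_fabric_ops is the fabric's
 * target_core_fabric_ops):
 *
 *      ret = core_tpg_register(&my_fabric_ops, wwn, &my_tpg->se_tpg,
 *                              SCSI_PROTOCOL_ISCSI);
 *      if (ret < 0)
 *              return ERR_PTR(ret);
 */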

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
        const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
        struct se_node_acl *nacl, *nacl_tmp;

        pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
                 "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
                tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
                se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

        spin_lock_bh(&tpg_lock);
        list_del(&se_tpg->se_tpg_node);
        spin_unlock_bh(&tpg_lock);

        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();
        /*
         * Release any remaining demo-mode generated se_node_acls that have
         * not been released because of TFO->tpg_check_demo_mode_cache() == 1
         * in transport_deregister_session().
         */
        spin_lock_irq(&se_tpg->acl_node_lock);
        list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
                        acl_list) {
                list_del(&nacl->acl_list);
                se_tpg->num_node_acls--;
                spin_unlock_irq(&se_tpg->acl_node_lock);

                core_tpg_wait_for_nacl_pr_ref(nacl);
                core_free_device_list_for_node(nacl, se_tpg);
                kfree(nacl);

                spin_lock_irq(&se_tpg->acl_node_lock);
        }
        spin_unlock_irq(&se_tpg->acl_node_lock);

        if (se_tpg->proto_id >= 0)
                core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0);

        return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_alloc_lun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        lun = kzalloc(sizeof(*lun), GFP_KERNEL);
        if (!lun) {
                pr_err("Unable to allocate se_lun memory\n");
                return ERR_PTR(-ENOMEM);
        }
        lun->unpacked_lun = unpacked_lun;
        lun->lun_link_magic = SE_LUN_LINK_MAGIC;
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&lun->lun_acl_list);
        spin_lock_init(&lun->lun_acl_lock);
        spin_lock_init(&lun->lun_sep_lock);
        init_completion(&lun->lun_ref_comp);

        return lun;
}

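/*
 * Activate @lun within @tpg: initialize its percpu reference, export the
 * backing device, and publish the LUN on the TPG's RCU-protected hlist
 * under tpg_lun_mutex so readers see a fully-initialized entry.
 */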
int core_tpg_add_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        u32 lun_access,
        struct se_device *dev)
{
        int ret;

        ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
                              GFP_KERNEL);
        if (ret < 0)
                return ret;

        ret = core_dev_export(dev, tpg, lun);
        if (ret < 0) {
                percpu_ref_exit(&lun->lun_ref);
                return ret;
        }

        mutex_lock(&tpg->tpg_lun_mutex);
        lun->lun_access = lun_access;
        lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
        if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
        mutex_unlock(&tpg->tpg_lun_mutex);

        return 0;
}

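/*
 * Tear down @lun in the reverse order of core_tpg_add_lun(): clear any
 * node mappings, kill and drain lun_ref, unexport the device, unpublish
 * the LUN from the RCU hlist under tpg_lun_mutex, and release the
 * percpu_ref.
 */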
void core_tpg_remove_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        struct se_device *dev = lun->lun_se_dev;

        core_clear_lun_from_tpg(lun, tpg);
        transport_clear_lun_ref(lun);

        core_dev_unexport(dev, tpg, lun);

        mutex_lock(&tpg->tpg_lun_mutex);
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                hlist_del_rcu(&lun->link);
        mutex_unlock(&tpg->tpg_lun_mutex);

        percpu_ref_exit(&lun->lun_ref);
}