diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index abfa81a57115a6863b05915db39f9011e042ee20..c26f67467623d80c324f93440f1c4bdd7d8cc46e 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -275,7 +275,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
-       atomic_set(&acl->mib_ref_count, 0);
        acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
@@ -318,12 +317,6 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
                cpu_relax();
 }
 
-void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
-{
-       while (atomic_read(&nacl->mib_ref_count) != 0)
-               cpu_relax();
-}
-
 void core_tpg_clear_object_luns(struct se_portal_group *tpg)
 {
        int i, ret;
@@ -480,7 +473,6 @@ int core_tpg_del_initiator_node_acl(
        spin_unlock_bh(&tpg->session_lock);
 
        core_tpg_wait_for_nacl_pr_ref(acl);
-       core_tpg_wait_for_mib_ref(acl);
        core_clear_initiator_node_from_tpg(acl, tpg);
        core_free_device_list_for_node(acl, tpg);
 
@@ -701,6 +693,8 @@ EXPORT_SYMBOL(core_tpg_register);
 
 int core_tpg_deregister(struct se_portal_group *se_tpg)
 {
+       struct se_node_acl *nacl, *nacl_tmp;
+
        printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
                " for endpoint: %s Portal Tag %u\n",
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
@@ -714,6 +708,25 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
 
        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();
+       /*
+        * Release any remaining demo-mode generated se_node_acl that have
+        * not been released because of TFO->tpg_check_demo_mode_cache() == 1
+        * in transport_deregister_session().
+        */
+       spin_lock_bh(&se_tpg->acl_node_lock);
+       list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
+                       acl_list) {
+               list_del(&nacl->acl_list);
+               se_tpg->num_node_acls--;
+               spin_unlock_bh(&se_tpg->acl_node_lock);
+
+               core_tpg_wait_for_nacl_pr_ref(nacl);
+               core_free_device_list_for_node(nacl, se_tpg);
+               TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
+
+               spin_lock_bh(&se_tpg->acl_node_lock);
+       }
+       spin_unlock_bh(&se_tpg->acl_node_lock);
 
        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
                core_tpg_release_virtual_lun0(se_tpg);