]> git.karo-electronics.de Git - mv-sheeva.git/blobdiff - drivers/scsi/scsi_transport_fc.c
Merge branch 'master' into tk71
[mv-sheeva.git] / drivers / scsi / scsi_transport_fc.c
index d7e470a061803673d3517d02e80912e2b145ab58..5c3ccfc6b6220d99e08f21d24458e23866876392 100644 (file)
@@ -52,6 +52,25 @@ static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
 static void fc_bsg_remove(struct request_queue *);
 static void fc_bsg_goose_queue(struct fc_rport *);
 
+/*
+ * Module Parameters
+ */
+
+/*
+ * dev_loss_tmo: the default number of seconds that the FC transport
+ *   should insulate the loss of a remote port.
+ *   The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
+ */
+static unsigned int fc_dev_loss_tmo = 60;              /* seconds */
+
+module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(dev_loss_tmo,
+                "Maximum number of seconds that the FC transport should"
+                " insulate the loss of a remote port. Once this value is"
+                " exceeded, the scsi target is removed. Value should be"
+                " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
+                " fast_io_fail_tmo is not set.");
+
 /*
  * Redefine so that we can have same named attributes in the
  * sdev/starget/host objects.
@@ -408,6 +427,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
        if (!fc_host->work_q)
                return -ENOMEM;
 
+       fc_host->dev_loss_tmo = fc_dev_loss_tmo;
        snprintf(fc_host->devloss_work_q_name,
                 sizeof(fc_host->devloss_work_q_name),
                 "fc_dl_%d", shost->host_no);
@@ -461,25 +481,6 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
                               NULL,
                               NULL);
 
-/*
- * Module Parameters
- */
-
-/*
- * dev_loss_tmo: the default number of seconds that the FC transport
- *   should insulate the loss of a remote port.
- *   The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
- */
-static unsigned int fc_dev_loss_tmo = 60;              /* seconds */
-
-module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(dev_loss_tmo,
-                "Maximum number of seconds that the FC transport should"
-                " insulate the loss of a remote port. Once this value is"
-                " exceeded, the scsi target is removed. Value should be"
-                " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
-                " fast_io_fail_tmo is not set.");
-
 /*
  * Netlink Infrastructure
  */
@@ -830,24 +831,32 @@ static FC_DEVICE_ATTR(rport, supported_classes, S_IRUGO,
 /*
  * dev_loss_tmo attribute
  */
-fc_rport_show_function(dev_loss_tmo, "%d\n", 20, )
-static ssize_t
-store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
-                           const char *buf, size_t count)
+static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
+{
+       char *cp;
+
+       *val = simple_strtoul(buf, &cp, 0);
+       if ((*cp && (*cp != '\n')) || (*val < 0))
+               return -EINVAL;
+       /*
+        * Check for overflow; dev_loss_tmo is u32
+        */
+       if (*val > UINT_MAX)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport,
+                                    unsigned long val)
 {
-       unsigned long val;
-       struct fc_rport *rport = transport_class_to_rport(dev);
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct fc_internal *i = to_fc_internal(shost->transportt);
-       char *cp;
+
        if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
            (rport->port_state == FC_PORTSTATE_DELETED) ||
            (rport->port_state == FC_PORTSTATE_NOTPRESENT))
                return -EBUSY;
-       val = simple_strtoul(buf, &cp, 0);
-       if ((*cp && (*cp != '\n')) || (val < 0))
-               return -EINVAL;
-
        /*
         * Check for overflow; dev_loss_tmo is u32
         */
@@ -863,6 +872,25 @@ store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
 
        i->f->set_rport_dev_loss_tmo(rport, val);
+       return 0;
+}
+
+fc_rport_show_function(dev_loss_tmo, "%d\n", 20, )
+static ssize_t
+store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
+                           const char *buf, size_t count)
+{
+       struct fc_rport *rport = transport_class_to_rport(dev);
+       unsigned long val;
+       int rc;
+
+       rc = fc_str_to_dev_loss(buf, &val);
+       if (rc)
+               return rc;
+
+       rc = fc_rport_set_dev_loss_tmo(rport, val);
+       if (rc)
+               return rc;
        return count;
 }
 static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR,
@@ -1608,8 +1636,35 @@ store_fc_private_host_issue_lip(struct device *dev,
 static FC_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL,
                        store_fc_private_host_issue_lip);
 
-fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
+static ssize_t
+store_fc_private_host_dev_loss_tmo(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = transport_class_to_shost(dev);
+       struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+       struct fc_rport *rport;
+       unsigned long val, flags;
+       int rc;
 
+       rc = fc_str_to_dev_loss(buf, &val);
+       if (rc)
+               return rc;
+
+       fc_host_dev_loss_tmo(shost) = val;
+       spin_lock_irqsave(shost->host_lock, flags);
+       list_for_each_entry(rport, &fc_host->rports, peers)
+               fc_rport_set_dev_loss_tmo(rport, val);
+       spin_unlock_irqrestore(shost->host_lock, flags);
+       return count;
+}
+
+fc_private_host_show_function(dev_loss_tmo, "%d\n", 20, );
+static FC_DEVICE_ATTR(host, dev_loss_tmo, S_IRUGO | S_IWUSR,
+                     show_fc_host_dev_loss_tmo,
+                     store_fc_private_host_dev_loss_tmo);
+
+fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
 
 /*
  * Host Statistics Management
@@ -2165,6 +2220,7 @@ fc_attach_transport(struct fc_function_template *ft)
        SETUP_HOST_ATTRIBUTE_RW(system_hostname);
 
        /* Transport-managed attributes */
+       SETUP_PRIVATE_HOST_ATTRIBUTE_RW(dev_loss_tmo);
        SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
        if (ft->issue_fc_host_lip)
                SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip);
@@ -2525,7 +2581,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
 
        rport->maxframe_size = -1;
        rport->supported_classes = FC_COS_UNSPECIFIED;
-       rport->dev_loss_tmo = fc_dev_loss_tmo;
+       rport->dev_loss_tmo = fc_host->dev_loss_tmo;
        memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name));
        memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name));
        rport->port_id = ids->port_id;
@@ -3773,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
                  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
        if (flagset)
                queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-       __blk_run_queue(rport->rqst_q);
+       __blk_run_queue(rport->rqst_q, false);
        if (flagset)
                queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
        spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
@@ -4044,11 +4100,54 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
 /**
  * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
  * @q: the request_queue that is to be torn down.
+ *
+ * Notes:
+ *   Before unregistering the queue empty any requests that are blocked
+ *
+ *
  */
 static void
 fc_bsg_remove(struct request_queue *q)
 {
+       struct request *req; /* block request */
+       int counts; /* totals for request_list count and starved */
+
        if (q) {
+               /* Stop taking in new requests */
+               spin_lock_irq(q->queue_lock);
+               blk_stop_queue(q);
+
+               /* drain all requests in the queue */
+               while (1) {
+                       /* need the lock to fetch a request
+                        * this may fetch the same request as the previous pass
+                        */
+                       req = blk_fetch_request(q);
+                       /* save requests in use and starved */
+                       counts = q->rq.count[0] + q->rq.count[1] +
+                               q->rq.starved[0] + q->rq.starved[1];
+                       spin_unlock_irq(q->queue_lock);
+                       /* any requests still outstanding? */
+                       if (counts == 0)
+                               break;
+
+                       /* This may be the same req as the previous iteration,
+                        * always send the blk_end_request_all after a prefetch.
+                        * It is not okay to not end the request because the
+                        * prefetch started the request.
+                        */
+                       if (req) {
+                               /* return -ENXIO to indicate that this queue is
+                                * going away
+                                */
+                               req->errors = -ENXIO;
+                               blk_end_request_all(req, -ENXIO);
+                       }
+
+                       msleep(200); /* allow bsg to possibly finish */
+                       spin_lock_irq(q->queue_lock);
+               }
+
                bsg_unregister_queue(q);
                blk_cleanup_queue(q);
        }