Merge branch 'master' into csb1725
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 7f8f16bad75339388160db5b0095d8e0d131dd96..1e1e347a771531aa767f912a5fe38d3c2f2ac2dc 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -291,7 +291,7 @@ static void srp_free_target_ib(struct srp_target_port *target)
 
        for (i = 0; i < SRP_RQ_SIZE; ++i)
                srp_free_iu(target->srp_host, target->rx_ring[i]);
-       for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
+       for (i = 0; i < SRP_SQ_SIZE; ++i)
                srp_free_iu(target->srp_host, target->tx_ring[i]);
 }
 
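The dropped "+ 1" dates from when the send ring held SRP_SQ_SIZE + 1 entries (see the matching allocation hunks below). The series resizes the rings so both SRP_RQ_SIZE and SRP_SQ_SIZE are powers of two, which lets a free-running head counter be reduced to a ring slot with a single AND. A minimal sketch of that indexing scheme; the size is an assumed value, since ib_srp.h is not part of this diff:

	/* Sketch: power-of-two ring indexing, as relied on by tx_head/tx_tail.
	 * Values are illustrative; the real constants live in ib_srp.h. */
	#define SQ_SIZE	64			/* assumed; must be a power of two */
	#define SQ_MASK	(SQ_SIZE - 1)

	static inline unsigned int sq_slot(unsigned int head)
	{
		return head & SQ_MASK;		/* same as head % SQ_SIZE, cheaper */
	}

	static inline int sq_full(unsigned int head, unsigned int tail)
	{
		return head - tail >= SQ_SIZE;	/* wrap-safe: counters are unsigned */
	}
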
@@ -811,6 +811,75 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
        return len;
 }
 
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head.  Lock cannot be dropped between call here and
+ * call to __srp_post_send().
+ *
+ * Note:
+ * An upper limit for the number of allocated information units for each
+ * request type is:
+ * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
+ *   more than Scsi_Host.can_queue requests.
+ * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
+ * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
+ *   one unanswered SRP request to an initiator.
+ */
+static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
+                                     enum srp_iu_type iu_type)
+{
+       s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
+       struct srp_iu *iu;
+
+       srp_send_completion(target->send_cq, target);
+
+       if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+               return NULL;
+
+       /* Initiator responses to target requests do not consume credits */
+       if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
+               ++target->zero_req_lim;
+               return NULL;
+       }
+
+       iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
+       iu->type = iu_type;
+       return iu;
+}
+
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head.
+ */
+static int __srp_post_send(struct srp_target_port *target,
+                          struct srp_iu *iu, int len)
+{
+       struct ib_sge list;
+       struct ib_send_wr wr, *bad_wr;
+       int ret = 0;
+
+       list.addr   = iu->dma;
+       list.length = len;
+       list.lkey   = target->srp_host->srp_dev->mr->lkey;
+
+       wr.next       = NULL;
+       wr.wr_id      = target->tx_head & SRP_SQ_MASK;
+       wr.sg_list    = &list;
+       wr.num_sge    = 1;
+       wr.opcode     = IB_WR_SEND;
+       wr.send_flags = IB_SEND_SIGNALED;
+
+       ret = ib_post_send(target->qp, &wr, &bad_wr);
+
+       if (!ret) {
+               ++target->tx_head;
+               if (iu->type != SRP_IU_RSP)
+                       --target->req_lim;
+       }
+
+       return ret;
+}
+
 static int srp_post_recv(struct srp_target_port *target)
 {
        unsigned long flags;
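The new __srp_get_tx_iu() enforces two independent limits: send-queue occupancy (tx_head - tx_tail) and the target's credit count (req_lim). Ordinary IUs must leave SRP_TSK_MGMT_SQ_SIZE credits untouched so a task-management request can always be sent, while SRP_IU_RSP skips the credit test entirely because, per the comment above, initiator responses consume no credits. A condensed restatement of the admission rule, assuming SRP_TSK_MGMT_SQ_SIZE == 1 as the can_queue hunk further down suggests:

	/* Sketch of the credit check in __srp_get_tx_iu(); not driver code. */
	enum iu_type { IU_CMD, IU_TSK_MGMT, IU_RSP };

	static int credits_allow(int req_lim, enum iu_type type)
	{
		int rsv = (type == IU_TSK_MGMT) ? 0 : 1; /* SRP_TSK_MGMT_SQ_SIZE */

		if (type == IU_RSP)
			return 1;	/* responses never consume a credit */
		return req_lim > rsv;	/* commands must leave the reserve intact */
	}

With req_lim == 1, credits_allow() refuses an ordinary command but still admits an abort; that is exactly the starvation the reserve is meant to prevent.
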
@@ -822,7 +891,7 @@ static int srp_post_recv(struct srp_target_port *target)
 
        spin_lock_irqsave(target->scsi_host->host_lock, flags);
 
-       next     = target->rx_head & (SRP_RQ_SIZE - 1);
+       next     = target->rx_head & SRP_RQ_MASK;
        wr.wr_id = next;
        iu       = target->rx_ring[next];
 
@@ -896,6 +965,71 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
        spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 }
 
+static int srp_response_common(struct srp_target_port *target, s32 req_delta,
+                              void *rsp, int len)
+{
+       struct ib_device *dev;
+       unsigned long flags;
+       struct srp_iu *iu;
+       int err = 1;
+
+       dev = target->srp_host->srp_dev->dev;
+
+       spin_lock_irqsave(target->scsi_host->host_lock, flags);
+       target->req_lim += req_delta;
+
+       iu = __srp_get_tx_iu(target, SRP_IU_RSP);
+       if (!iu) {
+               shost_printk(KERN_ERR, target->scsi_host, PFX
+                            "no IU available to send response\n");
+               goto out;
+       }
+
+       ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
+       memcpy(iu->buf, rsp, len);
+       ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
+
+       err = __srp_post_send(target, iu, len);
+       if (err)
+               shost_printk(KERN_ERR, target->scsi_host, PFX
+                            "unable to post response: %d\n", err);
+
+out:
+       spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+       return err;
+}
+
+static void srp_process_cred_req(struct srp_target_port *target,
+                                struct srp_cred_req *req)
+{
+       struct srp_cred_rsp rsp = {
+               .opcode = SRP_CRED_RSP,
+               .tag = req->tag,
+       };
+       s32 delta = be32_to_cpu(req->req_lim_delta);
+
+       if (srp_response_common(target, delta, &rsp, sizeof rsp))
+               shost_printk(KERN_ERR, target->scsi_host, PFX
+                            "problems processing SRP_CRED_REQ\n");
+}
+
+static void srp_process_aer_req(struct srp_target_port *target,
+                               struct srp_aer_req *req)
+{
+       struct srp_aer_rsp rsp = {
+               .opcode = SRP_AER_RSP,
+               .tag = req->tag,
+       };
+       s32 delta = be32_to_cpu(req->req_lim_delta);
+
+       shost_printk(KERN_ERR, target->scsi_host, PFX
+                    "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
+
+       if (srp_response_common(target, delta, &rsp, sizeof rsp))
+               shost_printk(KERN_ERR, target->scsi_host, PFX
+                            "problems processing SRP_AER_REQ\n");
+}
+
 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
        struct ib_device *dev;
@@ -923,6 +1057,14 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
                srp_process_rsp(target, iu->buf);
                break;
 
+       case SRP_CRED_REQ:
+               srp_process_cred_req(target, iu->buf);
+               break;
+
+       case SRP_AER_REQ:
+               srp_process_aer_req(target, iu->buf);
+               break;
+
        case SRP_T_LOGOUT:
                /* XXX Handle target logout */
                shost_printk(KERN_WARNING, target->scsi_host,
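SRP_CRED_REQ and SRP_AER_REQ are the two target-initiated request types: the first grants the initiator additional request credits, the second reports an asynchronous event for a LUN. In both cases the initiator banks req_lim_delta and must reply with the matching response IU echoing the request's tag, which is what srp_response_common() factors out. A sketch of that pairing; the anonymous response layout below is a placeholder, not the <scsi/srp.h> definition:

	/* Sketch: answer a target-initiated request; illustrative only. */
	static void answer_target_req(struct srp_target_port *target,
				      u8 rsp_opcode, u64 tag, __be32 delta)
	{
		struct {
			u8  opcode;
			u8  reserved[7];
			u64 tag;
		} rsp = { .opcode = rsp_opcode, .tag = tag };

		/* bank the granted credits and post the response under host_lock */
		srp_response_common(target, be32_to_cpu(delta), &rsp, sizeof rsp);
	}
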
@@ -981,62 +1123,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
        }
 }
 
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.  Lock cannot be dropped between call here and
- * call to __srp_post_send().
- */
-static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
-                                       enum srp_request_type req_type)
-{
-       s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
-
-       srp_send_completion(target->send_cq, target);
-
-       if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
-               return NULL;
-
-       if (target->req_lim < min) {
-               ++target->zero_req_lim;
-               return NULL;
-       }
-
-       return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
-}
-
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
- */
-static int __srp_post_send(struct srp_target_port *target,
-                          struct srp_iu *iu, int len)
-{
-       struct ib_sge list;
-       struct ib_send_wr wr, *bad_wr;
-       int ret = 0;
-
-       list.addr   = iu->dma;
-       list.length = len;
-       list.lkey   = target->srp_host->srp_dev->mr->lkey;
-
-       wr.next       = NULL;
-       wr.wr_id      = target->tx_head & SRP_SQ_SIZE;
-       wr.sg_list    = &list;
-       wr.num_sge    = 1;
-       wr.opcode     = IB_WR_SEND;
-       wr.send_flags = IB_SEND_SIGNALED;
-
-       ret = ib_post_send(target->qp, &wr, &bad_wr);
-
-       if (!ret) {
-               ++target->tx_head;
-               --target->req_lim;
-       }
-
-       return ret;
-}
-
-static int srp_queuecommand(struct scsi_cmnd *scmnd,
+static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
                            void (*done)(struct scsi_cmnd *))
 {
        struct srp_target_port *target = host_to_target(scmnd->device->host);
@@ -1056,7 +1143,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
                return 0;
        }
 
-       iu = __srp_get_tx_iu(target, SRP_REQ_NORMAL);
+       iu = __srp_get_tx_iu(target, SRP_IU_CMD);
        if (!iu)
                goto err;
 
@@ -1064,7 +1151,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
        ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
                                   DMA_TO_DEVICE);
 
-       req = list_entry(target->free_reqs.next, struct srp_request, list);
+       req = list_first_entry(&target->free_reqs, struct srp_request, list);
 
        scmnd->scsi_done     = done;
        scmnd->result        = 0;
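list_first_entry() is purely a readability change over the open-coded list_entry(head->next, ...) it replaces; <linux/list.h> defines it as a thin wrapper:

	/* <linux/list.h>: first element of a list, assumed non-empty */
	#define list_first_entry(ptr, type, member) \
		list_entry((ptr)->next, type, member)

The list is known to be non-empty here because the mid-layer never queues more than can_queue commands.
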
@@ -1109,6 +1196,8 @@ err:
        return SCSI_MLQUEUE_HOST_BUSY;
 }
 
+static DEF_SCSI_QCMD(srp_queuecommand)
+
 static int srp_alloc_iu_bufs(struct srp_target_port *target)
 {
        int i;
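DEF_SCSI_QCMD() comes from the 2.6.37 host-lock push-down: queuecommand is no longer invoked with host_lock held, so drivers that still rely on it rename their handler to *_lck and let the macro generate a wrapper that takes the lock around the call. Its shape in <scsi/scsi_cmnd.h> is roughly as follows (abridged from memory; treat as a sketch, not the exact macro):

	#define DEF_SCSI_QCMD(func_name)				\
	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
	{								\
		unsigned long irq_flags;				\
		int rc;							\
		spin_lock_irqsave(shost->host_lock, irq_flags);		\
		rc = func_name##_lck(cmd, cmd->scsi_done);		\
		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
		return rc;						\
	}
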
@@ -1121,7 +1210,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
                        goto err;
        }
 
-       for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
+       for (i = 0; i < SRP_SQ_SIZE; ++i) {
                target->tx_ring[i] = srp_alloc_iu(target->srp_host,
                                                  srp_max_iu_len,
                                                  GFP_KERNEL, DMA_TO_DEVICE);
@@ -1137,7 +1226,7 @@ err:
                target->rx_ring[i] = NULL;
        }
 
-       for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
+       for (i = 0; i < SRP_SQ_SIZE; ++i) {
                srp_free_iu(target->srp_host, target->tx_ring[i]);
                target->tx_ring[i] = NULL;
        }
@@ -1252,8 +1341,13 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
                        target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
                        target->req_lim       = be32_to_cpu(rsp->req_lim_delta);
 
-                       target->scsi_host->can_queue = min(target->req_lim,
-                                                          target->scsi_host->can_queue);
+                       /*
+                        * Reserve credits for task management so we don't
+                        * bounce requests back to the SCSI mid-layer.
+                        */
+                       target->scsi_host->can_queue
+                               = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
+                                     target->scsi_host->can_queue);
                } else {
                        shost_printk(KERN_WARNING, target->scsi_host,
                                    PFX "Unhandled RSP opcode %#x\n", opcode);
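The subtraction keeps the reserve consistent end to end: the mid-layer may have at most req_lim - SRP_TSK_MGMT_SQ_SIZE commands in flight, so the credit reserve in __srp_get_tx_iu() can never be eaten by ordinary I/O. A worked example with assumed numbers:

	/* Assumed: login grants req_lim = 64, SRP_TSK_MGMT_SQ_SIZE = 1. */
	can_queue = min(64 - 1, can_queue);	/* at most 63 commands in flight */
	/* Even with 63 commands outstanding, req_lim >= 1 still holds, so an
	 * abort or LUN reset can always obtain a credit in __srp_get_tx_iu(). */
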
@@ -1350,6 +1444,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 static int srp_send_tsk_mgmt(struct srp_target_port *target,
                             struct srp_request *req, u8 func)
 {
+       struct ib_device *dev = target->srp_host->srp_dev->dev;
        struct srp_iu *iu;
        struct srp_tsk_mgmt *tsk_mgmt;
 
@@ -1363,10 +1458,12 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 
        init_completion(&req->done);
 
-       iu = __srp_get_tx_iu(target, SRP_REQ_TASK_MGMT);
+       iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
        if (!iu)
                goto out;
 
+       ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
+                                  DMA_TO_DEVICE);
        tsk_mgmt = iu->buf;
        memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
 
@@ -1376,6 +1473,8 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
        tsk_mgmt->tsk_mgmt_func = func;
        tsk_mgmt->task_tag      = req->index;
 
+       ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
+                                     DMA_TO_DEVICE);
        if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
                goto out;
 
@@ -1626,9 +1725,9 @@ static struct scsi_host_template srp_template = {
        .eh_abort_handler               = srp_abort,
        .eh_device_reset_handler        = srp_reset_device,
        .eh_host_reset_handler          = srp_reset_host,
-       .can_queue                      = SRP_SQ_SIZE,
+       .can_queue                      = SRP_CMD_SQ_SIZE,
        .this_id                        = -1,
-       .cmd_per_lun                    = SRP_SQ_SIZE,
+       .cmd_per_lun                    = SRP_CMD_SQ_SIZE,
        .use_clustering                 = ENABLE_CLUSTERING,
        .shost_attrs                    = srp_host_attrs
 };
@@ -1813,7 +1912,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
                                printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
                                goto out;
                        }
-                       target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE);
+                       target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
                        break;
 
                case SRP_OPT_IO_CLASS:
@@ -1891,7 +1990,7 @@ static ssize_t srp_create_target(struct device *dev,
 
        INIT_LIST_HEAD(&target->free_reqs);
        INIT_LIST_HEAD(&target->req_queue);
-       for (i = 0; i < SRP_SQ_SIZE; ++i) {
+       for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
                target->req_ring[i].index = i;
                list_add_tail(&target->req_ring[i].list, &target->free_reqs);
        }
@@ -2159,6 +2258,9 @@ static int __init srp_init_module(void)
 {
        int ret;
 
+       BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE);
+       BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);
+
        if (srp_sg_tablesize > 255) {
                printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
                srp_sg_tablesize = 255;
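These compile-time asserts pin down the assumption behind every & SRP_SQ_MASK / & SRP_RQ_MASK above: masking equals % size only for power-of-two sizes. The kernel macro boils down to the classic bit trick:

	/* <linux/kernel.h>: n is a power of two iff n != 0 and (n & (n - 1)) == 0 */
	#define BUILD_BUG_ON_NOT_POWER_OF_2(n)			\
		BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))

For example, 64 & 63 == 0, while 63 & 62 == 62, so a ring size of 63 would fail the build instead of silently corrupting ring indexing.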