diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a54eee9324b61833b13164b93a8696bf2689c8ea..1e1e347a771531aa767f912a5fe38d3c2f2ac2dc 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -83,10 +83,6 @@ static void srp_remove_one(struct ib_device *device);
 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
 static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
-static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
-                                     enum srp_iu_type iu_type);
-static int __srp_post_send(struct srp_target_port *target,
-                          struct srp_iu *iu, int len);
 
 static struct scsi_transport_template *ib_srp_transport_template;
 
@@ -815,6 +811,75 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
        return len;
 }
 
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head.  Lock cannot be dropped between call here and
+ * call to __srp_post_send().
+ *
+ * Note:
+ * An upper limit for the number of allocated information units for each
+ * request type is:
+ * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
+ *   more than Scsi_Host.can_queue requests.
+ * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
+ * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
+ *   one unanswered SRP request to an initiator.
+ */
+static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
+                                     enum srp_iu_type iu_type)
+{
+       s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
+       struct srp_iu *iu;
+
+       srp_send_completion(target->send_cq, target);
+
+       if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+               return NULL;
+
+       /* Initiator responses to target requests do not consume credits */
+       if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
+               ++target->zero_req_lim;
+               return NULL;
+       }
+
+       iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
+       iu->type = iu_type;
+       return iu;
+}
+
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head.
+ */
+static int __srp_post_send(struct srp_target_port *target,
+                          struct srp_iu *iu, int len)
+{
+       struct ib_sge list;
+       struct ib_send_wr wr, *bad_wr;
+       int ret = 0;
+
+       list.addr   = iu->dma;
+       list.length = len;
+       list.lkey   = target->srp_host->srp_dev->mr->lkey;
+
+       wr.next       = NULL;
+       wr.wr_id      = target->tx_head & SRP_SQ_MASK;
+       wr.sg_list    = &list;
+       wr.num_sge    = 1;
+       wr.opcode     = IB_WR_SEND;
+       wr.send_flags = IB_SEND_SIGNALED;
+
+       ret = ib_post_send(target->qp, &wr, &bad_wr);
+
+       if (!ret) {
+               ++target->tx_head;
+               if (iu->type != SRP_IU_RSP)
+                       --target->req_lim;
+       }
+
+       return ret;
+}
+
 static int srp_post_recv(struct srp_target_port *target)
 {
        unsigned long flags;
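
The two helpers above are meant to be used as a pair under a single hold of host_lock: __srp_get_tx_iu() checks the credit count and reserves the slot at tx_head, and __srp_post_send() consumes that slot and decrements req_lim, so dropping the lock in between would let another context claim the same ring entry. Note that tx_head and tx_tail are free-running counters; because SRP_SQ_SIZE is a power of two, tx_head - tx_tail counts in-flight sends correctly even across wraparound, and tx_head & SRP_SQ_MASK selects the ring slot. A minimal sketch of the intended caller pattern (example_post is a hypothetical helper, not part of the patch; error handling is elided):

	static int example_post(struct srp_target_port *target,
				const void *data, int len)
	{
		unsigned long flags;
		struct srp_iu *iu;
		int ret = -ENOMEM;

		/* host_lock must be held across both helper calls */
		spin_lock_irqsave(target->scsi_host->host_lock, flags);
		iu = __srp_get_tx_iu(target, SRP_IU_CMD);
		if (iu) {
			/* a real caller also syncs iu->dma for the CPU and
			 * the device around this copy, as in the hunks below */
			memcpy(iu->buf, data, len);
			ret = __srp_post_send(target, iu, len);
		}
		spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
		return ret;
	}
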
@@ -1058,76 +1123,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
        }
 }
 
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.  Lock cannot be dropped between call here and
- * call to __srp_post_send().
- *
- * Note:
- * An upper limit for the number of allocated information units for each
- * request type is:
- * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
- *   more than Scsi_Host.can_queue requests.
- * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
- * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
- *   one unanswered SRP request to an initiator.
- */
-static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
-                                     enum srp_iu_type iu_type)
-{
-       s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
-       struct srp_iu *iu;
-
-       srp_send_completion(target->send_cq, target);
-
-       if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
-               return NULL;
-
-       /* Initiator responses to target requests do not consume credits */
-       if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
-               ++target->zero_req_lim;
-               return NULL;
-       }
-
-       iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
-       iu->type = iu_type;
-       return iu;
-}
-
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
- */
-static int __srp_post_send(struct srp_target_port *target,
-                          struct srp_iu *iu, int len)
-{
-       struct ib_sge list;
-       struct ib_send_wr wr, *bad_wr;
-       int ret = 0;
-
-       list.addr   = iu->dma;
-       list.length = len;
-       list.lkey   = target->srp_host->srp_dev->mr->lkey;
-
-       wr.next       = NULL;
-       wr.wr_id      = target->tx_head & SRP_SQ_MASK;
-       wr.sg_list    = &list;
-       wr.num_sge    = 1;
-       wr.opcode     = IB_WR_SEND;
-       wr.send_flags = IB_SEND_SIGNALED;
-
-       ret = ib_post_send(target->qp, &wr, &bad_wr);
-
-       if (!ret) {
-               ++target->tx_head;
-               if (iu->type != SRP_IU_RSP)
-                       --target->req_lim;
-       }
-
-       return ret;
-}
-
-static int srp_queuecommand(struct scsi_cmnd *scmnd,
+static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
                            void (*done)(struct scsi_cmnd *))
 {
        struct srp_target_port *target = host_to_target(scmnd->device->host);
@@ -1155,7 +1151,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
        ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
                                   DMA_TO_DEVICE);
 
-       req = list_entry(target->free_reqs.next, struct srp_request, list);
+       req = list_first_entry(&target->free_reqs, struct srp_request, list);
 
        scmnd->scsi_done     = done;
        scmnd->result        = 0;
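
The list_first_entry() change is purely cosmetic; the macro, from include/linux/list.h, expands to exactly the open-coded form it replaces:

	#define list_first_entry(ptr, type, member) \
		list_entry((ptr)->next, type, member)
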
@@ -1200,6 +1196,8 @@ err:
        return SCSI_MLQUEUE_HOST_BUSY;
 }
 
+static DEF_SCSI_QCMD(srp_queuecommand)
+
 static int srp_alloc_iu_bufs(struct srp_target_port *target)
 {
        int i;
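
DEF_SCSI_QCMD() is part of the SCSI host-lock push-down: queuecommand is no longer invoked with host_lock held, so drivers that still depend on the lock keep their original function under an _lck suffix and let the macro generate the locking wrapper. Its definition in <scsi/scsi_host.h> at the time is approximately the following (quoted from memory, so treat it as a sketch rather than the authoritative text):

	#define DEF_SCSI_QCMD(func_name) \
		int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
		{								\
			unsigned long irq_flags;				\
			int rc;							\
			spin_lock_irqsave(shost->host_lock, irq_flags);		\
			scsi_cmd_get_serial(shost, cmd);			\
			rc = func_name##_lck(cmd, cmd->scsi_done);		\
			spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
			return rc;						\
		}
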
@@ -1343,8 +1341,13 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
                        target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
                        target->req_lim       = be32_to_cpu(rsp->req_lim_delta);
 
-                       target->scsi_host->can_queue = min(target->req_lim,
-                                                          target->scsi_host->can_queue);
+                       /*
+                        * Reserve credits for task management so we don't
+                        * bounce requests back to the SCSI mid-layer.
+                        */
+                       target->scsi_host->can_queue
+                               = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
+                                     target->scsi_host->can_queue);
                } else {
                        shost_printk(KERN_WARNING, target->scsi_host,
                                    PFX "Unhandled RSP opcode %#x\n", opcode);
@@ -1441,6 +1444,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 static int srp_send_tsk_mgmt(struct srp_target_port *target,
                             struct srp_request *req, u8 func)
 {
+       struct ib_device *dev = target->srp_host->srp_dev->dev;
        struct srp_iu *iu;
        struct srp_tsk_mgmt *tsk_mgmt;
 
@@ -1458,6 +1462,8 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
        if (!iu)
                goto out;
 
+       ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
+                                  DMA_TO_DEVICE);
        tsk_mgmt = iu->buf;
        memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
 
@@ -1467,6 +1473,8 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
        tsk_mgmt->tsk_mgmt_func = func;
        tsk_mgmt->task_tag      = req->index;
 
+       ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
+                                     DMA_TO_DEVICE);
        if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
                goto out;
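
The new sync calls bracket the CPU's writes to the IU in the standard streaming-DMA pattern: hand the buffer to the CPU, fill it, hand it back to the device before posting the send. In general form (a sketch; dev, dma, buf and len stand in for the mapping in question):

	ib_dma_sync_single_for_cpu(dev, dma, len, DMA_TO_DEVICE);
	/* ... CPU writes into buf ... */
	ib_dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);

The hunk at @@ -1155 above shows the matching ib_dma_sync_single_for_cpu() in the command path; this change gives the task-management path the same treatment.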