/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME        "ib_srp"
#define PFX             DRV_NAME ": "
#define DRV_VERSION     "0.2"
#define DRV_RELDATE     "November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
                   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE;
static int srp_max_iu_len;

module_param(srp_sg_tablesize, int, 0444);
MODULE_PARM_DESC(srp_sg_tablesize,
                 "Max number of gather/scatter entries per I/O (default is 12, max 255)");

static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
                 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static int mellanox_workarounds = 1;

module_param(mellanox_workarounds, int, 0444);
MODULE_PARM_DESC(mellanox_workarounds,
                 "Enable workarounds for Mellanox SRP target bugs if != 0");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
        .name   = "srp",
        .add    = srp_add_one,
        .remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
        return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
        return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
        static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
        static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

        return topspin_workarounds &&
                (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
                 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static int srp_target_is_mellanox(struct srp_target_port *target)
{
        static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 };

        return mellanox_workarounds &&
                !memcmp(&target->ioc_guid, mellanox_oui, sizeof mellanox_oui);
}

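/*
 * Allocate an information unit (IU): a kernel buffer of @size bytes,
 * DMA-mapped in @direction for use on the SRP send or receive ring.
 */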
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
                                   gfp_t gfp_mask,
                                   enum dma_data_direction direction)
{
        struct srp_iu *iu;

        iu = kmalloc(sizeof *iu, gfp_mask);
        if (!iu)
                goto out;

        iu->buf = kzalloc(size, gfp_mask);
        if (!iu->buf)
                goto out_free_iu;

        iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
                                    direction);
        if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
                goto out_free_buf;

        iu->size      = size;
        iu->direction = direction;

        return iu;

out_free_buf:
        kfree(iu->buf);
out_free_iu:
        kfree(iu);
out:
        return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
        if (!iu)
                return;

        ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
                            iu->direction);
        kfree(iu->buf);
        kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
        printk(KERN_ERR PFX "QP event %d\n", event->event);
}

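/*
 * Move a freshly created QP to the INIT state, using the pkey index
 * that matches the target's partition key on the local port.
 */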
static int srp_init_qp(struct srp_target_port *target,
                       struct ib_qp *qp)
{
        struct ib_qp_attr *attr;
        int ret;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        ret = ib_find_pkey(target->srp_host->srp_dev->dev,
                           target->srp_host->port,
                           be16_to_cpu(target->path.pkey),
                           &attr->pkey_index);
        if (ret)
                goto out;

        attr->qp_state        = IB_QPS_INIT;
        attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
                                    IB_ACCESS_REMOTE_WRITE);
        attr->port_num        = target->srp_host->port;

        ret = ib_modify_qp(qp, attr,
                           IB_QP_STATE          |
                           IB_QP_PKEY_INDEX     |
                           IB_QP_ACCESS_FLAGS   |
                           IB_QP_PORT);

out:
        kfree(attr);
        return ret;
}

static int srp_new_cm_id(struct srp_target_port *target)
{
        struct ib_cm_id *new_cm_id;

        new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
                                    srp_cm_handler, target);
        if (IS_ERR(new_cm_id))
                return PTR_ERR(new_cm_id);

        if (target->cm_id)
                ib_destroy_cm_id(target->cm_id);
        target->cm_id = new_cm_id;

        return 0;
}

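/*
 * Create the IB resources for one target port: separate receive and
 * send completion queues plus the RC QP that carries the SRP channel.
 */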
static int srp_create_target_ib(struct srp_target_port *target)
{
        struct ib_qp_init_attr *init_attr;
        int ret;

        init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
        if (!init_attr)
                return -ENOMEM;

        target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
                                       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
        if (IS_ERR(target->recv_cq)) {
                ret = PTR_ERR(target->recv_cq);
                goto err;
        }

        target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
                                       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
        if (IS_ERR(target->send_cq)) {
                ret = PTR_ERR(target->send_cq);
                goto err_recv_cq;
        }

        ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);

        init_attr->event_handler       = srp_qp_event;
        init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
        init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
        init_attr->cap.max_recv_sge    = 1;
        init_attr->cap.max_send_sge    = 1;
        init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
        init_attr->qp_type             = IB_QPT_RC;
        init_attr->send_cq             = target->send_cq;
        init_attr->recv_cq             = target->recv_cq;

        target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
        if (IS_ERR(target->qp)) {
                ret = PTR_ERR(target->qp);
                goto err_send_cq;
        }

        ret = srp_init_qp(target, target->qp);
        if (ret)
                goto err_qp;

        kfree(init_attr);
        return 0;

err_qp:
        ib_destroy_qp(target->qp);

err_send_cq:
        ib_destroy_cq(target->send_cq);

err_recv_cq:
        ib_destroy_cq(target->recv_cq);

err:
        kfree(init_attr);
        return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
        int i;

        ib_destroy_qp(target->qp);
        ib_destroy_cq(target->send_cq);
        ib_destroy_cq(target->recv_cq);

        for (i = 0; i < SRP_RQ_SIZE; ++i)
                srp_free_iu(target->srp_host, target->rx_ring[i]);
        for (i = 0; i < SRP_SQ_SIZE; ++i)
                srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
                                    struct ib_sa_path_rec *pathrec,
                                    void *target_ptr)
{
        struct srp_target_port *target = target_ptr;

        target->status = status;
        if (status)
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "Got failed path rec status %d\n", status);
        else
                target->path = *pathrec;
        complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
        target->path.numb_path = 1;

        init_completion(&target->done);

        target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
                                                   target->srp_host->srp_dev->dev,
                                                   target->srp_host->port,
                                                   &target->path,
                                                   IB_SA_PATH_REC_SERVICE_ID    |
                                                   IB_SA_PATH_REC_DGID          |
                                                   IB_SA_PATH_REC_SGID          |
                                                   IB_SA_PATH_REC_NUMB_PATH     |
                                                   IB_SA_PATH_REC_PKEY,
                                                   SRP_PATH_REC_TIMEOUT_MS,
                                                   GFP_KERNEL,
                                                   srp_path_rec_completion,
                                                   target, &target->path_query);
        if (target->path_query_id < 0)
                return target->path_query_id;

        wait_for_completion(&target->done);

        if (target->status < 0)
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Path record query failed\n");

        return target->status;
}

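/*
 * Build and send the IB CM REQ for this target.  The CM private data
 * carries the SRP_LOGIN_REQ, including the initiator and target port
 * identifiers in the byte order the target expects (see below).
 */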
static int srp_send_req(struct srp_target_port *target)
{
        struct {
                struct ib_cm_req_param param;
                struct srp_login_req   priv;
        } *req = NULL;
        int status;

        req = kzalloc(sizeof *req, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->param.primary_path               = &target->path;
        req->param.alternate_path             = NULL;
        req->param.service_id                 = target->service_id;
        req->param.qp_num                     = target->qp->qp_num;
        req->param.qp_type                    = target->qp->qp_type;
        req->param.private_data               = &req->priv;
        req->param.private_data_len           = sizeof req->priv;
        req->param.flow_control               = 1;

        get_random_bytes(&req->param.starting_psn, 4);
        req->param.starting_psn              &= 0xffffff;

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req->param.responder_resources        = 4;
        req->param.remote_cm_response_timeout = 20;
        req->param.local_cm_response_timeout  = 20;
        req->param.retry_count                = 7;
        req->param.rnr_retry_count            = 7;
        req->param.max_cm_retries             = 15;

        req->priv.opcode        = SRP_LOGIN_REQ;
        req->priv.tag           = 0;
        req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len);
        req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
                                              SRP_BUF_FORMAT_INDIRECT);
        /*
         * In the published SRP specification (draft rev. 16a), the
         * port identifier format is 8 bytes of ID extension followed
         * by 8 bytes of GUID.  Older drafts put the two halves in the
         * opposite order, so that the GUID comes first.
         *
         * Targets conforming to these obsolete drafts can be
         * recognized by the I/O Class they report.
         */
        if (target->io_class == SRP_REV10_IB_IO_CLASS) {
                memcpy(req->priv.initiator_port_id,
                       &target->path.sgid.global.interface_id, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->initiator_ext, 8);
                memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
                memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
        } else {
                memcpy(req->priv.initiator_port_id,
                       &target->initiator_ext, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->path.sgid.global.interface_id, 8);
                memcpy(req->priv.target_port_id,     &target->id_ext, 8);
                memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
        }

        /*
         * Topspin/Cisco SRP targets will reject our login unless we
         * zero out the first 8 bytes of our initiator port ID and set
         * the second 8 bytes to the local node GUID.
         */
        if (srp_target_is_topspin(target)) {
                shost_printk(KERN_DEBUG, target->scsi_host,
                             PFX "Topspin/Cisco initiator port ID workaround "
                             "activated for target GUID %016llx\n",
                             (unsigned long long) be64_to_cpu(target->ioc_guid));
                memset(req->priv.initiator_port_id, 0, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->srp_host->srp_dev->dev->node_guid, 8);
        }

        status = ib_send_cm_req(target->cm_id, &req->param);

        kfree(req);

        return status;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
        /* XXX should send SRP_I_LOGOUT request */

        init_completion(&target->done);
        if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
                shost_printk(KERN_DEBUG, target->scsi_host,
                             PFX "Sending CM DREQ failed\n");
                return;
        }
        wait_for_completion(&target->done);
}

static void srp_remove_work(struct work_struct *work)
{
        struct srp_target_port *target =
                container_of(work, struct srp_target_port, work);

        spin_lock_irq(target->scsi_host->host_lock);
        if (target->state != SRP_TARGET_DEAD) {
                spin_unlock_irq(target->scsi_host->host_lock);
                return;
        }
        target->state = SRP_TARGET_REMOVED;
        spin_unlock_irq(target->scsi_host->host_lock);

        spin_lock(&target->srp_host->target_lock);
        list_del(&target->list);
        spin_unlock(&target->srp_host->target_lock);

        srp_remove_host(target->scsi_host);
        scsi_remove_host(target->scsi_host);
        ib_destroy_cm_id(target->cm_id);
        srp_free_target_ib(target);
        scsi_host_put(target->scsi_host);
}

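/*
 * Look up a path to the target and then loop on login attempts,
 * following CM port/DLID redirects and retrying a few times if the
 * target reports a stale connection.
 */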
static int srp_connect_target(struct srp_target_port *target)
{
        int retries = 3;
        int ret;

        ret = srp_lookup_path(target);
        if (ret)
                return ret;

        while (1) {
                init_completion(&target->done);
                ret = srp_send_req(target);
                if (ret)
                        return ret;
                wait_for_completion(&target->done);

                /*
                 * The CM event handling code will set status to
                 * SRP_PORT_REDIRECT if we get a port redirect REJ
                 * back, or SRP_DLID_REDIRECT if we get a lid/qp
                 * redirect REJ back.
                 */
                switch (target->status) {
                case 0:
                        return 0;

                case SRP_PORT_REDIRECT:
                        ret = srp_lookup_path(target);
                        if (ret)
                                return ret;
                        break;

                case SRP_DLID_REDIRECT:
                        break;

                case SRP_STALE_CONN:
                        /* Our current CM id was stale, and is now in timewait.
                         * Try to reconnect with a new one.
                         */
                        if (!retries-- || srp_new_cm_id(target)) {
                                shost_printk(KERN_ERR, target->scsi_host, PFX
                                             "giving up on stale connection\n");
                                target->status = -ECONNRESET;
                                return target->status;
                        }

                        shost_printk(KERN_ERR, target->scsi_host, PFX
                                     "retrying stale connection\n");
                        break;

                default:
                        return target->status;
                }
        }
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
                           struct srp_target_port *target,
                           struct srp_request *req)
{
        if (!scsi_sglist(scmnd) ||
            (scmnd->sc_data_direction != DMA_TO_DEVICE &&
             scmnd->sc_data_direction != DMA_FROM_DEVICE))
                return;

        if (req->fmr) {
                ib_fmr_pool_unmap(req->fmr);
                req->fmr = NULL;
        }

        ib_dma_unmap_sg(target->srp_host->srp_dev->dev, scsi_sglist(scmnd),
                        scsi_sg_count(scmnd), scmnd->sc_data_direction);
}

static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
{
        srp_unmap_data(req->scmnd, target, req);
        list_move_tail(&req->list, &target->free_reqs);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
        req->scmnd->result = DID_RESET << 16;
        req->scmnd->scsi_done(req->scmnd);
        srp_remove_req(target, req);
}

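/*
 * Tear down and re-establish the connection after a QP error: get a
 * new CM ID, reset and re-initialize the QP, drain both CQs, fail any
 * outstanding requests back to the SCSI midlayer and log in again.
 */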
static int srp_reconnect_target(struct srp_target_port *target)
{
        struct ib_qp_attr qp_attr;
        struct srp_request *req, *tmp;
        struct ib_wc wc;
        int ret;

        spin_lock_irq(target->scsi_host->host_lock);
        if (target->state != SRP_TARGET_LIVE) {
                spin_unlock_irq(target->scsi_host->host_lock);
                return -EAGAIN;
        }
        target->state = SRP_TARGET_CONNECTING;
        spin_unlock_irq(target->scsi_host->host_lock);

        srp_disconnect_target(target);
        /*
         * Now get a new local CM ID so that we avoid confusing the
         * target in case things are really fouled up.
         */
        ret = srp_new_cm_id(target);
        if (ret)
                goto err;

        qp_attr.qp_state = IB_QPS_RESET;
        ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
        if (ret)
                goto err;

        ret = srp_init_qp(target, target->qp);
        if (ret)
                goto err;

        while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
                ; /* nothing */
        while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
                ; /* nothing */

        spin_lock_irq(target->scsi_host->host_lock);
        list_for_each_entry_safe(req, tmp, &target->req_queue, list)
                srp_reset_req(target, req);
        spin_unlock_irq(target->scsi_host->host_lock);

        target->rx_head  = 0;
        target->tx_head  = 0;
        target->tx_tail  = 0;

        target->qp_in_error = 0;
        ret = srp_connect_target(target);
        if (ret)
                goto err;

        spin_lock_irq(target->scsi_host->host_lock);
        if (target->state == SRP_TARGET_CONNECTING) {
                ret = 0;
                target->state = SRP_TARGET_LIVE;
        } else
                ret = -EAGAIN;
        spin_unlock_irq(target->scsi_host->host_lock);

        return ret;

err:
        shost_printk(KERN_ERR, target->scsi_host,
                     PFX "reconnect failed (%d), removing target port.\n", ret);

        /*
         * We couldn't reconnect, so kill our target port off.
         * However, we have to defer the real removal because we might
         * be in the context of the SCSI error handler now, which
         * would deadlock if we call scsi_remove_host().
         */
        spin_lock_irq(target->scsi_host->host_lock);
        if (target->state == SRP_TARGET_CONNECTING) {
                target->state = SRP_TARGET_DEAD;
                INIT_WORK(&target->work, srp_remove_work);
                schedule_work(&target->work);
        }
        spin_unlock_irq(target->scsi_host->host_lock);

        return ret;
}

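/*
 * Try to map the scatterlist with the FMR pool.  All elements except
 * the first must start, and all except the last must end, on an FMR
 * page boundary; otherwise the caller falls back to an indirect
 * descriptor (see srp_map_data()).
 */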
static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
                       int sg_cnt, struct srp_request *req,
                       struct srp_direct_buf *buf)
{
        u64 io_addr = 0;
        u64 *dma_pages;
        u32 len;
        int page_cnt;
        int i, j;
        int ret;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        struct scatterlist *sg;

        if (!dev->fmr_pool)
                return -ENODEV;

        if (srp_target_is_mellanox(target) &&
            (ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask))
                return -EINVAL;

        len = page_cnt = 0;
        scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
                unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

                if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) {
                        if (i > 0)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }
                if ((ib_sg_dma_address(ibdev, sg) + dma_len) &
                    ~dev->fmr_page_mask) {
                        if (i < sg_cnt - 1)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }

                len += dma_len;
        }

        page_cnt += len >> dev->fmr_page_shift;
        if (page_cnt > SRP_FMR_SIZE)
                return -ENOMEM;

        dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
        if (!dma_pages)
                return -ENOMEM;

        page_cnt = 0;
        scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
                unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

                for (j = 0; j < dma_len; j += dev->fmr_page_size)
                        dma_pages[page_cnt++] =
                                (ib_sg_dma_address(ibdev, sg) &
                                 dev->fmr_page_mask) + j;
        }

        req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
                                        dma_pages, page_cnt, io_addr);
        if (IS_ERR(req->fmr)) {
                ret = PTR_ERR(req->fmr);
                req->fmr = NULL;
                goto out;
        }

        buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
                               ~dev->fmr_page_mask);
        buf->key = cpu_to_be32(req->fmr->fmr->rkey);
        buf->len = cpu_to_be32(len);

        ret = 0;

out:
        kfree(dma_pages);

        return ret;
}

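/*
 * DMA-map the command's scatterlist and fill in the SRP data
 * descriptor: a single direct descriptor if everything maps (or
 * coalesces) to one entry or the FMR mapping succeeds, otherwise an
 * indirect descriptor with one entry per mapped scatterlist element.
 * Returns the length of the resulting SRP_CMD IU, or a negative errno.
 */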
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
                        struct srp_request *req)
{
        struct scatterlist *scat;
        struct srp_cmd *cmd = req->cmd->buf;
        int len, nents, count;
        u8 fmt = SRP_DATA_DESC_DIRECT;
        struct srp_device *dev;
        struct ib_device *ibdev;

        if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
                return sizeof (struct srp_cmd);

        if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
            scmnd->sc_data_direction != DMA_TO_DEVICE) {
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Unhandled data direction %d\n",
                             scmnd->sc_data_direction);
                return -EINVAL;
        }

        nents = scsi_sg_count(scmnd);
        scat  = scsi_sglist(scmnd);

        dev = target->srp_host->srp_dev;
        ibdev = dev->dev;

        count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);

        fmt = SRP_DATA_DESC_DIRECT;
        len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

        if (count == 1) {
                /*
                 * The midlayer only generated a single gather/scatter
                 * entry, or DMA mapping coalesced everything to a
                 * single entry.  So a direct descriptor along with
                 * the DMA MR suffices.
                 */
                struct srp_direct_buf *buf = (void *) cmd->add_data;

                buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
                buf->key = cpu_to_be32(dev->mr->rkey);
                buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
        } else if (srp_map_fmr(target, scat, count, req,
                               (void *) cmd->add_data)) {
                /*
                 * FMR mapping failed, and the scatterlist has more
                 * than one entry.  Generate an indirect memory
                 * descriptor.
                 */
                struct srp_indirect_buf *buf = (void *) cmd->add_data;
                struct scatterlist *sg;
                u32 datalen = 0;
                int i;

                fmt = SRP_DATA_DESC_INDIRECT;
                len = sizeof (struct srp_cmd) +
                        sizeof (struct srp_indirect_buf) +
                        count * sizeof (struct srp_direct_buf);

                scsi_for_each_sg(scmnd, sg, count, i) {
                        unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

                        buf->desc_list[i].va  =
                                cpu_to_be64(ib_sg_dma_address(ibdev, sg));
                        buf->desc_list[i].key =
                                cpu_to_be32(dev->mr->rkey);
                        buf->desc_list[i].len = cpu_to_be32(dma_len);
                        datalen += dma_len;
                }

                if (scmnd->sc_data_direction == DMA_TO_DEVICE)
                        cmd->data_out_desc_cnt = count;
                else
                        cmd->data_in_desc_cnt = count;

                buf->table_desc.va  =
                        cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
                buf->table_desc.key =
                        cpu_to_be32(target->srp_host->srp_dev->mr->rkey);
                buf->table_desc.len =
                        cpu_to_be32(count * sizeof (struct srp_direct_buf));

                buf->len = cpu_to_be32(datalen);
        }

        if (scmnd->sc_data_direction == DMA_TO_DEVICE)
                cmd->buf_fmt = fmt << 4;
        else
                cmd->buf_fmt = fmt;

        return len;
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.  Lock cannot be dropped between call here and
 * call to __srp_post_send().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
                                      enum srp_iu_type iu_type)
{
        s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
        struct srp_iu *iu;

        srp_send_completion(target->send_cq, target);

        if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
                return NULL;

        /* Initiator responses to target requests do not consume credits */
        if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
                ++target->zero_req_lim;
                return NULL;
        }

        iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
        iu->type = iu_type;
        return iu;
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
static int __srp_post_send(struct srp_target_port *target,
                           struct srp_iu *iu, int len)
{
        struct ib_sge list;
        struct ib_send_wr wr, *bad_wr;
        int ret = 0;

        list.addr   = iu->dma;
        list.length = len;
        list.lkey   = target->srp_host->srp_dev->mr->lkey;

        wr.next       = NULL;
        wr.wr_id      = target->tx_head & SRP_SQ_MASK;
        wr.sg_list    = &list;
        wr.num_sge    = 1;
        wr.opcode     = IB_WR_SEND;
        wr.send_flags = IB_SEND_SIGNALED;

        ret = ib_post_send(target->qp, &wr, &bad_wr);

        if (!ret) {
                ++target->tx_head;
                if (iu->type != SRP_IU_RSP)
                        --target->req_lim;
        }

        return ret;
}

static int srp_post_recv(struct srp_target_port *target)
{
        unsigned long flags;
        struct srp_iu *iu;
        struct ib_sge list;
        struct ib_recv_wr wr, *bad_wr;
        unsigned int next;
        int ret;

        spin_lock_irqsave(target->scsi_host->host_lock, flags);

        next     = target->rx_head & SRP_RQ_MASK;
        wr.wr_id = next;
        iu       = target->rx_ring[next];

        list.addr   = iu->dma;
        list.length = iu->size;
        list.lkey   = target->srp_host->srp_dev->mr->lkey;

        wr.next     = NULL;
        wr.sg_list  = &list;
        wr.num_sge  = 1;

        ret = ib_post_recv(target->qp, &wr, &bad_wr);
        if (!ret)
                ++target->rx_head;

        spin_unlock_irqrestore(target->scsi_host->host_lock, flags);

        return ret;
}

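/*
 * Handle an SRP_RSP IU: fold the request limit delta into req_lim,
 * then either complete the matching task management request or finish
 * the SCSI command (copying sense data and residual counts if present).
 */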
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
        struct srp_request *req;
        struct scsi_cmnd *scmnd;
        unsigned long flags;
        s32 delta;

        delta = (s32) be32_to_cpu(rsp->req_lim_delta);

        spin_lock_irqsave(target->scsi_host->host_lock, flags);

        target->req_lim += delta;

        req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

        if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
                if (be32_to_cpu(rsp->resp_data_len) < 4)
                        req->tsk_status = -1;
                else
                        req->tsk_status = rsp->data[3];
                complete(&req->done);
        } else {
                scmnd = req->scmnd;
                if (!scmnd)
                        shost_printk(KERN_ERR, target->scsi_host,
                                     "Null scmnd for RSP w/tag %016llx\n",
                                     (unsigned long long) rsp->tag);
                scmnd->result = rsp->status;

                if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
                        memcpy(scmnd->sense_buffer, rsp->data +
                               be32_to_cpu(rsp->resp_data_len),
                               min_t(int, be32_to_cpu(rsp->sense_data_len),
                                     SCSI_SENSE_BUFFERSIZE));
                }

                if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
                        scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
                else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
                        scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

                if (!req->tsk_mgmt) {
                        scmnd->host_scribble = (void *) -1L;
                        scmnd->scsi_done(scmnd);

                        srp_remove_req(target, req);
                } else
                        req->cmd_done = 1;
        }

        spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}

static int srp_response_common(struct srp_target_port *target, s32 req_delta,
                               void *rsp, int len)
{
        struct ib_device *dev;
        unsigned long flags;
        struct srp_iu *iu;
        int err = 1;

        dev = target->srp_host->srp_dev->dev;

        spin_lock_irqsave(target->scsi_host->host_lock, flags);
        target->req_lim += req_delta;

        iu = __srp_get_tx_iu(target, SRP_IU_RSP);
        if (!iu) {
                shost_printk(KERN_ERR, target->scsi_host, PFX
                             "no IU available to send response\n");
                goto out;
        }

        ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
        memcpy(iu->buf, rsp, len);
        ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

        err = __srp_post_send(target, iu, len);
        if (err)
                shost_printk(KERN_ERR, target->scsi_host, PFX
                             "unable to post response: %d\n", err);

out:
        spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
        return err;
}

static void srp_process_cred_req(struct srp_target_port *target,
                                 struct srp_cred_req *req)
{
        struct srp_cred_rsp rsp = {
                .opcode = SRP_CRED_RSP,
                .tag = req->tag,
        };
        s32 delta = be32_to_cpu(req->req_lim_delta);

        if (srp_response_common(target, delta, &rsp, sizeof rsp))
                shost_printk(KERN_ERR, target->scsi_host, PFX
                             "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_target_port *target,
                                struct srp_aer_req *req)
{
        struct srp_aer_rsp rsp = {
                .opcode = SRP_AER_RSP,
                .tag = req->tag,
        };
        s32 delta = be32_to_cpu(req->req_lim_delta);

        shost_printk(KERN_ERR, target->scsi_host, PFX
                     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

        if (srp_response_common(target, delta, &rsp, sizeof rsp))
                shost_printk(KERN_ERR, target->scsi_host, PFX
                             "problems processing SRP_AER_REQ\n");
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
        struct ib_device *dev;
        struct srp_iu *iu;
        int res;
        u8 opcode;

        iu = target->rx_ring[wc->wr_id];

        dev = target->srp_host->srp_dev->dev;
        ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
                                   DMA_FROM_DEVICE);

        opcode = *(u8 *) iu->buf;

        if (0) {
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "recv completion, opcode 0x%02x\n", opcode);
                print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
                               iu->buf, wc->byte_len, true);
        }

        switch (opcode) {
        case SRP_RSP:
                srp_process_rsp(target, iu->buf);
                break;

        case SRP_CRED_REQ:
                srp_process_cred_req(target, iu->buf);
                break;

        case SRP_AER_REQ:
                srp_process_aer_req(target, iu->buf);
                break;

        case SRP_T_LOGOUT:
                /* XXX Handle target logout */
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Got target logout request\n");
                break;

        default:
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Unhandled SRP opcode 0x%02x\n", opcode);
                break;
        }

        ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
                                      DMA_FROM_DEVICE);

        res = srp_post_recv(target);
        if (res != 0)
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "Recv failed with error code %d\n", res);
}

static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
        struct srp_target_port *target = target_ptr;
        struct ib_wc wc;

        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        while (ib_poll_cq(cq, 1, &wc) > 0) {
                if (wc.status) {
                        shost_printk(KERN_ERR, target->scsi_host,
                                     PFX "failed receive status %d\n",
                                     wc.status);
                        target->qp_in_error = 1;
                        break;
                }

                srp_handle_recv(target, &wc);
        }
}

static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
        struct srp_target_port *target = target_ptr;
        struct ib_wc wc;

        while (ib_poll_cq(cq, 1, &wc) > 0) {
                if (wc.status) {
                        shost_printk(KERN_ERR, target->scsi_host,
                                     PFX "failed send status %d\n",
                                     wc.status);
                        target->qp_in_error = 1;
                        break;
                }

                ++target->tx_tail;
        }
}

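/*
 * queuecommand path: grab a TX IU, build the SRP_CMD IU for this SCSI
 * command, map its data buffer and post the send.  Returns
 * SCSI_MLQUEUE_HOST_BUSY if no IU or credit is currently available.
 */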
static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
                            void (*done)(struct scsi_cmnd *))
{
        struct srp_target_port *target = host_to_target(scmnd->device->host);
        struct srp_request *req;
        struct srp_iu *iu;
        struct srp_cmd *cmd;
        struct ib_device *dev;
        int len;

        if (target->state == SRP_TARGET_CONNECTING)
                goto err;

        if (target->state == SRP_TARGET_DEAD ||
            target->state == SRP_TARGET_REMOVED) {
                scmnd->result = DID_BAD_TARGET << 16;
                done(scmnd);
                return 0;
        }

        iu = __srp_get_tx_iu(target, SRP_IU_CMD);
        if (!iu)
                goto err;

        dev = target->srp_host->srp_dev->dev;
        ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
                                   DMA_TO_DEVICE);

        req = list_first_entry(&target->free_reqs, struct srp_request, list);

        scmnd->scsi_done     = done;
        scmnd->result        = 0;
        scmnd->host_scribble = (void *) (long) req->index;

        cmd = iu->buf;
        memset(cmd, 0, sizeof *cmd);

        cmd->opcode = SRP_CMD;
        cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
        cmd->tag    = req->index;
        memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

        req->scmnd    = scmnd;
        req->cmd      = iu;
        req->cmd_done = 0;
        req->tsk_mgmt = NULL;

        len = srp_map_data(scmnd, target, req);
        if (len < 0) {
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "Failed to map data\n");
                goto err;
        }

        ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
                                      DMA_TO_DEVICE);

        if (__srp_post_send(target, iu, len)) {
                shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
                goto err_unmap;
        }

        list_move_tail(&req->list, &target->req_queue);

        return 0;

err_unmap:
        srp_unmap_data(scmnd, target, req);

err:
        return SCSI_MLQUEUE_HOST_BUSY;
}

static DEF_SCSI_QCMD(srp_queuecommand)

static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
        int i;

        for (i = 0; i < SRP_RQ_SIZE; ++i) {
                target->rx_ring[i] = srp_alloc_iu(target->srp_host,
                                                  target->max_ti_iu_len,
                                                  GFP_KERNEL, DMA_FROM_DEVICE);
                if (!target->rx_ring[i])
                        goto err;
        }

        for (i = 0; i < SRP_SQ_SIZE; ++i) {
                target->tx_ring[i] = srp_alloc_iu(target->srp_host,
                                                  srp_max_iu_len,
                                                  GFP_KERNEL, DMA_TO_DEVICE);
                if (!target->tx_ring[i])
                        goto err;
        }

        return 0;

err:
        for (i = 0; i < SRP_RQ_SIZE; ++i) {
                srp_free_iu(target->srp_host, target->rx_ring[i]);
                target->rx_ring[i] = NULL;
        }

        for (i = 0; i < SRP_SQ_SIZE; ++i) {
                srp_free_iu(target->srp_host, target->tx_ring[i]);
                target->tx_ring[i] = NULL;
        }

        return -ENOMEM;
}

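/*
 * Decode a CM REJ: translate CM/port redirects and stale-connection
 * rejects into the SRP_*_REDIRECT / SRP_STALE_CONN status codes that
 * srp_connect_target() acts on; everything else becomes -ECONNRESET.
 */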
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event,
                               struct srp_target_port *target)
{
        struct Scsi_Host *shost = target->scsi_host;
        struct ib_class_port_info *cpi;
        int opcode;

        switch (event->param.rej_rcvd.reason) {
        case IB_CM_REJ_PORT_CM_REDIRECT:
                cpi = event->param.rej_rcvd.ari;
                target->path.dlid = cpi->redirect_lid;
                target->path.pkey = cpi->redirect_pkey;
                cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
                memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

                target->status = target->path.dlid ?
                        SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
                break;

        case IB_CM_REJ_PORT_REDIRECT:
                if (srp_target_is_topspin(target)) {
                        /*
                         * Topspin/Cisco SRP gateways incorrectly send
                         * reject reason code 25 when they mean 24
                         * (port redirect).
                         */
                        memcpy(target->path.dgid.raw,
                               event->param.rej_rcvd.ari, 16);

                        shost_printk(KERN_DEBUG, shost,
                                     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
                                     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
                                     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

                        target->status = SRP_PORT_REDIRECT;
                } else {
                        shost_printk(KERN_WARNING, shost,
                                     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
                        target->status = -ECONNRESET;
                }
                break;

        case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
                shost_printk(KERN_WARNING, shost,
                            "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
                target->status = -ECONNRESET;
                break;

        case IB_CM_REJ_CONSUMER_DEFINED:
                opcode = *(u8 *) event->private_data;
                if (opcode == SRP_LOGIN_REJ) {
                        struct srp_login_rej *rej = event->private_data;
                        u32 reason = be32_to_cpu(rej->reason);

                        if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
                                shost_printk(KERN_WARNING, shost,
                                             PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
                        else
                                shost_printk(KERN_WARNING, shost,
                                            PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
                } else
                        shost_printk(KERN_WARNING, shost,
                                     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
                                     " opcode 0x%02x\n", opcode);
                target->status = -ECONNRESET;
                break;

        case IB_CM_REJ_STALE_CONN:
                shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
                target->status = SRP_STALE_CONN;
                break;

        default:
                shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
                             event->param.rej_rcvd.reason);
                target->status = -ECONNRESET;
        }
}

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        struct srp_target_port *target = cm_id->context;
        struct ib_qp_attr *qp_attr = NULL;
        int attr_mask = 0;
        int comp = 0;
        int opcode = 0;
        int i;

        switch (event->event) {
        case IB_CM_REQ_ERROR:
                shost_printk(KERN_DEBUG, target->scsi_host,
                             PFX "Sending CM REQ failed\n");
                comp = 1;
                target->status = -ECONNRESET;
                break;

        case IB_CM_REP_RECEIVED:
                comp = 1;
                opcode = *(u8 *) event->private_data;

                if (opcode == SRP_LOGIN_RSP) {
                        struct srp_login_rsp *rsp = event->private_data;

                        target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
                        target->req_lim       = be32_to_cpu(rsp->req_lim_delta);

                        /*
                         * Reserve credits for task management so we don't
                         * bounce requests back to the SCSI mid-layer.
                         */
                        target->scsi_host->can_queue
                                = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
                                      target->scsi_host->can_queue);
                } else {
                        shost_printk(KERN_WARNING, target->scsi_host,
                                    PFX "Unhandled RSP opcode %#x\n", opcode);
                        target->status = -ECONNRESET;
                        break;
                }

                if (!target->rx_ring[0]) {
                        target->status = srp_alloc_iu_bufs(target);
                        if (target->status)
                                break;
                }

                qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
                if (!qp_attr) {
                        target->status = -ENOMEM;
                        break;
                }

                qp_attr->qp_state = IB_QPS_RTR;
                target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
                if (target->status)
                        break;

                target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
                if (target->status)
                        break;

                for (i = 0; i < SRP_RQ_SIZE; i++) {
                        target->status = srp_post_recv(target);
                        if (target->status)
                                break;
                }
                if (target->status)
                        break;

                qp_attr->qp_state = IB_QPS_RTS;
                target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
                if (target->status)
                        break;

                target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
                if (target->status)
                        break;

                target->status = ib_send_cm_rtu(cm_id, NULL, 0);
                if (target->status)
                        break;

                break;

        case IB_CM_REJ_RECEIVED:
                shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
                comp = 1;

                srp_cm_rej_handler(cm_id, event, target);
                break;

        case IB_CM_DREQ_RECEIVED:
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "DREQ received - connection closed\n");
                if (ib_send_cm_drep(cm_id, NULL, 0))
                        shost_printk(KERN_ERR, target->scsi_host,
                                     PFX "Sending CM DREP failed\n");
                break;

        case IB_CM_TIMEWAIT_EXIT:
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "connection closed\n");

                comp = 1;
                target->status = 0;
                break;

        case IB_CM_MRA_RECEIVED:
        case IB_CM_DREQ_ERROR:
        case IB_CM_DREP_RECEIVED:
                break;

        default:
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Unhandled CM event %d\n", event->event);
                break;
        }

        if (comp)
                complete(&target->done);

        kfree(qp_attr);

        return 0;
}

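/*
 * Send an SRP_TSK_MGMT IU (abort or LUN reset) for @req and wait up to
 * SRP_ABORT_TIMEOUT_MS for the target's response; returns 0 on success
 * and -1 on any failure or timeout.
 */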
1444 static int srp_send_tsk_mgmt(struct srp_target_port *target,
1445                              struct srp_request *req, u8 func)
1446 {
1447         struct ib_device *dev = target->srp_host->srp_dev->dev;
1448         struct srp_iu *iu;
1449         struct srp_tsk_mgmt *tsk_mgmt;
1450
1451         spin_lock_irq(target->scsi_host->host_lock);
1452
1453         if (target->state == SRP_TARGET_DEAD ||
1454             target->state == SRP_TARGET_REMOVED) {
1455                 req->scmnd->result = DID_BAD_TARGET << 16;
1456                 goto out;
1457         }
1458
1459         init_completion(&req->done);
1460
1461         iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
1462         if (!iu)
1463                 goto out;
1464
1465         ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
1466                                    DMA_TO_DEVICE);
1467         tsk_mgmt = iu->buf;
1468         memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1469
1470         tsk_mgmt->opcode        = SRP_TSK_MGMT;
1471         tsk_mgmt->lun           = cpu_to_be64((u64) req->scmnd->device->lun << 48);
1472         tsk_mgmt->tag           = req->index | SRP_TAG_TSK_MGMT;
1473         tsk_mgmt->tsk_mgmt_func = func;
1474         tsk_mgmt->task_tag      = req->index;
1475
1476         ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
1477                                       DMA_TO_DEVICE);
1478         if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
1479                 goto out;
1480
1481         req->tsk_mgmt = iu;
1482
1483         spin_unlock_irq(target->scsi_host->host_lock);
1484
1485         if (!wait_for_completion_timeout(&req->done,
1486                                          msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
1487                 return -1;
1488
1489         return 0;
1490
1491 out:
1492         spin_unlock_irq(target->scsi_host->host_lock);
1493         return -1;
1494 }
1495
1496 static int srp_find_req(struct srp_target_port *target,
1497                         struct scsi_cmnd *scmnd,
1498                         struct srp_request **req)
1499 {
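        /*
         * scmnd->host_scribble holds the index of the command's slot in
         * target->req_ring; a value of -1 means there is no request to find.
         */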
1500         if (scmnd->host_scribble == (void *) -1L)
1501                 return -1;
1502
1503         *req = &target->req_ring[(long) scmnd->host_scribble];
1504
1505         return 0;
1506 }
1507
1508 static int srp_abort(struct scsi_cmnd *scmnd)
1509 {
1510         struct srp_target_port *target = host_to_target(scmnd->device->host);
1511         struct srp_request *req;
1512         int ret = SUCCESS;
1513
1514         shost_printk(KERN_ERR, target->scsi_host, PFX "SRP abort called\n");
1515
1516         if (target->qp_in_error)
1517                 return FAILED;
1518         if (srp_find_req(target, scmnd, &req))
1519                 return FAILED;
1520         if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
1521                 return FAILED;
1522
1523         spin_lock_irq(target->scsi_host->host_lock);
1524
1525         if (req->cmd_done) {
1526                 srp_remove_req(target, req);
1527                 scmnd->scsi_done(scmnd);
1528         } else if (!req->tsk_status) {
1529                 srp_remove_req(target, req);
1530                 scmnd->result = DID_ABORT << 16;
1531         } else
1532                 ret = FAILED;
1533
1534         spin_unlock_irq(target->scsi_host->host_lock);
1535
1536         return ret;
1537 }
1538
1539 static int srp_reset_device(struct scsi_cmnd *scmnd)
1540 {
1541         struct srp_target_port *target = host_to_target(scmnd->device->host);
1542         struct srp_request *req, *tmp;
1543
1544         shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_device called\n");
1545
1546         if (target->qp_in_error)
1547                 return FAILED;
1548         if (srp_find_req(target, scmnd, &req))
1549                 return FAILED;
1550         if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
1551                 return FAILED;
1552         if (req->tsk_status)
1553                 return FAILED;
1554
1555         spin_lock_irq(target->scsi_host->host_lock);
1556
1557         list_for_each_entry_safe(req, tmp, &target->req_queue, list)
1558                 if (req->scmnd->device == scmnd->device)
1559                         srp_reset_req(target, req);
1560
1561         spin_unlock_irq(target->scsi_host->host_lock);
1562
1563         return SUCCESS;
1564 }
1565
1566 static int srp_reset_host(struct scsi_cmnd *scmnd)
1567 {
1568         struct srp_target_port *target = host_to_target(scmnd->device->host);
1569         int ret = FAILED;
1570
1571         shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
1572
1573         if (!srp_reconnect_target(target))
1574                 ret = SUCCESS;
1575
1576         return ret;
1577 }
1578
1579 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
1580                            char *buf)
1581 {
1582         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1583
1584         if (target->state == SRP_TARGET_DEAD ||
1585             target->state == SRP_TARGET_REMOVED)
1586                 return -ENODEV;
1587
1588         return sprintf(buf, "0x%016llx\n",
1589                        (unsigned long long) be64_to_cpu(target->id_ext));
1590 }
1591
1592 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
1593                              char *buf)
1594 {
1595         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1596
1597         if (target->state == SRP_TARGET_DEAD ||
1598             target->state == SRP_TARGET_REMOVED)
1599                 return -ENODEV;
1600
1601         return sprintf(buf, "0x%016llx\n",
1602                        (unsigned long long) be64_to_cpu(target->ioc_guid));
1603 }
1604
1605 static ssize_t show_service_id(struct device *dev,
1606                                struct device_attribute *attr, char *buf)
1607 {
1608         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1609
1610         if (target->state == SRP_TARGET_DEAD ||
1611             target->state == SRP_TARGET_REMOVED)
1612                 return -ENODEV;
1613
1614         return sprintf(buf, "0x%016llx\n",
1615                        (unsigned long long) be64_to_cpu(target->service_id));
1616 }
1617
1618 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
1619                          char *buf)
1620 {
1621         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1622
1623         if (target->state == SRP_TARGET_DEAD ||
1624             target->state == SRP_TARGET_REMOVED)
1625                 return -ENODEV;
1626
1627         return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
1628 }
1629
1630 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
1631                          char *buf)
1632 {
1633         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1634
1635         if (target->state == SRP_TARGET_DEAD ||
1636             target->state == SRP_TARGET_REMOVED)
1637                 return -ENODEV;
1638
1639         return sprintf(buf, "%pI6\n", target->path.dgid.raw);
1640 }
1641
1642 static ssize_t show_orig_dgid(struct device *dev,
1643                               struct device_attribute *attr, char *buf)
1644 {
1645         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1646
1647         if (target->state == SRP_TARGET_DEAD ||
1648             target->state == SRP_TARGET_REMOVED)
1649                 return -ENODEV;
1650
1651         return sprintf(buf, "%pI6\n", target->orig_dgid);
1652 }
1653
1654 static ssize_t show_req_lim(struct device *dev,
1655                             struct device_attribute *attr, char *buf)
1656 {
1657         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1658
1659         if (target->state == SRP_TARGET_DEAD ||
1660             target->state == SRP_TARGET_REMOVED)
1661                 return -ENODEV;
1662
1663         return sprintf(buf, "%d\n", target->req_lim);
1664 }
1665
1666 static ssize_t show_zero_req_lim(struct device *dev,
1667                                  struct device_attribute *attr, char *buf)
1668 {
1669         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1670
1671         if (target->state == SRP_TARGET_DEAD ||
1672             target->state == SRP_TARGET_REMOVED)
1673                 return -ENODEV;
1674
1675         return sprintf(buf, "%d\n", target->zero_req_lim);
1676 }
1677
1678 static ssize_t show_local_ib_port(struct device *dev,
1679                                   struct device_attribute *attr, char *buf)
1680 {
1681         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1682
1683         return sprintf(buf, "%d\n", target->srp_host->port);
1684 }
1685
1686 static ssize_t show_local_ib_device(struct device *dev,
1687                                     struct device_attribute *attr, char *buf)
1688 {
1689         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1690
1691         return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
1692 }
1693
1694 static DEVICE_ATTR(id_ext,          S_IRUGO, show_id_ext,          NULL);
1695 static DEVICE_ATTR(ioc_guid,        S_IRUGO, show_ioc_guid,        NULL);
1696 static DEVICE_ATTR(service_id,      S_IRUGO, show_service_id,      NULL);
1697 static DEVICE_ATTR(pkey,            S_IRUGO, show_pkey,            NULL);
1698 static DEVICE_ATTR(dgid,            S_IRUGO, show_dgid,            NULL);
1699 static DEVICE_ATTR(orig_dgid,       S_IRUGO, show_orig_dgid,       NULL);
1700 static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
1701 static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,    NULL);
1702 static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
1703 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
1704
1705 static struct device_attribute *srp_host_attrs[] = {
1706         &dev_attr_id_ext,
1707         &dev_attr_ioc_guid,
1708         &dev_attr_service_id,
1709         &dev_attr_pkey,
1710         &dev_attr_dgid,
1711         &dev_attr_orig_dgid,
1712         &dev_attr_req_lim,
1713         &dev_attr_zero_req_lim,
1714         &dev_attr_local_ib_port,
1715         &dev_attr_local_ib_device,
1716         NULL
1717 };
1718
1719 static struct scsi_host_template srp_template = {
1720         .module                         = THIS_MODULE,
1721         .name                           = "InfiniBand SRP initiator",
1722         .proc_name                      = DRV_NAME,
1723         .info                           = srp_target_info,
1724         .queuecommand                   = srp_queuecommand,
1725         .eh_abort_handler               = srp_abort,
1726         .eh_device_reset_handler        = srp_reset_device,
1727         .eh_host_reset_handler          = srp_reset_host,
1728         .can_queue                      = SRP_CMD_SQ_SIZE,
1729         .this_id                        = -1,
1730         .cmd_per_lun                    = SRP_CMD_SQ_SIZE,
1731         .use_clustering                 = ENABLE_CLUSTERING,
1732         .shost_attrs                    = srp_host_attrs
1733 };
1734
1735 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
1736 {
1737         struct srp_rport_identifiers ids;
1738         struct srp_rport *rport;
1739
1740         sprintf(target->target_name, "SRP.T10:%016llX",
1741                  (unsigned long long) be64_to_cpu(target->id_ext));
1742
1743         if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
1744                 return -ENODEV;
1745
1746         memcpy(ids.port_id, &target->id_ext, 8);
1747         memcpy(ids.port_id + 8, &target->ioc_guid, 8);
1748         ids.roles = SRP_RPORT_ROLE_TARGET;
1749         rport = srp_rport_add(target->scsi_host, &ids);
1750         if (IS_ERR(rport)) {
1751                 scsi_remove_host(target->scsi_host);
1752                 return PTR_ERR(rport);
1753         }
1754
1755         spin_lock(&host->target_lock);
1756         list_add_tail(&target->list, &host->target_list);
1757         spin_unlock(&host->target_lock);
1758
1759         target->state = SRP_TARGET_LIVE;
1760
1761         scsi_scan_target(&target->scsi_host->shost_gendev,
1762                          0, target->scsi_id, SCAN_WILD_CARD, 0);
1763
1764         return 0;
1765 }
1766
1767 static void srp_release_dev(struct device *dev)
1768 {
1769         struct srp_host *host =
1770                 container_of(dev, struct srp_host, dev);
1771
1772         complete(&host->released);
1773 }
1774
1775 static struct class srp_class = {
1776         .name    = "infiniband_srp",
1777         .dev_release = srp_release_dev
1778 };
1779
1780 /*
1781  * Target ports are added by writing
1782  *
1783  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
1784  *     pkey=<P_Key>,service_id=<service ID>
1785  *
1786  * to the add_target sysfs attribute.
1787  */
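/*
 * For example (hypothetical HCA "mthca0", port 1; the hex identifiers below
 * are illustrative placeholders only):
 *
 *     echo "id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4" > /sys/class/infiniband_srp/srp-mthca0-1/add_target
 */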
1788 enum {
1789         SRP_OPT_ERR             = 0,
1790         SRP_OPT_ID_EXT          = 1 << 0,
1791         SRP_OPT_IOC_GUID        = 1 << 1,
1792         SRP_OPT_DGID            = 1 << 2,
1793         SRP_OPT_PKEY            = 1 << 3,
1794         SRP_OPT_SERVICE_ID      = 1 << 4,
1795         SRP_OPT_MAX_SECT        = 1 << 5,
1796         SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
1797         SRP_OPT_IO_CLASS        = 1 << 7,
1798         SRP_OPT_INITIATOR_EXT   = 1 << 8,
1799         SRP_OPT_ALL             = (SRP_OPT_ID_EXT       |
1800                                    SRP_OPT_IOC_GUID     |
1801                                    SRP_OPT_DGID         |
1802                                    SRP_OPT_PKEY         |
1803                                    SRP_OPT_SERVICE_ID),
1804 };
1805
1806 static const match_table_t srp_opt_tokens = {
1807         { SRP_OPT_ID_EXT,               "id_ext=%s"             },
1808         { SRP_OPT_IOC_GUID,             "ioc_guid=%s"           },
1809         { SRP_OPT_DGID,                 "dgid=%s"               },
1810         { SRP_OPT_PKEY,                 "pkey=%x"               },
1811         { SRP_OPT_SERVICE_ID,           "service_id=%s"         },
1812         { SRP_OPT_MAX_SECT,             "max_sect=%d"           },
1813         { SRP_OPT_MAX_CMD_PER_LUN,      "max_cmd_per_lun=%d"    },
1814         { SRP_OPT_IO_CLASS,             "io_class=%x"           },
1815         { SRP_OPT_INITIATOR_EXT,        "initiator_ext=%s"      },
1816         { SRP_OPT_ERR,                  NULL                    }
1817 };
1818
1819 static int srp_parse_options(const char *buf, struct srp_target_port *target)
1820 {
1821         char *options, *sep_opt;
1822         char *p;
1823         char dgid[3];
1824         substring_t args[MAX_OPT_ARGS];
1825         int opt_mask = 0;
1826         int token;
1827         int ret = -EINVAL;
1828         int i;
1829
1830         options = kstrdup(buf, GFP_KERNEL);
1831         if (!options)
1832                 return -ENOMEM;
1833
1834         sep_opt = options;
1835         while ((p = strsep(&sep_opt, ",")) != NULL) {
1836                 if (!*p)
1837                         continue;
1838
1839                 token = match_token(p, srp_opt_tokens, args);
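                /*
                 * Each option token is a distinct bit (see the SRP_OPT_*
                 * enum above), so OR-ing the tokens together records which
                 * options have been seen.
                 */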
1840                 opt_mask |= token;
1841
1842                 switch (token) {
1843                 case SRP_OPT_ID_EXT:
1844                         p = match_strdup(args);
1845                         if (!p) {
1846                                 ret = -ENOMEM;
1847                                 goto out;
1848                         }
1849                         target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
1850                         kfree(p);
1851                         break;
1852
1853                 case SRP_OPT_IOC_GUID:
1854                         p = match_strdup(args);
1855                         if (!p) {
1856                                 ret = -ENOMEM;
1857                                 goto out;
1858                         }
1859                         target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
1860                         kfree(p);
1861                         break;
1862
1863                 case SRP_OPT_DGID:
1864                         p = match_strdup(args);
1865                         if (!p) {
1866                                 ret = -ENOMEM;
1867                                 goto out;
1868                         }
1869                         if (strlen(p) != 32) {
1870                                 printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
1871                                 kfree(p);
1872                                 goto out;
1873                         }
1874
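                        /*
                         * The destination GID was given as 32 hex digits;
                         * convert it two digits at a time into the 16 raw
                         * GID bytes.
                         */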
1875                         for (i = 0; i < 16; ++i) {
1876                                 strlcpy(dgid, p + i * 2, 3);
1877                                 target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
1878                         }
1879                         kfree(p);
1880                         memcpy(target->orig_dgid, target->path.dgid.raw, 16);
1881                         break;
1882
1883                 case SRP_OPT_PKEY:
1884                         if (match_hex(args, &token)) {
1885                                 printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
1886                                 goto out;
1887                         }
1888                         target->path.pkey = cpu_to_be16(token);
1889                         break;
1890
1891                 case SRP_OPT_SERVICE_ID:
1892                         p = match_strdup(args);
1893                         if (!p) {
1894                                 ret = -ENOMEM;
1895                                 goto out;
1896                         }
1897                         target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
1898                         target->path.service_id = target->service_id;
1899                         kfree(p);
1900                         break;
1901
1902                 case SRP_OPT_MAX_SECT:
1903                         if (match_int(args, &token)) {
1904                                 printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
1905                                 goto out;
1906                         }
1907                         target->scsi_host->max_sectors = token;
1908                         break;
1909
1910                 case SRP_OPT_MAX_CMD_PER_LUN:
1911                         if (match_int(args, &token)) {
1912                                 printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
1913                                 goto out;
1914                         }
1915                         target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
1916                         break;
1917
1918                 case SRP_OPT_IO_CLASS:
1919                         if (match_hex(args, &token)) {
1920                                 printk(KERN_WARNING PFX "bad IO class parameter '%s'\n", p);
1921                                 goto out;
1922                         }
1923                         if (token != SRP_REV10_IB_IO_CLASS &&
1924                             token != SRP_REV16A_IB_IO_CLASS) {
1925                                 printk(KERN_WARNING PFX "unknown IO class parameter value"
1926                                        " %x specified (use %x or %x).\n",
1927                                        token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
1928                                 goto out;
1929                         }
1930                         target->io_class = token;
1931                         break;
1932
1933                 case SRP_OPT_INITIATOR_EXT:
1934                         p = match_strdup(args);
1935                         if (!p) {
1936                                 ret = -ENOMEM;
1937                                 goto out;
1938                         }
1939                         target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
1940                         kfree(p);
1941                         break;
1942
1943                 default:
1944                         printk(KERN_WARNING PFX "unknown parameter or missing value "
1945                                "'%s' in target creation request\n", p);
1946                         goto out;
1947                 }
1948         }
1949
1950         if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
1951                 ret = 0;
1952         else
1953                 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
1954                         if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
1955                             !(srp_opt_tokens[i].token & opt_mask))
1956                                 printk(KERN_WARNING PFX "target creation request is "
1957                                        "missing parameter '%s'\n",
1958                                        srp_opt_tokens[i].pattern);
1959
1960 out:
1961         kfree(options);
1962         return ret;
1963 }
1964
1965 static ssize_t srp_create_target(struct device *dev,
1966                                  struct device_attribute *attr,
1967                                  const char *buf, size_t count)
1968 {
1969         struct srp_host *host =
1970                 container_of(dev, struct srp_host, dev);
1971         struct Scsi_Host *target_host;
1972         struct srp_target_port *target;
1973         int ret;
1974         int i;
1975
1976         target_host = scsi_host_alloc(&srp_template,
1977                                       sizeof (struct srp_target_port));
1978         if (!target_host)
1979                 return -ENOMEM;
1980
1981         target_host->transportt = ib_srp_transport_template;
1982         target_host->max_lun     = SRP_MAX_LUN;
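        /* Limit CDB length to the size of the cdb field in the SRP_CMD IU. */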
1983         target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
1984
1985         target = host_to_target(target_host);
1986
1987         target->io_class   = SRP_REV16A_IB_IO_CLASS;
1988         target->scsi_host  = target_host;
1989         target->srp_host   = host;
1990
1991         INIT_LIST_HEAD(&target->free_reqs);
1992         INIT_LIST_HEAD(&target->req_queue);
1993         for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
1994                 target->req_ring[i].index = i;
1995                 list_add_tail(&target->req_ring[i].list, &target->free_reqs);
1996         }
1997
1998         ret = srp_parse_options(buf, target);
1999         if (ret)
2000                 goto err;
2001
2002         ib_query_gid(host->srp_dev->dev, host->port, 0, &target->path.sgid);
2003
2004         shost_printk(KERN_DEBUG, target->scsi_host, PFX
2005                      "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
2006                      "service_id %016llx dgid %pI6\n",
2007                (unsigned long long) be64_to_cpu(target->id_ext),
2008                (unsigned long long) be64_to_cpu(target->ioc_guid),
2009                be16_to_cpu(target->path.pkey),
2010                (unsigned long long) be64_to_cpu(target->service_id),
2011                target->path.dgid.raw);
2012
2013         ret = srp_create_target_ib(target);
2014         if (ret)
2015                 goto err;
2016
2017         ret = srp_new_cm_id(target);
2018         if (ret)
2019                 goto err_free;
2020
2021         target->qp_in_error = 0;
2022         ret = srp_connect_target(target);
2023         if (ret) {
2024                 shost_printk(KERN_ERR, target->scsi_host,
2025                              PFX "Connection failed\n");
2026                 goto err_cm_id;
2027         }
2028
2029         ret = srp_add_target(host, target);
2030         if (ret)
2031                 goto err_disconnect;
2032
2033         return count;
2034
2035 err_disconnect:
2036         srp_disconnect_target(target);
2037
2038 err_cm_id:
2039         ib_destroy_cm_id(target->cm_id);
2040
2041 err_free:
2042         srp_free_target_ib(target);
2043
2044 err:
2045         scsi_host_put(target_host);
2046
2047         return ret;
2048 }
2049
2050 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
2051
2052 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
2053                           char *buf)
2054 {
2055         struct srp_host *host = container_of(dev, struct srp_host, dev);
2056
2057         return sprintf(buf, "%s\n", host->srp_dev->dev->name);
2058 }
2059
2060 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
2061
2062 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
2063                          char *buf)
2064 {
2065         struct srp_host *host = container_of(dev, struct srp_host, dev);
2066
2067         return sprintf(buf, "%d\n", host->port);
2068 }
2069
2070 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
2071
2072 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
2073 {
2074         struct srp_host *host;
2075
2076         host = kzalloc(sizeof *host, GFP_KERNEL);
2077         if (!host)
2078                 return NULL;
2079
2080         INIT_LIST_HEAD(&host->target_list);
2081         spin_lock_init(&host->target_lock);
2082         init_completion(&host->released);
2083         host->srp_dev = device;
2084         host->port = port;
2085
2086         host->dev.class = &srp_class;
2087         host->dev.parent = device->dev->dma_device;
2088         dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
2089
2090         if (device_register(&host->dev))
2091                 goto free_host;
2092         if (device_create_file(&host->dev, &dev_attr_add_target))
2093                 goto err_class;
2094         if (device_create_file(&host->dev, &dev_attr_ibdev))
2095                 goto err_class;
2096         if (device_create_file(&host->dev, &dev_attr_port))
2097                 goto err_class;
2098
2099         return host;
2100
2101 err_class:
2102         device_unregister(&host->dev);
2103
2104 free_host:
2105         kfree(host);
2106
2107         return NULL;
2108 }
2109
2110 static void srp_add_one(struct ib_device *device)
2111 {
2112         struct srp_device *srp_dev;
2113         struct ib_device_attr *dev_attr;
2114         struct ib_fmr_pool_param fmr_param;
2115         struct srp_host *host;
2116         int s, e, p;
2117
2118         dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
2119         if (!dev_attr)
2120                 return;
2121
2122         if (ib_query_device(device, dev_attr)) {
2123                 printk(KERN_WARNING PFX "Query device failed for %s\n",
2124                        device->name);
2125                 goto free_attr;
2126         }
2127
2128         srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
2129         if (!srp_dev)
2130                 goto free_attr;
2131
2132         /*
2133          * Use the smallest page size supported by the HCA, down to a
2134          * minimum of 512 bytes (which is the smallest sector that a
2135          * SCSI command will ever carry).
2136          */
2137         srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
2138         srp_dev->fmr_page_size  = 1 << srp_dev->fmr_page_shift;
2139         srp_dev->fmr_page_mask  = ~((u64) srp_dev->fmr_page_size - 1);
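        /*
         * Worked example (hypothetical capability mask): if the HCA reports
         * page_size_cap = 0x001ff000, the smallest supported page size is
         * 4 KB, so ffs() - 1 = 12 and fmr_page_shift = max(9, 12) = 12,
         * giving fmr_page_size = 4096 and fmr_page_mask = ~0xfffULL.
         */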
2140
2141         INIT_LIST_HEAD(&srp_dev->dev_list);
2142
2143         srp_dev->dev = device;
2144         srp_dev->pd  = ib_alloc_pd(device);
2145         if (IS_ERR(srp_dev->pd))
2146                 goto free_dev;
2147
2148         srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
2149                                     IB_ACCESS_LOCAL_WRITE |
2150                                     IB_ACCESS_REMOTE_READ |
2151                                     IB_ACCESS_REMOTE_WRITE);
2152         if (IS_ERR(srp_dev->mr))
2153                 goto err_pd;
2154
2155         memset(&fmr_param, 0, sizeof fmr_param);
2156         fmr_param.pool_size         = SRP_FMR_POOL_SIZE;
2157         fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
2158         fmr_param.cache             = 1;
2159         fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
2160         fmr_param.page_shift        = srp_dev->fmr_page_shift;
2161         fmr_param.access            = (IB_ACCESS_LOCAL_WRITE |
2162                                        IB_ACCESS_REMOTE_WRITE |
2163                                        IB_ACCESS_REMOTE_READ);
2164
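        /*
         * The FMR pool is an optimization: if it cannot be created, continue
         * without one rather than failing device setup.
         */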
2165         srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
2166         if (IS_ERR(srp_dev->fmr_pool))
2167                 srp_dev->fmr_pool = NULL;
2168
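        /*
         * IB switches expose a single management port numbered 0; HCAs and
         * routers number their physical ports 1..phys_port_cnt.
         */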
2169         if (device->node_type == RDMA_NODE_IB_SWITCH) {
2170                 s = 0;
2171                 e = 0;
2172         } else {
2173                 s = 1;
2174                 e = device->phys_port_cnt;
2175         }
2176
2177         for (p = s; p <= e; ++p) {
2178                 host = srp_add_port(srp_dev, p);
2179                 if (host)
2180                         list_add_tail(&host->list, &srp_dev->dev_list);
2181         }
2182
2183         ib_set_client_data(device, &srp_client, srp_dev);
2184
2185         goto free_attr;
2186
2187 err_pd:
2188         ib_dealloc_pd(srp_dev->pd);
2189
2190 free_dev:
2191         kfree(srp_dev);
2192
2193 free_attr:
2194         kfree(dev_attr);
2195 }
2196
2197 static void srp_remove_one(struct ib_device *device)
2198 {
2199         struct srp_device *srp_dev;
2200         struct srp_host *host, *tmp_host;
2201         LIST_HEAD(target_list);
2202         struct srp_target_port *target, *tmp_target;
2203
2204         srp_dev = ib_get_client_data(device, &srp_client);
2205
2206         list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
2207                 device_unregister(&host->dev);
2208                 /*
2209                  * Wait for the sysfs entry to go away, so that no new
2210                  * target ports can be created.
2211                  */
2212                 wait_for_completion(&host->released);
2213
2214                 /*
2215                  * Mark all target ports as removed, so we stop queueing
2216                  * commands and don't try to reconnect.
2217                  */
2218                 spin_lock(&host->target_lock);
2219                 list_for_each_entry(target, &host->target_list, list) {
2220                         spin_lock_irq(target->scsi_host->host_lock);
2221                         target->state = SRP_TARGET_REMOVED;
2222                         spin_unlock_irq(target->scsi_host->host_lock);
2223                 }
2224                 spin_unlock(&host->target_lock);
2225
2226                 /*
2227                  * Wait for any reconnection tasks that may have
2228                  * started before we marked our target ports as
2229                  * removed, and any target port removal tasks.
2230                  */
2231                 flush_scheduled_work();
2232
2233                 list_for_each_entry_safe(target, tmp_target,
2234                                          &host->target_list, list) {
2235                         srp_remove_host(target->scsi_host);
2236                         scsi_remove_host(target->scsi_host);
2237                         srp_disconnect_target(target);
2238                         ib_destroy_cm_id(target->cm_id);
2239                         srp_free_target_ib(target);
2240                         scsi_host_put(target->scsi_host);
2241                 }
2242
2243                 kfree(host);
2244         }
2245
2246         if (srp_dev->fmr_pool)
2247                 ib_destroy_fmr_pool(srp_dev->fmr_pool);
2248         ib_dereg_mr(srp_dev->mr);
2249         ib_dealloc_pd(srp_dev->pd);
2250
2251         kfree(srp_dev);
2252 }
2253
2254 static struct srp_function_template ib_srp_transport_functions = {
2255 };
2256
2257 static int __init srp_init_module(void)
2258 {
2259         int ret;
2260
2261         BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE);
2262         BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);
2263
2264         if (srp_sg_tablesize > 255) {
2265                 printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
2266                 srp_sg_tablesize = 255;
2267         }
2268
2269         ib_srp_transport_template =
2270                 srp_attach_transport(&ib_srp_transport_functions);
2271         if (!ib_srp_transport_template)
2272                 return -ENOMEM;
2273
2274         srp_template.sg_tablesize = srp_sg_tablesize;
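        /*
         * The largest IU we send is an SRP_CMD carrying an indirect data
         * buffer descriptor plus one 16-byte descriptor per scatter/gather
         * entry.
         */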
2275         srp_max_iu_len = (sizeof (struct srp_cmd) +
2276                           sizeof (struct srp_indirect_buf) +
2277                           srp_sg_tablesize * 16);
2278
2279         ret = class_register(&srp_class);
2280         if (ret) {
2281                 printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
2282                 srp_release_transport(ib_srp_transport_template);
2283                 return ret;
2284         }
2285
2286         ib_sa_register_client(&srp_sa_client);
2287
2288         ret = ib_register_client(&srp_client);
2289         if (ret) {
2290                 printk(KERN_ERR PFX "couldn't register IB client\n");
2291                 srp_release_transport(ib_srp_transport_template);
2292                 ib_sa_unregister_client(&srp_sa_client);
2293                 class_unregister(&srp_class);
2294                 return ret;
2295         }
2296
2297         return 0;
2298 }
2299
2300 static void __exit srp_cleanup_module(void)
2301 {
2302         ib_unregister_client(&srp_client);
2303         ib_sa_unregister_client(&srp_sa_client);
2304         class_unregister(&srp_class);
2305         srp_release_transport(ib_srp_transport_template);
2306 }
2307
2308 module_init(srp_init_module);
2309 module_exit(srp_cleanup_module);