1 /*
2  * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
3  *
4  * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
5  *
6  * Copyright (C) IBM Corporation, 2008
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/dmapool.h>
28 #include <linux/delay.h>
29 #include <linux/interrupt.h>
30 #include <linux/kthread.h>
31 #include <linux/slab.h>
32 #include <linux/of.h>
33 #include <linux/pm.h>
34 #include <linux/stringify.h>
35 #include <asm/firmware.h>
36 #include <asm/irq.h>
37 #include <asm/vio.h>
38 #include <scsi/scsi.h>
39 #include <scsi/scsi_cmnd.h>
40 #include <scsi/scsi_host.h>
41 #include <scsi/scsi_device.h>
42 #include <scsi/scsi_tcq.h>
43 #include <scsi/scsi_transport_fc.h>
44 #include <scsi/scsi_bsg_fc.h>
45 #include "ibmvfc.h"
46
47 static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
48 static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
49 static unsigned int max_lun = IBMVFC_MAX_LUN;
50 static unsigned int max_targets = IBMVFC_MAX_TARGETS;
51 static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
52 static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
53 static unsigned int dev_loss_tmo = IBMVFC_DEV_LOSS_TMO;
54 static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
55 static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
56 static LIST_HEAD(ibmvfc_head);
57 static DEFINE_SPINLOCK(ibmvfc_driver_lock);
58 static struct scsi_transport_template *ibmvfc_transport_template;
59
60 MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
61 MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
62 MODULE_LICENSE("GPL");
63 MODULE_VERSION(IBMVFC_DRIVER_VERSION);
64
65 module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
66 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
67                  "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
68 module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
69 MODULE_PARM_DESC(default_timeout,
70                  "Default timeout in seconds for initialization and EH commands. "
71                  "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
72 module_param_named(max_requests, max_requests, uint, S_IRUGO);
73 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
74                  "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
75 module_param_named(max_lun, max_lun, uint, S_IRUGO);
76 MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
77                  "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
78 module_param_named(max_targets, max_targets, uint, S_IRUGO);
79 MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
80                  "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
81 module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
82 MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
83                  "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
84 module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(debug, "Enable driver debug information. "
86                  "[Default=" __stringify(IBMVFC_DEBUG) "]");
87 module_param_named(dev_loss_tmo, dev_loss_tmo, uint, S_IRUGO | S_IWUSR);
88 MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the FC "
89                  "transport should insulate the loss of a remote port. Once this "
90                  "value is exceeded, the scsi target is removed. "
91                  "[Default=" __stringify(IBMVFC_DEV_LOSS_TMO) "]");
92 module_param_named(log_level, log_level, uint, 0);
93 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
94                  "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
95
96 static const struct {
97         u16 status;
98         u16 error;
99         u8 result;
100         u8 retry;
101         int log;
102         char *name;
103 } cmd_status [] = {
104         { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
105         { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
106         { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
107         { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
108         { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
109         { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
110         { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
111         { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
112         { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
113         { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
114         { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
115         { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
116         { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
117         { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
118
119         { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
120         { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
121         { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
122         { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
123         { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
124         { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
125         { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
126         { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
127         { IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
128         { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
129
130         { IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
131         { IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
132         { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
133         { IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
134         { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
135         { IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
136         { IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
137         { IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
138         { IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
139         { IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
140         { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
141
142         { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
143 };
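/*
 * Note: ibmvfc_get_err_index() below walks this table treating the status
 * field as a class mask (every bit in the table entry must be set in the
 * reported status) and requiring an exact match on the error code.  The
 * result field supplies the SCSI host byte for the completed command,
 * retry marks errors that may be retried, and log marks errors that are
 * reported even at the default log level.
 */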
144
145 static void ibmvfc_npiv_login(struct ibmvfc_host *);
146 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
147 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
148 static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
149 static void ibmvfc_npiv_logout(struct ibmvfc_host *);
150
151 static const char *unknown_error = "unknown error";
152
153 #ifdef CONFIG_SCSI_IBMVFC_TRACE
154 /**
155  * ibmvfc_trc_start - Log a start trace entry
156  * @evt:                ibmvfc event struct
157  *
158  **/
159 static void ibmvfc_trc_start(struct ibmvfc_event *evt)
160 {
161         struct ibmvfc_host *vhost = evt->vhost;
162         struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
163         struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
164         struct ibmvfc_trace_entry *entry;
165
166         entry = &vhost->trace[vhost->trace_index++];
167         entry->evt = evt;
168         entry->time = jiffies;
169         entry->fmt = evt->crq.format;
170         entry->type = IBMVFC_TRC_START;
171
172         switch (entry->fmt) {
173         case IBMVFC_CMD_FORMAT:
174                 entry->op_code = vfc_cmd->iu.cdb[0];
175                 entry->scsi_id = vfc_cmd->tgt_scsi_id;
176                 entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
177                 entry->tmf_flags = vfc_cmd->iu.tmf_flags;
178                 entry->u.start.xfer_len = vfc_cmd->iu.xfer_len;
179                 break;
180         case IBMVFC_MAD_FORMAT:
181                 entry->op_code = mad->opcode;
182                 break;
183         default:
184                 break;
185         }
186 }
187
188 /**
189  * ibmvfc_trc_end - Log an end trace entry
190  * @evt:                ibmvfc event struct
191  *
192  **/
193 static void ibmvfc_trc_end(struct ibmvfc_event *evt)
194 {
195         struct ibmvfc_host *vhost = evt->vhost;
196         struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
197         struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
198         struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
199
200         entry->evt = evt;
201         entry->time = jiffies;
202         entry->fmt = evt->crq.format;
203         entry->type = IBMVFC_TRC_END;
204
205         switch (entry->fmt) {
206         case IBMVFC_CMD_FORMAT:
207                 entry->op_code = vfc_cmd->iu.cdb[0];
208                 entry->scsi_id = vfc_cmd->tgt_scsi_id;
209                 entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
210                 entry->tmf_flags = vfc_cmd->iu.tmf_flags;
211                 entry->u.end.status = vfc_cmd->status;
212                 entry->u.end.error = vfc_cmd->error;
213                 entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
214                 entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
215                 entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
216                 break;
217         case IBMVFC_MAD_FORMAT:
218                 entry->op_code = mad->opcode;
219                 entry->u.end.status = mad->status;
220                 break;
221         default:
222                 break;
223
224         }
225 }
226
227 #else
228 #define ibmvfc_trc_start(evt) do { } while (0)
229 #define ibmvfc_trc_end(evt) do { } while (0)
230 #endif
231
232 /**
233  * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
234  * @status:             status / error class
235  * @error:              error
236  *
237  * Return value:
238  *      index into cmd_status / -EINVAL on failure
239  **/
240 static int ibmvfc_get_err_index(u16 status, u16 error)
241 {
242         int i;
243
244         for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
245                 if ((cmd_status[i].status & status) == cmd_status[i].status &&
246                     cmd_status[i].error == error)
247                         return i;
248
249         return -EINVAL;
250 }
251
252 /**
253  * ibmvfc_get_cmd_error - Find the error description for the fcp response
254  * @status:             status / error class
255  * @error:              error
256  *
257  * Return value:
258  *      error description string
259  **/
260 static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
261 {
262         int rc = ibmvfc_get_err_index(status, error);
263         if (rc >= 0)
264                 return cmd_status[rc].name;
265         return unknown_error;
266 }
267
268 /**
269  * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
270  * @vfc_cmd:    ibmvfc command struct
271  *
272  * Return value:
273  *      SCSI result value to return for completed command
274  **/
275 static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
276 {
277         int err;
278         struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
279         int fc_rsp_len = rsp->fcp_rsp_len;
280
281         if ((rsp->flags & FCP_RSP_LEN_VALID) &&
282             ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
283              rsp->data.info.rsp_code))
284                 return DID_ERROR << 16;
285
286         err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
287         if (err >= 0)
288                 return rsp->scsi_status | (cmd_status[err].result << 16);
289         return rsp->scsi_status | (DID_ERROR << 16);
290 }
291
292 /**
293  * ibmvfc_retry_cmd - Determine if error status is retryable
294  * @status:             status / error class
295  * @error:              error
296  *
297  * Return value:
298  *      1 if error should be retried / 0 if it should not
299  **/
300 static int ibmvfc_retry_cmd(u16 status, u16 error)
301 {
302         int rc = ibmvfc_get_err_index(status, error);
303
304         if (rc >= 0)
305                 return cmd_status[rc].retry;
306         return 1;
307 }
308
309 static const char *unknown_fc_explain = "unknown fc explain";
310
311 static const struct {
312         u16 fc_explain;
313         char *name;
314 } ls_explain [] = {
315         { 0x00, "no additional explanation" },
316         { 0x01, "service parameter error - options" },
317         { 0x03, "service parameter error - initiator control" },
318         { 0x05, "service parameter error - recipient control" },
319         { 0x07, "service parameter error - received data field size" },
320         { 0x09, "service parameter error - concurrent seq" },
321         { 0x0B, "service parameter error - credit" },
322         { 0x0D, "invalid N_Port/F_Port_Name" },
323         { 0x0E, "invalid node/Fabric Name" },
324         { 0x0F, "invalid common service parameters" },
325         { 0x11, "invalid association header" },
326         { 0x13, "association header required" },
327         { 0x15, "invalid originator S_ID" },
328         { 0x17, "invalid OX_ID-RX-ID combination" },
329         { 0x19, "command (request) already in progress" },
330         { 0x1E, "N_Port Login requested" },
331         { 0x1F, "Invalid N_Port_ID" },
332 };
333
334 static const struct {
335         u16 fc_explain;
336         char *name;
337 } gs_explain [] = {
338         { 0x00, "no additional explanation" },
339         { 0x01, "port identifier not registered" },
340         { 0x02, "port name not registered" },
341         { 0x03, "node name not registered" },
342         { 0x04, "class of service not registered" },
343         { 0x06, "initial process associator not registered" },
344         { 0x07, "FC-4 TYPEs not registered" },
345         { 0x08, "symbolic port name not registered" },
346         { 0x09, "symbolic node name not registered" },
347         { 0x0A, "port type not registered" },
348         { 0xF0, "authorization exception" },
349         { 0xF1, "authentication exception" },
350         { 0xF2, "data base full" },
351         { 0xF3, "data base empty" },
352         { 0xF4, "processing request" },
353         { 0xF5, "unable to verify connection" },
354         { 0xF6, "devices not in a common zone" },
355 };
356
357 /**
358  * ibmvfc_get_ls_explain - Return the FC Explain description text
359  * @status:     FC Explain status
360  *
361  * Returns:
362  *      error string
363  **/
364 static const char *ibmvfc_get_ls_explain(u16 status)
365 {
366         int i;
367
368         for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
369                 if (ls_explain[i].fc_explain == status)
370                         return ls_explain[i].name;
371
372         return unknown_fc_explain;
373 }
374
375 /**
376  * ibmvfc_get_gs_explain - Return the FC Explain description text
377  * @status:     FC Explain status
378  *
379  * Returns:
380  *      error string
381  **/
382 static const char *ibmvfc_get_gs_explain(u16 status)
383 {
384         int i;
385
386         for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
387                 if (gs_explain[i].fc_explain == status)
388                         return gs_explain[i].name;
389
390         return unknown_fc_explain;
391 }
392
393 static const struct {
394         enum ibmvfc_fc_type fc_type;
395         char *name;
396 } fc_type [] = {
397         { IBMVFC_FABRIC_REJECT, "fabric reject" },
398         { IBMVFC_PORT_REJECT, "port reject" },
399         { IBMVFC_LS_REJECT, "ELS reject" },
400         { IBMVFC_FABRIC_BUSY, "fabric busy" },
401         { IBMVFC_PORT_BUSY, "port busy" },
402         { IBMVFC_BASIC_REJECT, "basic reject" },
403 };
404
405 static const char *unknown_fc_type = "unknown fc type";
406
407 /**
408  * ibmvfc_get_fc_type - Return the FC Type description text
409  * @status:     FC Type error status
410  *
411  * Returns:
412  *      error string
413  **/
414 static const char *ibmvfc_get_fc_type(u16 status)
415 {
416         int i;
417
418         for (i = 0; i < ARRAY_SIZE(fc_type); i++)
419                 if (fc_type[i].fc_type == status)
420                         return fc_type[i].name;
421
422         return unknown_fc_type;
423 }
424
425 /**
426  * ibmvfc_set_tgt_action - Set the next init action for the target
427  * @tgt:                ibmvfc target struct
428  * @action:             action to perform
429  *
430  **/
431 static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
432                                   enum ibmvfc_target_action action)
433 {
434         switch (tgt->action) {
435         case IBMVFC_TGT_ACTION_DEL_RPORT:
436                 if (action == IBMVFC_TGT_ACTION_DELETED_RPORT)
437                         tgt->action = action;
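                /* fall through - a target already marked for rport deletion can
                 * only advance to DELETED_RPORT; any other request is ignored. */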
438         case IBMVFC_TGT_ACTION_DELETED_RPORT:
439                 break;
440         default:
441                 if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
442                         tgt->add_rport = 0;
443                 tgt->action = action;
444                 break;
445         }
446 }
447
448 /**
449  * ibmvfc_set_host_state - Set the state for the host
450  * @vhost:              ibmvfc host struct
451  * @state:              state to set host to
452  *
453  * Returns:
454  *      0 if state changed / non-zero if not changed
455  **/
456 static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
457                                   enum ibmvfc_host_state state)
458 {
459         int rc = 0;
460
461         switch (vhost->state) {
462         case IBMVFC_HOST_OFFLINE:
463                 rc = -EINVAL;
464                 break;
465         default:
466                 vhost->state = state;
467                 break;
468         }
469
470         return rc;
471 }
472
473 /**
474  * ibmvfc_set_host_action - Set the next init action for the host
475  * @vhost:              ibmvfc host struct
476  * @action:             action to perform
477  *
478  **/
479 static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
480                                    enum ibmvfc_host_action action)
481 {
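        /* Host actions form a small state machine: the guarded transitions
         * below are only honoured when the current action is the expected
         * predecessor (e.g. ALLOC_TGTS only after INIT_WAIT), while the
         * actions grouped at the bottom are always accepted.
         */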
482         switch (action) {
483         case IBMVFC_HOST_ACTION_ALLOC_TGTS:
484                 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
485                         vhost->action = action;
486                 break;
487         case IBMVFC_HOST_ACTION_LOGO_WAIT:
488                 if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
489                         vhost->action = action;
490                 break;
491         case IBMVFC_HOST_ACTION_INIT_WAIT:
492                 if (vhost->action == IBMVFC_HOST_ACTION_INIT)
493                         vhost->action = action;
494                 break;
495         case IBMVFC_HOST_ACTION_QUERY:
496                 switch (vhost->action) {
497                 case IBMVFC_HOST_ACTION_INIT_WAIT:
498                 case IBMVFC_HOST_ACTION_NONE:
499                 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
500                         vhost->action = action;
501                         break;
502                 default:
503                         break;
504                 }
505                 break;
506         case IBMVFC_HOST_ACTION_TGT_INIT:
507                 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
508                         vhost->action = action;
509                 break;
510         case IBMVFC_HOST_ACTION_INIT:
511         case IBMVFC_HOST_ACTION_TGT_DEL:
512                 switch (vhost->action) {
513                 case IBMVFC_HOST_ACTION_RESET:
514                 case IBMVFC_HOST_ACTION_REENABLE:
515                         break;
516                 default:
517                         vhost->action = action;
518                         break;
519                 }
520                 break;
521         case IBMVFC_HOST_ACTION_LOGO:
522         case IBMVFC_HOST_ACTION_QUERY_TGTS:
523         case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
524         case IBMVFC_HOST_ACTION_NONE:
525         case IBMVFC_HOST_ACTION_RESET:
526         case IBMVFC_HOST_ACTION_REENABLE:
527         default:
528                 vhost->action = action;
529                 break;
530         }
531 }
532
533 /**
534  * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
535  * @vhost:              ibmvfc host struct
536  *
537  * Return value:
538  *      nothing
539  **/
540 static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
541 {
542         if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
543                 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
544                         scsi_block_requests(vhost->host);
545                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
546                 }
547         } else
548                 vhost->reinit = 1;
549
550         wake_up(&vhost->work_wait_q);
551 }
552
553 /**
554  * ibmvfc_link_down - Handle a link down event from the adapter
555  * @vhost:      ibmvfc host struct
556  * @state:      ibmvfc host state to enter
557  *
558  **/
559 static void ibmvfc_link_down(struct ibmvfc_host *vhost,
560                              enum ibmvfc_host_state state)
561 {
562         struct ibmvfc_target *tgt;
563
564         ENTER;
565         scsi_block_requests(vhost->host);
566         list_for_each_entry(tgt, &vhost->targets, queue)
567                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
568         ibmvfc_set_host_state(vhost, state);
569         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
570         vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
571         wake_up(&vhost->work_wait_q);
572         LEAVE;
573 }
574
575 /**
576  * ibmvfc_init_host - Start host initialization
577  * @vhost:              ibmvfc host struct
578  *
579  * Return value:
580  *      nothing
581  **/
582 static void ibmvfc_init_host(struct ibmvfc_host *vhost)
583 {
584         struct ibmvfc_target *tgt;
585
586         if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
587                 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
588                         dev_err(vhost->dev,
589                                 "Host initialization retries exceeded. Taking adapter offline\n");
590                         ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
591                         return;
592                 }
593         }
594
595         if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
596                 memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
597                 vhost->async_crq.cur = 0;
598
599                 list_for_each_entry(tgt, &vhost->targets, queue)
600                         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
601                 scsi_block_requests(vhost->host);
602                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
603                 vhost->job_step = ibmvfc_npiv_login;
604                 wake_up(&vhost->work_wait_q);
605         }
606 }
607
608 /**
609  * ibmvfc_send_crq - Send a CRQ
610  * @vhost:      ibmvfc host struct
611  * @word1:      the first 64 bits of the data
612  * @word2:      the second 64 bits of the data
613  *
614  * Return value:
615  *      0 on success / other on failure
616  **/
617 static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
618 {
619         struct vio_dev *vdev = to_vio_dev(vhost->dev);
620         return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
621 }
622
623 /**
624  * ibmvfc_send_crq_init - Send a CRQ init message
625  * @vhost:      ibmvfc host struct
626  *
627  * Return value:
628  *      0 on success / other on failure
629  **/
630 static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
631 {
632         ibmvfc_dbg(vhost, "Sending CRQ init\n");
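        /* The leading 0xC0 byte marks a CRQ initialization message: format
         * 0x01 requests initialization of the partner queue, and format 0x02
         * (sent by ibmvfc_send_crq_init_complete() below) acknowledges the
         * partner's init request.
         */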
633         return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
634 }
635
636 /**
637  * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
638  * @vhost:      ibmvfc host struct
639  *
640  * Return value:
641  *      0 on success / other on failure
642  **/
643 static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
644 {
645         ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
646         return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
647 }
648
649 /**
650  * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
651  * @vhost:      ibmvfc host struct
652  *
653  * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
654  * the crq with the hypervisor.
655  **/
656 static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
657 {
658         long rc = 0;
659         struct vio_dev *vdev = to_vio_dev(vhost->dev);
660         struct ibmvfc_crq_queue *crq = &vhost->crq;
661
662         ibmvfc_dbg(vhost, "Releasing CRQ\n");
663         free_irq(vdev->irq, vhost);
664         tasklet_kill(&vhost->tasklet);
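        /* H_FREE_CRQ may return H_BUSY or a long-busy hint while the
         * hypervisor tears the queue down; back off briefly and retry until
         * the call completes.
         */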
665         do {
666                 if (rc)
667                         msleep(100);
668                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
669         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
670
671         vhost->state = IBMVFC_NO_CRQ;
672         vhost->logged_in = 0;
673         dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
674         free_page((unsigned long)crq->msgs);
675 }
676
677 /**
678  * ibmvfc_reenable_crq_queue - reenables the CRQ
679  * @vhost:      ibmvfc host struct
680  *
681  * Return value:
682  *      0 on success / other on failure
683  **/
684 static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
685 {
686         int rc = 0;
687         struct vio_dev *vdev = to_vio_dev(vhost->dev);
688
689         /* Re-enable the CRQ */
690         do {
691                 if (rc)
692                         msleep(100);
693                 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
694         } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
695
696         if (rc)
697                 dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
698
699         return rc;
700 }
701
702 /**
703  * ibmvfc_reset_crq - resets a crq after a failure
704  * @vhost:      ibmvfc host struct
705  *
706  * Return value:
707  *      0 on success / other on failure
708  **/
709 static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
710 {
711         int rc = 0;
712         unsigned long flags;
713         struct vio_dev *vdev = to_vio_dev(vhost->dev);
714         struct ibmvfc_crq_queue *crq = &vhost->crq;
715
716         /* Close the CRQ */
717         do {
718                 if (rc)
719                         msleep(100);
720                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
721         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
722
723         spin_lock_irqsave(vhost->host->host_lock, flags);
724         vhost->state = IBMVFC_NO_CRQ;
725         vhost->logged_in = 0;
726         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
727
728         /* Clean out the queue */
729         memset(crq->msgs, 0, PAGE_SIZE);
730         crq->cur = 0;
731
732         /* And re-open it again */
733         rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
734                                 crq->msg_token, PAGE_SIZE);
735
736         if (rc == H_CLOSED)
737                 /* Adapter is good, but other end is not ready */
738                 dev_warn(vhost->dev, "Partner adapter not ready\n");
739         else if (rc != 0)
740                 dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
741         spin_unlock_irqrestore(vhost->host->host_lock, flags);
742
743         return rc;
744 }
745
746 /**
747  * ibmvfc_valid_event - Determines if event is valid.
748  * @pool:       event_pool that contains the event
749  * @evt:        ibmvfc event to be checked for validity
750  *
751  * Return value:
752  *      1 if event is valid / 0 if event is not valid
753  **/
754 static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
755                               struct ibmvfc_event *evt)
756 {
757         int index = evt - pool->events;
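        /* The event must lie inside the pool and sit exactly on an element
         * boundary; the reverse pointer check below catches misaligned
         * addresses that the pointer subtraction alone would hide.
         */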
758         if (index < 0 || index >= pool->size)   /* outside of bounds */
759                 return 0;
760         if (evt != pool->events + index)        /* unaligned */
761                 return 0;
762         return 1;
763 }
764
765 /**
766  * ibmvfc_free_event - Free the specified event
767  * @evt:        ibmvfc_event to be freed
768  *
769  **/
770 static void ibmvfc_free_event(struct ibmvfc_event *evt)
771 {
772         struct ibmvfc_host *vhost = evt->vhost;
773         struct ibmvfc_event_pool *pool = &vhost->pool;
774
775         BUG_ON(!ibmvfc_valid_event(pool, evt));
776         BUG_ON(atomic_inc_return(&evt->free) != 1);
777         list_add_tail(&evt->queue, &vhost->free);
778 }
779
780 /**
781  * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
782  * @evt:        ibmvfc event struct
783  *
784  * This function does not set up any error status; that must be done
785  * before this function gets called.
786  **/
787 static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
788 {
789         struct scsi_cmnd *cmnd = evt->cmnd;
790
791         if (cmnd) {
792                 scsi_dma_unmap(cmnd);
793                 cmnd->scsi_done(cmnd);
794         }
795
796         if (evt->eh_comp)
797                 complete(evt->eh_comp);
798
799         ibmvfc_free_event(evt);
800 }
801
802 /**
803  * ibmvfc_fail_request - Fail request with specified error code
804  * @evt:                ibmvfc event struct
805  * @error_code: error code to fail request with
806  *
807  * Return value:
808  *      none
809  **/
810 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
811 {
812         if (evt->cmnd) {
813                 evt->cmnd->result = (error_code << 16);
814                 evt->done = ibmvfc_scsi_eh_done;
815         } else
816                 evt->xfer_iu->mad_common.status = IBMVFC_MAD_DRIVER_FAILED;
817
818         list_del(&evt->queue);
819         del_timer(&evt->timer);
820         ibmvfc_trc_end(evt);
821         evt->done(evt);
822 }
823
824 /**
825  * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
826  * @vhost:              ibmvfc host struct
827  * @error_code: error code to fail requests with
828  *
829  * Return value:
830  *      none
831  **/
832 static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
833 {
834         struct ibmvfc_event *evt, *pos;
835
836         ibmvfc_dbg(vhost, "Purging all requests\n");
837         list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
838                 ibmvfc_fail_request(evt, error_code);
839 }
840
841 /**
842  * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
843  * @vhost:      struct ibmvfc host to reset
844  **/
845 static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
846 {
847         ibmvfc_purge_requests(vhost, DID_ERROR);
848         ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
849         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
850 }
851
852 /**
853  * __ibmvfc_reset_host - Reset the connection to the server (no locking)
854  * @vhost:      struct ibmvfc host to reset
855  **/
856 static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
857 {
858         if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
859             !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
860                 scsi_block_requests(vhost->host);
861                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
862                 vhost->job_step = ibmvfc_npiv_logout;
863                 wake_up(&vhost->work_wait_q);
864         } else
865                 ibmvfc_hard_reset_host(vhost);
866 }
867
868 /**
869  * ibmvfc_reset_host - Reset the connection to the server
870  * @vhost:      ibmvfc host struct
871  **/
872 static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
873 {
874         unsigned long flags;
875
876         spin_lock_irqsave(vhost->host->host_lock, flags);
877         __ibmvfc_reset_host(vhost);
878         spin_unlock_irqrestore(vhost->host->host_lock, flags);
879 }
880
881 /**
882  * ibmvfc_retry_host_init - Retry host initialization if allowed
883  * @vhost:      ibmvfc host struct
884  *
885  * Returns: 1 if init will be retried / 0 if not
886  *
887  **/
888 static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
889 {
890         int retry = 0;
891
892         if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
893                 vhost->delay_init = 1;
894                 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
895                         dev_err(vhost->dev,
896                                 "Host initialization retries exceeded. Taking adapter offline\n");
897                         ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
898                 } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
899                         __ibmvfc_reset_host(vhost);
900                 else {
901                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
902                         retry = 1;
903                 }
904         }
905
906         wake_up(&vhost->work_wait_q);
907         return retry;
908 }
909
910 /**
911  * __ibmvfc_get_target - Find the specified scsi_target (no locking)
912  * @starget:    scsi target struct
913  *
914  * Return value:
915  *      ibmvfc_target struct / NULL if not found
916  **/
917 static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
918 {
919         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
920         struct ibmvfc_host *vhost = shost_priv(shost);
921         struct ibmvfc_target *tgt;
922
923         list_for_each_entry(tgt, &vhost->targets, queue)
924                 if (tgt->target_id == starget->id) {
925                         kref_get(&tgt->kref);
926                         return tgt;
927                 }
928         return NULL;
929 }
930
931 /**
932  * ibmvfc_get_target - Find the specified scsi_target
933  * @starget:    scsi target struct
934  *
935  * Return value:
936  *      ibmvfc_target struct / NULL if not found
937  **/
938 static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
939 {
940         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
941         struct ibmvfc_target *tgt;
942         unsigned long flags;
943
944         spin_lock_irqsave(shost->host_lock, flags);
945         tgt = __ibmvfc_get_target(starget);
946         spin_unlock_irqrestore(shost->host_lock, flags);
947         return tgt;
948 }
949
950 /**
951  * ibmvfc_get_host_speed - Get host port speed
952  * @shost:              scsi host struct
953  *
954  * Return value:
955  *      none
956  **/
957 static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
958 {
959         struct ibmvfc_host *vhost = shost_priv(shost);
960         unsigned long flags;
961
962         spin_lock_irqsave(shost->host_lock, flags);
963         if (vhost->state == IBMVFC_ACTIVE) {
964                 switch (vhost->login_buf->resp.link_speed / 100) {
965                 case 1:
966                         fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
967                         break;
968                 case 2:
969                         fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
970                         break;
971                 case 4:
972                         fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
973                         break;
974                 case 8:
975                         fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
976                         break;
977                 case 10:
978                         fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
979                         break;
980                 case 16:
981                         fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
982                         break;
983                 default:
984                         ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
985                                    vhost->login_buf->resp.link_speed / 100);
986                         fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
987                         break;
988                 }
989         } else
990                 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
991         spin_unlock_irqrestore(shost->host_lock, flags);
992 }
993
994 /**
995  * ibmvfc_get_host_port_state - Get host port state
996  * @shost:              scsi host struct
997  *
998  * Return value:
999  *      none
1000  **/
1001 static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
1002 {
1003         struct ibmvfc_host *vhost = shost_priv(shost);
1004         unsigned long flags;
1005
1006         spin_lock_irqsave(shost->host_lock, flags);
1007         switch (vhost->state) {
1008         case IBMVFC_INITIALIZING:
1009         case IBMVFC_ACTIVE:
1010                 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1011                 break;
1012         case IBMVFC_LINK_DOWN:
1013                 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1014                 break;
1015         case IBMVFC_LINK_DEAD:
1016         case IBMVFC_HOST_OFFLINE:
1017                 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1018                 break;
1019         case IBMVFC_HALTED:
1020                 fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
1021                 break;
1022         case IBMVFC_NO_CRQ:
1023                 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1024                 break;
1025         default:
1026                 ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
1027                 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1028                 break;
1029         }
1030         spin_unlock_irqrestore(shost->host_lock, flags);
1031 }
1032
1033 /**
1034  * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
1035  * @rport:              rport struct
1036  * @timeout:    timeout value
1037  *
1038  * Return value:
1039  *      none
1040  **/
1041 static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
1042 {
1043         if (timeout)
1044                 rport->dev_loss_tmo = timeout;
1045         else
1046                 rport->dev_loss_tmo = 1;
1047 }
1048
1049 /**
1050  * ibmvfc_release_tgt - Free memory allocated for a target
1051  * @kref:               kref struct
1052  *
1053  **/
1054 static void ibmvfc_release_tgt(struct kref *kref)
1055 {
1056         struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
1057         kfree(tgt);
1058 }
1059
1060 /**
1061  * ibmvfc_get_starget_node_name - Get SCSI target's node name
1062  * @starget:    scsi target struct
1063  *
1064  * Return value:
1065  *      none
1066  **/
1067 static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
1068 {
1069         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1070         fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
1071         if (tgt)
1072                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1073 }
1074
1075 /**
1076  * ibmvfc_get_starget_port_name - Get SCSI target's port name
1077  * @starget:    scsi target struct
1078  *
1079  * Return value:
1080  *      none
1081  **/
1082 static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1083 {
1084         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1085         fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1086         if (tgt)
1087                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1088 }
1089
1090 /**
1091  * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1092  * @starget:    scsi target struct
1093  *
1094  * Return value:
1095  *      none
1096  **/
1097 static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1098 {
1099         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1100         fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1101         if (tgt)
1102                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1103 }
1104
1105 /**
1106  * ibmvfc_wait_while_resetting - Wait while the host resets
1107  * @vhost:              ibmvfc host struct
1108  *
1109  * Return value:
1110  *      0 on success / other on failure
1111  **/
1112 static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
1113 {
1114         long timeout = wait_event_timeout(vhost->init_wait_q,
1115                                           ((vhost->state == IBMVFC_ACTIVE ||
1116                                             vhost->state == IBMVFC_HOST_OFFLINE ||
1117                                             vhost->state == IBMVFC_LINK_DEAD) &&
1118                                            vhost->action == IBMVFC_HOST_ACTION_NONE),
1119                                           (init_timeout * HZ));
1120
1121         return timeout ? 0 : -EIO;
1122 }
1123
1124 /**
1125  * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
1126  * @shost:              scsi host struct
1127  *
1128  * Return value:
1129  *      0 on success / other on failure
1130  **/
1131 static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
1132 {
1133         struct ibmvfc_host *vhost = shost_priv(shost);
1134
1135         dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
1136         ibmvfc_reset_host(vhost);
1137         return ibmvfc_wait_while_resetting(vhost);
1138 }
1139
1140 /**
1141  * ibmvfc_gather_partition_info - Gather info about the LPAR
1142  * @vhost:      ibmvfc host struct
1143  * Return value:
1144  *      none
1145  **/
1146 static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1147 {
1148         struct device_node *rootdn;
1149         const char *name;
1150         const unsigned int *num;
1151
1152         rootdn = of_find_node_by_path("/");
1153         if (!rootdn)
1154                 return;
1155
1156         name = of_get_property(rootdn, "ibm,partition-name", NULL);
1157         if (name)
1158                 strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
1159         num = of_get_property(rootdn, "ibm,partition-no", NULL);
1160         if (num)
1161                 vhost->partition_number = *num;
1162         of_node_put(rootdn);
1163 }
1164
1165 /**
1166  * ibmvfc_set_login_info - Setup info for NPIV login
1167  * @vhost:      ibmvfc host struct
1168  *
1169  * Return value:
1170  *      none
1171  **/
1172 static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1173 {
1174         struct ibmvfc_npiv_login *login_info = &vhost->login_info;
1175         struct device_node *of_node = vhost->dev->of_node;
1176         const char *location;
1177
1178         memset(login_info, 0, sizeof(*login_info));
1179
1180         login_info->ostype = IBMVFC_OS_LINUX;
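        /* IBMVFC_MAX_SECTORS is in 512-byte sectors; shift left by 9 to
         * express the DMA length limit in bytes.
         */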
1181         login_info->max_dma_len = IBMVFC_MAX_SECTORS << 9;
1182         login_info->max_payload = sizeof(struct ibmvfc_fcp_cmd_iu);
1183         login_info->max_response = sizeof(struct ibmvfc_fcp_rsp);
1184         login_info->partition_num = vhost->partition_number;
1185         login_info->vfc_frame_version = 1;
1186         login_info->fcp_version = 3;
1187         login_info->flags = IBMVFC_FLUSH_ON_HALT;
1188         if (vhost->client_migrated)
1189                 login_info->flags |= IBMVFC_CLIENT_MIGRATED;
1190
1191         login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1192         login_info->capabilities = IBMVFC_CAN_MIGRATE;
1193         login_info->async.va = vhost->async_crq.msg_token;
1194         login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs);
1195         strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
1196         strncpy(login_info->device_name,
1197                 dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
1198
1199         location = of_get_property(of_node, "ibm,loc-code", NULL);
1200         location = location ? location : dev_name(vhost->dev);
1201         strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
1202 }
1203
1204 /**
1205  * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
1206  * @vhost:      ibmvfc host who owns the event pool
1207  *
1208  * Returns zero on success.
1209  **/
1210 static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
1211 {
1212         int i;
1213         struct ibmvfc_event_pool *pool = &vhost->pool;
1214
1215         ENTER;
1216         pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1217         pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
1218         if (!pool->events)
1219                 return -ENOMEM;
1220
1221         pool->iu_storage = dma_alloc_coherent(vhost->dev,
1222                                               pool->size * sizeof(*pool->iu_storage),
1223                                               &pool->iu_token, 0);
1224
1225         if (!pool->iu_storage) {
1226                 kfree(pool->events);
1227                 return -ENOMEM;
1228         }
1229
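        /* Each event is paired with one IU slot in the coherent buffer: the
         * event's CRQ ioba is the DMA address of that slot, which the server
         * uses to fetch the IU when the event is submitted.
         */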
1230         for (i = 0; i < pool->size; ++i) {
1231                 struct ibmvfc_event *evt = &pool->events[i];
1232                 atomic_set(&evt->free, 1);
1233                 evt->crq.valid = 0x80;
1234                 evt->crq.ioba = pool->iu_token + (sizeof(*evt->xfer_iu) * i);
1235                 evt->xfer_iu = pool->iu_storage + i;
1236                 evt->vhost = vhost;
1237                 evt->ext_list = NULL;
1238                 list_add_tail(&evt->queue, &vhost->free);
1239         }
1240
1241         LEAVE;
1242         return 0;
1243 }
1244
1245 /**
1246  * ibmvfc_free_event_pool - Frees memory of the event pool of a host
1247  * @vhost:      ibmvfc host who owns the event pool
1248  *
1249  **/
1250 static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
1251 {
1252         int i;
1253         struct ibmvfc_event_pool *pool = &vhost->pool;
1254
1255         ENTER;
1256         for (i = 0; i < pool->size; ++i) {
1257                 list_del(&pool->events[i].queue);
1258                 BUG_ON(atomic_read(&pool->events[i].free) != 1);
1259                 if (pool->events[i].ext_list)
1260                         dma_pool_free(vhost->sg_pool,
1261                                       pool->events[i].ext_list,
1262                                       pool->events[i].ext_list_token);
1263         }
1264
1265         kfree(pool->events);
1266         dma_free_coherent(vhost->dev,
1267                           pool->size * sizeof(*pool->iu_storage),
1268                           pool->iu_storage, pool->iu_token);
1269         LEAVE;
1270 }
1271
1272 /**
1273  * ibmvfc_get_event - Gets the next free event in pool
1274  * @vhost:      ibmvfc host struct
1275  *
1276  * Returns a free event from the pool.
1277  **/
1278 static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
1279 {
1280         struct ibmvfc_event *evt;
1281
1282         BUG_ON(list_empty(&vhost->free));
1283         evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
1284         atomic_set(&evt->free, 0);
1285         list_del(&evt->queue);
1286         return evt;
1287 }
1288
1289 /**
1290  * ibmvfc_init_event - Initialize fields in an event struct that are always
1291  *                              required.
1292  * @evt:        The event
1293  * @done:       Routine to call when the event is responded to
1294  * @format:     SRP or MAD format
1295  **/
1296 static void ibmvfc_init_event(struct ibmvfc_event *evt,
1297                               void (*done) (struct ibmvfc_event *), u8 format)
1298 {
1299         evt->cmnd = NULL;
1300         evt->sync_iu = NULL;
1301         evt->crq.format = format;
1302         evt->done = done;
1303         evt->eh_comp = NULL;
1304 }
1305
1306 /**
1307  * ibmvfc_map_sg_list - Initialize scatterlist
1308  * @scmd:       scsi command struct
1309  * @nseg:       number of scatterlist segments
1310  * @md: memory descriptor list to initialize
1311  **/
1312 static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1313                                struct srp_direct_buf *md)
1314 {
1315         int i;
1316         struct scatterlist *sg;
1317
1318         scsi_for_each_sg(scmd, sg, nseg, i) {
1319                 md[i].va = sg_dma_address(sg);
1320                 md[i].len = sg_dma_len(sg);
1321                 md[i].key = 0;
1322         }
1323 }
1324
1325 /**
1326  * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
1327  * @scmd:               Scsi_Cmnd with the scatterlist
1328  * @evt:                ibmvfc event struct
1329  * @vfc_cmd:    vfc_cmd that contains the memory descriptor
1330  * @dev:                device for which to map dma memory
1331  *
1332  * Returns:
1333  *      0 on success / non-zero on failure
1334  **/
1335 static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
1336                               struct ibmvfc_event *evt,
1337                               struct ibmvfc_cmd *vfc_cmd, struct device *dev)
1338 {
1339
1340         int sg_mapped;
1341         struct srp_direct_buf *data = &vfc_cmd->ioba;
1342         struct ibmvfc_host *vhost = dev_get_drvdata(dev);
1343
1344         sg_mapped = scsi_dma_map(scmd);
1345         if (!sg_mapped) {
1346                 vfc_cmd->flags |= IBMVFC_NO_MEM_DESC;
1347                 return 0;
1348         } else if (unlikely(sg_mapped < 0)) {
1349                 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1350                         scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
1351                 return sg_mapped;
1352         }
1353
1354         if (scmd->sc_data_direction == DMA_TO_DEVICE) {
1355                 vfc_cmd->flags |= IBMVFC_WRITE;
1356                 vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
1357         } else {
1358                 vfc_cmd->flags |= IBMVFC_READ;
1359                 vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
1360         }
1361
1362         if (sg_mapped == 1) {
1363                 ibmvfc_map_sg_list(scmd, sg_mapped, data);
1364                 return 0;
1365         }
1366
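        /* More than one segment: describe the transfer with an external list
         * of descriptors allocated from the DMA pool and flag the command as
         * a scatterlist so the server walks the indirect list.
         */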
1367         vfc_cmd->flags |= IBMVFC_SCATTERLIST;
1368
1369         if (!evt->ext_list) {
1370                 evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1371                                                &evt->ext_list_token);
1372
1373                 if (!evt->ext_list) {
1374                         scsi_dma_unmap(scmd);
1375                         if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1376                                 scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
1377                         return -ENOMEM;
1378                 }
1379         }
1380
1381         ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1382
1383         data->va = evt->ext_list_token;
1384         data->len = sg_mapped * sizeof(struct srp_direct_buf);
1385         data->key = 0;
1386         return 0;
1387 }
1388
1389 /**
1390  * ibmvfc_timeout - Internal command timeout handler
1391  * @evt:        struct ibmvfc_event that timed out
1392  *
1393  * Called when an internally generated command times out
1394  **/
1395 static void ibmvfc_timeout(struct ibmvfc_event *evt)
1396 {
1397         struct ibmvfc_host *vhost = evt->vhost;
1398         dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1399         ibmvfc_reset_host(vhost);
1400 }
1401
1402 /**
1403  * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
1404  * @evt:                event to be sent
1405  * @vhost:              ibmvfc host struct
1406  * @timeout:    timeout in seconds - 0 means do not time command
1407  *
1408  * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
1409  **/
1410 static int ibmvfc_send_event(struct ibmvfc_event *evt,
1411                              struct ibmvfc_host *vhost, unsigned long timeout)
1412 {
1413         u64 *crq_as_u64 = (u64 *) &evt->crq;
1414         int rc;
1415
1416         /* Copy the IU into the transfer area */
1417         *evt->xfer_iu = evt->iu;
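        /* The event pointer doubles as the request tag so the completion
         * path can map the response CRQ back to this event.
         */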
1418         if (evt->crq.format == IBMVFC_CMD_FORMAT)
1419                 evt->xfer_iu->cmd.tag = (u64)evt;
1420         else if (evt->crq.format == IBMVFC_MAD_FORMAT)
1421                 evt->xfer_iu->mad_common.tag = (u64)evt;
1422         else
1423                 BUG();
1424
1425         list_add_tail(&evt->queue, &vhost->sent);
1426         init_timer(&evt->timer);
1427
1428         if (timeout) {
1429                 evt->timer.data = (unsigned long) evt;
1430                 evt->timer.expires = jiffies + (timeout * HZ);
1431                 evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout;
1432                 add_timer(&evt->timer);
1433         }
1434
1435         mb();
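        /* Make sure the IU copy above is globally visible before the CRQ
         * send hands the descriptor to firmware.
         */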
1436
1437         if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
1438                 list_del(&evt->queue);
1439                 del_timer(&evt->timer);
1440
1441                 /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
1442                  * Firmware will send a CRQ with a transport event (0xFF) to
1443                  * tell this client what has happened to the transport. This
1444                  * will be handled in ibmvfc_handle_crq()
1445                  */
1446                 if (rc == H_CLOSED) {
1447                         if (printk_ratelimit())
1448                                 dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
1449                         if (evt->cmnd)
1450                                 scsi_dma_unmap(evt->cmnd);
1451                         ibmvfc_free_event(evt);
1452                         return SCSI_MLQUEUE_HOST_BUSY;
1453                 }
1454
1455                 dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
1456                 if (evt->cmnd) {
1457                         evt->cmnd->result = DID_ERROR << 16;
1458                         evt->done = ibmvfc_scsi_eh_done;
1459                 } else
1460                         evt->xfer_iu->mad_common.status = IBMVFC_MAD_CRQ_ERROR;
1461
1462                 evt->done(evt);
1463         } else
1464                 ibmvfc_trc_start(evt);
1465
1466         return 0;
1467 }
1468
1469 /**
1470  * ibmvfc_log_error - Log an error for the failed command if appropriate
1471  * @evt:        ibmvfc event to log
1472  *
1473  **/
1474 static void ibmvfc_log_error(struct ibmvfc_event *evt)
1475 {
1476         struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1477         struct ibmvfc_host *vhost = evt->vhost;
1478         struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
1479         struct scsi_cmnd *cmnd = evt->cmnd;
1480         const char *err = unknown_error;
1481         int index = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
1482         int logerr = 0;
1483         int rsp_code = 0;
1484
1485         if (index >= 0) {
1486                 logerr = cmd_status[index].log;
1487                 err = cmd_status[index].name;
1488         }
1489
1490         if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
1491                 return;
1492
1493         if (rsp->flags & FCP_RSP_LEN_VALID)
1494                 rsp_code = rsp->data.info.rsp_code;
1495
1496         scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
1497                     "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1498                     cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
1499                     rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1500 }
1501
1502 /**
1503  * ibmvfc_relogin - Log back into the specified device
1504  * @sdev:       scsi device struct
1505  *
1506  **/
1507 static void ibmvfc_relogin(struct scsi_device *sdev)
1508 {
1509         struct ibmvfc_host *vhost = shost_priv(sdev->host);
1510         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1511         struct ibmvfc_target *tgt;
1512
1513         list_for_each_entry(tgt, &vhost->targets, queue) {
1514                 if (rport == tgt->rport) {
1515                         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
1516                         break;
1517                 }
1518         }
1519
1520         ibmvfc_reinit_host(vhost);
1521 }
1522
1523 /**
1524  * ibmvfc_scsi_done - Handle responses from commands
1525  * @evt:        ibmvfc event to be handled
1526  *
1527  * Used as a callback when sending scsi cmds.
1528  **/
1529 static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1530 {
1531         struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1532         struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
1533         struct scsi_cmnd *cmnd = evt->cmnd;
1534         u32 rsp_len = 0;
1535         u32 sense_len = rsp->fcp_sense_len;
1536
1537         if (cmnd) {
1538                 if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID)
1539                         scsi_set_resid(cmnd, vfc_cmd->adapter_resid);
1540                 else if (rsp->flags & FCP_RESID_UNDER)
1541                         scsi_set_resid(cmnd, rsp->fcp_resid);
1542                 else
1543                         scsi_set_resid(cmnd, 0);
1544
1545                 if (vfc_cmd->status) {
1546                         cmnd->result = ibmvfc_get_err_result(vfc_cmd);
1547
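                             /*
                              * Sense data in the FCP response follows the response
                              * information field, so skip rsp_len bytes and clamp
                              * the copy to the midlayer's SCSI_SENSE_BUFFERSIZE.
                              */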
1548                         if (rsp->flags & FCP_RSP_LEN_VALID)
1549                                 rsp_len = rsp->fcp_rsp_len;
1550                         if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
1551                                 sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
1552                         if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1553                                 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1554                         if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
1555                                 ibmvfc_relogin(cmnd->device);
1556
1557                         if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1558                                 cmnd->result = (DID_ERROR << 16);
1559
1560                         ibmvfc_log_error(evt);
1561                 }
1562
1563                 if (!cmnd->result &&
1564                     (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
1565                         cmnd->result = (DID_ERROR << 16);
1566
1567                 scsi_dma_unmap(cmnd);
1568                 cmnd->scsi_done(cmnd);
1569         }
1570
1571         if (evt->eh_comp)
1572                 complete(evt->eh_comp);
1573
1574         ibmvfc_free_event(evt);
1575 }
1576
1577 /**
1578  * ibmvfc_host_chkready - Check if the host can accept commands
1579  * @vhost:       struct ibmvfc host
1580  *
1581  * Returns:
1582  *      0 if host can accept command / SCSI result (DID_*) if not
1583  **/
1584 static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1585 {
1586         int result = 0;
1587
1588         switch (vhost->state) {
1589         case IBMVFC_LINK_DEAD:
1590         case IBMVFC_HOST_OFFLINE:
1591                 result = DID_NO_CONNECT << 16;
1592                 break;
1593         case IBMVFC_NO_CRQ:
1594         case IBMVFC_INITIALIZING:
1595         case IBMVFC_HALTED:
1596         case IBMVFC_LINK_DOWN:
1597                 result = DID_REQUEUE << 16;
1598                 break;
1599         case IBMVFC_ACTIVE:
1600                 result = 0;
1601                 break;
1602         };
1603
1604         return result;
1605 }
1606
1607 /**
1608  * ibmvfc_queuecommand - The queuecommand function of the scsi template
1609  * @cmnd:       struct scsi_cmnd to be executed
1610  * @done:       Callback function to be called when cmnd is completed
1611  *
1612  * Returns:
1613  *      0 on success / other on failure
1614  **/
1615 static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd,
1616                                void (*done) (struct scsi_cmnd *))
1617 {
1618         struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
1619         struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1620         struct ibmvfc_cmd *vfc_cmd;
1621         struct ibmvfc_event *evt;
1622         u8 tag[2];
1623         int rc;
1624
1625         if (unlikely((rc = fc_remote_port_chkready(rport))) ||
1626             unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1627                 cmnd->result = rc;
1628                 done(cmnd);
1629                 return 0;
1630         }
1631
1632         cmnd->result = (DID_OK << 16);
1633         evt = ibmvfc_get_event(vhost);
1634         ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1635         evt->cmnd = cmnd;
1636         cmnd->scsi_done = done;
1637         vfc_cmd = &evt->iu.cmd;
1638         memset(vfc_cmd, 0, sizeof(*vfc_cmd));
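             /*
              * Describe the response buffer by its I/O bus address: the event's
              * mapped CRQ address plus the offset of rsp within struct ibmvfc_cmd.
              */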
1639         vfc_cmd->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
1640         vfc_cmd->resp.len = sizeof(vfc_cmd->rsp);
1641         vfc_cmd->frame_type = IBMVFC_SCSI_FCP_TYPE;
1642         vfc_cmd->payload_len = sizeof(vfc_cmd->iu);
1643         vfc_cmd->resp_len = sizeof(vfc_cmd->rsp);
1644         vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata;
1645         vfc_cmd->tgt_scsi_id = rport->port_id;
1646         vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd);
1647         int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
1648         memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
1649
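             /*
              * Map the midlayer's SCSI-2 tag message onto FCP task attributes:
              * tag[0] is the queue message type, tag[1] the tag value itself.
              */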
1650         if (scsi_populate_tag_msg(cmnd, tag)) {
1651                 vfc_cmd->task_tag = tag[1];
1652                 switch (tag[0]) {
1653                 case MSG_SIMPLE_TAG:
1654                         vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
1655                         break;
1656                 case MSG_HEAD_TAG:
1657                         vfc_cmd->iu.pri_task_attr = IBMVFC_HEAD_OF_QUEUE;
1658                         break;
1659                 case MSG_ORDERED_TAG:
1660                         vfc_cmd->iu.pri_task_attr = IBMVFC_ORDERED_TASK;
1661                         break;
1662                 };
1663         }
1664
1665         if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
1666                 return ibmvfc_send_event(evt, vhost, 0);
1667
1668         ibmvfc_free_event(evt);
1669         if (rc == -ENOMEM)
1670                 return SCSI_MLQUEUE_HOST_BUSY;
1671
1672         if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1673                 scmd_printk(KERN_ERR, cmnd,
1674                             "Failed to map DMA buffer for command. rc=%d\n", rc);
1675
1676         cmnd->result = DID_ERROR << 16;
1677         done(cmnd);
1678         return 0;
1679 }
1680
1681 /**
1682  * ibmvfc_sync_completion - Signal that a synchronous command has completed
1683  * @evt:        ibmvfc event struct
1684  *
1685  **/
1686 static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1687 {
1688         /* copy the response back */
1689         if (evt->sync_iu)
1690                 *evt->sync_iu = *evt->xfer_iu;
1691
1692         complete(&evt->comp);
1693 }
1694
1695 /**
1696  * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
1697  * @evt:        struct ibmvfc_event
1698  *
1699  **/
1700 static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
1701 {
1702         struct ibmvfc_host *vhost = evt->vhost;
1703
1704         ibmvfc_free_event(evt);
1705         vhost->aborting_passthru = 0;
1706         dev_info(vhost->dev, "Passthru command cancelled\n");
1707 }
1708
1709 /**
1710  * ibmvfc_bsg_timeout - Handle a BSG timeout
1711  * @job:        struct fc_bsg_job that timed out
1712  *
1713  * Returns:
1714  *      0 on success / other on failure
1715  **/
1716 static int ibmvfc_bsg_timeout(struct fc_bsg_job *job)
1717 {
1718         struct ibmvfc_host *vhost = shost_priv(job->shost);
1719         unsigned long port_id = (unsigned long)job->dd_data;
1720         struct ibmvfc_event *evt;
1721         struct ibmvfc_tmf *tmf;
1722         unsigned long flags;
1723         int rc;
1724
1725         ENTER;
1726         spin_lock_irqsave(vhost->host->host_lock, flags);
1727         if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
1728                 __ibmvfc_reset_host(vhost);
1729                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1730                 return 0;
1731         }
1732
1733         vhost->aborting_passthru = 1;
1734         evt = ibmvfc_get_event(vhost);
1735         ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
1736
1737         tmf = &evt->iu.tmf;
1738         memset(tmf, 0, sizeof(*tmf));
1739         tmf->common.version = 1;
1740         tmf->common.opcode = IBMVFC_TMF_MAD;
1741         tmf->common.length = sizeof(*tmf);
1742         tmf->scsi_id = port_id;
1743         tmf->cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY;
1744         tmf->my_cancel_key = IBMVFC_INTERNAL_CANCEL_KEY;
1745         rc = ibmvfc_send_event(evt, vhost, default_timeout);
1746
1747         if (rc != 0) {
1748                 vhost->aborting_passthru = 0;
1749                 dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
1750                 rc = -EIO;
1751         } else
1752                 dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
1753                          port_id);
1754
1755         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1756
1757         LEAVE;
1758         return rc;
1759 }
1760
1761 /**
1762  * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
1763  * @vhost:              struct ibmvfc_host to send the PLOGI from
1764  * @port_id:    port ID to log into
1765  *
1766  * Returns:
1767  *      0 on success / other on failure
1768  **/
1769 static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
1770 {
1771         struct ibmvfc_port_login *plogi;
1772         struct ibmvfc_target *tgt;
1773         struct ibmvfc_event *evt;
1774         union ibmvfc_iu rsp_iu;
1775         unsigned long flags;
1776         int rc = 0, issue_login = 1;
1777
1778         ENTER;
1779         spin_lock_irqsave(vhost->host->host_lock, flags);
1780         list_for_each_entry(tgt, &vhost->targets, queue) {
1781                 if (tgt->scsi_id == port_id) {
1782                         issue_login = 0;
1783                         break;
1784                 }
1785         }
1786
1787         if (!issue_login)
1788                 goto unlock_out;
1789         if (unlikely((rc = ibmvfc_host_chkready(vhost))))
1790                 goto unlock_out;
1791
1792         evt = ibmvfc_get_event(vhost);
1793         ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
1794         plogi = &evt->iu.plogi;
1795         memset(plogi, 0, sizeof(*plogi));
1796         plogi->common.version = 1;
1797         plogi->common.opcode = IBMVFC_PORT_LOGIN;
1798         plogi->common.length = sizeof(*plogi);
1799         plogi->scsi_id = port_id;
1800         evt->sync_iu = &rsp_iu;
1801         init_completion(&evt->comp);
1802
1803         rc = ibmvfc_send_event(evt, vhost, default_timeout);
1804         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1805
1806         if (rc)
1807                 return -EIO;
1808
1809         wait_for_completion(&evt->comp);
1810
1811         if (rsp_iu.plogi.common.status)
1812                 rc = -EIO;
1813
1814         spin_lock_irqsave(vhost->host->host_lock, flags);
1815         ibmvfc_free_event(evt);
1816 unlock_out:
1817         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1818         LEAVE;
1819         return rc;
1820 }
1821
1822 /**
1823  * ibmvfc_bsg_request - Handle a BSG request
1824  * @job:        struct fc_bsg_job to be executed
1825  *
1826  * Returns:
1827  *      0 on success / other on failure
1828  **/
1829 static int ibmvfc_bsg_request(struct fc_bsg_job *job)
1830 {
1831         struct ibmvfc_host *vhost = shost_priv(job->shost);
1832         struct fc_rport *rport = job->rport;
1833         struct ibmvfc_passthru_mad *mad;
1834         struct ibmvfc_event *evt;
1835         union ibmvfc_iu rsp_iu;
1836         unsigned long flags, port_id = -1;
1837         unsigned int code = job->request->msgcode;
1838         int rc = 0, req_seg, rsp_seg, issue_login = 0;
1839         u32 fc_flags, rsp_len;
1840
1841         ENTER;
1842         job->reply->reply_payload_rcv_len = 0;
1843         if (rport)
1844                 port_id = rport->port_id;
1845
1846         switch (code) {
1847         case FC_BSG_HST_ELS_NOLOGIN:
1848                 port_id = (job->request->rqst_data.h_els.port_id[0] << 16) |
1849                         (job->request->rqst_data.h_els.port_id[1] << 8) |
1850                         job->request->rqst_data.h_els.port_id[2];
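                     /* fall through - FC_BSG_RPT_ELS shares the ELS flag */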
1851         case FC_BSG_RPT_ELS:
1852                 fc_flags = IBMVFC_FC_ELS;
1853                 break;
1854         case FC_BSG_HST_CT:
1855                 issue_login = 1;
1856                 port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) |
1857                         (job->request->rqst_data.h_ct.port_id[1] << 8) |
1858                         job->request->rqst_data.h_ct.port_id[2];
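                     /* fall through - FC_BSG_RPT_CT shares the CT IU flag */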
1859         case FC_BSG_RPT_CT:
1860                 fc_flags = IBMVFC_FC_CT_IU;
1861                 break;
1862         default:
1863                 return -ENOTSUPP;
1864         };
1865
1866         if (port_id == -1)
1867                 return -EINVAL;
1868         if (!mutex_trylock(&vhost->passthru_mutex))
1869                 return -EBUSY;
1870
1871         job->dd_data = (void *)port_id;
1872         req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
1873                              job->request_payload.sg_cnt, DMA_TO_DEVICE);
1874
1875         if (!req_seg) {
1876                 mutex_unlock(&vhost->passthru_mutex);
1877                 return -ENOMEM;
1878         }
1879
1880         rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
1881                              job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1882
1883         if (!rsp_seg) {
1884                 dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
1885                              job->request_payload.sg_cnt, DMA_TO_DEVICE);
1886                 mutex_unlock(&vhost->passthru_mutex);
1887                 return -ENOMEM;
1888         }
1889
1890         if (req_seg > 1 || rsp_seg > 1) {
1891                 rc = -EINVAL;
1892                 goto out;
1893         }
1894
1895         if (issue_login)
1896                 rc = ibmvfc_bsg_plogi(vhost, port_id);
1897
1898         spin_lock_irqsave(vhost->host->host_lock, flags);
1899
1900         if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
1901             unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1902                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1903                 goto out;
1904         }
1905
1906         evt = ibmvfc_get_event(vhost);
1907         ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
1908         mad = &evt->iu.passthru;
1909
1910         memset(mad, 0, sizeof(*mad));
1911         mad->common.version = 1;
1912         mad->common.opcode = IBMVFC_PASSTHRU;
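             /*
              * The MAD length excludes the embedded FC IU and passthru IU; the
              * passthru IU is referenced indirectly through cmd_ioba below.
              */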
1913         mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
1914
1915         mad->cmd_ioba.va = (u64)evt->crq.ioba +
1916                 offsetof(struct ibmvfc_passthru_mad, iu);
1917         mad->cmd_ioba.len = sizeof(mad->iu);
1918
1919         mad->iu.cmd_len = job->request_payload.payload_len;
1920         mad->iu.rsp_len = job->reply_payload.payload_len;
1921         mad->iu.flags = fc_flags;
1922         mad->iu.cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY;
1923
1924         mad->iu.cmd.va = sg_dma_address(job->request_payload.sg_list);
1925         mad->iu.cmd.len = sg_dma_len(job->request_payload.sg_list);
1926         mad->iu.rsp.va = sg_dma_address(job->reply_payload.sg_list);
1927         mad->iu.rsp.len = sg_dma_len(job->reply_payload.sg_list);
1928         mad->iu.scsi_id = port_id;
1929         mad->iu.tag = (u64)evt;
1930         rsp_len = mad->iu.rsp.len;
1931
1932         evt->sync_iu = &rsp_iu;
1933         init_completion(&evt->comp);
1934         rc = ibmvfc_send_event(evt, vhost, 0);
1935         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1936
1937         if (rc) {
1938                 rc = -EIO;
1939                 goto out;
1940         }
1941
1942         wait_for_completion(&evt->comp);
1943
1944         if (rsp_iu.passthru.common.status)
1945                 rc = -EIO;
1946         else
1947                 job->reply->reply_payload_rcv_len = rsp_len;
1948
1949         spin_lock_irqsave(vhost->host->host_lock, flags);
1950         ibmvfc_free_event(evt);
1951         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1952         job->reply->result = rc;
1953         job->job_done(job);
1954         rc = 0;
1955 out:
1956         dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
1957                      job->request_payload.sg_cnt, DMA_TO_DEVICE);
1958         dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
1959                      job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1960         mutex_unlock(&vhost->passthru_mutex);
1961         LEAVE;
1962         return rc;
1963 }
1964
1965 /**
1966  * ibmvfc_reset_device - Reset the device with the specified reset type
1967  * @sdev:       scsi device to reset
1968  * @type:       reset type
1969  * @desc:       reset type description for log messages
1970  *
1971  * Returns:
1972  *      0 on success / other on failure
1973  **/
1974 static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
1975 {
1976         struct ibmvfc_host *vhost = shost_priv(sdev->host);
1977         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1978         struct ibmvfc_cmd *tmf;
1979         struct ibmvfc_event *evt = NULL;
1980         union ibmvfc_iu rsp_iu;
1981         struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
1982         int rsp_rc = -EBUSY;
1983         unsigned long flags;
1984         int rsp_code = 0;
1985
1986         spin_lock_irqsave(vhost->host->host_lock, flags);
1987         if (vhost->state == IBMVFC_ACTIVE) {
1988                 evt = ibmvfc_get_event(vhost);
1989                 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
1990
1991                 tmf = &evt->iu.cmd;
1992                 memset(tmf, 0, sizeof(*tmf));
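                     /*
                      * The reset is sent as a regular FCP command IU with the TMF
                      * flag set and no data descriptors; only iu.tmf_flags selects
                      * the reset type.
                      */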
1993                 tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
1994                 tmf->resp.len = sizeof(tmf->rsp);
1995                 tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
1996                 tmf->payload_len = sizeof(tmf->iu);
1997                 tmf->resp_len = sizeof(tmf->rsp);
1998                 tmf->cancel_key = (unsigned long)sdev->hostdata;
1999                 tmf->tgt_scsi_id = rport->port_id;
2000                 int_to_scsilun(sdev->lun, &tmf->iu.lun);
2001                 tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
2002                 tmf->iu.tmf_flags = type;
2003                 evt->sync_iu = &rsp_iu;
2004
2005                 init_completion(&evt->comp);
2006                 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2007         }
2008         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2009
2010         if (rsp_rc != 0) {
2011                 sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
2012                             desc, rsp_rc);
2013                 return -EIO;
2014         }
2015
2016         sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
2017         wait_for_completion(&evt->comp);
2018
2019         if (rsp_iu.cmd.status)
2020                 rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
2021
2022         if (rsp_code) {
2023                 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2024                         rsp_code = fc_rsp->data.info.rsp_code;
2025
2026                 sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
2027                             "flags: %x fcp_rsp: %x, scsi_status: %x\n",
2028                             desc, ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
2029                             rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
2030                             fc_rsp->scsi_status);
2031                 rsp_rc = -EIO;
2032         } else
2033                 sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
2034
2035         spin_lock_irqsave(vhost->host->host_lock, flags);
2036         ibmvfc_free_event(evt);
2037         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2038         return rsp_rc;
2039 }
2040
2041 /**
2042  * ibmvfc_match_rport - Match function for specified remote port
2043  * @evt:        ibmvfc event struct
2044  * @rport:      rport to match
2045  *
2046  * Returns:
2047  *      1 if event matches rport / 0 if event does not match rport
2048  **/
2049 static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
2050 {
2051         struct fc_rport *cmd_rport;
2052
2053         if (evt->cmnd) {
2054                 cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
2055                 if (cmd_rport == rport)
2056                         return 1;
2057         }
2058         return 0;
2059 }
2060
2061 /**
2062  * ibmvfc_match_target - Match function for specified target
2063  * @evt:        ibmvfc event struct
2064  * @device:     device to match (starget)
2065  *
2066  * Returns:
2067  *      1 if event matches starget / 0 if event does not match starget
2068  **/
2069 static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
2070 {
2071         if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
2072                 return 1;
2073         return 0;
2074 }
2075
2076 /**
2077  * ibmvfc_match_lun - Match function for specified LUN
2078  * @evt:        ibmvfc event struct
2079  * @device:     device to match (sdev)
2080  *
2081  * Returns:
2082  *      1 if event matches sdev / 0 if event does not match sdev
2083  **/
2084 static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
2085 {
2086         if (evt->cmnd && evt->cmnd->device == device)
2087                 return 1;
2088         return 0;
2089 }
2090
2091 /**
2092  * ibmvfc_wait_for_ops - Wait for ops to complete
2093  * @vhost:      ibmvfc host struct
2094  * @device:     device to match (starget or sdev)
2095  * @match:      match function
2096  *
2097  * Returns:
2098  *      SUCCESS / FAILED
2099  **/
2100 static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
2101                                int (*match) (struct ibmvfc_event *, void *))
2102 {
2103         struct ibmvfc_event *evt;
2104         DECLARE_COMPLETION_ONSTACK(comp);
2105         int wait;
2106         unsigned long flags;
2107         signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
2108
2109         ENTER;
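             /*
              * Tag each outstanding event that matches with the on-stack
              * completion and wait for them to drain.  On timeout, clear the
              * pointers again so late completions do not reference the stack
              * after this function returns.
              */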
2110         do {
2111                 wait = 0;
2112                 spin_lock_irqsave(vhost->host->host_lock, flags);
2113                 list_for_each_entry(evt, &vhost->sent, queue) {
2114                         if (match(evt, device)) {
2115                                 evt->eh_comp = &comp;
2116                                 wait++;
2117                         }
2118                 }
2119                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2120
2121                 if (wait) {
2122                         timeout = wait_for_completion_timeout(&comp, timeout);
2123
2124                         if (!timeout) {
2125                                 wait = 0;
2126                                 spin_lock_irqsave(vhost->host->host_lock, flags);
2127                                 list_for_each_entry(evt, &vhost->sent, queue) {
2128                                         if (match(evt, device)) {
2129                                                 evt->eh_comp = NULL;
2130                                                 wait++;
2131                                         }
2132                                 }
2133                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2134                                 if (wait)
2135                                         dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
2136                                 LEAVE;
2137                                 return wait ? FAILED : SUCCESS;
2138                         }
2139                 }
2140         } while (wait);
2141
2142         LEAVE;
2143         return SUCCESS;
2144 }
2145
2146 /**
2147  * ibmvfc_cancel_all - Cancel all outstanding commands to the device
2148  * @sdev:       scsi device to cancel commands
2149  * @type:       type of error recovery being performed
2150  *
2151  * This sends a cancel to the VIOS for the specified device. This does
2152  * NOT send any abort to the actual device. That must be done separately.
2153  *
2154  * Returns:
2155  *      0 on success / other on failure
2156  **/
2157 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2158 {
2159         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2160         struct scsi_target *starget = scsi_target(sdev);
2161         struct fc_rport *rport = starget_to_rport(starget);
2162         struct ibmvfc_tmf *tmf;
2163         struct ibmvfc_event *evt, *found_evt;
2164         union ibmvfc_iu rsp;
2165         int rsp_rc = -EBUSY;
2166         unsigned long flags;
2167         u16 status;
2168
2169         ENTER;
2170         spin_lock_irqsave(vhost->host->host_lock, flags);
2171         found_evt = NULL;
2172         list_for_each_entry(evt, &vhost->sent, queue) {
2173                 if (evt->cmnd && evt->cmnd->device == sdev) {
2174                         found_evt = evt;
2175                         break;
2176                 }
2177         }
2178
2179         if (!found_evt) {
2180                 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2181                         sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2182                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2183                 return 0;
2184         }
2185
2186         if (vhost->state == IBMVFC_ACTIVE) {
2187                 evt = ibmvfc_get_event(vhost);
2188                 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2189
2190                 tmf = &evt->iu.tmf;
2191                 memset(tmf, 0, sizeof(*tmf));
2192                 tmf->common.version = 1;
2193                 tmf->common.opcode = IBMVFC_TMF_MAD;
2194                 tmf->common.length = sizeof(*tmf);
2195                 tmf->scsi_id = rport->port_id;
2196                 int_to_scsilun(sdev->lun, &tmf->lun);
2197                 tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
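                     /*
                      * cancel_key matches the per-device key stamped on each command
                      * in ibmvfc_queuecommand() (set in ibmvfc_slave_alloc());
                      * my_cancel_key is the target-level key from ibmvfc_target_alloc().
                      */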
2198                 tmf->cancel_key = (unsigned long)sdev->hostdata;
2199                 tmf->my_cancel_key = (unsigned long)starget->hostdata;
2200
2201                 evt->sync_iu = &rsp;
2202                 init_completion(&evt->comp);
2203                 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2204         }
2205
2206         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2207
2208         if (rsp_rc != 0) {
2209                 sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
2210                 return -EIO;
2211         }
2212
2213         sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2214
2215         wait_for_completion(&evt->comp);
2216         status = rsp.mad_common.status;
2217         spin_lock_irqsave(vhost->host->host_lock, flags);
2218         ibmvfc_free_event(evt);
2219         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2220
2221         if (status != IBMVFC_MAD_SUCCESS) {
2222                 sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2223                 return -EIO;
2224         }
2225
2226         sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2227         return 0;
2228 }
2229
2230 /**
2231  * ibmvfc_match_key - Match function for specified cancel key
2232  * @evt:        ibmvfc event struct
2233  * @key:        cancel key to match
2234  *
2235  * Returns:
2236  *      1 if event matches key / 0 if event does not match key
2237  **/
2238 static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
2239 {
2240         unsigned long cancel_key = (unsigned long)key;
2241
2242         if (evt->crq.format == IBMVFC_CMD_FORMAT &&
2243             evt->iu.cmd.cancel_key == cancel_key)
2244                 return 1;
2245         return 0;
2246 }
2247
2248 /**
2249  * ibmvfc_abort_task_set - Abort outstanding commands to the device
2250  * @sdev:       scsi device to abort commands
2251  *
2252  * This sends an Abort Task Set to the VIOS for the specified device. This does
2253  * NOT send any cancel to the VIOS. That must be done separately.
2254  *
2255  * Returns:
2256  *      0 on success / other on failure
2257  **/
2258 static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2259 {
2260         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2261         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2262         struct ibmvfc_cmd *tmf;
2263         struct ibmvfc_event *evt, *found_evt;
2264         union ibmvfc_iu rsp_iu;
2265         struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
2266         int rc, rsp_rc = -EBUSY;
2267         unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
2268         int rsp_code = 0;
2269
2270         spin_lock_irqsave(vhost->host->host_lock, flags);
2271         found_evt = NULL;
2272         list_for_each_entry(evt, &vhost->sent, queue) {
2273                 if (evt->cmnd && evt->cmnd->device == sdev) {
2274                         found_evt = evt;
2275                         break;
2276                 }
2277         }
2278
2279         if (!found_evt) {
2280                 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2281                         sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
2282                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2283                 return 0;
2284         }
2285
2286         if (vhost->state == IBMVFC_ACTIVE) {
2287                 evt = ibmvfc_get_event(vhost);
2288                 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2289
2290                 tmf = &evt->iu.cmd;
2291                 memset(tmf, 0, sizeof(*tmf));
2292                 tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
2293                 tmf->resp.len = sizeof(tmf->rsp);
2294                 tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
2295                 tmf->payload_len = sizeof(tmf->iu);
2296                 tmf->resp_len = sizeof(tmf->rsp);
2297                 tmf->cancel_key = (unsigned long)sdev->hostdata;
2298                 tmf->tgt_scsi_id = rport->port_id;
2299                 int_to_scsilun(sdev->lun, &tmf->iu.lun);
2300                 tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
2301                 tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
2302                 evt->sync_iu = &rsp_iu;
2303
2304                 init_completion(&evt->comp);
2305                 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2306         }
2307
2308         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2309
2310         if (rsp_rc != 0) {
2311                 sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
2312                 return -EIO;
2313         }
2314
2315         sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
2316         timeout = wait_for_completion_timeout(&evt->comp, timeout);
2317
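             /*
              * If the abort itself times out, fall back to cancelling everything
              * for this device and waiting for the matching events to drain; if
              * even that fails, reset the whole host connection.
              */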
2318         if (!timeout) {
2319                 rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2320                 if (!rc) {
2321                         rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2322                         if (rc == SUCCESS)
2323                                 rc = 0;
2324                 }
2325
2326                 if (rc) {
2327                         sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
2328                         ibmvfc_reset_host(vhost);
2329                         rsp_rc = 0;
2330                         goto out;
2331                 }
2332         }
2333
2334         if (rsp_iu.cmd.status)
2335                 rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
2336
2337         if (rsp_code) {
2338                 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2339                         rsp_code = fc_rsp->data.info.rsp_code;
2340
2341                 sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
2342                             "flags: %x fcp_rsp: %x, scsi_status: %x\n",
2343                             ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
2344                             rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
2345                             fc_rsp->scsi_status);
2346                 rsp_rc = -EIO;
2347         } else
2348                 sdev_printk(KERN_INFO, sdev, "Abort successful\n");
2349
2350 out:
2351         spin_lock_irqsave(vhost->host->host_lock, flags);
2352         ibmvfc_free_event(evt);
2353         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2354         return rsp_rc;
2355 }
2356
2357 /**
2358  * ibmvfc_eh_abort_handler - Abort a command
2359  * @cmd:        scsi command to abort
2360  *
2361  * Returns:
2362  *      SUCCESS / FAILED
2363  **/
2364 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2365 {
2366         struct scsi_device *sdev = cmd->device;
2367         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2368         int cancel_rc, abort_rc;
2369         int rc = FAILED;
2370
2371         ENTER;
2372         fc_block_scsi_eh(cmd);
2373         ibmvfc_wait_while_resetting(vhost);
2374         cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2375         abort_rc = ibmvfc_abort_task_set(sdev);
2376
2377         if (!cancel_rc && !abort_rc)
2378                 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2379
2380         LEAVE;
2381         return rc;
2382 }
2383
2384 /**
2385  * ibmvfc_eh_device_reset_handler - Reset a single LUN
2386  * @cmd:        scsi command struct
2387  *
2388  * Returns:
2389  *      SUCCESS / FAILED
2390  **/
2391 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2392 {
2393         struct scsi_device *sdev = cmd->device;
2394         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2395         int cancel_rc, reset_rc;
2396         int rc = FAILED;
2397
2398         ENTER;
2399         fc_block_scsi_eh(cmd);
2400         ibmvfc_wait_while_resetting(vhost);
2401         cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2402         reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
2403
2404         if (!cancel_rc && !reset_rc)
2405                 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2406
2407         LEAVE;
2408         return rc;
2409 }
2410
2411 /**
2412  * ibmvfc_dev_cancel_all_reset - Per-device cancel-all callback for target reset
2413  * @sdev:       scsi device struct
2414  * @data:       pointer to the accumulated return code
2415  *
2416  **/
2417 static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
2418 {
2419         unsigned long *rc = data;
2420         *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
2421 }
2422
2423 /**
2424  * ibmvfc_eh_target_reset_handler - Reset the target
2425  * @cmd:        scsi command struct
2426  *
2427  * Returns:
2428  *      SUCCESS / FAILED
2429  **/
2430 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2431 {
2432         struct scsi_device *sdev = cmd->device;
2433         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2434         struct scsi_target *starget = scsi_target(sdev);
2435         int reset_rc;
2436         int rc = FAILED;
2437         unsigned long cancel_rc = 0;
2438
2439         ENTER;
2440         fc_block_scsi_eh(cmd);
2441         ibmvfc_wait_while_resetting(vhost);
2442         starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
2443         reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
2444
2445         if (!cancel_rc && !reset_rc)
2446                 rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2447
2448         LEAVE;
2449         return rc;
2450 }
2451
2452 /**
2453  * ibmvfc_eh_host_reset_handler - Reset the connection to the server
2454  * @cmd:        struct scsi_cmnd having problems
2455  *
2456  **/
2457 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
2458 {
2459         int rc;
2460         struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
2461
2462         fc_block_scsi_eh(cmd);
2463         dev_err(vhost->dev, "Resetting connection due to error recovery\n");
2464         rc = ibmvfc_issue_fc_host_lip(vhost->host);
2465         return rc ? FAILED : SUCCESS;
2466 }
2467
2468 /**
2469  * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
2470  * @rport:              rport struct
2471  *
2472  * Return value:
2473  *      none
2474  **/
2475 static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2476 {
2477         struct Scsi_Host *shost = rport_to_shost(rport);
2478         struct ibmvfc_host *vhost = shost_priv(shost);
2479         struct fc_rport *dev_rport;
2480         struct scsi_device *sdev;
2481         unsigned long rc;
2482
2483         ENTER;
2484         shost_for_each_device(sdev, shost) {
2485                 dev_rport = starget_to_rport(scsi_target(sdev));
2486                 if (dev_rport != rport)
2487                         continue;
2488                 ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2489                 ibmvfc_abort_task_set(sdev);
2490         }
2491
2492         rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
2493
2494         if (rc == FAILED)
2495                 ibmvfc_issue_fc_host_lip(shost);
2496         LEAVE;
2497 }
2498
2499 static const struct {
2500         enum ibmvfc_async_event ae;
2501         const char *desc;
2502 } ae_desc [] = {
2503         { IBMVFC_AE_ELS_PLOGI,          "PLOGI" },
2504         { IBMVFC_AE_ELS_LOGO,           "LOGO" },
2505         { IBMVFC_AE_ELS_PRLO,           "PRLO" },
2506         { IBMVFC_AE_SCN_NPORT,          "N-Port SCN" },
2507         { IBMVFC_AE_SCN_GROUP,          "Group SCN" },
2508         { IBMVFC_AE_SCN_DOMAIN,         "Domain SCN" },
2509         { IBMVFC_AE_SCN_FABRIC,         "Fabric SCN" },
2510         { IBMVFC_AE_LINK_UP,            "Link Up" },
2511         { IBMVFC_AE_LINK_DOWN,          "Link Down" },
2512         { IBMVFC_AE_LINK_DEAD,          "Link Dead" },
2513         { IBMVFC_AE_HALT,               "Halt" },
2514         { IBMVFC_AE_RESUME,             "Resume" },
2515         { IBMVFC_AE_ADAPTER_FAILED,     "Adapter Failed" },
2516 };
2517
2518 static const char *unknown_ae = "Unknown async";
2519
2520 /**
2521  * ibmvfc_get_ae_desc - Get text description for async event
2522  * @ae: async event
2523  *
2524  **/
2525 static const char *ibmvfc_get_ae_desc(u64 ae)
2526 {
2527         int i;
2528
2529         for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
2530                 if (ae_desc[i].ae == ae)
2531                         return ae_desc[i].desc;
2532
2533         return unknown_ae;
2534 }
2535
2536 /**
2537  * ibmvfc_handle_async - Handle an async event from the adapter
2538  * @crq:        crq to process
2539  * @vhost:      ibmvfc host struct
2540  *
2541  **/
2542 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2543                                 struct ibmvfc_host *vhost)
2544 {
2545         const char *desc = ibmvfc_get_ae_desc(crq->event);
2546         struct ibmvfc_target *tgt;
2547
2548         ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx,"
2549                    " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
2550
2551         switch (crq->event) {
2552         case IBMVFC_AE_RESUME:
2553                 switch (crq->link_state) {
2554                 case IBMVFC_AE_LS_LINK_DOWN:
2555                         ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2556                         break;
2557                 case IBMVFC_AE_LS_LINK_DEAD:
2558                         ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2559                         break;
2560                 case IBMVFC_AE_LS_LINK_UP:
2561                 case IBMVFC_AE_LS_LINK_BOUNCED:
2562                 default:
2563                         vhost->events_to_log |= IBMVFC_AE_LINKUP;
2564                         vhost->delay_init = 1;
2565                         __ibmvfc_reset_host(vhost);
2566                         break;
2567                 };
2568
2569                 break;
2570         case IBMVFC_AE_LINK_UP:
2571                 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2572                 vhost->delay_init = 1;
2573                 __ibmvfc_reset_host(vhost);
2574                 break;
2575         case IBMVFC_AE_SCN_FABRIC:
2576         case IBMVFC_AE_SCN_DOMAIN:
2577                 vhost->events_to_log |= IBMVFC_AE_RSCN;
2578                 vhost->delay_init = 1;
2579                 __ibmvfc_reset_host(vhost);
2580                 break;
2581         case IBMVFC_AE_SCN_NPORT:
2582         case IBMVFC_AE_SCN_GROUP:
2583                 vhost->events_to_log |= IBMVFC_AE_RSCN;
2584                 ibmvfc_reinit_host(vhost);
2585                 break;
2586         case IBMVFC_AE_ELS_LOGO:
2587         case IBMVFC_AE_ELS_PRLO:
2588         case IBMVFC_AE_ELS_PLOGI:
2589                 list_for_each_entry(tgt, &vhost->targets, queue) {
2590                         if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
2591                                 break;
2592                         if (crq->scsi_id && tgt->scsi_id != crq->scsi_id)
2593                                 continue;
2594                         if (crq->wwpn && tgt->ids.port_name != crq->wwpn)
2595                                 continue;
2596                         if (crq->node_name && tgt->ids.node_name != crq->node_name)
2597                                 continue;
2598                         if (tgt->need_login && crq->event == IBMVFC_AE_ELS_LOGO)
2599                                 tgt->logo_rcvd = 1;
2600                         if (!tgt->need_login || crq->event == IBMVFC_AE_ELS_PLOGI) {
2601                                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2602                                 ibmvfc_reinit_host(vhost);
2603                         }
2604                 }
2605                 break;
2606         case IBMVFC_AE_LINK_DOWN:
2607         case IBMVFC_AE_ADAPTER_FAILED:
2608                 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2609                 break;
2610         case IBMVFC_AE_LINK_DEAD:
2611                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2612                 break;
2613         case IBMVFC_AE_HALT:
2614                 ibmvfc_link_down(vhost, IBMVFC_HALTED);
2615                 break;
2616         default:
2617                 dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
2618                 break;
2619         };
2620 }
2621
2622 /**
2623  * ibmvfc_handle_crq - Handles and frees received events in the CRQ
2624  * @crq:        Command/Response queue
2625  * @vhost:      ibmvfc host struct
2626  *
2627  **/
2628 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2629 {
2630         long rc;
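             /*
              * For command responses, crq->ioba carries back the correlation
              * token we set as the tag in ibmvfc_send_event(), i.e. the event
              * pointer itself.
              */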
2631         struct ibmvfc_event *evt = (struct ibmvfc_event *)crq->ioba;
2632
2633         switch (crq->valid) {
2634         case IBMVFC_CRQ_INIT_RSP:
2635                 switch (crq->format) {
2636                 case IBMVFC_CRQ_INIT:
2637                         dev_info(vhost->dev, "Partner initialized\n");
2638                         /* Send back a response */
2639                         rc = ibmvfc_send_crq_init_complete(vhost);
2640                         if (rc == 0)
2641                                 ibmvfc_init_host(vhost);
2642                         else
2643                                 dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
2644                         break;
2645                 case IBMVFC_CRQ_INIT_COMPLETE:
2646                         dev_info(vhost->dev, "Partner initialization complete\n");
2647                         ibmvfc_init_host(vhost);
2648                         break;
2649                 default:
2650                         dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
2651                 }
2652                 return;
2653         case IBMVFC_CRQ_XPORT_EVENT:
2654                 vhost->state = IBMVFC_NO_CRQ;
2655                 vhost->logged_in = 0;
2656                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
2657                 if (crq->format == IBMVFC_PARTITION_MIGRATED) {
2658                         /* We need to re-setup the interpartition connection */
2659                         dev_info(vhost->dev, "Re-enabling adapter\n");
2660                         vhost->client_migrated = 1;
2661                         ibmvfc_purge_requests(vhost, DID_REQUEUE);
2662                         ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2663                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
2664                 } else {
2665                         dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
2666                         ibmvfc_purge_requests(vhost, DID_ERROR);
2667                         ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2668                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
2669                 }
2670                 return;
2671         case IBMVFC_CRQ_CMD_RSP:
2672                 break;
2673         default:
2674                 dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
2675                 return;
2676         }
2677
2678         if (crq->format == IBMVFC_ASYNC_EVENT)
2679                 return;
2680
2681         /* The only kind of payload CRQs we should get are responses to
2682          * things we send. Make sure this response is to something we
2683          * actually sent
2684          */
2685         if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
2686                 dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
2687                         crq->ioba);
2688                 return;
2689         }
2690
2691         if (unlikely(atomic_read(&evt->free))) {
2692                 dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
2693                         crq->ioba);
2694                 return;
2695         }
2696
2697         del_timer(&evt->timer);
2698         list_del(&evt->queue);
2699         ibmvfc_trc_end(evt);
2700         evt->done(evt);
2701 }
2702
2703 /**
2704  * ibmvfc_scan_finished - Check if the device scan is done.
2705  * @shost:      scsi host struct
2706  * @time:       current elapsed time
2707  *
2708  * Returns:
2709  *      0 if scan is not done / 1 if scan is done
2710  **/
2711 static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2712 {
2713         unsigned long flags;
2714         struct ibmvfc_host *vhost = shost_priv(shost);
2715         int done = 0;
2716
2717         spin_lock_irqsave(shost->host_lock, flags);
2718         if (time >= (init_timeout * HZ)) {
2719                 dev_info(vhost->dev, "Scan taking longer than %d seconds, "
2720                          "continuing initialization\n", init_timeout);
2721                 done = 1;
2722         }
2723
2724         if (vhost->scan_complete)
2725                 done = 1;
2726         spin_unlock_irqrestore(shost->host_lock, flags);
2727         return done;
2728 }
2729
2730 /**
2731  * ibmvfc_slave_alloc - Setup the device's task set value
2732  * @sdev:       struct scsi_device device to configure
2733  *
2734  * Set the device's task set value so that error handling works as
2735  * expected.
2736  *
2737  * Returns:
2738  *      0 on success / -ENXIO if device does not exist
2739  **/
2740 static int ibmvfc_slave_alloc(struct scsi_device *sdev)
2741 {
2742         struct Scsi_Host *shost = sdev->host;
2743         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2744         struct ibmvfc_host *vhost = shost_priv(shost);
2745         unsigned long flags = 0;
2746
2747         if (!rport || fc_remote_port_chkready(rport))
2748                 return -ENXIO;
2749
2750         spin_lock_irqsave(shost->host_lock, flags);
2751         sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
2752         spin_unlock_irqrestore(shost->host_lock, flags);
2753         return 0;
2754 }
2755
2756 /**
2757  * ibmvfc_target_alloc - Setup the target's task set value
2758  * @starget:    struct scsi_target
2759  *
2760  * Set the target's task set value so that error handling works as
2761  * expected.
2762  *
2763  * Returns:
2764  *      0 on success / -ENXIO if device does not exist
2765  **/
2766 static int ibmvfc_target_alloc(struct scsi_target *starget)
2767 {
2768         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2769         struct ibmvfc_host *vhost = shost_priv(shost);
2770         unsigned long flags = 0;
2771
2772         spin_lock_irqsave(shost->host_lock, flags);
2773         starget->hostdata = (void *)(unsigned long)vhost->task_set++;
2774         spin_unlock_irqrestore(shost->host_lock, flags);
2775         return 0;
2776 }
2777
2778 /**
2779  * ibmvfc_slave_configure - Configure the device
2780  * @sdev:       struct scsi_device device to configure
2781  *
2782  * Enable allow_restart for a device if it is a disk. Also set up
2783  * tagged queueing and the rport's dev_loss_tmo.
2784  *
2785  * Returns:
2786  *      0
2787  **/
2788 static int ibmvfc_slave_configure(struct scsi_device *sdev)
2789 {
2790         struct Scsi_Host *shost = sdev->host;
2791         struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
2792         unsigned long flags = 0;
2793
2794         spin_lock_irqsave(shost->host_lock, flags);
2795         if (sdev->type == TYPE_DISK)
2796                 sdev->allow_restart = 1;
2797
2798         if (sdev->tagged_supported) {
2799                 scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
2800                 scsi_activate_tcq(sdev, sdev->queue_depth);
2801         } else
2802                 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2803
2804         rport->dev_loss_tmo = dev_loss_tmo;
2805         spin_unlock_irqrestore(shost->host_lock, flags);
2806         return 0;
2807 }
2808
2809 /**
2810  * ibmvfc_change_queue_depth - Change the device's queue depth
2811  * @sdev:       scsi device struct
2812  * @qdepth:     depth to set
2813  * @reason:     calling context
2814  *
2815  * Return value:
2816  *      actual depth set
2817  **/
2818 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth,
2819                                      int reason)
2820 {
2821         if (reason != SCSI_QDEPTH_DEFAULT)
2822                 return -EOPNOTSUPP;
2823
2824         if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
2825                 qdepth = IBMVFC_MAX_CMDS_PER_LUN;
2826
2827         scsi_adjust_queue_depth(sdev, 0, qdepth);
2828         return sdev->queue_depth;
2829 }
2830
2831 /**
2832  * ibmvfc_change_queue_type - Change the device's queue type
2833  * @sdev:               scsi device struct
2834  * @tag_type:   type of tags to use
2835  *
2836  * Return value:
2837  *      actual queue type set
2838  **/
2839 static int ibmvfc_change_queue_type(struct scsi_device *sdev, int tag_type)
2840 {
2841         if (sdev->tagged_supported) {
2842                 scsi_set_tag_type(sdev, tag_type);
2843
2844                 if (tag_type)
2845                         scsi_activate_tcq(sdev, sdev->queue_depth);
2846                 else
2847                         scsi_deactivate_tcq(sdev, sdev->queue_depth);
2848         } else
2849                 tag_type = 0;
2850
2851         return tag_type;
2852 }
2853
2854 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
2855                                                  struct device_attribute *attr, char *buf)
2856 {
2857         struct Scsi_Host *shost = class_to_shost(dev);
2858         struct ibmvfc_host *vhost = shost_priv(shost);
2859
2860         return snprintf(buf, PAGE_SIZE, "%s\n",
2861                         vhost->login_buf->resp.partition_name);
2862 }
2863
2864 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2865                                             struct device_attribute *attr, char *buf)
2866 {
2867         struct Scsi_Host *shost = class_to_shost(dev);
2868         struct ibmvfc_host *vhost = shost_priv(shost);
2869
2870         return snprintf(buf, PAGE_SIZE, "%s\n",
2871                         vhost->login_buf->resp.device_name);
2872 }
2873
2874 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2875                                          struct device_attribute *attr, char *buf)
2876 {
2877         struct Scsi_Host *shost = class_to_shost(dev);
2878         struct ibmvfc_host *vhost = shost_priv(shost);
2879
2880         return snprintf(buf, PAGE_SIZE, "%s\n",
2881                         vhost->login_buf->resp.port_loc_code);
2882 }
2883
2884 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2885                                          struct device_attribute *attr, char *buf)
2886 {
2887         struct Scsi_Host *shost = class_to_shost(dev);
2888         struct ibmvfc_host *vhost = shost_priv(shost);
2889
2890         return snprintf(buf, PAGE_SIZE, "%s\n",
2891                         vhost->login_buf->resp.drc_name);
2892 }
2893
2894 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2895                                              struct device_attribute *attr, char *buf)
2896 {
2897         struct Scsi_Host *shost = class_to_shost(dev);
2898         struct ibmvfc_host *vhost = shost_priv(shost);
2899         return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
2900 }
2901
2902 static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
2903                                              struct device_attribute *attr, char *buf)
2904 {
2905         struct Scsi_Host *shost = class_to_shost(dev);
2906         struct ibmvfc_host *vhost = shost_priv(shost);
2907         return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
2908 }
2909
2910 /**
2911  * ibmvfc_show_log_level - Show the adapter's error logging level
2912  * @dev:        class device struct
2913  * @buf:        buffer
2914  *
2915  * Return value:
2916  *      number of bytes printed to buffer
2917  **/
2918 static ssize_t ibmvfc_show_log_level(struct device *dev,
2919                                      struct device_attribute *attr, char *buf)
2920 {
2921         struct Scsi_Host *shost = class_to_shost(dev);
2922         struct ibmvfc_host *vhost = shost_priv(shost);
2923         unsigned long flags = 0;
2924         int len;
2925
2926         spin_lock_irqsave(shost->host_lock, flags);
2927         len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
2928         spin_unlock_irqrestore(shost->host_lock, flags);
2929         return len;
2930 }
2931
2932 /**
2933  * ibmvfc_store_log_level - Change the adapter's error logging level
2934  * @dev:        class device struct
2935  * @buf:        buffer
2936  *
2937  * Return value:
2938  *      number of bytes consumed from the buffer
2939  **/
2940 static ssize_t ibmvfc_store_log_level(struct device *dev,
2941                                       struct device_attribute *attr,
2942                                       const char *buf, size_t count)
2943 {
2944         struct Scsi_Host *shost = class_to_shost(dev);
2945         struct ibmvfc_host *vhost = shost_priv(shost);
2946         unsigned long flags = 0;
2947
2948         spin_lock_irqsave(shost->host_lock, flags);
2949         vhost->log_level = simple_strtoul(buf, NULL, 10);
2950         spin_unlock_irqrestore(shost->host_lock, flags);
2951         return strlen(buf);
2952 }
2953
2954 static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
2955 static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
2956 static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
2957 static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
2958 static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
2959 static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
2960 static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
2961                    ibmvfc_show_log_level, ibmvfc_store_log_level);
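     /*
      * These attributes are hung off the Scsi_Host via shost_attrs below, so
      * they typically appear under /sys/class/scsi_host/host<N>/ (for example,
      * writing to log_level there adjusts error logging verbosity at runtime).
      */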
2962
2963 #ifdef CONFIG_SCSI_IBMVFC_TRACE
2964 /**
2965  * ibmvfc_read_trace - Dump the adapter trace
2966  * @filp:               open sysfs file
2967  * @kobj:               kobject struct
2968  * @bin_attr:   bin_attribute struct
2969  * @buf:                buffer
2970  * @off:                offset
2971  * @count:              buffer size
2972  *
2973  * Return value:
2974  *      number of bytes copied to the buffer
2975  **/
2976 static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
2977                                  struct bin_attribute *bin_attr,
2978                                  char *buf, loff_t off, size_t count)
2979 {
2980         struct device *dev = container_of(kobj, struct device, kobj);
2981         struct Scsi_Host *shost = class_to_shost(dev);
2982         struct ibmvfc_host *vhost = shost_priv(shost);
2983         unsigned long flags = 0;
2984         int size = IBMVFC_TRACE_SIZE;
2985         char *src = (char *)vhost->trace;
2986
2987         if (off > size)
2988                 return 0;
2989         if (off + count > size) {
2990                 size -= off;
2991                 count = size;
2992         }
2993
2994         spin_lock_irqsave(shost->host_lock, flags);
2995         memcpy(buf, &src[off], count);
2996         spin_unlock_irqrestore(shost->host_lock, flags);
2997         return count;
2998 }
2999
3000 static struct bin_attribute ibmvfc_trace_attr = {
3001         .attr = {
3002                 .name = "trace",
3003                 .mode = S_IRUGO,
3004         },
3005         .size = 0,
3006         .read = ibmvfc_read_trace,
3007 };
3008 #endif
3009
3010 static struct device_attribute *ibmvfc_attrs[] = {
3011         &dev_attr_partition_name,
3012         &dev_attr_device_name,
3013         &dev_attr_port_loc_code,
3014         &dev_attr_drc_name,
3015         &dev_attr_npiv_version,
3016         &dev_attr_capabilities,
3017         &dev_attr_log_level,
3018         NULL
3019 };
3020
3021 static struct scsi_host_template driver_template = {
3022         .module = THIS_MODULE,
3023         .name = "IBM POWER Virtual FC Adapter",
3024         .proc_name = IBMVFC_NAME,
3025         .queuecommand = ibmvfc_queuecommand,
3026         .eh_abort_handler = ibmvfc_eh_abort_handler,
3027         .eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
3028         .eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
3029         .eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
3030         .slave_alloc = ibmvfc_slave_alloc,
3031         .slave_configure = ibmvfc_slave_configure,
3032         .target_alloc = ibmvfc_target_alloc,
3033         .scan_finished = ibmvfc_scan_finished,
3034         .change_queue_depth = ibmvfc_change_queue_depth,
3035         .change_queue_type = ibmvfc_change_queue_type,
3036         .cmd_per_lun = 16,
3037         .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
3038         .this_id = -1,
3039         .sg_tablesize = SG_ALL,
3040         .max_sectors = IBMVFC_MAX_SECTORS,
3041         .use_clustering = ENABLE_CLUSTERING,
3042         .shost_attrs = ibmvfc_attrs,
3043 };
3044
3045 /**
3046  * ibmvfc_next_async_crq - Returns the next entry in async queue
3047  * @vhost:      ibmvfc host struct
3048  *
3049  * Returns:
3050  *      Pointer to next entry in queue / NULL if empty
3051  **/
3052 static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
3053 {
3054         struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
3055         struct ibmvfc_async_crq *crq;
3056
3057         crq = &async_crq->msgs[async_crq->cur];
3058         if (crq->valid & 0x80) {
3059                 if (++async_crq->cur == async_crq->size)
3060                         async_crq->cur = 0;
3061                 rmb();
3062         } else
3063                 crq = NULL;
3064
3065         return crq;
3066 }
3067
3068 /**
3069  * ibmvfc_next_crq - Returns the next entry in message queue
3070  * @vhost:      ibmvfc host struct
3071  *
3072  * Returns:
3073  *      Pointer to next entry in queue / NULL if empty
3074  **/
3075 static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
3076 {
3077         struct ibmvfc_crq_queue *queue = &vhost->crq;
3078         struct ibmvfc_crq *crq;
3079
3080         crq = &queue->msgs[queue->cur];
3081         if (crq->valid & 0x80) {
3082                 if (++queue->cur == queue->size)
3083                         queue->cur = 0;
3084                 rmb();
3085         } else
3086                 crq = NULL;
3087
3088         return crq;
3089 }
3090
3091 /**
3092  * ibmvfc_interrupt - Interrupt handler
3093  * @irq:                number of irq to handle, not used
3094  * @dev_instance: ibmvfc_host that received interrupt
3095  *
3096  * Returns:
3097  *      IRQ_HANDLED
3098  **/
3099 static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
3100 {
3101         struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
3102         unsigned long flags;
3103
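             /*
              * The hard interrupt handler only masks further firmware interrupts
              * and kicks the tasklet; all CRQ and async queue processing happens
              * in ibmvfc_tasklet() below.
              */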
3104         spin_lock_irqsave(vhost->host->host_lock, flags);
3105         vio_disable_interrupts(to_vio_dev(vhost->dev));
3106         tasklet_schedule(&vhost->tasklet);
3107         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3108         return IRQ_HANDLED;
3109 }
3110
3111 /**
3112  * ibmvfc_tasklet - Interrupt handler tasklet
3113  * @data:               ibmvfc host struct
3114  *
3115  * Returns:
3116  *      Nothing
3117  **/
3118 static void ibmvfc_tasklet(void *data)
3119 {
3120         struct ibmvfc_host *vhost = data;
3121         struct vio_dev *vdev = to_vio_dev(vhost->dev);
3122         struct ibmvfc_crq *crq;
3123         struct ibmvfc_async_crq *async;
3124         unsigned long flags;
3125         int done = 0;
3126
3127         spin_lock_irqsave(vhost->host->host_lock, flags);
3128         while (!done) {
3129                 /* Pull all the valid messages off the async CRQ */
3130                 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3131                         ibmvfc_handle_async(async, vhost);
3132                         async->valid = 0;
3133                         wmb();
3134                 }
3135
3136                 /* Pull all the valid messages off the CRQ */
3137                 while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3138                         ibmvfc_handle_crq(crq, vhost);
3139                         crq->valid = 0;
3140                         wmb();
3141                 }
3142
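                     /*
                      * Re-enable interrupts and then poll both queues one more
                      * time: this closes the window where an entry arrives after
                      * the loops above drained the queues but before interrupts
                      * were re-enabled, which would otherwise go unnoticed.
                      */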
3143                 vio_enable_interrupts(vdev);
3144                 if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3145                         vio_disable_interrupts(vdev);
3146                         ibmvfc_handle_async(async, vhost);
3147                         async->valid = 0;
3148                         wmb();
3149                 } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3150                         vio_disable_interrupts(vdev);
3151                         ibmvfc_handle_crq(crq, vhost);
3152                         crq->valid = 0;
3153                         wmb();
3154                 } else
3155                         done = 1;
3156         }
3157
3158         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3159 }
3160
3161 /**
3162  * ibmvfc_init_tgt - Set the next init job step for the target
3163  * @tgt:                ibmvfc target struct
3164  * @job_step:   job step to perform
3165  *
3166  **/
3167 static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
3168                             void (*job_step) (struct ibmvfc_target *))
3169 {
3170         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT);
3171         tgt->job_step = job_step;
3172         wake_up(&tgt->vhost->work_wait_q);
3173 }
3174
3175 /**
3176  * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
3177  * @tgt:                ibmvfc target struct
3178  * @job_step:   initialization job step
3179  *
3180  * Returns: 1 if step will be retried / 0 if not
3181  *
3182  **/
3183 static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
3184                                   void (*job_step) (struct ibmvfc_target *))
3185 {
3186         if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
3187                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3188                 wake_up(&tgt->vhost->work_wait_q);
3189                 return 0;
3190         } else
3191                 ibmvfc_init_tgt(tgt, job_step);
3192         return 1;
3193 }
3194
3195 /* PRLI response codes, defined in FC-LS: { code, should retry, logged in } */
3196 static const struct {
3197         int code;
3198         int retry;
3199         int logged_in;
3200 } prli_rsp[] = {
3201         { 0, 1, 0 },
3202         { 1, 0, 1 },
3203         { 2, 1, 0 },
3204         { 3, 1, 0 },
3205         { 4, 0, 0 },
3206         { 5, 0, 0 },
3207         { 6, 0, 1 },
3208         { 7, 0, 0 },
3209         { 8, 1, 0 },
3210 };
3211
3212 /**
3213  * ibmvfc_get_prli_rsp - Find PRLI response index
3214  * @flags:      PRLI response flags
3215  *
3216  **/
3217 static int ibmvfc_get_prli_rsp(u16 flags)
3218 {
3219         int i;
3220         int code = (flags & 0x0f00) >> 8;
3221
3222         for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
3223                 if (prli_rsp[i].code == code)
3224                         return i;
3225
3226         return 0;
3227 }
3228
3229 /**
3230  * ibmvfc_tgt_prli_done - Completion handler for Process Login
3231  * @evt:        ibmvfc event struct
3232  *
3233  **/
3234 static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
3235 {
3236         struct ibmvfc_target *tgt = evt->tgt;
3237         struct ibmvfc_host *vhost = evt->vhost;
3238         struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
3239         struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
3240         u32 status = rsp->common.status;
3241         int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
3242
3243         vhost->discovery_threads--;
3244         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3245         switch (status) {
3246         case IBMVFC_MAD_SUCCESS:
3247                 tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
3248                         parms->type, parms->flags, parms->service_parms);
3249
3250                 if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
3251                         index = ibmvfc_get_prli_rsp(parms->flags);
3252                         if (prli_rsp[index].logged_in) {
3253                                 if (parms->flags & IBMVFC_PRLI_EST_IMG_PAIR) {
3254                                         tgt->need_login = 0;
3255                                         tgt->ids.roles = 0;
3256                                         if (parms->service_parms & IBMVFC_PRLI_TARGET_FUNC)
3257                                                 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
3258                                         if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC)
3259                                                 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
3260                                         tgt->add_rport = 1;
3261                                 } else
3262                                         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3263                         } else if (prli_rsp[index].retry)
3264                                 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3265                         else
3266                                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3267                 } else
3268                         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3269                 break;
3270         case IBMVFC_MAD_DRIVER_FAILED:
3271                 break;
3272         case IBMVFC_MAD_CRQ_ERROR:
3273                 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3274                 break;
3275         case IBMVFC_MAD_FAILED:
3276         default:
3277                 if ((rsp->status & IBMVFC_VIOS_FAILURE) && rsp->error == IBMVFC_PLOGI_REQUIRED)
3278                         level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3279                 else if (tgt->logo_rcvd)
3280                         level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3281                 else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3282                         level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3283                 else
3284                         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3285
3286                 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
3287                         ibmvfc_get_cmd_error(rsp->status, rsp->error),
3288                         rsp->status, rsp->error, status);
3289                 break;
3290         }
3291
3292         kref_put(&tgt->kref, ibmvfc_release_tgt);
3293         ibmvfc_free_event(evt);
3294         wake_up(&vhost->work_wait_q);
3295 }
3296
3297 /**
3298  * ibmvfc_tgt_send_prli - Send a process login
3299  * @tgt:        ibmvfc target struct
3300  *
3301  **/
3302 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
3303 {
3304         struct ibmvfc_process_login *prli;
3305         struct ibmvfc_host *vhost = tgt->vhost;
3306         struct ibmvfc_event *evt;
3307
3308         if (vhost->discovery_threads >= disc_threads)
3309                 return;
3310
3311         kref_get(&tgt->kref);
3312         evt = ibmvfc_get_event(vhost);
3313         vhost->discovery_threads++;
3314         ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
3315         evt->tgt = tgt;
3316         prli = &evt->iu.prli;
3317         memset(prli, 0, sizeof(*prli));
3318         prli->common.version = 1;
3319         prli->common.opcode = IBMVFC_PROCESS_LOGIN;
3320         prli->common.length = sizeof(*prli);
3321         prli->scsi_id = tgt->scsi_id;
3322
3323         prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
3324         prli->parms.flags = IBMVFC_PRLI_EST_IMG_PAIR;
3325         prli->parms.service_parms = IBMVFC_PRLI_INITIATOR_FUNC;
3326
3327         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3328         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3329                 vhost->discovery_threads--;
3330                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3331                 kref_put(&tgt->kref, ibmvfc_release_tgt);
3332         } else
3333                 tgt_dbg(tgt, "Sent process login\n");
3334 }
3335
3336 /**
3337  * ibmvfc_tgt_plogi_done - Completion handler for Port Login
3338  * @evt:        ibmvfc event struct
3339  *
3340  **/
3341 static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
3342 {
3343         struct ibmvfc_target *tgt = evt->tgt;
3344         struct ibmvfc_host *vhost = evt->vhost;
3345         struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
3346         u32 status = rsp->common.status;
3347         int level = IBMVFC_DEFAULT_LOG_LEVEL;
3348
3349         vhost->discovery_threads--;
3350         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3351         switch (status) {
3352         case IBMVFC_MAD_SUCCESS:
3353                 tgt_dbg(tgt, "Port Login succeeded\n");
3354                 if (tgt->ids.port_name &&
3355                     tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
3356                         vhost->reinit = 1;
3357                         tgt_dbg(tgt, "Port re-init required\n");
3358                         break;
3359                 }
3360                 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
3361                 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
3362                 tgt->ids.port_id = tgt->scsi_id;
3363                 memcpy(&tgt->service_parms, &rsp->service_parms,
3364                        sizeof(tgt->service_parms));
3365                 memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
3366                        sizeof(tgt->service_parms_change));
3367                 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
3368                 break;
3369         case IBMVFC_MAD_DRIVER_FAILED:
3370                 break;
3371         case IBMVFC_MAD_CRQ_ERROR:
3372                 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3373                 break;
3374         case IBMVFC_MAD_FAILED:
3375         default:
3376                 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3377                         level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3378                 else
3379                         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3380
3381                 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3382                         ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3383                         ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3384                         ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
3385                 break;
3386         }
3387
3388         kref_put(&tgt->kref, ibmvfc_release_tgt);
3389         ibmvfc_free_event(evt);
3390         wake_up(&vhost->work_wait_q);
3391 }
3392
3393 /**
3394  * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
3395  * @tgt:        ibmvfc target struct
3396  *
3397  **/
3398 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
3399 {
3400         struct ibmvfc_port_login *plogi;
3401         struct ibmvfc_host *vhost = tgt->vhost;
3402         struct ibmvfc_event *evt;
3403
3404         if (vhost->discovery_threads >= disc_threads)
3405                 return;
3406
3407         kref_get(&tgt->kref);
3408         tgt->logo_rcvd = 0;
3409         evt = ibmvfc_get_event(vhost);
3410         vhost->discovery_threads++;
3411         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3412         ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
3413         evt->tgt = tgt;
3414         plogi = &evt->iu.plogi;
3415         memset(plogi, 0, sizeof(*plogi));
3416         plogi->common.version = 1;
3417         plogi->common.opcode = IBMVFC_PORT_LOGIN;
3418         plogi->common.length = sizeof(*plogi);
3419         plogi->scsi_id = tgt->scsi_id;
3420
3421         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3422                 vhost->discovery_threads--;
3423                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3424                 kref_put(&tgt->kref, ibmvfc_release_tgt);
3425         } else
3426                 tgt_dbg(tgt, "Sent port login\n");
3427 }
3428
3429 /**
3430  * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
3431  * @evt:        ibmvfc event struct
3432  *
3433  **/
3434 static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
3435 {
3436         struct ibmvfc_target *tgt = evt->tgt;
3437         struct ibmvfc_host *vhost = evt->vhost;
3438         struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
3439         u32 status = rsp->common.status;
3440
3441         vhost->discovery_threads--;
3442         ibmvfc_free_event(evt);
3443         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3444
3445         switch (status) {
3446         case IBMVFC_MAD_SUCCESS:
3447                 tgt_dbg(tgt, "Implicit Logout succeeded\n");
3448                 break;
3449         case IBMVFC_MAD_DRIVER_FAILED:
3450                 kref_put(&tgt->kref, ibmvfc_release_tgt);
3451                 wake_up(&vhost->work_wait_q);
3452                 return;
3453         case IBMVFC_MAD_FAILED:
3454         default:
3455                 tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
3456                 break;
3457         }
3458
3459         if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
3460                 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
3461         else if (vhost->action == IBMVFC_HOST_ACTION_QUERY_TGTS &&
3462                  tgt->scsi_id != tgt->new_scsi_id)
3463                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3464         kref_put(&tgt->kref, ibmvfc_release_tgt);
3465         wake_up(&vhost->work_wait_q);
3466 }
3467
3468 /**
3469  * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
3470  * @tgt:                ibmvfc target struct
3471  *
3472  **/
3473 static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
3474 {
3475         struct ibmvfc_implicit_logout *mad;
3476         struct ibmvfc_host *vhost = tgt->vhost;
3477         struct ibmvfc_event *evt;
3478
3479         if (vhost->discovery_threads >= disc_threads)
3480                 return;
3481
3482         kref_get(&tgt->kref);
3483         evt = ibmvfc_get_event(vhost);
3484         vhost->discovery_threads++;
3485         ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT);
3486         evt->tgt = tgt;
3487         mad = &evt->iu.implicit_logout;
3488         memset(mad, 0, sizeof(*mad));
3489         mad->common.version = 1;
3490         mad->common.opcode = IBMVFC_IMPLICIT_LOGOUT;
3491         mad->common.length = sizeof(*mad);
3492         mad->old_scsi_id = tgt->scsi_id;
3493
3494         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3495         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3496                 vhost->discovery_threads--;
3497                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3498                 kref_put(&tgt->kref, ibmvfc_release_tgt);
3499         } else
3500                 tgt_dbg(tgt, "Sent Implicit Logout\n");
3501 }
3502
3503 /**
3504  * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
3505  * @mad:        ibmvfc passthru mad struct
3506  * @tgt:        ibmvfc target struct
3507  *
3508  * Returns:
3509  *      1 if PLOGI needed / 0 if PLOGI not needed
3510  **/
3511 static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
3512                                     struct ibmvfc_target *tgt)
3513 {
3514         if (memcmp(&mad->fc_iu.response[2], &tgt->ids.port_name,
3515                    sizeof(tgt->ids.port_name)))
3516                 return 1;
3517         if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name,
3518                    sizeof(tgt->ids.node_name)))
3519                 return 1;
3520         if (mad->fc_iu.response[6] != tgt->scsi_id)
3521                 return 1;
3522         return 0;
3523 }
3524
3525 /**
3526  * ibmvfc_tgt_adisc_done - Completion handler for ADISC
3527  * @evt:        ibmvfc event struct
3528  *
3529  **/
3530 static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
3531 {
3532         struct ibmvfc_target *tgt = evt->tgt;
3533         struct ibmvfc_host *vhost = evt->vhost;
3534         struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
3535         u32 status = mad->common.status;
3536         u8 fc_reason, fc_explain;
3537
3538         vhost->discovery_threads--;
3539         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3540         del_timer(&tgt->timer);
3541
3542         switch (status) {
3543         case IBMVFC_MAD_SUCCESS:
3544                 tgt_dbg(tgt, "ADISC succeeded\n");
3545                 if (ibmvfc_adisc_needs_plogi(mad, tgt))
3546                         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3547                 break;
3548         case IBMVFC_MAD_DRIVER_FAILED:
3549                 break;
3550         case IBMVFC_MAD_FAILED:
3551         default:
3552                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3553                 fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
3554                 fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
3555                 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3556                          ibmvfc_get_cmd_error(mad->iu.status, mad->iu.error),
3557                          mad->iu.status, mad->iu.error,
3558                          ibmvfc_get_fc_type(fc_reason), fc_reason,
3559                          ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
3560                 break;
3561         }
3562
3563         kref_put(&tgt->kref, ibmvfc_release_tgt);
3564         ibmvfc_free_event(evt);
3565         wake_up(&vhost->work_wait_q);
3566 }
3567
3568 /**
3569  * ibmvfc_init_passthru - Initialize an event struct for FC passthru
3570  * @evt:                ibmvfc event struct
3571  *
3572  **/
3573 static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
3574 {
3575         struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
3576
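             /*
              * The passthru MAD, its command IU and the FC IU all live in the
              * event's single DMA-mapped buffer; the descriptors filled in below
              * are simply offsets from evt->crq.ioba into that buffer.
              */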
3577         memset(mad, 0, sizeof(*mad));
3578         mad->common.version = 1;
3579         mad->common.opcode = IBMVFC_PASSTHRU;
3580         mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
3581         mad->cmd_ioba.va = (u64)evt->crq.ioba +
3582                 offsetof(struct ibmvfc_passthru_mad, iu);
3583         mad->cmd_ioba.len = sizeof(mad->iu);
3584         mad->iu.cmd_len = sizeof(mad->fc_iu.payload);
3585         mad->iu.rsp_len = sizeof(mad->fc_iu.response);
3586         mad->iu.cmd.va = (u64)evt->crq.ioba +
3587                 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
3588                 offsetof(struct ibmvfc_passthru_fc_iu, payload);
3589         mad->iu.cmd.len = sizeof(mad->fc_iu.payload);
3590         mad->iu.rsp.va = (u64)evt->crq.ioba +
3591                 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
3592                 offsetof(struct ibmvfc_passthru_fc_iu, response);
3593         mad->iu.rsp.len = sizeof(mad->fc_iu.response);
3594 }
3595
3596 /**
3597  * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
3598  * @evt:                ibmvfc event struct
3599  *
3600  * Just cleanup this event struct. Everything else is handled by
3601  * the ADISC completion handler. If the ADISC never actually comes
3602  * back, we still have the timer running on the ADISC event struct
3603  * which will fire and cause the CRQ to get reset.
3604  *
3605  **/
3606 static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
3607 {
3608         struct ibmvfc_host *vhost = evt->vhost;
3609         struct ibmvfc_target *tgt = evt->tgt;
3610
3611         tgt_dbg(tgt, "ADISC cancel complete\n");
3612         vhost->abort_threads--;
3613         ibmvfc_free_event(evt);
3614         kref_put(&tgt->kref, ibmvfc_release_tgt);
3615         wake_up(&vhost->work_wait_q);
3616 }
3617
3618 /**
3619  * ibmvfc_adisc_timeout - Handle an ADISC timeout
3620  * @tgt:                ibmvfc target struct
3621  *
3622  * If an ADISC times out, send a cancel. If the cancel times
3623  * out, reset the CRQ. When the ADISC comes back as cancelled,
3624  * log back into the target.
3625  **/
3626 static void ibmvfc_adisc_timeout(struct ibmvfc_target *tgt)
3627 {
3628         struct ibmvfc_host *vhost = tgt->vhost;
3629         struct ibmvfc_event *evt;
3630         struct ibmvfc_tmf *tmf;
3631         unsigned long flags;
3632         int rc;
3633
3634         tgt_dbg(tgt, "ADISC timeout\n");
3635         spin_lock_irqsave(vhost->host->host_lock, flags);
3636         if (vhost->abort_threads >= disc_threads ||
3637             tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
3638             vhost->state != IBMVFC_INITIALIZING ||
3639             vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
3640                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3641                 return;
3642         }
3643
3644         vhost->abort_threads++;
3645         kref_get(&tgt->kref);
3646         evt = ibmvfc_get_event(vhost);
3647         ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
3648
3649         evt->tgt = tgt;
3650         tmf = &evt->iu.tmf;
3651         memset(tmf, 0, sizeof(*tmf));
3652         tmf->common.version = 1;
3653         tmf->common.opcode = IBMVFC_TMF_MAD;
3654         tmf->common.length = sizeof(*tmf);
3655         tmf->scsi_id = tgt->scsi_id;
3656         tmf->cancel_key = tgt->cancel_key;
3657
3658         rc = ibmvfc_send_event(evt, vhost, default_timeout);
3659
3660         if (rc) {
3661                 tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
3662                 vhost->abort_threads--;
3663                 kref_put(&tgt->kref, ibmvfc_release_tgt);
3664                 __ibmvfc_reset_host(vhost);
3665         } else
3666                 tgt_dbg(tgt, "Attempting to cancel ADISC\n");
3667         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3668 }
3669
3670 /**
3671  * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
3672  * @tgt:                ibmvfc target struct
3673  *
3674  * When sending an ADISC we end up with two timers running. The
3675  * first timer is the timer in the ibmvfc target struct. If this
3676  * fires, we send a cancel to the target. The second timer is the
3677  * timer on the ibmvfc event for the ADISC, which is longer. If that
3678  * fires, it means the ADISC timed out and our attempt to cancel it
3679  * also failed, so we need to reset the CRQ.
3680  **/
3681 static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
3682 {
3683         struct ibmvfc_passthru_mad *mad;
3684         struct ibmvfc_host *vhost = tgt->vhost;
3685         struct ibmvfc_event *evt;
3686
3687         if (vhost->discovery_threads >= disc_threads)
3688                 return;
3689
3690         kref_get(&tgt->kref);
3691         evt = ibmvfc_get_event(vhost);
3692         vhost->discovery_threads++;
3693         ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
3694         evt->tgt = tgt;
3695
3696         ibmvfc_init_passthru(evt);
3697         mad = &evt->iu.passthru;
3698         mad->iu.flags = IBMVFC_FC_ELS;
3699         mad->iu.scsi_id = tgt->scsi_id;
3700         mad->iu.cancel_key = tgt->cancel_key;
3701
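             /*
              * Build the ADISC ELS payload with this host's WWPN, WWNN and
              * N_Port ID; the target's response is checked against its cached
              * identity in ibmvfc_adisc_needs_plogi().
              */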
3702         mad->fc_iu.payload[0] = IBMVFC_ADISC;
3703         memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
3704                sizeof(vhost->login_buf->resp.port_name));
3705         memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
3706                sizeof(vhost->login_buf->resp.node_name));
3707         mad->fc_iu.payload[6] = vhost->login_buf->resp.scsi_id & 0x00ffffff;
3708
3709         if (timer_pending(&tgt->timer))
3710                 mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
3711         else {
3712                 tgt->timer.data = (unsigned long) tgt;
3713                 tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
3714                 tgt->timer.function = (void (*)(unsigned long))ibmvfc_adisc_timeout;
3715                 add_timer(&tgt->timer);
3716         }
3717
3718         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3719         if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
3720                 vhost->discovery_threads--;
3721                 del_timer(&tgt->timer);
3722                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3723                 kref_put(&tgt->kref, ibmvfc_release_tgt);
3724         } else
3725                 tgt_dbg(tgt, "Sent ADISC\n");
3726 }
3727
3728 /**
3729  * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
3730  * @evt:        ibmvfc event struct
3731  *
3732  **/
3733 static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3734 {
3735         struct ibmvfc_target *tgt = evt->tgt;
3736         struct ibmvfc_host *vhost = evt->vhost;
3737         struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
3738         u32 status = rsp->common.status;
3739         int level = IBMVFC_DEFAULT_LOG_LEVEL;
3740
3741         vhost->discovery_threads--;
3742         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3743         switch (status) {
3744         case IBMVFC_MAD_SUCCESS:
3745                 tgt_dbg(tgt, "Query Target succeeded\n");
3746                 tgt->new_scsi_id = rsp->scsi_id;
3747                 if (rsp->scsi_id != tgt->scsi_id)
3748                         ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
3749                 else
3750                         ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
3751                 break;
3752         case IBMVFC_MAD_DRIVER_FAILED:
3753                 break;
3754         case IBMVFC_MAD_CRQ_ERROR:
3755                 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
3756                 break;
3757         case IBMVFC_MAD_FAILED:
3758         default:
3759                 if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
3760                     rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
3761                     rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
3762                         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3763                 else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3764                         level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
3765                 else
3766                         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3767
3768                 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3769                         ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3770                         ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3771                         ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
3772                 break;
3773         }
3774
3775         kref_put(&tgt->kref, ibmvfc_release_tgt);
3776         ibmvfc_free_event(evt);
3777         wake_up(&vhost->work_wait_q);
3778 }
3779
3780 /**
3781  * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
3782  * @tgt:        ibmvfc target struct
3783  *
3784  **/
3785 static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
3786 {
3787         struct ibmvfc_query_tgt *query_tgt;
3788         struct ibmvfc_host *vhost = tgt->vhost;
3789         struct ibmvfc_event *evt;
3790
3791         if (vhost->discovery_threads >= disc_threads)
3792                 return;
3793
3794         kref_get(&tgt->kref);
3795         evt = ibmvfc_get_event(vhost);
3796         vhost->discovery_threads++;
3797         evt->tgt = tgt;
3798         ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
3799         query_tgt = &evt->iu.query_tgt;
3800         memset(query_tgt, 0, sizeof(*query_tgt));
3801         query_tgt->common.version = 1;
3802         query_tgt->common.opcode = IBMVFC_QUERY_TARGET;
3803         query_tgt->common.length = sizeof(*query_tgt);
3804         query_tgt->wwpn = tgt->ids.port_name;
3805
3806         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3807         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3808                 vhost->discovery_threads--;
3809                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3810                 kref_put(&tgt->kref, ibmvfc_release_tgt);
3811         } else
3812                 tgt_dbg(tgt, "Sent Query Target\n");
3813 }
3814
3815 /**
3816  * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
3817  * @vhost:              ibmvfc host struct
3818  * @scsi_id:    SCSI ID to allocate target for
3819  *
3820  * Returns:
3821  *      0 on success / other on failure
3822  **/
3823 static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
3824 {
3825         struct ibmvfc_target *tgt;
3826         unsigned long flags;
3827
3828         spin_lock_irqsave(vhost->host->host_lock, flags);
3829         list_for_each_entry(tgt, &vhost->targets, queue) {
3830                 if (tgt->scsi_id == scsi_id) {
3831                         if (tgt->need_login)
3832                                 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
3833                         goto unlock_out;
3834                 }
3835         }
3836         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3837
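             /* Drop the host lock across mempool_alloc(): GFP_NOIO may sleep. */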
3838         tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
3839         if (!tgt) {
3840                 dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
3841                         scsi_id);
3842                 return -ENOMEM;
3843         }
3844
3845         memset(tgt, 0, sizeof(*tgt));
3846         tgt->scsi_id = scsi_id;
3847         tgt->new_scsi_id = scsi_id;
3848         tgt->vhost = vhost;
3849         tgt->need_login = 1;
3850         tgt->cancel_key = vhost->task_set++;
3851         init_timer(&tgt->timer);
3852         kref_init(&tgt->kref);
3853         ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
3854         spin_lock_irqsave(vhost->host->host_lock, flags);
3855         list_add_tail(&tgt->queue, &vhost->targets);
3856
3857 unlock_out:
3858         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3859         return 0;
3860 }
3861
3862 /**
3863  * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
3864  * @vhost:              ibmvfc host struct
3865  *
3866  * Returns:
3867  *      0 on success / other on failure
3868  **/
3869 static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
3870 {
3871         int i, rc;
3872
3873         for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
3874                 rc = ibmvfc_alloc_target(vhost,
3875                                          vhost->disc_buf->scsi_id[i] & IBMVFC_DISC_TGT_SCSI_ID_MASK);
3876
3877         return rc;
3878 }
3879
3880 /**
3881  * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
3882  * @evt:        ibmvfc event struct
3883  *
3884  **/
3885 static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3886 {
3887         struct ibmvfc_host *vhost = evt->vhost;
3888         struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
3889         u32 mad_status = rsp->common.status;
3890         int level = IBMVFC_DEFAULT_LOG_LEVEL;
3891
3892         switch (mad_status) {
3893         case IBMVFC_MAD_SUCCESS:
3894                 ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
3895                 vhost->num_targets = rsp->num_written;
3896                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
3897                 break;
3898         case IBMVFC_MAD_FAILED:
3899                 level += ibmvfc_retry_host_init(vhost);
3900                 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
3901                            ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3902                 break;
3903         case IBMVFC_MAD_DRIVER_FAILED:
3904                 break;
3905         default:
3906                 dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
3907                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3908                 break;
3909         }
3910
3911         ibmvfc_free_event(evt);
3912         wake_up(&vhost->work_wait_q);
3913 }
3914
3915 /**
3916  * ibmvfc_discover_targets - Send Discover Targets MAD
3917  * @vhost:      ibmvfc host struct
3918  *
3919  **/
3920 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
3921 {
3922         struct ibmvfc_discover_targets *mad;
3923         struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
3924
3925         ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
3926         mad = &evt->iu.discover_targets;
3927         memset(mad, 0, sizeof(*mad));
3928         mad->common.version = 1;
3929         mad->common.opcode = IBMVFC_DISC_TARGETS;
3930         mad->common.length = sizeof(*mad);
3931         mad->bufflen = vhost->disc_buf_sz;
3932         mad->buffer.va = vhost->disc_buf_dma;
3933         mad->buffer.len = vhost->disc_buf_sz;
3934         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
3935
3936         if (!ibmvfc_send_event(evt, vhost, default_timeout))
3937                 ibmvfc_dbg(vhost, "Sent discover targets\n");
3938         else
3939                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3940 }
3941
3942 /**
3943  * ibmvfc_npiv_login_done - Completion handler for NPIV Login
3944  * @evt:        ibmvfc event struct
3945  *
3946  **/
3947 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
3948 {
3949         struct ibmvfc_host *vhost = evt->vhost;
3950         u32 mad_status = evt->xfer_iu->npiv_login.common.status;
3951         struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
3952         unsigned int npiv_max_sectors;
3953         int level = IBMVFC_DEFAULT_LOG_LEVEL;
3954
3955         switch (mad_status) {
3956         case IBMVFC_MAD_SUCCESS:
3957                 ibmvfc_free_event(evt);
3958                 break;
3959         case IBMVFC_MAD_FAILED:
3960                 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3961                         level += ibmvfc_retry_host_init(vhost);
3962                 else
3963                         ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3964                 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
3965                            ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3966                 ibmvfc_free_event(evt);
3967                 return;
3968         case IBMVFC_MAD_CRQ_ERROR:
3969                 ibmvfc_retry_host_init(vhost);
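                     /* fall through - free the event and return */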
3970         case IBMVFC_MAD_DRIVER_FAILED:
3971                 ibmvfc_free_event(evt);
3972                 return;
3973         default:
3974                 dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
3975                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3976                 ibmvfc_free_event(evt);
3977                 return;
3978         }
3979
3980         vhost->client_migrated = 0;
3981
3982         if (!(rsp->flags & IBMVFC_NATIVE_FC)) {
3983                 dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
3984                         rsp->flags);
3985                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3986                 wake_up(&vhost->work_wait_q);
3987                 return;
3988         }
3989
3990         if (rsp->max_cmds <= IBMVFC_NUM_INTERNAL_REQ) {
3991                 dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
3992                         rsp->max_cmds);
3993                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3994                 wake_up(&vhost->work_wait_q);
3995                 return;
3996         }
3997
3998         vhost->logged_in = 1;
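             /* Convert the reported max DMA length in bytes to 512-byte sectors. */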
3999         npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
4000         dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
4001                  rsp->partition_name, rsp->device_name, rsp->port_loc_code,
4002                  rsp->drc_name, npiv_max_sectors);
4003
4004         fc_host_fabric_name(vhost->host) = rsp->node_name;
4005         fc_host_node_name(vhost->host) = rsp->node_name;
4006         fc_host_port_name(vhost->host) = rsp->port_name;
4007         fc_host_port_id(vhost->host) = rsp->scsi_id;
4008         fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
4009         fc_host_supported_classes(vhost->host) = 0;
4010         if (rsp->service_parms.class1_parms[0] & 0x80000000)
4011                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
4012         if (rsp->service_parms.class2_parms[0] & 0x80000000)
4013                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
4014         if (rsp->service_parms.class3_parms[0] & 0x80000000)
4015                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
4016         fc_host_maxframe_size(vhost->host) =
4017                 rsp->service_parms.common.bb_rcv_sz & 0x0fff;
4018
4019         vhost->host->can_queue = rsp->max_cmds - IBMVFC_NUM_INTERNAL_REQ;
4020         vhost->host->max_sectors = npiv_max_sectors;
4021         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
4022         wake_up(&vhost->work_wait_q);
4023 }
4024
4025 /**
4026  * ibmvfc_npiv_login - Sends NPIV login
4027  * @vhost:      ibmvfc host struct
4028  *
4029  **/
4030 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
4031 {
4032         struct ibmvfc_npiv_login_mad *mad;
4033         struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
4034
4035         ibmvfc_gather_partition_info(vhost);
4036         ibmvfc_set_login_info(vhost);
4037         ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
4038
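             /*
              * Copy the login request into the DMA-able login buffer referenced
              * by the MAD below; the response is later read back from
              * login_buf->resp in ibmvfc_npiv_login_done().
              */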
4039         memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
4040         mad = &evt->iu.npiv_login;
4041         memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
4042         mad->common.version = 1;
4043         mad->common.opcode = IBMVFC_NPIV_LOGIN;
4044         mad->common.length = sizeof(struct ibmvfc_npiv_login_mad);
4045         mad->buffer.va = vhost->login_buf_dma;
4046         mad->buffer.len = sizeof(*vhost->login_buf);
4047
4048         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4049
4050         if (!ibmvfc_send_event(evt, vhost, default_timeout))
4051                 ibmvfc_dbg(vhost, "Sent NPIV login\n");
4052         else
4053                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4054 }
4055
4056 /**
4057  * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
4058  * @evt:        ibmvfc event struct
4059  *
4060  **/
4061 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
4062 {
4063         struct ibmvfc_host *vhost = evt->vhost;
4064         u32 mad_status = evt->xfer_iu->npiv_logout.common.status;
4065
4066         ibmvfc_free_event(evt);
4067
4068         switch (mad_status) {
4069         case IBMVFC_MAD_SUCCESS:
4070                 if (list_empty(&vhost->sent) &&
4071                     vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
4072                         ibmvfc_init_host(vhost);
4073                         return;
4074                 }
4075                 break;
4076         case IBMVFC_MAD_FAILED:
4077         case IBMVFC_MAD_NOT_SUPPORTED:
4078         case IBMVFC_MAD_CRQ_ERROR:
4079         case IBMVFC_MAD_DRIVER_FAILED:
4080         default:
4081                 ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
4082                 break;
4083         }
4084
4085         ibmvfc_hard_reset_host(vhost);
4086 }
4087
4088 /**
4089  * ibmvfc_npiv_logout - Issue an NPIV Logout
4090  * @vhost:              ibmvfc host struct
4091  *
4092  **/
4093 static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
4094 {
4095         struct ibmvfc_npiv_logout_mad *mad;
4096         struct ibmvfc_event *evt;
4097
4098         evt = ibmvfc_get_event(vhost);
4099         ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
4100
4101         mad = &evt->iu.npiv_logout;
4102         memset(mad, 0, sizeof(*mad));
4103         mad->common.version = 1;
4104         mad->common.opcode = IBMVFC_NPIV_LOGOUT;
4105         mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad);
4106
4107         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
4108
4109         if (!ibmvfc_send_event(evt, vhost, default_timeout))
4110                 ibmvfc_dbg(vhost, "Sent NPIV logout\n");
4111         else
4112                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4113 }
4114
4115 /**
4116  * ibmvfc_dev_init_to_do - Is there target initialization work to do?
4117  * @vhost:              ibmvfc host struct
4118  *
4119  * Returns:
4120  *      1 if work to do / 0 if not
4121  **/
4122 static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
4123 {
4124         struct ibmvfc_target *tgt;
4125
4126         list_for_each_entry(tgt, &vhost->targets, queue) {
4127                 if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
4128                     tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
4129                         return 1;
4130         }
4131
4132         return 0;
4133 }
4134
4135 /**
4136  * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
4137  * @vhost:              ibmvfc host struct
4138  *
4139  * Returns:
4140  *      1 if work to do / 0 if not
4141  **/
4142 static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
4143 {
4144         struct ibmvfc_target *tgt;
4145
4146         if (kthread_should_stop())
4147                 return 1;
4148         switch (vhost->action) {
4149         case IBMVFC_HOST_ACTION_NONE:
4150         case IBMVFC_HOST_ACTION_INIT_WAIT:
4151         case IBMVFC_HOST_ACTION_LOGO_WAIT:
4152                 return 0;
4153         case IBMVFC_HOST_ACTION_TGT_INIT:
4154         case IBMVFC_HOST_ACTION_QUERY_TGTS:
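                     /*
                      * During discovery there is work to do only if a target still
                      * needs its next init step and a discovery slot is free; if
                      * everything outstanding is just waiting on responses, sleep.
                      */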
4155                 if (vhost->discovery_threads == disc_threads)
4156                         return 0;
4157                 list_for_each_entry(tgt, &vhost->targets, queue)
4158                         if (tgt->action == IBMVFC_TGT_ACTION_INIT)
4159                                 return 1;
4160                 list_for_each_entry(tgt, &vhost->targets, queue)
4161                         if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
4162                                 return 0;
4163                 return 1;
4164         case IBMVFC_HOST_ACTION_LOGO:
4165         case IBMVFC_HOST_ACTION_INIT:
4166         case IBMVFC_HOST_ACTION_ALLOC_TGTS:
4167         case IBMVFC_HOST_ACTION_TGT_DEL:
4168         case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
4169         case IBMVFC_HOST_ACTION_QUERY:
4170         case IBMVFC_HOST_ACTION_RESET:
4171         case IBMVFC_HOST_ACTION_REENABLE:
4172         default:
4173                 break;
4174         }
4175
4176         return 1;
4177 }
4178
4179 /**
4180  * ibmvfc_work_to_do - Is there task level work to do?
4181  * @vhost:              ibmvfc host struct
4182  *
4183  * Returns:
4184  *      1 if work to do / 0 if not
4185  **/
4186 static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
4187 {
4188         unsigned long flags;
4189         int rc;
4190
4191         spin_lock_irqsave(vhost->host->host_lock, flags);
4192         rc = __ibmvfc_work_to_do(vhost);
4193         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4194         return rc;
4195 }
4196
4197 /**
4198  * ibmvfc_log_ae - Log async events if necessary
4199  * @vhost:              ibmvfc host struct
4200  * @events:             events to log
4201  *
4202  **/
4203 static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
4204 {
4205         if (events & IBMVFC_AE_RSCN)
4206                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
4207         if ((events & IBMVFC_AE_LINKDOWN) &&
4208             vhost->state >= IBMVFC_HALTED)
4209                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
4210         if ((events & IBMVFC_AE_LINKUP) &&
4211             vhost->state == IBMVFC_INITIALIZING)
4212                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
4213 }
4214
4215 /**
4216  * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
4217  * @tgt:                ibmvfc target struct
4218  *
4219  **/
4220 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
4221 {
4222         struct ibmvfc_host *vhost = tgt->vhost;
4223         struct fc_rport *rport;
4224         unsigned long flags;
4225
4226         tgt_dbg(tgt, "Adding rport\n");
4227         rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
4228         spin_lock_irqsave(vhost->host->host_lock, flags);
4229
4230         if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
4231                 tgt_dbg(tgt, "Deleting rport\n");
4232                 list_del(&tgt->queue);
4233                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
4234                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4235                 fc_remote_port_delete(rport);
4236                 del_timer_sync(&tgt->timer);
4237                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4238                 return;
4239         } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
4240                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4241                 return;
4242         }
4243
4244         if (rport) {
4245                 tgt_dbg(tgt, "rport add succeeded\n");
4246                 tgt->rport = rport;
4247                 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
4248                 rport->supported_classes = 0;
4249                 tgt->target_id = rport->scsi_target_id;
4250                 if (tgt->service_parms.class1_parms[0] & 0x80000000)
4251                         rport->supported_classes |= FC_COS_CLASS1;
4252                 if (tgt->service_parms.class2_parms[0] & 0x80000000)
4253                         rport->supported_classes |= FC_COS_CLASS2;
4254                 if (tgt->service_parms.class3_parms[0] & 0x80000000)
4255                         rport->supported_classes |= FC_COS_CLASS3;
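                     /*
                      * Limit the FC bsg request queue to a single segment; this is
                      * presumably because the passthru MAD describes its payload
                      * with a single buffer descriptor (see ibmvfc_init_passthru).
                      */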
4256                 if (rport->rqst_q)
4257                         blk_queue_max_segments(rport->rqst_q, 1);
4258         } else
4259                 tgt_dbg(tgt, "rport add failed\n");
4260         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4261 }
4262
4263 /**
4264  * ibmvfc_do_work - Do task level work
4265  * @vhost:              ibmvfc host struct
4266  *
4267  **/
4268 static void ibmvfc_do_work(struct ibmvfc_host *vhost)
4269 {
4270         struct ibmvfc_target *tgt;
4271         unsigned long flags;
4272         struct fc_rport *rport;
4273         int rc;
4274
4275         ibmvfc_log_ae(vhost, vhost->events_to_log);
4276         spin_lock_irqsave(vhost->host->host_lock, flags);
4277         vhost->events_to_log = 0;
4278         switch (vhost->action) {
4279         case IBMVFC_HOST_ACTION_NONE:
4280         case IBMVFC_HOST_ACTION_LOGO_WAIT:
4281         case IBMVFC_HOST_ACTION_INIT_WAIT:
4282                 break;
4283         case IBMVFC_HOST_ACTION_RESET:
4284                 vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
4285                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4286                 rc = ibmvfc_reset_crq(vhost);
4287                 spin_lock_irqsave(vhost->host->host_lock, flags);
4288                 if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
4289                     (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
4290                         ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4291                         dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
4292                 }
4293                 break;
4294         case IBMVFC_HOST_ACTION_REENABLE:
4295                 vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
4296                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4297                 rc = ibmvfc_reenable_crq_queue(vhost);
4298                 spin_lock_irqsave(vhost->host->host_lock, flags);
4299                 if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
4300                         ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4301                         dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
4302                 }
4303                 break;
4304         case IBMVFC_HOST_ACTION_LOGO:
4305                 vhost->job_step(vhost);
4306                 break;
4307         case IBMVFC_HOST_ACTION_INIT:
4308                 BUG_ON(vhost->state != IBMVFC_INITIALIZING);
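                     /*
                      * If a delayed init was requested, drop the lock and
                      * back off for 15 seconds; delay_init is cleared so the
                      * next pass falls through to the init job step.
                      */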
4309                 if (vhost->delay_init) {
4310                         vhost->delay_init = 0;
4311                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4312                         ssleep(15);
4313                         return;
4314                 } else
4315                         vhost->job_step(vhost);
4316                 break;
4317         case IBMVFC_HOST_ACTION_QUERY:
4318                 list_for_each_entry(tgt, &vhost->targets, queue)
4319                         ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
4320                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
4321                 break;
4322         case IBMVFC_HOST_ACTION_QUERY_TGTS:
4323                 list_for_each_entry(tgt, &vhost->targets, queue) {
4324                         if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
4325                                 tgt->job_step(tgt);
4326                                 break;
4327                         }
4328                 }
4329
4330                 if (!ibmvfc_dev_init_to_do(vhost))
4331                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
4332                 break;
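             /*
              * Delete at most one rport per pass: the host lock must be
              * dropped around fc_remote_port_delete(), so return afterwards
              * and handle any remaining targets on the next pass.
              */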
4333         case IBMVFC_HOST_ACTION_TGT_DEL:
4334         case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
4335                 list_for_each_entry(tgt, &vhost->targets, queue) {
4336                         if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
4337                                 tgt_dbg(tgt, "Deleting rport\n");
4338                                 rport = tgt->rport;
4339                                 tgt->rport = NULL;
4340                                 list_del(&tgt->queue);
4341                                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
4342                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4343                                 if (rport)
4344                                         fc_remote_port_delete(rport);
4345                                 del_timer_sync(&tgt->timer);
4346                                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4347                                 return;
4348                         }
4349                 }
4350
4351                 if (vhost->state == IBMVFC_INITIALIZING) {
4352                         if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
4353                                 if (vhost->reinit) {
4354                                         vhost->reinit = 0;
4355                                         scsi_block_requests(vhost->host);
4356                                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
4357                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4358                                 } else {
4359                                         ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
4360                                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
4361                                         wake_up(&vhost->init_wait_q);
4362                                         schedule_work(&vhost->rport_add_work_q);
4363                                         vhost->init_retries = 0;
4364                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4365                                         scsi_unblock_requests(vhost->host);
4366                                 }
4367
4368                                 return;
4369                         } else {
4370                                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
4371                                 vhost->job_step = ibmvfc_discover_targets;
4372                         }
4373                 } else {
4374                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
4375                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4376                         scsi_unblock_requests(vhost->host);
4377                         wake_up(&vhost->init_wait_q);
4378                         return;
4379                 }
4380                 break;
4381         case IBMVFC_HOST_ACTION_ALLOC_TGTS:
4382                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
4383                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4384                 ibmvfc_alloc_targets(vhost);
4385                 spin_lock_irqsave(vhost->host->host_lock, flags);
4386                 break;
4387         case IBMVFC_HOST_ACTION_TGT_INIT:
4388                 list_for_each_entry(tgt, &vhost->targets, queue) {
4389                         if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
4390                                 tgt->job_step(tgt);
4391                                 break;
4392                         }
4393                 }
4394
4395                 if (!ibmvfc_dev_init_to_do(vhost))
4396                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
4397                 break;
4398         default:
4399                 break;
4400         }
4401
4402         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4403 }
4404
4405 /**
4406  * ibmvfc_work - Host work thread entry point
4407  * @data:               ibmvfc host struct
4408  *
4409  * Returns:
4410  *      zero
4411  **/
4412 static int ibmvfc_work(void *data)
4413 {
4414         struct ibmvfc_host *vhost = data;
4415         int rc;
4416
4417         set_user_nice(current, -20);
4418
4419         while (1) {
4420                 rc = wait_event_interruptible(vhost->work_wait_q,
4421                                               ibmvfc_work_to_do(vhost));
4422
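                     /*
                      * Kernel threads ignore signals by default, so the
                      * interruptible wait should never return an error here.
                      */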
4423                 BUG_ON(rc);
4424
4425                 if (kthread_should_stop())
4426                         break;
4427
4428                 ibmvfc_do_work(vhost);
4429         }
4430
4431         ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
4432         return 0;
4433 }
4434
4435 /**
4436  * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
4437  * @vhost:      ibmvfc host struct
4438  *
4439  * Allocates a page for messages, maps it for dma, and registers
4440  * the crq with the hypervisor.
4441  *
4442  * Return value:
4443  *      zero on success / other on failure
4444  **/
4445 static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
4446 {
4447         int rc, retrc = -ENOMEM;
4448         struct device *dev = vhost->dev;
4449         struct vio_dev *vdev = to_vio_dev(dev);
4450         struct ibmvfc_crq_queue *crq = &vhost->crq;
4451
4452         ENTER;
4453         crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
4454
4455         if (!crq->msgs)
4456                 return -ENOMEM;
4457
4458         crq->size = PAGE_SIZE / sizeof(*crq->msgs);
4459         crq->msg_token = dma_map_single(dev, crq->msgs,
4460                                         PAGE_SIZE, DMA_BIDIRECTIONAL);
4461
4462         if (dma_mapping_error(dev, crq->msg_token))
4463                 goto map_failed;
4464
4465         retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4466                                         crq->msg_token, PAGE_SIZE);
4467
4468         if (rc == H_RESOURCE)
4469                 /* maybe kexecing and resource is busy. try a reset */
4470                 retrc = rc = ibmvfc_reset_crq(vhost);
4471
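             /*
              * H_CLOSED just means the partner adapter isn't ready yet;
              * warn below and carry on.
              */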
4472         if (rc == H_CLOSED)
4473                 dev_warn(dev, "Partner adapter not ready\n");
4474         else if (rc) {
4475                 dev_warn(dev, "Error %d opening adapter\n", rc);
4476                 goto reg_crq_failed;
4477         }
4478
4479         retrc = 0;
4480
4481         tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
4482
4483         if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
4484                 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
                     retrc = rc;
4485                 goto req_irq_failed;
4486         }
4487
4488         if ((rc = vio_enable_interrupts(vdev))) {
4489                 dev_err(dev, "Error %d enabling interrupts\n", rc);
                     retrc = rc;
4490                 goto req_irq_failed;
4491         }
4492
4493         crq->cur = 0;
4494         LEAVE;
4495         return retrc;
4496
4497 req_irq_failed:
4498         tasklet_kill(&vhost->tasklet);
4499         do {
4500                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4501         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4502 reg_crq_failed:
4503         dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
4504 map_failed:
4505         free_page((unsigned long)crq->msgs);
4506         return retrc;
4507 }
4508
4509 /**
4510  * ibmvfc_free_mem - Free memory for vhost
4511  * @vhost:      ibmvfc host struct
4512  *
4513  * Return value:
4514  *      none
4515  **/
4516 static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
4517 {
4518         struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
4519
4520         ENTER;
4521         mempool_destroy(vhost->tgt_pool);
4522         kfree(vhost->trace);
4523         dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
4524                           vhost->disc_buf_dma);
4525         dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
4526                           vhost->login_buf, vhost->login_buf_dma);
4527         dma_pool_destroy(vhost->sg_pool);
4528         dma_unmap_single(vhost->dev, async_q->msg_token,
4529                          async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
4530         free_page((unsigned long)async_q->msgs);
4531         LEAVE;
4532 }
4533
4534 /**
4535  * ibmvfc_alloc_mem - Allocate memory for vhost
4536  * @vhost:      ibmvfc host struct
4537  *
4538  * Return value:
4539  *      0 on success / non-zero on failure
4540  **/
4541 static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
4542 {
4543         struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
4544         struct device *dev = vhost->dev;
4545
4546         ENTER;
4547         async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
4548         if (!async_q->msgs) {
4549                 dev_err(dev, "Couldn't allocate async queue.\n");
4550                 goto nomem;
4551         }
4552
4553         async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
4554         async_q->msg_token = dma_map_single(dev, async_q->msgs,
4555                                             async_q->size * sizeof(*async_q->msgs),
4556                                             DMA_BIDIRECTIONAL);
4557
4558         if (dma_mapping_error(dev, async_q->msg_token)) {
4559                 dev_err(dev, "Failed to map async queue\n");
4560                 goto free_async_crq;
4561         }
4562
4563         vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
4564                                          SG_ALL * sizeof(struct srp_direct_buf),
4565                                          sizeof(struct srp_direct_buf), 0);
4566
4567         if (!vhost->sg_pool) {
4568                 dev_err(dev, "Failed to allocate sg pool\n");
4569                 goto unmap_async_crq;
4570         }
4571
4572         vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
4573                                               &vhost->login_buf_dma, GFP_KERNEL);
4574
4575         if (!vhost->login_buf) {
4576                 dev_err(dev, "Couldn't allocate NPIV login buffer\n");
4577                 goto free_sg_pool;
4578         }
4579
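             /* Discover Targets buffer: one scsi_id entry per supported target */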
4580         vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets;
4581         vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
4582                                              &vhost->disc_buf_dma, GFP_KERNEL);
4583
4584         if (!vhost->disc_buf) {
4585                 dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
4586                 goto free_login_buffer;
4587         }
4588
4589         vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
4590                                sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
4591
4592         if (!vhost->trace)
4593                 goto free_disc_buffer;
4594
4595         vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
4596                                                       sizeof(struct ibmvfc_target));
4597
4598         if (!vhost->tgt_pool) {
4599                 dev_err(dev, "Couldn't allocate target memory pool\n");
4600                 goto free_trace;
4601         }
4602
4603         LEAVE;
4604         return 0;
4605
4606 free_trace:
4607         kfree(vhost->trace);
4608 free_disc_buffer:
4609         dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
4610                           vhost->disc_buf_dma);
4611 free_login_buffer:
4612         dma_free_coherent(dev, sizeof(*vhost->login_buf),
4613                           vhost->login_buf, vhost->login_buf_dma);
4614 free_sg_pool:
4615         dma_pool_destroy(vhost->sg_pool);
4616 unmap_async_crq:
4617         dma_unmap_single(dev, async_q->msg_token,
4618                          async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
4619 free_async_crq:
4620         free_page((unsigned long)async_q->msgs);
4621 nomem:
4622         LEAVE;
4623         return -ENOMEM;
4624 }
4625
4626 /**
4627  * ibmvfc_rport_add_thread - Worker thread for rport adds
4628  * @work:       work struct
4629  *
4630  * ibmvfc_rport_add_thread - Work function for adding and updating rports
4631 static void ibmvfc_rport_add_thread(struct work_struct *work)
4632 {
4633         struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
4634                                                  rport_add_work_q);
4635         struct ibmvfc_target *tgt;
4636         struct fc_rport *rport;
4637         unsigned long flags;
4638         int did_work;
4639
4640         ENTER;
4641         spin_lock_irqsave(vhost->host->host_lock, flags);
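             /*
              * Handle one target per inner pass: the host lock is dropped
              * around the FC transport calls, so restart the list walk each
              * time in case the target list changed underneath us.
              */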
4642         do {
4643                 did_work = 0;
4644                 if (vhost->state != IBMVFC_ACTIVE)
4645                         break;
4646
4647                 list_for_each_entry(tgt, &vhost->targets, queue) {
4648                         if (tgt->add_rport) {
4649                                 did_work = 1;
4650                                 tgt->add_rport = 0;
4651                                 kref_get(&tgt->kref);
4652                                 rport = tgt->rport;
4653                                 if (!rport) {
4654                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4655                                         ibmvfc_tgt_add_rport(tgt);
4656                                 } else if (get_device(&rport->dev)) {
4657                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4658                                         tgt_dbg(tgt, "Setting rport roles\n");
4659                                         fc_remote_port_rolechg(rport, tgt->ids.roles);
4660                                         put_device(&rport->dev);
4661                                 }
4662
4663                                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4664                                 spin_lock_irqsave(vhost->host->host_lock, flags);
4665                                 break;
4666                         }
4667                 }
4668         } while (did_work);
4669
4670         if (vhost->state == IBMVFC_ACTIVE)
4671                 vhost->scan_complete = 1;
4672         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4673         LEAVE;
4674 }
4675
4676 /**
4677  * ibmvfc_probe - Adapter hot plug add entry point
4678  * @vdev:       vio device struct
4679  * @id: vio device id struct
4680  *
4681  * Return value:
4682  *      0 on success / non-zero on failure
4683  **/
4684 static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
4685 {
4686         struct ibmvfc_host *vhost;
4687         struct Scsi_Host *shost;
4688         struct device *dev = &vdev->dev;
4689         int rc = -ENOMEM;
4690
4691         ENTER;
4692         shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
4693         if (!shost) {
4694                 dev_err(dev, "Couldn't allocate host data\n");
4695                 goto out;
4696         }
4697
4698         shost->transportt = ibmvfc_transport_template;
4699         shost->can_queue = max_requests;
4700         shost->max_lun = max_lun;
4701         shost->max_id = max_targets;
4702         shost->max_sectors = IBMVFC_MAX_SECTORS;
4703         shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
4704         shost->unique_id = shost->host_no;
4705
4706         vhost = shost_priv(shost);
4707         INIT_LIST_HEAD(&vhost->sent);
4708         INIT_LIST_HEAD(&vhost->free);
4709         INIT_LIST_HEAD(&vhost->targets);
4710         sprintf(vhost->name, IBMVFC_NAME);
4711         vhost->host = shost;
4712         vhost->dev = dev;
4713         vhost->partition_number = -1;
4714         vhost->log_level = log_level;
4715         vhost->task_set = 1;
4716         strcpy(vhost->partition_name, "UNKNOWN");
4717         init_waitqueue_head(&vhost->work_wait_q);
4718         init_waitqueue_head(&vhost->init_wait_q);
4719         INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
4720         mutex_init(&vhost->passthru_mutex);
4721
4722         if ((rc = ibmvfc_alloc_mem(vhost)))
4723                 goto free_scsi_host;
4724
4725         vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
4726                                          shost->host_no);
4727
4728         if (IS_ERR(vhost->work_thread)) {
4729                 dev_err(dev, "Couldn't create kernel thread: %ld\n",
4730                         PTR_ERR(vhost->work_thread));
4731                 goto free_host_mem;
4732         }
4733
4734         if ((rc = ibmvfc_init_crq(vhost))) {
4735                 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
4736                 goto kill_kthread;
4737         }
4738
4739         if ((rc = ibmvfc_init_event_pool(vhost))) {
4740                 dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
4741                 goto release_crq;
4742         }
4743
4744         if ((rc = scsi_add_host(shost, dev)))
4745                 goto release_event_pool;
4746
4747         if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
4748                                            &ibmvfc_trace_attr))) {
4749                 dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
4750                 goto remove_shost;
4751         }
4752
4753         if (shost_to_fc_host(shost)->rqst_q)
4754                 blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
4755         dev_set_drvdata(dev, vhost);
4756         spin_lock(&ibmvfc_driver_lock);
4757         list_add_tail(&vhost->queue, &ibmvfc_head);
4758         spin_unlock(&ibmvfc_driver_lock);
4759
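             /*
              * Kick off the CRQ initialization handshake; initialization and
              * target discovery then proceed asynchronously from the work
              * thread.
              */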
4760         ibmvfc_send_crq_init(vhost);
4761         scsi_scan_host(shost);
4762         return 0;
4763
4764 remove_shost:
4765         scsi_remove_host(shost);
4766 release_event_pool:
4767         ibmvfc_free_event_pool(vhost);
4768 release_crq:
4769         ibmvfc_release_crq_queue(vhost);
4770 kill_kthread:
4771         kthread_stop(vhost->work_thread);
4772 free_host_mem:
4773         ibmvfc_free_mem(vhost);
4774 free_scsi_host:
4775         scsi_host_put(shost);
4776 out:
4777         LEAVE;
4778         return rc;
4779 }
4780
4781 /**
4782  * ibmvfc_remove - Adapter hot plug remove entry point
4783  * @vdev:       vio device struct
4784  *
4785  * Return value:
4786  *      0
4787  **/
4788 static int ibmvfc_remove(struct vio_dev *vdev)
4789 {
4790         struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
4791         unsigned long flags;
4792
4793         ENTER;
4794         ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
4795
4796         spin_lock_irqsave(vhost->host->host_lock, flags);
4797         ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
4798         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4799
4800         ibmvfc_wait_while_resetting(vhost);
4801         ibmvfc_release_crq_queue(vhost);
4802         kthread_stop(vhost->work_thread);
4803         fc_remove_host(vhost->host);
4804         scsi_remove_host(vhost->host);
4805
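             /*
              * Fail any commands still outstanding now that the host is
              * being removed, then release the event pool they came from.
              */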
4806         spin_lock_irqsave(vhost->host->host_lock, flags);
4807         ibmvfc_purge_requests(vhost, DID_ERROR);
4808         ibmvfc_free_event_pool(vhost);
4809         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4810
4811         ibmvfc_free_mem(vhost);
4812         spin_lock(&ibmvfc_driver_lock);
4813         list_del(&vhost->queue);
4814         spin_unlock(&ibmvfc_driver_lock);
4815         scsi_host_put(vhost->host);
4816         LEAVE;
4817         return 0;
4818 }
4819
4820 /**
4821  * ibmvfc_resume - Resume from suspend
4822  * @dev:        device struct
4823  *
4824  * We may have lost an interrupt across suspend/resume, so kick the
4825  * interrupt handler.
4826  *
4827  */
4828 static int ibmvfc_resume(struct device *dev)
4829 {
4830         unsigned long flags;
4831         struct ibmvfc_host *vhost = dev_get_drvdata(dev);
4832         struct vio_dev *vdev = to_vio_dev(dev);
4833
4834         spin_lock_irqsave(vhost->host->host_lock, flags);
4835         vio_disable_interrupts(vdev);
4836         tasklet_schedule(&vhost->tasklet);
4837         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4838         return 0;
4839 }
4840
4841 /**
4842  * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
4843  * @vdev:       vio device struct
4844  *
4845  * Return value:
4846  *      Number of bytes the driver will need to DMA map at the same time in
4847  *      order to perform well.
4848  */
4849 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
4850 {
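             /*
              * One IU per possible outstanding request for the event pool,
              * plus 512KB of mapped I/O data for each command allowed per
              * LUN.
              */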
4851         unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
4852         return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
4853 }
4854
4855 static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
4856         {"fcp", "IBM,vfc-client"},
4857         { "", "" }
4858 };
4859 MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
4860
4861 static struct dev_pm_ops ibmvfc_pm_ops = {
4862         .resume = ibmvfc_resume
4863 };
4864
4865 static struct vio_driver ibmvfc_driver = {
4866         .id_table = ibmvfc_device_table,
4867         .probe = ibmvfc_probe,
4868         .remove = ibmvfc_remove,
4869         .get_desired_dma = ibmvfc_get_desired_dma,
4870         .driver = {
4871                 .name = IBMVFC_NAME,
4872                 .owner = THIS_MODULE,
4873                 .pm = &ibmvfc_pm_ops,
4874         }
4875 };
4876
4877 static struct fc_function_template ibmvfc_transport_functions = {
4878         .show_host_fabric_name = 1,
4879         .show_host_node_name = 1,
4880         .show_host_port_name = 1,
4881         .show_host_supported_classes = 1,
4882         .show_host_port_type = 1,
4883         .show_host_port_id = 1,
4884         .show_host_maxframe_size = 1,
4885
4886         .get_host_port_state = ibmvfc_get_host_port_state,
4887         .show_host_port_state = 1,
4888
4889         .get_host_speed = ibmvfc_get_host_speed,
4890         .show_host_speed = 1,
4891
4892         .issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
4893         .terminate_rport_io = ibmvfc_terminate_rport_io,
4894
4895         .show_rport_maxframe_size = 1,
4896         .show_rport_supported_classes = 1,
4897
4898         .set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
4899         .show_rport_dev_loss_tmo = 1,
4900
4901         .get_starget_node_name = ibmvfc_get_starget_node_name,
4902         .show_starget_node_name = 1,
4903
4904         .get_starget_port_name = ibmvfc_get_starget_port_name,
4905         .show_starget_port_name = 1,
4906
4907         .get_starget_port_id = ibmvfc_get_starget_port_id,
4908         .show_starget_port_id = 1,
4909
4910         .bsg_request = ibmvfc_bsg_request,
4911         .bsg_timeout = ibmvfc_bsg_timeout,
4912 };
4913
4914 /**
4915  * ibmvfc_module_init - Initialize the ibmvfc module
4916  *
4917  * Return value:
4918  *      0 on success / other on failure
4919  **/
4920 static int __init ibmvfc_module_init(void)
4921 {
4922         int rc;
4923
4924         if (!firmware_has_feature(FW_FEATURE_VIO))
4925                 return -ENODEV;
4926
4927         printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
4928                IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
4929
4930         ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
4931         if (!ibmvfc_transport_template)
4932                 return -ENOMEM;
4933
4934         rc = vio_register_driver(&ibmvfc_driver);
4935         if (rc)
4936                 fc_release_transport(ibmvfc_transport_template);
4937         return rc;
4938 }
4939
4940 /**
4941  * ibmvfc_module_exit - Teardown the ibmvfc module
4942  *
4943  * Return value:
4944  *      nothing
4945  **/
4946 static void __exit ibmvfc_module_exit(void)
4947 {
4948         vio_unregister_driver(&ibmvfc_driver);
4949         fc_release_transport(ibmvfc_transport_template);
4950 }
4951
4952 module_init(ibmvfc_module_init);
4953 module_exit(ibmvfc_module_exit);