]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/scsi/bfa/bfa_ioc.c
Merge SCSI misc branch into isci-for-3.6 tag
[karo-tx-linux.git] / drivers / scsi / bfa / bfa_ioc.c
1 /*
2  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3  * All rights reserved
4  * www.brocade.com
5  *
6  * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License (GPL) Version 2 as
10  * published by the Free Software Foundation
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  */
17
18 #include "bfad_drv.h"
19 #include "bfad_im.h"
20 #include "bfa_ioc.h"
21 #include "bfi_reg.h"
22 #include "bfa_defs.h"
23 #include "bfa_defs_svc.h"
24
25 BFA_TRC_FILE(CNA, IOC);
26
27 /*
28  * IOC local definitions
29  */
30 #define BFA_IOC_TOV             3000    /* msecs */
31 #define BFA_IOC_HWSEM_TOV       500     /* msecs */
32 #define BFA_IOC_HB_TOV          500     /* msecs */
33 #define BFA_IOC_TOV_RECOVER      BFA_IOC_HB_TOV
34 #define BFA_IOC_POLL_TOV        BFA_TIMER_FREQ
35
36 #define bfa_ioc_timer_start(__ioc)                                      \
37         bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
38                         bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
39 #define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)
40
41 #define bfa_hb_timer_start(__ioc)                                       \
42         bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,         \
43                         bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
44 #define bfa_hb_timer_stop(__ioc)        bfa_timer_stop(&(__ioc)->hb_timer)
45
46 #define BFA_DBG_FWTRC_OFF(_fn)  (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
47
48 /*
49  * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
50  */
51
52 #define bfa_ioc_firmware_lock(__ioc)                    \
53                         ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
54 #define bfa_ioc_firmware_unlock(__ioc)                  \
55                         ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
56 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
57 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
58 #define bfa_ioc_notify_fail(__ioc)              \
59                         ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
60 #define bfa_ioc_sync_start(__ioc)               \
61                         ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
62 #define bfa_ioc_sync_join(__ioc)                \
63                         ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
64 #define bfa_ioc_sync_leave(__ioc)               \
65                         ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
66 #define bfa_ioc_sync_ack(__ioc)                 \
67                         ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
68 #define bfa_ioc_sync_complete(__ioc)            \
69                         ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
70
71 #define bfa_ioc_mbox_cmd_pending(__ioc)         \
72                         (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
73                         readl((__ioc)->ioc_regs.hfn_mbox_cmd))
74
75 bfa_boolean_t bfa_auto_recover = BFA_TRUE;
76
77 /*
78  * forward declarations
79  */
80 static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
81 static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
82 static void bfa_ioc_timeout(void *ioc);
83 static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
84 static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
85 static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
86 static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
87 static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
88 static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
89 static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
90 static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
91 static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
92                                 enum bfa_ioc_event_e event);
93 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
94 static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
95 static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
96 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
97
98 /*
99  * IOC state machine definitions/declarations
100  */
101 enum ioc_event {
102         IOC_E_RESET             = 1,    /*  IOC reset request           */
103         IOC_E_ENABLE            = 2,    /*  IOC enable request          */
104         IOC_E_DISABLE           = 3,    /*  IOC disable request */
105         IOC_E_DETACH            = 4,    /*  driver detach cleanup       */
106         IOC_E_ENABLED           = 5,    /*  f/w enabled         */
107         IOC_E_FWRSP_GETATTR     = 6,    /*  IOC get attribute response  */
108         IOC_E_DISABLED          = 7,    /*  f/w disabled                */
109         IOC_E_PFFAILED          = 8,    /*  failure notice by iocpf sm  */
110         IOC_E_HBFAIL            = 9,    /*  heartbeat failure           */
111         IOC_E_HWERROR           = 10,   /*  hardware error interrupt    */
112         IOC_E_TIMEOUT           = 11,   /*  timeout                     */
113         IOC_E_HWFAILED          = 12,   /*  PCI mapping failure notice  */
114 };
115
116 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
117 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
118 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
119 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
120 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
121 bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
122 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
123 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
124 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
125 bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
126
/*
 * Maps each IOC state-machine handler to the externally visible
 * BFA_IOC_* state code it represents.
 */
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
139
140 /*
141  * IOCPF state machine definitions/declarations
142  */
143
144 #define bfa_iocpf_timer_start(__ioc)                                    \
145         bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
146                         bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
147 #define bfa_iocpf_timer_stop(__ioc)     bfa_timer_stop(&(__ioc)->ioc_timer)
148
149 #define bfa_iocpf_poll_timer_start(__ioc)                               \
150         bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
151                         bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
152
153 #define bfa_sem_timer_start(__ioc)                                      \
154         bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,        \
155                         bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
156 #define bfa_sem_timer_stop(__ioc)       bfa_timer_stop(&(__ioc)->sem_timer)
157
158 /*
159  * Forward declareations for iocpf state machine
160  */
161 static void bfa_iocpf_timeout(void *ioc_arg);
162 static void bfa_iocpf_sem_timeout(void *ioc_arg);
163 static void bfa_iocpf_poll_timeout(void *ioc_arg);
164
165 /*
166  * IOCPF state machine events
167  */
168 enum iocpf_event {
169         IOCPF_E_ENABLE          = 1,    /*  IOCPF enable request        */
170         IOCPF_E_DISABLE         = 2,    /*  IOCPF disable request       */
171         IOCPF_E_STOP            = 3,    /*  stop on driver detach       */
172         IOCPF_E_FWREADY         = 4,    /*  f/w initialization done     */
173         IOCPF_E_FWRSP_ENABLE    = 5,    /*  enable f/w response */
174         IOCPF_E_FWRSP_DISABLE   = 6,    /*  disable f/w response        */
175         IOCPF_E_FAIL            = 7,    /*  failure notice by ioc sm    */
176         IOCPF_E_INITFAIL        = 8,    /*  init fail notice by ioc sm  */
177         IOCPF_E_GETATTRFAIL     = 9,    /*  init fail notice by ioc sm  */
178         IOCPF_E_SEMLOCKED       = 10,   /*  h/w semaphore is locked     */
179         IOCPF_E_TIMEOUT         = 11,   /*  f/w response timeout        */
180         IOCPF_E_SEM_ERROR       = 12,   /*  h/w sem mapping error       */
181 };
182
183 /*
184  * IOCPF states
185  */
186 enum bfa_iocpf_state {
187         BFA_IOCPF_RESET         = 1,    /*  IOC is in reset state */
188         BFA_IOCPF_SEMWAIT       = 2,    /*  Waiting for IOC h/w semaphore */
189         BFA_IOCPF_HWINIT        = 3,    /*  IOC h/w is being initialized */
190         BFA_IOCPF_READY         = 4,    /*  IOCPF is initialized */
191         BFA_IOCPF_INITFAIL      = 5,    /*  IOCPF failed */
192         BFA_IOCPF_FAIL          = 6,    /*  IOCPF failed */
193         BFA_IOCPF_DISABLING     = 7,    /*  IOCPF is being disabled */
194         BFA_IOCPF_DISABLED      = 8,    /*  IOCPF is disabled */
195         BFA_IOCPF_FWMISMATCH    = 9,    /*  IOC f/w different from drivers */
196 };
197
198 bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
199 bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
200 bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
201 bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
202 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
203 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
204 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
205 bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
206                                                 enum iocpf_event);
207 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
208 bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
209 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
210 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
211 bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
212                                                 enum iocpf_event);
213 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
214
/*
 * Maps each IOCPF state-machine handler to the externally visible
 * BFA_IOCPF_* state code.  Note the mapping is many-to-one: both
 * fwcheck/mismatch report FWMISMATCH, enabling reports HWINIT, and the
 * *_sync states report the same code as their non-sync counterparts.
 */
static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
231
232 /*
233  * IOC State Machine
234  */
235
236 /*
237  * Beginning state. IOC uninit state.
238  */
239
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
	/* No entry action: uninit is the idle state before IOC_E_RESET. */
}

/*
 * IOC is in uninit state.  The only legal event is IOC_E_RESET.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
262 /*
263  * Reset entry actions -- initialize state machine
264  */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	/* Reset the IOCPF sub-state machine along with the IOC. */
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.  Nothing has been enabled yet, so a disable
 * request only needs to run the completion callbacks.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
296
297
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	/* Kick the IOCPF sm; IOC_E_ENABLED arrives once it is ready. */
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		/*
		 * On PFFAILED the iocpf sm already knows about the failure;
		 * only forward INITFAIL for a hardware error.
		 */
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		/* Enable request while already enabling: ignore. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
348
349
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	/* Arm the response timer, then query IOC attributes from firmware. */
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/* Timer still armed here; TIMEOUT entry below implies it
		 * already fired, hence no stop on that path. */
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		/* Duplicate enable request: ignore. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
394
/*
 * Entry to operational state: report enable completion, notify
 * registered listeners, and start heartbeat monitoring.
 */
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

/*
 * IOC is operational.  Failure events pick retry vs. permanent-fail
 * based on the iocpf auto_recover setting.
 */
static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		/* Already enabled: ignore. */
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/* HBFAIL below means the hb timer already expired, so the
		 * stop is only needed on these two paths. */
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
441
442
/*
 * Entry to disabling: ask the IOCPF sm to disable and log the event.
 * Note the "IOC disabled" message is issued when disable *starts*.
 */
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change.  Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	case IOC_E_HWFAILED:
		/* PCI mapping failure: disable cannot complete normally. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
483
484 /*
485  * IOC disable completion entry.
486  */
487 static void
488 bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
489 {
490         bfa_ioc_disable_comp(ioc);
491 }
492
493 static void
494 bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
495 {
496         bfa_trc(ioc, event);
497
498         switch (event) {
499         case IOC_E_ENABLE:
500                 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
501                 break;
502
503         case IOC_E_DISABLE:
504                 ioc->cbfn->disable_cbfn(ioc->bfa);
505                 break;
506
507         case IOC_E_DETACH:
508                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
509                 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
510                 break;
511
512         default:
513                 bfa_sm_fault(ioc, event);
514         }
515 }
516
517
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	/* Trace only; the iocpf sm drives the actual re-initialization. */
	bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		/* Retry succeeded: resume normal bring-up. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		/* Retry already in progress: ignore. */
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
569
570
571 static void
572 bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
573 {
574         bfa_trc(ioc, 0);
575 }
576
577 /*
578  * IOC failure.
579  */
580 static void
581 bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
582 {
583         bfa_trc(ioc, event);
584
585         switch (event) {
586
587         case IOC_E_ENABLE:
588                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
589                 break;
590
591         case IOC_E_DISABLE:
592                 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
593                 break;
594
595         case IOC_E_DETACH:
596                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
597                 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
598                 break;
599
600         case IOC_E_HWERROR:
601         case IOC_E_HWFAILED:
602                 /*
603                  * HB failure / HW error notification, ignore.
604                  */
605                 break;
606         default:
607                 bfa_sm_fault(ioc, event);
608         }
609 }
610
611 static void
612 bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
613 {
614         bfa_trc(ioc, 0);
615 }
616
617 static void
618 bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
619 {
620         bfa_trc(ioc, event);
621
622         switch (event) {
623         case IOC_E_ENABLE:
624                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
625                 break;
626
627         case IOC_E_DISABLE:
628                 ioc->cbfn->disable_cbfn(ioc->bfa);
629                 break;
630
631         case IOC_E_DETACH:
632                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
633                 break;
634
635         case IOC_E_HWERROR:
636                 /* Ignore - already in hwfail state */
637                 break;
638
639         default:
640                 bfa_sm_fault(ioc, event);
641         }
642 }
643
644 /*
645  * IOCPF State Machine
646  */
647
648 /*
649  * Reset entry actions -- initialize state machine
650  */
651 static void
652 bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
653 {
654         iocpf->fw_mismatch_notified = BFA_FALSE;
655         iocpf->auto_recover = bfa_auto_recover;
656 }
657
658 /*
659  * Beginning state. IOC is in reset state.
660  */
661 static void
662 bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
663 {
664         struct bfa_ioc_s *ioc = iocpf->ioc;
665
666         bfa_trc(ioc, event);
667
668         switch (event) {
669         case IOCPF_E_ENABLE:
670                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
671                 break;
672
673         case IOCPF_E_STOP:
674                 break;
675
676         default:
677                 bfa_sm_fault(ioc, event);
678         }
679 }
680
681 /*
682  * Semaphore should be acquired for version check.
683  */
/*
 * Entry to fwcheck: serialize on the init semaphore, force the IOC back
 * to UNINIT if a stale firmware state/header is found, then request the
 * h/w semaphore (completion is delivered as IOCPF_E_SEMLOCKED).
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	struct bfi_ioc_image_hdr_s	fwhdr;
	u32	r32, fwstate, pgnum, pgoff, loff = 0;
	int	i;

	/*
	 * Spin on init semaphore to serialize.
	 * NOTE(review): unbounded busy-wait (udelay) — relies on the holder
	 * releasing promptly; confirm this cannot stall during boot.
	 */
	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	}

	/* h/w sem init */
	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
	if (fwstate == BFI_IOC_UNINIT) {
		/* Already uninitialized: nothing to clean up. */
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		/* Normal firmware boot in progress: leave state alone. */
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	/*
	 * Clear fwver hdr
	 */
	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);
	}

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
	/* Force both this and the alternate function back to UNINIT. */
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);

	/*
	 * Unlock the hw semaphore. Should be here only once per boot.
	 */
	readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
	writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);

	/*
	 * unlock init semaphore.
	 */
	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

sem_get:
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
745
746 /*
747  * Awaiting h/w semaphore to continue with version check.
748  */
/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		/* Semaphore held: check driver/firmware compatibility. */
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				/* Peer functions still syncing: release the
				 * sem and retry when the sem timer fires. */
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			/* Firmware mismatch: release the sem and wait for
			 * a compatible firmware. */
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
793
794 /*
795  * Notify enable completion callback.
796  */
/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	/* Re-check for a compatible firmware when the timer expires. */
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		/* Periodic retry: go back and re-run the version check. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
840
841 /*
842  * Request for semaphore.
843  */
/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialzation.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			/* Peer sync still pending: release the sem and
			 * retry after the sem timer fires. */
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
885
/*
 * Entry to hwinit: reset the fw-init poll counter and start hardware
 * initialization (non-forced).
 */
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		/* Firmware never came ready: release the sem and fail. */
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		/* Release the h/w semaphore before going disabled. */
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
926
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
        /* Arm the response timer for the fw IOC ENABLE command. */
        bfa_iocpf_timer_start(iocpf->ioc);
        /*
         * Enable Interrupts before sending fw IOC ENABLE cmd.
         */
        iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
        bfa_ioc_send_enable(iocpf->ioc);
}
937
938 /*
939  * Host IOC function is being enabled, awaiting response from firmware.
940  * Semaphore is acquired.
941  */
942 static void
943 bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
944 {
945         struct bfa_ioc_s *ioc = iocpf->ioc;
946
947         bfa_trc(ioc, event);
948
949         switch (event) {
950         case IOCPF_E_FWRSP_ENABLE:
951                 bfa_iocpf_timer_stop(ioc);
952                 writel(1, ioc->ioc_regs.ioc_sem_reg);
953                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
954                 break;
955
956         case IOCPF_E_INITFAIL:
957                 bfa_iocpf_timer_stop(ioc);
958                 /*
959                  * !!! fall through !!!
960                  */
961
962         case IOCPF_E_TIMEOUT:
963                 writel(1, ioc->ioc_regs.ioc_sem_reg);
964                 if (event == IOCPF_E_TIMEOUT)
965                         bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
966                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
967                 break;
968
969         case IOCPF_E_DISABLE:
970                 bfa_iocpf_timer_stop(ioc);
971                 writel(1, ioc->ioc_regs.ioc_sem_reg);
972                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
973                 break;
974
975         default:
976                 bfa_sm_fault(ioc, event);
977         }
978 }
979
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
        /* IOCPF is operational: inform the parent IOC state machine. */
        bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}
985
/*
 * IOCPF is enabled and operational; handle disable and failure events.
 */
static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        case IOCPF_E_GETATTRFAIL:
                /* Attribute fetch failed: treat as an init failure. */
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_FAIL:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}
1010
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
        /* Arm the response timer and ask firmware to disable the IOC. */
        bfa_iocpf_timer_start(iocpf->ioc);
        bfa_ioc_send_disable(iocpf->ioc);
}
1017
1018 /*
1019  * IOC is being disabled
1020  */
1021 static void
1022 bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1023 {
1024         struct bfa_ioc_s *ioc = iocpf->ioc;
1025
1026         bfa_trc(ioc, event);
1027
1028         switch (event) {
1029         case IOCPF_E_FWRSP_DISABLE:
1030                 bfa_iocpf_timer_stop(ioc);
1031                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1032                 break;
1033
1034         case IOCPF_E_FAIL:
1035                 bfa_iocpf_timer_stop(ioc);
1036                 /*
1037                  * !!! fall through !!!
1038                  */
1039
1040         case IOCPF_E_TIMEOUT:
1041                 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1042                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1043                 break;
1044
1045         case IOCPF_E_FWRSP_ENABLE:
1046                 break;
1047
1048         default:
1049                 bfa_sm_fault(ioc, event);
1050         }
1051 }
1052
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
        /* Acquire the h/w semaphore to update sync state safely. */
        bfa_ioc_hw_sem_get(iocpf->ioc);
}
1058
1059 /*
1060  * IOC hb ack request is being removed.
1061  */
1062 static void
1063 bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1064 {
1065         struct bfa_ioc_s *ioc = iocpf->ioc;
1066
1067         bfa_trc(ioc, event);
1068
1069         switch (event) {
1070         case IOCPF_E_SEMLOCKED:
1071                 bfa_ioc_sync_leave(ioc);
1072                 writel(1, ioc->ioc_regs.ioc_sem_reg);
1073                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1074                 break;
1075
1076         case IOCPF_E_SEM_ERROR:
1077                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1078                 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1079                 break;
1080
1081         case IOCPF_E_FAIL:
1082                 break;
1083
1084         default:
1085                 bfa_sm_fault(ioc, event);
1086         }
1087 }
1088
1089 /*
1090  * IOC disable completion entry.
1091  */
1092 static void
1093 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
1094 {
1095         bfa_ioc_mbox_flush(iocpf->ioc);
1096         bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
1097 }
1098
/*
 * IOCPF is disabled; wait for a re-enable or a final stop.
 */
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_ENABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                break;

        case IOCPF_E_STOP:
                /* Final teardown: release the firmware download lock. */
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}
1120
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
        /* Save firmware trace for debugging, then grab the h/w semaphore. */
        bfa_ioc_debug_save_ftrc(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
}
1127
1128 /*
1129  * Hardware initialization failed.
1130  */
1131 static void
1132 bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1133 {
1134         struct bfa_ioc_s *ioc = iocpf->ioc;
1135
1136         bfa_trc(ioc, event);
1137
1138         switch (event) {
1139         case IOCPF_E_SEMLOCKED:
1140                 bfa_ioc_notify_fail(ioc);
1141                 bfa_ioc_sync_leave(ioc);
1142                 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1143                 writel(1, ioc->ioc_regs.ioc_sem_reg);
1144                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
1145                 break;
1146
1147         case IOCPF_E_SEM_ERROR:
1148                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1149                 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1150                 break;
1151
1152         case IOCPF_E_DISABLE:
1153                 bfa_sem_timer_stop(ioc);
1154                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1155                 break;
1156
1157         case IOCPF_E_STOP:
1158                 bfa_sem_timer_stop(ioc);
1159                 bfa_ioc_firmware_unlock(ioc);
1160                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1161                 break;
1162
1163         case IOCPF_E_FAIL:
1164                 break;
1165
1166         default:
1167                 bfa_sm_fault(ioc, event);
1168         }
1169 }
1170
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
        /* Trace-only entry; the failure was recorded in the sync state. */
        bfa_trc(iocpf->ioc, 0);
}
1176
1177 /*
1178  * Hardware initialization failed.
1179  */
1180 static void
1181 bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1182 {
1183         struct bfa_ioc_s *ioc = iocpf->ioc;
1184
1185         bfa_trc(ioc, event);
1186
1187         switch (event) {
1188         case IOCPF_E_DISABLE:
1189                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1190                 break;
1191
1192         case IOCPF_E_STOP:
1193                 bfa_ioc_firmware_unlock(ioc);
1194                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1195                 break;
1196
1197         default:
1198                 bfa_sm_fault(ioc, event);
1199         }
1200 }
1201
static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
        /*
         * Mark IOC as failed in hardware and stop firmware.
         */
        bfa_ioc_lpu_stop(iocpf->ioc);

        /*
         * Flush any queued up mailbox requests.
         */
        bfa_ioc_mbox_flush(iocpf->ioc);

        /* Acquire the h/w semaphore to update sync state safely. */
        bfa_ioc_hw_sem_get(iocpf->ioc);
}
1217
/*
 * IOC failure synchronization: with the semaphore held, publish the
 * failure and either park in the fail state or start auto-recovery.
 */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_sync_ack(ioc);
                bfa_ioc_notify_fail(ioc);
                if (!iocpf->auto_recover) {
                        /* No recovery: record FAIL and release the sem. */
                        bfa_ioc_sync_leave(ioc);
                        writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                        writel(1, ioc->ioc_regs.ioc_sem_reg);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                } else {
                        /*
                         * Auto-recover: re-init h/w now if peers are done,
                         * otherwise release the sem and wait in semwait.
                         */
                        if (bfa_ioc_sync_complete(ioc))
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        else {
                                writel(1, ioc->ioc_regs.ioc_sem_reg);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                        }
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                /* Already failed: ignore repeated failure events. */
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}
1261
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
        /* Trace-only entry; failure was published before transitioning. */
        bfa_trc(iocpf->ioc, 0);
}
1267
1268 /*
1269  * IOC is in failed state.
1270  */
1271 static void
1272 bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1273 {
1274         struct bfa_ioc_s *ioc = iocpf->ioc;
1275
1276         bfa_trc(ioc, event);
1277
1278         switch (event) {
1279         case IOCPF_E_DISABLE:
1280                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1281                 break;
1282
1283         default:
1284                 bfa_sm_fault(ioc, event);
1285         }
1286 }
1287
1288 /*
1289  *  BFA IOC private functions
1290  */
1291
1292 /*
1293  * Notify common modules registered for notification.
1294  */
1295 static void
1296 bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
1297 {
1298         struct bfa_ioc_notify_s *notify;
1299         struct list_head        *qe;
1300
1301         list_for_each(qe, &ioc->notify_q) {
1302                 notify = (struct bfa_ioc_notify_s *)qe;
1303                 notify->cbfn(notify->cbarg, event);
1304         }
1305 }
1306
/*
 * IOC disable completed: invoke the driver's disable callback and
 * notify all registered modules.
 */
static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
        ioc->cbfn->disable_cbfn(ioc->bfa);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}
1313
1314 bfa_boolean_t
1315 bfa_ioc_sem_get(void __iomem *sem_reg)
1316 {
1317         u32 r32;
1318         int cnt = 0;
1319 #define BFA_SEM_SPINCNT 3000
1320
1321         r32 = readl(sem_reg);
1322
1323         while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1324                 cnt++;
1325                 udelay(2);
1326                 r32 = readl(sem_reg);
1327         }
1328
1329         if (!(r32 & 1))
1330                 return BFA_TRUE;
1331
1332         return BFA_FALSE;
1333 }
1334
/*
 * Attempt a single h/w semaphore acquisition and report the result to
 * the IOCPF FSM; on contention, retry via the semaphore timer.
 */
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
        u32     r32;

        /*
         * First read to the semaphore register will return 0, subsequent reads
         * will return 1. Semaphore is released by writing 1 to the register
         */
        r32 = readl(ioc->ioc_regs.ioc_sem_reg);
        if (r32 == ~0) {
                /* All-ones readback means the device is inaccessible. */
                WARN_ON(r32 == ~0);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
                return;
        }
        if (!(r32 & 1)) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
                return;
        }

        /* Semaphore busy: retry when the sem timer fires. */
        bfa_sem_timer_start(ioc);
}
1357
1358 /*
1359  * Initialize LPU local memory (aka secondary memory / SRAM)
1360  */
1361 static void
1362 bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1363 {
1364         u32     pss_ctl;
1365         int             i;
1366 #define PSS_LMEM_INIT_TIME  10000
1367
1368         pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1369         pss_ctl &= ~__PSS_LMEM_RESET;
1370         pss_ctl |= __PSS_LMEM_INIT_EN;
1371
1372         /*
1373          * i2c workaround 12.5khz clock
1374          */
1375         pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1376         writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1377
1378         /*
1379          * wait for memory initialization to be complete
1380          */
1381         i = 0;
1382         do {
1383                 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1384                 i++;
1385         } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1386
1387         /*
1388          * If memory initialization is not successful, IOC timeout will catch
1389          * such failures.
1390          */
1391         WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1392         bfa_trc(ioc, pss_ctl);
1393
1394         pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1395         writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1396 }
1397
1398 static void
1399 bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
1400 {
1401         u32     pss_ctl;
1402
1403         /*
1404          * Take processor out of reset.
1405          */
1406         pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1407         pss_ctl &= ~__PSS_LPU0_RESET;
1408
1409         writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1410 }
1411
1412 static void
1413 bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
1414 {
1415         u32     pss_ctl;
1416
1417         /*
1418          * Put processors in reset.
1419          */
1420         pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1421         pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1422
1423         writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1424 }
1425
1426 /*
1427  * Get driver and firmware versions.
1428  */
1429 void
1430 bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1431 {
1432         u32     pgnum, pgoff;
1433         u32     loff = 0;
1434         int             i;
1435         u32     *fwsig = (u32 *) fwhdr;
1436
1437         pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1438         pgoff = PSS_SMEM_PGOFF(loff);
1439         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1440
1441         for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
1442              i++) {
1443                 fwsig[i] =
1444                         bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1445                 loff += sizeof(u32);
1446         }
1447 }
1448
1449 /*
1450  * Returns TRUE if same.
1451  */
1452 bfa_boolean_t
1453 bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1454 {
1455         struct bfi_ioc_image_hdr_s *drv_fwhdr;
1456         int i;
1457
1458         drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1459                 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1460
1461         for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1462                 if (fwhdr->md5sum[i] != cpu_to_le32(drv_fwhdr->md5sum[i])) {
1463                         bfa_trc(ioc, i);
1464                         bfa_trc(ioc, fwhdr->md5sum[i]);
1465                         bfa_trc(ioc, drv_fwhdr->md5sum[i]);
1466                         return BFA_FALSE;
1467                 }
1468         }
1469
1470         bfa_trc(ioc, fwhdr->md5sum[0]);
1471         return BFA_TRUE;
1472 }
1473
1474 /*
1475  * Return true if current running version is valid. Firmware signature and
1476  * execution context (driver/bios) must match.
1477  */
1478 static bfa_boolean_t
1479 bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1480 {
1481         struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
1482
1483         bfa_ioc_fwver_get(ioc, &fwhdr);
1484         drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1485                 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1486
1487         if (fwhdr.signature != cpu_to_le32(drv_fwhdr->signature)) {
1488                 bfa_trc(ioc, fwhdr.signature);
1489                 bfa_trc(ioc, drv_fwhdr->signature);
1490                 return BFA_FALSE;
1491         }
1492
1493         if (swab32(fwhdr.bootenv) != boot_env) {
1494                 bfa_trc(ioc, fwhdr.bootenv);
1495                 bfa_trc(ioc, boot_env);
1496                 return BFA_FALSE;
1497         }
1498
1499         return bfa_ioc_fwver_cmp(ioc, &fwhdr);
1500 }
1501
1502 /*
1503  * Conditionally flush any pending message from firmware at start.
1504  */
1505 static void
1506 bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1507 {
1508         u32     r32;
1509
1510         r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1511         if (r32)
1512                 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1513 }
1514
/*
 * Initialize IOC hardware: decide, based on the current firmware state
 * and version, whether to (re)boot firmware, wait for an in-progress
 * init by the peer IOC, or simply re-use the running firmware.
 */
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
        enum bfi_ioc_state ioc_fwstate;
        bfa_boolean_t fwvalid;
        u32 boot_type;
        u32 boot_env;

        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

        /* A forced init ignores whatever firmware is currently loaded. */
        if (force)
                ioc_fwstate = BFI_IOC_UNINIT;

        bfa_trc(ioc, ioc_fwstate);

        boot_type = BFI_FWBOOT_TYPE_NORMAL;
        boot_env = BFI_FWBOOT_ENV_OS;

        /*
         * check if firmware is valid
         */
        fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
                BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

        if (!fwvalid) {
                bfa_ioc_boot(ioc, boot_type, boot_env);
                bfa_ioc_poll_fwinit(ioc);
                return;
        }

        /*
         * If hardware initialization is in progress (initialized by other IOC),
         * just wait for an initialization completion interrupt.
         */
        if (ioc_fwstate == BFI_IOC_INITING) {
                bfa_ioc_poll_fwinit(ioc);
                return;
        }

        /*
         * If IOC function is disabled and firmware version is same,
         * just re-enable IOC.
         *
         * If option rom, IOC must not be in operational state. With
         * convergence, IOC will be in operational state when 2nd driver
         * is loaded.
         */
        if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {

                /*
                 * When using MSI-X any pending firmware ready event should
                 * be flushed. Otherwise MSI-X interrupts are not delivered.
                 */
                bfa_ioc_msgflush(ioc);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
                return;
        }

        /*
         * Initialize the h/w for any other states.
         */
        bfa_ioc_boot(ioc, boot_type, boot_env);
        bfa_ioc_poll_fwinit(ioc);
}
1579
1580 static void
1581 bfa_ioc_timeout(void *ioc_arg)
1582 {
1583         struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
1584
1585         bfa_trc(ioc, 0);
1586         bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1587 }
1588
/*
 * Write a host-to-firmware mailbox message and ring the doorbell.
 * @ioc_msg must be at most BFI_IOC_MSGLEN_MAX bytes; the remainder of
 * the mailbox is zero-filled.
 */
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
        u32 *msgp = (u32 *) ioc_msg;
        u32 i;

        bfa_trc(ioc, msgp[0]);
        bfa_trc(ioc, len);

        WARN_ON(len > BFI_IOC_MSGLEN_MAX);

        /*
         * first write msg to mailbox registers
         */
        for (i = 0; i < len / sizeof(u32); i++)
                writel(cpu_to_le32(msgp[i]),
                        ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

        /* Zero out the unused tail of the mailbox. */
        for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
                writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

        /*
         * write 1 to mailbox CMD to trigger LPU event
         */
        writel(1, ioc->ioc_regs.hfn_mbox_cmd);
        /* Read back to flush the posted write before returning. */
        (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
1616
/*
 * Build and send the IOC ENABLE request to firmware, carrying the class
 * code and the current wall-clock seconds.
 */
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
        struct bfi_ioc_ctrl_req_s enable_req;
        struct timeval tv;

        bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
                    bfa_ioc_portid(ioc));
        enable_req.clscode = cpu_to_be16(ioc->clscode);
        do_gettimeofday(&tv);
        /*
         * NOTE(review): be32_to_cpu() on a CPU-order value; byte-identical
         * to cpu_to_be32() on both endiannesses, but cpu_to_be32 would state
         * the intent -- confirm against firmware's expected byte order.
         */
        enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
        bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
1630
1631 static void
1632 bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1633 {
1634         struct bfi_ioc_ctrl_req_s disable_req;
1635
1636         bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1637                     bfa_ioc_portid(ioc));
1638         bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1639 }
1640
1641 static void
1642 bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1643 {
1644         struct bfi_ioc_getattr_req_s    attr_req;
1645
1646         bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1647                     bfa_ioc_portid(ioc));
1648         bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1649         bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1650 }
1651
1652 static void
1653 bfa_ioc_hb_check(void *cbarg)
1654 {
1655         struct bfa_ioc_s  *ioc = cbarg;
1656         u32     hb_count;
1657
1658         hb_count = readl(ioc->ioc_regs.heartbeat);
1659         if (ioc->hb_count == hb_count) {
1660                 bfa_ioc_recover(ioc);
1661                 return;
1662         } else {
1663                 ioc->hb_count = hb_count;
1664         }
1665
1666         bfa_ioc_mbox_poll(ioc);
1667         bfa_hb_timer_start(ioc);
1668 }
1669
/*
 * Start heartbeat monitoring: snapshot the current firmware heartbeat
 * counter and arm the heartbeat timer.
 */
static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
        ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
        bfa_hb_timer_start(ioc);
}
1676
1677 /*
1678  *      Initiate a full firmware download.
1679  */
1680 static void
1681 bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1682                     u32 boot_env)
1683 {
1684         u32 *fwimg;
1685         u32 pgnum, pgoff;
1686         u32 loff = 0;
1687         u32 chunkno = 0;
1688         u32 i;
1689         u32 asicmode;
1690
1691         bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
1692         fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1693
1694         pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1695         pgoff = PSS_SMEM_PGOFF(loff);
1696
1697         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1698
1699         for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
1700
1701                 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1702                         chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1703                         fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1704                                         BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1705                 }
1706
1707                 /*
1708                  * write smem
1709                  */
1710                 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1711                         cpu_to_le32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]));
1712
1713                 loff += sizeof(u32);
1714
1715                 /*
1716                  * handle page offset wrap around
1717                  */
1718                 loff = PSS_SMEM_PGOFF(loff);
1719                 if (loff == 0) {
1720                         pgnum++;
1721                         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1722                 }
1723         }
1724
1725         writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1726                         ioc->ioc_regs.host_page_num_fn);
1727
1728         /*
1729          * Set boot type and device mode at the end.
1730          */
1731         asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1732                                 ioc->port0_mode, ioc->port1_mode);
1733         bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1734                         swab32(asicmode));
1735         bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
1736                         swab32(boot_type));
1737         bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
1738                         swab32(boot_env));
1739 }
1740
1741
1742 /*
1743  * Update BFA configuration from firmware configuration.
1744  */
1745 static void
1746 bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1747 {
1748         struct bfi_ioc_attr_s   *attr = ioc->attr;
1749
1750         attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
1751         attr->card_type     = be32_to_cpu(attr->card_type);
1752         attr->maxfrsize     = be16_to_cpu(attr->maxfrsize);
1753         ioc->fcmode     = (attr->port_mode == BFI_PORT_MODE_FC);
1754
1755         bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1756 }
1757
1758 /*
1759  * Attach time initialization of mbox logic.
1760  */
1761 static void
1762 bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1763 {
1764         struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
1765         int     mc;
1766
1767         INIT_LIST_HEAD(&mod->cmd_q);
1768         for (mc = 0; mc < BFI_MC_MAX; mc++) {
1769                 mod->mbhdlr[mc].cbfn = NULL;
1770                 mod->mbhdlr[mc].cbarg = ioc->bfa;
1771         }
1772 }
1773
1774 /*
1775  * Mbox poll timer -- restarts any pending mailbox requests.
1776  */
1777 static void
1778 bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1779 {
1780         struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
1781         struct bfa_mbox_cmd_s           *cmd;
1782         u32                     stat;
1783
1784         /*
1785          * If no command pending, do nothing
1786          */
1787         if (list_empty(&mod->cmd_q))
1788                 return;
1789
1790         /*
1791          * If previous command is not yet fetched by firmware, do nothing
1792          */
1793         stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1794         if (stat)
1795                 return;
1796
1797         /*
1798          * Enqueue command to firmware.
1799          */
1800         bfa_q_deq(&mod->cmd_q, &cmd);
1801         bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1802 }
1803
1804 /*
1805  * Cleanup any pending requests.
1806  */
1807 static void
1808 bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
1809 {
1810         struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
1811         struct bfa_mbox_cmd_s           *cmd;
1812
1813         while (!list_empty(&mod->cmd_q))
1814                 bfa_q_deq(&mod->cmd_q, &cmd);
1815 }
1816
1817 /*
1818  * Read data from SMEM to host through PCI memmap
1819  *
1820  * @param[in]   ioc     memory for IOC
1821  * @param[in]   tbuf    app memory to store data from smem
1822  * @param[in]   soff    smem offset
1823  * @param[in]   sz      size of smem in bytes
1824  */
1825 static bfa_status_t
1826 bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1827 {
1828         u32 pgnum, loff;
1829         __be32 r32;
1830         int i, len;
1831         u32 *buf = tbuf;
1832
1833         pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1834         loff = PSS_SMEM_PGOFF(soff);
1835         bfa_trc(ioc, pgnum);
1836         bfa_trc(ioc, loff);
1837         bfa_trc(ioc, sz);
1838
1839         /*
1840          *  Hold semaphore to serialize pll init and fwtrc.
1841          */
1842         if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1843                 bfa_trc(ioc, 0);
1844                 return BFA_STATUS_FAILED;
1845         }
1846
1847         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1848
1849         len = sz/sizeof(u32);
1850         bfa_trc(ioc, len);
1851         for (i = 0; i < len; i++) {
1852                 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1853                 buf[i] = be32_to_cpu(r32);
1854                 loff += sizeof(u32);
1855
1856                 /*
1857                  * handle page offset wrap around
1858                  */
1859                 loff = PSS_SMEM_PGOFF(loff);
1860                 if (loff == 0) {
1861                         pgnum++;
1862                         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1863                 }
1864         }
1865         writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1866                         ioc->ioc_regs.host_page_num_fn);
1867         /*
1868          *  release semaphore.
1869          */
1870         readl(ioc->ioc_regs.ioc_init_sem_reg);
1871         writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1872
1873         bfa_trc(ioc, pgnum);
1874         return BFA_STATUS_OK;
1875 }
1876
1877 /*
1878  * Clear SMEM data from host through PCI memmap
1879  *
1880  * @param[in]   ioc     memory for IOC
1881  * @param[in]   soff    smem offset
1882  * @param[in]   sz      size of smem in bytes
1883  */
1884 static bfa_status_t
1885 bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1886 {
1887         int i, len;
1888         u32 pgnum, loff;
1889
1890         pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1891         loff = PSS_SMEM_PGOFF(soff);
1892         bfa_trc(ioc, pgnum);
1893         bfa_trc(ioc, loff);
1894         bfa_trc(ioc, sz);
1895
1896         /*
1897          *  Hold semaphore to serialize pll init and fwtrc.
1898          */
1899         if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1900                 bfa_trc(ioc, 0);
1901                 return BFA_STATUS_FAILED;
1902         }
1903
1904         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1905
1906         len = sz/sizeof(u32); /* len in words */
1907         bfa_trc(ioc, len);
1908         for (i = 0; i < len; i++) {
1909                 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1910                 loff += sizeof(u32);
1911
1912                 /*
1913                  * handle page offset wrap around
1914                  */
1915                 loff = PSS_SMEM_PGOFF(loff);
1916                 if (loff == 0) {
1917                         pgnum++;
1918                         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1919                 }
1920         }
1921         writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1922                         ioc->ioc_regs.host_page_num_fn);
1923
1924         /*
1925          *  release semaphore.
1926          */
1927         readl(ioc->ioc_regs.ioc_init_sem_reg);
1928         writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1929         bfa_trc(ioc, pgnum);
1930         return BFA_STATUS_OK;
1931 }
1932
/*
 * Heartbeat failure handling: notify the driver callback, registered
 * notification clients and AEN subscribers, and save the firmware
 * trace for post-mortem analysis.
 */
static void
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);

	/* capture firmware trace before the IOC gets reinitialized */
	bfa_ioc_debug_save_ftrc(ioc);

	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
		"Heart Beat of IOC has failed\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);

}
1951
/*
 * Firmware/driver version mismatch detected: fail the pending enable
 * request, log a warning and post an AEN.
 */
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
}
1965
/*
 * Initialize the ASIC PLL and local memory, serialized against other
 * functions via the chip init semaphore.  Always returns BFA_STATUS_OK.
 */
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{

	/*
	 *  Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	/* remember PLL is up for later consumers of this flag */
	ioc->pllinit = BFA_TRUE;

	/*
	 * Initialize LMEM
	 */
	bfa_ioc_lmem_init(ioc);

	/*
	 *  release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
1992
1993 /*
1994  * Interface used by diag module to do firmware boot with memory test
1995  * as the entry vector.
1996  */
1997 void
1998 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
1999 {
2000         bfa_ioc_stats(ioc, ioc_boots);
2001
2002         if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2003                 return;
2004
2005         /*
2006          * Initialize IOC state of all functions on a chip reset.
2007          */
2008         if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2009                 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
2010                 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
2011         } else {
2012                 writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
2013                 writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
2014         }
2015
2016         bfa_ioc_msgflush(ioc);
2017         bfa_ioc_download_fw(ioc, boot_type, boot_env);
2018         bfa_ioc_lpu_start(ioc);
2019 }
2020
2021 /*
2022  * Enable/disable IOC failure auto recovery.
2023  */
2024 void
2025 bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2026 {
2027         bfa_auto_recover = auto_recover;
2028 }
2029
2030
2031
2032 bfa_boolean_t
2033 bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2034 {
2035         return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2036 }
2037
2038 bfa_boolean_t
2039 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2040 {
2041         u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
2042
2043         return ((r32 != BFI_IOC_UNINIT) &&
2044                 (r32 != BFI_IOC_INITING) &&
2045                 (r32 != BFI_IOC_MEMTEST));
2046 }
2047
/*
 * Read one firmware-to-host mailbox message, if present.
 *
 * @param[in]   ioc     IOC instance
 * @param[out]  mbmsg   buffer large enough for union bfi_ioc_i2h_msg_u
 *
 * @return BFA_TRUE if a message was read, BFA_FALSE if none pending.
 */
bfa_boolean_t
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	__be32	*msgp = mbmsg;
	u32	r32;
	int		i;

	/* bit 0 of the mbox command register flags a pending message */
	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if ((r32 & 1) == 0)
		return BFA_FALSE;

	/*
	 * read the MBOX msg, word by word, preserving wire (big-endian) order
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
				   i * sizeof(u32));
		msgp[i] = cpu_to_be32(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status;
	 * the readl flushes the posted write
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);

	return BFA_TRUE;
}
2077
/*
 * Handle a BFI_MC_IOC class mailbox message: heartbeat, enable/disable
 * replies (forwarded to the IOCPF state machine) and getattr replies.
 */
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		/* heartbeat only proves liveness; nothing else to do */
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		/* latch firmware-reported port mode and capabilities */
		ioc->port_mode = ioc->port_mode_cfg =
				(enum bfa_mode_s)msg->fw_event.port_mode;
		ioc->ad_cap_bm = msg->fw_event.cap_bm;
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		/* unknown IOC message id indicates a fw/driver mismatch */
		bfa_trc(ioc, msg->mh.msg_id);
		WARN_ON(1);
	}
}
2112
2113 /*
2114  * IOC attach time initialization and setup.
2115  *
2116  * @param[in]   ioc     memory for IOC
2117  * @param[in]   bfa     driver instance structure
2118  */
2119 void
2120 bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2121                struct bfa_timer_mod_s *timer_mod)
2122 {
2123         ioc->bfa        = bfa;
2124         ioc->cbfn       = cbfn;
2125         ioc->timer_mod  = timer_mod;
2126         ioc->fcmode     = BFA_FALSE;
2127         ioc->pllinit    = BFA_FALSE;
2128         ioc->dbg_fwsave_once = BFA_TRUE;
2129         ioc->iocpf.ioc  = ioc;
2130
2131         bfa_ioc_mbox_attach(ioc);
2132         INIT_LIST_HEAD(&ioc->notify_q);
2133
2134         bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2135         bfa_fsm_send_event(ioc, IOC_E_RESET);
2136 }
2137
2138 /*
2139  * Driver detach time IOC cleanup.
2140  */
2141 void
2142 bfa_ioc_detach(struct bfa_ioc_s *ioc)
2143 {
2144         bfa_fsm_send_event(ioc, IOC_E_DETACH);
2145         INIT_LIST_HEAD(&ioc->notify_q);
2146 }
2147
2148 /*
2149  * Setup IOC PCI properties.
2150  *
2151  * @param[in]   pcidev  PCI device information for this IOC
2152  */
2153 void
2154 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2155                 enum bfi_pcifn_class clscode)
2156 {
2157         ioc->clscode    = clscode;
2158         ioc->pcidev     = *pcidev;
2159
2160         /*
2161          * Initialize IOC and device personality
2162          */
2163         ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2164         ioc->asic_mode  = BFI_ASIC_MODE_FC;
2165
2166         switch (pcidev->device_id) {
2167         case BFA_PCI_DEVICE_ID_FC_8G1P:
2168         case BFA_PCI_DEVICE_ID_FC_8G2P:
2169                 ioc->asic_gen = BFI_ASIC_GEN_CB;
2170                 ioc->fcmode = BFA_TRUE;
2171                 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2172                 ioc->ad_cap_bm = BFA_CM_HBA;
2173                 break;
2174
2175         case BFA_PCI_DEVICE_ID_CT:
2176                 ioc->asic_gen = BFI_ASIC_GEN_CT;
2177                 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2178                 ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2179                 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2180                 ioc->ad_cap_bm = BFA_CM_CNA;
2181                 break;
2182
2183         case BFA_PCI_DEVICE_ID_CT_FC:
2184                 ioc->asic_gen = BFI_ASIC_GEN_CT;
2185                 ioc->fcmode = BFA_TRUE;
2186                 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2187                 ioc->ad_cap_bm = BFA_CM_HBA;
2188                 break;
2189
2190         case BFA_PCI_DEVICE_ID_CT2:
2191                 ioc->asic_gen = BFI_ASIC_GEN_CT2;
2192                 if (clscode == BFI_PCIFN_CLASS_FC &&
2193                     pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2194                         ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2195                         ioc->fcmode = BFA_TRUE;
2196                         ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2197                         ioc->ad_cap_bm = BFA_CM_HBA;
2198                 } else {
2199                         ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2200                         ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2201                         if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2202                                 ioc->port_mode =
2203                                 ioc->port_mode_cfg = BFA_MODE_CNA;
2204                                 ioc->ad_cap_bm = BFA_CM_CNA;
2205                         } else {
2206                                 ioc->port_mode =
2207                                 ioc->port_mode_cfg = BFA_MODE_NIC;
2208                                 ioc->ad_cap_bm = BFA_CM_NIC;
2209                         }
2210                 }
2211                 break;
2212
2213         default:
2214                 WARN_ON(1);
2215         }
2216
2217         /*
2218          * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2219          */
2220         if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2221                 bfa_ioc_set_cb_hwif(ioc);
2222         else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2223                 bfa_ioc_set_ct_hwif(ioc);
2224         else {
2225                 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2226                 bfa_ioc_set_ct2_hwif(ioc);
2227                 bfa_ioc_ct2_poweron(ioc);
2228         }
2229
2230         bfa_ioc_map_port(ioc);
2231         bfa_ioc_reg_init(ioc);
2232 }
2233
2234 /*
2235  * Initialize IOC dma memory
2236  *
2237  * @param[in]   dm_kva  kernel virtual address of IOC dma memory
2238  * @param[in]   dm_pa   physical address of IOC dma memory
2239  */
2240 void
2241 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
2242 {
2243         /*
2244          * dma memory for firmware attribute
2245          */
2246         ioc->attr_dma.kva = dm_kva;
2247         ioc->attr_dma.pa = dm_pa;
2248         ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2249 }
2250
/*
 * Request IOC enable: re-arms the one-shot firmware trace save and
 * kicks the state machine.
 */
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
2259
/*
 * Request IOC disable via the state machine.
 */
void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
2266
/*
 * Suspend the IOC by driving it through the hardware-error path;
 * re-arms the one-shot firmware trace save first.
 */
void
bfa_ioc_suspend(struct bfa_ioc_s *ioc)
{
	ioc->dbg_fwsave_once = BFA_TRUE;
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
2273
2274 /*
2275  * Initialize memory for saving firmware trace. Driver must initialize
2276  * trace memory before call bfa_ioc_enable().
2277  */
2278 void
2279 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2280 {
2281         ioc->dbg_fwsave     = dbg_fwsave;
2282         ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
2283 }
2284
2285 /*
2286  * Register mailbox message handler functions
2287  *
2288  * @param[in]   ioc             IOC instance
2289  * @param[in]   mcfuncs         message class handler functions
2290  */
2291 void
2292 bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2293 {
2294         struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2295         int                             mc;
2296
2297         for (mc = 0; mc < BFI_MC_MAX; mc++)
2298                 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2299 }
2300
2301 /*
2302  * Register mailbox message handler function, to be called by common modules
2303  */
2304 void
2305 bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2306                     bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2307 {
2308         struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2309
2310         mod->mbhdlr[mc].cbfn    = cbfn;
2311         mod->mbhdlr[mc].cbarg   = cbarg;
2312 }
2313
2314 /*
2315  * Queue a mailbox command request to firmware. Waits if mailbox is busy.
2316  * Responsibility of caller to serialize
2317  *
2318  * @param[in]   ioc     IOC instance
2319  * @param[i]    cmd     Mailbox command
2320  */
2321 void
2322 bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2323 {
2324         struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2325         u32                     stat;
2326
2327         /*
2328          * If a previous command is pending, queue new command
2329          */
2330         if (!list_empty(&mod->cmd_q)) {
2331                 list_add_tail(&cmd->qe, &mod->cmd_q);
2332                 return;
2333         }
2334
2335         /*
2336          * If mailbox is busy, queue command for poll timer
2337          */
2338         stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2339         if (stat) {
2340                 list_add_tail(&cmd->qe, &mod->cmd_q);
2341                 return;
2342         }
2343
2344         /*
2345          * mailbox is free -- queue command to firmware
2346          */
2347         bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2348 }
2349
2350 /*
2351  * Handle mailbox interrupts
2352  */
2353 void
2354 bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2355 {
2356         struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2357         struct bfi_mbmsg_s              m;
2358         int                             mc;
2359
2360         if (bfa_ioc_msgget(ioc, &m)) {
2361                 /*
2362                  * Treat IOC message class as special.
2363                  */
2364                 mc = m.mh.msg_class;
2365                 if (mc == BFI_MC_IOC) {
2366                         bfa_ioc_isr(ioc, &m);
2367                         return;
2368                 }
2369
2370                 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2371                         return;
2372
2373                 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2374         }
2375
2376         bfa_ioc_lpu_read_stat(ioc);
2377
2378         /*
2379          * Try to send pending mailbox commands
2380          */
2381         bfa_ioc_mbox_poll(ioc);
2382 }
2383
/*
 * Hardware error interrupt: snapshot the heartbeat count and push the
 * IOC state machine into the hardware-error path.
 */
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	ioc->stats.hb_count = ioc->hb_count;
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
2391
2392 /*
2393  * return true if IOC is disabled
2394  */
2395 bfa_boolean_t
2396 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2397 {
2398         return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2399                 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2400 }
2401
2402 /*
2403  * return true if IOC firmware is different.
2404  */
2405 bfa_boolean_t
2406 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2407 {
2408         return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2409                 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2410                 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2411 }
2412
/*
 * True when a firmware state register value represents a
 * non-operational (uninitialized/initializing/disabled/failed) IOC.
 */
#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))
2420
2421 /*
2422  * Check if adapter is disabled -- both IOCs should be in a disabled
2423  * state.
2424  */
2425 bfa_boolean_t
2426 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2427 {
2428         u32     ioc_state;
2429
2430         if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2431                 return BFA_FALSE;
2432
2433         ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
2434         if (!bfa_ioc_state_disabled(ioc_state))
2435                 return BFA_FALSE;
2436
2437         if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2438                 ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
2439                 if (!bfa_ioc_state_disabled(ioc_state))
2440                         return BFA_FALSE;
2441         }
2442
2443         return BFA_TRUE;
2444 }
2445
2446 /*
2447  * Reset IOC fwstate registers.
2448  */
2449 void
2450 bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2451 {
2452         writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
2453         writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
2454 }
2455
#define BFA_MFG_NAME "Brocade"
/*
 * Fill in adapter-level attributes (serial number, versions, model,
 * WWN/MAC, PCIe properties and capability flags) from the firmware
 * attribute block.
 *
 * @param[in]   ioc      IOC instance
 * @param[out]  ad_attr  adapter attribute structure to populate
 */
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		      sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = ioc->attr->pwwn;
	ad_attr->mac  = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
	/* trunking requires multiple FC ports on a non-mezzanine card */
	ad_attr->trunk_capable = (ad_attr->nports > 1) &&
				  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
}
2501
2502 enum bfa_ioc_type_e
2503 bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2504 {
2505         if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2506                 return BFA_IOC_TYPE_LL;
2507
2508         WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2509
2510         return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2511                 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2512 }
2513
2514 void
2515 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2516 {
2517         memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2518         memcpy((void *)serial_num,
2519                         (void *)ioc->attr->brcd_serialnum,
2520                         BFA_ADAPTER_SERIAL_NUM_LEN);
2521 }
2522
2523 void
2524 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2525 {
2526         memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2527         memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2528 }
2529
2530 void
2531 bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2532 {
2533         WARN_ON(!chip_rev);
2534
2535         memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2536
2537         chip_rev[0] = 'R';
2538         chip_rev[1] = 'e';
2539         chip_rev[2] = 'v';
2540         chip_rev[3] = '-';
2541         chip_rev[4] = ioc->attr->asic_rev;
2542         chip_rev[5] = '\0';
2543 }
2544
2545 void
2546 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2547 {
2548         memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2549         memcpy(optrom_ver, ioc->attr->optrom_version,
2550                       BFA_VERSION_LEN);
2551 }
2552
2553 void
2554 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2555 {
2556         memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2557         memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2558 }
2559
/*
 * Build the adapter model name as "<manufacturer>-<card_type>" into
 * the caller's buffer of BFA_ADAPTER_MODEL_NAME_LEN bytes.
 */
void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	WARN_ON(!model);
	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
			BFA_MFG_NAME, ioc_attr->card_type);
}
2573
/*
 * Return the externally visible IOC state.  For transitional IOC
 * states (enabling/fail/initfail) the IOCPF sub-state machine is
 * consulted to report a more specific state.
 */
enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			/* other IOCPF states: keep the coarse IOC state */
			break;
		}
	}

	return ioc_st;
}
2613
/*
 * Fill in IOC-level attributes: current state, port id/mode,
 * capabilities, IOC type, adapter attributes and PCI properties.
 *
 * @param[in]   ioc       IOC instance
 * @param[out]  ioc_attr  attribute structure to populate
 */
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;
	ioc_attr->port_mode = ioc->port_mode;
	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
	ioc_attr->cap_bm = ioc->ad_cap_bm;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
2633
2634 mac_t
2635 bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2636 {
2637         /*
2638          * Check the IOC type and return the appropriate MAC
2639          */
2640         if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2641                 return ioc->attr->fcoe_mac;
2642         else
2643                 return ioc->attr->mac;
2644 }
2645
/*
 * Return the manufacturing MAC, adjusted per PCI function so each
 * function gets a distinct address.  The adjustment scheme depends on
 * the card's WWN/MAC numbering model.
 */
mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
	mac_t	m;

	m = ioc->attr->mfg_mac;
	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
		/* old model: bump only the last octet */
		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
	else
		/* new model: increment across the low three octets */
		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
			bfa_ioc_pcifn(ioc));

	return m;
}
2660
2661 /*
2662  * Send AEN notification
2663  */
2664 void
2665 bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2666 {
2667         struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2668         struct bfa_aen_entry_s  *aen_entry;
2669         enum bfa_ioc_type_e ioc_type;
2670
2671         bfad_get_aen_entry(bfad, aen_entry);
2672         if (!aen_entry)
2673                 return;
2674
2675         ioc_type = bfa_ioc_get_type(ioc);
2676         switch (ioc_type) {
2677         case BFA_IOC_TYPE_FC:
2678                 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2679                 break;
2680         case BFA_IOC_TYPE_FCoE:
2681                 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2682                 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2683                 break;
2684         case BFA_IOC_TYPE_LL:
2685                 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2686                 break;
2687         default:
2688                 WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2689                 break;
2690         }
2691
2692         /* Send the AEN notification */
2693         aen_entry->aen_data.ioc.ioc_type = ioc_type;
2694         bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2695                                   BFA_AEN_CAT_IOC, event);
2696 }
2697
2698 /*
2699  * Retrieve saved firmware trace from a prior IOC failure.
2700  */
2701 bfa_status_t
2702 bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2703 {
2704         int     tlen;
2705
2706         if (ioc->dbg_fwsave_len == 0)
2707                 return BFA_STATUS_ENOFSAVE;
2708
2709         tlen = *trclen;
2710         if (tlen > ioc->dbg_fwsave_len)
2711                 tlen = ioc->dbg_fwsave_len;
2712
2713         memcpy(trcdata, ioc->dbg_fwsave, tlen);
2714         *trclen = tlen;
2715         return BFA_STATUS_OK;
2716 }
2717
2718
2719 /*
2720  * Retrieve saved firmware trace from a prior IOC failure.
2721  */
2722 bfa_status_t
2723 bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2724 {
2725         u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2726         int tlen;
2727         bfa_status_t status;
2728
2729         bfa_trc(ioc, *trclen);
2730
2731         tlen = *trclen;
2732         if (tlen > BFA_DBG_FWTRC_LEN)
2733                 tlen = BFA_DBG_FWTRC_LEN;
2734
2735         status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2736         *trclen = tlen;
2737         return status;
2738 }
2739
/*
 * Queue a DBG_SYNC mailbox command asking firmware to flush its trace
 * state before a host-side SMEM read.
 */
static void
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
{
	struct bfa_mbox_cmd_s cmd;
	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
		    bfa_ioc_portid(ioc));
	req->clscode = cpu_to_be16(ioc->clscode);
	bfa_ioc_mbox_queue(ioc, &cmd);
}
2751
/*
 * Send a firmware sync request and busy-wait (bounded) for the mailbox
 * to drain, so the sync has likely taken effect before we read SMEM.
 */
static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
	u32 fwsync_iter = 1000;

	bfa_ioc_send_fwsync(ioc);

	/*
	 * After sending a fw sync mbox command wait for it to
	 * take effect.  We will not wait for a response because
	 *    1. fw_sync mbox cmd doesn't have a response.
	 *    2. Even if we implement that,  interrupts might not
	 *       be enabled when we call this function.
	 * So, just keep checking if any mbox cmd is pending, and
	 * after waiting for a reasonable amount of time, go ahead.
	 * It is possible that fw has crashed and the mbox command
	 * is never acknowledged.
	 */
	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
		fwsync_iter--;
}
2773
2774 /*
2775  * Dump firmware smem
2776  */
2777 bfa_status_t
2778 bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2779                                 u32 *offset, int *buflen)
2780 {
2781         u32 loff;
2782         int dlen;
2783         bfa_status_t status;
2784         u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
2785
2786         if (*offset >= smem_len) {
2787                 *offset = *buflen = 0;
2788                 return BFA_STATUS_EINVAL;
2789         }
2790
2791         loff = *offset;
2792         dlen = *buflen;
2793
2794         /*
2795          * First smem read, sync smem before proceeding
2796          * No need to sync before reading every chunk.
2797          */
2798         if (loff == 0)
2799                 bfa_ioc_fwsync(ioc);
2800
2801         if ((loff + dlen) >= smem_len)
2802                 dlen = smem_len - loff;
2803
2804         status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2805
2806         if (status != BFA_STATUS_OK) {
2807                 *offset = *buflen = 0;
2808                 return status;
2809         }
2810
2811         *offset += dlen;
2812
2813         if (*offset >= smem_len)
2814                 *offset = 0;
2815
2816         *buflen = dlen;
2817
2818         return status;
2819 }
2820
2821 /*
2822  * Firmware statistics
2823  */
2824 bfa_status_t
2825 bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
2826 {
2827         u32 loff = BFI_IOC_FWSTATS_OFF + \
2828                 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2829         int tlen;
2830         bfa_status_t status;
2831
2832         if (ioc->stats_busy) {
2833                 bfa_trc(ioc, ioc->stats_busy);
2834                 return BFA_STATUS_DEVBUSY;
2835         }
2836         ioc->stats_busy = BFA_TRUE;
2837
2838         tlen = sizeof(struct bfa_fw_stats_s);
2839         status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2840
2841         ioc->stats_busy = BFA_FALSE;
2842         return status;
2843 }
2844
/*
 * Zero the per-port firmware statistics block in SMEM.  Uses the same
 * busy flag as bfa_ioc_fw_stats_get(); returns BFA_STATUS_DEVBUSY if
 * another stats operation is in progress.
 */
bfa_status_t
bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
{
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_clr(ioc, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}
2865
2866 /*
2867  * Save firmware trace if configured.
2868  */
2869 void
2870 bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
2871 {
2872         int             tlen;
2873
2874         if (ioc->dbg_fwsave_once) {
2875                 ioc->dbg_fwsave_once = BFA_FALSE;
2876                 if (ioc->dbg_fwsave_len) {
2877                         tlen = ioc->dbg_fwsave_len;
2878                         bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2879                 }
2880         }
2881 }
2882
2883 /*
2884  * Firmware failure detected. Start recovery actions.
2885  */
2886 static void
2887 bfa_ioc_recover(struct bfa_ioc_s *ioc)
2888 {
2889         bfa_ioc_stats(ioc, ioc_hbfails);
2890         ioc->stats.hb_count = ioc->hb_count;
2891         bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2892 }
2893
2894 /*
2895  *  BFA IOC PF private functions
2896  */
2897 static void
2898 bfa_iocpf_timeout(void *ioc_arg)
2899 {
2900         struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
2901
2902         bfa_trc(ioc, 0);
2903         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2904 }
2905
/* Semaphore poll timer fired: retry acquiring the hardware semaphore. */
static void
bfa_iocpf_sem_timeout(void *ioc_arg)
{
        bfa_ioc_hw_sem_get((struct bfa_ioc_s *)ioc_arg);
}
2913
/*
 * Poll firmware initialization progress.  Reports readiness to the
 * IOCPF state machine, or re-arms the poll timer until BFA_IOC_TOV
 * worth of polling has elapsed, at which point it times out.
 */
static void
bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
{
        u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

        bfa_trc(ioc, fwstate);

        /* Firmware reached DISABLED: signal readiness to the IOCPF FSM */
        if (fwstate == BFI_IOC_DISABLED) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
                return;
        }

        /* Give up once total poll time exceeds the init timeout */
        if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
                bfa_iocpf_timeout(ioc);
        else {
                ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
                bfa_iocpf_poll_timer_start(ioc);
        }
}
2933
/* Poll timer callback: check firmware init progress again. */
static void
bfa_iocpf_poll_timeout(void *ioc_arg)
{
        bfa_ioc_poll_fwinit((struct bfa_ioc_s *)ioc_arg);
}
2941
/*
 *  bfa timer function
 *
 * Called once per BFA_TIMER_FREQ tick.  Ages every queued timer and
 * runs the callbacks of those that expired this beat.
 */
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
        struct list_head *qh = &mod->timer_q;
        struct list_head *qe, *qe_next;
        struct bfa_timer_s *elem;
        struct list_head timedout_q;

        INIT_LIST_HEAD(&timedout_q);

        qe = bfa_q_next(qh);

        /*
         * First pass: decrement each timer by one beat.  Expired timers
         * are moved onto a private timedout_q so that callbacks run only
         * after the scan (a callback may re-arm timers onto timer_q).
         */
        while (qe != qh) {
                qe_next = bfa_q_next(qe);

                elem = (struct bfa_timer_s *) qe;
                if (elem->timeout <= BFA_TIMER_FREQ) {
                        elem->timeout = 0;
                        list_del(&elem->qe);
                        list_add_tail(&elem->qe, &timedout_q);
                } else {
                        elem->timeout -= BFA_TIMER_FREQ;
                }

                qe = qe_next;   /* go to next elem */
        }

        /*
         * Pop all the timeout entries
         */
        while (!list_empty(&timedout_q)) {
                bfa_q_deq(&timedout_q, &elem);
                elem->timercb(elem->arg);
        }
}
2980
/*
 * Should be called with lock protection
 *
 * Arms @timer to fire @timercb(@arg) after @timeout msecs have been
 * consumed by bfa_timer_beat() ticks.
 */
void
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
                    void (*timercb) (void *), void *arg, unsigned int timeout)
{

        /* A callback is mandatory; double-queueing a timer is a bug */
        WARN_ON(timercb == NULL);
        WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));

        timer->timeout = timeout;
        timer->timercb = timercb;
        timer->arg = arg;

        list_add_tail(&timer->qe, &mod->timer_q);
}
2998
2999 /*
3000  * Should be called with lock protection
3001  */
3002 void
3003 bfa_timer_stop(struct bfa_timer_s *timer)
3004 {
3005         WARN_ON(list_empty(&timer->qe));
3006
3007         list_del(&timer->qe);
3008 }
3009
3010 /*
3011  *      ASIC block related
3012  */
3013 static void
3014 bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3015 {
3016         struct bfa_ablk_cfg_inst_s *cfg_inst;
3017         int i, j;
3018         u16     be16;
3019         u32     be32;
3020
3021         for (i = 0; i < BFA_ABLK_MAX; i++) {
3022                 cfg_inst = &cfg->inst[i];
3023                 for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3024                         be16 = cfg_inst->pf_cfg[j].pers;
3025                         cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3026                         be16 = cfg_inst->pf_cfg[j].num_qpairs;
3027                         cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3028                         be16 = cfg_inst->pf_cfg[j].num_vectors;
3029                         cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3030                         be32 = cfg_inst->pf_cfg[j].bw;
3031                         cfg_inst->pf_cfg[j].bw = be16_to_cpu(be32);
3032                 }
3033         }
3034 }
3035
/*
 * Mailbox ISR for ASIC-block (ablk) responses: completes the single
 * outstanding request and invokes the saved completion callback.
 */
static void
bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
{
        struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
        struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
        bfa_ablk_cbfn_t cbfn;

        WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
        bfa_trc(ablk->ioc, msg->mh.msg_id);

        switch (msg->mh.msg_id) {
        case BFI_ABLK_I2H_QUERY:
                /* Copy the DMA'ed config out and fix its endianness */
                if (rsp->status == BFA_STATUS_OK) {
                        memcpy(ablk->cfg, ablk->dma_addr.kva,
                                sizeof(struct bfa_ablk_cfg_s));
                        bfa_ablk_config_swap(ablk->cfg);
                        ablk->cfg = NULL;
                }
                break;

        case BFI_ABLK_I2H_ADPT_CONFIG:
        case BFI_ABLK_I2H_PORT_CONFIG:
                /* update config port mode */
                ablk->ioc->port_mode_cfg = rsp->port_mode;
                /* fall through - nothing further to do for these */

        case BFI_ABLK_I2H_PF_DELETE:
        case BFI_ABLK_I2H_PF_UPDATE:
        case BFI_ABLK_I2H_OPTROM_ENABLE:
        case BFI_ABLK_I2H_OPTROM_DISABLE:
                /* No-op */
                break;

        case BFI_ABLK_I2H_PF_CREATE:
                /* Hand the newly assigned PCI function id back to caller */
                *(ablk->pcifn) = rsp->pcifn;
                ablk->pcifn = NULL;
                break;

        default:
                WARN_ON(1);
        }

        /* Request complete: clear busy and fire the callback exactly once */
        ablk->busy = BFA_FALSE;
        if (ablk->cbfn) {
                cbfn = ablk->cbfn;
                ablk->cbfn = NULL;
                cbfn(ablk->cbarg, rsp->status);
        }
}
3084
/*
 * IOC event notification.  On IOC disable/failure, fail any in-flight
 * ASIC-block request so its caller is not left waiting forever.
 */
static void
bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
{
        struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;

        bfa_trc(ablk->ioc, event);

        switch (event) {
        case BFA_IOC_E_ENABLED:
                /* No request should be pending across an IOC enable */
                WARN_ON(ablk->busy != BFA_FALSE);
                break;

        case BFA_IOC_E_DISABLED:
        case BFA_IOC_E_FAILED:
                /* Fail any pending requests */
                ablk->pcifn = NULL;
                if (ablk->busy) {
                        if (ablk->cbfn)
                                ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
                        ablk->cbfn = NULL;
                        ablk->busy = BFA_FALSE;
                }
                break;

        default:
                WARN_ON(1);
                break;
        }
}
3114
3115 u32
3116 bfa_ablk_meminfo(void)
3117 {
3118         return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3119 }
3120
3121 void
3122 bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3123 {
3124         ablk->dma_addr.kva = dma_kva;
3125         ablk->dma_addr.pa  = dma_pa;
3126 }
3127
/*
 * Attach the ASIC-block module to its IOC: register the mailbox ISR
 * for BFI_MC_ABLK and sign up for IOC event notifications.
 */
void
bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
{
        ablk->ioc = ioc;

        bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
        bfa_q_qe_init(&ablk->ioc_notify);
        bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
        list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
}
3138
/*
 * Query the ASIC-block configuration.  Firmware DMAs the result into
 * ablk->dma_addr; bfa_ablk_isr() copies it into @ablk_cfg and invokes
 * @cbfn.  Only one ablk request may be outstanding at a time.
 */
bfa_status_t
bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
                bfa_ablk_cbfn_t cbfn, void *cbarg)
{
        struct bfi_ablk_h2i_query_s *m;

        WARN_ON(!ablk_cfg);

        if (!bfa_ioc_is_operational(ablk->ioc)) {
                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
                return BFA_STATUS_IOC_FAILURE;
        }

        if (ablk->busy) {
                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
                return  BFA_STATUS_DEVBUSY;
        }

        /* Stash completion context, then post the mailbox command */
        ablk->cfg = ablk_cfg;
        ablk->cbfn  = cbfn;
        ablk->cbarg = cbarg;
        ablk->busy  = BFA_TRUE;

        m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
                    bfa_ioc_portid(ablk->ioc));
        bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

        return BFA_STATUS_OK;
}
3170
/*
 * Create a PCI function with the given personality and bandwidth.
 * The assigned function id is written to *pcifn by bfa_ablk_isr()
 * when the firmware response arrives; @cbfn reports completion.
 */
bfa_status_t
bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
                u8 port, enum bfi_pcifn_class personality, int bw,
                bfa_ablk_cbfn_t cbfn, void *cbarg)
{
        struct bfi_ablk_h2i_pf_req_s *m;

        if (!bfa_ioc_is_operational(ablk->ioc)) {
                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
                return BFA_STATUS_IOC_FAILURE;
        }

        /* Only one ablk request may be outstanding */
        if (ablk->busy) {
                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
                return  BFA_STATUS_DEVBUSY;
        }

        ablk->pcifn = pcifn;
        ablk->cbfn = cbfn;
        ablk->cbarg = cbarg;
        ablk->busy  = BFA_TRUE;

        /* Message fields go out big-endian */
        m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
                    bfa_ioc_portid(ablk->ioc));
        m->pers = cpu_to_be16((u16)personality);
        m->bw = cpu_to_be32(bw);
        m->port = port;
        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

        return BFA_STATUS_OK;
}
3203
/*
 * Delete PCI function @pcifn.  Completion is reported through @cbfn
 * by bfa_ablk_isr().  Only one ablk request may be outstanding.
 */
bfa_status_t
bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
                bfa_ablk_cbfn_t cbfn, void *cbarg)
{
        struct bfi_ablk_h2i_pf_req_s *m;

        if (!bfa_ioc_is_operational(ablk->ioc)) {
                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
                return BFA_STATUS_IOC_FAILURE;
        }

        if (ablk->busy) {
                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
                return  BFA_STATUS_DEVBUSY;
        }

        ablk->cbfn  = cbfn;
        ablk->cbarg = cbarg;
        ablk->busy  = BFA_TRUE;

        m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
                    bfa_ioc_portid(ablk->ioc));
        m->pcifn = (u8)pcifn;
        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

        return BFA_STATUS_OK;
}
3232
/*
 * Configure adapter-wide operating mode and PF/VF limits.
 * Completion is reported through @cbfn by bfa_ablk_isr().
 */
bfa_status_t
bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
                int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
        struct bfi_ablk_h2i_cfg_req_s *m;

        if (!bfa_ioc_is_operational(ablk->ioc)) {
                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
                return BFA_STATUS_IOC_FAILURE;
        }

        /* Only one ablk request may be outstanding */
        if (ablk->busy) {
                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
                return  BFA_STATUS_DEVBUSY;
        }

        ablk->cbfn  = cbfn;
        ablk->cbarg = cbarg;
        ablk->busy  = BFA_TRUE;

        m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
                    bfa_ioc_portid(ablk->ioc));
        m->mode = (u8)mode;
        m->max_pf = (u8)max_pf;
        m->max_vf = (u8)max_vf;
        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

        return BFA_STATUS_OK;
}
3263
/*
 * Configure per-port operating mode and PF/VF limits.
 * Completion is reported through @cbfn by bfa_ablk_isr().
 */
bfa_status_t
bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
                int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
        struct bfi_ablk_h2i_cfg_req_s *m;

        if (!bfa_ioc_is_operational(ablk->ioc)) {
                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
                return BFA_STATUS_IOC_FAILURE;
        }

        /* Only one ablk request may be outstanding */
        if (ablk->busy) {
                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
                return  BFA_STATUS_DEVBUSY;
        }

        ablk->cbfn  = cbfn;
        ablk->cbarg = cbarg;
        ablk->busy  = BFA_TRUE;

        m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
                bfa_ioc_portid(ablk->ioc));
        m->port = (u8)port;
        m->mode = (u8)mode;
        m->max_pf = (u8)max_pf;
        m->max_vf = (u8)max_vf;
        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

        return BFA_STATUS_OK;
}
3295
/*
 * Update the bandwidth of an existing PCI function.
 * Completion is reported through @cbfn by bfa_ablk_isr().
 */
bfa_status_t
bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
                bfa_ablk_cbfn_t cbfn, void *cbarg)
{
        struct bfi_ablk_h2i_pf_req_s *m;

        if (!bfa_ioc_is_operational(ablk->ioc)) {
                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
                return BFA_STATUS_IOC_FAILURE;
        }

        /* Only one ablk request may be outstanding */
        if (ablk->busy) {
                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
                return  BFA_STATUS_DEVBUSY;
        }

        ablk->cbfn  = cbfn;
        ablk->cbarg = cbarg;
        ablk->busy  = BFA_TRUE;

        m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
                bfa_ioc_portid(ablk->ioc));
        m->pcifn = (u8)pcifn;
        m->bw = cpu_to_be32(bw);
        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

        return BFA_STATUS_OK;
}
3325
/*
 * Enable the adapter option ROM.  Completion is reported through
 * @cbfn by bfa_ablk_isr().
 */
bfa_status_t
bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
        struct bfi_ablk_h2i_optrom_s *m;

        if (!bfa_ioc_is_operational(ablk->ioc)) {
                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
                return BFA_STATUS_IOC_FAILURE;
        }

        /* Only one ablk request may be outstanding */
        if (ablk->busy) {
                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
                return  BFA_STATUS_DEVBUSY;
        }

        ablk->cbfn  = cbfn;
        ablk->cbarg = cbarg;
        ablk->busy  = BFA_TRUE;

        m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
                bfa_ioc_portid(ablk->ioc));
        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

        return BFA_STATUS_OK;
}
3352
/*
 * Disable the adapter option ROM.  Completion is reported through
 * @cbfn by bfa_ablk_isr().
 */
bfa_status_t
bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
        struct bfi_ablk_h2i_optrom_s *m;

        if (!bfa_ioc_is_operational(ablk->ioc)) {
                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
                return BFA_STATUS_IOC_FAILURE;
        }

        /* Only one ablk request may be outstanding */
        if (ablk->busy) {
                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
                return  BFA_STATUS_DEVBUSY;
        }

        ablk->cbfn  = cbfn;
        ablk->cbarg = cbarg;
        ablk->busy  = BFA_TRUE;

        m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
                bfa_ioc_portid(ablk->ioc));
        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

        return BFA_STATUS_OK;
}
3379
3380 /*
3381  *      SFP module specific
3382  */
3383
3384 /* forward declarations */
3385 static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3386 static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3387 static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3388                                 enum bfa_port_speed portspeed);
3389
3390 static void
3391 bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3392 {
3393         bfa_trc(sfp, sfp->lock);
3394         if (sfp->cbfn)
3395                 sfp->cbfn(sfp->cbarg, sfp->status);
3396         sfp->lock = 0;
3397         sfp->cbfn = NULL;
3398 }
3399
3400 static void
3401 bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3402 {
3403         bfa_trc(sfp, sfp->portspeed);
3404         if (sfp->media) {
3405                 bfa_sfp_media_get(sfp);
3406                 if (sfp->state_query_cbfn)
3407                         sfp->state_query_cbfn(sfp->state_query_cbarg,
3408                                         sfp->status);
3409                         sfp->media = NULL;
3410                 }
3411
3412                 if (sfp->portspeed) {
3413                         sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3414                         if (sfp->state_query_cbfn)
3415                                 sfp->state_query_cbfn(sfp->state_query_cbarg,
3416                                                 sfp->status);
3417                                 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3418                 }
3419
3420                 sfp->state_query_lock = 0;
3421                 sfp->state_query_cbfn = NULL;
3422 }
3423
/*
 *      IOC event handler.
 *
 * On IOC disable/failure, complete any outstanding show or
 * state-query request with BFA_STATUS_IOC_FAILURE.
 */
static void
bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
{
        struct bfa_sfp_s *sfp = sfp_arg;

        bfa_trc(sfp, event);
        bfa_trc(sfp, sfp->lock);
        bfa_trc(sfp, sfp->state_query_lock);

        switch (event) {
        case BFA_IOC_E_DISABLED:
        case BFA_IOC_E_FAILED:
                /* lock != 0 means an EEPROM read is in flight */
                if (sfp->lock) {
                        sfp->status = BFA_STATUS_IOC_FAILURE;
                        bfa_cb_sfp_show(sfp);
                }

                if (sfp->state_query_lock) {
                        sfp->status = BFA_STATUS_IOC_FAILURE;
                        bfa_cb_sfp_state_query(sfp);
                }
                break;

        default:
                break;
        }
}
3454
/*
 * SFP's State Change Notification post to AEN
 */
static void
bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
{
        struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
        struct bfa_aen_entry_s  *aen_entry;
        enum bfa_port_aen_event aen_evt = 0;

        /* Pack pom level, sfp id and event into one trace value */
        bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
                      ((u64)rsp->event));

        bfad_get_aen_entry(bfad, aen_entry);
        if (!aen_entry)
                return;

        aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
        aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
        aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);

        /* Map the SCN event to the corresponding port AEN event */
        switch (rsp->event) {
        case BFA_SFP_SCN_INSERTED:
                aen_evt = BFA_PORT_AEN_SFP_INSERT;
                break;
        case BFA_SFP_SCN_REMOVED:
                aen_evt = BFA_PORT_AEN_SFP_REMOVE;
                break;
        case BFA_SFP_SCN_FAILED:
                aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
                break;
        case BFA_SFP_SCN_UNSUPPORT:
                aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
                break;
        case BFA_SFP_SCN_POM:
                aen_evt = BFA_PORT_AEN_SFP_POM;
                aen_entry->aen_data.port.level = rsp->pomlvl;
                break;
        default:
                /* unknown event: aen_evt remains 0 but is still posted */
                bfa_trc(sfp, rsp->event);
                WARN_ON(1);
        }

        /* Send the AEN notification */
        bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
                                  BFA_AEN_CAT_PORT, aen_evt);
}
3502
/*
 *      SFP get data send
 *
 * Posts the SHOW mailbox command; the request message in sfp->mbcmd
 * (memtype and SG list) must already be set up by bfa_sfp_getdata().
 */
static void
bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
{
        struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

        bfa_trc(sfp, req->memtype);

        /* build host command */
        bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
                        bfa_ioc_portid(sfp->ioc));

        /* send mbox cmd */
        bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
}
3520
/*
 *      SFP is valid, read sfp data
 *
 * Starts an EEPROM read of @memtype into the sfp DMA buffer; the
 * response is handled by bfa_sfp_show_comp().  Caller must ensure no
 * read is already in flight (sfp->lock == 0).
 */
static void
bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
{
        struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

        WARN_ON(sfp->lock != 0);
        bfa_trc(sfp, sfp->state);

        sfp->lock = 1;
        sfp->memtype = memtype;
        req->memtype = memtype;

        /* Setup SG list */
        bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);

        bfa_sfp_getdata_send(sfp);
}
3541
/*
 *      SFP scn handler
 *
 * Tracks module state, invalidates cached EEPROM data where the
 * module changed, and posts an AEN for each notification.
 */
static void
bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
{
        struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;

        switch (rsp->event) {
        case BFA_SFP_SCN_INSERTED:
                sfp->state = BFA_SFP_STATE_INSERTED;
                sfp->data_valid = 0;
                bfa_sfp_scn_aen_post(sfp, rsp);
                break;
        case BFA_SFP_SCN_REMOVED:
                sfp->state = BFA_SFP_STATE_REMOVED;
                sfp->data_valid = 0;
                bfa_sfp_scn_aen_post(sfp, rsp);
                break;
        case BFA_SFP_SCN_FAILED:
                sfp->state = BFA_SFP_STATE_FAILED;
                sfp->data_valid = 0;
                bfa_sfp_scn_aen_post(sfp, rsp);
                break;
        case BFA_SFP_SCN_UNSUPPORT:
                sfp->state = BFA_SFP_STATE_UNSUPPORT;
                bfa_sfp_scn_aen_post(sfp, rsp);
                /* refresh EEPROM data unless a read is already in flight */
                if (!sfp->lock)
                        bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
                break;
        case BFA_SFP_SCN_POM:
                bfa_sfp_scn_aen_post(sfp, rsp);
                break;
        case BFA_SFP_SCN_VALID:
                sfp->state = BFA_SFP_STATE_VALID;
                if (!sfp->lock)
                        bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
                break;
        default:
                bfa_trc(sfp, rsp->event);
                WARN_ON(1);
        }
}
3585
/*
 * SFP show complete
 *
 * Handles the firmware response to a SHOW (EEPROM read) command:
 * validates the data, delivers it for DIAGEXT requests, and completes
 * any pending state-query.
 */
static void
bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
{
        struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;

        if (!sfp->lock) {
                /*
                 * receiving response after ioc failure
                 */
                bfa_trc(sfp, sfp->lock);
                return;
        }

        bfa_trc(sfp, rsp->status);
        if (rsp->status == BFA_STATUS_OK) {
                sfp->data_valid = 1;
                /* map current module state onto the request status */
                if (sfp->state == BFA_SFP_STATE_VALID)
                        sfp->status = BFA_STATUS_OK;
                else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
                        sfp->status = BFA_STATUS_SFP_UNSUPP;
                else
                        bfa_trc(sfp, sfp->state);
        } else {
                sfp->data_valid = 0;
                sfp->status = rsp->status;
                /* sfpshow shouldn't change sfp state */
        }

        bfa_trc(sfp, sfp->memtype);
        if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
                bfa_trc(sfp, sfp->data_valid);
                /* copy EEPROM contents out of the DMA buffer for the caller */
                if (sfp->data_valid) {
                        u32     size = sizeof(struct sfp_mem_s);
                        u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
                        memcpy(des, sfp->dbuf_kva, size);
                }
                /*
                 * Queue completion callback.
                 */
                bfa_cb_sfp_show(sfp);
        } else
                sfp->lock = 0;

        bfa_trc(sfp, sfp->state_query_lock);
        if (sfp->state_query_lock) {
                sfp->state = rsp->state;
                /* Complete callback */
                bfa_cb_sfp_state_query(sfp);
        }
}
3639
/*
 *      SFP query fw sfp state
 *
 * Kicks off an EEPROM read whose completion (bfa_sfp_show_comp)
 * records the firmware-reported module state.
 */
static void
bfa_sfp_state_query(struct bfa_sfp_s *sfp)
{
        struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

        /* Should not be doing query if not in _INIT state */
        WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
        WARN_ON(sfp->state_query_lock != 0);
        bfa_trc(sfp, sfp->state);

        sfp->state_query_lock = 1;
        req->memtype = 0;

        /* only start a read if none is already in flight */
        if (!sfp->lock)
                bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
}
3659
/*
 * Classify the module's media type (*sfp->media) from the transmitter
 * technology and 10G Ethernet compliance bytes of the cached EEPROM
 * serial-ID data.  Defaults to BFA_SFP_MEDIA_UNKNOWN.
 */
static void
bfa_sfp_media_get(struct bfa_sfp_s *sfp)
{
        enum bfa_defs_sfp_media_e *media = sfp->media;

        *media = BFA_SFP_MEDIA_UNKNOWN;

        if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
                *media = BFA_SFP_MEDIA_UNSUPPORT;
        else if (sfp->state == BFA_SFP_STATE_VALID) {
                union sfp_xcvr_e10g_code_u e10g;
                struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
                /* transmitter technology: xcvr bytes 4 (bits 1:0) and 5 */
                u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
                                (sfpmem->srlid_base.xcvr[5] >> 1);

                e10g.b = sfpmem->srlid_base.xcvr[0];
                bfa_trc(sfp, e10g.b);
                bfa_trc(sfp, xmtr_tech);
                /* check fc transmitter tech */
                if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
                    (xmtr_tech & SFP_XMTR_TECH_CP) ||
                    (xmtr_tech & SFP_XMTR_TECH_CA))
                        *media = BFA_SFP_MEDIA_CU;
                else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
                         (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
                        *media = BFA_SFP_MEDIA_EL;
                else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
                         (xmtr_tech & SFP_XMTR_TECH_LC))
                        *media = BFA_SFP_MEDIA_LW;
                else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
                         (xmtr_tech & SFP_XMTR_TECH_SN) ||
                         (xmtr_tech & SFP_XMTR_TECH_SA))
                        *media = BFA_SFP_MEDIA_SW;
                /* Check 10G Ethernet Compliance code */
                else if (e10g.r.e10g_sr)
                        *media = BFA_SFP_MEDIA_SW;
                else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
                        *media = BFA_SFP_MEDIA_LW;
                else if (e10g.r.e10g_unall)
                        *media = BFA_SFP_MEDIA_UNKNOWN;
                else
                        bfa_trc(sfp, 0);
        } else
                bfa_trc(sfp, sfp->state);
}
3705
/*
 * Check @portspeed against the capability bits in the cached EEPROM
 * data: 10G uses the Ethernet compliance codes, FC speeds use the
 * FC-3 speed capability bits.  Returns BFA_STATUS_OK if supported,
 * BFA_STATUS_UNSUPP_SPEED otherwise.
 */
static bfa_status_t
bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
{
        struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
        struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
        union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
        union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;

        if (portspeed == BFA_PORT_SPEED_10GBPS) {
                if (e10g.r.e10g_sr || e10g.r.e10g_lr)
                        return BFA_STATUS_OK;
                else {
                        bfa_trc(sfp, e10g.b);
                        return BFA_STATUS_UNSUPP_SPEED;
                }
        }
        /* portspeed may be a bit-mask of several FC speeds */
        if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
            ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
            ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
            ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
            ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
                return BFA_STATUS_OK;
        else {
                bfa_trc(sfp, portspeed);
                bfa_trc(sfp, fc3.b);
                bfa_trc(sfp, e10g.b);
                return BFA_STATUS_UNSUPP_SPEED;
        }
}
3735
/*
 *      SFP hmbox handler
 *
 * Dispatches incoming SFP mailbox messages to the show-completion or
 * state-change-notification handlers.
 */
void
bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
{
        struct bfa_sfp_s *sfp = sfparg;

        switch (msg->mh.msg_id) {
        case BFI_SFP_I2H_SHOW:
                bfa_sfp_show_comp(sfp, msg);
                break;

        case BFI_SFP_I2H_SCN:
                bfa_sfp_scn(sfp, msg);
                break;

        default:
                bfa_trc(sfp, msg->mh.msg_id);
                WARN_ON(1);
        }
}
3758
3759 /*
3760  *      Return DMA memory needed by sfp module.
3761  */
3762 u32
3763 bfa_sfp_meminfo(void)
3764 {
3765         return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3766 }
3767
/*
 *      Attach virtual and physical memory for SFP.
 *
 * Initializes module state, registers the mailbox ISR for BFI_MC_SFP
 * and signs up for IOC event notifications.
 */
void
bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
                struct bfa_trc_mod_s *trcmod)
{
        sfp->dev = dev;
        sfp->ioc = ioc;
        sfp->trcmod = trcmod;

        /* no request in flight, no cached data, module state unknown */
        sfp->cbfn = NULL;
        sfp->cbarg = NULL;
        sfp->sfpmem = NULL;
        sfp->lock = 0;
        sfp->data_valid = 0;
        sfp->state = BFA_SFP_STATE_INIT;
        sfp->state_query_lock = 0;
        sfp->state_query_cbfn = NULL;
        sfp->state_query_cbarg = NULL;
        sfp->media = NULL;
        sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
        sfp->is_elb = BFA_FALSE;

        bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
        bfa_q_qe_init(&sfp->ioc_notify);
        bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
        list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
}
3797
3798 /*
3799  *      Claim Memory for SFP
3800  */
3801 void
3802 bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
3803 {
3804         sfp->dbuf_kva   = dm_kva;
3805         sfp->dbuf_pa    = dm_pa;
3806         memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
3807
3808         dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3809         dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3810 }
3811
/*
 * Show SFP eeprom content
 *
 * @param[in] sfp   - bfa sfp module
 *
 * @param[out] sfpmem - sfp eeprom data
 *
 * Asynchronous: @cbfn is invoked with the read status once the
 * firmware response arrives (bfa_sfp_show_comp).
 */
bfa_status_t
bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
                bfa_cb_sfp_t cbfn, void *cbarg)
{

        if (!bfa_ioc_is_operational(sfp->ioc)) {
                bfa_trc(sfp, 0);
                return BFA_STATUS_IOC_NON_OP;
        }

        /* only one EEPROM read may be in flight */
        if (sfp->lock) {
                bfa_trc(sfp, 0);
                return BFA_STATUS_DEVBUSY;
        }

        sfp->cbfn = cbfn;
        sfp->cbarg = cbarg;
        sfp->sfpmem = sfpmem;

        bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
        return BFA_STATUS_OK;
}
3842
3843 /*
3844  * Return SFP Media type
3845  *
3846  * @param[in] sfp   - bfa sfp module
3847  *
3848  * @param[out] media - port speed from user
3849  *
3850  */
3851 bfa_status_t
3852 bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
3853                 bfa_cb_sfp_t cbfn, void *cbarg)
3854 {
3855         if (!bfa_ioc_is_operational(sfp->ioc)) {
3856                 bfa_trc(sfp, 0);
3857                 return BFA_STATUS_IOC_NON_OP;
3858         }
3859
3860         sfp->media = media;
3861         if (sfp->state == BFA_SFP_STATE_INIT) {
3862                 if (sfp->state_query_lock) {
3863                         bfa_trc(sfp, 0);
3864                         return BFA_STATUS_DEVBUSY;
3865                 } else {
3866                         sfp->state_query_cbfn = cbfn;
3867                         sfp->state_query_cbarg = cbarg;
3868                         bfa_sfp_state_query(sfp);
3869                         return BFA_STATUS_SFP_NOT_READY;
3870                 }
3871         }
3872
3873         bfa_sfp_media_get(sfp);
3874         return BFA_STATUS_OK;
3875 }
3876
3877 /*
3878  * Check if user set port speed is allowed by the SFP
3879  *
3880  * @param[in] sfp   - bfa sfp module
3881  * @param[in] portspeed - port speed from user
3882  *
3883  */
3884 bfa_status_t
3885 bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
3886                 bfa_cb_sfp_t cbfn, void *cbarg)
3887 {
3888         WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
3889
3890         if (!bfa_ioc_is_operational(sfp->ioc))
3891                 return BFA_STATUS_IOC_NON_OP;
3892
3893         /* For Mezz card, all speed is allowed */
3894         if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
3895                 return BFA_STATUS_OK;
3896
3897         /* Check SFP state */
3898         sfp->portspeed = portspeed;
3899         if (sfp->state == BFA_SFP_STATE_INIT) {
3900                 if (sfp->state_query_lock) {
3901                         bfa_trc(sfp, 0);
3902                         return BFA_STATUS_DEVBUSY;
3903                 } else {
3904                         sfp->state_query_cbfn = cbfn;
3905                         sfp->state_query_cbarg = cbarg;
3906                         bfa_sfp_state_query(sfp);
3907                         return BFA_STATUS_SFP_NOT_READY;
3908                 }
3909         }
3910
3911         if (sfp->state == BFA_SFP_STATE_REMOVED ||
3912             sfp->state == BFA_SFP_STATE_FAILED) {
3913                 bfa_trc(sfp, sfp->state);
3914                 return BFA_STATUS_NO_SFP_DEV;
3915         }
3916
3917         if (sfp->state == BFA_SFP_STATE_INSERTED) {
3918                 bfa_trc(sfp, sfp->state);
3919                 return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
3920         }
3921
3922         /* For eloopback, all speed is allowed */
3923         if (sfp->is_elb)
3924                 return BFA_STATUS_OK;
3925
3926         return bfa_sfp_speed_valid(sfp, portspeed);
3927 }
3928
3929 /*
3930  *      Flash module specific
3931  */
3932
3933 /*
3934  * FLASH DMA buffer should be big enough to hold both MFG block and
3935  * asic block(64k) at the same time and also should be 2k aligned to
 * avoid a write segment crossing a sector boundary.
3937  */
3938 #define BFA_FLASH_SEG_SZ        2048
3939 #define BFA_FLASH_DMA_BUF_SZ    \
3940         BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
3941
/*
 * Post a flash audit AEN (asynchronous event notification) so that
 * management applications are informed of flash erase/update activity.
 *
 * @param[in] ioc   - ioc structure
 * @param[in] event - audit event being reported
 * @param[in] inst  - flash partition instance
 * @param[in] type  - flash partition type
 */
static void
bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
			int inst, int type)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	struct bfa_aen_entry_s	*aen_entry;

	/* bfad_get_aen_entry() is a macro that assigns aen_entry;
	 * it yields NULL when no free entry is available */
	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
	aen_entry->aen_data.audit.partition_inst = inst;
	aen_entry->aen_data.audit.partition_type = type;

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
				  BFA_AEN_CAT_AUDIT, event);
}
3961
3962 static void
3963 bfa_flash_cb(struct bfa_flash_s *flash)
3964 {
3965         flash->op_busy = 0;
3966         if (flash->cbfn)
3967                 flash->cbfn(flash->cbarg, flash->status);
3968 }
3969
3970 static void
3971 bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
3972 {
3973         struct bfa_flash_s      *flash = cbarg;
3974
3975         bfa_trc(flash, event);
3976         switch (event) {
3977         case BFA_IOC_E_DISABLED:
3978         case BFA_IOC_E_FAILED:
3979                 if (flash->op_busy) {
3980                         flash->status = BFA_STATUS_IOC_FAILURE;
3981                         flash->cbfn(flash->cbarg, flash->status);
3982                         flash->op_busy = 0;
3983                 }
3984                 break;
3985
3986         default:
3987                 break;
3988         }
3989 }
3990
3991 /*
3992  * Send flash attribute query request.
3993  *
3994  * @param[in] cbarg - callback argument
3995  */
3996 static void
3997 bfa_flash_query_send(void *cbarg)
3998 {
3999         struct bfa_flash_s *flash = cbarg;
4000         struct bfi_flash_query_req_s *msg =
4001                         (struct bfi_flash_query_req_s *) flash->mb.msg;
4002
4003         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4004                 bfa_ioc_portid(flash->ioc));
4005         bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4006                 flash->dbuf_pa);
4007         bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4008 }
4009
4010 /*
4011  * Send flash write request.
4012  *
4013  * @param[in] cbarg - callback argument
4014  */
4015 static void
4016 bfa_flash_write_send(struct bfa_flash_s *flash)
4017 {
4018         struct bfi_flash_write_req_s *msg =
4019                         (struct bfi_flash_write_req_s *) flash->mb.msg;
4020         u32     len;
4021
4022         msg->type = be32_to_cpu(flash->type);
4023         msg->instance = flash->instance;
4024         msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4025         len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4026                 flash->residue : BFA_FLASH_DMA_BUF_SZ;
4027         msg->length = be32_to_cpu(len);
4028
4029         /* indicate if it's the last msg of the whole write operation */
4030         msg->last = (len == flash->residue) ? 1 : 0;
4031
4032         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4033                         bfa_ioc_portid(flash->ioc));
4034         bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4035         memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4036         bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4037
4038         flash->residue -= len;
4039         flash->offset += len;
4040 }
4041
4042 /*
4043  * Send flash read request.
4044  *
4045  * @param[in] cbarg - callback argument
4046  */
4047 static void
4048 bfa_flash_read_send(void *cbarg)
4049 {
4050         struct bfa_flash_s *flash = cbarg;
4051         struct bfi_flash_read_req_s *msg =
4052                         (struct bfi_flash_read_req_s *) flash->mb.msg;
4053         u32     len;
4054
4055         msg->type = be32_to_cpu(flash->type);
4056         msg->instance = flash->instance;
4057         msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4058         len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4059                         flash->residue : BFA_FLASH_DMA_BUF_SZ;
4060         msg->length = be32_to_cpu(len);
4061         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4062                 bfa_ioc_portid(flash->ioc));
4063         bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4064         bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4065 }
4066
4067 /*
4068  * Send flash erase request.
4069  *
4070  * @param[in] cbarg - callback argument
4071  */
4072 static void
4073 bfa_flash_erase_send(void *cbarg)
4074 {
4075         struct bfa_flash_s *flash = cbarg;
4076         struct bfi_flash_erase_req_s *msg =
4077                         (struct bfi_flash_erase_req_s *) flash->mb.msg;
4078
4079         msg->type = be32_to_cpu(flash->type);
4080         msg->instance = flash->instance;
4081         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4082                         bfa_ioc_portid(flash->ioc));
4083         bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4084 }
4085
/*
 * Process flash response messages upon receiving interrupts.
 *
 * @param[in] flasharg - flash structure
 * @param[in] msg - message structure
 */
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
{
	struct bfa_flash_s *flash = flasharg;
	u32	status;

	/* overlay views of the same mailbox message, one per response type */
	union {
		struct bfi_flash_query_rsp_s *query;
		struct bfi_flash_erase_rsp_s *erase;
		struct bfi_flash_write_rsp_s *write;
		struct bfi_flash_read_rsp_s *read;
		struct bfi_flash_event_s *event;
		struct bfi_mbmsg_s   *msg;
	} m;

	m.msg = msg;
	bfa_trc(flash, msg->mh.msg_id);

	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
		/* receiving response after ioc failure */
		bfa_trc(flash, 0x9999);
		return;
	}

	switch (msg->mh.msg_id) {
	case BFI_FLASH_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		bfa_trc(flash, status);
		if (status == BFA_STATUS_OK) {
			u32	i;
			struct bfa_flash_attr_s *attr, *f;

			/* byte-swap the DMA'ed attribute block (big-endian
			 * firmware fields) into the caller's buffer */
			attr = (struct bfa_flash_attr_s *) flash->ubuf;
			f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
			attr->status = be32_to_cpu(f->status);
			attr->npart = be32_to_cpu(f->npart);
			bfa_trc(flash, attr->status);
			bfa_trc(flash, attr->npart);
			for (i = 0; i < attr->npart; i++) {
				attr->part[i].part_type =
					be32_to_cpu(f->part[i].part_type);
				attr->part[i].part_instance =
					be32_to_cpu(f->part[i].part_instance);
				attr->part[i].part_off =
					be32_to_cpu(f->part[i].part_off);
				attr->part[i].part_size =
					be32_to_cpu(f->part[i].part_size);
				attr->part[i].part_len =
					be32_to_cpu(f->part[i].part_len);
				attr->part[i].part_status =
					be32_to_cpu(f->part[i].part_status);
			}
		}
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_ERASE_RSP:
		status = be32_to_cpu(m.erase->status);
		bfa_trc(flash, status);
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		bfa_trc(flash, status);
		/* keep sending chunks until the whole buffer is written */
		if (status != BFA_STATUS_OK || flash->residue == 0) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			bfa_trc(flash, flash->offset);
			bfa_flash_write_send(flash);
		}
		break;
	case BFI_FLASH_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		bfa_trc(flash, status);
		if (status != BFA_STATUS_OK) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			/* copy this chunk out of the DMA buffer, then issue
			 * the next read if any residue remains */
			u32 len = be32_to_cpu(m.read->length);
			bfa_trc(flash, flash->offset);
			bfa_trc(flash, len);
			memcpy(flash->ubuf + flash->offset,
				flash->dbuf_kva, len);
			flash->residue -= len;
			flash->offset += len;
			if (flash->residue == 0) {
				flash->status = status;
				bfa_flash_cb(flash);
			} else
				bfa_flash_read_send(flash);
		}
		break;
	case BFI_FLASH_I2H_BOOT_VER_RSP:
		break;
	case BFI_FLASH_I2H_EVENT:
		/* unsolicited firmware event; may arrive with no op busy */
		status = be32_to_cpu(m.event->status);
		bfa_trc(flash, status);
		if (status == BFA_STATUS_BAD_FWCFG)
			bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
		else if (status == BFA_STATUS_INVALID_VENDOR) {
			u32 param;
			param = be32_to_cpu(m.event->param);
			bfa_trc(flash, param);
			bfa_ioc_aen_post(flash->ioc,
				BFA_IOC_AEN_INVALID_VENDOR);
		}
		break;

	default:
		WARN_ON(1);
	}
}
4206
4207 /*
4208  * Flash memory info API.
4209  *
4210  * @param[in] mincfg - minimal cfg variable
4211  */
4212 u32
4213 bfa_flash_meminfo(bfa_boolean_t mincfg)
4214 {
4215         /* min driver doesn't need flash */
4216         if (mincfg)
4217                 return 0;
4218         return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4219 }
4220
4221 /*
4222  * Flash attach API.
4223  *
4224  * @param[in] flash - flash structure
4225  * @param[in] ioc  - ioc structure
4226  * @param[in] dev  - device structure
4227  * @param[in] trcmod - trace module
4228  * @param[in] logmod - log module
4229  */
4230 void
4231 bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4232                 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4233 {
4234         flash->ioc = ioc;
4235         flash->trcmod = trcmod;
4236         flash->cbfn = NULL;
4237         flash->cbarg = NULL;
4238         flash->op_busy = 0;
4239
4240         bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4241         bfa_q_qe_init(&flash->ioc_notify);
4242         bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4243         list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4244
4245         /* min driver doesn't need flash */
4246         if (mincfg) {
4247                 flash->dbuf_kva = NULL;
4248                 flash->dbuf_pa = 0;
4249         }
4250 }
4251
4252 /*
4253  * Claim memory for flash
4254  *
4255  * @param[in] flash - flash structure
4256  * @param[in] dm_kva - pointer to virtual memory address
4257  * @param[in] dm_pa - physical memory address
4258  * @param[in] mincfg - minimal cfg variable
4259  */
4260 void
4261 bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4262                 bfa_boolean_t mincfg)
4263 {
4264         if (mincfg)
4265                 return;
4266
4267         flash->dbuf_kva = dm_kva;
4268         flash->dbuf_pa = dm_pa;
4269         memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4270         dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4271         dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4272 }
4273
4274 /*
4275  * Get flash attribute.
4276  *
4277  * @param[in] flash - flash structure
4278  * @param[in] attr - flash attribute structure
4279  * @param[in] cbfn - callback function
4280  * @param[in] cbarg - callback argument
4281  *
4282  * Return status.
4283  */
4284 bfa_status_t
4285 bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4286                 bfa_cb_flash_t cbfn, void *cbarg)
4287 {
4288         bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4289
4290         if (!bfa_ioc_is_operational(flash->ioc))
4291                 return BFA_STATUS_IOC_NON_OP;
4292
4293         if (flash->op_busy) {
4294                 bfa_trc(flash, flash->op_busy);
4295                 return BFA_STATUS_DEVBUSY;
4296         }
4297
4298         flash->op_busy = 1;
4299         flash->cbfn = cbfn;
4300         flash->cbarg = cbarg;
4301         flash->ubuf = (u8 *) attr;
4302         bfa_flash_query_send(flash);
4303
4304         return BFA_STATUS_OK;
4305 }
4306
4307 /*
4308  * Erase flash partition.
4309  *
4310  * @param[in] flash - flash structure
4311  * @param[in] type - flash partition type
4312  * @param[in] instance - flash partition instance
4313  * @param[in] cbfn - callback function
4314  * @param[in] cbarg - callback argument
4315  *
4316  * Return status.
4317  */
4318 bfa_status_t
4319 bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4320                 u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4321 {
4322         bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4323         bfa_trc(flash, type);
4324         bfa_trc(flash, instance);
4325
4326         if (!bfa_ioc_is_operational(flash->ioc))
4327                 return BFA_STATUS_IOC_NON_OP;
4328
4329         if (flash->op_busy) {
4330                 bfa_trc(flash, flash->op_busy);
4331                 return BFA_STATUS_DEVBUSY;
4332         }
4333
4334         flash->op_busy = 1;
4335         flash->cbfn = cbfn;
4336         flash->cbarg = cbarg;
4337         flash->type = type;
4338         flash->instance = instance;
4339
4340         bfa_flash_erase_send(flash);
4341         bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4342                                 instance, type);
4343         return BFA_STATUS_OK;
4344 }
4345
4346 /*
4347  * Update flash partition.
4348  *
4349  * @param[in] flash - flash structure
4350  * @param[in] type - flash partition type
4351  * @param[in] instance - flash partition instance
4352  * @param[in] buf - update data buffer
4353  * @param[in] len - data buffer length
4354  * @param[in] offset - offset relative to the partition starting address
4355  * @param[in] cbfn - callback function
4356  * @param[in] cbarg - callback argument
4357  *
4358  * Return status.
4359  */
4360 bfa_status_t
4361 bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4362                 u8 instance, void *buf, u32 len, u32 offset,
4363                 bfa_cb_flash_t cbfn, void *cbarg)
4364 {
4365         bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4366         bfa_trc(flash, type);
4367         bfa_trc(flash, instance);
4368         bfa_trc(flash, len);
4369         bfa_trc(flash, offset);
4370
4371         if (!bfa_ioc_is_operational(flash->ioc))
4372                 return BFA_STATUS_IOC_NON_OP;
4373
4374         /*
4375          * 'len' must be in word (4-byte) boundary
4376          * 'offset' must be in sector (16kb) boundary
4377          */
4378         if (!len || (len & 0x03) || (offset & 0x00003FFF))
4379                 return BFA_STATUS_FLASH_BAD_LEN;
4380
4381         if (type == BFA_FLASH_PART_MFG)
4382                 return BFA_STATUS_EINVAL;
4383
4384         if (flash->op_busy) {
4385                 bfa_trc(flash, flash->op_busy);
4386                 return BFA_STATUS_DEVBUSY;
4387         }
4388
4389         flash->op_busy = 1;
4390         flash->cbfn = cbfn;
4391         flash->cbarg = cbarg;
4392         flash->type = type;
4393         flash->instance = instance;
4394         flash->residue = len;
4395         flash->offset = 0;
4396         flash->addr_off = offset;
4397         flash->ubuf = buf;
4398
4399         bfa_flash_write_send(flash);
4400         return BFA_STATUS_OK;
4401 }
4402
4403 /*
4404  * Read flash partition.
4405  *
4406  * @param[in] flash - flash structure
4407  * @param[in] type - flash partition type
4408  * @param[in] instance - flash partition instance
4409  * @param[in] buf - read data buffer
4410  * @param[in] len - data buffer length
4411  * @param[in] offset - offset relative to the partition starting address
4412  * @param[in] cbfn - callback function
4413  * @param[in] cbarg - callback argument
4414  *
4415  * Return status.
4416  */
4417 bfa_status_t
4418 bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4419                 u8 instance, void *buf, u32 len, u32 offset,
4420                 bfa_cb_flash_t cbfn, void *cbarg)
4421 {
4422         bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4423         bfa_trc(flash, type);
4424         bfa_trc(flash, instance);
4425         bfa_trc(flash, len);
4426         bfa_trc(flash, offset);
4427
4428         if (!bfa_ioc_is_operational(flash->ioc))
4429                 return BFA_STATUS_IOC_NON_OP;
4430
4431         /*
4432          * 'len' must be in word (4-byte) boundary
4433          * 'offset' must be in sector (16kb) boundary
4434          */
4435         if (!len || (len & 0x03) || (offset & 0x00003FFF))
4436                 return BFA_STATUS_FLASH_BAD_LEN;
4437
4438         if (flash->op_busy) {
4439                 bfa_trc(flash, flash->op_busy);
4440                 return BFA_STATUS_DEVBUSY;
4441         }
4442
4443         flash->op_busy = 1;
4444         flash->cbfn = cbfn;
4445         flash->cbarg = cbarg;
4446         flash->type = type;
4447         flash->instance = instance;
4448         flash->residue = len;
4449         flash->offset = 0;
4450         flash->addr_off = offset;
4451         flash->ubuf = buf;
4452         bfa_flash_read_send(flash);
4453
4454         return BFA_STATUS_OK;
4455 }
4456
4457 /*
4458  *      DIAG module specific
4459  */
4460
4461 #define BFA_DIAG_MEMTEST_TOV    50000   /* memtest timeout in msec */
4462 #define CT2_BFA_DIAG_MEMTEST_TOV        (9*30*1000)  /* 4.5 min */
4463
4464 /* IOC event handler */
4465 static void
4466 bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4467 {
4468         struct bfa_diag_s *diag = diag_arg;
4469
4470         bfa_trc(diag, event);
4471         bfa_trc(diag, diag->block);
4472         bfa_trc(diag, diag->fwping.lock);
4473         bfa_trc(diag, diag->tsensor.lock);
4474
4475         switch (event) {
4476         case BFA_IOC_E_DISABLED:
4477         case BFA_IOC_E_FAILED:
4478                 if (diag->fwping.lock) {
4479                         diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4480                         diag->fwping.cbfn(diag->fwping.cbarg,
4481                                         diag->fwping.status);
4482                         diag->fwping.lock = 0;
4483                 }
4484
4485                 if (diag->tsensor.lock) {
4486                         diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4487                         diag->tsensor.cbfn(diag->tsensor.cbarg,
4488                                            diag->tsensor.status);
4489                         diag->tsensor.lock = 0;
4490                 }
4491
4492                 if (diag->block) {
4493                         if (diag->timer_active) {
4494                                 bfa_timer_stop(&diag->timer);
4495                                 diag->timer_active = 0;
4496                         }
4497
4498                         diag->status = BFA_STATUS_IOC_FAILURE;
4499                         diag->cbfn(diag->cbarg, diag->status);
4500                         diag->block = 0;
4501                 }
4502                 break;
4503
4504         default:
4505                 break;
4506         }
4507 }
4508
/*
 * Memtest completion: pull the result block out of shared memory,
 * reset the IOC firmware state, and complete the blocked diag request.
 */
static void
bfa_diag_memtest_done(void *cbarg)
{
	struct bfa_diag_s *diag = cbarg;
	struct bfa_ioc_s  *ioc = diag->ioc;
	struct bfa_diag_memtest_result *res = diag->result;
	u32	loff = BFI_BOOT_MEMTEST_RES_ADDR;
	u32	pgnum, pgoff, i;

	/* select the smem page holding the memtest result block */
	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
			 sizeof(u32)); i++) {
		/* read test result from smem, one word at a time */
		*((u32 *) res + i) =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}

	/* Reset IOC fwstates to BFI_IOC_UNINIT */
	bfa_ioc_reset_fwstate(ioc);

	/* result words come from smem in the opposite byte order */
	res->status = swab32(res->status);
	bfa_trc(diag, res->status);

	if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
		diag->status = BFA_STATUS_OK;
	else {
		/* byte-swap and trace the failure details for diagnosis */
		diag->status = BFA_STATUS_MEMTEST_FAILED;
		res->addr = swab32(res->addr);
		res->exp = swab32(res->exp);
		res->act = swab32(res->act);
		res->err_status = swab32(res->err_status);
		res->err_status1 = swab32(res->err_status1);
		res->err_addr = swab32(res->err_addr);
		bfa_trc(diag, res->addr);
		bfa_trc(diag, res->exp);
		bfa_trc(diag, res->act);
		bfa_trc(diag, res->err_status);
		bfa_trc(diag, res->err_status1);
		bfa_trc(diag, res->err_addr);
	}
	/* complete the blocking diag request */
	diag->timer_active = 0;
	diag->cbfn(diag->cbarg, diag->status);
	diag->block = 0;
}
4558
4559 /*
4560  * Firmware ping
4561  */
4562
4563 /*
4564  * Perform DMA test directly
4565  */
4566 static void
4567 diag_fwping_send(struct bfa_diag_s *diag)
4568 {
4569         struct bfi_diag_fwping_req_s *fwping_req;
4570         u32     i;
4571
4572         bfa_trc(diag, diag->fwping.dbuf_pa);
4573
4574         /* fill DMA area with pattern */
4575         for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4576                 *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4577
4578         /* Fill mbox msg */
4579         fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4580
4581         /* Setup SG list */
4582         bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4583                         diag->fwping.dbuf_pa);
4584         /* Set up dma count */
4585         fwping_req->count = cpu_to_be32(diag->fwping.count);
4586         /* Set up data pattern */
4587         fwping_req->data = diag->fwping.data;
4588
4589         /* build host command */
4590         bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4591                 bfa_ioc_portid(diag->ioc));
4592
4593         /* send mbox cmd */
4594         bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4595 }
4596
4597 static void
4598 diag_fwping_comp(struct bfa_diag_s *diag,
4599                  struct bfi_diag_fwping_rsp_s *diag_rsp)
4600 {
4601         u32     rsp_data = diag_rsp->data;
4602         u8      rsp_dma_status = diag_rsp->dma_status;
4603
4604         bfa_trc(diag, rsp_data);
4605         bfa_trc(diag, rsp_dma_status);
4606
4607         if (rsp_dma_status == BFA_STATUS_OK) {
4608                 u32     i, pat;
4609                 pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4610                         diag->fwping.data;
4611                 /* Check mbox data */
4612                 if (diag->fwping.data != rsp_data) {
4613                         bfa_trc(diag, rsp_data);
4614                         diag->fwping.result->dmastatus =
4615                                         BFA_STATUS_DATACORRUPTED;
4616                         diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4617                         diag->fwping.cbfn(diag->fwping.cbarg,
4618                                         diag->fwping.status);
4619                         diag->fwping.lock = 0;
4620                         return;
4621                 }
4622                 /* Check dma pattern */
4623                 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4624                         if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4625                                 bfa_trc(diag, i);
4626                                 bfa_trc(diag, pat);
4627                                 bfa_trc(diag,
4628                                         *((u32 *)diag->fwping.dbuf_kva + i));
4629                                 diag->fwping.result->dmastatus =
4630                                                 BFA_STATUS_DATACORRUPTED;
4631                                 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4632                                 diag->fwping.cbfn(diag->fwping.cbarg,
4633                                                 diag->fwping.status);
4634                                 diag->fwping.lock = 0;
4635                                 return;
4636                         }
4637                 }
4638                 diag->fwping.result->dmastatus = BFA_STATUS_OK;
4639                 diag->fwping.status = BFA_STATUS_OK;
4640                 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4641                 diag->fwping.lock = 0;
4642         } else {
4643                 diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4644                 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4645                 diag->fwping.lock = 0;
4646         }
4647 }
4648
4649 /*
4650  * Temperature Sensor
4651  */
4652
4653 static void
4654 diag_tempsensor_send(struct bfa_diag_s *diag)
4655 {
4656         struct bfi_diag_ts_req_s *msg;
4657
4658         msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4659         bfa_trc(diag, msg->temp);
4660         /* build host command */
4661         bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4662                 bfa_ioc_portid(diag->ioc));
4663         /* send mbox cmd */
4664         bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4665 }
4666
/*
 * Temperature-sensor completion: copy the firmware response into the
 * caller-supplied temperature buffer and invoke the completion callback.
 */
static void
diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
{
	if (!diag->tsensor.lock) {
		/* receiving response after ioc failure */
		bfa_trc(diag, diag->tsensor.lock);
		return;
	}

	/*
	 * ASIC junction tempsensor is a reg read operation
	 * it will always return OK
	 */
	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
	diag->tsensor.temp->ts_junc = rsp->ts_junc;
	diag->tsensor.temp->ts_brd = rsp->ts_brd;
	diag->tsensor.temp->status = BFA_STATUS_OK;

	/* board sensor is optional; only valid when ts_brd is set */
	if (rsp->ts_brd) {
		if (rsp->status == BFA_STATUS_OK) {
			diag->tsensor.temp->brd_temp =
				be16_to_cpu(rsp->brd_temp);
		} else {
			bfa_trc(diag, rsp->status);
			diag->tsensor.temp->brd_temp = 0;
			diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
		}
	}
	bfa_trc(diag, rsp->ts_junc);
	bfa_trc(diag, rsp->temp);
	bfa_trc(diag, rsp->ts_brd);
	bfa_trc(diag, rsp->brd_temp);
	/*
	 * NOTE(review): the callback is invoked with diag->tsensor.status,
	 * not temp->status set above -- looks like a board-sensor failure
	 * (DEVBUSY) is only visible via the temp buffer; confirm intended.
	 */
	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
	diag->tsensor.lock = 0;
}
4702
4703 /*
4704  *      LED Test command
4705  */
/*
 * Build and queue a BFI_DIAG_H2I_LEDTEST mailbox request.
 *
 * NOTE: converts ledtest->freq in place (N blinks per 10 sec to hardware
 * ontime value), so the caller's structure is modified by this call.
 */
static void
diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
{
	struct bfi_diag_ledtest_req_s  *msg;

	msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
			bfa_ioc_portid(diag->ioc));

	/*
	 * convert the freq from N blinks per 10 sec to
	 * crossbow ontime value. We do it here because division is need
	 */
	if (ledtest->freq)
		ledtest->freq = 500 / ledtest->freq;

	/* never send 0; freq > 500 divides down to 0 and is clamped to 1 */
	if (ledtest->freq == 0)
		ledtest->freq = 1;

	bfa_trc(diag, ledtest->freq);
	/* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
	msg->cmd = (u8) ledtest->cmd;
	msg->color = (u8) ledtest->color;
	msg->portid = bfa_ioc_portid(diag->ioc);
	msg->led = ledtest->led;
	msg->freq = cpu_to_be16(ledtest->freq);

	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
}
4737
4738 static void
4739 diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4740 {
4741         bfa_trc(diag, diag->ledtest.lock);
4742         diag->ledtest.lock = BFA_FALSE;
4743         /* no bfa_cb_queue is needed because driver is not waiting */
4744 }
4745
4746 /*
4747  * Port beaconing
4748  */
/*
 * Build and queue a BFI_DIAG_H2I_PORTBEACON mailbox request.
 *
 * @param[in] diag   - diag module
 * @param[in] beacon - beacon on/off
 * @param[in] sec    - beacon duration in seconds (sent big-endian)
 */
static void
diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
{
	struct bfi_diag_portbeacon_req_s *msg;

	msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
		bfa_ioc_portid(diag->ioc));
	msg->beacon = beacon;
	msg->period = cpu_to_be32(sec);
	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
}
4763
4764 static void
4765 diag_portbeacon_comp(struct bfa_diag_s *diag)
4766 {
4767         bfa_trc(diag, diag->beacon.state);
4768         diag->beacon.state = BFA_FALSE;
4769         if (diag->cbfn_beacon)
4770                 diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
4771 }
4772
4773 /*
4774  *      Diag hmbox handler
4775  */
/*
 * Diag mailbox message dispatcher: routes firmware responses to the
 * matching completion handler.  Registered via bfa_ioc_mbox_regisr()
 * in bfa_diag_attach().
 */
void
bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_diag_s *diag = diagarg;

	switch (msg->mh.msg_id) {
	case BFI_DIAG_I2H_PORTBEACON:
		diag_portbeacon_comp(diag);
		break;
	case BFI_DIAG_I2H_FWPING:
		diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
		break;
	case BFI_DIAG_I2H_TEMPSENSOR:
		diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
		break;
	case BFI_DIAG_I2H_LEDTEST:
		diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
		break;
	default:
		/* unexpected message id from firmware */
		bfa_trc(diag, msg->mh.msg_id);
		WARN_ON(1);
	}
}
4799
4800 /*
4801  * Gen RAM Test
4802  *
4803  *   @param[in] *diag           - diag data struct
4804  *   @param[in] *memtest        - mem test params input from upper layer,
4805  *   @param[in] pattern         - mem test pattern
4806  *   @param[in] *result         - mem test result
 *   @param[in] cbfn            - mem test callback function
 *   @param[in] cbarg           - callback function arg
4809  *
4810  *   @param[out]
4811  */
4812 bfa_status_t
4813 bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
4814                 u32 pattern, struct bfa_diag_memtest_result *result,
4815                 bfa_cb_diag_t cbfn, void *cbarg)
4816 {
4817         u32     memtest_tov;
4818
4819         bfa_trc(diag, pattern);
4820
4821         if (!bfa_ioc_adapter_is_disabled(diag->ioc))
4822                 return BFA_STATUS_ADAPTER_ENABLED;
4823
4824         /* check to see if there is another destructive diag cmd running */
4825         if (diag->block) {
4826                 bfa_trc(diag, diag->block);
4827                 return BFA_STATUS_DEVBUSY;
4828         } else
4829                 diag->block = 1;
4830
4831         diag->result = result;
4832         diag->cbfn = cbfn;
4833         diag->cbarg = cbarg;
4834
4835         /* download memtest code and take LPU0 out of reset */
4836         bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
4837
4838         memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
4839                        CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
4840         bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
4841                         bfa_diag_memtest_done, diag, memtest_tov);
4842         diag->timer_active = 1;
4843         return BFA_STATUS_OK;
4844 }
4845
4846 /*
4847  * DIAG firmware ping command
4848  *
4849  *   @param[in] *diag           - diag data struct
4850  *   @param[in] cnt             - dma loop count for testing PCIE
4851  *   @param[in] data            - data pattern to pass in fw
4852  *   @param[in] *result         - pt to bfa_diag_fwping_result_t data struct
4853  *   @param[in] cbfn            - callback function
 *   @param[in] *cbarg          - callback function arg
4855  *
4856  *   @param[out]
4857  */
/*
 * Start a firmware DMA ping loop (PCIe test).  Completion is delivered
 * asynchronously through cbfn via diag_fwping_comp().
 *
 * Returns BFA_STATUS_OK on successful submission, otherwise an error:
 * IOC not operational, command not supported on CT2 ethernet functions,
 * or another fwping / destructive diag command in progress.
 */
bfa_status_t
bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
		struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	bfa_trc(diag, cnt);
	bfa_trc(diag, data);

	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* fwping is not supported on CT2 ethernet (CNA) functions */
	if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
	    ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
		return BFA_STATUS_CMD_NOTSUPP;

	/* check to see if there is another destructive diag cmd running */
	if (diag->block || diag->fwping.lock) {
		bfa_trc(diag, diag->block);
		bfa_trc(diag, diag->fwping.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* Initialization */
	diag->fwping.lock = 1;
	diag->fwping.cbfn = cbfn;
	diag->fwping.cbarg = cbarg;
	diag->fwping.result = result;
	diag->fwping.data = data;
	diag->fwping.count = cnt;

	/* Init test results */
	diag->fwping.result->data = 0;
	diag->fwping.result->status = BFA_STATUS_OK;

	/* kick off the first ping */
	diag_fwping_send(diag);
	return BFA_STATUS_OK;
}
4896
4897 /*
4898  * Read Temperature Sensor
4899  *
4900  *   @param[in] *diag           - diag data struct
4901  *   @param[in] *result         - pt to bfa_diag_temp_t data struct
4902  *   @param[in] cbfn            - callback function
 *   @param[in] *cbarg          - callback function arg
4904  *
4905  *   @param[out]
4906  */
/*
 * Start an asynchronous temperature sensor read; the result is written
 * into *result and cbfn is invoked from diag_tempsensor_comp().
 *
 * Returns BFA_STATUS_DEVBUSY if a destructive diag command or another
 * tsensor query is in progress, BFA_STATUS_IOC_NON_OP if the IOC is
 * down, otherwise BFA_STATUS_OK.
 */
bfa_status_t
bfa_diag_tsensor_query(struct bfa_diag_s *diag,
		struct bfa_diag_results_tempsensor_s *result,
		bfa_cb_diag_t cbfn, void *cbarg)
{
	/* check to see if there is a destructive diag cmd running */
	if (diag->block || diag->tsensor.lock) {
		bfa_trc(diag, diag->block);
		bfa_trc(diag, diag->tsensor.lock);
		return BFA_STATUS_DEVBUSY;
	}

	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* Init diag mod params */
	diag->tsensor.lock = 1;
	diag->tsensor.temp = result;
	diag->tsensor.cbfn = cbfn;
	diag->tsensor.cbarg = cbarg;

	/* Send msg to fw */
	diag_tempsensor_send(diag);

	return BFA_STATUS_OK;
}
4933
4934 /*
4935  * LED Test command
4936  *
4937  *   @param[in] *diag           - diag data struct
4938  *   @param[in] *ledtest        - pt to ledtest data structure
4939  *
4940  *   @param[out]
4941  */
/*
 * Start an LED test.  Fire-and-forget: the lock is released by
 * diag_ledtest_comp() when the firmware responds; no completion is
 * reported to the caller.
 *
 * Returns BFA_STATUS_BEACON_ON if port beaconing is active (LEDs are
 * in use), BFA_STATUS_LEDTEST_OP if a test is already running.
 */
bfa_status_t
bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
{
	bfa_trc(diag, ledtest->cmd);

	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (diag->beacon.state)
		return BFA_STATUS_BEACON_ON;

	if (diag->ledtest.lock)
		return BFA_STATUS_LEDTEST_OP;

	/* Send msg to fw */
	diag->ledtest.lock = BFA_TRUE;
	diag_ledtest_send(diag, ledtest);

	return BFA_STATUS_OK;
}
4962
4963 /*
4964  * Port beaconing command
4965  *
4966  *   @param[in] *diag           - diag data struct
4967  *   @param[in] beacon          - port beaconing 1:ON   0:OFF
4968  *   @param[in] link_e2e_beacon - link beaconing 1:ON   0:OFF
4969  *   @param[in] sec             - beaconing duration in seconds
4970  *
4971  *   @param[out]
4972  */
/*
 * Turn port/link beaconing on or off for the given duration.  The
 * driver is notified through cbfn_beacon before the firmware command
 * is queued.
 */
bfa_status_t
bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
		bfa_boolean_t link_e2e_beacon, uint32_t sec)
{
	bfa_trc(diag, beacon);
	bfa_trc(diag, link_e2e_beacon);
	bfa_trc(diag, sec);

	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* LED test owns the LEDs while it is running */
	if (diag->ledtest.lock)
		return BFA_STATUS_LEDTEST_OP;

	if (diag->beacon.state && beacon)	/* beacon already on */
		return BFA_STATUS_BEACON_ON;

	diag->beacon.state	= beacon;
	diag->beacon.link_e2e	= link_e2e_beacon;
	if (diag->cbfn_beacon)
		diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);

	/* Send msg to fw */
	diag_portbeacon_send(diag, beacon, sec);

	return BFA_STATUS_OK;
}
5000
5001 /*
5002  * Return DMA memory needed by diag module.
5003  */
/*
 * Return DMA memory needed by diag module.
 *
 * One fwping DMA buffer, rounded up to the DMA alignment boundary.
 */
u32
bfa_diag_meminfo(void)
{
	return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
5009
5010 /*
5011  *      Attach virtual and physical memory for Diag.
5012  */
/*
 *	Attach virtual and physical memory for Diag.
 *
 * Registers the diag mailbox handler and subscribes to IOC state
 * notifications (bfa_diag_notify).
 *
 * @param[in] diag        - diag module
 * @param[in] ioc         - ioc structure
 * @param[in] dev         - opaque driver handle passed back in callbacks
 * @param[in] cbfn_beacon - driver beacon on/off callback (may be NULL)
 * @param[in] trcmod      - trace module
 */
void
bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
	bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
{
	diag->dev = dev;
	diag->ioc = ioc;
	diag->trcmod = trcmod;

	diag->block = 0;
	diag->cbfn = NULL;
	diag->cbarg = NULL;
	diag->result = NULL;
	diag->cbfn_beacon = cbfn_beacon;

	bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
	bfa_q_qe_init(&diag->ioc_notify);
	bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
	list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
}
5032
/*
 * Claim the fwping DMA buffer (virtual address and bus address) and
 * zero it.
 */
void
bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
{
	diag->fwping.dbuf_kva = dm_kva;
	diag->fwping.dbuf_pa = dm_pa;
	memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
}
5040
5041 /*
5042  *      PHY module specific
5043  */
5044 #define BFA_PHY_DMA_BUF_SZ      0x02000         /* 8k dma buffer */
5045 #define BFA_PHY_LOCK_STATUS     0x018878        /* phy semaphore status reg */
5046
5047 static void
5048 bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5049 {
5050         int i, m = sz >> 2;
5051
5052         for (i = 0; i < m; i++)
5053                 obuf[i] = be32_to_cpu(ibuf[i]);
5054 }
5055
5056 static bfa_boolean_t
5057 bfa_phy_present(struct bfa_phy_s *phy)
5058 {
5059         return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5060 }
5061
/*
 * IOC event notification for the phy module: if the IOC is disabled
 * or fails while a phy operation is in flight, complete it with
 * BFA_STATUS_IOC_FAILURE and release the busy flag.
 */
static void
bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
{
	struct bfa_phy_s *phy = cbarg;

	bfa_trc(phy, event);

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (phy->op_busy) {
			phy->status = BFA_STATUS_IOC_FAILURE;
			phy->cbfn(phy->cbarg, phy->status);
			phy->op_busy = 0;
		}
		break;

	default:
		break;
	}
}
5083
5084 /*
5085  * Send phy attribute query request.
5086  *
5087  * @param[in] cbarg - callback argument
5088  */
/*
 * Send phy attribute query request.  The firmware DMAs the attribute
 * block into phy->dbuf_pa; bfa_phy_intr() copies it to the user buffer.
 *
 * @param[in] cbarg - callback argument (struct bfa_phy_s *)
 */
static void
bfa_phy_query_send(void *cbarg)
{
	struct bfa_phy_s *phy = cbarg;
	struct bfi_phy_query_req_s *msg =
			(struct bfi_phy_query_req_s *) phy->mb.msg;

	msg->instance = phy->instance;
	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
		bfa_ioc_portid(phy->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
5102
5103 /*
5104  * Send phy write request.
5105  *
5106  * @param[in] cbarg - callback argument
5107  */
5108 static void
5109 bfa_phy_write_send(void *cbarg)
5110 {
5111         struct bfa_phy_s *phy = cbarg;
5112         struct bfi_phy_write_req_s *msg =
5113                         (struct bfi_phy_write_req_s *) phy->mb.msg;
5114         u32     len;
5115         u16     *buf, *dbuf;
5116         int     i, sz;
5117
5118         msg->instance = phy->instance;
5119         msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5120         len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5121                         phy->residue : BFA_PHY_DMA_BUF_SZ;
5122         msg->length = cpu_to_be32(len);
5123
5124         /* indicate if it's the last msg of the whole write operation */
5125         msg->last = (len == phy->residue) ? 1 : 0;
5126
5127         bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5128                 bfa_ioc_portid(phy->ioc));
5129         bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5130
5131         buf = (u16 *) (phy->ubuf + phy->offset);
5132         dbuf = (u16 *)phy->dbuf_kva;
5133         sz = len >> 1;
5134         for (i = 0; i < sz; i++)
5135                 buf[i] = cpu_to_be16(dbuf[i]);
5136
5137         bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5138
5139         phy->residue -= len;
5140         phy->offset += len;
5141 }
5142
5143 /*
5144  * Send phy read request.
5145  *
5146  * @param[in] cbarg - callback argument
5147  */
/*
 * Send phy read request for the next chunk (up to BFA_PHY_DMA_BUF_SZ
 * bytes).  phy->offset / phy->residue are advanced by the response
 * handler in bfa_phy_intr(), not here.
 *
 * @param[in] cbarg - callback argument (struct bfa_phy_s *)
 */
static void
bfa_phy_read_send(void *cbarg)
{
	struct bfa_phy_s *phy = cbarg;
	struct bfi_phy_read_req_s *msg =
			(struct bfi_phy_read_req_s *) phy->mb.msg;
	u32	len;

	msg->instance = phy->instance;
	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
			phy->residue : BFA_PHY_DMA_BUF_SZ;
	msg->length = cpu_to_be32(len);
	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
		bfa_ioc_portid(phy->ioc));
	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
5166
5167 /*
5168  * Send phy stats request.
5169  *
5170  * @param[in] cbarg - callback argument
5171  */
/*
 * Send phy stats request.  The firmware DMAs the stats block into
 * phy->dbuf_pa; bfa_phy_intr() copies it to the user buffer.
 *
 * @param[in] cbarg - callback argument (struct bfa_phy_s *)
 */
static void
bfa_phy_stats_send(void *cbarg)
{
	struct bfa_phy_s *phy = cbarg;
	struct bfi_phy_stats_req_s *msg =
			(struct bfi_phy_stats_req_s *) phy->mb.msg;

	msg->instance = phy->instance;
	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
		bfa_ioc_portid(phy->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
5185
5186 /*
 * Phy memory info API.
5188  *
5189  * @param[in] mincfg - minimal cfg variable
5190  */
/*
 * Return DMA memory needed by the phy module: one aligned DMA buffer,
 * or 0 for a minimal-config driver which does not use the phy.
 *
 * @param[in] mincfg - minimal cfg variable
 */
u32
bfa_phy_meminfo(bfa_boolean_t mincfg)
{
	/* min driver doesn't need phy */
	if (mincfg)
		return 0;

	return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
5200
5201 /*
 * Phy attach API.
5203  *
5204  * @param[in] phy - phy structure
5205  * @param[in] ioc  - ioc structure
5206  * @param[in] dev  - device structure
5207  * @param[in] trcmod - trace module
 * @param[in] mincfg - minimal cfg variable
5209  */
/*
 * Attach the phy module: register the mailbox handler and subscribe
 * to IOC event notifications.
 *
 * @param[in] phy    - phy structure
 * @param[in] ioc    - ioc structure
 * @param[in] dev    - device structure (currently unused here)
 * @param[in] trcmod - trace module
 * @param[in] mincfg - minimal cfg variable
 */
void
bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
	phy->ioc = ioc;
	phy->trcmod = trcmod;
	phy->cbfn = NULL;
	phy->cbarg = NULL;
	phy->op_busy = 0;

	bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
	bfa_q_qe_init(&phy->ioc_notify);
	bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
	list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);

	/* min driver doesn't need phy */
	if (mincfg) {
		phy->dbuf_kva = NULL;
		phy->dbuf_pa = 0;
	}
}
5231
5232 /*
5233  * Claim memory for phy
5234  *
5235  * @param[in] phy - phy structure
5236  * @param[in] dm_kva - pointer to virtual memory address
5237  * @param[in] dm_pa - physical memory address
5238  * @param[in] mincfg - minimal cfg variable
5239  */
5240 void
5241 bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5242                 bfa_boolean_t mincfg)
5243 {
5244         if (mincfg)
5245                 return;
5246
5247         phy->dbuf_kva = dm_kva;
5248         phy->dbuf_pa = dm_pa;
5249         memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5250         dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5251         dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5252 }
5253
5254 bfa_boolean_t
5255 bfa_phy_busy(struct bfa_ioc_s *ioc)
5256 {
5257         void __iomem    *rb;
5258
5259         rb = bfa_ioc_bar0(ioc);
5260         return readl(rb + BFA_PHY_LOCK_STATUS);
5261 }
5262
5263 /*
5264  * Get phy attribute.
5265  *
5266  * @param[in] phy - phy structure
5267  * @param[in] attr - phy attribute structure
5268  * @param[in] cbfn - callback function
5269  * @param[in] cbarg - callback argument
5270  *
5271  * Return status.
5272  */
/*
 * Get phy attribute.
 *
 * Asynchronous: the attribute block is written into *attr and cbfn is
 * invoked from bfa_phy_intr() when the firmware responds.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] attr - phy attribute structure (result buffer)
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
		struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
	bfa_trc(phy, instance);

	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;

	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* busy if a host op is in flight or the hw semaphore is held */
	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->ubuf = (uint8_t *) attr;
	bfa_phy_query_send(phy);

	return BFA_STATUS_OK;
}
5300
5301 /*
5302  * Get phy stats.
5303  *
5304  * @param[in] phy - phy structure
5305  * @param[in] instance - phy image instance
5306  * @param[in] stats - pointer to phy stats
5307  * @param[in] cbfn - callback function
5308  * @param[in] cbarg - callback argument
5309  *
5310  * Return status.
5311  */
/*
 * Get phy stats.
 *
 * Asynchronous: stats are written into *stats and cbfn is invoked from
 * bfa_phy_intr() when the firmware responds.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] stats - pointer to phy stats (result buffer)
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
		struct bfa_phy_stats_s *stats,
		bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
	bfa_trc(phy, instance);

	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;

	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* busy if a host op is in flight or the hw semaphore is held */
	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->ubuf = (u8 *) stats;
	bfa_phy_stats_send(phy);

	return BFA_STATUS_OK;
}
5340
5341 /*
5342  * Update phy image.
5343  *
5344  * @param[in] phy - phy structure
5345  * @param[in] instance - phy image instance
5346  * @param[in] buf - update data buffer
5347  * @param[in] len - data buffer length
5348  * @param[in] offset - offset relative to starting address
5349  * @param[in] cbfn - callback function
5350  * @param[in] cbarg - callback argument
5351  *
5352  * Return status.
5353  */
/*
 * Update phy image.
 *
 * Asynchronous chunked write: bfa_phy_write_send() sends one DMA-buffer
 * sized chunk at a time and bfa_phy_intr() continues until residue is 0,
 * then invokes cbfn.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] buf - update data buffer (must stay valid until completion)
 * @param[in] len - data buffer length, must be a multiple of 4
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
		void *buf, u32 len, u32 offset,
		bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
	bfa_trc(phy, instance);
	bfa_trc(phy, len);
	bfa_trc(phy, offset);

	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;

	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* 'len' must be in word (4-byte) boundary */
	if (!len || (len & 0x03))
		return BFA_STATUS_FAILED;

	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->residue = len;
	phy->offset = 0;
	phy->addr_off = offset;
	phy->ubuf = buf;

	bfa_phy_write_send(phy);
	return BFA_STATUS_OK;
}
5391
5392 /*
5393  * Read phy image.
5394  *
5395  * @param[in] phy - phy structure
5396  * @param[in] instance - phy image instance
5397  * @param[in] buf - read data buffer
5398  * @param[in] len - data buffer length
5399  * @param[in] offset - offset relative to starting address
5400  * @param[in] cbfn - callback function
5401  * @param[in] cbarg - callback argument
5402  *
5403  * Return status.
5404  */
/*
 * Read phy image.
 *
 * Asynchronous chunked read: bfa_phy_read_send() requests one DMA-buffer
 * sized chunk at a time and bfa_phy_intr() continues until residue is 0,
 * then invokes cbfn.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] buf - read data buffer (must stay valid until completion)
 * @param[in] len - data buffer length, must be a multiple of 4
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
		void *buf, u32 len, u32 offset,
		bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
	bfa_trc(phy, instance);
	bfa_trc(phy, len);
	bfa_trc(phy, offset);

	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;

	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* 'len' must be in word (4-byte) boundary */
	if (!len || (len & 0x03))
		return BFA_STATUS_FAILED;

	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->residue = len;
	phy->offset = 0;
	phy->addr_off = offset;
	phy->ubuf = buf;
	bfa_phy_read_send(phy);

	return BFA_STATUS_OK;
}
5442
5443 /*
5444  * Process phy response messages upon receiving interrupts.
5445  *
5446  * @param[in] phyarg - phy structure
5447  * @param[in] msg - message structure
5448  */
5449 void
5450 bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5451 {
5452         struct bfa_phy_s *phy = phyarg;
5453         u32     status;
5454
5455         union {
5456                 struct bfi_phy_query_rsp_s *query;
5457                 struct bfi_phy_stats_rsp_s *stats;
5458                 struct bfi_phy_write_rsp_s *write;
5459                 struct bfi_phy_read_rsp_s *read;
5460                 struct bfi_mbmsg_s   *msg;
5461         } m;
5462
5463         m.msg = msg;
5464         bfa_trc(phy, msg->mh.msg_id);
5465
5466         if (!phy->op_busy) {
5467                 /* receiving response after ioc failure */
5468                 bfa_trc(phy, 0x9999);
5469                 return;
5470         }
5471
5472         switch (msg->mh.msg_id) {
5473         case BFI_PHY_I2H_QUERY_RSP:
5474                 status = be32_to_cpu(m.query->status);
5475                 bfa_trc(phy, status);
5476
5477                 if (status == BFA_STATUS_OK) {
5478                         struct bfa_phy_attr_s *attr =
5479                                 (struct bfa_phy_attr_s *) phy->ubuf;
5480                         bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5481                                         sizeof(struct bfa_phy_attr_s));
5482                         bfa_trc(phy, attr->status);
5483                         bfa_trc(phy, attr->length);
5484                 }
5485
5486                 phy->status = status;
5487                 phy->op_busy = 0;
5488                 if (phy->cbfn)
5489                         phy->cbfn(phy->cbarg, phy->status);
5490                 break;
5491         case BFI_PHY_I2H_STATS_RSP:
5492                 status = be32_to_cpu(m.stats->status);
5493                 bfa_trc(phy, status);
5494
5495                 if (status == BFA_STATUS_OK) {
5496                         struct bfa_phy_stats_s *stats =
5497                                 (struct bfa_phy_stats_s *) phy->ubuf;
5498                         bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5499                                 sizeof(struct bfa_phy_stats_s));
5500                                 bfa_trc(phy, stats->status);
5501                 }
5502
5503                 phy->status = status;
5504                 phy->op_busy = 0;
5505                 if (phy->cbfn)
5506                         phy->cbfn(phy->cbarg, phy->status);
5507                 break;
5508         case BFI_PHY_I2H_WRITE_RSP:
5509                 status = be32_to_cpu(m.write->status);
5510                 bfa_trc(phy, status);
5511
5512                 if (status != BFA_STATUS_OK || phy->residue == 0) {
5513                         phy->status = status;
5514                         phy->op_busy = 0;
5515                         if (phy->cbfn)
5516                                 phy->cbfn(phy->cbarg, phy->status);
5517                 } else {
5518                         bfa_trc(phy, phy->offset);
5519                         bfa_phy_write_send(phy);
5520                 }
5521                 break;
5522         case BFI_PHY_I2H_READ_RSP:
5523                 status = be32_to_cpu(m.read->status);
5524                 bfa_trc(phy, status);
5525
5526                 if (status != BFA_STATUS_OK) {
5527                         phy->status = status;
5528                         phy->op_busy = 0;
5529                         if (phy->cbfn)
5530                                 phy->cbfn(phy->cbarg, phy->status);
5531                 } else {
5532                         u32 len = be32_to_cpu(m.read->length);
5533                         u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5534                         u16 *dbuf = (u16 *)phy->dbuf_kva;
5535                         int i, sz = len >> 1;
5536
5537                         bfa_trc(phy, phy->offset);
5538                         bfa_trc(phy, len);
5539
5540                         for (i = 0; i < sz; i++)
5541                                 buf[i] = be16_to_cpu(dbuf[i]);
5542
5543                         phy->residue -= len;
5544                         phy->offset += len;
5545
5546                         if (phy->residue == 0) {
5547                                 phy->status = status;
5548                                 phy->op_busy = 0;
5549                                 if (phy->cbfn)
5550                                         phy->cbfn(phy->cbarg, phy->status);
5551                         } else
5552                                 bfa_phy_read_send(phy);
5553                 }
5554                 break;
5555         default:
5556                 WARN_ON(1);
5557         }
5558 }
5559
5560 /*
5561  *      DCONF module specific
5562  */
5563
5564 BFA_MODULE(dconf);
5565
5566 /*
5567  * DCONF state machine events
5568  */
enum bfa_dconf_event {
	BFA_DCONF_SM_INIT		= 1,	/* dconf Init */
	BFA_DCONF_SM_FLASH_COMP		= 2,	/* read/write to flash */
	BFA_DCONF_SM_WR			= 3,	/* binding change, map */
	BFA_DCONF_SM_TIMEOUT		= 4,	/* Start timer (NOTE(review):
						 * appears to be raised from
						 * bfa_dconf_timer - confirm) */
	BFA_DCONF_SM_EXIT		= 5,	/* exit dconf module */
	BFA_DCONF_SM_IOCDISABLE		= 6,	/* IOC disable event */
};
5577
/* Forward declarations of the DCONF state machine states */
static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);

/* Forward declarations of DCONF helper routines */
static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
static void bfa_dconf_timer(void *cbarg);
static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5598
5599 /*
5600  * Begining state of dconf module. Waiting for an event to start.
5601  */
5602 static void
5603 bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5604 {
5605         bfa_status_t bfa_status;
5606         bfa_trc(dconf->bfa, event);
5607
5608         switch (event) {
5609         case BFA_DCONF_SM_INIT:
5610                 if (dconf->min_cfg) {
5611                         bfa_trc(dconf->bfa, dconf->min_cfg);
5612                         bfa_fsm_send_event(&dconf->bfa->iocfc,
5613                                         IOCFC_E_DCONF_DONE);
5614                         return;
5615                 }
5616                 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5617                 bfa_timer_start(dconf->bfa, &dconf->timer,
5618                         bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5619                 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5620                                         BFA_FLASH_PART_DRV, dconf->instance,
5621                                         dconf->dconf,
5622                                         sizeof(struct bfa_dconf_s), 0,
5623                                         bfa_dconf_init_cb, dconf->bfa);
5624                 if (bfa_status != BFA_STATUS_OK) {
5625                         bfa_timer_stop(&dconf->timer);
5626                         bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5627                         bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5628                         return;
5629                 }
5630                 break;
5631         case BFA_DCONF_SM_EXIT:
5632                 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5633         case BFA_DCONF_SM_IOCDISABLE:
5634         case BFA_DCONF_SM_WR:
5635         case BFA_DCONF_SM_FLASH_COMP:
5636                 break;
5637         default:
5638                 bfa_sm_fault(dconf->bfa, event);
5639         }
5640 }
5641
5642 /*
5643  * Read flash for dconf entries and make a call back to the driver once done.
5644  */
5645 static void
5646 bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5647                         enum bfa_dconf_event event)
5648 {
5649         bfa_trc(dconf->bfa, event);
5650
5651         switch (event) {
5652         case BFA_DCONF_SM_FLASH_COMP:
5653                 bfa_timer_stop(&dconf->timer);
5654                 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5655                 break;
5656         case BFA_DCONF_SM_TIMEOUT:
5657                 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5658                 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED);
5659                 break;
5660         case BFA_DCONF_SM_EXIT:
5661                 bfa_timer_stop(&dconf->timer);
5662                 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5663                 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5664                 break;
5665         case BFA_DCONF_SM_IOCDISABLE:
5666                 bfa_timer_stop(&dconf->timer);
5667                 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5668                 break;
5669         default:
5670                 bfa_sm_fault(dconf->bfa, event);
5671         }
5672 }
5673
5674 /*
5675  * DCONF Module is in ready state. Has completed the initialization.
5676  */
5677 static void
5678 bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5679 {
5680         bfa_trc(dconf->bfa, event);
5681
5682         switch (event) {
5683         case BFA_DCONF_SM_WR:
5684                 bfa_timer_start(dconf->bfa, &dconf->timer,
5685                         bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5686                 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5687                 break;
5688         case BFA_DCONF_SM_EXIT:
5689                 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5690                 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5691                 break;
5692         case BFA_DCONF_SM_INIT:
5693         case BFA_DCONF_SM_IOCDISABLE:
5694                 break;
5695         default:
5696                 bfa_sm_fault(dconf->bfa, event);
5697         }
5698 }
5699
5700 /*
5701  * entries are dirty, write back to the flash.
5702  */
5703
5704 static void
5705 bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5706 {
5707         bfa_trc(dconf->bfa, event);
5708
5709         switch (event) {
5710         case BFA_DCONF_SM_TIMEOUT:
5711                 bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5712                 bfa_dconf_flash_write(dconf);
5713                 break;
5714         case BFA_DCONF_SM_WR:
5715                 bfa_timer_stop(&dconf->timer);
5716                 bfa_timer_start(dconf->bfa, &dconf->timer,
5717                         bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5718                 break;
5719         case BFA_DCONF_SM_EXIT:
5720                 bfa_timer_stop(&dconf->timer);
5721                 bfa_timer_start(dconf->bfa, &dconf->timer,
5722                         bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5723                 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5724                 bfa_dconf_flash_write(dconf);
5725                 break;
5726         case BFA_DCONF_SM_FLASH_COMP:
5727                 break;
5728         case BFA_DCONF_SM_IOCDISABLE:
5729                 bfa_timer_stop(&dconf->timer);
5730                 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5731                 break;
5732         default:
5733                 bfa_sm_fault(dconf->bfa, event);
5734         }
5735 }
5736
5737 /*
5738  * Sync the dconf entries to the flash.
5739  */
5740 static void
5741 bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5742                         enum bfa_dconf_event event)
5743 {
5744         bfa_trc(dconf->bfa, event);
5745
5746         switch (event) {
5747         case BFA_DCONF_SM_IOCDISABLE:
5748         case BFA_DCONF_SM_FLASH_COMP:
5749                 bfa_timer_stop(&dconf->timer);
5750         case BFA_DCONF_SM_TIMEOUT:
5751                 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5752                 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5753                 break;
5754         default:
5755                 bfa_sm_fault(dconf->bfa, event);
5756         }
5757 }
5758
5759 static void
5760 bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5761 {
5762         bfa_trc(dconf->bfa, event);
5763
5764         switch (event) {
5765         case BFA_DCONF_SM_FLASH_COMP:
5766                 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5767                 break;
5768         case BFA_DCONF_SM_WR:
5769                 bfa_timer_start(dconf->bfa, &dconf->timer,
5770                         bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5771                 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5772                 break;
5773         case BFA_DCONF_SM_EXIT:
5774                 bfa_timer_start(dconf->bfa, &dconf->timer,
5775                         bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5776                 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5777                 break;
5778         case BFA_DCONF_SM_IOCDISABLE:
5779                 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5780                 break;
5781         default:
5782                 bfa_sm_fault(dconf->bfa, event);
5783         }
5784 }
5785
5786 static void
5787 bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5788                         enum bfa_dconf_event event)
5789 {
5790         bfa_trc(dconf->bfa, event);
5791
5792         switch (event) {
5793         case BFA_DCONF_SM_INIT:
5794                 bfa_timer_start(dconf->bfa, &dconf->timer,
5795                         bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5796                 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5797                 break;
5798         case BFA_DCONF_SM_EXIT:
5799                 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5800                 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5801                 break;
5802         case BFA_DCONF_SM_IOCDISABLE:
5803                 break;
5804         default:
5805                 bfa_sm_fault(dconf->bfa, event);
5806         }
5807 }
5808
5809 /*
5810  * Compute and return memory needed by DRV_CFG module.
5811  */
5812 static void
5813 bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
5814                   struct bfa_s *bfa)
5815 {
5816         struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
5817
5818         if (cfg->drvcfg.min_cfg)
5819                 bfa_mem_kva_setup(meminfo, dconf_kva,
5820                                 sizeof(struct bfa_dconf_hdr_s));
5821         else
5822                 bfa_mem_kva_setup(meminfo, dconf_kva,
5823                                 sizeof(struct bfa_dconf_s));
5824 }
5825
/*
 * Attach-time setup: record identity, carve the dconf buffer out of the
 * module KVA area, and put the state machine in uninit.
 */
static void
bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	dconf->bfad = bfad;
	dconf->bfa = bfa;
	dconf->instance = bfa->ioc.port_id;
	bfa_trc(bfa, dconf->instance);

	/*
	 * Advance the KVA cursor past our buffer; in min-cfg mode only the
	 * header is reserved (must mirror bfa_dconf_meminfo()).
	 */
	dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
	if (cfg->drvcfg.min_cfg) {
		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
		dconf->min_cfg = BFA_TRUE;
	} else {
		dconf->min_cfg = BFA_FALSE;
		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
	}

	/* Flash contents are unknown until the first read completes. */
	bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
	bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
}
5849
/*
 * Completion callback for the initial flash read started from
 * bfa_dconf_sm_uninit(); always notifies IOCFC that dconf init is done.
 */
static void
bfa_dconf_init_cb(void *arg, bfa_status_t status)
{
	struct bfa_s *bfa = arg;
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	/* Move the FSM along first; data validation follows. */
	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
	if (status == BFA_STATUS_OK) {
		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
		/* Stamp/refresh the header so future writes are well-formed. */
		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
			dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
	}
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
}
5866
5867 void
5868 bfa_dconf_modinit(struct bfa_s *bfa)
5869 {
5870         struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5871         bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
5872 }
/* bfa module start hook: dconf needs no work at start time. */
static void
bfa_dconf_start(struct bfa_s *bfa)
{
}
5877
/* bfa module stop hook: nothing to quiesce for dconf. */
static void
bfa_dconf_stop(struct bfa_s *bfa)
{
}
5882
5883 static void bfa_dconf_timer(void *cbarg)
5884 {
5885         struct bfa_dconf_mod_s *dconf = cbarg;
5886         bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
5887 }
5888 static void
5889 bfa_dconf_iocdisable(struct bfa_s *bfa)
5890 {
5891         struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5892         bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
5893 }
5894
/* bfa module detach hook: dconf owns no resources to release here. */
static void
bfa_dconf_detach(struct bfa_s *bfa)
{
}
5899
5900 static bfa_status_t
5901 bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
5902 {
5903         bfa_status_t bfa_status;
5904         bfa_trc(dconf->bfa, 0);
5905
5906         bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
5907                                 BFA_FLASH_PART_DRV, dconf->instance,
5908                                 dconf->dconf,  sizeof(struct bfa_dconf_s), 0,
5909                                 bfa_dconf_cbfn, dconf);
5910         if (bfa_status != BFA_STATUS_OK)
5911                 WARN_ON(bfa_status);
5912         bfa_trc(dconf->bfa, bfa_status);
5913
5914         return bfa_status;
5915 }
5916
5917 bfa_status_t
5918 bfa_dconf_update(struct bfa_s *bfa)
5919 {
5920         struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5921         bfa_trc(dconf->bfa, 0);
5922         if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
5923                 return BFA_STATUS_FAILED;
5924
5925         if (dconf->min_cfg) {
5926                 bfa_trc(dconf->bfa, dconf->min_cfg);
5927                 return BFA_STATUS_FAILED;
5928         }
5929
5930         bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
5931         return BFA_STATUS_OK;
5932 }
5933
5934 static void
5935 bfa_dconf_cbfn(void *arg, bfa_status_t status)
5936 {
5937         struct bfa_dconf_mod_s *dconf = arg;
5938         WARN_ON(status);
5939         bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5940 }
5941
5942 void
5943 bfa_dconf_modexit(struct bfa_s *bfa)
5944 {
5945         struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5946         bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
5947 }