/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"

/**
 * IOC local definitions
 */

/**
 * ASIC-specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)                    \
                        ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)                  \
                        ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)                      \
                        ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_join(__ioc)                        \
                        ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)                       \
                        ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)                         \
                        ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)                    \
                        ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)         \
                        (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
                        readl((__ioc)->ioc_regs.hfn_mbox_cmd))

static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_initfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
                         u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
                                                char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
                                                char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
                                                char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
                                                char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
                                                char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);

/**
 * IOC state machine definitions/declarations
 */
enum ioc_event {
        IOC_E_RESET             = 1,    /*!< IOC reset request          */
        IOC_E_ENABLE            = 2,    /*!< IOC enable request         */
        IOC_E_DISABLE           = 3,    /*!< IOC disable request        */
        IOC_E_DETACH            = 4,    /*!< driver detach cleanup      */
        IOC_E_ENABLED           = 5,    /*!< f/w enabled                */
        IOC_E_FWRSP_GETATTR     = 6,    /*!< IOC get attribute response */
        IOC_E_DISABLED          = 7,    /*!< f/w disabled               */
        IOC_E_INITFAILED        = 8,    /*!< failure notice by iocpf sm */
        IOC_E_PFAILED           = 9,    /*!< failure notice by iocpf sm */
        IOC_E_HBFAIL            = 10,   /*!< heartbeat failure          */
        IOC_E_HWERROR           = 11,   /*!< hardware error interrupt   */
        IOC_E_TIMEOUT           = 12,   /*!< timeout                    */
};
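
/*
 * Typical enable flow, as driven by the state handlers below:
 * IOC_E_RESET -> IOC_E_ENABLE -> IOC_E_ENABLED -> IOC_E_FWRSP_GETATTR
 * lands the IOC in the operational state, while IOC_E_PFAILED and
 * IOC_E_HWERROR divert into the fail/retry states. The driver kicks
 * this off with bfa_fsm_send_event(ioc, IOC_E_RESET) from
 * bfa_nw_ioc_attach(). (Summary only; the handlers are authoritative.)
 */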

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);

static struct bfa_sm_table ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
        {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
        {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
        {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
        {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
        {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
        {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
        {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
        {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};

/**
 * IOCPF state machine definitions/declarations
 */

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);

/**
 * IOCPF state machine events
 */
enum iocpf_event {
        IOCPF_E_ENABLE          = 1,    /*!< IOCPF enable request       */
        IOCPF_E_DISABLE         = 2,    /*!< IOCPF disable request      */
        IOCPF_E_STOP            = 3,    /*!< stop on driver detach      */
        IOCPF_E_FWREADY         = 4,    /*!< f/w initialization done    */
        IOCPF_E_FWRSP_ENABLE    = 5,    /*!< enable f/w response        */
        IOCPF_E_FWRSP_DISABLE   = 6,    /*!< disable f/w response       */
        IOCPF_E_FAIL            = 7,    /*!< failure notice by ioc sm   */
        IOCPF_E_INITFAIL        = 8,    /*!< init fail notice by ioc sm */
        IOCPF_E_GETATTRFAIL     = 9,    /*!< getattr fail notice by ioc sm */
        IOCPF_E_SEMLOCKED       = 10,   /*!< h/w semaphore is locked    */
        IOCPF_E_TIMEOUT         = 11,   /*!< f/w response timeout       */
};
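
/*
 * Typical IOCPF bring-up, per the handlers below: IOCPF_E_ENABLE moves
 * to fwcheck, IOCPF_E_SEMLOCKED (with firmware lock and sync complete)
 * to hwinit, IOCPF_E_FWREADY to enabling, and IOCPF_E_FWRSP_ENABLE to
 * ready. Timeouts and failures funnel through the *_sync states, which
 * re-acquire the h/w semaphore before acting. (Summary only; the
 * handlers are authoritative.)
 */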

/**
 * IOCPF states
 */
enum bfa_iocpf_state {
        BFA_IOCPF_RESET         = 1,    /*!< IOC is in reset state */
        BFA_IOCPF_SEMWAIT       = 2,    /*!< Waiting for IOC h/w semaphore */
        BFA_IOCPF_HWINIT        = 3,    /*!< IOC h/w is being initialized */
        BFA_IOCPF_READY         = 4,    /*!< IOCPF is initialized */
        BFA_IOCPF_INITFAIL      = 5,    /*!< IOCPF failed */
        BFA_IOCPF_FAIL          = 6,    /*!< IOCPF failed */
        BFA_IOCPF_DISABLING     = 7,    /*!< IOCPF is being disabled */
        BFA_IOCPF_DISABLED      = 8,    /*!< IOCPF is disabled */
        BFA_IOCPF_FWMISMATCH    = 9,    /*!< IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);

static struct bfa_sm_table iocpf_sm_table[] = {
        {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
        {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
        {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
        {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
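
/*
 * The *_sm_table arrays pair each state-handler function with its
 * public state value, so the current state (stored internally as a
 * function pointer) can be reported as an enum, presumably via the
 * driver's sm-table lookup helper used elsewhere for status queries.
 */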

/**
 * IOC State Machine
 */

/**
 * Beginning state. IOC uninit state.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}

/**
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_RESET:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
        bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/**
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_disable_comp(ioc);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
        bfa_iocpf_enable(ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFAILED:
                /* !!! fall through !!! */
        case IOC_E_HWERROR:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
                if (event != IOC_E_PFAILED)
                        bfa_iocpf_initfail(ioc);
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_iocpf_stop(ioc);
                break;

        case IOC_E_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * Getattr entry: arm the IOC timer and request IOC attributes
 * from firmware.
 */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
        mod_timer(&ioc->ioc_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        bfa_ioc_send_getattr(ioc);
}

/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_FWRSP_GETATTR:
                del_timer(&ioc->ioc_timer);
                bfa_ioc_check_attr_wwns(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
                break;

        case IOC_E_PFAILED:
        case IOC_E_HWERROR:
                del_timer(&ioc->ioc_timer);
                /* fall through */
        case IOC_E_TIMEOUT:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
                if (event != IOC_E_PFAILED)
                        bfa_iocpf_getattrfail(ioc);
                break;

        case IOC_E_DISABLE:
                del_timer(&ioc->ioc_timer);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
        bfa_ioc_hb_monitor(ioc);
}

static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLE:
                break;

        case IOC_E_DISABLE:
                bfa_ioc_hb_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_PFAILED:
        case IOC_E_HWERROR:
                bfa_ioc_hb_stop(ioc);
                /* !!! fall through !!! */
        case IOC_E_HBFAIL:
                bfa_ioc_fail_notify(ioc);
                if (ioc->iocpf.auto_recover)
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
                else
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

                if (event != IOC_E_PFAILED)
                        bfa_iocpf_fail(ioc);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
        bfa_iocpf_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_DISABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
                break;

        case IOC_E_HWERROR:
                /*
                 * No state change.  Will move to disabled state
                 * after iocpf sm completes failure processing and
                 * moves to disabled state.
                 */
                bfa_iocpf_fail(ioc);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
        bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
                break;

        case IOC_E_DISABLE:
                ioc->cbfn->disable_cbfn(ioc->bfa);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_iocpf_stop(ioc);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}

/**
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFAILED:
        case IOC_E_HWERROR:
                /**
                 * Initialization retry failed.
                 */
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                if (event != IOC_E_PFAILED)
                        bfa_iocpf_initfail(ioc);
                break;

        case IOC_E_INITFAILED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                break;

        case IOC_E_ENABLE:
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_iocpf_stop(ioc);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}

/**
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLE:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_iocpf_stop(ioc);
                break;

        case IOC_E_HWERROR:
                /* HB failure notification, ignore. */
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * IOCPF State Machine
 */

/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
        iocpf->retry_count = 0;
        iocpf->auto_recover = bfa_nw_auto_recover;
}

/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        switch (event) {
        case IOCPF_E_ENABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
                break;

        case IOCPF_E_STOP:
                break;

        default:
                bfa_sm_fault(iocpf->ioc, event);
        }
}

/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
                        if (bfa_ioc_sync_complete(ioc)) {
                                iocpf->retry_count = 0;
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        } else {
                                bfa_ioc_firmware_unlock(ioc);
                                bfa_nw_ioc_hw_sem_release(ioc);
                                mod_timer(&ioc->sem_timer, jiffies +
                                        msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
                        }
                } else {
                        bfa_nw_ioc_hw_sem_release(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
                }
                break;

        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_ioc_pf_disabled(ioc);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * Notify enable completion callback
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
        /* Call only the first time sm enters fwmismatch state. */
        if (iocpf->retry_count == 0)
                bfa_ioc_pf_fwmismatch(iocpf->ioc);

        iocpf->retry_count++;
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
}

/**
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_TIMEOUT:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
                break;

        case IOCPF_E_DISABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_ioc_pf_disabled(ioc);
                break;

        case IOCPF_E_STOP:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_sync_complete(ioc)) {
                        bfa_ioc_sync_join(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                } else {
                        bfa_nw_ioc_hw_sem_release(ioc);
                        mod_timer(&ioc->sem_timer, jiffies +
                                msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
                }
                break;

        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
{
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        bfa_ioc_reset(iocpf->ioc, false);
}

/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_FWREADY:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
                break;

        case IOCPF_E_INITFAIL:
                del_timer(&ioc->iocpf_timer);
                /*
                 * !!! fall through !!!
                 */

        case IOCPF_E_TIMEOUT:
                bfa_nw_ioc_hw_sem_release(ioc);
                if (event == IOCPF_E_TIMEOUT)
                        bfa_ioc_pf_failed(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_DISABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_ioc_sync_leave(ioc);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        bfa_ioc_send_enable(iocpf->ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_FWRSP_ENABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
                break;

        case IOCPF_E_INITFAIL:
                del_timer(&ioc->iocpf_timer);
                /*
                 * !!! fall through !!!
                 */
        case IOCPF_E_TIMEOUT:
                bfa_nw_ioc_hw_sem_release(ioc);
                if (event == IOCPF_E_TIMEOUT)
                        bfa_ioc_pf_failed(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_DISABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        case IOCPF_E_FWREADY:
                bfa_ioc_send_enable(ioc);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static bool
bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
{
        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_pf_enabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        case IOCPF_E_GETATTRFAIL:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_FAIL:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
                break;

        case IOCPF_E_FWREADY:
                bfa_ioc_pf_failed(ioc);
                if (bfa_nw_ioc_is_operational(ioc))
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
                else
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        bfa_ioc_send_disable(iocpf->ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_FWRSP_DISABLE:
        case IOCPF_E_FWREADY:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                del_timer(&ioc->iocpf_timer);
                /*
                 * !!! fall through !!!
                 */

        case IOCPF_E_TIMEOUT:
                writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FWRSP_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_sync_leave(ioc);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_pf_disabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_ENABLE:
                iocpf->retry_count = 0;
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_notify_fail(ioc);
                bfa_ioc_sync_ack(ioc);
                iocpf->retry_count++;
                if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
                        bfa_ioc_sync_leave(ioc);
                        bfa_nw_ioc_hw_sem_release(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
                } else {
                        if (bfa_ioc_sync_complete(ioc))
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        else {
                                bfa_nw_ioc_hw_sem_release(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                        }
                }
                break;

        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_pf_initfailed(iocpf->ioc);
}

/**
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
        /**
         * Mark IOC as failed in hardware and stop firmware.
         */
        bfa_ioc_lpu_stop(iocpf->ioc);

        /**
         * Flush any queued up mailbox requests.
         */
        bfa_ioc_mbox_hbfail(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                iocpf->retry_count = 0;
                bfa_ioc_sync_ack(ioc);
                bfa_ioc_notify_fail(ioc);
                if (!iocpf->auto_recover) {
                        bfa_ioc_sync_leave(ioc);
                        bfa_nw_ioc_hw_sem_release(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                } else {
                        if (bfa_ioc_sync_complete(ioc))
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        else {
                                bfa_nw_ioc_hw_sem_release(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                        }
                }
                break;

        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}

/**
 * @brief
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        default:
                bfa_sm_fault(iocpf->ioc, event);
        }
}

/**
 * BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
        struct list_head                        *qe;
        struct bfa_ioc_hbfail_notify *notify;

        ioc->cbfn->disable_cbfn(ioc->bfa);

        /**
         * Notify common modules registered for notification.
         */
        list_for_each(qe, &ioc->hb_notify_q) {
                notify = (struct bfa_ioc_hbfail_notify *) qe;
                notify->cbfn(notify->cbarg);
        }
}

bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
        u32 r32;
        int cnt = 0;
#define BFA_SEM_SPINCNT 3000

        r32 = readl(sem_reg);

        while (r32 && (cnt < BFA_SEM_SPINCNT)) {
                cnt++;
                udelay(2);
                r32 = readl(sem_reg);
        }

        if (r32 == 0)
                return true;

        BUG_ON(!(cnt < BFA_SEM_SPINCNT));
        return false;
}
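
/*
 * Note: with BFA_SEM_SPINCNT == 3000 and udelay(2), the busy-wait in
 * bfa_nw_ioc_sem_get() bounds acquisition at roughly 6 ms before the
 * BUG_ON() fires; a non-zero read means the semaphore is still held
 * by another function.
 */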

void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
        writel(1, sem_reg);
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
        u32     r32;

        /**
         * First read to the semaphore register will return 0, subsequent reads
         * will return 1. Semaphore is released by writing 1 to the register
         */
        r32 = readl(ioc->ioc_regs.ioc_sem_reg);
        if (r32 == 0) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
                return;
        }

        mod_timer(&ioc->sem_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}

void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
        writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
        del_timer(&ioc->sem_timer);
}

/**
 * @brief
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
        u32     pss_ctl;
        int             i;
#define PSS_LMEM_INIT_TIME  10000

        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LMEM_RESET;
        pss_ctl |= __PSS_LMEM_INIT_EN;

        /*
         * i2c workaround 12.5khz clock
         */
        pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

        /**
         * wait for memory initialization to be complete
         */
        i = 0;
        do {
                pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
                i++;
        } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

        /**
         * If memory initialization is not successful, IOC timeout will catch
         * such failures.
         */
        BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

        pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
        u32     pss_ctl;

        /**
         * Take processor out of reset.
         */
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LPU0_RESET;

        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
        u32     pss_ctl;

        /**
         * Put processors in reset.
         */
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/**
 * Get driver and firmware versions.
 */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
        u32     pgnum;
        u32     loff = 0;
        int             i;
        u32     *fwsig = (u32 *) fwhdr;

        pgnum = bfa_ioc_smem_pgnum(ioc, loff);
        writel(pgnum, ioc->ioc_regs.host_page_num_fn);

        for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
             i++) {
                fwsig[i] =
                        swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
                loff += sizeof(u32);
        }
}

/**
 * Returns TRUE if same.
 */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
        struct bfi_ioc_image_hdr *drv_fwhdr;
        int i;

        drv_fwhdr = (struct bfi_ioc_image_hdr *)
                bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

        for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
                if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
                        return false;
        }

        return true;
}

/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
{
        struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;

        bfa_nw_ioc_fwver_get(ioc, &fwhdr);
        drv_fwhdr = (struct bfi_ioc_image_hdr *)
                bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

        if (fwhdr.signature != drv_fwhdr->signature)
                return false;

        if (fwhdr.exec != drv_fwhdr->exec)
                return false;

        return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}

/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
        u32     r32;

        r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
        if (r32)
                writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

/**
 * @img ioc_init_logic.jpg
 */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
        enum bfi_ioc_state ioc_fwstate;
        bool fwvalid;

        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

        if (force)
                ioc_fwstate = BFI_IOC_UNINIT;

        /**
         * check if firmware is valid
         */
        fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
                false : bfa_ioc_fwver_valid(ioc);

        if (!fwvalid) {
                bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
                return;
        }

        /**
         * If hardware initialization is in progress (initialized by other IOC),
         * just wait for an initialization completion interrupt.
         */
        if (ioc_fwstate == BFI_IOC_INITING) {
                ioc->cbfn->reset_cbfn(ioc->bfa);
                return;
        }

        /**
         * If IOC function is disabled and firmware version is same,
         * just re-enable IOC.
         */
        if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
                /**
                 * When using MSI-X any pending firmware ready event should
                 * be flushed. Otherwise MSI-X interrupts are not delivered.
                 */
                bfa_ioc_msgflush(ioc);
                ioc->cbfn->reset_cbfn(ioc->bfa);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
                return;
        }

        /**
         * Initialize the h/w for any other states.
         */
        bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
}
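
/*
 * hwinit decision summary: invalid/UNINIT firmware -> full boot;
 * BFI_IOC_INITING -> another function is initializing, wait for its
 * completion interrupt; DISABLED/OP with a matching firmware image ->
 * flush stale mailbox events and replay IOCPF_E_FWREADY; anything
 * else -> full boot.
 */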

void
bfa_nw_ioc_timeout(void *ioc_arg)
{
        struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

        bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
        u32 *msgp = (u32 *) ioc_msg;
        u32 i;

        BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

        /*
         * first write msg to mailbox registers
         */
        for (i = 0; i < len / sizeof(u32); i++)
                writel(cpu_to_le32(msgp[i]),
                              ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

        for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
                writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

        /*
         * write 1 to mailbox CMD to trigger LPU event
         */
        writel(1, ioc->ioc_regs.hfn_mbox_cmd);
        (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
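
/*
 * Mailbox doorbell protocol, as implemented above: the message is
 * copied into the hfn mailbox registers, the remainder of the
 * BFI_IOC_MSGLEN_MAX window is zeroed, and writing 1 to hfn_mbox_cmd
 * raises the LPU event; the trailing readl() flushes the posted
 * writes to the adapter.
 */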

static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
        struct bfi_ioc_ctrl_req enable_req;
        struct timeval tv;

        bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
                    bfa_ioc_portid(ioc));
        enable_req.ioc_class = ioc->ioc_mc;
        do_gettimeofday(&tv);
        enable_req.tv_sec = ntohl(tv.tv_sec);
        bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
        struct bfi_ioc_ctrl_req disable_req;

        bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
                    bfa_ioc_portid(ioc));
        bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
        struct bfi_ioc_getattr_req attr_req;

        bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
                    bfa_ioc_portid(ioc));
        bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
        bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

void
bfa_nw_ioc_hb_check(void *cbarg)
{
        struct bfa_ioc *ioc = cbarg;
        u32     hb_count;

        hb_count = readl(ioc->ioc_regs.heartbeat);
        if (ioc->hb_count == hb_count) {
                bfa_ioc_recover(ioc);
                return;
        } else {
                ioc->hb_count = hb_count;
        }

        bfa_ioc_mbox_poll(ioc);
        mod_timer(&ioc->hb_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
        ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
        mod_timer(&ioc->hb_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
        del_timer(&ioc->hb_timer);
}
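
/*
 * Heartbeat scheme: bfa_ioc_hb_monitor() latches the current firmware
 * heartbeat counter and arms hb_timer; every BFA_IOC_HB_TOV,
 * bfa_nw_ioc_hb_check() expects the counter to have advanced. A
 * stalled counter triggers bfa_ioc_recover(); otherwise pending
 * mailbox commands are polled and the timer is re-armed.
 */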

/**
 * @brief
 *      Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
                    u32 boot_param)
{
        u32 *fwimg;
        u32 pgnum;
        u32 loff = 0;
        u32 chunkno = 0;
        u32 i;

        /**
         * Initialize LMEM first before code download
         */
        bfa_ioc_lmem_init(ioc);

        fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

        pgnum = bfa_ioc_smem_pgnum(ioc, loff);

        writel(pgnum, ioc->ioc_regs.host_page_num_fn);

        for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
                if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
                        chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
                        fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
                                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
                }

                /**
                 * write smem
                 */
                writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
                              ((ioc->ioc_regs.smem_page_start) + (loff)));

                loff += sizeof(u32);

                /**
                 * handle page offset wrap around
                 */
                loff = PSS_SMEM_PGOFF(loff);
                if (loff == 0) {
                        pgnum++;
                        writel(pgnum,
                                      ioc->ioc_regs.host_page_num_fn);
                }
        }

        writel(bfa_ioc_smem_pgnum(ioc, 0),
                      ioc->ioc_regs.host_page_num_fn);

        /*
         * Set boot type and boot param at the end. swab32(swab32(x))
         * is an identity, so the values are written as-is.
         */
        writel(boot_type, ((ioc->ioc_regs.smem_page_start)
                        + (BFI_BOOT_TYPE_OFF)));
        writel(boot_param, ((ioc->ioc_regs.smem_page_start)
                        + (BFI_BOOT_PARAM_OFF)));
}
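
/*
 * The download loop above streams the image through a paged window
 * into smem: loff is masked by PSS_SMEM_PGOFF() so that a wrap to 0
 * advances host_page_num_fn to the next page, and each word is
 * byte-swapped with swab32() on its way into adapter memory.
 */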
1564
1565 static void
1566 bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
1567 {
1568         bfa_ioc_hwinit(ioc, force);
1569 }
1570
1571 /**
1572  * @brief
1573  * Update BFA configuration from firmware configuration.
1574  */
1575 static void
1576 bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1577 {
1578         struct bfi_ioc_attr *attr = ioc->attr;
1579
1580         attr->adapter_prop  = ntohl(attr->adapter_prop);
1581         attr->card_type     = ntohl(attr->card_type);
1582         attr->maxfrsize     = ntohs(attr->maxfrsize);
1583
1584         bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1585 }
1586
1587 /**
1588  * Attach time initialization of mbox logic.
1589  */
1590 static void
1591 bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1592 {
1593         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1594         int     mc;
1595
1596         INIT_LIST_HEAD(&mod->cmd_q);
1597         for (mc = 0; mc < BFI_MC_MAX; mc++) {
1598                 mod->mbhdlr[mc].cbfn = NULL;
1599                 mod->mbhdlr[mc].cbarg = ioc->bfa;
1600         }
1601 }
1602
1603 /**
1604  * Mbox poll timer -- restarts any pending mailbox requests.
1605  */
1606 static void
1607 bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1608 {
1609         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1610         struct bfa_mbox_cmd *cmd;
1611         u32                     stat;
1612
1613         /**
1614          * If no command pending, do nothing
1615          */
1616         if (list_empty(&mod->cmd_q))
1617                 return;
1618
1619         /**
1620          * If previous command is not yet fetched by firmware, do nothing
1621          */
1622         stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1623         if (stat)
1624                 return;
1625
1626         /**
1627          * Enqueue command to firmware.
1628          */
1629         bfa_q_deq(&mod->cmd_q, &cmd);
1630         bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1631 }
1632
1633 /**
1634  * Cleanup any pending requests.
1635  */
1636 static void
1637 bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
1638 {
1639         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1640         struct bfa_mbox_cmd *cmd;
1641
1642         while (!list_empty(&mod->cmd_q))
1643                 bfa_q_deq(&mod->cmd_q, &cmd);
1644 }
1645
1646 static void
1647 bfa_ioc_fail_notify(struct bfa_ioc *ioc)
1648 {
1649         struct list_head                *qe;
1650         struct bfa_ioc_hbfail_notify    *notify;
1651
1652         /**
1653          * Notify driver and common modules registered for notification.
1654          */
1655         ioc->cbfn->hbfail_cbfn(ioc->bfa);
1656         list_for_each(qe, &ioc->hb_notify_q) {
1657                 notify = (struct bfa_ioc_hbfail_notify *) qe;
1658                 notify->cbfn(notify->cbarg);
1659         }
1660 }
1661
1662 static void
1663 bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
1664 {
1665         bfa_fsm_send_event(ioc, IOC_E_ENABLED);
1666 }
1667
1668 static void
1669 bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
1670 {
1671         bfa_fsm_send_event(ioc, IOC_E_DISABLED);
1672 }
1673
1674 static void
1675 bfa_ioc_pf_initfailed(struct bfa_ioc *ioc)
1676 {
1677         bfa_fsm_send_event(ioc, IOC_E_INITFAILED);
1678 }
1679
1680 static void
1681 bfa_ioc_pf_failed(struct bfa_ioc *ioc)
1682 {
1683         bfa_fsm_send_event(ioc, IOC_E_PFAILED);
1684 }
1685
1686 static void
1687 bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
1688 {
1689         /**
1690          * Provide enable completion callback and AEN notification.
1691          */
1692         ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1693 }
1694
1695 /**
1696  * IOC public
1697  */
1698 static enum bfa_status
1699 bfa_ioc_pll_init(struct bfa_ioc *ioc)
1700 {
1701         /*
1702          *  Hold semaphore so that nobody can access the chip during init.
1703          */
1704         bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1705
1706         bfa_ioc_pll_init_asic(ioc);
1707
1708         ioc->pllinit = true;
1709         /*
1710          *  release semaphore.
1711          */
1712         bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1713
1714         return BFA_STATUS_OK;
1715 }
1716
1717 /**
1718  * Interface used by diag module to do firmware boot with memory test
1719  * as the entry vector.
1720  */
1721 static void
1722 bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
1723 {
1724         void __iomem *rb;
1725
1726         bfa_ioc_stats(ioc, ioc_boots);
1727
1728         if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1729                 return;
1730
1731         /**
1732          * Initialize IOC state of all functions on a chip reset.
1733          */
1734         rb = ioc->pcidev.pci_bar_kva;
1735         if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
1736                 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
1737                 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
1738         } else {
1739                 writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
1740                 writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
1741         }
1742
1743         bfa_ioc_msgflush(ioc);
1744         bfa_ioc_download_fw(ioc, boot_type, boot_param);
1745
1746         /**
1747          * Enable interrupts just before starting LPU
1748          */
1749         ioc->cbfn->reset_cbfn(ioc->bfa);
1750         bfa_ioc_lpu_start(ioc);
1751 }
1752
1753 /**
1754  * Enable/disable IOC failure auto recovery.
1755  */
1756 void
1757 bfa_nw_ioc_auto_recover(bool auto_recover)
1758 {
1759         bfa_nw_auto_recover = auto_recover;
1760 }
1761
1762 static void
1763 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
1764 {
1765         u32     *msgp = mbmsg;
1766         u32     r32;
1767         int             i;
1768
1769         /**
1770          * read the MBOX msg
1771          */
1772         for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
1773              i++) {
1774                 r32 = readl(ioc->ioc_regs.lpu_mbox +
1775                                    i * sizeof(u32));
1776                 msgp[i] = htonl(r32);
1777         }
1778
1779         /**
1780          * turn off mailbox interrupt by clearing mailbox status
1781          */
1782         writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1783         readl(ioc->ioc_regs.lpu_mbox_cmd);
1784 }
1785
1786 static void
1787 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
1788 {
1789         union bfi_ioc_i2h_msg_u *msg;
1790         struct bfa_iocpf *iocpf = &ioc->iocpf;
1791
1792         msg = (union bfi_ioc_i2h_msg_u *) m;
1793
1794         bfa_ioc_stats(ioc, ioc_isrs);
1795
1796         switch (msg->mh.msg_id) {
1797         case BFI_IOC_I2H_HBEAT:
1798                 break;
1799
1800         case BFI_IOC_I2H_READY_EVENT:
1801                 bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
1802                 break;
1803
1804         case BFI_IOC_I2H_ENABLE_REPLY:
1805                 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1806                 break;
1807
1808         case BFI_IOC_I2H_DISABLE_REPLY:
1809                 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
1810                 break;
1811
1812         case BFI_IOC_I2H_GETATTR_REPLY:
1813                 bfa_ioc_getattr_reply(ioc);
1814                 break;
1815
1816         default:
1817                 BUG_ON(1);
1818         }
1819 }
1820
1821 /**
1822  * IOC attach time initialization and setup.
1823  *
1824  * @param[in]   ioc     memory for IOC
1825  * @param[in]   bfa     driver instance structure
1826  */
1827 void
1828 bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
1829 {
1830         ioc->bfa        = bfa;
1831         ioc->cbfn       = cbfn;
1832         ioc->fcmode     = false;
1833         ioc->pllinit    = false;
1834         ioc->dbg_fwsave_once = true;
1835         ioc->iocpf.ioc  = ioc;
1836
1837         bfa_ioc_mbox_attach(ioc);
1838         INIT_LIST_HEAD(&ioc->hb_notify_q);
1839
1840         bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
1841         bfa_fsm_send_event(ioc, IOC_E_RESET);
1842 }
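
/*
 * Usage sketch (illustrative only, not part of the driver): the typical
 * bring-up order a caller would follow with this public API. The names
 * bfa, cbfn, pcidev, dma_kva and dma_pa are assumptions for the example;
 * bfa_nw_ioc_pci_init(), bfa_nw_ioc_mem_claim() and bfa_nw_ioc_enable()
 * are defined below.
 */
#if 0
	bfa_nw_ioc_attach(ioc, bfa, &cbfn);		/* bind callbacks, reset FSM */
	bfa_nw_ioc_pci_init(ioc, &pcidev, BFI_MC_LL);	/* map port, init registers */
	bfa_nw_ioc_mem_claim(ioc, dma_kva, dma_pa);	/* attribute DMA memory */
	bfa_nw_ioc_enable(ioc);				/* send IOC_E_ENABLE */
#endif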
1843
1844 /**
1845  * Driver detach time IOC cleanup.
1846  */
1847 void
1848 bfa_nw_ioc_detach(struct bfa_ioc *ioc)
1849 {
1850         bfa_fsm_send_event(ioc, IOC_E_DETACH);
1851 }
1852
1853 /**
1854  * Setup IOC PCI properties.
1855  *
1856  * @param[in]   pcidev  PCI device information for this IOC
1857  */
1858 void
1859 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
1860                  enum bfi_mclass mc)
1861 {
1862         ioc->ioc_mc     = mc;
1863         ioc->pcidev     = *pcidev;
1864         ioc->ctdev      = bfa_asic_id_ct(ioc->pcidev.device_id);
1865         ioc->cna        = ioc->ctdev && !ioc->fcmode;
1866
1867         bfa_nw_ioc_set_ct_hwif(ioc);
1868
1869         bfa_ioc_map_port(ioc);
1870         bfa_ioc_reg_init(ioc);
1871 }
1872
1873 /**
1874  * Initialize IOC dma memory
1875  *
1876  * @param[in]   dm_kva  kernel virtual address of IOC dma memory
1877  * @param[in]   dm_pa   physical address of IOC dma memory
1878  */
1879 void
1880 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa)
1881 {
1882         /**
1883          * dma memory for firmware attribute
1884          */
1885         ioc->attr_dma.kva = dm_kva;
1886         ioc->attr_dma.pa = dm_pa;
1887         ioc->attr = (struct bfi_ioc_attr *) dm_kva;
1888 }
1889
1890 /**
1891  * Return size of dma memory required.
1892  */
1893 u32
1894 bfa_nw_ioc_meminfo(void)
1895 {
1896         return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
1897 }
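
/*
 * Sketch of sizing and supplying the attribute DMA area (assumes a
 * hypothetical caller holding a struct pci_dev *pdev; not taken from
 * this driver):
 */
#if 0
	dma_addr_t dma_pa;
	u8 *dma_kva = dma_alloc_coherent(&pdev->dev, bfa_nw_ioc_meminfo(),
					 &dma_pa, GFP_KERNEL);

	if (dma_kva)
		bfa_nw_ioc_mem_claim(ioc, dma_kva, dma_pa);
#endif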
1898
1899 void
1900 bfa_nw_ioc_enable(struct bfa_ioc *ioc)
1901 {
1902         bfa_ioc_stats(ioc, ioc_enables);
1903         ioc->dbg_fwsave_once = true;
1904
1905         bfa_fsm_send_event(ioc, IOC_E_ENABLE);
1906 }
1907
1908 void
1909 bfa_nw_ioc_disable(struct bfa_ioc *ioc)
1910 {
1911         bfa_ioc_stats(ioc, ioc_disables);
1912         bfa_fsm_send_event(ioc, IOC_E_DISABLE);
1913 }
1914
1915 static u32
1916 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
1917 {
1918         return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
1919 }
1920
1921 static u32
1922 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
1923 {
1924         return PSS_SMEM_PGOFF(fmaddr);
1925 }
1926
1927 /**
1928  * Register a mailbox message handler; used by common modules.
1929  */
1930 void
1931 bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
1932                     bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
1933 {
1934         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1935
1936         mod->mbhdlr[mc].cbfn    = cbfn;
1937         mod->mbhdlr[mc].cbarg = cbarg;
1938 }
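
/*
 * Example registration by a hypothetical common module. The handler,
 * my_dev and my_module_attach() are illustrative names, and BFI_MC_CEE
 * stands in for whatever message class the module owns:
 */
#if 0
static void my_msghandler(void *cbarg, struct bfi_mbmsg *m)
{
	/* decode m->mh.msg_id and handle the firmware reply */
}

static void my_module_attach(struct bfa_ioc *ioc, void *my_dev)
{
	bfa_nw_ioc_mbox_regisr(ioc, BFI_MC_CEE, my_msghandler, my_dev);
}
#endif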
1939
1940 /**
1941  * Queue a mailbox command request to firmware. The command is queued if
1942  * the mailbox is busy; it is the caller's responsibility to serialize.
1943  *
1944  * @param[in]   ioc     IOC instance
1945  * @param[in]   cmd     Mailbox command
1946  */
1947 void
1948 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
1949 {
1950         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1951         u32                     stat;
1952
1953         /**
1954          * If a previous command is pending, queue new command
1955          */
1956         if (!list_empty(&mod->cmd_q)) {
1957                 list_add_tail(&cmd->qe, &mod->cmd_q);
1958                 return;
1959         }
1960
1961         /**
1962          * If mailbox is busy, queue command for poll timer
1963          */
1964         stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1965         if (stat) {
1966                 list_add_tail(&cmd->qe, &mod->cmd_q);
1967                 return;
1968         }
1969
1970         /**
1971          * mailbox is free -- queue command to firmware
1972          */
1973         bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1974 }
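
/*
 * Caller-side sketch: because the command may sit on cmd_q until the
 * mailbox frees up, cmd must remain valid after this call returns, so
 * it normally lives in the caller's own state rather than on the stack.
 * my_dev and my_req are illustrative names.
 */
#if 0
	struct bfa_mbox_cmd *cmd = &my_dev->mbox_cmd;	/* persistent storage */

	memcpy(cmd->msg, &my_req, sizeof(my_req));	/* BFI message per bfi.h */
	bfa_nw_ioc_mbox_queue(ioc, cmd);	/* sent now, or queued for the poll timer */
#endif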
1975
1976 /**
1977  * Handle mailbox interrupts
1978  */
1979 void
1980 bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
1981 {
1982         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1983         struct bfi_mbmsg m;
1984         int                             mc;
1985
1986         bfa_ioc_msgget(ioc, &m);
1987
1988         /**
1989          * Treat IOC message class as special.
1990          */
1991         mc = m.mh.msg_class;
1992         if (mc == BFI_MC_IOC) {
1993                 bfa_ioc_isr(ioc, &m);
1994                 return;
1995         }
1996
1997         if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
1998                 return;
1999
2000         mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2001 }
2002
2003 void
2004 bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
2005 {
2006         bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2007 }
2008
2009 /**
2010  * Add to IOC heartbeat failure notification queue. To be used by common
2011  * modules such as cee, port, diag.
2012  */
2013 void
2014 bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
2015                         struct bfa_ioc_hbfail_notify *notify)
2016 {
2017         list_add_tail(&notify->qe, &ioc->hb_notify_q);
2018 }
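
/*
 * Example (hypothetical module): the notify structure is linked into
 * hb_notify_q by reference, so it must outlive the registration. The
 * callback, my_dev and my_module_init() are illustrative names.
 */
#if 0
static struct bfa_ioc_hbfail_notify my_notify;

static void my_hbfail_cb(void *cbarg)
{
	/* quiesce the module: the IOC heartbeat is gone */
}

static void my_module_init(struct bfa_ioc *ioc, void *my_dev)
{
	my_notify.cbfn = my_hbfail_cb;
	my_notify.cbarg = my_dev;
	bfa_nw_ioc_hbfail_register(ioc, &my_notify);
}
#endif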
2019
2020 #define BFA_MFG_NAME "Brocade"
2021 static void
2022 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
2023                          struct bfa_adapter_attr *ad_attr)
2024 {
2025         struct bfi_ioc_attr *ioc_attr;
2026
2027         ioc_attr = ioc->attr;
2028
2029         bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2030         bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2031         bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2032         bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2033         memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2034                       sizeof(struct bfa_mfg_vpd));
2035
2036         ad_attr->nports = bfa_ioc_get_nports(ioc);
2037         ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2038
2039         bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2040         /* For now, model descr uses same model string */
2041         bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2042
2043         ad_attr->card_type = ioc_attr->card_type;
2044         ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2045
2046         if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2047                 ad_attr->prototype = 1;
2048         else
2049                 ad_attr->prototype = 0;
2050
2051         ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2052         ad_attr->mac  = bfa_nw_ioc_get_mac(ioc);
2053
2054         ad_attr->pcie_gen = ioc_attr->pcie_gen;
2055         ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2056         ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2057         ad_attr->asic_rev = ioc_attr->asic_rev;
2058
2059         bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2060
2061         ad_attr->cna_capable = ioc->cna;
2062         ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
2063 }
2064
2065 static enum bfa_ioc_type
2066 bfa_ioc_get_type(struct bfa_ioc *ioc)
2067 {
2068         if (!ioc->ctdev || ioc->fcmode)
2069                 return BFA_IOC_TYPE_FC;
2070         else if (ioc->ioc_mc == BFI_MC_IOCFC)
2071                 return BFA_IOC_TYPE_FCoE;
2072         else if (ioc->ioc_mc == BFI_MC_LL)
2073                 return BFA_IOC_TYPE_LL;
2074         else {
2075                 BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
2076                 return BFA_IOC_TYPE_LL;
2077         }
2078 }
2079
2080 static void
2081 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
2082 {
2083         memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2084         memcpy(serial_num,
2085                         (void *)ioc->attr->brcd_serialnum,
2086                         BFA_ADAPTER_SERIAL_NUM_LEN);
2087 }
2088
2089 static void
2090 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
2091 {
2092         memset(fw_ver, 0, BFA_VERSION_LEN);
2093         memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2094 }
2095
2096 static void
2097 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
2098 {
2099         BUG_ON(!(chip_rev));
2100
2101         memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2102
2103         chip_rev[0] = 'R';
2104         chip_rev[1] = 'e';
2105         chip_rev[2] = 'v';
2106         chip_rev[3] = '-';
2107         chip_rev[4] = ioc->attr->asic_rev;
2108         chip_rev[5] = '\0';
2109 }
2110
2111 static void
2112 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
2113 {
2114         memset(optrom_ver, 0, BFA_VERSION_LEN);
2115         memcpy(optrom_ver, ioc->attr->optrom_version,
2116                       BFA_VERSION_LEN);
2117 }
2118
2119 static void
2120 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
2121 {
2122         memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2123         memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2124 }
2125
2126 static void
2127 bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
2128 {
2129         struct bfi_ioc_attr *ioc_attr;
2130
2131         BUG_ON(!(model));
2132         memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2133
2134         ioc_attr = ioc->attr;
2135
2136         /**
2137          * model name
2138          */
2139         snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2140                 BFA_MFG_NAME, ioc_attr->card_type);
2141 }
2142
2143 static enum bfa_ioc_state
2144 bfa_ioc_get_state(struct bfa_ioc *ioc)
2145 {
2146         enum bfa_iocpf_state iocpf_st;
2147         enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2148
2149         if (ioc_st == BFA_IOC_ENABLING ||
2150                 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2151
2152                 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2153
2154                 switch (iocpf_st) {
2155                 case BFA_IOCPF_SEMWAIT:
2156                         ioc_st = BFA_IOC_SEMWAIT;
2157                         break;
2158
2159                 case BFA_IOCPF_HWINIT:
2160                         ioc_st = BFA_IOC_HWINIT;
2161                         break;
2162
2163                 case BFA_IOCPF_FWMISMATCH:
2164                         ioc_st = BFA_IOC_FWMISMATCH;
2165                         break;
2166
2167                 case BFA_IOCPF_FAIL:
2168                         ioc_st = BFA_IOC_FAIL;
2169                         break;
2170
2171                 case BFA_IOCPF_INITFAIL:
2172                         ioc_st = BFA_IOC_INITFAIL;
2173                         break;
2174
2175                 default:
2176                         break;
2177                 }
2178         }
2179         return ioc_st;
2180 }
2181
2182 void
2183 bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2184 {
2185         memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
2186
2187         ioc_attr->state = bfa_ioc_get_state(ioc);
2188         ioc_attr->port_id = ioc->port_id;
2189
2190         ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2191
2192         bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2193
2194         ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2195         ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2196         bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2197 }
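
/*
 * Query sketch: struct bfa_ioc_attr is sizable, so a heap allocation is
 * assumed here rather than a stack variable.
 */
#if 0
	struct bfa_ioc_attr *attr = kzalloc(sizeof(*attr), GFP_KERNEL);

	if (attr) {
		bfa_nw_ioc_get_attr(ioc, attr);
		pr_info("IOC state %d type %d\n", attr->state, attr->ioc_type);
		kfree(attr);
	}
#endif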
2198
2199 /**
2200  * WWN public
2201  */
2202 static u64
2203 bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
2204 {
2205         return ioc->attr->pwwn;
2206 }
2207
2208 mac_t
2209 bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
2210 {
2211         return ioc->attr->mac;
2212 }
2213
2214 /**
2215  * Firmware failure detected. Start recovery actions.
2216  */
2217 static void
2218 bfa_ioc_recover(struct bfa_ioc *ioc)
2219 {
2220         pr_crit("Heart Beat of IOC has failed\n");
2221         bfa_ioc_stats(ioc, ioc_hbfails);
2222         bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2223 }
2224
2225 static void
2226 bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
2227 {
2228         if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2229                 return;
2230 }
2231
2232 /**
2233  * @dg hal_iocpf_pvt BFA IOC PF private functions
2234  * @{
2235  */
2236
2237 static void
2238 bfa_iocpf_enable(struct bfa_ioc *ioc)
2239 {
2240         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2241 }
2242
2243 static void
2244 bfa_iocpf_disable(struct bfa_ioc *ioc)
2245 {
2246         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2247 }
2248
2249 static void
2250 bfa_iocpf_fail(struct bfa_ioc *ioc)
2251 {
2252         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2253 }
2254
2255 static void
2256 bfa_iocpf_initfail(struct bfa_ioc *ioc)
2257 {
2258         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2259 }
2260
2261 static void
2262 bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
2263 {
2264         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2265 }
2266
2267 static void
2268 bfa_iocpf_stop(struct bfa_ioc *ioc)
2269 {
2270         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2271 }
2272
2273 void
2274 bfa_nw_iocpf_timeout(void *ioc_arg)
2275 {
2276         struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;
2277
2278         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2279 }
2280
2281 void
2282 bfa_nw_iocpf_sem_timeout(void *ioc_arg)
2283 {
2284         struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;
2285
2286         bfa_ioc_hw_sem_get(ioc);
2287 }