drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
1 /*
2  * QLogic qlcnic NIC Driver
3  * Copyright (c) 2009-2013 QLogic Corporation
4  *
5  * See LICENSE.qlcnic for copyright and licensing details.
6  */
7
8 #include "qlcnic_sriov.h"
9 #include "qlcnic.h"
10 #include "qlcnic_83xx_hw.h"
11 #include <linux/types.h>
12
13 #define QLC_BC_COMMAND  0
14 #define QLC_BC_RESPONSE 1
15
16 #define QLC_MBOX_RESP_TIMEOUT           (10 * HZ)
17 #define QLC_MBOX_CH_FREE_TIMEOUT        (10 * HZ)
18
19 #define QLC_BC_MSG              0
20 #define QLC_BC_CFREE            1
21 #define QLC_BC_FLR              2
22 #define QLC_BC_HDR_SZ           16
23 #define QLC_BC_PAYLOAD_SZ       (1024 - QLC_BC_HDR_SZ)
24
25 #define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF            2048
26 #define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF      512
27
28 #define QLC_83XX_VF_RESET_FAIL_THRESH   8
29 #define QLC_BC_CMD_MAX_RETRY_CNT        5
30
31 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
32 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
33 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
34 static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
35 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
36 static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
37                                   struct qlcnic_cmd_args *);
38 static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
39
40 static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
41         .read_crb                       = qlcnic_83xx_read_crb,
42         .write_crb                      = qlcnic_83xx_write_crb,
43         .read_reg                       = qlcnic_83xx_rd_reg_indirect,
44         .write_reg                      = qlcnic_83xx_wrt_reg_indirect,
45         .get_mac_address                = qlcnic_83xx_get_mac_address,
46         .setup_intr                     = qlcnic_83xx_setup_intr,
47         .alloc_mbx_args                 = qlcnic_83xx_alloc_mbx_args,
48         .mbx_cmd                        = qlcnic_sriov_issue_cmd,
49         .get_func_no                    = qlcnic_83xx_get_func_no,
50         .api_lock                       = qlcnic_83xx_cam_lock,
51         .api_unlock                     = qlcnic_83xx_cam_unlock,
52         .process_lb_rcv_ring_diag       = qlcnic_83xx_process_rcv_ring_diag,
53         .create_rx_ctx                  = qlcnic_83xx_create_rx_ctx,
54         .create_tx_ctx                  = qlcnic_83xx_create_tx_ctx,
55         .del_rx_ctx                     = qlcnic_83xx_del_rx_ctx,
56         .del_tx_ctx                     = qlcnic_83xx_del_tx_ctx,
57         .setup_link_event               = qlcnic_83xx_setup_link_event,
58         .get_nic_info                   = qlcnic_83xx_get_nic_info,
59         .get_pci_info                   = qlcnic_83xx_get_pci_info,
60         .set_nic_info                   = qlcnic_83xx_set_nic_info,
61         .change_macvlan                 = qlcnic_83xx_sre_macaddr_change,
62         .napi_enable                    = qlcnic_83xx_napi_enable,
63         .napi_disable                   = qlcnic_83xx_napi_disable,
64         .config_intr_coal               = qlcnic_83xx_config_intr_coal,
65         .config_rss                     = qlcnic_83xx_config_rss,
66         .config_hw_lro                  = qlcnic_83xx_config_hw_lro,
67         .config_promisc_mode            = qlcnic_83xx_nic_set_promisc,
68         .change_l2_filter               = qlcnic_83xx_change_l2_filter,
69         .get_board_info                 = qlcnic_83xx_get_port_info,
70         .free_mac_list                  = qlcnic_sriov_vf_free_mac_list,
71 };
72
73 static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
74         .config_bridged_mode    = qlcnic_config_bridged_mode,
75         .config_led             = qlcnic_config_led,
76         .cancel_idc_work        = qlcnic_sriov_vf_cancel_fw_work,
77         .napi_add               = qlcnic_83xx_napi_add,
78         .napi_del               = qlcnic_83xx_napi_del,
79         .shutdown               = qlcnic_sriov_vf_shutdown,
80         .resume                 = qlcnic_sriov_vf_resume,
81         .config_ipaddr          = qlcnic_83xx_config_ipaddr,
82         .clear_legacy_intr      = qlcnic_83xx_clear_legacy_intr,
83 };
84
85 static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
86         {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
87         {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
88         {QLCNIC_BC_CMD_GET_ACL, 3, 14},
89         {QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
90 };
91
92 static inline bool qlcnic_sriov_bc_msg_check(u32 val)
93 {
94         return (val & (1 << QLC_BC_MSG)) ? true : false;
95 }
96
97 static inline bool qlcnic_sriov_channel_free_check(u32 val)
98 {
99         return (val & (1 << QLC_BC_CFREE)) ? true : false;
100 }
101
102 static inline bool qlcnic_sriov_flr_check(u32 val)
103 {
104         return (val & (1 << QLC_BC_FLR)) ? true : false;
105 }
106
107 static inline u8 qlcnic_sriov_target_func_id(u32 val)
108 {
109         return (val >> 4) & 0xff;
110 }
111
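/* Derive the PCI function number of VF @vf_id from the VF offset and
 * stride advertised in the PF's SR-IOV extended capability.  On a VF
 * the capability is not present, so 0 is returned.
 */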
112 static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
113 {
114         struct pci_dev *dev = adapter->pdev;
115         int pos;
116         u16 stride, offset;
117
118         if (qlcnic_sriov_vf_check(adapter))
119                 return 0;
120
121         pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
122         pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
123         pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
124
125         return (dev->devfn + offset + stride * vf_id) & 0xff;
126 }
127
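/* Allocate the SR-IOV state for @num_vfs VFs: the per-VF info array,
 * the back-channel transaction and async workqueues, and, when running
 * as PF, one vport per VF initialized with a random MAC address.
 */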
128 int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
129 {
130         struct qlcnic_sriov *sriov;
131         struct qlcnic_back_channel *bc;
132         struct workqueue_struct *wq;
133         struct qlcnic_vport *vp;
134         struct qlcnic_vf_info *vf;
135         int err, i;
136
137         if (!qlcnic_sriov_enable_check(adapter))
138                 return -EIO;
139
140         sriov  = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
141         if (!sriov)
142                 return -ENOMEM;
143
144         adapter->ahw->sriov = sriov;
145         sriov->num_vfs = num_vfs;
146         bc = &sriov->bc;
147         sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) *
148                                  num_vfs, GFP_KERNEL);
149         if (!sriov->vf_info) {
150                 err = -ENOMEM;
151                 goto qlcnic_free_sriov;
152         }
153
154         wq = create_singlethread_workqueue("bc-trans");
155         if (wq == NULL) {
156                 err = -ENOMEM;
157                 dev_err(&adapter->pdev->dev,
158                         "Cannot create bc-trans workqueue\n");
159                 goto qlcnic_free_vf_info;
160         }
161
162         bc->bc_trans_wq = wq;
163
164         wq = create_singlethread_workqueue("async");
165         if (wq == NULL) {
166                 err = -ENOMEM;
167                 dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
168                 goto qlcnic_destroy_trans_wq;
169         }
170
171         bc->bc_async_wq =  wq;
172         INIT_LIST_HEAD(&bc->async_list);
173
174         for (i = 0; i < num_vfs; i++) {
175                 vf = &sriov->vf_info[i];
176                 vf->adapter = adapter;
177                 vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
178                 mutex_init(&vf->send_cmd_lock);
179                 mutex_init(&vf->vlan_list_lock);
180                 INIT_LIST_HEAD(&vf->rcv_act.wait_list);
181                 INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
182                 spin_lock_init(&vf->rcv_act.lock);
183                 spin_lock_init(&vf->rcv_pend.lock);
184                 init_completion(&vf->ch_free_cmpl);
185
186                 INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);
187
188                 if (qlcnic_sriov_pf_check(adapter)) {
189                         vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
190                         if (!vp) {
191                                 err = -ENOMEM;
192                                 goto qlcnic_destroy_async_wq;
193                         }
194                         sriov->vf_info[i].vp = vp;
195                         vp->max_tx_bw = MAX_BW;
196                         vp->spoofchk = true;
197                         random_ether_addr(vp->mac);
198                         dev_info(&adapter->pdev->dev,
199                                  "MAC Address %pM is configured for VF %d\n",
200                                  vp->mac, i);
201                 }
202         }
203
204         return 0;
205
206 qlcnic_destroy_async_wq:
207         destroy_workqueue(bc->bc_async_wq);
208
209 qlcnic_destroy_trans_wq:
210         destroy_workqueue(bc->bc_trans_wq);
211
212 qlcnic_free_vf_info:
213         kfree(sriov->vf_info);
214
215 qlcnic_free_sriov:
216         kfree(adapter->ahw->sriov);
217         return err;
218 }
219
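/* Drain a back-channel transaction list: remove every queued
 * transaction, free its mailbox arguments and release the transaction
 * itself, all under the list lock.
 */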
220 void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
221 {
222         struct qlcnic_bc_trans *trans;
223         struct qlcnic_cmd_args cmd;
224         unsigned long flags;
225
226         spin_lock_irqsave(&t_list->lock, flags);
227
228         while (!list_empty(&t_list->wait_list)) {
229                 trans = list_first_entry(&t_list->wait_list,
230                                          struct qlcnic_bc_trans, list);
231                 list_del(&trans->list);
232                 t_list->count--;
233                 cmd.req.arg = (u32 *)trans->req_pay;
234                 cmd.rsp.arg = (u32 *)trans->rsp_pay;
235                 qlcnic_free_mbx_args(&cmd);
236                 qlcnic_sriov_cleanup_transaction(trans);
237         }
238
239         spin_unlock_irqrestore(&t_list->lock, flags);
240 }
241
242 void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
243 {
244         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
245         struct qlcnic_back_channel *bc = &sriov->bc;
246         struct qlcnic_vf_info *vf;
247         int i;
248
249         if (!qlcnic_sriov_enable_check(adapter))
250                 return;
251
252         qlcnic_sriov_cleanup_async_list(bc);
253         destroy_workqueue(bc->bc_async_wq);
254
255         for (i = 0; i < sriov->num_vfs; i++) {
256                 vf = &sriov->vf_info[i];
257                 qlcnic_sriov_cleanup_list(&vf->rcv_pend);
258                 cancel_work_sync(&vf->trans_work);
259                 qlcnic_sriov_cleanup_list(&vf->rcv_act);
260         }
261
262         destroy_workqueue(bc->bc_trans_wq);
263
264         for (i = 0; i < sriov->num_vfs; i++)
265                 kfree(sriov->vf_info[i].vp);
266
267         kfree(sriov->vf_info);
268         kfree(adapter->ahw->sriov);
269 }
270
271 static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
272 {
273         qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
274         qlcnic_sriov_cfg_bc_intr(adapter, 0);
275         __qlcnic_sriov_cleanup(adapter);
276 }
277
278 void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
279 {
280         if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
281                 return;
282
283         qlcnic_sriov_free_vlans(adapter);
284
285         if (qlcnic_sriov_pf_check(adapter))
286                 qlcnic_sriov_pf_cleanup(adapter);
287
288         if (qlcnic_sriov_vf_check(adapter))
289                 qlcnic_sriov_vf_cleanup(adapter);
290 }
291
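/* Post one back-channel fragment (header plus payload) through the
 * 83xx firmware mailbox and wait for the mailbox worker to complete
 * the command.
 */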
292 static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
293                                     u32 *pay, u8 pci_func, u8 size)
294 {
295         struct qlcnic_hardware_context *ahw = adapter->ahw;
296         struct qlcnic_mailbox *mbx = ahw->mailbox;
297         struct qlcnic_cmd_args cmd;
298         unsigned long timeout;
299         int err;
300
301         memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
302         cmd.hdr = hdr;
303         cmd.pay = pay;
304         cmd.pay_size = size;
305         cmd.func_num = pci_func;
306         cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
307         cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
308
309         err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
310         if (err) {
311                 dev_err(&adapter->pdev->dev,
312                         "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
313                         __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
314                         ahw->op_mode);
315                 return err;
316         }
317
318         if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
319                 dev_err(&adapter->pdev->dev,
320                         "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
321                         __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
322                         ahw->op_mode);
323                 flush_workqueue(mbx->work_q);
324         }
325
326         return cmd.rsp_opcode;
327 }
328
329 static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
330 {
331         adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
332         adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
333         adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
334         adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
335         adapter->num_txd = MAX_CMD_DESCRIPTORS;
336         adapter->max_rds_rings = MAX_RDS_RINGS;
337 }
338
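/* Query firmware (QLCNIC_CMD_GET_NIC_INFO) for the resource limits of
 * vport @vport_id and copy the fields flagged as valid in the response
 * into @npar_info.
 */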
339 int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
340                                    struct qlcnic_info *npar_info, u16 vport_id)
341 {
342         struct device *dev = &adapter->pdev->dev;
343         struct qlcnic_cmd_args cmd;
344         int err;
345         u32 status;
346
347         err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
348         if (err)
349                 return err;
350
351         cmd.req.arg[1] = vport_id << 16 | 0x1;
352         err = qlcnic_issue_cmd(adapter, &cmd);
353         if (err) {
354                 dev_err(&adapter->pdev->dev,
355                         "Failed to get vport info, err=%d\n", err);
356                 qlcnic_free_mbx_args(&cmd);
357                 return err;
358         }
359
360         status = cmd.rsp.arg[2] & 0xffff;
361         if (status & BIT_0)
362                 npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
363         if (status & BIT_1)
364                 npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
365         if (status & BIT_2)
366                 npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
367         if (status & BIT_3)
368                 npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
369         if (status & BIT_4)
370                 npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
371         if (status & BIT_5)
372                 npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
373         if (status & BIT_6)
374                 npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
375         if (status & BIT_7)
376                 npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
377         if (status & BIT_8)
378                 npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
379         if (status & BIT_9)
380                 npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);
381
382         npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
383         npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
384         npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
385         npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);
386
387         dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
388                  "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
389                  "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
390                  "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
391                  "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
392                  "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
393                  npar_info->min_tx_bw, npar_info->max_tx_bw,
394                  npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
395                  npar_info->max_rx_mcast_mac_filters,
396                  npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
397                  npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
398                  npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
399                  npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
400                  npar_info->max_remote_ipv6_addrs);
401
402         qlcnic_free_mbx_args(&cmd);
403         return err;
404 }
405
406 static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
407                                       struct qlcnic_cmd_args *cmd)
408 {
409         adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
410         adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
411         return 0;
412 }
413
414 static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
415                                             struct qlcnic_cmd_args *cmd)
416 {
417         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
418         int i, num_vlans;
419         u16 *vlans;
420
421         if (sriov->allowed_vlans)
422                 return 0;
423
424         sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
425         sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
426         dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
427                  sriov->num_allowed_vlans);
428
429         qlcnic_sriov_alloc_vlans(adapter);
430
431         if (!sriov->any_vlan)
432                 return 0;
433
434         num_vlans = sriov->num_allowed_vlans;
435         sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
436         if (!sriov->allowed_vlans)
437                 return -ENOMEM;
438
439         vlans = (u16 *)&cmd->rsp.arg[3];
440         for (i = 0; i < num_vlans; i++)
441                 sriov->allowed_vlans[i] = vlans[i];
442
443         return 0;
444 }
445
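/* Fetch the VF's ACL from the PF over the back channel and apply the
 * reported VLAN mode (guest VLAN or PVID).
 */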
446 static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter,
447                                    struct qlcnic_info *info)
448 {
449         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
450         struct qlcnic_cmd_args cmd;
451         int ret = 0;
452
453         ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
454         if (ret)
455                 return ret;
456
457         ret = qlcnic_issue_cmd(adapter, &cmd);
458         if (ret) {
459                 dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
460                         ret);
461         } else {
462                 sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
463                 switch (sriov->vlan_mode) {
464                 case QLC_GUEST_VLAN_MODE:
465                         ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
466                         break;
467                 case QLC_PVID_MODE:
468                         ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
469                         break;
470                 }
471         }
472
473         qlcnic_free_mbx_args(&cmd);
474         return ret;
475 }
476
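/* Gather vport, NIC and port information from firmware, program the
 * default receive descriptor counts and cache the port parameters in
 * the hardware context.
 */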
477 static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
478 {
479         struct qlcnic_hardware_context *ahw = adapter->ahw;
480         struct qlcnic_info nic_info;
481         int err;
482
483         err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
484         if (err)
485                 return err;
486
487         ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;
488
489         err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
490         if (err)
491                 return -EIO;
492
493         err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
494         if (err)
495                 return err;
496
497         if (qlcnic_83xx_get_port_info(adapter))
498                 return -EIO;
499
500         qlcnic_sriov_vf_cfg_buff_desc(adapter);
501         adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
502         dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
503                  adapter->ahw->fw_hal_version);
504
505         ahw->physical_port = (u8) nic_info.phys_port;
506         ahw->switch_mode = nic_info.switch_mode;
507         ahw->max_mtu = nic_info.max_mtu;
508         ahw->op_mode = nic_info.op_mode;
509         ahw->capabilities = nic_info.capabilities;
510         return 0;
511 }
512
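/* Bring up a VF: set up interrupts and the mailbox, initialize SR-IOV
 * state for a single VF, enable back-channel events, open the channel
 * to the PF and register the netdev.  Errors unwind in reverse order.
 */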
513 static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
514                                  int pci_using_dac)
515 {
516         int err;
517
518         INIT_LIST_HEAD(&adapter->vf_mc_list);
519         if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
520                 dev_warn(&adapter->pdev->dev,
521                          "Device does not support MSI interrupts\n");
522
523         /* compute and set default and max tx/sds rings */
524         qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
525         qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
526
527         err = qlcnic_setup_intr(adapter);
528         if (err) {
529                 dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
530                 goto err_out_disable_msi;
531         }
532
533         err = qlcnic_83xx_setup_mbx_intr(adapter);
534         if (err)
535                 goto err_out_disable_msi;
536
537         err = qlcnic_sriov_init(adapter, 1);
538         if (err)
539                 goto err_out_disable_mbx_intr;
540
541         err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
542         if (err)
543                 goto err_out_cleanup_sriov;
544
545         err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
546         if (err)
547                 goto err_out_disable_bc_intr;
548
549         err = qlcnic_sriov_vf_init_driver(adapter);
550         if (err)
551                 goto err_out_send_channel_term;
552
553         err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
554         if (err)
555                 goto err_out_send_channel_term;
556
557         pci_set_drvdata(adapter->pdev, adapter);
558         dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
559                  adapter->netdev->name);
560
561         qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
562                              adapter->ahw->idc.delay);
563         return 0;
564
565 err_out_send_channel_term:
566         qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
567
568 err_out_disable_bc_intr:
569         qlcnic_sriov_cfg_bc_intr(adapter, 0);
570
571 err_out_cleanup_sriov:
572         __qlcnic_sriov_cleanup(adapter);
573
574 err_out_disable_mbx_intr:
575         qlcnic_83xx_free_mbx_intr(adapter);
576
577 err_out_disable_msi:
578         qlcnic_teardown_intr(adapter);
579         return err;
580 }
581
582 static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
583 {
584         u32 state;
585
586         do {
587                 msleep(20);
588                 if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
589                         return -EIO;
590                 state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
591         } while (state != QLC_83XX_IDC_DEV_READY);
592
593         return 0;
594 }
595
596 int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
597 {
598         struct qlcnic_hardware_context *ahw = adapter->ahw;
599         int err;
600
601         set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
602         ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
603         ahw->reset_context = 0;
604         adapter->fw_fail_cnt = 0;
605         ahw->msix_supported = 1;
606         adapter->need_fw_reset = 0;
607         adapter->flags |= QLCNIC_TX_INTR_SHARED;
608
609         err = qlcnic_sriov_check_dev_ready(adapter);
610         if (err)
611                 return err;
612
613         err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
614         if (err)
615                 return err;
616
617         if (qlcnic_read_mac_addr(adapter))
618                 dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");
619
620         INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
621
622         clear_bit(__QLCNIC_RESETTING, &adapter->state);
623         return 0;
624 }
625
626 void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
627 {
628         struct qlcnic_hardware_context *ahw = adapter->ahw;
629
630         ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
631         dev_info(&adapter->pdev->dev,
632                  "HAL Version: %d Non Privileged SRIOV function\n",
633                  ahw->fw_hal_version);
634         adapter->nic_ops = &qlcnic_sriov_vf_ops;
635         set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
636         return;
637 }
638
639 void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
640 {
641         ahw->hw_ops             = &qlcnic_sriov_vf_hw_ops;
642         ahw->reg_tbl            = (u32 *)qlcnic_83xx_reg_tbl;
643         ahw->ext_reg_tbl        = (u32 *)qlcnic_83xx_ext_reg_tbl;
644 }
645
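/* Return the payload size of fragment @curr_frag: the full
 * QLC_BC_PAYLOAD_SZ for all but the last fragment, and the remainder
 * for the final one.
 */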
646 static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
647 {
648         u32 pay_size;
649
650         pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);
651
652         if (pay_size)
653                 pay_size = QLC_BC_PAYLOAD_SZ;
654         else
655                 pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;
656
657         return pay_size;
658 }
659
660 int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
661 {
662         struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
663         u8 i;
664
665         if (qlcnic_sriov_vf_check(adapter))
666                 return 0;
667
668         for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
669                 if (vf_info[i].pci_func == pci_func)
670                         return i;
671         }
672
673         return -EINVAL;
674 }
675
676 static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
677 {
678         *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
679         if (!*trans)
680                 return -ENOMEM;
681
682         init_completion(&(*trans)->resp_cmpl);
683         return 0;
684 }
685
686 static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
687                                             u32 size)
688 {
689         *hdr = kzalloc(sizeof(struct qlcnic_bc_hdr) * size, GFP_ATOMIC);
690         if (!*hdr)
691                 return -ENOMEM;
692
693         return 0;
694 }
695
696 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
697 {
698         const struct qlcnic_mailbox_metadata *mbx_tbl;
699         int i, size;
700
701         mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
702         size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);
703
704         for (i = 0; i < size; i++) {
705                 if (type == mbx_tbl[i].cmd) {
706                         mbx->op_type = QLC_BC_CMD;
707                         mbx->req.num = mbx_tbl[i].in_args;
708                         mbx->rsp.num = mbx_tbl[i].out_args;
709                         mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
710                                                GFP_ATOMIC);
711                         if (!mbx->req.arg)
712                                 return -ENOMEM;
713                         mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
714                                                GFP_ATOMIC);
715                         if (!mbx->rsp.arg) {
716                                 kfree(mbx->req.arg);
717                                 mbx->req.arg = NULL;
718                                 return -ENOMEM;
719                         }
720                         memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
721                         memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
722                         mbx->req.arg[0] = (type | (mbx->req.num << 16) |
723                                            (3 << 29));
724                         mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
725                         return 0;
726                 }
727         }
728         return -EINVAL;
729 }
730
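/* Split a command (or attach a response) into QLC_BC_PAYLOAD_SZ sized
 * fragments and fill in a qlcnic_bc_hdr for each fragment with the
 * sequence id, opcode and fragment bookkeeping.
 */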
731 static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
732                                        struct qlcnic_cmd_args *cmd,
733                                        u16 seq, u8 msg_type)
734 {
735         struct qlcnic_bc_hdr *hdr;
736         int i;
737         u32 num_regs, bc_pay_sz;
738         u16 remainder;
739         u8 cmd_op, num_frags, t_num_frags;
740
741         bc_pay_sz = QLC_BC_PAYLOAD_SZ;
742         if (msg_type == QLC_BC_COMMAND) {
743                 trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
744                 trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
745                 num_regs = cmd->req.num;
746                 trans->req_pay_size = (num_regs * 4);
747                 num_regs = cmd->rsp.num;
748                 trans->rsp_pay_size = (num_regs * 4);
749                 cmd_op = cmd->req.arg[0] & 0xff;
750                 remainder = (trans->req_pay_size) % (bc_pay_sz);
751                 num_frags = (trans->req_pay_size) / (bc_pay_sz);
752                 if (remainder)
753                         num_frags++;
754                 t_num_frags = num_frags;
755                 if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
756                         return -ENOMEM;
757                 remainder = (trans->rsp_pay_size) % (bc_pay_sz);
758                 num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
759                 if (remainder)
760                         num_frags++;
761                 if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
762                         return -ENOMEM;
763                 num_frags  = t_num_frags;
764                 hdr = trans->req_hdr;
765         }  else {
766                 cmd->req.arg = (u32 *)trans->req_pay;
767                 cmd->rsp.arg = (u32 *)trans->rsp_pay;
768                 cmd_op = cmd->req.arg[0] & 0xff;
769                 remainder = (trans->rsp_pay_size) % (bc_pay_sz);
770                 num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
771                 if (remainder)
772                         num_frags++;
773                 cmd->req.num = trans->req_pay_size / 4;
774                 cmd->rsp.num = trans->rsp_pay_size / 4;
775                 hdr = trans->rsp_hdr;
776                 cmd->op_type = trans->req_hdr->op_type;
777         }
778
779         trans->trans_id = seq;
780         trans->cmd_id = cmd_op;
781         for (i = 0; i < num_frags; i++) {
782                 hdr[i].version = 2;
783                 hdr[i].msg_type = msg_type;
784                 hdr[i].op_type = cmd->op_type;
785                 hdr[i].num_cmds = 1;
786                 hdr[i].num_frags = num_frags;
787                 hdr[i].frag_num = i + 1;
788                 hdr[i].cmd_op = cmd_op;
789                 hdr[i].seq_id = seq;
790         }
791         return 0;
792 }
793
794 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
795 {
796         if (!trans)
797                 return;
798         kfree(trans->req_hdr);
799         kfree(trans->rsp_hdr);
800         kfree(trans);
801 }
802
803 static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
804                                     struct qlcnic_bc_trans *trans, u8 type)
805 {
806         struct qlcnic_trans_list *t_list;
807         unsigned long flags;
808         int ret = 0;
809
810         if (type == QLC_BC_RESPONSE) {
811                 t_list = &vf->rcv_act;
812                 spin_lock_irqsave(&t_list->lock, flags);
813                 t_list->count--;
814                 list_del(&trans->list);
815                 if (t_list->count > 0)
816                         ret = 1;
817                 spin_unlock_irqrestore(&t_list->lock, flags);
818         }
819         if (type == QLC_BC_COMMAND) {
820                 while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
821                         msleep(100);
822                 vf->send_cmd = NULL;
823                 clear_bit(QLC_BC_VF_SEND, &vf->state);
824         }
825         return ret;
826 }
827
828 static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
829                                          struct qlcnic_vf_info *vf,
830                                          work_func_t func)
831 {
832         if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
833             vf->adapter->need_fw_reset)
834                 return;
835
836         queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
837 }
838
839 static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
840 {
841         struct completion *cmpl = &trans->resp_cmpl;
842
843         if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
844                 trans->trans_state = QLC_END;
845         else
846                 trans->trans_state = QLC_ABORT;
847
848         return;
849 }
850
851 static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
852                                             u8 type)
853 {
854         if (type == QLC_BC_RESPONSE) {
855                 trans->curr_rsp_frag++;
856                 if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
857                         trans->trans_state = QLC_INIT;
858                 else
859                         trans->trans_state = QLC_END;
860         } else {
861                 trans->curr_req_frag++;
862                 if (trans->curr_req_frag < trans->req_hdr->num_frags)
863                         trans->trans_state = QLC_INIT;
864                 else
865                         trans->trans_state = QLC_WAIT_FOR_RESP;
866         }
867 }
868
869 static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
870                                                u8 type)
871 {
872         struct qlcnic_vf_info *vf = trans->vf;
873         struct completion *cmpl = &vf->ch_free_cmpl;
874
875         if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
876                 trans->trans_state = QLC_ABORT;
877                 return;
878         }
879
880         clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
881         qlcnic_sriov_handle_multi_frags(trans, type);
882 }
883
884 static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
885                                      u32 *hdr, u32 *pay, u32 size)
886 {
887         struct qlcnic_hardware_context *ahw = adapter->ahw;
888         u32 fw_mbx;
889         u8 i, max = 2, hdr_size, j;
890
891         hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
892         max = (size / sizeof(u32)) + hdr_size;
893
894         fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
895         for (i = 2, j = 0; j < hdr_size; i++, j++)
896                 *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
897         for (; j < max; i++, j++)
898                 *(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
899 }
900
901 static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
902 {
903         int ret = -EBUSY;
904         u32 timeout = 10000;
905
906         do {
907                 if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
908                         ret = 0;
909                         break;
910                 }
911                 mdelay(1);
912         } while (--timeout);
913
914         return ret;
915 }
916
917 static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
918 {
919         struct qlcnic_vf_info *vf = trans->vf;
920         u32 pay_size, hdr_size;
921         u32 *hdr, *pay;
922         int ret;
923         u8 pci_func = trans->func_id;
924
925         if (__qlcnic_sriov_issue_bc_post(vf))
926                 return -EBUSY;
927
928         if (type == QLC_BC_COMMAND) {
929                 hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
930                 pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
931                 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
932                 pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
933                                                        trans->curr_req_frag);
934                 pay_size = (pay_size / sizeof(u32));
935         } else {
936                 hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
937                 pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
938                 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
939                 pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
940                                                        trans->curr_rsp_frag);
941                 pay_size = (pay_size / sizeof(u32));
942         }
943
944         ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
945                                        pci_func, pay_size);
946         return ret;
947 }
948
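/* Drive a back-channel transaction through its state machine
 * (QLC_INIT -> QLC_WAIT_FOR_CHANNEL_FREE -> QLC_WAIT_FOR_RESP ->
 * QLC_END/QLC_ABORT), posting one fragment per iteration.
 */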
949 static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
950                                       struct qlcnic_vf_info *vf, u8 type)
951 {
952         bool flag = true;
953         int err = -EIO;
954
955         while (flag) {
956                 if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
957                     vf->adapter->need_fw_reset)
958                         trans->trans_state = QLC_ABORT;
959
960                 switch (trans->trans_state) {
961                 case QLC_INIT:
962                         trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
963                         if (qlcnic_sriov_issue_bc_post(trans, type))
964                                 trans->trans_state = QLC_ABORT;
965                         break;
966                 case QLC_WAIT_FOR_CHANNEL_FREE:
967                         qlcnic_sriov_wait_for_channel_free(trans, type);
968                         break;
969                 case QLC_WAIT_FOR_RESP:
970                         qlcnic_sriov_wait_for_resp(trans);
971                         break;
972                 case QLC_END:
973                         err = 0;
974                         flag = false;
975                         break;
976                 case QLC_ABORT:
977                         err = -EIO;
978                         flag = false;
979                         clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
980                         break;
981                 default:
982                         err = -EIO;
983                         flag = false;
984                 }
985         }
986         return err;
987 }
988
989 static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
990                                     struct qlcnic_bc_trans *trans, int pci_func)
991 {
992         struct qlcnic_vf_info *vf;
993         int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);
994
995         if (index < 0)
996                 return -EIO;
997
998         vf = &adapter->ahw->sriov->vf_info[index];
999         trans->vf = vf;
1000         trans->func_id = pci_func;
1001
1002         if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
1003                 if (qlcnic_sriov_pf_check(adapter))
1004                         return -EIO;
1005                 if (qlcnic_sriov_vf_check(adapter) &&
1006                     trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
1007                         return -EIO;
1008         }
1009
1010         mutex_lock(&vf->send_cmd_lock);
1011         vf->send_cmd = trans;
1012         err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
1013         qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
1014         mutex_unlock(&vf->send_cmd_lock);
1015         return err;
1016 }
1017
1018 static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
1019                                           struct qlcnic_bc_trans *trans,
1020                                           struct qlcnic_cmd_args *cmd)
1021 {
1022 #ifdef CONFIG_QLCNIC_SRIOV
1023         if (qlcnic_sriov_pf_check(adapter)) {
1024                 qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
1025                 return;
1026         }
1027 #endif
1028         cmd->rsp.arg[0] |= (0x9 << 25);
1029         return;
1030 }
1031
1032 static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
1033 {
1034         struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
1035                                                  trans_work);
1036         struct qlcnic_bc_trans *trans = NULL;
1037         struct qlcnic_adapter *adapter  = vf->adapter;
1038         struct qlcnic_cmd_args cmd;
1039         u8 req;
1040
1041         if (adapter->need_fw_reset)
1042                 return;
1043
1044         if (test_bit(QLC_BC_VF_FLR, &vf->state))
1045                 return;
1046
1047         memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
1048         trans = list_first_entry(&vf->rcv_act.wait_list,
1049                                  struct qlcnic_bc_trans, list);
1050         adapter = vf->adapter;
1051
1052         if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
1053                                         QLC_BC_RESPONSE))
1054                 goto cleanup_trans;
1055
1056         __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
1057         trans->trans_state = QLC_INIT;
1058         __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);
1059
1060 cleanup_trans:
1061         qlcnic_free_mbx_args(&cmd);
1062         req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
1063         qlcnic_sriov_cleanup_transaction(trans);
1064         if (req)
1065                 qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
1066                                              qlcnic_sriov_process_bc_cmd);
1067 }
1068
1069 static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
1070                                         struct qlcnic_vf_info *vf)
1071 {
1072         struct qlcnic_bc_trans *trans;
1073         u32 pay_size;
1074
1075         if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
1076                 return;
1077
1078         trans = vf->send_cmd;
1079
1080         if (trans == NULL)
1081                 goto clear_send;
1082
1083         if (trans->trans_id != hdr->seq_id)
1084                 goto clear_send;
1085
1086         pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
1087                                                trans->curr_rsp_frag);
1088         qlcnic_sriov_pull_bc_msg(vf->adapter,
1089                                  (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
1090                                  (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
1091                                  pay_size);
1092         if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
1093                 goto clear_send;
1094
1095         complete(&trans->resp_cmpl);
1096
1097 clear_send:
1098         clear_bit(QLC_BC_VF_SEND, &vf->state);
1099 }
1100
1101 int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
1102                                 struct qlcnic_vf_info *vf,
1103                                 struct qlcnic_bc_trans *trans)
1104 {
1105         struct qlcnic_trans_list *t_list = &vf->rcv_act;
1106
1107         t_list->count++;
1108         list_add_tail(&trans->list, &t_list->wait_list);
1109         if (t_list->count == 1)
1110                 qlcnic_sriov_schedule_bc_cmd(sriov, vf,
1111                                              qlcnic_sriov_process_bc_cmd);
1112         return 0;
1113 }
1114
1115 static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
1116                                      struct qlcnic_vf_info *vf,
1117                                      struct qlcnic_bc_trans *trans)
1118 {
1119         struct qlcnic_trans_list *t_list = &vf->rcv_act;
1120
1121         spin_lock(&t_list->lock);
1122
1123         __qlcnic_sriov_add_act_list(sriov, vf, trans);
1124
1125         spin_unlock(&t_list->lock);
1126         return 0;
1127 }
1128
1129 static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
1130                                               struct qlcnic_vf_info *vf,
1131                                               struct qlcnic_bc_hdr *hdr)
1132 {
1133         struct qlcnic_bc_trans *trans = NULL;
1134         struct list_head *node;
1135         u32 pay_size, curr_frag;
1136         u8 found = 0, active = 0;
1137
1138         spin_lock(&vf->rcv_pend.lock);
1139         if (vf->rcv_pend.count > 0) {
1140                 list_for_each(node, &vf->rcv_pend.wait_list) {
1141                         trans = list_entry(node, struct qlcnic_bc_trans, list);
1142                         if (trans->trans_id == hdr->seq_id) {
1143                                 found = 1;
1144                                 break;
1145                         }
1146                 }
1147         }
1148
1149         if (found) {
1150                 curr_frag = trans->curr_req_frag;
1151                 pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
1152                                                        curr_frag);
1153                 qlcnic_sriov_pull_bc_msg(vf->adapter,
1154                                          (u32 *)(trans->req_hdr + curr_frag),
1155                                          (u32 *)(trans->req_pay + curr_frag),
1156                                          pay_size);
1157                 trans->curr_req_frag++;
1158                 if (trans->curr_req_frag >= hdr->num_frags) {
1159                         vf->rcv_pend.count--;
1160                         list_del(&trans->list);
1161                         active = 1;
1162                 }
1163         }
1164         spin_unlock(&vf->rcv_pend.lock);
1165
1166         if (active)
1167                 if (qlcnic_sriov_add_act_list(sriov, vf, trans))
1168                         qlcnic_sriov_cleanup_transaction(trans);
1169
1170         return;
1171 }
1172
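/* Handle the first fragment of an incoming back-channel command:
 * allocate a transaction, pull the message from the mailbox registers
 * and either queue it for processing or park it on the pending list
 * until the remaining fragments arrive.
 */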
1173 static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
1174                                        struct qlcnic_bc_hdr *hdr,
1175                                        struct qlcnic_vf_info *vf)
1176 {
1177         struct qlcnic_bc_trans *trans;
1178         struct qlcnic_adapter *adapter = vf->adapter;
1179         struct qlcnic_cmd_args cmd;
1180         u32 pay_size;
1181         int err;
1182         u8 cmd_op;
1183
1184         if (adapter->need_fw_reset)
1185                 return;
1186
1187         if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
1188             hdr->op_type != QLC_BC_CMD &&
1189             hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
1190                 return;
1191
1192         if (hdr->frag_num > 1) {
1193                 qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
1194                 return;
1195         }
1196
1197         memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
1198         cmd_op = hdr->cmd_op;
1199         if (qlcnic_sriov_alloc_bc_trans(&trans))
1200                 return;
1201
1202         if (hdr->op_type == QLC_BC_CMD)
1203                 err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
1204         else
1205                 err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);
1206
1207         if (err) {
1208                 qlcnic_sriov_cleanup_transaction(trans);
1209                 return;
1210         }
1211
1212         cmd.op_type = hdr->op_type;
1213         if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
1214                                         QLC_BC_COMMAND)) {
1215                 qlcnic_free_mbx_args(&cmd);
1216                 qlcnic_sriov_cleanup_transaction(trans);
1217                 return;
1218         }
1219
1220         pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
1221                                          trans->curr_req_frag);
1222         qlcnic_sriov_pull_bc_msg(vf->adapter,
1223                                  (u32 *)(trans->req_hdr + trans->curr_req_frag),
1224                                  (u32 *)(trans->req_pay + trans->curr_req_frag),
1225                                  pay_size);
1226         trans->func_id = vf->pci_func;
1227         trans->vf = vf;
1228         trans->trans_id = hdr->seq_id;
1229         trans->curr_req_frag++;
1230
1231         if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
1232                 return;
1233
1234         if (trans->curr_req_frag == trans->req_hdr->num_frags) {
1235                 if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
1236                         qlcnic_free_mbx_args(&cmd);
1237                         qlcnic_sriov_cleanup_transaction(trans);
1238                 }
1239         } else {
1240                 spin_lock(&vf->rcv_pend.lock);
1241                 list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
1242                 vf->rcv_pend.count++;
1243                 spin_unlock(&vf->rcv_pend.lock);
1244         }
1245 }
1246
1247 static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
1248                                           struct qlcnic_vf_info *vf)
1249 {
1250         struct qlcnic_bc_hdr hdr;
1251         u32 *ptr = (u32 *)&hdr;
1252         u8 msg_type, i;
1253
1254         for (i = 2; i < 6; i++)
1255                 ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
1256         msg_type = hdr.msg_type;
1257
1258         switch (msg_type) {
1259         case QLC_BC_COMMAND:
1260                 qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
1261                 break;
1262         case QLC_BC_RESPONSE:
1263                 qlcnic_sriov_handle_bc_resp(&hdr, vf);
1264                 break;
1265         }
1266 }
1267
1268 static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
1269                                           struct qlcnic_vf_info *vf)
1270 {
1271         struct qlcnic_adapter *adapter = vf->adapter;
1272
1273         if (qlcnic_sriov_pf_check(adapter))
1274                 qlcnic_sriov_pf_handle_flr(sriov, vf);
1275         else
1276                 dev_err(&adapter->pdev->dev,
1277                         "Invalid event to VF. VF should not get FLR event\n");
1278 }
1279
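/* Demultiplex a back-channel event: wake a waiter on channel-free,
 * hand FLR notifications to the PF handler and dispatch incoming
 * command/response messages to the owning VF.
 */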
1280 void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
1281 {
1282         struct qlcnic_vf_info *vf;
1283         struct qlcnic_sriov *sriov;
1284         int index;
1285         u8 pci_func;
1286
1287         sriov = adapter->ahw->sriov;
1288         pci_func = qlcnic_sriov_target_func_id(event);
1289         index = qlcnic_sriov_func_to_index(adapter, pci_func);
1290
1291         if (index < 0)
1292                 return;
1293
1294         vf = &sriov->vf_info[index];
1295         vf->pci_func = pci_func;
1296
1297         if (qlcnic_sriov_channel_free_check(event))
1298                 complete(&vf->ch_free_cmpl);
1299
1300         if (qlcnic_sriov_flr_check(event)) {
1301                 qlcnic_sriov_handle_flr_event(sriov, vf);
1302                 return;
1303         }
1304
1305         if (qlcnic_sriov_bc_msg_check(event))
1306                 qlcnic_sriov_handle_msg_event(sriov, vf);
1307 }
1308
1309 int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
1310 {
1311         struct qlcnic_cmd_args cmd;
1312         int err;
1313
1314         if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
1315                 return 0;
1316
1317         if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
1318                 return -ENOMEM;
1319
1320         if (enable)
1321                 cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
1322
1323         err = qlcnic_83xx_issue_cmd(adapter, &cmd);
1324
1325         if (err != QLCNIC_RCODE_SUCCESS) {
1326                 dev_err(&adapter->pdev->dev,
1327                         "Failed to %s bc events, err=%d\n",
1328                         (enable ? "enable" : "disable"), err);
1329         }
1330
1331         qlcnic_free_mbx_args(&cmd);
1332         return err;
1333 }
1334
1335 static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
1336                                      struct qlcnic_bc_trans *trans)
1337 {
1338         u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
1339         u32 state;
1340
1341         state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
1342         if (state == QLC_83XX_IDC_DEV_READY) {
1343                 msleep(20);
1344                 clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
1345                 trans->trans_state = QLC_INIT;
1346                 if (++adapter->fw_fail_cnt > max)
1347                         return -EIO;
1348                 else
1349                         return 0;
1350         }
1351
1352         return -EIO;
1353 }
1354
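/* VF mailbox entry point: wrap @cmd in a back-channel transaction,
 * send it to the PF and decode the response.  CHANNEL_INIT commands
 * are retried after a PF reset; a timeout schedules a context reset.
 */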
1355 static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
1356                                   struct qlcnic_cmd_args *cmd)
1357 {
1358         struct qlcnic_hardware_context *ahw = adapter->ahw;
1359         struct qlcnic_mailbox *mbx = ahw->mailbox;
1360         struct device *dev = &adapter->pdev->dev;
1361         struct qlcnic_bc_trans *trans;
1362         int err;
1363         u32 rsp_data, opcode, mbx_err_code, rsp;
1364         u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
1365         u8 func = ahw->pci_func;
1366
1367         rsp = qlcnic_sriov_alloc_bc_trans(&trans);
1368         if (rsp)
1369                 return rsp;
1370
1371         rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
1372         if (rsp)
1373                 goto cleanup_transaction;
1374
1375 retry:
1376         if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
1377                 rsp = -EIO;
1378                 QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
1379                       QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
1380                 goto err_out;
1381         }
1382
1383         err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
1384         if (err) {
1385                 dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
1386                         (cmd->req.arg[0] & 0xffff), func);
1387                 rsp = QLCNIC_RCODE_TIMEOUT;
1388
1389                 /* After an adapter reset, the PF driver may take some time to
1390                  * respond to the VF's request; retry up to the maximum count.
1391                  */
1392                 if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
1393                     !qlcnic_sriov_retry_bc_cmd(adapter, trans))
1394                         goto retry;
1395
1396                 goto err_out;
1397         }
1398
1399         rsp_data = cmd->rsp.arg[0];
1400         mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
1401         opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);
1402
1403         if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
1404             (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
1405                 rsp = QLCNIC_RCODE_SUCCESS;
1406         } else {
1407                 rsp = mbx_err_code;
1408                 if (!rsp)
1409                         rsp = 1;
1410                 dev_err(dev,
1411                         "MBX command 0x%x failed with err:0x%x for VF %d\n",
1412                         opcode, mbx_err_code, func);
1413         }
1414
1415 err_out:
1416         if (rsp == QLCNIC_RCODE_TIMEOUT) {
1417                 ahw->reset_context = 1;
1418                 adapter->need_fw_reset = 1;
1419                 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1420         }
1421
1422 cleanup_transaction:
1423         qlcnic_sriov_cleanup_transaction(trans);
1424         return rsp;
1425 }
1426
1427 int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
1428 {
1429         struct qlcnic_cmd_args cmd;
1430         struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
1431         int ret;
1432
1433         if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
1434                 return -ENOMEM;
1435
1436         ret = qlcnic_issue_cmd(adapter, &cmd);
1437         if (ret) {
1438                 dev_err(&adapter->pdev->dev,
1439                         "Failed bc channel %s %d\n", cmd_op ? "term" : "init",
1440                         ret);
1441                 goto out;
1442         }
1443
1444         cmd_op = (cmd.rsp.arg[0] & 0xff);
        if (cmd.rsp.arg[0] >> 25 == 2) {
                /* Exit through the common path so the mailbox args are freed */
                ret = 2;
                goto out;
        }
1447         if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
1448                 set_bit(QLC_BC_VF_STATE, &vf->state);
1449         else
1450                 clear_bit(QLC_BC_VF_STATE, &vf->state);
1451
1452 out:
1453         qlcnic_free_mbx_args(&cmd);
1454         return ret;
1455 }
1456
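/* Program the VF multicast list into the firmware: splice the pending
 * entries off adapter->vf_mc_list under the netdev address lock, then add
 * each MAC (plus broadcast) untagged or per configured VLAN.
 */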
1457 static void qlcnic_vf_add_mc_list(struct net_device *netdev)
1458 {
1459         struct qlcnic_adapter *adapter = netdev_priv(netdev);
1460         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1461         struct qlcnic_mac_vlan_list *cur;
1462         struct list_head *head, tmp_list;
1463         struct qlcnic_vf_info *vf;
1464         u16 vlan_id;
1465         int i;
1466
1467         static const u8 bcast_addr[ETH_ALEN] = {
1468                 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1469         };
1470
1471         vf = &adapter->ahw->sriov->vf_info[0];
1472         INIT_LIST_HEAD(&tmp_list);
1473         head = &adapter->vf_mc_list;
1474         netif_addr_lock_bh(netdev);
1475
1476         while (!list_empty(head)) {
1477                 cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
1478                 list_move(&cur->list, &tmp_list);
1479         }
1480
1481         netif_addr_unlock_bh(netdev);
1482
1483         while (!list_empty(&tmp_list)) {
1484                 cur = list_entry((&tmp_list)->next,
1485                                  struct qlcnic_mac_vlan_list, list);
1486                 if (!qlcnic_sriov_check_any_vlan(vf)) {
1487                         qlcnic_nic_add_mac(adapter, bcast_addr, 0);
1488                         qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
1489                 } else {
1490                         mutex_lock(&vf->vlan_list_lock);
1491                         for (i = 0; i < sriov->num_allowed_vlans; i++) {
1492                                 vlan_id = vf->sriov_vlans[i];
1493                                 if (vlan_id) {
1494                                         qlcnic_nic_add_mac(adapter, bcast_addr,
1495                                                            vlan_id);
1496                                         qlcnic_nic_add_mac(adapter,
1497                                                            cur->mac_addr,
1498                                                            vlan_id);
1499                                 }
1500                         }
1501                         mutex_unlock(&vf->vlan_list_lock);
1502                         if (qlcnic_84xx_check(adapter)) {
1503                                 qlcnic_nic_add_mac(adapter, bcast_addr, 0);
1504                                 qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
1505                         }
1506                 }
1507                 list_del(&cur->list);
1508                 kfree(cur);
1509         }
1510 }
1511
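/* Cancel any outstanding back-channel async work and free the list entries */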
1512 void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
1513 {
1514         struct list_head *head = &bc->async_list;
1515         struct qlcnic_async_work_list *entry;
1516
1517         while (!list_empty(head)) {
1518                 entry = list_entry(head->next, struct qlcnic_async_work_list,
1519                                    list);
1520                 cancel_work_sync(&entry->work);
1521                 list_del(&entry->list);
1522                 kfree(entry);
1523         }
1524 }
1525
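/* Derive the VF receive-miss mode from the netdev flags and multicast
 * count, reprogram the multicast list and apply the mode.
 */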
1526 static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
1527 {
1528         struct qlcnic_adapter *adapter = netdev_priv(netdev);
1529         struct qlcnic_hardware_context *ahw = adapter->ahw;
1530         u32 mode = VPORT_MISS_MODE_DROP;
1531
1532         if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
1533                 return;
1534
1535         if (netdev->flags & IFF_PROMISC) {
1536                 if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
1537                         mode = VPORT_MISS_MODE_ACCEPT_ALL;
1538         } else if ((netdev->flags & IFF_ALLMULTI) ||
1539                    (netdev_mc_count(netdev) > ahw->max_mc_count)) {
1540                 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
1541         }
1542
1543         if (qlcnic_sriov_vf_check(adapter))
1544                 qlcnic_vf_add_mc_list(netdev);
1545
1546         qlcnic_nic_set_promisc(adapter, mode);
1547 }
1548
1549 static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
1550 {
1551         struct qlcnic_async_work_list *entry;
1552         struct net_device *netdev;
1553
1554         entry = container_of(work, struct qlcnic_async_work_list, work);
1555         netdev = (struct net_device *)entry->ptr;
1556
1557         qlcnic_sriov_vf_set_multi(netdev);
1559 }
1560
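/* Return an async work entry that can be queued: reuse one whose work is
 * no longer pending, otherwise allocate a new entry atomically.
 */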
1561 static struct qlcnic_async_work_list *
1562 qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
1563 {
1564         struct list_head *node;
1565         struct qlcnic_async_work_list *entry = NULL;
        bool found = false;

        list_for_each(node, &bc->async_list) {
                entry = list_entry(node, struct qlcnic_async_work_list, list);
                if (!work_pending(&entry->work)) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
                if (!entry)
                        return NULL;
                list_add_tail(&entry->list, &bc->async_list);
        }
1583
1584         return entry;
1585 }
1586
1587 static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
1588                                                 work_func_t func, void *data)
1589 {
        struct qlcnic_async_work_list *entry;
1591
1592         entry = qlcnic_sriov_get_free_node_async_work(bc);
1593         if (!entry)
1594                 return;
1595
1596         entry->ptr = data;
1597         INIT_WORK(&entry->work, func);
1598         queue_work(bc->bc_async_wq, &entry->work);
1599 }
1600
1601 void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
1602 {
1604         struct qlcnic_adapter *adapter = netdev_priv(netdev);
1605         struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
1606
1607         if (adapter->need_fw_reset)
1608                 return;
1609
1610         qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
1611                                             netdev);
1612 }
1613
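/* Re-establish VF operation after a firmware reset: restart mailbox
 * processing, re-enable back-channel interrupts, re-open the channel to
 * the PF and reinitialize the driver, unwinding on failure.
 */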
1614 static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
1615 {
1616         int err;
1617
1618         adapter->need_fw_reset = 0;
1619         qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
1620         qlcnic_83xx_enable_mbx_interrupt(adapter);
1621
1622         err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
1623         if (err)
1624                 return err;
1625
1626         err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
1627         if (err)
1628                 goto err_out_cleanup_bc_intr;
1629
1630         err = qlcnic_sriov_vf_init_driver(adapter);
1631         if (err)
1632                 goto err_out_term_channel;
1633
1634         return 0;
1635
1636 err_out_term_channel:
1637         qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
1638
1639 err_out_cleanup_bc_intr:
1640         qlcnic_sriov_cfg_bc_intr(adapter, 0);
1641         return err;
1642 }
1643
1644 static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
1645 {
1646         struct net_device *netdev = adapter->netdev;
1647
1648         if (netif_running(netdev)) {
1649                 if (!qlcnic_up(adapter, netdev))
1650                         qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1651         }
1652
1653         netif_device_attach(netdev);
1654 }
1655
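/* Quiesce the VF: detach the netdev, stop mailbox processing and
 * interrupts, bring the interface down and clear the interrupt table.
 */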
1656 static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
1657 {
1658         struct qlcnic_hardware_context *ahw = adapter->ahw;
1659         struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
1660         struct net_device *netdev = adapter->netdev;
1661         u8 i, max_ints = ahw->num_msix - 1;
1662
1663         netif_device_detach(netdev);
1664         qlcnic_83xx_detach_mailbox_work(adapter);
1665         qlcnic_83xx_disable_mbx_intr(adapter);
1666
1667         if (netif_running(netdev))
1668                 qlcnic_down(adapter, netdev);
1669
1670         for (i = 0; i < max_ints; i++) {
1671                 intr_tbl[i].id = i;
1672                 intr_tbl[i].enabled = 0;
1673                 intr_tbl[i].src = 0;
1674         }
1675         ahw->reset_context = 0;
1676 }
1677
1678 static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
1679 {
1680         struct qlcnic_hardware_context *ahw = adapter->ahw;
1681         struct device *dev = &adapter->pdev->dev;
1682         struct qlc_83xx_idc *idc = &ahw->idc;
1683         u8 func = ahw->pci_func;
1684         u32 state;
1685
1686         if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
1687             (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
1688                 if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
1689                         qlcnic_sriov_vf_attach(adapter);
1690                         adapter->fw_fail_cnt = 0;
1691                         dev_info(dev,
1692                                  "%s: Reinitialization of VF 0x%x done after FW reset\n",
1693                                  __func__, func);
1694                 } else {
1695                         dev_err(dev,
1696                                 "%s: Reinitialization of VF 0x%x failed after FW reset\n",
1697                                 __func__, func);
1698                         state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
1699                         dev_info(dev, "Current state 0x%x after FW reset\n",
1700                                  state);
1701                 }
1702         }
1703
1704         return 0;
1705 }
1706
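/* Handle a VF context reset request: the first couple of attempts only
 * flag a firmware reset and wait, failures beyond the reset threshold shut
 * the interface down, otherwise the VF is detached and reinitialized.
 */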
1707 static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
1708 {
1709         struct qlcnic_hardware_context *ahw = adapter->ahw;
1710         struct qlcnic_mailbox *mbx = ahw->mailbox;
1711         struct device *dev = &adapter->pdev->dev;
1712         struct qlc_83xx_idc *idc = &ahw->idc;
1713         u8 func = ahw->pci_func;
1714         u32 state;
1715
1716         adapter->reset_ctx_cnt++;
1717
1718         /* Skip the context reset and check if FW is hung */
1719         if (adapter->reset_ctx_cnt < 3) {
1720                 adapter->need_fw_reset = 1;
1721                 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1722                 dev_info(dev,
1723                          "Resetting context, wait here to check if FW is in failed state\n");
1724                 return 0;
1725         }
1726
        /* If the number of context resets exceeds the threshold,
         * give up and fail the VF interface.
         */
1730         if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
1731                 clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
1732                 adapter->tx_timeo_cnt = 0;
1733                 adapter->fw_fail_cnt = 0;
1734                 adapter->reset_ctx_cnt = 0;
1735                 qlcnic_sriov_vf_detach(adapter);
1736                 dev_err(dev,
1737                         "Device context resets have exceeded the threshold, device interface will be shutdown\n");
1738                 return -EIO;
1739         }
1740
1741         dev_info(dev, "Resetting context of VF 0x%x\n", func);
1742         dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
1743                  __func__, adapter->reset_ctx_cnt, func);
1744         set_bit(__QLCNIC_RESETTING, &adapter->state);
1745         adapter->need_fw_reset = 1;
1746         clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1747         qlcnic_sriov_vf_detach(adapter);
1748         adapter->need_fw_reset = 0;
1749
1750         if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
1751                 qlcnic_sriov_vf_attach(adapter);
1752                 adapter->tx_timeo_cnt = 0;
1753                 adapter->reset_ctx_cnt = 0;
1754                 adapter->fw_fail_cnt = 0;
1755                 dev_info(dev, "Done resetting context for VF 0x%x\n", func);
1756         } else {
1757                 dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
1758                         __func__, func);
1759                 state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
1760                 dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
1761         }
1762
1763         return 0;
1764 }
1765
1766 static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
1767 {
1768         struct qlcnic_hardware_context *ahw = adapter->ahw;
1769         int ret = 0;
1770
1771         if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
1772                 ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
1773         else if (ahw->reset_context)
1774                 ret = qlcnic_sriov_vf_handle_context_reset(adapter);
1775
1776         clear_bit(__QLCNIC_RESETTING, &adapter->state);
1777         return ret;
1778 }
1779
1780 static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
1781 {
1782         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1783
1784         dev_err(&adapter->pdev->dev, "Device is in failed state\n");
1785         if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
1786                 qlcnic_sriov_vf_detach(adapter);
1787
1788         clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
1789         clear_bit(__QLCNIC_RESETTING, &adapter->state);
1790         return -EIO;
1791 }
1792
1793 static int
1794 qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
1795 {
1796         struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
1797         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1798
1799         dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
1800         if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1801                 set_bit(__QLCNIC_RESETTING, &adapter->state);
1802                 adapter->tx_timeo_cnt = 0;
1803                 adapter->reset_ctx_cnt = 0;
1804                 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1805                 qlcnic_sriov_vf_detach(adapter);
1806         }
1807
1808         return 0;
1809 }
1810
1811 static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
1812 {
1813         struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
1814         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1815         u8 func = adapter->ahw->pci_func;
1816
1817         if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1818                 dev_err(&adapter->pdev->dev,
1819                         "Firmware hang detected by VF 0x%x\n", func);
1820                 set_bit(__QLCNIC_RESETTING, &adapter->state);
1821                 adapter->tx_timeo_cnt = 0;
1822                 adapter->reset_ctx_cnt = 0;
1823                 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1824                 qlcnic_sriov_vf_detach(adapter);
1825         }
1826         return 0;
1827 }
1828
1829 static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
1830 {
1831         dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
1832         return 0;
1833 }
1834
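/* Delayed work that polls the 83xx IDC device state, dispatches to the
 * matching state handler and reschedules itself while the module stays
 * loaded and no handler reports an error.
 */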
1835 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
1836 {
1837         struct qlcnic_adapter *adapter;
1838         struct qlc_83xx_idc *idc;
1839         int ret = 0;
1840
1841         adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
1842         idc = &adapter->ahw->idc;
1843         idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
1844
1845         switch (idc->curr_state) {
1846         case QLC_83XX_IDC_DEV_READY:
1847                 ret = qlcnic_sriov_vf_idc_ready_state(adapter);
1848                 break;
1849         case QLC_83XX_IDC_DEV_NEED_RESET:
1850         case QLC_83XX_IDC_DEV_INIT:
1851                 ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
1852                 break;
1853         case QLC_83XX_IDC_DEV_NEED_QUISCENT:
1854                 ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
1855                 break;
1856         case QLC_83XX_IDC_DEV_FAILED:
1857                 ret = qlcnic_sriov_vf_idc_failed_state(adapter);
1858                 break;
1859         case QLC_83XX_IDC_DEV_QUISCENT:
1860                 break;
1861         default:
1862                 ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
1863         }
1864
1865         idc->prev_state = idc->curr_state;
1866         if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
1867                 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
1868                                      idc->delay);
1869 }
1870
1871 static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
1872 {
1873         while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1874                 msleep(20);
1875
1876         clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
1877         clear_bit(__QLCNIC_RESETTING, &adapter->state);
1878         cancel_delayed_work_sync(&adapter->fw_work);
1879 }
1880
1881 static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
1882                                       struct qlcnic_vf_info *vf, u16 vlan_id)
1883 {
1884         int i, err = -EINVAL;
1885
1886         if (!vf->sriov_vlans)
1887                 return err;
1888
1889         mutex_lock(&vf->vlan_list_lock);
1890
1891         for (i = 0; i < sriov->num_allowed_vlans; i++) {
1892                 if (vf->sriov_vlans[i] == vlan_id) {
1893                         err = 0;
1894                         break;
1895                 }
1896         }
1897
1898         mutex_unlock(&vf->vlan_list_lock);
1899         return err;
1900 }
1901
1902 static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
1903                                            struct qlcnic_vf_info *vf)
1904 {
1905         int err = 0;
1906
1907         mutex_lock(&vf->vlan_list_lock);
1908
1909         if (vf->num_vlan >= sriov->num_allowed_vlans)
1910                 err = -EINVAL;
1911
1912         mutex_unlock(&vf->vlan_list_lock);
1913         return err;
1914 }
1915
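/* Validate a guest VLAN add/delete request: require guest VLAN mode,
 * enforce the per-VF VLAN limit (and the PF's allowed-VLAN list when one
 * is set) on add, and require the VLAN to be configured on delete.
 */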
1916 static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
1917                                           u16 vid, u8 enable)
1918 {
1919         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1920         struct qlcnic_vf_info *vf;
1921         bool vlan_exist;
1922         u8 allowed = 0;
1923         int i;
1924
1925         vf = &adapter->ahw->sriov->vf_info[0];
1926         vlan_exist = qlcnic_sriov_check_any_vlan(vf);
1927         if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
1928                 return -EINVAL;
1929
1930         if (enable) {
1931                 if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
1932                         return -EINVAL;
1933
1934                 if (qlcnic_sriov_validate_num_vlans(sriov, vf))
1935                         return -EINVAL;
1936
1937                 if (sriov->any_vlan) {
1938                         for (i = 0; i < sriov->num_allowed_vlans; i++) {
1939                                 if (sriov->allowed_vlans[i] == vid)
1940                                         allowed = 1;
1941                         }
1942
1943                         if (!allowed)
1944                                 return -EINVAL;
1945                 }
1946         } else {
1947                 if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
1948                         return -EINVAL;
1949         }
1950
1951         return 0;
1952 }
1953
1954 static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
1955                                         enum qlcnic_vlan_operations opcode)
1956 {
1957         struct qlcnic_adapter *adapter = vf->adapter;
1958         struct qlcnic_sriov *sriov;
1959
1960         sriov = adapter->ahw->sriov;
1961
1962         if (!vf->sriov_vlans)
1963                 return;
1964
1965         mutex_lock(&vf->vlan_list_lock);
1966
1967         switch (opcode) {
1968         case QLC_VLAN_ADD:
1969                 qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
1970                 break;
1971         case QLC_VLAN_DELETE:
1972                 qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
1973                 break;
1974         default:
1975                 netdev_err(adapter->netdev, "Invalid VLAN operation\n");
1976         }
1977
1978         mutex_unlock(&vf->vlan_list_lock);
1980 }
1981
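/* Ask the PF to add or delete a guest VLAN for this VF; on success drop
 * the stale MAC list, update the local VLAN bookkeeping and refresh the
 * RX filters.
 */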
1982 int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
1983                                    u16 vid, u8 enable)
1984 {
1985         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1986         struct qlcnic_vf_info *vf;
1987         struct qlcnic_cmd_args cmd;
1988         int ret;
1989
1990         if (vid == 0)
1991                 return 0;
1992
1993         vf = &adapter->ahw->sriov->vf_info[0];
1994         ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
1995         if (ret)
1996                 return ret;
1997
1998         ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
1999                                              QLCNIC_BC_CMD_CFG_GUEST_VLAN);
2000         if (ret)
2001                 return ret;
2002
        cmd.req.arg[1] = (enable & 1) | (vid << 16);
2004
2005         qlcnic_sriov_cleanup_async_list(&sriov->bc);
2006         ret = qlcnic_issue_cmd(adapter, &cmd);
2007         if (ret) {
2008                 dev_err(&adapter->pdev->dev,
2009                         "Failed to configure guest VLAN, err=%d\n", ret);
2010         } else {
2011                 qlcnic_free_mac_list(adapter);
2012
2013                 if (enable)
2014                         qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
2015                 else
2016                         qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
2017
2018                 qlcnic_set_multi(adapter->netdev);
2019         }
2020
2021         qlcnic_free_mbx_args(&cmd);
2022         return ret;
2023 }
2024
2025 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
2026 {
2027         struct list_head *head = &adapter->mac_list;
2028         struct qlcnic_mac_vlan_list *cur;
2029
2030         while (!list_empty(head)) {
2031                 cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
2032                 qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
2033                                           QLCNIC_MAC_DEL);
2034                 list_del(&cur->list);
2035                 kfree(cur);
2036         }
2037 }
2038
2040 int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
2041 {
2042         struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2043         struct net_device *netdev = adapter->netdev;
2044         int retval;
2045
2046         netif_device_detach(netdev);
2047         qlcnic_cancel_idc_work(adapter);
2048
2049         if (netif_running(netdev))
2050                 qlcnic_down(adapter, netdev);
2051
2052         qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
2053         qlcnic_sriov_cfg_bc_intr(adapter, 0);
2054         qlcnic_83xx_disable_mbx_intr(adapter);
2055         cancel_delayed_work_sync(&adapter->idc_aen_work);
2056
2057         retval = pci_save_state(pdev);
2058         if (retval)
2059                 return retval;
2060
2061         return 0;
2062 }
2063
2064 int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
2065 {
2066         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
2067         struct net_device *netdev = adapter->netdev;
2068         int err;
2069
2070         set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
2071         qlcnic_83xx_enable_mbx_interrupt(adapter);
2072         err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
2073         if (err)
2074                 return err;
2075
2076         err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
2077         if (!err) {
2078                 if (netif_running(netdev)) {
2079                         err = qlcnic_up(adapter, netdev);
2080                         if (!err)
2081                                 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
2082                 }
2083         }
2084
2085         netif_device_attach(netdev);
2086         qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
2087                              idc->delay);
2088         return err;
2089 }
2090
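/* Allocate each VF's guest VLAN id array (one slot per allowed VLAN) */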
2091 void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
2092 {
2093         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2094         struct qlcnic_vf_info *vf;
2095         int i;
2096
2097         for (i = 0; i < sriov->num_vfs; i++) {
2098                 vf = &sriov->vf_info[i];
2099                 vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
2100                                           sizeof(*vf->sriov_vlans), GFP_KERNEL);
2101         }
2102 }
2103
2104 void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
2105 {
2106         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2107         struct qlcnic_vf_info *vf;
2108         int i;
2109
2110         for (i = 0; i < sriov->num_vfs; i++) {
2111                 vf = &sriov->vf_info[i];
2112                 kfree(vf->sriov_vlans);
2113                 vf->sriov_vlans = NULL;
2114         }
2115 }
2116
2117 void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
2118                               struct qlcnic_vf_info *vf, u16 vlan_id)
2119 {
2120         int i;
2121
2122         for (i = 0; i < sriov->num_allowed_vlans; i++) {
2123                 if (!vf->sriov_vlans[i]) {
2124                         vf->sriov_vlans[i] = vlan_id;
2125                         vf->num_vlan++;
2126                         return;
2127                 }
2128         }
2129 }
2130
2131 void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
2132                               struct qlcnic_vf_info *vf, u16 vlan_id)
2133 {
2134         int i;
2135
2136         for (i = 0; i < sriov->num_allowed_vlans; i++) {
2137                 if (vf->sriov_vlans[i] == vlan_id) {
2138                         vf->sriov_vlans[i] = 0;
2139                         vf->num_vlan--;
2140                         return;
2141                 }
2142         }
2143 }
2144
2145 bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
2146 {
        bool has_vlan = false;

        mutex_lock(&vf->vlan_list_lock);

        if (vf->num_vlan)
                has_vlan = true;

        mutex_unlock(&vf->vlan_list_lock);
        return has_vlan;
2156 }