/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"

#define DRV_NAME        "thunder-nicvf"
#define DRV_VERSION     "1.0"

/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
                         PCI_DEVICE_ID_THUNDER_NIC_VF,
                         PCI_VENDOR_ID_CAVIUM,
                         PCI_SUBSYS_DEVID_88XX_NIC_VF) },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
                         PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
                         PCI_VENDOR_ID_CAVIUM,
                         PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
                         PCI_DEVICE_ID_THUNDER_NIC_VF,
                         PCI_VENDOR_ID_CAVIUM,
                         PCI_SUBSYS_DEVID_81XX_NIC_VF) },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
                         PCI_DEVICE_ID_THUNDER_NIC_VF,
                         PCI_VENDOR_ID_CAVIUM,
                         PCI_SUBSYS_DEVID_83XX_NIC_VF) },
        { 0, }  /* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nicvf_id_table);

static int debug = 0x00;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");

static int cpi_alg = CPI_ALG_NONE;
module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
                 "CPI algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
        if (nic->sqs_mode)
                return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
        else
                return qidx;
}

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * explicit ordering operations which in this case are redundant, and only
 * add overhead.
 */

/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
        writeq_relaxed(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
        return readq_relaxed(nic->reg_base + offset);
}

void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
                           u64 qidx, u64 val)
{
        void __iomem *addr = nic->reg_base + offset;

        writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
        void __iomem *addr = nic->reg_base + offset;

        return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
}

/* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
        u64 *msg = (u64 *)mbx;

        nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
        nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}
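
/* Synchronous mailbox send: write the 16-byte message, then poll
 * pf_acked/pf_nacked (set from the mailbox IRQ handler) until the PF
 * responds or NIC_MBOX_MSG_TIMEOUT expires.
 */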
int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
        int timeout = NIC_MBOX_MSG_TIMEOUT;
        int sleep = 10;

        nic->pf_acked = false;
        nic->pf_nacked = false;

        nicvf_write_to_mbx(nic, mbx);

        /* Wait for the message to be ACKed, timeout 2 sec */
        while (!nic->pf_acked) {
                if (nic->pf_nacked) {
                        netdev_err(nic->netdev,
                                   "PF NACK to mbox msg 0x%02x from VF%d\n",
                                   (mbx->msg.msg & 0xFF), nic->vf_id);
                        return -EINVAL;
                }
                msleep(sleep);
                if (nic->pf_acked)
                        break;
                timeout -= sleep;
                if (!timeout) {
                        netdev_err(nic->netdev,
                                   "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
                                   (mbx->msg.msg & 0xFF), nic->vf_id);
                        return -EBUSY;
                }
        }
        return 0;
}

/* Checks if VF is able to communicate with PF
 * and also gets the VNIC number this VF is associated to.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
        union nic_mbx mbx = {};

        mbx.msg.msg = NIC_MBOX_MSG_READY;
        if (nicvf_send_msg_to_pf(nic, &mbx)) {
                netdev_err(nic->netdev,
                           "PF didn't respond to READY msg\n");
                return 0;
        }

        return 1;
}

static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
        if (bgx->rx)
                nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
        else
                nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}
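
/* Mailbox interrupt handler: copy the message out of the mailbox
 * registers into a local 'union nic_mbx' and dispatch on its type.
 * Most cases just record the PF's answer and set pf_acked/pf_nacked,
 * which nicvf_send_msg_to_pf() polls on.
 */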
static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
        union nic_mbx mbx = {};
        u64 *mbx_data;
        u64 mbx_addr;
        int i;

        mbx_addr = NIC_VF_PF_MAILBOX_0_1;
        mbx_data = (u64 *)&mbx;

        for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
                *mbx_data = nicvf_reg_read(nic, mbx_addr);
                mbx_data++;
                mbx_addr += sizeof(u64);
        }

        netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
        switch (mbx.msg.msg) {
        case NIC_MBOX_MSG_READY:
                nic->pf_acked = true;
                nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
                nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
                nic->node = mbx.nic_cfg.node_id;
                if (!nic->set_mac_pending)
                        ether_addr_copy(nic->netdev->dev_addr,
                                        mbx.nic_cfg.mac_addr);
                nic->sqs_mode = mbx.nic_cfg.sqs_mode;
                nic->loopback_supported = mbx.nic_cfg.loopback_supported;
                nic->link_up = false;
                nic->duplex = 0;
                nic->speed = 0;
                break;
        case NIC_MBOX_MSG_ACK:
                nic->pf_acked = true;
                break;
        case NIC_MBOX_MSG_NACK:
                nic->pf_nacked = true;
                break;
        case NIC_MBOX_MSG_RSS_SIZE:
                nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
                nic->pf_acked = true;
                break;
        case NIC_MBOX_MSG_BGX_STATS:
                nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
                nic->pf_acked = true;
                break;
        case NIC_MBOX_MSG_BGX_LINK_CHANGE:
                nic->pf_acked = true;
                nic->link_up = mbx.link_status.link_up;
                nic->duplex = mbx.link_status.duplex;
                nic->speed = mbx.link_status.speed;
                nic->mac_type = mbx.link_status.mac_type;
                if (nic->link_up) {
                        netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
                                    nic->netdev->name, nic->speed,
                                    nic->duplex == DUPLEX_FULL ?
                                    "Full duplex" : "Half duplex");
                        netif_carrier_on(nic->netdev);
                        netif_tx_start_all_queues(nic->netdev);
                } else {
                        netdev_info(nic->netdev, "%s: Link is Down\n",
                                    nic->netdev->name);
                        netif_carrier_off(nic->netdev);
                        netif_tx_stop_all_queues(nic->netdev);
                }
                break;
        case NIC_MBOX_MSG_ALLOC_SQS:
                nic->sqs_count = mbx.sqs_alloc.qs_count;
                nic->pf_acked = true;
                break;
        case NIC_MBOX_MSG_SNICVF_PTR:
                /* Primary VF: make note of secondary VF's pointer
                 * to be used while packet transmission.
                 */
                nic->snicvf[mbx.nicvf.sqs_id] =
                        (struct nicvf *)mbx.nicvf.nicvf;
                nic->pf_acked = true;
                break;
        case NIC_MBOX_MSG_PNICVF_PTR:
                /* Secondary VF/Qset: make note of primary VF's pointer
                 * to be used while packet reception, to handover packet
                 * to primary VF's netdev.
                 */
                nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
                nic->pf_acked = true;
                break;
        case NIC_MBOX_MSG_PFC:
                nic->pfc.autoneg = mbx.pfc.autoneg;
                nic->pfc.fc_rx = mbx.pfc.fc_rx;
                nic->pfc.fc_tx = mbx.pfc.fc_tx;
                nic->pf_acked = true;
                break;
        default:
                netdev_err(nic->netdev,
                           "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
                break;
        }
        nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}

static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
{
        union nic_mbx mbx = {};

        mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
        mbx.mac.vf_id = nic->vf_id;
        ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);

        return nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_config_cpi(struct nicvf *nic)
{
        union nic_mbx mbx = {};

        mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
        mbx.cpi_cfg.vf_id = nic->vf_id;
        mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
        mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

        nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_rss_size(struct nicvf *nic)
{
        union nic_mbx mbx = {};

        mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
        mbx.rss_size.vf_id = nic->vf_id;
        nicvf_send_msg_to_pf(nic, &mbx);
}
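
/* The RSS indirection table may not fit in a single mailbox message,
 * so it is sent in chunks of RSS_IND_TBL_LEN_PER_MBX_MSG entries; the
 * first chunk is an RSS_CFG message and the rest are RSS_CFG_CONT.
 */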
void nicvf_config_rss(struct nicvf *nic)
{
        union nic_mbx mbx = {};
        struct nicvf_rss_info *rss = &nic->rss_info;
        int ind_tbl_len = rss->rss_size;
        int i, nextq = 0;

        mbx.rss_cfg.vf_id = nic->vf_id;
        mbx.rss_cfg.hash_bits = rss->hash_bits;
        while (ind_tbl_len) {
                mbx.rss_cfg.tbl_offset = nextq;
                mbx.rss_cfg.tbl_len = min(ind_tbl_len,
                                          RSS_IND_TBL_LEN_PER_MBX_MSG);
                mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
                          NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

                for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
                        mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

                nicvf_send_msg_to_pf(nic, &mbx);

                ind_tbl_len -= mbx.rss_cfg.tbl_len;
        }
}

void nicvf_set_rss_key(struct nicvf *nic)
{
        struct nicvf_rss_info *rss = &nic->rss_info;
        u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
        int idx;

        for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
                nicvf_reg_write(nic, key_addr, rss->key[idx]);
                key_addr += sizeof(u64);
        }
}

static int nicvf_rss_init(struct nicvf *nic)
{
        struct nicvf_rss_info *rss = &nic->rss_info;
        int idx;

        nicvf_get_rss_size(nic);

        if (cpi_alg != CPI_ALG_NONE) {
                rss->enable = false;
                rss->hash_bits = 0;
                return 0;
        }

        rss->enable = true;

        netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
        nicvf_set_rss_key(nic);

        rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
        nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

        rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));

        for (idx = 0; idx < rss->rss_size; idx++)
                rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
                                                               nic->rx_queues);
        nicvf_config_rss(nic);
        return 1;
}

/* Request PF to allocate additional Qsets */
static void nicvf_request_sqs(struct nicvf *nic)
{
        union nic_mbx mbx = {};
        int sqs;
        int sqs_count = nic->sqs_count;
        int rx_queues = 0, tx_queues = 0;

        /* Only primary VF should request */
        if (nic->sqs_mode || !nic->sqs_count)
                return;

        mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
        mbx.sqs_alloc.vf_id = nic->vf_id;
        mbx.sqs_alloc.qs_count = nic->sqs_count;
        if (nicvf_send_msg_to_pf(nic, &mbx)) {
                /* No response from PF */
                nic->sqs_count = 0;
                return;
        }

        /* Return if no Secondary Qsets available */
        if (!nic->sqs_count)
                return;

        if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
                rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
        if (nic->tx_queues > MAX_SND_QUEUES_PER_QS)
                tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS;

        /* Set no of Rx/Tx queues in each of the SQsets */
        for (sqs = 0; sqs < nic->sqs_count; sqs++) {
                mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
                mbx.nicvf.vf_id = nic->vf_id;
                mbx.nicvf.sqs_id = sqs;
                nicvf_send_msg_to_pf(nic, &mbx);

                nic->snicvf[sqs]->sqs_id = sqs;
                if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
                        nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
                        rx_queues -= MAX_RCV_QUEUES_PER_QS;
                } else {
                        nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
                        rx_queues = 0;
                }

                if (tx_queues > MAX_SND_QUEUES_PER_QS) {
                        nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
                        tx_queues -= MAX_SND_QUEUES_PER_QS;
                } else {
                        nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
                        tx_queues = 0;
                }

                nic->snicvf[sqs]->qs->cq_cnt =
                max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);

                /* Initialize secondary Qset's queues and its interrupts */
                nicvf_open(nic->snicvf[sqs]->netdev);
        }

        /* Update stack with actual Rx/Tx queue count allocated */
        if (sqs_count != nic->sqs_count)
                nicvf_set_real_num_queues(nic->netdev,
                                          nic->tx_queues, nic->rx_queues);
}

/* Send this Qset's nicvf pointer to the PF.
 * The PF in turn sends the primary VF's nicvf struct to secondary
 * Qsets/VFs so that packets received by these Qsets can use the
 * primary VF's netdev.
 */
static void nicvf_send_vf_struct(struct nicvf *nic)
{
        union nic_mbx mbx = {};

        mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
        mbx.nicvf.sqs_mode = nic->sqs_mode;
        mbx.nicvf.nicvf = (u64)nic;
        nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_primary_vf_struct(struct nicvf *nic)
{
        union nic_mbx mbx = {};

        mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
        nicvf_send_msg_to_pf(nic, &mbx);
}

int nicvf_set_real_num_queues(struct net_device *netdev,
                              int tx_queues, int rx_queues)
{
        int err = 0;

        err = netif_set_real_num_tx_queues(netdev, tx_queues);
        if (err) {
                netdev_err(netdev,
                           "Failed to set no of Tx queues: %d\n", tx_queues);
                return err;
        }

        err = netif_set_real_num_rx_queues(netdev, rx_queues);
        if (err)
                netdev_err(netdev,
                           "Failed to set no of Rx queues: %d\n", rx_queues);
        return err;
}

static int nicvf_init_resources(struct nicvf *nic)
{
        int err;

        /* Enable Qset */
        nicvf_qset_config(nic, true);

        /* Initialize queues and HW for data transfer */
        err = nicvf_config_data_transfer(nic, true);
        if (err) {
                netdev_err(nic->netdev,
                           "Failed to alloc/config VF's QSet resources\n");
                return err;
        }

        return 0;
}
static void nicvf_snd_pkt_handler(struct net_device *netdev,
                                  struct cqe_send_t *cqe_tx,
                                  int cqe_type, int budget,
                                  unsigned int *tx_pkts, unsigned int *tx_bytes)
{
        struct sk_buff *skb = NULL;
        struct nicvf *nic = netdev_priv(netdev);
        struct snd_queue *sq;
        struct sq_hdr_subdesc *hdr;
        struct sq_hdr_subdesc *tso_sqe;

        sq = &nic->qs->sq[cqe_tx->sq_idx];

        hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
        if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
                return;

        netdev_dbg(nic->netdev,
                   "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
                   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
                   cqe_tx->sqe_ptr, hdr->subdesc_cnt);

        nicvf_check_cqe_tx_errs(nic, cqe_tx);
        skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
        if (skb) {
                /* Check for dummy descriptor used for HW TSO offload on 88xx */
                if (hdr->dont_send) {
                        /* Get actual TSO descriptors and free them */
                        tso_sqe =
                         (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
                        nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
                }
                nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
                prefetch(skb);
                (*tx_pkts)++;
                *tx_bytes += skb->len;
                napi_consume_skb(skb, budget);
                sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
        } else {
                /* In case of SW TSO on 88xx, only the last segment will have
                 * an SKB attached, so just free the SQEs here.
                 */
                if (!nic->hw_tso)
                        nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
        }
}

static inline void nicvf_set_rxhash(struct net_device *netdev,
                                    struct cqe_rx_t *cqe_rx,
                                    struct sk_buff *skb)
{
        u8 hash_type;
        u32 hash;

        if (!(netdev->features & NETIF_F_RXHASH))
                return;

        switch (cqe_rx->rss_alg) {
        case RSS_ALG_TCP_IP:
        case RSS_ALG_UDP_IP:
                hash_type = PKT_HASH_TYPE_L4;
                hash = cqe_rx->rss_tag;
                break;
        case RSS_ALG_IP:
                hash_type = PKT_HASH_TYPE_L3;
                hash = cqe_rx->rss_tag;
                break;
        default:
                hash_type = PKT_HASH_TYPE_NONE;
                hash = 0;
        }

        skb_set_hash(skb, hash, hash_type);
}
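
/* RX handler: runs on the Qset that received the packet but always
 * hands the skb to the primary VF's netdev; error packets are freed
 * here instead of being passed up the stack.
 */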
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
                                  struct napi_struct *napi,
                                  struct cqe_rx_t *cqe_rx)
{
        struct sk_buff *skb;
        struct nicvf *nic = netdev_priv(netdev);
        int err = 0;
        int rq_idx;

        rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);

        if (nic->sqs_mode) {
                /* Use primary VF's 'nicvf' struct */
                nic = nic->pnicvf;
                netdev = nic->netdev;
        }

        /* Check for errors */
        err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
        if (err && !cqe_rx->rb_cnt)
                return;

        skb = nicvf_get_rcv_skb(nic, cqe_rx);
        if (!skb) {
                netdev_dbg(nic->netdev, "Packet not received\n");
                return;
        }

        if (netif_msg_pktdata(nic)) {
                netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
                            skb, skb->len);
                print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
                               skb->data, skb->len, true);
        }

        /* If error packet, drop it here */
        if (err) {
                dev_kfree_skb_any(skb);
                return;
        }

        nicvf_set_rxhash(netdev, cqe_rx, skb);

        skb_record_rx_queue(skb, rq_idx);
        if (netdev->hw_features & NETIF_F_RXCSUM) {
                /* HW by default verifies TCP/UDP/SCTP checksums */
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else {
                skb_checksum_none_assert(skb);
        }

        skb->protocol = eth_type_trans(skb, netdev);

        /* Check for stripped VLAN */
        if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       ntohs((__force __be16)cqe_rx->vlan_tci));

        if (napi && (netdev->features & NETIF_F_GRO))
                napi_gro_receive(napi, skb);
        else
                netif_receive_skb(skb);
}
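
/* Drain a completion queue: process up to 'budget' RX CQEs (TX
 * completions are not budgeted), ring the doorbell so the HW can
 * reuse the processed CQEs, and wake the TX queue if it was stopped
 * on SQ-full and descriptors have since been freed.
 */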
static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
                                 struct napi_struct *napi, int budget)
{
        int processed_cqe, work_done = 0, tx_done = 0;
        int cqe_count, cqe_head;
        struct nicvf *nic = netdev_priv(netdev);
        struct queue_set *qs = nic->qs;
        struct cmp_queue *cq = &qs->cq[cq_idx];
        struct cqe_rx_t *cq_desc;
        struct netdev_queue *txq;
        struct snd_queue *sq;
        unsigned int tx_pkts = 0, tx_bytes = 0;

        spin_lock_bh(&cq->lock);
loop:
        processed_cqe = 0;
        /* Get no of valid CQ entries to process */
        cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
        cqe_count &= CQ_CQE_COUNT;
        if (!cqe_count)
                goto done;

        /* Get head of the valid CQ entries */
        cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
        cqe_head &= 0xFFFF;

        netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
                   __func__, cq_idx, cqe_count, cqe_head);
        while (processed_cqe < cqe_count) {
                /* Get the CQ descriptor */
                cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
                cqe_head++;
                cqe_head &= (cq->dmem.q_len - 1);
                /* Initiate prefetch for next descriptor */
                prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

                if ((work_done >= budget) && napi &&
                    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
                        break;
                }

                netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
                           cq_idx, cq_desc->cqe_type);
                switch (cq_desc->cqe_type) {
                case CQE_TYPE_RX:
                        nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
                        work_done++;
                        break;
                case CQE_TYPE_SEND:
                        nicvf_snd_pkt_handler(netdev,
                                              (void *)cq_desc, CQE_TYPE_SEND,
                                              budget, &tx_pkts, &tx_bytes);
                        tx_done++;
                        break;
                case CQE_TYPE_INVALID:
                case CQE_TYPE_RX_SPLIT:
                case CQE_TYPE_RX_TCP:
                case CQE_TYPE_SEND_PTP:
                        /* Ignore for now */
                        break;
                }
                processed_cqe++;
        }
        netdev_dbg(nic->netdev,
                   "%s CQ%d processed_cqe %d work_done %d budget %d\n",
                   __func__, cq_idx, processed_cqe, work_done, budget);

        /* Ring doorbell to inform H/W to reuse processed CQEs */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
                              cq_idx, processed_cqe);

        if ((work_done < budget) && napi)
                goto loop;

done:
        /* Wake up TXQ if it was stopped earlier due to SQ full */
        sq = &nic->qs->sq[cq_idx];
        if (tx_done ||
            (atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) {
                netdev = nic->pnicvf->netdev;
                txq = netdev_get_tx_queue(netdev,
                                          nicvf_netdev_qidx(nic, cq_idx));
                if (tx_pkts)
                        netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

                /* To read updated queue and carrier status */
                smp_mb();
                if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
                        netif_tx_wake_queue(txq);
                        nic = nic->pnicvf;
                        this_cpu_inc(nic->drv_stats->txq_wake);
                        if (netif_msg_tx_err(nic))
                                netdev_warn(netdev,
                                            "%s: Transmit queue wakeup SQ%d\n",
                                            netdev->name, cq_idx);
                }
        }

        spin_unlock_bh(&cq->lock);
        return work_done;
}
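
/* NAPI poll handler: once the CQ is drained below budget, exit poll
 * mode, sync the CQ head register and re-enable the CQ interrupt.
 */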
static int nicvf_poll(struct napi_struct *napi, int budget)
{
        u64 cq_head;
        int work_done = 0;
        struct net_device *netdev = napi->dev;
        struct nicvf *nic = netdev_priv(netdev);
        struct nicvf_cq_poll *cq;

        cq = container_of(napi, struct nicvf_cq_poll, napi);
        work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);

        if (work_done < budget) {
                /* Slow packet rate, exit polling */
                napi_complete_done(napi, work_done);
                /* Re-enable interrupts */
                cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
                                               cq->cq_idx);
                nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
                nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
                                      cq->cq_idx, cq_head);
                nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
        }
        return work_done;
}

/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void nicvf_handle_qs_err(unsigned long data)
{
        struct nicvf *nic = (struct nicvf *)data;
        struct queue_set *qs = nic->qs;
        int qidx;
        u64 status;

        netif_tx_disable(nic->netdev);

        /* Check if it is CQ err */
        for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
                status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
                                              qidx);
                if (!(status & CQ_ERR_MASK))
                        continue;
                /* Process already queued CQEs and reconfig CQ */
                nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
                nicvf_sq_disable(nic, qidx);
                nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
                nicvf_cmp_queue_config(nic, qs, qidx, true);
                nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
                nicvf_sq_enable(nic, &qs->sq[qidx], qidx);

                nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
        }

        netif_tx_start_all_queues(nic->netdev);
        /* Re-enable Qset error interrupt */
        nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}

static void nicvf_dump_intr_status(struct nicvf *nic)
{
        if (netif_msg_intr(nic))
                netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
                            nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT));
}

static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
        struct nicvf *nic = (struct nicvf *)nicvf_irq;
        u64 intr;

        nicvf_dump_intr_status(nic);

        intr = nicvf_reg_read(nic, NIC_VF_INT);
        /* Check for spurious interrupt */
        if (!(intr & NICVF_INTR_MBOX_MASK))
                return IRQ_HANDLED;

        nicvf_handle_mbx_intr(nic);

        return IRQ_HANDLED;
}

static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
{
        struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
        struct nicvf *nic = cq_poll->nicvf;
        int qidx = cq_poll->cq_idx;

        nicvf_dump_intr_status(nic);

        /* Disable interrupts */
        nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);

        /* Schedule NAPI */
        napi_schedule_irqoff(&cq_poll->napi);

        /* Clear interrupt */
        nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

        return IRQ_HANDLED;
}

static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
{
        struct nicvf *nic = (struct nicvf *)nicvf_irq;
        u8 qidx;

        nicvf_dump_intr_status(nic);

        /* Disable RBDR interrupt and schedule softirq */
        for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
                if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
                        continue;
                nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
                tasklet_hi_schedule(&nic->rbdr_task);
                /* Clear interrupt */
                nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
        }

        return IRQ_HANDLED;
}

static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
{
        struct nicvf *nic = (struct nicvf *)nicvf_irq;

        nicvf_dump_intr_status(nic);

        /* Disable Qset err interrupt and schedule softirq */
        nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
        tasklet_hi_schedule(&nic->qs_err_task);
        nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

        return IRQ_HANDLED;
}

static int nicvf_enable_msix(struct nicvf *nic)
{
        int ret, vec;

        nic->num_vec = NIC_VF_MSIX_VECTORS;

        for (vec = 0; vec < nic->num_vec; vec++)
                nic->msix_entries[vec].entry = vec;

        ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
        if (ret) {
                netdev_err(nic->netdev,
                           "Req for #%d msix vectors failed\n", nic->num_vec);
                return 0;
        }
        nic->msix_enabled = 1;
        return 1;
}

static void nicvf_disable_msix(struct nicvf *nic)
{
        if (nic->msix_enabled) {
                pci_disable_msix(nic->pdev);
                nic->msix_enabled = 0;
                nic->num_vec = 0;
        }
}
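
/* Spread IRQ affinity: CQ vectors are pinned to CPUs 1..N local to
 * nic->node, leaving CPU0 for RBDR and the remaining vectors.
 */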
static void nicvf_set_irq_affinity(struct nicvf *nic)
{
        int vec, cpu;
        int irqnum;

        for (vec = 0; vec < nic->num_vec; vec++) {
                if (!nic->irq_allocated[vec])
                        continue;

                if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL))
                        return;

                if (vec < NICVF_INTR_ID_SQ)
                        /* Leave CPU0 for RBDR and other interrupts */
                        cpu = nicvf_netdev_qidx(nic, vec) + 1;
                else
                        cpu = 0;

                cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
                                nic->affinity_mask[vec]);
                irqnum = nic->msix_entries[vec].vector;
                irq_set_affinity_hint(irqnum, nic->affinity_mask[vec]);
        }
}

static int nicvf_register_interrupts(struct nicvf *nic)
{
        int irq, ret = 0;
        int vector;

        for_each_cq_irq(irq)
                sprintf(nic->irq_name[irq], "%s-rxtx-%d",
                        nic->pnicvf->netdev->name,
                        nicvf_netdev_qidx(nic, irq));

        for_each_sq_irq(irq)
                sprintf(nic->irq_name[irq], "%s-sq-%d",
                        nic->pnicvf->netdev->name,
                        nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ));

        for_each_rbdr_irq(irq)
                sprintf(nic->irq_name[irq], "%s-rbdr-%d",
                        nic->pnicvf->netdev->name,
                        nic->sqs_mode ? (nic->sqs_id + 1) : 0);

        /* Register CQ interrupts */
        for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
                vector = nic->msix_entries[irq].vector;
                ret = request_irq(vector, nicvf_intr_handler,
                                  0, nic->irq_name[irq], nic->napi[irq]);
                if (ret)
                        goto err;
                nic->irq_allocated[irq] = true;
        }

        /* Register RBDR interrupt */
        for (irq = NICVF_INTR_ID_RBDR;
             irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
                vector = nic->msix_entries[irq].vector;
                ret = request_irq(vector, nicvf_rbdr_intr_handler,
                                  0, nic->irq_name[irq], nic);
                if (ret)
                        goto err;
                nic->irq_allocated[irq] = true;
        }

        /* Register QS error interrupt */
        sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d",
                nic->pnicvf->netdev->name,
                nic->sqs_mode ? (nic->sqs_id + 1) : 0);
        irq = NICVF_INTR_ID_QS_ERR;
        ret = request_irq(nic->msix_entries[irq].vector,
                          nicvf_qs_err_intr_handler,
                          0, nic->irq_name[irq], nic);
        if (ret)
                goto err;

        nic->irq_allocated[irq] = true;

        /* Set IRQ affinities */
        nicvf_set_irq_affinity(nic);

err:
        if (ret)
                netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);

        return ret;
}

static void nicvf_unregister_interrupts(struct nicvf *nic)
{
        int irq;

        /* Free registered interrupts */
        for (irq = 0; irq < nic->num_vec; irq++) {
                if (!nic->irq_allocated[irq])
                        continue;

                irq_set_affinity_hint(nic->msix_entries[irq].vector, NULL);
                free_cpumask_var(nic->affinity_mask[irq]);

                if (irq < NICVF_INTR_ID_SQ)
                        free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
                else
                        free_irq(nic->msix_entries[irq].vector, nic);

                nic->irq_allocated[irq] = false;
        }

        /* Disable MSI-X */
        nicvf_disable_msix(nic);
}

/* Initialize MSIX vectors and register MISC interrupt.
 * Send READY message to PF to check if it's alive
 */
static int nicvf_register_misc_interrupt(struct nicvf *nic)
{
        int ret = 0;
        int irq = NICVF_INTR_ID_MISC;

        /* Return if mailbox interrupt is already registered */
        if (nic->msix_enabled)
                return 0;

        /* Enable MSI-X */
        if (!nicvf_enable_msix(nic))
                return 1;

        sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
        /* Register Misc interrupt */
        ret = request_irq(nic->msix_entries[irq].vector,
                          nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);

        if (ret)
                return ret;
        nic->irq_allocated[irq] = true;

        /* Enable mailbox interrupt */
        nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

        /* Check if VF is able to communicate with PF */
        if (!nicvf_check_pf_ready(nic)) {
                nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
                nicvf_unregister_interrupts(nic);
                return 1;
        }

        return 0;
}
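
/* Transmit path: a queue index beyond MAX_SND_QUEUES_PER_QS belongs
 * to a secondary Qset, so the skb is appended to that Qset's SQ. If
 * the SQ is full, the netdev TX queue is stopped and the packet is
 * returned with NETDEV_TX_BUSY so the stack requeues it.
 */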
static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct nicvf *nic = netdev_priv(netdev);
        int qid = skb_get_queue_mapping(skb);
        struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
        struct nicvf *snic;
        struct snd_queue *sq;
        int tmp;

        /* Check for minimum packet length */
        if (skb->len <= ETH_HLEN) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        snic = nic;
        /* Get secondary Qset's SQ structure */
        if (qid >= MAX_SND_QUEUES_PER_QS) {
                tmp = qid / MAX_SND_QUEUES_PER_QS;
                snic = (struct nicvf *)nic->snicvf[tmp - 1];
                if (!snic) {
                        netdev_warn(nic->netdev,
                                    "Secondary Qset#%d's ptr not initialized\n",
                                    tmp - 1);
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
                }
                qid = qid % MAX_SND_QUEUES_PER_QS;
        }

        sq = &snic->qs->sq[qid];
        if (!netif_tx_queue_stopped(txq) &&
            !nicvf_sq_append_skb(snic, sq, skb, qid)) {
                netif_tx_stop_queue(txq);

                /* Barrier, so that stop_queue is visible to other CPUs */
                smp_mb();

                /* Check again, in case another CPU freed descriptors */
                if (atomic_read(&sq->free_cnt) > MIN_SQ_DESC_PER_PKT_XMIT) {
                        netif_tx_wake_queue(txq);
                } else {
                        this_cpu_inc(nic->drv_stats->txq_stop);
                        if (netif_msg_tx_err(nic))
                                netdev_warn(netdev,
                                            "%s: Transmit ring full, stopping SQ%d\n",
                                            netdev->name, qid);
                }
                return NETDEV_TX_BUSY;
        }

        return NETDEV_TX_OK;
}

static inline void nicvf_free_cq_poll(struct nicvf *nic)
{
        struct nicvf_cq_poll *cq_poll;
        int qidx;

        for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
                cq_poll = nic->napi[qidx];
                if (!cq_poll)
                        continue;
                nic->napi[qidx] = NULL;
                kfree(cq_poll);
        }
}

int nicvf_stop(struct net_device *netdev)
{
        int irq, qidx;
        struct nicvf *nic = netdev_priv(netdev);
        struct queue_set *qs = nic->qs;
        struct nicvf_cq_poll *cq_poll = NULL;
        union nic_mbx mbx = {};

        mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
        nicvf_send_msg_to_pf(nic, &mbx);

        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(nic->netdev);
        nic->link_up = false;

        /* Teardown secondary qsets first */
        if (!nic->sqs_mode) {
                for (qidx = 0; qidx < nic->sqs_count; qidx++) {
                        if (!nic->snicvf[qidx])
                                continue;
                        nicvf_stop(nic->snicvf[qidx]->netdev);
                        nic->snicvf[qidx] = NULL;
                }
        }

        /* Disable RBDR & QS error interrupts */
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
                nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
                nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
        }
        nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
        nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

        /* Wait for pending IRQ handlers to finish */
        for (irq = 0; irq < nic->num_vec; irq++)
                synchronize_irq(nic->msix_entries[irq].vector);

        tasklet_kill(&nic->rbdr_task);
        tasklet_kill(&nic->qs_err_task);
        if (nic->rb_work_scheduled)
                cancel_delayed_work_sync(&nic->rbdr_work);

        for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
                cq_poll = nic->napi[qidx];
                if (!cq_poll)
                        continue;
                napi_synchronize(&cq_poll->napi);
                /* CQ intr is enabled while napi_complete,
                 * so disable it now
                 */
                nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
                nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
                napi_disable(&cq_poll->napi);
                netif_napi_del(&cq_poll->napi);
        }

        netif_tx_disable(netdev);

        for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
                netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));

        /* Free resources */
        nicvf_config_data_transfer(nic, false);

        /* Disable HW Qset */
        nicvf_qset_config(nic, false);

        /* Disable mailbox interrupt */
        nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

        nicvf_unregister_interrupts(nic);

        nicvf_free_cq_poll(nic);

        /* Clear multiqset info */
        nic->pnicvf = nic;

        return 0;
}

static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
        union nic_mbx mbx = {};

        mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
        mbx.frs.max_frs = mtu;
        mbx.frs.vf_id = nic->vf_id;

        return nicvf_send_msg_to_pf(nic, &mbx);
}
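
/* Bring-up order: register the mailbox IRQ, set up NAPI contexts and
 * tasklets, configure CPI/RSS/MTU via the PF, allocate and enable the
 * Qset, enable interrupts, then signal CFG_DONE to the PF.
 */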
int nicvf_open(struct net_device *netdev)
{
        int cpu, err, qidx;
        struct nicvf *nic = netdev_priv(netdev);
        struct queue_set *qs = nic->qs;
        struct nicvf_cq_poll *cq_poll = NULL;
        union nic_mbx mbx = {};

        netif_carrier_off(netdev);

        err = nicvf_register_misc_interrupt(nic);
        if (err)
                return err;

        /* Register NAPI handler for processing CQEs */
        for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
                cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
                if (!cq_poll) {
                        err = -ENOMEM;
                        goto napi_del;
                }
                cq_poll->cq_idx = qidx;
                cq_poll->nicvf = nic;
                netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
                               NAPI_POLL_WEIGHT);
                napi_enable(&cq_poll->napi);
                nic->napi[qidx] = cq_poll;
        }

        /* Check if we got MAC address from PF, else generate a random MAC */
        if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) {
                eth_hw_addr_random(netdev);
                nicvf_hw_set_mac_addr(nic, netdev);
        }

        if (nic->set_mac_pending) {
                nic->set_mac_pending = false;
                nicvf_hw_set_mac_addr(nic, netdev);
        }

        /* Init tasklet for handling Qset err interrupt */
        tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
                     (unsigned long)nic);

        /* Init RBDR tasklet which will refill RBDR */
        tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
                     (unsigned long)nic);
        INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);

        /* Configure CPI algorithm */
        nic->cpi_alg = cpi_alg;
        if (!nic->sqs_mode)
                nicvf_config_cpi(nic);

        nicvf_request_sqs(nic);
        if (nic->sqs_mode)
                nicvf_get_primary_vf_struct(nic);

        /* Configure receive side scaling and MTU */
        if (!nic->sqs_mode) {
                nicvf_rss_init(nic);
                err = nicvf_update_hw_max_frs(nic, netdev->mtu);
                if (err)
                        goto cleanup;

                /* Clear percpu stats */
                for_each_possible_cpu(cpu)
                        memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
                               sizeof(struct nicvf_drv_stats));
        }

        err = nicvf_register_interrupts(nic);
        if (err)
                goto cleanup;

        /* Initialize the queues */
        err = nicvf_init_resources(nic);
        if (err)
                goto cleanup;

        /* Make sure queue initialization is written */
        wmb();

        nicvf_reg_write(nic, NIC_VF_INT, -1);
        /* Enable Qset err interrupt */
        nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

        /* Enable completion queue interrupt */
        for (qidx = 0; qidx < qs->cq_cnt; qidx++)
                nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

        /* Enable RBDR threshold interrupt */
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
                nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

        /* Send VF config done msg to PF */
        mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
        nicvf_write_to_mbx(nic, &mbx);

        return 0;
cleanup:
        nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
        nicvf_unregister_interrupts(nic);
        tasklet_kill(&nic->qs_err_task);
        tasklet_kill(&nic->rbdr_task);
napi_del:
        for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
                cq_poll = nic->napi[qidx];
                if (!cq_poll)
                        continue;
                napi_disable(&cq_poll->napi);
                netif_napi_del(&cq_poll->napi);
        }
        nicvf_free_cq_poll(nic);
        return err;
}

static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct nicvf *nic = netdev_priv(netdev);
        int orig_mtu = netdev->mtu;

        netdev->mtu = new_mtu;

        if (!netif_running(netdev))
                return 0;

        if (nicvf_update_hw_max_frs(nic, new_mtu)) {
                netdev->mtu = orig_mtu;
                return -EINVAL;
        }

        return 0;
}

static int nicvf_set_mac_address(struct net_device *netdev, void *p)
{
        struct sockaddr *addr = p;
        struct nicvf *nic = netdev_priv(netdev);

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        if (nic->msix_enabled) {
                if (nicvf_hw_set_mac_addr(nic, netdev))
                        return -EBUSY;
        } else {
                nic->set_mac_pending = true;
        }

        return 0;
}

void nicvf_update_lmac_stats(struct nicvf *nic)
{
        int stat = 0;
        union nic_mbx mbx = {};

        if (!netif_running(nic->netdev))
                return;

        mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
        mbx.bgx_stats.vf_id = nic->vf_id;
        /* Rx stats */
        mbx.bgx_stats.rx = 1;
        while (stat < BGX_RX_STATS_COUNT) {
                mbx.bgx_stats.idx = stat;
                if (nicvf_send_msg_to_pf(nic, &mbx))
                        return;
                stat++;
        }

        stat = 0;

        /* Tx stats */
        mbx.bgx_stats.rx = 0;
        while (stat < BGX_TX_STATS_COUNT) {
                mbx.bgx_stats.idx = stat;
                if (nicvf_send_msg_to_pf(nic, &mbx))
                        return;
                stat++;
        }
}
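
/* The per-VNIC hardware counters sit at an 8-byte stride starting at
 * NIC_VNIC_RX_STAT_0_13 / NIC_VNIC_TX_STAT_0_4; the GET_*_STATS()
 * macros below index them by register number.
 */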
void nicvf_update_stats(struct nicvf *nic)
{
        int qidx, cpu;
        u64 tmp_stats = 0;
        struct nicvf_hw_stats *stats = &nic->hw_stats;
        struct nicvf_drv_stats *drv_stats;
        struct queue_set *qs = nic->qs;

#define GET_RX_STATS(reg) \
        nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define GET_TX_STATS(reg) \
        nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))

        stats->rx_bytes = GET_RX_STATS(RX_OCTS);
        stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
        stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
        stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
        stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
        stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
        stats->rx_drop_red = GET_RX_STATS(RX_RED);
        stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
        stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
        stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
        stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
        stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
        stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
        stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

        stats->tx_bytes = GET_TX_STATS(TX_OCTS);
        stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
        stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
        stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
        stats->tx_drops = GET_TX_STATS(TX_DROP);

        /* On T88 pass 2.0, the dummy SQE added for TSO notification
         * via CQE has 'dont_send' set. Hence HW drops the pkt pointed
         * to by the dummy SQE, which results in the tx_drops counter
         * being incremented. Subtracting it from the tx_tso counter
         * gives the exact tx_drops count.
         */
        if (nic->t88 && nic->hw_tso) {
                for_each_possible_cpu(cpu) {
                        drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
                        tmp_stats += drv_stats->tx_tso;
                }
                stats->tx_drops = tmp_stats - stats->tx_drops;
        }
        stats->tx_frames = stats->tx_ucast_frames +
                           stats->tx_bcast_frames +
                           stats->tx_mcast_frames;
        stats->rx_frames = stats->rx_ucast_frames +
                           stats->rx_bcast_frames +
                           stats->rx_mcast_frames;
        stats->rx_drops = stats->rx_drop_red +
                          stats->rx_drop_overrun;

        /* Update RQ and SQ stats */
        for (qidx = 0; qidx < qs->rq_cnt; qidx++)
                nicvf_update_rq_stats(nic, qidx);
        for (qidx = 0; qidx < qs->sq_cnt; qidx++)
                nicvf_update_sq_stats(nic, qidx);
}

static void nicvf_get_stats64(struct net_device *netdev,
                              struct rtnl_link_stats64 *stats)
{
        struct nicvf *nic = netdev_priv(netdev);
        struct nicvf_hw_stats *hw_stats = &nic->hw_stats;

        nicvf_update_stats(nic);

        stats->rx_bytes = hw_stats->rx_bytes;
        stats->rx_packets = hw_stats->rx_frames;
        stats->rx_dropped = hw_stats->rx_drops;
        stats->multicast = hw_stats->rx_mcast_frames;

        stats->tx_bytes = hw_stats->tx_bytes;
        stats->tx_packets = hw_stats->tx_frames;
        stats->tx_dropped = hw_stats->tx_drops;
}

static void nicvf_tx_timeout(struct net_device *dev)
{
        struct nicvf *nic = netdev_priv(dev);

        if (netif_msg_tx_err(nic))
                netdev_warn(dev, "%s: Transmit timed out, resetting\n",
                            dev->name);

        this_cpu_inc(nic->drv_stats->tx_timeout);
        schedule_work(&nic->reset_task);
}

static void nicvf_reset_task(struct work_struct *work)
{
        struct nicvf *nic;

        nic = container_of(work, struct nicvf, reset_task);

        if (!netif_running(nic->netdev))
                return;

        nicvf_stop(nic->netdev);
        nicvf_open(nic->netdev);
        netif_trans_update(nic->netdev);
}

static int nicvf_config_loopback(struct nicvf *nic,
                                 netdev_features_t features)
{
        union nic_mbx mbx = {};

        mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
        mbx.lbk.vf_id = nic->vf_id;
        mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;

        return nicvf_send_msg_to_pf(nic, &mbx);
}

static netdev_features_t nicvf_fix_features(struct net_device *netdev,
                                            netdev_features_t features)
{
        struct nicvf *nic = netdev_priv(netdev);

        if ((features & NETIF_F_LOOPBACK) &&
            netif_running(netdev) && !nic->loopback_supported)
                features &= ~NETIF_F_LOOPBACK;

        return features;
}

static int nicvf_set_features(struct net_device *netdev,
                              netdev_features_t features)
{
        struct nicvf *nic = netdev_priv(netdev);
        netdev_features_t changed = features ^ netdev->features;

        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
                nicvf_config_vlan_stripping(nic, features);

        if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
                return nicvf_config_loopback(nic, features);

        return 0;
}

static const struct net_device_ops nicvf_netdev_ops = {
        .ndo_open               = nicvf_open,
        .ndo_stop               = nicvf_stop,
        .ndo_start_xmit         = nicvf_xmit,
        .ndo_change_mtu         = nicvf_change_mtu,
        .ndo_set_mac_address    = nicvf_set_mac_address,
        .ndo_get_stats64        = nicvf_get_stats64,
        .ndo_tx_timeout         = nicvf_tx_timeout,
        .ndo_fix_features       = nicvf_fix_features,
        .ndo_set_features       = nicvf_set_features,
};

static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct device *dev = &pdev->dev;
        struct net_device *netdev;
        struct nicvf *nic;
        int err, qcount;
        u16 sdevid;

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                return err;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                goto err_disable_device;
        }

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "Unable to get usable DMA configuration\n");
                goto err_release_regions;
        }

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
                goto err_release_regions;
        }

        qcount = netif_get_num_default_rss_queues();

        /* Restrict multiqset support only for host bound VFs */
        if (pdev->is_virtfn) {
                /* Set max number of queues per VF */
                qcount = min_t(int, num_online_cpus(),
                               (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
        }

        netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
        if (!netdev) {
                err = -ENOMEM;
                goto err_release_regions;
        }

        pci_set_drvdata(pdev, netdev);

        SET_NETDEV_DEV(netdev, &pdev->dev);

        nic = netdev_priv(netdev);
        nic->netdev = netdev;
        nic->pdev = pdev;
        nic->pnicvf = nic;
        nic->max_queues = qcount;

        /* Map VF's configuration registers */
        nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
        if (!nic->reg_base) {
                dev_err(dev, "Cannot map config register space, aborting\n");
                err = -ENOMEM;
                goto err_free_netdev;
        }

        nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
        if (!nic->drv_stats) {
                err = -ENOMEM;
                goto err_free_netdev;
        }

        err = nicvf_set_qset_resources(nic);
        if (err)
                goto err_free_netdev;

        /* Check if PF is alive and get MAC address for this VF */
        err = nicvf_register_misc_interrupt(nic);
        if (err)
                goto err_free_netdev;

        nicvf_send_vf_struct(nic);

        if (!pass1_silicon(nic->pdev))
                nic->hw_tso = true;

        pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
        if (sdevid == 0xA134)
                nic->t88 = true;

        /* Check if this VF is in QS only mode */
        if (nic->sqs_mode)
                return 0;

        err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
        if (err)
                goto err_unregister_interrupts;

        netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
                               NETIF_F_TSO | NETIF_F_GRO |
                               NETIF_F_HW_VLAN_CTAG_RX);

        netdev->hw_features |= NETIF_F_RXHASH;

        netdev->features |= netdev->hw_features;
        netdev->hw_features |= NETIF_F_LOOPBACK;

        netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;

        netdev->netdev_ops = &nicvf_netdev_ops;
        netdev->watchdog_timeo = NICVF_TX_TIMEOUT;

        /* MTU range: 64 - 9200 */
        netdev->min_mtu = NIC_HW_MIN_FRS;
        netdev->max_mtu = NIC_HW_MAX_FRS;

        INIT_WORK(&nic->reset_task, nicvf_reset_task);

        err = register_netdev(netdev);
        if (err) {
                dev_err(dev, "Failed to register netdevice\n");
                goto err_unregister_interrupts;
        }

        nic->msg_enable = debug;

        nicvf_set_ethtool_ops(netdev);

        return 0;

err_unregister_interrupts:
        nicvf_unregister_interrupts(nic);
err_free_netdev:
        pci_set_drvdata(pdev, NULL);
        if (nic->drv_stats)
                free_percpu(nic->drv_stats);
        free_netdev(netdev);
err_release_regions:
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);
        return err;
}

static void nicvf_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct net_device *pnetdev;
        struct nicvf *nic;

        if (!netdev)
                return;

        nic = netdev_priv(netdev);
        pnetdev = nic->pnicvf->netdev;

        /* Check if this Qset is assigned to a different VF.
         * If yes, clean primary and all secondary Qsets.
         */
        if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
                unregister_netdev(pnetdev);
        nicvf_unregister_interrupts(nic);
        pci_set_drvdata(pdev, NULL);
        if (nic->drv_stats)
                free_percpu(nic->drv_stats);
        free_netdev(netdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}

static void nicvf_shutdown(struct pci_dev *pdev)
{
        nicvf_remove(pdev);
}

static struct pci_driver nicvf_driver = {
        .name = DRV_NAME,
        .id_table = nicvf_id_table,
        .probe = nicvf_probe,
        .remove = nicvf_remove,
        .shutdown = nicvf_shutdown,
};

static int __init nicvf_init_module(void)
{
        pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

        return pci_register_driver(&nicvf_driver);
}

static void __exit nicvf_cleanup_module(void)
{
        pci_unregister_driver(&nicvf_driver);
}

module_init(nicvf_init_module);
module_exit(nicvf_cleanup_module);