From: David S. Miller Date: Wed, 28 Jul 2010 04:01:35 +0000 (-0700) Subject: Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6 X-Git-Tag: v2.6.36-rc1~571^2~68 X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=bb7e95c8fd859922c6cf3ebbb3a8546007df1748;p=karo-tx-linux.git Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6 Conflicts: drivers/net/bnx2x_main.c Merge bnx2x bug fixes in by hand... :-/ Signed-off-by: David S. Miller --- bb7e95c8fd859922c6cf3ebbb3a8546007df1748 diff --cc drivers/net/bnx2x/bnx2x_main.c index 75568f111754,46167c081727..b4ec2b02a465 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@@ -6600,194 -12221,311 +6600,195 @@@ static int __devinit bnx2x_get_hwinfo(s } } -exit_lbl: - if (unlikely(to_copy)) - DP(NETIF_MSG_TX_QUEUED, - "Linearization IS REQUIRED for %s packet. " - "num_frags %d hlen %d first_bd_sz %d\n", - (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", - skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); - - return to_copy; -} -#endif - -/* called with netif_tx_lock - * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call - * netif_wake_queue() - */ -static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - struct bnx2x_fastpath *fp; - struct netdev_queue *txq; - struct sw_tx_bd *tx_buf; - struct eth_tx_start_bd *tx_start_bd; - struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; - struct eth_tx_parse_bd *pbd = NULL; - u16 pkt_prod, bd_prod; - int nbd, fp_index; - dma_addr_t mapping; - u32 xmit_type = bnx2x_xmit_type(bp, skb); - int i; - u8 hlen = 0; - __le16 pkt_size = 0; - struct ethhdr *eth; - u8 mac_type = UNICAST_ADDRESS; - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return NETDEV_TX_BUSY; -#endif - - fp_index = skb_get_queue_mapping(skb); - txq = netdev_get_tx_queue(dev, fp_index); - - fp = &bp->fp[fp_index]; + if (!BP_NOMCP(bp)) { + bnx2x_get_port_hwinfo(bp); - if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { - fp->eth_q_stats.driver_xoff++; - netif_tx_stop_queue(txq); - BNX2X_ERR("BUG! 
Tx ring full when queue awake!\n"); - return NETDEV_TX_BUSY; + bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); } - DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)" - " gso type %x xmit_type %x\n", - skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, - ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); + if (IS_E1HMF(bp)) { + val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper); + val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower); + if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && + (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) { + bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff); + bp->dev->dev_addr[1] = (u8)(val2 & 0xff); + bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff); + bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff); + bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff); + bp->dev->dev_addr[5] = (u8)(val & 0xff); + memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, + ETH_ALEN); + memcpy(bp->dev->perm_addr, bp->dev->dev_addr, + ETH_ALEN); + } - eth = (struct ethhdr *)skb->data; + return rc; + } - /* set flag according to packet type (UNICAST_ADDRESS is default)*/ - if (unlikely(is_multicast_ether_addr(eth->h_dest))) { - if (is_broadcast_ether_addr(eth->h_dest)) - mac_type = BROADCAST_ADDRESS; - else - mac_type = MULTICAST_ADDRESS; - } - -#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) - /* First, check if we need to linearize the skb (due to FW - restrictions). No need to check fragmentation if page size > 8K - (there will be no violation to FW restrictions) */ - if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { - /* Statistics of linearization */ - bp->lin_cnt++; - if (skb_linearize(skb) != 0) { - DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " - "silently dropping this SKB\n"); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } + if (BP_NOMCP(bp)) { + /* only supposed to happen on emulation/FPGA */ + BNX2X_ERROR("warning: random MAC workaround active\n"); + random_ether_addr(bp->dev->dev_addr); + memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); } -#endif - /* - Please read carefully. First we use one BD which we mark as start, - then we have a parsing info BD (used for TSO or xsum), - and only then we have the rest of the TSO BDs. - (don't forget to mark the last one as last, - and to unmap only AFTER you write to the BD ...) - And above all, all pdb sizes are in words - NOT DWORDS! 
- */ - - pkt_prod = fp->tx_pkt_prod++; - bd_prod = TX_BD(fp->tx_bd_prod); - - /* get a tx_buf and first BD */ - tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)]; - tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; - - tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; - tx_start_bd->general_data = (mac_type << - ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); - /* header nbd */ - tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); - - /* remember the first BD of the packet */ - tx_buf->first_bd = fp->tx_bd_prod; - tx_buf->skb = skb; - tx_buf->flags = 0; - - DP(NETIF_MSG_TX_QUEUED, - "sending pkt %u @%p next_idx %u bd %u @%p\n", - pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); + return rc; +} -#ifdef BCM_VLAN - if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) && - (bp->flags & HW_VLAN_TX_FLAG)) { - tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); - tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG; - } else -#endif - tx_start_bd->vlan = cpu_to_le16(pkt_prod); +static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp) +{ + int cnt, i, block_end, rodi; + char vpd_data[BNX2X_VPD_LEN+1]; + char str_id_reg[VENDOR_ID_LEN+1]; + char str_id_cap[VENDOR_ID_LEN+1]; + u8 len; - /* turn on parsing and get a BD */ - bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - pbd = &fp->tx_desc_ring[bd_prod].parse_bd; + cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data); + memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); - memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); + if (cnt < BNX2X_VPD_LEN) + goto out_not_found; - if (xmit_type & XMIT_CSUM) { - hlen = (skb_network_header(skb) - skb->data) / 2; + i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN, + PCI_VPD_LRDT_RO_DATA); + if (i < 0) + goto out_not_found; - /* for now NS flag is not used in Linux */ - pbd->global_data = - (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << - ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT)); - pbd->ip_hlen = (skb_transport_header(skb) - - skb_network_header(skb)) / 2; + block_end = i + PCI_VPD_LRDT_TAG_SIZE + + pci_vpd_lrdt_size(&vpd_data[i]); - hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2; + i += PCI_VPD_LRDT_TAG_SIZE; - pbd->total_hlen = cpu_to_le16(hlen); - hlen = hlen*2; + if (block_end > BNX2X_VPD_LEN) + goto out_not_found; - tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; + rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, + PCI_VPD_RO_KEYWORD_MFR_ID); + if (rodi < 0) + goto out_not_found; - if (xmit_type & XMIT_CSUM_V4) - tx_start_bd->bd_flags.as_bitfield |= - ETH_TX_BD_FLAGS_IP_CSUM; - else - tx_start_bd->bd_flags.as_bitfield |= - ETH_TX_BD_FLAGS_IPV6; + len = pci_vpd_info_field_size(&vpd_data[rodi]); - if (xmit_type & XMIT_CSUM_TCP) { - pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); + if (len != VENDOR_ID_LEN) + goto out_not_found; - } else { - s8 fix = SKB_CS_OFF(skb); /* signed! 
*/ + rodi += PCI_VPD_INFO_FLD_HDR_SIZE; - pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG; + /* vendor specific info */ + snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); + snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL); + if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) || + !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) { - DP(NETIF_MSG_TX_QUEUED, - "hlen %d fix %d csum before fix %x\n", - le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb)); + rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, + PCI_VPD_RO_KEYWORD_VENDOR0); + if (rodi >= 0) { + len = pci_vpd_info_field_size(&vpd_data[rodi]); - /* HW bug: fixup the CSUM */ - pbd->tcp_pseudo_csum = - bnx2x_csum_fix(skb_transport_header(skb), - SKB_CS(skb), fix); + rodi += PCI_VPD_INFO_FLD_HDR_SIZE; - DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n", - pbd->tcp_pseudo_csum); + if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) { + memcpy(bp->fw_ver, &vpd_data[rodi], len); + bp->fw_ver[len] = ' '; + } } + return; } +out_not_found: + return; +} - mapping = dma_map_single(&bp->pdev->dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - - tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); - tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */ - tx_start_bd->nbd = cpu_to_le16(nbd); - tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); - pkt_size = tx_start_bd->nbytes; - - DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d" - " nbytes %d flags %x vlan %x\n", - tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, - le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), - tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan)); +static int __devinit bnx2x_init_bp(struct bnx2x *bp) +{ + int func = BP_FUNC(bp); + int timer_interval; + int rc; - if (xmit_type & XMIT_GSO) { + /* Disable interrupt handling until HW is initialized */ + atomic_set(&bp->intr_sem, 1); + smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ - DP(NETIF_MSG_TX_QUEUED, - "TSO packet len %d hlen %d total len %d tso size %d\n", - skb->len, hlen, skb_headlen(skb), - skb_shinfo(skb)->gso_size); + mutex_init(&bp->port.phy_mutex); + mutex_init(&bp->fw_mb_mutex); ++ spin_lock_init(&bp->stats_lock); +#ifdef BCM_CNIC + mutex_init(&bp->cnic_mutex); +#endif - tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; + INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); + INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task); - if (unlikely(skb_headlen(skb) > hlen)) - bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, - hlen, bd_prod, ++nbd); + rc = bnx2x_get_hwinfo(bp); - pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); - pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); - pbd->tcp_flags = pbd_tcp_flags(skb); + bnx2x_read_fwinfo(bp); + /* need to reset chip if undi was active */ + if (!BP_NOMCP(bp)) + bnx2x_undi_unload(bp); - if (xmit_type & XMIT_GSO_V4) { - pbd->ip_id = swab16(ip_hdr(skb)->id); - pbd->tcp_pseudo_csum = - swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, - ip_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0)); + if (CHIP_REV_IS_FPGA(bp)) + dev_err(&bp->pdev->dev, "FPGA detected\n"); - } else - pbd->tcp_pseudo_csum = - swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0)); + if (BP_NOMCP(bp) && (func == 0)) + dev_err(&bp->pdev->dev, "MCP disabled, " + "must load devices in order!\n"); - pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN; + /* Set multi queue mode */ + if 
((multi_mode != ETH_RSS_MODE_DISABLED) && + ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) { + dev_err(&bp->pdev->dev, "Multi disabled since int_mode " + "requested is not MSI-X\n"); + multi_mode = ETH_RSS_MODE_DISABLED; } - tx_data_bd = (struct eth_tx_bd *)tx_start_bd; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd; - if (total_pkt_bd == NULL) - total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd; - - mapping = dma_map_page(&bp->pdev->dev, frag->page, - frag->page_offset, - frag->size, DMA_TO_DEVICE); + bp->multi_mode = multi_mode; + bp->int_mode = int_mode; - tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); - tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - tx_data_bd->nbytes = cpu_to_le16(frag->size); - le16_add_cpu(&pkt_size, frag->size); + bp->dev->features |= NETIF_F_GRO; - DP(NETIF_MSG_TX_QUEUED, - "frag %d bd @%p addr (%x:%x) nbytes %d\n", - i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, - le16_to_cpu(tx_data_bd->nbytes)); + /* Set TPA flags */ + if (disable_tpa) { + bp->flags &= ~TPA_ENABLE_FLAG; + bp->dev->features &= ~NETIF_F_LRO; + } else { + bp->flags |= TPA_ENABLE_FLAG; + bp->dev->features |= NETIF_F_LRO; } + bp->disable_tpa = disable_tpa; - DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); - - bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - - /* now send a tx doorbell, counting the next BD - * if the packet contains or ends with it - */ - if (TX_BD_POFF(bd_prod) < nbd) - nbd++; - - if (total_pkt_bd != NULL) - total_pkt_bd->total_pkt_bytes = pkt_size; - - if (pbd) - DP(NETIF_MSG_TX_QUEUED, - "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" - " tcp_flags %x xsum %x seq %u hlen %u\n", - pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id, - pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum, - pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen)); + if (CHIP_IS_E1(bp)) + bp->dropless_fc = 0; + else + bp->dropless_fc = dropless_fc; - DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); + bp->mrrs = mrrs; - /* - * Make sure that the BD data is updated before updating the producer - * since FW might read the BD right after the producer is updated. - * This is only applicable for weak-ordered memory model archs such - * as IA-64. The following barrier is also mandatory since FW will - * assumes packets must have BDs. - */ - wmb(); + bp->tx_ring_size = MAX_TX_AVAIL; + bp->rx_ring_size = MAX_RX_AVAIL; - fp->tx_db.data.prod += nbd; - barrier(); - DOORBELL(bp, fp->index, fp->tx_db.raw); + bp->rx_csum = 1; - mmiowb(); + /* make sure that the numbers are in the right granularity */ + bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR); + bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR); - fp->tx_bd_prod += nbd; + timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); + bp->current_interval = (poll ? 
poll : timer_interval); - if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { - netif_tx_stop_queue(txq); + init_timer(&bp->timer); + bp->timer.expires = jiffies + bp->current_interval; + bp->timer.data = (unsigned long) bp; + bp->timer.function = bnx2x_timer; - /* paired memory barrier is in bnx2x_tx_int(), we have to keep - * ordering of set_bit() in netif_tx_stop_queue() and read of - * fp->bd_tx_cons */ - smp_mb(); + return rc; +} - fp->eth_q_stats.driver_xoff++; - if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) - netif_tx_wake_queue(txq); - } - fp->tx_pkt++; - return NETDEV_TX_OK; -} +/**************************************************************************** +* General service functions +****************************************************************************/ /* called with rtnl_lock */ static int bnx2x_open(struct net_device *dev) diff --cc drivers/net/bnx2x/bnx2x_stats.c index 3f5127720423,000000000000..c74724461020 mode 100644,000000..100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c @@@ -1,1400 -1,0 +1,1411 @@@ +/* bnx2x_stats.c: Broadcom Everest network driver. + * + * Copyright (c) 2007-2010 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Eilon Greenstein + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ + #include "bnx2x_cmn.h" + #include "bnx2x_stats.h" + +/* Statistics */ + +/**************************************************************************** +* Macros +****************************************************************************/ + +/* sum[hi:lo] += add[hi:lo] */ +#define ADD_64(s_hi, a_hi, s_lo, a_lo) \ + do { \ + s_lo += a_lo; \ + s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \ + } while (0) + +/* difference = minuend - subtrahend */ +#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ + do { \ + if (m_lo < s_lo) { \ + /* underflow */ \ + d_hi = m_hi - s_hi; \ + if (d_hi > 0) { \ + /* we can 'loan' 1 */ \ + d_hi--; \ + d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ + } else { \ + /* m_hi <= s_hi */ \ + d_hi = 0; \ + d_lo = 0; \ + } \ + } else { \ + /* m_lo >= s_lo */ \ + if (m_hi < s_hi) { \ + d_hi = 0; \ + d_lo = 0; \ + } else { \ + /* m_hi >= s_hi */ \ + d_hi = m_hi - s_hi; \ + d_lo = m_lo - s_lo; \ + } \ + } \ + } while (0) + +#define UPDATE_STAT64(s, t) \ + do { \ + DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \ + diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \ + pstats->mac_stx[0].t##_hi = new->s##_hi; \ + pstats->mac_stx[0].t##_lo = new->s##_lo; \ + ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \ + pstats->mac_stx[1].t##_lo, diff.lo); \ + } while (0) + +#define UPDATE_STAT64_NIG(s, t) \ + do { \ + DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \ + diff.lo, new->s##_lo, old->s##_lo); \ + ADD_64(estats->t##_hi, diff.hi, \ + estats->t##_lo, diff.lo); \ + } while (0) + +/* sum[hi:lo] += add */ +#define ADD_EXTEND_64(s_hi, s_lo, a) \ + do { \ + s_lo += a; \ + s_hi += (s_lo < a) ? 
1 : 0; \ + } while (0) + +#define UPDATE_EXTEND_STAT(s) \ + do { \ + ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \ + pstats->mac_stx[1].s##_lo, \ + new->s); \ + } while (0) + +#define UPDATE_EXTEND_TSTAT(s, t) \ + do { \ + diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \ + old_tclient->s = tclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_USTAT(s, t) \ + do { \ + diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ + old_uclient->s = uclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_XSTAT(s, t) \ + do { \ + diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \ + old_xclient->s = xclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +/* minuend -= subtrahend */ +#define SUB_64(m_hi, s_hi, m_lo, s_lo) \ + do { \ + DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \ + } while (0) + +/* minuend[hi:lo] -= subtrahend */ +#define SUB_EXTEND_64(m_hi, m_lo, s) \ + do { \ + SUB_64(m_hi, 0, m_lo, s); \ + } while (0) + +#define SUB_EXTEND_USTAT(s, t) \ + do { \ + diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ + SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +/* + * General service functions + */ + +static inline long bnx2x_hilo(u32 *hiref) +{ + u32 lo = *(hiref + 1); +#if (BITS_PER_LONG == 64) + u32 hi = *hiref; + + return HILO_U64(hi, lo); +#else + return lo; +#endif +} + +/* + * Init service functions + */ + + +static void bnx2x_storm_stats_post(struct bnx2x *bp) +{ + if (!bp->stats_pending) { + struct eth_query_ramrod_data ramrod_data = {0}; + int i, rc; + ++ spin_lock_bh(&bp->stats_lock); ++ + ramrod_data.drv_counter = bp->stats_counter++; + ramrod_data.collect_port = bp->port.pmf ? 1 : 0; + for_each_queue(bp, i) + ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id); + + rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, + ((u32 *)&ramrod_data)[1], + ((u32 *)&ramrod_data)[0], 0); + if (rc == 0) { + /* stats ramrod has it's own slot on the spq */ + bp->spq_left++; + bp->stats_pending = 1; + } ++ ++ spin_unlock_bh(&bp->stats_lock); + } +} + +static void bnx2x_hw_stats_post(struct bnx2x *bp) +{ + struct dmae_command *dmae = &bp->stats_dmae; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + *stats_comp = DMAE_COMP_VAL; + if (CHIP_REV_IS_SLOW(bp)) + return; + + /* loader */ + if (bp->executer_idx) { + int loader_idx = PMF_DMAE_C(bp); + + memset(dmae, 0, sizeof(struct dmae_command)); + + dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | + DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | + DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (BP_PORT(bp) ? 
DMAE_CMD_PORT_1 : + DMAE_CMD_PORT_0) | + (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); + dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + + sizeof(struct dmae_command) * + (loader_idx + 1)) >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct dmae_command) >> 2; + if (CHIP_IS_E1(bp)) + dmae->len--; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + *stats_comp = 0; + bnx2x_post_dmae(bp, dmae, loader_idx); + + } else if (bp->func_stx) { + *stats_comp = 0; + bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); + } +} + +static int bnx2x_stats_comp(struct bnx2x *bp) +{ + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + int cnt = 10; + + might_sleep(); + while (*stats_comp != DMAE_COMP_VAL) { + if (!cnt) { + BNX2X_ERR("timeout waiting for stats finished\n"); + break; + } + cnt--; + msleep(1); + } + return 1; +} + +/* + * Statistics service functions + */ + +static void bnx2x_stats_pmf_update(struct bnx2x *bp) +{ + struct dmae_command *dmae; + u32 opcode; + int loader_idx = PMF_DMAE_C(bp); + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + + opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | + DMAE_CMD_C_ENABLE | + DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | + (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC); + dmae->src_addr_lo = bp->port.port_stx >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); + dmae->len = DMAE_LEN32_RD_MAX; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); + dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + + DMAE_LEN32_RD_MAX * 4); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) + + DMAE_LEN32_RD_MAX * 4); + dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); +} + +static void bnx2x_port_stats_init(struct bnx2x *bp) +{ + struct dmae_command *dmae; + int port = BP_PORT(bp); + int vn = BP_E1HVN(bp); + u32 opcode; + int loader_idx = PMF_DMAE_C(bp); + u32 mac_addr; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!bp->link_vars.link_up || !bp->port.pmf) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + + /* MCP */ + opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | + DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | + DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (port ? 
DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | + (vn << DMAE_CMD_E1HVN_SHIFT)); + + if (bp->port.port_stx) { + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); + dmae->dst_addr_lo = bp->port.port_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_port_stats) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + if (bp->func_stx) { + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); + dmae->dst_addr_lo = bp->func_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_func_stats) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + /* MAC */ + opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | + DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | + DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | + (vn << DMAE_CMD_E1HVN_SHIFT)); + + if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { + + mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM); + + /* BIGMAC_REGISTER_TX_STAT_GTPKT .. + BIGMAC_REGISTER_TX_STAT_GTBYT */ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); + dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* BIGMAC_REGISTER_RX_STAT_GR64 .. + BIGMAC_REGISTER_RX_STAT_GRIPJ */ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + + BIGMAC_REGISTER_RX_STAT_GR64) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct bmac_stats, rx_stat_gr64_lo)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct bmac_stats, rx_stat_gr64_lo)); + dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - + BIGMAC_REGISTER_RX_STAT_GR64) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { + + mac_addr = (port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0); + + /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + + EMAC_REG_EMAC_RX_STAT_AC) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); + dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* EMAC_REG_EMAC_RX_STAT_AC_28 */ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct emac_stats, rx_stat_falsecarriererrors)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct emac_stats, rx_stat_falsecarriererrors)); + dmae->len = 1; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + + EMAC_REG_EMAC_TX_STAT_AC) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); + dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + /* NIG */ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD : + NIG_REG_STAT0_BRB_DISCARD) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); + dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : + NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt0_lo)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt0_lo)); + dmae->len = (2*sizeof(u32)) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | + DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | + DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | + (vn << DMAE_CMD_E1HVN_SHIFT)); + dmae->src_addr_lo = (port ? 
NIG_REG_STAT1_EGRESS_MAC_PKT1 : + NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt1_lo)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt1_lo)); + dmae->len = (2*sizeof(u32)) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; +} + +static void bnx2x_func_stats_init(struct bnx2x *bp) +{ + struct dmae_command *dmae = &bp->stats_dmae; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!bp->func_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + memset(dmae, 0, sizeof(struct dmae_command)); + + dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | + DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | + DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | + (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); + dmae->dst_addr_lo = bp->func_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_func_stats) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; +} + +static void bnx2x_stats_start(struct bnx2x *bp) +{ + if (bp->port.pmf) + bnx2x_port_stats_init(bp); + + else if (bp->func_stx) + bnx2x_func_stats_init(bp); + + bnx2x_hw_stats_post(bp); + bnx2x_storm_stats_post(bp); +} + +static void bnx2x_stats_pmf_start(struct bnx2x *bp) +{ + bnx2x_stats_comp(bp); + bnx2x_stats_pmf_update(bp); + bnx2x_stats_start(bp); +} + +static void bnx2x_stats_restart(struct bnx2x *bp) +{ + bnx2x_stats_comp(bp); + bnx2x_stats_start(bp); +} + +static void bnx2x_bmac_stats_update(struct bnx2x *bp) +{ + struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats); + struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct { + u32 lo; + u32 hi; + } diff; + + UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); + UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); + UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); + UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); + UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); + UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); + UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); + UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); + UPDATE_STAT64(tx_stat_gt127, + tx_stat_etherstatspkts65octetsto127octets); + UPDATE_STAT64(tx_stat_gt255, + tx_stat_etherstatspkts128octetsto255octets); + UPDATE_STAT64(tx_stat_gt511, + tx_stat_etherstatspkts256octetsto511octets); + UPDATE_STAT64(tx_stat_gt1023, + tx_stat_etherstatspkts512octetsto1023octets); + UPDATE_STAT64(tx_stat_gt1518, + tx_stat_etherstatspkts1024octetsto1522octets); + UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047); + 
UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095); + UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216); + UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383); + UPDATE_STAT64(tx_stat_gterr, + tx_stat_dot3statsinternalmactransmiterrors); + UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl); + + estats->pause_frames_received_hi = + pstats->mac_stx[1].rx_stat_bmac_xpf_hi; + estats->pause_frames_received_lo = + pstats->mac_stx[1].rx_stat_bmac_xpf_lo; + + estats->pause_frames_sent_hi = + pstats->mac_stx[1].tx_stat_outxoffsent_hi; + estats->pause_frames_sent_lo = + pstats->mac_stx[1].tx_stat_outxoffsent_lo; +} + +static void bnx2x_emac_stats_update(struct bnx2x *bp) +{ + struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats); + struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + + UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets); + UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets); + UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors); + UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors); + UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors); + UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors); + UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts); + UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong); + UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments); + UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers); + UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived); + UPDATE_EXTEND_STAT(rx_stat_xoffstateentered); + UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived); + UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived); + UPDATE_EXTEND_STAT(tx_stat_outxonsent); + UPDATE_EXTEND_STAT(tx_stat_outxoffsent); + UPDATE_EXTEND_STAT(tx_stat_flowcontroldone); + UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions); + UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes); + UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes); + UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions); + UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions); + UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets); + UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors); + + estats->pause_frames_received_hi = + pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi; + estats->pause_frames_received_lo = + pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo; + ADD_64(estats->pause_frames_received_hi, + pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi, + estats->pause_frames_received_lo, + pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo); + + estats->pause_frames_sent_hi = + pstats->mac_stx[1].tx_stat_outxonsent_hi; + estats->pause_frames_sent_lo = + pstats->mac_stx[1].tx_stat_outxonsent_lo; + ADD_64(estats->pause_frames_sent_hi, + pstats->mac_stx[1].tx_stat_outxoffsent_hi, + estats->pause_frames_sent_lo, + pstats->mac_stx[1].tx_stat_outxoffsent_lo); +} + +static int bnx2x_hw_stats_update(struct bnx2x *bp) +{ + struct nig_stats *new = bnx2x_sp(bp, nig_stats); + struct nig_stats *old = &(bp->port.old_nig_stats); + struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + 
struct { + u32 lo; + u32 hi; + } diff; + + if (bp->link_vars.mac_type == MAC_TYPE_BMAC) + bnx2x_bmac_stats_update(bp); + + else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) + bnx2x_emac_stats_update(bp); + + else { /* unreached */ + BNX2X_ERR("stats updated by DMAE but no MAC active\n"); + return -1; + } + + ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, + new->brb_discard - old->brb_discard); + ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, + new->brb_truncate - old->brb_truncate); + + UPDATE_STAT64_NIG(egress_mac_pkt0, + etherstatspkts1024octetsto1522octets); + UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets); + + memcpy(old, new, sizeof(struct nig_stats)); + + memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), + sizeof(struct mac_stx)); + estats->brb_drop_hi = pstats->brb_drop_hi; + estats->brb_drop_lo = pstats->brb_drop_lo; + + pstats->host_port_stats_start = ++pstats->host_port_stats_end; + + if (!BP_NOMCP(bp)) { + u32 nig_timer_max = + SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); + if (nig_timer_max != estats->nig_timer_max) { + estats->nig_timer_max = nig_timer_max; + BNX2X_ERR("NIG timer max (%u)\n", + estats->nig_timer_max); + } + } + + return 0; +} + +static int bnx2x_storm_stats_update(struct bnx2x *bp) +{ + struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats); + struct tstorm_per_port_stats *tport = + &stats->tstorm_common.port_statistics; + struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + int i; ++ u16 cur_stats_counter; ++ ++ /* Make sure we use the value of the counter ++ * used for sending the last stats ramrod. ++ */ ++ spin_lock_bh(&bp->stats_lock); ++ cur_stats_counter = bp->stats_counter - 1; ++ spin_unlock_bh(&bp->stats_lock); + + memcpy(&(fstats->total_bytes_received_hi), + &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), + sizeof(struct host_func_stats) - 2*sizeof(u32)); + estats->error_bytes_received_hi = 0; + estats->error_bytes_received_lo = 0; + estats->etherstatsoverrsizepkts_hi = 0; + estats->etherstatsoverrsizepkts_lo = 0; + estats->no_buff_discard_hi = 0; + estats->no_buff_discard_lo = 0; + + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + int cl_id = fp->cl_id; + struct tstorm_per_client_stats *tclient = + &stats->tstorm_common.client_statistics[cl_id]; + struct tstorm_per_client_stats *old_tclient = &fp->old_tclient; + struct ustorm_per_client_stats *uclient = + &stats->ustorm_common.client_statistics[cl_id]; + struct ustorm_per_client_stats *old_uclient = &fp->old_uclient; + struct xstorm_per_client_stats *xclient = + &stats->xstorm_common.client_statistics[cl_id]; + struct xstorm_per_client_stats *old_xclient = &fp->old_xclient; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + u32 diff; + + /* are storm stats valid? 
*/ - if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) != - bp->stats_counter) { ++ if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm" + " xstorm counter (0x%x) != stats_counter (0x%x)\n", - i, xclient->stats_counter, bp->stats_counter); ++ i, xclient->stats_counter, cur_stats_counter + 1); + return -1; + } - if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) != - bp->stats_counter) { ++ if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm" + " tstorm counter (0x%x) != stats_counter (0x%x)\n", - i, tclient->stats_counter, bp->stats_counter); ++ i, tclient->stats_counter, cur_stats_counter + 1); + return -2; + } - if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) != - bp->stats_counter) { ++ if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm" + " ustorm counter (0x%x) != stats_counter (0x%x)\n", - i, uclient->stats_counter, bp->stats_counter); ++ i, uclient->stats_counter, cur_stats_counter + 1); + return -4; + } + + qstats->total_bytes_received_hi = + le32_to_cpu(tclient->rcv_broadcast_bytes.hi); + qstats->total_bytes_received_lo = + le32_to_cpu(tclient->rcv_broadcast_bytes.lo); + + ADD_64(qstats->total_bytes_received_hi, + le32_to_cpu(tclient->rcv_multicast_bytes.hi), + qstats->total_bytes_received_lo, + le32_to_cpu(tclient->rcv_multicast_bytes.lo)); + + ADD_64(qstats->total_bytes_received_hi, + le32_to_cpu(tclient->rcv_unicast_bytes.hi), + qstats->total_bytes_received_lo, + le32_to_cpu(tclient->rcv_unicast_bytes.lo)); + + SUB_64(qstats->total_bytes_received_hi, + le32_to_cpu(uclient->bcast_no_buff_bytes.hi), + qstats->total_bytes_received_lo, + le32_to_cpu(uclient->bcast_no_buff_bytes.lo)); + + SUB_64(qstats->total_bytes_received_hi, + le32_to_cpu(uclient->mcast_no_buff_bytes.hi), + qstats->total_bytes_received_lo, + le32_to_cpu(uclient->mcast_no_buff_bytes.lo)); + + SUB_64(qstats->total_bytes_received_hi, + le32_to_cpu(uclient->ucast_no_buff_bytes.hi), + qstats->total_bytes_received_lo, + le32_to_cpu(uclient->ucast_no_buff_bytes.lo)); + + qstats->valid_bytes_received_hi = + qstats->total_bytes_received_hi; + qstats->valid_bytes_received_lo = + qstats->total_bytes_received_lo; + + qstats->error_bytes_received_hi = + le32_to_cpu(tclient->rcv_error_bytes.hi); + qstats->error_bytes_received_lo = + le32_to_cpu(tclient->rcv_error_bytes.lo); + + ADD_64(qstats->total_bytes_received_hi, + qstats->error_bytes_received_hi, + qstats->total_bytes_received_lo, + qstats->error_bytes_received_lo); + + UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, + total_unicast_packets_received); + UPDATE_EXTEND_TSTAT(rcv_multicast_pkts, + total_multicast_packets_received); + UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts, + total_broadcast_packets_received); + UPDATE_EXTEND_TSTAT(packets_too_big_discard, + etherstatsoverrsizepkts); + UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard); + + SUB_EXTEND_USTAT(ucast_no_buff_pkts, + total_unicast_packets_received); + SUB_EXTEND_USTAT(mcast_no_buff_pkts, + total_multicast_packets_received); + SUB_EXTEND_USTAT(bcast_no_buff_pkts, + total_broadcast_packets_received); + UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard); + UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard); + UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); + + qstats->total_bytes_transmitted_hi = + le32_to_cpu(xclient->unicast_bytes_sent.hi); + qstats->total_bytes_transmitted_lo = + 
le32_to_cpu(xclient->unicast_bytes_sent.lo); + + ADD_64(qstats->total_bytes_transmitted_hi, + le32_to_cpu(xclient->multicast_bytes_sent.hi), + qstats->total_bytes_transmitted_lo, + le32_to_cpu(xclient->multicast_bytes_sent.lo)); + + ADD_64(qstats->total_bytes_transmitted_hi, + le32_to_cpu(xclient->broadcast_bytes_sent.hi), + qstats->total_bytes_transmitted_lo, + le32_to_cpu(xclient->broadcast_bytes_sent.lo)); + + UPDATE_EXTEND_XSTAT(unicast_pkts_sent, + total_unicast_packets_transmitted); + UPDATE_EXTEND_XSTAT(multicast_pkts_sent, + total_multicast_packets_transmitted); + UPDATE_EXTEND_XSTAT(broadcast_pkts_sent, + total_broadcast_packets_transmitted); + + old_tclient->checksum_discard = tclient->checksum_discard; + old_tclient->ttl0_discard = tclient->ttl0_discard; + + ADD_64(fstats->total_bytes_received_hi, + qstats->total_bytes_received_hi, + fstats->total_bytes_received_lo, + qstats->total_bytes_received_lo); + ADD_64(fstats->total_bytes_transmitted_hi, + qstats->total_bytes_transmitted_hi, + fstats->total_bytes_transmitted_lo, + qstats->total_bytes_transmitted_lo); + ADD_64(fstats->total_unicast_packets_received_hi, + qstats->total_unicast_packets_received_hi, + fstats->total_unicast_packets_received_lo, + qstats->total_unicast_packets_received_lo); + ADD_64(fstats->total_multicast_packets_received_hi, + qstats->total_multicast_packets_received_hi, + fstats->total_multicast_packets_received_lo, + qstats->total_multicast_packets_received_lo); + ADD_64(fstats->total_broadcast_packets_received_hi, + qstats->total_broadcast_packets_received_hi, + fstats->total_broadcast_packets_received_lo, + qstats->total_broadcast_packets_received_lo); + ADD_64(fstats->total_unicast_packets_transmitted_hi, + qstats->total_unicast_packets_transmitted_hi, + fstats->total_unicast_packets_transmitted_lo, + qstats->total_unicast_packets_transmitted_lo); + ADD_64(fstats->total_multicast_packets_transmitted_hi, + qstats->total_multicast_packets_transmitted_hi, + fstats->total_multicast_packets_transmitted_lo, + qstats->total_multicast_packets_transmitted_lo); + ADD_64(fstats->total_broadcast_packets_transmitted_hi, + qstats->total_broadcast_packets_transmitted_hi, + fstats->total_broadcast_packets_transmitted_lo, + qstats->total_broadcast_packets_transmitted_lo); + ADD_64(fstats->valid_bytes_received_hi, + qstats->valid_bytes_received_hi, + fstats->valid_bytes_received_lo, + qstats->valid_bytes_received_lo); + + ADD_64(estats->error_bytes_received_hi, + qstats->error_bytes_received_hi, + estats->error_bytes_received_lo, + qstats->error_bytes_received_lo); + ADD_64(estats->etherstatsoverrsizepkts_hi, + qstats->etherstatsoverrsizepkts_hi, + estats->etherstatsoverrsizepkts_lo, + qstats->etherstatsoverrsizepkts_lo); + ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi, + estats->no_buff_discard_lo, qstats->no_buff_discard_lo); + } + + ADD_64(fstats->total_bytes_received_hi, + estats->rx_stat_ifhcinbadoctets_hi, + fstats->total_bytes_received_lo, + estats->rx_stat_ifhcinbadoctets_lo); + + memcpy(estats, &(fstats->total_bytes_received_hi), + sizeof(struct host_func_stats) - 2*sizeof(u32)); + + ADD_64(estats->etherstatsoverrsizepkts_hi, + estats->rx_stat_dot3statsframestoolong_hi, + estats->etherstatsoverrsizepkts_lo, + estats->rx_stat_dot3statsframestoolong_lo); + ADD_64(estats->error_bytes_received_hi, + estats->rx_stat_ifhcinbadoctets_hi, + estats->error_bytes_received_lo, + estats->rx_stat_ifhcinbadoctets_lo); + + if (bp->port.pmf) { + estats->mac_filter_discard = + 
le32_to_cpu(tport->mac_filter_discard); + estats->xxoverflow_discard = + le32_to_cpu(tport->xxoverflow_discard); + estats->brb_truncate_discard = + le32_to_cpu(tport->brb_truncate_discard); + estats->mac_discard = le32_to_cpu(tport->mac_discard); + } + + fstats->host_func_stats_start = ++fstats->host_func_stats_end; + + bp->stats_pending = 0; + + return 0; +} + +static void bnx2x_net_stats_update(struct bnx2x *bp) +{ + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct net_device_stats *nstats = &bp->dev->stats; + int i; + + nstats->rx_packets = + bnx2x_hilo(&estats->total_unicast_packets_received_hi) + + bnx2x_hilo(&estats->total_multicast_packets_received_hi) + + bnx2x_hilo(&estats->total_broadcast_packets_received_hi); + + nstats->tx_packets = + bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) + + bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) + + bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi); + + nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi); + + nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); + + nstats->rx_dropped = estats->mac_discard; + for_each_queue(bp, i) + nstats->rx_dropped += + le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); + + nstats->tx_dropped = 0; + + nstats->multicast = + bnx2x_hilo(&estats->total_multicast_packets_received_hi); + + nstats->collisions = + bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi); + + nstats->rx_length_errors = + bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) + + bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi); + nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) + + bnx2x_hilo(&estats->brb_truncate_hi); + nstats->rx_crc_errors = + bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi); + nstats->rx_frame_errors = + bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi); + nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi); + nstats->rx_missed_errors = estats->xxoverflow_discard; + + nstats->rx_errors = nstats->rx_length_errors + + nstats->rx_over_errors + + nstats->rx_crc_errors + + nstats->rx_frame_errors + + nstats->rx_fifo_errors + + nstats->rx_missed_errors; + + nstats->tx_aborted_errors = + bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) + + bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi); + nstats->tx_carrier_errors = + bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi); + nstats->tx_fifo_errors = 0; + nstats->tx_heartbeat_errors = 0; + nstats->tx_window_errors = 0; + + nstats->tx_errors = nstats->tx_aborted_errors + + nstats->tx_carrier_errors + + bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi); +} + +static void bnx2x_drv_stats_update(struct bnx2x *bp) +{ + struct bnx2x_eth_stats *estats = &bp->eth_stats; + int i; + + estats->driver_xoff = 0; + estats->rx_err_discard_pkt = 0; + estats->rx_skb_alloc_failed = 0; + estats->hw_csum_err = 0; + for_each_queue(bp, i) { + struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; + + estats->driver_xoff += qstats->driver_xoff; + estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt; + estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed; + estats->hw_csum_err += qstats->hw_csum_err; + } +} + +static void bnx2x_stats_update(struct bnx2x *bp) +{ + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + if (*stats_comp != DMAE_COMP_VAL) + return; + + if (bp->port.pmf) + bnx2x_hw_stats_update(bp); + + if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) { + BNX2X_ERR("storm stats were not updated for 3 times\n"); + 
bnx2x_panic(); + return; + } + + bnx2x_net_stats_update(bp); + bnx2x_drv_stats_update(bp); + + if (netif_msg_timer(bp)) { + struct bnx2x_eth_stats *estats = &bp->eth_stats; + int i; + + printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n", + bp->dev->name, + estats->brb_drop_lo, estats->brb_truncate_lo); + + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + + printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)" + " rx pkt(%lu) rx calls(%lu %lu)\n", + fp->name, (le16_to_cpu(*fp->rx_cons_sb) - + fp->rx_comp_cons), + le16_to_cpu(*fp->rx_cons_sb), + bnx2x_hilo(&qstats-> + total_unicast_packets_received_hi), + fp->rx_calls, fp->rx_pkt); + } + + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + struct netdev_queue *txq = + netdev_get_tx_queue(bp->dev, i); + + printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)" + " tx pkt(%lu) tx calls (%lu)" + " %s (Xoff events %u)\n", + fp->name, bnx2x_tx_avail(fp), + le16_to_cpu(*fp->tx_cons_sb), + bnx2x_hilo(&qstats-> + total_unicast_packets_transmitted_hi), + fp->tx_pkt, + (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"), + qstats->driver_xoff); + } + } + + bnx2x_hw_stats_post(bp); + bnx2x_storm_stats_post(bp); +} + +static void bnx2x_port_stats_stop(struct bnx2x *bp) +{ + struct dmae_command *dmae; + u32 opcode; + int loader_idx = PMF_DMAE_C(bp); + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + bp->executer_idx = 0; + + opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | + DMAE_CMD_C_ENABLE | + DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | + (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); + + if (bp->port.port_stx) { + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + if (bp->func_stx) + dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC); + else + dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); + dmae->dst_addr_lo = bp->port.port_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_port_stats) >> 2; + if (bp->func_stx) { + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } else { + dmae->comp_addr_lo = + U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = + U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + } + } + + if (bp->func_stx) { + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); + dmae->dst_addr_lo = bp->func_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_func_stats) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + } +} + +static void bnx2x_stats_stop(struct bnx2x *bp) +{ + int update = 0; + + bnx2x_stats_comp(bp); + + if (bp->port.pmf) + update = (bnx2x_hw_stats_update(bp) == 0); + + update |= (bnx2x_storm_stats_update(bp) == 0); + + if (update) { + bnx2x_net_stats_update(bp); + + if (bp->port.pmf) + bnx2x_port_stats_stop(bp); + + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); + } 
+} + +static void bnx2x_stats_do_nothing(struct bnx2x *bp) +{ +} + +static const struct { + void (*action)(struct bnx2x *bp); + enum bnx2x_stats_state next_state; +} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = { +/* state event */ +{ +/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED}, +/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED}, +/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}, +/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED} +}, +{ +/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED}, +/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED}, +/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED}, +/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED} +} +}; + +void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) +{ - enum bnx2x_stats_state state = bp->stats_state; ++ enum bnx2x_stats_state state; + + if (unlikely(bp->panic)) + return; + - bnx2x_stats_stm[state][event].action(bp); ++ /* Protect a state change flow */ ++ spin_lock_bh(&bp->stats_lock); ++ state = bp->stats_state; + bp->stats_state = bnx2x_stats_stm[state][event].next_state; ++ spin_unlock_bh(&bp->stats_lock); + - /* Make sure the state has been "changed" */ - smp_wmb(); ++ bnx2x_stats_stm[state][event].action(bp); + + if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) + DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", + state, event, bp->stats_state); +} + +static void bnx2x_port_stats_base_init(struct bnx2x *bp) +{ + struct dmae_command *dmae; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!bp->port.pmf || !bp->port.port_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | + DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | + DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | + (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); + dmae->dst_addr_lo = bp->port.port_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_port_stats) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); +} + +static void bnx2x_func_stats_base_init(struct bnx2x *bp) +{ + int vn, vn_max = IS_E1HMF(bp) ? 
E1HVN_MAX : E1VN_MAX; + int port = BP_PORT(bp); + int func; + u32 func_stx; + + /* sanity */ + if (!bp->port.pmf || !bp->func_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + /* save our func_stx */ + func_stx = bp->func_stx; + + for (vn = VN_0; vn < vn_max; vn++) { + func = 2*vn + port; + + bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); + bnx2x_func_stats_init(bp); + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); + } + + /* restore our func_stx */ + bp->func_stx = func_stx; +} + +static void bnx2x_func_stats_base_update(struct bnx2x *bp) +{ + struct dmae_command *dmae = &bp->stats_dmae; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!bp->func_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + memset(dmae, 0, sizeof(struct dmae_command)); + + dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | + DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | + DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | + (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); + dmae->src_addr_lo = bp->func_stx >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base)); + dmae->len = sizeof(struct host_func_stats) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); +} + +void bnx2x_stats_init(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int func = BP_FUNC(bp); + int i; + + bp->stats_pending = 0; + bp->executer_idx = 0; + bp->stats_counter = 0; + + /* port and func stats for management */ + if (!BP_NOMCP(bp)) { + bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx); + bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); + + } else { + bp->port.port_stx = 0; + bp->func_stx = 0; + } + DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n", + bp->port.port_stx, bp->func_stx); + + /* port stats */ + memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); + bp->port.old_nig_stats.brb_discard = + REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); + bp->port.old_nig_stats.brb_truncate = + REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); + REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, + &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); + REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, + &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); + + /* function stats */ + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + memset(&fp->old_tclient, 0, + sizeof(struct tstorm_per_client_stats)); + memset(&fp->old_uclient, 0, + sizeof(struct ustorm_per_client_stats)); + memset(&fp->old_xclient, 0, + sizeof(struct xstorm_per_client_stats)); + memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats)); + } + + memset(&bp->dev->stats, 0, sizeof(struct net_device_stats)); + memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats)); + + bp->stats_state = STATS_STATE_DISABLED; + + if (bp->port.pmf) { + if (bp->port.port_stx) + bnx2x_port_stats_base_init(bp); + + if (bp->func_stx) + bnx2x_func_stats_base_init(bp); + + } else if (bp->func_stx) + bnx2x_func_stats_base_update(bp); +} diff --cc net/sched/act_mirred.c index a16b0175f890,1980b71c283f..11f195af2da0 --- a/net/sched/act_mirred.c +++ 
b/net/sched/act_mirred.c
@@@ -160,13 -165,16 +165,18 @@@ static int tcf_mirred(struct sk_buff *s
  	spin_lock(&m->tcf_lock);
  	m->tcf_tm.lastuse = jiffies;
+ 	m->tcf_bstats.bytes += qdisc_pkt_len(skb);
+ 	m->tcf_bstats.packets++;
  	dev = m->tcfm_dev;
+ 	if (!dev) {
+ 		printk_once(KERN_NOTICE "tc mirred: target device is gone\n");
+ 		goto out;
+ 	}
+ 
  	if (!(dev->flags & IFF_UP)) {
  		if (net_ratelimit())
- 			pr_notice("tc mirred to Houston: device %s is gone!\n",
+ 			pr_notice("tc mirred to Houston: device %s is down\n",
  				  dev->name);
  		goto out;
  	}
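
A note for readers working through the bnx2x_stats.c hunks above: every MAC/storm counter there is kept as a hi:lo pair of 32-bit words, and the ADD_64/DIFF_64/ADD_EXTEND_64 macros propagate the carry and borrow between the halves by hand (bnx2x_hilo() then folds a pair back into one value only on 64-bit hosts). The userspace sketch below is illustrative only and not part of the merge; the add_64() helper and the test values are invented for the example, but the wrap test (s_lo < a_lo after the addition) is the same one the driver's ADD_64 macro uses.

/*
 * Illustrative only -- not part of the patch above.  A minimal userspace
 * sketch of the split 64-bit counter arithmetic used by bnx2x_stats.c:
 * statistics live as a hi:lo pair of 32-bit words, so the carry out of
 * the low word must be propagated by hand.
 */
#include <stdint.h>
#include <stdio.h>

/* sum[hi:lo] += add[hi:lo]; carry into the high word when the low word wraps */
static void add_64(uint32_t *s_hi, uint32_t *s_lo, uint32_t a_hi, uint32_t a_lo)
{
	*s_lo += a_lo;
	*s_hi += a_hi + ((*s_lo < a_lo) ? 1 : 0);	/* wrapped => carry 1 */
}

int main(void)
{
	uint32_t hi = 0, lo = 0xfffffff0;	/* counter just below a low-word wrap */

	add_64(&hi, &lo, 0, 0x20);		/* forces the carry path */

	/* both lines should show 0x00000001:00000010 */
	printf("split  : 0x%08x:%08x\n", (unsigned)hi, (unsigned)lo);
	printf("native : 0x%016llx\n", 0xfffffff0ULL + 0x20ULL);
	return 0;
}

The same wrap-detection trick appears in ADD_EXTEND_64 in the hunks above, where a plain 32-bit delta is added to the low word and only the carry is pushed into the high word.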