Merge tag 'v2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 0af361e4e3d16ab83d8c3067396f63c5d46d1cb1..a71b329405335b4a93c0b55e9fbb45808b7d39e9 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 #endif
 }
 
+/* Timestamp option length allowed for TPA aggregation:
+ *
+ *             nop nop kind length echo val
+ *             (1 + 1 + 1 + 1 + 4 + 4 = 12 bytes)
+ */
+#define TPA_TSTAMP_OPT_LEN     12
+/**
+ * Calculate the approximate value of the MSS for this
+ * aggregation using its first packet.
+ *
+ * @param bp driver handle
+ * @param parsing_flags Parsing flags from the START CQE
+ * @param len_on_bd Total length of the first packet for the
+ *                  aggregation.
+ */
+static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+                                   u16 len_on_bd)
+{
+       /* A TPA aggregation won't have IP options, and the only TCP
+        * option it may carry is the timestamp.
+        */
+       u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
+
+       /* Check if there was a TCP timestamp; if there is, it will
+        * always be 12 bytes long: nop nop kind length echo val.
+        *
+        * Otherwise the FW would close the aggregation.
+        */
+       if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
+               hdrs_len += TPA_TSTAMP_OPT_LEN;
+
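+       /* e.g. a 1514-byte first frame carrying the timestamp option:
+        * MSS = 1514 - (14 + 20 + 20 + 12) = 1448
+        */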
+       return len_on_bd - hdrs_len;
+}
+
 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
-                              u16 cqe_idx)
+                              u16 cqe_idx, u16 parsing_flags)
 {
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
@@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
        /* This is needed in order to enable forwarding support */
        if (frag_size)
-               skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
-                                              max(frag_size, (u32)len_on_bd));
+               skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
+                                                             len_on_bd);
 
 #ifdef BNX2X_STOP_ON_ERROR
        if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
@@ -344,6 +378,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
+               u16 parsing_flags =
+                       le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
 
                prefetch(skb);
                prefetch(((char *)(skb)) + L1_CACHE_BYTES);
@@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                }
 
                if (!bnx2x_fill_frag_skb(bp, fp, skb,
-                                        &cqe->fast_path_cqe, cqe_idx)) {
-                       if ((le16_to_cpu(cqe->fast_path_cqe.
-                           pars_flags.flags) & PARSING_FLAGS_VLAN))
+                                        &cqe->fast_path_cqe, cqe_idx,
+                                        parsing_flags)) {
+                       if (parsing_flags & PARSING_FLAGS_VLAN)
                                __vlan_hwaccel_put_tag(skb,
                                                 le16_to_cpu(cqe->fast_path_cqe.
                                                             vlan_tag));
@@ -698,6 +734,30 @@ void bnx2x_release_phy_lock(struct bnx2x *bp)
        mutex_unlock(&bp->port.phy_mutex);
 }
 
+/* calculates MF speed according to current line speed and MF configuration */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp)
+{
+       u16 line_speed = bp->link_vars.line_speed;
+       if (IS_MF(bp)) {
+               u16 maxCfg = bnx2x_extract_max_cfg(bp,
+                                                  bp->mf_config[BP_VN(bp)]);
+
+               /* Calculate the current MAX line speed limit for the MF
+                * devices
+                */
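+               /* In SI mode maxCfg is a percentage of the line rate,
+                * while in SD mode it is given in units of 100 Mbps;
+                * e.g. maxCfg = 30 caps a 10G link at 3000 Mbps in
+                * either mode.
+                */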
+               if (IS_MF_SI(bp))
+                       line_speed = (line_speed * maxCfg) / 100;
+               else { /* SD mode */
+                       u16 vn_max_rate = maxCfg * 100;
+
+                       if (vn_max_rate < line_speed)
+                               line_speed = vn_max_rate;
+               }
+       }
+
+       return line_speed;
+}
+
 void bnx2x_link_report(struct bnx2x *bp)
 {
        if (bp->flags & MF_FUNC_DIS) {
@@ -713,17 +773,8 @@ void bnx2x_link_report(struct bnx2x *bp)
                        netif_carrier_on(bp->dev);
                netdev_info(bp->dev, "NIC Link is Up, ");
 
-               line_speed = bp->link_vars.line_speed;
-               if (IS_MF(bp)) {
-                       u16 vn_max_rate;
+               line_speed = bnx2x_get_mf_speed(bp);
 
-                       vn_max_rate =
-                               ((bp->mf_config[BP_VN(bp)] &
-                                 FUNC_MF_CFG_MAX_BW_MASK) >>
-                                               FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
-                       if (vn_max_rate < line_speed)
-                               line_speed = vn_max_rate;
-               }
                pr_cont("%d Mbps ", line_speed);
 
                if (bp->link_vars.duplex == DUPLEX_FULL)
@@ -813,7 +864,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
        DP(NETIF_MSG_IFUP,
           "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
 
-       for_each_queue(bp, j) {
+       for_each_rx_queue(bp, j) {
                struct bnx2x_fastpath *fp = &bp->fp[j];
 
                if (!fp->disable_tpa) {
@@ -866,7 +917,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
                }
        }
 
-       for_each_queue(bp, j) {
+       for_each_rx_queue(bp, j) {
                struct bnx2x_fastpath *fp = &bp->fp[j];
 
                fp->rx_bd_cons = 0;
@@ -897,7 +948,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
 {
        int i;
 
-       for_each_queue(bp, i) {
+       for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
 
                u16 bd_cons = fp->tx_bd_cons;
@@ -915,7 +966,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 {
        int i, j;
 
-       for_each_queue(bp, j) {
+       for_each_rx_queue(bp, j) {
                struct bnx2x_fastpath *fp = &bp->fp[j];
 
                for (i = 0; i < NUM_RX_BD; i++) {
@@ -945,6 +996,23 @@ void bnx2x_free_skbs(struct bnx2x *bp)
        bnx2x_free_rx_skbs(bp);
 }
 
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
+{
+       /* load old values */
+       u32 mf_cfg = bp->mf_config[BP_VN(bp)];
+
+       if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
+               /* leave all but MAX value */
+               mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
+
+               /* set new MAX value */
+               mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
+                               & FUNC_MF_CFG_MAX_BW_MASK;
+
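+               /* report the new max bandwidth value to the management FW */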
+               bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
+       }
+}
+
 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
 {
        int i, offset = 1;
@@ -956,7 +1024,7 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
 #ifdef BCM_CNIC
        offset++;
 #endif
-       for_each_queue(bp, i) {
+       for_each_eth_queue(bp, i) {
                DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
                   "state %x\n", i, bp->msix_table[i + offset].vector,
                   bnx2x_fp(bp, i, state));
@@ -990,14 +1058,14 @@ int bnx2x_enable_msix(struct bnx2x *bp)
           bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
        msix_vec++;
 #endif
-       for_each_queue(bp, i) {
+       for_each_eth_queue(bp, i) {
                bp->msix_table[msix_vec].entry = msix_vec;
                DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
                   "(fastpath #%u)\n", msix_vec, msix_vec, i);
                msix_vec++;
        }
 
-       req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
+       req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
 
        rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
 
@@ -1053,7 +1121,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 #ifdef BCM_CNIC
        offset++;
 #endif
-       for_each_queue(bp, i) {
+       for_each_eth_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
                         bp->dev->name, i);
@@ -1070,7 +1138,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
                fp->state = BNX2X_FP_STATE_IRQ;
        }
 
-       i = BNX2X_NUM_QUEUES(bp);
+       i = BNX2X_NUM_ETH_QUEUES(bp);
        offset = 1 + CNIC_CONTEXT_USE;
        netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
               " ... fp[%d] %d\n",
@@ -1117,7 +1185,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
 {
        int i;
 
-       for_each_queue(bp, i)
+       for_each_napi_queue(bp, i)
                napi_enable(&bnx2x_fp(bp, i, napi));
 }
 
@@ -1125,7 +1193,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp)
 {
        int i;
 
-       for_each_queue(bp, i)
+       for_each_napi_queue(bp, i)
                napi_disable(&bnx2x_fp(bp, i, napi));
 }
 
@@ -1153,6 +1221,35 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
        netif_tx_disable(bp->dev);
 }
 
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef BCM_CNIC
+       struct bnx2x *bp = netdev_priv(dev);
+       if (NO_FCOE(bp))
+               return skb_tx_hash(dev, skb);
+       else {
+               struct ethhdr *hdr = (struct ethhdr *)skb->data;
+               u16 ether_type = ntohs(hdr->h_proto);
+
+               /* Skip VLAN tag if present */
+               if (ether_type == ETH_P_8021Q) {
+                       struct vlan_ethhdr *vhdr =
+                               (struct vlan_ethhdr *)skb->data;
+
+                       ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
+               }
+
+               /* If ethertype is FCoE or FIP - use FCoE ring */
+               if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
+                       return bnx2x_fcoe(bp, index);
+       }
+#endif
+       /* Select a non-FCoE queue: if FCoE is enabled, exclude the FCoE L2
+        * ring
+        */
+       return __skb_tx_hash(dev, skb,
+                       dev->real_num_tx_queues - FCOE_CONTEXT_USE);
+}
+
 void bnx2x_set_num_queues(struct bnx2x *bp)
 {
        switch (bp->multi_mode) {
@@ -1167,8 +1264,23 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
                bp->num_queues = 1;
                break;
        }
+
+       /* Add special queues */
+       bp->num_queues += NONE_ETH_CONTEXT_USE;
 }
 
+#ifdef BCM_CNIC
+static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
+{
+       if (!NO_FCOE(bp)) {
+               if (!IS_MF_SD(bp))
+                       bnx2x_set_fip_eth_mac_addr(bp, 1);
+               bnx2x_set_all_enode_macs(bp, 1);
+               bp->flags |= FCOE_MACS_SET;
+       }
+}
+#endif
+
 static void bnx2x_release_firmware(struct bnx2x *bp)
 {
        kfree(bp->init_ops_offsets);
@@ -1177,6 +1289,20 @@ static void bnx2x_release_firmware(struct bnx2x *bp)
        release_firmware(bp->firmware);
 }
 
+static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
+{
+       int rc, num = bp->num_queues;
+
+#ifdef BCM_CNIC
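+       /* the FCoE L2 ring is counted in bp->num_queues, but it isn't
+        * exposed to the stack when FCoE is disabled
+        */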
+       if (NO_FCOE(bp))
+               num -= FCOE_CONTEXT_USE;
+#endif
+       netif_set_real_num_tx_queues(bp->dev, num);
+       rc = netif_set_real_num_rx_queues(bp->dev, num);
+       return rc;
+}
+
 /* must be called with rtnl_lock */
 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 {
@@ -1203,10 +1329,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        if (bnx2x_alloc_mem(bp))
                return -ENOMEM;
 
-       netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
-       rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
+       rc = bnx2x_set_real_num_queues(bp);
        if (rc) {
-               BNX2X_ERR("Unable to update real_num_rx_queues\n");
+               BNX2X_ERR("Unable to set real_num_queues\n");
                goto load_error0;
        }
 
@@ -1214,6 +1339,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                bnx2x_fp(bp, i, disable_tpa) =
                                        ((bp->flags & TPA_ENABLE_FLAG) == 0);
 
+#ifdef BCM_CNIC
+       /* We don't want TPA on FCoE L2 ring */
+       bnx2x_fcoe(bp, disable_tpa) = 1;
+#endif
        bnx2x_napi_enable(bp);
 
        /* Send LOAD_REQUEST command to MCP
@@ -1296,6 +1425,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                }
        }
 
+       bnx2x_dcbx_init(bp);
+
        bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
 
        rc = bnx2x_func_start(bp);
@@ -1344,8 +1475,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        /* Now when Clients are configured we are ready to work */
        bp->state = BNX2X_STATE_OPEN;
 
+#ifdef BCM_CNIC
+       bnx2x_set_fcoe_eth_macs(bp);
+#endif
+
        bnx2x_set_eth_mac(bp, 1);
 
+       if (bp->pending_max) {
+               bnx2x_update_max_mf_config(bp, bp->pending_max);
+               bp->pending_max = 0;
+       }
+
        if (bp->port.pmf)
                bnx2x_initial_phy_init(bp, load_mode);
 
@@ -1402,7 +1542,7 @@ load_error3:
 
        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
-       for_each_queue(bp, i)
+       for_each_rx_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 
        /* Release IRQs */
@@ -1473,7 +1613,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
-       for_each_queue(bp, i)
+       for_each_rx_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 
        bnx2x_free_mem(bp);
@@ -1577,6 +1717,17 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
 
                /* Fall out from the NAPI loop if needed */
                if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+#ifdef BCM_CNIC
+                       /* No need to update SB for FCoE L2 ring as long as
+                        * it's connected to the default SB and the SB
+                        * has been updated when NAPI was scheduled.
+                        */
+                       if (IS_FCOE_FP(fp)) {
+                               napi_complete(napi);
+                               break;
+                       }
+#endif
+
                        bnx2x_update_fpsb_idx(fp);
                        /* bnx2x_has_rx_work() reads the status block,
                         * thus we need to ensure that status block indices
@@ -1692,11 +1843,10 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
                }
        }
 
-       if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-               rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
-
-       else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-               rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+       if (skb_is_gso_v6(skb))
+               rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
+       else if (skb_is_gso(skb))
+               rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
 
        return rc;
 }
@@ -2242,7 +2392,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
        bp->fp = fp;
 
        /* msix table */
-       tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
+       tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
                                  GFP_KERNEL);
        if (!tbl)
                goto alloc_err;