fp->last_max_sge = idx;
}
-static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
- struct eth_fast_path_rx_cqe *fp_cqe)
+static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
+ u16 sge_len,
+ struct eth_end_agg_rx_cqe *cqe)
{
struct bnx2x *bp = fp->bp;
- u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
- le16_to_cpu(fp_cqe->len_on_bd)) >>
- SGE_PAGE_SHIFT;
u16 last_max, last_elem, first_elem;
u16 delta = 0;
u16 i;
/* First mark all used pages */
for (i = 0; i < sge_len; i++)
BIT_VEC64_CLEAR_BIT(fp->sge_mask,
- RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
+ RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
- sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+ sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
/* Here we assume that the last SGE index is the biggest */
prefetch((void *)(fp->sge_mask));
bnx2x_update_last_max_sge(fp,
- le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+ le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
last_max = RX_SGE(fp->last_max_sge);
last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
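+ /* In GRO mode the start-aggregation CQE carries the GRO segment
+  * length; remember it, together with the largest multiple of it
+  * that fits in one SGE buffer, for when the skb frags are filled.
+  */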
+ if (fp->mode == TPA_MODE_GRO) {
+ u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
+ tpa_info->full_page =
+ SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
+ tpa_info->gro_size = gro_size;
+ }
#ifdef BNX2X_STOP_ON_ERROR
fp->tpa_queue_used |= (1 << queue);
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 queue, struct sk_buff *skb,
+ struct bnx2x_agg_info *tpa_info,
+ u16 pages,
+ struct sk_buff *skb,
struct eth_end_agg_rx_cqe *cqe,
u16 cqe_idx)
{
struct sw_rx_page *rx_pg, old_rx_pg;
- u32 i, frag_len, frag_size, pages;
- int err;
- int j;
- struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+ u32 i, frag_len, frag_size;
+ int err, j, frag_id = 0;
u16 len_on_bd = tpa_info->len_on_bd;
+ u16 full_page = 0, gro_size = 0;
frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
- pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
+
+ if (fp->mode == TPA_MODE_GRO) {
+ gro_size = tpa_info->gro_size;
+ full_page = tpa_info->full_page;
+ }
/* This is needed in order to enable forwarding support */
- if (frag_size)
+ if (frag_size) {
skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
tpa_info->parsing_flags, len_on_bd);
+ /* set for GRO */
+ if (fp->mode == TPA_MODE_GRO)
+ skb_shinfo(skb)->gso_type =
+ (GET_FLAG(tpa_info->parsing_flags,
+ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
+ PRS_FLAG_OVERETH_IPV6) ?
+ SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
+ }
+
#ifdef BNX2X_STOP_ON_ERROR
if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
/* FW gives the indices of the SGE as if the ring is an array
(meaning that "next" element will consume 2 indices) */
- frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
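+ /* A single SGE contributes at most full_page bytes in GRO mode
+  * (a whole number of gro_size segments); in LRO mode it may use
+  * the entire SGE buffer.
+  */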
+ if (fp->mode == TPA_MODE_GRO)
+ frag_len = min_t(u32, frag_size, (u32)full_page);
+ else /* LRO */
+ frag_len = min_t(u32, frag_size,
+ (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
+
rx_pg = &fp->rx_page_ring[sge_idx];
old_rx_pg = *rx_pg;
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(&old_rx_pg, mapping),
SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
-
/* Add one frag and update the appropriate fields in the skb */
- skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+ if (fp->mode == TPA_MODE_LRO)
+ skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+ else { /* GRO */
+ int rem;
+ int offset = 0;
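+ /* GRO: split the SGE buffer into gro_size-sized frags so the
+  * stack sees per-segment chunks; every frag after the first that
+  * references the same page takes an extra page reference.
+  */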
+ for (rem = frag_len; rem > 0; rem -= gro_size) {
+ int len = rem > gro_size ? gro_size : rem;
+ skb_fill_page_desc(skb, frag_id++,
+ old_rx_pg.page, offset, len);
+ if (offset)
+ get_page(old_rx_pg.page);
+ offset += len;
+ }
+ }
skb->data_len += frag_len;
skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
return 0;
}
-static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 queue, struct eth_end_agg_rx_cqe *cqe,
- u16 cqe_idx)
+static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ struct bnx2x_agg_info *tpa_info,
+ u16 pages,
+ struct eth_end_agg_rx_cqe *cqe,
+ u16 cqe_idx)
{
- struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
- u32 pad = tpa_info->placement_offset;
+ u8 pad = tpa_info->placement_offset;
u16 len = tpa_info->len_on_bd;
struct sk_buff *skb = NULL;
- u8 *data = rx_buf->data;
- /* alloc new skb */
- u8 *new_data;
+ u8 *new_data, *data = rx_buf->data;
u8 old_tpa_state = tpa_info->tpa_state;
tpa_info->tpa_state = BNX2X_TPA_STOP;
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
+ if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
+ skb, cqe, cqe_idx)) {
if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
napi_gro_receive(&fp->napi, skb);
struct eth_fast_path_rx_cqe *cqe_fp;
u8 cqe_fp_flags;
enum eth_rx_cqe_type cqe_fp_type;
- u16 len, pad;
+ u16 len, pad, queue;
u8 *data;
#ifdef BNX2X_STOP_ON_ERROR
" queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
cqe_fp_flags, cqe_fp->status_flags,
le32_to_cpu(cqe_fp->rss_hash_result),
- le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
+ le16_to_cpu(cqe_fp->vlan_tag),
+ le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
/* is this a slowpath msg? */
if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
bnx2x_sp_event(fp, cqe);
goto next_cqe;
}
+
rx_buf = &fp->rx_buf_ring[bd_cons];
data = rx_buf->data;
if (!CQE_TYPE_FAST(cqe_fp_type)) {
+ struct bnx2x_agg_info *tpa_info;
+ u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
/* sanity check */
if (fp->disable_tpa &&
bnx2x_tpa_start(fp, queue,
bd_cons, bd_prod,
cqe_fp);
+
goto next_rx;
- } else {
- u16 queue =
- cqe->end_agg_cqe.queue_index;
- DP(NETIF_MSG_RX_STATUS,
- "calling tpa_stop on queue %d\n",
- queue);
- bnx2x_tpa_stop(bp, fp, queue,
- &cqe->end_agg_cqe,
- comp_ring_cons);
+ }
+ queue = cqe->end_agg_cqe.queue_index;
+ tpa_info = &fp->tpa_info[queue];
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_stop on queue %d\n",
+ queue);
+
+ frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
+ tpa_info->len_on_bd;
+
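+ /* Number of SGE buffers consumed by this aggregation: GRO packs
+  * full_page bytes per SGE, LRO may fill the whole SGE buffer.
+  */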
+ if (fp->mode == TPA_MODE_GRO)
+ pages = (frag_size + tpa_info->full_page - 1) /
+ tpa_info->full_page;
+ else
+ pages = SGE_PAGE_ALIGN(frag_size) >>
+ SGE_PAGE_SHIFT;
+
+ bnx2x_tpa_stop(bp, fp, tpa_info, pages,
+ &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
- if (bp->panic)
- return 0;
+ if (bp->panic)
+ return 0;
#endif
- bnx2x_update_sge_prod(fp, cqe_fp);
- goto next_cqe;
- }
+ bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
+ goto next_cqe;
}
/* non TPA */
- len = le16_to_cpu(cqe_fp->pkt_len);
+ len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
pad = cqe_fp->placement_offset;
dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
}
netdev_features_t bnx2x_fix_features(struct net_device *dev,
- netdev_features_t features)
+ netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);
/* TPA requires Rx CSUM offloading */
- if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
+ if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
features &= ~NETIF_F_LRO;
+ features &= ~NETIF_F_GRO;
+ }
return features;
}
else
flags &= ~TPA_ENABLE_FLAG;
+ if (features & NETIF_F_GRO)
+ flags |= GRO_ENABLE_FLAG;
+ else
+ flags &= ~GRO_ENABLE_FLAG;
+
if (features & NETIF_F_LOOPBACK) {
if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
bp->link_params.loopback_mode = LOOPBACK_BMAC;
};
-#define PORT_0 0
-#define PORT_1 1
-#define PORT_MAX 2
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_MAX 2
+#define NVM_PATH_MAX 2
/****************************************************************************
* Shared HW configuration *
#define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
#define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
- /* Enable RJ45 magjack pair swapping on 10GBase-T PHY, 84833 only */
- #define PORT_HW_CFG_RJ45_PR_SWP_MASK 0x00400000
- #define PORT_HW_CFG_RJ45_PR_SWP_SHIFT 22
- #define PORT_HW_CFG_RJ45_PR_SWP_DISABLED 0x00000000
- #define PORT_HW_CFG_RJ45_PR_SWP_ENABLED 0x00400000
-
/* Determine the Serdes electrical interface */
#define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000
#define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24
#define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000
#define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100
- #define PORT_FEAT_CFG_AUTOGREEN_MASK 0x00000200
- #define PORT_FEAT_CFG_AUTOGREEN_SHIFT 9
- #define PORT_FEAT_CFG_AUTOGREEN_DISABLED 0x00000000
- #define PORT_FEAT_CFG_AUTOGREEN_ENABLED 0x00000200
-
#define PORT_FEATURE_EN_SIZE_MASK 0x0f000000
#define PORT_FEATURE_EN_SIZE_SHIFT 24
#define PORT_FEATURE_WOL_ENABLED 0x01000000
#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
-/* LED Blink rate that will achieve ~15.9Hz */
-#define LED_BLINK_RATE_VAL 480
-
/****************************************************************************
* Driver <-> FW Mailbox *
****************************************************************************/
#define PORT_MF_CFG_E1HOV_TAG_SHIFT 0
#define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK
- u32 reserved[3];
+ u32 reserved[1];
};
struct mf_cfg {
struct shared_mf_cfg shared_mf_config; /* 0x4 */
- struct port_mf_cfg port_mf_config[PORT_MAX]; /* 0x10 * 2 = 0x20 */
+ /* 0x8*2*2=0x20 */
+ struct port_mf_cfg port_mf_config[NVM_PATH_MAX][PORT_MAX];
/* for all chips, there are 8 mf functions */
struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */
/*
#define DRV_INFO_CONTROL_VER_SHIFT 0
#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
+ u32 ibft_host_addr; /* initialized by option ROM */
};
struct iscsi_stats_info iscsi_stat;
};
#define BCM_5710_FW_MAJOR_VERSION 7
-#define BCM_5710_FW_MINOR_VERSION 0
-#define BCM_5710_FW_REVISION_VERSION 29
+#define BCM_5710_FW_MINOR_VERSION 2
+#define BCM_5710_FW_REVISION_VERSION 16
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0
#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1)
#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
-#define CLIENT_INIT_RX_DATA_RESERVED5 (0x3F<<2)
-#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 2
+#define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2)
+#define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2
+#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3)
+#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3
u8 vmqueue_mode_en_flg;
u8 extra_data_over_sgl_en_flg;
u8 cache_line_alignment_log_size;
u8 outer_vlan_removal_enable_flg;
u8 status_block_id;
u8 rx_sb_index_number;
- u8 reserved0;
+ u8 dont_verify_rings_pause_thr_flg;
u8 max_tpa_queues;
u8 silent_vlan_removal_flg;
__le16 max_bytes_on_bd;
u8 placement_offset;
__le32 rss_hash_result;
__le16 vlan_tag;
- __le16 pkt_len;
+ __le16 pkt_len_or_gro_seg_len;
__le16 len_on_bd;
struct parsing_flags pars_flags;
union eth_sgl_or_raw_data sgl_or_raw_data;
};
+/*
+ * Ethernet TPA Modes
+ */
+enum tpa_mode {
+ TPA_LRO,
+ TPA_GRO,
+ MAX_TPA_MODE};
+
/*
* tpa update ramrod data
*/
u8 max_tpa_queues;
u8 max_sges_for_packet;
u8 complete_on_both_clients;
- __le16 reserved1;
+ u8 dont_verify_rings_pause_thr_flg;
+ u8 tpa_mode;
__le16 sge_buff_size;
__le16 max_agg_size;
__le32 sge_page_base_lo;
RAMROD_CMD_ID_COMMON_UNUSED,
RAMROD_CMD_ID_COMMON_FUNCTION_START,
RAMROD_CMD_ID_COMMON_FUNCTION_STOP,
+ RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE,
RAMROD_CMD_ID_COMMON_CFC_DEL,
RAMROD_CMD_ID_COMMON_CFC_DEL_WB,
RAMROD_CMD_ID_COMMON_STAT_QUERY,
RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
RAMROD_CMD_ID_COMMON_START_TRAFFIC,
RAMROD_CMD_ID_COMMON_RESERVED1,
- RAMROD_CMD_ID_COMMON_RESERVED2,
MAX_COMMON_SPQE_CMD_ID
};
EVENT_RING_OPCODE_MALICIOUS_VF,
EVENT_RING_OPCODE_FORWARD_SETUP,
EVENT_RING_OPCODE_RSS_UPDATE_RULES,
+ EVENT_RING_OPCODE_FUNCTION_UPDATE,
EVENT_RING_OPCODE_RESERVED1,
- EVENT_RING_OPCODE_RESERVED2,
EVENT_RING_OPCODE_SET_MAC,
EVENT_RING_OPCODE_CLASSIFICATION_RULES,
EVENT_RING_OPCODE_FILTERS_RULES,