}
}
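+/* Handle async events from the FW debug event group: for a QnQ event,
+ * cache the FW-reported outer VLAN id and note that the event was received.
+ */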
+static void be_async_dbg_evt_process(struct be_adapter *adapter,
+ u32 trailer, struct be_mcc_compl *cmp)
+{
+ u8 event_type = 0;
+ struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
+
+ event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
+ ASYNC_TRAILER_EVENT_TYPE_MASK;
+
+ switch (event_type) {
+ case ASYNC_DEBUG_EVENT_TYPE_QNQ:
+ if (evt->valid)
+ adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
+ adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev, "Unknown debug event\n");
+ break;
+ }
+}
+
static inline bool is_link_state_evt(u32 trailer)
{
 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
 ASYNC_TRAILER_EVENT_CODE_MASK) ==
 ASYNC_EVENT_CODE_LINK_STATE;
}
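+/* Returns true if the async event code in the trailer is the QnQ/debug group */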
+static inline bool is_dbg_evt(u32 trailer)
+{
+ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_QNQ);
+}
+
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
else if (is_grp5_evt(compl->flags))
be_async_grp5_evt_process(adapter,
compl->flags, compl);
+ else if (is_dbg_evt(compl->flags))
+ be_async_dbg_evt_process(adapter,
+ compl->flags, compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
status = be_mcc_compl_process(adapter, compl);
atomic_dec(&mcc_obj->q.used);
/* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
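+ /* Also subscribe to QnQ async events */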
+ req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
}
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
- struct sk_buff *skb, u32 wrb_cnt, u32 len)
+ struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
u16 vlan_tag;
AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
}
+ /* To skip HW VLAN tagging: evt = 1, compl = 0 */
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
}
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
- struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
+ struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
+ bool skip_hw_vlan)
{
dma_addr_t busaddr;
int i, copied = 0;
queue_head_inc(txq);
}
- wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
+ wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
be_dws_cpu_to_le(hdr, sizeof(*hdr));
return copied;
}
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ bool *skip_hw_vlan)
{
u16 vlan_tag = 0;
skb->vlan_tci = 0;
}
+ if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
+ if (!vlan_tag)
+ vlan_tag = adapter->pvid;
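+ /* The tag is inserted into the pkt data below; skip_hw_vlan tells
+ * the caller to disable HW VLAN tagging for this WRB.
+ */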
+ if (skip_hw_vlan)
+ *skip_hw_vlan = true;
+ }
+
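+ /* Insert the (inner) VLAN tag in the pkt data; clear vlan_tci so the
+ * pkt is not tagged again.
+ */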
+ if (vlan_tag) {
+ skb = __vlan_put_tag(skb, vlan_tag);
+ if (unlikely(!skb))
+ return skb;
+
+ skb->vlan_tci = 0;
+ }
+
+ /* Insert the outer VLAN, if any */
+ if (adapter->qnq_vid) {
+ vlan_tag = adapter->qnq_vid;
+ skb = __vlan_put_tag(skb, vlan_tag);
+ if (unlikely(!skb))
+ return skb;
+ if (skip_hw_vlan)
+ *skip_hw_vlan = true;
+ }
+
return skb;
}
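+/* Detect IPv6 pkts carrying an extension header whose hdrlen byte is 0xff;
+ * such pkts can trigger the BE3 Tx stall worked around in be_xmit().
+ */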
+static bool be_ipv6_exthdr_check(struct sk_buff *skb)
+{
+ struct ethhdr *eh = (struct ethhdr *)skb->data;
+ u16 offset = ETH_HLEN;
+
+ if (eh->h_proto == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
+
+ offset += sizeof(struct ipv6hdr);
+ if (ip6h->nexthdr != NEXTHDR_TCP &&
+ ip6h->nexthdr != NEXTHDR_UDP) {
+ struct ipv6_opt_hdr *ehdr =
+ (struct ipv6_opt_hdr *) (skb->data + offset);
+
+ /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
+ if (ehdr->hdrlen == 0xff)
+ return true;
+ }
+ }
+ return false;
+}
+
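+/* Returns true if a VLAN tag would be inserted on this pkt (from the stack,
+ * the port's PVID, or a QnQ outer VLAN).
+ */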
+static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
+{
+ return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
+}
+
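+/* Returns true if this pkt could hit the BE3 IPv6 extension-header Tx stall */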
+static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
+{
+ return BE3_chip(adapter) &&
+ be_ipv6_exthdr_check(skb);
+}
+
static netdev_tx_t be_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
u32 wrb_cnt = 0, copied = 0;
u32 start = txq->head, eth_hdr_len;
bool dummy_wrb, stopped = false;
+ bool skip_hw_vlan = false;
eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
VLAN_ETH_HLEN : ETH_HLEN;
*/
if (skb->ip_summed != CHECKSUM_PARTIAL &&
vlan_tx_tag_present(skb)) {
- skb = be_insert_vlan_in_pkt(adapter, skb);
+ skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
+ if (unlikely(!skb))
+ goto tx_drop;
+ }
+
+ /* HW may lockup when VLAN HW tagging is requested on
+ * certain ipv6 packets. Drop such pkts if the HW workaround to
+ * skip HW tagging is not enabled by FW.
+ */
+ if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
+ (adapter->pvid || adapter->qnq_vid) &&
+ !qnq_async_evt_rcvd(adapter)))
+ goto tx_drop;
+
+ /* Manual VLAN tag insertion to prevent:
+ * ASIC lockup when the ASIC inserts VLAN tag into
+ * certain ipv6 packets. Insert VLAN tags in driver,
+ * and set event, completion, vlan bits accordingly
+ * in the Tx WRB.
+ */
+ if (be_ipv6_tx_stall_chk(adapter, skb) &&
+ be_vlan_tag_tx_chk(adapter, skb)) {
+ skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
if (unlikely(!skb))
goto tx_drop;
}
wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
- copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
+ copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
+ skip_hw_vlan);
if (copied) {
int gso_segs = skb_shinfo(skb)->gso_segs;