/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"

#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define NIC_TX_CLEAN_MAX_NUM 256
#define NIC_RX_CLEAN_MAX_NUM 64

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048

#define BD_MAX_SEND_SIZE 8191
#define SKB_TMP_LEN(SKB) \
	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))

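/* fill_v2_desc - fill one v2 TX BD from the mapped buffer; for TCP packets
 * with CHECKSUM_PARTIAL it also programs the L3/L4 checksum-offload and TSO
 * fields, then advances next_to_use
 */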
static void fill_v2_desc(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct sk_buff *skb;
	int skb_tmp_len;
	__be16 protocol;
	u8 bn_pid = 0;
	u8 rrcfv = 0;
	u8 ip_offset = 0;
	u8 tvsvsn = 0;
	u16 mss = 0;
	u8 l4_len = 0;
	u16 paylen = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
	hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				iphdr = ip_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (iphdr->protocol == IPPROTO_TCP) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					skb_tmp_len = SKB_TMP_LEN(skb);
					l4_len = tcp_hdrlen(skb);
					mss = mtu - skb_tmp_len - ETH_FCS_LEN;
					paylen = skb->len - skb_tmp_len;
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
				ipv6hdr = ipv6_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (ipv6hdr->nexthdr == IPPROTO_TCP) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					skb_tmp_len = SKB_TMP_LEN(skb);
					l4_len = tcp_hdrlen(skb);
					mss = mtu - skb_tmp_len - ETH_FCS_LEN;
					paylen = skb->len - skb_tmp_len;
				}
			}
			desc->tx.ip_offset = ip_offset;
			desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
			desc->tx.mss = cpu_to_le16(mss);
			desc->tx.l4_len = l4_len;
			desc->tx.paylen = cpu_to_le16(paylen);
		}
	}

	hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

	desc->tx.bn_pid = bn_pid;
	desc->tx.ra_ri_cs_fe_vld = rrcfv;

	ring_ptr_move_fw(ring, next_to_use);
}

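/* fill_desc - v1 variant of the BD filling routine: only the checksum
 * offload bits are set, as enet v1 does not support TSO
 */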
static void fill_desc(struct hnae_ring *ring, void *priv,
		      int size, dma_addr_t dma, int frag_end,
		      int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct sk_buff *skb;
	__be16 protocol;
	u32 ip_offset = 0;
	u32 asid_bufnum_pid = 0;
	u32 flag_ipoffset = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	flag_ipoffset |= 1 << HNS_TXD_VLD_B;

	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/* if it is a SW VLAN check the next protocol */
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
				/* check for tcp/udp header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;

			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* ipv6 has no l3 cs, check for L4 header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			}

			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
		}
	}

	flag_ipoffset |= frag_end << HNS_TXD_FE_B;

	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

	ring_ptr_move_fw(ring, next_to_use);
}

static void unfill_desc(struct hnae_ring *ring)
{
	ring_ptr_move_bw(ring, next_to_use);
}

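/* return 0 on success, -EBUSY if the ring is full, -ENOMEM if an
 * over-fragmented skb cannot be linearized by skb_copy()
 */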
static int hns_nic_maybe_stop_tx(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	int buf_num;

	/* no. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		if (ring_space(ring) < 1)
			return -EBUSY;

		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;

		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
		buf_num = 1;
	} else if (buf_num > ring_space(ring)) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

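/* TSO variant: count one BD per BD_MAX_SEND_SIZE chunk of the linear part
 * and of every fragment before checking the ring space
 */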
static int hns_nic_maybe_stop_tso(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	int i;
	int size;
	int buf_num;
	int frag_num;
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	struct skb_frag_struct *frag;

	size = skb_headlen(skb);
	buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	}

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
		if (ring_space(ring) < buf_num)
			return -EBUSY;
		/* manually split the send packet */
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;

	} else if (ring_space(ring) < buf_num) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

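/* split a buffer that exceeds what one BD can carry (BD_MAX_SEND_SIZE)
 * into several consecutive BDs
 */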
static void fill_tso_desc(struct hnae_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  int buf_num, enum hns_desc_type type, int mtu)
{
	int frag_buf_num;
	int sizeoflast;
	int k;

	frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	sizeoflast = size % BD_MAX_SEND_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

	/* when the frag size is bigger than hardware, split this frag */
	for (k = 0; k < frag_buf_num; k++)
		fill_v2_desc(ring, priv,
			     (k == frag_buf_num - 1) ?
					sizeoflast : BD_MAX_SEND_SIZE,
			     dma + BD_MAX_SEND_SIZE * k,
			     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
			     buf_num,
			     (type == DESC_TYPE_SKB && !k) ?
					DESC_TYPE_SKB : DESC_TYPE_PAGE,
			     mtu);
}

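/* map the skb head and all fragments, fill one BD per mapped piece and
 * kick the hardware queue; unwinds the DMA mappings on failure
 */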
int hns_nic_net_xmit_hw(struct net_device *ndev,
			struct sk_buff *skb,
			struct hns_nic_ring_data *ring_data)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct hnae_ring *ring = ring_data->ring;
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int buf_num;
	int seg_num;
	dma_addr_t dma;
	int size, next_to_use;
	int i;

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		ring->stats.tx_busy++;
		goto out_net_tx_busy;
	case -ENOMEM:
		ring->stats.sw_err_cnt++;
		netdev_err(ndev, "no memory to xmit!\n");
		goto out_err_tx_ok;
	default:
		break;
	}

	/* no. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	next_to_use = ring->next_to_use;

	/* fill the first part */
	size = skb_headlen(skb);
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(ndev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}
	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
			    buf_num, DESC_TYPE_SKB, ndev->mtu);

	/* fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto out_map_frag_fail;
		}
		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
				    seg_num - 1 == i ? 1 : 0, buf_num,
				    DESC_TYPE_PAGE, ndev->mtu);
	}

	/* complete translating all packets */
	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* commit all data before submit */
	assert(skb->queue_mapping < priv->ae_handle->q_num);
	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
	ring->stats.tx_pkts++;
	ring->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;

out_map_frag_fail:

	while (ring->next_to_use != next_to_use) {
		unfill_desc(ring);
		if (ring->next_to_use != next_to_use)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(dev,
					 ring->desc_cb[next_to_use].dma,
					 ring->desc_cb[next_to_use].length,
					 DMA_TO_DEVICE);
	}

out_err_tx_ok:

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:

	netif_stop_subqueue(ndev, skb->queue_mapping);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();
	return NETDEV_TX_BUSY;
}

/**
 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @flag: RX descriptor flags
 * @max_size: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 **/
static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
					unsigned int max_size)
{
	unsigned char *network;
	u8 hlen;

	/* this should never happen, but better safe than sorry */
	if (max_size < ETH_HLEN)
		return max_size;

	/* initialize network frame pointer */
	network = data;

	/* set first protocol and move network header forward */
	network += ETH_HLEN;

	/* handle any vlan tag if present */
	if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
		== HNS_RX_FLAG_VLAN_PRESENT) {
		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
			return max_size;

		network += VLAN_HLEN;
	}

	/* handle L3 protocols */
	if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV4) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct iphdr)))
			return max_size;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (network[0] & 0x0F) << 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return network - data;

		/* record next protocol if header is present */
	} else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV6) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct ipv6hdr)))
			return max_size;

		/* record next protocol */
		hlen = sizeof(struct ipv6hdr);
	} else {
		return network - data;
	}

	/* relocate pointer to start of L4 header */
	network += hlen;

	/* finally sort out TCP/UDP */
	if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_TCP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct tcphdr)))
			return max_size;

		/* access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (network[12] & 0xF0) >> 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return network - data;

		network += hlen;
	} else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_UDP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct udphdr)))
			return max_size;

		network += sizeof(struct udphdr);
	}

	/* If everything has gone correctly network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((typeof(max_size))(network - data) < max_size)
		return network - data;
	else
		return max_size;
}

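/* append one RX buffer to the skb as a page fragment and decide whether
 * the page can be recycled into the ring (local NUMA node, sole owner)
 */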
static void hns_nic_reuse_page(struct sk_buff *skb, int i,
			       struct hnae_ring *ring, int pull_len,
			       struct hnae_desc_cb *desc_cb)
{
	struct hnae_desc *desc;
	int truesize, size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize - pull_len);

	/* avoid re-using remote pages, flag default unreuse */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* if we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}

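/* v2 hardware encodes (bnum - 1) in the descriptor flags, v1 encodes
 * bnum directly; both helpers return the BD count for one packet
 */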
static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}

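/* receive one packet: copy the header (or the whole small packet) into a
 * freshly allocated skb, attach the remaining BD buffers as frags and run
 * the descriptor error checks
 */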
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
			       struct sk_buff **out_skb, int *out_bnum)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	unsigned char *va;
	int bnum, length, i;
	int pull_len;
	u32 bnum_flag;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
					HNS_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(ndev, "alloc rx skb fail\n");
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	prefetchw(skb->data);
	length = le16_to_cpu(desc->rx.pkt_len);
	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
	priv->ops.get_rxd_bnum(bnum_flag, &bnum);
	*out_bnum = bnum;

	if (length <= HNS_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum != 1)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
	} else {
		ring->stats.seg_pkt_cnt++;

		pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
		for (i = 1; i < bnum; i++) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];

			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
		}
	}

	/* check except process, free skb and jump the desc */
	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
			   bnum, ring->max_desc_num_per_pkt,
			   length, (int)MAX_SKB_FRAGS,
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_bd_num++;
		dev_kfree_skb_any(skb);
		return -EDOM;
	}

	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.non_vld_descs++;
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
		ring->stats.err_pkt_len++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
		ring->stats.l2_err++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_L4E_B))) {
		ring->stats.l3l4_csum_err++;
		return 0;
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	return 0;
}

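/* refill the RX ring with cleand_count buffers, reusing recycled pages
 * where possible, then publish the new tail to hardware
 */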
static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
	int i, ret;
	struct hnae_desc_cb res_cbs;
	struct hnae_desc_cb *desc_cb;
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			ring->stats.reuse_pg_cnt++;
			hnae_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hnae_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				ring->stats.sw_err_cnt++;
				netdev_err(ndev, "hnae reserve buffer map failed.\n");
				break;
			}
			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* make sure all data is written before submit */
	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}

/* pass the received packet up to the protocol stack */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
			      struct sk_buff *skb)
{
	struct net_device *ndev = ring_data->napi.dev;

	skb->protocol = eth_type_trans(skb, ndev);
	(void)napi_gro_receive(&ring_data->napi, skb);
	ndev->last_rx = jiffies;
}

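/* NAPI RX poll: returns the number of packets received, re-reading the
 * hardware BD counter once before giving up the remaining budget
 */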
static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct sk_buff *skb;
	int num, bnum, ex_num;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
	rmb(); /* make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
recv:
	while (recv_pkts < budget && recv_bds < num) {
		/* reuse or realloc buffers */
		if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns_nic_alloc_rx_buffers(ring_data, clean_count);
			clean_count = 0;
		}

		/* poll one pkt */
		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* do jump the err */
			recv_pkts++;
			continue;
		}

		/* do update ip stack process */
		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
							ring_data, skb);
		recv_pkts++;
	}

	/* budget left: check whether more BDs arrived meanwhile */
	if (recv_pkts < budget) {
		ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

		if (ex_num > clean_count) {
			num += ex_num - clean_count;
			rmb(); /* complete read of rx ring bd number */
			goto recv;
		}
	}

out:
	/* make sure all data is written before submit */
	if (clean_count > 0)
		hns_nic_alloc_rx_buffers(ring_data, clean_count);

	return recv_pkts;
}

static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;

	/* for hardware bug fixed */
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num > 0) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		napi_schedule(&ring_data->napi);
	}
}

static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
					    int *bytes, int *pkts)
{
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned, after hnae_free_buffer_detach */
	hnae_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}

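/* a head index reported by hardware is only sane if it lies between
 * next_to_clean and next_to_use, accounting for ring wrap-around
 */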
static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	assert(u > 0 && u < ring->desc_num);
	assert(c > 0 && c < ring->desc_num);
	assert(u != c && h != c); /* must be checked before calling this func */

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
#else
#define NETIF_TX_LOCK(ndev)
#define NETIF_TX_UNLOCK(ndev)
#endif

/* reclaim all desc in one budget
 * return error or number of desc left
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
	rmb(); /* make sure head is ready before touch any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean) {
		NETIF_TX_UNLOCK(ndev);
		return 0; /* no data to poll */
	}

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);
		ring->stats.io_err_cnt++;
		NETIF_TX_UNLOCK(ndev);
		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean) {
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
	}

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(priv->link && !netif_carrier_ok(ndev)))
		netif_carrier_on(ndev);

	if (unlikely(pkts && netif_carrier_ok(ndev) &&
		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
	return 0;
}

static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head;

	/* for hardware bug fixed */
	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head != ring->next_to_clean) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		napi_schedule(&ring_data->napi);
	}
}

static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = ring->next_to_use; /* ntu: ring position set by software */
	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_reset_queue(dev_queue);
}

static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
	int clean_complete = ring_data->poll_one(
		ring_data, budget, ring_data->ex_process);

	if (clean_complete >= 0 && clean_complete < budget) {
		napi_complete(napi);
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 0);

		ring_data->fini_process(ring_data);
		return 0;
	}

	return clean_complete;
}

static irqreturn_t hns_irq_handle(int irq, void *dev)
{
	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
		ring_data->ring, 1);
	napi_schedule(&ring_data->napi);

	return IRQ_HANDLED;
}

/**
 * hns_nic_adjust_link - adjust the link mode according to the PHY state or
 *	new parameters
 * @ndev: net device
 */
static void hns_nic_adjust_link(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex);
}

/**
 * hns_nic_init_phy - init phy
 * @ndev: net device
 * @h: ae handle
 * Return 0 on success, negative on failure
 */
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = NULL;

	if (!h->phy_node)
		return 0;

	if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
		phy_dev = of_phy_connect(ndev, h->phy_node,
					 hns_nic_adjust_link, 0, h->phy_if);
	else
		phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if);

	if (unlikely(!phy_dev) || IS_ERR(phy_dev))
		return !phy_dev ? -ENODEV : PTR_ERR(phy_dev);

	phy_dev->supported &= h->if_support;
	phy_dev->advertising = phy_dev->supported;

	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
		phy_dev->autoneg = false;

	priv->phy = phy_dev;

	return 0;
}

static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	napi_enable(&priv->ring_data[idx].napi);

	enable_irq(priv->ring_data[idx].ring->irq);
	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

	return 0;
}

static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);

	return 0;
}

void hns_nic_update_stats(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->update_stats(h, &netdev->stats);
}

/* set the MAC address if it is configured, otherwise leave it to the AE
 * driver
 */
static void hns_init_mac_addr(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct device_node *node = priv->dev->of_node;
	const void *mac_addr_temp;

	mac_addr_temp = of_get_mac_address(node);
	if (mac_addr_temp && is_valid_ether_addr(mac_addr_temp)) {
		memcpy(ndev->dev_addr, mac_addr_temp, ndev->addr_len);
	} else {
		eth_hw_addr_random(ndev);
		dev_warn(priv->dev, "No valid mac, use random mac %pM",
			 ndev->dev_addr);
	}
}

static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
	disable_irq(priv->ring_data[idx].ring->irq);

	napi_disable(&priv->ring_data[idx].napi);
}

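/* spread ring IRQs over the online CPUs: one CPU per ring when the queue
 * count matches the CPU count, otherwise even CPUs take TX rings and odd
 * CPUs take RX rings
 */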
static void hns_set_irq_affinity(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int cpu;
	cpumask_t mask;

	/* different IRQ balance for 16-core and 32-core systems */
	if (h->q_num == num_possible_cpus()) {
		for (i = 0; i < h->q_num * 2; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index)) {
				cpumask_clear(&mask);
				cpu = rd->queue_index;
				cpumask_set_cpu(cpu, &mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    &mask);
			}
		}
	} else {
		for (i = 0; i < h->q_num; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index * 2)) {
				cpumask_clear(&mask);
				cpu = rd->queue_index * 2;
				cpumask_set_cpu(cpu, &mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    &mask);
			}
		}

		for (i = h->q_num; i < h->q_num * 2; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index * 2 + 1)) {
				cpumask_clear(&mask);
				cpu = rd->queue_index * 2 + 1;
				cpumask_set_cpu(cpu, &mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    &mask);
			}
		}
	}
}

static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int ret;

	for (i = 0; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];

		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
			break;

		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
			 "%s-%s%d", priv->netdev->name,
			 (i < h->q_num ? "tx" : "rx"), rd->queue_index);

		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

		ret = request_irq(rd->ring->irq,
				  hns_irq_handle, 0, rd->ring->ring_name, rd);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   rd->ring->irq);
			return ret;
		}
		disable_irq(rd->ring->irq);
		rd->ring->irq_init_flag = RCB_IRQ_INITED;
	}

	/* set cpu affinity */
	hns_set_irq_affinity(priv);

	return 0;
}

static int hns_nic_net_up(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int i, j, k;
	int ret;

	ret = hns_nic_init_irq(priv);
	if (ret != 0) {
		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	for (i = 0; i < h->q_num * 2; i++) {
		ret = hns_nic_ring_open(ndev, i);
		if (ret)
			goto out_has_some_queues;
	}

	for (k = 0; k < h->q_num; k++)
		h->dev->ops->toggle_queue_status(h->qs[k], 1);

	ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
	if (ret)
		goto out_set_mac_addr_err;

	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	if (priv->phy)
		phy_start(priv->phy);

	clear_bit(NIC_STATE_DOWN, &priv->state);
	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	return 0;

out_start_err:
	netif_stop_queue(ndev);
out_set_mac_addr_err:
	for (k = 0; k < h->q_num; k++)
		h->dev->ops->toggle_queue_status(h->qs[k], 0);
out_has_some_queues:
	for (j = i - 1; j >= 0; j--)
		hns_nic_ring_close(ndev, j);

	set_bit(NIC_STATE_DOWN, &priv->state);

	return ret;
}

static void hns_nic_net_down(struct net_device *ndev)
{
	int i;
	struct hnae_ae_ops *ops;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
		return;

	(void)del_timer_sync(&priv->service_timer);
	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netif_tx_disable(ndev);
	priv->link = 0;

	if (priv->phy)
		phy_stop(priv->phy);

	ops = priv->ae_handle->dev->ops;

	if (ops->stop)
		ops->stop(priv->ae_handle);

	netif_tx_stop_all_queues(ndev);

	for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
		hns_nic_ring_close(ndev, i);
		hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

		/* clean tx buffers */
		hns_nic_tx_clr_all_bufs(priv->ring_data + i);
	}
}

void hns_nic_net_reset(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *handle = priv->ae_handle;

	while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
		usleep_range(1000, 2000);

	(void)hnae_reinit_handle(handle);

	clear_bit(NIC_STATE_RESETTING, &priv->state);
}

void hns_nic_net_reinit(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	priv->netdev->trans_start = jiffies;
	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
		usleep_range(1000, 2000);

	hns_nic_net_down(netdev);
	hns_nic_net_reset(netdev);
	(void)hns_nic_net_up(netdev);
	clear_bit(NIC_STATE_REINITING, &priv->state);
}

static int hns_nic_net_open(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (test_bit(NIC_STATE_TESTING, &priv->state))
		return -EBUSY;

	priv->link = 0;
	netif_carrier_off(ndev);

	ret = netif_set_real_num_tx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = hns_nic_net_up(ndev);
	if (ret) {
		netdev_err(ndev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns_nic_net_stop(struct net_device *ndev)
{
	hns_nic_net_down(ndev);

	return 0;
}

static void hns_tx_timeout_reset(struct hns_nic_priv *priv);

static void hns_nic_net_timeout(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	hns_tx_timeout_reset(priv);
}

static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
			    int cmd)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct phy_device *phy_dev = priv->phy;

	if (!netif_running(netdev))
		return -EINVAL;

	if (!phy_dev)
		return -ENOTSUPP;

	return phy_mii_ioctl(phy_dev, ifr, cmd);
}

/* used only by netconsole to poll the device without interrupts */
#ifdef CONFIG_NET_POLL_CONTROLLER
void hns_nic_poll_controller(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	for (i = 0; i < priv->ae_handle->q_num * 2; i++)
		napi_schedule(&priv->ring_data[i].napi);
	local_irq_restore(flags);
}
#endif

static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int ret;

	assert(skb->queue_mapping < ndev->ae_handle->q_num);
	ret = hns_nic_net_xmit_hw(ndev, skb,
				  &tx_ring_data(priv, skb->queue_mapping));
	if (ret == NETDEV_TX_OK) {
		ndev->trans_start = jiffies;
		ndev->stats.tx_bytes += skb->len;
		ndev->stats.tx_packets++;
	}
	return (netdev_tx_t)ret;
}

static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	/* MTU < 68 is an error and causes problems on some kernels */
	if (new_mtu < 68)
		return -EINVAL;

	if (!h->dev->ops->set_mtu)
		return -ENOTSUPP;

	if (netif_running(ndev)) {
		(void)hns_nic_net_stop(ndev);
		msleep(100);

		ret = h->dev->ops->set_mtu(h, new_mtu);
		if (ret)
			netdev_err(ndev, "set mtu fail, return value %d\n",
				   ret);

		if (hns_nic_net_open(ndev))
			netdev_err(ndev, "hns net open fail\n");
	} else {
		ret = h->dev->ops->set_mtu(h, new_mtu);
	}

	if (!ret)
		ndev->mtu = new_mtu;

	return ret;
}

static int hns_nic_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			netdev_info(netdev, "enet v1 does not support tso!\n");
		break;
	default:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* The chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
			h->dev->ops->set_tso_stats(h, 1);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
			h->dev->ops->set_tso_stats(h, 0);
		}
		break;
	}
	netdev->features = features;
	return 0;
}

static netdev_features_t hns_nic_fix_features(
		struct net_device *netdev, netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				NETIF_F_HW_VLAN_CTAG_FILTER);
		break;
	default:
		break;
	}
	return features;
}

/**
 * hns_set_multicast_list - set multicast MAC addresses
 * @ndev: net device
 *
 * return void
 */
void hns_set_multicast_list(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct netdev_hw_addr *ha = NULL;

	if (!h) {
		netdev_err(ndev, "hnae handle is null\n");
		return;
	}

	if (h->dev->ops->set_mc_addr) {
		netdev_for_each_mc_addr(ha, ndev)
			if (h->dev->ops->set_mc_addr(h, ha->addr))
				netdev_err(ndev, "set multicast fail\n");
	}
}

void hns_nic_set_rx_mode(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->set_promisc_mode) {
		if (ndev->flags & IFF_PROMISC)
			h->dev->ops->set_promisc_mode(h, 1);
		else
			h->dev->ops->set_promisc_mode(h, 0);
	}

	hns_set_multicast_list(ndev);
}

struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
					      struct rtnl_link_stats64 *stats)
{
	int idx;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	for (idx = 0; idx < h->q_num; idx++) {
		tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
		tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
		rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
		rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = ndev->stats.rx_errors;
	stats->multicast = ndev->stats.multicast;
	stats->rx_length_errors = ndev->stats.rx_length_errors;
	stats->rx_crc_errors = ndev->stats.rx_crc_errors;
	stats->rx_missed_errors = ndev->stats.rx_missed_errors;

	stats->tx_errors = ndev->stats.tx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_dropped = ndev->stats.tx_dropped;
	stats->collisions = ndev->stats.collisions;
	stats->rx_over_errors = ndev->stats.rx_over_errors;
	stats->rx_frame_errors = ndev->stats.rx_frame_errors;
	stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = ndev->stats.tx_window_errors;
	stats->rx_compressed = ndev->stats.rx_compressed;
	stats->tx_compressed = ndev->stats.tx_compressed;

	return stats;
}

static const struct net_device_ops hns_nic_netdev_ops = {
	.ndo_open = hns_nic_net_open,
	.ndo_stop = hns_nic_net_stop,
	.ndo_start_xmit = hns_nic_net_xmit,
	.ndo_tx_timeout = hns_nic_net_timeout,
	.ndo_set_mac_address = hns_nic_net_set_mac_address,
	.ndo_change_mtu = hns_nic_change_mtu,
	.ndo_do_ioctl = hns_nic_do_ioctl,
	.ndo_set_features = hns_nic_set_features,
	.ndo_fix_features = hns_nic_fix_features,
	.ndo_get_stats64 = hns_nic_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = hns_nic_poll_controller,
#endif
	.ndo_set_rx_mode = hns_nic_set_rx_mode,
};

static void hns_nic_update_link_status(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	struct hnae_handle *h = priv->ae_handle;
	int state = 1;

	if (priv->phy) {
		if (!genphy_update_link(priv->phy))
			state = priv->phy->link;
		else
			state = 0;
	}
	state = state && h->dev->ops->get_status(h);

	if (state != priv->link) {
		if (state) {
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
			netdev_info(netdev, "link up\n");
		} else {
			netif_carrier_off(netdev);
			netdev_info(netdev, "link down\n");
		}
		priv->link = state;
	}
}

/* for dumping key regs */
static void hns_nic_dump(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	u32 *data, reg_num, i;

	if (ops->get_regs_len && ops->get_regs) {
		reg_num = ops->get_regs_len(priv->ae_handle);
		reg_num = (reg_num + 3ul) & ~3ul;
		data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
		if (data) {
			ops->get_regs(priv->ae_handle, data);
			for (i = 0; i < reg_num; i += 4)
				pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
					i, data[i], data[i + 1],
					data[i + 2], data[i + 3]);
			kfree(data);
		}
	}

	for (i = 0; i < h->q_num; i++) {
		pr_info("tx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->tx_ring.next_to_clean);
		pr_info("tx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->tx_ring.next_to_use);
		pr_info("rx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->rx_ring.next_to_clean);
		pr_info("rx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->rx_ring.next_to_use);
	}
}

/* for the resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
	enum hnae_port_type type = priv->ae_handle->port_type;

	if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
		return;
	clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(NIC_STATE_DOWN, &priv->state) ||
	    test_bit(NIC_STATE_REMOVING, &priv->state) ||
	    test_bit(NIC_STATE_RESETTING, &priv->state))
		return;

	hns_nic_dump(priv);
	netdev_info(priv->netdev, "try to reset %s port!\n",
		    (type == HNAE_PORT_DEBUG ? "debug" : "service"));

	rtnl_lock();
	/* put off any impending NetWatchDogTimeout */
	priv->netdev->trans_start = jiffies;

	if (type == HNAE_PORT_DEBUG) {
		hns_nic_net_reinit(priv->netdev);
	} else {
		netif_carrier_off(priv->netdev);
		netif_tx_disable(priv->netdev);
		hns_nic_net_reset(priv->netdev);
	}
	rtnl_unlock();
}

/* for marking the service task complete */
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
	WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));

	smp_mb__before_atomic();
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}

static void hns_nic_service_task(struct work_struct *work)
{
	struct hns_nic_priv *priv
		= container_of(work, struct hns_nic_priv, service_task);
	struct hnae_handle *h = priv->ae_handle;

	hns_nic_update_link_status(priv->netdev);
	h->dev->ops->update_led_status(h);
	hns_nic_update_stats(priv->netdev);

	hns_nic_reset_subtask(priv);
	hns_nic_service_event_complete(priv);
}

static void hns_nic_task_schedule(struct hns_nic_priv *priv)
{
	if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
	    !test_bit(NIC_STATE_REMOVING, &priv->state) &&
	    !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
		(void)schedule_work(&priv->service_task);
}

static void hns_nic_service_timer(unsigned long data)
{
	struct hns_nic_priv *priv = (struct hns_nic_priv *)data;

	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	hns_nic_task_schedule(priv);
}

/**
 * hns_tx_timeout_reset - initiate reset due to Tx timeout
 * @priv: driver private struct
 **/
static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
		set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
		netdev_warn(priv->netdev,
			    "initiating reset due to tx timeout(%llu,0x%lx)\n",
			    priv->tx_timeout_count, priv->state);
		priv->tx_timeout_count++;
		hns_nic_task_schedule(priv);
	}
}

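/* allocate ring_data for h->q_num TX rings followed by h->q_num RX rings
 * and register one NAPI context per ring
 */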
static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;

	if (h->q_num > NIC_MAX_Q_PER_VF) {
		netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
		return -EINVAL;
	}

	priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
				  GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->q_num; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i;
		rd->ring = &h->qs[i]->tx_ring;
		rd->poll_one = hns_nic_tx_poll_one;
		rd->fini_process = hns_nic_tx_fini_pro;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	for (i = h->q_num; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i - h->q_num;
		rd->ring = &h->qs[i - h->q_num]->rx_ring;
		rd->poll_one = hns_nic_rx_poll_one;
		rd->ex_process = hns_nic_rx_up_pro;
		rd->fini_process = hns_nic_rx_fini_pro;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}

	return 0;
}

static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->q_num * 2; i++) {
		netif_napi_del(&priv->ring_data[i].napi);
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			(void)irq_set_affinity_hint(
				priv->ring_data[i].ring->irq,
				NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	kfree(priv->ring_data);
}

static void hns_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (AE_IS_VER1(priv->enet_ver)) {
		priv->ops.fill_desc = fill_desc;
		priv->ops.get_rxd_bnum = get_rx_desc_bnum;
		priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
	} else {
		priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
		if ((netdev->features & NETIF_F_TSO) ||
		    (netdev->features & NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* This chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
			h->dev->ops->set_tso_stats(h, 1);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
	}
}

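/* bind to the AE handle if it is already registered; on failure the
 * partially initialized state is rolled back and the caller falls back
 * to the notifier
 */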
static int hns_nic_try_get_ae(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h;
	int ret;

	h = hnae_get_handle(&priv->netdev->dev,
			    priv->ae_node, priv->port_id, NULL);
	if (IS_ERR_OR_NULL(h)) {
		ret = PTR_ERR(h);
		dev_dbg(priv->dev, "has not handle, register notifier!\n");
		goto out;
	}
	priv->ae_handle = h;

	ret = hns_nic_init_phy(ndev, h);
	if (ret) {
		dev_err(priv->dev, "probe phy device fail!\n");
		goto out_init_phy;
	}

	ret = hns_nic_init_ring_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	hns_nic_set_priv_ops(ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_ndev_fail;
	}
	return 0;

out_reg_ndev_fail:
	hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;
out_init_phy:
out_init_ring_data:
	hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
out:
	return ret;
}

static int hns_nic_notifier_action(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct hns_nic_priv *priv =
		container_of(nb, struct hns_nic_priv, notifier_block);

	assert(action == HNAE_AE_REGISTER);

	if (!hns_nic_try_get_ae(priv->netdev)) {
		hnae_unregister_notifier(&priv->notifier_block);
		priv->notifier_block.notifier_call = NULL;
	}
	return 0;
}

static int hns_nic_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct hns_nic_priv *priv;
	struct device_node *node = dev->of_node;
	int ret;

	ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	priv = netdev_priv(ndev);
	priv->dev = dev;
	priv->netdev = ndev;

	if (of_device_is_compatible(node, "hisilicon,hns-nic-v1"))
		priv->enet_ver = AE_VERSION_1;
	else
		priv->enet_ver = AE_VERSION_2;

	priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0);
	if (IS_ERR_OR_NULL(priv->ae_node)) {
		ret = PTR_ERR(priv->ae_node);
		dev_err(dev, "cannot find ae-handle\n");
		goto out_read_prop_fail;
	}

	ret = of_property_read_u32(node, "port-id", &priv->port_id);
	if (ret)
		goto out_read_prop_fail;

	hns_init_mac_addr(ndev);

	ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->netdev_ops = &hns_nic_netdev_ops;
	hns_ethtool_set_ops(ndev);

	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO;
	ndev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
	ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	switch (priv->enet_ver) {
	case AE_VERSION_2:
		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
			NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
		break;
	default:
		break;
	}

	SET_NETDEV_DEV(ndev, dev);

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dev_dbg(dev, "set mask to 64bit\n");
	else
		dev_err(dev, "set mask to 32bit fail!\n");

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(ndev);

	setup_timer(&priv->service_timer, hns_nic_service_timer,
		    (unsigned long)priv);
	INIT_WORK(&priv->service_task, hns_nic_service_task);

	set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
	set_bit(NIC_STATE_DOWN, &priv->state);

	if (hns_nic_try_get_ae(priv->netdev)) {
		priv->notifier_block.notifier_call = hns_nic_notifier_action;
		ret = hnae_register_notifier(&priv->notifier_block);
		if (ret) {
			dev_err(dev, "register notifier fail!\n");
			goto out_notify_fail;
		}
		dev_dbg(dev, "has not handle, register notifier!\n");
	}

	return 0;

out_notify_fail:
	(void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
	free_netdev(ndev);
	return ret;
}

static int hns_nic_dev_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(ndev);

	if (priv->ring_data)
		hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;

	if (priv->phy)
		phy_disconnect(priv->phy);
	priv->phy = NULL;

	if (!IS_ERR_OR_NULL(priv->ae_handle))
		hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
	if (priv->notifier_block.notifier_call)
		hnae_unregister_notifier(&priv->notifier_block);
	priv->notifier_block.notifier_call = NULL;

	set_bit(NIC_STATE_REMOVING, &priv->state);
	(void)cancel_work_sync(&priv->service_task);

	free_netdev(ndev);
	return 0;
}

static const struct of_device_id hns_enet_of_match[] = {
	{.compatible = "hisilicon,hns-nic-v1",},
	{.compatible = "hisilicon,hns-nic-v2",},
	{},
};

MODULE_DEVICE_TABLE(of, hns_enet_of_match);

static struct platform_driver hns_nic_dev_driver = {
	.driver = {
		.name = "hns-nic",
		.of_match_table = hns_enet_of_match,
	},
	.probe = hns_nic_dev_probe,
	.remove = hns_nic_dev_remove,
};

module_platform_driver(hns_nic_dev_driver);

MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hns-nic");