/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];

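/* Pre-fill every 16-byte buffer-pool descriptor with its slot index (later
 * read back as USERINFO to locate the matching rx_skb) and the free-pool
 * queue number, so refill only has to supply the buffer address.
 */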
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

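/* Allocate and DMA-map nbuf receive skbs and publish one 16-byte descriptor
 * per buffer; the single ring command write at the end hands ownership of
 * all the new descriptors to the hardware at once.
 */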
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;
		buf_pool->rx_skb[tail] = skb;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		/* Hardware expects descriptor in little endian format */
		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

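/* A destination ring number is the ring-manager (RM) identifier placed
 * above the 10-bit ring number.
 */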
static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	return ((u16)pdata->rm << 10) | ring->num;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	u32 slots = buf_pool->slots - 1;
	u32 tail = buf_pool->tail;
	u32 userinfo;
	int i, len;

	len = pdata->ring_ops->len(buf_pool);
	for (i = 0; i < len; i++) {
		tail = (tail - 1) & slots;
		raw_desc = &buf_pool->raw_desc16[tail];

		/* Hardware stores descriptor in little endian format */
		userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
	}

	pdata->ring_ops->wr_cmd(buf_pool, -len);
	buf_pool->tail = tail;
}

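/* RX/completion interrupt handler: switch to polled (NAPI) processing by
 * masking the interrupt line until xgene_enet_napi() has drained the ring.
 */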
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 status;
	int i, ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb),
			 DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	/* Check for transmit errors flagged by the hardware */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}

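/* Build the 64-bit "work message" (descriptor word m3) for a transmit skb:
 * Ethernet/IP/TCP header lengths, checksum-offload enables and, when TSO is
 * in use, the ET segmentation bit plus a MAC MSS update whenever the MSS
 * changes. Returns 0 only when a required linearization fails, which the
 * caller treats as an error.
 */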
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u64 hopinfo = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_shinfo(skb)->frags[i].size;

				/* HW requires the header to reside within
				 * the first 3 buffers
				 */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			if (mss != pdata->mss) {
				pdata->mss = mss;
				pdata->mac_ops->set_mss(pdata);
			}
			hopinfo |= SET_BIT(ET);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		   SET_VAL(IPHDR, l3hlen) |
		   SET_VAL(ETHHDR, ethhdr) |
		   SET_VAL(EC, csum_enable) |
		   SET_VAL(IS, proto) |
		   SET_BIT(IC) |
		   SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return hopinfo;
}

static u16 xgene_enet_encode_len(u16 len)
{
	return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));
}

static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
	__le64 *exp_bufs;

	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

	return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}

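/* Fill transmit descriptor(s) for an skb. The linear part goes in the main
 * descriptor; up to four fragment buffers fit in the expanded descriptor,
 * and anything beyond that spills into a per-ring "link list" (LL) buffer
 * page referenced from the expanded descriptor. Fragments larger than 16KB
 * are split, since BUFDATALEN encodes at most 16KB per buffer. Returns the
 * number of ring slots consumed, or -EINVAL on a mapping or message error.
 */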
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	skb_frag_t *frag;
	u16 tail = tx_ring->tail;
	u64 hopinfo;
	u32 len, hw_len;
	u8 ll = 0, nv = 0, idx = 0;
	bool split = false;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	hopinfo = xgene_enet_work_msg(skb);
	if (!hopinfo)
		return -EINVAL;
	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |
				   SET_BIT(COHERENT));

	if (!skb_is_nonlinear(skb))
		goto out;

	/* scatter gather */
	nv = 1;
	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		if (!split) {
			frag = &skb_shinfo(skb)->frags[fidx];
			size = skb_frag_size(frag);
			offset = 0;

			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pbuf_addr))
				return -EINVAL;

			frag_dma_addr[fidx] = pbuf_addr;
			fidx++;

			if (size > BUFLEN_16K)
				split = true;
		}

		if (size > BUFLEN_16K) {
			len = BUFLEN_16K;
			size -= BUFLEN_16K;
		} else {
			len = size;
			split = false;
		}

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

		switch (i) {
		case 0:
		case 1:
		case 2:
			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
			break;
		case 3:
			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
						   hw_len);
				idx++;
				ell_bytes += len;
			} else {
				xgene_set_addr_len(exp_desc, i, dma_addr,
						   hw_len);
			}
			break;
		default:
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
			idx++;
			ell_bytes += len;
			break;
		}

		if (split)
			offset += BUFLEN_16K;
	}
	count++;

	if (idx) {
		ll = 1;
		dma_addr = dma_map_single(dev, exp_bufs,
					  sizeof(u64) * MAX_EXP_BUFFS,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}
		i = ell_bytes >> LL_BYTES_LSB_LEN;
		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					  SET_VAL(LL_BYTES_MSB, i) |
					  SET_VAL(LL_LEN, idx));
		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
	}

out:
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	pdata->tx_level += count;
	tx_ring->tail = tail;

	return count;
}

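/* Queue an skb for transmission. tx_level and txc_level are free-running
 * counters of slots produced and consumed; the unsigned wrap-around fixup
 * keeps the occupancy test correct across overflow, and the queue is
 * stopped once occupancy exceeds tx_qcnt_hi.
 */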
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
	u32 tx_level = pdata->tx_level;
	int count;

	if (tx_level < pdata->txc_level)
		tx_level += ((typeof(pdata->tx_level))~0U);

	if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
		return NETDEV_TX_OK;

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
	if (count <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skb_tx_timestamp(skb);

	pdata->stats.tx_packets++;
	pdata->stats.tx_bytes += skb->len;

	pdata->ring_ops->wr_cmd(tx_ring, count);
	return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];

	/* Check for receive errors flagged by the hardware */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		pdata->stats.rx_dropped++;
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen = (datalen & DATALEN_MASK) - 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	pdata->stats.rx_packets++;
	pdata->stats.rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

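/* Drain up to 'budget' descriptors from an RX or TX-completion ring. A set
 * NV bit means the hardware used a second (expanded) slot, which must be
 * retired as well. All retired slots are returned to the hardware in one
 * ring command write at the end.
 */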
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, desc_count, count = 0, processed = 0;
	bool is_completion;

	do {
		raw_desc = &ring->raw_desc[head];
		desc_count = 0;
		is_completion = false;
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
			head = (head + 1) & slots;
			exp_desc = &ring->raw_desc[head];

			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
				head = (head - 1) & slots;
				break;
			}
			dma_rmb();
			count++;
			desc_count++;
		}
		if (is_rx_desc(raw_desc)) {
			ret = xgene_enet_rx_frame(ring, raw_desc);
		} else {
			ret = xgene_enet_tx_completion(ring, raw_desc);
			is_completion = true;
		}
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
		desc_count++;
		processed++;
		if (is_completion)
			pdata->txc_level += desc_count;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (netif_queue_stopped(ring->ndev))
			netif_start_queue(ring->ndev);
	}

	return processed;
}

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);

	pdata->mac_ops->reset(pdata);
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret;

	ring = pdata->rx_ring;
	ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
			       IRQF_SHARED, ring->irq_name, ring);
	if (ret)
		netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name);

	if (pdata->cq_cnt) {
		ring = pdata->tx_ring->cp_ring;
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       IRQF_SHARED, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);

	if (pdata->cq_cnt) {
		devm_free_irq(dev, pdata->tx_ring->cp_ring->irq,
			      pdata->tx_ring->cp_ring);
	}
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	napi_enable(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	napi_disable(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		napi_disable(napi);
	}
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);

	xgene_enet_napi_enable(pdata);
	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_start(pdata->phy_dev);
	else
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);

	netif_start_queue(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;

	netif_stop_queue(ndev);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_stop(pdata->phy_dev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	xgene_enet_free_irq(ndev);
	xgene_enet_napi_disable(pdata);
	xgene_enet_process_ring(pdata->rx_ring, -1);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;

	if (pdata->tx_ring) {
		xgene_enet_delete_ring(pdata->tx_ring);
		pdata->tx_ring = NULL;
	}

	if (pdata->rx_ring) {
		buf_pool = pdata->rx_ring->buf_pool;
		xgene_enet_delete_bufpool(buf_pool);
		xgene_enet_delete_ring(buf_pool);
		xgene_enet_delete_ring(pdata->rx_ring);
		pdata->rx_ring = NULL;
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;

	ring = pdata->tx_ring;
	if (ring) {
		if (ring->cp_ring && ring->cp_ring->cp_skb)
			devm_kfree(dev, ring->cp_ring->cp_skb);
		if (ring->cp_ring && pdata->cq_cnt)
			xgene_enet_free_desc_ring(ring->cp_ring);
		xgene_enet_free_desc_ring(ring);
	}

	ring = pdata->rx_ring;
	if (ring) {
		if (ring->buf_pool) {
			if (ring->buf_pool->rx_skb)
				devm_kfree(dev, ring->buf_pool->rx_skb);
			xgene_enet_free_desc_ring(ring->buf_pool);
		}
		xgene_enet_free_desc_ring(ring);
	}
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_desc_ring *ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
				&ring->irq_mbox_dma, GFP_KERNEL);
		if (!ring->irq_mbox_addr) {
			dma_free_coherent(dev, size, ring->desc_addr,
					  ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

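/* A ring ID encodes the owner in the upper bits and a per-owner buffer
 * number in the low 6 bits.
 */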
static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}

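/* Create the ring topology for one port: an RX ring with its buffer pool,
 * a TX ring with its expanded-buffer (LL) area, and a TX-completion ring.
 * When no dedicated completion IRQ is available (cq_cnt == 0), TX
 * completions are delivered through the RX ring instead.
 */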
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;
	u8 cpu_bufnum = pdata->cpu_bufnum;
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	u16 ring_id;
	int ret, size;

	/* allocate rx descriptor ring */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
	rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!rx_ring) {
		ret = -ENOMEM;
		goto err;
	}

	/* allocate buffer pool for receiving packets */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
	buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
					       RING_CFGSIZE_2KB, ring_id);
	if (!buf_pool) {
		ret = -ENOMEM;
		goto err;
	}

	rx_ring->nbufpool = NUM_BUFPOOL;
	rx_ring->buf_pool = buf_pool;
	rx_ring->irq = pdata->rx_irq;
	if (!pdata->cq_cnt) {
		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
			 ndev->name);
	} else {
		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name);
	}
	buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
					sizeof(struct sk_buff *), GFP_KERNEL);
	if (!buf_pool->rx_skb) {
		ret = -ENOMEM;
		goto err;
	}

	buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
	rx_ring->buf_pool = buf_pool;
	pdata->rx_ring = rx_ring;

	/* allocate tx descriptor ring */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
	tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!tx_ring) {
		ret = -ENOMEM;
		goto err;
	}

	size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
	tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs,
						GFP_KERNEL);
	if (!tx_ring->exp_bufs) {
		ret = -ENOMEM;
		goto err;
	}

	pdata->tx_ring = tx_ring;

	if (!pdata->cq_cnt) {
		cp_ring = pdata->rx_ring;
	} else {
		/* allocate tx completion descriptor ring */
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!cp_ring) {
			ret = -ENOMEM;
			goto err;
		}
		cp_ring->irq = pdata->txc_irq;
		snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name);
	}

	cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
				       sizeof(struct sk_buff *), GFP_KERNEL);
	if (!cp_ring->cp_skb) {
		ret = -ENOMEM;
		goto err;
	}

	size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
	cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
					      size, GFP_KERNEL);
	if (!cp_ring->frag_dma_addr) {
		devm_kfree(dev, cp_ring->cp_skb);
		ret = -ENOMEM;
		goto err;
	}

	pdata->tx_ring->cp_ring = cp_ring;
	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

	pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;

	stats->rx_errors += stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors;
	memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));

	return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

#ifdef CONFIG_ACPI
static void xgene_get_port_id_acpi(struct device *dev,
				   struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status)) {
		pdata->port_id = 0;
	} else {
		pdata->port_id = temp;
	}
}
#endif

static void xgene_get_port_id_dt(struct device *dev,
				 struct xgene_enet_pdata *pdata)
{
	u32 id = 0;

	of_property_read_u32(dev->of_node, "port-id", &id);

	pdata->port_id = id & BIT(0);
}

static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
	if (ret) {
		pdata->tx_delay = 4;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");
		return -EINVAL;
	}

	pdata->tx_delay = delay;

	return 0;
}

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
	if (ret) {
		pdata->rx_delay = 2;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");
		return -EINVAL;
	}

	pdata->rx_delay = delay;

	return 0;
}

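/* Gather platform resources: the ENET CSR, ring CSR and ring command
 * regions, the port ID (DT "port-id" or ACPI _SUN), MAC address, PHY mode,
 * RGMII delays, IRQs and the optional clock, then derive the per-block CSR
 * addresses from the port base address.
 */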
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret = 0;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
	else
		xgene_get_port_id_acpi(dev, pdata);
#endif

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = device_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = xgene_get_tx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_get_rx_delay(pdata);
	if (ret)
		return ret;

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		dev_err(dev, "Unable to get ENET Rx IRQ\n");
		ret = ret ? : -ENXIO;
		return ret;
	}
	pdata->rx_irq = ret;

	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
		ret = platform_get_irq(pdev, 1);
		if (ret <= 0) {
			pdata->cq_cnt = 0;
			dev_info(dev, "Unable to get Tx completion IRQ, using Rx IRQ instead\n");
		} else {
			pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
			pdata->txc_irq = ret;
		}
	}

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Firmware may have set up the clock already. */
		dev_info(dev, "clocks have been setup already\n");
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}

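/* Bring up the hardware: reset the port, create the rings, seed the RX
 * buffer pool, and point the classifier (CLE) bypass at the RX ring before
 * the MAC itself is initialized.
 */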
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int ret;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	buf_pool = pdata->rx_ring->buf_pool;
	xgene_enet_init_bufpool(buf_pool);
	ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
	if (ret) {
		xgene_enet_delete_desc_rings(pdata);
		return ret;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
	pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
	pdata->mac_ops->init(pdata);

	return ret;
}

static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->rm = RM0;
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = START_CPU_BUFNUM_0;
			pdata->eth_bufnum = START_ETH_BUFNUM_0;
			pdata->bp_bufnum = START_BP_BUFNUM_0;
			pdata->ring_num = START_RING_NUM_0;
			break;
		case 1:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
				pdata->ring_num = XG_START_RING_NUM_1;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
				pdata->eth_bufnum = START_ETH_BUFNUM_1;
				pdata->bp_bufnum = START_BP_BUFNUM_1;
				pdata->ring_num = START_RING_NUM_1;
			}
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->rm = RM0;
		pdata->ring_ops = &xgene_ring2_ops;
	}
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}

static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	netif_napi_del(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		netif_napi_del(napi);
	}
}

static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	const struct xgene_mac_ops *mac_ops;
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO |
			  NETIF_F_SG;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
	}
#endif
	if (!pdata->enet_id) {
		free_netdev(ndev);
		return -ENODEV;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ndev->features |= NETIF_F_TSO;
		pdata->mss = XGENE_ENET_MSS;
	}
	ndev->hw_features = ndev->features;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err_netdev;

	mac_ops = pdata->mac_ops;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		ret = xgene_enet_mdio_config(pdata);
	else
		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);

	if (ret)
		goto err_netdev;

	xgene_enet_napi_add(pdata);
	return 0;

err_netdev:
	unregister_netdev(ndev);
err:
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	const struct xgene_mac_ops *mac_ops;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	mac_ops = pdata->mac_ops;
	ndev = pdata->ndev;

	mac_ops->rx_disable(pdata);
	mac_ops->tx_disable(pdata);

	xgene_enet_napi_del(pdata);
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);
	unregister_netdev(ndev);
	xgene_enet_delete_desc_rings(pdata);
	pdata->port_ops->shutdown(pdata);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ "APMC0D3F", XGENE_ENET1},
	{ "APMC0D26", XGENE_ENET2},
	{ "APMC0D25", XGENE_ENET2},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif

static struct platform_driver xgene_enet_driver = {
	.driver = {
		   .name = "xgene-enet",
		   .of_match_table = of_match_ptr(xgene_enet_of_match),
		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");