1 /* Copyright 2014-2016 Freescale Semiconductor Inc.
2 * Copyright 2016-2017 NXP
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 * * Redistributions of source code must retain the above copyright
7 * notice, this list of conditions and the following disclaimer.
8 * * Redistributions in binary form must reproduce the above copyright
9 * notice, this list of conditions and the following disclaimer in the
10 * documentation and/or other materials provided with the distribution.
11 * * Neither the name of Freescale Semiconductor nor the
12 * names of its contributors may be used to endorse or promote products
13 * derived from this software without specific prior written permission.
16 * ALTERNATIVELY, this software may be distributed under the terms of the
17 * GNU General Public License ("GPL") as published by the Free Software
18 * Foundation, either version 2 of that License or (at your option) any
21 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
22 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
23 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
25 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
28 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
30 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/platform_device.h>
35 #include <linux/etherdevice.h>
36 #include <linux/of_net.h>
37 #include <linux/interrupt.h>
38 #include <linux/msi.h>
39 #include <linux/kthread.h>
40 #include <linux/iommu.h>
42 #include "../../fsl-mc/include/mc.h"
43 #include "../../fsl-mc/include/mc-sys.h"
44 #include "dpaa2-eth.h"
46 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2-eth files
47 * using trace events only need to #include "dpaa2-eth-trace.h"
49 #define CREATE_TRACE_POINTS
50 #include "dpaa2-eth-trace.h"
52 MODULE_LICENSE("Dual BSD/GPL");
53 MODULE_AUTHOR("Freescale Semiconductor, Inc");
54 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
56 const char dpaa2_eth_drv_version[] = "0.1";
58 static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
61 phys_addr_t phys_addr;
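/* When the device sits behind an IOMMU, addresses taken from frame
 * descriptors are IOVAs and must be translated to physical addresses
 * before phys_to_virt() can be used.
 */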
63 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
65 return phys_to_virt(phys_addr);
68 static void validate_rx_csum(struct dpaa2_eth_priv *priv,
72 skb_checksum_none_assert(skb);
74 /* HW checksum validation is disabled, nothing to do here */
75 if (!(priv->net_dev->features & NETIF_F_RXCSUM))
78 /* Read checksum validation bits */
79 if (!((fd_status & DPAA2_FAS_L3CV) &&
80 (fd_status & DPAA2_FAS_L4CV)))
83 /* Inform the stack there's no need to compute L3/L4 csum anymore */
84 skb->ip_summed = CHECKSUM_UNNECESSARY;
87 /* Free a received FD.
88 * Not to be used for Tx conf FDs or on any other paths.
90 static void free_rx_fd(struct dpaa2_eth_priv *priv,
91 const struct dpaa2_fd *fd,
94 struct device *dev = priv->net_dev->dev.parent;
95 dma_addr_t addr = dpaa2_fd_get_addr(fd);
96 u8 fd_format = dpaa2_fd_get_format(fd);
97 struct dpaa2_sg_entry *sgt;
101 /* If single buffer frame, just free the data buffer */
102 if (fd_format == dpaa2_fd_single)
104 else if (fd_format != dpaa2_fd_sg)
105 /* We don't support any other format */
108 /* For S/G frames, we first need to free all SG entries */
109 sgt = vaddr + dpaa2_fd_get_offset(fd);
110 for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
111 addr = dpaa2_sg_get_addr(&sgt[i]);
112 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
113 dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
116 skb_free_frag(sg_vaddr);
117 if (dpaa2_sg_is_final(&sgt[i]))
122 skb_free_frag(vaddr);
125 /* Build a linear skb based on a single-buffer frame descriptor */
126 static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
127 struct dpaa2_eth_channel *ch,
128 const struct dpaa2_fd *fd,
131 struct sk_buff *skb = NULL;
132 u16 fd_offset = dpaa2_fd_get_offset(fd);
133 u32 fd_length = dpaa2_fd_get_len(fd);
135 skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
136 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
140 skb_reserve(skb, fd_offset);
141 skb_put(skb, fd_length);
148 /* Build a non linear (fragmented) skb based on a S/G table */
149 static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
150 struct dpaa2_eth_channel *ch,
151 struct dpaa2_sg_entry *sgt)
153 struct sk_buff *skb = NULL;
154 struct device *dev = priv->net_dev->dev.parent;
159 struct page *page, *head_page;
163 for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
164 struct dpaa2_sg_entry *sge = &sgt[i];
166 /* NOTE: We only support SG entries in dpaa2_sg_single format,
167 * but this is the only format we may receive from HW anyway
170 /* Get the address and length from the S/G entry */
171 sg_addr = dpaa2_sg_get_addr(sge);
172 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
173 dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
176 sg_length = dpaa2_sg_get_len(sge);
179 /* We build the skb around the first data buffer */
180 skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
181 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
185 sg_offset = dpaa2_sg_get_offset(sge);
186 skb_reserve(skb, sg_offset);
187 skb_put(skb, sg_length);
189 /* Rest of the data buffers are stored as skb frags */
190 page = virt_to_page(sg_vaddr);
191 head_page = virt_to_head_page(sg_vaddr);
193 /* Offset in page (which may be compound).
194 * Data in subsequent SG entries is stored from the
195 * beginning of the buffer, so we don't need to add the
198 page_offset = ((unsigned long)sg_vaddr &
200 (page_address(page) - page_address(head_page));
202 skb_add_rx_frag(skb, i - 1, head_page, page_offset,
203 sg_length, DPAA2_ETH_RX_BUF_SIZE);
206 if (dpaa2_sg_is_final(sge))
210 /* Count all data buffers + SG table buffer */
211 ch->buf_count -= i + 2;
216 /* Main Rx frame processing routine */
217 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
218 struct dpaa2_eth_channel *ch,
219 const struct dpaa2_fd *fd,
220 struct napi_struct *napi)
222 dma_addr_t addr = dpaa2_fd_get_addr(fd);
223 u8 fd_format = dpaa2_fd_get_format(fd);
226 struct rtnl_link_stats64 *percpu_stats;
227 struct dpaa2_eth_drv_stats *percpu_extras;
228 struct device *dev = priv->net_dev->dev.parent;
229 struct dpaa2_fas *fas;
234 trace_dpaa2_rx_fd(priv->net_dev, fd);
236 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
237 dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
239 fas = dpaa2_get_fas(vaddr);
241 buf_data = vaddr + dpaa2_fd_get_offset(fd);
244 percpu_stats = this_cpu_ptr(priv->percpu_stats);
245 percpu_extras = this_cpu_ptr(priv->percpu_extras);
247 if (fd_format == dpaa2_fd_single) {
248 skb = build_linear_skb(priv, ch, fd, vaddr);
249 } else if (fd_format == dpaa2_fd_sg) {
250 skb = build_frag_skb(priv, ch, buf_data);
251 skb_free_frag(vaddr);
252 percpu_extras->rx_sg_frames++;
253 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
255 /* We don't support any other format */
256 goto err_frame_format;
264 /* Check if we need to validate the L3/L4 checksums */
265 if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
266 status = le32_to_cpu(fas->status);
267 validate_rx_csum(priv, status, skb);
270 skb->protocol = eth_type_trans(skb, priv->net_dev);
272 percpu_stats->rx_packets++;
273 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
275 napi_gro_receive(napi, skb);
280 free_rx_fd(priv, fd, vaddr);
282 percpu_stats->rx_dropped++;
285 /* Consume all frames pull-dequeued into the store. This is the simplest way to
286 * make sure we don't accidentally issue another volatile dequeue which would
287 * overwrite (leak) frames already in the store.
289 * Observance of NAPI budget is not our concern, leaving that to the caller.
291 static int consume_frames(struct dpaa2_eth_channel *ch)
293 struct dpaa2_eth_priv *priv = ch->priv;
294 struct dpaa2_eth_fq *fq;
296 const struct dpaa2_fd *fd;
301 dq = dpaa2_io_store_next(ch->store, &is_last);
303 /* If we're here, we *must* have placed a
304 * volatile dequeue command, so keep reading through
305 * the store until we get some sort of valid response
306 * token (either a valid frame or an "empty dequeue")
311 fd = dpaa2_dq_fd(dq);
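/* The FQ context holds the dpaa2_eth_fq pointer we programmed in
 * setup_rx_flow()/setup_tx_flow(), so we can dispatch straight to the
 * per-queue consume callback.
 */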
312 fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
315 fq->consume(priv, ch, fd, &ch->napi);
322 /* Create a frame descriptor based on a fragmented skb */
323 static int build_sg_fd(struct dpaa2_eth_priv *priv,
327 struct device *dev = priv->net_dev->dev.parent;
328 void *sgt_buf = NULL;
330 int nr_frags = skb_shinfo(skb)->nr_frags;
331 struct dpaa2_sg_entry *sgt;
334 struct scatterlist *scl, *crt_scl;
337 struct dpaa2_eth_swa *swa;
338 struct dpaa2_fas *fas;
340 /* Create and map scatterlist.
341 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
342 * to go beyond nr_frags+1.
343 * Note: We don't support chained scatterlists
345 if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
348 scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
352 sg_init_table(scl, nr_frags + 1);
353 num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
354 num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
355 if (unlikely(!num_dma_bufs)) {
357 goto dma_map_sg_failed;
360 /* Prepare the HW SGT structure */
361 sgt_buf_size = priv->tx_data_offset +
362 sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
363 sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
364 if (unlikely(!sgt_buf)) {
366 goto sgt_buf_alloc_failed;
368 sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
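/* The start of sgt_buf holds the software annotation (skb backpointer
 * plus scatterlist bookkeeping) followed by the hardware annotation;
 * the SG entries themselves are placed at priv->tx_data_offset.
 */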
370 /* PTA from egress side is passed as is to the confirmation side so
371 * we need to clear some fields here in order to find consistent values
372 * on TX confirmation. We are clearing FAS (Frame Annotation Status)
373 * field from the hardware annotation area
375 fas = dpaa2_get_fas(sgt_buf);
376 memset(fas, 0, DPAA2_FAS_SIZE);
378 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
380 /* Fill in the HW SGT structure.
382 * sgt_buf is zeroed out, so the following fields are implicit
383 * in all sgt entries:
385 * - format is 'dpaa2_sg_single'
387 for_each_sg(scl, crt_scl, num_dma_bufs, i) {
388 dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
389 dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
391 dpaa2_sg_set_final(&sgt[i - 1], true);
393 /* Store the skb backpointer in the SGT buffer.
394 * Fit the scatterlist and the number of buffers alongside the
395 * skb backpointer in the software annotation area. We'll need
396 * all of them on Tx Conf.
398 swa = (struct dpaa2_eth_swa *)sgt_buf;
401 swa->num_sg = num_sg;
402 swa->num_dma_bufs = num_dma_bufs;
404 /* Separately map the SGT buffer */
405 addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
406 if (unlikely(dma_mapping_error(dev, addr))) {
408 goto dma_map_single_failed;
410 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
411 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
412 dpaa2_fd_set_addr(fd, addr);
413 dpaa2_fd_set_len(fd, skb->len);
414 dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
419 dma_map_single_failed:
421 sgt_buf_alloc_failed:
422 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
428 /* Create a frame descriptor based on a linear skb */
429 static int build_single_fd(struct dpaa2_eth_priv *priv,
433 struct device *dev = priv->net_dev->dev.parent;
435 struct dpaa2_fas *fas;
436 struct sk_buff **skbh;
439 buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
440 DPAA2_ETH_TX_BUF_ALIGN,
441 DPAA2_ETH_TX_BUF_ALIGN);
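/* buffer_start now points back into the skb headroom (which the Tx path
 * guarantees to be large enough), leaving room in front of the frame
 * data for the software and hardware annotation areas.
 */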
443 /* PTA from egress side is passed as is to the confirmation side so
444 * we need to clear some fields here in order to find consistent values
445 * on TX confirmation. We are clearing FAS (Frame Annotation Status)
446 * field from the hardware annotation area
448 fas = dpaa2_get_fas(buffer_start);
449 memset(fas, 0, DPAA2_FAS_SIZE);
451 /* Store a backpointer to the skb at the beginning of the buffer
452 * (in the private data area) such that we can release it
455 skbh = (struct sk_buff **)buffer_start;
458 addr = dma_map_single(dev, buffer_start,
459 skb_tail_pointer(skb) - buffer_start,
461 if (unlikely(dma_mapping_error(dev, addr)))
464 dpaa2_fd_set_addr(fd, addr);
465 dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
466 dpaa2_fd_set_len(fd, skb->len);
467 dpaa2_fd_set_format(fd, dpaa2_fd_single);
468 dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
474 /* FD freeing routine on the Tx path
476 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
477 * back-pointed to is also freed.
478 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
480 * Optionally, return the frame annotation status word (FAS), which needs
481 * to be checked if we're on the confirmation path.
483 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
484 const struct dpaa2_fd *fd,
487 struct device *dev = priv->net_dev->dev.parent;
489 struct sk_buff **skbh, *skb;
490 unsigned char *buffer_start;
492 struct scatterlist *scl;
493 int num_sg, num_dma_bufs;
494 struct dpaa2_eth_swa *swa;
495 u8 fd_format = dpaa2_fd_get_format(fd);
496 struct dpaa2_fas *fas;
498 fd_addr = dpaa2_fd_get_addr(fd);
499 skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
500 fas = dpaa2_get_fas(skbh);
502 if (fd_format == dpaa2_fd_single) {
504 buffer_start = (unsigned char *)skbh;
505 /* Accessing the skb buffer is safe before dma unmap, because
506 * we didn't map the actual skb shell.
508 dma_unmap_single(dev, fd_addr,
509 skb_tail_pointer(skb) - buffer_start,
511 } else if (fd_format == dpaa2_fd_sg) {
512 swa = (struct dpaa2_eth_swa *)skbh;
515 num_sg = swa->num_sg;
516 num_dma_bufs = swa->num_dma_bufs;
518 /* Unmap the scatterlist */
519 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
522 /* Unmap the SGT buffer */
523 unmap_size = priv->tx_data_offset +
524 sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
525 dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
527 /* Unsupported format, mark it as errored and give up */
533 /* Read the status from the Frame Annotation after we unmap the first
534 * buffer but before we free it. The caller function is responsible
535 * for checking the status value.
538 *status = le32_to_cpu(fas->status);
540 /* Free SGT buffer kmalloc'ed on tx */
541 if (fd_format != dpaa2_fd_single)
544 /* Move on with skb release */
548 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
550 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
552 struct rtnl_link_stats64 *percpu_stats;
553 struct dpaa2_eth_drv_stats *percpu_extras;
554 struct dpaa2_eth_fq *fq;
558 percpu_stats = this_cpu_ptr(priv->percpu_stats);
559 percpu_extras = this_cpu_ptr(priv->percpu_extras);
561 if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
564 ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
566 percpu_stats->tx_dropped++;
567 goto err_alloc_headroom;
573 /* We'll be holding a back-reference to the skb until Tx Confirmation;
574 * we don't want that overwritten by a concurrent Tx with a cloned skb.
576 skb = skb_unshare(skb, GFP_ATOMIC);
577 if (unlikely(!skb)) {
578 /* skb_unshare() has already freed the skb */
579 percpu_stats->tx_dropped++;
583 /* Setup the FD fields */
584 memset(&fd, 0, sizeof(fd));
586 if (skb_is_nonlinear(skb)) {
587 err = build_sg_fd(priv, skb, &fd);
588 percpu_extras->tx_sg_frames++;
589 percpu_extras->tx_sg_bytes += skb->len;
591 err = build_single_fd(priv, skb, &fd);
595 percpu_stats->tx_dropped++;
600 trace_dpaa2_tx_fd(net_dev, &fd);
602 /* TxConf FQ selection primarily based on cpu affinity; this is
603 * non-migratable context, so it's safe to call smp_processor_id().
605 queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
606 fq = &priv->fq[queue_mapping];
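/* The QBMan portal may be transiently busy, so retry the enqueue a
 * bounded number of times before counting the frame as a Tx error.
 */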
607 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
608 err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
613 percpu_extras->tx_portal_busy += i;
614 if (unlikely(err < 0)) {
615 percpu_stats->tx_errors++;
616 /* Clean up everything, including freeing the skb */
617 free_tx_fd(priv, &fd, NULL);
619 percpu_stats->tx_packets++;
620 percpu_stats->tx_bytes += skb->len;
632 /* Tx confirmation frame processing routine */
633 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
634 struct dpaa2_eth_channel *ch,
635 const struct dpaa2_fd *fd,
636 struct napi_struct *napi __always_unused)
638 struct rtnl_link_stats64 *percpu_stats;
639 struct dpaa2_eth_drv_stats *percpu_extras;
642 bool has_fas_errors = false;
645 trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
647 percpu_extras = this_cpu_ptr(priv->percpu_extras);
648 percpu_extras->tx_conf_frames++;
649 percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
651 /* Check frame errors in the FD field */
652 fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
653 if (unlikely(fd_errors)) {
654 /* We only check error bits in the FAS field if corresponding
655 * FAERR bit is set in FD and the FAS field is marked as valid
657 has_fas_errors = (fd_errors & DPAA2_FD_CTRL_FAERR) &&
658 !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
660 netdev_dbg(priv->net_dev, "TX frame FD error: %08x\n",
664 free_tx_fd(priv, fd, has_fas_errors ? &status : NULL);
666 if (likely(!fd_errors))
669 percpu_stats = this_cpu_ptr(priv->percpu_stats);
670 /* Tx-conf logically pertains to the egress path. */
671 percpu_stats->tx_errors++;
673 if (has_fas_errors && net_ratelimit())
674 netdev_dbg(priv->net_dev, "TX frame FAS error: %08x\n",
675 status & DPAA2_FAS_TX_ERR_MASK);
678 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
682 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
683 DPNI_OFF_RX_L3_CSUM, enable);
685 netdev_err(priv->net_dev,
686 "dpni_set_offload(RX_L3_CSUM) failed\n");
690 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
691 DPNI_OFF_RX_L4_CSUM, enable);
693 netdev_err(priv->net_dev,
694 "dpni_set_offload(RX_L4_CSUM) failed\n");
701 static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
705 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
706 DPNI_OFF_TX_L3_CSUM, enable);
708 netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
712 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
713 DPNI_OFF_TX_L4_CSUM, enable);
715 netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
722 /* Perform a single release command to add buffers
723 * to the specified buffer pool
725 static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
727 struct device *dev = priv->net_dev->dev.parent;
728 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
733 for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
734 /* Allocate buffer visible to WRIOP + skb shared info +
737 buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
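/* Align the buffer start to the Rx data alignment advertised to the
 * hardware via the Rx buffer layout (data_align).
 */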
741 buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
743 addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
745 if (unlikely(dma_mapping_error(dev, addr)))
751 trace_dpaa2_eth_buf_seed(priv->net_dev,
752 buf, DPAA2_ETH_BUF_RAW_SIZE,
753 addr, DPAA2_ETH_RX_BUF_SIZE,
758 /* In case the portal is busy, retry until successful.
759 * The buffer release function would only fail if the QBMan portal
760 * was busy, which implies portal contention (i.e. more CPUs than
761 * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
762 * there is little we can realistically do, short of giving up -
763 * in which case we'd risk depleting the buffer pool and never again
764 * receiving the Rx interrupt which would kick-start the refill logic.
765 * So just keep retrying, at the risk of being moved to ksoftirqd.
767 while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
780 static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
785 /* This is the lazy seeding of Rx buffer pools.
786 * add_bufs() is also used on the Rx hotpath and calls napi_alloc_frag(),
787 * which in turn ends up calling this_cpu_ptr() and therefore mandates
788 * execution in atomic context.
789 * Rather than splitting up the code, do a one-off preempt disable.
792 for (j = 0; j < priv->num_channels; j++) {
793 for (i = 0; i < DPAA2_ETH_NUM_BUFS;
794 i += DPAA2_ETH_BUFS_PER_CMD) {
795 new_count = add_bufs(priv, bpid);
796 priv->channel[j]->buf_count += new_count;
798 if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
810 * Drain the specified number of buffers from the DPNI's private buffer pool.
811 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
813 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
815 struct device *dev = priv->net_dev->dev.parent;
816 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
821 ret = dpaa2_io_service_acquire(NULL, priv->bpid,
824 netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
827 for (i = 0; i < ret; i++) {
828 /* Same logic as on regular Rx path */
829 vaddr = dpaa2_iova_to_virt(priv->iommu_domain,
831 dma_unmap_single(dev, buf_array[i],
832 DPAA2_ETH_RX_BUF_SIZE,
834 skb_free_frag(vaddr);
839 static void drain_pool(struct dpaa2_eth_priv *priv)
843 drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
846 for (i = 0; i < priv->num_channels; i++)
847 priv->channel[i]->buf_count = 0;
850 /* Function is called from softirq context only, so we don't need to guard
851 * the access to percpu count
853 static int refill_pool(struct dpaa2_eth_priv *priv,
854 struct dpaa2_eth_channel *ch,
859 if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
863 new_count = add_bufs(priv, bpid);
864 if (unlikely(!new_count)) {
865 /* Out of memory; abort for now, we'll try later on */
868 ch->buf_count += new_count;
869 } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
871 if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
877 static int pull_channel(struct dpaa2_eth_channel *ch)
882 /* Retry while portal is busy */
884 err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
887 } while (err == -EBUSY);
889 ch->stats.dequeue_portal_busy += dequeues;
891 ch->stats.pull_err++;
898 * Frames are dequeued from the QMan channel associated with this NAPI context.
899 * Rx, Tx confirmation and (if configured) Rx error frames all count
900 * towards the NAPI budget.
902 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
904 struct dpaa2_eth_channel *ch;
905 int cleaned = 0, store_cleaned;
906 struct dpaa2_eth_priv *priv;
909 ch = container_of(napi, struct dpaa2_eth_channel, napi);
912 while (cleaned < budget) {
913 err = pull_channel(ch);
917 /* Refill pool if appropriate */
918 refill_pool(priv, ch, priv->bpid);
920 store_cleaned = consume_frames(ch);
921 cleaned += store_cleaned;
923 /* If we have enough budget left for a full store,
924 * try a new pull dequeue, otherwise we're done here
926 if (store_cleaned == 0 ||
927 cleaned > budget - DPAA2_ETH_STORE_SIZE)
931 if (cleaned < budget) {
932 napi_complete_done(napi, cleaned);
933 /* Re-enable data available notifications */
935 err = dpaa2_io_service_rearm(NULL, &ch->nctx);
937 } while (err == -EBUSY);
940 ch->stats.frames += cleaned;
945 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
947 struct dpaa2_eth_channel *ch;
950 for (i = 0; i < priv->num_channels; i++) {
951 ch = priv->channel[i];
952 napi_enable(&ch->napi);
956 static void disable_ch_napi(struct dpaa2_eth_priv *priv)
958 struct dpaa2_eth_channel *ch;
961 for (i = 0; i < priv->num_channels; i++) {
962 ch = priv->channel[i];
963 napi_disable(&ch->napi);
967 static int link_state_update(struct dpaa2_eth_priv *priv)
969 struct dpni_link_state state;
972 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
974 netdev_err(priv->net_dev,
975 "dpni_get_link_state() failed\n");
979 /* Check link state; speed / duplex changes are not handled yet */
980 if (priv->link_state.up == state.up)
983 priv->link_state = state;
985 netif_carrier_on(priv->net_dev);
986 netif_tx_start_all_queues(priv->net_dev);
988 netif_tx_stop_all_queues(priv->net_dev);
989 netif_carrier_off(priv->net_dev);
992 netdev_info(priv->net_dev, "Link Event: state %s\n",
993 state.up ? "up" : "down");
998 static int dpaa2_eth_open(struct net_device *net_dev)
1000 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1003 err = seed_pool(priv, priv->bpid);
1005 /* Not much to do; the buffer pool, though not filled up,
1006 * may still contain some buffers which would enable us
1009 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
1010 priv->dpbp_dev->obj_desc.id, priv->bpid);
1013 /* We'll only start the txqs when the link is actually ready; make sure
1014 * we don't race against the link up notification, which may come
1015 * immediately after dpni_enable();
1017 netif_tx_stop_all_queues(net_dev);
1018 enable_ch_napi(priv);
1019 /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
1020 * return true and cause 'ip link show' to report the LOWER_UP flag,
1021 * even though the link notification wasn't even received.
1023 netif_carrier_off(net_dev);
1025 err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1027 netdev_err(net_dev, "dpni_enable() failed\n");
1031 /* If the DPMAC object has already processed the link up interrupt,
1032 * we have to learn the link state ourselves.
1034 err = link_state_update(priv);
1036 netdev_err(net_dev, "Can't update link state\n");
1037 goto link_state_err;
1044 disable_ch_napi(priv);
1049 /* The DPIO store must be empty when we call this,
1050 * at the end of every NAPI cycle.
1052 static u32 drain_channel(struct dpaa2_eth_priv *priv,
1053 struct dpaa2_eth_channel *ch)
1055 u32 drained = 0, total = 0;
1059 drained = consume_frames(ch);
1066 static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
1068 struct dpaa2_eth_channel *ch;
1072 for (i = 0; i < priv->num_channels; i++) {
1073 ch = priv->channel[i];
1074 drained += drain_channel(priv, ch);
1080 static int dpaa2_eth_stop(struct net_device *net_dev)
1082 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1087 netif_tx_stop_all_queues(net_dev);
1088 netif_carrier_off(net_dev);
1090 /* Loop while dpni_disable() attempts to drain the egress FQs
1091 * and confirm them back to us.
1094 dpni_disable(priv->mc_io, 0, priv->mc_token);
1095 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1097 /* Allow the hardware some slack */
1099 } while (dpni_enabled && --retries);
1101 netdev_warn(net_dev, "Retry count exceeded while disabling DPNI\n");
1102 /* Must go on and disable NAPI nonetheless, so we don't crash at
1103 * the next "ifconfig up"
1107 /* Wait for NAPI to complete on every core and disable it.
1108 * In particular, this will also prevent NAPI from being rescheduled if
1109 * a new CDAN is serviced, effectively discarding the CDAN. We therefore
1110 * don't even need to disarm the channels, except perhaps for the case
1111 * of a huge coalescing value.
1113 disable_ch_napi(priv);
1115 /* Manually drain the Rx and TxConf queues */
1116 drained = drain_ingress_frames(priv);
1118 netdev_dbg(net_dev, "Drained %d frames.\n", drained);
1120 /* Empty the buffer pool */
1126 static int dpaa2_eth_init(struct net_device *net_dev)
1129 u64 not_supported = 0;
1130 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1131 u32 options = priv->dpni_attrs.options;
1133 /* Capabilities listing */
1134 supported |= IFF_LIVE_ADDR_CHANGE;
1136 if (options & DPNI_OPT_NO_MAC_FILTER)
1137 not_supported |= IFF_UNICAST_FLT;
1139 supported |= IFF_UNICAST_FLT;
1141 net_dev->priv_flags |= supported;
1142 net_dev->priv_flags &= ~not_supported;
1145 net_dev->features = NETIF_F_RXCSUM |
1146 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1147 NETIF_F_SG | NETIF_F_HIGHDMA |
1149 net_dev->hw_features = net_dev->features;
1154 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1156 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1157 struct device *dev = net_dev->dev.parent;
1160 err = eth_mac_addr(net_dev, addr);
1162 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1166 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1169 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1176 /** Fill in counters maintained by the GPP driver. These may be different from
1177 * the hardware counters obtained by ethtool.
1179 static void dpaa2_eth_get_stats(struct net_device *net_dev,
1180 struct rtnl_link_stats64 *stats)
1182 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1183 struct rtnl_link_stats64 *percpu_stats;
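/* Both the aggregate stats and each per-cpu copy are treated as flat
 * arrays of u64 counters, so they can be summed field by field.
 */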
1185 u64 *netstats = (u64 *)stats;
1187 int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1189 for_each_possible_cpu(i) {
1190 percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1191 cpustats = (u64 *)percpu_stats;
1192 for (j = 0; j < num; j++)
1193 netstats[j] += cpustats[j];
1197 static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
1199 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1202 /* Set the maximum Rx frame length to match the transmit side;
1203 * account for L2 headers when computing the MFL
1205 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
1206 (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
1208 netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
1216 /* Copy mac unicast addresses from @net_dev to @priv.
1217 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1219 static void add_uc_hw_addr(const struct net_device *net_dev,
1220 struct dpaa2_eth_priv *priv)
1222 struct netdev_hw_addr *ha;
1225 netdev_for_each_uc_addr(ha, net_dev) {
1226 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1229 netdev_warn(priv->net_dev,
1230 "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1235 /* Copy mac multicast addresses from @net_dev to @priv
1236 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1238 static void add_mc_hw_addr(const struct net_device *net_dev,
1239 struct dpaa2_eth_priv *priv)
1241 struct netdev_hw_addr *ha;
1244 netdev_for_each_mc_addr(ha, net_dev) {
1245 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1248 netdev_warn(priv->net_dev,
1249 "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1254 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
1256 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1257 int uc_count = netdev_uc_count(net_dev);
1258 int mc_count = netdev_mc_count(net_dev);
1259 u8 max_mac = priv->dpni_attrs.mac_filter_entries;
1260 u32 options = priv->dpni_attrs.options;
1261 u16 mc_token = priv->mc_token;
1262 struct fsl_mc_io *mc_io = priv->mc_io;
1265 /* Basic sanity checks; these probably indicate a misconfiguration */
1266 if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
1267 netdev_info(net_dev,
1268 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
1271 /* Force promiscuous if the uc or mc counts exceed our capabilities. */
1272 if (uc_count > max_mac) {
1273 netdev_info(net_dev,
1274 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
1278 if (mc_count + uc_count > max_mac) {
1279 netdev_info(net_dev,
1280 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
1281 uc_count + mc_count, max_mac);
1282 goto force_mc_promisc;
1285 /* Adjust promisc settings due to flag combinations */
1286 if (net_dev->flags & IFF_PROMISC)
1288 if (net_dev->flags & IFF_ALLMULTI) {
1289 /* First, rebuild unicast filtering table. This should be done
1290 * in promisc mode, in order to avoid frame loss while we
1291 * progressively add entries to the table.
1292 * We don't know whether we had been in promisc already, and
1293 * making an MC call to find out is expensive; so set uc promisc
1296 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1298 netdev_warn(net_dev, "Can't set uc promisc\n");
1300 /* Actual uc table reconstruction. */
1301 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
1303 netdev_warn(net_dev, "Can't clear uc filters\n");
1304 add_uc_hw_addr(net_dev, priv);
1306 /* Finally, clear uc promisc and set mc promisc as requested. */
1307 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1309 netdev_warn(net_dev, "Can't clear uc promisc\n");
1310 goto force_mc_promisc;
1313 /* Neither unicast, nor multicast promisc will be on... eventually.
1314 * For now, rebuild mac filtering tables while forcing both of them on.
1316 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1318 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
1319 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1321 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
1323 /* Actual mac filtering tables reconstruction */
1324 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
1326 netdev_warn(net_dev, "Can't clear mac filters\n");
1327 add_mc_hw_addr(net_dev, priv);
1328 add_uc_hw_addr(net_dev, priv);
1330 /* Now we can clear both ucast and mcast promisc, without risking
1331 * to drop legitimate frames anymore.
1333 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1335 netdev_warn(net_dev, "Can't clear ucast promisc\n");
1336 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
1338 netdev_warn(net_dev, "Can't clear mcast promisc\n");
1343 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1345 netdev_warn(net_dev, "Can't set ucast promisc\n");
1347 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1349 netdev_warn(net_dev, "Can't set mcast promisc\n");
1352 static int dpaa2_eth_set_features(struct net_device *net_dev,
1353 netdev_features_t features)
1355 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1356 netdev_features_t changed = features ^ net_dev->features;
1360 if (changed & NETIF_F_RXCSUM) {
1361 enable = !!(features & NETIF_F_RXCSUM);
1362 err = set_rx_csum(priv, enable);
1367 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1368 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
1369 err = set_tx_csum(priv, enable);
1377 static const struct net_device_ops dpaa2_eth_ops = {
1378 .ndo_open = dpaa2_eth_open,
1379 .ndo_start_xmit = dpaa2_eth_tx,
1380 .ndo_stop = dpaa2_eth_stop,
1381 .ndo_init = dpaa2_eth_init,
1382 .ndo_set_mac_address = dpaa2_eth_set_addr,
1383 .ndo_get_stats64 = dpaa2_eth_get_stats,
1384 .ndo_change_mtu = dpaa2_eth_change_mtu,
1385 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
1386 .ndo_set_features = dpaa2_eth_set_features,
1389 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
1391 struct dpaa2_eth_channel *ch;
1393 ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
1395 /* Update NAPI statistics */
1398 napi_schedule_irqoff(&ch->napi);
1401 /* Allocate and configure a DPCON object */
1402 static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
1404 struct fsl_mc_device *dpcon;
1405 struct device *dev = priv->net_dev->dev.parent;
1406 struct dpcon_attr attrs;
1409 err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
1410 FSL_MC_POOL_DPCON, &dpcon);
1412 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
1416 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
1418 dev_err(dev, "dpcon_open() failed\n");
1422 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
1424 dev_err(dev, "dpcon_reset() failed\n");
1428 err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
1430 dev_err(dev, "dpcon_get_attributes() failed\n");
1434 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
1436 dev_err(dev, "dpcon_enable() failed\n");
1445 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1447 fsl_mc_object_free(dpcon);
1452 static void free_dpcon(struct dpaa2_eth_priv *priv,
1453 struct fsl_mc_device *dpcon)
1455 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
1456 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1457 fsl_mc_object_free(dpcon);
1460 static struct dpaa2_eth_channel *
1461 alloc_channel(struct dpaa2_eth_priv *priv)
1463 struct dpaa2_eth_channel *channel;
1464 struct dpcon_attr attr;
1465 struct device *dev = priv->net_dev->dev.parent;
1468 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
1472 channel->dpcon = setup_dpcon(priv);
1473 if (!channel->dpcon)
1476 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
1479 dev_err(dev, "dpcon_get_attributes() failed\n");
1483 channel->dpcon_id = attr.id;
1484 channel->ch_id = attr.qbman_ch_id;
1485 channel->priv = priv;
1490 free_dpcon(priv, channel->dpcon);
1496 static void free_channel(struct dpaa2_eth_priv *priv,
1497 struct dpaa2_eth_channel *channel)
1499 free_dpcon(priv, channel->dpcon);
1503 /* DPIO setup: allocate and configure QBMan channels, setup core affinity
1504 * and register data availability notifications
1506 static int setup_dpio(struct dpaa2_eth_priv *priv)
1508 struct dpaa2_io_notification_ctx *nctx;
1509 struct dpaa2_eth_channel *channel;
1510 struct dpcon_notification_cfg dpcon_notif_cfg;
1511 struct device *dev = priv->net_dev->dev.parent;
1514 /* We want the ability to spread ingress traffic (RX, TX conf) to as
1515 * many cores as possible, so we need one channel for each core
1516 * (unless there's fewer queues than cores, in which case the extra
1517 * channels would be wasted).
1518 * Allocate one channel per core and register it to the core's
1519 * affine DPIO. If not enough channels are available for all cores
1520 * or if some cores don't have an affine DPIO, there will be no
1521 * ingress frame processing on those cores.
1523 cpumask_clear(&priv->dpio_cpumask);
1524 for_each_online_cpu(i) {
1525 /* Try to allocate a channel */
1526 channel = alloc_channel(priv);
1529 "No affine channel for cpu %d and above\n", i);
1534 priv->channel[priv->num_channels] = channel;
1536 nctx = &channel->nctx;
1539 nctx->id = channel->ch_id;
1540 nctx->desired_cpu = i;
1542 /* Register the new context */
1543 err = dpaa2_io_service_register(NULL, nctx);
1545 dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
1546 /* If no affine DPIO for this core, there's probably
1547 * none available for next cores either. Signal we want
1548 * to retry later, in case the DPIO devices weren't
1551 err = -EPROBE_DEFER;
1552 goto err_service_reg;
1555 /* Register DPCON notification with MC */
1556 dpcon_notif_cfg.dpio_id = nctx->dpio_id;
1557 dpcon_notif_cfg.priority = 0;
1558 dpcon_notif_cfg.user_ctx = nctx->qman64;
1559 err = dpcon_set_notification(priv->mc_io, 0,
1560 channel->dpcon->mc_handle,
1563 dev_err(dev, "dpcon_set_notification failed()\n");
1567 /* If we managed to allocate a channel and also found an affine
1568 * DPIO for this core, add it to the final mask
1570 cpumask_set_cpu(i, &priv->dpio_cpumask);
1571 priv->num_channels++;
1573 /* Stop if we already have enough channels to accommodate all
1574 * RX and TX conf queues
1576 if (priv->num_channels == dpaa2_eth_queue_count(priv))
1583 dpaa2_io_service_deregister(NULL, nctx);
1585 free_channel(priv, channel);
1587 if (cpumask_empty(&priv->dpio_cpumask)) {
1588 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
1592 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
1593 cpumask_pr_args(&priv->dpio_cpumask));
1598 static void free_dpio(struct dpaa2_eth_priv *priv)
1601 struct dpaa2_eth_channel *ch;
1603 /* deregister CDAN notifications and free channels */
1604 for (i = 0; i < priv->num_channels; i++) {
1605 ch = priv->channel[i];
1606 dpaa2_io_service_deregister(NULL, &ch->nctx);
1607 free_channel(priv, ch);
1611 static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
1614 struct device *dev = priv->net_dev->dev.parent;
1617 for (i = 0; i < priv->num_channels; i++)
1618 if (priv->channel[i]->nctx.desired_cpu == cpu)
1619 return priv->channel[i];
1621 /* We should never get here. Issue a warning and return
1622 * the first channel, because it's still better than nothing
1624 dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
1626 return priv->channel[0];
1629 static void set_fq_affinity(struct dpaa2_eth_priv *priv)
1631 struct device *dev = priv->net_dev->dev.parent;
1632 struct dpaa2_eth_fq *fq;
1633 int rx_cpu, txc_cpu;
1636 /* For each FQ, pick one channel/CPU to deliver frames to.
1637 * This may well change at runtime, either through irqbalance or
1638 * through direct user intervention.
1640 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
1642 for (i = 0; i < priv->num_fqs; i++) {
1646 fq->target_cpu = rx_cpu;
1647 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
1648 if (rx_cpu >= nr_cpu_ids)
1649 rx_cpu = cpumask_first(&priv->dpio_cpumask);
1651 case DPAA2_TX_CONF_FQ:
1652 fq->target_cpu = txc_cpu;
1653 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
1654 if (txc_cpu >= nr_cpu_ids)
1655 txc_cpu = cpumask_first(&priv->dpio_cpumask);
1658 dev_err(dev, "Unknown FQ type: %d\n", fq->type);
1660 fq->channel = get_affine_channel(priv, fq->target_cpu);
1664 static void setup_fqs(struct dpaa2_eth_priv *priv)
1668 /* We have one TxConf FQ per Tx flow.
1669 * The number of Tx and Rx queues is the same.
1670 * Tx queues come first in the fq array.
1672 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
1673 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
1674 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
1675 priv->fq[priv->num_fqs++].flowid = (u16)i;
1678 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
1679 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
1680 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
1681 priv->fq[priv->num_fqs++].flowid = (u16)i;
1684 /* For each FQ, decide on which core to process incoming frames */
1685 set_fq_affinity(priv);
1688 /* Allocate and configure one buffer pool for each interface */
1689 static int setup_dpbp(struct dpaa2_eth_priv *priv)
1692 struct fsl_mc_device *dpbp_dev;
1693 struct device *dev = priv->net_dev->dev.parent;
1694 struct dpbp_attr dpbp_attrs;
1696 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
1699 dev_err(dev, "DPBP device allocation failed\n");
1703 priv->dpbp_dev = dpbp_dev;
1705 err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
1706 &dpbp_dev->mc_handle);
1708 dev_err(dev, "dpbp_open() failed\n");
1712 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
1714 dev_err(dev, "dpbp_reset() failed\n");
1718 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
1720 dev_err(dev, "dpbp_enable() failed\n");
1724 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
1727 dev_err(dev, "dpbp_get_attributes() failed\n");
1730 priv->bpid = dpbp_attrs.bpid;
1735 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
1738 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
1740 fsl_mc_object_free(dpbp_dev);
1745 static void free_dpbp(struct dpaa2_eth_priv *priv)
1748 dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
1749 dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
1750 fsl_mc_object_free(priv->dpbp_dev);
1753 /* Configure the DPNI object this interface is associated with */
1754 static int setup_dpni(struct fsl_mc_device *ls_dev)
1756 struct device *dev = &ls_dev->dev;
1757 struct dpaa2_eth_priv *priv;
1758 struct net_device *net_dev;
1759 struct dpni_buffer_layout buf_layout = {0};
1762 net_dev = dev_get_drvdata(dev);
1763 priv = netdev_priv(net_dev);
1765 /* get a handle for the DPNI object */
1766 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
1768 dev_err(dev, "dpni_open() failed\n");
1772 ls_dev->mc_io = priv->mc_io;
1773 ls_dev->mc_handle = priv->mc_token;
1775 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
1777 dev_err(dev, "dpni_reset() failed\n");
1781 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
1784 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
1788 /* Configure buffer layouts */
1790 buf_layout.pass_parser_result = true;
1791 buf_layout.pass_frame_status = true;
1792 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
1793 buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
1794 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
1795 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
1796 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
1797 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
1798 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1799 DPNI_QUEUE_RX, &buf_layout);
1801 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
1802 goto err_buf_layout;
1806 buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
1807 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
1808 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1809 DPNI_QUEUE_TX, &buf_layout);
1811 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
1812 goto err_buf_layout;
1815 /* tx-confirm buffer */
1816 buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
1817 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1818 DPNI_QUEUE_TX_CONFIRM, &buf_layout);
1820 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
1821 goto err_buf_layout;
1824 /* Now that we've set our tx buffer layout, retrieve the minimum
1825 * required tx data offset.
1827 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
1828 &priv->tx_data_offset);
1830 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
1831 goto err_data_offset;
1834 if ((priv->tx_data_offset % 64) != 0)
1835 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
1836 priv->tx_data_offset);
1838 /* Accommodate software annotation space (SWA) */
1839 priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
1847 dpni_close(priv->mc_io, 0, priv->mc_token);
1852 static void free_dpni(struct dpaa2_eth_priv *priv)
1856 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
1858 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
1861 dpni_close(priv->mc_io, 0, priv->mc_token);
1864 static int setup_rx_flow(struct dpaa2_eth_priv *priv,
1865 struct dpaa2_eth_fq *fq)
1867 struct device *dev = priv->net_dev->dev.parent;
1868 struct dpni_queue queue;
1869 struct dpni_queue_id qid;
1870 struct dpni_taildrop td;
1873 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1874 DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
1876 dev_err(dev, "dpni_get_queue(RX) failed\n");
1880 fq->fqid = qid.fqid;
1882 queue.destination.id = fq->channel->dpcon_id;
1883 queue.destination.type = DPNI_DEST_DPCON;
1884 queue.destination.priority = 1;
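/* Store the software fq structure as the hardware queue's user context,
 * so consume_frames() can retrieve it via dpaa2_dq_fqd_ctx().
 */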
1885 queue.user_context = (u64)fq;
1886 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
1887 DPNI_QUEUE_RX, 0, fq->flowid,
1888 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
1891 dev_err(dev, "dpni_set_queue(RX) failed\n");
1896 td.threshold = DPAA2_ETH_TAILDROP_THRESH;
1897 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
1898 DPNI_QUEUE_RX, 0, fq->flowid, &td);
1900 dev_err(dev, "dpni_set_threshold() failed\n");
1907 static int setup_tx_flow(struct dpaa2_eth_priv *priv,
1908 struct dpaa2_eth_fq *fq)
1910 struct device *dev = priv->net_dev->dev.parent;
1911 struct dpni_queue queue;
1912 struct dpni_queue_id qid;
1915 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1916 DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
1918 dev_err(dev, "dpni_get_queue(TX) failed\n");
1922 fq->tx_qdbin = qid.qdbin;
1924 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1925 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
1928 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
1932 fq->fqid = qid.fqid;
1934 queue.destination.id = fq->channel->dpcon_id;
1935 queue.destination.type = DPNI_DEST_DPCON;
1936 queue.destination.priority = 0;
1937 queue.user_context = (u64)fq;
1938 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
1939 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
1940 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
1943 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
1950 /* Hash key is a 5-tuple: IPsrc, IPdst, IPnextproto, L4src, L4dst */
1951 static const struct dpaa2_eth_hash_fields hash_fields[] = {
1954 .rxnfc_field = RXH_IP_SRC,
1955 .cls_prot = NET_PROT_IP,
1956 .cls_field = NH_FLD_IP_SRC,
1959 .rxnfc_field = RXH_IP_DST,
1960 .cls_prot = NET_PROT_IP,
1961 .cls_field = NH_FLD_IP_DST,
1964 .rxnfc_field = RXH_L3_PROTO,
1965 .cls_prot = NET_PROT_IP,
1966 .cls_field = NH_FLD_IP_PROTO,
1969 /* Using UDP ports, this is functionally equivalent to raw
1970 * byte pairs from L4 header.
1972 .rxnfc_field = RXH_L4_B_0_1,
1973 .cls_prot = NET_PROT_UDP,
1974 .cls_field = NH_FLD_UDP_PORT_SRC,
1977 .rxnfc_field = RXH_L4_B_2_3,
1978 .cls_prot = NET_PROT_UDP,
1979 .cls_field = NH_FLD_UDP_PORT_DST,
1984 /* Set RX hash options
1985 * flags is a combination of RXH_ bits
1987 static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
1989 struct device *dev = net_dev->dev.parent;
1990 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1991 struct dpkg_profile_cfg cls_cfg;
1992 struct dpni_rx_tc_dist_cfg dist_cfg;
1997 if (!dpaa2_eth_hash_enabled(priv)) {
1998 dev_dbg(dev, "Hashing support is not enabled\n");
2002 memset(&cls_cfg, 0, sizeof(cls_cfg));
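/* Build the key generation profile: one full header-field extract for
 * each requested RXH_ flag.
 */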
2004 for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
2005 struct dpkg_extract *key =
2006 &cls_cfg.extracts[cls_cfg.num_extracts];
2008 if (!(flags & hash_fields[i].rxnfc_field))
2011 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
2012 dev_err(dev, "error adding key extraction rule, too many rules?\n");
2016 key->type = DPKG_EXTRACT_FROM_HDR;
2017 key->extract.from_hdr.prot = hash_fields[i].cls_prot;
2018 key->extract.from_hdr.type = DPKG_FULL_FIELD;
2019 key->extract.from_hdr.field = hash_fields[i].cls_field;
2020 cls_cfg.num_extracts++;
2022 priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
2025 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
2029 err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
2031 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
2035 memset(&dist_cfg, 0, sizeof(dist_cfg));
2037 /* Prepare for setting the rx dist */
2038 dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
2039 DPAA2_CLASSIFIER_DMA_SIZE,
2041 if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
2042 dev_err(dev, "DMA mapping failed\n");
2047 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2048 dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
2050 err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
2051 dma_unmap_single(dev, dist_cfg.key_cfg_iova,
2052 DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
2054 dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
2062 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
2063 * frame queues and channels
2065 static int bind_dpni(struct dpaa2_eth_priv *priv)
2067 struct net_device *net_dev = priv->net_dev;
2068 struct device *dev = net_dev->dev.parent;
2069 struct dpni_pools_cfg pools_params;
2070 struct dpni_error_cfg err_cfg;
2074 pools_params.num_dpbp = 1;
2075 pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
2076 pools_params.pools[0].backup_pool = 0;
2077 pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
2078 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
2080 dev_err(dev, "dpni_set_pools() failed\n");
2084 /* have the interface implicitly distribute traffic based on supported
2087 err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
2089 netdev_err(net_dev, "Failed to configure hashing\n");
2091 /* Configure handling of error frames */
2092 err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
2093 err_cfg.set_frame_annotation = 1;
2094 err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
2095 err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
2098 dev_err(dev, "dpni_set_errors_behavior failed\n");
2102 /* Configure Rx and Tx conf queues to generate CDANs */
2103 for (i = 0; i < priv->num_fqs; i++) {
2104 switch (priv->fq[i].type) {
2106 err = setup_rx_flow(priv, &priv->fq[i]);
2108 case DPAA2_TX_CONF_FQ:
2109 err = setup_tx_flow(priv, &priv->fq[i]);
2112 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
2119 err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
2120 DPNI_QUEUE_TX, &priv->tx_qdid);
2122 dev_err(dev, "dpni_get_qdid() failed\n");
2129 /* Allocate rings for storing incoming frame descriptors */
2130 static int alloc_rings(struct dpaa2_eth_priv *priv)
2132 struct net_device *net_dev = priv->net_dev;
2133 struct device *dev = net_dev->dev.parent;
2136 for (i = 0; i < priv->num_channels; i++) {
2137 priv->channel[i]->store =
2138 dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
2139 if (!priv->channel[i]->store) {
2140 netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
2148 for (i = 0; i < priv->num_channels; i++) {
2149 if (!priv->channel[i]->store)
2151 dpaa2_io_store_destroy(priv->channel[i]->store);
2157 static void free_rings(struct dpaa2_eth_priv *priv)
2161 for (i = 0; i < priv->num_channels; i++)
2162 dpaa2_io_store_destroy(priv->channel[i]->store);
2165 static int set_mac_addr(struct dpaa2_eth_priv *priv)
2167 struct net_device *net_dev = priv->net_dev;
2168 struct device *dev = net_dev->dev.parent;
2169 u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
2172 /* Get firmware address, if any */
2173 err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
2175 dev_err(dev, "dpni_get_port_mac_addr() failed\n");
2179 /* Get DPNI attributes address, if any */
2180 err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2183 dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
2187 /* First check if firmware has any address configured by bootloader */
2188 if (!is_zero_ether_addr(mac_addr)) {
2189 /* If the DPMAC addr != DPNI addr, update it */
2190 if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
2191 err = dpni_set_primary_mac_addr(priv->mc_io, 0,
2195 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
2199 memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
2200 } else if (is_zero_ether_addr(dpni_mac_addr)) {
2201 /* No MAC address configured, fill in net_dev->dev_addr
2204 eth_hw_addr_random(net_dev);
2205 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
2207 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2210 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
2214 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
2215 * practical purposes, this will be our "permanent" mac address,
2216 * at least until the next reboot. This move will also permit
2217 * register_netdevice() to properly fill up net_dev->perm_addr.
2219 net_dev->addr_assign_type = NET_ADDR_PERM;
2221 /* NET_ADDR_PERM is default, all we have to do is
2222 * fill in the device addr.
2224 memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
2230 static int netdev_init(struct net_device *net_dev)
2232 struct device *dev = net_dev->dev.parent;
2233 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2234 u8 bcast_addr[ETH_ALEN];
2238 net_dev->netdev_ops = &dpaa2_eth_ops;
2240 err = set_mac_addr(priv);
2244 /* Explicitly add the broadcast address to the MAC filtering table */
2245 eth_broadcast_addr(bcast_addr);
2246 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
2248 dev_err(dev, "dpni_add_mac_addr() failed\n");
2252 /* Reserve enough space to align buffer as per hardware requirement;
2253 * NOTE: priv->tx_data_offset MUST be initialized at this point.
2255 net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
2257 /* Set MTU limits */
2258 net_dev->min_mtu = 68;
2259 net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
2261 /* Set actual number of queues in the net device */
2262 num_queues = dpaa2_eth_queue_count(priv);
2263 err = netif_set_real_num_tx_queues(net_dev, num_queues);
2265 dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
2268 err = netif_set_real_num_rx_queues(net_dev, num_queues);
2270 dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
2274 /* Our .ndo_init will be called as part of register_netdev() */
2275 err = register_netdev(net_dev);
2277 dev_err(dev, "register_netdev() failed\n");
2284 static int poll_link_state(void *arg)
2286 struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
2289 while (!kthread_should_stop()) {
2290 err = link_state_update(priv);
2294 msleep(DPAA2_ETH_LINK_STATE_REFRESH);
2300 static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
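/* All the real work happens in the threaded handler below, which reads
 * and clears the interrupt status over the MC portal; the hard IRQ only
 * wakes that thread.
 */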
2302 return IRQ_WAKE_THREAD;
2305 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
2307 u32 status = 0, clear = 0;
2308 struct device *dev = (struct device *)arg;
2309 struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
2310 struct net_device *net_dev = dev_get_drvdata(dev);
2313 err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
2314 DPNI_IRQ_INDEX, &status);
2315 if (unlikely(err)) {
2316 netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
2321 if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
2322 clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
2323 link_state_update(netdev_priv(net_dev));
2327 dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
2328 DPNI_IRQ_INDEX, clear);
2332 static int setup_irqs(struct fsl_mc_device *ls_dev)
2335 struct fsl_mc_device_irq *irq;
2337 err = fsl_mc_allocate_irqs(ls_dev);
2339 dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
2343 irq = ls_dev->irqs[0];
2344 err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
2346 dpni_irq0_handler_thread,
2347 IRQF_NO_SUSPEND | IRQF_ONESHOT,
2348 dev_name(&ls_dev->dev), &ls_dev->dev);
2350 dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
2354 err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
2355 DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
2357 dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
2361 err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
2364 dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
2371 devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
2373 fsl_mc_free_irqs(ls_dev);
2378 static void add_ch_napi(struct dpaa2_eth_priv *priv)
2381 struct dpaa2_eth_channel *ch;
2383 for (i = 0; i < priv->num_channels; i++) {
2384 ch = priv->channel[i];
2385 /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
2386 netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
2391 static void del_ch_napi(struct dpaa2_eth_priv *priv)
2394 struct dpaa2_eth_channel *ch;
2396 for (i = 0; i < priv->num_channels; i++) {
2397 ch = priv->channel[i];
2398 netif_napi_del(&ch->napi);
2402 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
2405 struct net_device *net_dev = NULL;
2406 struct dpaa2_eth_priv *priv = NULL;
2409 dev = &dpni_dev->dev;
2412 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
2414 dev_err(dev, "alloc_etherdev_mq() failed\n");
2418 SET_NETDEV_DEV(net_dev, dev);
2419 dev_set_drvdata(dev, net_dev);
2421 priv = netdev_priv(net_dev);
2422 priv->net_dev = net_dev;
2424 priv->iommu_domain = iommu_get_domain_for_dev(dev);
2426 /* Obtain a MC portal */
2427 err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
2430 dev_err(dev, "MC portal allocation failed\n");
2431 goto err_portal_alloc;
2434 /* MC objects initialization and configuration */
2435 err = setup_dpni(dpni_dev);
2437 goto err_dpni_setup;
2439 err = setup_dpio(priv);
2441 goto err_dpio_setup;
2445 err = setup_dpbp(priv);
2447 goto err_dpbp_setup;
2449 err = bind_dpni(priv);
2453 /* Add a NAPI context for each channel */
2456 /* Percpu statistics */
2457 priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
2458 if (!priv->percpu_stats) {
2459 dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
2461 goto err_alloc_percpu_stats;
2463 priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
2464 if (!priv->percpu_extras) {
2465 dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
2467 goto err_alloc_percpu_extras;
2470 err = netdev_init(net_dev);
2472 goto err_netdev_init;
2474 /* Configure checksum offload based on current interface flags */
2475 err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
2479 err = set_tx_csum(priv, !!(net_dev->features &
2480 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
2484 err = alloc_rings(priv);
2486 goto err_alloc_rings;
2488 net_dev->ethtool_ops = &dpaa2_ethtool_ops;
2490 err = setup_irqs(dpni_dev);
2492 netdev_warn(net_dev, "Failed to set link interrupt, falling back to polling\n");
2493 priv->poll_thread = kthread_run(poll_link_state, priv,
2494 "%s_poll_link", net_dev->name);
2495 if (IS_ERR(priv->poll_thread)) {
2496 netdev_err(net_dev, "Error starting polling thread\n");
2497 goto err_poll_thread;
2499 priv->do_link_poll = true;
2502 dev_info(dev, "Probed interface %s\n", net_dev->name);
2509 unregister_netdev(net_dev);
2511 free_percpu(priv->percpu_extras);
2512 err_alloc_percpu_extras:
2513 free_percpu(priv->percpu_stats);
2514 err_alloc_percpu_stats:
2523 fsl_mc_portal_free(priv->mc_io);
2525 dev_set_drvdata(dev, NULL);
2526 free_netdev(net_dev);
2531 static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
2534 struct net_device *net_dev;
2535 struct dpaa2_eth_priv *priv;
2538 net_dev = dev_get_drvdata(dev);
2539 priv = netdev_priv(net_dev);
2541 unregister_netdev(net_dev);
2542 dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
2544 if (priv->do_link_poll)
2545 kthread_stop(priv->poll_thread);
2547 fsl_mc_free_irqs(ls_dev);
2550 free_percpu(priv->percpu_stats);
2551 free_percpu(priv->percpu_extras);
2558 fsl_mc_portal_free(priv->mc_io);
2560 dev_set_drvdata(dev, NULL);
2561 free_netdev(net_dev);
2566 static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
2568 .vendor = FSL_MC_VENDOR_FREESCALE,
2573 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
2575 static struct fsl_mc_driver dpaa2_eth_driver = {
2577 .name = KBUILD_MODNAME,
2578 .owner = THIS_MODULE,
2580 .probe = dpaa2_eth_probe,
2581 .remove = dpaa2_eth_remove,
2582 .match_id_table = dpaa2_eth_match_id_table
2585 module_fsl_mc_driver(dpaa2_eth_driver);