staging: fsl-dpaa2/eth: Add error message newlines
drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
1 /* Copyright 2014-2016 Freescale Semiconductor Inc.
2  * Copyright 2016-2017 NXP
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *     * Redistributions of source code must retain the above copyright
7  *       notice, this list of conditions and the following disclaimer.
8  *     * Redistributions in binary form must reproduce the above copyright
9  *       notice, this list of conditions and the following disclaimer in the
10  *       documentation and/or other materials provided with the distribution.
11  *     * Neither the name of Freescale Semiconductor nor the
12  *       names of its contributors may be used to endorse or promote products
13  *       derived from this software without specific prior written permission.
14  *
15  *
16  * ALTERNATIVELY, this software may be distributed under the terms of the
17  * GNU General Public License ("GPL") as published by the Free Software
18  * Foundation, either version 2 of that License or (at your option) any
19  * later version.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
22  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
23  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24  * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
25  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
28  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
30  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/platform_device.h>
35 #include <linux/etherdevice.h>
36 #include <linux/of_net.h>
37 #include <linux/interrupt.h>
38 #include <linux/msi.h>
39 #include <linux/kthread.h>
40 #include <linux/iommu.h>
41
42 #include "../../fsl-mc/include/mc.h"
43 #include "../../fsl-mc/include/mc-sys.h"
44 #include "dpaa2-eth.h"
45
46 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2-eth files
47  * using these trace events only need to #include "dpaa2-eth-trace.h"
48  */
49 #define CREATE_TRACE_POINTS
50 #include "dpaa2-eth-trace.h"
51
52 MODULE_LICENSE("Dual BSD/GPL");
53 MODULE_AUTHOR("Freescale Semiconductor, Inc");
54 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
55
56 const char dpaa2_eth_drv_version[] = "0.1";
57
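/* Translate an I/O virtual address (as carried in frame descriptors) back to
 * a kernel virtual address; without an IOMMU domain, the IOVA is already a
 * physical address.
 */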
58 static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
59                                 dma_addr_t iova_addr)
60 {
61         phys_addr_t phys_addr;
62
63         phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
64
65         return phys_to_virt(phys_addr);
66 }
67
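/* Let the stack know whether the hardware already validated the L3/L4
 * checksums, based on the frame annotation status bits.
 */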
68 static void validate_rx_csum(struct dpaa2_eth_priv *priv,
69                              u32 fd_status,
70                              struct sk_buff *skb)
71 {
72         skb_checksum_none_assert(skb);
73
74         /* HW checksum validation is disabled, nothing to do here */
75         if (!(priv->net_dev->features & NETIF_F_RXCSUM))
76                 return;
77
78         /* Read checksum validation bits */
79         if (!((fd_status & DPAA2_FAS_L3CV) &&
80               (fd_status & DPAA2_FAS_L4CV)))
81                 return;
82
83         /* Inform the stack there's no need to compute L3/L4 csum anymore */
84         skb->ip_summed = CHECKSUM_UNNECESSARY;
85 }
86
87 /* Free a received FD.
88  * Not to be used for Tx conf FDs or on any other paths.
89  */
90 static void free_rx_fd(struct dpaa2_eth_priv *priv,
91                        const struct dpaa2_fd *fd,
92                        void *vaddr)
93 {
94         struct device *dev = priv->net_dev->dev.parent;
95         dma_addr_t addr = dpaa2_fd_get_addr(fd);
96         u8 fd_format = dpaa2_fd_get_format(fd);
97         struct dpaa2_sg_entry *sgt;
98         void *sg_vaddr;
99         int i;
100
101         /* If single buffer frame, just free the data buffer */
102         if (fd_format == dpaa2_fd_single)
103                 goto free_buf;
104         else if (fd_format != dpaa2_fd_sg)
105                 /* We don't support any other format */
106                 return;
107
108         /* For S/G frames, we first need to free all SG entries */
109         sgt = vaddr + dpaa2_fd_get_offset(fd);
110         for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
111                 addr = dpaa2_sg_get_addr(&sgt[i]);
112                 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
113                 dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
114                                  DMA_FROM_DEVICE);
115
116                 skb_free_frag(sg_vaddr);
117                 if (dpaa2_sg_is_final(&sgt[i]))
118                         break;
119         }
120
121 free_buf:
122         skb_free_frag(vaddr);
123 }
124
125 /* Build a linear skb based on a single-buffer frame descriptor */
126 static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
127                                         struct dpaa2_eth_channel *ch,
128                                         const struct dpaa2_fd *fd,
129                                         void *fd_vaddr)
130 {
131         struct sk_buff *skb = NULL;
132         u16 fd_offset = dpaa2_fd_get_offset(fd);
133         u32 fd_length = dpaa2_fd_get_len(fd);
134
135         skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
136                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
137         if (unlikely(!skb))
138                 return NULL;
139
140         skb_reserve(skb, fd_offset);
141         skb_put(skb, fd_length);
142
143         ch->buf_count--;
144
145         return skb;
146 }
147
148 /* Build a non-linear (fragmented) skb based on an S/G table */
149 static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
150                                       struct dpaa2_eth_channel *ch,
151                                       struct dpaa2_sg_entry *sgt)
152 {
153         struct sk_buff *skb = NULL;
154         struct device *dev = priv->net_dev->dev.parent;
155         void *sg_vaddr;
156         dma_addr_t sg_addr;
157         u16 sg_offset;
158         u32 sg_length;
159         struct page *page, *head_page;
160         int page_offset;
161         int i;
162
163         for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
164                 struct dpaa2_sg_entry *sge = &sgt[i];
165
166                 /* NOTE: We only support SG entries in dpaa2_sg_single format,
167                  * but this is the only format we may receive from HW anyway
168                  */
169
170                 /* Get the address and length from the S/G entry */
171                 sg_addr = dpaa2_sg_get_addr(sge);
172                 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
173                 dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
174                                  DMA_FROM_DEVICE);
175
176                 sg_length = dpaa2_sg_get_len(sge);
177
178                 if (i == 0) {
179                         /* We build the skb around the first data buffer */
180                         skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
181                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
182                         if (unlikely(!skb))
183                                 return NULL;
184
185                         sg_offset = dpaa2_sg_get_offset(sge);
186                         skb_reserve(skb, sg_offset);
187                         skb_put(skb, sg_length);
188                 } else {
189                         /* Rest of the data buffers are stored as skb frags */
190                         page = virt_to_page(sg_vaddr);
191                         head_page = virt_to_head_page(sg_vaddr);
192
193                         /* Offset in page (which may be compound).
194                          * Data in subsequent SG entries is stored from the
195                          * beginning of the buffer, so we don't need to add the
196                          * sg_offset.
197                          */
198                         page_offset = ((unsigned long)sg_vaddr &
199                                 (PAGE_SIZE - 1)) +
200                                 (page_address(page) - page_address(head_page));
201
202                         skb_add_rx_frag(skb, i - 1, head_page, page_offset,
203                                         sg_length, DPAA2_ETH_RX_BUF_SIZE);
204                 }
205
206                 if (dpaa2_sg_is_final(sge))
207                         break;
208         }
209
210         /* Count all data buffers + SG table buffer */
211         ch->buf_count -= i + 2;
212
213         return skb;
214 }
215
216 /* Main Rx frame processing routine */
217 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
218                          struct dpaa2_eth_channel *ch,
219                          const struct dpaa2_fd *fd,
220                          struct napi_struct *napi)
221 {
222         dma_addr_t addr = dpaa2_fd_get_addr(fd);
223         u8 fd_format = dpaa2_fd_get_format(fd);
224         void *vaddr;
225         struct sk_buff *skb;
226         struct rtnl_link_stats64 *percpu_stats;
227         struct dpaa2_eth_drv_stats *percpu_extras;
228         struct device *dev = priv->net_dev->dev.parent;
229         struct dpaa2_fas *fas;
230         u32 status = 0;
231
232         /* Tracing point */
233         trace_dpaa2_rx_fd(priv->net_dev, fd);
234
235         vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
236         dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
237
238         prefetch(vaddr + priv->buf_layout.private_data_size);
239         prefetch(vaddr + dpaa2_fd_get_offset(fd));
240
241         percpu_stats = this_cpu_ptr(priv->percpu_stats);
242         percpu_extras = this_cpu_ptr(priv->percpu_extras);
243
244         if (fd_format == dpaa2_fd_single) {
245                 skb = build_linear_skb(priv, ch, fd, vaddr);
246         } else if (fd_format == dpaa2_fd_sg) {
247                 struct dpaa2_sg_entry *sgt =
248                                 vaddr + dpaa2_fd_get_offset(fd);
249                 skb = build_frag_skb(priv, ch, sgt);
250                 skb_free_frag(vaddr);
251                 percpu_extras->rx_sg_frames++;
252                 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
253         } else {
254                 /* We don't support any other format */
255                 goto err_frame_format;
256         }
257
258         if (unlikely(!skb))
259                 goto err_build_skb;
260
261         prefetch(skb->data);
262
263         /* Check if we need to validate the L3/L4 csums */
264         if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
265                 fas = (struct dpaa2_fas *)
266                                 (vaddr + priv->buf_layout.private_data_size);
267                 status = le32_to_cpu(fas->status);
268                 validate_rx_csum(priv, status, skb);
269         }
270
271         skb->protocol = eth_type_trans(skb, priv->net_dev);
272
273         percpu_stats->rx_packets++;
274         percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
275
276         if (priv->net_dev->features & NETIF_F_GRO)
277                 napi_gro_receive(napi, skb);
278         else
279                 netif_receive_skb(skb);
280
281         return;
282
283 err_build_skb:
284         free_rx_fd(priv, fd, vaddr);
285 err_frame_format:
286         percpu_stats->rx_dropped++;
287 }
288
289 /* Consume all frames pull-dequeued into the store. This is the simplest way to
290  * make sure we don't accidentally issue another volatile dequeue which would
291  * overwrite (leak) frames already in the store.
292  *
293  * Observance of NAPI budget is not our concern, leaving that to the caller.
294  */
295 static int consume_frames(struct dpaa2_eth_channel *ch)
296 {
297         struct dpaa2_eth_priv *priv = ch->priv;
298         struct dpaa2_eth_fq *fq;
299         struct dpaa2_dq *dq;
300         const struct dpaa2_fd *fd;
301         int cleaned = 0;
302         int is_last;
303
304         do {
305                 dq = dpaa2_io_store_next(ch->store, &is_last);
306                 if (unlikely(!dq)) {
307                         /* If we're here, we *must* have placed a
308                          * volatile dequeue command, so keep reading through
309                          * the store until we get some sort of valid response
310                          * token (either a valid frame or an "empty dequeue")
311                          */
312                         continue;
313                 }
314
315                 fd = dpaa2_dq_fd(dq);
316                 fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
317                 fq->stats.frames++;
318
319                 fq->consume(priv, ch, fd, &ch->napi);
320                 cleaned++;
321         } while (!is_last);
322
323         return cleaned;
324 }
325
326 /* Create a frame descriptor based on a fragmented skb */
327 static int build_sg_fd(struct dpaa2_eth_priv *priv,
328                        struct sk_buff *skb,
329                        struct dpaa2_fd *fd)
330 {
331         struct device *dev = priv->net_dev->dev.parent;
332         void *sgt_buf = NULL;
333         void *hwa;
334         dma_addr_t addr;
335         int nr_frags = skb_shinfo(skb)->nr_frags;
336         struct dpaa2_sg_entry *sgt;
337         int i, err;
338         int sgt_buf_size;
339         struct scatterlist *scl, *crt_scl;
340         int num_sg;
341         int num_dma_bufs;
342         struct dpaa2_eth_swa *swa;
343
344         /* Create and map scatterlist.
345          * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
346          * to go beyond nr_frags+1.
347          * Note: We don't support chained scatterlists
348          */
349         if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
350                 return -EINVAL;
351
352         scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
353         if (unlikely(!scl))
354                 return -ENOMEM;
355
356         sg_init_table(scl, nr_frags + 1);
357         num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
358         num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
359         if (unlikely(!num_dma_bufs)) {
360                 err = -ENOMEM;
361                 goto dma_map_sg_failed;
362         }
363
364         /* Prepare the HW SGT structure */
365         sgt_buf_size = priv->tx_data_offset +
366                        sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
367         sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
368         if (unlikely(!sgt_buf)) {
369                 err = -ENOMEM;
370                 goto sgt_buf_alloc_failed;
371         }
372         sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
373
374         /* PTA from the egress side is passed as-is to the confirmation side,
375          * so we need to clear some fields here in order to find consistent
376          * values on TX confirmation. We are clearing the FAS (Frame Annotation
377          * Status) field from the hardware annotation area.
378          */
379         hwa = sgt_buf + priv->buf_layout.private_data_size;
380         memset(hwa + DPAA2_FAS_OFFSET, 0, DPAA2_FAS_SIZE);
381
382         sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
383
384         /* Fill in the HW SGT structure.
385          *
386          * sgt_buf is zeroed out, so the following fields are implicit
387          * in all sgt entries:
388          *   - offset is 0
389          *   - format is 'dpaa2_sg_single'
390          */
391         for_each_sg(scl, crt_scl, num_dma_bufs, i) {
392                 dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
393                 dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
394         }
395         dpaa2_sg_set_final(&sgt[i - 1], true);
396
397         /* Store the skb backpointer in the SGT buffer.
398          * Fit the scatterlist and the number of buffers alongside the
399          * skb backpointer in the software annotation area. We'll need
400          * all of them on Tx Conf.
401          */
402         swa = (struct dpaa2_eth_swa *)sgt_buf;
403         swa->skb = skb;
404         swa->scl = scl;
405         swa->num_sg = num_sg;
406         swa->num_dma_bufs = num_dma_bufs;
407
408         /* Separately map the SGT buffer */
409         addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
410         if (unlikely(dma_mapping_error(dev, addr))) {
411                 err = -ENOMEM;
412                 goto dma_map_single_failed;
413         }
414         dpaa2_fd_set_offset(fd, priv->tx_data_offset);
415         dpaa2_fd_set_format(fd, dpaa2_fd_sg);
416         dpaa2_fd_set_addr(fd, addr);
417         dpaa2_fd_set_len(fd, skb->len);
418         dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
419                           DPAA2_FD_CTRL_PTV1);
420
421         return 0;
422
423 dma_map_single_failed:
424         kfree(sgt_buf);
425 sgt_buf_alloc_failed:
426         dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
427 dma_map_sg_failed:
428         kfree(scl);
429         return err;
430 }
431
432 /* Create a frame descriptor based on a linear skb */
433 static int build_single_fd(struct dpaa2_eth_priv *priv,
434                            struct sk_buff *skb,
435                            struct dpaa2_fd *fd)
436 {
437         struct device *dev = priv->net_dev->dev.parent;
438         u8 *buffer_start;
439         void *hwa;
440         struct sk_buff **skbh;
441         dma_addr_t addr;
442
443         buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
444                                  DPAA2_ETH_TX_BUF_ALIGN,
445                                  DPAA2_ETH_TX_BUF_ALIGN);
446
447         /* PTA from the egress side is passed as-is to the confirmation side,
448          * so we need to clear some fields here in order to find consistent
449          * values on TX confirmation. We are clearing the FAS (Frame Annotation
450          * Status) field from the hardware annotation area.
451          */
452         hwa = buffer_start + priv->buf_layout.private_data_size;
453         memset(hwa + DPAA2_FAS_OFFSET, 0, DPAA2_FAS_SIZE);
454
455         /* Store a backpointer to the skb at the beginning of the buffer
456          * (in the private data area) such that we can release it
457          * on Tx confirm
458          */
459         skbh = (struct sk_buff **)buffer_start;
460         *skbh = skb;
461
462         addr = dma_map_single(dev, buffer_start,
463                               skb_tail_pointer(skb) - buffer_start,
464                               DMA_BIDIRECTIONAL);
465         if (unlikely(dma_mapping_error(dev, addr)))
466                 return -ENOMEM;
467
468         dpaa2_fd_set_addr(fd, addr);
469         dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
470         dpaa2_fd_set_len(fd, skb->len);
471         dpaa2_fd_set_format(fd, dpaa2_fd_single);
472         dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
473                           DPAA2_FD_CTRL_PTV1);
474
475         return 0;
476 }
477
478 /* FD freeing routine on the Tx path
479  *
480  * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
481  * back-pointed to is also freed.
482  * This can be called either from dpaa2_eth_tx_conf() or on the error path of
483  * dpaa2_eth_tx().
484  * Optionally, return the frame annotation status word (FAS), which needs
485  * to be checked if we're on the confirmation path.
486  */
487 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
488                        const struct dpaa2_fd *fd,
489                        u32 *status)
490 {
491         struct device *dev = priv->net_dev->dev.parent;
492         dma_addr_t fd_addr;
493         struct sk_buff **skbh, *skb;
494         unsigned char *buffer_start;
495         int unmap_size;
496         struct scatterlist *scl;
497         int num_sg, num_dma_bufs;
498         struct dpaa2_eth_swa *swa;
499         u8 fd_format = dpaa2_fd_get_format(fd);
500         struct dpaa2_fas *fas;
501
502         fd_addr = dpaa2_fd_get_addr(fd);
503         skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
504
505         if (fd_format == dpaa2_fd_single) {
506                 skb = *skbh;
507                 buffer_start = (unsigned char *)skbh;
508                 /* Accessing the skb buffer is safe before dma unmap, because
509                  * we didn't map the actual skb shell.
510                  */
511                 dma_unmap_single(dev, fd_addr,
512                                  skb_tail_pointer(skb) - buffer_start,
513                                  DMA_BIDIRECTIONAL);
514         } else if (fd_format == dpaa2_fd_sg) {
515                 swa = (struct dpaa2_eth_swa *)skbh;
516                 skb = swa->skb;
517                 scl = swa->scl;
518                 num_sg = swa->num_sg;
519                 num_dma_bufs = swa->num_dma_bufs;
520
521                 /* Unmap the scatterlist */
522                 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
523                 kfree(scl);
524
525                 /* Unmap the SGT buffer */
526                 unmap_size = priv->tx_data_offset +
527                        sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
528                 dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
529         } else {
530                 /* Unsupported format, mark it as errored and give up */
531                 if (status)
532                         *status = ~0;
533                 return;
534         }
535
536         /* Read the status from the Frame Annotation after we unmap the first
537          * buffer but before we free it. The caller function is responsible
538          * for checking the status value.
539          */
540         if (status && (dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
541                 fas = (struct dpaa2_fas *)
542                         ((void *)skbh + priv->buf_layout.private_data_size);
543                 *status = le32_to_cpu(fas->status);
544         }
545
546         /* Free SGT buffer kmalloc'ed on tx */
547         if (fd_format != dpaa2_fd_single)
548                 kfree(skbh);
549
550         /* Move on with skb release */
551         dev_kfree_skb(skb);
552 }
553
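/* Main Tx routine: build a frame descriptor for the skb and enqueue it on a
 * Tx queue, retrying a limited number of times if the QBMan portal is busy.
 */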
554 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
555 {
556         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
557         struct dpaa2_fd fd;
558         struct rtnl_link_stats64 *percpu_stats;
559         struct dpaa2_eth_drv_stats *percpu_extras;
560         struct dpaa2_eth_fq *fq;
561         u16 queue_mapping;
562         int err, i;
563
564         percpu_stats = this_cpu_ptr(priv->percpu_stats);
565         percpu_extras = this_cpu_ptr(priv->percpu_extras);
566
567         if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
568                 struct sk_buff *ns;
569
570                 ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
571                 if (unlikely(!ns)) {
572                         percpu_stats->tx_dropped++;
573                         goto err_alloc_headroom;
574                 }
575                 dev_kfree_skb(skb);
576                 skb = ns;
577         }
578
579         /* We'll be holding a back-reference to the skb until Tx Confirmation;
580          * we don't want that overwritten by a concurrent Tx with a cloned skb.
581          */
582         skb = skb_unshare(skb, GFP_ATOMIC);
583         if (unlikely(!skb)) {
584                 /* skb_unshare() has already freed the skb */
585                 percpu_stats->tx_dropped++;
586                 return NETDEV_TX_OK;
587         }
588
589         /* Setup the FD fields */
590         memset(&fd, 0, sizeof(fd));
591
592         if (skb_is_nonlinear(skb)) {
593                 err = build_sg_fd(priv, skb, &fd);
594                 percpu_extras->tx_sg_frames++;
595                 percpu_extras->tx_sg_bytes += skb->len;
596         } else {
597                 err = build_single_fd(priv, skb, &fd);
598         }
599
600         if (unlikely(err)) {
601                 percpu_stats->tx_dropped++;
602                 goto err_build_fd;
603         }
604
605         /* Tracing point */
606         trace_dpaa2_tx_fd(net_dev, &fd);
607
608         /* TxConf FQ selection is primarily based on CPU affinity; this is
609          * non-migratable context, so it's safe to call smp_processor_id().
610          */
611         queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
612         fq = &priv->fq[queue_mapping];
613         for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
614                 err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
615                                                   fq->tx_qdbin, &fd);
616                 if (err != -EBUSY)
617                         break;
618         }
619         percpu_extras->tx_portal_busy += i;
620         if (unlikely(err < 0)) {
621                 percpu_stats->tx_errors++;
622                 /* Clean up everything, including freeing the skb */
623                 free_tx_fd(priv, &fd, NULL);
624         } else {
625                 percpu_stats->tx_packets++;
626                 percpu_stats->tx_bytes += skb->len;
627         }
628
629         return NETDEV_TX_OK;
630
631 err_build_fd:
632 err_alloc_headroom:
633         dev_kfree_skb(skb);
634
635         return NETDEV_TX_OK;
636 }
637
638 /* Tx confirmation frame processing routine */
639 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
640                               struct dpaa2_eth_channel *ch,
641                               const struct dpaa2_fd *fd,
642                               struct napi_struct *napi __always_unused)
643 {
644         struct rtnl_link_stats64 *percpu_stats;
645         struct dpaa2_eth_drv_stats *percpu_extras;
646         u32 status = 0;
647
648         /* Tracing point */
649         trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
650
651         percpu_extras = this_cpu_ptr(priv->percpu_extras);
652         percpu_extras->tx_conf_frames++;
653         percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
654
655         free_tx_fd(priv, fd, &status);
656
657         if (unlikely(status & DPAA2_ETH_TXCONF_ERR_MASK)) {
658                 percpu_stats = this_cpu_ptr(priv->percpu_stats);
659                 /* Tx-conf logically pertains to the egress path. */
660                 percpu_stats->tx_errors++;
661         }
662 }
663
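/* Enable/disable Rx L3 and L4 checksum validation offload in hardware */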
664 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
665 {
666         int err;
667
668         err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
669                                DPNI_OFF_RX_L3_CSUM, enable);
670         if (err) {
671                 netdev_err(priv->net_dev,
672                            "dpni_set_offload(RX_L3_CSUM) failed\n");
673                 return err;
674         }
675
676         err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
677                                DPNI_OFF_RX_L4_CSUM, enable);
678         if (err) {
679                 netdev_err(priv->net_dev,
680                            "dpni_set_offload(RX_L4_CSUM) failed\n");
681                 return err;
682         }
683
684         return 0;
685 }
686
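/* Enable/disable Tx L3 and L4 checksum generation offload in hardware */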
687 static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
688 {
689         int err;
690
691         err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
692                                DPNI_OFF_TX_L3_CSUM, enable);
693         if (err) {
694                 netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
695                 return err;
696         }
697
698         err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
699                                DPNI_OFF_TX_L4_CSUM, enable);
700         if (err) {
701                 netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
702                 return err;
703         }
704
705         return 0;
706 }
707
708 /* Perform a single release command to add buffers
709  * to the specified buffer pool
710  */
711 static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
712 {
713         struct device *dev = priv->net_dev->dev.parent;
714         u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
715         void *buf;
716         dma_addr_t addr;
717         int i;
718
719         for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
720                 /* Allocate buffer visible to WRIOP + skb shared info +
721                  * alignment padding
722                  */
723                 buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
724                 if (unlikely(!buf))
725                         goto err_alloc;
726
727                 buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
728
729                 addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
730                                       DMA_FROM_DEVICE);
731                 if (unlikely(dma_mapping_error(dev, addr)))
732                         goto err_map;
733
734                 buf_array[i] = addr;
735
736                 /* tracing point */
737                 trace_dpaa2_eth_buf_seed(priv->net_dev,
738                                          buf, DPAA2_ETH_BUF_RAW_SIZE,
739                                          addr, DPAA2_ETH_RX_BUF_SIZE,
740                                          bpid);
741         }
742
743 release_bufs:
744         /* In case the portal is busy, retry until successful.
745          * The buffer release function would only fail if the QBMan portal
746          * was busy, which implies portal contention (i.e. more CPUs than
747          * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
748          * there is little we can realistically do, short of giving up -
749          * in which case we'd risk depleting the buffer pool and never again
750          * receiving the Rx interrupt which would kick-start the refill logic.
751          * So just keep retrying, at the risk of being moved to ksoftirqd.
752          */
753         while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
754                 cpu_relax();
755         return i;
756
757 err_map:
758         skb_free_frag(buf);
759 err_alloc:
760         if (i)
761                 goto release_bufs;
762
763         return 0;
764 }
765
766 static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
767 {
768         int i, j;
769         int new_count;
770
771         /* This is the lazy seeding of Rx buffer pools.
772          * add_bufs() is also used on the Rx hot path and calls
773          * napi_alloc_frag(). The trouble with that is that it in turn ends up
774          * calling this_cpu_ptr(), which mandates execution in atomic context.
775          * Rather than splitting up the code, do a one-off preempt disable.
776          */
777         preempt_disable();
778         for (j = 0; j < priv->num_channels; j++) {
779                 for (i = 0; i < DPAA2_ETH_NUM_BUFS;
780                      i += DPAA2_ETH_BUFS_PER_CMD) {
781                         new_count = add_bufs(priv, bpid);
782                         priv->channel[j]->buf_count += new_count;
783
784                         if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
785                                 preempt_enable();
786                                 return -ENOMEM;
787                         }
788                 }
789         }
790         preempt_enable();
791
792         return 0;
793 }
794
795 /**
796  * Drain the specified number of buffers from the DPNI's private buffer pool.
797  * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
798  */
799 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
800 {
801         struct device *dev = priv->net_dev->dev.parent;
802         u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
803         void *vaddr;
804         int ret, i;
805
806         do {
807                 ret = dpaa2_io_service_acquire(NULL, priv->dpbp_attrs.bpid,
808                                                buf_array, count);
809                 if (ret < 0) {
810                         netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
811                         return;
812                 }
813                 for (i = 0; i < ret; i++) {
814                         /* Same logic as on regular Rx path */
815                         vaddr = dpaa2_iova_to_virt(priv->iommu_domain,
816                                                    buf_array[i]);
817                         dma_unmap_single(dev, buf_array[i],
818                                          DPAA2_ETH_RX_BUF_SIZE,
819                                          DMA_FROM_DEVICE);
820                         skb_free_frag(vaddr);
821                 }
822         } while (ret);
823 }
824
825 static void drain_pool(struct dpaa2_eth_priv *priv)
826 {
827         int i;
828
829         drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
830         drain_bufs(priv, 1);
831
832         for (i = 0; i < priv->num_channels; i++)
833                 priv->channel[i]->buf_count = 0;
834 }
835
836 /* Function is called from softirq context only, so we don't need to guard
837  * the access to percpu count
838  */
839 static int refill_pool(struct dpaa2_eth_priv *priv,
840                        struct dpaa2_eth_channel *ch,
841                        u16 bpid)
842 {
843         int new_count;
844
845         if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
846                 return 0;
847
848         do {
849                 new_count = add_bufs(priv, bpid);
850                 if (unlikely(!new_count)) {
851                         /* Out of memory; abort for now, we'll try later on */
852                         break;
853                 }
854                 ch->buf_count += new_count;
855         } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
856
857         if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
858                 return -ENOMEM;
859
860         return 0;
861 }
862
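/* Issue a volatile (pull) dequeue on the channel, placing frames into its
 * store; retry while the QBMan portal is busy.
 */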
863 static int pull_channel(struct dpaa2_eth_channel *ch)
864 {
865         int err;
866         int dequeues = -1;
867
868         /* Retry while portal is busy */
869         do {
870                 err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
871                 dequeues++;
872                 cpu_relax();
873         } while (err == -EBUSY);
874
875         ch->stats.dequeue_portal_busy += dequeues;
876         if (unlikely(err))
877                 ch->stats.pull_err++;
878
879         return err;
880 }
881
882 /* NAPI poll routine
883  *
884  * Frames are dequeued from the QMan channel associated with this NAPI context.
885  * Rx, Tx confirmation and (if configured) Rx error frames all count
886  * towards the NAPI budget.
887  */
888 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
889 {
890         struct dpaa2_eth_channel *ch;
891         int cleaned = 0, store_cleaned;
892         struct dpaa2_eth_priv *priv;
893         int err;
894
895         ch = container_of(napi, struct dpaa2_eth_channel, napi);
896         priv = ch->priv;
897
898         while (cleaned < budget) {
899                 err = pull_channel(ch);
900                 if (unlikely(err))
901                         break;
902
903                 /* Refill pool if appropriate */
904                 refill_pool(priv, ch, priv->dpbp_attrs.bpid);
905
906                 store_cleaned = consume_frames(ch);
907                 cleaned += store_cleaned;
908
909                 /* If we have enough budget left for a full store,
910                  * try a new pull dequeue, otherwise we're done here
911                  */
912                 if (store_cleaned == 0 ||
913                     cleaned > budget - DPAA2_ETH_STORE_SIZE)
914                         break;
915         }
916
917         if (cleaned < budget) {
918                 napi_complete_done(napi, cleaned);
919                 /* Re-enable data available notifications */
920                 do {
921                         err = dpaa2_io_service_rearm(NULL, &ch->nctx);
922                         cpu_relax();
923                 } while (err == -EBUSY);
924         }
925
926         ch->stats.frames += cleaned;
927
928         return cleaned;
929 }
930
931 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
932 {
933         struct dpaa2_eth_channel *ch;
934         int i;
935
936         for (i = 0; i < priv->num_channels; i++) {
937                 ch = priv->channel[i];
938                 napi_enable(&ch->napi);
939         }
940 }
941
942 static void disable_ch_napi(struct dpaa2_eth_priv *priv)
943 {
944         struct dpaa2_eth_channel *ch;
945         int i;
946
947         for (i = 0; i < priv->num_channels; i++) {
948                 ch = priv->channel[i];
949                 napi_disable(&ch->napi);
950         }
951 }
952
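/* Query the MC firmware for the current link state and update the netdev
 * carrier and Tx queue state accordingly.
 */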
953 static int link_state_update(struct dpaa2_eth_priv *priv)
954 {
955         struct dpni_link_state state;
956         int err;
957
958         err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
959         if (unlikely(err)) {
960                 netdev_err(priv->net_dev,
961                            "dpni_get_link_state() failed\n");
962                 return err;
963         }
964
965         /* Check link state; speed/duplex changes are not handled yet */
966         if (priv->link_state.up == state.up)
967                 return 0;
968
969         priv->link_state = state;
970         if (state.up) {
971                 netif_carrier_on(priv->net_dev);
972                 netif_tx_start_all_queues(priv->net_dev);
973         } else {
974                 netif_tx_stop_all_queues(priv->net_dev);
975                 netif_carrier_off(priv->net_dev);
976         }
977
978         netdev_info(priv->net_dev, "Link Event: state %s\n",
979                     state.up ? "up" : "down");
980
981         return 0;
982 }
983
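/* ndo_open callback: seed the buffer pool, enable NAPI and the DPNI, then
 * sync up with the current link state.
 */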
984 static int dpaa2_eth_open(struct net_device *net_dev)
985 {
986         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
987         int err;
988
989         err = seed_pool(priv, priv->dpbp_attrs.bpid);
990         if (err) {
991                 /* Not much to do; the buffer pool, though not filled up,
992                  * may still contain some buffers which would enable us
993                  * to limp on.
994                  */
995                 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
996                            priv->dpbp_dev->obj_desc.id, priv->dpbp_attrs.bpid);
997         }
998
999         /* We'll only start the txqs when the link is actually ready; make sure
1000          * we don't race against the link up notification, which may come
1001          * immediately after dpni_enable();
1002          */
1003         netif_tx_stop_all_queues(net_dev);
1004         enable_ch_napi(priv);
1005         /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
1006          * return true and cause 'ip link show' to report the LOWER_UP flag,
1007          * even though the link notification wasn't even received.
1008          */
1009         netif_carrier_off(net_dev);
1010
1011         err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1012         if (err < 0) {
1013                 netdev_err(net_dev, "dpni_enable() failed\n");
1014                 goto enable_err;
1015         }
1016
1017         /* If the DPMAC object has already processed the link up interrupt,
1018          * we have to learn the link state ourselves.
1019          */
1020         err = link_state_update(priv);
1021         if (err < 0) {
1022                 netdev_err(net_dev, "Can't update link state\n");
1023                 goto link_state_err;
1024         }
1025
1026         return 0;
1027
1028 link_state_err:
1029 enable_err:
1030         disable_ch_napi(priv);
1031         drain_pool(priv);
1032         return err;
1033 }
1034
1035 /* The DPIO store must be empty when we call this,
1036  * at the end of every NAPI cycle.
1037  */
1038 static u32 drain_channel(struct dpaa2_eth_priv *priv,
1039                          struct dpaa2_eth_channel *ch)
1040 {
1041         u32 drained = 0, total = 0;
1042
1043         do {
1044                 pull_channel(ch);
1045                 drained = consume_frames(ch);
1046                 total += drained;
1047         } while (drained);
1048
1049         return total;
1050 }
1051
1052 static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
1053 {
1054         struct dpaa2_eth_channel *ch;
1055         int i;
1056         u32 drained = 0;
1057
1058         for (i = 0; i < priv->num_channels; i++) {
1059                 ch = priv->channel[i];
1060                 drained += drain_channel(priv, ch);
1061         }
1062
1063         return drained;
1064 }
1065
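/* ndo_stop callback: disable the DPNI, quiesce NAPI, then drain the ingress
 * queues and empty the buffer pool.
 */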
1066 static int dpaa2_eth_stop(struct net_device *net_dev)
1067 {
1068         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1069         int dpni_enabled;
1070         int retries = 10;
1071         u32 drained;
1072
1073         netif_tx_stop_all_queues(net_dev);
1074         netif_carrier_off(net_dev);
1075
1076         /* Loop while dpni_disable() attempts to drain the egress FQs
1077          * and confirm them back to us.
1078          */
1079         do {
1080                 dpni_disable(priv->mc_io, 0, priv->mc_token);
1081                 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1082                 if (dpni_enabled)
1083                         /* Allow the hardware some slack */
1084                         msleep(100);
1085         } while (dpni_enabled && --retries);
1086         if (!retries) {
1087                 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1088                 /* Must go on and disable NAPI nonetheless, so we don't crash at
1089                  * the next "ifconfig up"
1090                  */
1091         }
1092
1093         /* Wait for NAPI to complete on every core and disable it.
1094          * In particular, this will also prevent NAPI from being rescheduled if
1095          * a new CDAN is serviced, effectively discarding the CDAN. We therefore
1096          * don't even need to disarm the channels, except perhaps for the case
1097          * of a huge coalescing value.
1098          */
1099         disable_ch_napi(priv);
1100
1101         /* Manually drain the Rx and TxConf queues */
1102         drained = drain_ingress_frames(priv);
1103         if (drained)
1104                 netdev_dbg(net_dev, "Drained %d frames.\n", drained);
1105
1106         /* Empty the buffer pool */
1107         drain_pool(priv);
1108
1109         return 0;
1110 }
1111
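/* ndo_init callback: advertise the driver's capabilities and supported
 * features to the network stack.
 */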
1112 static int dpaa2_eth_init(struct net_device *net_dev)
1113 {
1114         u64 supported = 0;
1115         u64 not_supported = 0;
1116         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1117         u32 options = priv->dpni_attrs.options;
1118
1119         /* Capabilities listing */
1120         supported |= IFF_LIVE_ADDR_CHANGE;
1121
1122         if (options & DPNI_OPT_NO_MAC_FILTER)
1123                 not_supported |= IFF_UNICAST_FLT;
1124         else
1125                 supported |= IFF_UNICAST_FLT;
1126
1127         net_dev->priv_flags |= supported;
1128         net_dev->priv_flags &= ~not_supported;
1129
1130         /* Features */
1131         net_dev->features = NETIF_F_RXCSUM |
1132                             NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1133                             NETIF_F_SG | NETIF_F_HIGHDMA |
1134                             NETIF_F_LLTX;
1135         net_dev->hw_features = net_dev->features;
1136
1137         return 0;
1138 }
1139
1140 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1141 {
1142         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1143         struct device *dev = net_dev->dev.parent;
1144         int err;
1145
1146         err = eth_mac_addr(net_dev, addr);
1147         if (err < 0) {
1148                 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1149                 return err;
1150         }
1151
1152         err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1153                                         net_dev->dev_addr);
1154         if (err) {
1155                 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1156                 return err;
1157         }
1158
1159         return 0;
1160 }
1161
1162 /** Fill in counters maintained by the GPP driver. These may be different from
1163  * the hardware counters obtained by ethtool.
1164  */
1165 static void dpaa2_eth_get_stats(struct net_device *net_dev,
1166                                 struct rtnl_link_stats64 *stats)
1167 {
1168         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1169         struct rtnl_link_stats64 *percpu_stats;
1170         u64 *cpustats;
1171         u64 *netstats = (u64 *)stats;
1172         int i, j;
1173         int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1174
1175         for_each_possible_cpu(i) {
1176                 percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1177                 cpustats = (u64 *)percpu_stats;
1178                 for (j = 0; j < num; j++)
1179                         netstats[j] += cpustats[j];
1180         }
1181 }
1182
1183 static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
1184 {
1185         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1186         int err;
1187
1188         /* Set the maximum Rx frame length to match the transmit side;
1189          * account for L2 headers when computing the MFL
1190          */
1191         err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
1192                                         (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
1193         if (err) {
1194                 netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
1195                 return err;
1196         }
1197
1198         net_dev->mtu = mtu;
1199         return 0;
1200 }
1201
1202 /* Copy mac unicast addresses from @net_dev to @priv.
1203  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1204  */
1205 static void add_uc_hw_addr(const struct net_device *net_dev,
1206                            struct dpaa2_eth_priv *priv)
1207 {
1208         struct netdev_hw_addr *ha;
1209         int err;
1210
1211         netdev_for_each_uc_addr(ha, net_dev) {
1212                 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1213                                         ha->addr);
1214                 if (err)
1215                         netdev_warn(priv->net_dev,
1216                                     "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1217                                     ha->addr, err);
1218         }
1219 }
1220
1221 /* Copy mac multicast addresses from @net_dev to @priv
1222  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1223  */
1224 static void add_mc_hw_addr(const struct net_device *net_dev,
1225                            struct dpaa2_eth_priv *priv)
1226 {
1227         struct netdev_hw_addr *ha;
1228         int err;
1229
1230         netdev_for_each_mc_addr(ha, net_dev) {
1231                 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1232                                         ha->addr);
1233                 if (err)
1234                         netdev_warn(priv->net_dev,
1235                                     "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1236                                     ha->addr, err);
1237         }
1238 }
1239
1240 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
1241 {
1242         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1243         int uc_count = netdev_uc_count(net_dev);
1244         int mc_count = netdev_mc_count(net_dev);
1245         u8 max_mac = priv->dpni_attrs.mac_filter_entries;
1246         u32 options = priv->dpni_attrs.options;
1247         u16 mc_token = priv->mc_token;
1248         struct fsl_mc_io *mc_io = priv->mc_io;
1249         int err;
1250
1251         /* Basic sanity checks; these probably indicate a misconfiguration */
1252         if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
1253                 netdev_info(net_dev,
1254                             "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
1255                             max_mac);
1256
1257         /* Force promiscuous if the uc or mc counts exceed our capabilities. */
1258         if (uc_count > max_mac) {
1259                 netdev_info(net_dev,
1260                             "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
1261                             uc_count, max_mac);
1262                 goto force_promisc;
1263         }
1264         if (mc_count + uc_count > max_mac) {
1265                 netdev_info(net_dev,
1266                             "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
1267                             uc_count + mc_count, max_mac);
1268                 goto force_mc_promisc;
1269         }
1270
1271         /* Adjust promisc settings due to flag combinations */
1272         if (net_dev->flags & IFF_PROMISC)
1273                 goto force_promisc;
1274         if (net_dev->flags & IFF_ALLMULTI) {
1275                 /* First, rebuild unicast filtering table. This should be done
1276                  * in promisc mode, in order to avoid frame loss while we
1277                  * progressively add entries to the table.
1278                  * We don't know whether we had been in promisc already, and
1279                  * making an MC call to find out is expensive; so set uc promisc
1280                  * nonetheless.
1281                  */
1282                 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1283                 if (err)
1284                         netdev_warn(net_dev, "Can't set uc promisc\n");
1285
1286                 /* Actual uc table reconstruction. */
1287                 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
1288                 if (err)
1289                         netdev_warn(net_dev, "Can't clear uc filters\n");
1290                 add_uc_hw_addr(net_dev, priv);
1291
1292                 /* Finally, clear uc promisc and set mc promisc as requested. */
1293                 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1294                 if (err)
1295                         netdev_warn(net_dev, "Can't clear uc promisc\n");
1296                 goto force_mc_promisc;
1297         }
1298
1299         /* Neither unicast, nor multicast promisc will be on... eventually.
1300          * For now, rebuild mac filtering tables while forcing both of them on.
1301          */
1302         err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1303         if (err)
1304                 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
1305         err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1306         if (err)
1307                 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
1308
1309         /* Actual mac filtering tables reconstruction */
1310         err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
1311         if (err)
1312                 netdev_warn(net_dev, "Can't clear mac filters\n");
1313         add_mc_hw_addr(net_dev, priv);
1314         add_uc_hw_addr(net_dev, priv);
1315
1316         /* Now we can clear both ucast and mcast promisc, without risking
1317          * to drop legitimate frames anymore.
1318          */
1319         err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1320         if (err)
1321                 netdev_warn(net_dev, "Can't clear ucast promisc\n");
1322         err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
1323         if (err)
1324                 netdev_warn(net_dev, "Can't clear mcast promisc\n");
1325
1326         return;
1327
1328 force_promisc:
1329         err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1330         if (err)
1331                 netdev_warn(net_dev, "Can't set ucast promisc\n");
1332 force_mc_promisc:
1333         err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1334         if (err)
1335                 netdev_warn(net_dev, "Can't set mcast promisc\n");
1336 }
1337
1338 static int dpaa2_eth_set_features(struct net_device *net_dev,
1339                                   netdev_features_t features)
1340 {
1341         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1342         netdev_features_t changed = features ^ net_dev->features;
1343         bool enable;
1344         int err;
1345
1346         if (changed & NETIF_F_RXCSUM) {
1347                 enable = !!(features & NETIF_F_RXCSUM);
1348                 err = set_rx_csum(priv, enable);
1349                 if (err)
1350                         return err;
1351         }
1352
1353         if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1354                 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
1355                 err = set_tx_csum(priv, enable);
1356                 if (err)
1357                         return err;
1358         }
1359
1360         return 0;
1361 }
1362
1363 static const struct net_device_ops dpaa2_eth_ops = {
1364         .ndo_open = dpaa2_eth_open,
1365         .ndo_start_xmit = dpaa2_eth_tx,
1366         .ndo_stop = dpaa2_eth_stop,
1367         .ndo_init = dpaa2_eth_init,
1368         .ndo_set_mac_address = dpaa2_eth_set_addr,
1369         .ndo_get_stats64 = dpaa2_eth_get_stats,
1370         .ndo_change_mtu = dpaa2_eth_change_mtu,
1371         .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
1372         .ndo_set_features = dpaa2_eth_set_features,
1373 };
1374
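/* Channel Data Availability Notification (CDAN) callback: account the event
 * and schedule the channel's NAPI instance.
 */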
1375 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
1376 {
1377         struct dpaa2_eth_channel *ch;
1378
1379         ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
1380
1381         /* Update NAPI statistics */
1382         ch->stats.cdan++;
1383
1384         napi_schedule_irqoff(&ch->napi);
1385 }
1386
1387 /* Allocate and configure a DPCON object */
1388 static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
1389 {
1390         struct fsl_mc_device *dpcon;
1391         struct device *dev = priv->net_dev->dev.parent;
1392         struct dpcon_attr attrs;
1393         int err;
1394
1395         err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
1396                                      FSL_MC_POOL_DPCON, &dpcon);
1397         if (err) {
1398                 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
1399                 return NULL;
1400         }
1401
1402         err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
1403         if (err) {
1404                 dev_err(dev, "dpcon_open() failed\n");
1405                 goto err_open;
1406         }
1407
1408         err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
1409         if (err) {
1410                 dev_err(dev, "dpcon_reset() failed\n");
1411                 goto err_reset;
1412         }
1413
1414         err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
1415         if (err) {
1416                 dev_err(dev, "dpcon_get_attributes() failed\n");
1417                 goto err_get_attr;
1418         }
1419
1420         err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
1421         if (err) {
1422                 dev_err(dev, "dpcon_enable() failed\n");
1423                 goto err_enable;
1424         }
1425
1426         return dpcon;
1427
1428 err_enable:
1429 err_get_attr:
1430 err_reset:
1431         dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1432 err_open:
1433         fsl_mc_object_free(dpcon);
1434
1435         return NULL;
1436 }
1437
1438 static void free_dpcon(struct dpaa2_eth_priv *priv,
1439                        struct fsl_mc_device *dpcon)
1440 {
1441         dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
1442         dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1443         fsl_mc_object_free(dpcon);
1444 }
1445
1446 static struct dpaa2_eth_channel *
1447 alloc_channel(struct dpaa2_eth_priv *priv)
1448 {
1449         struct dpaa2_eth_channel *channel;
1450         struct dpcon_attr attr;
1451         struct device *dev = priv->net_dev->dev.parent;
1452         int err;
1453
1454         channel = kzalloc(sizeof(*channel), GFP_KERNEL);
1455         if (!channel)
1456                 return NULL;
1457
1458         channel->dpcon = setup_dpcon(priv);
1459         if (!channel->dpcon)
1460                 goto err_setup;
1461
1462         err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
1463                                    &attr);
1464         if (err) {
1465                 dev_err(dev, "dpcon_get_attributes() failed\n");
1466                 goto err_get_attr;
1467         }
1468
1469         channel->dpcon_id = attr.id;
1470         channel->ch_id = attr.qbman_ch_id;
1471         channel->priv = priv;
1472
1473         return channel;
1474
1475 err_get_attr:
1476         free_dpcon(priv, channel->dpcon);
1477 err_setup:
1478         kfree(channel);
1479         return NULL;
1480 }
1481
1482 static void free_channel(struct dpaa2_eth_priv *priv,
1483                          struct dpaa2_eth_channel *channel)
1484 {
1485         free_dpcon(priv, channel->dpcon);
1486         kfree(channel);
1487 }
1488
1489 /* DPIO setup: allocate and configure QBMan channels, set up core affinity
1490  * and register data availability notifications
1491  */
1492 static int setup_dpio(struct dpaa2_eth_priv *priv)
1493 {
1494         struct dpaa2_io_notification_ctx *nctx;
1495         struct dpaa2_eth_channel *channel;
1496         struct dpcon_notification_cfg dpcon_notif_cfg;
1497         struct device *dev = priv->net_dev->dev.parent;
1498         int i, err;
1499
1500         /* We want the ability to spread ingress traffic (RX, TX conf) to as
1501          * many cores as possible, so we need one channel for each core
1502          * (unless there are fewer queues than cores, in which case the extra
1503          * channels would be wasted).
1504          * Allocate one channel per core and register it to the core's
1505          * affine DPIO. If not enough channels are available for all cores
1506          * or if some cores don't have an affine DPIO, there will be no
1507          * ingress frame processing on those cores.
1508          */
1509         cpumask_clear(&priv->dpio_cpumask);
1510         for_each_online_cpu(i) {
1511                 /* Try to allocate a channel */
1512                 channel = alloc_channel(priv);
1513                 if (!channel) {
1514                         dev_info(dev,
1515                                  "No affine channel for cpu %d and above\n", i);
1516                         goto err_alloc_ch;
1517                 }
1518
1519                 priv->channel[priv->num_channels] = channel;
1520
1521                 nctx = &channel->nctx;
1522                 nctx->is_cdan = 1;
1523                 nctx->cb = cdan_cb;
1524                 nctx->id = channel->ch_id;
1525                 nctx->desired_cpu = i;
1526
1527                 /* Register the new context */
1528                 err = dpaa2_io_service_register(NULL, nctx);
1529                 if (err) {
1530                         dev_info(dev, "No affine DPIO for cpu %d\n", i);
1531                         /* If there is no affine DPIO for this core, there is
1532                          * probably none available for the next cores either.
1533                          */
1534                         goto err_service_reg;
1535                 }
1536
1537                 /* Register DPCON notification with MC */
1538                 dpcon_notif_cfg.dpio_id = nctx->dpio_id;
1539                 dpcon_notif_cfg.priority = 0;
1540                 dpcon_notif_cfg.user_ctx = nctx->qman64;
1541                 err = dpcon_set_notification(priv->mc_io, 0,
1542                                              channel->dpcon->mc_handle,
1543                                              &dpcon_notif_cfg);
1544                 if (err) {
1545                         dev_err(dev, "dpcon_set_notification() failed\n");
1546                         goto err_set_cdan;
1547                 }
1548
1549                 /* If we managed to allocate a channel and also found an affine
1550                  * DPIO for this core, add it to the final mask
1551                  */
1552                 cpumask_set_cpu(i, &priv->dpio_cpumask);
1553                 priv->num_channels++;
1554
1555                 /* Stop if we already have enough channels to accommodate all
1556                  * RX and TX conf queues
1557                  */
1558                 if (priv->num_channels == dpaa2_eth_queue_count(priv))
1559                         break;
1560         }
1561
1562         return 0;
1563
1564 err_set_cdan:
1565         dpaa2_io_service_deregister(NULL, nctx);
1566 err_service_reg:
1567         free_channel(priv, channel);
1568 err_alloc_ch:
1569         if (cpumask_empty(&priv->dpio_cpumask)) {
1570                 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
1571                 return -ENODEV;
1572         }
1573
1574         dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
1575                  cpumask_pr_args(&priv->dpio_cpumask));
1576
1577         return 0;
1578 }
1579
1580 static void free_dpio(struct dpaa2_eth_priv *priv)
1581 {
1582         int i;
1583         struct dpaa2_eth_channel *ch;
1584
1585         /* deregister CDAN notifications and free channels */
1586         for (i = 0; i < priv->num_channels; i++) {
1587                 ch = priv->channel[i];
1588                 dpaa2_io_service_deregister(NULL, &ch->nctx);
1589                 free_channel(priv, ch);
1590         }
1591 }
1592
1593 static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
1594                                                     int cpu)
1595 {
1596         struct device *dev = priv->net_dev->dev.parent;
1597         int i;
1598
1599         for (i = 0; i < priv->num_channels; i++)
1600                 if (priv->channel[i]->nctx.desired_cpu == cpu)
1601                         return priv->channel[i];
1602
1603         /* We should never get here. Issue a warning and return
1604          * the first channel, because it's still better than nothing
1605          */
1606         dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
1607
1608         return priv->channel[0];
1609 }
1610
1611 static void set_fq_affinity(struct dpaa2_eth_priv *priv)
1612 {
1613         struct device *dev = priv->net_dev->dev.parent;
1614         struct dpaa2_eth_fq *fq;
1615         int rx_cpu, txc_cpu;
1616         int i;
1617
1618         /* For each FQ, pick one channel/CPU to deliver frames to.
1619          * This may well change at runtime, either through irqbalance or
1620          * through direct user intervention.
1621          */
1622         rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
1623
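        /* Spread Rx and Tx confirmation FQs round-robin, each type
         * independently, over the cores that ended up with an affine channel.
         */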
1624         for (i = 0; i < priv->num_fqs; i++) {
1625                 fq = &priv->fq[i];
1626                 switch (fq->type) {
1627                 case DPAA2_RX_FQ:
1628                         fq->target_cpu = rx_cpu;
1629                         rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
1630                         if (rx_cpu >= nr_cpu_ids)
1631                                 rx_cpu = cpumask_first(&priv->dpio_cpumask);
1632                         break;
1633                 case DPAA2_TX_CONF_FQ:
1634                         fq->target_cpu = txc_cpu;
1635                         txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
1636                         if (txc_cpu >= nr_cpu_ids)
1637                                 txc_cpu = cpumask_first(&priv->dpio_cpumask);
1638                         break;
1639                 default:
1640                         dev_err(dev, "Unknown FQ type: %d\n", fq->type);
1641                 }
1642                 fq->channel = get_affine_channel(priv, fq->target_cpu);
1643         }
1644 }
1645
1646 static void setup_fqs(struct dpaa2_eth_priv *priv)
1647 {
1648         int i;
1649
1650         /* We have one Tx confirmation FQ per Tx flow.
1651          * The number of Tx and Rx queues is the same;
1652          * the Tx confirmation FQs come first in the fq array, followed by Rx.
1653          */
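        /* For example, assuming dpaa2_eth_queue_count() returned 4, the layout
         * would be: fq[0..3] = Tx conf (flowid 0..3), fq[4..7] = Rx (flowid 0..3).
         */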
1654         for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
1655                 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
1656                 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
1657                 priv->fq[priv->num_fqs++].flowid = (u16)i;
1658         }
1659
1660         for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
1661                 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
1662                 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
1663                 priv->fq[priv->num_fqs++].flowid = (u16)i;
1664         }
1665
1666         /* For each FQ, decide on which core to process incoming frames */
1667         set_fq_affinity(priv);
1668 }
1669
1670 /* Allocate and configure one buffer pool for each interface */
1671 static int setup_dpbp(struct dpaa2_eth_priv *priv)
1672 {
1673         int err;
1674         struct fsl_mc_device *dpbp_dev;
1675         struct device *dev = priv->net_dev->dev.parent;
1676
1677         err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
1678                                      &dpbp_dev);
1679         if (err) {
1680                 dev_err(dev, "DPBP device allocation failed\n");
1681                 return err;
1682         }
1683
1684         priv->dpbp_dev = dpbp_dev;
1685
1686         err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
1687                         &dpbp_dev->mc_handle);
1688         if (err) {
1689                 dev_err(dev, "dpbp_open() failed\n");
1690                 goto err_open;
1691         }
1692
1693         err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
1694         if (err) {
1695                 dev_err(dev, "dpbp_enable() failed\n");
1696                 goto err_enable;
1697         }
1698
1699         err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
1700                                   &priv->dpbp_attrs);
1701         if (err) {
1702                 dev_err(dev, "dpbp_get_attributes() failed\n");
1703                 goto err_get_attr;
1704         }
1705
1706         return 0;
1707
1708 err_get_attr:
1709         dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
1710 err_enable:
1711         dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
1712 err_open:
1713         fsl_mc_object_free(dpbp_dev);
1714
1715         return err;
1716 }
1717
1718 static void free_dpbp(struct dpaa2_eth_priv *priv)
1719 {
1720         drain_pool(priv);
1721         dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
1722         dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
1723         fsl_mc_object_free(priv->dpbp_dev);
1724 }
1725
1726 /* Configure the DPNI object this interface is associated with */
1727 static int setup_dpni(struct fsl_mc_device *ls_dev)
1728 {
1729         struct device *dev = &ls_dev->dev;
1730         struct dpaa2_eth_priv *priv;
1731         struct net_device *net_dev;
1732         int err;
1733
1734         net_dev = dev_get_drvdata(dev);
1735         priv = netdev_priv(net_dev);
1736
1737         priv->dpni_id = ls_dev->obj_desc.id;
1738
1739         /* get a handle for the DPNI object */
1740         err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token);
1741         if (err) {
1742                 dev_err(dev, "dpni_open() failed\n");
1743                 goto err_open;
1744         }
1745
1746         ls_dev->mc_io = priv->mc_io;
1747         ls_dev->mc_handle = priv->mc_token;
1748
1749         err = dpni_reset(priv->mc_io, 0, priv->mc_token);
1750         if (err) {
1751                 dev_err(dev, "dpni_reset() failed\n");
1752                 goto err_reset;
1753         }
1754
1755         err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
1756                                   &priv->dpni_attrs);
1757         if (err) {
1758                 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
1759                 goto err_get_attr;
1760         }
1761
1762         /* Configure buffer layouts */
1763         /* rx buffer */
1764         priv->buf_layout.pass_parser_result = true;
1765         priv->buf_layout.pass_frame_status = true;
1766         priv->buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
1767         priv->buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
1768         priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
1769                                    DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
1770                                    DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
1771                                    DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
1772         err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1773                                      DPNI_QUEUE_RX, &priv->buf_layout);
1774         if (err) {
1775                 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
1776                 goto err_buf_layout;
1777         }
1778
1779         /* tx buffer */
1780         priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
1781                                    DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
1782         err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1783                                      DPNI_QUEUE_TX, &priv->buf_layout);
1784         if (err) {
1785                 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
1786                 goto err_buf_layout;
1787         }
1788
1789         /* tx-confirm buffer */
1790         priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
1791         err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1792                                      DPNI_QUEUE_TX_CONFIRM, &priv->buf_layout);
1793         if (err) {
1794                 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
1795                 goto err_buf_layout;
1796         }
1797
1798         /* Now that we've set our tx buffer layout, retrieve the minimum
1799          * required tx data offset.
1800          */
1801         err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
1802                                       &priv->tx_data_offset);
1803         if (err) {
1804                 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
1805                 goto err_data_offset;
1806         }
1807
1808         if ((priv->tx_data_offset % 64) != 0)
1809                 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
1810                          priv->tx_data_offset);
1811
1812         /* Accommodate software annotation space (SWA) */
1813         priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
1814
1815         return 0;
1816
1817 err_data_offset:
1818 err_buf_layout:
1819 err_get_attr:
1820 err_reset:
1821         dpni_close(priv->mc_io, 0, priv->mc_token);
1822 err_open:
1823         return err;
1824 }
1825
1826 static void free_dpni(struct dpaa2_eth_priv *priv)
1827 {
1828         int err;
1829
1830         err = dpni_reset(priv->mc_io, 0, priv->mc_token);
1831         if (err)
1832                 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
1833                             err);
1834
1835         dpni_close(priv->mc_io, 0, priv->mc_token);
1836 }
1837
1838 static int setup_rx_flow(struct dpaa2_eth_priv *priv,
1839                          struct dpaa2_eth_fq *fq)
1840 {
1841         struct device *dev = priv->net_dev->dev.parent;
1842         struct dpni_queue queue;
1843         struct dpni_queue_id qid;
1844         struct dpni_taildrop td;
1845         int err;
1846
1847         err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1848                              DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
1849         if (err) {
1850                 dev_err(dev, "dpni_get_queue(RX) failed\n");
1851                 return err;
1852         }
1853
1854         fq->fqid = qid.fqid;
1855
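        /* Deliver frames on this Rx FQ to our affine DPCON channel and stash
         * the fq pointer in the queue context, so the dequeue path can map
         * each frame back to its fq.
         */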
1856         queue.destination.id = fq->channel->dpcon_id;
1857         queue.destination.type = DPNI_DEST_DPCON;
1858         queue.destination.priority = 1;
1859         queue.user_context = (u64)fq;
1860         err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
1861                              DPNI_QUEUE_RX, 0, fq->flowid,
1862                              DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
1863                              &queue);
1864         if (err) {
1865                 dev_err(dev, "dpni_set_queue(RX) failed\n");
1866                 return err;
1867         }
1868
1869         td.enable = 1;
1870         td.threshold = DPAA2_ETH_TAILDROP_THRESH;
1871         err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
1872                                 DPNI_QUEUE_RX, 0, fq->flowid, &td);
1873         if (err) {
1874                 dev_err(dev, "dpni_set_taildrop() failed\n");
1875                 return err;
1876         }
1877
1878         return 0;
1879 }
1880
1881 static int setup_tx_flow(struct dpaa2_eth_priv *priv,
1882                          struct dpaa2_eth_fq *fq)
1883 {
1884         struct device *dev = priv->net_dev->dev.parent;
1885         struct dpni_queue queue;
1886         struct dpni_queue_id qid;
1887         int err;
1888
1889         err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1890                              DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
1891         if (err) {
1892                 dev_err(dev, "dpni_get_queue(TX) failed\n");
1893                 return err;
1894         }
1895
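        /* Tx frames are enqueued through the DPNI's queuing destination (QDID)
         * using this queue's qdbin; only the Tx confirmation queue below needs
         * a DPCON destination for notifications.
         */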
1896         fq->tx_qdbin = qid.qdbin;
1897
1898         err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1899                              DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
1900                              &queue, &qid);
1901         if (err) {
1902                 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
1903                 return err;
1904         }
1905
1906         fq->fqid = qid.fqid;
1907
1908         queue.destination.id = fq->channel->dpcon_id;
1909         queue.destination.type = DPNI_DEST_DPCON;
1910         queue.destination.priority = 0;
1911         queue.user_context = (u64)fq;
1912         err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
1913                              DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
1914                              DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
1915                              &queue);
1916         if (err) {
1917                 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
1918                 return err;
1919         }
1920
1921         return 0;
1922 }
1923
1924 /* Hash key is a 5-tuple: IPsrc, IPdst, IPnextproto, L4src, L4dst */
1925 static const struct dpaa2_eth_hash_fields hash_fields[] = {
1926         {
1927                 /* IP header */
1928                 .rxnfc_field = RXH_IP_SRC,
1929                 .cls_prot = NET_PROT_IP,
1930                 .cls_field = NH_FLD_IP_SRC,
1931                 .size = 4,
1932         }, {
1933                 .rxnfc_field = RXH_IP_DST,
1934                 .cls_prot = NET_PROT_IP,
1935                 .cls_field = NH_FLD_IP_DST,
1936                 .size = 4,
1937         }, {
1938                 .rxnfc_field = RXH_L3_PROTO,
1939                 .cls_prot = NET_PROT_IP,
1940                 .cls_field = NH_FLD_IP_PROTO,
1941                 .size = 1,
1942         }, {
1943                 /* Using UDP ports, this is functionally equivalent to raw
1944                  * byte pairs from L4 header.
1945                  */
1946                 .rxnfc_field = RXH_L4_B_0_1,
1947                 .cls_prot = NET_PROT_UDP,
1948                 .cls_field = NH_FLD_UDP_PORT_SRC,
1949                 .size = 2,
1950         }, {
1951                 .rxnfc_field = RXH_L4_B_2_3,
1952                 .cls_prot = NET_PROT_UDP,
1953                 .cls_field = NH_FLD_UDP_PORT_DST,
1954                 .size = 2,
1955         },
1956 };
1957
1958 /* Set RX hash options
1959  * flags is a combination of RXH_ bits
1960  */
1961 static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
1962 {
1963         struct device *dev = net_dev->dev.parent;
1964         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1965         struct dpkg_profile_cfg cls_cfg;
1966         struct dpni_rx_tc_dist_cfg dist_cfg;
1967         u8 *dma_mem;
1968         int i;
1969         int err = 0;
1970
1971         if (!dpaa2_eth_hash_enabled(priv)) {
1972                 dev_dbg(dev, "Hashing support is not enabled\n");
1973                 return 0;
1974         }
1975
1976         memset(&cls_cfg, 0, sizeof(cls_cfg));
1977
1978         for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
1979                 struct dpkg_extract *key =
1980                         &cls_cfg.extracts[cls_cfg.num_extracts];
1981
1982                 if (!(flags & hash_fields[i].rxnfc_field))
1983                         continue;
1984
1985                 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
1986                         dev_err(dev, "error adding key extraction rule, too many rules?\n");
1987                         return -E2BIG;
1988                 }
1989
1990                 key->type = DPKG_EXTRACT_FROM_HDR;
1991                 key->extract.from_hdr.prot = hash_fields[i].cls_prot;
1992                 key->extract.from_hdr.type = DPKG_FULL_FIELD;
1993                 key->extract.from_hdr.field = hash_fields[i].cls_field;
1994                 cls_cfg.num_extracts++;
1995
1996                 priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
1997         }
1998
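        /* The key extraction config must be serialized into DMA-able memory;
         * the MC firmware reads it through the IOVA passed in dist_cfg.
         */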
1999         dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
2000         if (!dma_mem)
2001                 return -ENOMEM;
2002
2003         err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
2004         if (err) {
2005                 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
2006                 goto err_prep_key;
2007         }
2008
2009         memset(&dist_cfg, 0, sizeof(dist_cfg));
2010
2011         /* Prepare for setting the rx dist */
2012         dist_cfg.key_cfg_iova = dma_map_single(net_dev->dev.parent, dma_mem,
2013                                                DPAA2_CLASSIFIER_DMA_SIZE,
2014                                                DMA_TO_DEVICE);
2015         if (dma_mapping_error(net_dev->dev.parent, dist_cfg.key_cfg_iova)) {
2016                 dev_err(dev, "DMA mapping failed\n");
2017                 err = -ENOMEM;
2018                 goto err_dma_map;
2019         }
2020
2021         dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2022         dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
2023
2024         err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
2025         dma_unmap_single(net_dev->dev.parent, dist_cfg.key_cfg_iova,
2026                          DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
2027         if (err)
2028                 dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
2029
2030 err_dma_map:
2031 err_prep_key:
2032         kfree(dma_mem);
2033         return err;
2034 }
2035
2036 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
2037  * frame queues and channels
2038  */
2039 static int bind_dpni(struct dpaa2_eth_priv *priv)
2040 {
2041         struct net_device *net_dev = priv->net_dev;
2042         struct device *dev = net_dev->dev.parent;
2043         struct dpni_pools_cfg pools_params;
2044         struct dpni_error_cfg err_cfg;
2045         int err = 0;
2046         int i;
2047
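        /* Tell the DPNI which buffer pool to acquire Rx buffers from, and the
         * size of those buffers.
         */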
2048         pools_params.num_dpbp = 1;
2049         pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
2050         pools_params.pools[0].backup_pool = 0;
2051         pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
2052         err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
2053         if (err) {
2054                 dev_err(dev, "dpni_set_pools() failed\n");
2055                 return err;
2056         }
2057
2058         /* Have the interface implicitly distribute traffic based on supported
2059          * header fields
2060          */
2061         err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
2062         if (err)
2063                 netdev_err(net_dev, "Failed to configure hashing\n");
2064
2065         /* Configure handling of error frames */
2066         err_cfg.errors = DPAA2_ETH_RX_ERR_MASK;
2067         err_cfg.set_frame_annotation = 1;
2068         err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
2069         err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
2070                                        &err_cfg);
2071         if (err) {
2072                 dev_err(dev, "dpni_set_errors_behavior() failed\n");
2073                 return err;
2074         }
2075
2076         /* Configure Rx and Tx conf queues to generate CDANs */
2077         for (i = 0; i < priv->num_fqs; i++) {
2078                 switch (priv->fq[i].type) {
2079                 case DPAA2_RX_FQ:
2080                         err = setup_rx_flow(priv, &priv->fq[i]);
2081                         break;
2082                 case DPAA2_TX_CONF_FQ:
2083                         err = setup_tx_flow(priv, &priv->fq[i]);
2084                         break;
2085                 default:
2086                         dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
2087                         return -EINVAL;
2088                 }
2089                 if (err)
2090                         return err;
2091         }
2092
2093         err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
2094                             DPNI_QUEUE_TX, &priv->tx_qdid);
2095         if (err) {
2096                 dev_err(dev, "dpni_get_qdid() failed\n");
2097                 return err;
2098         }
2099
2100         return 0;
2101 }
2102
2103 /* Allocate rings for storing incoming frame descriptors */
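/* Each channel gets its own store: a small ring of dequeue entries that QBMan
 * fills when frames are pulled during NAPI polling.
 */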
2104 static int alloc_rings(struct dpaa2_eth_priv *priv)
2105 {
2106         struct net_device *net_dev = priv->net_dev;
2107         struct device *dev = net_dev->dev.parent;
2108         int i;
2109
2110         for (i = 0; i < priv->num_channels; i++) {
2111                 priv->channel[i]->store =
2112                         dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
2113                 if (!priv->channel[i]->store) {
2114                         netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
2115                         goto err_ring;
2116                 }
2117         }
2118
2119         return 0;
2120
2121 err_ring:
2122         for (i = 0; i < priv->num_channels; i++) {
2123                 if (!priv->channel[i]->store)
2124                         break;
2125                 dpaa2_io_store_destroy(priv->channel[i]->store);
2126         }
2127
2128         return -ENOMEM;
2129 }
2130
2131 static void free_rings(struct dpaa2_eth_priv *priv)
2132 {
2133         int i;
2134
2135         for (i = 0; i < priv->num_channels; i++)
2136                 dpaa2_io_store_destroy(priv->channel[i]->store);
2137 }
2138
2139 static int netdev_init(struct net_device *net_dev)
2140 {
2141         int err;
2142         struct device *dev = net_dev->dev.parent;
2143         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2144         u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
2145         u8 bcast_addr[ETH_ALEN];
2146
2147         net_dev->netdev_ops = &dpaa2_eth_ops;
2148
2149         /* Get the physical port MAC address known by firmware, if any */
2150         err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
2151         if (err) {
2152                 dev_err(dev, "dpni_get_port_mac_addr() failed\n");
2153                 return err;
2154         }
2155
2156         /* Get the MAC address currently configured on the DPNI, if any */
2157         err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2158                                         dpni_mac_addr);
2159         if (err) {
2160                 dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)\n", err);
2161                 return err;
2162         }
2163
2164         /* First check if firmware reports an address configured by the bootloader */
2165         if (!is_zero_ether_addr(mac_addr)) {
2166                 /* If the DPMAC addr != DPNI addr, update it */
2167                 if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
2168                         err = dpni_set_primary_mac_addr(priv->mc_io, 0,
2169                                                         priv->mc_token,
2170                                                         mac_addr);
2171                         if (err) {
2172                                 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
2173                                 return err;
2174                         }
2175                 }
2176                 memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
2177         } else if (is_zero_ether_addr(dpni_mac_addr)) {
2178                 /* Fills in net_dev->dev_addr, as required by
2179                  * register_netdevice()
2180                  */
2181                 eth_hw_addr_random(net_dev);
2182                 /* Make the user aware, without cluttering the boot log */
2183                 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
2184                 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2185                                                 net_dev->dev_addr);
2186                 if (err) {
2187                         dev_err(dev, "dpni_set_primary_mac_addr(): %d\n", err);
2188                         return err;
2189                 }
2190                 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
2191                  * practical purposes, this will be our "permanent" mac address,
2192                  * at least until the next reboot. This also allows
2193                  * register_netdevice() to properly fill in net_dev->perm_addr.
2194                  */
2195                 net_dev->addr_assign_type = NET_ADDR_PERM;
2196         } else {
2197                 /* NET_ADDR_PERM is the default; all we have to do is
2198                  * fill in the device addr.
2199                  */
2200                 memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
2201         }
2202
2203         /* Explicitly add the broadcast address to the MAC filtering table;
2204          * the MC won't do that for us.
2205          */
2206         eth_broadcast_addr(bcast_addr);
2207         err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
2208         if (err) {
2209                 dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err);
2210                 /* Don't return an error; even without broadcast Rx we still have egress traffic */
2211         }
2212
2213         /* Reserve enough headroom to align Tx buffers as required by hardware;
2214          * NOTE: priv->tx_data_offset MUST be initialized at this point.
2215          */
2216         net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
2217
2218         /* Set MTU limits */
2219         net_dev->min_mtu = 68;
2220         net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
2221
2222         /* Our .ndo_init will be called as part of register_netdev() */
2223         err = register_netdev(net_dev);
2224         if (err < 0) {
2225                 dev_err(dev, "register_netdev() failed\n");
2226                 return err;
2227         }
2228
2229         return 0;
2230 }
2231
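/* Fallback link state monitoring: when MC interrupts cannot be set up, a
 * kthread periodically refreshes the link state instead.
 */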
2232 static int poll_link_state(void *arg)
2233 {
2234         struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
2235         int err;
2236
2237         while (!kthread_should_stop()) {
2238                 err = link_state_update(priv);
2239                 if (unlikely(err))
2240                         return err;
2241
2242                 msleep(DPAA2_ETH_LINK_STATE_REFRESH);
2243         }
2244
2245         return 0;
2246 }
2247
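/* The hard IRQ handler only wakes the threaded handler, which reads the DPNI
 * interrupt status, refreshes the link state if it changed and then clears
 * the serviced status bits.
 */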
2248 static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
2249 {
2250         return IRQ_WAKE_THREAD;
2251 }
2252
2253 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
2254 {
2255         u32 status = 0, clear = 0;
2256         struct device *dev = (struct device *)arg;
2257         struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
2258         struct net_device *net_dev = dev_get_drvdata(dev);
2259         int err;
2260
2261         err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
2262                                   DPNI_IRQ_INDEX, &status);
2263         if (unlikely(err)) {
2264                 netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
2265                 clear = 0xffffffff;
2266                 goto out;
2267         }
2268
2269         if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
2270                 clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
2271                 link_state_update(netdev_priv(net_dev));
2272         }
2273
2274 out:
2275         dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
2276                               DPNI_IRQ_INDEX, clear);
2277         return IRQ_HANDLED;
2278 }
2279
2280 static int setup_irqs(struct fsl_mc_device *ls_dev)
2281 {
2282         int err = 0;
2283         struct fsl_mc_device_irq *irq;
2284
2285         err = fsl_mc_allocate_irqs(ls_dev);
2286         if (err) {
2287                 dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
2288                 return err;
2289         }
2290
2291         irq = ls_dev->irqs[0];
2292         err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
2293                                         dpni_irq0_handler,
2294                                         dpni_irq0_handler_thread,
2295                                         IRQF_NO_SUSPEND | IRQF_ONESHOT,
2296                                         dev_name(&ls_dev->dev), &ls_dev->dev);
2297         if (err < 0) {
2298                 dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
2299                 goto free_mc_irq;
2300         }
2301
2302         err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
2303                                 DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
2304         if (err < 0) {
2305                 dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
2306                 goto free_irq;
2307         }
2308
2309         err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
2310                                   DPNI_IRQ_INDEX, 1);
2311         if (err < 0) {
2312                 dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
2313                 goto free_irq;
2314         }
2315
2316         return 0;
2317
2318 free_irq:
2319         devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
2320 free_mc_irq:
2321         fsl_mc_free_irqs(ls_dev);
2322
2323         return err;
2324 }
2325
2326 static void add_ch_napi(struct dpaa2_eth_priv *priv)
2327 {
2328         int i;
2329         struct dpaa2_eth_channel *ch;
2330
2331         for (i = 0; i < priv->num_channels; i++) {
2332                 ch = priv->channel[i];
2333                 /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
2334                 netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
2335                                NAPI_POLL_WEIGHT);
2336         }
2337 }
2338
2339 static void del_ch_napi(struct dpaa2_eth_priv *priv)
2340 {
2341         int i;
2342         struct dpaa2_eth_channel *ch;
2343
2344         for (i = 0; i < priv->num_channels; i++) {
2345                 ch = priv->channel[i];
2346                 netif_napi_del(&ch->napi);
2347         }
2348 }
2349
2350 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
2351 {
2352         struct device *dev;
2353         struct net_device *net_dev = NULL;
2354         struct dpaa2_eth_priv *priv = NULL;
2355         int err = 0;
2356
2357         dev = &dpni_dev->dev;
2358
2359         /* Net device */
2360         net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
2361         if (!net_dev) {
2362                 dev_err(dev, "alloc_etherdev_mq() failed\n");
2363                 return -ENOMEM;
2364         }
2365
2366         SET_NETDEV_DEV(net_dev, dev);
2367         dev_set_drvdata(dev, net_dev);
2368
2369         priv = netdev_priv(net_dev);
2370         priv->net_dev = net_dev;
2371
2372         priv->iommu_domain = iommu_get_domain_for_dev(dev);
2373
2374         /* Obtain a MC portal */
2375         err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
2376                                      &priv->mc_io);
2377         if (err) {
2378                 dev_err(dev, "MC portal allocation failed\n");
2379                 goto err_portal_alloc;
2380         }
2381
2382         /* MC objects initialization and configuration */
2383         err = setup_dpni(dpni_dev);
2384         if (err)
2385                 goto err_dpni_setup;
2386
2387         err = setup_dpio(priv);
2388         if (err)
2389                 goto err_dpio_setup;
2390
2391         setup_fqs(priv);
2392
2393         err = setup_dpbp(priv);
2394         if (err)
2395                 goto err_dpbp_setup;
2396
2397         err = bind_dpni(priv);
2398         if (err)
2399                 goto err_bind;
2400
2401         /* Add a NAPI context for each channel */
2402         add_ch_napi(priv);
2403
2404         /* Percpu statistics */
2405         priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
2406         if (!priv->percpu_stats) {
2407                 dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
2408                 err = -ENOMEM;
2409                 goto err_alloc_percpu_stats;
2410         }
2411         priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
2412         if (!priv->percpu_extras) {
2413                 dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
2414                 err = -ENOMEM;
2415                 goto err_alloc_percpu_extras;
2416         }
2417
2418         err = netdev_init(net_dev);
2419         if (err)
2420                 goto err_netdev_init;
2421
2422         /* Configure checksum offload based on current interface flags */
2423         err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
2424         if (err)
2425                 goto err_csum;
2426
2427         err = set_tx_csum(priv, !!(net_dev->features &
2428                                    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
2429         if (err)
2430                 goto err_csum;
2431
2432         err = alloc_rings(priv);
2433         if (err)
2434                 goto err_alloc_rings;
2435
2436         net_dev->ethtool_ops = &dpaa2_ethtool_ops;
2437
2438         err = setup_irqs(dpni_dev);
2439         if (err) {
2440                 netdev_warn(net_dev, "Failed to set link interrupt, falling back to polling\n");
2441                 priv->poll_thread = kthread_run(poll_link_state, priv,
2442                                                 "%s_poll_link", net_dev->name);
2443                 if (IS_ERR(priv->poll_thread)) {
2444                         netdev_err(net_dev, "Error starting polling thread\n");
2445                         goto err_poll_thread;
2446                 }
2447                 priv->do_link_poll = true;
2448         }
2449
2450         dev_info(dev, "Probed interface %s\n", net_dev->name);
2451         return 0;
2452
2453 err_poll_thread:
2454         free_rings(priv);
2455 err_alloc_rings:
2456 err_csum:
2457         unregister_netdev(net_dev);
2458 err_netdev_init:
2459         free_percpu(priv->percpu_extras);
2460 err_alloc_percpu_extras:
2461         free_percpu(priv->percpu_stats);
2462 err_alloc_percpu_stats:
2463         del_ch_napi(priv);
2464 err_bind:
2465         free_dpbp(priv);
2466 err_dpbp_setup:
2467         free_dpio(priv);
2468 err_dpio_setup:
2469         free_dpni(priv);
2470 err_dpni_setup:
2471         fsl_mc_portal_free(priv->mc_io);
2472 err_portal_alloc:
2473         dev_set_drvdata(dev, NULL);
2474         free_netdev(net_dev);
2475
2476         return err;
2477 }
2478
2479 static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
2480 {
2481         struct device *dev;
2482         struct net_device *net_dev;
2483         struct dpaa2_eth_priv *priv;
2484
2485         dev = &ls_dev->dev;
2486         net_dev = dev_get_drvdata(dev);
2487         priv = netdev_priv(net_dev);
2488
2489         unregister_netdev(net_dev);
2490         dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
2491
2492         if (priv->do_link_poll)
2493                 kthread_stop(priv->poll_thread);
2494         else
2495                 fsl_mc_free_irqs(ls_dev);
2496
2497         free_rings(priv);
2498         free_percpu(priv->percpu_stats);
2499         free_percpu(priv->percpu_extras);
2500
2501         del_ch_napi(priv);
2502         free_dpbp(priv);
2503         free_dpio(priv);
2504         free_dpni(priv);
2505
2506         fsl_mc_portal_free(priv->mc_io);
2507
2508         dev_set_drvdata(dev, NULL);
2509         free_netdev(net_dev);
2510
2511         return 0;
2512 }
2513
2514 static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
2515         {
2516                 .vendor = FSL_MC_VENDOR_FREESCALE,
2517                 .obj_type = "dpni",
2518         },
2519         { .vendor = 0x0 }
2520 };
2521 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
2522
2523 static struct fsl_mc_driver dpaa2_eth_driver = {
2524         .driver = {
2525                 .name = KBUILD_MODNAME,
2526                 .owner = THIS_MODULE,
2527         },
2528         .probe = dpaa2_eth_probe,
2529         .remove = dpaa2_eth_remove,
2530         .match_id_table = dpaa2_eth_match_id_table
2531 };
2532
2533 module_fsl_mc_driver(dpaa2_eth_driver);