drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
1 /* Copyright 2014-2016 Freescale Semiconductor Inc.
2  * Copyright 2016-2017 NXP
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *     * Redistributions of source code must retain the above copyright
7  *       notice, this list of conditions and the following disclaimer.
8  *     * Redistributions in binary form must reproduce the above copyright
9  *       notice, this list of conditions and the following disclaimer in the
10  *       documentation and/or other materials provided with the distribution.
11  *     * Neither the name of Freescale Semiconductor nor the
12  *       names of its contributors may be used to endorse or promote products
13  *       derived from this software without specific prior written permission.
14  *
15  *
16  * ALTERNATIVELY, this software may be distributed under the terms of the
17  * GNU General Public License ("GPL") as published by the Free Software
18  * Foundation, either version 2 of that License or (at your option) any
19  * later version.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
22  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
23  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24  * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
25  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
28  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
30  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/platform_device.h>
35 #include <linux/etherdevice.h>
36 #include <linux/of_net.h>
37 #include <linux/interrupt.h>
38 #include <linux/msi.h>
39 #include <linux/kthread.h>
40
41 #include "../../fsl-mc/include/mc.h"
42 #include "../../fsl-mc/include/mc-sys.h"
43 #include "dpaa2-eth.h"
44
45 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2-eth files
46  * using trace events only need to #include "dpaa2-eth-trace.h"
47  */
48 #define CREATE_TRACE_POINTS
49 #include "dpaa2-eth-trace.h"
50
51 MODULE_LICENSE("Dual BSD/GPL");
52 MODULE_AUTHOR("Freescale Semiconductor, Inc");
53 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
54
55 const char dpaa2_eth_drv_version[] = "0.1";
56
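/* Check the hardware-computed checksum validation bits in the frame
 * annotation status and, if both the L3 and L4 checksums were validated,
 * tell the stack it can skip checksumming this skb.
 */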
57 static void validate_rx_csum(struct dpaa2_eth_priv *priv,
58                              u32 fd_status,
59                              struct sk_buff *skb)
60 {
61         skb_checksum_none_assert(skb);
62
63         /* HW checksum validation is disabled, nothing to do here */
64         if (!(priv->net_dev->features & NETIF_F_RXCSUM))
65                 return;
66
67         /* Read checksum validation bits */
68         if (!((fd_status & DPAA2_FAS_L3CV) &&
69               (fd_status & DPAA2_FAS_L4CV)))
70                 return;
71
72         /* Inform the stack there's no need to compute L3/L4 csum anymore */
73         skb->ip_summed = CHECKSUM_UNNECESSARY;
74 }
75
76 /* Free a received FD.
77  * Not to be used for Tx conf FDs or on any other paths.
78  */
79 static void free_rx_fd(struct dpaa2_eth_priv *priv,
80                        const struct dpaa2_fd *fd,
81                        void *vaddr)
82 {
83         struct device *dev = priv->net_dev->dev.parent;
84         dma_addr_t addr = dpaa2_fd_get_addr(fd);
85         u8 fd_format = dpaa2_fd_get_format(fd);
86         struct dpaa2_sg_entry *sgt;
87         void *sg_vaddr;
88         int i;
89
90         /* If single buffer frame, just free the data buffer */
91         if (fd_format == dpaa2_fd_single)
92                 goto free_buf;
93         else if (fd_format != dpaa2_fd_sg)
94                 /* We don't support any other format */
95                 return;
96
97         /* For S/G frames, we first need to free all SG entries */
98         sgt = vaddr + dpaa2_fd_get_offset(fd);
99         for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
100                 addr = dpaa2_sg_get_addr(&sgt[i]);
101                 dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
102                                  DMA_FROM_DEVICE);
103
104                 sg_vaddr = phys_to_virt(addr);
105                 skb_free_frag(sg_vaddr);
106
107                 if (dpaa2_sg_is_final(&sgt[i]))
108                         break;
109         }
110
111 free_buf:
112         skb_free_frag(vaddr);
113 }
114
115 /* Build a linear skb based on a single-buffer frame descriptor */
116 static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
117                                         struct dpaa2_eth_channel *ch,
118                                         const struct dpaa2_fd *fd,
119                                         void *fd_vaddr)
120 {
121         struct sk_buff *skb = NULL;
122         u16 fd_offset = dpaa2_fd_get_offset(fd);
123         u32 fd_length = dpaa2_fd_get_len(fd);
124
125         skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
126                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
127         if (unlikely(!skb))
128                 return NULL;
129
130         skb_reserve(skb, fd_offset);
131         skb_put(skb, fd_length);
132
133         ch->buf_count--;
134
135         return skb;
136 }
137
138 /* Build a non-linear (fragmented) skb based on an S/G table */
139 static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
140                                       struct dpaa2_eth_channel *ch,
141                                       struct dpaa2_sg_entry *sgt)
142 {
143         struct sk_buff *skb = NULL;
144         struct device *dev = priv->net_dev->dev.parent;
145         void *sg_vaddr;
146         dma_addr_t sg_addr;
147         u16 sg_offset;
148         u32 sg_length;
149         struct page *page, *head_page;
150         int page_offset;
151         int i;
152
153         for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
154                 struct dpaa2_sg_entry *sge = &sgt[i];
155
156                 /* NOTE: We only support SG entries in dpaa2_sg_single format,
157                  * but this is the only format we may receive from HW anyway
158                  */
159
160                 /* Get the address and length from the S/G entry */
161                 sg_addr = dpaa2_sg_get_addr(sge);
162                 dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
163                                  DMA_FROM_DEVICE);
164
165                 sg_vaddr = phys_to_virt(sg_addr);
166                 sg_length = dpaa2_sg_get_len(sge);
167
168                 if (i == 0) {
169                         /* We build the skb around the first data buffer */
170                         skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
171                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
172                         if (unlikely(!skb))
173                                 return NULL;
174
175                         sg_offset = dpaa2_sg_get_offset(sge);
176                         skb_reserve(skb, sg_offset);
177                         skb_put(skb, sg_length);
178                 } else {
179                         /* Rest of the data buffers are stored as skb frags */
180                         page = virt_to_page(sg_vaddr);
181                         head_page = virt_to_head_page(sg_vaddr);
182
183                         /* Offset in page (which may be compound).
184                          * Data in subsequent SG entries is stored from the
185                          * beginning of the buffer, so we don't need to add the
186                          * sg_offset.
187                          */
188                         page_offset = ((unsigned long)sg_vaddr &
189                                 (PAGE_SIZE - 1)) +
190                                 (page_address(page) - page_address(head_page));
191
192                         skb_add_rx_frag(skb, i - 1, head_page, page_offset,
193                                         sg_length, DPAA2_ETH_RX_BUF_SIZE);
194                 }
195
196                 if (dpaa2_sg_is_final(sge))
197                         break;
198         }
199
200         /* Count all data buffers + SG table buffer */
201         ch->buf_count -= i + 2;
202
203         return skb;
204 }
205
206 /* Main Rx frame processing routine */
207 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
208                          struct dpaa2_eth_channel *ch,
209                          const struct dpaa2_fd *fd,
210                          struct napi_struct *napi)
211 {
212         dma_addr_t addr = dpaa2_fd_get_addr(fd);
213         u8 fd_format = dpaa2_fd_get_format(fd);
214         void *vaddr;
215         struct sk_buff *skb;
216         struct rtnl_link_stats64 *percpu_stats;
217         struct dpaa2_eth_drv_stats *percpu_extras;
218         struct device *dev = priv->net_dev->dev.parent;
219         struct dpaa2_fas *fas;
220         u32 status = 0;
221
222         /* Tracing point */
223         trace_dpaa2_rx_fd(priv->net_dev, fd);
224
225         dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
226         vaddr = phys_to_virt(addr);
227
228         prefetch(vaddr + priv->buf_layout.private_data_size);
229         prefetch(vaddr + dpaa2_fd_get_offset(fd));
230
231         percpu_stats = this_cpu_ptr(priv->percpu_stats);
232         percpu_extras = this_cpu_ptr(priv->percpu_extras);
233
234         if (fd_format == dpaa2_fd_single) {
235                 skb = build_linear_skb(priv, ch, fd, vaddr);
236         } else if (fd_format == dpaa2_fd_sg) {
237                 struct dpaa2_sg_entry *sgt =
238                                 vaddr + dpaa2_fd_get_offset(fd);
239                 skb = build_frag_skb(priv, ch, sgt);
240                 skb_free_frag(vaddr);
241                 percpu_extras->rx_sg_frames++;
242                 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
243         } else {
244                 /* We don't support any other format */
245                 goto err_frame_format;
246         }
247
248         if (unlikely(!skb))
249                 goto err_build_skb;
250
251         prefetch(skb->data);
252
253         /* Check if we need to validate the L3/L4 csum */
254         if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
255                 fas = (struct dpaa2_fas *)
256                                 (vaddr + priv->buf_layout.private_data_size);
257                 status = le32_to_cpu(fas->status);
258                 validate_rx_csum(priv, status, skb);
259         }
260
261         skb->protocol = eth_type_trans(skb, priv->net_dev);
262
263         percpu_stats->rx_packets++;
264         percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
265
266         if (priv->net_dev->features & NETIF_F_GRO)
267                 napi_gro_receive(napi, skb);
268         else
269                 netif_receive_skb(skb);
270
271         return;
272
273 err_build_skb:
274         free_rx_fd(priv, fd, vaddr);
275 err_frame_format:
276         percpu_stats->rx_dropped++;
277 }
278
279 /* Consume all frames pull-dequeued into the store. This is the simplest way to
280  * make sure we don't accidentally issue another volatile dequeue which would
281  * overwrite (leak) frames already in the store.
282  *
283  * Observance of NAPI budget is not our concern, leaving that to the caller.
284  */
285 static int consume_frames(struct dpaa2_eth_channel *ch)
286 {
287         struct dpaa2_eth_priv *priv = ch->priv;
288         struct dpaa2_eth_fq *fq;
289         struct dpaa2_dq *dq;
290         const struct dpaa2_fd *fd;
291         int cleaned = 0;
292         int is_last;
293
294         do {
295                 dq = dpaa2_io_store_next(ch->store, &is_last);
296                 if (unlikely(!dq)) {
297                         /* If we're here, we *must* have placed a
298                          * volatile dequeue command, so keep reading through
299                          * the store until we get some sort of valid response
300                          * token (either a valid frame or an "empty dequeue")
301                          */
302                         continue;
303                 }
304
305                 fd = dpaa2_dq_fd(dq);
306                 fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
307                 fq->stats.frames++;
308
309                 fq->consume(priv, ch, fd, &ch->napi);
310                 cleaned++;
311         } while (!is_last);
312
313         return cleaned;
314 }
315
316 /* Create a frame descriptor based on a fragmented skb */
317 static int build_sg_fd(struct dpaa2_eth_priv *priv,
318                        struct sk_buff *skb,
319                        struct dpaa2_fd *fd)
320 {
321         struct device *dev = priv->net_dev->dev.parent;
322         void *sgt_buf = NULL;
323         void *hwa;
324         dma_addr_t addr;
325         int nr_frags = skb_shinfo(skb)->nr_frags;
326         struct dpaa2_sg_entry *sgt;
327         int i, err;
328         int sgt_buf_size;
329         struct scatterlist *scl, *crt_scl;
330         int num_sg;
331         int num_dma_bufs;
332         struct dpaa2_eth_swa *swa;
333
334         /* Create and map scatterlist.
335          * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
336          * to go beyond nr_frags+1.
337          * Note: We don't support chained scatterlists
338          */
339         if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
340                 return -EINVAL;
341
342         scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
343         if (unlikely(!scl))
344                 return -ENOMEM;
345
346         sg_init_table(scl, nr_frags + 1);
347         num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
348         num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
349         if (unlikely(!num_dma_bufs)) {
350                 err = -ENOMEM;
351                 goto dma_map_sg_failed;
352         }
353
354         /* Prepare the HW SGT structure */
355         sgt_buf_size = priv->tx_data_offset +
356                        sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
357         sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
358         if (unlikely(!sgt_buf)) {
359                 err = -ENOMEM;
360                 goto sgt_buf_alloc_failed;
361         }
362         sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
363
364         /* PTA from egress side is passed as is to the confirmation side so
365          * we need to clear some fields here in order to find consistent values
366          * on TX confirmation. We are clearing the FAS (Frame Annotation Status)
367          * field from the hardware annotation area
368          */
369         hwa = sgt_buf + priv->buf_layout.private_data_size;
370         memset(hwa + DPAA2_FAS_OFFSET, 0, DPAA2_FAS_SIZE);
371
372         sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
373
374         /* Fill in the HW SGT structure.
375          *
376          * sgt_buf is zeroed out, so the following fields are implicit
377          * in all sgt entries:
378          *   - offset is 0
379          *   - format is 'dpaa2_sg_single'
380          */
381         for_each_sg(scl, crt_scl, num_dma_bufs, i) {
382                 dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
383                 dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
384         }
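        /* After the loop, i equals num_dma_bufs, so sgt[i - 1] is the last entry */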
385         dpaa2_sg_set_final(&sgt[i - 1], true);
386
387         /* Store the skb backpointer in the SGT buffer.
388          * Fit the scatterlist and the number of buffers alongside the
389          * skb backpointer in the software annotation area. We'll need
390          * all of them on Tx Conf.
391          */
392         swa = (struct dpaa2_eth_swa *)sgt_buf;
393         swa->skb = skb;
394         swa->scl = scl;
395         swa->num_sg = num_sg;
396         swa->num_dma_bufs = num_dma_bufs;
397
398         /* Separately map the SGT buffer */
399         addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_TO_DEVICE);
400         if (unlikely(dma_mapping_error(dev, addr))) {
401                 err = -ENOMEM;
402                 goto dma_map_single_failed;
403         }
404         dpaa2_fd_set_offset(fd, priv->tx_data_offset);
405         dpaa2_fd_set_format(fd, dpaa2_fd_sg);
406         dpaa2_fd_set_addr(fd, addr);
407         dpaa2_fd_set_len(fd, skb->len);
408         dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
409                           DPAA2_FD_CTRL_PTV1);
410
411         return 0;
412
413 dma_map_single_failed:
414         kfree(sgt_buf);
415 sgt_buf_alloc_failed:
416         dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
417 dma_map_sg_failed:
418         kfree(scl);
419         return err;
420 }
421
422 /* Create a frame descriptor based on a linear skb */
423 static int build_single_fd(struct dpaa2_eth_priv *priv,
424                            struct sk_buff *skb,
425                            struct dpaa2_fd *fd)
426 {
427         struct device *dev = priv->net_dev->dev.parent;
428         u8 *buffer_start;
429         void *hwa;
430         struct sk_buff **skbh;
431         dma_addr_t addr;
432
433         buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
434                                  DPAA2_ETH_TX_BUF_ALIGN,
435                                  DPAA2_ETH_TX_BUF_ALIGN);
436
437         /* PTA from egress side is passed as is to the confirmation side so
438          * we need to clear some fields here in order to find consistent values
439          * on TX confirmation. We are clearing the FAS (Frame Annotation Status)
440          * field from the hardware annotation area
441          */
442         hwa = buffer_start + priv->buf_layout.private_data_size;
443         memset(hwa + DPAA2_FAS_OFFSET, 0, DPAA2_FAS_SIZE);
444
445         /* Store a backpointer to the skb at the beginning of the buffer
446          * (in the private data area) such that we can release it
447          * on Tx confirm
448          */
449         skbh = (struct sk_buff **)buffer_start;
450         *skbh = skb;
451
452         addr = dma_map_single(dev, buffer_start,
453                               skb_tail_pointer(skb) - buffer_start,
454                               DMA_TO_DEVICE);
455         if (unlikely(dma_mapping_error(dev, addr)))
456                 return -ENOMEM;
457
458         dpaa2_fd_set_addr(fd, addr);
459         dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
460         dpaa2_fd_set_len(fd, skb->len);
461         dpaa2_fd_set_format(fd, dpaa2_fd_single);
462         dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
463                           DPAA2_FD_CTRL_PTV1);
464
465         return 0;
466 }
467
468 /* FD freeing routine on the Tx path
469  *
470  * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
471  * back-pointed to is also freed.
472  * This can be called either from dpaa2_eth_tx_conf() or on the error path of
473  * dpaa2_eth_tx().
474  * Optionally, return the frame annotation status word (FAS), which needs
475  * to be checked if we're on the confirmation path.
476  */
477 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
478                        const struct dpaa2_fd *fd,
479                        u32 *status)
480 {
481         struct device *dev = priv->net_dev->dev.parent;
482         dma_addr_t fd_addr;
483         struct sk_buff **skbh, *skb;
484         unsigned char *buffer_start;
485         int unmap_size;
486         struct scatterlist *scl;
487         int num_sg, num_dma_bufs;
488         struct dpaa2_eth_swa *swa;
489         u8 fd_format = dpaa2_fd_get_format(fd);
490         struct dpaa2_fas *fas;
491
492         fd_addr = dpaa2_fd_get_addr(fd);
493         skbh = phys_to_virt(fd_addr);
494
495         if (fd_format == dpaa2_fd_single) {
496                 skb = *skbh;
497                 buffer_start = (unsigned char *)skbh;
498                 /* Accessing the skb buffer is safe before dma unmap, because
499                  * we didn't map the actual skb shell.
500                  */
501                 dma_unmap_single(dev, fd_addr,
502                                  skb_tail_pointer(skb) - buffer_start,
503                                  DMA_TO_DEVICE);
504         } else if (fd_format == dpaa2_fd_sg) {
505                 swa = (struct dpaa2_eth_swa *)skbh;
506                 skb = swa->skb;
507                 scl = swa->scl;
508                 num_sg = swa->num_sg;
509                 num_dma_bufs = swa->num_dma_bufs;
510
511                 /* Unmap the scatterlist */
512                 dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
513                 kfree(scl);
514
515                 /* Unmap the SGT buffer */
516                 unmap_size = priv->tx_data_offset +
517                        sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
518                 dma_unmap_single(dev, fd_addr, unmap_size, DMA_TO_DEVICE);
519         } else {
520                 /* Unsupported format, mark it as errored and give up */
521                 if (status)
522                         *status = ~0;
523                 return;
524         }
525
526         /* Read the status from the Frame Annotation after we unmap the first
527          * buffer but before we free it. The caller function is responsible
528          * for checking the status value.
529          */
530         if (status && (dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
531                 fas = (struct dpaa2_fas *)
532                         ((void *)skbh + priv->buf_layout.private_data_size);
533                 *status = le32_to_cpu(fas->status);
534         }
535
536         /* Free SGT buffer kmalloc'ed on tx */
537         if (fd_format != dpaa2_fd_single)
538                 kfree(skbh);
539
540         /* Move on with skb release */
541         dev_kfree_skb(skb);
542 }
543
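/* Main Tx routine: make sure there is enough headroom for the hardware
 * annotations, build a single or scatter-gather frame descriptor and
 * enqueue it. The skb is freed on Tx confirmation or on the error path.
 */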
544 static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
545 {
546         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
547         struct dpaa2_fd fd;
548         struct rtnl_link_stats64 *percpu_stats;
549         struct dpaa2_eth_drv_stats *percpu_extras;
550         struct dpaa2_eth_fq *fq;
551         u16 queue_mapping;
552         int err, i;
553
554         percpu_stats = this_cpu_ptr(priv->percpu_stats);
555         percpu_extras = this_cpu_ptr(priv->percpu_extras);
556
557         if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
558                 struct sk_buff *ns;
559
560                 ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
561                 if (unlikely(!ns)) {
562                         percpu_stats->tx_dropped++;
563                         goto err_alloc_headroom;
564                 }
565                 dev_kfree_skb(skb);
566                 skb = ns;
567         }
568
569         /* We'll be holding a back-reference to the skb until Tx Confirmation;
570          * we don't want that overwritten by a concurrent Tx with a cloned skb.
571          */
572         skb = skb_unshare(skb, GFP_ATOMIC);
573         if (unlikely(!skb)) {
574                 /* skb_unshare() has already freed the skb */
575                 percpu_stats->tx_dropped++;
576                 return NETDEV_TX_OK;
577         }
578
579         /* Setup the FD fields */
580         memset(&fd, 0, sizeof(fd));
581
582         if (skb_is_nonlinear(skb)) {
583                 err = build_sg_fd(priv, skb, &fd);
584                 percpu_extras->tx_sg_frames++;
585                 percpu_extras->tx_sg_bytes += skb->len;
586         } else {
587                 err = build_single_fd(priv, skb, &fd);
588         }
589
590         if (unlikely(err)) {
591                 percpu_stats->tx_dropped++;
592                 goto err_build_fd;
593         }
594
595         /* Tracing point */
596         trace_dpaa2_tx_fd(net_dev, &fd);
597
598         /* TxConf FQ selection is primarily based on cpu affinity; this is
599          * non-migratable context, so it's safe to call smp_processor_id().
600          */
601         queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
602         fq = &priv->fq[queue_mapping];
603         for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
604                 err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
605                                                   fq->tx_qdbin, &fd);
606                 if (err != -EBUSY)
607                         break;
608         }
609         percpu_extras->tx_portal_busy += i;
610         if (unlikely(err < 0)) {
611                 percpu_stats->tx_errors++;
612                 /* Clean up everything, including freeing the skb */
613                 free_tx_fd(priv, &fd, NULL);
614         } else {
615                 percpu_stats->tx_packets++;
616                 percpu_stats->tx_bytes += skb->len;
617         }
618
619         return NETDEV_TX_OK;
620
621 err_build_fd:
622 err_alloc_headroom:
623         dev_kfree_skb(skb);
624
625         return NETDEV_TX_OK;
626 }
627
628 /* Tx confirmation frame processing routine */
629 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
630                               struct dpaa2_eth_channel *ch,
631                               const struct dpaa2_fd *fd,
632                               struct napi_struct *napi __always_unused)
633 {
634         struct rtnl_link_stats64 *percpu_stats;
635         struct dpaa2_eth_drv_stats *percpu_extras;
636         u32 status = 0;
637
638         /* Tracing point */
639         trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
640
641         percpu_extras = this_cpu_ptr(priv->percpu_extras);
642         percpu_extras->tx_conf_frames++;
643         percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
644
645         free_tx_fd(priv, fd, &status);
646
647         if (unlikely(status & DPAA2_ETH_TXCONF_ERR_MASK)) {
648                 percpu_stats = this_cpu_ptr(priv->percpu_stats);
649                 /* Tx-conf logically pertains to the egress path. */
650                 percpu_stats->tx_errors++;
651         }
652 }
653
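/* Enable/disable Rx L3 and L4 checksum validation in hardware */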
654 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
655 {
656         int err;
657
658         err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
659                                DPNI_OFF_RX_L3_CSUM, enable);
660         if (err) {
661                 netdev_err(priv->net_dev,
662                            "dpni_set_offload(RX_L3_CSUM) failed\n");
663                 return err;
664         }
665
666         err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
667                                DPNI_OFF_RX_L4_CSUM, enable);
668         if (err) {
669                 netdev_err(priv->net_dev,
670                            "dpni_set_offload(RX_L4_CSUM) failed\n");
671                 return err;
672         }
673
674         return 0;
675 }
676
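/* Enable/disable Tx L3 and L4 checksum generation in hardware */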
677 static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
678 {
679         int err;
680
681         err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
682                                DPNI_OFF_TX_L3_CSUM, enable);
683         if (err) {
684                 netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
685                 return err;
686         }
687
688         err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
689                                DPNI_OFF_TX_L4_CSUM, enable);
690         if (err) {
691                 netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
692                 return err;
693         }
694
695         return 0;
696 }
697
698 /* Perform a single release command to add buffers
699  * to the specified buffer pool
700  */
701 static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
702 {
703         struct device *dev = priv->net_dev->dev.parent;
704         u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
705         void *buf;
706         dma_addr_t addr;
707         int i;
708
709         for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
710                 /* Allocate buffer visible to WRIOP + skb shared info +
711                  * alignment padding
712                  */
713                 buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
714                 if (unlikely(!buf))
715                         goto err_alloc;
716
717                 buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
718
719                 addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
720                                       DMA_FROM_DEVICE);
721                 if (unlikely(dma_mapping_error(dev, addr)))
722                         goto err_map;
723
724                 buf_array[i] = addr;
725
726                 /* Tracing point */
727                 trace_dpaa2_eth_buf_seed(priv->net_dev,
728                                          buf, DPAA2_ETH_BUF_RAW_SIZE,
729                                          addr, DPAA2_ETH_RX_BUF_SIZE,
730                                          bpid);
731         }
732
733 release_bufs:
734         /* In case the portal is busy, retry until successful.
735          * The buffer release function would only fail if the QBMan portal
736          * was busy, which implies portal contention (i.e. more CPUs than
737          * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
738          * there is little we can realistically do, short of giving up -
739          * in which case we'd risk depleting the buffer pool and never again
740          * receiving the Rx interrupt which would kick-start the refill logic.
741          * So just keep retrying, at the risk of being moved to ksoftirqd.
742          */
743         while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
744                 cpu_relax();
745         return i;
746
747 err_map:
748         skb_free_frag(buf);
749 err_alloc:
750         if (i)
751                 goto release_bufs;
752
753         return 0;
754 }
755
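/* Seed the buffer pool with DPAA2_ETH_NUM_BUFS buffers per channel */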
756 static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
757 {
758         int i, j;
759         int new_count;
760
761         /* This is the lazy seeding of Rx buffer pools.
762          * add_bufs() is also used on the Rx hotpath and calls
763          * napi_alloc_frag(). The trouble with that is that it in turn ends up
764          * calling this_cpu_ptr(), which mandates execution in atomic context.
765          * Rather than splitting up the code, do a one-off preempt disable.
766          */
767         preempt_disable();
768         for (j = 0; j < priv->num_channels; j++) {
769                 for (i = 0; i < DPAA2_ETH_NUM_BUFS;
770                      i += DPAA2_ETH_BUFS_PER_CMD) {
771                         new_count = add_bufs(priv, bpid);
772                         priv->channel[j]->buf_count += new_count;
773
774                         if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
775                                 preempt_enable();
776                                 return -ENOMEM;
777                         }
778                 }
779         }
780         preempt_enable();
781
782         return 0;
783 }
784
785 /**
786  * Drain the specified number of buffers from the DPNI's private buffer pool.
787  * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
788  */
789 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
790 {
791         struct device *dev = priv->net_dev->dev.parent;
792         u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
793         void *vaddr;
794         int ret, i;
795
796         do {
797                 ret = dpaa2_io_service_acquire(NULL, priv->dpbp_attrs.bpid,
798                                                buf_array, count);
799                 if (ret < 0) {
800                         netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
801                         return;
802                 }
803                 for (i = 0; i < ret; i++) {
804                         /* Same logic as on regular Rx path */
805                         dma_unmap_single(dev, buf_array[i],
806                                          DPAA2_ETH_RX_BUF_SIZE,
807                                          DMA_FROM_DEVICE);
808                         vaddr = phys_to_virt(buf_array[i]);
809                         skb_free_frag(vaddr);
810                 }
811         } while (ret);
812 }
813
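/* Remove all buffers from the pool and reset the per-channel buffer counts */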
814 static void drain_pool(struct dpaa2_eth_priv *priv)
815 {
816         int i;
817
818         drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
819         drain_bufs(priv, 1);
820
821         for (i = 0; i < priv->num_channels; i++)
822                 priv->channel[i]->buf_count = 0;
823 }
824
825 /* Function is called from softirq context only, so we don't need to guard
826  * the access to the percpu count
827  */
828 static int refill_pool(struct dpaa2_eth_priv *priv,
829                        struct dpaa2_eth_channel *ch,
830                        u16 bpid)
831 {
832         int new_count;
833
834         if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
835                 return 0;
836
837         do {
838                 new_count = add_bufs(priv, bpid);
839                 if (unlikely(!new_count)) {
840                         /* Out of memory; abort for now, we'll try later on */
841                         break;
842                 }
843                 ch->buf_count += new_count;
844         } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
845
846         if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
847                 return -ENOMEM;
848
849         return 0;
850 }
851
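/* Issue a volatile dequeue command for the channel, pulling available frames
 * into its DPIO store; retry while the QBMan portal is busy.
 */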
852 static int pull_channel(struct dpaa2_eth_channel *ch)
853 {
854         int err;
855         int dequeues = -1;
856
857         /* Retry while portal is busy */
858         do {
859                 err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
860                 dequeues++;
861                 cpu_relax();
862         } while (err == -EBUSY);
863
864         ch->stats.dequeue_portal_busy += dequeues;
865         if (unlikely(err))
866                 ch->stats.pull_err++;
867
868         return err;
869 }
870
871 /* NAPI poll routine
872  *
873  * Frames are dequeued from the QMan channel associated with this NAPI context.
874  * Rx, Tx confirmation and (if configured) Rx error frames all count
875  * towards the NAPI budget.
876  */
877 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
878 {
879         struct dpaa2_eth_channel *ch;
880         int cleaned = 0, store_cleaned;
881         struct dpaa2_eth_priv *priv;
882         int err;
883
884         ch = container_of(napi, struct dpaa2_eth_channel, napi);
885         priv = ch->priv;
886
887         while (cleaned < budget) {
888                 err = pull_channel(ch);
889                 if (unlikely(err))
890                         break;
891
892                 /* Refill pool if appropriate */
893                 refill_pool(priv, ch, priv->dpbp_attrs.bpid);
894
895                 store_cleaned = consume_frames(ch);
896                 cleaned += store_cleaned;
897
898                 /* If we have enough budget left for a full store,
899                  * try a new pull dequeue, otherwise we're done here
900                  */
901                 if (store_cleaned == 0 ||
902                     cleaned > budget - DPAA2_ETH_STORE_SIZE)
903                         break;
904         }
905
906         if (cleaned < budget) {
907                 napi_complete_done(napi, cleaned);
908                 /* Re-enable data available notifications */
909                 do {
910                         err = dpaa2_io_service_rearm(NULL, &ch->nctx);
911                         cpu_relax();
912                 } while (err == -EBUSY);
913         }
914
915         ch->stats.frames += cleaned;
916
917         return cleaned;
918 }
919
920 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
921 {
922         struct dpaa2_eth_channel *ch;
923         int i;
924
925         for (i = 0; i < priv->num_channels; i++) {
926                 ch = priv->channel[i];
927                 napi_enable(&ch->napi);
928         }
929 }
930
931 static void disable_ch_napi(struct dpaa2_eth_priv *priv)
932 {
933         struct dpaa2_eth_channel *ch;
934         int i;
935
936         for (i = 0; i < priv->num_channels; i++) {
937                 ch = priv->channel[i];
938                 napi_disable(&ch->napi);
939         }
940 }
941
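/* Read the current link state from the MC firmware and update the carrier
 * state and Tx queues accordingly.
 */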
942 static int link_state_update(struct dpaa2_eth_priv *priv)
943 {
944         struct dpni_link_state state;
945         int err;
946
947         err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
948         if (unlikely(err)) {
949                 netdev_err(priv->net_dev,
950                            "dpni_get_link_state() failed\n");
951                 return err;
952         }
953
954         /* Check link state; speed/duplex changes are not handled yet */
955         if (priv->link_state.up == state.up)
956                 return 0;
957
958         priv->link_state = state;
959         if (state.up) {
960                 netif_carrier_on(priv->net_dev);
961                 netif_tx_start_all_queues(priv->net_dev);
962         } else {
963                 netif_tx_stop_all_queues(priv->net_dev);
964                 netif_carrier_off(priv->net_dev);
965         }
966
967         netdev_info(priv->net_dev, "Link Event: state %s\n",
968                     state.up ? "up" : "down");
969
970         return 0;
971 }
972
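/* ndo_open callback: seed the buffer pool, enable NAPI and the DPNI, then
 * learn the initial link state.
 */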
973 static int dpaa2_eth_open(struct net_device *net_dev)
974 {
975         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
976         int err;
977
978         err = seed_pool(priv, priv->dpbp_attrs.bpid);
979         if (err) {
980                 /* Not much to do; the buffer pool, though not filled up,
981                  * may still contain some buffers which would enable us
982                  * to limp on.
983                  */
984                 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
985                            priv->dpbp_dev->obj_desc.id, priv->dpbp_attrs.bpid);
986         }
987
988         /* We'll only start the txqs when the link is actually ready; make sure
989          * we don't race against the link up notification, which may come
990          * immediately after dpni_enable();
991          */
992         netif_tx_stop_all_queues(net_dev);
993         enable_ch_napi(priv);
994         /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
995          * return true and cause 'ip link show' to report the LOWER_UP flag,
996          * even though the link notification wasn't even received.
997          */
998         netif_carrier_off(net_dev);
999
1000         err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1001         if (err < 0) {
1002                 netdev_err(net_dev, "dpni_enable() failed\n");
1003                 goto enable_err;
1004         }
1005
1006         /* If the DPMAC object has already processed the link up interrupt,
1007          * we have to learn the link state ourselves.
1008          */
1009         err = link_state_update(priv);
1010         if (err < 0) {
1011                 netdev_err(net_dev, "Can't update link state\n");
1012                 goto link_state_err;
1013         }
1014
1015         return 0;
1016
1017 link_state_err:
1018 enable_err:
1019         disable_ch_napi(priv);
1020         drain_pool(priv);
1021         return err;
1022 }
1023
1024 /* The DPIO store must be empty when we call this,
1025  * at the end of every NAPI cycle.
1026  */
1027 static u32 drain_channel(struct dpaa2_eth_priv *priv,
1028                          struct dpaa2_eth_channel *ch)
1029 {
1030         u32 drained = 0, total = 0;
1031
1032         do {
1033                 pull_channel(ch);
1034                 drained = consume_frames(ch);
1035                 total += drained;
1036         } while (drained);
1037
1038         return total;
1039 }
1040
1041 static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
1042 {
1043         struct dpaa2_eth_channel *ch;
1044         int i;
1045         u32 drained = 0;
1046
1047         for (i = 0; i < priv->num_channels; i++) {
1048                 ch = priv->channel[i];
1049                 drained += drain_channel(priv, ch);
1050         }
1051
1052         return drained;
1053 }
1054
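/* ndo_stop callback: disable the DPNI, stop NAPI, then manually drain the
 * remaining ingress frames and empty the buffer pool.
 */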
1055 static int dpaa2_eth_stop(struct net_device *net_dev)
1056 {
1057         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1058         int dpni_enabled;
1059         int retries = 10;
1060         u32 drained;
1061
1062         netif_tx_stop_all_queues(net_dev);
1063         netif_carrier_off(net_dev);
1064
1065         /* Loop while dpni_disable() attempts to drain the egress FQs
1066          * and confirm them back to us.
1067          */
1068         do {
1069                 dpni_disable(priv->mc_io, 0, priv->mc_token);
1070                 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1071                 if (dpni_enabled)
1072                         /* Allow the hardware some slack */
1073                         msleep(100);
1074         } while (dpni_enabled && --retries);
1075         if (!retries) {
1076                 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1077                 /* Must go on and disable NAPI nonetheless, so we don't crash at
1078                  * the next "ifconfig up"
1079                  */
1080         }
1081
1082         /* Wait for NAPI to complete on every core and disable it.
1083          * In particular, this will also prevent NAPI from being rescheduled if
1084          * a new CDAN is serviced, effectively discarding the CDAN. We therefore
1085          * don't even need to disarm the channels, except perhaps for the case
1086          * of a huge coalescing value.
1087          */
1088         disable_ch_napi(priv);
1089
1090         /* Manually drain the Rx and TxConf queues */
1091         drained = drain_ingress_frames(priv);
1092         if (drained)
1093                 netdev_dbg(net_dev, "Drained %d frames.\n", drained);
1094
1095         /* Empty the buffer pool */
1096         drain_pool(priv);
1097
1098         return 0;
1099 }
1100
1101 static int dpaa2_eth_init(struct net_device *net_dev)
1102 {
1103         u64 supported = 0;
1104         u64 not_supported = 0;
1105         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1106         u32 options = priv->dpni_attrs.options;
1107
1108         /* Capabilities listing */
1109         supported |= IFF_LIVE_ADDR_CHANGE;
1110
1111         if (options & DPNI_OPT_NO_MAC_FILTER)
1112                 not_supported |= IFF_UNICAST_FLT;
1113         else
1114                 supported |= IFF_UNICAST_FLT;
1115
1116         net_dev->priv_flags |= supported;
1117         net_dev->priv_flags &= ~not_supported;
1118
1119         /* Features */
1120         net_dev->features = NETIF_F_RXCSUM |
1121                             NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1122                             NETIF_F_SG | NETIF_F_HIGHDMA |
1123                             NETIF_F_LLTX;
1124         net_dev->hw_features = net_dev->features;
1125
1126         return 0;
1127 }
1128
1129 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1130 {
1131         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1132         struct device *dev = net_dev->dev.parent;
1133         int err;
1134
1135         err = eth_mac_addr(net_dev, addr);
1136         if (err < 0) {
1137                 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1138                 return err;
1139         }
1140
1141         err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1142                                         net_dev->dev_addr);
1143         if (err) {
1144                 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1145                 return err;
1146         }
1147
1148         return 0;
1149 }
1150
1151 /** Fill in counters maintained by the GPP driver. These may be different from
1152  * the hardware counters obtained by ethtool.
1153  */
1154 void dpaa2_eth_get_stats(struct net_device *net_dev,
1155                          struct rtnl_link_stats64 *stats)
1156 {
1157         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1158         struct rtnl_link_stats64 *percpu_stats;
1159         u64 *cpustats;
1160         u64 *netstats = (u64 *)stats;
1161         int i, j;
1162         int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1163
1164         for_each_possible_cpu(i) {
1165                 percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1166                 cpustats = (u64 *)percpu_stats;
1167                 for (j = 0; j < num; j++)
1168                         netstats[j] += cpustats[j];
1169         }
1170 }
1171
1172 static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
1173 {
1174         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1175         int err;
1176
1177         /* Set the maximum Rx frame length to match the transmit side;
1178          * account for L2 headers when computing the MFL
1179          */
1180         err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
1181                                         (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
1182         if (err) {
1183                 netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
1184                 return err;
1185         }
1186
1187         net_dev->mtu = mtu;
1188         return 0;
1189 }
1190
1191 /* Copy mac unicast addresses from @net_dev to @priv.
1192  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1193  */
1194 static void add_uc_hw_addr(const struct net_device *net_dev,
1195                            struct dpaa2_eth_priv *priv)
1196 {
1197         struct netdev_hw_addr *ha;
1198         int err;
1199
1200         netdev_for_each_uc_addr(ha, net_dev) {
1201                 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1202                                         ha->addr);
1203                 if (err)
1204                         netdev_warn(priv->net_dev,
1205                                     "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1206                                     ha->addr, err);
1207         }
1208 }
1209
1210 /* Copy mac multicast addresses from @net_dev to @priv
1211  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1212  */
1213 static void add_mc_hw_addr(const struct net_device *net_dev,
1214                            struct dpaa2_eth_priv *priv)
1215 {
1216         struct netdev_hw_addr *ha;
1217         int err;
1218
1219         netdev_for_each_mc_addr(ha, net_dev) {
1220                 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1221                                         ha->addr);
1222                 if (err)
1223                         netdev_warn(priv->net_dev,
1224                                     "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1225                                     ha->addr, err);
1226         }
1227 }
1228
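/* ndo_set_rx_mode callback: rebuild the MAC filtering tables and adjust
 * unicast/multicast promiscuous mode to match the net_device flags and
 * address lists.
 */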
1229 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
1230 {
1231         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1232         int uc_count = netdev_uc_count(net_dev);
1233         int mc_count = netdev_mc_count(net_dev);
1234         u8 max_mac = priv->dpni_attrs.mac_filter_entries;
1235         u32 options = priv->dpni_attrs.options;
1236         u16 mc_token = priv->mc_token;
1237         struct fsl_mc_io *mc_io = priv->mc_io;
1238         int err;
1239
1240         /* Basic sanity checks; these probably indicate a misconfiguration */
1241         if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
1242                 netdev_info(net_dev,
1243                             "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
1244                             max_mac);
1245
1246         /* Force promiscuous if the uc or mc counts exceed our capabilities. */
1247         if (uc_count > max_mac) {
1248                 netdev_info(net_dev,
1249                             "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
1250                             uc_count, max_mac);
1251                 goto force_promisc;
1252         }
1253         if (mc_count + uc_count > max_mac) {
1254                 netdev_info(net_dev,
1255                             "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
1256                             uc_count + mc_count, max_mac);
1257                 goto force_mc_promisc;
1258         }
1259
1260         /* Adjust promisc settings due to flag combinations */
1261         if (net_dev->flags & IFF_PROMISC)
1262                 goto force_promisc;
1263         if (net_dev->flags & IFF_ALLMULTI) {
1264                 /* First, rebuild unicast filtering table. This should be done
1265                  * in promisc mode, in order to avoid frame loss while we
1266                  * progressively add entries to the table.
1267                  * We don't know whether we had been in promisc already, and
1268                  * making an MC call to find out is expensive; so set uc promisc
1269                  * nonetheless.
1270                  */
1271                 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1272                 if (err)
1273                         netdev_warn(net_dev, "Can't set uc promisc\n");
1274
1275                 /* Actual uc table reconstruction. */
1276                 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
1277                 if (err)
1278                         netdev_warn(net_dev, "Can't clear uc filters\n");
1279                 add_uc_hw_addr(net_dev, priv);
1280
1281                 /* Finally, clear uc promisc and set mc promisc as requested. */
1282                 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1283                 if (err)
1284                         netdev_warn(net_dev, "Can't clear uc promisc\n");
1285                 goto force_mc_promisc;
1286         }
1287
1288         /* Neither unicast nor multicast promisc will be on... eventually.
1289          * For now, rebuild mac filtering tables while forcing both of them on.
1290          */
1291         err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1292         if (err)
1293                 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
1294         err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1295         if (err)
1296                 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
1297
1298         /* Actual mac filtering tables reconstruction */
1299         err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
1300         if (err)
1301                 netdev_warn(net_dev, "Can't clear mac filters\n");
1302         add_mc_hw_addr(net_dev, priv);
1303         add_uc_hw_addr(net_dev, priv);
1304
1305         /* Now we can clear both ucast and mcast promisc, without the risk
1306          * of dropping legitimate frames anymore.
1307          */
1308         err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1309         if (err)
1310                 netdev_warn(net_dev, "Can't clear ucast promisc\n");
1311         err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
1312         if (err)
1313                 netdev_warn(net_dev, "Can't clear mcast promisc\n");
1314
1315         return;
1316
1317 force_promisc:
1318         err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1319         if (err)
1320                 netdev_warn(net_dev, "Can't set ucast promisc\n");
1321 force_mc_promisc:
1322         err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1323         if (err)
1324                 netdev_warn(net_dev, "Can't set mcast promisc\n");
1325 }
1326
1327 static int dpaa2_eth_set_features(struct net_device *net_dev,
1328                                   netdev_features_t features)
1329 {
1330         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1331         netdev_features_t changed = features ^ net_dev->features;
1332         bool enable;
1333         int err;
1334
1335         if (changed & NETIF_F_RXCSUM) {
1336                 enable = !!(features & NETIF_F_RXCSUM);
1337                 err = set_rx_csum(priv, enable);
1338                 if (err)
1339                         return err;
1340         }
1341
1342         if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1343                 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
1344                 err = set_tx_csum(priv, enable);
1345                 if (err)
1346                         return err;
1347         }
1348
1349         return 0;
1350 }
1351
1352 static const struct net_device_ops dpaa2_eth_ops = {
1353         .ndo_open = dpaa2_eth_open,
1354         .ndo_start_xmit = dpaa2_eth_tx,
1355         .ndo_stop = dpaa2_eth_stop,
1356         .ndo_init = dpaa2_eth_init,
1357         .ndo_set_mac_address = dpaa2_eth_set_addr,
1358         .ndo_get_stats64 = dpaa2_eth_get_stats,
1359         .ndo_change_mtu = dpaa2_eth_change_mtu,
1360         .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
1361         .ndo_set_features = dpaa2_eth_set_features,
1362 };
1363
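/* Data availability notification (CDAN) callback: schedule NAPI on the
 * channel that received the notification.
 */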
1364 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
1365 {
1366         struct dpaa2_eth_channel *ch;
1367
1368         ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
1369
1370         /* Update NAPI statistics */
1371         ch->stats.cdan++;
1372
1373         napi_schedule_irqoff(&ch->napi);
1374 }
1375
1376 /* Allocate and configure a DPCON object */
1377 static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
1378 {
1379         struct fsl_mc_device *dpcon;
1380         struct device *dev = priv->net_dev->dev.parent;
1381         struct dpcon_attr attrs;
1382         int err;
1383
1384         err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
1385                                      FSL_MC_POOL_DPCON, &dpcon);
1386         if (err) {
1387                 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
1388                 return NULL;
1389         }
1390
1391         err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
1392         if (err) {
1393                 dev_err(dev, "dpcon_open() failed\n");
1394                 goto err_open;
1395         }
1396
1397         err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
1398         if (err) {
1399                 dev_err(dev, "dpcon_reset() failed\n");
1400                 goto err_reset;
1401         }
1402
1403         err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
1404         if (err) {
1405                 dev_err(dev, "dpcon_get_attributes() failed\n");
1406                 goto err_get_attr;
1407         }
1408
1409         err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
1410         if (err) {
1411                 dev_err(dev, "dpcon_enable() failed\n");
1412                 goto err_enable;
1413         }
1414
1415         return dpcon;
1416
1417 err_enable:
1418 err_get_attr:
1419 err_reset:
1420         dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1421 err_open:
1422         fsl_mc_object_free(dpcon);
1423
1424         return NULL;
1425 }
1426
1427 static void free_dpcon(struct dpaa2_eth_priv *priv,
1428                        struct fsl_mc_device *dpcon)
1429 {
1430         dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
1431         dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1432         fsl_mc_object_free(dpcon);
1433 }
1434
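/* Allocate a software channel structure and its backing DPCON object */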
1435 static struct dpaa2_eth_channel *
1436 alloc_channel(struct dpaa2_eth_priv *priv)
1437 {
1438         struct dpaa2_eth_channel *channel;
1439         struct dpcon_attr attr;
1440         struct device *dev = priv->net_dev->dev.parent;
1441         int err;
1442
1443         channel = kzalloc(sizeof(*channel), GFP_KERNEL);
1444         if (!channel)
1445                 return NULL;
1446
1447         channel->dpcon = setup_dpcon(priv);
1448         if (!channel->dpcon)
1449                 goto err_setup;
1450
1451         err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
1452                                    &attr);
1453         if (err) {
1454                 dev_err(dev, "dpcon_get_attributes() failed\n");
1455                 goto err_get_attr;
1456         }
1457
1458         channel->dpcon_id = attr.id;
1459         channel->ch_id = attr.qbman_ch_id;
1460         channel->priv = priv;
1461
1462         return channel;
1463
1464 err_get_attr:
1465         free_dpcon(priv, channel->dpcon);
1466 err_setup:
1467         kfree(channel);
1468         return NULL;
1469 }
1470
1471 static void free_channel(struct dpaa2_eth_priv *priv,
1472                          struct dpaa2_eth_channel *channel)
1473 {
1474         free_dpcon(priv, channel->dpcon);
1475         kfree(channel);
1476 }
1477
1478 /* DPIO setup: allocate and configure QBMan channels, setup core affinity
1479  * and register data availability notifications
1480  */
1481 static int setup_dpio(struct dpaa2_eth_priv *priv)
1482 {
1483         struct dpaa2_io_notification_ctx *nctx;
1484         struct dpaa2_eth_channel *channel;
1485         struct dpcon_notification_cfg dpcon_notif_cfg;
1486         struct device *dev = priv->net_dev->dev.parent;
1487         int i, err;
1488
1489         /* We want the ability to spread ingress traffic (RX, TX conf) to as
1490          * many cores as possible, so we need one channel for each core
1491          * (unless there are fewer queues than cores, in which case the extra
1492          * channels would be wasted).
1493          * Allocate one channel per core and register it to the core's
1494          * affine DPIO. If not enough channels are available for all cores
1495          * or if some cores don't have an affine DPIO, there will be no
1496          * ingress frame processing on those cores.
1497          */
1498         cpumask_clear(&priv->dpio_cpumask);
1499         for_each_online_cpu(i) {
1500                 /* Try to allocate a channel */
1501                 channel = alloc_channel(priv);
1502                 if (!channel) {
1503                         dev_info(dev,
1504                                  "No affine channel for cpu %d and above\n", i);
1505                         goto err_alloc_ch;
1506                 }
1507
1508                 priv->channel[priv->num_channels] = channel;
1509
1510                 nctx = &channel->nctx;
1511                 nctx->is_cdan = 1;
1512                 nctx->cb = cdan_cb;
1513                 nctx->id = channel->ch_id;
1514                 nctx->desired_cpu = i;
1515
1516                 /* Register the new context */
1517                 err = dpaa2_io_service_register(NULL, nctx);
1518                 if (err) {
1519                         dev_info(dev, "No affine DPIO for cpu %d\n", i);
1520                         /* If no affine DPIO for this core, there's probably
1521                          * none available for next cores either.
1522                          */
1523                         goto err_service_reg;
1524                 }
1525
1526                 /* Register DPCON notification with MC */
1527                 dpcon_notif_cfg.dpio_id = nctx->dpio_id;
1528                 dpcon_notif_cfg.priority = 0;
1529                 dpcon_notif_cfg.user_ctx = nctx->qman64;
1530                 err = dpcon_set_notification(priv->mc_io, 0,
1531                                              channel->dpcon->mc_handle,
1532                                              &dpcon_notif_cfg);
1533                 if (err) {
1534                         dev_err(dev, "dpcon_set_notification() failed\n");
1535                         goto err_set_cdan;
1536                 }
1537
1538                 /* If we managed to allocate a channel and also found an affine
1539                  * DPIO for this core, add it to the final mask
1540                  */
1541                 cpumask_set_cpu(i, &priv->dpio_cpumask);
1542                 priv->num_channels++;
1543
1544                 /* Stop if we already have enough channels to accommodate all
1545                  * RX and TX conf queues
1546                  */
1547                 if (priv->num_channels == dpaa2_eth_queue_count(priv))
1548                         break;
1549         }
1550
1551         return 0;
1552
1553 err_set_cdan:
1554         dpaa2_io_service_deregister(NULL, nctx);
1555 err_service_reg:
1556         free_channel(priv, channel);
1557 err_alloc_ch:
1558         if (cpumask_empty(&priv->dpio_cpumask)) {
1559                 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
1560                 return -ENODEV;
1561         }
1562
1563         dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
1564                  cpumask_pr_args(&priv->dpio_cpumask));
1565
1566         return 0;
1567 }
1568
1569 static void free_dpio(struct dpaa2_eth_priv *priv)
1570 {
1571         int i;
1572         struct dpaa2_eth_channel *ch;
1573
1574         /* deregister CDAN notifications and free channels */
1575         for (i = 0; i < priv->num_channels; i++) {
1576                 ch = priv->channel[i];
1577                 dpaa2_io_service_deregister(NULL, &ch->nctx);
1578                 free_channel(priv, ch);
1579         }
1580 }
1581
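     /* Look up the channel whose notification context was registered for the
      * given CPU (see setup_dpio()).
      */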
1582 static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
1583                                                     int cpu)
1584 {
1585         struct device *dev = priv->net_dev->dev.parent;
1586         int i;
1587
1588         for (i = 0; i < priv->num_channels; i++)
1589                 if (priv->channel[i]->nctx.desired_cpu == cpu)
1590                         return priv->channel[i];
1591
1592         /* We should never get here. Issue a warning and return
1593          * the first channel, because it's still better than nothing
1594          */
1595         dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
1596
1597         return priv->channel[0];
1598 }
1599
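     /* Assign Rx and Tx confirmation FQs to CPUs (and thus to channels) in a
      * round-robin fashion over the CPUs that actually got an affine channel.
      */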
1600 static void set_fq_affinity(struct dpaa2_eth_priv *priv)
1601 {
1602         struct device *dev = priv->net_dev->dev.parent;
1603         struct dpaa2_eth_fq *fq;
1604         int rx_cpu, txc_cpu;
1605         int i;
1606
1607         /* For each FQ, pick one channel/CPU to deliver frames to.
1608          * This may well change at runtime, either through irqbalance or
1609          * through direct user intervention.
1610          */
1611         rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
1612
1613         for (i = 0; i < priv->num_fqs; i++) {
1614                 fq = &priv->fq[i];
1615                 switch (fq->type) {
1616                 case DPAA2_RX_FQ:
1617                         fq->target_cpu = rx_cpu;
1618                         rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
1619                         if (rx_cpu >= nr_cpu_ids)
1620                                 rx_cpu = cpumask_first(&priv->dpio_cpumask);
1621                         break;
1622                 case DPAA2_TX_CONF_FQ:
1623                         fq->target_cpu = txc_cpu;
1624                         txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
1625                         if (txc_cpu >= nr_cpu_ids)
1626                                 txc_cpu = cpumask_first(&priv->dpio_cpumask);
1627                         break;
1628                 default:
1629                         dev_err(dev, "Unknown FQ type: %d\n", fq->type);
1630                 }
1631                 fq->channel = get_affine_channel(priv, fq->target_cpu);
1632         }
1633 }
1634
1635 static void setup_fqs(struct dpaa2_eth_priv *priv)
1636 {
1637         int i;
1638
1639         /* We have one TxConf FQ per Tx flow.
1640          * The number of Tx and Rx queues is the same.
1641          * Tx confirmation queues come first in the fq array.
1642          */
1643         for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
1644                 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
1645                 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
1646                 priv->fq[priv->num_fqs++].flowid = (u16)i;
1647         }
1648
1649         for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
1650                 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
1651                 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
1652                 priv->fq[priv->num_fqs++].flowid = (u16)i;
1653         }
1654
1655         /* For each FQ, decide on which core to process incoming frames */
1656         set_fq_affinity(priv);
1657 }
1658
1659 /* Allocate and configure one buffer pool for each interface */
1660 static int setup_dpbp(struct dpaa2_eth_priv *priv)
1661 {
1662         int err;
1663         struct fsl_mc_device *dpbp_dev;
1664         struct device *dev = priv->net_dev->dev.parent;
1665
1666         err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
1667                                      &dpbp_dev);
1668         if (err) {
1669                 dev_err(dev, "DPBP device allocation failed\n");
1670                 return err;
1671         }
1672
1673         priv->dpbp_dev = dpbp_dev;
1674
1675         err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
1676                         &dpbp_dev->mc_handle);
1677         if (err) {
1678                 dev_err(dev, "dpbp_open() failed\n");
1679                 goto err_open;
1680         }
1681
1682         err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
1683         if (err) {
1684                 dev_err(dev, "dpbp_enable() failed\n");
1685                 goto err_enable;
1686         }
1687
1688         err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
1689                                   &priv->dpbp_attrs);
1690         if (err) {
1691                 dev_err(dev, "dpbp_get_attributes() failed\n");
1692                 goto err_get_attr;
1693         }
1694
1695         return 0;
1696
1697 err_get_attr:
1698         dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
1699 err_enable:
1700         dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
1701 err_open:
1702         fsl_mc_object_free(dpbp_dev);
1703
1704         return err;
1705 }
1706
1707 static void free_dpbp(struct dpaa2_eth_priv *priv)
1708 {
1709         drain_pool(priv);
1710         dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
1711         dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
1712         fsl_mc_object_free(priv->dpbp_dev);
1713 }
1714
1715 /* Configure the DPNI object this interface is associated with */
1716 static int setup_dpni(struct fsl_mc_device *ls_dev)
1717 {
1718         struct device *dev = &ls_dev->dev;
1719         struct dpaa2_eth_priv *priv;
1720         struct net_device *net_dev;
1721         int err;
1722
1723         net_dev = dev_get_drvdata(dev);
1724         priv = netdev_priv(net_dev);
1725
1726         priv->dpni_id = ls_dev->obj_desc.id;
1727
1728         /* get a handle for the DPNI object */
1729         err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token);
1730         if (err) {
1731                 dev_err(dev, "dpni_open() failed\n");
1732                 goto err_open;
1733         }
1734
1735         ls_dev->mc_io = priv->mc_io;
1736         ls_dev->mc_handle = priv->mc_token;
1737
1738         err = dpni_reset(priv->mc_io, 0, priv->mc_token);
1739         if (err) {
1740                 dev_err(dev, "dpni_reset() failed\n");
1741                 goto err_reset;
1742         }
1743
1744         err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
1745                                   &priv->dpni_attrs);
1746         if (err) {
1747                 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
1748                 goto err_get_attr;
1749         }
1750
1751         /* Configure buffer layouts */
1752         /* rx buffer */
1753         priv->buf_layout.pass_parser_result = true;
1754         priv->buf_layout.pass_frame_status = true;
1755         priv->buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
1756         priv->buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
1757         priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
1758                                    DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
1759                                    DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
1760                                    DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
1761         err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1762                                      DPNI_QUEUE_RX, &priv->buf_layout);
1763         if (err) {
1764                 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
1765                 goto err_buf_layout;
1766         }
1767
1768         /* tx buffer */
1769         priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
1770                                    DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
1771         err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1772                                      DPNI_QUEUE_TX, &priv->buf_layout);
1773         if (err) {
1774                 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
1775                 goto err_buf_layout;
1776         }
1777
1778         /* tx-confirm buffer */
1779         priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
1780         err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1781                                      DPNI_QUEUE_TX_CONFIRM, &priv->buf_layout);
1782         if (err) {
1783                 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
1784                 goto err_buf_layout;
1785         }
1786
1787         /* Now that we've set our tx buffer layout, retrieve the minimum
1788          * required tx data offset.
1789          */
1790         err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
1791                                       &priv->tx_data_offset);
1792         if (err) {
1793                 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
1794                 goto err_data_offset;
1795         }
1796
1797         if ((priv->tx_data_offset % 64) != 0)
1798                 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
1799                          priv->tx_data_offset);
1800
1801         /* Accommodate software annotation space (SWA) */
1802         priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
1803
1804         return 0;
1805
1806 err_data_offset:
1807 err_buf_layout:
1808 err_get_attr:
1809 err_reset:
1810         dpni_close(priv->mc_io, 0, priv->mc_token);
1811 err_open:
1812         return err;
1813 }
1814
1815 static void free_dpni(struct dpaa2_eth_priv *priv)
1816 {
1817         int err;
1818
1819         err = dpni_reset(priv->mc_io, 0, priv->mc_token);
1820         if (err)
1821                 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
1822                             err);
1823
1824         dpni_close(priv->mc_io, 0, priv->mc_token);
1825 }
1826
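     /* Point an Rx FQ at its affine channel's DPCON and arm per-queue
      * taildrop (DPAA2_ETH_TAILDROP_THRESH) on it.
      */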
1827 static int setup_rx_flow(struct dpaa2_eth_priv *priv,
1828                          struct dpaa2_eth_fq *fq)
1829 {
1830         struct device *dev = priv->net_dev->dev.parent;
1831         struct dpni_queue queue;
1832         struct dpni_queue_id qid;
1833         struct dpni_taildrop td;
1834         int err;
1835
1836         err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1837                              DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
1838         if (err) {
1839                 dev_err(dev, "dpni_get_queue(RX) failed\n");
1840                 return err;
1841         }
1842
1843         fq->fqid = qid.fqid;
1844
1845         queue.destination.id = fq->channel->dpcon_id;
1846         queue.destination.type = DPNI_DEST_DPCON;
1847         queue.destination.priority = 1;
1848         queue.user_context = (u64)fq;
1849         err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
1850                              DPNI_QUEUE_RX, 0, fq->flowid,
1851                              DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
1852                              &queue);
1853         if (err) {
1854                 dev_err(dev, "dpni_set_queue(RX) failed\n");
1855                 return err;
1856         }
1857
1858         td.enable = 1;
1859         td.threshold = DPAA2_ETH_TAILDROP_THRESH;
1860         err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
1861                                 DPNI_QUEUE_RX, 0, fq->flowid, &td);
1862         if (err) {
1863                 dev_err(dev, "dpni_set_taildrop() failed\n");
1864                 return err;
1865         }
1866
1867         return 0;
1868 }
1869
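     /* For Tx we need the queue's qdbin for enqueue, plus the matching Tx
      * confirmation queue, which is delivered to the affine channel just like
      * Rx frames.
      */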
1870 static int setup_tx_flow(struct dpaa2_eth_priv *priv,
1871                          struct dpaa2_eth_fq *fq)
1872 {
1873         struct device *dev = priv->net_dev->dev.parent;
1874         struct dpni_queue queue;
1875         struct dpni_queue_id qid;
1876         int err;
1877
1878         err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1879                              DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
1880         if (err) {
1881                 dev_err(dev, "dpni_get_queue(TX) failed\n");
1882                 return err;
1883         }
1884
1885         fq->tx_qdbin = qid.qdbin;
1886
1887         err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1888                              DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
1889                              &queue, &qid);
1890         if (err) {
1891                 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
1892                 return err;
1893         }
1894
1895         fq->fqid = qid.fqid;
1896
1897         queue.destination.id = fq->channel->dpcon_id;
1898         queue.destination.type = DPNI_DEST_DPCON;
1899         queue.destination.priority = 0;
1900         queue.user_context = (u64)fq;
1901         err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
1902                              DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
1903                              DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
1904                              &queue);
1905         if (err) {
1906                 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
1907                 return err;
1908         }
1909
1910         return 0;
1911 }
1912
1913 /* Hash key is a 5-tuple: IPsrc, IPdst, IPnextproto, L4src, L4dst */
1914 static const struct dpaa2_eth_hash_fields hash_fields[] = {
1915         {
1916                 /* IP header */
1917                 .rxnfc_field = RXH_IP_SRC,
1918                 .cls_prot = NET_PROT_IP,
1919                 .cls_field = NH_FLD_IP_SRC,
1920                 .size = 4,
1921         }, {
1922                 .rxnfc_field = RXH_IP_DST,
1923                 .cls_prot = NET_PROT_IP,
1924                 .cls_field = NH_FLD_IP_DST,
1925                 .size = 4,
1926         }, {
1927                 .rxnfc_field = RXH_L3_PROTO,
1928                 .cls_prot = NET_PROT_IP,
1929                 .cls_field = NH_FLD_IP_PROTO,
1930                 .size = 1,
1931         }, {
1932                 /* Using UDP ports, this is functionally equivalent to raw
1933                  * byte pairs from L4 header.
1934                  */
1935                 .rxnfc_field = RXH_L4_B_0_1,
1936                 .cls_prot = NET_PROT_UDP,
1937                 .cls_field = NH_FLD_UDP_PORT_SRC,
1938                 .size = 2,
1939         }, {
1940                 .rxnfc_field = RXH_L4_B_2_3,
1941                 .cls_prot = NET_PROT_UDP,
1942                 .cls_field = NH_FLD_UDP_PORT_DST,
1943                 .size = 2,
1944         },
1945 };
1946
1947 /* Set RX hash options
1948  * flags is a combination of RXH_ bits
1949  */
1950 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
1951 {
1952         struct device *dev = net_dev->dev.parent;
1953         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1954         struct dpkg_profile_cfg cls_cfg;
1955         struct dpni_rx_tc_dist_cfg dist_cfg;
1956         u8 *dma_mem;
1957         int i;
1958         int err = 0;
1959
1960         if (!dpaa2_eth_hash_enabled(priv)) {
1961                 dev_err(dev, "Hashing support is not enabled\n");
1962                 return -EOPNOTSUPP;
1963         }
1964
1965         memset(&cls_cfg, 0, sizeof(cls_cfg));
1966
1967         for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
1968                 struct dpkg_extract *key =
1969                         &cls_cfg.extracts[cls_cfg.num_extracts];
1970
1971                 if (!(flags & hash_fields[i].rxnfc_field))
1972                         continue;
1973
1974                 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
1975                         dev_err(dev, "error adding key extraction rule, too many rules?\n");
1976                         return -E2BIG;
1977                 }
1978
1979                 key->type = DPKG_EXTRACT_FROM_HDR;
1980                 key->extract.from_hdr.prot = hash_fields[i].cls_prot;
1981                 key->extract.from_hdr.type = DPKG_FULL_FIELD;
1982                 key->extract.from_hdr.field = hash_fields[i].cls_field;
1983                 cls_cfg.num_extracts++;
1984
1985                 priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
1986         }
1987
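             /* The extracted fields are serialized into a key profile placed
              * in DMA-able memory and handed to the MC through
              * dpni_set_rx_tc_dist() below.
              */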
1988         dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
1989         if (!dma_mem)
1990                 return -ENOMEM;
1991
1992         err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
1993         if (err) {
1994                 dev_err(dev, "dpni_prepare_key_cfg() error %d\n", err);
1995                 goto err_prep_key;
1996         }
1997
1998         memset(&dist_cfg, 0, sizeof(dist_cfg));
1999
2000         /* Prepare for setting the rx dist */
2001         dist_cfg.key_cfg_iova = dma_map_single(net_dev->dev.parent, dma_mem,
2002                                                DPAA2_CLASSIFIER_DMA_SIZE,
2003                                                DMA_TO_DEVICE);
2004         if (dma_mapping_error(net_dev->dev.parent, dist_cfg.key_cfg_iova)) {
2005                 dev_err(dev, "DMA mapping failed\n");
2006                 err = -ENOMEM;
2007                 goto err_dma_map;
2008         }
2009
2010         dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2011         dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
2012
2013         err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
2014         dma_unmap_single(net_dev->dev.parent, dist_cfg.key_cfg_iova,
2015                          DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
2016         if (err)
2017                 dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
2018
2019 err_dma_map:
2020 err_prep_key:
2021         kfree(dma_mem);
2022         return err;
2023 }
2024
2025 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
2026  * frame queues and channels
2027  */
2028 static int bind_dpni(struct dpaa2_eth_priv *priv)
2029 {
2030         struct net_device *net_dev = priv->net_dev;
2031         struct device *dev = net_dev->dev.parent;
2032         struct dpni_pools_cfg pools_params;
2033         struct dpni_error_cfg err_cfg;
2034         int err = 0;
2035         int i;
2036
2037         pools_params.num_dpbp = 1;
2038         pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
2039         pools_params.pools[0].backup_pool = 0;
2040         pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
2041         err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
2042         if (err) {
2043                 dev_err(dev, "dpni_set_pools() failed\n");
2044                 return err;
2045         }
2046
2047         /* have the interface implicitly distribute traffic based on supported
2048          * header fields
2049          */
2050         err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
2051         if (err)
2052                 netdev_err(net_dev, "Failed to configure hashing\n");
2053
2054         /* Configure handling of error frames */
2055         err_cfg.errors = DPAA2_ETH_RX_ERR_MASK;
2056         err_cfg.set_frame_annotation = 1;
2057         err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
2058         err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
2059                                        &err_cfg);
2060         if (err) {
2061                 dev_err(dev, "dpni_set_errors_behavior() failed\n");
2062                 return err;
2063         }
2064
2065         /* Configure Rx and Tx conf queues to generate CDANs */
2066         for (i = 0; i < priv->num_fqs; i++) {
2067                 switch (priv->fq[i].type) {
2068                 case DPAA2_RX_FQ:
2069                         err = setup_rx_flow(priv, &priv->fq[i]);
2070                         break;
2071                 case DPAA2_TX_CONF_FQ:
2072                         err = setup_tx_flow(priv, &priv->fq[i]);
2073                         break;
2074                 default:
2075                         dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
2076                         return -EINVAL;
2077                 }
2078                 if (err)
2079                         return err;
2080         }
2081
2082         err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
2083                             DPNI_QUEUE_TX, &priv->tx_qdid);
2084         if (err) {
2085                 dev_err(dev, "dpni_get_qdid() failed\n");
2086                 return err;
2087         }
2088
2089         return 0;
2090 }
2091
2092 /* Allocate rings for storing incoming frame descriptors */
2093 static int alloc_rings(struct dpaa2_eth_priv *priv)
2094 {
2095         struct net_device *net_dev = priv->net_dev;
2096         struct device *dev = net_dev->dev.parent;
2097         int i;
2098
2099         for (i = 0; i < priv->num_channels; i++) {
2100                 priv->channel[i]->store =
2101                         dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
2102                 if (!priv->channel[i]->store) {
2103                         netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
2104                         goto err_ring;
2105                 }
2106         }
2107
2108         return 0;
2109
2110 err_ring:
2111         for (i = 0; i < priv->num_channels; i++) {
2112                 if (!priv->channel[i]->store)
2113                         break;
2114                 dpaa2_io_store_destroy(priv->channel[i]->store);
2115         }
2116
2117         return -ENOMEM;
2118 }
2119
2120 static void free_rings(struct dpaa2_eth_priv *priv)
2121 {
2122         int i;
2123
2124         for (i = 0; i < priv->num_channels; i++)
2125                 dpaa2_io_store_destroy(priv->channel[i]->store);
2126 }
2127
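     /* Netdev registration and MAC address selection. Address precedence:
      * a non-zero address reported by firmware wins, then whatever is already
      * configured on the DPNI, and only as a last resort a random one.
      */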
2128 static int netdev_init(struct net_device *net_dev)
2129 {
2130         int err;
2131         struct device *dev = net_dev->dev.parent;
2132         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2133         u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
2134         u8 bcast_addr[ETH_ALEN];
2135
2136         net_dev->netdev_ops = &dpaa2_eth_ops;
2137
2138         /* Get firmware address, if any */
2139         err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
2140         if (err) {
2141                 dev_err(dev, "dpni_get_port_mac_addr() failed\n");
2142                 return err;
2143         }
2144
2145         /* Get the MAC address currently configured on the DPNI, if any */
2146         err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2147                                         dpni_mac_addr);
2148         if (err) {
2149                 dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)\n", err);
2150                 return err;
2151         }
2152
2153         /* First check if firmware has any address configured by bootloader */
2154         if (!is_zero_ether_addr(mac_addr)) {
2155                 /* If the DPMAC addr != DPNI addr, update it */
2156                 if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
2157                         err = dpni_set_primary_mac_addr(priv->mc_io, 0,
2158                                                         priv->mc_token,
2159                                                         mac_addr);
2160                         if (err) {
2161                                 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
2162                                 return err;
2163                         }
2164                 }
2165                 memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
2166         } else if (is_zero_ether_addr(dpni_mac_addr)) {
2167                 /* Fills in net_dev->dev_addr, as required by
2168                  * register_netdevice()
2169                  */
2170                 eth_hw_addr_random(net_dev);
2171                 /* Make the user aware, without cluttering the boot log */
2172                 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
2173                 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2174                                                 net_dev->dev_addr);
2175                 if (err) {
2176                         dev_err(dev, "dpni_set_primary_mac_addr(): %d\n", err);
2177                         return err;
2178                 }
2179                 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
2180                  * practical purposes, this will be our "permanent" mac address,
2181                  * at least until the next reboot. This move will also permit
2182                  * register_netdevice() to properly fill up net_dev->perm_addr.
2183                  */
2184                 net_dev->addr_assign_type = NET_ADDR_PERM;
2185         } else {
2186                 /* NET_ADDR_PERM is default, all we have to do is
2187                  * fill in the device addr.
2188                  */
2189                 memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
2190         }
2191
2192         /* Explicitly add the broadcast address to the MAC filtering table;
2193          * the MC won't do that for us.
2194          */
2195         eth_broadcast_addr(bcast_addr);
2196         err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
2197         if (err) {
2198                 dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err);
2199                 /* Don't treat this as fatal; at the very least we still have egress traffic */
2200         }
2201
2202         /* Reserve enough space to align buffer as per hardware requirement;
2203          * NOTE: priv->tx_data_offset MUST be initialized at this point.
2204          */
2205         net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
2206
2207         /* Set MTU limits */
2208         net_dev->min_mtu = ETH_MIN_MTU;
2209         net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
2210
2211         /* Our .ndo_init will be called as part of register_netdev() */
2212         err = register_netdev(net_dev);
2213         if (err < 0) {
2214                 dev_err(dev, "register_netdev() failed\n");
2215                 return err;
2216         }
2217
2218         return 0;
2219 }
2220
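     /* Fallback used when no link-state interrupt could be set up: a kthread
      * (started in dpaa2_eth_probe()) polls the MC for link changes every
      * DPAA2_ETH_LINK_STATE_REFRESH milliseconds.
      */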
2221 static int poll_link_state(void *arg)
2222 {
2223         struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
2224         int err;
2225
2226         while (!kthread_should_stop()) {
2227                 err = link_state_update(priv);
2228                 if (unlikely(err))
2229                         return err;
2230
2231                 msleep(DPAA2_ETH_LINK_STATE_REFRESH);
2232         }
2233
2234         return 0;
2235 }
2236
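     /* The hard IRQ handler does no work itself; it just defers to the
      * threaded handler below (IRQ_WAKE_THREAD), which queries and clears the
      * DPNI interrupt status.
      */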
2237 static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
2238 {
2239         return IRQ_WAKE_THREAD;
2240 }
2241
2242 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
2243 {
2244         u32 status, clear = 0;
2245         struct device *dev = (struct device *)arg;
2246         struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
2247         struct net_device *net_dev = dev_get_drvdata(dev);
2248         int err;
2249
2250         err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
2251                                   DPNI_IRQ_INDEX, &status);
2252         if (unlikely(err)) {
2253                 netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
2254                 clear = 0xffffffff;
2255                 goto out;
2256         }
2257
2258         if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
2259                 clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
2260                 link_state_update(netdev_priv(net_dev));
2261         }
2262
2263 out:
2264         dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
2265                               DPNI_IRQ_INDEX, clear);
2266         return IRQ_HANDLED;
2267 }
2268
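     /* Allocate the MC interrupts, install the threaded handler and unmask
      * only the link-state-changed event.
      */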
2269 static int setup_irqs(struct fsl_mc_device *ls_dev)
2270 {
2271         int err = 0;
2272         struct fsl_mc_device_irq *irq;
2273
2274         err = fsl_mc_allocate_irqs(ls_dev);
2275         if (err) {
2276                 dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
2277                 return err;
2278         }
2279
2280         irq = ls_dev->irqs[0];
2281         err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
2282                                         dpni_irq0_handler,
2283                                         dpni_irq0_handler_thread,
2284                                         IRQF_NO_SUSPEND | IRQF_ONESHOT,
2285                                         dev_name(&ls_dev->dev), &ls_dev->dev);
2286         if (err < 0) {
2287                 dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
2288                 goto free_mc_irq;
2289         }
2290
2291         err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
2292                                 DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
2293         if (err < 0) {
2294                 dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
2295                 goto free_irq;
2296         }
2297
2298         err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
2299                                   DPNI_IRQ_INDEX, 1);
2300         if (err < 0) {
2301                 dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
2302                 goto free_irq;
2303         }
2304
2305         return 0;
2306
2307 free_irq:
2308         devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
2309 free_mc_irq:
2310         fsl_mc_free_irqs(ls_dev);
2311
2312         return err;
2313 }
2314
2315 static void add_ch_napi(struct dpaa2_eth_priv *priv)
2316 {
2317         int i;
2318         struct dpaa2_eth_channel *ch;
2319
2320         for (i = 0; i < priv->num_channels; i++) {
2321                 ch = priv->channel[i];
2322                 /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
2323                 netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
2324                                NAPI_POLL_WEIGHT);
2325         }
2326 }
2327
2328 static void del_ch_napi(struct dpaa2_eth_priv *priv)
2329 {
2330         int i;
2331         struct dpaa2_eth_channel *ch;
2332
2333         for (i = 0; i < priv->num_channels; i++) {
2334                 ch = priv->channel[i];
2335                 netif_napi_del(&ch->napi);
2336         }
2337 }
2338
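     /* Probe: allocate the net_device and an MC portal, then bring up the MC
      * objects in dependency order (DPNI, DPIO channels, FQs, DPBP), bind them
      * together, register the netdev and finally set up interrupts, falling
      * back to the link polling thread if that fails. The error labels unwind
      * in reverse order.
      */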
2339 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
2340 {
2341         struct device *dev;
2342         struct net_device *net_dev = NULL;
2343         struct dpaa2_eth_priv *priv = NULL;
2344         int err = 0;
2345
2346         dev = &dpni_dev->dev;
2347
2348         /* Net device */
2349         net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
2350         if (!net_dev) {
2351                 dev_err(dev, "alloc_etherdev_mq() failed\n");
2352                 return -ENOMEM;
2353         }
2354
2355         SET_NETDEV_DEV(net_dev, dev);
2356         dev_set_drvdata(dev, net_dev);
2357
2358         priv = netdev_priv(net_dev);
2359         priv->net_dev = net_dev;
2360
2361         /* Obtain a MC portal */
2362         err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
2363                                      &priv->mc_io);
2364         if (err) {
2365                 dev_err(dev, "MC portal allocation failed\n");
2366                 goto err_portal_alloc;
2367         }
2368
2369         /* MC objects initialization and configuration */
2370         err = setup_dpni(dpni_dev);
2371         if (err)
2372                 goto err_dpni_setup;
2373
2374         err = setup_dpio(priv);
2375         if (err)
2376                 goto err_dpio_setup;
2377
2378         setup_fqs(priv);
2379
2380         err = setup_dpbp(priv);
2381         if (err)
2382                 goto err_dpbp_setup;
2383
2384         err = bind_dpni(priv);
2385         if (err)
2386                 goto err_bind;
2387
2388         /* Add a NAPI context for each channel */
2389         add_ch_napi(priv);
2390
2391         /* Percpu statistics */
2392         priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
2393         if (!priv->percpu_stats) {
2394                 dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
2395                 err = -ENOMEM;
2396                 goto err_alloc_percpu_stats;
2397         }
2398         priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
2399         if (!priv->percpu_extras) {
2400                 dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
2401                 err = -ENOMEM;
2402                 goto err_alloc_percpu_extras;
2403         }
2404
2405         err = netdev_init(net_dev);
2406         if (err)
2407                 goto err_netdev_init;
2408
2409         /* Configure checksum offload based on current interface flags */
2410         err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
2411         if (err)
2412                 goto err_csum;
2413
2414         err = set_tx_csum(priv, !!(net_dev->features &
2415                                    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
2416         if (err)
2417                 goto err_csum;
2418
2419         err = alloc_rings(priv);
2420         if (err)
2421                 goto err_alloc_rings;
2422
2423         net_dev->ethtool_ops = &dpaa2_ethtool_ops;
2424
2425         err = setup_irqs(dpni_dev);
2426         if (err) {
2427                 netdev_warn(net_dev, "Failed to set link interrupt, falling back to polling\n");
2428                 priv->poll_thread = kthread_run(poll_link_state, priv,
2429                                                 "%s_poll_link", net_dev->name);
2430                 if (IS_ERR(priv->poll_thread)) {
2431                         netdev_err(net_dev, "Error starting polling thread\n");
2432                         goto err_poll_thread;
2433                 }
2434                 priv->do_link_poll = true;
2435         }
2436
2437         dev_info(dev, "Probed interface %s\n", net_dev->name);
2438         return 0;
2439
2440 err_poll_thread:
2441         free_rings(priv);
2442 err_alloc_rings:
2443 err_csum:
2444         unregister_netdev(net_dev);
2445 err_netdev_init:
2446         free_percpu(priv->percpu_extras);
2447 err_alloc_percpu_extras:
2448         free_percpu(priv->percpu_stats);
2449 err_alloc_percpu_stats:
2450         del_ch_napi(priv);
2451 err_bind:
2452         free_dpbp(priv);
2453 err_dpbp_setup:
2454         free_dpio(priv);
2455 err_dpio_setup:
2456         free_dpni(priv);
2457 err_dpni_setup:
2458         fsl_mc_portal_free(priv->mc_io);
2459 err_portal_alloc:
2460         dev_set_drvdata(dev, NULL);
2461         free_netdev(net_dev);
2462
2463         return err;
2464 }
2465
2466 static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
2467 {
2468         struct device *dev;
2469         struct net_device *net_dev;
2470         struct dpaa2_eth_priv *priv;
2471
2472         dev = &ls_dev->dev;
2473         net_dev = dev_get_drvdata(dev);
2474         priv = netdev_priv(net_dev);
2475
2476         unregister_netdev(net_dev);
2477         dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
2478
2479         if (priv->do_link_poll)
2480                 kthread_stop(priv->poll_thread);
2481         else
2482                 fsl_mc_free_irqs(ls_dev);
2483
2484         free_rings(priv);
2485         free_percpu(priv->percpu_stats);
2486         free_percpu(priv->percpu_extras);
2487
2488         del_ch_napi(priv);
2489         free_dpbp(priv);
2490         free_dpio(priv);
2491         free_dpni(priv);
2492
2493         fsl_mc_portal_free(priv->mc_io);
2494
2495         dev_set_drvdata(dev, NULL);
2496         free_netdev(net_dev);
2497
2498         return 0;
2499 }
2500
2501 static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
2502         {
2503                 .vendor = FSL_MC_VENDOR_FREESCALE,
2504                 .obj_type = "dpni",
2505         },
2506         { .vendor = 0x0 }
2507 };
2508 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
2509
2510 static struct fsl_mc_driver dpaa2_eth_driver = {
2511         .driver = {
2512                 .name = KBUILD_MODNAME,
2513                 .owner = THIS_MODULE,
2514         },
2515         .probe = dpaa2_eth_probe,
2516         .remove = dpaa2_eth_remove,
2517         .match_id_table = dpaa2_eth_match_id_table
2518 };
2519
2520 module_fsl_mc_driver(dpaa2_eth_driver);