/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker.h"

static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
        {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
        {0, }
};

struct rocker_flow_tbl_key {
        u32 priority;
        enum rocker_of_dpa_table_id tbl_id;
        union {
                struct {
                        u32 in_pport;
                        u32 in_pport_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                } ig_port;
                struct {
                        u32 in_pport;
                        __be16 vlan_id;
                        __be16 vlan_id_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                        bool untagged;
                        __be16 new_vlan_id;
                } vlan;
                struct {
                        u32 in_pport;
                        u32 in_pport_mask;
                        __be16 eth_type;
                        u8 eth_dst[ETH_ALEN];
                        u8 eth_dst_mask[ETH_ALEN];
                        __be16 vlan_id;
                        __be16 vlan_id_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                        bool copy_to_cpu;
                } term_mac;
                struct {
                        __be16 eth_type;
                        __be32 dst4;
                        __be32 dst4_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                        u32 group_id;
                } ucast_routing;
                struct {
                        u8 eth_dst[ETH_ALEN];
                        u8 eth_dst_mask[ETH_ALEN];
                        int has_eth_dst;
                        int has_eth_dst_mask;
                        __be16 vlan_id;
                        u32 tunnel_id;
                        enum rocker_of_dpa_table_id goto_tbl;
                        u32 group_id;
                        bool copy_to_cpu;
                } bridge;
                struct {
                        u32 in_pport;
                        u32 in_pport_mask;
                        u8 eth_src[ETH_ALEN];
                        u8 eth_src_mask[ETH_ALEN];
                        u8 eth_dst[ETH_ALEN];
                        u8 eth_dst_mask[ETH_ALEN];
                        __be16 eth_type;
                        __be16 vlan_id;
                        __be16 vlan_id_mask;
                        u8 ip_proto;
                        u8 ip_proto_mask;
                        u8 ip_tos;
                        u8 ip_tos_mask;
                        u32 group_id;
                } acl;
        };
};

struct rocker_flow_tbl_entry {
        struct hlist_node entry;
        u32 cmd;
        u64 cookie;
        struct rocker_flow_tbl_key key;
        size_t key_len;
        u32 key_crc32; /* key */
};

struct rocker_group_tbl_entry {
        struct hlist_node entry;
        u32 cmd;
        u32 group_id; /* key */
        u16 group_count;
        u32 *group_ids;
        union {
                struct {
                        u8 pop_vlan;
                } l2_interface;
                struct {
                        u8 eth_src[ETH_ALEN];
                        u8 eth_dst[ETH_ALEN];
                        __be16 vlan_id;
                        u32 group_id;
                } l2_rewrite;
                struct {
                        u8 eth_src[ETH_ALEN];
                        u8 eth_dst[ETH_ALEN];
                        __be16 vlan_id;
                        bool ttl_check;
                        u32 group_id;
                } l3_unicast;
        };
};

struct rocker_fdb_tbl_entry {
        struct hlist_node entry;
        u32 key_crc32; /* key */
        bool learned;
        struct rocker_fdb_tbl_key {
                u32 pport;
                u8 addr[ETH_ALEN];
                __be16 vlan_id;
        } key;
};

struct rocker_internal_vlan_tbl_entry {
        struct hlist_node entry;
        int ifindex; /* key */
        u32 ref_count;
        __be16 vlan_id;
};

struct rocker_neigh_tbl_entry {
        struct hlist_node entry;
        __be32 ip_addr; /* key */
        struct net_device *dev;
        u32 ref_count;
        u32 index;
        u8 eth_dst[ETH_ALEN];
        bool ttl_check;
};

struct rocker_desc_info {
        char *data; /* mapped */
        size_t data_size;
        size_t tlv_size;
        struct rocker_desc *desc;
        dma_addr_t mapaddr;
};

struct rocker_dma_ring_info {
        size_t size;
        u32 head;
        u32 tail;
        struct rocker_desc *desc; /* mapped */
        dma_addr_t mapaddr;
        struct rocker_desc_info *desc_info;
        unsigned int type;
};

struct rocker;

enum {
        ROCKER_CTRL_LINK_LOCAL_MCAST,
        ROCKER_CTRL_LOCAL_ARP,
        ROCKER_CTRL_IPV4_MCAST,
        ROCKER_CTRL_IPV6_MCAST,
        ROCKER_CTRL_DFLT_BRIDGING,
        ROCKER_CTRL_DFLT_OVS,
        ROCKER_CTRL_MAX,
};

#define ROCKER_INTERNAL_VLAN_ID_BASE    0x0f00
#define ROCKER_N_INTERNAL_VLANS         255
#define ROCKER_VLAN_BITMAP_LEN          BITS_TO_LONGS(VLAN_N_VID)
#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)

struct rocker_port {
        struct net_device *dev;
        struct net_device *bridge_dev;
        struct rocker *rocker;
        unsigned int port_number;
        u32 pport;
        __be16 internal_vlan_id;
        int stp_state;
        u32 brport_flags;
        bool ctrls[ROCKER_CTRL_MAX];
        unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
        struct napi_struct napi_tx;
        struct napi_struct napi_rx;
        struct rocker_dma_ring_info tx_ring;
        struct rocker_dma_ring_info rx_ring;
        struct list_head trans_mem;
};

struct rocker {
        struct pci_dev *pdev;
        u8 __iomem *hw_addr;
        struct msix_entry *msix_entries;
        unsigned int port_count;
        struct rocker_port **ports;
        struct {
                u64 id;
        } hw;
        spinlock_t cmd_ring_lock;               /* for cmd ring accesses */
        struct rocker_dma_ring_info cmd_ring;
        struct rocker_dma_ring_info event_ring;
        DECLARE_HASHTABLE(flow_tbl, 16);
        spinlock_t flow_tbl_lock;               /* for flow tbl accesses */
        u64 flow_tbl_next_cookie;
        DECLARE_HASHTABLE(group_tbl, 16);
        spinlock_t group_tbl_lock;              /* for group tbl accesses */
        DECLARE_HASHTABLE(fdb_tbl, 16);
        spinlock_t fdb_tbl_lock;                /* for fdb tbl accesses */
        unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
        DECLARE_HASHTABLE(internal_vlan_tbl, 8);
        spinlock_t internal_vlan_tbl_lock;      /* for vlan tbl accesses */
        DECLARE_HASHTABLE(neigh_tbl, 16);
        spinlock_t neigh_tbl_lock;              /* for neigh tbl accesses */
        u32 neigh_tbl_next_index;
};

static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */

enum {
        ROCKER_PRIORITY_UNKNOWN = 0,
        ROCKER_PRIORITY_IG_PORT = 1,
        ROCKER_PRIORITY_VLAN = 1,
        ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
        ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
        ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
        ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
        ROCKER_PRIORITY_BRIDGING_VLAN = 3,
        ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
        ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
        ROCKER_PRIORITY_BRIDGING_TENANT = 3,
        ROCKER_PRIORITY_ACL_CTRL = 3,
        ROCKER_PRIORITY_ACL_NORMAL = 2,
        ROCKER_PRIORITY_ACL_DFLT = 1,
};

static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
        u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
        u16 end = 0xffe; /* ROCKER_INTERNAL_VLAN_ID_BASE + ROCKER_N_INTERNAL_VLANS - 1 */
        u16 _vlan_id = ntohs(vlan_id);

        return (_vlan_id >= start && _vlan_id <= end);
}

static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
                                      u16 vid, bool *pop_vlan)
{
        __be16 vlan_id;

        if (pop_vlan)
                *pop_vlan = false;
        vlan_id = htons(vid);
        if (!vlan_id) {
                vlan_id = rocker_port->internal_vlan_id;
                if (pop_vlan)
                        *pop_vlan = true;
        }

        return vlan_id;
}

static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
                                   __be16 vlan_id)
{
        if (rocker_vlan_id_is_internal(vlan_id))
                return 0;

        return ntohs(vlan_id);
}
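
/* Illustrative sketch (not part of the driver): how the two helpers above
 * round-trip.  Untagged traffic (vid 0) is mapped to the port's internal
 * VLAN with pop_vlan set, so the tag is stripped again on egress, and an
 * internal VLAN id maps back to vid 0.
 */
static void __maybe_unused
rocker_port_vid_mapping_example(const struct rocker_port *rocker_port)
{
        bool pop_vlan;
        __be16 vlan_id;

        vlan_id = rocker_port_vid_to_vlan(rocker_port, 0, &pop_vlan);
        /* vlan_id == rocker_port->internal_vlan_id, pop_vlan == true */
        WARN_ON(rocker_port_vlan_to_vid(rocker_port, vlan_id) != 0);

        vlan_id = rocker_port_vid_to_vlan(rocker_port, 100, &pop_vlan);
        /* vlan_id == htons(100), pop_vlan == false */
        WARN_ON(rocker_port_vlan_to_vid(rocker_port, vlan_id) != 100);
}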

static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
        return rocker_port->bridge_dev &&
               netif_is_bridge_master(rocker_port->bridge_dev);
}

static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
{
        return rocker_port->bridge_dev &&
               netif_is_ovs_master(rocker_port->bridge_dev);
}

#define ROCKER_OP_FLAG_REMOVE           BIT(0)
#define ROCKER_OP_FLAG_NOWAIT           BIT(1)
#define ROCKER_OP_FLAG_LEARNED          BIT(2)
#define ROCKER_OP_FLAG_REFRESH          BIT(3)

static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
                                     enum switchdev_trans trans, int flags,
                                     size_t size)
{
        struct list_head *elem = NULL;
        gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
                          GFP_ATOMIC : GFP_KERNEL;

        /* If in transaction prepare phase, allocate the memory
         * and enqueue it on a per-port list.  If in transaction
         * commit phase, dequeue the memory from the per-port list
         * rather than re-allocating the memory.  The idea is the
         * driver code paths for prepare and commit are identical
         * so the memory allocated in the prepare phase is the
         * memory used in the commit phase.
         */

        switch (trans) {
        case SWITCHDEV_TRANS_PREPARE:
                elem = kzalloc(size + sizeof(*elem), gfp_flags);
                if (!elem)
                        return NULL;
                list_add_tail(elem, &rocker_port->trans_mem);
                break;
        case SWITCHDEV_TRANS_COMMIT:
                BUG_ON(list_empty(&rocker_port->trans_mem));
                elem = rocker_port->trans_mem.next;
                list_del_init(elem);
                break;
        case SWITCHDEV_TRANS_NONE:
                elem = kzalloc(size + sizeof(*elem), gfp_flags);
                if (elem)
                        INIT_LIST_HEAD(elem);
                break;
        default:
                break;
        }

        return elem ? elem + 1 : NULL;
}

static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
                                 enum switchdev_trans trans, int flags,
                                 size_t size)
{
        return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
}

static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
                                 enum switchdev_trans trans, int flags,
                                 size_t n, size_t size)
{
        return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
}

static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
{
        struct list_head *elem;

        /* Frees are ignored if in transaction prepare phase.  The
         * memory remains on the per-port list until freed in the
         * commit phase.
         */

        if (trans == SWITCHDEV_TRANS_PREPARE)
                return;

        elem = (struct list_head *)mem - 1;
        BUG_ON(!list_empty(elem));
        kfree(elem);
}
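
/* Illustrative sketch (hypothetical caller, not part of the driver): the
 * same code path runs once with SWITCHDEV_TRANS_PREPARE and once with
 * SWITCHDEV_TRANS_COMMIT.  Only the prepare pass really allocates and can
 * fail; the commit pass dequeues the memory prepared earlier, and because
 * rocker_port_kfree() is a no-op during prepare, the memory survives
 * until the commit pass frees it.
 */
static int __maybe_unused
rocker_port_trans_mem_example(struct rocker_port *rocker_port,
                              enum switchdev_trans trans, int flags)
{
        void *mem;

        mem = rocker_port_kzalloc(rocker_port, trans, flags, 64);
        if (!mem)
                return -ENOMEM; /* possible only in the prepare phase */
        /* ... build up state for the hardware in mem ... */
        rocker_port_kfree(trans, mem);  /* no-op during prepare */
        return 0;
}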

struct rocker_wait {
        wait_queue_head_t wait;
        bool done;
        bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
        wait->done = false;
        wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
        init_waitqueue_head(&wait->wait);
        rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
                                              enum switchdev_trans trans,
                                              int flags)
{
        struct rocker_wait *wait;

        wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait));
        if (!wait)
                return NULL;
        rocker_wait_init(wait);
        return wait;
}

static void rocker_wait_destroy(enum switchdev_trans trans,
                                struct rocker_wait *wait)
{
        rocker_port_kfree(trans, wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
                                      unsigned long timeout)
{
        wait_event_timeout(wait->wait, wait->done, timeout);
        if (!wait->done)
                return false;
        return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
        wait->done = true;
        wake_up(&wait->wait);
}

static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
        return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
        return rocker_msix_vector(rocker_port->rocker,
                                  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
        return rocker_msix_vector(rocker_port->rocker,
                                  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}

#define rocker_write32(rocker, reg, val)        \
        writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)      \
        readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)        \
        writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)      \
        readq((rocker)->hw_addr + (ROCKER_ ## reg))

/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(const struct rocker *rocker)
{
        const struct pci_dev *pdev = rocker->pdev;
        u64 test_reg;
        u64 rnd;

        rnd = prandom_u32();
        rnd >>= 1;
        rocker_write32(rocker, TEST_REG, rnd);
        test_reg = rocker_read32(rocker, TEST_REG);
        if (test_reg != rnd * 2) {
                dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
                        test_reg, rnd * 2);
                return -EIO;
        }

        rnd = prandom_u32();
        rnd <<= 31;
        rnd |= prandom_u32();
        rocker_write64(rocker, TEST_REG64, rnd);
        test_reg = rocker_read64(rocker, TEST_REG64);
        if (test_reg != rnd * 2) {
                dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
                        test_reg, rnd * 2);
                return -EIO;
        }

        return 0;
}

static int rocker_dma_test_one(const struct rocker *rocker,
                               struct rocker_wait *wait, u32 test_type,
                               dma_addr_t dma_handle, const unsigned char *buf,
                               const unsigned char *expect, size_t size)
{
        const struct pci_dev *pdev = rocker->pdev;
        int i;

        rocker_wait_reset(wait);
        rocker_write32(rocker, TEST_DMA_CTRL, test_type);

        if (!rocker_wait_event_timeout(wait, HZ / 10)) {
                dev_err(&pdev->dev, "no interrupt received within a timeout\n");
                return -EIO;
        }

        for (i = 0; i < size; i++) {
                if (buf[i] != expect[i]) {
                        dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
                                buf[i], i, expect[i]);
                        return -EIO;
                }
        }
        return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(const struct rocker *rocker,
                                  struct rocker_wait *wait, int offset)
{
        struct pci_dev *pdev = rocker->pdev;
        unsigned char *alloc;
        unsigned char *buf;
        unsigned char *expect;
        dma_addr_t dma_handle;
        int i;
        int err;

        alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
                        GFP_KERNEL | GFP_DMA);
        if (!alloc)
                return -ENOMEM;
        buf = alloc + offset;
        expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

        dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
                                    PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(pdev, dma_handle)) {
                err = -EIO;
                goto free_alloc;
        }

        rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
        rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

        memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
        err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
                                  dma_handle, buf, expect,
                                  ROCKER_TEST_DMA_BUF_SIZE);
        if (err)
                goto unmap;

        memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
        err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
                                  dma_handle, buf, expect,
                                  ROCKER_TEST_DMA_BUF_SIZE);
        if (err)
                goto unmap;

        prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
        for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
                expect[i] = ~buf[i];
        err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
                                  dma_handle, buf, expect,
                                  ROCKER_TEST_DMA_BUF_SIZE);
        if (err)
                goto unmap;

unmap:
        pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
                         PCI_DMA_BIDIRECTIONAL);
free_alloc:
        kfree(alloc);

        return err;
}

static int rocker_dma_test(const struct rocker *rocker,
                           struct rocker_wait *wait)
{
        int i;
        int err;

        for (i = 0; i < 8; i++) {
                err = rocker_dma_test_offset(rocker, wait, i);
                if (err)
                        return err;
        }
        return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
        struct rocker_wait *wait = dev_id;

        rocker_wait_wake_up(wait);

        return IRQ_HANDLED;
}

static int rocker_basic_hw_test(const struct rocker *rocker)
{
        const struct pci_dev *pdev = rocker->pdev;
        struct rocker_wait wait;
        int err;

        err = rocker_reg_test(rocker);
        if (err) {
                dev_err(&pdev->dev, "reg test failed\n");
                return err;
        }

        /* Initialize the wait object before the test irq is requested so
         * the handler can never see it uninitialized.
         */
        rocker_wait_init(&wait);

        err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
                          rocker_test_irq_handler, 0,
                          rocker_driver_name, &wait);
        if (err) {
                dev_err(&pdev->dev, "cannot assign test irq\n");
                return err;
        }

        rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

        if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
                dev_err(&pdev->dev, "no interrupt received within a timeout\n");
                err = -EIO;
                goto free_irq;
        }

        err = rocker_dma_test(rocker, &wait);
        if (err)
                dev_err(&pdev->dev, "dma test failed\n");

free_irq:
        free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
        return err;
}

/******
 * TLV
 ******/

#define ROCKER_TLV_ALIGNTO 8U
#define ROCKER_TLV_ALIGN(len) \
        (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))

/*  <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 * |             Header          | Pad |           Payload           | Pad |
 * |      (struct rocker_tlv)    | ing |                             | ing |
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 *  <--------------------------- tlv->len -------------------------->
 */

static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
                                          int *remaining)
{
        int totlen = ROCKER_TLV_ALIGN(tlv->len);

        *remaining -= totlen;
        return (struct rocker_tlv *) ((char *) tlv + totlen);
}

static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
{
        return remaining >= (int) ROCKER_TLV_HDRLEN &&
               tlv->len >= ROCKER_TLV_HDRLEN &&
               tlv->len <= remaining;
}

#define rocker_tlv_for_each(pos, head, len, rem)        \
        for (pos = head, rem = len;                     \
             rocker_tlv_ok(pos, rem);                   \
             pos = rocker_tlv_next(pos, &(rem)))

#define rocker_tlv_for_each_nested(pos, tlv, rem)       \
        rocker_tlv_for_each(pos, rocker_tlv_data(tlv),  \
                            rocker_tlv_len(tlv), rem)

static int rocker_tlv_attr_size(int payload)
{
        return ROCKER_TLV_HDRLEN + payload;
}

static int rocker_tlv_total_size(int payload)
{
        return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
}

static int rocker_tlv_padlen(int payload)
{
        return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
}
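
/* Worked example (illustrative, assuming the aligned header is 8 bytes,
 * i.e. ROCKER_TLV_HDRLEN == 8): for a 6-byte payload such as a MAC
 * address, rocker_tlv_attr_size(6) == 14, rocker_tlv_total_size(6) == 16
 * and rocker_tlv_padlen(6) == 2, so two bytes of trailing padding keep
 * the next TLV 8-byte aligned.
 */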

static int rocker_tlv_type(const struct rocker_tlv *tlv)
{
        return tlv->type;
}

static void *rocker_tlv_data(const struct rocker_tlv *tlv)
{
        return (char *) tlv + ROCKER_TLV_HDRLEN;
}

static int rocker_tlv_len(const struct rocker_tlv *tlv)
{
        return tlv->len - ROCKER_TLV_HDRLEN;
}

static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
{
        return *(u8 *) rocker_tlv_data(tlv);
}

static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
{
        return *(u16 *) rocker_tlv_data(tlv);
}

static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
{
        return *(__be16 *) rocker_tlv_data(tlv);
}

static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
{
        return *(u32 *) rocker_tlv_data(tlv);
}

static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
{
        return *(u64 *) rocker_tlv_data(tlv);
}

static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
                             const char *buf, int buf_len)
{
        const struct rocker_tlv *tlv;
        const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
        int rem;

        memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));

        rocker_tlv_for_each(tlv, head, buf_len, rem) {
                u32 type = rocker_tlv_type(tlv);

                if (type > 0 && type <= maxtype)
                        tb[type] = tlv;
        }
}

static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
                                    const struct rocker_tlv *tlv)
{
        rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
                         rocker_tlv_len(tlv));
}

static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
                                  const struct rocker_desc_info *desc_info)
{
        rocker_tlv_parse(tb, maxtype, desc_info->data,
                         desc_info->desc->tlv_size);
}

static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
{
        return (struct rocker_tlv *) ((char *) desc_info->data +
                                               desc_info->tlv_size);
}

static int rocker_tlv_put(struct rocker_desc_info *desc_info,
                          int attrtype, int attrlen, const void *data)
{
        int tail_room = desc_info->data_size - desc_info->tlv_size;
        int total_size = rocker_tlv_total_size(attrlen);
        struct rocker_tlv *tlv;

        if (unlikely(tail_room < total_size))
                return -EMSGSIZE;

        tlv = rocker_tlv_start(desc_info);
        desc_info->tlv_size += total_size;
        tlv->type = attrtype;
        tlv->len = rocker_tlv_attr_size(attrlen);
        memcpy(rocker_tlv_data(tlv), data, attrlen);
        memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
        return 0;
}

static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
                             int attrtype, u8 value)
{
        return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
}

static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
                              int attrtype, u16 value)
{
        return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
}

static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
                               int attrtype, __be16 value)
{
        return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
}

static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
                              int attrtype, u32 value)
{
        return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
}

static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
                               int attrtype, __be32 value)
{
        return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
}

static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
                              int attrtype, u64 value)
{
        return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
}

static struct rocker_tlv *
rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
{
        struct rocker_tlv *start = rocker_tlv_start(desc_info);

        if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
                return NULL;

        return start;
}

static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
                                struct rocker_tlv *start)
{
        start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
}

static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
                                   const struct rocker_tlv *start)
{
        desc_info->tlv_size = (const char *) start - desc_info->data;
}
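
/* Illustrative sketch (hypothetical attribute numbers, not real
 * ROCKER_TLV_* values): build a nested attribute carrying one u32 with
 * the helpers above, close the nest, then parse the value back out.
 */
static int __maybe_unused
rocker_tlv_example(struct rocker_desc_info *desc_info)
{
        enum { EX_TLV_NEST = 1, EX_TLV_VAL = 2, EX_TLV_MAX = EX_TLV_VAL };
        const struct rocker_tlv *attrs[EX_TLV_MAX + 1];
        struct rocker_tlv *nest;

        nest = rocker_tlv_nest_start(desc_info, EX_TLV_NEST);
        if (!nest)
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, EX_TLV_VAL, 0x12345678)) {
                rocker_tlv_nest_cancel(desc_info, nest);
                return -EMSGSIZE;
        }
        rocker_tlv_nest_end(desc_info, nest);

        rocker_tlv_parse_nested(attrs, EX_TLV_MAX, nest);
        if (!attrs[EX_TLV_VAL])
                return -EIO;
        return rocker_tlv_get_u32(attrs[EX_TLV_VAL]) == 0x12345678 ? 0 : -EIO;
}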

/*****************************************
 * DMA ring and descriptor manipulation
 *****************************************/
891
892 static u32 __pos_inc(u32 pos, size_t limit)
893 {
894         return ++pos == limit ? 0 : pos;
895 }
896
897 static int rocker_desc_err(const struct rocker_desc_info *desc_info)
898 {
899         int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
900
901         switch (err) {
902         case ROCKER_OK:
903                 return 0;
904         case -ROCKER_ENOENT:
905                 return -ENOENT;
906         case -ROCKER_ENXIO:
907                 return -ENXIO;
908         case -ROCKER_ENOMEM:
909                 return -ENOMEM;
910         case -ROCKER_EEXIST:
911                 return -EEXIST;
912         case -ROCKER_EINVAL:
913                 return -EINVAL;
914         case -ROCKER_EMSGSIZE:
915                 return -EMSGSIZE;
916         case -ROCKER_ENOTSUP:
917                 return -EOPNOTSUPP;
918         case -ROCKER_ENOBUFS:
919                 return -ENOBUFS;
920         }
921
922         return -EINVAL;
923 }
924
925 static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
926 {
927         desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
928 }
929
930 static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
931 {
932         u32 comp_err = desc_info->desc->comp_err;
933
934         return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
935 }
936
937 static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
938 {
939         return (void *)(uintptr_t)desc_info->desc->cookie;
940 }
941
942 static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
943                                        void *ptr)
944 {
945         desc_info->desc->cookie = (uintptr_t) ptr;
946 }
947
static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
        struct rocker_desc_info *desc_info;
        u32 head = __pos_inc(info->head, info->size);

        desc_info = &info->desc_info[info->head];
        if (head == info->tail)
                return NULL; /* ring full */
        desc_info->tlv_size = 0;
        return desc_info;
}
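
/* Worked example of the head/tail arithmetic (illustrative): with
 * info->size == 4, head == 3 and tail == 0, __pos_inc(3, 4) wraps to 0,
 * which equals tail, so the ring is reported full with three descriptors
 * outstanding; one slot is always left unused to distinguish a full ring
 * from an empty one (head == tail).
 */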

static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
        desc_info->desc->buf_size = desc_info->data_size;
        desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(const struct rocker *rocker,
                                 struct rocker_dma_ring_info *info,
                                 const struct rocker_desc_info *desc_info)
{
        u32 head = __pos_inc(info->head, info->size);

        BUG_ON(head == info->tail);
        rocker_desc_commit(desc_info);
        info->head = head;
        rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
        struct rocker_desc_info *desc_info;

        if (info->tail == info->head)
                return NULL; /* nothing to be done between head and tail */
        desc_info = &info->desc_info[info->tail];
        if (!rocker_desc_gen(desc_info))
                return NULL; /* gen bit not set, desc is not ready yet */
        info->tail = __pos_inc(info->tail, info->size);
        desc_info->tlv_size = desc_info->desc->tlv_size;
        return desc_info;
}

static void rocker_dma_ring_credits_set(const struct rocker *rocker,
                                        const struct rocker_dma_ring_info *info,
                                        u32 credits)
{
        if (credits)
                rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

static unsigned long rocker_dma_ring_size_fix(size_t size)
{
        return max(ROCKER_DMA_SIZE_MIN,
                   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}
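
/* Example (illustrative): rocker_dma_ring_size_fix(100) rounds up to the
 * next power of two, 128, and then clamps the result into the
 * [ROCKER_DMA_SIZE_MIN, ROCKER_DMA_SIZE_MAX] range.
 */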

static int rocker_dma_ring_create(const struct rocker *rocker,
                                  unsigned int type,
                                  size_t size,
                                  struct rocker_dma_ring_info *info)
{
        int i;

        BUG_ON(size != rocker_dma_ring_size_fix(size));
        info->size = size;
        info->type = type;
        info->head = 0;
        info->tail = 0;
        info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
                                  GFP_KERNEL);
        if (!info->desc_info)
                return -ENOMEM;

        info->desc = pci_alloc_consistent(rocker->pdev,
                                          info->size * sizeof(*info->desc),
                                          &info->mapaddr);
        if (!info->desc) {
                kfree(info->desc_info);
                return -ENOMEM;
        }

        for (i = 0; i < info->size; i++)
                info->desc_info[i].desc = &info->desc[i];

        rocker_write32(rocker, DMA_DESC_CTRL(info->type),
                       ROCKER_DMA_DESC_CTRL_RESET);
        rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
        rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

        return 0;
}

static void rocker_dma_ring_destroy(const struct rocker *rocker,
                                    const struct rocker_dma_ring_info *info)
{
        rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

        pci_free_consistent(rocker->pdev,
                            info->size * sizeof(struct rocker_desc),
                            info->desc, info->mapaddr);
        kfree(info->desc_info);
}

static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
                                             struct rocker_dma_ring_info *info)
{
        int i;

        BUG_ON(info->head || info->tail);

        /* When the hardware is the producer for a ring, we need to
         * advance head past each desc.  That tells hw the desc is
         * ready for it to use.
         */
        for (i = 0; i < info->size - 1; i++)
                rocker_desc_head_set(rocker, info, &info->desc_info[i]);
        rocker_desc_commit(&info->desc_info[i]);
}

static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
                                      const struct rocker_dma_ring_info *info,
                                      int direction, size_t buf_size)
{
        struct pci_dev *pdev = rocker->pdev;
        int i;
        int err;

        for (i = 0; i < info->size; i++) {
                struct rocker_desc_info *desc_info = &info->desc_info[i];
                struct rocker_desc *desc = &info->desc[i];
                dma_addr_t dma_handle;
                char *buf;

                buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
                if (!buf) {
                        err = -ENOMEM;
                        goto rollback;
                }

                dma_handle = pci_map_single(pdev, buf, buf_size, direction);
                if (pci_dma_mapping_error(pdev, dma_handle)) {
                        kfree(buf);
                        err = -EIO;
                        goto rollback;
                }

                desc_info->data = buf;
                desc_info->data_size = buf_size;
                dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

                desc->buf_addr = dma_handle;
                desc->buf_size = buf_size;
        }
        return 0;

rollback:
        for (i--; i >= 0; i--) {
                const struct rocker_desc_info *desc_info = &info->desc_info[i];

                pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
                                 desc_info->data_size, direction);
                kfree(desc_info->data);
        }
        return err;
}

static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
                                      const struct rocker_dma_ring_info *info,
                                      int direction)
{
        struct pci_dev *pdev = rocker->pdev;
        int i;

        for (i = 0; i < info->size; i++) {
                const struct rocker_desc_info *desc_info = &info->desc_info[i];
                struct rocker_desc *desc = &info->desc[i];

                desc->buf_addr = 0;
                desc->buf_size = 0;
                pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
                                 desc_info->data_size, direction);
                kfree(desc_info->data);
        }
}

static int rocker_dma_rings_init(struct rocker *rocker)
{
        const struct pci_dev *pdev = rocker->pdev;
        int err;

        err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
                                     ROCKER_DMA_CMD_DEFAULT_SIZE,
                                     &rocker->cmd_ring);
        if (err) {
                dev_err(&pdev->dev, "failed to create command dma ring\n");
                return err;
        }

        spin_lock_init(&rocker->cmd_ring_lock);

        err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
                                         PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
        if (err) {
                dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
                goto err_dma_cmd_ring_bufs_alloc;
        }

        err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
                                     ROCKER_DMA_EVENT_DEFAULT_SIZE,
                                     &rocker->event_ring);
        if (err) {
                dev_err(&pdev->dev, "failed to create event dma ring\n");
                goto err_dma_event_ring_create;
        }

        err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
                                         PCI_DMA_FROMDEVICE, PAGE_SIZE);
        if (err) {
                dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
                goto err_dma_event_ring_bufs_alloc;
        }
        rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
        return 0;

err_dma_event_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
        rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
                                  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
        return err;
}

static void rocker_dma_rings_fini(struct rocker *rocker)
{
        rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
                                  PCI_DMA_FROMDEVICE);
        rocker_dma_ring_destroy(rocker, &rocker->event_ring);
        rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
                                  PCI_DMA_BIDIRECTIONAL);
        rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}

static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
                                      struct rocker_desc_info *desc_info,
                                      struct sk_buff *skb, size_t buf_len)
{
        const struct rocker *rocker = rocker_port->rocker;
        struct pci_dev *pdev = rocker->pdev;
        dma_addr_t dma_handle;

        dma_handle = pci_map_single(pdev, skb->data, buf_len,
                                    PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(pdev, dma_handle))
                return -EIO;
        if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
                goto tlv_put_failure;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
                goto tlv_put_failure;
        return 0;

tlv_put_failure:
        pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
        desc_info->tlv_size = 0;
        return -EMSGSIZE;
}

static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
        return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
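
/* Worked example (illustrative): with the default MTU of 1500,
 * rocker_port_rx_buf_len() returns 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN)
 * + 4 (VLAN_HLEN) == 1522 bytes per receive buffer, enough for a
 * maximally-sized VLAN-tagged frame including its FCS.
 */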
1223
1224 static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
1225                                         struct rocker_desc_info *desc_info)
1226 {
1227         struct net_device *dev = rocker_port->dev;
1228         struct sk_buff *skb;
1229         size_t buf_len = rocker_port_rx_buf_len(rocker_port);
1230         int err;
1231
1232         /* Ensure that hw will see tlv_size zero in case of an error.
1233          * That tells hw to use another descriptor.
1234          */
1235         rocker_desc_cookie_ptr_set(desc_info, NULL);
1236         desc_info->tlv_size = 0;
1237
1238         skb = netdev_alloc_skb_ip_align(dev, buf_len);
1239         if (!skb)
1240                 return -ENOMEM;
1241         err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
1242         if (err) {
1243                 dev_kfree_skb_any(skb);
1244                 return err;
1245         }
1246         rocker_desc_cookie_ptr_set(desc_info, skb);
1247         return 0;
1248 }
1249
1250 static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
1251                                          const struct rocker_tlv **attrs)
1252 {
1253         struct pci_dev *pdev = rocker->pdev;
1254         dma_addr_t dma_handle;
1255         size_t len;
1256
1257         if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
1258             !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
1259                 return;
1260         dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
1261         len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
1262         pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
1263 }
1264
1265 static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
1266                                         const struct rocker_desc_info *desc_info)
1267 {
1268         const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
1269         struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
1270
1271         if (!skb)
1272                 return;
1273         rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
1274         rocker_dma_rx_ring_skb_unmap(rocker, attrs);
1275         dev_kfree_skb_any(skb);
1276 }
1277
1278 static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
1279 {
1280         const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1281         const struct rocker *rocker = rocker_port->rocker;
1282         int i;
1283         int err;
1284
1285         for (i = 0; i < rx_ring->size; i++) {
1286                 err = rocker_dma_rx_ring_skb_alloc(rocker_port,
1287                                                    &rx_ring->desc_info[i]);
1288                 if (err)
1289                         goto rollback;
1290         }
1291         return 0;
1292
1293 rollback:
1294         for (i--; i >= 0; i--)
1295                 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1296         return err;
1297 }
1298
1299 static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
1300 {
1301         const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1302         const struct rocker *rocker = rocker_port->rocker;
1303         int i;
1304
1305         for (i = 0; i < rx_ring->size; i++)
1306                 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1307 }
1308
1309 static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
1310 {
1311         struct rocker *rocker = rocker_port->rocker;
1312         int err;
1313
1314         err = rocker_dma_ring_create(rocker,
1315                                      ROCKER_DMA_TX(rocker_port->port_number),
1316                                      ROCKER_DMA_TX_DEFAULT_SIZE,
1317                                      &rocker_port->tx_ring);
1318         if (err) {
1319                 netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
1320                 return err;
1321         }
1322
1323         err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
1324                                          PCI_DMA_TODEVICE,
1325                                          ROCKER_DMA_TX_DESC_SIZE);
1326         if (err) {
1327                 netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
1328                 goto err_dma_tx_ring_bufs_alloc;
1329         }
1330
1331         err = rocker_dma_ring_create(rocker,
1332                                      ROCKER_DMA_RX(rocker_port->port_number),
1333                                      ROCKER_DMA_RX_DEFAULT_SIZE,
1334                                      &rocker_port->rx_ring);
1335         if (err) {
1336                 netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
1337                 goto err_dma_rx_ring_create;
1338         }
1339
1340         err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
1341                                          PCI_DMA_BIDIRECTIONAL,
1342                                          ROCKER_DMA_RX_DESC_SIZE);
1343         if (err) {
1344                 netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
1345                 goto err_dma_rx_ring_bufs_alloc;
1346         }
1347
1348         err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
1349         if (err) {
1350                 netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
1351                 goto err_dma_rx_ring_skbs_alloc;
1352         }
1353         rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
1354
1355         return 0;
1356
1357 err_dma_rx_ring_skbs_alloc:
1358         rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1359                                   PCI_DMA_BIDIRECTIONAL);
1360 err_dma_rx_ring_bufs_alloc:
1361         rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1362 err_dma_rx_ring_create:
1363         rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1364                                   PCI_DMA_TODEVICE);
1365 err_dma_tx_ring_bufs_alloc:
1366         rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1367         return err;
1368 }
1369
1370 static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
1371 {
1372         struct rocker *rocker = rocker_port->rocker;
1373
1374         rocker_dma_rx_ring_skbs_free(rocker_port);
1375         rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1376                                   PCI_DMA_BIDIRECTIONAL);
1377         rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1378         rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1379                                   PCI_DMA_TODEVICE);
1380         rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1381 }
1382
1383 static void rocker_port_set_enable(const struct rocker_port *rocker_port,
1384                                    bool enable)
1385 {
1386         u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1387
1388         if (enable)
1389                 val |= 1ULL << rocker_port->pport;
1390         else
1391                 val &= ~(1ULL << rocker_port->pport);
1392         rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1393 }
1394
1395 /********************************
1396  * Interrupt handler and helpers
1397  ********************************/
1398
1399 static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
1400 {
1401         struct rocker *rocker = dev_id;
1402         const struct rocker_desc_info *desc_info;
1403         struct rocker_wait *wait;
1404         u32 credits = 0;
1405
1406         spin_lock(&rocker->cmd_ring_lock);
1407         while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
1408                 wait = rocker_desc_cookie_ptr_get(desc_info);
1409                 if (wait->nowait) {
1410                         rocker_desc_gen_clear(desc_info);
1411                         rocker_wait_destroy(SWITCHDEV_TRANS_NONE, wait);
1412                 } else {
1413                         rocker_wait_wake_up(wait);
1414                 }
                credits++;
        }
        spin_unlock(&rocker->cmd_ring_lock);
        rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

        return IRQ_HANDLED;
}

static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
        netif_carrier_on(rocker_port->dev);
        netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
        netif_carrier_off(rocker_port->dev);
        netdev_info(rocker_port->dev, "Link is down\n");
}

static int rocker_event_link_change(const struct rocker *rocker,
                                    const struct rocker_tlv *info)
{
        const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
        unsigned int port_number;
        bool link_up;
        struct rocker_port *rocker_port;

        rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
        if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
            !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
                return -EIO;
        port_number =
                rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
        link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

        if (port_number >= rocker->port_count)
                return -EINVAL;

        rocker_port = rocker->ports[port_number];
        if (netif_carrier_ok(rocker_port->dev) != link_up) {
                if (link_up)
                        rocker_port_link_up(rocker_port);
                else
                        rocker_port_link_down(rocker_port);
        }

        return 0;
}

static int rocker_port_fdb(struct rocker_port *rocker_port,
                           enum switchdev_trans trans,
                           const unsigned char *addr,
                           __be16 vlan_id, int flags);

static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
                                      const struct rocker_tlv *info)
{
        const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
        unsigned int port_number;
        struct rocker_port *rocker_port;
        const unsigned char *addr;
        int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
        __be16 vlan_id;

        rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
        if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
            !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
            !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
                return -EIO;
        port_number =
                rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
        addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
        vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

        if (port_number >= rocker->port_count)
                return -EINVAL;

        rocker_port = rocker->ports[port_number];

        if (rocker_port->stp_state != BR_STATE_LEARNING &&
            rocker_port->stp_state != BR_STATE_FORWARDING)
                return 0;

        return rocker_port_fdb(rocker_port, SWITCHDEV_TRANS_NONE,
                               addr, vlan_id, flags);
}

static int rocker_event_process(const struct rocker *rocker,
                                const struct rocker_desc_info *desc_info)
{
        const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
        const struct rocker_tlv *info;
        u16 type;

        rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
        if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
            !attrs[ROCKER_TLV_EVENT_INFO])
                return -EIO;

        type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
        info = attrs[ROCKER_TLV_EVENT_INFO];

        switch (type) {
        case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
                return rocker_event_link_change(rocker, info);
        case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
                return rocker_event_mac_vlan_seen(rocker, info);
        }

        return -EOPNOTSUPP;
}

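/* Event descriptors carry a flat TLV pair -- ROCKER_TLV_EVENT_TYPE (u16)
 * plus a nested ROCKER_TLV_EVENT_INFO -- and rocker_event_process() fans
 * out on the type.  A handler for a new event type would follow the same
 * shape as the two above (sketch only; ROCKER_TLV_EVENT_TYPE_FOO is a
 * hypothetical type that does not exist in rocker.h):
 *
 *        case ROCKER_TLV_EVENT_TYPE_FOO:
 *                return rocker_event_foo(rocker, info);
 *
 * i.e. parse the nested info attrs, validate the mandatory ones, then act.
 */
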
static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
        struct rocker *rocker = dev_id;
        const struct pci_dev *pdev = rocker->pdev;
        const struct rocker_desc_info *desc_info;
        u32 credits = 0;
        int err;

        while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
                err = rocker_desc_err(desc_info);
                if (err) {
                        dev_err(&pdev->dev, "event desc received with err %d\n",
                                err);
                } else {
                        err = rocker_event_process(rocker, desc_info);
                        if (err)
                                dev_err(&pdev->dev, "event processing failed with err %d\n",
                                        err);
                }
                rocker_desc_gen_clear(desc_info);
                rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
                credits++;
        }
        rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

        return IRQ_HANDLED;
}

static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
        struct rocker_port *rocker_port = dev_id;

        napi_schedule(&rocker_port->napi_tx);
        return IRQ_HANDLED;
}

static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
        struct rocker_port *rocker_port = dev_id;

        napi_schedule(&rocker_port->napi_rx);
        return IRQ_HANDLED;
}

/********************
 * Command interface
 ********************/

typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
                                    struct rocker_desc_info *desc_info,
                                    void *priv);

typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
                                    const struct rocker_desc_info *desc_info,
                                    void *priv);

static int rocker_cmd_exec(struct rocker_port *rocker_port,
                           enum switchdev_trans trans, int flags,
                           rocker_cmd_prep_cb_t prepare, void *prepare_priv,
                           rocker_cmd_proc_cb_t process, void *process_priv)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_desc_info *desc_info;
        struct rocker_wait *wait;
        bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
        unsigned long lock_flags;
        int err;

        wait = rocker_wait_create(rocker_port, trans, flags);
        if (!wait)
                return -ENOMEM;
        wait->nowait = nowait;

        spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);

        desc_info = rocker_desc_head_get(&rocker->cmd_ring);
        if (!desc_info) {
                spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
                err = -EAGAIN;
                goto out;
        }

        err = prepare(rocker_port, desc_info, prepare_priv);
        if (err) {
                spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
                goto out;
        }

        rocker_desc_cookie_ptr_set(desc_info, wait);

        if (trans != SWITCHDEV_TRANS_PREPARE)
                rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);

        spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

        if (nowait)
                return 0;

        if (trans != SWITCHDEV_TRANS_PREPARE)
                if (!rocker_wait_event_timeout(wait, HZ / 10))
                        return -EIO;

        err = rocker_desc_err(desc_info);
        if (err)
                return err;

        if (process)
                err = process(rocker_port, desc_info, process_priv);

        rocker_desc_gen_clear(desc_info);
out:
        rocker_wait_destroy(trans, wait);
        return err;
}

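/* rocker_cmd_exec() is the single funnel for command-ring traffic: the
 * "prepare" callback fills the descriptor's TLVs, the optional "process"
 * callback parses the completed descriptor back into *process_priv.  With
 * ROCKER_OP_FLAG_NOWAIT the call returns as soon as the descriptor is
 * posted and "process" is never invoked; otherwise the caller sleeps up to
 * HZ / 10 for the completion interrupt.  A typical paired call (sketch,
 * mirroring the wrappers further below):
 *
 *        err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
 *                              rocker_cmd_get_port_settings_prep, NULL,
 *                              rocker_cmd_get_port_settings_macaddr_proc,
 *                              macaddr);
 */
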
static int
rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
                                  struct rocker_desc_info *desc_info,
                                  void *priv)
{
        struct rocker_tlv *cmd_info;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
                               ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
                               rocker_port->pport))
                return -EMSGSIZE;
        rocker_tlv_nest_end(desc_info, cmd_info);
        return 0;
}

static int
rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
                                          const struct rocker_desc_info *desc_info,
                                          void *priv)
{
        struct ethtool_cmd *ecmd = priv;
        const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
        const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
        u32 speed;
        u8 duplex;
        u8 autoneg;

        rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
        if (!attrs[ROCKER_TLV_CMD_INFO])
                return -EIO;

        rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
                                attrs[ROCKER_TLV_CMD_INFO]);
        if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
            !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
            !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
                return -EIO;

        speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
        duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
        autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

        ecmd->transceiver = XCVR_INTERNAL;
        ecmd->supported = SUPPORTED_TP;
        ecmd->phy_address = 0xff;
        ecmd->port = PORT_TP;
        ethtool_cmd_speed_set(ecmd, speed);
        ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
        ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

        return 0;
}

static int
rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
                                          const struct rocker_desc_info *desc_info,
                                          void *priv)
{
        unsigned char *macaddr = priv;
        const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
        const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
        const struct rocker_tlv *attr;

        rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
        if (!attrs[ROCKER_TLV_CMD_INFO])
                return -EIO;

        rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
                                attrs[ROCKER_TLV_CMD_INFO]);
        attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
        if (!attr)
                return -EIO;

        if (rocker_tlv_len(attr) != ETH_ALEN)
                return -EINVAL;

        ether_addr_copy(macaddr, rocker_tlv_data(attr));
        return 0;
}

struct port_name {
        char *buf;
        size_t len;
};

static int
rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
                                            const struct rocker_desc_info *desc_info,
                                            void *priv)
{
        const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
        const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
        struct port_name *name = priv;
        const struct rocker_tlv *attr;
        size_t i, j, len;
        const char *str;

        rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
        if (!attrs[ROCKER_TLV_CMD_INFO])
                return -EIO;

        rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
                                attrs[ROCKER_TLV_CMD_INFO]);
        attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
        if (!attr)
                return -EIO;

        /* leave room for the trailing '\0' written below */
        len = min_t(size_t, rocker_tlv_len(attr), name->len - 1);
        str = rocker_tlv_data(attr);

        /* make sure name only contains alphanumeric characters */
        for (i = j = 0; i < len; ++i) {
                if (isalnum(str[i])) {
                        name->buf[j] = str[i];
                        j++;
                }
        }

        if (j == 0)
                return -EIO;

        name->buf[j] = '\0';

        return 0;
}

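/* A minimal usage sketch for the proc above (illustrative only; the
 * rocker_example_* name is hypothetical and not part of this driver):
 * fetch the switch-assigned physical name of a port into a caller buffer.
 */
static int __maybe_unused
rocker_example_get_phys_name(struct rocker_port *rocker_port,
                             char *buf, size_t len)
{
        struct port_name name = { .buf = buf, .len = len };

        return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
                               rocker_cmd_get_port_settings_prep, NULL,
                               rocker_cmd_get_port_settings_phys_name_proc,
                               &name);
}
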
static int
rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
                                          struct rocker_desc_info *desc_info,
                                          void *priv)
{
        struct ethtool_cmd *ecmd = priv;
        struct rocker_tlv *cmd_info;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
                               ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
                               rocker_port->pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
                               ethtool_cmd_speed(ecmd)))
                return -EMSGSIZE;
        if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
                              ecmd->duplex))
                return -EMSGSIZE;
        if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
                              ecmd->autoneg))
                return -EMSGSIZE;
        rocker_tlv_nest_end(desc_info, cmd_info);
        return 0;
}

static int
rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
                                          struct rocker_desc_info *desc_info,
                                          void *priv)
{
        const unsigned char *macaddr = priv;
        struct rocker_tlv *cmd_info;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
                               ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
                               rocker_port->pport))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
                           ETH_ALEN, macaddr))
                return -EMSGSIZE;
        rocker_tlv_nest_end(desc_info, cmd_info);
        return 0;
}

static int
rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
                                      struct rocker_desc_info *desc_info,
                                      void *priv)
{
        int mtu = *(int *)priv;
        struct rocker_tlv *cmd_info;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
                               ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
                               rocker_port->pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
                               mtu))
                return -EMSGSIZE;
        rocker_tlv_nest_end(desc_info, cmd_info);
        return 0;
}

static int
rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
                                  struct rocker_desc_info *desc_info,
                                  void *priv)
{
        struct rocker_tlv *cmd_info;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
                               ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
                               rocker_port->pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
                              !!(rocker_port->brport_flags & BR_LEARNING)))
                return -EMSGSIZE;
        rocker_tlv_nest_end(desc_info, cmd_info);
        return 0;
}

static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
                                                struct ethtool_cmd *ecmd)
{
        return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
                               rocker_cmd_get_port_settings_prep, NULL,
                               rocker_cmd_get_port_settings_ethtool_proc,
                               ecmd);
}

static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
                                                unsigned char *macaddr)
{
        return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
                               rocker_cmd_get_port_settings_prep, NULL,
                               rocker_cmd_get_port_settings_macaddr_proc,
                               macaddr);
}

static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
                                                struct ethtool_cmd *ecmd)
{
        return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
                               rocker_cmd_set_port_settings_ethtool_prep,
                               ecmd, NULL, NULL);
}

static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
                                                unsigned char *macaddr)
{
        return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
                               rocker_cmd_set_port_settings_macaddr_prep,
                               macaddr, NULL, NULL);
}

static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
                                            int mtu)
{
        return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
                               rocker_cmd_set_port_settings_mtu_prep,
                               &mtu, NULL, NULL);
}

static int rocker_port_set_learning(struct rocker_port *rocker_port,
                                    enum switchdev_trans trans)
{
        return rocker_cmd_exec(rocker_port, trans, 0,
                               rocker_cmd_set_port_learning_prep,
                               NULL, NULL, NULL);
}

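/* Usage sketch (hypothetical; it mirrors what an ndo_set_mac_address
 * implementation built on the wrapper above could look like, not the
 * driver's actual net_device op): validate and push a new MAC to the
 * device before mirroring it into the netdev.
 */
static int __maybe_unused
rocker_example_set_mac(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;
        struct rocker_port *rocker_port = netdev_priv(dev);
        int err;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
        if (err)
                return err;
        ether_addr_copy(dev->dev_addr, addr->sa_data);
        return 0;
}
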
static int
rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
                                const struct rocker_flow_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
                               entry->key.ig_port.in_pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
                               entry->key.ig_port.in_pport_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
                               entry->key.ig_port.goto_tbl))
                return -EMSGSIZE;

        return 0;
}

static int
rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
                             const struct rocker_flow_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
                               entry->key.vlan.in_pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->key.vlan.vlan_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
                                entry->key.vlan.vlan_id_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
                               entry->key.vlan.goto_tbl))
                return -EMSGSIZE;
        if (entry->key.vlan.untagged &&
            rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
                                entry->key.vlan.new_vlan_id))
                return -EMSGSIZE;

        return 0;
}

static int
rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
                                 const struct rocker_flow_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
                               entry->key.term_mac.in_pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
                               entry->key.term_mac.in_pport_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
                                entry->key.term_mac.eth_type))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
                           ETH_ALEN, entry->key.term_mac.eth_dst))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
                           ETH_ALEN, entry->key.term_mac.eth_dst_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->key.term_mac.vlan_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
                                entry->key.term_mac.vlan_id_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
                               entry->key.term_mac.goto_tbl))
                return -EMSGSIZE;
        if (entry->key.term_mac.copy_to_cpu &&
            rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
                              entry->key.term_mac.copy_to_cpu))
                return -EMSGSIZE;

        return 0;
}

static int
rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
                                      const struct rocker_flow_tbl_entry *entry)
{
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
                                entry->key.ucast_routing.eth_type))
                return -EMSGSIZE;
        if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
                                entry->key.ucast_routing.dst4))
                return -EMSGSIZE;
        if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
                                entry->key.ucast_routing.dst4_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
                               entry->key.ucast_routing.goto_tbl))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
                               entry->key.ucast_routing.group_id))
                return -EMSGSIZE;

        return 0;
}

static int
rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
                               const struct rocker_flow_tbl_entry *entry)
{
        if (entry->key.bridge.has_eth_dst &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
                           ETH_ALEN, entry->key.bridge.eth_dst))
                return -EMSGSIZE;
        if (entry->key.bridge.has_eth_dst_mask &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
                           ETH_ALEN, entry->key.bridge.eth_dst_mask))
                return -EMSGSIZE;
        if (entry->key.bridge.vlan_id &&
            rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->key.bridge.vlan_id))
                return -EMSGSIZE;
        if (entry->key.bridge.tunnel_id &&
            rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
                               entry->key.bridge.tunnel_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
                               entry->key.bridge.goto_tbl))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
                               entry->key.bridge.group_id))
                return -EMSGSIZE;
        if (entry->key.bridge.copy_to_cpu &&
            rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
                              entry->key.bridge.copy_to_cpu))
                return -EMSGSIZE;

        return 0;
}

static int
rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
                            const struct rocker_flow_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
                               entry->key.acl.in_pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
                               entry->key.acl.in_pport_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
                           ETH_ALEN, entry->key.acl.eth_src))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
                           ETH_ALEN, entry->key.acl.eth_src_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
                           ETH_ALEN, entry->key.acl.eth_dst))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
                           ETH_ALEN, entry->key.acl.eth_dst_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
                                entry->key.acl.eth_type))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->key.acl.vlan_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
                                entry->key.acl.vlan_id_mask))
                return -EMSGSIZE;

        switch (ntohs(entry->key.acl.eth_type)) {
        case ETH_P_IP:
        case ETH_P_IPV6:
                if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
                                      entry->key.acl.ip_proto))
                        return -EMSGSIZE;
                if (rocker_tlv_put_u8(desc_info,
                                      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
                                      entry->key.acl.ip_proto_mask))
                        return -EMSGSIZE;
                if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
                                      entry->key.acl.ip_tos & 0x3f))
                        return -EMSGSIZE;
                if (rocker_tlv_put_u8(desc_info,
                                      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
                                      entry->key.acl.ip_tos_mask & 0x3f))
                        return -EMSGSIZE;
                if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
                                      (entry->key.acl.ip_tos & 0xc0) >> 6))
                        return -EMSGSIZE;
                if (rocker_tlv_put_u8(desc_info,
                                      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
                                      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
                        return -EMSGSIZE;
                break;
        }

        if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
            rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
                               entry->key.acl.group_id))
                return -EMSGSIZE;

        return 0;
}

static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
                                   struct rocker_desc_info *desc_info,
                                   void *priv)
{
        const struct rocker_flow_tbl_entry *entry = priv;
        struct rocker_tlv *cmd_info;
        int err = 0;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
                               entry->key.tbl_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
                               entry->key.priority))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
                return -EMSGSIZE;
        if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
                               entry->cookie))
                return -EMSGSIZE;

        switch (entry->key.tbl_id) {
        case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
                err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
                break;
        case ROCKER_OF_DPA_TABLE_ID_VLAN:
                err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
                break;
        case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
                err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
                break;
        case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
                err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
                break;
        case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
                err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
                break;
        case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
                err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        if (err)
                return err;

        rocker_tlv_nest_end(desc_info, cmd_info);

        return 0;
}

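/* For reference, the descriptor built above nests as follows (sketch
 * derived from the puts in rocker_cmd_flow_tbl_add()):
 *
 *        ROCKER_TLV_CMD_TYPE                u16, OF_DPA_FLOW_ADD or _MOD
 *        ROCKER_TLV_CMD_INFO                nest
 *            ROCKER_TLV_OF_DPA_TABLE_ID     u16
 *            ROCKER_TLV_OF_DPA_PRIORITY     u32
 *            ROCKER_TLV_OF_DPA_HARDTIME     u32, always 0 here
 *            ROCKER_TLV_OF_DPA_COOKIE       u64
 *            ...                            per-table key/action TLVs from
 *                                           the rocker_cmd_flow_tbl_add_*()
 *                                           helper for entry->key.tbl_id
 */
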
static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
                                   struct rocker_desc_info *desc_info,
                                   void *priv)
{
        const struct rocker_flow_tbl_entry *entry = priv;
        struct rocker_tlv *cmd_info;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
                               entry->cookie))
                return -EMSGSIZE;
        rocker_tlv_nest_end(desc_info, cmd_info);

        return 0;
}

static int
rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
                                      struct rocker_group_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
                               ROCKER_GROUP_PORT_GET(entry->group_id)))
                return -EMSGSIZE;
        if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
                              entry->l2_interface.pop_vlan))
                return -EMSGSIZE;

        return 0;
}

static int
rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
                                    const struct rocker_group_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
                               entry->l2_rewrite.group_id))
                return -EMSGSIZE;
        if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
                           ETH_ALEN, entry->l2_rewrite.eth_src))
                return -EMSGSIZE;
        if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
                           ETH_ALEN, entry->l2_rewrite.eth_dst))
                return -EMSGSIZE;
        if (entry->l2_rewrite.vlan_id &&
            rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->l2_rewrite.vlan_id))
                return -EMSGSIZE;

        return 0;
}

static int
rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
                                   const struct rocker_group_tbl_entry *entry)
{
        int i;
        struct rocker_tlv *group_ids;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
                               entry->group_count))
                return -EMSGSIZE;

        group_ids = rocker_tlv_nest_start(desc_info,
                                          ROCKER_TLV_OF_DPA_GROUP_IDS);
        if (!group_ids)
                return -EMSGSIZE;

        for (i = 0; i < entry->group_count; i++)
                /* Note TLV array is 1-based */
                if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
                        return -EMSGSIZE;

        rocker_tlv_nest_end(desc_info, group_ids);

        return 0;
}

static int
rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
                                    const struct rocker_group_tbl_entry *entry)
{
        if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
                           ETH_ALEN, entry->l3_unicast.eth_src))
                return -EMSGSIZE;
        if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
                           ETH_ALEN, entry->l3_unicast.eth_dst))
                return -EMSGSIZE;
        if (entry->l3_unicast.vlan_id &&
            rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->l3_unicast.vlan_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
                              entry->l3_unicast.ttl_check))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
                               entry->l3_unicast.group_id))
                return -EMSGSIZE;

        return 0;
}

static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
                                    struct rocker_desc_info *desc_info,
                                    void *priv)
{
        struct rocker_group_tbl_entry *entry = priv;
        struct rocker_tlv *cmd_info;
        int err = 0;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;

        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
                               entry->group_id))
                return -EMSGSIZE;

        switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
        case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
                err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
                break;
        case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
                err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
                break;
        case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
        case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
                err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
                break;
        case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
                err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        if (err)
                return err;

        rocker_tlv_nest_end(desc_info, cmd_info);

        return 0;
}

static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
                                    struct rocker_desc_info *desc_info,
                                    void *priv)
{
        const struct rocker_group_tbl_entry *entry = priv;
        struct rocker_tlv *cmd_info;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
                               entry->group_id))
                return -EMSGSIZE;
        rocker_tlv_nest_end(desc_info, cmd_info);

        return 0;
}

/***************************************************
 * Flow, group, FDB, internal VLAN and neigh tables
 ***************************************************/

static int rocker_init_tbls(struct rocker *rocker)
{
        hash_init(rocker->flow_tbl);
        spin_lock_init(&rocker->flow_tbl_lock);

        hash_init(rocker->group_tbl);
        spin_lock_init(&rocker->group_tbl_lock);

        hash_init(rocker->fdb_tbl);
        spin_lock_init(&rocker->fdb_tbl_lock);

        hash_init(rocker->internal_vlan_tbl);
        spin_lock_init(&rocker->internal_vlan_tbl_lock);

        hash_init(rocker->neigh_tbl);
        spin_lock_init(&rocker->neigh_tbl_lock);

        return 0;
}

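/* The five tables are DECLARE_HASHTABLE() members of struct rocker (see
 * its definition earlier in this file), so hash_init() only has to zero
 * the bucket heads; entries are allocated on demand and torn down in
 * rocker_free_tbls() below.
 */
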
static void rocker_free_tbls(struct rocker *rocker)
{
        unsigned long flags;
        struct rocker_flow_tbl_entry *flow_entry;
        struct rocker_group_tbl_entry *group_entry;
        struct rocker_fdb_tbl_entry *fdb_entry;
        struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
        struct rocker_neigh_tbl_entry *neigh_entry;
        struct hlist_node *tmp;
        int bkt;

        spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
        hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
                hash_del(&flow_entry->entry);
        spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);

        spin_lock_irqsave(&rocker->group_tbl_lock, flags);
        hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
                hash_del(&group_entry->entry);
        spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);

        spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
        hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
                hash_del(&fdb_entry->entry);
        spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);

        spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
        hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
                           tmp, internal_vlan_entry, entry)
                hash_del(&internal_vlan_entry->entry);
        spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);

        spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
        hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
                hash_del(&neigh_entry->entry);
        spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
}

static struct rocker_flow_tbl_entry *
rocker_flow_tbl_find(const struct rocker *rocker,
                     const struct rocker_flow_tbl_entry *match)
{
        struct rocker_flow_tbl_entry *found;
        size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

        hash_for_each_possible(rocker->flow_tbl, found,
                               entry, match->key_crc32) {
                if (memcmp(&found->key, &match->key, key_len) == 0)
                        return found;
        }

        return NULL;
}

static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
                               enum switchdev_trans trans, int flags,
                               struct rocker_flow_tbl_entry *match)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_flow_tbl_entry *found;
        size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
        unsigned long lock_flags;

        match->key_crc32 = crc32(~0, &match->key, key_len);

        spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);

        found = rocker_flow_tbl_find(rocker, match);

        if (found) {
                match->cookie = found->cookie;
                if (trans != SWITCHDEV_TRANS_PREPARE)
                        hash_del(&found->entry);
                rocker_port_kfree(trans, found);
                found = match;
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
        } else {
                found = match;
                found->cookie = rocker->flow_tbl_next_cookie++;
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
        }

        if (trans != SWITCHDEV_TRANS_PREPARE)
                hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);

        spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);

        return rocker_cmd_exec(rocker_port, trans, flags,
                               rocker_cmd_flow_tbl_add, found, NULL, NULL);
}

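/* Add-or-modify semantics in one example (sketch): issuing the same
 * ig_port flow twice through the helper below first sends
 * ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD with a fresh cookie; the second
 * call finds the entry by key CRC, reuses its cookie and sends
 * ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD instead:
 *
 *        rocker_flow_tbl_ig_port(rocker_port, SWITCHDEV_TRANS_NONE, 0,
 *                                in_pport, 0xffffffff,
 *                                ROCKER_OF_DPA_TABLE_ID_VLAN);
 */
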
static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
                               enum switchdev_trans trans, int flags,
                               struct rocker_flow_tbl_entry *match)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_flow_tbl_entry *found;
        size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
        unsigned long lock_flags;
        int err = 0;

        match->key_crc32 = crc32(~0, &match->key, key_len);

        spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);

        found = rocker_flow_tbl_find(rocker, match);

        if (found) {
                if (trans != SWITCHDEV_TRANS_PREPARE)
                        hash_del(&found->entry);
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
        }

        spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);

        rocker_port_kfree(trans, match);

        if (found) {
                err = rocker_cmd_exec(rocker_port, trans, flags,
                                      rocker_cmd_flow_tbl_del,
                                      found, NULL, NULL);
                rocker_port_kfree(trans, found);
        }

        return err;
}

static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
                              enum switchdev_trans trans, int flags,
                              struct rocker_flow_tbl_entry *entry)
{
        if (flags & ROCKER_OP_FLAG_REMOVE)
                return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
        else
                return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
                                   enum switchdev_trans trans, int flags,
                                   u32 in_pport, u32 in_pport_mask,
                                   enum rocker_of_dpa_table_id goto_tbl)
{
        struct rocker_flow_tbl_entry *entry;

        entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
        if (!entry)
                return -ENOMEM;

        entry->key.priority = ROCKER_PRIORITY_IG_PORT;
        entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
        entry->key.ig_port.in_pport = in_pport;
        entry->key.ig_port.in_pport_mask = in_pport_mask;
        entry->key.ig_port.goto_tbl = goto_tbl;

        return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
                                enum switchdev_trans trans, int flags,
                                u32 in_pport, __be16 vlan_id,
                                __be16 vlan_id_mask,
                                enum rocker_of_dpa_table_id goto_tbl,
                                bool untagged, __be16 new_vlan_id)
{
        struct rocker_flow_tbl_entry *entry;

        entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
        if (!entry)
                return -ENOMEM;

        entry->key.priority = ROCKER_PRIORITY_VLAN;
        entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
        entry->key.vlan.in_pport = in_pport;
        entry->key.vlan.vlan_id = vlan_id;
        entry->key.vlan.vlan_id_mask = vlan_id_mask;
        entry->key.vlan.goto_tbl = goto_tbl;

        entry->key.vlan.untagged = untagged;
        entry->key.vlan.new_vlan_id = new_vlan_id;

        return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
                                    enum switchdev_trans trans,
                                    u32 in_pport, u32 in_pport_mask,
                                    __be16 eth_type, const u8 *eth_dst,
                                    const u8 *eth_dst_mask, __be16 vlan_id,
                                    __be16 vlan_id_mask, bool copy_to_cpu,
                                    int flags)
{
        struct rocker_flow_tbl_entry *entry;

        entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
        if (!entry)
                return -ENOMEM;

        if (is_multicast_ether_addr(eth_dst)) {
                entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
                entry->key.term_mac.goto_tbl =
                         ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
        } else {
                entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
                entry->key.term_mac.goto_tbl =
                         ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
        }

        entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
        entry->key.term_mac.in_pport = in_pport;
        entry->key.term_mac.in_pport_mask = in_pport_mask;
        entry->key.term_mac.eth_type = eth_type;
        ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
        ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
        entry->key.term_mac.vlan_id = vlan_id;
        entry->key.term_mac.vlan_id_mask = vlan_id_mask;
        entry->key.term_mac.copy_to_cpu = copy_to_cpu;

        return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
                                  enum switchdev_trans trans, int flags,
                                  const u8 *eth_dst, const u8 *eth_dst_mask,
                                  __be16 vlan_id, u32 tunnel_id,
                                  enum rocker_of_dpa_table_id goto_tbl,
                                  u32 group_id, bool copy_to_cpu)
{
        struct rocker_flow_tbl_entry *entry;
        u32 priority;
        bool vlan_bridging = !!vlan_id;
        bool dflt = !eth_dst || eth_dst_mask;
        bool wild = false;

        entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
        if (!entry)
                return -ENOMEM;

        entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

        if (eth_dst) {
                entry->key.bridge.has_eth_dst = 1;
                ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
        }
        if (eth_dst_mask) {
                entry->key.bridge.has_eth_dst_mask = 1;
                ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
                if (!ether_addr_equal(eth_dst_mask, ff_mac))
                        wild = true;
        }

        priority = ROCKER_PRIORITY_UNKNOWN;
        if (vlan_bridging && dflt && wild)
                priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
        else if (vlan_bridging && dflt && !wild)
                priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
        else if (vlan_bridging && !dflt)
                priority = ROCKER_PRIORITY_BRIDGING_VLAN;
        else if (!vlan_bridging && dflt && wild)
                priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
        else if (!vlan_bridging && dflt && !wild)
                priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
        else if (!vlan_bridging && !dflt)
                priority = ROCKER_PRIORITY_BRIDGING_TENANT;

        entry->key.priority = priority;
        entry->key.bridge.vlan_id = vlan_id;
        entry->key.bridge.tunnel_id = tunnel_id;
        entry->key.bridge.goto_tbl = goto_tbl;
        entry->key.bridge.group_id = group_id;
        entry->key.bridge.copy_to_cpu = copy_to_cpu;

        return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

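/* The priority ladder above, tabulated (dflt = no exact eth_dst match,
 * wild = eth_dst_mask narrower than ff:ff:ff:ff:ff:ff; "tenant" rows are
 * the !vlan_bridging cases, i.e. tunnel-keyed bridging):
 *
 *        vlan    dflt  wild -> ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD
 *        vlan    dflt  -    -> ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT
 *        vlan    -     -    -> ROCKER_PRIORITY_BRIDGING_VLAN
 *        tenant  dflt  wild -> ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD
 *        tenant  dflt  -    -> ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT
 *        tenant  -     -    -> ROCKER_PRIORITY_BRIDGING_TENANT
 */
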
2655 static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
2656                                           enum switchdev_trans trans,
2657                                           __be16 eth_type, __be32 dst,
2658                                           __be32 dst_mask, u32 priority,
2659                                           enum rocker_of_dpa_table_id goto_tbl,
2660                                           u32 group_id, int flags)
2661 {
2662         struct rocker_flow_tbl_entry *entry;
2663
2664         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2665         if (!entry)
2666                 return -ENOMEM;
2667
2668         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2669         entry->key.priority = priority;
2670         entry->key.ucast_routing.eth_type = eth_type;
2671         entry->key.ucast_routing.dst4 = dst;
2672         entry->key.ucast_routing.dst4_mask = dst_mask;
2673         entry->key.ucast_routing.goto_tbl = goto_tbl;
2674         entry->key.ucast_routing.group_id = group_id;
2675         entry->key_len = offsetof(struct rocker_flow_tbl_key,
2676                                   ucast_routing.group_id);
2677
2678         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2679 }
2680
2681 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2682                                enum switchdev_trans trans, int flags,
2683                                u32 in_pport, u32 in_pport_mask,
2684                                const u8 *eth_src, const u8 *eth_src_mask,
2685                                const u8 *eth_dst, const u8 *eth_dst_mask,
2686                                __be16 eth_type, __be16 vlan_id,
2687                                __be16 vlan_id_mask, u8 ip_proto,
2688                                u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
2689                                u32 group_id)
2690 {
2691         u32 priority;
2692         struct rocker_flow_tbl_entry *entry;
2693
2694         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2695         if (!entry)
2696                 return -ENOMEM;
2697
2698         priority = ROCKER_PRIORITY_ACL_NORMAL;
2699         if (eth_dst && eth_dst_mask) {
2700                 if (ether_addr_equal(eth_dst_mask, mcast_mac))
2701                         priority = ROCKER_PRIORITY_ACL_DFLT;
2702                 else if (is_link_local_ether_addr(eth_dst))
2703                         priority = ROCKER_PRIORITY_ACL_CTRL;
2704         }
2705
2706         entry->key.priority = priority;
2707         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2708         entry->key.acl.in_pport = in_pport;
2709         entry->key.acl.in_pport_mask = in_pport_mask;
2710
2711         if (eth_src)
2712                 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2713         if (eth_src_mask)
2714                 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2715         if (eth_dst)
2716                 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2717         if (eth_dst_mask)
2718                 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2719
2720         entry->key.acl.eth_type = eth_type;
2721         entry->key.acl.vlan_id = vlan_id;
2722         entry->key.acl.vlan_id_mask = vlan_id_mask;
2723         entry->key.acl.ip_proto = ip_proto;
2724         entry->key.acl.ip_proto_mask = ip_proto_mask;
2725         entry->key.acl.ip_tos = ip_tos;
2726         entry->key.acl.ip_tos_mask = ip_tos_mask;
2727         entry->key.acl.group_id = group_id;
2728
2729         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2730 }
2731
2732 static struct rocker_group_tbl_entry *
2733 rocker_group_tbl_find(const struct rocker *rocker,
2734                       const struct rocker_group_tbl_entry *match)
2735 {
2736         struct rocker_group_tbl_entry *found;
2737
2738         hash_for_each_possible(rocker->group_tbl, found,
2739                                entry, match->group_id) {
2740                 if (found->group_id == match->group_id)
2741                         return found;
2742         }
2743
2744         return NULL;
2745 }
2746
2747 static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
2748                                         struct rocker_group_tbl_entry *entry)
2749 {
2750         switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2751         case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2752         case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2753                 rocker_port_kfree(trans, entry->group_ids);
2754                 break;
2755         default:
2756                 break;
2757         }
2758         rocker_port_kfree(trans, entry);
2759 }
2760
2761 static int rocker_group_tbl_add(struct rocker_port *rocker_port,
2762                                 enum switchdev_trans trans, int flags,
2763                                 struct rocker_group_tbl_entry *match)
2764 {
2765         struct rocker *rocker = rocker_port->rocker;
2766         struct rocker_group_tbl_entry *found;
2767         unsigned long lock_flags;
2768
2769         spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2770
2771         found = rocker_group_tbl_find(rocker, match);
2772
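             /* Replace an existing entry in place and issue a GROUP_MOD;
              * otherwise insert the new entry and issue a GROUP_ADD.
              */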
2773         if (found) {
2774                 if (trans != SWITCHDEV_TRANS_PREPARE)
2775                         hash_del(&found->entry);
2776                 rocker_group_tbl_entry_free(trans, found);
2777                 found = match;
2778                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2779         } else {
2780                 found = match;
2781                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2782         }
2783
2784         if (trans != SWITCHDEV_TRANS_PREPARE)
2785                 hash_add(rocker->group_tbl, &found->entry, found->group_id);
2786
2787         spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2788
2789         return rocker_cmd_exec(rocker_port, trans, flags,
2790                                rocker_cmd_group_tbl_add, found, NULL, NULL);
2791 }
2792
2793 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2794                                 enum switchdev_trans trans, int flags,
2795                                 struct rocker_group_tbl_entry *match)
2796 {
2797         struct rocker *rocker = rocker_port->rocker;
2798         struct rocker_group_tbl_entry *found;
2799         unsigned long lock_flags;
2800         int err = 0;
2801
2802         spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2803
2804         found = rocker_group_tbl_find(rocker, match);
2805
2806         if (found) {
2807                 if (trans != SWITCHDEV_TRANS_PREPARE)
2808                         hash_del(&found->entry);
2809                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2810         }
2811
2812         spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2813
2814         rocker_group_tbl_entry_free(trans, match);
2815
2816         if (found) {
2817                 err = rocker_cmd_exec(rocker_port, trans, flags,
2818                                       rocker_cmd_group_tbl_del,
2819                                       found, NULL, NULL);
2820                 rocker_group_tbl_entry_free(trans, found);
2821         }
2822
2823         return err;
2824 }
2825
2826 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2827                                enum switchdev_trans trans, int flags,
2828                                struct rocker_group_tbl_entry *entry)
2829 {
2830         if (flags & ROCKER_OP_FLAG_REMOVE)
2831                 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
2832         else
2833                 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
2834 }
2835
2836 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2837                                      enum switchdev_trans trans, int flags,
2838                                      __be16 vlan_id, u32 out_pport,
2839                                      int pop_vlan)
2840 {
2841         struct rocker_group_tbl_entry *entry;
2842
2843         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2844         if (!entry)
2845                 return -ENOMEM;
2846
2847         entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2848         entry->l2_interface.pop_vlan = pop_vlan;
2849
2850         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2851 }
2852
2853 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2854                                    enum switchdev_trans trans,
2855                                    int flags, u8 group_count,
2856                                    const u32 *group_ids, u32 group_id)
2857 {
2858         struct rocker_group_tbl_entry *entry;
2859
2860         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2861         if (!entry)
2862                 return -ENOMEM;
2863
2864         entry->group_id = group_id;
2865         entry->group_count = group_count;
2866
2867         entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
2868                                                group_count, sizeof(u32));
2869         if (!entry->group_ids) {
2870                 rocker_port_kfree(trans, entry);
2871                 return -ENOMEM;
2872         }
2873         memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2874
2875         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2876 }
2877
2878 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2879                                  enum switchdev_trans trans, int flags,
2880                                  __be16 vlan_id, u8 group_count,
2881                                  const u32 *group_ids, u32 group_id)
2882 {
2883         return rocker_group_l2_fan_out(rocker_port, trans, flags,
2884                                        group_count, group_ids,
2885                                        group_id);
2886 }
2887
2888 static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
2889                                    enum switchdev_trans trans, int flags,
2890                                    u32 index, const u8 *src_mac, const u8 *dst_mac,
2891                                    __be16 vlan_id, bool ttl_check, u32 pport)
2892 {
2893         struct rocker_group_tbl_entry *entry;
2894
2895         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2896         if (!entry)
2897                 return -ENOMEM;
2898
2899         entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2900         if (src_mac)
2901                 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2902         if (dst_mac)
2903                 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2904         entry->l3_unicast.vlan_id = vlan_id;
2905         entry->l3_unicast.ttl_check = ttl_check;
2906         entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2907
2908         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2909 }
2910
2911 static struct rocker_neigh_tbl_entry *
2912 rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
2913 {
2914         struct rocker_neigh_tbl_entry *found;
2915
2916         hash_for_each_possible(rocker->neigh_tbl, found,
2917                                entry, be32_to_cpu(ip_addr))
2918                 if (found->ip_addr == ip_addr)
2919                         return found;
2920
2921         return NULL;
2922 }
2923
2924 static void _rocker_neigh_add(struct rocker *rocker,
2925                               enum switchdev_trans trans,
2926                               struct rocker_neigh_tbl_entry *entry)
2927 {
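             /* The neigh index is assigned on PREPARE (or NONE); on COMMIT
              * the transaction hands back the entry allocated during
              * PREPARE, so the index set then is kept.  Hash insertion and
              * ref counting are skipped during PREPARE.
              */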
2928         if (trans != SWITCHDEV_TRANS_COMMIT)
2929                 entry->index = rocker->neigh_tbl_next_index++;
2930         if (trans == SWITCHDEV_TRANS_PREPARE)
2931                 return;
2932         entry->ref_count++;
2933         hash_add(rocker->neigh_tbl, &entry->entry,
2934                  be32_to_cpu(entry->ip_addr));
2935 }
2936
2937 static void _rocker_neigh_del(enum switchdev_trans trans,
2938                               struct rocker_neigh_tbl_entry *entry)
2939 {
2940         if (trans == SWITCHDEV_TRANS_PREPARE)
2941                 return;
2942         if (--entry->ref_count == 0) {
2943                 hash_del(&entry->entry);
2944                 rocker_port_kfree(trans, entry);
2945         }
2946 }
2947
2948 static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
2949                                  enum switchdev_trans trans,
2950                                  const u8 *eth_dst, bool ttl_check)
2951 {
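             /* A NULL eth_dst means this is a nexthop reference rather
              * than a MAC update, so just bump the ref count (outside
              * of the PREPARE phase).
              */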
2952         if (eth_dst) {
2953                 ether_addr_copy(entry->eth_dst, eth_dst);
2954                 entry->ttl_check = ttl_check;
2955         } else if (trans != SWITCHDEV_TRANS_PREPARE) {
2956                 entry->ref_count++;
2957         }
2958 }
2959
2960 static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
2961                                   enum switchdev_trans trans,
2962                                   int flags, __be32 ip_addr, const u8 *eth_dst)
2963 {
2964         struct rocker *rocker = rocker_port->rocker;
2965         struct rocker_neigh_tbl_entry *entry;
2966         struct rocker_neigh_tbl_entry *found;
2967         unsigned long lock_flags;
2968         __be16 eth_type = htons(ETH_P_IP);
2969         enum rocker_of_dpa_table_id goto_tbl =
2970                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2971         u32 group_id;
2972         u32 priority = 0;
2973         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2974         bool updating;
2975         bool removing;
2976         int err = 0;
2977
2978         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2979         if (!entry)
2980                 return -ENOMEM;
2981
2982         spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
2983
2984         found = rocker_neigh_tbl_find(rocker, ip_addr);
2985
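             /* Classify the op against current table state: an add on an
              * existing entry is an update, a remove needs an existing
              * entry, and only an add on a missing entry is a true add.
              */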
2986         updating = found && adding;
2987         removing = found && !adding;
2988         adding = !found && adding;
2989
2990         if (adding) {
2991                 entry->ip_addr = ip_addr;
2992                 entry->dev = rocker_port->dev;
2993                 ether_addr_copy(entry->eth_dst, eth_dst);
2994                 entry->ttl_check = true;
2995                 _rocker_neigh_add(rocker, trans, entry);
2996         } else if (removing) {
2997                 memcpy(entry, found, sizeof(*entry));
2998                 _rocker_neigh_del(trans, found);
2999         } else if (updating) {
3000                 _rocker_neigh_update(found, trans, eth_dst, true);
3001                 memcpy(entry, found, sizeof(*entry));
3002         } else {
3003                 err = -ENOENT;
3004         }
3005
3006         spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3007
3008         if (err)
3009                 goto err_out;
3010
3011         /* For each active neighbor, we have an L3 unicast group and
3012          * a /32 route to the neighbor, which uses the L3 unicast
3013          * group.  The L3 unicast group can also be referred to by
3014          * other routes' nexthops.
3015          */
3016
3017         err = rocker_group_l3_unicast(rocker_port, trans, flags,
3018                                       entry->index,
3019                                       rocker_port->dev->dev_addr,
3020                                       entry->eth_dst,
3021                                       rocker_port->internal_vlan_id,
3022                                       entry->ttl_check,
3023                                       rocker_port->pport);
3024         if (err) {
3025                 netdev_err(rocker_port->dev,
3026                            "Error (%d) L3 unicast group index %d\n",
3027                            err, entry->index);
3028                 goto err_out;
3029         }
3030
3031         if (adding || removing) {
3032                 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
3033                 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
3034                                                      eth_type, ip_addr,
3035                                                      inet_make_mask(32),
3036                                                      priority, goto_tbl,
3037                                                      group_id, flags);
3038
3039                 if (err)
3040                         netdev_err(rocker_port->dev,
3041                                    "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
3042                                    err, &entry->ip_addr, group_id);
3043         }
3044
3045 err_out:
3046         if (!adding)
3047                 rocker_port_kfree(trans, entry);
3048
3049         return err;
3050 }
3051
3052 static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
3053                                     enum switchdev_trans trans, __be32 ip_addr)
3054 {
3055         struct net_device *dev = rocker_port->dev;
3056         struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
3057         int err = 0;
3058
3059         if (!n) {
3060                 n = neigh_create(&arp_tbl, &ip_addr, dev);
3061                 if (IS_ERR(n))
3062                         return PTR_ERR(n);
3063         }
3064
3065         /* If the neigh is already resolved, then go ahead and
3066          * install the entry, otherwise start the ARP process to
3067          * resolve the neigh.
3068          */
3069
3070         if (n->nud_state & NUD_VALID)
3071                 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
3072                                              ip_addr, n->ha);
3073         else
3074                 neigh_event_send(n, NULL);
3075
3076         neigh_release(n);
3077         return err;
3078 }
3079
3080 static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
3081                                enum switchdev_trans trans, int flags,
3082                                __be32 ip_addr, u32 *index)
3083 {
3084         struct rocker *rocker = rocker_port->rocker;
3085         struct rocker_neigh_tbl_entry *entry;
3086         struct rocker_neigh_tbl_entry *found;
3087         unsigned long lock_flags;
3088         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3089         bool updating;
3090         bool removing;
3091         bool resolved = true;
3092         int err = 0;
3093
3094         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
3095         if (!entry)
3096                 return -ENOMEM;
3097
3098         spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3099
3100         found = rocker_neigh_tbl_find(rocker, ip_addr);
3101         if (found)
3102                 *index = found->index;
3103
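             /* Same add/update/remove classification as
              * rocker_port_ipv4_neigh().
              */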
3104         updating = found && adding;
3105         removing = found && !adding;
3106         adding = !found && adding;
3107
3108         if (adding) {
3109                 entry->ip_addr = ip_addr;
3110                 entry->dev = rocker_port->dev;
3111                 _rocker_neigh_add(rocker, trans, entry);
3112                 *index = entry->index;
3113                 resolved = false;
3114         } else if (removing) {
3115                 _rocker_neigh_del(trans, found);
3116         } else if (updating) {
3117                 _rocker_neigh_update(found, trans, NULL, false);
3118                 resolved = !is_zero_ether_addr(found->eth_dst);
3119         } else {
3120                 err = -ENOENT;
3121         }
3122
3123         spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3124
3125         if (!adding)
3126                 rocker_port_kfree(trans, entry);
3127
3128         if (err)
3129                 return err;
3130
3131         /* "Resolved" means the neigh's ip_addr has been resolved to its MAC. */
3132
3133         if (!resolved)
3134                 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
3135
3136         return err;
3137 }
3138
3139 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
3140                                         enum switchdev_trans trans,
3141                                         int flags, __be16 vlan_id)
3142 {
3143         struct rocker_port *p;
3144         const struct rocker *rocker = rocker_port->rocker;
3145         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3146         u32 *group_ids;
3147         u8 group_count = 0;
3148         int err = 0;
3149         int i;
3150
3151         group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
3152                                         rocker->port_count, sizeof(u32));
3153         if (!group_ids)
3154                 return -ENOMEM;
3155
3156         /* Adjust the flood group for this VLAN.  The flood group
3157          * references an L2 interface group for each port in this
3158          * VLAN.
3159          */
3160
3161         for (i = 0; i < rocker->port_count; i++) {
3162                 p = rocker->ports[i];
3163                 if (!p)
3164                         continue;
3165                 if (!rocker_port_is_bridged(p))
3166                         continue;
3167                 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3168                         group_ids[group_count++] =
3169                                 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
3170                 }
3171         }
3172
3173         /* If there are no bridged ports in this VLAN, we're done */
3174         if (group_count == 0)
3175                 goto no_ports_in_vlan;
3176
3177         err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
3178                                     group_count, group_ids, group_id);
3179         if (err)
3180                 netdev_err(rocker_port->dev,
3181                            "Error (%d) port VLAN l2 flood group\n", err);
3182
3183 no_ports_in_vlan:
3184         rocker_port_kfree(trans, group_ids);
3185         return err;
3186 }
3187
3188 static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
3189                                       enum switchdev_trans trans, int flags,
3190                                       __be16 vlan_id, bool pop_vlan)
3191 {
3192         const struct rocker *rocker = rocker_port->rocker;
3193         struct rocker_port *p;
3194         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3195         u32 out_pport;
3196         int ref = 0;
3197         int err;
3198         int i;
3199
3200         /* An L2 interface group for this port in this VLAN, but
3201          * only when port STP state is LEARNING|FORWARDING.
3202          */
3203
3204         if (rocker_port->stp_state == BR_STATE_LEARNING ||
3205             rocker_port->stp_state == BR_STATE_FORWARDING) {
3206                 out_pport = rocker_port->pport;
3207                 err = rocker_group_l2_interface(rocker_port, trans, flags,
3208                                                 vlan_id, out_pport, pop_vlan);
3209                 if (err) {
3210                         netdev_err(rocker_port->dev,
3211                                    "Error (%d) port VLAN l2 group for pport %d\n",
3212                                    err, out_pport);
3213                         return err;
3214                 }
3215         }
3216
3217         /* An L2 interface group on the CPU port for this VLAN.
3218          * Add it when the first port joins this VLAN and destroy
3219          * it when the last port leaves this VLAN.
3220          */
3221
3222         for (i = 0; i < rocker->port_count; i++) {
3223                 p = rocker->ports[i];
3224                 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
3225                         ref++;
3226         }
3227
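             /* Proceed only when the first port joins (adding, ref == 1)
              * or the last port leaves (removing, ref == 0) this VLAN.
              */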
3228         if ((!adding || ref != 1) && (adding || ref != 0))
3229                 return 0;
3230
3231         out_pport = 0;
3232         err = rocker_group_l2_interface(rocker_port, trans, flags,
3233                                         vlan_id, out_pport, pop_vlan);
3234         if (err) {
3235                 netdev_err(rocker_port->dev,
3236                            "Error (%d) port VLAN l2 group for CPU port\n", err);
3237                 return err;
3238         }
3239
3240         return 0;
3241 }
3242
3243 static const struct rocker_ctrl {
3244         const u8 *eth_dst;
3245         const u8 *eth_dst_mask;
3246         __be16 eth_type;
3247         bool acl;
3248         bool bridge;
3249         bool term;
3250         bool copy_to_cpu;
3251 } rocker_ctrls[] = {
3252         [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3253                 /* pass link local multicast pkts up to CPU for filtering */
3254                 .eth_dst = ll_mac,
3255                 .eth_dst_mask = ll_mask,
3256                 .acl = true,
3257         },
3258         [ROCKER_CTRL_LOCAL_ARP] = {
3259                 /* pass local ARP pkts up to CPU */
3260                 .eth_dst = zero_mac,
3261                 .eth_dst_mask = zero_mac,
3262                 .eth_type = htons(ETH_P_ARP),
3263                 .acl = true,
3264         },
3265         [ROCKER_CTRL_IPV4_MCAST] = {
3266                 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3267                 .eth_dst = ipv4_mcast,
3268                 .eth_dst_mask = ipv4_mask,
3269                 .eth_type = htons(ETH_P_IP),
3270                 .term  = true,
3271                 .copy_to_cpu = true,
3272         },
3273         [ROCKER_CTRL_IPV6_MCAST] = {
3274                 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3275                 .eth_dst = ipv6_mcast,
3276                 .eth_dst_mask = ipv6_mask,
3277                 .eth_type = htons(ETH_P_IPV6),
3278                 .term  = true,
3279                 .copy_to_cpu = true,
3280         },
3281         [ROCKER_CTRL_DFLT_BRIDGING] = {
3282                 /* flood any pkts on vlan */
3283                 .bridge = true,
3284                 .copy_to_cpu = true,
3285         },
3286         [ROCKER_CTRL_DFLT_OVS] = {
3287                 /* pass all pkts up to CPU */
3288                 .eth_dst = zero_mac,
3289                 .eth_dst_mask = zero_mac,
3290                 .acl = true,
3291         },
3292 };
3293
3294 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
3295                                      enum switchdev_trans trans, int flags,
3296                                      const struct rocker_ctrl *ctrl, __be16 vlan_id)
3297 {
3298         u32 in_pport = rocker_port->pport;
3299         u32 in_pport_mask = 0xffffffff;
3300         u32 out_pport = 0;
3301         const u8 *eth_src = NULL;
3302         const u8 *eth_src_mask = NULL;
3303         __be16 vlan_id_mask = htons(0xffff);
3304         u8 ip_proto = 0;
3305         u8 ip_proto_mask = 0;
3306         u8 ip_tos = 0;
3307         u8 ip_tos_mask = 0;
3308         u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3309         int err;
3310
3311         err = rocker_flow_tbl_acl(rocker_port, trans, flags,
3312                                   in_pport, in_pport_mask,
3313                                   eth_src, eth_src_mask,
3314                                   ctrl->eth_dst, ctrl->eth_dst_mask,
3315                                   ctrl->eth_type,
3316                                   vlan_id, vlan_id_mask,
3317                                   ip_proto, ip_proto_mask,
3318                                   ip_tos, ip_tos_mask,
3319                                   group_id);
3320
3321         if (err)
3322                 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3323
3324         return err;
3325 }
3326
3327 static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
3328                                         enum switchdev_trans trans, int flags,
3329                                         const struct rocker_ctrl *ctrl,
3330                                         __be16 vlan_id)
3331 {
3332         enum rocker_of_dpa_table_id goto_tbl =
3333                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3334         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3335         u32 tunnel_id = 0;
3336         int err;
3337
3338         if (!rocker_port_is_bridged(rocker_port))
3339                 return 0;
3340
3341         err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
3342                                      ctrl->eth_dst, ctrl->eth_dst_mask,
3343                                      vlan_id, tunnel_id,
3344                                      goto_tbl, group_id, ctrl->copy_to_cpu);
3345
3346         if (err)
3347                 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3348
3349         return err;
3350 }
3351
3352 static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
3353                                       enum switchdev_trans trans, int flags,
3354                                       const struct rocker_ctrl *ctrl, __be16 vlan_id)
3355 {
3356         u32 in_pport_mask = 0xffffffff;
3357         __be16 vlan_id_mask = htons(0xffff);
3358         int err;
3359
3360         if (ntohs(vlan_id) == 0)
3361                 vlan_id = rocker_port->internal_vlan_id;
3362
3363         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3364                                        rocker_port->pport, in_pport_mask,
3365                                        ctrl->eth_type, ctrl->eth_dst,
3366                                        ctrl->eth_dst_mask, vlan_id,
3367                                        vlan_id_mask, ctrl->copy_to_cpu,
3368                                        flags);
3369
3370         if (err)
3371                 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3372
3373         return err;
3374 }
3375
3376 static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
3377                                  enum switchdev_trans trans, int flags,
3378                                  const struct rocker_ctrl *ctrl, __be16 vlan_id)
3379 {
3380         if (ctrl->acl)
3381                 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
3382                                                  ctrl, vlan_id);
3383         if (ctrl->bridge)
3384                 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
3385                                                     ctrl, vlan_id);
3386
3387         if (ctrl->term)
3388                 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
3389                                                   ctrl, vlan_id);
3390
3391         return -EOPNOTSUPP;
3392 }
3393
3394 static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
3395                                      enum switchdev_trans trans, int flags,
3396                                      __be16 vlan_id)
3397 {
3398         int err = 0;
3399         int i;
3400
3401         for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3402                 if (rocker_port->ctrls[i]) {
3403                         err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3404                                                     &rocker_ctrls[i], vlan_id);
3405                         if (err)
3406                                 return err;
3407                 }
3408         }
3409
3410         return err;
3411 }
3412
3413 static int rocker_port_ctrl(struct rocker_port *rocker_port,
3414                             enum switchdev_trans trans, int flags,
3415                             const struct rocker_ctrl *ctrl)
3416 {
3417         u16 vid;
3418         int err = 0;
3419
3420         for (vid = 1; vid < VLAN_N_VID; vid++) {
3421                 if (!test_bit(vid, rocker_port->vlan_bitmap))
3422                         continue;
3423                 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3424                                             ctrl, htons(vid));
3425                 if (err)
3426                         break;
3427         }
3428
3429         return err;
3430 }
3431
3432 static int rocker_port_vlan(struct rocker_port *rocker_port,
3433                             enum switchdev_trans trans, int flags, u16 vid)
3434 {
3435         enum rocker_of_dpa_table_id goto_tbl =
3436                 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
3437         u32 in_pport = rocker_port->pport;
3438         __be16 vlan_id = htons(vid);
3439         __be16 vlan_id_mask = htons(0xffff);
3440         __be16 internal_vlan_id;
3441         bool untagged;
3442         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3443         int err;
3444
3445         internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3446
3447         if (adding && test_bit(ntohs(internal_vlan_id),
3448                                rocker_port->vlan_bitmap))
3449                 return 0; /* already added */
3450         else if (!adding && !test_bit(ntohs(internal_vlan_id),
3451                                       rocker_port->vlan_bitmap))
3452                 return 0; /* already removed */
3453
3454         change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3455
3456         if (adding) {
3457                 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
3458                                                 internal_vlan_id);
3459                 if (err) {
3460                         netdev_err(rocker_port->dev,
3461                                    "Error (%d) port ctrl vlan add\n", err);
3462                         goto err_out;
3463                 }
3464         }
3465
3466         err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
3467                                          internal_vlan_id, untagged);
3468         if (err) {
3469                 netdev_err(rocker_port->dev,
3470                            "Error (%d) port VLAN l2 groups\n", err);
3471                 goto err_out;
3472         }
3473
3474         err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
3475                                            internal_vlan_id);
3476         if (err) {
3477                 netdev_err(rocker_port->dev,
3478                            "Error (%d) port VLAN l2 flood group\n", err);
3479                 goto err_out;
3480         }
3481
3482         err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
3483                                    in_pport, vlan_id, vlan_id_mask,
3484                                    goto_tbl, untagged, internal_vlan_id);
3485         if (err)
3486                 netdev_err(rocker_port->dev,
3487                            "Error (%d) port VLAN table\n", err);
3488
3489 err_out:
3490         if (trans == SWITCHDEV_TRANS_PREPARE)
3491                 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3492
3493         return err;
3494 }
3495
3496 static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
3497                               enum switchdev_trans trans, int flags)
3498 {
3499         enum rocker_of_dpa_table_id goto_tbl;
3500         u32 in_pport;
3501         u32 in_pport_mask;
3502         int err;
3503
3504         /* Normal Ethernet Frames.  Matches pkts from any local physical
3505          * ports.  Goto VLAN tbl.
3506          */
3507
3508         in_pport = 0;
3509         in_pport_mask = 0xffff0000;
3510         goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3511
3512         err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
3513                                       in_pport, in_pport_mask,
3514                                       goto_tbl);
3515         if (err)
3516                 netdev_err(rocker_port->dev,
3517                            "Error (%d) ingress port table entry\n", err);
3518
3519         return err;
3520 }
3521
3522 struct rocker_fdb_learn_work {
3523         struct work_struct work;
3524         struct rocker_port *rocker_port;
3525         enum switchdev_trans trans;
3526         int flags;
3527         u8 addr[ETH_ALEN];
3528         u16 vid;
3529 };
3530
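     /* Deferred FDB learn notification: runs from the system workqueue so
      * the switchdev notifier chain is called in process context rather
      * than from the event processing path.
      */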
3531 static void rocker_port_fdb_learn_work(struct work_struct *work)
3532 {
3533         const struct rocker_fdb_learn_work *lw =
3534                 container_of(work, struct rocker_fdb_learn_work, work);
3535         bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3536         bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
3537         struct switchdev_notifier_fdb_info info;
3538
3539         info.addr = lw->addr;
3540         info.vid = lw->vid;
3541
3542         if (learned && removing)
3543                 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
3544                                          lw->rocker_port->dev, &info.info);
3545         else if (learned && !removing)
3546                 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
3547                                          lw->rocker_port->dev, &info.info);
3548
3549         rocker_port_kfree(lw->trans, work);
3550 }
3551
3552 static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
3553                                  enum switchdev_trans trans, int flags,
3554                                  const u8 *addr, __be16 vlan_id)
3555 {
3556         struct rocker_fdb_learn_work *lw;
3557         enum rocker_of_dpa_table_id goto_tbl =
3558                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3559         u32 out_pport = rocker_port->pport;
3560         u32 tunnel_id = 0;
3561         u32 group_id = ROCKER_GROUP_NONE;
3562         bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
3563         bool copy_to_cpu = false;
3564         int err;
3565
3566         if (rocker_port_is_bridged(rocker_port))
3567                 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3568
3569         if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
3570                 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
3571                                              NULL, vlan_id, tunnel_id, goto_tbl,
3572                                              group_id, copy_to_cpu);
3573                 if (err)
3574                         return err;
3575         }
3576
3577         if (!syncing)
3578                 return 0;
3579
3580         if (!rocker_port_is_bridged(rocker_port))
3581                 return 0;
3582
3583         lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw));
3584         if (!lw)
3585                 return -ENOMEM;
3586
3587         INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3588
3589         lw->rocker_port = rocker_port;
3590         lw->trans = trans;
3591         lw->flags = flags;
3592         ether_addr_copy(lw->addr, addr);
3593         lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3594
3595         if (trans == SWITCHDEV_TRANS_PREPARE)
3596                 rocker_port_kfree(trans, lw);
3597         else
3598                 schedule_work(&lw->work);
3599
3600         return 0;
3601 }
3602
3603 static struct rocker_fdb_tbl_entry *
3604 rocker_fdb_tbl_find(const struct rocker *rocker,
3605                     const struct rocker_fdb_tbl_entry *match)
3606 {
3607         struct rocker_fdb_tbl_entry *found;
3608
3609         hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3610                 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3611                         return found;
3612
3613         return NULL;
3614 }
3615
3616 static int rocker_port_fdb(struct rocker_port *rocker_port,
3617                            enum switchdev_trans trans,
3618                            const unsigned char *addr,
3619                            __be16 vlan_id, int flags)
3620 {
3621         struct rocker *rocker = rocker_port->rocker;
3622         struct rocker_fdb_tbl_entry *fdb;
3623         struct rocker_fdb_tbl_entry *found;
3624         bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3625         unsigned long lock_flags;
3626
3627         fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb));
3628         if (!fdb)
3629                 return -ENOMEM;
3630
3631         fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
3632         fdb->key.pport = rocker_port->pport;
3633         ether_addr_copy(fdb->key.addr, addr);
3634         fdb->key.vlan_id = vlan_id;
3635         fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3636
3637         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3638
3639         found = rocker_fdb_tbl_find(rocker, fdb);
3640
3641         if (removing && found) {
3642                 rocker_port_kfree(trans, fdb);
3643                 if (trans != SWITCHDEV_TRANS_PREPARE)
3644                         hash_del(&found->entry);
3645         } else if (!removing && !found) {
3646                 if (trans != SWITCHDEV_TRANS_PREPARE)
3647                         hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
3648         }
3649
3650         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3651
3652         /* Check if adding and already exists, or removing and can't find */
3653         if (!found != !removing) {
3654                 rocker_port_kfree(trans, fdb);
3655                 if (!found && removing)
3656                         return 0;
3657                 /* Refreshing existing to update aging timers */
3658                 flags |= ROCKER_OP_FLAG_REFRESH;
3659         }
3660
3661         return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
3662 }
3663
3664 static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
3665                                  enum switchdev_trans trans, int flags)
3666 {
3667         struct rocker *rocker = rocker_port->rocker;
3668         struct rocker_fdb_tbl_entry *found;
3669         unsigned long lock_flags;
3670         struct hlist_node *tmp;
3671         int bkt;
3672         int err = 0;
3673
3674         if (rocker_port->stp_state == BR_STATE_LEARNING ||
3675             rocker_port->stp_state == BR_STATE_FORWARDING)
3676                 return 0;
3677
3678         flags |= ROCKER_OP_FLAG_REMOVE;
3679
3680         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3681
3682         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
3683                 if (found->key.pport != rocker_port->pport)
3684                         continue;
3685                 if (!found->learned)
3686                         continue;
3687                 err = rocker_port_fdb_learn(rocker_port, trans, flags,
3688                                             found->key.addr,
3689                                             found->key.vlan_id);
3690                 if (err)
3691                         goto err_out;
3692                 if (trans != SWITCHDEV_TRANS_PREPARE)
3693                         hash_del(&found->entry);
3694         }
3695
3696 err_out:
3697         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3698
3699         return err;
3700 }
3701
3702 static int rocker_port_router_mac(struct rocker_port *rocker_port,
3703                                   enum switchdev_trans trans, int flags,
3704                                   __be16 vlan_id)
3705 {
3706         u32 in_pport_mask = 0xffffffff;
3707         __be16 eth_type;
3708         const u8 *dst_mac_mask = ff_mac;
3709         __be16 vlan_id_mask = htons(0xffff);
3710         bool copy_to_cpu = false;
3711         int err;
3712
3713         if (ntohs(vlan_id) == 0)
3714                 vlan_id = rocker_port->internal_vlan_id;
3715
3716         eth_type = htons(ETH_P_IP);
3717         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3718                                        rocker_port->pport, in_pport_mask,
3719                                        eth_type, rocker_port->dev->dev_addr,
3720                                        dst_mac_mask, vlan_id, vlan_id_mask,
3721                                        copy_to_cpu, flags);
3722         if (err)
3723                 return err;
3724
3725         eth_type = htons(ETH_P_IPV6);
3726         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3727                                        rocker_port->pport, in_pport_mask,
3728                                        eth_type, rocker_port->dev->dev_addr,
3729                                        dst_mac_mask, vlan_id, vlan_id_mask,
3730                                        copy_to_cpu, flags);
3731
3732         return err;
3733 }
3734
3735 static int rocker_port_fwding(struct rocker_port *rocker_port,
3736                               enum switchdev_trans trans, int flags)
3737 {
3738         bool pop_vlan;
3739         u32 out_pport;
3740         __be16 vlan_id;
3741         u16 vid;
3742         int err;
3743
3744         /* Port will be forwarding-enabled if its STP state is LEARNING
3745          * or FORWARDING.  Traffic from CPU can still egress, regardless of
3746          * port STP state.  Use L2 interface group on port VLANs as a way
3747          * to toggle port forwarding: if forwarding is disabled, L2
3748          * interface group will not exist.
3749          */
3750
3751         if (rocker_port->stp_state != BR_STATE_LEARNING &&
3752             rocker_port->stp_state != BR_STATE_FORWARDING)
3753                 flags |= ROCKER_OP_FLAG_REMOVE;
3754
3755         out_pport = rocker_port->pport;
3756         for (vid = 1; vid < VLAN_N_VID; vid++) {
3757                 if (!test_bit(vid, rocker_port->vlan_bitmap))
3758                         continue;
3759                 vlan_id = htons(vid);
3760                 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
3761                 err = rocker_group_l2_interface(rocker_port, trans, flags,
3762                                                 vlan_id, out_pport, pop_vlan);
3763                 if (err) {
3764                         netdev_err(rocker_port->dev,
3765                                    "Error (%d) port VLAN l2 group for pport %d\n",
3766                                    err, out_pport);
3767                         return err;
3768                 }
3769         }
3770
3771         return 0;
3772 }
3773
3774 static int rocker_port_stp_update(struct rocker_port *rocker_port,
3775                                   enum switchdev_trans trans, int flags,
3776                                   u8 state)
3777 {
3778         bool want[ROCKER_CTRL_MAX] = { 0, };
3779         bool prev_ctrls[ROCKER_CTRL_MAX];
3780         u8 prev_state;
3781         int err;
3782         int i;
3783
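             /* Snapshot ctrls and STP state so a PREPARE pass can be
              * fully rolled back at err_out; prepare must not leave any
              * lasting state changes behind.
              */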
3784         if (trans == SWITCHDEV_TRANS_PREPARE) {
3785                 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3786                 prev_state = rocker_port->stp_state;
3787         }
3788
3789         if (rocker_port->stp_state == state)
3790                 return 0;
3791
3792         rocker_port->stp_state = state;
3793
3794         switch (state) {
3795         case BR_STATE_DISABLED:
3796                 /* port is completely disabled */
3797                 break;
3798         case BR_STATE_LISTENING:
3799         case BR_STATE_BLOCKING:
3800                 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3801                 break;
3802         case BR_STATE_LEARNING:
3803         case BR_STATE_FORWARDING:
3804                 if (!rocker_port_is_ovsed(rocker_port))
3805                         want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3806                 want[ROCKER_CTRL_IPV4_MCAST] = true;
3807                 want[ROCKER_CTRL_IPV6_MCAST] = true;
3808                 if (rocker_port_is_bridged(rocker_port))
3809                         want[ROCKER_CTRL_DFLT_BRIDGING] = true;
3810                 else if (rocker_port_is_ovsed(rocker_port))
3811                         want[ROCKER_CTRL_DFLT_OVS] = true;
3812                 else
3813                         want[ROCKER_CTRL_LOCAL_ARP] = true;
3814                 break;
3815         }
3816
3817         for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3818                 if (want[i] != rocker_port->ctrls[i]) {
3819                         int ctrl_flags = flags |
3820                                          (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3821                         err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
3822                                                &rocker_ctrls[i]);
3823                         if (err)
3824                                 goto err_out;
3825                         rocker_port->ctrls[i] = want[i];
3826                 }
3827         }
3828
3829         err = rocker_port_fdb_flush(rocker_port, trans, flags);
3830         if (err)
3831                 goto err_out;
3832
3833         err = rocker_port_fwding(rocker_port, trans, flags);
3834
3835 err_out:
3836         if (trans == SWITCHDEV_TRANS_PREPARE) {
3837                 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
3838                 rocker_port->stp_state = prev_state;
3839         }
3840
3841         return err;
3842 }
3843
3844 static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
3845                                   enum switchdev_trans trans, int flags)
3846 {
3847         if (rocker_port_is_bridged(rocker_port))
3848                 /* bridge STP will enable port */
3849                 return 0;
3850
3851         /* port is not bridged, so simulate going to FORWARDING state */
3852         return rocker_port_stp_update(rocker_port, trans, flags,
3853                                       BR_STATE_FORWARDING);
3854 }
3855
3856 static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
3857                                    enum switchdev_trans trans, int flags)
3858 {
3859         if (rocker_port_is_bridged(rocker_port))
3860                 /* bridge STP will disable port */
3861                 return 0;
3862
3863         /* port is not bridged, so simulate going to DISABLED state */
3864         return rocker_port_stp_update(rocker_port, trans, flags,
3865                                       BR_STATE_DISABLED);
3866 }
3867
3868 static struct rocker_internal_vlan_tbl_entry *
3869 rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
3870 {
3871         struct rocker_internal_vlan_tbl_entry *found;
3872
3873         hash_for_each_possible(rocker->internal_vlan_tbl, found,
3874                                entry, ifindex) {
3875                 if (found->ifindex == ifindex)
3876                         return found;
3877         }
3878
3879         return NULL;
3880 }
3881
3882 static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3883                                                int ifindex)
3884 {
3885         struct rocker *rocker = rocker_port->rocker;
3886         struct rocker_internal_vlan_tbl_entry *entry;
3887         struct rocker_internal_vlan_tbl_entry *found;
3888         unsigned long lock_flags;
3889         int i;
3890
3891         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3892         if (!entry)
3893                 return 0;
3894
3895         entry->ifindex = ifindex;
3896
3897         spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3898
3899         found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3900         if (found) {
3901                 kfree(entry);
3902                 goto found;
3903         }
3904
3905         found = entry;
3906         hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
3907
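             /* Claim the first free ID in the internal VLAN range, offset
              * from ROCKER_INTERNAL_VLAN_ID_BASE.
              */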
3908         for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
3909                 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
3910                         continue;
3911                 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
3912                 goto found;
3913         }
3914
3915         netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
3916
3917 found:
3918         found->ref_count++;
3919         spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3920
3921         return found->vlan_id;
3922 }
3923
3924 static void
3925 rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
3926                                  int ifindex)
3927 {
3928         struct rocker *rocker = rocker_port->rocker;
3929         struct rocker_internal_vlan_tbl_entry *found;
3930         unsigned long lock_flags;
3931         unsigned long bit;
3932
3933         spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3934
3935         found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3936         if (!found) {
3937                 netdev_err(rocker_port->dev,
3938                            "ifindex (%d) not found in internal VLAN tbl\n",
3939                            ifindex);
3940                 goto not_found;
3941         }
3942
3943         if (--found->ref_count <= 0) {
3944                 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
3945                 clear_bit(bit, rocker->internal_vlan_bitmap);
3946                 hash_del(&found->entry);
3947                 kfree(found);
3948         }
3949
3950 not_found:
3951         spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3952 }
3953
3954 static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
3955                                 enum switchdev_trans trans, __be32 dst,
3956                                 int dst_len, const struct fib_info *fi,
3957                                 u32 tb_id, int flags)
3958 {
3959         const struct fib_nh *nh;
3960         __be16 eth_type = htons(ETH_P_IP);
3961         __be32 dst_mask = inet_make_mask(dst_len);
3962         __be16 internal_vlan_id = rocker_port->internal_vlan_id;
3963         u32 priority = fi->fib_priority;
3964         enum rocker_of_dpa_table_id goto_tbl =
3965                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3966         u32 group_id;
3967         bool nh_on_port;
3968         bool has_gw;
3969         u32 index;
3970         int err;
3971
3972         /* XXX support ECMP */
3973
3974         nh = fi->fib_nh;
3975         nh_on_port = (fi->fib_dev == rocker_port->dev);
3976         has_gw = !!nh->nh_gw;
3977
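             /* A gateway route whose nexthop device is this port resolves
              * through an L3 unicast group; anything else is sent to the
              * CPU via the L2 interface group on the port's internal VLAN.
              */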
3978         if (has_gw && nh_on_port) {
3979                 err = rocker_port_ipv4_nh(rocker_port, trans, flags,
3980                                           nh->nh_gw, &index);
3981                 if (err)
3982                         return err;
3983
3984                 group_id = ROCKER_GROUP_L3_UNICAST(index);
3985         } else {
3986                 /* Send to CPU for processing */
3987                 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
3988         }
3989
3990         err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
3991                                              dst_mask, priority, goto_tbl,
3992                                              group_id, flags);
3993         if (err)
3994                 netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
3995                            err, &dst);
3996
3997         return err;
3998 }
3999
4000 /*****************
4001  * Net device ops
4002  *****************/
4003
4004 static int rocker_port_open(struct net_device *dev)
4005 {
4006         struct rocker_port *rocker_port = netdev_priv(dev);
4007         int err;
4008
4009         err = rocker_port_dma_rings_init(rocker_port);
4010         if (err)
4011                 return err;
4012
4013         err = request_irq(rocker_msix_tx_vector(rocker_port),
4014                           rocker_tx_irq_handler, 0,
4015                           rocker_driver_name, rocker_port);
4016         if (err) {
4017                 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
4018                 goto err_request_tx_irq;
4019         }
4020
4021         err = request_irq(rocker_msix_rx_vector(rocker_port),
4022                           rocker_rx_irq_handler, 0,
4023                           rocker_driver_name, rocker_port);
4024         if (err) {
4025                 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
4026                 goto err_request_rx_irq;
4027         }
4028
4029         err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
4030         if (err)
4031                 goto err_fwd_enable;
4032
4033         napi_enable(&rocker_port->napi_tx);
4034         napi_enable(&rocker_port->napi_rx);
4035         if (!dev->proto_down)
4036                 rocker_port_set_enable(rocker_port, true);
4037         netif_start_queue(dev);
4038         return 0;
4039
4040 err_fwd_enable:
4041         free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4042 err_request_rx_irq:
4043         free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4044 err_request_tx_irq:
4045         rocker_port_dma_rings_fini(rocker_port);
4046         return err;
4047 }
4048
4049 static int rocker_port_stop(struct net_device *dev)
4050 {
4051         struct rocker_port *rocker_port = netdev_priv(dev);
4052
4053         netif_stop_queue(dev);
4054         rocker_port_set_enable(rocker_port, false);
4055         napi_disable(&rocker_port->napi_rx);
4056         napi_disable(&rocker_port->napi_tx);
4057         rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE,
4058                                 ROCKER_OP_FLAG_NOWAIT);
4059         free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4060         free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4061         rocker_port_dma_rings_fini(rocker_port);
4062
4063         return 0;
4064 }
4065
4066 static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
4067                                        const struct rocker_desc_info *desc_info)
4068 {
4069         const struct rocker *rocker = rocker_port->rocker;
4070         struct pci_dev *pdev = rocker->pdev;
4071         const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
4072         struct rocker_tlv *attr;
4073         int rem;
4074
4075         rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
4076         if (!attrs[ROCKER_TLV_TX_FRAGS])
4077                 return;
4078         rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
4079                 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
4080                 dma_addr_t dma_handle;
4081                 size_t len;
4082
4083                 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
4084                         continue;
4085                 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
4086                                         attr);
4087                 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
4088                     !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
4089                         continue;
4090                 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
4091                 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
4092                 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
4093         }
4094 }
4095
4096 static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
4097                                        struct rocker_desc_info *desc_info,
4098                                        char *buf, size_t buf_len)
4099 {
4100         const struct rocker *rocker = rocker_port->rocker;
4101         struct pci_dev *pdev = rocker->pdev;
4102         dma_addr_t dma_handle;
4103         struct rocker_tlv *frag;
4104
4105         dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
4106         if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
4107                 if (net_ratelimit())
4108                         netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
4109                 return -EIO;
4110         }
4111         frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
4112         if (!frag)
4113                 goto unmap_frag;
4114         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
4115                                dma_handle))
4116                 goto nest_cancel;
4117         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
4118                                buf_len))
4119                 goto nest_cancel;
4120         rocker_tlv_nest_end(desc_info, frag);
4121         return 0;
4122
4123 nest_cancel:
4124         rocker_tlv_nest_cancel(desc_info, frag);
4125 unmap_frag:
4126         pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
4127         return -EMSGSIZE;
4128 }
4129
4130 static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
4131 {
4132         struct rocker_port *rocker_port = netdev_priv(dev);
4133         struct rocker *rocker = rocker_port->rocker;
4134         struct rocker_desc_info *desc_info;
4135         struct rocker_tlv *frags;
4136         int i;
4137         int err;
4138
4139         desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4140         if (unlikely(!desc_info)) {
4141                 if (net_ratelimit())
4142                         netdev_err(dev, "tx ring full when queue awake\n");
4143                 return NETDEV_TX_BUSY;
4144         }
4145
4146         rocker_desc_cookie_ptr_set(desc_info, skb);
4147
4148         frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
4149         if (!frags)
4150                 goto out;
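             /* If the skb carries more fragments than one tx descriptor
              * can hold, linearize it first: skb_linearize() may
              * reallocate skb->data, so this must happen before the
              * headlen mapping below.
              */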
4151         if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
4152                 err = skb_linearize(skb);
4153                 if (err)
4154                         goto nest_cancel;
4155         }
4156         err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4157                                           skb->data, skb_headlen(skb));
4158         if (err)
4159                 goto nest_cancel;
4160
4161         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4162                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4163
4164                 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4165                                                   skb_frag_address(frag),
4166                                                   skb_frag_size(frag));
4167                 if (err)
4168                         goto unmap_frags;
4169         }
4170         rocker_tlv_nest_end(desc_info, frags);
4171
4172         rocker_desc_gen_clear(desc_info);
4173         rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
4174
4175         desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4176         if (!desc_info)
4177                 netif_stop_queue(dev);
4178
4179         return NETDEV_TX_OK;
4180
4181 unmap_frags:
4182         rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4183 nest_cancel:
4184         rocker_tlv_nest_cancel(desc_info, frags);
4185 out:
4186         dev_kfree_skb(skb);
4187         dev->stats.tx_dropped++;
4188
4189         return NETDEV_TX_OK;
4190 }
4191
4192 static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4193 {
4194         struct sockaddr *addr = p;
4195         struct rocker_port *rocker_port = netdev_priv(dev);
4196         int err;
4197
4198         if (!is_valid_ether_addr(addr->sa_data))
4199                 return -EADDRNOTAVAIL;
4200
4201         err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4202         if (err)
4203                 return err;
4204         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4205         return 0;
4206 }
4207
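/* MTU changes are pushed to the device; a running port is stopped
 * around the update and re-opened afterwards.
 */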
4208 static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
4209 {
4210         struct rocker_port *rocker_port = netdev_priv(dev);
4211         int running = netif_running(dev);
4212         int err;
4213
4214 #define ROCKER_PORT_MIN_MTU     68
4215 #define ROCKER_PORT_MAX_MTU     9000
4216
4217         if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
4218                 return -EINVAL;
4219
4220         if (running)
4221                 rocker_port_stop(dev);
4222
4223         netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
4224         dev->mtu = new_mtu;
4225
4226         err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
4227         if (err)
4228                 return err;
4229
4230         if (running)
4231                 err = rocker_port_open(dev);
4232
4233         return err;
4234 }
4235
4236 static int rocker_port_get_phys_port_name(struct net_device *dev,
4237                                           char *buf, size_t len)
4238 {
4239         struct rocker_port *rocker_port = netdev_priv(dev);
4240         struct port_name name = { .buf = buf, .len = len };
4241         int err;
4242
4243         err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
4244                               rocker_cmd_get_port_settings_prep, NULL,
4245                               rocker_cmd_get_port_settings_phys_name_proc,
4246                               &name);
4247
4248         return err ? -EOPNOTSUPP : 0;
4249 }
4250
4251 static int rocker_port_change_proto_down(struct net_device *dev,
4252                                          bool proto_down)
4253 {
4254         struct rocker_port *rocker_port = netdev_priv(dev);
4255
4256         if (rocker_port->dev->flags & IFF_UP)
4257                 rocker_port_set_enable(rocker_port, !proto_down);
4258         rocker_port->dev->proto_down = proto_down;
4259         return 0;
4260 }
4261
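/* The kernel is destroying a neighbour entry on this port; remove the
 * matching hardware IPv4 neigh entry without waiting for command
 * completion (ROCKER_OP_FLAG_NOWAIT).
 */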
4262 static void rocker_port_neigh_destroy(struct neighbour *n)
4263 {
4264         struct rocker_port *rocker_port = netdev_priv(n->dev);
4265         int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
4266         __be32 ip_addr = *(__be32 *)n->primary_key;
4267
4268         rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
4269                                flags, ip_addr, n->ha);
4270 }
4271
4272 static const struct net_device_ops rocker_port_netdev_ops = {
4273         .ndo_open                       = rocker_port_open,
4274         .ndo_stop                       = rocker_port_stop,
4275         .ndo_start_xmit                 = rocker_port_xmit,
4276         .ndo_set_mac_address            = rocker_port_set_mac_address,
4277         .ndo_change_mtu                 = rocker_port_change_mtu,
4278         .ndo_bridge_getlink             = switchdev_port_bridge_getlink,
4279         .ndo_bridge_setlink             = switchdev_port_bridge_setlink,
4280         .ndo_bridge_dellink             = switchdev_port_bridge_dellink,
4281         .ndo_fdb_add                    = switchdev_port_fdb_add,
4282         .ndo_fdb_del                    = switchdev_port_fdb_del,
4283         .ndo_fdb_dump                   = switchdev_port_fdb_dump,
4284         .ndo_get_phys_port_name         = rocker_port_get_phys_port_name,
4285         .ndo_change_proto_down          = rocker_port_change_proto_down,
4286         .ndo_neigh_destroy              = rocker_port_neigh_destroy,
4287 };
4288
4289 /********************
4290  * switchdev interface
4291  ********************/
4292
4293 static int rocker_port_attr_get(struct net_device *dev,
4294                                 struct switchdev_attr *attr)
4295 {
4296         const struct rocker_port *rocker_port = netdev_priv(dev);
4297         const struct rocker *rocker = rocker_port->rocker;
4298
4299         switch (attr->id) {
4300         case SWITCHDEV_ATTR_PORT_PARENT_ID:
4301                 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4302                 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
4303                 break;
4304         case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
4305                 attr->u.brport_flags = rocker_port->brport_flags;
4306                 break;
4307         default:
4308                 return -EOPNOTSUPP;
4309         }
4310
4311         return 0;
4312 }
4313
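/* Memory allocated during a SWITCHDEV_TRANS_PREPARE phase is queued
 * on rocker_port->trans_mem; aborting the transaction simply frees
 * everything left on that list.
 */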
4314 static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
4315 {
4316         struct list_head *mem, *tmp;
4317
4318         list_for_each_safe(mem, tmp, &rocker_port->trans_mem) {
4319                 list_del(mem);
4320                 kfree(mem);
4321         }
4322 }
4323
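/* Only the BR_LEARNING bit is reflected to hardware.  In the prepare
 * phase the original flags are restored so the commit phase sees the
 * same transition again.
 */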
4324 static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
4325                                         enum switchdev_trans trans,
4326                                         unsigned long brport_flags)
4327 {
4328         unsigned long orig_flags;
4329         int err = 0;
4330
4331         orig_flags = rocker_port->brport_flags;
4332         rocker_port->brport_flags = brport_flags;
4333         if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
4334                 err = rocker_port_set_learning(rocker_port, trans);
4335
4336         if (trans == SWITCHDEV_TRANS_PREPARE)
4337                 rocker_port->brport_flags = orig_flags;
4338
4339         return err;
4340 }
4341
4342 static int rocker_port_attr_set(struct net_device *dev,
4343                                 struct switchdev_attr *attr)
4344 {
4345         struct rocker_port *rocker_port = netdev_priv(dev);
4346         int err = 0;
4347
4348         switch (attr->trans) {
4349         case SWITCHDEV_TRANS_PREPARE:
4350                 BUG_ON(!list_empty(&rocker_port->trans_mem));
4351                 break;
4352         case SWITCHDEV_TRANS_ABORT:
4353                 rocker_port_trans_abort(rocker_port);
4354                 return 0;
4355         default:
4356                 break;
4357         }
4358
4359         switch (attr->id) {
4360         case SWITCHDEV_ATTR_PORT_STP_STATE:
4361                 err = rocker_port_stp_update(rocker_port, attr->trans,
4362                                              ROCKER_OP_FLAG_NOWAIT,
4363                                              attr->u.stp_state);
4364                 break;
4365         case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
4366                 err = rocker_port_brport_flags_set(rocker_port, attr->trans,
4367                                                    attr->u.brport_flags);
4368                 break;
4369         default:
4370                 err = -EOPNOTSUPP;
4371                 break;
4372         }
4373
4374         return err;
4375 }
4376
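/* Adding a VLAN installs both the VLAN table entry and the router MAC
 * (term MAC) entry; if the latter fails, the VLAN entry is removed
 * again so the two stay consistent.
 */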
4377 static int rocker_port_vlan_add(struct rocker_port *rocker_port,
4378                                 enum switchdev_trans trans, u16 vid, u16 flags)
4379 {
4380         int err;
4381
4382         /* XXX deal with flags for PVID and untagged */
4383
4384         err = rocker_port_vlan(rocker_port, trans, 0, vid);
4385         if (err)
4386                 return err;
4387
4388         err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
4389         if (err)
4390                 rocker_port_vlan(rocker_port, trans,
4391                                  ROCKER_OP_FLAG_REMOVE, vid);
4392
4393         return err;
4394 }
4395
4396 static int rocker_port_vlans_add(struct rocker_port *rocker_port,
4397                                  enum switchdev_trans trans,
4398                                  const struct switchdev_obj_vlan *vlan)
4399 {
4400         u16 vid;
4401         int err;
4402
4403         for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4404                 err = rocker_port_vlan_add(rocker_port, trans,
4405                                            vid, vlan->flags);
4406                 if (err)
4407                         return err;
4408         }
4409
4410         return 0;
4411 }
4412
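/* FDB entries can only be added through switchdev while the port is
 * bridged; the bridge vid is first mapped to the port's VLAN id.
 */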
4413 static int rocker_port_fdb_add(struct rocker_port *rocker_port,
4414                                enum switchdev_trans trans,
4415                                const struct switchdev_obj_fdb *fdb)
4416 {
4417         __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4418         int flags = 0;
4419
4420         if (!rocker_port_is_bridged(rocker_port))
4421                 return -EINVAL;
4422
4423         return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4424 }
4425
4426 static int rocker_port_obj_add(struct net_device *dev,
4427                                struct switchdev_obj *obj)
4428 {
4429         struct rocker_port *rocker_port = netdev_priv(dev);
4430         const struct switchdev_obj_ipv4_fib *fib4;
4431         int err = 0;
4432
4433         switch (obj->trans) {
4434         case SWITCHDEV_TRANS_PREPARE:
4435                 BUG_ON(!list_empty(&rocker_port->trans_mem));
4436                 break;
4437         case SWITCHDEV_TRANS_ABORT:
4438                 rocker_port_trans_abort(rocker_port);
4439                 return 0;
4440         default:
4441                 break;
4442         }
4443
4444         switch (obj->id) {
4445         case SWITCHDEV_OBJ_PORT_VLAN:
4446                 err = rocker_port_vlans_add(rocker_port, obj->trans,
4447                                             &obj->u.vlan);
4448                 break;
4449         case SWITCHDEV_OBJ_IPV4_FIB:
4450                 fib4 = &obj->u.ipv4_fib;
4451                 err = rocker_port_fib_ipv4(rocker_port, obj->trans,
4452                                            htonl(fib4->dst), fib4->dst_len,
4453                                            fib4->fi, fib4->tb_id, 0);
4454                 break;
4455         case SWITCHDEV_OBJ_PORT_FDB:
4456                 err = rocker_port_fdb_add(rocker_port, obj->trans, &obj->u.fdb);
4457                 break;
4458         default:
4459                 err = -EOPNOTSUPP;
4460                 break;
4461         }
4462
4463         return err;
4464 }
4465
4466 static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4467                                 u16 vid, u16 flags)
4468 {
4469         int err;
4470
4471         err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
4472                                      ROCKER_OP_FLAG_REMOVE, htons(vid));
4473         if (err)
4474                 return err;
4475
4476         return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
4477                                 ROCKER_OP_FLAG_REMOVE, vid);
4478 }
4479
4480 static int rocker_port_vlans_del(struct rocker_port *rocker_port,
4481                                  const struct switchdev_obj_vlan *vlan)
4482 {
4483         u16 vid;
4484         int err;
4485
4486         for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4487                 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
4488                 if (err)
4489                         return err;
4490         }
4491
4492         return 0;
4493 }
4494
4495 static int rocker_port_fdb_del(struct rocker_port *rocker_port,
4496                                enum switchdev_trans trans,
4497                                const struct switchdev_obj_fdb *fdb)
4498 {
4499         __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4500         int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
4501
4502         if (!rocker_port_is_bridged(rocker_port))
4503                 return -EINVAL;
4504
4505         return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4506 }
4507
4508 static int rocker_port_obj_del(struct net_device *dev,
4509                                struct switchdev_obj *obj)
4510 {
4511         struct rocker_port *rocker_port = netdev_priv(dev);
4512         const struct switchdev_obj_ipv4_fib *fib4;
4513         int err = 0;
4514
4515         switch (obj->id) {
4516         case SWITCHDEV_OBJ_PORT_VLAN:
4517                 err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
4518                 break;
4519         case SWITCHDEV_OBJ_IPV4_FIB:
4520                 fib4 = &obj->u.ipv4_fib;
4521                 err = rocker_port_fib_ipv4(rocker_port, SWITCHDEV_TRANS_NONE,
4522                                            htonl(fib4->dst), fib4->dst_len,
4523                                            fib4->fi, fib4->tb_id,
4524                                            ROCKER_OP_FLAG_REMOVE);
4525                 break;
4526         case SWITCHDEV_OBJ_PORT_FDB:
4527                 err = rocker_port_fdb_del(rocker_port, obj->trans, &obj->u.fdb);
4528                 break;
4529         default:
4530                 err = -EOPNOTSUPP;
4531                 break;
4532         }
4533
4534         return err;
4535 }
4536
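/* Walk the driver's software copy of the FDB under fdb_tbl_lock and
 * report each entry belonging to this pport through the switchdev
 * object callback.
 */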
4537 static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
4538                                 struct switchdev_obj *obj)
4539 {
4540         struct rocker *rocker = rocker_port->rocker;
4541         struct switchdev_obj_fdb *fdb = &obj->u.fdb;
4542         struct rocker_fdb_tbl_entry *found;
4543         struct hlist_node *tmp;
4544         unsigned long lock_flags;
4545         int bkt;
4546         int err = 0;
4547
4548         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4549         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
4550                 if (found->key.pport != rocker_port->pport)
4551                         continue;
4552                 fdb->addr = found->key.addr;
4553                 fdb->ndm_state = NUD_REACHABLE;
4554                 fdb->vid = rocker_port_vlan_to_vid(rocker_port,
4555                                                    found->key.vlan_id);
4556                 err = obj->cb(rocker_port->dev, obj);
4557                 if (err)
4558                         break;
4559         }
4560         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
4561
4562         return err;
4563 }
4564
4565 static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
4566                                  struct switchdev_obj *obj)
4567 {
4568         struct switchdev_obj_vlan *vlan = &obj->u.vlan;
4569         u16 vid;
4570         int err = 0;
4571
4572         for (vid = 1; vid < VLAN_N_VID; vid++) {
4573                 if (!test_bit(vid, rocker_port->vlan_bitmap))
4574                         continue;
4575                 vlan->flags = 0;
4576                 if (rocker_vlan_id_is_internal(htons(vid)))
4577                         vlan->flags |= BRIDGE_VLAN_INFO_PVID;
4578                 vlan->vid_begin = vlan->vid_end = vid;
4579                 err = obj->cb(rocker_port->dev, obj);
4580                 if (err)
4581                         break;
4582         }
4583
4584         return err;
4585 }
4586
4587 static int rocker_port_obj_dump(struct net_device *dev,
4588                                 struct switchdev_obj *obj)
4589 {
4590         const struct rocker_port *rocker_port = netdev_priv(dev);
4591         int err = 0;
4592
4593         switch (obj->id) {
4594         case SWITCHDEV_OBJ_PORT_FDB:
4595                 err = rocker_port_fdb_dump(rocker_port, obj);
4596                 break;
4597         case SWITCHDEV_OBJ_PORT_VLAN:
4598                 err = rocker_port_vlan_dump(rocker_port, obj);
4599                 break;
4600         default:
4601                 err = -EOPNOTSUPP;
4602                 break;
4603         }
4604
4605         return err;
4606 }
4607
4608 static const struct switchdev_ops rocker_port_switchdev_ops = {
4609         .switchdev_port_attr_get        = rocker_port_attr_get,
4610         .switchdev_port_attr_set        = rocker_port_attr_set,
4611         .switchdev_port_obj_add         = rocker_port_obj_add,
4612         .switchdev_port_obj_del         = rocker_port_obj_del,
4613         .switchdev_port_obj_dump        = rocker_port_obj_dump,
4614 };
4615
4616 /********************
4617  * ethtool interface
4618  ********************/
4619
4620 static int rocker_port_get_settings(struct net_device *dev,
4621                                     struct ethtool_cmd *ecmd)
4622 {
4623         struct rocker_port *rocker_port = netdev_priv(dev);
4624
4625         return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
4626 }
4627
4628 static int rocker_port_set_settings(struct net_device *dev,
4629                                     struct ethtool_cmd *ecmd)
4630 {
4631         struct rocker_port *rocker_port = netdev_priv(dev);
4632
4633         return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
4634 }
4635
4636 static void rocker_port_get_drvinfo(struct net_device *dev,
4637                                     struct ethtool_drvinfo *drvinfo)
4638 {
4639         strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4640         strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
4641 }
4642
4643 static struct rocker_port_stats {
4644         char str[ETH_GSTRING_LEN];
4645         int type;
4646 } rocker_port_stats[] = {
4647         { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS,    },
4648         { "rx_bytes",   ROCKER_TLV_CMD_PORT_STATS_RX_BYTES,   },
4649         { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4650         { "rx_errors",  ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS,  },
4651
4652         { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS,    },
4653         { "tx_bytes",   ROCKER_TLV_CMD_PORT_STATS_TX_BYTES,   },
4654         { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4655         { "tx_errors",  ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS,  },
4656 };
4657
4658 #define ROCKER_PORT_STATS_LEN  ARRAY_SIZE(rocker_port_stats)
4659
4660 static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4661                                     u8 *data)
4662 {
4663         u8 *p = data;
4664         int i;
4665
4666         switch (stringset) {
4667         case ETH_SS_STATS:
4668                 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4669                         memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4670                         p += ETH_GSTRING_LEN;
4671                 }
4672                 break;
4673         }
4674 }
4675
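/* Build a GET_PORT_STATS command for this pport.  The reply is decoded
 * by rocker_cmd_get_port_stats_ethtool_proc(), which copies whichever
 * stats TLVs are present into the ethtool data array in
 * rocker_port_stats[] order.
 */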
4676 static int
4677 rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
4678                                struct rocker_desc_info *desc_info,
4679                                void *priv)
4680 {
4681         struct rocker_tlv *cmd_stats;
4682
4683         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4684                                ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4685                 return -EMSGSIZE;
4686
4687         cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4688         if (!cmd_stats)
4689                 return -EMSGSIZE;
4690
4691         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4692                                rocker_port->pport))
4693                 return -EMSGSIZE;
4694
4695         rocker_tlv_nest_end(desc_info, cmd_stats);
4696
4697         return 0;
4698 }
4699
4700 static int
4701 rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
4702                                        const struct rocker_desc_info *desc_info,
4703                                        void *priv)
4704 {
4705         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
4706         const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
4707         const struct rocker_tlv *pattr;
4708         u32 pport;
4709         u64 *data = priv;
4710         int i;
4711
4712         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
4713
4714         if (!attrs[ROCKER_TLV_CMD_INFO])
4715                 return -EIO;
4716
4717         rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
4718                                 attrs[ROCKER_TLV_CMD_INFO]);
4719
4720         if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
4721                 return -EIO;
4722
4723         pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
4724         if (pport != rocker_port->pport)
4725                 return -EIO;
4726
4727         for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4728                 pattr = stats_attrs[rocker_port_stats[i].type];
4729                 if (!pattr)
4730                         continue;
4731
4732                 data[i] = rocker_tlv_get_u64(pattr);
4733         }
4734
4735         return 0;
4736 }
4737
4738 static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
4739                                              void *priv)
4740 {
4741         return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
4742                                rocker_cmd_get_port_stats_prep, NULL,
4743                                rocker_cmd_get_port_stats_ethtool_proc,
4744                                priv);
4745 }
4746
4747 static void rocker_port_get_stats(struct net_device *dev,
4748                                   struct ethtool_stats *stats, u64 *data)
4749 {
4750         struct rocker_port *rocker_port = netdev_priv(dev);
4751
4752         if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4753                 int i;
4754
4755                 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
4756                         data[i] = 0;
4757         }
4758 }
4759
4760 static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4761 {
4762         switch (sset) {
4763         case ETH_SS_STATS:
4764                 return ROCKER_PORT_STATS_LEN;
4765         default:
4766                 return -EOPNOTSUPP;
4767         }
4768 }
4769
4770 static const struct ethtool_ops rocker_port_ethtool_ops = {
4771         .get_settings           = rocker_port_get_settings,
4772         .set_settings           = rocker_port_set_settings,
4773         .get_drvinfo            = rocker_port_get_drvinfo,
4774         .get_link               = ethtool_op_get_link,
4775         .get_strings            = rocker_port_get_strings,
4776         .get_ethtool_stats      = rocker_port_get_stats,
4777         .get_sset_count         = rocker_port_get_sset_count,
4778 };
4779
4780 /*****************
4781  * NAPI interface
4782  *****************/
4783
4784 static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4785 {
4786         return container_of(napi, struct rocker_port, napi_tx);
4787 }
4788
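/* Tx completion: reclaim finished descriptors, unmap their fragments,
 * account the packets, wake the queue if it was stopped for lack of
 * descriptors, and hand the consumed descriptors back to the device
 * as ring credits.
 */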
4789 static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4790 {
4791         struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
4792         const struct rocker *rocker = rocker_port->rocker;
4793         const struct rocker_desc_info *desc_info;
4794         u32 credits = 0;
4795         int err;
4796
4797         /* Cleanup tx descriptors */
4798         while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
4799                 struct sk_buff *skb;
4800
4801                 err = rocker_desc_err(desc_info);
4802                 if (err && net_ratelimit())
4803                         netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4804                                    err);
4805                 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4806
4807                 skb = rocker_desc_cookie_ptr_get(desc_info);
4808                 if (err == 0) {
4809                         rocker_port->dev->stats.tx_packets++;
4810                         rocker_port->dev->stats.tx_bytes += skb->len;
4811                 } else {
4812                         rocker_port->dev->stats.tx_errors++;
4813                 }
4814
4815                 dev_kfree_skb_any(skb);
4816                 credits++;
4817         }
4818
4819         if (credits && netif_queue_stopped(rocker_port->dev))
4820                 netif_wake_queue(rocker_port->dev);
4821
4822         napi_complete(napi);
4823         rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
4824
4825         return 0;
4826 }
4827
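/* Hand one received frame to the stack.  The skb was attached to the
 * descriptor as its cookie when the ring was filled; after unmapping
 * and delivery, a fresh skb is allocated and mapped so the descriptor
 * can be reposted.
 */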
4828 static int rocker_port_rx_proc(const struct rocker *rocker,
4829                                const struct rocker_port *rocker_port,
4830                                struct rocker_desc_info *desc_info)
4831 {
4832         const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
4833         struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
4834         size_t rx_len;
4835         u16 rx_flags = 0;
4836
4837         if (!skb)
4838                 return -ENOENT;
4839
4840         rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
4841         if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
4842                 return -EINVAL;
4843         if (attrs[ROCKER_TLV_RX_FLAGS])
4844                 rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
4845
4846         rocker_dma_rx_ring_skb_unmap(rocker, attrs);
4847
4848         rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
4849         skb_put(skb, rx_len);
4850         skb->protocol = eth_type_trans(skb, rocker_port->dev);
4851
4852         if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
4853                 skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
4854
4855         rocker_port->dev->stats.rx_packets++;
4856         rocker_port->dev->stats.rx_bytes += skb->len;
4857
4858         netif_receive_skb(skb);
4859
4860         return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
4861 }
4862
4863 static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
4864 {
4865         return container_of(napi, struct rocker_port, napi_rx);
4866 }
4867
4868 static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
4869 {
4870         struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
4871         const struct rocker *rocker = rocker_port->rocker;
4872         struct rocker_desc_info *desc_info;
4873         u32 credits = 0;
4874         int err;
4875
4876         /* Process rx descriptors */
4877         while (credits < budget &&
4878                (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
4879                 err = rocker_desc_err(desc_info);
4880                 if (err) {
4881                         if (net_ratelimit())
4882                                 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
4883                                            err);
4884                 } else {
4885                         err = rocker_port_rx_proc(rocker, rocker_port,
4886                                                   desc_info);
4887                         if (err && net_ratelimit())
4888                                 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
4889                                            err);
4890                 }
4891                 if (err)
4892                         rocker_port->dev->stats.rx_errors++;
4893
4894                 rocker_desc_gen_clear(desc_info);
4895                 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
4896                 credits++;
4897         }
4898
4899         if (credits < budget)
4900                 napi_complete(napi);
4901
4902         rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
4903
4904         return credits;
4905 }
4906
4907 /*****************
4908  * PCI driver ops
4909  *****************/
4910
4911 static void rocker_carrier_init(const struct rocker_port *rocker_port)
4912 {
4913         const struct rocker *rocker = rocker_port->rocker;
4914         u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
4915         bool link_up;
4916
4917         link_up = link_status & (1ULL << rocker_port->pport);
4918         if (link_up)
4919                 netif_carrier_on(rocker_port->dev);
4920         else
4921                 netif_carrier_off(rocker_port->dev);
4922 }
4923
4924 static void rocker_remove_ports(const struct rocker *rocker)
4925 {
4926         struct rocker_port *rocker_port;
4927         int i;
4928
4929         for (i = 0; i < rocker->port_count; i++) {
4930                 rocker_port = rocker->ports[i];
4931                 if (!rocker_port)
4932                         continue;
4933                 rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
4934                                    ROCKER_OP_FLAG_REMOVE);
4935                 unregister_netdev(rocker_port->dev);
4936                 free_netdev(rocker_port->dev);
4937         }
4938         kfree(rocker->ports);
4939 }
4940
4941 static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
4942 {
4943         const struct rocker *rocker = rocker_port->rocker;
4944         const struct pci_dev *pdev = rocker->pdev;
4945         int err;
4946
4947         err = rocker_cmd_get_port_settings_macaddr(rocker_port,
4948                                                    rocker_port->dev->dev_addr);
4949         if (err) {
4950                 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
4951                 eth_hw_addr_random(rocker_port->dev);
4952         }
4953 }
4954
4955 static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
4956 {
4957         const struct pci_dev *pdev = rocker->pdev;
4958         struct rocker_port *rocker_port;
4959         struct net_device *dev;
4960         u16 untagged_vid = 0;
4961         int err;
4962
4963         dev = alloc_etherdev(sizeof(struct rocker_port));
4964         if (!dev)
4965                 return -ENOMEM;
4966         rocker_port = netdev_priv(dev);
4967         rocker_port->dev = dev;
4968         rocker_port->rocker = rocker;
4969         rocker_port->port_number = port_number;
4970         rocker_port->pport = port_number + 1;
4971         rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
4972         INIT_LIST_HEAD(&rocker_port->trans_mem);
4973
4974         rocker_port_dev_addr_init(rocker_port);
4975         dev->netdev_ops = &rocker_port_netdev_ops;
4976         dev->ethtool_ops = &rocker_port_ethtool_ops;
4977         dev->switchdev_ops = &rocker_port_switchdev_ops;
4978         netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
4979                        NAPI_POLL_WEIGHT);
4980         netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
4981                        NAPI_POLL_WEIGHT);
4982         rocker_carrier_init(rocker_port);
4983
4984         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
4985
4986         err = register_netdev(dev);
4987         if (err) {
4988                 dev_err(&pdev->dev, "register_netdev failed\n");
4989                 goto err_register_netdev;
4990         }
4991         rocker->ports[port_number] = rocker_port;
4992
4993         switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
4994
4995         rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
4996
4997         err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
4998         if (err) {
4999                 netdev_err(rocker_port->dev, "install ig port table failed\n");
5000                 goto err_port_ig_tbl;
5001         }
5002
5003         rocker_port->internal_vlan_id =
5004                 rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
5005
5006         err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
5007                                    untagged_vid, 0);
5008         if (err) {
5009                 netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
5010                 goto err_untagged_vlan;
5011         }
5012
5013         return 0;
5014
5015 err_untagged_vlan:
5016         rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
5017                            ROCKER_OP_FLAG_REMOVE);
5018 err_port_ig_tbl:
5019         rocker->ports[port_number] = NULL;
5020         unregister_netdev(dev);
5021 err_register_netdev:
5022         free_netdev(dev);
5023         return err;
5024 }
5025
5026 static int rocker_probe_ports(struct rocker *rocker)
5027 {
5028         int i;
5029         size_t alloc_size;
5030         int err;
5031
5032         alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
5033         rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
5034         if (!rocker->ports)
5035                 return -ENOMEM;
5036         for (i = 0; i < rocker->port_count; i++) {
5037                 err = rocker_probe_port(rocker, i);
5038                 if (err)
5039                         goto remove_ports;
5040         }
5041         return 0;
5042
5043 remove_ports:
5044         rocker_remove_ports(rocker);
5045         return err;
5046 }
5047
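/* The device must expose exactly the MSI-X vector count the driver
 * expects for this port count; anything else is treated as an error
 * rather than falling back to fewer vectors.
 */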
5048 static int rocker_msix_init(struct rocker *rocker)
5049 {
5050         struct pci_dev *pdev = rocker->pdev;
5051         int msix_entries;
5052         int i;
5053         int err;
5054
5055         msix_entries = pci_msix_vec_count(pdev);
5056         if (msix_entries < 0)
5057                 return msix_entries;
5058
5059         if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
5060                 return -EINVAL;
5061
5062         rocker->msix_entries = kmalloc_array(msix_entries,
5063                                              sizeof(struct msix_entry),
5064                                              GFP_KERNEL);
5065         if (!rocker->msix_entries)
5066                 return -ENOMEM;
5067
5068         for (i = 0; i < msix_entries; i++)
5069                 rocker->msix_entries[i].entry = i;
5070
5071         err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
5072         if (err < 0)
5073                 goto err_enable_msix;
5074
5075         return 0;
5076
5077 err_enable_msix:
5078         kfree(rocker->msix_entries);
5079         return err;
5080 }
5081
5082 static void rocker_msix_fini(const struct rocker *rocker)
5083 {
5084         pci_disable_msix(rocker->pdev);
5085         kfree(rocker->msix_entries);
5086 }
5087
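/* Bring-up order: enable PCI and DMA masks, map BAR0, set up MSI-X
 * (the vector count scales with the port count), run a basic hw test,
 * reset the device, init the DMA rings, request the cmd and event
 * IRQs, init the flow tables, then register one netdev per physical
 * port.  The error path unwinds in exactly the reverse order.
 */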
5088 static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5089 {
5090         struct rocker *rocker;
5091         int err;
5092
5093         rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
5094         if (!rocker)
5095                 return -ENOMEM;
5096
5097         err = pci_enable_device(pdev);
5098         if (err) {
5099                 dev_err(&pdev->dev, "pci_enable_device failed\n");
5100                 goto err_pci_enable_device;
5101         }
5102
5103         err = pci_request_regions(pdev, rocker_driver_name);
5104         if (err) {
5105                 dev_err(&pdev->dev, "pci_request_regions failed\n");
5106                 goto err_pci_request_regions;
5107         }
5108
5109         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5110         if (!err) {
5111                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5112                 if (err) {
5113                         dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
5114                         goto err_pci_set_dma_mask;
5115                 }
5116         } else {
5117                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5118                 if (err) {
5119                         dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
5120                         goto err_pci_set_dma_mask;
5121                 }
5122         }
5123
5124         if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
5125                 dev_err(&pdev->dev, "invalid PCI region size\n");
5126                 err = -EINVAL;
5127                 goto err_pci_resource_len_check;
5128         }
5129
5130         rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
5131                                   pci_resource_len(pdev, 0));
5132         if (!rocker->hw_addr) {
5133                 dev_err(&pdev->dev, "ioremap failed\n");
5134                 err = -EIO;
5135                 goto err_ioremap;
5136         }
5137         pci_set_master(pdev);
5138
5139         rocker->pdev = pdev;
5140         pci_set_drvdata(pdev, rocker);
5141
5142         rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
5143
5144         err = rocker_msix_init(rocker);
5145         if (err) {
5146                 dev_err(&pdev->dev, "MSI-X init failed\n");
5147                 goto err_msix_init;
5148         }
5149
5150         err = rocker_basic_hw_test(rocker);
5151         if (err) {
5152                 dev_err(&pdev->dev, "basic hw test failed\n");
5153                 goto err_basic_hw_test;
5154         }
5155
5156         rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5157
5158         err = rocker_dma_rings_init(rocker);
5159         if (err)
5160                 goto err_dma_rings_init;
5161
5162         err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
5163                           rocker_cmd_irq_handler, 0,
5164                           rocker_driver_name, rocker);
5165         if (err) {
5166                 dev_err(&pdev->dev, "cannot assign cmd irq\n");
5167                 goto err_request_cmd_irq;
5168         }
5169
5170         err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
5171                           rocker_event_irq_handler, 0,
5172                           rocker_driver_name, rocker);
5173         if (err) {
5174                 dev_err(&pdev->dev, "cannot assign event irq\n");
5175                 goto err_request_event_irq;
5176         }
5177
5178         rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
5179
5180         err = rocker_init_tbls(rocker);
5181         if (err) {
5182                 dev_err(&pdev->dev, "cannot init rocker tables\n");
5183                 goto err_init_tbls;
5184         }
5185
5186         err = rocker_probe_ports(rocker);
5187         if (err) {
5188                 dev_err(&pdev->dev, "failed to probe ports\n");
5189                 goto err_probe_ports;
5190         }
5191
5192         dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
5193                  (int)sizeof(rocker->hw.id), &rocker->hw.id);
5194
5195         return 0;
5196
5197 err_probe_ports:
5198         rocker_free_tbls(rocker);
5199 err_init_tbls:
5200         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5201 err_request_event_irq:
5202         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5203 err_request_cmd_irq:
5204         rocker_dma_rings_fini(rocker);
5205 err_dma_rings_init:
5206 err_basic_hw_test:
5207         rocker_msix_fini(rocker);
5208 err_msix_init:
5209         iounmap(rocker->hw_addr);
5210 err_ioremap:
5211 err_pci_resource_len_check:
5212 err_pci_set_dma_mask:
5213         pci_release_regions(pdev);
5214 err_pci_request_regions:
5215         pci_disable_device(pdev);
5216 err_pci_enable_device:
5217         kfree(rocker);
5218         return err;
5219 }
5220
5221 static void rocker_remove(struct pci_dev *pdev)
5222 {
5223         struct rocker *rocker = pci_get_drvdata(pdev);
5224
5225         rocker_free_tbls(rocker);
5226         rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5227         rocker_remove_ports(rocker);
5228         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5229         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5230         rocker_dma_rings_fini(rocker);
5231         rocker_msix_fini(rocker);
5232         iounmap(rocker->hw_addr);
5233         pci_release_regions(rocker->pdev);
5234         pci_disable_device(rocker->pdev);
5235         kfree(rocker);
5236 }
5237
5238 static struct pci_driver rocker_pci_driver = {
5239         .name           = rocker_driver_name,
5240         .id_table       = rocker_pci_id_table,
5241         .probe          = rocker_probe,
5242         .remove         = rocker_remove,
5243 };
5244
5245 /************************************
5246  * Net device notifier event handler
5247  ************************************/
5248
5249 static bool rocker_port_dev_check(const struct net_device *dev)
5250 {
5251         return dev->netdev_ops == &rocker_port_netdev_ops;
5252 }
5253
5254 static int rocker_port_bridge_join(struct rocker_port *rocker_port,
5255                                    struct net_device *bridge)
5256 {
5257         u16 untagged_vid = 0;
5258         int err;
5259
5260         /* Port is joining bridge, so the internal VLAN for the
5261          * port is going to change to the bridge internal VLAN.
5262          * Let's remove untagged VLAN (vid=0) from port and
5263          * re-add once internal VLAN has changed.
5264          */
5265
5266         err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5267         if (err)
5268                 return err;
5269
5270         rocker_port_internal_vlan_id_put(rocker_port,
5271                                          rocker_port->dev->ifindex);
5272         rocker_port->internal_vlan_id =
5273                 rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
5274
5275         rocker_port->bridge_dev = bridge;
5276         switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
5277
5278         return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
5279                                     untagged_vid, 0);
5280 }
5281
5282 static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
5283 {
5284         u16 untagged_vid = 0;
5285         int err;
5286
5287         err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5288         if (err)
5289                 return err;
5290
5291         rocker_port_internal_vlan_id_put(rocker_port,
5292                                          rocker_port->bridge_dev->ifindex);
5293         rocker_port->internal_vlan_id =
5294                 rocker_port_internal_vlan_id_get(rocker_port,
5295                                                  rocker_port->dev->ifindex);
5296
5297         switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
5298                                     false);
5299         rocker_port->bridge_dev = NULL;
5300
5301         err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
5302                                    untagged_vid, 0);
5303         if (err)
5304                 return err;
5305
5306         if (rocker_port->dev->flags & IFF_UP)
5307                 err = rocker_port_fwd_enable(rocker_port,
5308                                              SWITCHDEV_TRANS_NONE, 0);
5309
5310         return err;
5311 }
5312
5314 static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
5315                                    struct net_device *master)
5316 {
5317         int err;
5318
5319         rocker_port->bridge_dev = master;
5320
5321         err = rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
5322         if (err)
5323                 return err;
5324         err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
5325
5326         return err;
5327 }
5328
5329 static int rocker_port_master_linked(struct rocker_port *rocker_port,
5330                                      struct net_device *master)
5331 {
5332         int err = 0;
5333
5334         if (netif_is_bridge_master(master))
5335                 err = rocker_port_bridge_join(rocker_port, master);
5336         else if (netif_is_ovs_master(master))
5337                 err = rocker_port_ovs_changed(rocker_port, master);
5338         return err;
5339 }
5340
5341 static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
5342 {
5343         int err = 0;
5344
5345         if (rocker_port_is_bridged(rocker_port))
5346                 err = rocker_port_bridge_leave(rocker_port);
5347         else if (rocker_port_is_ovsed(rocker_port))
5348                 err = rocker_port_ovs_changed(rocker_port, NULL);
5349         return err;
5350 }
5351
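/* NETDEV_CHANGEUPPER tells us when one of our ports is enslaved to or
 * released from a bridge or OVS master, so internal VLAN and
 * forwarding state can be updated to match.
 */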
5352 static int rocker_netdevice_event(struct notifier_block *unused,
5353                                   unsigned long event, void *ptr)
5354 {
5355         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5356         struct netdev_notifier_changeupper_info *info;
5357         struct rocker_port *rocker_port;
5358         int err;
5359
5360         if (!rocker_port_dev_check(dev))
5361                 return NOTIFY_DONE;
5362
5363         switch (event) {
5364         case NETDEV_CHANGEUPPER:
5365                 info = ptr;
5366                 if (!info->master)
5367                         goto out;
5368                 rocker_port = netdev_priv(dev);
5369                 if (info->linking) {
5370                         err = rocker_port_master_linked(rocker_port,
5371                                                         info->upper_dev);
5372                         if (err)
5373                                 netdev_warn(dev, "failed to reflect master linked (err %d)\n",
5374                                             err);
5375                 } else {
5376                         err = rocker_port_master_unlinked(rocker_port);
5377                         if (err)
5378                                 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
5379                                             err);
5380                 }
5381                 break;
5382         }
5383 out:
5384         return NOTIFY_DONE;
5385 }
5386
5387 static struct notifier_block rocker_netdevice_nb __read_mostly = {
5388         .notifier_call = rocker_netdevice_event,
5389 };
5390
5391 /************************************
5392  * Net event notifier event handler
5393  ************************************/
5394
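/* Reflect ARP table updates into the hardware: a neighbour in a valid
 * NUD state is added or updated, an invalid one is removed, in both
 * cases without waiting for command completion.
 */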
5395 static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
5396 {
5397         struct rocker_port *rocker_port = netdev_priv(dev);
5398         int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
5399                     ROCKER_OP_FLAG_NOWAIT;
5400         __be32 ip_addr = *(__be32 *)n->primary_key;
5401
5402         return rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
5403                                       flags, ip_addr, n->ha);
5404 }
5405
5406 static int rocker_netevent_event(struct notifier_block *unused,
5407                                  unsigned long event, void *ptr)
5408 {
5409         struct net_device *dev;
5410         struct neighbour *n = ptr;
5411         int err;
5412
5413         switch (event) {
5414         case NETEVENT_NEIGH_UPDATE:
5415                 if (n->tbl != &arp_tbl)
5416                         return NOTIFY_DONE;
5417                 dev = n->dev;
5418                 if (!rocker_port_dev_check(dev))
5419                         return NOTIFY_DONE;
5420                 err = rocker_neigh_update(dev, n);
5421                 if (err)
5422                         netdev_warn(dev,
5423                                     "failed to handle neigh update (err %d)\n",
5424                                     err);
5425                 break;
5426         }
5427
5428         return NOTIFY_DONE;
5429 }
5430
5431 static struct notifier_block rocker_netevent_nb __read_mostly = {
5432         .notifier_call = rocker_netevent_event,
5433 };
5434
5435 /***********************
5436  * Module init and exit
5437  ***********************/
5438
5439 static int __init rocker_module_init(void)
5440 {
5441         int err;
5442
5443         register_netdevice_notifier(&rocker_netdevice_nb);
5444         register_netevent_notifier(&rocker_netevent_nb);
5445         err = pci_register_driver(&rocker_pci_driver);
5446         if (err)
5447                 goto err_pci_register_driver;
5448         return 0;
5449
5450 err_pci_register_driver:
5451         unregister_netevent_notifier(&rocker_netevent_nb);
5452         unregister_netdevice_notifier(&rocker_netdevice_nb);
5453         return err;
5454 }
5455
5456 static void __exit rocker_module_exit(void)
5457 {
5458         unregister_netevent_notifier(&rocker_netevent_nb);
5459         unregister_netdevice_notifier(&rocker_netdevice_nb);
5460         pci_unregister_driver(&rocker_pci_driver);
5461 }
5462
5463 module_init(rocker_module_init);
5464 module_exit(rocker_module_exit);
5465
5466 MODULE_LICENSE("GPL v2");
5467 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
5468 MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
5469 MODULE_DESCRIPTION("Rocker switch device driver");
5470 MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);