/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/udp_tunnel.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>

#include <linux/qed/qed_if.h>
#include "qede.h"

#ifdef CONFIG_RFS_ACCEL
struct qede_arfs_tuple {
        union {
                __be32 src_ipv4;
                struct in6_addr src_ipv6;
        };
        union {
                __be32 dst_ipv4;
                struct in6_addr dst_ipv6;
        };
        __be16  src_port;
        __be16  dst_port;
        __be16  eth_proto;
        u8      ip_proto;
};

struct qede_arfs_fltr_node {
#define QEDE_FLTR_VALID  0
        unsigned long state;

        /* pointer to aRFS packet buffer */
        void *data;

        /* dma map address of aRFS packet buffer */
        dma_addr_t mapping;

        /* length of aRFS packet buffer */
        int buf_len;

        /* tuples to hold from aRFS packet buffer */
        struct qede_arfs_tuple tuple;

        u32 flow_id;
        u16 sw_id;
        u16 rxq_id;
        u16 next_rxq_id;
        bool filter_op;
        bool used;
        struct hlist_node node;
};

struct qede_arfs {
#define QEDE_ARFS_POLL_COUNT    100
#define QEDE_RFS_FLW_BITSHIFT   (4)
#define QEDE_RFS_FLW_MASK       ((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
        struct hlist_head       arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];

        /* lock for filter list access */
        spinlock_t              arfs_list_lock;
        unsigned long           *arfs_fltr_bmap;
        int                     filter_count;
        bool                    enable;
};

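/* Ask qed to add or remove an aRFS n-tuple filter in firmware. The node
 * is marked 'used' (request in flight) until qede_arfs_filter_op() runs
 * with the firmware completion status.
 */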
static void qede_configure_arfs_fltr(struct qede_dev *edev,
                                     struct qede_arfs_fltr_node *n,
                                     u16 rxq_id, bool add_fltr)
{
        const struct qed_eth_ops *op = edev->ops;

        if (n->used)
                return;

        DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
                   "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
                   add_fltr ? "Adding" : "Deleting",
                   n->flow_id, n->sw_id, ntohs(n->tuple.src_port),
                   ntohs(n->tuple.dst_port), rxq_id);

        n->used = true;
        n->filter_op = add_fltr;
        op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
                                 rxq_id, add_fltr);
}

static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
        kfree(fltr->data);
        clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
        kfree(fltr);
}

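/* Completion callback invoked by qed once firmware has processed a
 * filter request. On failure the filter is invalidated; on success, a
 * pending queue migration (rxq_id != next_rxq_id) is carried out by
 * deleting the filter from the old queue and re-adding it on the new one.
 */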
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
{
        struct qede_arfs_fltr_node *fltr = filter;
        struct qede_dev *edev = dev;

        if (fw_rc) {
                DP_NOTICE(edev,
                          "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
                          fw_rc, fltr->flow_id, fltr->sw_id,
                          ntohs(fltr->tuple.src_port),
                          ntohs(fltr->tuple.dst_port), fltr->rxq_id);

                spin_lock_bh(&edev->arfs->arfs_list_lock);

                fltr->used = false;
                clear_bit(QEDE_FLTR_VALID, &fltr->state);

                spin_unlock_bh(&edev->arfs->arfs_list_lock);
                return;
        }

        spin_lock_bh(&edev->arfs->arfs_list_lock);

        fltr->used = false;

        if (fltr->filter_op) {
                set_bit(QEDE_FLTR_VALID, &fltr->state);
                if (fltr->rxq_id != fltr->next_rxq_id)
                        qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
                                                 false);
        } else {
                clear_bit(QEDE_FLTR_VALID, &fltr->state);
                if (fltr->rxq_id != fltr->next_rxq_id) {
                        fltr->rxq_id = fltr->next_rxq_id;
                        qede_configure_arfs_fltr(edev, fltr,
                                                 fltr->rxq_id, true);
                }
        }

        spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* Walk the aRFS filter hash table: free nodes whose firmware removal has
 * completed (or all nodes, when free_fltr is set), and request removal of
 * flows that the RPS core reports as expired or that can't stay because
 * the device isn't open. Should be called while qede_lock is held.
 */
void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
{
        int i;

        for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
                struct hlist_node *temp;
                struct hlist_head *head;
                struct qede_arfs_fltr_node *fltr;

                head = &edev->arfs->arfs_hl_head[i];

                hlist_for_each_entry_safe(fltr, temp, head, node) {
                        bool del = false;

                        if (edev->state != QEDE_STATE_OPEN)
                                del = true;

                        spin_lock_bh(&edev->arfs->arfs_list_lock);

                        if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
                             !fltr->used) || free_fltr) {
                                hlist_del(&fltr->node);
                                dma_unmap_single(&edev->pdev->dev,
                                                 fltr->mapping,
                                                 fltr->buf_len, DMA_TO_DEVICE);
                                qede_free_arfs_filter(edev, fltr);
                                edev->arfs->filter_count--;
                        } else {
                                if ((rps_may_expire_flow(edev->ndev,
                                                         fltr->rxq_id,
                                                         fltr->flow_id,
                                                         fltr->sw_id) || del) &&
                                    !free_fltr)
                                        qede_configure_arfs_fltr(edev, fltr,
                                                                 fltr->rxq_id,
                                                                 false);
                        }

                        spin_unlock_bh(&edev->arfs->arfs_list_lock);
                }
        }

        spin_lock_bh(&edev->arfs->arfs_list_lock);

        if (!edev->arfs->filter_count) {
                if (edev->arfs->enable) {
                        edev->arfs->enable = false;
                        edev->ops->configure_arfs_searcher(edev->cdev, false);
                }
        } else {
                set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
                schedule_delayed_work(&edev->sp_task,
                                      QEDE_SP_TASK_POLL_DELAY);
        }

        spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* This function waits until all aRFS filters get deleted and freed.
 * On timeout it frees all filters forcefully.
 */
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
{
        int count = QEDE_ARFS_POLL_COUNT;

        while (count) {
                qede_process_arfs_filters(edev, false);

                if (!edev->arfs->filter_count)
                        break;

                msleep(100);
                count--;
        }

        if (!count) {
                DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");

                /* Something is terribly wrong, free forcefully */
                qede_process_arfs_filters(edev, true);
        }
}

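/* Allocate the per-device aRFS context: the filter hash table, the sw_id
 * bitmap, and the IRQ CPU reverse-map (rx_cpu_rmap) that the core RFS
 * code uses to match flows to the CPU servicing each queue's interrupt.
 */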
int qede_alloc_arfs(struct qede_dev *edev)
{
        int i;

        edev->arfs = vzalloc(sizeof(*edev->arfs));
        if (!edev->arfs)
                return -ENOMEM;

        spin_lock_init(&edev->arfs->arfs_list_lock);

        for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
                INIT_HLIST_HEAD(&edev->arfs->arfs_hl_head[i]);

        edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
        if (!edev->ndev->rx_cpu_rmap) {
                vfree(edev->arfs);
                edev->arfs = NULL;
                return -ENOMEM;
        }

        edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) *
                                             sizeof(long));
        if (!edev->arfs->arfs_fltr_bmap) {
                free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
                edev->ndev->rx_cpu_rmap = NULL;
                vfree(edev->arfs);
                edev->arfs = NULL;
                return -ENOMEM;
        }

        return 0;
}

void qede_free_arfs(struct qede_dev *edev)
{
        if (!edev->arfs)
                return;

        if (edev->ndev->rx_cpu_rmap)
                free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

        edev->ndev->rx_cpu_rmap = NULL;
        vfree(edev->arfs->arfs_fltr_bmap);
        edev->arfs->arfs_fltr_bmap = NULL;
        vfree(edev->arfs);
        edev->arfs = NULL;
}

static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
                                 const struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP))
                return tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
                       tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr;

        return !memcmp(&tpos->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
                       sizeof(struct in6_addr)) &&
               !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
                       sizeof(struct in6_addr));
}

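/* Linearly scan one hash bucket for an exact match on the 5-tuple:
 * L4 protocol, ethertype, IP addresses and L4 ports.
 */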
static struct qede_arfs_fltr_node *
qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
                          __be16 src_port, __be16 dst_port, u8 ip_proto)
{
        struct qede_arfs_fltr_node *tpos;

        hlist_for_each_entry(tpos, h, node)
                if (tpos->tuple.ip_proto == ip_proto &&
                    tpos->tuple.eth_proto == skb->protocol &&
                    qede_compare_ip_addr(tpos, skb) &&
                    tpos->tuple.src_port == src_port &&
                    tpos->tuple.dst_port == dst_port)
                        return tpos;

        return NULL;
}

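/* Reserve a free sw_id from the bitmap and allocate a filter node along
 * with a min_hlen-byte buffer for the packet headers handed to firmware.
 */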
static struct qede_arfs_fltr_node *
qede_alloc_filter(struct qede_dev *edev, int min_hlen)
{
        struct qede_arfs_fltr_node *n;
        int bit_id;

        bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
                                     QEDE_RFS_MAX_FLTR);

        if (bit_id >= QEDE_RFS_MAX_FLTR)
                return NULL;

        n = kzalloc(sizeof(*n), GFP_ATOMIC);
        if (!n)
                return NULL;

        n->data = kzalloc(min_hlen, GFP_ATOMIC);
        if (!n->data) {
                kfree(n);
                return NULL;
        }

        n->sw_id = (u16)bit_id;
        set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
        return n;
}

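/* .ndo_rx_flow_steer callback, invoked by the RFS core on the receive
 * path. The hash bucket is chosen from the low QEDE_RFS_FLW_BITSHIFT bits
 * of the skb hash. If a matching filter already exists it is (re)targeted
 * at rxq_index; otherwise a new filter node is built from the packet
 * headers, DMA-mapped and pushed to firmware. Returns the filter's sw_id,
 * which the core later hands back to rps_may_expire_flow().
 */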
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                       u16 rxq_index, u32 flow_id)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_arfs_fltr_node *n;
        int min_hlen, rc, tp_offset;
        struct ethhdr *eth;
        __be16 *ports;
        u16 tbl_idx;
        u8 ip_proto;

        if (skb->encapsulation)
                return -EPROTONOSUPPORT;

        if (skb->protocol != htons(ETH_P_IP) &&
            skb->protocol != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;

        if (skb->protocol == htons(ETH_P_IP)) {
                ip_proto = ip_hdr(skb)->protocol;
                tp_offset = sizeof(struct iphdr);
        } else {
                ip_proto = ipv6_hdr(skb)->nexthdr;
                tp_offset = sizeof(struct ipv6hdr);
        }

        if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
                return -EPROTONOSUPPORT;

        ports = (__be16 *)(skb->data + tp_offset);
        tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;

        spin_lock_bh(&edev->arfs->arfs_list_lock);

        n = qede_arfs_htbl_key_search(&edev->arfs->arfs_hl_head[tbl_idx],
                                      skb, ports[0], ports[1], ip_proto);
        if (n) {
                /* Filter match */
                n->next_rxq_id = rxq_index;

                if (test_bit(QEDE_FLTR_VALID, &n->state)) {
                        if (n->rxq_id != rxq_index)
                                qede_configure_arfs_fltr(edev, n, n->rxq_id,
                                                         false);
                } else {
                        if (!n->used) {
                                n->rxq_id = rxq_index;
                                qede_configure_arfs_fltr(edev, n, n->rxq_id,
                                                         true);
                        }
                }

                rc = n->sw_id;
                goto ret_unlock;
        }

        min_hlen = ETH_HLEN + skb_headlen(skb);

        n = qede_alloc_filter(edev, min_hlen);
        if (!n) {
                rc = -ENOMEM;
                goto ret_unlock;
        }

        n->buf_len = min_hlen;
        n->rxq_id = rxq_index;
        n->next_rxq_id = rxq_index;
        n->tuple.src_port = ports[0];
        n->tuple.dst_port = ports[1];
        n->flow_id = flow_id;

        if (skb->protocol == htons(ETH_P_IP)) {
                n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
                n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
        } else {
                memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
                       sizeof(struct in6_addr));
                memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
                       sizeof(struct in6_addr));
        }

        eth = (struct ethhdr *)n->data;
        eth->h_proto = skb->protocol;
        n->tuple.eth_proto = skb->protocol;
        n->tuple.ip_proto = ip_proto;
        memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));

        n->mapping = dma_map_single(&edev->pdev->dev, n->data,
                                    n->buf_len, DMA_TO_DEVICE);
        if (dma_mapping_error(&edev->pdev->dev, n->mapping)) {
                DP_NOTICE(edev, "Failed to map DMA memory for arfs\n");
                qede_free_arfs_filter(edev, n);
                rc = -ENOMEM;
                goto ret_unlock;
        }

        INIT_HLIST_NODE(&n->node);
        hlist_add_head(&n->node, &edev->arfs->arfs_hl_head[tbl_idx]);
        edev->arfs->filter_count++;

        if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
                edev->ops->configure_arfs_searcher(edev->cdev, true);
                edev->arfs->enable = true;
        }

        qede_configure_arfs_fltr(edev, n, n->rxq_id, true);

        spin_unlock_bh(&edev->arfs->arfs_list_lock);

        set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
        schedule_delayed_work(&edev->sp_task, 0);
        return n->sw_id;

ret_unlock:
        spin_unlock_bh(&edev->arfs->arfs_list_lock);
        return rc;
}
#endif

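/* qed callback reporting the tunnel ports currently configured in the
 * device; a cached VXLAN/GENEVE port that no longer matches is cleared.
 */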
void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
{
        struct qede_dev *edev = dev;

        if (edev->vxlan_dst_port != vxlan_port)
                edev->vxlan_dst_port = 0;

        if (edev->geneve_dst_port != geneve_port)
                edev->geneve_dst_port = 0;
}

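/* qed callback delivering a MAC address chosen by management firmware,
 * either as a hint or as a forced (mandatory) address.
 */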
void qede_force_mac(void *dev, u8 *mac, bool forced)
{
        struct qede_dev *edev = dev;

        /* MAC hints take effect only if we haven't set one already */
        if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
                return;

        ether_addr_copy(edev->ndev->dev_addr, mac);
        ether_addr_copy(edev->primary_mac, mac);
}

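/* Build the RSS configuration for a vport-update ramrod. The indirection
 * table and hash key are generated once from the netdev/ethtool defaults
 * and then kept sticky; the table is rebuilt if any entry points past the
 * current Rx queue count.
 */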
void qede_fill_rss_params(struct qede_dev *edev,
                          struct qed_update_vport_rss_params *rss, u8 *update)
{
        bool need_reset = false;
        int i;

        if (QEDE_RSS_COUNT(edev) <= 1) {
                memset(rss, 0, sizeof(*rss));
                *update = 0;
                return;
        }

        /* Need to validate current RSS config uses valid entries */
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
                        need_reset = true;
                        break;
                }
        }

        if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
                for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                        u16 indir_val, val;

                        val = QEDE_RSS_COUNT(edev);
                        indir_val = ethtool_rxfh_indir_default(i, val);
                        edev->rss_ind_table[i] = indir_val;
                }
                edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
        }

        /* Now that we have the queue-indirection, prepare the handles */
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);

                rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
        }

        if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
                netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
                edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
        }
        memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));

        if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
                edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
                    QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
                edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
        }
        rss->rss_caps = edev->rss_caps;

        *update = 1;
}

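/* Helpers wrapping qed's filter_config() for a single unicast MAC or
 * VLAN filter operation (add/delete/replace, per the opcode).
 */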
static int qede_set_ucast_rx_mac(struct qede_dev *edev,
                                 enum qed_filter_xcast_params_type opcode,
                                 unsigned char mac[ETH_ALEN])
{
        struct qed_filter_params filter_cmd;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.mac_valid = 1;
        ether_addr_copy(filter_cmd.filter.ucast.mac, mac);

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
                                  enum qed_filter_xcast_params_type opcode,
                                  u16 vid)
{
        struct qed_filter_params filter_cmd;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.vlan_valid = 1;
        filter_cmd.filter.ucast.vlan = vid;

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

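/* Toggle the vport's accept-any-VLAN mode; used when the number of
 * requested VLANs exceeds the device's VLAN filter credits.
 */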
static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
        struct qed_update_vport_params *params;
        int rc;

        /* Proceed only if action actually needs to be performed */
        if (edev->accept_any_vlan == action)
                return 0;

        params = vzalloc(sizeof(*params));
        if (!params)
                return -ENOMEM;

        params->vport_id = 0;
        params->accept_any_vlan = action;
        params->update_accept_any_vlan_flg = 1;

        rc = edev->ops->vport_update(edev->cdev, params);
        if (rc) {
                DP_ERR(edev, "Failed to %s accept-any-vlan\n",
                       action ? "enable" : "disable");
        } else {
                DP_INFO(edev, "%s accept-any-vlan\n",
                        action ? "enabled" : "disabled");
                edev->accept_any_vlan = action;
        }

        vfree(params);
        return rc;
}

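/* .ndo_vlan_rx_add_vid callback. The VLAN is cached on vlan_list; if the
 * interface is up and a filter credit is available it is also configured
 * in hardware, otherwise accept-any-VLAN mode is enabled instead.
 */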
int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_vlan *vlan, *tmp;
        int rc = 0;

        DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);

        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan) {
                DP_INFO(edev, "Failed to allocate struct for vlan\n");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&vlan->list);
        vlan->vid = vid;
        vlan->configured = false;

        /* Verify vlan isn't already configured */
        list_for_each_entry(tmp, &edev->vlan_list, list) {
                if (tmp->vid == vlan->vid) {
                        DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                                   "vlan already configured\n");
                        kfree(vlan);
                        return -EEXIST;
                }
        }

        /* If interface is down, cache this VLAN ID and return */
        __qede_lock(edev);
        if (edev->state != QEDE_STATE_OPEN) {
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "Interface is down, VLAN %d will be configured when interface is up\n",
                           vid);
                if (vid != 0)
                        edev->non_configured_vlans++;
                list_add(&vlan->list, &edev->vlan_list);
                goto out;
        }

        /* Check for the filter limit.
         * Note - vlan0 has a reserved filter and can be added without
         * worrying about quota
         */
        if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
            (vlan->vid == 0)) {
                rc = qede_set_ucast_rx_vlan(edev,
                                            QED_FILTER_XCAST_TYPE_ADD,
                                            vlan->vid);
                if (rc) {
                        DP_ERR(edev, "Failed to configure VLAN %d\n",
                               vlan->vid);
                        kfree(vlan);
                        goto out;
                }
                vlan->configured = true;

                /* vlan0 filter isn't consuming out of our quota */
                if (vlan->vid != 0)
                        edev->configured_vlans++;
        } else {
                /* Out of quota; activate accept-any-VLAN mode */
                if (!edev->non_configured_vlans) {
                        rc = qede_config_accept_any_vlan(edev, true);
                        if (rc) {
                                kfree(vlan);
                                goto out;
                        }
                }

                edev->non_configured_vlans++;
        }

        list_add(&vlan->list, &edev->vlan_list);

out:
        __qede_unlock(edev);
        return rc;
}

static void qede_del_vlan_from_list(struct qede_dev *edev,
                                    struct qede_vlan *vlan)
{
        /* vlan0 filter isn't consuming out of our quota */
        if (vlan->vid != 0) {
                if (vlan->configured)
                        edev->configured_vlans--;
                else
                        edev->non_configured_vlans--;
        }

        list_del(&vlan->list);
        kfree(vlan);
}

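/* Re-apply hardware filters for any cached VLANs that aren't configured
 * yet, and reconcile accept-any-VLAN mode with the remaining credits.
 */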
int qede_configure_vlan_filters(struct qede_dev *edev)
{
        int rc = 0, real_rc = 0, accept_any_vlan = 0;
        struct qed_dev_eth_info *dev_info;
        struct qede_vlan *vlan = NULL;

        if (list_empty(&edev->vlan_list))
                return 0;

        dev_info = &edev->dev_info;

        /* Configure non-configured vlans */
        list_for_each_entry(vlan, &edev->vlan_list, list) {
                if (vlan->configured)
                        continue;

                /* We have used all our credits, now enable accept_any_vlan */
                if ((vlan->vid != 0) &&
                    (edev->configured_vlans == dev_info->num_vlan_filters)) {
                        accept_any_vlan = 1;
                        continue;
                }

                DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);

                rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
                                            vlan->vid);
                if (rc) {
                        DP_ERR(edev, "Failed to configure VLAN %u\n",
                               vlan->vid);
                        real_rc = rc;
                        continue;
                }

                vlan->configured = true;
                /* vlan0 filter doesn't consume our VLAN filter's quota */
                if (vlan->vid != 0) {
                        edev->non_configured_vlans--;
                        edev->configured_vlans++;
                }
        }

        /* enable accept_any_vlan mode if we have more VLANs than credits,
         * or remove accept_any_vlan mode if we've actually removed
         * a non-configured vlan, and all remaining vlans are truly configured.
         */
        if (accept_any_vlan)
                rc = qede_config_accept_any_vlan(edev, true);
        else if (!edev->non_configured_vlans)
                rc = qede_config_accept_any_vlan(edev, false);

        if (rc && !real_rc)
                real_rc = rc;

        return real_rc;
}

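/* .ndo_vlan_rx_kill_vid callback. Removes the hardware filter (when the
 * interface is up), drops the list entry, and then tries to promote any
 * still-unconfigured VLANs into the freed filter slot.
 */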
int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_vlan *vlan = NULL;
        int rc = 0;

        DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);

        /* Find whether entry exists */
        __qede_lock(edev);
        list_for_each_entry(vlan, &edev->vlan_list, list)
                if (vlan->vid == vid)
                        break;

        if (!vlan || (vlan->vid != vid)) {
                DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                           "Vlan isn't configured\n");
                goto out;
        }

        if (edev->state != QEDE_STATE_OPEN) {
                /* As interface is already down, we don't have a VPORT
                 * instance to remove vlan filter. So just update vlan list
                 */
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "Interface is down, removing VLAN from list only\n");
                qede_del_vlan_from_list(edev, vlan);
                goto out;
        }

        /* Remove vlan */
        if (vlan->configured) {
                rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
                                            vid);
                if (rc) {
                        DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
                        goto out;
                }
        }

        qede_del_vlan_from_list(edev, vlan);

        /* We have removed a VLAN - try to see if we can
         * configure non-configured VLAN from the list.
         */
        rc = qede_configure_vlan_filters(edev);

out:
        __qede_unlock(edev);
        return rc;
}

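/* Called when the vport is being torn down: its hardware filters are
 * implicitly gone, so mark every cached VLAN as non-configured and forget
 * the accept-any-VLAN state.
 */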
void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
{
        struct qede_vlan *vlan = NULL;

        if (list_empty(&edev->vlan_list))
                return;

        list_for_each_entry(vlan, &edev->vlan_list, list) {
                if (!vlan->configured)
                        continue;

                vlan->configured = false;

                /* vlan0 filter isn't consuming out of our quota */
                if (vlan->vid != 0) {
                        edev->non_configured_vlans++;
                        edev->configured_vlans--;
                }

                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "marked vlan %d as non-configured\n", vlan->vid);
        }

        edev->accept_any_vlan = false;
}

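/* .ndo_set_features handler. Only a NETIF_F_GRO toggle matters here:
 * hardware GRO (aggregation) is configured at load time, so flipping it
 * requires a reload, unless an XDP program is attached, in which case FW
 * aggregations are off anyway and only the feature bits need updating.
 */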
static void qede_set_features_reload(struct qede_dev *edev,
                                     struct qede_reload_args *args)
{
        edev->ndev->features = args->u.features;
}

int qede_set_features(struct net_device *dev, netdev_features_t features)
{
        struct qede_dev *edev = netdev_priv(dev);
        netdev_features_t changes = features ^ dev->features;
        bool need_reload = false;

        /* No action needed if hardware GRO is disabled during driver load */
        if (changes & NETIF_F_GRO) {
                if (dev->features & NETIF_F_GRO)
                        need_reload = !edev->gro_disable;
                else
                        need_reload = edev->gro_disable;
        }

        if (need_reload) {
                struct qede_reload_args args;

                args.u.features = features;
                args.func = &qede_set_features_reload;

                /* Make sure that we definitely need to reload.
                 * In case of an eBPF attached program, there will be no FW
                 * aggregations, so no need to actually reload.
                 */
                __qede_lock(edev);
                if (edev->xdp_prog)
                        args.func(edev, &args);
                else
                        qede_reload(edev, &args, true);
                __qede_unlock(edev);

                return 1;
        }

        return 0;
}

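/* .ndo_udp_tunnel_add callback: program a VXLAN/GENEVE UDP destination
 * port into the device, provided the tunnel type is supported and no port
 * of that type is configured yet (one port of each type is supported).
 */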
void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qed_tunn_params tunn_params;
        u16 t_port = ntohs(ti->port);
        int rc;

        memset(&tunn_params, 0, sizeof(tunn_params));

        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                if (!edev->dev_info.common.vxlan_enable)
                        return;

                if (edev->vxlan_dst_port)
                        return;

                tunn_params.update_vxlan_port = 1;
                tunn_params.vxlan_port = t_port;

                __qede_lock(edev);
                rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
                __qede_unlock(edev);

                if (!rc) {
                        edev->vxlan_dst_port = t_port;
                        DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
                                   t_port);
                } else {
                        DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
                                  t_port);
                }

                break;
        case UDP_TUNNEL_TYPE_GENEVE:
                if (!edev->dev_info.common.geneve_enable)
                        return;

                if (edev->geneve_dst_port)
                        return;

                tunn_params.update_geneve_port = 1;
                tunn_params.geneve_port = t_port;

                __qede_lock(edev);
                rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
                __qede_unlock(edev);

                if (!rc) {
                        edev->geneve_dst_port = t_port;
                        DP_VERBOSE(edev, QED_MSG_DEBUG,
                                   "Added geneve port=%d\n", t_port);
                } else {
                        DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
                                  t_port);
                }

                break;
        default:
                return;
        }
}

void qede_udp_tunnel_del(struct net_device *dev,
                         struct udp_tunnel_info *ti)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qed_tunn_params tunn_params;
        u16 t_port = ntohs(ti->port);

        memset(&tunn_params, 0, sizeof(tunn_params));

        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                if (t_port != edev->vxlan_dst_port)
                        return;

                tunn_params.update_vxlan_port = 1;
                tunn_params.vxlan_port = 0;

                __qede_lock(edev);
                edev->ops->tunn_config(edev->cdev, &tunn_params);
                __qede_unlock(edev);

                edev->vxlan_dst_port = 0;

                DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
                           t_port);

                break;
        case UDP_TUNNEL_TYPE_GENEVE:
                if (t_port != edev->geneve_dst_port)
                        return;

                tunn_params.update_geneve_port = 1;
                tunn_params.geneve_port = 0;

                __qede_lock(edev);
                edev->ops->tunn_config(edev->cdev, &tunn_params);
                __qede_unlock(edev);

                edev->geneve_dst_port = 0;

                DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
                           t_port);
                break;
        default:
                return;
        }
}

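/* XDP setup: the new program is swapped in under a vport reload so the
 * datapath is quiesced while edev->xdp_prog changes.
 */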
static void qede_xdp_reload_func(struct qede_dev *edev,
                                 struct qede_reload_args *args)
{
        struct bpf_prog *old;

        old = xchg(&edev->xdp_prog, args->u.new_prog);
        if (old)
                bpf_prog_put(old);
}

static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
        struct qede_reload_args args;

        /* If we're called, there was already a bpf reference increment */
        args.func = &qede_xdp_reload_func;
        args.u.new_prog = prog;
        qede_reload(edev, &args, false);

        return 0;
}

int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
        struct qede_dev *edev = netdev_priv(dev);

        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return qede_xdp_set(edev, xdp->prog);
        case XDP_QUERY_PROG:
                xdp->prog_attached = !!edev->xdp_prog;
                return 0;
        default:
                return -EINVAL;
        }
}

static int qede_set_mcast_rx_mac(struct qede_dev *edev,
                                 enum qed_filter_xcast_params_type opcode,
                                 unsigned char *mac, int num_macs)
{
        struct qed_filter_params filter_cmd;
        int i;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_MCAST;
        filter_cmd.filter.mcast.type = opcode;
        filter_cmd.filter.mcast.num = num_macs;

        for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
                ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

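/* .ndo_set_mac_address handler. Validates the address with qed (it may
 * be forbidden while a forced MAC is in effect) and, if the device is
 * running, moves the unicast filter from the old primary MAC to the new.
 */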
int qede_set_mac_addr(struct net_device *ndev, void *p)
{
        struct qede_dev *edev = netdev_priv(ndev);
        struct sockaddr *addr = p;
        int rc;

        ASSERT_RTNL(); /* @@@TBD To be removed */

        DP_INFO(edev, "Set_mac_addr called\n");

        if (!is_valid_ether_addr(addr->sa_data)) {
                DP_NOTICE(edev, "The MAC address is not valid\n");
                return -EFAULT;
        }

        if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
                DP_NOTICE(edev, "qed prevents setting MAC\n");
                return -EINVAL;
        }

        ether_addr_copy(ndev->dev_addr, addr->sa_data);

        if (!netif_running(ndev)) {
                DP_NOTICE(edev, "The device is currently down\n");
                return 0;
        }

        /* Remove the previous primary mac */
        rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
                                   edev->primary_mac);
        if (rc)
                return rc;

        edev->ops->common->update_mac(edev->cdev, addr->sa_data);

        /* Add MAC filter according to the new unicast HW MAC address */
        ether_addr_copy(edev->primary_mac, ndev->dev_addr);
        return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
                                     edev->primary_mac);
}

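/* Sync the device's multicast filters with the netdev mc list. Up to 64
 * multicast addresses are supported; beyond that, or with IFF_ALLMULTI,
 * the Rx mode is escalated to multicast-promiscuous.
 */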
static int
qede_configure_mcast_filtering(struct net_device *ndev,
                               enum qed_filter_rx_mode_type *accept_flags)
{
        struct qede_dev *edev = netdev_priv(ndev);
        unsigned char *mc_macs, *temp;
        struct netdev_hw_addr *ha;
        int rc = 0, mc_count;
        size_t size;

        size = 64 * ETH_ALEN;

        mc_macs = kzalloc(size, GFP_KERNEL);
        if (!mc_macs) {
                DP_NOTICE(edev,
                          "Failed to allocate memory for multicast MACs\n");
                rc = -ENOMEM;
                goto exit;
        }

        temp = mc_macs;

        /* Remove all previously configured MAC filters */
        rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
                                   mc_macs, 1);
        if (rc)
                goto exit;

        netif_addr_lock_bh(ndev);

        mc_count = netdev_mc_count(ndev);
        if (mc_count <= 64) {
                netdev_for_each_mc_addr(ha, ndev) {
                        ether_addr_copy(temp, ha->addr);
                        temp += ETH_ALEN;
                }
        }

        netif_addr_unlock_bh(ndev);

        /* Check for all multicast @@@TBD resource allocation */
        if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
                if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
                        *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
        } else {
                /* Add all multicast MAC filters */
                rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
                                           mc_macs, mc_count);
        }

exit:
        kfree(mc_macs);
        return rc;
}

void qede_set_rx_mode(struct net_device *ndev)
{
        struct qede_dev *edev = netdev_priv(ndev);

        set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
        schedule_delayed_work(&edev->sp_task, 0);
}

/* Must be called with qede_lock held */
void qede_config_rx_mode(struct net_device *ndev)
{
        enum qed_filter_rx_mode_type accept_flags;
        struct qede_dev *edev = netdev_priv(ndev);
        struct qed_filter_params rx_mode;
        unsigned char *uc_macs, *temp;
        struct netdev_hw_addr *ha;
        int rc, uc_count;
        size_t size;

        netif_addr_lock_bh(ndev);

        uc_count = netdev_uc_count(ndev);
        size = uc_count * ETH_ALEN;

        uc_macs = kzalloc(size, GFP_ATOMIC);
        if (!uc_macs) {
                DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
                netif_addr_unlock_bh(ndev);
                return;
        }

        temp = uc_macs;
        netdev_for_each_uc_addr(ha, ndev) {
                ether_addr_copy(temp, ha->addr);
                temp += ETH_ALEN;
        }

        netif_addr_unlock_bh(ndev);

        /* Configure the struct for the Rx mode */
        memset(&rx_mode, 0, sizeof(struct qed_filter_params));
        rx_mode.type = QED_FILTER_TYPE_RX_MODE;

        /* Remove all previous unicast secondary macs and multicast macs
         * (configure / leave the primary mac)
         */
        rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
                                   edev->primary_mac);
        if (rc)
                goto out;

        /* Check for promiscuous */
        if (ndev->flags & IFF_PROMISC)
                accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
        else
                accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;

        /* Configure all filters regardless, in case promisc is rejected */
        if (uc_count < edev->dev_info.num_mac_filters) {
                int i;

                temp = uc_macs;
                for (i = 0; i < uc_count; i++) {
                        rc = qede_set_ucast_rx_mac(edev,
                                                   QED_FILTER_XCAST_TYPE_ADD,
                                                   temp);
                        if (rc)
                                goto out;

                        temp += ETH_ALEN;
                }
        } else {
                accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
        }

        rc = qede_configure_mcast_filtering(ndev, &accept_flags);
        if (rc)
                goto out;

        /* take care of VLAN mode */
        if (ndev->flags & IFF_PROMISC) {
                qede_config_accept_any_vlan(edev, true);
        } else if (!edev->non_configured_vlans) {
                /* It's possible that accept_any_vlan mode is set due to a
                 * previous setting of IFF_PROMISC. If vlan credits are
                 * sufficient, disable accept_any_vlan.
                 */
                qede_config_accept_any_vlan(edev, false);
        }

        rx_mode.filter.accept_flags = accept_flags;
        edev->ops->filter_config(edev->cdev, &rx_mode);
out:
        kfree(uc_macs);
}