/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/vxlan.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
};

enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
	MLX5E_TC_FLOW_NIC	= BIT(1),
	MLX5E_TC_FLOW_OFFLOADED	= BIT(2),
};

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap;   /* flows sharing the same encap */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	void *mod_hdr_actions;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
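
/* Translate an already-parsed TC flow into a NIC RX steering rule: pick
 * the destination (the VLAN table or a flow counter), attach a
 * modify-header context when pedit was requested, lazily create the TC
 * flow table on first use, and install the rule.
 */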
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flow_tag = attr->flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;
	int err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
					       parse_attr->num_mod_hdr_actions,
					       parse_attr->mod_hdr_actions,
					       &attr->mod_hdr_id);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_mod_hdr_id;
		}
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
				   &flow_act, &dest, 1);
	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(flow->rule);
	mlx5_del_flow_rules(flow->rule);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   flow->nic_attr->mod_hdr_id);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5_flow_handle *rule;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err) {
		rule = ERR_PTR(err);
		goto err_add_vlan;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
					       parse_attr->num_mod_hdr_actions,
					       parse_attr->mod_hdr_actions,
					       &attr->mod_hdr_id);
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_mod_hdr;
		}
	}

	rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
	return rule;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
		flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
		mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
	}

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
		mlx5e_detach_encap(priv, flow);
		kvfree(attr->parse_attr);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
}
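
/* Called from the neigh update path once a tunnel neighbour becomes
 * valid: offload the cached encapsulation header and re-install every
 * flow that shares this encap entry.
 */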
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       e->encap_size, e->encap_header,
			       &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, &e->flows, encap) {
		flow->esw_attr->encap_id = e->encap_id;
		flow->rule = mlx5e_tc_add_fdb_flow(priv,
						   flow->esw_attr->parse_attr,
						   flow);
		if (IS_ERR(flow->rule)) {
			err = PTR_ERR(flow->rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	}
}
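
/* Called from the neigh update path when a tunnel neighbour becomes
 * invalid: remove the offloaded flows that use this encap entry and
 * release the encap HW object. The software state stays cached so the
 * flows can be re-offloaded once the neighbour is valid again.
 */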
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;

	list_for_each_entry(flow, &e->flows, encap) {
		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
			flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
			counter = mlx5_flow_rule_counter(flow->rule);
			mlx5_del_flow_rules(flow->rule);
			mlx5_fc_destroy(priv->mdev, counter);
		}
	}

	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
		mlx5_encap_dealloc(priv->mdev, e->encap_id);
	}
}
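
/* Check the cached HW flow counters of all offloaded flows hanging off
 * this neigh hash entry; if any flow was recently active, look up the
 * neighbour and send it an event so it is not garbage collected while
 * HW is still forwarding through it.
 */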
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	u64 bytes, packets, lastuse = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = ipv6_stub->nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
			continue;
		list_for_each_entry(flow, &e->flows, encap) {
			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
				counter = mlx5_flow_rule_counter(flow->rule);
				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
	}

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n) {
			WARN(1, "The neighbour already freed\n");
			return;
		}

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5e_encap_entry *e;

		e = list_entry(next, struct mlx5e_encap_entry, flows);
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_encap_dealloc(priv->mdev, e->encap_id);

		hlist_del_rcu(&e->encap_hlist);
		kfree(e->encap_header);
		kfree(e);
	}
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}
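
/* Translate the VXLAN-specific parts of a flower match (outer UDP
 * protocol and the VNI) into the mlx5 match criteria/value buffers.
 */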
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
		struct mlx5e_priv *up_priv = netdev_priv(up_dev);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}
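
/* Core flower-to-mlx5 match translation: fill the flow spec from the
 * dissector keys and report the minimal eswitch inline mode the match
 * requires via @min_inline.
 */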
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer header were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}
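
/* Wrapper around __parse_cls_flower that additionally validates, for
 * eswitch (FDB) flows on non-uplink vports, that the configured eswitch
 * inline mode is sufficient for the parsed match.
 */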
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
		rep = rpriv->rep;
		if (rep->vport != FDB_UPLINK_VPORT &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		     esw->offloads.inline_mode < min_inline)) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

struct pedit_headers {
	struct ethhdr  eth;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};

static int pedit_header_offsets[] = {
	[TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers *masks,
			 struct pedit_headers *vals)
{
	u32 *curr_pmask, *curr_pval;

	if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
		goto out_err;

	curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
	curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);

	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
		goto out_err;

	*curr_pmask |= mask;
	*curr_pval  |= (val & mask);

	return 0;

out_err:
	return -EOPNOTSUPP;
}

struct mlx5_fields {
	u8  field;
	u8  size;
	u32 offset;
};

static struct mlx5_fields fields[] = {
	{MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])},
	{MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_dest[4])},
	{MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])},
	{MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_source[4])},
	{MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE,  2, offsetof(struct pedit_headers, eth.h_proto)},

	{MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
	{MLX5_ACTION_IN_FIELD_OUT_IP_TTL,  1, offsetof(struct pedit_headers, ip4.ttl)},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV4,   4, offsetof(struct pedit_headers, ip4.saddr)},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV4,   4, offsetof(struct pedit_headers, ip4.daddr)},

	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[3])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])},

	{MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
	{MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)},
	{MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5},

	{MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)},
	{MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)},
};

/* On input parse_attr->num_mod_hdr_actions tells how many HW actions can be
 * parsed at max from the SW pedit action. On success, it says how many HW
 * actions were actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers *masks,
				struct pedit_headers *vals,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, nactions, max_actions, first, last;
	void *s_masks_p, *a_masks_p, *vals_p;
	u32 s_mask, a_mask, val;
	struct mlx5_fields *f;
	u8 cmd, field_bsize;
	unsigned long mask;
	void *action;

	set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
	add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
	set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
	add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions;
	max_actions = parse_attr->num_mod_hdr_actions;
	nactions = 0;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = a_mask = mask = val = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		memcpy(&s_mask, s_masks_p, f->size);
		memcpy(&a_mask, a_masks_p, f->size);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		if (nactions == max_actions) {
			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
			return -EOPNOTSUPP;
		}

		if (s_mask) {
			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(s_masks_p, 0, f->size);
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(a_masks_p, 0, f->size);
		}

		memcpy(&val, vals_p, f->size);

		field_bsize = f->size * BITS_PER_BYTE;
		first = find_first_bit(&mask, field_bsize);
		last  = find_last_bit(&mask, field_bsize);
		if (first > 0 || last != (field_bsize - 1)) {
			printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			MLX5_SET(set_action_in, action, offset, 0);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, field_bsize);
		}

		if (field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(val));
		else if (field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(val));
		else if (field_bsize == 8)
			MLX5_SET(set_action_in, action, data, val);

		action += action_size;
		nactions++;
	}

	parse_attr->num_mod_hdr_actions = nactions;
	return 0;
}
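
/* Size the modify-header action buffer from the pedit key count and the
 * relevant device capability, then allocate it on the parse attributes.
 */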
static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int nkeys, action_size, max_actions;

	nkeys = tcf_pedit_nkeys(a);
	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

	/* a single 32-bit pedit SW key can expand to as many as 16 HW actions */
	max_actions = min(max_actions, nkeys * 16);

	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
	if (!parse_attr->mod_hdr_actions)
		return -ENOMEM;

	parse_attr->num_mod_hdr_actions = max_actions;
	return 0;
}

static const struct pedit_headers zero_masks = {};
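
/* Parse a TC pedit action into mlx5 modify-header actions: collect the
 * set/add masks and values per header field, allocate the HW action
 * buffer and translate every fully-masked field, rejecting anything the
 * device cannot rewrite.
 */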
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
	int nkeys, i, err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 cmd, htype;

	nkeys = tcf_pedit_nkeys(a);

	memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
	memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);

	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		err = -EOPNOTSUPP; /* can't be all optimistic */

		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
			printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
			goto out_err;
		}

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
			printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
			goto out_err;
		}

		mask = tcf_pedit_mask(a, i);
		val = tcf_pedit_val(a, i);
		offset = tcf_pedit_offset(a, i);

		err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
		if (err)
			goto out_err;
	}

	err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
	if (err)
		goto out_err;

	err = offload_pedit_fields(masks, vals, parse_attr);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &masks[cmd];
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
			       cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}
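
/* Parse the TC actions of a NIC (non-eswitch) flow: drop, pedit and
 * skbedit mark are supported; anything else is rejected.
 */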
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	attr->action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (attr->action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			attr->flow_tag = mark;
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
	ret = dst->error;
	if (ret) {
		dst_release(dst);
		return ret;
	}

	*out_ttl = ip6_dst_hoplimit(dst);

	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}
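
/* The two helpers below build the raw Ethernet/IP(v4|v6)/UDP/VXLAN
 * encapsulation header that is later programmed into the device for
 * this tunnel.
 */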
static void gen_vxlan_header_ipv4(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  __be32 daddr,
				  __be32 saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

static void gen_vxlan_header_ipv6(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  struct in6_addr *daddr,
				  struct in6_addr *saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int ttl, err;
	u8 nud_state;

	if (max_encap_size < ipv4_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv4_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}
	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. So if we get a notification, in case the
	 * neigh changes its validity state, we will find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv4(out_dev, encap_header,
				      ipv4_encap_size, e->h_dest, ttl,
				      fl4.daddr,
				      fl4.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}
	e->encap_size = ipv4_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv4_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}

static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;
	int ttl, err;
	u8 nud_state;

	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. So if we get a notification, in case the
	 * neigh changes its validity state, we will find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv6(out_dev, encap_header,
				      ipv6_encap_size, e->h_dest, ttl,
				      &fl6.daddr,
				      &fl6.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}

	e->encap_size = ipv6_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv6_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}
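
/* Attach a flow to an encap entry: look up (or create) the entry keyed
 * by the tunnel info, build and offload the encap header if needed, and
 * link the flow on the entry's flow list. Returns -EAGAIN when the
 * neighbour is not yet valid; the offload is then completed later from
 * the neigh update path.
 */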
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5e_encap_entry *e;
	int tunnel_type, err = 0;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);

	if (err && err != -EAGAIN)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encap, &e->flows);
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
		attr->encap_id = e->encap_id;

	return err;

out_err:
	kfree(e);
	return err;
}

static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err = 0;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = rpriv->rep;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev, *encap_dev = NULL;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, &encap_dev, flow);
				if (err && err != -EAGAIN)
					return err;
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(encap_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
				attr->parse_attr = parse_attr;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return err;
}
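
/* Entry point for flower classifier offload: allocate the flow and its
 * parse attributes, parse the match and actions, install the rule in HW
 * (FDB or NIC table) and index the flow by its TC cookie.
 */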
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	int attr_size, err = 0;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size  = sizeof(struct mlx5_esw_flow_attr);
	} else {
		flow_flags = MLX5E_TC_FLOW_NIC;
		attr_size  = sizeof(struct mlx5_nic_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = mlx5_vzalloc(sizeof(*parse_attr));
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_handle_encap_flow;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
	    !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
		kvfree(parse_attr);
	return err;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

err_handle_encap_flow:
	if (err == -EAGAIN) {
		err = rhashtable_insert_fast(&tc->ht, &flow->node,
					     tc->ht_params);
		if (err)
			mlx5e_tc_del_flow(priv, flow);
		else
			return 0;
	}

err_free:
	kvfree(parse_attr);
	kfree(flow);
	return err;
}

int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
		return 0;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}