/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/vxlan.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
};

enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
	MLX5E_TC_FLOW_NIC	= BIT(1),
	MLX5E_TC_FLOW_OFFLOADED	= BIT(2),
};

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap;	/* flows sharing the same encap */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	void *mod_hdr_actions;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
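
/* NIC (non-eswitch) offload: the rule goes into the priv->fs.tc flow
 * table, which is created lazily below with the sizing defined above and
 * destroyed again once the last TC filter is removed (see
 * mlx5e_tc_del_nic_flow).
 */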
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flow_tag = attr->flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;
	int err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
					       parse_attr->num_mod_hdr_actions,
					       parse_attr->mod_hdr_actions,
					       &attr->mod_hdr_id);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_mod_hdr_id;
		}
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
				   &flow_act, &dest, 1);
	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);

	return rule;
}
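
/* Teardown mirrors the add path above: the rule and its counter are
 * released first, the TC table itself is dropped when no filters remain,
 * and a modify-header context is freed only if the flow allocated one.
 */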
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(flow->rule);
	mlx5_del_flow_rules(flow->rule);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   flow->nic_attr->mod_hdr_id);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);
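
/* Eswitch (FDB) offload: unlike the NIC path, the rule is installed via
 * the eswitch offloads API and may carry vlan push/pop, header rewrite
 * and tunnel encap actions; the error unwinding below releases them in
 * reverse order of setup.
 */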
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5_flow_handle *rule;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err) {
		rule = ERR_PTR(err);
		goto err_add_vlan;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
					       parse_attr->num_mod_hdr_actions,
					       parse_attr->mod_hdr_actions,
					       &attr->mod_hdr_id);
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_mod_hdr;
		}
	}

	rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
	return rule;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
		flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
		mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
	}

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
		mlx5e_detach_encap(priv, flow);
		kvfree(attr->parse_attr);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
}
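
/* Called from the neighbour update path once a tunnel neighbour becomes
 * valid: the cached encap header can now be programmed to the HW, and all
 * flows parked on this encap entry are (re)offloaded.
 */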
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       e->encap_size, e->encap_header,
			       &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, &e->flows, encap) {
		flow->esw_attr->encap_id = e->encap_id;
		flow->rule = mlx5e_tc_add_fdb_flow(priv,
						   flow->esw_attr->parse_attr,
						   flow);
		if (IS_ERR(flow->rule)) {
			err = PTR_ERR(flow->rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	}
}
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;

	list_for_each_entry(flow, &e->flows, encap) {
		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
			flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
			counter = mlx5_flow_rule_counter(flow->rule);
			mlx5_del_flow_rules(flow->rule);
			mlx5_fc_destroy(priv->mdev, counter);
		}
	}

	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
		mlx5_encap_dealloc(priv->mdev, e->encap_id);
	}
}
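
/* Periodically invoked for each cached neighbour: if any offloaded flow
 * using it saw traffic since the last report (per the cached HW counter),
 * poke the neigh entry so the kernel keeps it alive and resolved.
 */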
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	u64 bytes, packets, lastuse = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = ipv6_stub->nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
			continue;
		list_for_each_entry(flow, &e->flows, encap) {
			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
				counter = mlx5_flow_rule_counter(flow->rule);
				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
	}

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n) {
			WARN(1, "The neighbour already freed\n");
			return;
		}

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}
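
/* Drop one flow's reference on its encap entry; the entry itself (and
 * its HW encap context) is freed only when the last flow detaches.
 */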
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5e_encap_entry *e;

		e = list_entry(next, struct mlx5e_encap_entry, flows);
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_encap_dealloc(priv->mdev, e->encap_id);

		hash_del_rcu(&e->encap_hlist);
		kfree(e->encap_header);
		kfree(e);
	}
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}
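
/* Translate the flower tunnel keys into an mlx5 match spec: outer UDP +
 * VXLAN VNI here, outer IP addresses in parse_tunnel_attr below.
 */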
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
		struct mlx5e_priv *up_priv = netdev_priv(up_dev);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
			parse_vxlan_attr(spec, f);
		} else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}
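
/* Build the full match spec from the flower dissector keys. min_inline
 * reports back the minimal TX inline mode the match requires, which
 * parse_cls_flower checks against the eswitch configuration.
 */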
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer header were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
		rep = rpriv->rep;
		if (rep->vport != FDB_UPLINK_VPORT &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		    esw->offloads.inline_mode < min_inline)) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}
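
/* pedit offload: SW pedit keys are 32-bit masked writes at arbitrary
 * offsets. They are first accumulated per header type into the shadow
 * pedit_headers structs below, then re-emitted as mlx5 modify-header
 * actions that operate on named packet fields.
 */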
struct pedit_headers {
	struct ethhdr  eth;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};

static int pedit_header_offsets[] = {
	[TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers *masks,
			 struct pedit_headers *vals)
{
	u32 *curr_pmask, *curr_pval;

	if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
		goto out_err;

	curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
	curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);

	if (*curr_pmask & mask) /* disallow acting twice on the same location */
		goto out_err;

	*curr_pmask |= mask;
	*curr_pval  |= (val & mask);

	return 0;

out_err:
	return -EOPNOTSUPP;
}

struct mlx5_fields {
	u8  field;
	u8  size;
	u32 offset;
};
static struct mlx5_fields fields[] = {
	{MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])},
	{MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_dest[4])},
	{MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])},
	{MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_source[4])},
	{MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE,  2, offsetof(struct pedit_headers, eth.h_proto)},

	{MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
	{MLX5_ACTION_IN_FIELD_OUT_IP_TTL,  1, offsetof(struct pedit_headers, ip4.ttl)},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV4,   4, offsetof(struct pedit_headers, ip4.saddr)},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV4,   4, offsetof(struct pedit_headers, ip4.daddr)},

	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[3])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])},

	{MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
	{MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)},
	{MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5},

	{MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)},
	{MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)},
};
/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
 * max from the SW pedit action. On success, it says how many HW actions were
 * actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers *masks,
				struct pedit_headers *vals,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, nactions, max_actions, first, last, first_z;
	void *s_masks_p, *a_masks_p, *vals_p;
	struct mlx5_fields *f;
	u8 cmd, field_bsize;
	u32 s_mask, a_mask;
	unsigned long mask;
	void *action;

	set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
	add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
	set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
	add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions;
	max_actions = parse_attr->num_mod_hdr_actions;
	nactions = 0;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		memcpy(&s_mask, s_masks_p, f->size);
		memcpy(&a_mask, a_masks_p, f->size);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		if (nactions == max_actions) {
			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
			return -EOPNOTSUPP;
		}

		if (s_mask) {
			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(s_masks_p, 0, f->size);
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(a_masks_p, 0, f->size);
		}

		field_bsize = f->size * BITS_PER_BYTE;

		first_z = find_first_zero_bit(&mask, field_bsize);
		first = find_first_bit(&mask, field_bsize);
		last  = find_last_bit(&mask, field_bsize);
		if (first > 0 || last != (field_bsize - 1) || first_z < last) {
			printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			MLX5_SET(set_action_in, action, offset, 0);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, field_bsize);
		}

		if (field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p));
		else if (field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p));
		else if (field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p);

		action += action_size;
		nactions++;
	}

	parse_attr->num_mod_hdr_actions = nactions;
	return 0;
}
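
/* Size the modify-header action array pessimistically: the HW caps bound
 * it from above, and a single 32-bit SW pedit key can expand to at most
 * 16 HW actions, per the comment inside.
 */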
static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int nkeys, action_size, max_actions;

	nkeys = tcf_pedit_nkeys(a);
	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

	/* can get up to crazily 16 HW actions in 32 bits pedit SW key */
	max_actions = min(max_actions, nkeys * 16);

	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
	if (!parse_attr->mod_hdr_actions)
		return -ENOMEM;

	parse_attr->num_mod_hdr_actions = max_actions;
	return 0;
}
static const struct pedit_headers zero_masks = {};
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
	int nkeys, i, err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 cmd, htype;

	nkeys = tcf_pedit_nkeys(a);

	memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
	memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);

	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		err = -EOPNOTSUPP; /* can't be all optimistic */

		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
			printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
			goto out_err;
		}

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
			printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
			goto out_err;
		}

		mask = tcf_pedit_mask(a, i);
		val = tcf_pedit_val(a, i);
		offset = tcf_pedit_offset(a, i);

		err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
		if (err)
			goto out_err;
	}

	err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
	if (err)
		goto out_err;

	err = offload_pedit_fields(masks, vals, parse_attr);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &masks[cmd];
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
			       cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}
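
/* csum is offloadable only as a side effect of header rewrite: the HW
 * recomputes checksums when it rewrites headers, so a standalone csum
 * action (or one covering unsupported protocols) is rejected.
 */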
static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
{
	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
			 TCA_CSUM_UPDATE_FLAG_UDP;

	/* The HW recalcs checksums only if re-writing headers */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
		netdev_warn(priv->netdev,
			    "TC csum action is only offloaded with pedit\n");
		return false;
	}

	if (update_flags & ~prot_flags) {
		netdev_warn(priv->netdev,
			    "can't offload TC csum action for some header/s - flags %#x\n",
			    update_flags);
		return false;
	}

	return true;
}
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	attr->action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (attr->action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, attr->action,
						   tcf_csum_update_flags(a)))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			attr->flow_tag = mark;
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}
static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}
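
/* Resolve the route and neighbour for the tunnel destination; if the
 * egress device is not on the same HW eswitch, fall back to the uplink.
 */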
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
	ret = dst->error;
	if (ret) {
		dst_release(dst);
		return ret;
	}

	*out_ttl = ip6_dst_hoplimit(dst);

	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}
static void gen_vxlan_header_ipv4(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  __be32 daddr,
				  __be32 saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}
static void gen_vxlan_header_ipv6(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  struct in6_addr *daddr,
				  struct in6_addr *saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}
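
/* Resolve the tunnel route/neighbour and build the complete encap header
 * (outer MAC/IP/UDP/VXLAN). If the neighbour is not yet valid, the header
 * is cached and -EAGAIN tells the caller to finish the offload from the
 * neighbour update event (mlx5e_tc_encap_flows_add).
 */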
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int ttl, err;
	u8 nud_state;

	if (max_encap_size < ipv4_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv4_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}
	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. So if we'll get a notification, in case the
	 * neigh changes its validity state, we would find the relevant neigh
	 * in the hash table.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv4(out_dev, encap_header,
				      ipv4_encap_size, e->h_dest, ttl,
				      fl4.daddr,
				      fl4.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}
	e->encap_size = ipv4_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv4_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}
static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;
	int err, ttl = 0;
	u8 nud_state;

	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. So if we'll get a notification, in case the
	 * neigh changes its validity state, we would find the relevant neigh
	 * in the hash table.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv6(out_dev, encap_header,
				      ipv6_encap_size, e->h_dest, ttl,
				      &fl6.daddr,
				      &fl6.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}

	e->encap_size = ipv6_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv6_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5e_encap_entry *e;
	int tunnel_type, err = 0;
	bool found = false;
	uintptr_t hash_key;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);

	if (err && err != -EAGAIN)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encap, &e->flows);
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
		attr->encap_id = e->encap_id;
	else
		err = -EAGAIN;

	return err;

out_err:
	kfree(e);
	return err;
}
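
/* FDB action parsing: drop, header rewrite, csum, redirect (with or
 * without encap), vlan push/pop and tunnel decap; anything else fails
 * the offload so the filter stays in software.
 */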
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err = 0;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = rpriv->rep;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, attr->action,
						   tcf_csum_update_flags(a)))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev, *encap_dev = NULL;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, &encap_dev, flow);
				if (err && err != -EAGAIN)
					return err;
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(encap_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
				attr->parse_attr = parse_attr;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}

	return err;
}
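
/* Entry point for the flower classifier add callback. As an illustrative
 * example (not from this file), a filter this path can offload on an
 * mlx5e representor or uplink netdev could be:
 *
 *   tc filter add dev eth0 protocol ip parent ffff: \
 *      flower skip_sw ip_proto tcp dst_port 80 action drop
 *
 * The flow is parsed into a match spec + action attr, installed into the
 * NIC or FDB tables, and tracked in the tc hashtable by its cookie.
 */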
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	int attr_size, err = 0;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size  = sizeof(struct mlx5_esw_flow_attr);
	} else {
		flow_flags = MLX5E_TC_FLOW_NIC;
		attr_size  = sizeof(struct mlx5_nic_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = mlx5_vzalloc(sizeof(*parse_attr));
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_handle_encap_flow;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
	    !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
		kvfree(parse_attr);
	return err;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

err_handle_encap_flow:
	if (err == -EAGAIN) {
		err = rhashtable_insert_fast(&tc->ht, &flow->node,
					     tc->ht_params);
		if (err)
			mlx5e_tc_del_flow(priv, flow);
		else
			return 0;
	}

err_free:
	kvfree(parse_attr);
	kfree(flow);
	return err;
}
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
		return 0;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	preempt_disable();

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	preempt_enable();

	return 0;
}
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}