@@ -74,6 +74,30 @@ int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id)
return err;
}
+int mapping_find_by_data(struct mapping_ctx *ctx, void *data, u32 *id) /* reverse lookup: find the id previously assigned to @data; 0 on hit, -ENOENT on miss */
+{
+ struct mapping_item *mi;
+ u32 hash_key; /* jhash of the raw data blob; bucket key for ctx->ht */
+
+ mutex_lock(&ctx->lock); /* serializes against concurrent add/remove on ctx->ht */
+
+ hash_key = jhash(data, ctx->data_size, 0); /* presumably the same hash mapping_add() uses for insertion — confirm */
+ hash_for_each_possible(ctx->ht, mi, node, hash_key) {
+ if (!memcmp(data, mi->data, ctx->data_size)) /* full-data compare guards against hash collisions */
+ goto found;
+ }
+
+ mutex_unlock(&ctx->lock);
+ return -ENOENT; /* no entry with matching data */
+
+found:
+ if (id) /* id may be NULL when the caller only probes for existence */
+ *id = mi->id;
+
+ mutex_unlock(&ctx->lock);
+ return 0;
+}
+
static void mapping_remove_and_free(struct mapping_ctx *ctx,
struct mapping_item *mi)
{
@@ -9,6 +9,7 @@
int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id);
int mapping_remove(struct mapping_ctx *ctx, u32 id);
int mapping_find(struct mapping_ctx *ctx, u32 id, void *data);
+int mapping_find_by_data(struct mapping_ctx *ctx, void *data, u32 *id);
/* mapping uses an xarray to map data to ids in add(), and for find().
* For locking, it uses a internal xarray spin lock for add()/remove(),
@@ -1791,7 +1791,8 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
}
}
-static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
+static int flow_has_tc_action(struct flow_cls_offload *f,
+ enum flow_action_id action) /* generalized from fwd-only: true if the rule contains any action of @action's id */
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_action *flow_action = &rule->action;
@@ -1799,12 +1800,8 @@ static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
int i;
flow_action_for_each(i, act, flow_action) {
- switch (act->id) {
- case FLOW_ACTION_GOTO:
+ if (act->id == action) /* short-circuit on the first matching action */
return true;
- default:
- continue;
- }
}
return false;
@@ -1856,10 +1853,37 @@ static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
sizeof(*__dst));\
})
+static void mlx5e_make_tunnel_match_key(struct flow_cls_offload *f,
+ struct net_device *filter_dev,
+ struct tunnel_match_key *tunnel_key) /* build the canonical key used by the tunnel-id mapping (factored out of mlx5e_get_flow_tunnel_id) */
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+
+ memset(tunnel_key, 0, sizeof(*tunnel_key)); /* zero padding/unused fields so memcmp()-based mapping lookups are reliable */
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ &tunnel_key->enc_control);
+ if (tunnel_key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+ &tunnel_key->enc_ipv4);
+ else
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
+ &tunnel_key->enc_ipv6); /* any non-IPv4 addr_type is treated as IPv6 — TODO confirm no other types reach here */
+
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key->enc_ip);
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
+ &tunnel_key->enc_tp);
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
+ &tunnel_key->enc_key_id);
+
+ tunnel_key->filter_ifindex = filter_dev->ifindex; /* bind the key to the ingress device */
+}
+
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct flow_cls_offload *f,
- struct net_device *filter_dev)
+ struct net_device *filter_dev,
+ bool sets_mapping,
+ bool needs_mapping)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
@@ -1880,22 +1904,7 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
- memset(&tunnel_key, 0, sizeof(tunnel_key));
- COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
- &tunnel_key.enc_control);
- if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
- COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
- &tunnel_key.enc_ipv4);
- else
- COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
- &tunnel_key.enc_ipv6);
- COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
- COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
- &tunnel_key.enc_tp);
- COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
- &tunnel_key.enc_key_id);
- tunnel_key.filter_ifindex = filter_dev->ifindex;
-
+ mlx5e_make_tunnel_match_key(f, filter_dev, &tunnel_key);
err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
if (err)
return err;
@@ -1925,10 +1934,10 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
mask = enc_opts_id ? TUNNEL_ID_MASK :
(TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
- if (attr->chain) {
+ if (needs_mapping) {
mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
TUNNEL_TO_REG, value, mask);
- } else {
+ } else if (sets_mapping) {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
err = mlx5e_tc_match_to_reg_set(priv->mdev,
mod_hdr_acts,
@@ -1951,6 +1960,25 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
return err;
}
+static int mlx5e_lookup_flow_tunnel_id(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow, /* NOTE(review): unused in the body; apparently kept to mirror mlx5e_get_flow_tunnel_id()'s signature */
+ struct flow_cls_offload *f,
+ struct net_device *filter_dev,
+ u32 *tun_id) /* non-allocating probe: does a tunnel id already exist for this flow's match key? tun_id may be NULL */
+{
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct tunnel_match_key tunnel_key;
+ struct mlx5_eswitch *esw;
+
+ esw = priv->mdev->priv.eswitch;
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv; /* tunnel_mapping lives on the uplink representor's private data */
+
+ mlx5e_make_tunnel_match_key(f, filter_dev, &tunnel_key); /* same key construction as mapping_add() in mlx5e_get_flow_tunnel_id() */
+ return mapping_find_by_data(uplink_priv->tunnel_mapping, &tunnel_key, tun_id); /* 0 = found, -ENOENT = absent */
+}
+
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
@@ -1986,14 +2014,24 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct netlink_ext_ack *extack = f->common.extack;
bool needs_mapping, sets_mapping;
+ bool pedit_action;
+ bool tunnel_decap;
int err;
if (!mlx5e_is_eswitch_flow(flow))
return -EOPNOTSUPP;
- needs_mapping = !!flow->esw_attr->chain;
- sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f);
- *match_inner = !needs_mapping;
+ pedit_action = flow_has_tc_action(f, FLOW_ACTION_MANGLE) ||
+ flow_has_tc_action(f, FLOW_ACTION_ADD);
+ tunnel_decap = flow_has_tc_action(f, FLOW_ACTION_TUNNEL_DECAP);
+
+ *match_inner = pedit_action || tunnel_decap;
+ sets_mapping = pedit_action &&
+ flow_has_tc_action(f, FLOW_ACTION_GOTO);
+
+ needs_mapping = !!flow->esw_attr->chain &&
+ !mlx5e_lookup_flow_tunnel_id(priv, flow, f,
+ filter_dev, NULL);
if ((needs_mapping || sets_mapping) &&
!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
@@ -2004,7 +2042,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (!flow->esw_attr->chain) {
+ if (*match_inner && !needs_mapping) {
err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
match_level);
if (err) {
@@ -2021,7 +2059,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
if (!needs_mapping && !sets_mapping)
return 0;
- return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
+ return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev,
+ sets_mapping, needs_mapping);
}
static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)