This is a note to let you know that I've just added the patch titled

    net/mlx5e: Update restore chain id for slow path packets

to the 6.0-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     net-mlx5e-update-restore-chain-id-for-slow-path-pack.patch
and it can be found in the queue-6.0 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.

commit 7dc3a6fecfdd387c2f807b8daba8966049004334
Author: Paul Blakey <paulb@xxxxxxxxxx>
Date:   Wed Oct 26 14:51:43 2022 +0100

    net/mlx5e: Update restore chain id for slow path packets

    [ Upstream commit 8dc47c0527c1586e3ebe0efd323f1d8abb181c77 ]

    Currently encap slow path rules just forward to software without
    setting the chain id miss register, so driver doesn't restore
    the chain, and packets hitting this rule will restart from tc
    chain 0 instead of continuing to the chain the encap rule was on.

    Fix this by setting the chain id miss register to the chain id
    mapping.

    Fixes: 8f1e0b97cc70 ("net/mlx5: E-Switch, Mark miss packets with new chain id mapping")
    Signed-off-by: Paul Blakey <paulb@xxxxxxxxxx>
    Reviewed-by: Oz Shlomo <ozsh@xxxxxxxxxx>
    Signed-off-by: Saeed Mahameed <saeedm@xxxxxxxxxx>
    Link: https://lore.kernel.org/r/20221026135153.154807-6-saeed@xxxxxxxxxx
    Signed-off-by: Jakub Kicinski <kuba@xxxxxxxxxx>
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 10c9a8a79d00..2e42d7c5451e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -96,6 +96,7 @@ struct mlx5e_tc_flow {
 	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
 	struct mlx5e_tc_flow *peer_flow;
 	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
+	struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */
 	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
 	struct list_head hairpin; /* flows sharing the same hairpin */
 	struct list_head peer; /* flows with peer flow */
@@ -111,6 +112,7 @@ struct mlx5e_tc_flow {
 	struct completion del_hw_done;
 	struct mlx5_flow_attr *attr;
 	struct list_head attrs;
+	u32 chain_mapping;
 };
 
 struct mlx5_flow_handle *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index f154bda668ad..6a0df046064f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1394,8 +1394,13 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
 			      struct mlx5e_tc_flow *flow,
 			      struct mlx5_flow_spec *spec)
 {
+	struct mlx5e_tc_mod_hdr_acts mod_acts = {};
+	struct mlx5e_mod_hdr_handle *mh = NULL;
 	struct mlx5_flow_attr *slow_attr;
 	struct mlx5_flow_handle *rule;
+	bool fwd_and_modify_cap;
+	u32 chain_mapping = 0;
+	int err;
 
 	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
 	if (!slow_attr)
@@ -1406,13 +1411,56 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
 	slow_attr->esw_attr->split_count = 0;
 	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
 
+	fwd_and_modify_cap = MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table);
+	if (!fwd_and_modify_cap)
+		goto skip_restore;
+
+	err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping);
+	if (err)
+		goto err_get_chain;
+
+	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+					CHAIN_TO_REG, chain_mapping);
+	if (err)
+		goto err_reg_set;
+
+	mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow),
+				  MLX5_FLOW_NAMESPACE_FDB, &mod_acts);
+	if (IS_ERR(mh)) {
+		err = PTR_ERR(mh);
+		goto err_attach;
+	}
+
+	slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+	slow_attr->modify_hdr = mlx5e_mod_hdr_get(mh);
+
+skip_restore:
 	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
-	if (!IS_ERR(rule))
-		flow_flag_set(flow, SLOW);
+	if (IS_ERR(rule)) {
+		err = PTR_ERR(rule);
+		goto err_offload;
+	}
 
+	flow->slow_mh = mh;
+	flow->chain_mapping = chain_mapping;
+	flow_flag_set(flow, SLOW);
+
+	mlx5e_mod_hdr_dealloc(&mod_acts);
 	kfree(slow_attr);
 
 	return rule;
+
+err_offload:
+	if (fwd_and_modify_cap)
+		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh);
+err_attach:
+err_reg_set:
+	if (fwd_and_modify_cap)
+		mlx5_chains_put_chain_mapping(esw_chains(esw), chain_mapping);
+err_get_chain:
+	mlx5e_mod_hdr_dealloc(&mod_acts);
+	kfree(slow_attr);
+	return ERR_PTR(err);
 }
 
 void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
@@ -1430,7 +1478,17 @@ void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
 	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	slow_attr->esw_attr->split_count = 0;
 	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
+	if (flow->slow_mh) {
+		slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+		slow_attr->modify_hdr = mlx5e_mod_hdr_get(flow->slow_mh);
+	}
 	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
+	if (flow->slow_mh) {
+		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), flow->slow_mh);
+		mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
+		flow->chain_mapping = 0;
+		flow->slow_mh = NULL;
+	}
 	flow_flag_clear(flow, SLOW);
 	kfree(slow_attr);
 }
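As a reading aid only (not part of the patch): the standalone C sketch below models the
behaviour change the commit message describes. All names in it (pkt_metadata, chain_reg,
chain_for_mapping, restore_chain) are invented for illustration and are not mlx5 driver
code. Without a chain-id mapping written into the miss register, a packet that hits the
slow-path rule resumes tc processing at chain 0; once the slow rule's modify-header
action writes the mapping, software can restore the original chain.

/*
 * Illustrative sketch only -- hypothetical names, not mlx5 driver code.
 */
#include <stdio.h>

#define MAX_MAPPINGS 16

/* Stand-in for the CHAIN_TO_REG metadata register carried with the packet. */
struct pkt_metadata {
	unsigned int chain_reg;		/* 0 means "no mapping was written" */
};

/* Stand-in for the chain-id mapping table (mapping id -> tc chain). */
static const unsigned int chain_for_mapping[MAX_MAPPINGS] = {
	[1] = 1, [2] = 2, [3] = 3,	/* toy 1:1 mappings */
};

/* Software continuation after a slow-path miss. */
static unsigned int restore_chain(const struct pkt_metadata *md)
{
	if (!md->chain_reg || md->chain_reg >= MAX_MAPPINGS)
		return 0;	/* pre-fix behaviour: restart from tc chain 0 */
	return chain_for_mapping[md->chain_reg];	/* post-fix: continue on the original chain */
}

int main(void)
{
	struct pkt_metadata before_fix = { .chain_reg = 0 };
	struct pkt_metadata after_fix = { .chain_reg = 3 };	/* written by the slow rule's mod_hdr */

	printf("no mapping written:  resume at chain %u\n", restore_chain(&before_fix));
	printf("mapping written (3): resume at chain %u\n", restore_chain(&after_fix));
	return 0;
}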