From patchwork Mon Mar 23 23:44:56 2020
X-Patchwork-Submitter: wenxu <wenxu@ucloud.cn>
X-Patchwork-Id: 221998
From: wenxu@ucloud.cn
To: saeedm@mellanox.com
Cc: paulb@mellanox.com, vladbu@mellanox.com, netdev@vger.kernel.org
Subject: [PATCH net-next v5 1/2] net/mlx5e: refactor indr setup block
Date: Tue, 24 Mar 2020 07:44:56 +0800
Message-Id: <1585007097-28475-2-git-send-email-wenxu@ucloud.cn>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1585007097-28475-1-git-send-email-wenxu@ucloud.cn>
References: <1585007097-28475-1-git-send-email-wenxu@ucloud.cn>
X-Mailing-List: netdev@vger.kernel.org

From: wenxu <wenxu@ucloud.cn>

Refactor the indr setup block code so that the next patch can add FT indr
setup. The function mlx5e_rep_indr_offload() now takes a 'flags' argument,
so the next patch can set an additional flag for FT. Rename
mlx5e_rep_indr_setup_tc_block() to mlx5e_rep_indr_setup_block() and add a
flow_setup_cb_t callback parameter, so the next patch can pass the
FT-specific callback.
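For illustration only (not part of the patch below): a minimal stand-alone
sketch of the dispatch pattern this refactor enables, where the generic
block-setup helper binds whichever callback the dispatcher passes in. All
names and types in the sketch (setup_cb_t, setup_block(), tc_cb(), ...) are
hypothetical stand-ins for the driver code, not the kernel API.

/* build: cc -Wall sketch.c */
#include <stdio.h>

enum setup_type { SETUP_BLOCK, SETUP_FT };

/* shape of a flow_setup_cb_t-like callback: type, type_data, private data */
typedef int (*setup_cb_t)(enum setup_type type, void *type_data, void *priv);

static int tc_cb(enum setup_type type, void *type_data, void *priv)
{
        (void)type; (void)type_data; (void)priv;
        printf("tc callback bound\n");
        return 0;
}

/* generic helper: binds whatever callback the caller hands it, the way
 * mlx5e_rep_indr_setup_block() now takes a flow_setup_cb_t parameter
 */
static int setup_block(void *priv, void *type_data, setup_cb_t cb)
{
        return cb(SETUP_BLOCK, type_data, priv);
}

/* dispatcher: picks the callback per setup type; a later change can add
 * another case that passes a different callback through the same helper
 */
static int setup_cb(void *priv, enum setup_type type, void *type_data)
{
        switch (type) {
        case SETUP_BLOCK:
                return setup_block(priv, type_data, tc_cb);
        default:
                return -1; /* the driver returns -EOPNOTSUPP here */
        }
}

int main(void)
{
        return setup_cb(NULL, SETUP_BLOCK, NULL);
}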
Signed-off-by: wenxu <wenxu@ucloud.cn>
---
v5: change the comments

 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 42 ++++++++++++------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index a33d151..057f5f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -694,9 +694,9 @@ static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
 static int
 mlx5e_rep_indr_offload(struct net_device *netdev,
                        struct flow_cls_offload *flower,
-                       struct mlx5e_rep_indr_block_priv *indr_priv)
+                       struct mlx5e_rep_indr_block_priv *indr_priv,
+                       unsigned long flags)
 {
-       unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
        struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
        int err = 0;
 
@@ -717,20 +717,22 @@ static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
        return err;
 }
 
-static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
-                                        void *type_data, void *indr_priv)
+static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
+                                     void *type_data, void *indr_priv)
 {
+       unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
        struct mlx5e_rep_indr_block_priv *priv = indr_priv;
 
        switch (type) {
        case TC_SETUP_CLSFLOWER:
-               return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
+               return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
+                                             flags);
        default:
                return -EOPNOTSUPP;
        }
 }
 
-static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
+static void mlx5e_rep_indr_block_unbind(void *cb_priv)
 {
        struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;
 
@@ -741,9 +743,10 @@ static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
 static LIST_HEAD(mlx5e_block_cb_list);
 
 static int
-mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
-                             struct mlx5e_rep_priv *rpriv,
-                             struct flow_block_offload *f)
+mlx5e_rep_indr_setup_block(struct net_device *netdev,
+                          struct mlx5e_rep_priv *rpriv,
+                          struct flow_block_offload *f,
+                          flow_setup_cb_t *setup_cb)
 {
        struct mlx5e_rep_indr_block_priv *indr_priv;
        struct flow_block_cb *block_cb;
@@ -769,9 +772,8 @@ static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
                list_add(&indr_priv->list,
                         &rpriv->uplink_priv.tc_indr_block_priv_list);
 
-               block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb,
-                                              indr_priv, indr_priv,
-                                              mlx5e_rep_indr_tc_block_unbind);
+               block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv,
+                                              mlx5e_rep_indr_block_unbind);
                if (IS_ERR(block_cb)) {
                        list_del(&indr_priv->list);
                        kfree(indr_priv);
@@ -786,9 +788,7 @@ static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
                if (!indr_priv)
                        return -ENOENT;
 
-               block_cb = flow_block_cb_lookup(f->block,
-                                               mlx5e_rep_indr_setup_block_cb,
-                                               indr_priv);
+               block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv);
                if (!block_cb)
                        return -ENOENT;
 
@@ -802,13 +802,13 @@ static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
 }
 
 static
-int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
-                              enum tc_setup_type type, void *type_data)
+int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
+                           enum tc_setup_type type, void *type_data)
 {
        switch (type) {
        case TC_SETUP_BLOCK:
-               return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
-                                                    type_data);
+               return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
+                                                 mlx5e_rep_indr_setup_tc_cb);
        default:
                return -EOPNOTSUPP;
        }
@@ -820,7 +820,7 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
        int err;
 
        err = __flow_indr_block_cb_register(netdev, rpriv,
-                                           mlx5e_rep_indr_setup_tc_cb,
+                                           mlx5e_rep_indr_setup_cb,
                                            rpriv);
        if (err) {
                struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
@@ -834,7 +834,7 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
 static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
                                             struct net_device *netdev)
 {
-       __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
+       __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_cb,
                                        rpriv);
 }
 

From patchwork Mon Mar 23 23:44:57 2020
X-Patchwork-Submitter: wenxu <wenxu@ucloud.cn>
X-Patchwork-Id: 221997
From: wenxu@ucloud.cn
To: saeedm@mellanox.com
Cc: paulb@mellanox.com, vladbu@mellanox.com, netdev@vger.kernel.org
Subject: [PATCH net-next v5 2/2] net/mlx5e: add mlx5e_rep_indr_setup_ft_cb support
Date: Tue, 24 Mar 2020 07:44:57 +0800
Message-Id: <1585007097-28475-3-git-send-email-wenxu@ucloud.cn>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1585007097-28475-1-git-send-email-wenxu@ucloud.cn>
References: <1585007097-28475-1-git-send-email-wenxu@ucloud.cn>
X-Mailing-List: netdev@vger.kernel.org

From: wenxu <wenxu@ucloud.cn>

Add mlx5e_rep_indr_setup_ft_cb() to support indr block setup in FT mode.
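For illustration only (not part of the patch below): a minimal stand-alone
sketch of the prio/chain normalization the new callback performs before
reusing the tc offload path. The constants PRIO_RANGE and FT_CHAIN are
assumed placeholder values standing in for mlx5_esw_chains_get_prio_range()
and mlx5_esw_chains_get_ft_chain(); they are not the driver's values.

/* build: cc -Wall ft_sketch.c */
#include <stdio.h>

#define PRIO_RANGE 4    /* assumed hw-supported prio range */
#define FT_CHAIN   100  /* assumed reserved ft chain id */

struct fake_flow {
        unsigned int chain_index;
        unsigned int prio;
};

/* Returns 0 and rewrites the flow on success, -1 (think -EOPNOTSUPP) when
 * the FT flow cannot be mapped onto the tc offload path.
 */
static int normalize_ft_flow(struct fake_flow *f)
{
        /* FT offload may use prio [0, INT_MAX]; only prios that still fit
         * in [1, PRIO_RANGE] after the +1 shift can be offloaded. */
        if (f->prio >= PRIO_RANGE)
                return -1;
        /* only chain 0 of FT offload is supported */
        if (f->chain_index != 0)
                return -1;

        f->chain_index = FT_CHAIN;      /* move to the reserved ft chain */
        f->prio++;                      /* shift to the 1-based tc range */
        return 0;
}

int main(void)
{
        struct fake_flow f = { .chain_index = 0, .prio = 0 };

        if (normalize_ft_flow(&f) == 0)
                printf("offload on chain %u prio %u\n", f.chain_index, f.prio);
        return 0;
}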
Signed-off-by: wenxu <wenxu@ucloud.cn>
---
v5: no change

 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 52 ++++++++++++++++++++++++
 1 file changed, 52 insertions(+)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 057f5f9..30c81c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -732,6 +732,55 @@ static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
        }
 }
 
+static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
+                                     void *type_data, void *indr_priv)
+{
+       struct mlx5e_rep_indr_block_priv *priv = indr_priv;
+       struct flow_cls_offload *f = type_data;
+       struct flow_cls_offload tmp;
+       struct mlx5e_priv *mpriv;
+       struct mlx5_eswitch *esw;
+       unsigned long flags;
+       int err;
+
+       mpriv = netdev_priv(priv->rpriv->netdev);
+       esw = mpriv->mdev->priv.eswitch;
+
+       flags = MLX5_TC_FLAG(EGRESS) |
+               MLX5_TC_FLAG(ESW_OFFLOAD) |
+               MLX5_TC_FLAG(FT_OFFLOAD);
+
+       switch (type) {
+       case TC_SETUP_CLSFLOWER:
+               memcpy(&tmp, f, sizeof(*f));
+
+               if (!mlx5_esw_chains_prios_supported(esw))
+                       return -EOPNOTSUPP;
+
+               /* Re-use tc offload path by moving the ft flow to the
+                * reserved ft chain.
+                *
+                * FT offload can use prio range [0, INT_MAX], so we normalize
+                * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
+                * as with tc, where prio 0 isn't supported.
+                *
+                * We only support chain 0 of FT offload.
+                */
+               if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
+                       return -EOPNOTSUPP;
+               if (tmp.common.chain_index != 0)
+                       return -EOPNOTSUPP;
+
+               tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+               tmp.common.prio++;
+               err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
+               memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
+               return err;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 static void mlx5e_rep_indr_block_unbind(void *cb_priv)
 {
        struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;
@@ -809,6 +858,9 @@ int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
        case TC_SETUP_BLOCK:
                return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
                                                  mlx5e_rep_indr_setup_tc_cb);
+       case TC_SETUP_FT:
+               return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
+                                                 mlx5e_rep_indr_setup_ft_cb);
        default:
                return -EOPNOTSUPP;
        }