From patchwork Tue Mar 24 13:17:20 2020
X-Patchwork-Submitter: Paul Blakey
X-Patchwork-Id: 221967
From: Paul Blakey <paulb@mellanox.com>
To: Paul Blakey, Oz Shlomo, Pablo Neira Ayuso, Majd Dibbiny, Roi Dayan,
    netdev@vger.kernel.org, Saeed Mahameed
Cc: netfilter-devel@vger.kernel.org
Subject: [PATCH net-next 2/3] netfilter: flowtable: Use work entry per offload command
Date: Tue, 24 Mar 2020 15:17:20 +0200
Message-Id: <1585055841-14256-3-git-send-email-paulb@mellanox.com>
X-Mailer: git-send-email 1.8.4.3
In-Reply-To: <1585055841-14256-1-git-send-email-paulb@mellanox.com>
References: <1585055841-14256-1-git-send-email-paulb@mellanox.com>
X-Mailing-List: netdev@vger.kernel.org

To allow offload commands to execute in parallel, create a workqueue
for flow table offload and use a work entry per offload command.
Signed-off-by: Paul Blakey <paulb@mellanox.com>
Reviewed-by: Oz Shlomo
---
 net/netfilter/nf_flow_table_offload.c | 46 ++++++++++++-----------------------
 1 file changed, 15 insertions(+), 31 deletions(-)

diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index 26591d2..7f41cb2 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -12,9 +12,7 @@
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_tuple.h>
 
-static struct work_struct nf_flow_offload_work;
-static DEFINE_SPINLOCK(flow_offload_pending_list_lock);
-static LIST_HEAD(flow_offload_pending_list);
+static struct workqueue_struct *nf_flow_offload_wq;
 
 struct flow_offload_work {
 	struct list_head list;
@@ -22,6 +20,7 @@ struct flow_offload_work {
 	int priority;
 	struct nf_flowtable *flowtable;
 	struct flow_offload *flow;
+	struct work_struct work;
 };
 
 #define NF_FLOW_DISSECTOR(__match, __type, __field) \
@@ -788,15 +787,10 @@ static void flow_offload_work_stats(struct flow_offload_work *offload)
 
 static void flow_offload_work_handler(struct work_struct *work)
 {
-	struct flow_offload_work *offload, *next;
-	LIST_HEAD(offload_pending_list);
-
-	spin_lock_bh(&flow_offload_pending_list_lock);
-	list_replace_init(&flow_offload_pending_list, &offload_pending_list);
-	spin_unlock_bh(&flow_offload_pending_list_lock);
+	struct flow_offload_work *offload;
 
-	list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
-		switch (offload->cmd) {
+	offload = container_of(work, struct flow_offload_work, work);
+	switch (offload->cmd) {
 	case FLOW_CLS_REPLACE:
 		flow_offload_work_add(offload);
 		break;
@@ -808,19 +802,14 @@ static void flow_offload_work_handler(struct work_struct *work)
 		break;
 	default:
 		WARN_ON_ONCE(1);
-		}
-		list_del(&offload->list);
-		kfree(offload);
 	}
+
+	kfree(offload);
 }
 
 static void flow_offload_queue_work(struct flow_offload_work *offload)
 {
-	spin_lock_bh(&flow_offload_pending_list_lock);
-	list_add_tail(&offload->list, &flow_offload_pending_list);
-	spin_unlock_bh(&flow_offload_pending_list_lock);
-
-	schedule_work(&nf_flow_offload_work);
+	queue_work(nf_flow_offload_wq, &offload->work);
 }
 
 static struct flow_offload_work *
@@ -837,6 +826,7 @@ static void flow_offload_queue_work(struct flow_offload_work *offload)
 	offload->flow = flow;
 	offload->priority = flowtable->priority;
 	offload->flowtable = flowtable;
+	INIT_WORK(&offload->work, flow_offload_work_handler);
 
 	return offload;
 }
@@ -887,7 +877,7 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
 void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
 {
 	if (nf_flowtable_hw_offload(flowtable))
-		flush_work(&nf_flow_offload_work);
+		flush_workqueue(nf_flow_offload_wq);
 }
 
 static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
@@ -1055,7 +1045,10 @@ static void nf_flow_table_indr_block_cb(struct net_device *dev,
 
 int nf_flow_table_offload_init(void)
 {
-	INIT_WORK(&nf_flow_offload_work, flow_offload_work_handler);
+	nf_flow_offload_wq = alloc_workqueue("nf_flow_table_offload",
+					     WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+	if (!nf_flow_offload_wq)
+		return -ENOMEM;
 
 	flow_indr_add_block_cb(&block_ing_entry);
 
@@ -1064,15 +1057,6 @@ int nf_flow_table_offload_init(void)
 
 void nf_flow_table_offload_exit(void)
 {
-	struct flow_offload_work *offload, *next;
-	LIST_HEAD(offload_pending_list);
-
 	flow_indr_del_block_cb(&block_ing_entry);
-
-	cancel_work_sync(&nf_flow_offload_work);
-
-	list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
-		list_del(&offload->list);
-		kfree(offload);
-	}
+	destroy_workqueue(nf_flow_offload_wq);
 }
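
For context, the core pattern this patch applies is the standard
one-work-item-per-request workqueue idiom: each command embeds its own
work_struct, the handler recovers the enclosing command with
container_of(), and commands are queued on a dedicated unbound
workqueue so they can execute concurrently instead of being drained
serially from a spinlock-protected pending list. Below is a minimal
sketch of that idiom, separate from the patch itself; all example_*
names are hypothetical.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

struct example_cmd {
	int opcode;			/* what this command should do */
	struct work_struct work;	/* one work item per command */
};

static void example_cmd_handler(struct work_struct *work)
{
	/* Recover the enclosing command from its embedded work_struct. */
	struct example_cmd *cmd = container_of(work, struct example_cmd, work);

	/* ... execute cmd->opcode ... */

	kfree(cmd);			/* the handler owns and frees the command */
}

static int example_cmd_submit(int opcode)
{
	struct example_cmd *cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);

	if (!cmd)
		return -ENOMEM;

	cmd->opcode = opcode;
	INIT_WORK(&cmd->work, example_cmd_handler);
	queue_work(example_wq, &cmd->work);	/* commands may now run in parallel */
	return 0;
}

static int __init example_init(void)
{
	/* WQ_UNBOUND lets independent items run concurrently on any CPU. */
	example_wq = alloc_workqueue("example_wq",
				     WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	return example_wq ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);	/* completes pending work first */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

One consequence of the per-command work items: waiting for outstanding
commands now means waiting for the whole queue, which is why
nf_flow_table_offload_flush() above moves from flush_work() to
flush_workqueue().
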
From patchwork Tue Mar 24 13:17:21 2020
X-Patchwork-Submitter: Paul Blakey
X-Patchwork-Id: 221968
From: Paul Blakey <paulb@mellanox.com>
To: Paul Blakey, Oz Shlomo, Pablo Neira Ayuso, Majd Dibbiny, Roi Dayan,
    netdev@vger.kernel.org, Saeed Mahameed
Cc: netfilter-devel@vger.kernel.org
Subject: [PATCH net-next 3/3] net/mlx5: CT: Use rhashtable's ct entries instead of a separate list
Date: Tue, 24 Mar 2020 15:17:21 +0200
Message-Id: <1585055841-14256-4-git-send-email-paulb@mellanox.com>
X-Mailer: git-send-email 1.8.4.3
In-Reply-To: <1585055841-14256-1-git-send-email-paulb@mellanox.com>
References: <1585055841-14256-1-git-send-email-paulb@mellanox.com>
X-Mailing-List: netdev@vger.kernel.org

The CT entries list is only used when freeing a ct zone flow table, to
walk all the ct entries offloaded on that zone/table and flush the
table. Rhashtable already provides an API to walk all inserted entries;
use it instead, and remove the list.
Signed-off-by: Paul Blakey <paulb@mellanox.com>
Reviewed-by: Oz Shlomo
---
 drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index a22ad6b..afc8ac3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -67,11 +67,9 @@ struct mlx5_ct_ft {
 	struct nf_flowtable *nf_ft;
 	struct mlx5_tc_ct_priv *ct_priv;
 	struct rhashtable ct_entries_ht;
-	struct list_head ct_entries_list;
 };
 
 struct mlx5_ct_entry {
-	struct list_head list;
 	u16 zone;
 	struct rhash_head node;
 	struct flow_rule *flow_rule;
@@ -617,8 +615,6 @@ struct mlx5_ct_entry {
 	if (err)
 		goto err_insert;
 
-	list_add(&entry->list, &ft->ct_entries_list);
-
 	return 0;
 
 err_insert:
@@ -646,7 +642,6 @@ struct mlx5_ct_entry {
 	WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht,
 				       &entry->node,
 				       cts_ht_params));
-	list_del(&entry->list);
 	kfree(entry);
 
 	return 0;
@@ -817,7 +812,6 @@ struct mlx5_ct_entry {
 	ft->zone = zone;
 	ft->nf_ft = nf_ft;
 	ft->ct_priv = ct_priv;
-	INIT_LIST_HEAD(&ft->ct_entries_list);
 	refcount_set(&ft->refcount, 1);
 
 	err = rhashtable_init(&ft->ct_entries_ht, &cts_ht_params);
@@ -846,12 +840,12 @@ struct mlx5_ct_entry {
 }
 
 static void
-mlx5_tc_ct_flush_ft(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
+mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
 {
-	struct mlx5_ct_entry *entry;
+	struct mlx5_tc_ct_priv *ct_priv = arg;
+	struct mlx5_ct_entry *entry = ptr;
 
-	list_for_each_entry(entry, &ft->ct_entries_list, list)
-		mlx5_tc_ct_entry_del_rules(ft->ct_priv, entry);
+	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
 }
 
 static void
@@ -862,9 +856,10 @@ struct mlx5_ct_entry {
 
 	nf_flow_table_offload_del_cb(ft->nf_ft,
 				     mlx5_tc_ct_block_flow_offload, ft);
 
-	mlx5_tc_ct_flush_ft(ct_priv, ft);
 	rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
-	rhashtable_destroy(&ft->ct_entries_ht);
+	rhashtable_free_and_destroy(&ft->ct_entries_ht,
+				    mlx5_tc_ct_flush_ft_entry,
+				    ct_priv);
 	kfree(ft);
 }
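
For context, rhashtable_free_and_destroy() visits every entry still
present in the table, calls a caller-supplied function on each together
with an opaque argument, and then releases the table's internal state;
that walk is what makes a parallel entries list redundant. Below is a
minimal sketch of the idiom, separate from the patch itself; all
example_* names are hypothetical.

#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_entry {
	u32 key;
	struct rhash_head node;		/* linkage inside the rhashtable */
};

static const struct rhashtable_params example_params = {
	.head_offset = offsetof(struct example_entry, node),
	.key_offset = offsetof(struct example_entry, key),
	.key_len = sizeof(u32),
	.automatic_shrinking = true,
};

static int example_table_init(struct rhashtable *ht)
{
	return rhashtable_init(ht, &example_params);
}

/* Invoked once per remaining entry while the table is torn down. */
static void example_free_entry(void *ptr, void *arg)
{
	struct example_entry *entry = ptr;

	/* ... undo whatever the entry represents, using 'arg' as context ... */
	kfree(entry);
}

static void example_table_destroy(struct rhashtable *ht, void *ctx)
{
	/*
	 * Walks every entry, invokes the callback on each, then frees the
	 * table's buckets -- no separate list of entries is needed.
	 */
	rhashtable_free_and_destroy(ht, example_free_entry, ctx);
}

The opaque argument is how the patch forwards ct_priv from
mlx5_tc_ct_del_ft_cb() down to mlx5_tc_ct_entry_del_rules() inside the
flush callback, replacing the list walk that previously carried it.
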