From patchwork Wed Jan 4 18:30:24 2023
X-Patchwork-Submitter: Pierre Gondois
X-Patchwork-Id: 639087
From: Pierre Gondois
To: linux-kernel@vger.kernel.org
Cc: Pierre Gondois, Conor Dooley, Sudeep Holla, Palmer Dabbelt, Catalin Marinas, Will Deacon, Paul Walmsley, Albert Ou, "Rafael J. Wysocki", Len Brown, Greg Kroah-Hartman, Jeremy Linton, Gavin Shan, linux-arm-kernel@lists.infradead.org, linux-riscv@lists.infradead.org, linux-acpi@vger.kernel.org
Subject: [PATCH v4 1/6] cacheinfo: Use RISC-V's init_cache_level() as generic OF implementation
Date: Wed, 4 Jan 2023 19:30:24 +0100
Message-Id: <20230104183033.755668-2-pierre.gondois@arm.com>
In-Reply-To: <20230104183033.755668-1-pierre.gondois@arm.com>
References: <20230104183033.755668-1-pierre.gondois@arm.com>

RISC-V's implementation of init_cache_level() follows the Devicetree
Specification v0.3 regarding caches, cf.:
- s3.7.3 'Internal (L1) Cache Properties'
- s3.8 'Multi-level and Shared Cache Nodes'

Allow reusing the implementation by moving it to the generic cacheinfo
driver as init_of_cache_level(). Also make 'levels', 'leaves' and
'level' unsigned int.
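For reference, a minimal devicetree fragment of the kind this parser walks,
following the cited spec sections (node names and sizes are illustrative, not
taken from this series):

	cpu@0 {
		device_type = "cpu";
		reg = <0>;
		/* Separate L1 caches: two leaves at level 1 */
		i-cache-size = <0x8000>;
		d-cache-size = <0x8000>;
		/* Followed via of_find_next_cache_node() */
		next-level-cache = <&l2_cache>;
	};

	l2_cache: cache-controller {
		compatible = "cache";
		cache-level = <2>;
		/* Unified L2: one more leaf at level 2 */
		cache-size = <0x200000>;
		cache-unified;
	};

With such a description, init_of_cache_level() would report num_levels = 2
and num_leaves = 3 for cpu@0.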
Signed-off-by: Pierre Gondois Reviewed-by: Conor Dooley Reviewed-by: Sudeep Holla Acked-by: Palmer Dabbelt --- arch/riscv/kernel/cacheinfo.c | 39 +------------------------------ drivers/base/cacheinfo.c | 44 +++++++++++++++++++++++++++++++++++ include/linux/cacheinfo.h | 1 + 3 files changed, 46 insertions(+), 38 deletions(-) diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c index 90deabfe63ea..440a3df5944c 100644 --- a/arch/riscv/kernel/cacheinfo.c +++ b/arch/riscv/kernel/cacheinfo.c @@ -115,44 +115,7 @@ static void fill_cacheinfo(struct cacheinfo **this_leaf, int init_cache_level(unsigned int cpu) { - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); - struct device_node *np = of_cpu_device_node_get(cpu); - struct device_node *prev = NULL; - int levels = 0, leaves = 0, level; - - if (of_property_read_bool(np, "cache-size")) - ++leaves; - if (of_property_read_bool(np, "i-cache-size")) - ++leaves; - if (of_property_read_bool(np, "d-cache-size")) - ++leaves; - if (leaves > 0) - levels = 1; - - prev = np; - while ((np = of_find_next_cache_node(np))) { - of_node_put(prev); - prev = np; - if (!of_device_is_compatible(np, "cache")) - break; - if (of_property_read_u32(np, "cache-level", &level)) - break; - if (level <= levels) - break; - if (of_property_read_bool(np, "cache-size")) - ++leaves; - if (of_property_read_bool(np, "i-cache-size")) - ++leaves; - if (of_property_read_bool(np, "d-cache-size")) - ++leaves; - levels = level; - } - - of_node_put(np); - this_cpu_ci->num_levels = levels; - this_cpu_ci->num_leaves = leaves; - - return 0; + return init_of_cache_level(cpu); } int populate_cache_leaves(unsigned int cpu) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 4b5cd08c5a65..5ae58e32a77f 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -224,8 +224,52 @@ static int cache_setup_of_node(unsigned int cpu) return 0; } + +int init_of_cache_level(unsigned int cpu) +{ + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct device_node *np = of_cpu_device_node_get(cpu); + struct device_node *prev = NULL; + unsigned int levels = 0, leaves = 0, level; + + if (of_property_read_bool(np, "cache-size")) + ++leaves; + if (of_property_read_bool(np, "i-cache-size")) + ++leaves; + if (of_property_read_bool(np, "d-cache-size")) + ++leaves; + if (leaves > 0) + levels = 1; + + prev = np; + while ((np = of_find_next_cache_node(np))) { + of_node_put(prev); + prev = np; + if (!of_device_is_compatible(np, "cache")) + break; + if (of_property_read_u32(np, "cache-level", &level)) + break; + if (level <= levels) + break; + if (of_property_read_bool(np, "cache-size")) + ++leaves; + if (of_property_read_bool(np, "i-cache-size")) + ++leaves; + if (of_property_read_bool(np, "d-cache-size")) + ++leaves; + levels = level; + } + + of_node_put(np); + this_cpu_ci->num_levels = levels; + this_cpu_ci->num_leaves = leaves; + + return 0; +} + #else static inline int cache_setup_of_node(unsigned int cpu) { return 0; } +int init_of_cache_level(unsigned int cpu) { return 0; } #endif int __weak cache_setup_acpi(unsigned int cpu) diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 00b7a6ae8617..ff0328f3fbb0 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -80,6 +80,7 @@ struct cpu_cacheinfo { struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu); int init_cache_level(unsigned int cpu); +int init_of_cache_level(unsigned int cpu); int populate_cache_leaves(unsigned int cpu); int 
cache_setup_acpi(unsigned int cpu); bool last_level_cache_is_valid(unsigned int cpu);

From patchwork Wed Jan 4 18:30:25 2023
X-Patchwork-Submitter: Pierre Gondois
X-Patchwork-Id: 639342
From: Pierre Gondois
To: linux-kernel@vger.kernel.org
Cc: Pierre Gondois, Sudeep Holla, Palmer Dabbelt, Catalin Marinas, Will Deacon, Paul Walmsley, Albert Ou, "Rafael J. Wysocki", Len Brown, Greg Kroah-Hartman, Jeremy Linton, Conor Dooley, Gavin Shan, linux-arm-kernel@lists.infradead.org, linux-riscv@lists.infradead.org, linux-acpi@vger.kernel.org
Subject: [PATCH v4 2/6] cacheinfo: Return error code in init_of_cache_level()
Date: Wed, 4 Jan 2023 19:30:25 +0100
Message-Id: <20230104183033.755668-3-pierre.gondois@arm.com>
In-Reply-To: <20230104183033.755668-1-pierre.gondois@arm.com>
References: <20230104183033.755668-1-pierre.gondois@arm.com>

Make init_of_cache_level() return an error code when parsing the cache
information fails, to help detect missing information.
init_of_cache_level() is only called for riscv. Returning an error code
instead of 0 prevents detect_cache_attributes() from allocating memory
if an incomplete DT is parsed.
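As an illustration (hypothetical fragment, not taken from this series), a
next-level cache node missing its 'cache-level' property used to make the
walk stop silently with whatever had been counted so far; with this change,
init_of_cache_level() returns -EINVAL for a description such as:

	l2_cache: cache-controller {
		compatible = "cache";
		cache-size = <0x100000>;
		/* 'cache-level' is missing: of_property_read_u32() fails,
		 * so init_of_cache_level() now returns -EINVAL
		 */
	};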
Signed-off-by: Pierre Gondois
Reviewed-by: Sudeep Holla
Acked-by: Palmer Dabbelt
---
 drivers/base/cacheinfo.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 5ae58e32a77f..3e965ef6ee42 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -246,11 +246,11 @@ int init_of_cache_level(unsigned int cpu)
 		of_node_put(prev);
 		prev = np;
 		if (!of_device_is_compatible(np, "cache"))
-			break;
+			goto err_out;
 		if (of_property_read_u32(np, "cache-level", &level))
-			break;
+			goto err_out;
 		if (level <= levels)
-			break;
+			goto err_out;
 		if (of_property_read_bool(np, "cache-size"))
 			++leaves;
 		if (of_property_read_bool(np, "i-cache-size"))
@@ -265,6 +265,10 @@ int init_of_cache_level(unsigned int cpu)
 	this_cpu_ci->num_leaves = leaves;
 
 	return 0;
+
+err_out:
+	of_node_put(np);
+	return -EINVAL;
 }
 
 #else

From patchwork Wed Jan 4 18:30:26 2023
X-Patchwork-Submitter: Pierre Gondois
X-Patchwork-Id: 639085
From: Pierre Gondois
To: linux-kernel@vger.kernel.org
Cc: Pierre Gondois, Krzysztof Kozlowski, Catalin Marinas, Will Deacon, Paul Walmsley, Palmer Dabbelt, Albert Ou, "Rafael J. Wysocki", Len Brown, Sudeep Holla, Greg Kroah-Hartman, Jeremy Linton, Conor Dooley, Gavin Shan, linux-arm-kernel@lists.infradead.org, linux-riscv@lists.infradead.org, linux-acpi@vger.kernel.org
Subject: [PATCH v4 3/6] cacheinfo: Check 'cache-unified' property to count cache leaves
Date: Wed, 4 Jan 2023 19:30:26 +0100
Message-Id: <20230104183033.755668-4-pierre.gondois@arm.com>
In-Reply-To: <20230104183033.755668-1-pierre.gondois@arm.com>
References: <20230104183033.755668-1-pierre.gondois@arm.com>

The DeviceTree Specification v0.3 specifies that the cache node
'[d-|i-|]cache-size' property is required. The 'cache-unified' property
specifies whether the cache level is separate or unified. If the
cache-size property is missing, no cache leaf is accounted for. This can
lead to a 'BUG: KASAN: slab-out-of-bounds' [1] bug.

Check the 'cache-unified' property and always account for at least one
cache leaf when parsing the device tree.
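For instance, with a cache node described only by 'cache-unified' and no size
properties (hypothetical fragment, similar in shape to the report in [1]
below), no leaf used to be counted for that level; with this change it is
counted as one unified leaf, or as two (data plus instruction) when
'cache-unified' is absent:

	l2_cache: cache-controller {
		compatible = "cache";
		cache-level = <2>;
		cache-unified;
		/* No '[d-|i-]cache-size' property: of_count_cache_leaves()
		 * falls back to 'cache-unified' and reports one leaf
		 */
	};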
[1] https://lore.kernel.org/all/0f19cb3f-d6cf-4032-66d2-dedc9d09a0e3@linaro.org/ Reported-by: Krzysztof Kozlowski Signed-off-by: Pierre Gondois Tested-by: Krzysztof Kozlowski --- drivers/base/cacheinfo.c | 37 ++++++++++++++++++++++++++----------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 3e965ef6ee42..339cd1e3d5af 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -225,12 +225,9 @@ static int cache_setup_of_node(unsigned int cpu) return 0; } -int init_of_cache_level(unsigned int cpu) +static int of_count_cache_leaves(struct device_node *np) { - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); - struct device_node *np = of_cpu_device_node_get(cpu); - struct device_node *prev = NULL; - unsigned int levels = 0, leaves = 0, level; + unsigned int leaves = 0; if (of_property_read_bool(np, "cache-size")) ++leaves; @@ -238,6 +235,28 @@ int init_of_cache_level(unsigned int cpu) ++leaves; if (of_property_read_bool(np, "d-cache-size")) ++leaves; + + if (!leaves) { + /* The '[i-|d-|]cache-size' property is required, but + * if absent, fallback on the 'cache-unified' property. + */ + if (of_property_read_bool(np, "cache-unified")) + return 1; + else + return 2; + } + + return leaves; +} + +int init_of_cache_level(unsigned int cpu) +{ + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct device_node *np = of_cpu_device_node_get(cpu); + struct device_node *prev = NULL; + unsigned int levels = 0, leaves, level; + + leaves = of_count_cache_leaves(np); if (leaves > 0) levels = 1; @@ -251,12 +270,8 @@ int init_of_cache_level(unsigned int cpu) goto err_out; if (level <= levels) goto err_out; - if (of_property_read_bool(np, "cache-size")) - ++leaves; - if (of_property_read_bool(np, "i-cache-size")) - ++leaves; - if (of_property_read_bool(np, "d-cache-size")) - ++leaves; + + leaves += of_count_cache_leaves(np); levels = level; } From patchwork Wed Jan 4 18:30:27 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Pierre Gondois X-Patchwork-Id: 639086 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 42AF9C54EBD for ; Wed, 4 Jan 2023 18:32:43 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235420AbjADScM (ORCPT ); Wed, 4 Jan 2023 13:32:12 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41620 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S239938AbjADSbs (ORCPT ); Wed, 4 Jan 2023 13:31:48 -0500 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id DCBF6D2D5; Wed, 4 Jan 2023 10:31:46 -0800 (PST) Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 5D2281063; Wed, 4 Jan 2023 10:32:28 -0800 (PST) Received: from pierre123.arm.com (unknown [10.57.44.105]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 9E47F3F663; Wed, 4 Jan 2023 10:31:42 -0800 (PST) From: Pierre Gondois To: linux-kernel@vger.kernel.org Cc: Pierre Gondois , Sudeep Holla , Jeremy Linton , "Rafael J . Wysocki" , Palmer Dabbelt , Catalin Marinas , Will Deacon , Paul Walmsley , Palmer Dabbelt , Albert Ou , "Rafael J. 
Wysocki" , Len Brown , Greg Kroah-Hartman , Conor Dooley , Gavin Shan , linux-arm-kernel@lists.infradead.org, linux-riscv@lists.infradead.org, linux-acpi@vger.kernel.org Subject: [PATCH v4 4/6] ACPI: PPTT: Remove acpi_find_cache_levels() Date: Wed, 4 Jan 2023 19:30:27 +0100 Message-Id: <20230104183033.755668-5-pierre.gondois@arm.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230104183033.755668-1-pierre.gondois@arm.com> References: <20230104183033.755668-1-pierre.gondois@arm.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-acpi@vger.kernel.org acpi_find_cache_levels() is used at a single place and is short enough to be merged into the calling function. The removal allows an easier renaming of the calling function in the next patch. Also reorder the local variables in the 'reversed Christmas tree' order. Signed-off-by: Pierre Gondois Reviewed-by: Sudeep Holla Reviewed-by: Jeremy Linton Acked-by: Rafael J. Wysocki Acked-by: Palmer Dabbelt --- drivers/acpi/pptt.c | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index c91342dcbcd6..97c1d33822d1 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -281,19 +281,6 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he return NULL; } -static int acpi_find_cache_levels(struct acpi_table_header *table_hdr, - u32 acpi_cpu_id) -{ - int number_of_levels = 0; - struct acpi_pptt_processor *cpu; - - cpu = acpi_find_processor_node(table_hdr, acpi_cpu_id); - if (cpu) - number_of_levels = acpi_count_levels(table_hdr, cpu); - - return number_of_levels; -} - static u8 acpi_cache_type(enum cache_type type) { switch (type) { @@ -613,9 +600,10 @@ static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag) */ int acpi_find_last_cache_level(unsigned int cpu) { - u32 acpi_cpu_id; + struct acpi_pptt_processor *cpu_node; struct acpi_table_header *table; int number_of_levels = 0; + u32 acpi_cpu_id; table = acpi_get_pptt(); if (!table) @@ -624,7 +612,10 @@ int acpi_find_last_cache_level(unsigned int cpu) pr_debug("Cache Setup find last level CPU=%d\n", cpu); acpi_cpu_id = get_acpi_id_for_cpu(cpu); - number_of_levels = acpi_find_cache_levels(table, acpi_cpu_id); + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (cpu_node) + number_of_levels = acpi_count_levels(table, cpu_node); + pr_debug("Cache Setup find last level level=%d\n", number_of_levels); return number_of_levels; From patchwork Wed Jan 4 18:30:28 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Pierre Gondois X-Patchwork-Id: 639341 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 65138C54EBE for ; Wed, 4 Jan 2023 18:32:43 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S239640AbjADScN (ORCPT ); Wed, 4 Jan 2023 13:32:13 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41860 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S240077AbjADSb5 (ORCPT ); Wed, 4 Jan 2023 13:31:57 -0500 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id AD56D5F95; Wed, 4 Jan 2023 10:31:55 -0800 (PST) Received: from usa-sjc-imap-foss1.foss.arm.com (unknown 
[10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 2E4691063; Wed, 4 Jan 2023 10:32:37 -0800 (PST) Received: from pierre123.arm.com (unknown [10.57.44.105]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 560783F663; Wed, 4 Jan 2023 10:31:51 -0800 (PST) From: Pierre Gondois To: linux-kernel@vger.kernel.org Cc: Pierre Gondois , Jeremy Linton , Sudeep Holla , "Rafael J . Wysocki" , Palmer Dabbelt , Catalin Marinas , Will Deacon , Paul Walmsley , Palmer Dabbelt , Albert Ou , "Rafael J. Wysocki" , Len Brown , Greg Kroah-Hartman , Conor Dooley , Gavin Shan , linux-arm-kernel@lists.infradead.org, linux-riscv@lists.infradead.org, linux-acpi@vger.kernel.org Subject: [PATCH v4 5/6] ACPI: PPTT: Update acpi_find_last_cache_level() to acpi_get_cache_info() Date: Wed, 4 Jan 2023 19:30:28 +0100 Message-Id: <20230104183033.755668-6-pierre.gondois@arm.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230104183033.755668-1-pierre.gondois@arm.com> References: <20230104183033.755668-1-pierre.gondois@arm.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-acpi@vger.kernel.org acpi_find_last_cache_level() allows to find the last level of cache for a given CPU. The function is only called on arm64 ACPI based platforms to check for cache information that would be missing in the CLIDR_EL1 register. To allow populating (struct cpu_cacheinfo).num_leaves by only parsing a PPTT, update acpi_find_last_cache_level() to get the 'split_levels', i.e. the number of cache levels being split in data/instruction caches. It is assumed that there will not be data/instruction caches above a unified cache. If a split level consist of one data cache and no instruction cache (or opposite), then the missing cache will still be populated by default with minimal cache information, and maximal cpumask (all non-existing caches have the same fw_token). Suggested-by: Jeremy Linton Signed-off-by: Pierre Gondois Reviewed-by: Jeremy Linton Reviewed-by: Sudeep Holla Acked-by: Rafael J. Wysocki Acked-by: Palmer Dabbelt --- arch/arm64/kernel/cacheinfo.c | 11 +++-- drivers/acpi/pptt.c | 76 +++++++++++++++++++++++------------ include/linux/cacheinfo.h | 9 +++-- 3 files changed, 63 insertions(+), 33 deletions(-) diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c index 97c42be71338..36c3b07cdf2d 100644 --- a/arch/arm64/kernel/cacheinfo.c +++ b/arch/arm64/kernel/cacheinfo.c @@ -46,7 +46,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf, int init_cache_level(unsigned int cpu) { unsigned int ctype, level, leaves; - int fw_level; + int fw_level, ret; struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) { @@ -59,10 +59,13 @@ int init_cache_level(unsigned int cpu) leaves += (ctype == CACHE_TYPE_SEPARATE) ? 
2 : 1; } - if (acpi_disabled) + if (acpi_disabled) { fw_level = of_find_last_cache_level(cpu); - else - fw_level = acpi_find_last_cache_level(cpu); + } else { + ret = acpi_get_cache_info(cpu, &fw_level, NULL); + if (ret < 0) + return ret; + } if (fw_level < 0) return fw_level; diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index 97c1d33822d1..10975bb603fb 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -81,6 +81,7 @@ static inline bool acpi_pptt_match_type(int table_type, int type) * acpi_pptt_walk_cache() - Attempt to find the requested acpi_pptt_cache * @table_hdr: Pointer to the head of the PPTT table * @local_level: passed res reflects this cache level + * @split_levels: Number of split cache levels (data/instruction). * @res: cache resource in the PPTT we want to walk * @found: returns a pointer to the requested level if found * @level: the requested cache level @@ -100,6 +101,7 @@ static inline bool acpi_pptt_match_type(int table_type, int type) */ static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, unsigned int local_level, + unsigned int *split_levels, struct acpi_subtable_header *res, struct acpi_pptt_cache **found, unsigned int level, int type) @@ -113,8 +115,17 @@ static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, while (cache) { local_level++; + if (!(cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)) { + cache = fetch_pptt_cache(table_hdr, cache->next_level_of_cache); + continue; + } + + if (split_levels && + (acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_DATA) || + acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_INSTR))) + *split_levels = local_level; + if (local_level == level && - cache->flags & ACPI_PPTT_CACHE_TYPE_VALID && acpi_pptt_match_type(cache->attributes, type)) { if (*found != NULL && cache != *found) pr_warn("Found duplicate cache level/type unable to determine uniqueness\n"); @@ -135,8 +146,8 @@ static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, static struct acpi_pptt_cache * acpi_find_cache_level(struct acpi_table_header *table_hdr, struct acpi_pptt_processor *cpu_node, - unsigned int *starting_level, unsigned int level, - int type) + unsigned int *starting_level, unsigned int *split_levels, + unsigned int level, int type) { struct acpi_subtable_header *res; unsigned int number_of_levels = *starting_level; @@ -149,7 +160,8 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr, resource++; local_level = acpi_pptt_walk_cache(table_hdr, *starting_level, - res, &ret, level, type); + split_levels, res, &ret, + level, type); /* * we are looking for the max depth. Since its potentially * possible for a given node to have resources with differing @@ -165,29 +177,29 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr, } /** - * acpi_count_levels() - Given a PPTT table, and a CPU node, count the caches + * acpi_count_levels() - Given a PPTT table, and a CPU node, count the cache + * levels and split cache levels (data/instruction). * @table_hdr: Pointer to the head of the PPTT table * @cpu_node: processor node we wish to count caches for + * @levels: Number of levels if success. + * @split_levels: Number of split cache levels (data/instruction) if + * success. Can by NULL. 
* * Given a processor node containing a processing unit, walk into it and count * how many levels exist solely for it, and then walk up each level until we hit * the root node (ignore the package level because it may be possible to have - * caches that exist across packages). Count the number of cache levels that - * exist at each level on the way up. - * - * Return: Total number of levels found. + * caches that exist across packages). Count the number of cache levels and + * split cache levels (data/instruction) that exist at each level on the way + * up. */ -static int acpi_count_levels(struct acpi_table_header *table_hdr, - struct acpi_pptt_processor *cpu_node) +static void acpi_count_levels(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu_node, + unsigned int *levels, unsigned int *split_levels) { - int total_levels = 0; - do { - acpi_find_cache_level(table_hdr, cpu_node, &total_levels, 0, 0); + acpi_find_cache_level(table_hdr, cpu_node, levels, split_levels, 0, 0); cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent); } while (cpu_node); - - return total_levels; } /** @@ -321,7 +333,7 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta while (cpu_node && !found) { found = acpi_find_cache_level(table_hdr, cpu_node, - &total_levels, level, acpi_type); + &total_levels, NULL, level, acpi_type); *node = cpu_node; cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent); } @@ -589,36 +601,48 @@ static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag) } /** - * acpi_find_last_cache_level() - Determines the number of cache levels for a PE + * acpi_get_cache_info() - Determine the number of cache levels and + * split cache levels (data/instruction) and for a PE. * @cpu: Kernel logical CPU number + * @levels: Number of levels if success. + * @split_levels: Number of levels being split (i.e. data/instruction) + * if success. Can by NULL. * * Given a logical CPU number, returns the number of levels of cache represented * in the PPTT. Errors caused by lack of a PPTT table, or otherwise, return 0 * indicating we didn't find any cache levels. * - * Return: Cache levels visible to this core. + * Return: -ENOENT if no PPTT table or no PPTT processor struct found. + * 0 on success. */ -int acpi_find_last_cache_level(unsigned int cpu) +int acpi_get_cache_info(unsigned int cpu, unsigned int *levels, + unsigned int *split_levels) { struct acpi_pptt_processor *cpu_node; struct acpi_table_header *table; - int number_of_levels = 0; u32 acpi_cpu_id; + *levels = 0; + if (split_levels) + *split_levels = 0; + table = acpi_get_pptt(); if (!table) return -ENOENT; - pr_debug("Cache Setup find last level CPU=%d\n", cpu); + pr_debug("Cache Setup: find cache levels for CPU=%d\n", cpu); acpi_cpu_id = get_acpi_id_for_cpu(cpu); cpu_node = acpi_find_processor_node(table, acpi_cpu_id); - if (cpu_node) - number_of_levels = acpi_count_levels(table, cpu_node); + if (!cpu_node) + return -ENOENT; - pr_debug("Cache Setup find last level level=%d\n", number_of_levels); + acpi_count_levels(table, cpu_node, levels, split_levels); - return number_of_levels; + pr_debug("Cache Setup: last_level=%d split_levels=%d\n", + *levels, split_levels ? 
*split_levels : -1); + + return 0; } /** diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index ff0328f3fbb0..00d8e7f9d1c6 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -88,19 +88,22 @@ bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y); int detect_cache_attributes(unsigned int cpu); #ifndef CONFIG_ACPI_PPTT /* - * acpi_find_last_cache_level is only called on ACPI enabled + * acpi_get_cache_info() is only called on ACPI enabled * platforms using the PPTT for topology. This means that if * the platform supports other firmware configuration methods * we need to stub out the call when ACPI is disabled. * ACPI enabled platforms not using PPTT won't be making calls * to this function so we need not worry about them. */ -static inline int acpi_find_last_cache_level(unsigned int cpu) +static inline +int acpi_get_cache_info(unsigned int cpu, + unsigned int *levels, unsigned int *split_levels) { return 0; } #else -int acpi_find_last_cache_level(unsigned int cpu); +int acpi_get_cache_info(unsigned int cpu, + unsigned int *levels, unsigned int *split_levels); #endif const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf); From patchwork Wed Jan 4 18:30:29 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Pierre Gondois X-Patchwork-Id: 639340 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 9AD7CC63705 for ; Wed, 4 Jan 2023 18:32:43 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S239677AbjADScO (ORCPT ); Wed, 4 Jan 2023 13:32:14 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:42000 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S240122AbjADScE (ORCPT ); Wed, 4 Jan 2023 13:32:04 -0500 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by lindbergh.monkeyblade.net (Postfix) with ESMTP id 8628A9FEF; Wed, 4 Jan 2023 10:32:03 -0800 (PST) Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 104691063; Wed, 4 Jan 2023 10:32:45 -0800 (PST) Received: from pierre123.arm.com (unknown [10.57.44.105]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 0DA793F663; Wed, 4 Jan 2023 10:31:59 -0800 (PST) From: Pierre Gondois To: linux-kernel@vger.kernel.org Cc: Pierre Gondois , Sudeep Holla , Palmer Dabbelt , Catalin Marinas , Will Deacon , Paul Walmsley , Palmer Dabbelt , Albert Ou , "Rafael J. Wysocki" , Len Brown , Greg Kroah-Hartman , Conor Dooley , Gavin Shan , linux-arm-kernel@lists.infradead.org, linux-riscv@lists.infradead.org, linux-acpi@vger.kernel.org Subject: [PATCH v4 6/6] arch_topology: Build cacheinfo from primary CPU Date: Wed, 4 Jan 2023 19:30:29 +0100 Message-Id: <20230104183033.755668-7-pierre.gondois@arm.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230104183033.755668-1-pierre.gondois@arm.com> References: <20230104183033.755668-1-pierre.gondois@arm.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-acpi@vger.kernel.org commit 3fcbf1c77d08 ("arch_topology: Fix cache attributes detection in the CPU hotplug path") adds a call to detect_cache_attributes() to populate the cacheinfo before updating the siblings mask. 
detect_cache_attributes() allocates memory and can take the PPTT mutex (on
ACPI platforms). On PREEMPT_RT kernels, on secondary CPUs, this triggers a
'BUG: sleeping function called from invalid context' [1], as the code is
executed with preemption and interrupts disabled.

The primary CPU was previously storing the cache information using the
now removed (struct cpu_topology).llc_id:
commit 5b8dc787ce4a ("arch_topology: Drop LLC identifier stash from the
CPU topology")

allocate_cache_info() tries to build the cacheinfo from the primary CPU
prior to secondary CPUs booting, if the DT/ACPI description contains
cache information. If allocate_cache_info() fails, then fall back to the
current state for the cacheinfo allocation: [1] will be triggered in
such a case.

When unplugging a CPU, the cacheinfo memory cannot be freed. If it was,
then the memory would be allocated early by the re-plugged CPU and would
trigger [1].

Note that populate_cache_leaves() might be called multiple times due to
populate_leaves being moved up. This is required since
detect_cache_attributes() might be called with per_cpu_cacheinfo(cpu)
being allocated but not populated.

[1]:
[ 7.560791] BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:46
[ 7.560794] in_atomic(): 1, irqs_disabled(): 128, non_block: 0, pid: 0, name: swapper/111
[ 7.560796] preempt_count: 1, expected: 0
[ 7.560797] RCU nest depth: 1, expected: 1
[ 7.560799] 3 locks held by swapper/111/0:
[ 7.560800] #0: ffff403e406cae98 (&pcp->lock){+.+.}-{3:3}, at: get_page_from_freelist+0x218/0x12c8
[ 7.560811] #1: ffffc5f8ed09f8e8 (rcu_read_lock){....}-{1:3}, at: rt_spin_trylock+0x48/0xf0
[ 7.560820] #2: ffff403f400b4fd8 (&zone->lock){+.+.}-{3:3}, at: rmqueue_bulk+0x64/0xa80
[ 7.560824] irq event stamp: 0
[ 7.560825] hardirqs last enabled at (0): [<0000000000000000>] 0x0
[ 7.560827] hardirqs last disabled at (0): [] copy_process+0x5dc/0x1ab8
[ 7.560830] softirqs last enabled at (0): [] copy_process+0x5dc/0x1ab8
[ 7.560833] softirqs last disabled at (0): [<0000000000000000>] 0x0
[ 7.560834] Preemption disabled at:
[ 7.560835] [] migrate_enable+0x30/0x130
[ 7.560838] CPU: 111 PID: 0 Comm: swapper/111 Tainted: G W 6.0.0-rc4-rt6-[...]
[ 7.560841] Call trace:
[...]
[ 7.560870] __kmalloc+0xbc/0x1e8 [ 7.560873] detect_cache_attributes+0x2d4/0x5f0 [ 7.560876] update_siblings_masks+0x30/0x368 [ 7.560880] store_cpu_topology+0x78/0xb8 [ 7.560883] secondary_start_kernel+0xd0/0x198 [ 7.560885] __secondary_switched+0xb0/0xb4 Signed-off-by: Pierre Gondois Reviewed-by: Sudeep Holla Acked-by: Palmer Dabbelt --- arch/riscv/kernel/cacheinfo.c | 5 --- drivers/base/arch_topology.c | 12 +++++- drivers/base/cacheinfo.c | 71 ++++++++++++++++++++++++++--------- include/linux/cacheinfo.h | 1 + 4 files changed, 65 insertions(+), 24 deletions(-) diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c index 440a3df5944c..3a13113f1b29 100644 --- a/arch/riscv/kernel/cacheinfo.c +++ b/arch/riscv/kernel/cacheinfo.c @@ -113,11 +113,6 @@ static void fill_cacheinfo(struct cacheinfo **this_leaf, } } -int init_cache_level(unsigned int cpu) -{ - return init_of_cache_level(cpu); -} - int populate_cache_leaves(unsigned int cpu) { struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index e7d6e6657ffa..b1c1dd38ab01 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -736,7 +736,7 @@ void update_siblings_masks(unsigned int cpuid) ret = detect_cache_attributes(cpuid); if (ret && ret != -ENOENT) - pr_info("Early cacheinfo failed, ret = %d\n", ret); + pr_info("Early cacheinfo allocation failed, ret = %d\n", ret); /* update core and thread sibling masks */ for_each_online_cpu(cpu) { @@ -825,7 +825,7 @@ __weak int __init parse_acpi_topology(void) #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) void __init init_cpu_topology(void) { - int ret; + int cpu, ret; reset_cpu_topology(); ret = parse_acpi_topology(); @@ -840,6 +840,14 @@ void __init init_cpu_topology(void) reset_cpu_topology(); return; } + + for_each_possible_cpu(cpu) { + ret = fetch_cache_info(cpu); + if (ret) { + pr_err("Early cacheinfo failed, ret = %d\n", ret); + break; + } + } } void store_cpu_topology(unsigned int cpuid) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 339cd1e3d5af..b57fbd0d7114 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -386,10 +386,6 @@ static void free_cache_attributes(unsigned int cpu) return; cache_shared_cpu_map_remove(cpu); - - kfree(per_cpu_cacheinfo(cpu)); - per_cpu_cacheinfo(cpu) = NULL; - cache_leaves(cpu) = 0; } int __weak init_cache_level(unsigned int cpu) @@ -402,29 +398,71 @@ int __weak populate_cache_leaves(unsigned int cpu) return -ENOENT; } +static inline +int allocate_cache_info(int cpu) +{ + per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), + sizeof(struct cacheinfo), GFP_ATOMIC); + if (!per_cpu_cacheinfo(cpu)) { + cache_leaves(cpu) = 0; + return -ENOMEM; + } + + return 0; +} + +int fetch_cache_info(unsigned int cpu) +{ + struct cpu_cacheinfo *this_cpu_ci; + unsigned int levels, split_levels; + int ret; + + if (acpi_disabled) { + ret = init_of_cache_level(cpu); + if (ret < 0) + return ret; + } else { + ret = acpi_get_cache_info(cpu, &levels, &split_levels); + if (ret < 0) + return ret; + + this_cpu_ci = get_cpu_cacheinfo(cpu); + this_cpu_ci->num_levels = levels; + /* + * This assumes that: + * - there cannot be any split caches (data/instruction) + * above a unified cache + * - data/instruction caches come by pair + */ + this_cpu_ci->num_leaves = levels + split_levels; + } + if (!cache_leaves(cpu)) + return -ENOENT; + + return allocate_cache_info(cpu); +} + int detect_cache_attributes(unsigned int cpu) { int ret; 
- /* Since early detection of the cacheinfo is allowed via this - * function and this also gets called as CPU hotplug callbacks via - * cacheinfo_cpu_online, the initialisation can be skipped and only - * CPU maps can be updated as the CPU online status would be update - * if called via cacheinfo_cpu_online path. + /* Since early initialization/allocation of the cacheinfo is allowed + * via fetch_cache_info() and this also gets called as CPU hotplug + * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped + * as it will happen only once (the cacheinfo memory is never freed). + * Just populate the cacheinfo. */ if (per_cpu_cacheinfo(cpu)) - goto update_cpu_map; + goto populate_leaves; if (init_cache_level(cpu) || !cache_leaves(cpu)) return -ENOENT; - per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), - sizeof(struct cacheinfo), GFP_ATOMIC); - if (per_cpu_cacheinfo(cpu) == NULL) { - cache_leaves(cpu) = 0; - return -ENOMEM; - } + ret = allocate_cache_info(cpu); + if (ret) + return ret; +populate_leaves: /* * populate_cache_leaves() may completely setup the cache leaves and * shared_cpu_map or it may leave it partially setup. @@ -433,7 +471,6 @@ int detect_cache_attributes(unsigned int cpu) if (ret) goto free_ci; -update_cpu_map: /* * For systems using DT for cache hierarchy, fw_token * and shared_cpu_map will be set up here only if they are diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 00d8e7f9d1c6..dfef57077cd0 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -85,6 +85,7 @@ int populate_cache_leaves(unsigned int cpu); int cache_setup_acpi(unsigned int cpu); bool last_level_cache_is_valid(unsigned int cpu); bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y); +int fetch_cache_info(unsigned int cpu); int detect_cache_attributes(unsigned int cpu); #ifndef CONFIG_ACPI_PPTT /*