@@ -53,6 +53,8 @@ static inline void bus_lock_init(void) {}
#ifdef CONFIG_CPU_SUP_INTEL
u8 get_this_hybrid_cpu_type(void);
u32 get_this_hybrid_cpu_native_id(void);
+u32 intel_native_model_id(struct cpuinfo_x86 *c);
+enum x86_topology_cpu_type intel_cpu_type(struct cpuinfo_x86 *c);
#else
static inline u8 get_this_hybrid_cpu_type(void)
{
@@ -63,6 +65,23 @@ static inline u32 get_this_hybrid_cpu_native_id(void)
{
return 0;
}
+
+static inline u32 intel_native_model_id(struct cpuinfo_x86 *c)
+{
+ return 0;
+}
+static inline enum x86_topology_cpu_type intel_cpu_type(struct cpuinfo_x86 *c)
+{
+ return TOPO_CPU_TYPE_UNKNOWN;
+}
+#endif
+#ifdef CONFIG_CPU_SUP_AMD
+enum x86_topology_cpu_type amd_cpu_type(struct cpuinfo_x86 *c);
+#else
+static inline enum x86_topology_cpu_type amd_cpu_type(struct cpuinfo_x86 *c)
+{
+ return TOPO_CPU_TYPE_UNKNOWN;
+}
#endif
#ifdef CONFIG_IA32_FEAT_CTL
void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
@@ -105,6 +105,24 @@ struct cpuinfo_topology {
// Cache level topology IDs
u32 llc_id;
u32 l2c_id;
+
+ // Hardware defined CPU-type
+ union {
+ u32 cpu_type;
+ struct {
+ // CPUID.1A.EAX[23-0]
+ u32 intel_native_model_id:24;
+ // CPUID.1A.EAX[31-24]
+ u32 intel_type:8;
+ };
+ struct {
+ // CPUID 0x80000026.EBX
+ u32 amd_num_processors :16,
+ amd_power_efficiency_ranking :8,
+ amd_native_model_id :4,
+ amd_type :4;
+ };
+ };
};
struct cpuinfo_x86 {
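
For reference (not part of the patch), a minimal user-space sketch of how a raw leaf value lands in these bitfields; the register values are made up purely for illustration, and the layout shown assumes the usual little-endian x86 bitfield allocation:

#include <stdint.h>
#include <stdio.h>

/* Illustrative mirror of the cpu_type union above. */
union hw_cpu_type {
	uint32_t cpu_type;
	struct {				/* Intel: CPUID.1A.EAX */
		uint32_t intel_native_model_id:24;
		uint32_t intel_type:8;
	};
	struct {				/* AMD: CPUID 0x80000026.EBX */
		uint32_t amd_num_processors:16,
			 amd_power_efficiency_ranking:8,
			 amd_native_model_id:4,
			 amd_type:4;
	};
};

int main(void)
{
	/* Hypothetical raw values, chosen only to show where the bits go. */
	union hw_cpu_type intel = { .cpu_type = 0x40000001 };
	union hw_cpu_type amd   = { .cpu_type = 0x10000008 };

	printf("intel: type=0x%x native_model_id=%u\n",
	       intel.intel_type, intel.intel_native_model_id);
	printf("amd:   type=%u ranking=%u num_processors=%u\n",
	       amd.amd_type, amd.amd_power_efficiency_ranking,
	       amd.amd_num_processors);
	return 0;
}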
@@ -114,6 +114,12 @@ enum x86_topology_domains {
TOPO_MAX_DOMAIN,
};
+enum x86_topology_cpu_type {
+ TOPO_CPU_TYPE_PERFORMANCE,
+ TOPO_CPU_TYPE_EFFICIENCY,
+ TOPO_CPU_TYPE_UNKNOWN,
+};
+
struct x86_topology_system {
unsigned int dom_shifts[TOPO_MAX_DOMAIN];
unsigned int dom_size[TOPO_MAX_DOMAIN];
@@ -149,6 +155,8 @@ extern unsigned int __max_threads_per_core;
extern unsigned int __num_threads_per_package;
extern unsigned int __num_cores_per_package;
+enum x86_topology_cpu_type topology_cpu_type(struct cpuinfo_x86 *c);
+
static inline unsigned int topology_max_packages(void)
{
return __max_logical_packages;
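
A hypothetical caller (not part of this series) is expected to go through this accessor rather than read the vendor-specific bitfields directly; a sketch, assuming cpu_data() from <asm/processor.h>:

/* Illustrative only: classify a CPU by its hardware-reported type. */
static bool cpu_is_efficiency_core(unsigned int cpu)
{
	return topology_cpu_type(&cpu_data(cpu)) == TOPO_CPU_TYPE_EFFICIENCY;
}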
@@ -29,6 +29,9 @@
#include "cpu.h"
+#define TOPO_HW_CPU_TYPE_AMD_PERFORMANCE 0
+#define TOPO_HW_CPU_TYPE_AMD_EFFICIENCY 1
+
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
u32 gprs[8] = { 0 };
@@ -1205,3 +1208,14 @@ void amd_check_microcode(void)
if (cpu_feature_enabled(X86_FEATURE_ZEN2))
on_each_cpu(zenbleed_check_cpu, NULL, 1);
}
+
+enum x86_topology_cpu_type amd_cpu_type(struct cpuinfo_x86 *c)
+{
+ switch (c->topo.amd_type) {
+ case TOPO_HW_CPU_TYPE_AMD_PERFORMANCE:
+ return TOPO_CPU_TYPE_PERFORMANCE;
+ case TOPO_HW_CPU_TYPE_AMD_EFFICIENCY:
+ return TOPO_CPU_TYPE_EFFICIENCY;
+ }
+ return TOPO_CPU_TYPE_UNKNOWN;
+}
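
A quick user-space sanity check of this mapping (again, not part of the patch) just reads the leaf directly; sub-leaf 0 matches the cpuid_ebx(0x80000026) read done during topology parsing later in this series:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(0x80000026, 0, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x80000026 not enumerated");
		return 1;
	}

	/* EBX[31:28] is the core type: 0 = performance, 1 = efficiency. */
	printf("amd core type: %u\n", ebx >> 28);
	return 0;
}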
@@ -22,6 +22,7 @@ static int cpu_debug_show(struct seq_file *m, void *p)
seq_printf(m, "die_id: %u\n", c->topo.die_id);
seq_printf(m, "cu_id: %u\n", c->topo.cu_id);
seq_printf(m, "core_id: %u\n", c->topo.core_id);
+ seq_printf(m, "cpu_type: %u\n", topology_cpu_type(c));
seq_printf(m, "logical_pkg_id: %u\n", c->topo.logical_pkg_id);
seq_printf(m, "logical_die_id: %u\n", c->topo.logical_die_id);
seq_printf(m, "llc_id: %u\n", c->topo.llc_id);
@@ -878,6 +878,8 @@ static const struct cpu_dev intel_cpu_dev = {
cpu_dev_register(intel_cpu_dev);
#define X86_HYBRID_CPU_TYPE_ID_SHIFT 24
+#define TOPO_HW_CPU_TYPE_INTEL_ATOM 0x20
+#define TOPO_HW_CPU_TYPE_INTEL_CORE 0x40
/**
* get_this_hybrid_cpu_type() - Get the type of this hybrid CPU
@@ -907,3 +909,19 @@ u32 get_this_hybrid_cpu_native_id(void)
return cpuid_eax(0x0000001a) &
(BIT_ULL(X86_HYBRID_CPU_TYPE_ID_SHIFT) - 1);
}
+
+u32 intel_native_model_id(struct cpuinfo_x86 *c)
+{
+ return c->topo.intel_native_model_id;
+}
+
+enum x86_topology_cpu_type intel_cpu_type(struct cpuinfo_x86 *c)
+{
+ switch (c->topo.intel_type) {
+ case TOPO_HW_CPU_TYPE_INTEL_ATOM:
+ return TOPO_CPU_TYPE_EFFICIENCY;
+ case TOPO_HW_CPU_TYPE_INTEL_CORE:
+ return TOPO_CPU_TYPE_PERFORMANCE;
+ }
+ return TOPO_CPU_TYPE_UNKNOWN;
+}
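
The Intel leaf can be checked the same way from user space (mirroring the 0x80000026 sketch above); CPUID.1A.EAX is expected to read as zero on parts that do not enumerate a hybrid native model ID:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(0x1a, 0, &eax, &ebx, &ecx, &edx) || !eax) {
		puts("hybrid native model ID not enumerated");
		return 1;
	}

	/* EAX[31:24] is the core type (0x20 Atom, 0x40 Core), EAX[23:0] the native model ID. */
	printf("intel core type: 0x%02x, native model id: %u\n",
	       eax >> 24, eax & 0xffffff);
	return 0;
}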
@@ -182,6 +182,9 @@ static void parse_topology_amd(struct topo_scan *tscan)
if (cpu_feature_enabled(X86_FEATURE_TOPOEXT))
has_topoext = cpu_parse_topology_ext(tscan);
+ if (cpu_feature_enabled(X86_FEATURE_AMD_HETEROGENEOUS_CORES))
+ tscan->c->topo.cpu_type = cpuid_ebx(0x80000026);
+
if (!has_topoext && !parse_8000_0008(tscan))
return;
@@ -27,6 +27,16 @@ void topology_set_dom(struct topo_scan *tscan, enum x86_topology_domains dom,
}
}
+enum x86_topology_cpu_type topology_cpu_type(struct cpuinfo_x86 *c)
+{
+ if (c->x86_vendor == X86_VENDOR_INTEL)
+ return intel_cpu_type(c);
+ if (c->x86_vendor == X86_VENDOR_AMD)
+ return amd_cpu_type(c);
+
+ return TOPO_CPU_TYPE_UNKNOWN;
+}
+
static unsigned int __maybe_unused parse_num_cores_legacy(struct cpuinfo_x86 *c)
{
struct {
@@ -87,6 +97,7 @@ static void parse_topology(struct topo_scan *tscan, bool early)
.cu_id = 0xff,
.llc_id = BAD_APICID,
.l2c_id = BAD_APICID,
+ .cpu_type = TOPO_CPU_TYPE_UNKNOWN,
};
struct cpuinfo_x86 *c = tscan->c;
struct {
@@ -132,6 +143,8 @@ static void parse_topology(struct topo_scan *tscan, bool early)
case X86_VENDOR_INTEL:
if (!IS_ENABLED(CONFIG_CPU_SUP_INTEL) || !cpu_parse_topology_ext(tscan))
parse_legacy(tscan);
+ if (c->cpuid_level >= 0x1a)
+ c->topo.cpu_type = cpuid_eax(0x1a);
break;
case X86_VENDOR_HYGON:
if (IS_ENABLED(CONFIG_CPU_SUP_HYGON))