--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -46,7 +46,6 @@ struct gdt_page {
} __attribute__((aligned(PAGE_SIZE)));
DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
-DECLARE_INIT_PER_CPU(gdt_page);
/* Provide the original GDT */
static inline struct desc_struct *get_cpu_gdt_rw(unsigned int cpu)
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -20,12 +20,6 @@
#define PER_CPU_VAR(var) __percpu(var)__percpu_rel
-#ifdef CONFIG_X86_64_SMP
-# define INIT_PER_CPU_VAR(var) init_per_cpu__##var
-#else
-# define INIT_PER_CPU_VAR(var) var
-#endif
-
#else /* !__ASSEMBLY__: */
#include <linux/build_bug.h>
@@ -97,22 +91,6 @@
#define __percpu_arg(x) __percpu_prefix "%" #x
#define __force_percpu_arg(x) __force_percpu_prefix "%" #x
-/*
- * Initialized pointers to per-CPU variables needed for the boot
- * processor need to use these macros to get the proper address
- * offset from __per_cpu_load on SMP.
- *
- * There also must be an entry in vmlinux_64.lds.S
- */
-#define DECLARE_INIT_PER_CPU(var) \
- extern typeof(var) init_per_cpu_var(var)
-
-#ifdef CONFIG_X86_64_SMP
-# define init_per_cpu_var(var) init_per_cpu__##var
-#else
-# define init_per_cpu_var(var) var
-#endif
-
/*
* For arch-specific code, we can use direct single-insn ops (they
* don't give an lvalue though).
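Background for the two hunks above, for readers who have not met this machinery: on x86-64 SMP the per-CPU section used to be linked at address 0, so a per-CPU symbol's link-time "address" was only an offset, and the boot CPU's working copy lived at __per_cpu_load plus that offset; the init_per_cpu__* aliases baked that sum in at link time. Once the section is linked inside the kernel image like any other data, the symbol address already names the boot copy and the aliases have nothing left to do. A small user-space model of the before/after arithmetic (all names and constants below are illustrative stand-ins, not kernel symbols):

#include <stdio.h>
#include <stdint.h>

#define PER_CPU_LOAD	0xffffffff84000000ull	/* stand-in __per_cpu_load */
#define GDT_PAGE_OFF	0x1000ull		/* old zero-based symbol "address" */

/* Old SMP scheme: init_per_cpu__<var> = <var> + __per_cpu_load. */
static uint64_t old_init_alias(uint64_t zero_based_addr)
{
	return PER_CPU_LOAD + zero_based_addr;
}

int main(void)
{
	/* New scheme: the symbol is an ordinary image address already. */
	uint64_t new_symbol_addr = PER_CPU_LOAD + GDT_PAGE_OFF;

	printf("old alias:  %#llx\n",
	       (unsigned long long)old_init_alias(GDT_PAGE_OFF));
	printf("new symbol: %#llx\n", (unsigned long long)new_symbol_addr);
	return 0;
}

Both lines print the same address, which is the whole point: the link-time alias computed exactly what the plain symbol now provides for free.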
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -409,11 +409,13 @@ struct fixed_percpu_data {
};
DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
-DECLARE_INIT_PER_CPU(fixed_percpu_data);
static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
- return (unsigned long)&per_cpu(fixed_percpu_data, cpu);
+	/* The kernel-mode GS base of a CPU is exactly its per-CPU offset. */
+	extern unsigned long __per_cpu_offset[];
+
+	return IS_ENABLED(CONFIG_SMP) ? __per_cpu_offset[cpu] : 0;
}
extern asmlinkage void entry_SYSCALL32_ignore(void);
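The rewritten helper relies on one invariant of the new layout: a per-CPU access computes "image address of the variable plus GS base", so programming MSR_GS_BASE with a CPU's __per_cpu_offset[] entry steers every access into that CPU's area, while a GS base of 0 (early boot, or UP kernels) lands on the in-image init copy. A user-space sketch of that address arithmetic, with made-up addresses:

#include <stdio.h>
#include <stdint.h>

/* Model: a %gs-relative per-CPU access is gs_base + image address. */
static uintptr_t percpu_addr(uintptr_t gs_base, uintptr_t var_image_addr)
{
	return gs_base + var_image_addr;
}

int main(void)
{
	uintptr_t var  = 0x2000;	/* stand-in image address of a per-CPU var */
	uintptr_t off2 = 0x40000;	/* stand-in __per_cpu_offset[2] */

	/* Early boot and UP: GS base 0 resolves to the init copy. */
	printf("boot/UP: %#lx\n", (unsigned long)percpu_addr(0, var));
	/* CPU 2 after setup: GS base equals its per-CPU offset. */
	printf("cpu2:    %#lx\n", (unsigned long)percpu_addr(off2, var));
	return 0;
}

This is also why cpu_kernelmode_gs_base() can simply return the offset: the GS base and the per-CPU offset are now the same number.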
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -559,7 +559,7 @@ void early_setup_idt(void)
*/
void __head startup_64_setup_gdt_idt(void)
{
- struct desc_struct *gdt = (void *)(__force unsigned long)init_per_cpu_var(gdt_page.gdt);
+ struct desc_struct *gdt = (void *)(__force unsigned long)gdt_page.gdt;
void *handler = NULL;
struct desc_ptr startup_gdt_descr = {
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -68,11 +68,10 @@ SYM_CODE_START_NOALIGN(startup_64)
/* Set up the stack for verify_cpu() */
leaq __top_init_kernel_stack(%rip), %rsp
- /* Setup GSBASE to allow stack canary access for C code */
+	/* Zero the GS base so early per-CPU references hit the init per-CPU area */
movl $MSR_GS_BASE, %ecx
- leaq INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
- movl %edx, %eax
- shrq $32, %rdx
+ xorl %eax, %eax
+	cdq				/* %eax is zero, so this clears %edx too */
wrmsr
call startup_64_setup_gdt_idt
@@ -361,15 +360,10 @@ SYM_INNER_LABEL(common_startup_64, SYM_L_LOCAL)
/* Set up %gs.
*
- * The base of %gs always points to fixed_percpu_data. If the
- * stack protector canary is enabled, it is located at %gs:40.
* Note that, on SMP, the boot cpu uses init data section until
* the per cpu areas are set up.
*/
movl $MSR_GS_BASE,%ecx
-#ifndef CONFIG_SMP
- leaq INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
-#endif
movl %edx, %eax
shrq $32, %rdx
wrmsr
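Both versions of this code feed WRMSR, which writes the 64-bit value held in %edx (high half) and %eax (low half) to the MSR selected by %ecx. The old code had to split a 64-bit symbol address across that register pair, hence the movl/shrq dance kept above; the new code only needs both halves to be zero, and cdq produces a zero %edx for free by sign-extending the already-zeroed %eax. A user-space model of the operand packing (the address constant is illustrative):

#include <stdio.h>
#include <stdint.h>

/* WRMSR consumes %edx:%eax as a single 64-bit value. */
static uint64_t wrmsr_operand(uint32_t edx, uint32_t eax)
{
	return ((uint64_t)edx << 32) | eax;
}

int main(void)
{
	uint64_t gs = 0xffffffff84123000ull;	/* old: some symbol address */

	/* Old scheme: split the address into the register pair. */
	printf("old: %#llx\n",
	       (unsigned long long)wrmsr_operand(gs >> 32, (uint32_t)gs));
	/* New scheme: the early GS base is simply zero. */
	printf("new: %#llx\n", (unsigned long long)wrmsr_operand(0, 0));
	return 0;
}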
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -27,7 +27,6 @@
#include <asm/apic.h>
DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
-DECLARE_INIT_PER_CPU(irq_stack_backing_store);
#ifdef CONFIG_VMAP_STACK
/*
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -23,17 +23,9 @@
#include <asm/cpumask.h>
#include <asm/cpu.h>
-#ifdef CONFIG_X86_64
-#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
-#else
-#define BOOT_PERCPU_OFFSET 0
-#endif
-
-DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
-	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
-};
+unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init;
EXPORT_SYMBOL(__per_cpu_offset);
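Zero is the correct initial offset precisely because the boot CPU can run on the init copies in place: until setup_per_cpu_areas() allocates real per-CPU areas and records each CPU's delta in __per_cpu_offset[], an access with offset 0 simply hits the in-image template. A runnable user-space model of that lifecycle (per_cpu_ptr_model() and the malloc-backed "areas" are stand-ins, not kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NCPUS 4

static long __per_cpu_offset_model[NCPUS];	/* all zero until "setup" */
static long demo_var = 42;			/* in-image init copy */

/* Model of per_cpu_ptr(): variable's image address plus the CPU's offset. */
static long *per_cpu_ptr_model(long *var, int cpu)
{
	return (long *)((char *)var + __per_cpu_offset_model[cpu]);
}

int main(void)
{
	/* Boot: offset 0 means every "CPU" still aliases the init copy. */
	printf("boot: %ld\n", *per_cpu_ptr_model(&demo_var, 0));

	/* "setup_per_cpu_areas()": copy the template out, record each delta. */
	for (int cpu = 0; cpu < NCPUS; cpu++) {
		long *area = malloc(sizeof(demo_var));

		memcpy(area, &demo_var, sizeof(demo_var));
		__per_cpu_offset_model[cpu] = (char *)area - (char *)&demo_var;
	}

	*per_cpu_ptr_model(&demo_var, 2) = 1000;
	printf("cpu0=%ld cpu2=%ld\n", *per_cpu_ptr_model(&demo_var, 0),
	       *per_cpu_ptr_model(&demo_var, 2));
	return 0;
}

The pointer subtraction between separate allocations is undefined behavior in strict C; it is shown only to mirror the delta arithmetic the kernel performs with its own linear addresses.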
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -103,9 +103,6 @@ PHDRS {
text PT_LOAD FLAGS(5); /* R_E */
data PT_LOAD FLAGS(6); /* RW_ */
#ifdef CONFIG_X86_64
-#ifdef CONFIG_SMP
- percpu PT_LOAD FLAGS(6); /* RW_ */
-#endif
init PT_LOAD FLAGS(7); /* RWE */
#endif
note PT_NOTE FLAGS(0); /* ___ */
@@ -225,17 +222,6 @@ SECTIONS
__init_begin = .; /* paired with __init_end */
}
-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
- /*
- * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
- * output PHDR, so the next output section - .init.text - should
- * start another segment - init.
- */
- PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
- ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
- "per-CPU data too large - increase CONFIG_PHYSICAL_START")
-#endif
-
INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
:init
@@ -356,9 +342,7 @@ SECTIONS
EXIT_DATA
}
-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
PERCPU_SECTION(INTERNODE_CACHE_BYTES)
-#endif
RUNTIME_CONST(shift, d_hash_shift)
RUNTIME_CONST(ptr, dentry_hashtable)
@@ -497,20 +481,6 @@ SECTIONS
"kernel image bigger than KERNEL_IMAGE_SIZE");
#ifdef CONFIG_X86_64
-/*
- * Per-cpu symbols which need to be offset from __per_cpu_load
- * for the boot processor.
- */
-#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
-INIT_PER_CPU(gdt_page);
-INIT_PER_CPU(fixed_percpu_data);
-INIT_PER_CPU(irq_stack_backing_store);
-
-#ifdef CONFIG_SMP
-. = ASSERT((fixed_percpu_data == 0),
- "fixed_percpu_data is not at start of per-cpu area");
-#endif
-
#ifdef CONFIG_MITIGATION_UNRET_ENTRY
. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
#endif
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -95,9 +95,9 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
/* 64-bit entry point. */
.code64
1:
- /* Set base address in stack canary descriptor. */
+	/* Zero the GS base so early per-CPU references hit the init per-CPU area */
mov $MSR_GS_BASE,%ecx
- mov $_pa(canary), %eax
+ xor %eax, %eax
xor %edx, %edx
wrmsr
@@ -161,8 +161,6 @@ SYM_DATA_START_LOCAL(gdt_start)
SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)
.balign 16
-SYM_DATA_LOCAL(canary, .fill 48, 1, 0)
-
SYM_DATA_START_LOCAL(early_stack)
.fill BOOT_STACK_SIZE, 1, 0
SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -88,7 +88,6 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
"(jiffies|jiffies_64)|"
#if ELF_BITS == 64
"__per_cpu_load|"
- "init_per_cpu__.*|"
"__end_rodata_hpage_align|"
#endif
"__vvar_page|"
@@ -785,10 +784,6 @@ static void percpu_init(void)
* The GNU linker incorrectly associates:
* __init_begin
* __per_cpu_load
- *
- * The "gold" linker incorrectly associates:
- * init_per_cpu__fixed_percpu_data
- * init_per_cpu__gdt_page
*/
static int is_percpu_sym(ElfW(Sym) *sym, const char *symname)
{
@@ -796,8 +791,7 @@ static int is_percpu_sym(ElfW(Sym) *sym, const char *symname)
return (shndx == per_cpu_shndx) &&
strcmp(symname, "__init_begin") &&
- strcmp(symname, "__per_cpu_load") &&
- strncmp(symname, "init_per_cpu_", 13);
+ strcmp(symname, "__per_cpu_load");
}
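With the init_per_cpu__* aliases gone, the relocation tool's filter reduces to two string compares: inside the per-CPU section, everything except the two boundary markers is treated as a genuine per-CPU symbol. A quick user-space check of the simplified predicate, with plain strings standing in for ELF symbol-table entries:

#include <stdio.h>
#include <string.h>

/* Model of the simplified is_percpu_sym(): in the per-CPU section,
 * everything except the two boundary markers counts as per-CPU. */
static int is_percpu_sym_model(const char *symname)
{
	return strcmp(symname, "__init_begin") &&
	       strcmp(symname, "__per_cpu_load");
}

int main(void)
{
	const char *syms[] = { "this_cpu_off", "__per_cpu_load", "__init_begin" };

	for (int i = 0; i < 3; i++)
		printf("%-16s -> %d\n", syms[i], is_percpu_sym_model(syms[i]));
	return 0;
}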
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -51,15 +51,9 @@ SYM_CODE_START(startup_xen)
leaq __top_init_kernel_stack(%rip), %rsp
- /* Set up %gs.
- *
- * The base of %gs always points to fixed_percpu_data. If the
- * stack protector canary is enabled, it is located at %gs:40.
- * Note that, on SMP, the boot cpu uses init data section until
- * the per cpu areas are set up.
- */
+	/* Zero the GS base so early per-CPU references hit the init per-CPU area */
movl $MSR_GS_BASE,%ecx
- movq $INIT_PER_CPU_VAR(fixed_percpu_data),%rax
+ xorl %eax, %eax
cdq
wrmsr
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1838,7 +1838,6 @@ config KALLSYMS_ALL
config KALLSYMS_ABSOLUTE_PERCPU
bool
depends on KALLSYMS
- default X86_64 && SMP
# end of the "standard kernel features (expert users)" menu