| Field | Value | |
|---|---|---|
| Message ID | 20240925150059.3955569-34-ardb+git@google.com | |
| State | New | |
| Headers | show | |
| Series | x86: Rely on toolchain for relocatable code | expand |
On Wed, Sep 25, 2024 at 05:01:04PM +0200, Ard Biesheuvel wrote: > + if (r_type == R_X86_64_GOTPCREL) { > + Elf_Shdr *s = &secs[sec->shdr.sh_info].shdr; > + unsigned file_off = offset - s->sh_addr + s->sh_offset; > + > + /* > + * GOTPCREL relocations refer to instructions that load > + * a 64-bit address via a 32-bit relative reference to > + * the GOT. In this case, it is the GOT entry that > + * needs to be fixed up, not the immediate offset in > + * the opcode. Note that the linker will have applied an > + * addend of -4 to compensate for the delta between the > + * relocation offset and the value of RIP when the > + * instruction executes, and this needs to be backed out > + * again. (Addends other than -4 are permitted in > + * principle, but make no sense in practice so they are > + * not supported.) > + */ > + if (rel->r_addend != -4) { > + die("invalid addend (%ld) for %s relocation: %s\n", > + rel->r_addend, rel_type(r_type), symname); > + break; > + } For x86 PC-relative addressing, the addend is <reloc offset> - <subsequent insn offset>. So a PC-relative addend can be something other than -4 when the relocation applies to the middle of an instruction, e.g.: 5b381: 66 81 3d 00 00 00 00 01 06 cmpw $0x601,0x0(%rip) # 5b38a <generic_validate_add_page+0x4a> 5b384: R_X86_64_PC32 boot_cpu_data-0x6 5f283: 81 3d 00 00 00 00 ff ff ff 00 cmpl $0xffffff,0x0(%rip) # 5f28d <x86_acpi_suspend_lowlevel+0x9d> 5f285: R_X86_64_PC32 smpboot_control-0x8 72f67: c6 05 00 00 00 00 01 movb $0x1,0x0(%rip) # 72f6e <sched_itmt_update_handler+0x6e> 72f69: R_X86_64_PC32 x86_topology_update-0x5 Presumably that could also happen with R_X86_64_GOTPCREL?
On Tue, 1 Oct 2024 at 07:33, Josh Poimboeuf <jpoimboe@kernel.org> wrote: > > On Wed, Sep 25, 2024 at 05:01:04PM +0200, Ard Biesheuvel wrote: > > + if (r_type == R_X86_64_GOTPCREL) { > > + Elf_Shdr *s = &secs[sec->shdr.sh_info].shdr; > > + unsigned file_off = offset - s->sh_addr + s->sh_offset; > > + > > + /* > > + * GOTPCREL relocations refer to instructions that load > > + * a 64-bit address via a 32-bit relative reference to > > + * the GOT. In this case, it is the GOT entry that > > + * needs to be fixed up, not the immediate offset in > > + * the opcode. Note that the linker will have applied an > > + * addend of -4 to compensate for the delta between the > > + * relocation offset and the value of RIP when the > > + * instruction executes, and this needs to be backed out > > + * again. (Addends other than -4 are permitted in > > + * principle, but make no sense in practice so they are > > + * not supported.) > > + */ > > + if (rel->r_addend != -4) { > > + die("invalid addend (%ld) for %s relocation: %s\n", > > + rel->r_addend, rel_type(r_type), symname); > > + break; > > + } > > For x86 PC-relative addressing, the addend is <reloc offset> - > <subsequent insn offset>. So a PC-relative addend can be something > other than -4 when the relocation applies to the middle of an > instruction, e.g.: > > 5b381: 66 81 3d 00 00 00 00 01 06 cmpw $0x601,0x0(%rip) # 5b38a <generic_validate_add_page+0x4a> 5b384: R_X86_64_PC32 boot_cpu_data-0x6 > > 5f283: 81 3d 00 00 00 00 ff ff ff 00 cmpl $0xffffff,0x0(%rip) # 5f28d <x86_acpi_suspend_lowlevel+0x9d> 5f285: R_X86_64_PC32 smpboot_control-0x8 > > 72f67: c6 05 00 00 00 00 01 movb $0x1,0x0(%rip) # 72f6e <sched_itmt_update_handler+0x6e> 72f69: R_X86_64_PC32 x86_topology_update-0x5 > > Presumably that could also happen with R_X86_64_GOTPCREL? > In theory, yes. 
But for the class of GOTPCREL relaxable instructions listed in the psABI, the addend is always -4, and these are the only ones we might expect from the compiler when using -fpic with 'hidden' visibility and/or -mdirect-extern-access. Note that the memory operand foo@GOTPCREL(%rip) produces the *address* of foo, and so it is always the source operand, appearing at the end of the encoding. Alternatively, we might simply subtract the addend from 'offset' before applying the displacement from the opcode. Note that this code gets removed again in the last patch, after switching to PIE linking.
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 801fd85c3ef6..6b3fe6e2aadd 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -192,6 +192,10 @@ else KBUILD_CFLAGS += -mcmodel=kernel KBUILD_RUSTFLAGS += -Cno-redzone=y KBUILD_RUSTFLAGS += -Ccode-model=kernel + + # Don't emit relaxable GOTPCREL relocations + KBUILD_AFLAGS_KERNEL += -Wa,-mrelax-relocations=no + KBUILD_CFLAGS_KERNEL += -Wa,-mrelax-relocations=no endif # diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 6e73403e874f..7f060d873f75 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -20,6 +20,9 @@ #define RUNTIME_DISCARD_EXIT #define EMITS_PT_NOTE #define RO_EXCEPTION_TABLE_ALIGN 16 +#ifdef CONFIG_X86_64 +#define GOT_IN_RODATA +#endif #include <asm-generic/vmlinux.lds.h> #include <asm/asm-offsets.h> @@ -464,10 +467,12 @@ SECTIONS * Sections that should stay zero sized, which is safer to * explicitly check instead of blindly discarding. */ +#ifdef CONFIG_X86_32 .got : { *(.got) *(.igot.*) } ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!") +#endif .plt : { *(.plt) *(.plt.*) *(.iplt) diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 35a73e4aa74d..880f0f2e465e 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -223,6 +223,8 @@ static const char *rel_type(unsigned type) REL_TYPE(R_X86_64_JUMP_SLOT), REL_TYPE(R_X86_64_RELATIVE), REL_TYPE(R_X86_64_GOTPCREL), + REL_TYPE(R_X86_64_GOTPCRELX), + REL_TYPE(R_X86_64_REX_GOTPCRELX), REL_TYPE(R_X86_64_32), REL_TYPE(R_X86_64_32S), REL_TYPE(R_X86_64_16), @@ -843,6 +845,7 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym, case R_X86_64_32: case R_X86_64_32S: case R_X86_64_64: + case R_X86_64_GOTPCREL: /* * References to the percpu area don't need to be adjusted. 
*/ @@ -861,6 +864,31 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym, break; } + if (r_type == R_X86_64_GOTPCREL) { + Elf_Shdr *s = &secs[sec->shdr.sh_info].shdr; + unsigned file_off = offset - s->sh_addr + s->sh_offset; + + /* + * GOTPCREL relocations refer to instructions that load + * a 64-bit address via a 32-bit relative reference to + * the GOT. In this case, it is the GOT entry that + * needs to be fixed up, not the immediate offset in + * the opcode. Note that the linker will have applied an + * addend of -4 to compensate for the delta between the + * relocation offset and the value of RIP when the + * instruction executes, and this needs to be backed out + * again. (Addends other than -4 are permitted in + * principle, but make no sense in practice so they are + * not supported.) + */ + if (rel->r_addend != -4) { + die("invalid addend (%ld) for %s relocation: %s\n", + rel->r_addend, rel_type(r_type), symname); + break; + } + offset += 4 + (int32_t)get_unaligned_le32(elf_image + file_off); + } + /* * Relocation offsets for 64 bit kernels are output * as 32 bits and sign extended back to 64 bits when @@ -870,7 +898,7 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym, if ((int32_t)offset != (int64_t)offset) die("Relocation offset doesn't fit in 32 bits\n"); - if (r_type == R_X86_64_64) + if (r_type == R_X86_64_64 || r_type == R_X86_64_GOTPCREL) add_reloc(&relocs64, offset); else add_reloc(&relocs32, offset); @@ -1085,7 +1113,8 @@ static void emit_relocs(int as_text, int use_real_mode) /* Now print each relocation */ for (i = 0; i < relocs64.count; i++) - write_reloc(relocs64.offset[i], stdout); + if (!i || relocs64.offset[i] != relocs64.offset[i - 1]) + write_reloc(relocs64.offset[i], stdout); /* Print a stop */ write_reloc(0, stdout); diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 19ec49a9179b..cc14d780c70d 100644 --- a/include/asm-generic/vmlinux.lds.h +++ 
b/include/asm-generic/vmlinux.lds.h @@ -443,6 +443,12 @@ #endif #endif +#ifdef GOT_IN_RODATA +#define GOT_RODATA *(.got .igot*) +#else +#define GOT_RODATA +#endif + /* * Read only Data */ @@ -454,6 +460,7 @@ SCHED_DATA \ RO_AFTER_INIT_DATA /* Read only after init */ \ . = ALIGN(8); \ + GOT_RODATA \ BOUNDED_SECTION_BY(__tracepoints_ptrs, ___tracepoints_ptrs) \ *(__tracepoints_strings)/* Tracepoints: strings */ \ } \