From patchwork Sat May 23 12:00:21 2020
X-Patchwork-Submitter: Ard Biesheuvel
X-Patchwork-Id: 206312
From: Ard Biesheuvel
To: linux-efi@vger.kernel.org
Cc: x86@kernel.org, linux-kernel@vger.kernel.org, Ard Biesheuvel,
    Maarten Lankhorst, Linus Torvalds, Arvind Sankar
Subject: [PATCH v2 3/3] x86/boot/compressed: get rid of GOT fixup code
Date: Sat, 23 May 2020 14:00:21 +0200
Message-Id: <20200523120021.34996-4-ardb@kernel.org>
In-Reply-To: <20200523120021.34996-1-ardb@kernel.org>
References: <20200523120021.34996-1-ardb@kernel.org>

In a previous patch, we eliminated GOT entries from the decompressor
binary and added an assertion that the .got section is empty. This means
that the GOT fixup routines in both the 32-bit and 64-bit startup code
are now dead code, and can be removed.
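For reference, the assertion mentioned above presumably lives in
arch/x86/boot/compressed/vmlinux.lds.S. As a rough sketch (an illustration,
not the exact hunk from the earlier patch in this series), it amounts to a
link-time check along these lines:

    /* Sketch only: fail the link if any GOT entries survive in the decompressor */
    ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")

so any stray GOT entry shows up as a build failure rather than a silent
breakage once the fixup code below is gone.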
Signed-off-by: Ard Biesheuvel
---
 arch/x86/boot/compressed/head_32.S     | 22 ++------
 arch/x86/boot/compressed/head_64.S     | 57 --------------------
 arch/x86/boot/compressed/vmlinux.lds.S |  2 -
 3 files changed, 3 insertions(+), 78 deletions(-)

diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index ab3307036ba4..dfa4131c65df 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -49,16 +49,13 @@
  * Position Independent Executable (PIE) so that linker won't optimize
  * R_386_GOT32X relocation to its fixed symbol address.  Older
  * linkers generate R_386_32 relocations against locally defined symbols,
- * _bss, _ebss, _got and _egot, in PIE.  It isn't wrong, just less
- * optimal than R_386_RELATIVE.  But the x86 kernel fails to properly handle
+ * _bss, _ebss, in PIE.  It isn't wrong, just suboptimal compared
+ * to R_386_RELATIVE.  But the x86 kernel fails to properly handle
  * R_386_32 relocations when relocating the kernel.  To generate
- * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as
- * hidden:
+ * R_386_RELATIVE relocations, we mark _bss and _ebss as hidden:
  */
 	.hidden _bss
 	.hidden _ebss
-	.hidden _got
-	.hidden _egot
 
 __HEAD
 SYM_FUNC_START(startup_32)
@@ -191,19 +188,6 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 	shrl	$2, %ecx
 	rep	stosl
 
-/*
- * Adjust our own GOT
- */
-	leal	_got(%ebx), %edx
-	leal	_egot(%ebx), %ecx
-1:
-	cmpl	%ecx, %edx
-	jae	2f
-	addl	%ebx, (%edx)
-	addl	$4, %edx
-	jmp	1b
-2:
-
 /*
  * Do the extraction, and jump to the new kernel..
  */
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 4f7e6b84be07..706fbf6eef53 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -40,8 +40,6 @@
  */
 	.hidden _bss
 	.hidden _ebss
-	.hidden _got
-	.hidden _egot
 
 __HEAD
 	.code32
@@ -344,25 +342,6 @@ SYM_CODE_START(startup_64)
 	/* Set up the stack */
 	leaq	boot_stack_end(%rbx), %rsp
 
-	/*
-	 * paging_prepare() and cleanup_trampoline() below can have GOT
-	 * references. Adjust the table with address we are running at.
-	 *
-	 * Zero RAX for adjust_got: the GOT was not adjusted before;
-	 * there's no adjustment to undo.
-	 */
-	xorq	%rax, %rax
-
-	/*
-	 * Calculate the address the binary is loaded at and use it as
-	 * a GOT adjustment.
-	 */
-	call	1f
-1:	popq	%rdi
-	subq	$1b, %rdi
-
-	call	.Ladjust_got
-
 	/*
 	 * At this point we are in long mode with 4-level paging enabled,
 	 * but we might want to enable 5-level paging or vice versa.
@@ -447,21 +426,6 @@ trampoline_return:
 	pushq	$0
 	popfq
 
-	/*
-	 * Previously we've adjusted the GOT with address the binary was
-	 * loaded at. Now we need to re-adjust for relocation address.
-	 *
-	 * Calculate the address the binary is loaded at, so that we can
-	 * undo the previous GOT adjustment.
-	 */
-	call	1f
-1:	popq	%rax
-	subq	$1b, %rax
-
-	/* The new adjustment is the relocation address */
-	movq	%rbx, %rdi
-	call	.Ladjust_got
-
 	/*
 	 * Copy the compressed kernel to the end of our buffer
 	 * where decompression in place becomes safe.
@@ -539,27 +503,6 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 	jmp	*%rax
 SYM_FUNC_END(.Lrelocated)
 
-/*
- * Adjust the global offset table
- *
- * RAX is the previous adjustment of the table to undo (use 0 if it's the
- * first time we touch GOT).
- * RDI is the new adjustment to apply.
- */
-.Ladjust_got:
-	/* Walk through the GOT adding the address to the entries */
-	leaq	_got(%rip), %rdx
-	leaq	_egot(%rip), %rcx
-1:
-	cmpq	%rcx, %rdx
-	jae	2f
-	subq	%rax, (%rdx)	/* Undo previous adjustment */
-	addq	%rdi, (%rdx)	/* Apply the new adjustment */
-	addq	$8, %rdx
-	jmp	1b
-2:
-	ret
-
 	.code32
 /*
  * This is the 32-bit trampoline that will be copied over to low memory.
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index 60a99dfb9d72..d91fdda51aa8 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -43,9 +43,7 @@ SECTIONS
 		_erodata = . ;
 	}
 	.got : {
-		_got = .;
 		KEEP(*(.got))
-		_egot = .;
 	}
 	.got.plt : {
 		KEEP(*(.got.plt))
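
As background (not part of the patch itself): the reason the walk over
_got.._egot is now dead code is that, with every symbol reference in the
decompressor resolved locally (e.g. forced to hidden visibility, as the
head_32.S comment above describes), the toolchain emits plain PC-relative
accesses instead of GOT-indirect loads, so there are no GOT slots left to
patch at runtime. A minimal sketch of the two access patterns on x86-64,
using hypothetical symbols (got_var, hidden_var) purely for illustration:

	.data
	.globl	got_var			# hypothetical symbols, illustration only
got_var:
	.quad	1
	.hidden	hidden_var
hidden_var:
	.quad	2

	.text
	.globl	load_via_got
load_via_got:
	# GOT-indirect load: in a PIE link, a default-visibility global such
	# as got_var typically gets a GOT slot holding its absolute address,
	# which is exactly the kind of entry .Ladjust_got used to patch.
	movq	got_var@GOTPCREL(%rip), %rax
	movq	(%rax), %rax
	ret

	.globl	load_direct
load_direct:
	# Hidden symbol: resolved locally, so a plain RIP-relative access is
	# emitted, no GOT slot is created and no runtime fixup is needed.
	movq	hidden_var(%rip), %rax
	ret

With the whole decompressor built the second way, SIZEOF(.got) stays zero
and the fixup loops removed by this patch have nothing left to do.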