Message ID | 20240108125025.1389849-3-adhemerval.zanella@linaro.org |
---|---|
State | Accepted |
Commit | 25f1e16ef03a6a8fb1701c4647d46c564480d88c |
Series | x86: Move CET infrastructure to x86_64 |
On Mon, Jan 8, 2024 at 4:50 AM Adhemerval Zanella <adhemerval.zanella@linaro.org> wrote: > > CET is only support for x86_64, this patch reverts: > > - faaee1f07ed x86: Support shadow stack pointer in setjmp/longjmp. > - be9ccd27c09 i386: Add _CET_ENDBR to indirect jump targets in > add_n.S/sub_n.S > - c02695d7764 x86/CET: Update vfork to prevent child return > - 5d844e1b725 i386: Enable CET support in ucontext functions > - 124bcde683 x86: Add _CET_ENDBR to functions in crti.S > - 562837c002 x86: Add _CET_ENDBR to functions in dl-tlsdesc.S > - f753fa7dea x86: Support IBT and SHSTK in Intel CET [BZ #21598] > - 825b58f3fb i386-mcount.S: Add _CET_ENDBR to _mcount and __fentry__ > - 7e119cd582 i386: Use _CET_NOTRACK in i686/memcmp.S > - 177824e232 i386: Use _CET_NOTRACK in memcmp-sse4.S > - 0a899af097 i386: Use _CET_NOTRACK in memcpy-ssse3-rep.S > - 7fb613361c i386: Use _CET_NOTRACK in memcpy-ssse3.S > - 77a8ae0948 i386: Use _CET_NOTRACK in memset-sse2-rep.S > - 00e7b76a8f i386: Use _CET_NOTRACK in memset-sse2.S > - 90d15dc577 i386: Use _CET_NOTRACK in strcat-sse2.S > - f1574581c7 i386: Use _CET_NOTRACK in strcpy-sse2.S > - 4031d7484a i386/sub_n.S: Add a missing _CET_ENDBR to indirect jump > - target > - > Checked on i686-linux-gnu. > --- > sysdeps/i386/__longjmp.S | 73 --------- > sysdeps/i386/add_n.S | 25 ---- > sysdeps/i386/bsd-_setjmp.S | 21 --- > sysdeps/i386/bsd-setjmp.S | 21 --- > sysdeps/i386/crti.S | 2 - > sysdeps/i386/dl-tlsdesc.S | 3 - > sysdeps/i386/dl-trampoline.S | 4 - > sysdeps/i386/i386-mcount.S | 2 - > sysdeps/i386/i686/add_n.S | 25 ---- > sysdeps/i386/i686/memcmp.S | 4 +- > sysdeps/i386/i686/multiarch/memcmp-sse4.S | 4 +- > .../i386/i686/multiarch/memcpy-ssse3-rep.S | 8 +- > sysdeps/i386/i686/multiarch/memcpy-ssse3.S | 4 +- > sysdeps/i386/i686/multiarch/memset-sse2-rep.S | 4 +- > sysdeps/i386/i686/multiarch/memset-sse2.S | 4 +- > sysdeps/i386/i686/multiarch/strcat-sse2.S | 4 +- > sysdeps/i386/i686/multiarch/strcpy-sse2.S | 4 +- > sysdeps/i386/setjmp.S | 21 --- > sysdeps/i386/start.S | 1 - > sysdeps/i386/sub_n.S | 25 ---- > sysdeps/i386/sysdep.h | 11 ++ > .../unix/sysv/linux/i386/____longjmp_chk.S | 37 ----- > sysdeps/unix/sysv/linux/i386/getcontext.S | 56 ------- > sysdeps/unix/sysv/linux/i386/makecontext.S | 123 ---------------- > sysdeps/unix/sysv/linux/i386/setcontext.S | 101 +------------ > sysdeps/unix/sysv/linux/i386/swapcontext.S | 139 ------------------ > sysdeps/unix/sysv/linux/i386/sysdep.h | 5 - > sysdeps/unix/sysv/linux/i386/ucontext_i.sym | 4 - > sysdeps/unix/sysv/linux/i386/vfork.S | 24 +-- > sysdeps/x86/sysdep.h | 44 ------ > sysdeps/x86_64/sysdep.h | 42 ++++++ > 31 files changed, 75 insertions(+), 770 deletions(-) > > diff --git a/sysdeps/i386/__longjmp.S b/sysdeps/i386/__longjmp.S > index 302c00ac62..bb83609d57 100644 > --- a/sysdeps/i386/__longjmp.S > +++ b/sysdeps/i386/__longjmp.S > @@ -19,55 +19,14 @@ > #include <sysdep.h> > #include <pointer_guard.h> > #include <jmpbuf-offsets.h> > -#include <jmp_buf-ssp.h> > #include <asm-syntax.h> > #include <stap-probe.h> > > -/* Don't restore shadow stack register if > - 1. Shadow stack isn't enabled. Or > - 2. __longjmp is defined for __longjmp_cancel. > - */ > -#if !SHSTK_ENABLED || defined __longjmp > -# undef SHADOW_STACK_POINTER_OFFSET > -#endif > - > .text > ENTRY (__longjmp) > #ifdef PTR_DEMANGLE > movl 4(%esp), %eax /* User's jmp_buf in %eax. */ > > -# ifdef SHADOW_STACK_POINTER_OFFSET > -# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET > - /* Check if Shadow Stack is enabled. 
*/ > - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET > - jz L(skip_ssp) > -# else > - xorl %edx, %edx > -# endif > - /* Check and adjust the Shadow-Stack-Pointer. */ > - rdsspd %edx > - /* And compare it with the saved ssp value. */ > - subl SHADOW_STACK_POINTER_OFFSET(%eax), %edx > - je L(skip_ssp) > - /* Count the number of frames to adjust and adjust it > - with incssp instruction. The instruction can adjust > - the ssp by [0..255] value only thus use a loop if > - the number of frames is bigger than 255. */ > - negl %edx > - shrl $2, %edx > - /* NB: We saved Shadow-Stack-Pointer of setjmp. Since we are > - restoring Shadow-Stack-Pointer of setjmp's caller, we > - need to unwind shadow stack by one more frame. */ > - addl $1, %edx > - movl $255, %ebx > -L(loop): > - cmpl %ebx, %edx > - cmovb %edx, %ebx > - incsspd %ebx > - subl %ebx, %edx > - ja L(loop) > -L(skip_ssp): > -# endif > /* Save the return address now. */ > movl (JB_PC*4)(%eax), %edx > /* Get the stack pointer. */ > @@ -98,38 +57,6 @@ L(skip_ssp): > #else > movl 4(%esp), %ecx /* User's jmp_buf in %ecx. */ > movl 8(%esp), %eax /* Second argument is return value. */ > -# ifdef SHADOW_STACK_POINTER_OFFSET > -# if IS_IN (libc) && defined SHARED > - /* Check if Shadow Stack is enabled. */ > - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET > - jz L(skip_ssp) > -# endif > - /* Check and adjust the Shadow-Stack-Pointer. */ > - xorl %edx, %edx > - /* Get the current ssp. */ > - rdsspd %edx > - /* And compare it with the saved ssp value. */ > - subl SHADOW_STACK_POINTER_OFFSET(%ecx), %edx > - je L(skip_ssp) > - /* Count the number of frames to adjust and adjust it > - with incssp instruction. The instruction can adjust > - the ssp by [0..255] value only thus use a loop if > - the number of frames is bigger than 255. */ > - negl %edx > - shrl $2, %edx > - /* NB: We saved Shadow-Stack-Pointer of setjmp. Since we are > - restoring Shadow-Stack-Pointer of setjmp's caller, we > - need to unwind shadow stack by one more frame. */ > - addl $1, %edx > - movl $255, %ebx > -L(loop): > - cmpl %ebx, %edx > - cmovb %edx, %ebx > - incsspd %ebx > - subl %ebx, %edx > - ja L(loop) > -L(skip_ssp): > -# endif > /* Save the return address now. */ > movl (JB_PC*4)(%ecx), %edx > LIBC_PROBE (longjmp, 3, 4@%ecx, -4@%eax, 4@%edx) > diff --git a/sysdeps/i386/add_n.S b/sysdeps/i386/add_n.S > index d4af6d92ec..c1b7098b1c 100644 > --- a/sysdeps/i386/add_n.S > +++ b/sysdeps/i386/add_n.S > @@ -40,13 +40,6 @@ ENTRY (__mpn_add_n) > cfi_rel_offset (esi, 0) > movl S2(%esp),%edx > movl SIZE(%esp),%ecx > - > -#if IBT_ENABLED > - pushl %ebx > - cfi_adjust_cfa_offset (4) > - cfi_rel_offset (ebx, 0) > -#endif > - > movl %ecx,%eax > shrl $3,%ecx /* compute count for unrolled loop */ > negl %eax > @@ -58,9 +51,6 @@ ENTRY (__mpn_add_n) > subl %eax,%esi /* ... by a constant when we ... */ > subl %eax,%edx /* ... enter the loop */ > shrl $2,%eax /* restore previous value */ > -#if IBT_ENABLED > - leal -4(,%eax,4),%ebx /* Count for 4-byte endbr32 */ > -#endif > #ifdef PIC > /* Calculate start address in loop for PIC. Due to limitations in some > assemblers, Loop-L0-3 cannot be put into the leal */ > @@ -74,40 +64,30 @@ L(0): leal (%eax,%eax,8),%eax > #else > /* Calculate start address in loop for non-PIC. 
*/ > leal (L(oop) - 3)(%eax,%eax,8),%eax > -#endif > -#if IBT_ENABLED > - addl %ebx,%eax /* Adjust for endbr32 */ > #endif > jmp *%eax /* jump into loop */ > ALIGN (3) > L(oop): movl (%esi),%eax > adcl (%edx),%eax > movl %eax,(%edi) > - _CET_ENDBR > movl 4(%esi),%eax > adcl 4(%edx),%eax > movl %eax,4(%edi) > - _CET_ENDBR > movl 8(%esi),%eax > adcl 8(%edx),%eax > movl %eax,8(%edi) > - _CET_ENDBR > movl 12(%esi),%eax > adcl 12(%edx),%eax > movl %eax,12(%edi) > - _CET_ENDBR > movl 16(%esi),%eax > adcl 16(%edx),%eax > movl %eax,16(%edi) > - _CET_ENDBR > movl 20(%esi),%eax > adcl 20(%edx),%eax > movl %eax,20(%edi) > - _CET_ENDBR > movl 24(%esi),%eax > adcl 24(%edx),%eax > movl %eax,24(%edi) > - _CET_ENDBR > movl 28(%esi),%eax > adcl 28(%edx),%eax > movl %eax,28(%edi) > @@ -120,11 +100,6 @@ L(oop): movl (%esi),%eax > sbbl %eax,%eax > negl %eax > > -#if IBT_ENABLED > - popl %ebx > - cfi_adjust_cfa_offset (-4) > - cfi_restore (ebx) > -#endif > popl %esi > cfi_adjust_cfa_offset (-4) > cfi_restore (esi) > diff --git a/sysdeps/i386/bsd-_setjmp.S b/sysdeps/i386/bsd-_setjmp.S > index eb3ac9c8e9..5fd671a477 100644 > --- a/sysdeps/i386/bsd-_setjmp.S > +++ b/sysdeps/i386/bsd-_setjmp.S > @@ -23,18 +23,12 @@ > #include <sysdep.h> > #include <pointer_guard.h> > #include <jmpbuf-offsets.h> > -#include <jmp_buf-ssp.h> > #include <stap-probe.h> > > #define PARMS 4 /* no space for saved regs */ > #define JMPBUF PARMS > #define SIGMSK JMPBUF+4 > > -/* Don't save shadow stack register if shadow stack isn't enabled. */ > -#if !SHSTK_ENABLED > -# undef SHADOW_STACK_POINTER_OFFSET > -#endif > - > ENTRY (_setjmp) > > xorl %eax, %eax > @@ -58,21 +52,6 @@ ENTRY (_setjmp) > movl %ebp, (JB_BP*4)(%edx) /* Save caller's frame pointer. */ > > movl %eax, JB_SIZE(%edx) /* No signal mask set. */ > -#ifdef SHADOW_STACK_POINTER_OFFSET > -# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET > - /* Check if Shadow Stack is enabled. */ > - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET > - jz L(skip_ssp) > -# else > - xorl %ecx, %ecx > -# endif > - /* Get the current Shadow-Stack-Pointer and save it. */ > - rdsspd %ecx > - movl %ecx, SHADOW_STACK_POINTER_OFFSET(%edx) > -# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET > -L(skip_ssp): > -# endif > -#endif > ret > END (_setjmp) > libc_hidden_def (_setjmp) > diff --git a/sysdeps/i386/bsd-setjmp.S b/sysdeps/i386/bsd-setjmp.S > index c03f235d0f..13338a6480 100644 > --- a/sysdeps/i386/bsd-setjmp.S > +++ b/sysdeps/i386/bsd-setjmp.S > @@ -23,18 +23,12 @@ > #include <sysdep.h> > #include <pointer_guard.h> > #include <jmpbuf-offsets.h> > -#include <jmp_buf-ssp.h> > #include <stap-probe.h> > > #define PARMS 4 /* no space for saved regs */ > #define JMPBUF PARMS > #define SIGMSK JMPBUF+4 > > -/* Don't save shadow stack register if shadow stack isn't enabled. */ > -#if !SHSTK_ENABLED > -# undef SHADOW_STACK_POINTER_OFFSET > -#endif > - > ENTRY (setjmp) > /* Note that we have to use a non-exported symbol in the next > jump since otherwise gas will emit it as a jump through the > @@ -58,21 +52,6 @@ ENTRY (setjmp) > #endif > movl %ecx, (JB_PC*4)(%eax) > movl %ebp, (JB_BP*4)(%eax) /* Save caller's frame pointer. */ > -#ifdef SHADOW_STACK_POINTER_OFFSET > -# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET > - /* Check if Shadow Stack is enabled. */ > - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET > - jz L(skip_ssp) > -# else > - xorl %ecx, %ecx > -# endif > - /* Get the current Shadow-Stack-Pointer and save it. 
*/ > - rdsspd %ecx > - movl %ecx, SHADOW_STACK_POINTER_OFFSET(%eax) > -# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET > -L(skip_ssp): > -# endif > -#endif > > /* Call __sigjmp_save. */ > pushl $1 > diff --git a/sysdeps/i386/crti.S b/sysdeps/i386/crti.S > index 71d19b698c..f9662eeb5a 100644 > --- a/sysdeps/i386/crti.S > +++ b/sysdeps/i386/crti.S > @@ -61,7 +61,6 @@ > .hidden _init > .type _init, @function > _init: > - _CET_ENDBR > pushl %ebx > /* Maintain 16-byte stack alignment for called functions. */ > subl $8, %esp > @@ -82,7 +81,6 @@ _init: > .hidden _fini > .type _fini, @function > _fini: > - _CET_ENDBR > pushl %ebx > subl $8, %esp > LOAD_PIC_REG (bx) > diff --git a/sysdeps/i386/dl-tlsdesc.S b/sysdeps/i386/dl-tlsdesc.S > index 318b82a561..90d93caa0c 100644 > --- a/sysdeps/i386/dl-tlsdesc.S > +++ b/sysdeps/i386/dl-tlsdesc.S > @@ -37,7 +37,6 @@ > cfi_startproc > .align 16 > _dl_tlsdesc_return: > - _CET_ENDBR > movl 4(%eax), %eax > ret > cfi_endproc > @@ -59,7 +58,6 @@ _dl_tlsdesc_return: > cfi_startproc > .align 16 > _dl_tlsdesc_undefweak: > - _CET_ENDBR > movl 4(%eax), %eax > subl %gs:0, %eax > ret > @@ -101,7 +99,6 @@ _dl_tlsdesc_dynamic (struct tlsdesc *tdp) > cfi_startproc > .align 16 > _dl_tlsdesc_dynamic: > - _CET_ENDBR > /* Like all TLS resolvers, preserve call-clobbered registers. > We need two scratch regs anyway. */ > subl $28, %esp > diff --git a/sysdeps/i386/dl-trampoline.S b/sysdeps/i386/dl-trampoline.S > index ecba034958..2632020145 100644 > --- a/sysdeps/i386/dl-trampoline.S > +++ b/sysdeps/i386/dl-trampoline.S > @@ -26,7 +26,6 @@ > .align 16 > _dl_runtime_resolve: > cfi_adjust_cfa_offset (8) > - _CET_ENDBR > pushl %eax # Preserve registers otherwise clobbered. > cfi_adjust_cfa_offset (4) > pushl %ecx > @@ -53,7 +52,6 @@ _dl_runtime_resolve: > .align 16 > _dl_runtime_resolve_shstk: > cfi_adjust_cfa_offset (8) > - _CET_ENDBR > pushl %eax # Preserve registers otherwise clobbered. > cfi_adjust_cfa_offset (4) > pushl %edx > @@ -78,7 +76,6 @@ _dl_runtime_resolve_shstk: > .align 16 > _dl_runtime_profile_shstk: > cfi_adjust_cfa_offset (8) > - _CET_ENDBR > pushl %esp > cfi_adjust_cfa_offset (4) > addl $8, (%esp) # Account for the pushed PLT data > @@ -123,7 +120,6 @@ _dl_runtime_profile_shstk: > .align 16 > _dl_runtime_profile: > cfi_adjust_cfa_offset (8) > - _CET_ENDBR > pushl %esp > cfi_adjust_cfa_offset (4) > addl $8, (%esp) # Account for the pushed PLT data > diff --git a/sysdeps/i386/i386-mcount.S b/sysdeps/i386/i386-mcount.S > index 8066649f8e..6082e1a7de 100644 > --- a/sysdeps/i386/i386-mcount.S > +++ b/sysdeps/i386/i386-mcount.S > @@ -29,7 +29,6 @@ > .type C_SYMBOL_NAME(_mcount), @function > .align ALIGNARG(4) > C_LABEL(_mcount) > - _CET_ENDBR > /* Save the caller-clobbered registers. */ > pushl %eax > pushl %ecx > @@ -58,7 +57,6 @@ weak_alias (_mcount, mcount) > .type C_SYMBOL_NAME(__fentry__), @function > .align ALIGNARG(4) > C_LABEL(__fentry__) > - _CET_ENDBR > /* Save the caller-clobbered registers. 
*/ > pushl %eax > pushl %ecx > diff --git a/sysdeps/i386/i686/add_n.S b/sysdeps/i386/i686/add_n.S > index c2bc798187..bbc0cc71a2 100644 > --- a/sysdeps/i386/i686/add_n.S > +++ b/sysdeps/i386/i686/add_n.S > @@ -44,13 +44,6 @@ ENTRY (__mpn_add_n) > cfi_rel_offset (esi, 0) > movl S2(%esp),%edx > movl SIZE(%esp),%ecx > - > -#if IBT_ENABLED > - pushl %ebx > - cfi_adjust_cfa_offset (4) > - cfi_rel_offset (ebx, 0) > -#endif > - > movl %ecx,%eax > shrl $3,%ecx /* compute count for unrolled loop */ > negl %eax > @@ -62,9 +55,6 @@ ENTRY (__mpn_add_n) > subl %eax,%esi /* ... by a constant when we ... */ > subl %eax,%edx /* ... enter the loop */ > shrl $2,%eax /* restore previous value */ > -#if IBT_ENABLED > - leal -4(,%eax,4),%ebx /* Count for 4-byte endbr32 */ > -#endif > #ifdef PIC > /* Calculate start address in loop for PIC. */ > leal (L(oop)-L(0)-3)(%eax,%eax,8),%eax > @@ -73,40 +63,30 @@ L(0): > #else > /* Calculate start address in loop for non-PIC. */ > leal (L(oop) - 3)(%eax,%eax,8),%eax > -#endif > -#if IBT_ENABLED > - addl %ebx,%eax /* Adjust for endbr32 */ > #endif > jmp *%eax /* jump into loop */ > ALIGN (3) > L(oop): movl (%esi),%eax > adcl (%edx),%eax > movl %eax,(%edi) > - _CET_ENDBR > movl 4(%esi),%eax > adcl 4(%edx),%eax > movl %eax,4(%edi) > - _CET_ENDBR > movl 8(%esi),%eax > adcl 8(%edx),%eax > movl %eax,8(%edi) > - _CET_ENDBR > movl 12(%esi),%eax > adcl 12(%edx),%eax > movl %eax,12(%edi) > - _CET_ENDBR > movl 16(%esi),%eax > adcl 16(%edx),%eax > movl %eax,16(%edi) > - _CET_ENDBR > movl 20(%esi),%eax > adcl 20(%edx),%eax > movl %eax,20(%edi) > - _CET_ENDBR > movl 24(%esi),%eax > adcl 24(%edx),%eax > movl %eax,24(%edi) > - _CET_ENDBR > movl 28(%esi),%eax > adcl 28(%edx),%eax > movl %eax,28(%edi) > @@ -119,11 +99,6 @@ L(oop): movl (%esi),%eax > sbbl %eax,%eax > negl %eax > > -#if IBT_ENABLED > - popl %ebx > - cfi_adjust_cfa_offset (-4) > - cfi_restore (ebx) > -#endif > popl %esi > cfi_adjust_cfa_offset (-4) > cfi_restore (esi) > diff --git a/sysdeps/i386/i686/memcmp.S b/sysdeps/i386/i686/memcmp.S > index 94600f5e53..0738ee94fd 100644 > --- a/sysdeps/i386/i686/memcmp.S > +++ b/sysdeps/i386/i686/memcmp.S > @@ -80,7 +80,7 @@ L(not_1): > LOAD_JUMP_TABLE_ENTRY (L(table_32bytes), %ecx) > addl %ecx, %edx > addl %ecx, %esi > - _CET_NOTRACK jmp *%ebx > + jmp *%ebx > > ALIGN (4) > L(28bytes): > @@ -326,7 +326,7 @@ L(32bytesormore): > LOAD_JUMP_TABLE_ENTRY (L(table_32bytes), %ecx) > addl %ecx, %edx > addl %ecx, %esi > - _CET_NOTRACK jmp *%ebx > + jmp *%ebx > > L(load_ecx_28): > addl $0x4, %edx > diff --git a/sysdeps/i386/i686/multiarch/memcmp-sse4.S b/sysdeps/i386/i686/multiarch/memcmp-sse4.S > index f0b2d8429f..2cdda5f80d 100644 > --- a/sysdeps/i386/i686/multiarch/memcmp-sse4.S > +++ b/sysdeps/i386/i686/multiarch/memcmp-sse4.S > @@ -58,7 +58,7 @@ > absolute address. */ \ > addl (%ebx,INDEX,SCALE), %ebx; \ > /* We loaded the jump table and adjusted EDX/ESI. Go. */ \ > - _CET_NOTRACK jmp *%ebx > + jmp *%ebx > # else > # define JMPTBL(I, B) I > > @@ -66,7 +66,7 @@ > jump table with relative offsets. INDEX is a register contains the > index into the jump table. SCALE is the scale of INDEX. 
*/ > # define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \ > - _CET_NOTRACK jmp *TABLE(,INDEX,SCALE) > + jmp *TABLE(,INDEX,SCALE) > # endif > > > diff --git a/sysdeps/i386/i686/multiarch/memcpy-ssse3-rep.S b/sysdeps/i386/i686/multiarch/memcpy-ssse3-rep.S > index abdde55001..a7e80dcd90 100644 > --- a/sysdeps/i386/i686/multiarch/memcpy-ssse3-rep.S > +++ b/sysdeps/i386/i686/multiarch/memcpy-ssse3-rep.S > @@ -64,7 +64,7 @@ > absolute address. */ \ > addl (%ebx,INDEX,SCALE), %ebx; \ > /* We loaded the jump table. Go. */ \ > - _CET_NOTRACK jmp *%ebx > + jmp *%ebx > > # define BRANCH_TO_JMPTBL_ENTRY_VALUE(TABLE) \ > addl $(TABLE - .), %ebx > @@ -72,7 +72,7 @@ > # define BRANCH_TO_JMPTBL_ENTRY_TAIL(TABLE, INDEX, SCALE) \ > addl (%ebx,INDEX,SCALE), %ebx; \ > /* We loaded the jump table. Go. */ \ > - _CET_NOTRACK jmp *%ebx > + jmp *%ebx > #else > # define PARMS 4 > # define ENTRANCE > @@ -84,12 +84,12 @@ > absolute offsets. INDEX is a register contains the index into the > jump table. SCALE is the scale of INDEX. */ > # define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \ > - _CET_NOTRACK jmp *TABLE(,INDEX,SCALE) > + jmp *TABLE(,INDEX,SCALE) > > # define BRANCH_TO_JMPTBL_ENTRY_VALUE(TABLE) > > # define BRANCH_TO_JMPTBL_ENTRY_TAIL(TABLE, INDEX, SCALE) \ > - _CET_NOTRACK jmp *TABLE(,INDEX,SCALE) > + jmp *TABLE(,INDEX,SCALE) > #endif > > .section .text.ssse3,"ax",@progbits > diff --git a/sysdeps/i386/i686/multiarch/memcpy-ssse3.S b/sysdeps/i386/i686/multiarch/memcpy-ssse3.S > index 60cc5f14ea..713c5bdb76 100644 > --- a/sysdeps/i386/i686/multiarch/memcpy-ssse3.S > +++ b/sysdeps/i386/i686/multiarch/memcpy-ssse3.S > @@ -64,7 +64,7 @@ > absolute address. */ \ > addl (%ebx, INDEX, SCALE), %ebx; \ > /* We loaded the jump table. Go. */ \ > - _CET_NOTRACK jmp *%ebx > + jmp *%ebx > # else > > # define PARMS 4 > @@ -78,7 +78,7 @@ > jump table. SCALE is the scale of INDEX. */ > > # define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \ > - _CET_NOTRACK jmp *TABLE(, INDEX, SCALE) > + jmp *TABLE(, INDEX, SCALE) > # endif > > .section .text.ssse3,"ax",@progbits > diff --git a/sysdeps/i386/i686/multiarch/memset-sse2-rep.S b/sysdeps/i386/i686/multiarch/memset-sse2-rep.S > index 52d046d12b..d1a047319c 100644 > --- a/sysdeps/i386/i686/multiarch/memset-sse2-rep.S > +++ b/sysdeps/i386/i686/multiarch/memset-sse2-rep.S > @@ -56,7 +56,7 @@ > add (%ebx,%ecx,4), %ebx; \ > add %ecx, %edx; \ > /* We loaded the jump table and adjusted EDX. Go. */ \ > - _CET_NOTRACK jmp *%ebx > + jmp *%ebx > #else > # define ENTRANCE > # define RETURN_END ret > @@ -68,7 +68,7 @@ > absolute offsets. */ > # define BRANCH_TO_JMPTBL_ENTRY(TABLE) \ > add %ecx, %edx; \ > - _CET_NOTRACK jmp *TABLE(,%ecx,4) > + jmp *TABLE(,%ecx,4) > #endif > > .section .text.sse2,"ax",@progbits > diff --git a/sysdeps/i386/i686/multiarch/memset-sse2.S b/sysdeps/i386/i686/multiarch/memset-sse2.S > index ac21fcb00b..2e00743477 100644 > --- a/sysdeps/i386/i686/multiarch/memset-sse2.S > +++ b/sysdeps/i386/i686/multiarch/memset-sse2.S > @@ -56,7 +56,7 @@ > add (%ebx,%ecx,4), %ebx; \ > add %ecx, %edx; \ > /* We loaded the jump table and adjusted EDX. Go. */ \ > - _CET_NOTRACK jmp *%ebx > + jmp *%ebx > #else > # define ENTRANCE > # define RETURN_END ret > @@ -68,7 +68,7 @@ > absolute offsets. 
*/ > # define BRANCH_TO_JMPTBL_ENTRY(TABLE) \ > add %ecx, %edx; \ > - _CET_NOTRACK jmp *TABLE(,%ecx,4) > + jmp *TABLE(,%ecx,4) > #endif > > .section .text.sse2,"ax",@progbits > diff --git a/sysdeps/i386/i686/multiarch/strcat-sse2.S b/sysdeps/i386/i686/multiarch/strcat-sse2.S > index 7ac4827bf7..682f43ff5f 100644 > --- a/sysdeps/i386/i686/multiarch/strcat-sse2.S > +++ b/sysdeps/i386/i686/multiarch/strcat-sse2.S > @@ -49,7 +49,7 @@ > absolute address. */ \ > addl (%ecx,INDEX,SCALE), %ecx; \ > /* We loaded the jump table and adjusted ECX. Go. */ \ > - _CET_NOTRACK jmp *%ecx > + jmp *%ecx > # else > # define JMPTBL(I, B) I > > @@ -58,7 +58,7 @@ > jump table. SCALE is the scale of INDEX. */ > > # define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \ > - _CET_NOTRACK jmp *TABLE(,INDEX,SCALE) > + jmp *TABLE(,INDEX,SCALE) > # endif > > # ifndef STRCAT > diff --git a/sysdeps/i386/i686/multiarch/strcpy-sse2.S b/sysdeps/i386/i686/multiarch/strcpy-sse2.S > index 5c358e5e7e..a18a8ca5ba 100644 > --- a/sysdeps/i386/i686/multiarch/strcpy-sse2.S > +++ b/sysdeps/i386/i686/multiarch/strcpy-sse2.S > @@ -64,7 +64,7 @@ > absolute address. */ \ > addl (%ecx,INDEX,SCALE), %ecx; \ > /* We loaded the jump table and adjusted ECX. Go. */ \ > - _CET_NOTRACK jmp *%ecx > + jmp *%ecx > # else > # define JMPTBL(I, B) I > > @@ -73,7 +73,7 @@ > jump table. SCALE is the scale of INDEX. */ > > # define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \ > - _CET_NOTRACK jmp *TABLE(,INDEX,SCALE) > + jmp *TABLE(,INDEX,SCALE) > # endif > > .text > diff --git a/sysdeps/i386/setjmp.S b/sysdeps/i386/setjmp.S > index ab1a8090cb..08a98f2388 100644 > --- a/sysdeps/i386/setjmp.S > +++ b/sysdeps/i386/setjmp.S > @@ -19,7 +19,6 @@ > #include <sysdep.h> > #include <pointer_guard.h> > #include <jmpbuf-offsets.h> > -#include <jmp_buf-ssp.h> > #include <asm-syntax.h> > #include <stap-probe.h> > > @@ -27,11 +26,6 @@ > #define JMPBUF PARMS > #define SIGMSK JMPBUF+4 > > -/* Don't save shadow stack register if shadow stack isn't enabled. */ > -#if !SHSTK_ENABLED > -# undef SHADOW_STACK_POINTER_OFFSET > -#endif > - > ENTRY (__sigsetjmp) > > movl JMPBUF(%esp), %eax > @@ -53,21 +47,6 @@ ENTRY (__sigsetjmp) > movl %ecx, (JB_PC*4)(%eax) > movl %ebp, (JB_BP*4)(%eax) /* Save caller's frame pointer. */ > > -#ifdef SHADOW_STACK_POINTER_OFFSET > -# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET > - /* Check if Shadow Stack is enabled. */ > - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET > - jz L(skip_ssp) > -# else > - xorl %ecx, %ecx > -# endif > - /* Get the current Shadow-Stack-Pointer and save it. */ > - rdsspd %ecx > - movl %ecx, SHADOW_STACK_POINTER_OFFSET(%eax) > -# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET > -L(skip_ssp): > -# endif > -#endif > #if IS_IN (rtld) > /* In ld.so we never save the signal mask. 
*/ > xorl %eax, %eax > diff --git a/sysdeps/i386/start.S b/sysdeps/i386/start.S > index e042ed156c..30577176f8 100644 > --- a/sysdeps/i386/start.S > +++ b/sysdeps/i386/start.S > @@ -132,7 +132,6 @@ ENTRY (_start) > > #if defined PIC && !defined SHARED > __wrap_main: > - _CET_ENDBR > jmp main@PLT > #endif > END (_start) > diff --git a/sysdeps/i386/sub_n.S b/sysdeps/i386/sub_n.S > index 3ebe984672..c111bf3f03 100644 > --- a/sysdeps/i386/sub_n.S > +++ b/sysdeps/i386/sub_n.S > @@ -40,13 +40,6 @@ ENTRY (__mpn_sub_n) > cfi_rel_offset (esi, 0) > movl S2(%esp),%edx > movl SIZE(%esp),%ecx > - > -#if IBT_ENABLED > - pushl %ebx > - cfi_adjust_cfa_offset (4) > - cfi_rel_offset (ebx, 0) > -#endif > - > movl %ecx,%eax > shrl $3,%ecx /* compute count for unrolled loop */ > negl %eax > @@ -58,9 +51,6 @@ ENTRY (__mpn_sub_n) > subl %eax,%esi /* ... by a constant when we ... */ > subl %eax,%edx /* ... enter the loop */ > shrl $2,%eax /* restore previous value */ > -#if defined __CET__ && (__CET__ & 1) != 0 > - leal -4(,%eax,4),%ebx /* Count for 4-byte endbr32 */ > -#endif > #ifdef PIC > /* Calculate start address in loop for PIC. Due to limitations in some > assemblers, Loop-L0-3 cannot be put into the leal */ > @@ -74,40 +64,30 @@ L(0): leal (%eax,%eax,8),%eax > #else > /* Calculate start address in loop for non-PIC. */ > leal (L(oop) - 3)(%eax,%eax,8),%eax > -#endif > -#if defined __CET__ && (__CET__ & 1) != 0 > - addl %ebx,%eax /* Adjust for endbr32 */ > #endif > jmp *%eax /* jump into loop */ > ALIGN (3) > L(oop): movl (%esi),%eax > sbbl (%edx),%eax > movl %eax,(%edi) > - _CET_ENDBR > movl 4(%esi),%eax > sbbl 4(%edx),%eax > movl %eax,4(%edi) > - _CET_ENDBR > movl 8(%esi),%eax > sbbl 8(%edx),%eax > movl %eax,8(%edi) > - _CET_ENDBR > movl 12(%esi),%eax > sbbl 12(%edx),%eax > movl %eax,12(%edi) > - _CET_ENDBR > movl 16(%esi),%eax > sbbl 16(%edx),%eax > movl %eax,16(%edi) > - _CET_ENDBR > movl 20(%esi),%eax > sbbl 20(%edx),%eax > movl %eax,20(%edi) > - _CET_ENDBR > movl 24(%esi),%eax > sbbl 24(%edx),%eax > movl %eax,24(%edi) > - _CET_ENDBR > movl 28(%esi),%eax > sbbl 28(%edx),%eax > movl %eax,28(%edi) > @@ -120,11 +100,6 @@ L(oop): movl (%esi),%eax > sbbl %eax,%eax > negl %eax > > -#if defined __CET__ && (__CET__ & 1) != 0 > - popl %ebx > - cfi_adjust_cfa_offset (-4) > - cfi_restore (ebx) > -#endif > popl %esi > cfi_adjust_cfa_offset (-4) > cfi_restore (esi) > diff --git a/sysdeps/i386/sysdep.h b/sysdeps/i386/sysdep.h > index 69c8b51487..86b5fdd6ae 100644 > --- a/sysdeps/i386/sysdep.h > +++ b/sysdeps/i386/sysdep.h > @@ -18,6 +18,8 @@ > > #include <sysdeps/x86/sysdep.h> > > +#define CET_ENABLED 0 > + > /* It is desirable that the names of PIC thunks match those used by > GCC so that multiple copies are eliminated by the linker. Because > GCC 4.6 and earlier use __i686 in the names, it is necessary to > @@ -37,6 +39,15 @@ > > /* Syntactic details of assembler. */ > > +/* Define an entry point visible from C. */ > +#define ENTRY_P2ALIGN(name, alignment) \ > + .globl C_SYMBOL_NAME(name); \ > + .type C_SYMBOL_NAME(name),@function; \ > + .align ALIGNARG(alignment); \ > + C_LABEL(name) \ > + cfi_startproc; \ > + CALL_MCOUNT > + > /* If compiled for profiling, call `mcount' at the start of each function. 
*/ > #ifdef PROF > /* The mcount code relies on a normal frame pointer being on the stack > diff --git a/sysdeps/unix/sysv/linux/i386/____longjmp_chk.S b/sysdeps/unix/sysv/linux/i386/____longjmp_chk.S > index 0c49010f93..35538f6df6 100644 > --- a/sysdeps/unix/sysv/linux/i386/____longjmp_chk.S > +++ b/sysdeps/unix/sysv/linux/i386/____longjmp_chk.S > @@ -18,14 +18,9 @@ > #include <sysdep.h> > #include <pointer_guard.h> > #include <jmpbuf-offsets.h> > -#include <jmp_buf-ssp.h> > #include <asm-syntax.h> > #include <stap-probe.h> > > -/* Don't restore shadow stack register if shadow stack isn't enabled. */ > -#if !SHSTK_ENABLED > -# undef SHADOW_STACK_POINTER_OFFSET > -#endif > > .section .rodata.str1.1,"aMS",@progbits,1 > .type longjmp_msg,@object > @@ -52,38 +47,6 @@ longjmp_msg: > ENTRY (____longjmp_chk) > movl 4(%esp), %ecx /* User's jmp_buf in %ecx. */ > > -#ifdef SHADOW_STACK_POINTER_OFFSET > -# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET > - /* Check if Shadow Stack is enabled. */ > - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET > - jz L(skip_ssp) > -# else > - xorl %edx, %edx > -# endif > - /* Check and adjust the Shadow-Stack-Pointer. */ > - rdsspd %edx > - /* And compare it with the saved ssp value. */ > - subl SHADOW_STACK_POINTER_OFFSET(%ecx), %edx > - je L(skip_ssp) > - /* Count the number of frames to adjust and adjust it > - with incssp instruction. The instruction can adjust > - the ssp by [0..255] value only thus use a loop if > - the number of frames is bigger than 255. */ > - negl %edx > - shrl $2, %edx > - /* NB: We saved Shadow-Stack-Pointer of setjmp. Since we are > - restoring Shadow-Stack-Pointer of setjmp's caller, we > - need to unwind shadow stack by one more frame. */ > - addl $1, %edx > - movl $255, %ebx > -L(loop): > - cmpl %ebx, %edx > - cmovb %edx, %ebx > - incsspd %ebx > - subl %ebx, %edx > - ja L(loop) > -L(skip_ssp): > -#endif > /* Save the return address now. */ > movl (JB_PC*4)(%ecx), %edx > /* Get the stack pointer. */ > diff --git a/sysdeps/unix/sysv/linux/i386/getcontext.S b/sysdeps/unix/sysv/linux/i386/getcontext.S > index 3202ac002e..9c1ca3c263 100644 > --- a/sysdeps/unix/sysv/linux/i386/getcontext.S > +++ b/sysdeps/unix/sysv/linux/i386/getcontext.S > @@ -17,7 +17,6 @@ > <https://www.gnu.org/licenses/>. */ > > #include <sysdep.h> > -#include <asm/prctl.h> > > #include "ucontext_i.h" > > @@ -42,61 +41,6 @@ ENTRY(__getcontext) > movw %fs, %dx > movl %edx, oFS(%eax) > > -#if SHSTK_ENABLED > - /* Check if shadow stack is enabled. */ > - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET > - jz L(no_shstk) > - > - /* Save EAX in EDX. */ > - movl %eax, %edx > - > - xorl %eax, %eax > - cmpl %gs:SSP_BASE_OFFSET, %eax > - jnz L(shadow_stack_bound_recorded) > - > - /* Save EBX in the first scratch register slot. */ > - movl %ebx, oSCRATCH1(%edx) > - > - /* Get the base address and size of the default shadow stack > - which must be the current shadow stack since nothing has > - been recorded yet. */ > - sub $24, %esp > - mov %esp, %ecx > - movl $ARCH_CET_STATUS, %ebx > - movl $__NR_arch_prctl, %eax > - ENTER_KERNEL > - testl %eax, %eax > - jz L(continue_no_err) > - > - /* This should never happen. */ > - hlt > - > -L(continue_no_err): > - /* Restore EBX from the first scratch register slot. */ > - movl oSCRATCH1(%edx), %ebx > - > - /* Record the base of the current shadow stack. */ > - movl 8(%esp), %eax > - movl %eax, %gs:SSP_BASE_OFFSET > - add $24, %esp > - > -L(shadow_stack_bound_recorded): > - /* Load address of the context data structure. 
*/ > - movl 4(%esp), %eax > - > - /* Get the current shadow stack pointer. */ > - rdsspd %edx > - /* NB: Save the caller's shadow stack so that we can jump back > - to the caller directly. */ > - addl $4, %edx > - movl %edx, oSSP(%eax) > - > - /* Save the current shadow stack base in ucontext. */ > - movl %gs:SSP_BASE_OFFSET, %edx > - movl %edx, (oSSP + 4)(%eax) > - > -L(no_shstk): > -#endif > /* We have separate floating-point register content memory on the > stack. We use the __fpregs_mem block in the context. Set the > links up correctly. */ > diff --git a/sysdeps/unix/sysv/linux/i386/makecontext.S b/sysdeps/unix/sysv/linux/i386/makecontext.S > index 814127d130..7ee56300fa 100644 > --- a/sysdeps/unix/sysv/linux/i386/makecontext.S > +++ b/sysdeps/unix/sysv/linux/i386/makecontext.S > @@ -17,7 +17,6 @@ > <https://www.gnu.org/licenses/>. */ > > #include <sysdep.h> > -#include <asm/prctl.h> > > #include "ucontext_i.h" > > @@ -68,127 +67,6 @@ ENTRY(__makecontext) > jnz 1b > 2: > > -#if SHSTK_ENABLED > - /* Check if Shadow Stack is enabled. */ > - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET > - jz L(skip_ssp) > - > - /* Reload the pointer to ucontext. */ > - movl 4(%esp), %eax > - > - /* Shadow stack is enabled. We need to allocate a new shadow > - stack. */ > - subl oSS_SP(%eax), %edx > - shrl $STACK_SIZE_TO_SHADOW_STACK_SIZE_SHIFT, %edx > - > - /* Align shadow stack size to 8 bytes. */ > - addl $7, %edx > - andl $-8, %edx > - > - /* Store shadow stack size in __ssp[2]. */ > - movl %edx, (oSSP + 8)(%eax) > - > - /* Save ESI in the second scratch register slot. */ > - movl %esi, oSCRATCH2(%eax) > - /* Save EDI in the third scratch register slot. */ > - movl %edi, oSCRATCH3(%eax) > - > - /* Save the pointer to ucontext. */ > - movl %eax, %edi > - > - /* Get the original shadow stack pointer. */ > - rdsspd %esi > - > - /* Align the saved original shadow stack pointer to the next > - 8 byte aligned boundary. */ > - andl $-8, %esi > - > - /* Load the top of the new stack into EDX. */ > - movl oESP(%eax), %edx > - > - /* We need to terminate the FDE here because the unwinder looks > - at ra-1 for unwind information. */ > - cfi_endproc > - > - /* Swap the original stack pointer with the top of the new > - stack. */ > - xchgl %esp, %edx > - > - /* Add 4 bytes since CALL will push the 4-byte return address > - onto stack. */ > - addl $4, %esp > - > - /* Allocate the new shadow stack. Save EBX in the first scratch > - register slot. */ > - movl %ebx, oSCRATCH1(%eax) > - > - /* CET syscall takes 64-bit sizes. */ > - subl $16, %esp > - movl (oSSP + 8)(%eax), %ecx > - movl %ecx, (%esp) > - movl $0, 4(%esp) > - movl %ecx, 8(%esp) > - movl $0, 12(%esp) > - movl %esp, %ecx > - > - movl $ARCH_CET_ALLOC_SHSTK, %ebx > - movl $__NR_arch_prctl, %eax > - ENTER_KERNEL > - testl %eax, %eax > - jne L(hlt) /* This should never happen. */ > - > - /* Copy the base address of the new shadow stack to __ssp[1]. */ > - movl (%esp), %eax > - movl %eax, (oSSP + 4)(%edi) > - > - addl $16, %esp > - > - /* Restore EBX from the first scratch register slot. */ > - movl oSCRATCH1(%edi), %ebx > - > - /* Get the size of the new shadow stack. */ > - movl (oSSP + 8)(%edi), %ecx > - > - /* Use the restore stoken to restore the new shadow stack. */ > - rstorssp -8(%eax, %ecx) > - > - /* Save the restore token at the next 8 byte aligned boundary > - on the original shadow stack. */ > - saveprevssp > - > - /* Push the address of "jmp exitcode" onto the new stack as > - well as the new shadow stack. 
*/ > - call 1f > - jmp L(exitcode) > -1: > - > - /* Get the new shadow stack pointer. */ > - rdsspd %eax > - > - /* Use the restore stoken to restore the original shadow stack. */ > - rstorssp -8(%esi) > - > - /* Save the restore token on the new shadow stack. */ > - saveprevssp > - > - /* Store the new shadow stack pointer in __ssp[0]. */ > - movl %eax, oSSP(%edi) > - > - /* Restore the original stack. */ > - mov %edx, %esp > - > - cfi_startproc > - > - /* Restore ESI from the second scratch register slot. */ > - movl oSCRATCH2(%edi), %esi > - /* Restore EDI from the third scratch register slot. */ > - movl oSCRATCH3(%edi), %edi > - > - ret > - > -L(skip_ssp): > -#endif > - > /* If the function we call returns we must continue with the > context which is given in the uc_link element. To do this > set the return address for the function the user provides > @@ -244,7 +122,6 @@ L(call_exit): > call HIDDEN_JUMPTARGET(exit) > /* The 'exit' call should never return. In case it does cause > the process to terminate. */ > -L(hlt): > hlt > cfi_startproc > END(__makecontext) > diff --git a/sysdeps/unix/sysv/linux/i386/setcontext.S b/sysdeps/unix/sysv/linux/i386/setcontext.S > index 966fcbee1e..b6d827d11f 100644 > --- a/sysdeps/unix/sysv/linux/i386/setcontext.S > +++ b/sysdeps/unix/sysv/linux/i386/setcontext.S > @@ -17,7 +17,6 @@ > <https://www.gnu.org/licenses/>. */ > > #include <sysdep.h> > -#include <asm/prctl.h> > > #include "ucontext_i.h" > > @@ -56,6 +55,9 @@ ENTRY(__setcontext) > movl oFS(%eax), %ecx > movw %cx, %fs > > + /* Fetch the address to return to. */ > + movl oEIP(%eax), %ecx > + > /* Load the new stack pointer. */ > cfi_def_cfa (eax, 0) > cfi_offset (edi, oEDI) > @@ -64,103 +66,6 @@ ENTRY(__setcontext) > cfi_offset (ebx, oEBX) > movl oESP(%eax), %esp > > -#if SHSTK_ENABLED > - /* Check if Shadow Stack is enabled. */ > - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET > - jz L(no_shstk) > - > - /* If the base of the target shadow stack is the same as the > - base of the current shadow stack, we unwind the shadow > - stack. Otherwise it is a stack switch and we look for a > - restore token. */ > - movl oSSP(%eax), %esi > - movl %esi, %edi > - > - /* Get the base of the target shadow stack. */ > - movl (oSSP + 4)(%eax), %ecx > - cmpl %gs:SSP_BASE_OFFSET, %ecx > - je L(unwind_shadow_stack) > - > - /* Align the saved original shadow stack pointer to the next > - 8 byte aligned boundary. */ > - andl $-8, %esi > - > -L(find_restore_token_loop): > - /* Look for a restore token. */ > - movl -8(%esi), %ebx > - andl $-8, %ebx > - cmpl %esi, %ebx > - je L(restore_shadow_stack) > - > - /* Try the next slot. */ > - subl $8, %esi > - jmp L(find_restore_token_loop) > - > -L(restore_shadow_stack): > - /* Pop return address from the shadow stack since setcontext > - will not return. */ > - movl $1, %ebx > - incsspd %ebx > - > - /* Use the restore stoken to restore the target shadow stack. */ > - rstorssp -8(%esi) > - > - /* Save the restore token on the old shadow stack. NB: This > - restore token may be checked by setcontext or swapcontext > - later. */ > - saveprevssp > - > - /* Record the new shadow stack base that was switched to. 
*/ > - movl (oSSP + 4)(%eax), %ebx > - movl %ebx, %gs:SSP_BASE_OFFSET > - > -L(unwind_shadow_stack): > - rdsspd %ebx > - subl %edi, %ebx > - je L(skip_unwind_shadow_stack) > - negl %ebx > - shrl $2, %ebx > - movl $255, %esi > -L(loop): > - cmpl %esi, %ebx > - cmovb %ebx, %esi > - incsspd %esi > - subl %esi, %ebx > - ja L(loop) > - > -L(skip_unwind_shadow_stack): > - > - /* Load the values of all the preserved registers (except ESP). */ > - movl oEDI(%eax), %edi > - movl oESI(%eax), %esi > - movl oEBP(%eax), %ebp > - movl oEBX(%eax), %ebx > - > - /* Get the return address set with getcontext. */ > - movl oEIP(%eax), %ecx > - > - /* Check if return address is valid for the case when setcontext > - is invoked from L(exitcode) with linked context. */ > - rdsspd %eax > - cmpl (%eax), %ecx > - /* Clear EAX to indicate success. NB: Don't use xorl to keep > - EFLAGS for jne. */ > - movl $0, %eax > - jne L(jmp) > - /* Return to the new context if return address valid. */ > - pushl %ecx > - ret > - > -L(jmp): > - /* Jump to the new context directly. */ > - jmp *%ecx > - > -L(no_shstk): > -#endif > - > - /* Fetch the address to return to. */ > - movl oEIP(%eax), %ecx > - > /* Push the return address on the new stack so we can return there. */ > pushl %ecx > > diff --git a/sysdeps/unix/sysv/linux/i386/swapcontext.S b/sysdeps/unix/sysv/linux/i386/swapcontext.S > index b8367f025e..bb736ae7d2 100644 > --- a/sysdeps/unix/sysv/linux/i386/swapcontext.S > +++ b/sysdeps/unix/sysv/linux/i386/swapcontext.S > @@ -17,7 +17,6 @@ > <https://www.gnu.org/licenses/>. */ > > #include <sysdep.h> > -#include <asm/prctl.h> > > #include "ucontext_i.h" > > @@ -76,144 +75,6 @@ ENTRY(__swapcontext) > movl oFS(%eax), %edx > movw %dx, %fs > > -#if SHSTK_ENABLED > - /* Check if Shadow Stack is enabled. */ > - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET > - jz L(no_shstk) > - > - xorl %eax, %eax > - cmpl %gs:SSP_BASE_OFFSET, %eax > - jnz L(shadow_stack_bound_recorded) > - > - /* Get the base address and size of the default shadow stack > - which must be the current shadow stack since nothing has > - been recorded yet. */ > - sub $24, %esp > - mov %esp, %ecx > - movl $ARCH_CET_STATUS, %ebx > - movl $__NR_arch_prctl, %eax > - ENTER_KERNEL > - testl %eax, %eax > - jz L(continue_no_err) > - > - /* This should never happen. */ > - hlt > - > -L(continue_no_err): > - /* Record the base of the current shadow stack. */ > - movl 8(%esp), %eax > - movl %eax, %gs:SSP_BASE_OFFSET > - add $24, %esp > - > -L(shadow_stack_bound_recorded): > - /* Load address of the context data structure we save in. */ > - movl 4(%esp), %eax > - > - /* Load address of the context data structure we swap in */ > - movl 8(%esp), %edx > - > - /* If we unwind the stack, we can't undo stack unwinding. Just > - save the target shadow stack pointer as the current shadow > - stack pointer. */ > - movl oSSP(%edx), %ecx > - movl %ecx, oSSP(%eax) > - > - /* Save the current shadow stack base in ucontext. */ > - movl %gs:SSP_BASE_OFFSET, %ecx > - movl %ecx, (oSSP + 4)(%eax) > - > - /* If the base of the target shadow stack is the same as the > - base of the current shadow stack, we unwind the shadow > - stack. Otherwise it is a stack switch and we look for a > - restore token. */ > - movl oSSP(%edx), %esi > - movl %esi, %edi > - > - /* Get the base of the target shadow stack. */ > - movl (oSSP + 4)(%edx), %ecx > - cmpl %gs:SSP_BASE_OFFSET, %ecx > - je L(unwind_shadow_stack) > - > - /* Align the saved original shadow stack pointer to the next > - 8 byte aligned boundary. 
*/ > - andl $-8, %esi > - > -L(find_restore_token_loop): > - /* Look for a restore token. */ > - movl -8(%esi), %ebx > - andl $-8, %ebx > - cmpl %esi, %ebx > - je L(restore_shadow_stack) > - > - /* Try the next slot. */ > - subl $8, %esi > - jmp L(find_restore_token_loop) > - > -L(restore_shadow_stack): > - /* The target shadow stack will be restored. Save the current > - shadow stack pointer. */ > - rdsspd %ecx > - movl %ecx, oSSP(%eax) > - > - /* Use the restore stoken to restore the target shadow stack. */ > - rstorssp -8(%esi) > - > - /* Save the restore token on the old shadow stack. NB: This > - restore token may be checked by setcontext or swapcontext > - later. */ > - saveprevssp > - > - /* Record the new shadow stack base that was switched to. */ > - movl (oSSP + 4)(%edx), %ebx > - movl %ebx, %gs:SSP_BASE_OFFSET > - > -L(unwind_shadow_stack): > - rdsspd %ebx > - subl %edi, %ebx > - je L(skip_unwind_shadow_stack) > - negl %ebx > - shrl $2, %ebx > - movl $255, %esi > -L(loop): > - cmpl %esi, %ebx > - cmovb %ebx, %esi > - incsspd %esi > - subl %esi, %ebx > - ja L(loop) > - > -L(skip_unwind_shadow_stack): > - > - /* Load the new stack pointer. */ > - movl oESP(%edx), %esp > - > - /* Load the values of all the preserved registers (except ESP). */ > - movl oEDI(%edx), %edi > - movl oESI(%edx), %esi > - movl oEBP(%edx), %ebp > - movl oEBX(%edx), %ebx > - > - /* Get the return address set with getcontext. */ > - movl oEIP(%edx), %ecx > - > - /* Check if return address is valid for the case when setcontext > - is invoked from L(exitcode) with linked context. */ > - rdsspd %eax > - cmpl (%eax), %ecx > - /* Clear EAX to indicate success. NB: Don't use xorl to keep > - EFLAGS for jne. */ > - movl $0, %eax > - jne L(jmp) > - /* Return to the new context if return address valid. */ > - pushl %ecx > - ret > - > -L(jmp): > - /* Jump to the new context directly. */ > - jmp *%ecx > - > -L(no_shstk): > -#endif > - > /* Fetch the address to return to. */ > movl oEIP(%eax), %ecx > > diff --git a/sysdeps/unix/sysv/linux/i386/sysdep.h b/sysdeps/unix/sysv/linux/i386/sysdep.h > index 516a85ce12..25852f894e 100644 > --- a/sysdeps/unix/sysv/linux/i386/sysdep.h > +++ b/sysdeps/unix/sysv/linux/i386/sysdep.h > @@ -446,9 +446,4 @@ struct libc_do_syscall_args > > #endif /* __ASSEMBLER__ */ > > -/* Each shadow stack slot takes 4 bytes. Assuming that each stack > - frame takes 128 bytes, this is used to compute shadow stack size > - from stack size. */ > -#define STACK_SIZE_TO_SHADOW_STACK_SIZE_SHIFT 5 > - > #endif /* linux/i386/sysdep.h */ > diff --git a/sysdeps/unix/sysv/linux/i386/ucontext_i.sym b/sysdeps/unix/sysv/linux/i386/ucontext_i.sym > index 1d8608eafc..1dfe03d2cc 100644 > --- a/sysdeps/unix/sysv/linux/i386/ucontext_i.sym > +++ b/sysdeps/unix/sysv/linux/i386/ucontext_i.sym > @@ -22,10 +22,6 @@ oEBP mreg (EBP) > oESP mreg (ESP) > oEBX mreg (EBX) > oEIP mreg (EIP) > -oSCRATCH1 mreg (EAX) > -oSCRATCH2 mreg (ECX) > -oSCRATCH3 mreg (EDX) > oFPREGS mcontext (fpregs) > oSIGMASK ucontext (uc_sigmask) > oFPREGSMEM ucontext (__fpregs_mem) > -oSSP ucontext (__ssp) > diff --git a/sysdeps/unix/sysv/linux/i386/vfork.S b/sysdeps/unix/sysv/linux/i386/vfork.S > index 80c2058f1e..8846b61b96 100644 > --- a/sysdeps/unix/sysv/linux/i386/vfork.S > +++ b/sysdeps/unix/sysv/linux/i386/vfork.S > @@ -20,6 +20,7 @@ > #include <bits/errno.h> > #include <tcb-offsets.h> > > + > /* Clone the calling process, but without copying the whole address space. 
> The calling process is suspended until the new process exits or is > replaced by a call to `execve'. Return -1 for errors, 0 to the new process, > @@ -46,29 +47,6 @@ ENTRY (__vfork) > /* Branch forward if it failed. */ > jae SYSCALL_ERROR_LABEL > > -#if SHSTK_ENABLED > - /* Check if shadow stack is in use. */ > - xorl %edx, %edx > - rdsspd %edx > - testl %edx, %edx > - /* Normal return if shadow stack isn't in use. */ > - je L(no_shstk) > - > - testl %eax, %eax > - /* In parent, normal return. */ > - jnz L(no_shstk) > - > - /* NB: In child, jump back to caller via indirect branch without > - popping shadow stack which is shared with parent. Keep shadow > - stack mismatched so that child returns in the vfork-calling > - function will trigger SIGSEGV. */ > - popl %ecx > - cfi_adjust_cfa_offset (-4) > - jmp *%ecx > - > -L(no_shstk): > -#endif > - > ret > > PSEUDO_END (__vfork) > diff --git a/sysdeps/x86/sysdep.h b/sysdeps/x86/sysdep.h > index 5a14ca5110..85d0a8c943 100644 > --- a/sysdeps/x86/sysdep.h > +++ b/sysdeps/x86/sysdep.h > @@ -21,33 +21,6 @@ > > #include <sysdeps/generic/sysdep.h> > > -/* __CET__ is defined by GCC with Control-Flow Protection values: > - > -enum cf_protection_level > -{ > - CF_NONE = 0, > - CF_BRANCH = 1 << 0, > - CF_RETURN = 1 << 1, > - CF_FULL = CF_BRANCH | CF_RETURN, > - CF_SET = 1 << 2 > -}; > -*/ > - > -/* Set if CF_BRANCH (IBT) is enabled. */ > -#define X86_FEATURE_1_IBT (1U << 0) > -/* Set if CF_RETURN (SHSTK) is enabled. */ > -#define X86_FEATURE_1_SHSTK (1U << 1) > - > -#ifdef __CET__ > -# define CET_ENABLED 1 > -# define IBT_ENABLED (__CET__ & X86_FEATURE_1_IBT) > -# define SHSTK_ENABLED (__CET__ & X86_FEATURE_1_SHSTK) > -#else > -# define CET_ENABLED 0 > -# define IBT_ENABLED 0 > -# define SHSTK_ENABLED 0 > -#endif > - > /* Offset for fxsave/xsave area used by _dl_runtime_resolve. Also need > space to preserve RCX, RDX, RSI, RDI, R8, R9 and RAX. It must be > aligned to 16 bytes for fxsave and 64 bytes for xsave. */ > @@ -66,27 +39,10 @@ enum cf_protection_level > > /* Syntactic details of assembler. */ > > -#ifdef _CET_ENDBR > -# define _CET_NOTRACK notrack > -#else > -# define _CET_ENDBR > -# define _CET_NOTRACK > -#endif > - > /* ELF uses byte-counts for .align, most others use log2 of count of bytes. */ > #define ALIGNARG(log2) 1<<log2 > #define ASM_SIZE_DIRECTIVE(name) .size name,.-name; > > -/* Define an entry point visible from C. */ > -#define ENTRY_P2ALIGN(name, alignment) \ > - .globl C_SYMBOL_NAME(name); \ > - .type C_SYMBOL_NAME(name),@function; \ > - .align ALIGNARG(alignment); \ > - C_LABEL(name) \ > - cfi_startproc; \ > - _CET_ENDBR; \ > - CALL_MCOUNT > - > /* Common entry 16 byte aligns. */ > #define ENTRY(name) ENTRY_P2ALIGN (name, 4) > > diff --git a/sysdeps/x86_64/sysdep.h b/sysdeps/x86_64/sysdep.h > index 3e7f4cbd8a..db6e36b2dd 100644 > --- a/sysdeps/x86_64/sysdep.h > +++ b/sysdeps/x86_64/sysdep.h > @@ -22,10 +22,52 @@ > #include <sysdeps/x86/sysdep.h> > #include <x86-lp_size.h> > > +/* __CET__ is defined by GCC with Control-Flow Protection values: > + > +enum cf_protection_level > +{ > + CF_NONE = 0, > + CF_BRANCH = 1 << 0, > + CF_RETURN = 1 << 1, > + CF_FULL = CF_BRANCH | CF_RETURN, > + CF_SET = 1 << 2 > +}; > +*/ > + > +/* Set if CF_BRANCH (IBT) is enabled. */ > +#define X86_FEATURE_1_IBT (1U << 0) > +/* Set if CF_RETURN (SHSTK) is enabled. 
*/ > +#define X86_FEATURE_1_SHSTK (1U << 1) > + > +#ifdef __CET__ > +# define CET_ENABLED 1 > +# define SHSTK_ENABLED (__CET__ & X86_FEATURE_1_SHSTK) > +#else > +# define CET_ENABLED 0 > +# define SHSTK_ENABLED 0 > +#endif > + > #ifdef __ASSEMBLER__ > > /* Syntactic details of assembler. */ > > +#ifdef _CET_ENDBR > +# define _CET_NOTRACK notrack > +#else > +# define _CET_ENDBR > +# define _CET_NOTRACK > +#endif > + > +/* Define an entry point visible from C. */ > +#define ENTRY_P2ALIGN(name, alignment) \ > + .globl C_SYMBOL_NAME(name); \ > + .type C_SYMBOL_NAME(name),@function; \ > + .align ALIGNARG(alignment); \ > + C_LABEL(name) \ > + cfi_startproc; \ > + _CET_ENDBR; \ > + CALL_MCOUNT > + > /* This macro is for setting proper CFI with DW_CFA_expression describing > the register as saved relative to %rsp instead of relative to the CFA. > Expression is DW_OP_drop, DW_OP_breg7 (%rsp is register 7), sleb128 offset > -- > 2.34.1 > LGTM. Thanks.
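For a quick picture of the net effect of the patch (a condensed paraphrase of the two sysdep.h hunks quoted above, not the verbatim headers): sysdeps/i386/sysdep.h now hardcodes CET_ENABLED to 0 and gains an ENTRY_P2ALIGN definition with no _CET_ENDBR, while the __CET__-derived feature macros and the _CET_ENDBR-bearing ENTRY_P2ALIGN move to sysdeps/x86_64/sysdep.h:

```c
/* Condensed sketch, paraphrasing the patch hunks above.  */

/* sysdeps/i386/sysdep.h: CET is never enabled for i386, and the i386
   ENTRY_P2ALIGN carries no _CET_ENDBR.  */
#define CET_ENABLED 0

/* sysdeps/x86_64/sysdep.h: feature bits derived from GCC's __CET__,
   as before the move; _CET_ENDBR stays in the x86_64 ENTRY_P2ALIGN.  */
#define X86_FEATURE_1_IBT   (1U << 0)
#define X86_FEATURE_1_SHSTK (1U << 1)

#ifdef __CET__
# define CET_ENABLED   1
# define SHSTK_ENABLED (__CET__ & X86_FEATURE_1_SHSTK)
#else
# define CET_ENABLED   0
# define SHSTK_ENABLED 0
#endif
```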
diff --git a/sysdeps/i386/__longjmp.S b/sysdeps/i386/__longjmp.S index 302c00ac62..bb83609d57 100644 --- a/sysdeps/i386/__longjmp.S +++ b/sysdeps/i386/__longjmp.S @@ -19,55 +19,14 @@ #include <sysdep.h> #include <pointer_guard.h> #include <jmpbuf-offsets.h> -#include <jmp_buf-ssp.h> #include <asm-syntax.h> #include <stap-probe.h> -/* Don't restore shadow stack register if - 1. Shadow stack isn't enabled. Or - 2. __longjmp is defined for __longjmp_cancel. - */ -#if !SHSTK_ENABLED || defined __longjmp -# undef SHADOW_STACK_POINTER_OFFSET -#endif - .text ENTRY (__longjmp) #ifdef PTR_DEMANGLE movl 4(%esp), %eax /* User's jmp_buf in %eax. */ -# ifdef SHADOW_STACK_POINTER_OFFSET -# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET - /* Check if Shadow Stack is enabled. */ - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET - jz L(skip_ssp) -# else - xorl %edx, %edx -# endif - /* Check and adjust the Shadow-Stack-Pointer. */ - rdsspd %edx - /* And compare it with the saved ssp value. */ - subl SHADOW_STACK_POINTER_OFFSET(%eax), %edx - je L(skip_ssp) - /* Count the number of frames to adjust and adjust it - with incssp instruction. The instruction can adjust - the ssp by [0..255] value only thus use a loop if - the number of frames is bigger than 255. */ - negl %edx - shrl $2, %edx - /* NB: We saved Shadow-Stack-Pointer of setjmp. Since we are - restoring Shadow-Stack-Pointer of setjmp's caller, we - need to unwind shadow stack by one more frame. */ - addl $1, %edx - movl $255, %ebx -L(loop): - cmpl %ebx, %edx - cmovb %edx, %ebx - incsspd %ebx - subl %ebx, %edx - ja L(loop) -L(skip_ssp): -# endif /* Save the return address now. */ movl (JB_PC*4)(%eax), %edx /* Get the stack pointer. */ @@ -98,38 +57,6 @@ L(skip_ssp): #else movl 4(%esp), %ecx /* User's jmp_buf in %ecx. */ movl 8(%esp), %eax /* Second argument is return value. */ -# ifdef SHADOW_STACK_POINTER_OFFSET -# if IS_IN (libc) && defined SHARED - /* Check if Shadow Stack is enabled. */ - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET - jz L(skip_ssp) -# endif - /* Check and adjust the Shadow-Stack-Pointer. */ - xorl %edx, %edx - /* Get the current ssp. */ - rdsspd %edx - /* And compare it with the saved ssp value. */ - subl SHADOW_STACK_POINTER_OFFSET(%ecx), %edx - je L(skip_ssp) - /* Count the number of frames to adjust and adjust it - with incssp instruction. The instruction can adjust - the ssp by [0..255] value only thus use a loop if - the number of frames is bigger than 255. */ - negl %edx - shrl $2, %edx - /* NB: We saved Shadow-Stack-Pointer of setjmp. Since we are - restoring Shadow-Stack-Pointer of setjmp's caller, we - need to unwind shadow stack by one more frame. */ - addl $1, %edx - movl $255, %ebx -L(loop): - cmpl %ebx, %edx - cmovb %edx, %ebx - incsspd %ebx - subl %ebx, %edx - ja L(loop) -L(skip_ssp): -# endif /* Save the return address now. */ movl (JB_PC*4)(%ecx), %edx LIBC_PROBE (longjmp, 3, 4@%ecx, -4@%eax, 4@%edx) diff --git a/sysdeps/i386/add_n.S b/sysdeps/i386/add_n.S index d4af6d92ec..c1b7098b1c 100644 --- a/sysdeps/i386/add_n.S +++ b/sysdeps/i386/add_n.S @@ -40,13 +40,6 @@ ENTRY (__mpn_add_n) cfi_rel_offset (esi, 0) movl S2(%esp),%edx movl SIZE(%esp),%ecx - -#if IBT_ENABLED - pushl %ebx - cfi_adjust_cfa_offset (4) - cfi_rel_offset (ebx, 0) -#endif - movl %ecx,%eax shrl $3,%ecx /* compute count for unrolled loop */ negl %eax @@ -58,9 +51,6 @@ ENTRY (__mpn_add_n) subl %eax,%esi /* ... by a constant when we ... */ subl %eax,%edx /* ... 
enter the loop */ shrl $2,%eax /* restore previous value */ -#if IBT_ENABLED - leal -4(,%eax,4),%ebx /* Count for 4-byte endbr32 */ -#endif #ifdef PIC /* Calculate start address in loop for PIC. Due to limitations in some assemblers, Loop-L0-3 cannot be put into the leal */ @@ -74,40 +64,30 @@ L(0): leal (%eax,%eax,8),%eax #else /* Calculate start address in loop for non-PIC. */ leal (L(oop) - 3)(%eax,%eax,8),%eax -#endif -#if IBT_ENABLED - addl %ebx,%eax /* Adjust for endbr32 */ #endif jmp *%eax /* jump into loop */ ALIGN (3) L(oop): movl (%esi),%eax adcl (%edx),%eax movl %eax,(%edi) - _CET_ENDBR movl 4(%esi),%eax adcl 4(%edx),%eax movl %eax,4(%edi) - _CET_ENDBR movl 8(%esi),%eax adcl 8(%edx),%eax movl %eax,8(%edi) - _CET_ENDBR movl 12(%esi),%eax adcl 12(%edx),%eax movl %eax,12(%edi) - _CET_ENDBR movl 16(%esi),%eax adcl 16(%edx),%eax movl %eax,16(%edi) - _CET_ENDBR movl 20(%esi),%eax adcl 20(%edx),%eax movl %eax,20(%edi) - _CET_ENDBR movl 24(%esi),%eax adcl 24(%edx),%eax movl %eax,24(%edi) - _CET_ENDBR movl 28(%esi),%eax adcl 28(%edx),%eax movl %eax,28(%edi) @@ -120,11 +100,6 @@ L(oop): movl (%esi),%eax sbbl %eax,%eax negl %eax -#if IBT_ENABLED - popl %ebx - cfi_adjust_cfa_offset (-4) - cfi_restore (ebx) -#endif popl %esi cfi_adjust_cfa_offset (-4) cfi_restore (esi) diff --git a/sysdeps/i386/bsd-_setjmp.S b/sysdeps/i386/bsd-_setjmp.S index eb3ac9c8e9..5fd671a477 100644 --- a/sysdeps/i386/bsd-_setjmp.S +++ b/sysdeps/i386/bsd-_setjmp.S @@ -23,18 +23,12 @@ #include <sysdep.h> #include <pointer_guard.h> #include <jmpbuf-offsets.h> -#include <jmp_buf-ssp.h> #include <stap-probe.h> #define PARMS 4 /* no space for saved regs */ #define JMPBUF PARMS #define SIGMSK JMPBUF+4 -/* Don't save shadow stack register if shadow stack isn't enabled. */ -#if !SHSTK_ENABLED -# undef SHADOW_STACK_POINTER_OFFSET -#endif - ENTRY (_setjmp) xorl %eax, %eax @@ -58,21 +52,6 @@ ENTRY (_setjmp) movl %ebp, (JB_BP*4)(%edx) /* Save caller's frame pointer. */ movl %eax, JB_SIZE(%edx) /* No signal mask set. */ -#ifdef SHADOW_STACK_POINTER_OFFSET -# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET - /* Check if Shadow Stack is enabled. */ - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET - jz L(skip_ssp) -# else - xorl %ecx, %ecx -# endif - /* Get the current Shadow-Stack-Pointer and save it. */ - rdsspd %ecx - movl %ecx, SHADOW_STACK_POINTER_OFFSET(%edx) -# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET -L(skip_ssp): -# endif -#endif ret END (_setjmp) libc_hidden_def (_setjmp) diff --git a/sysdeps/i386/bsd-setjmp.S b/sysdeps/i386/bsd-setjmp.S index c03f235d0f..13338a6480 100644 --- a/sysdeps/i386/bsd-setjmp.S +++ b/sysdeps/i386/bsd-setjmp.S @@ -23,18 +23,12 @@ #include <sysdep.h> #include <pointer_guard.h> #include <jmpbuf-offsets.h> -#include <jmp_buf-ssp.h> #include <stap-probe.h> #define PARMS 4 /* no space for saved regs */ #define JMPBUF PARMS #define SIGMSK JMPBUF+4 -/* Don't save shadow stack register if shadow stack isn't enabled. */ -#if !SHSTK_ENABLED -# undef SHADOW_STACK_POINTER_OFFSET -#endif - ENTRY (setjmp) /* Note that we have to use a non-exported symbol in the next jump since otherwise gas will emit it as a jump through the @@ -58,21 +52,6 @@ ENTRY (setjmp) #endif movl %ecx, (JB_PC*4)(%eax) movl %ebp, (JB_BP*4)(%eax) /* Save caller's frame pointer. */ -#ifdef SHADOW_STACK_POINTER_OFFSET -# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET - /* Check if Shadow Stack is enabled. 
*/ - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET - jz L(skip_ssp) -# else - xorl %ecx, %ecx -# endif - /* Get the current Shadow-Stack-Pointer and save it. */ - rdsspd %ecx - movl %ecx, SHADOW_STACK_POINTER_OFFSET(%eax) -# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET -L(skip_ssp): -# endif -#endif /* Call __sigjmp_save. */ pushl $1 diff --git a/sysdeps/i386/crti.S b/sysdeps/i386/crti.S index 71d19b698c..f9662eeb5a 100644 --- a/sysdeps/i386/crti.S +++ b/sysdeps/i386/crti.S @@ -61,7 +61,6 @@ .hidden _init .type _init, @function _init: - _CET_ENDBR pushl %ebx /* Maintain 16-byte stack alignment for called functions. */ subl $8, %esp @@ -82,7 +81,6 @@ _init: .hidden _fini .type _fini, @function _fini: - _CET_ENDBR pushl %ebx subl $8, %esp LOAD_PIC_REG (bx) diff --git a/sysdeps/i386/dl-tlsdesc.S b/sysdeps/i386/dl-tlsdesc.S index 318b82a561..90d93caa0c 100644 --- a/sysdeps/i386/dl-tlsdesc.S +++ b/sysdeps/i386/dl-tlsdesc.S @@ -37,7 +37,6 @@ cfi_startproc .align 16 _dl_tlsdesc_return: - _CET_ENDBR movl 4(%eax), %eax ret cfi_endproc @@ -59,7 +58,6 @@ _dl_tlsdesc_return: cfi_startproc .align 16 _dl_tlsdesc_undefweak: - _CET_ENDBR movl 4(%eax), %eax subl %gs:0, %eax ret @@ -101,7 +99,6 @@ _dl_tlsdesc_dynamic (struct tlsdesc *tdp) cfi_startproc .align 16 _dl_tlsdesc_dynamic: - _CET_ENDBR /* Like all TLS resolvers, preserve call-clobbered registers. We need two scratch regs anyway. */ subl $28, %esp diff --git a/sysdeps/i386/dl-trampoline.S b/sysdeps/i386/dl-trampoline.S index ecba034958..2632020145 100644 --- a/sysdeps/i386/dl-trampoline.S +++ b/sysdeps/i386/dl-trampoline.S @@ -26,7 +26,6 @@ .align 16 _dl_runtime_resolve: cfi_adjust_cfa_offset (8) - _CET_ENDBR pushl %eax # Preserve registers otherwise clobbered. cfi_adjust_cfa_offset (4) pushl %ecx @@ -53,7 +52,6 @@ _dl_runtime_resolve: .align 16 _dl_runtime_resolve_shstk: cfi_adjust_cfa_offset (8) - _CET_ENDBR pushl %eax # Preserve registers otherwise clobbered. cfi_adjust_cfa_offset (4) pushl %edx @@ -78,7 +76,6 @@ _dl_runtime_resolve_shstk: .align 16 _dl_runtime_profile_shstk: cfi_adjust_cfa_offset (8) - _CET_ENDBR pushl %esp cfi_adjust_cfa_offset (4) addl $8, (%esp) # Account for the pushed PLT data @@ -123,7 +120,6 @@ _dl_runtime_profile_shstk: .align 16 _dl_runtime_profile: cfi_adjust_cfa_offset (8) - _CET_ENDBR pushl %esp cfi_adjust_cfa_offset (4) addl $8, (%esp) # Account for the pushed PLT data diff --git a/sysdeps/i386/i386-mcount.S b/sysdeps/i386/i386-mcount.S index 8066649f8e..6082e1a7de 100644 --- a/sysdeps/i386/i386-mcount.S +++ b/sysdeps/i386/i386-mcount.S @@ -29,7 +29,6 @@ .type C_SYMBOL_NAME(_mcount), @function .align ALIGNARG(4) C_LABEL(_mcount) - _CET_ENDBR /* Save the caller-clobbered registers. */ pushl %eax pushl %ecx @@ -58,7 +57,6 @@ weak_alias (_mcount, mcount) .type C_SYMBOL_NAME(__fentry__), @function .align ALIGNARG(4) C_LABEL(__fentry__) - _CET_ENDBR /* Save the caller-clobbered registers. */ pushl %eax pushl %ecx diff --git a/sysdeps/i386/i686/add_n.S b/sysdeps/i386/i686/add_n.S index c2bc798187..bbc0cc71a2 100644 --- a/sysdeps/i386/i686/add_n.S +++ b/sysdeps/i386/i686/add_n.S @@ -44,13 +44,6 @@ ENTRY (__mpn_add_n) cfi_rel_offset (esi, 0) movl S2(%esp),%edx movl SIZE(%esp),%ecx - -#if IBT_ENABLED - pushl %ebx - cfi_adjust_cfa_offset (4) - cfi_rel_offset (ebx, 0) -#endif - movl %ecx,%eax shrl $3,%ecx /* compute count for unrolled loop */ negl %eax @@ -62,9 +55,6 @@ ENTRY (__mpn_add_n) subl %eax,%esi /* ... by a constant when we ... */ subl %eax,%edx /* ... 
enter the loop */ shrl $2,%eax /* restore previous value */ -#if IBT_ENABLED - leal -4(,%eax,4),%ebx /* Count for 4-byte endbr32 */ -#endif #ifdef PIC /* Calculate start address in loop for PIC. */ leal (L(oop)-L(0)-3)(%eax,%eax,8),%eax @@ -73,40 +63,30 @@ L(0): #else /* Calculate start address in loop for non-PIC. */ leal (L(oop) - 3)(%eax,%eax,8),%eax -#endif -#if IBT_ENABLED - addl %ebx,%eax /* Adjust for endbr32 */ #endif jmp *%eax /* jump into loop */ ALIGN (3) L(oop): movl (%esi),%eax adcl (%edx),%eax movl %eax,(%edi) - _CET_ENDBR movl 4(%esi),%eax adcl 4(%edx),%eax movl %eax,4(%edi) - _CET_ENDBR movl 8(%esi),%eax adcl 8(%edx),%eax movl %eax,8(%edi) - _CET_ENDBR movl 12(%esi),%eax adcl 12(%edx),%eax movl %eax,12(%edi) - _CET_ENDBR movl 16(%esi),%eax adcl 16(%edx),%eax movl %eax,16(%edi) - _CET_ENDBR movl 20(%esi),%eax adcl 20(%edx),%eax movl %eax,20(%edi) - _CET_ENDBR movl 24(%esi),%eax adcl 24(%edx),%eax movl %eax,24(%edi) - _CET_ENDBR movl 28(%esi),%eax adcl 28(%edx),%eax movl %eax,28(%edi) @@ -119,11 +99,6 @@ L(oop): movl (%esi),%eax sbbl %eax,%eax negl %eax -#if IBT_ENABLED - popl %ebx - cfi_adjust_cfa_offset (-4) - cfi_restore (ebx) -#endif popl %esi cfi_adjust_cfa_offset (-4) cfi_restore (esi) diff --git a/sysdeps/i386/i686/memcmp.S b/sysdeps/i386/i686/memcmp.S index 94600f5e53..0738ee94fd 100644 --- a/sysdeps/i386/i686/memcmp.S +++ b/sysdeps/i386/i686/memcmp.S @@ -80,7 +80,7 @@ L(not_1): LOAD_JUMP_TABLE_ENTRY (L(table_32bytes), %ecx) addl %ecx, %edx addl %ecx, %esi - _CET_NOTRACK jmp *%ebx + jmp *%ebx ALIGN (4) L(28bytes): @@ -326,7 +326,7 @@ L(32bytesormore): LOAD_JUMP_TABLE_ENTRY (L(table_32bytes), %ecx) addl %ecx, %edx addl %ecx, %esi - _CET_NOTRACK jmp *%ebx + jmp *%ebx L(load_ecx_28): addl $0x4, %edx diff --git a/sysdeps/i386/i686/multiarch/memcmp-sse4.S b/sysdeps/i386/i686/multiarch/memcmp-sse4.S index f0b2d8429f..2cdda5f80d 100644 --- a/sysdeps/i386/i686/multiarch/memcmp-sse4.S +++ b/sysdeps/i386/i686/multiarch/memcmp-sse4.S @@ -58,7 +58,7 @@ absolute address. */ \ addl (%ebx,INDEX,SCALE), %ebx; \ /* We loaded the jump table and adjusted EDX/ESI. Go. */ \ - _CET_NOTRACK jmp *%ebx + jmp *%ebx # else # define JMPTBL(I, B) I @@ -66,7 +66,7 @@ jump table with relative offsets. INDEX is a register contains the index into the jump table. SCALE is the scale of INDEX. */ # define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \ - _CET_NOTRACK jmp *TABLE(,INDEX,SCALE) + jmp *TABLE(,INDEX,SCALE) # endif diff --git a/sysdeps/i386/i686/multiarch/memcpy-ssse3-rep.S b/sysdeps/i386/i686/multiarch/memcpy-ssse3-rep.S index abdde55001..a7e80dcd90 100644 --- a/sysdeps/i386/i686/multiarch/memcpy-ssse3-rep.S +++ b/sysdeps/i386/i686/multiarch/memcpy-ssse3-rep.S @@ -64,7 +64,7 @@ absolute address. */ \ addl (%ebx,INDEX,SCALE), %ebx; \ /* We loaded the jump table. Go. */ \ - _CET_NOTRACK jmp *%ebx + jmp *%ebx # define BRANCH_TO_JMPTBL_ENTRY_VALUE(TABLE) \ addl $(TABLE - .), %ebx @@ -72,7 +72,7 @@ # define BRANCH_TO_JMPTBL_ENTRY_TAIL(TABLE, INDEX, SCALE) \ addl (%ebx,INDEX,SCALE), %ebx; \ /* We loaded the jump table. Go. */ \ - _CET_NOTRACK jmp *%ebx + jmp *%ebx #else # define PARMS 4 # define ENTRANCE @@ -84,12 +84,12 @@ absolute offsets. INDEX is a register contains the index into the jump table. SCALE is the scale of INDEX. 
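
The IBT_ENABLED blocks and jump-table tweaks being removed in add_n.S and memcmp.S all revolve around the same pattern: an index selects an entry in a table of code addresses (or an offset into an unrolled loop) and control transfers there with an indirect jump, which IBT then wants to police. As a plain-C illustration of that dispatch technique (GCC's labels-as-values extension; not glibc code):

  #include <stdio.h>

  /* Same idea as the i386 memcmp jump tables and the "jump into the
     unrolled loop" in add_n.S/sub_n.S: pick a code address from a table
     and jump to it indirectly.  Under IBT every such jump needs either an
     endbr32 at each landing point or a notrack prefix on the jump itself,
     which is what the deleted lines provided.  */
  static int
  dispatch (unsigned tail)
  {
    static void *table[] = { &&bytes0, &&bytes1, &&bytes2, &&bytes3 };
    int handled = 0;

    goto *table[tail & 3];        /* indirect jump through the table */

  bytes3: handled++;              /* fall through, like the real code */
  bytes2: handled++;
  bytes1: handled++;
  bytes0: return handled;
  }

  int
  main (void)
  {
    for (unsigned i = 0; i < 4; i++)
      printf ("tail %u -> %d slots handled\n", i, dispatch (i));
    return 0;
  }
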
*/ # define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \ - _CET_NOTRACK jmp *TABLE(,INDEX,SCALE) + jmp *TABLE(,INDEX,SCALE) # define BRANCH_TO_JMPTBL_ENTRY_VALUE(TABLE) # define BRANCH_TO_JMPTBL_ENTRY_TAIL(TABLE, INDEX, SCALE) \ - _CET_NOTRACK jmp *TABLE(,INDEX,SCALE) + jmp *TABLE(,INDEX,SCALE) #endif .section .text.ssse3,"ax",@progbits diff --git a/sysdeps/i386/i686/multiarch/memcpy-ssse3.S b/sysdeps/i386/i686/multiarch/memcpy-ssse3.S index 60cc5f14ea..713c5bdb76 100644 --- a/sysdeps/i386/i686/multiarch/memcpy-ssse3.S +++ b/sysdeps/i386/i686/multiarch/memcpy-ssse3.S @@ -64,7 +64,7 @@ absolute address. */ \ addl (%ebx, INDEX, SCALE), %ebx; \ /* We loaded the jump table. Go. */ \ - _CET_NOTRACK jmp *%ebx + jmp *%ebx # else # define PARMS 4 @@ -78,7 +78,7 @@ jump table. SCALE is the scale of INDEX. */ # define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \ - _CET_NOTRACK jmp *TABLE(, INDEX, SCALE) + jmp *TABLE(, INDEX, SCALE) # endif .section .text.ssse3,"ax",@progbits diff --git a/sysdeps/i386/i686/multiarch/memset-sse2-rep.S b/sysdeps/i386/i686/multiarch/memset-sse2-rep.S index 52d046d12b..d1a047319c 100644 --- a/sysdeps/i386/i686/multiarch/memset-sse2-rep.S +++ b/sysdeps/i386/i686/multiarch/memset-sse2-rep.S @@ -56,7 +56,7 @@ add (%ebx,%ecx,4), %ebx; \ add %ecx, %edx; \ /* We loaded the jump table and adjusted EDX. Go. */ \ - _CET_NOTRACK jmp *%ebx + jmp *%ebx #else # define ENTRANCE # define RETURN_END ret @@ -68,7 +68,7 @@ absolute offsets. */ # define BRANCH_TO_JMPTBL_ENTRY(TABLE) \ add %ecx, %edx; \ - _CET_NOTRACK jmp *TABLE(,%ecx,4) + jmp *TABLE(,%ecx,4) #endif .section .text.sse2,"ax",@progbits diff --git a/sysdeps/i386/i686/multiarch/memset-sse2.S b/sysdeps/i386/i686/multiarch/memset-sse2.S index ac21fcb00b..2e00743477 100644 --- a/sysdeps/i386/i686/multiarch/memset-sse2.S +++ b/sysdeps/i386/i686/multiarch/memset-sse2.S @@ -56,7 +56,7 @@ add (%ebx,%ecx,4), %ebx; \ add %ecx, %edx; \ /* We loaded the jump table and adjusted EDX. Go. */ \ - _CET_NOTRACK jmp *%ebx + jmp *%ebx #else # define ENTRANCE # define RETURN_END ret @@ -68,7 +68,7 @@ absolute offsets. */ # define BRANCH_TO_JMPTBL_ENTRY(TABLE) \ add %ecx, %edx; \ - _CET_NOTRACK jmp *TABLE(,%ecx,4) + jmp *TABLE(,%ecx,4) #endif .section .text.sse2,"ax",@progbits diff --git a/sysdeps/i386/i686/multiarch/strcat-sse2.S b/sysdeps/i386/i686/multiarch/strcat-sse2.S index 7ac4827bf7..682f43ff5f 100644 --- a/sysdeps/i386/i686/multiarch/strcat-sse2.S +++ b/sysdeps/i386/i686/multiarch/strcat-sse2.S @@ -49,7 +49,7 @@ absolute address. */ \ addl (%ecx,INDEX,SCALE), %ecx; \ /* We loaded the jump table and adjusted ECX. Go. */ \ - _CET_NOTRACK jmp *%ecx + jmp *%ecx # else # define JMPTBL(I, B) I @@ -58,7 +58,7 @@ jump table. SCALE is the scale of INDEX. */ # define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \ - _CET_NOTRACK jmp *TABLE(,INDEX,SCALE) + jmp *TABLE(,INDEX,SCALE) # endif # ifndef STRCAT diff --git a/sysdeps/i386/i686/multiarch/strcpy-sse2.S b/sysdeps/i386/i686/multiarch/strcpy-sse2.S index 5c358e5e7e..a18a8ca5ba 100644 --- a/sysdeps/i386/i686/multiarch/strcpy-sse2.S +++ b/sysdeps/i386/i686/multiarch/strcpy-sse2.S @@ -64,7 +64,7 @@ absolute address. */ \ addl (%ecx,INDEX,SCALE), %ecx; \ /* We loaded the jump table and adjusted ECX. Go. */ \ - _CET_NOTRACK jmp *%ecx + jmp *%ecx # else # define JMPTBL(I, B) I @@ -73,7 +73,7 @@ jump table. SCALE is the scale of INDEX. 
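
All the _CET_NOTRACK removals in these string/memory routines are the other half of the same IBT story: rather than planting endbr32 in every jump-table target, the indirect jmp itself carried a notrack prefix so the branch tracker ignores it. The C-level analogue is, as far as I know, GCC's nocf_check attribute; a sketch under that assumption:

  /* Build with e.g.:  gcc -m32 -fcf-protection=branch -S notrack-demo.c
     Calls through the nocf_check pointer type should be emitted with a
     notrack prefix, and their targets then do not need endbr32 -- the
     same thing the deleted _CET_NOTRACK macro spelled out by hand.  */
  typedef int (*plain_fn) (int);
  typedef int (*untracked_fn) (int) __attribute__ ((nocf_check));

  static int
  twice (int x)
  {
    return 2 * x;
  }

  int
  call_tracked (plain_fn fn, int x)
  {
    return fn (x);             /* tracked: target must start with endbr32 */
  }

  int
  call_untracked (untracked_fn fn, int x)
  {
    return fn (x);             /* emitted as a notrack indirect call */
  }

  int
  main (void)
  {
    int sum = call_tracked (twice, 3) + call_untracked ((untracked_fn) twice, 4);
    return sum == 14 ? 0 : 1;
  }
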
*/ # define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \ - _CET_NOTRACK jmp *TABLE(,INDEX,SCALE) + jmp *TABLE(,INDEX,SCALE) # endif .text diff --git a/sysdeps/i386/setjmp.S b/sysdeps/i386/setjmp.S index ab1a8090cb..08a98f2388 100644 --- a/sysdeps/i386/setjmp.S +++ b/sysdeps/i386/setjmp.S @@ -19,7 +19,6 @@ #include <sysdep.h> #include <pointer_guard.h> #include <jmpbuf-offsets.h> -#include <jmp_buf-ssp.h> #include <asm-syntax.h> #include <stap-probe.h> @@ -27,11 +26,6 @@ #define JMPBUF PARMS #define SIGMSK JMPBUF+4 -/* Don't save shadow stack register if shadow stack isn't enabled. */ -#if !SHSTK_ENABLED -# undef SHADOW_STACK_POINTER_OFFSET -#endif - ENTRY (__sigsetjmp) movl JMPBUF(%esp), %eax @@ -53,21 +47,6 @@ ENTRY (__sigsetjmp) movl %ecx, (JB_PC*4)(%eax) movl %ebp, (JB_BP*4)(%eax) /* Save caller's frame pointer. */ -#ifdef SHADOW_STACK_POINTER_OFFSET -# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET - /* Check if Shadow Stack is enabled. */ - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET - jz L(skip_ssp) -# else - xorl %ecx, %ecx -# endif - /* Get the current Shadow-Stack-Pointer and save it. */ - rdsspd %ecx - movl %ecx, SHADOW_STACK_POINTER_OFFSET(%eax) -# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET -L(skip_ssp): -# endif -#endif #if IS_IN (rtld) /* In ld.so we never save the signal mask. */ xorl %eax, %eax diff --git a/sysdeps/i386/start.S b/sysdeps/i386/start.S index e042ed156c..30577176f8 100644 --- a/sysdeps/i386/start.S +++ b/sysdeps/i386/start.S @@ -132,7 +132,6 @@ ENTRY (_start) #if defined PIC && !defined SHARED __wrap_main: - _CET_ENDBR jmp main@PLT #endif END (_start) diff --git a/sysdeps/i386/sub_n.S b/sysdeps/i386/sub_n.S index 3ebe984672..c111bf3f03 100644 --- a/sysdeps/i386/sub_n.S +++ b/sysdeps/i386/sub_n.S @@ -40,13 +40,6 @@ ENTRY (__mpn_sub_n) cfi_rel_offset (esi, 0) movl S2(%esp),%edx movl SIZE(%esp),%ecx - -#if IBT_ENABLED - pushl %ebx - cfi_adjust_cfa_offset (4) - cfi_rel_offset (ebx, 0) -#endif - movl %ecx,%eax shrl $3,%ecx /* compute count for unrolled loop */ negl %eax @@ -58,9 +51,6 @@ ENTRY (__mpn_sub_n) subl %eax,%esi /* ... by a constant when we ... */ subl %eax,%edx /* ... enter the loop */ shrl $2,%eax /* restore previous value */ -#if defined __CET__ && (__CET__ & 1) != 0 - leal -4(,%eax,4),%ebx /* Count for 4-byte endbr32 */ -#endif #ifdef PIC /* Calculate start address in loop for PIC. Due to limitations in some assemblers, Loop-L0-3 cannot be put into the leal */ @@ -74,40 +64,30 @@ L(0): leal (%eax,%eax,8),%eax #else /* Calculate start address in loop for non-PIC. 
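
One small inconsistency visible in the code being deleted: add_n.S tested IBT_ENABLED while sub_n.S open-coded "defined __CET__ && (__CET__ & 1) != 0"; both test the CF_BRANCH bit of GCC's __CET__ macro, whose bit layout is quoted verbatim in the x86 sysdep.h hunk further down. A quick way to see the bits, for reference only:

  /* Build with e.g.:  gcc -m32 -fcf-protection=full cet-bits.c
     __CET__ then carries CF_BRANCH (bit 0, IBT) and CF_RETURN (bit 1,
     SHSTK), which is exactly what the removed IBT_ENABLED/SHSTK_ENABLED
     macros decomposed it into.  */
  #include <stdio.h>

  int
  main (void)
  {
  #ifdef __CET__
    printf ("__CET__ = %d, IBT = %d, SHSTK = %d\n",
            __CET__, __CET__ & 1, (__CET__ >> 1) & 1);
  #else
    puts ("compiled without -fcf-protection");
  #endif
    return 0;
  }
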
*/ leal (L(oop) - 3)(%eax,%eax,8),%eax -#endif -#if defined __CET__ && (__CET__ & 1) != 0 - addl %ebx,%eax /* Adjust for endbr32 */ #endif jmp *%eax /* jump into loop */ ALIGN (3) L(oop): movl (%esi),%eax sbbl (%edx),%eax movl %eax,(%edi) - _CET_ENDBR movl 4(%esi),%eax sbbl 4(%edx),%eax movl %eax,4(%edi) - _CET_ENDBR movl 8(%esi),%eax sbbl 8(%edx),%eax movl %eax,8(%edi) - _CET_ENDBR movl 12(%esi),%eax sbbl 12(%edx),%eax movl %eax,12(%edi) - _CET_ENDBR movl 16(%esi),%eax sbbl 16(%edx),%eax movl %eax,16(%edi) - _CET_ENDBR movl 20(%esi),%eax sbbl 20(%edx),%eax movl %eax,20(%edi) - _CET_ENDBR movl 24(%esi),%eax sbbl 24(%edx),%eax movl %eax,24(%edi) - _CET_ENDBR movl 28(%esi),%eax sbbl 28(%edx),%eax movl %eax,28(%edi) @@ -120,11 +100,6 @@ L(oop): movl (%esi),%eax sbbl %eax,%eax negl %eax -#if defined __CET__ && (__CET__ & 1) != 0 - popl %ebx - cfi_adjust_cfa_offset (-4) - cfi_restore (ebx) -#endif popl %esi cfi_adjust_cfa_offset (-4) cfi_restore (esi) diff --git a/sysdeps/i386/sysdep.h b/sysdeps/i386/sysdep.h index 69c8b51487..86b5fdd6ae 100644 --- a/sysdeps/i386/sysdep.h +++ b/sysdeps/i386/sysdep.h @@ -18,6 +18,8 @@ #include <sysdeps/x86/sysdep.h> +#define CET_ENABLED 0 + /* It is desirable that the names of PIC thunks match those used by GCC so that multiple copies are eliminated by the linker. Because GCC 4.6 and earlier use __i686 in the names, it is necessary to @@ -37,6 +39,15 @@ /* Syntactic details of assembler. */ +/* Define an entry point visible from C. */ +#define ENTRY_P2ALIGN(name, alignment) \ + .globl C_SYMBOL_NAME(name); \ + .type C_SYMBOL_NAME(name),@function; \ + .align ALIGNARG(alignment); \ + C_LABEL(name) \ + cfi_startproc; \ + CALL_MCOUNT + /* If compiled for profiling, call `mcount' at the start of each function. */ #ifdef PROF /* The mcount code relies on a normal frame pointer being on the stack diff --git a/sysdeps/unix/sysv/linux/i386/____longjmp_chk.S b/sysdeps/unix/sysv/linux/i386/____longjmp_chk.S index 0c49010f93..35538f6df6 100644 --- a/sysdeps/unix/sysv/linux/i386/____longjmp_chk.S +++ b/sysdeps/unix/sysv/linux/i386/____longjmp_chk.S @@ -18,14 +18,9 @@ #include <sysdep.h> #include <pointer_guard.h> #include <jmpbuf-offsets.h> -#include <jmp_buf-ssp.h> #include <asm-syntax.h> #include <stap-probe.h> -/* Don't restore shadow stack register if shadow stack isn't enabled. */ -#if !SHSTK_ENABLED -# undef SHADOW_STACK_POINTER_OFFSET -#endif .section .rodata.str1.1,"aMS",@progbits,1 .type longjmp_msg,@object @@ -52,38 +47,6 @@ longjmp_msg: ENTRY (____longjmp_chk) movl 4(%esp), %ecx /* User's jmp_buf in %ecx. */ -#ifdef SHADOW_STACK_POINTER_OFFSET -# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET - /* Check if Shadow Stack is enabled. */ - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET - jz L(skip_ssp) -# else - xorl %edx, %edx -# endif - /* Check and adjust the Shadow-Stack-Pointer. */ - rdsspd %edx - /* And compare it with the saved ssp value. */ - subl SHADOW_STACK_POINTER_OFFSET(%ecx), %edx - je L(skip_ssp) - /* Count the number of frames to adjust and adjust it - with incssp instruction. The instruction can adjust - the ssp by [0..255] value only thus use a loop if - the number of frames is bigger than 255. */ - negl %edx - shrl $2, %edx - /* NB: We saved Shadow-Stack-Pointer of setjmp. Since we are - restoring Shadow-Stack-Pointer of setjmp's caller, we - need to unwind shadow stack by one more frame. 
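
The ____longjmp_chk.S hunk above drops the incssp unwinding loop. Since incsspd can only pop up to 255 shadow-stack entries per instruction, larger adjustments had to loop; a C sketch of the same idea, using what I believe are GCC's -mshstk intrinsics, purely to document the removed logic:

  #include <immintrin.h>   /* _get_ssp(), _inc_ssp(); build with -mshstk */
  #include <stdint.h>

  /* 'target_ssp' would be the value setjmp saved; the real code also
     popped one extra frame (the addl $1, %edx above) because it restores
     the shadow stack of setjmp's caller, not of setjmp itself.  */
  static void
  demo_unwind_shadow_stack (uintptr_t target_ssp)
  {
    uintptr_t ssp = (uintptr_t) _get_ssp ();
    if (ssp == 0 || target_ssp <= ssp)
      return;                       /* shadow stacks off, or nothing to do */

    uintptr_t frames = (target_ssp - ssp) / sizeof (uintptr_t);
    while (frames > 0)
      {
        unsigned int chunk = frames > 255 ? 255 : (unsigned int) frames;
        _inc_ssp (chunk);           /* pops 'chunk' entries, at most 255 */
        frames -= chunk;
      }
  }
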
*/ - addl $1, %edx - movl $255, %ebx -L(loop): - cmpl %ebx, %edx - cmovb %edx, %ebx - incsspd %ebx - subl %ebx, %edx - ja L(loop) -L(skip_ssp): -#endif /* Save the return address now. */ movl (JB_PC*4)(%ecx), %edx /* Get the stack pointer. */ diff --git a/sysdeps/unix/sysv/linux/i386/getcontext.S b/sysdeps/unix/sysv/linux/i386/getcontext.S index 3202ac002e..9c1ca3c263 100644 --- a/sysdeps/unix/sysv/linux/i386/getcontext.S +++ b/sysdeps/unix/sysv/linux/i386/getcontext.S @@ -17,7 +17,6 @@ <https://www.gnu.org/licenses/>. */ #include <sysdep.h> -#include <asm/prctl.h> #include "ucontext_i.h" @@ -42,61 +41,6 @@ ENTRY(__getcontext) movw %fs, %dx movl %edx, oFS(%eax) -#if SHSTK_ENABLED - /* Check if shadow stack is enabled. */ - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET - jz L(no_shstk) - - /* Save EAX in EDX. */ - movl %eax, %edx - - xorl %eax, %eax - cmpl %gs:SSP_BASE_OFFSET, %eax - jnz L(shadow_stack_bound_recorded) - - /* Save EBX in the first scratch register slot. */ - movl %ebx, oSCRATCH1(%edx) - - /* Get the base address and size of the default shadow stack - which must be the current shadow stack since nothing has - been recorded yet. */ - sub $24, %esp - mov %esp, %ecx - movl $ARCH_CET_STATUS, %ebx - movl $__NR_arch_prctl, %eax - ENTER_KERNEL - testl %eax, %eax - jz L(continue_no_err) - - /* This should never happen. */ - hlt - -L(continue_no_err): - /* Restore EBX from the first scratch register slot. */ - movl oSCRATCH1(%edx), %ebx - - /* Record the base of the current shadow stack. */ - movl 8(%esp), %eax - movl %eax, %gs:SSP_BASE_OFFSET - add $24, %esp - -L(shadow_stack_bound_recorded): - /* Load address of the context data structure. */ - movl 4(%esp), %eax - - /* Get the current shadow stack pointer. */ - rdsspd %edx - /* NB: Save the caller's shadow stack so that we can jump back - to the caller directly. */ - addl $4, %edx - movl %edx, oSSP(%eax) - - /* Save the current shadow stack base in ucontext. */ - movl %gs:SSP_BASE_OFFSET, %edx - movl %edx, (oSSP + 4)(%eax) - -L(no_shstk): -#endif /* We have separate floating-point register content memory on the stack. We use the __fpregs_mem block in the context. Set the links up correctly. */ diff --git a/sysdeps/unix/sysv/linux/i386/makecontext.S b/sysdeps/unix/sysv/linux/i386/makecontext.S index 814127d130..7ee56300fa 100644 --- a/sysdeps/unix/sysv/linux/i386/makecontext.S +++ b/sysdeps/unix/sysv/linux/i386/makecontext.S @@ -17,7 +17,6 @@ <https://www.gnu.org/licenses/>. */ #include <sysdep.h> -#include <asm/prctl.h> #include "ucontext_i.h" @@ -68,127 +67,6 @@ ENTRY(__makecontext) jnz 1b 2: -#if SHSTK_ENABLED - /* Check if Shadow Stack is enabled. */ - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET - jz L(skip_ssp) - - /* Reload the pointer to ucontext. */ - movl 4(%esp), %eax - - /* Shadow stack is enabled. We need to allocate a new shadow - stack. */ - subl oSS_SP(%eax), %edx - shrl $STACK_SIZE_TO_SHADOW_STACK_SIZE_SHIFT, %edx - - /* Align shadow stack size to 8 bytes. */ - addl $7, %edx - andl $-8, %edx - - /* Store shadow stack size in __ssp[2]. */ - movl %edx, (oSSP + 8)(%eax) - - /* Save ESI in the second scratch register slot. */ - movl %esi, oSCRATCH2(%eax) - /* Save EDI in the third scratch register slot. */ - movl %edi, oSCRATCH3(%eax) - - /* Save the pointer to ucontext. */ - movl %eax, %edi - - /* Get the original shadow stack pointer. */ - rdsspd %esi - - /* Align the saved original shadow stack pointer to the next - 8 byte aligned boundary. */ - andl $-8, %esi - - /* Load the top of the new stack into EDX. 
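
For reference, the getcontext/swapcontext code being deleted queried the kernel with arch_prctl (ARCH_CET_STATUS) into a 24-byte buffer and cached the shadow-stack base at %gs:SSP_BASE_OFFSET. That interface comes from the old CET kernel series that, as far as I know, never landed upstream in this form; only the base word at offset 8 is certain from the removed assembly, the other two field names below are my assumption:

  #include <assert.h>
  #include <stddef.h>

  /* Shape of the buffer the deleted code passed to the prctl (sub $24,
     %esp in getcontext/swapcontext); shstk_base is what was read back
     from 8(%esp) and cached in the TCB.  Documentation only.  */
  struct cet_status
  {
    unsigned long long features;     /* assumed: enabled CET feature bits */
    unsigned long long shstk_base;   /* base of the current shadow stack */
    unsigned long long shstk_size;   /* assumed: size of that shadow stack */
  };

  static_assert (offsetof (struct cet_status, shstk_base) == 8,
                 "base sits at the 8(%esp) slot the removed assembly read");
  static_assert (sizeof (struct cet_status) == 24,
                 "matches the sub $24, %esp in the removed code");
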
*/ - movl oESP(%eax), %edx - - /* We need to terminate the FDE here because the unwinder looks - at ra-1 for unwind information. */ - cfi_endproc - - /* Swap the original stack pointer with the top of the new - stack. */ - xchgl %esp, %edx - - /* Add 4 bytes since CALL will push the 4-byte return address - onto stack. */ - addl $4, %esp - - /* Allocate the new shadow stack. Save EBX in the first scratch - register slot. */ - movl %ebx, oSCRATCH1(%eax) - - /* CET syscall takes 64-bit sizes. */ - subl $16, %esp - movl (oSSP + 8)(%eax), %ecx - movl %ecx, (%esp) - movl $0, 4(%esp) - movl %ecx, 8(%esp) - movl $0, 12(%esp) - movl %esp, %ecx - - movl $ARCH_CET_ALLOC_SHSTK, %ebx - movl $__NR_arch_prctl, %eax - ENTER_KERNEL - testl %eax, %eax - jne L(hlt) /* This should never happen. */ - - /* Copy the base address of the new shadow stack to __ssp[1]. */ - movl (%esp), %eax - movl %eax, (oSSP + 4)(%edi) - - addl $16, %esp - - /* Restore EBX from the first scratch register slot. */ - movl oSCRATCH1(%edi), %ebx - - /* Get the size of the new shadow stack. */ - movl (oSSP + 8)(%edi), %ecx - - /* Use the restore stoken to restore the new shadow stack. */ - rstorssp -8(%eax, %ecx) - - /* Save the restore token at the next 8 byte aligned boundary - on the original shadow stack. */ - saveprevssp - - /* Push the address of "jmp exitcode" onto the new stack as - well as the new shadow stack. */ - call 1f - jmp L(exitcode) -1: - - /* Get the new shadow stack pointer. */ - rdsspd %eax - - /* Use the restore stoken to restore the original shadow stack. */ - rstorssp -8(%esi) - - /* Save the restore token on the new shadow stack. */ - saveprevssp - - /* Store the new shadow stack pointer in __ssp[0]. */ - movl %eax, oSSP(%edi) - - /* Restore the original stack. */ - mov %edx, %esp - - cfi_startproc - - /* Restore ESI from the second scratch register slot. */ - movl oSCRATCH2(%edi), %esi - /* Restore EDI from the third scratch register slot. */ - movl oSCRATCH3(%edi), %edi - - ret - -L(skip_ssp): -#endif - /* If the function we call returns we must continue with the context which is given in the uc_link element. To do this set the return address for the function the user provides @@ -244,7 +122,6 @@ L(call_exit): call HIDDEN_JUMPTARGET(exit) /* The 'exit' call should never return. In case it does cause the process to terminate. */ -L(hlt): hlt cfi_startproc END(__makecontext) diff --git a/sysdeps/unix/sysv/linux/i386/setcontext.S b/sysdeps/unix/sysv/linux/i386/setcontext.S index 966fcbee1e..b6d827d11f 100644 --- a/sysdeps/unix/sysv/linux/i386/setcontext.S +++ b/sysdeps/unix/sysv/linux/i386/setcontext.S @@ -17,7 +17,6 @@ <https://www.gnu.org/licenses/>. */ #include <sysdep.h> -#include <asm/prctl.h> #include "ucontext_i.h" @@ -56,6 +55,9 @@ ENTRY(__setcontext) movl oFS(%eax), %ecx movw %cx, %fs + /* Fetch the address to return to. */ + movl oEIP(%eax), %ecx + /* Load the new stack pointer. */ cfi_def_cfa (eax, 0) cfi_offset (edi, oEDI) @@ -64,103 +66,6 @@ ENTRY(__setcontext) cfi_offset (ebx, oEBX) movl oESP(%eax), %esp -#if SHSTK_ENABLED - /* Check if Shadow Stack is enabled. */ - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET - jz L(no_shstk) - - /* If the base of the target shadow stack is the same as the - base of the current shadow stack, we unwind the shadow - stack. Otherwise it is a stack switch and we look for a - restore token. */ - movl oSSP(%eax), %esi - movl %esi, %edi - - /* Get the base of the target shadow stack. 
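
The oSSP / oSSP+4 / oSSP+8 accesses in the deleted makecontext/setcontext code used the three __ssp[] words of the i386 ucontext_t as pointer, base and size; the removed comments name the slots explicitly. An illustrative C mirror of that layout, plus the base comparison the context-switch path made against %gs:SSP_BASE_OFFSET (sketch only, not the real ucontext definition):

  #include <stdint.h>

  /* __ssp[0..2] as the removed i386 code used them; 4-byte words because
     this is the ia32 variant.  */
  struct demo_ucontext_ssp
  {
    uint32_t ssp;          /* __ssp[0]: shadow-stack pointer to resume at */
    uint32_t shstk_base;   /* __ssp[1]: base of that shadow stack */
    uint32_t shstk_size;   /* __ssp[2]: size of the allocated shadow stack */
  };

  static int
  demo_is_stack_switch (const struct demo_ucontext_ssp *target,
                        uint32_t current_shstk_base)
  {
    /* The deleted setcontext/swapcontext code compared __ssp[1] against
       the cached %gs:SSP_BASE_OFFSET: same base means "just unwind the
       current shadow stack", a different base means "switch stacks via a
       restore token".  */
    return target->shstk_base != current_shstk_base;
  }
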
*/ - movl (oSSP + 4)(%eax), %ecx - cmpl %gs:SSP_BASE_OFFSET, %ecx - je L(unwind_shadow_stack) - - /* Align the saved original shadow stack pointer to the next - 8 byte aligned boundary. */ - andl $-8, %esi - -L(find_restore_token_loop): - /* Look for a restore token. */ - movl -8(%esi), %ebx - andl $-8, %ebx - cmpl %esi, %ebx - je L(restore_shadow_stack) - - /* Try the next slot. */ - subl $8, %esi - jmp L(find_restore_token_loop) - -L(restore_shadow_stack): - /* Pop return address from the shadow stack since setcontext - will not return. */ - movl $1, %ebx - incsspd %ebx - - /* Use the restore stoken to restore the target shadow stack. */ - rstorssp -8(%esi) - - /* Save the restore token on the old shadow stack. NB: This - restore token may be checked by setcontext or swapcontext - later. */ - saveprevssp - - /* Record the new shadow stack base that was switched to. */ - movl (oSSP + 4)(%eax), %ebx - movl %ebx, %gs:SSP_BASE_OFFSET - -L(unwind_shadow_stack): - rdsspd %ebx - subl %edi, %ebx - je L(skip_unwind_shadow_stack) - negl %ebx - shrl $2, %ebx - movl $255, %esi -L(loop): - cmpl %esi, %ebx - cmovb %ebx, %esi - incsspd %esi - subl %esi, %ebx - ja L(loop) - -L(skip_unwind_shadow_stack): - - /* Load the values of all the preserved registers (except ESP). */ - movl oEDI(%eax), %edi - movl oESI(%eax), %esi - movl oEBP(%eax), %ebp - movl oEBX(%eax), %ebx - - /* Get the return address set with getcontext. */ - movl oEIP(%eax), %ecx - - /* Check if return address is valid for the case when setcontext - is invoked from L(exitcode) with linked context. */ - rdsspd %eax - cmpl (%eax), %ecx - /* Clear EAX to indicate success. NB: Don't use xorl to keep - EFLAGS for jne. */ - movl $0, %eax - jne L(jmp) - /* Return to the new context if return address valid. */ - pushl %ecx - ret - -L(jmp): - /* Jump to the new context directly. */ - jmp *%ecx - -L(no_shstk): -#endif - - /* Fetch the address to return to. */ - movl oEIP(%eax), %ecx - /* Push the return address on the new stack so we can return there. */ pushl %ecx diff --git a/sysdeps/unix/sysv/linux/i386/swapcontext.S b/sysdeps/unix/sysv/linux/i386/swapcontext.S index b8367f025e..bb736ae7d2 100644 --- a/sysdeps/unix/sysv/linux/i386/swapcontext.S +++ b/sysdeps/unix/sysv/linux/i386/swapcontext.S @@ -17,7 +17,6 @@ <https://www.gnu.org/licenses/>. */ #include <sysdep.h> -#include <asm/prctl.h> #include "ucontext_i.h" @@ -76,144 +75,6 @@ ENTRY(__swapcontext) movl oFS(%eax), %edx movw %dx, %fs -#if SHSTK_ENABLED - /* Check if Shadow Stack is enabled. */ - testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET - jz L(no_shstk) - - xorl %eax, %eax - cmpl %gs:SSP_BASE_OFFSET, %eax - jnz L(shadow_stack_bound_recorded) - - /* Get the base address and size of the default shadow stack - which must be the current shadow stack since nothing has - been recorded yet. */ - sub $24, %esp - mov %esp, %ecx - movl $ARCH_CET_STATUS, %ebx - movl $__NR_arch_prctl, %eax - ENTER_KERNEL - testl %eax, %eax - jz L(continue_no_err) - - /* This should never happen. */ - hlt - -L(continue_no_err): - /* Record the base of the current shadow stack. */ - movl 8(%esp), %eax - movl %eax, %gs:SSP_BASE_OFFSET - add $24, %esp - -L(shadow_stack_bound_recorded): - /* Load address of the context data structure we save in. */ - movl 4(%esp), %eax - - /* Load address of the context data structure we swap in */ - movl 8(%esp), %edx - - /* If we unwind the stack, we can't undo stack unwinding. Just - save the target shadow stack pointer as the current shadow - stack pointer. 
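
The L(find_restore_token_loop) being deleted here scanned the target shadow stack for the token that rstorssp expects: starting from the (8-byte aligned) saved shadow-stack pointer, step down one 8-byte slot at a time until a slot whose value, with the low three bits masked off, points back at that slot. A C rendering of the same walk (it mirrors the assembly one-for-one; it is not something you can run without a shadow stack to scan, and like the original it does not terminate if no token exists):

  #include <stdint.h>

  static unsigned char *
  demo_find_restore_token (unsigned char *ssp)   /* already 8-byte aligned */
  {
    for (;;)
      {
        uint32_t candidate = *(uint32_t *) (ssp - 8) & ~7u;   /* movl -8(%esi); andl $-8 */
        if (candidate == (uint32_t) (uintptr_t) ssp)          /* cmpl %esi, %ebx */
          return ssp - 8;                                     /* restore token found */
        ssp -= 8;                                             /* subl $8, %esi */
      }
  }
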
*/ - movl oSSP(%edx), %ecx - movl %ecx, oSSP(%eax) - - /* Save the current shadow stack base in ucontext. */ - movl %gs:SSP_BASE_OFFSET, %ecx - movl %ecx, (oSSP + 4)(%eax) - - /* If the base of the target shadow stack is the same as the - base of the current shadow stack, we unwind the shadow - stack. Otherwise it is a stack switch and we look for a - restore token. */ - movl oSSP(%edx), %esi - movl %esi, %edi - - /* Get the base of the target shadow stack. */ - movl (oSSP + 4)(%edx), %ecx - cmpl %gs:SSP_BASE_OFFSET, %ecx - je L(unwind_shadow_stack) - - /* Align the saved original shadow stack pointer to the next - 8 byte aligned boundary. */ - andl $-8, %esi - -L(find_restore_token_loop): - /* Look for a restore token. */ - movl -8(%esi), %ebx - andl $-8, %ebx - cmpl %esi, %ebx - je L(restore_shadow_stack) - - /* Try the next slot. */ - subl $8, %esi - jmp L(find_restore_token_loop) - -L(restore_shadow_stack): - /* The target shadow stack will be restored. Save the current - shadow stack pointer. */ - rdsspd %ecx - movl %ecx, oSSP(%eax) - - /* Use the restore stoken to restore the target shadow stack. */ - rstorssp -8(%esi) - - /* Save the restore token on the old shadow stack. NB: This - restore token may be checked by setcontext or swapcontext - later. */ - saveprevssp - - /* Record the new shadow stack base that was switched to. */ - movl (oSSP + 4)(%edx), %ebx - movl %ebx, %gs:SSP_BASE_OFFSET - -L(unwind_shadow_stack): - rdsspd %ebx - subl %edi, %ebx - je L(skip_unwind_shadow_stack) - negl %ebx - shrl $2, %ebx - movl $255, %esi -L(loop): - cmpl %esi, %ebx - cmovb %ebx, %esi - incsspd %esi - subl %esi, %ebx - ja L(loop) - -L(skip_unwind_shadow_stack): - - /* Load the new stack pointer. */ - movl oESP(%edx), %esp - - /* Load the values of all the preserved registers (except ESP). */ - movl oEDI(%edx), %edi - movl oESI(%edx), %esi - movl oEBP(%edx), %ebp - movl oEBX(%edx), %ebx - - /* Get the return address set with getcontext. */ - movl oEIP(%edx), %ecx - - /* Check if return address is valid for the case when setcontext - is invoked from L(exitcode) with linked context. */ - rdsspd %eax - cmpl (%eax), %ecx - /* Clear EAX to indicate success. NB: Don't use xorl to keep - EFLAGS for jne. */ - movl $0, %eax - jne L(jmp) - /* Return to the new context if return address valid. */ - pushl %ecx - ret - -L(jmp): - /* Jump to the new context directly. */ - jmp *%ecx - -L(no_shstk): -#endif - /* Fetch the address to return to. */ movl oEIP(%eax), %ecx diff --git a/sysdeps/unix/sysv/linux/i386/sysdep.h b/sysdeps/unix/sysv/linux/i386/sysdep.h index 516a85ce12..25852f894e 100644 --- a/sysdeps/unix/sysv/linux/i386/sysdep.h +++ b/sysdeps/unix/sysv/linux/i386/sysdep.h @@ -446,9 +446,4 @@ struct libc_do_syscall_args #endif /* __ASSEMBLER__ */ -/* Each shadow stack slot takes 4 bytes. Assuming that each stack - frame takes 128 bytes, this is used to compute shadow stack size - from stack size. 
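
The constant deleted from linux/i386/sysdep.h at the end of this hunk encoded the sizing rule makecontext used: assume roughly 128 bytes of ordinary stack per call frame and give each frame one 4-byte shadow-stack slot, i.e. shift the stack size right by 5 and round up to 8 bytes. Worked out in C, matching the subl oSS_SP / shrl $5 / addl $7 / andl $-8 sequence in the removed makecontext hunk:

  #include <stdint.h>
  #include <stdio.h>

  #define STACK_SIZE_TO_SHADOW_STACK_SIZE_SHIFT 5

  static uint32_t
  shadow_stack_size_for (uint32_t stack_size)
  {
    return ((stack_size >> STACK_SIZE_TO_SHADOW_STACK_SIZE_SHIFT) + 7) & ~7u;
  }

  int
  main (void)
  {
    /* A few round stack sizes, just to see the rule in action.  */
    uint32_t sizes[] = { 8192, 65536, 1048576 };
    for (unsigned i = 0; i < 3; i++)
      printf ("%7u bytes of stack -> %5u bytes of shadow stack\n",
              sizes[i], shadow_stack_size_for (sizes[i]));
    return 0;
  }
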
*/ -#define STACK_SIZE_TO_SHADOW_STACK_SIZE_SHIFT 5 - #endif /* linux/i386/sysdep.h */ diff --git a/sysdeps/unix/sysv/linux/i386/ucontext_i.sym b/sysdeps/unix/sysv/linux/i386/ucontext_i.sym index 1d8608eafc..1dfe03d2cc 100644 --- a/sysdeps/unix/sysv/linux/i386/ucontext_i.sym +++ b/sysdeps/unix/sysv/linux/i386/ucontext_i.sym @@ -22,10 +22,6 @@ oEBP mreg (EBP) oESP mreg (ESP) oEBX mreg (EBX) oEIP mreg (EIP) -oSCRATCH1 mreg (EAX) -oSCRATCH2 mreg (ECX) -oSCRATCH3 mreg (EDX) oFPREGS mcontext (fpregs) oSIGMASK ucontext (uc_sigmask) oFPREGSMEM ucontext (__fpregs_mem) -oSSP ucontext (__ssp) diff --git a/sysdeps/unix/sysv/linux/i386/vfork.S b/sysdeps/unix/sysv/linux/i386/vfork.S index 80c2058f1e..8846b61b96 100644 --- a/sysdeps/unix/sysv/linux/i386/vfork.S +++ b/sysdeps/unix/sysv/linux/i386/vfork.S @@ -20,6 +20,7 @@ #include <bits/errno.h> #include <tcb-offsets.h> + /* Clone the calling process, but without copying the whole address space. The calling process is suspended until the new process exits or is replaced by a call to `execve'. Return -1 for errors, 0 to the new process, @@ -46,29 +47,6 @@ ENTRY (__vfork) /* Branch forward if it failed. */ jae SYSCALL_ERROR_LABEL -#if SHSTK_ENABLED - /* Check if shadow stack is in use. */ - xorl %edx, %edx - rdsspd %edx - testl %edx, %edx - /* Normal return if shadow stack isn't in use. */ - je L(no_shstk) - - testl %eax, %eax - /* In parent, normal return. */ - jnz L(no_shstk) - - /* NB: In child, jump back to caller via indirect branch without - popping shadow stack which is shared with parent. Keep shadow - stack mismatched so that child returns in the vfork-calling - function will trigger SIGSEGV. */ - popl %ecx - cfi_adjust_cfa_offset (-4) - jmp *%ecx - -L(no_shstk): -#endif - ret PSEUDO_END (__vfork) diff --git a/sysdeps/x86/sysdep.h b/sysdeps/x86/sysdep.h index 5a14ca5110..85d0a8c943 100644 --- a/sysdeps/x86/sysdep.h +++ b/sysdeps/x86/sysdep.h @@ -21,33 +21,6 @@ #include <sysdeps/generic/sysdep.h> -/* __CET__ is defined by GCC with Control-Flow Protection values: - -enum cf_protection_level -{ - CF_NONE = 0, - CF_BRANCH = 1 << 0, - CF_RETURN = 1 << 1, - CF_FULL = CF_BRANCH | CF_RETURN, - CF_SET = 1 << 2 -}; -*/ - -/* Set if CF_BRANCH (IBT) is enabled. */ -#define X86_FEATURE_1_IBT (1U << 0) -/* Set if CF_RETURN (SHSTK) is enabled. */ -#define X86_FEATURE_1_SHSTK (1U << 1) - -#ifdef __CET__ -# define CET_ENABLED 1 -# define IBT_ENABLED (__CET__ & X86_FEATURE_1_IBT) -# define SHSTK_ENABLED (__CET__ & X86_FEATURE_1_SHSTK) -#else -# define CET_ENABLED 0 -# define IBT_ENABLED 0 -# define SHSTK_ENABLED 0 -#endif - /* Offset for fxsave/xsave area used by _dl_runtime_resolve. Also need space to preserve RCX, RDX, RSI, RDI, R8, R9 and RAX. It must be aligned to 16 bytes for fxsave and 64 bytes for xsave. */ @@ -66,27 +39,10 @@ enum cf_protection_level /* Syntactic details of assembler. */ -#ifdef _CET_ENDBR -# define _CET_NOTRACK notrack -#else -# define _CET_ENDBR -# define _CET_NOTRACK -#endif - /* ELF uses byte-counts for .align, most others use log2 of count of bytes. */ #define ALIGNARG(log2) 1<<log2 #define ASM_SIZE_DIRECTIVE(name) .size name,.-name; -/* Define an entry point visible from C. */ -#define ENTRY_P2ALIGN(name, alignment) \ - .globl C_SYMBOL_NAME(name); \ - .type C_SYMBOL_NAME(name),@function; \ - .align ALIGNARG(alignment); \ - C_LABEL(name) \ - cfi_startproc; \ - _CET_ENDBR; \ - CALL_MCOUNT - /* Common entry 16 byte aligns. 
*/ #define ENTRY(name) ENTRY_P2ALIGN (name, 4) diff --git a/sysdeps/x86_64/sysdep.h b/sysdeps/x86_64/sysdep.h index 3e7f4cbd8a..db6e36b2dd 100644 --- a/sysdeps/x86_64/sysdep.h +++ b/sysdeps/x86_64/sysdep.h @@ -22,10 +22,52 @@ #include <sysdeps/x86/sysdep.h> #include <x86-lp_size.h> +/* __CET__ is defined by GCC with Control-Flow Protection values: + +enum cf_protection_level +{ + CF_NONE = 0, + CF_BRANCH = 1 << 0, + CF_RETURN = 1 << 1, + CF_FULL = CF_BRANCH | CF_RETURN, + CF_SET = 1 << 2 +}; +*/ + +/* Set if CF_BRANCH (IBT) is enabled. */ +#define X86_FEATURE_1_IBT (1U << 0) +/* Set if CF_RETURN (SHSTK) is enabled. */ +#define X86_FEATURE_1_SHSTK (1U << 1) + +#ifdef __CET__ +# define CET_ENABLED 1 +# define SHSTK_ENABLED (__CET__ & X86_FEATURE_1_SHSTK) +#else +# define CET_ENABLED 0 +# define SHSTK_ENABLED 0 +#endif + #ifdef __ASSEMBLER__ /* Syntactic details of assembler. */ +#ifdef _CET_ENDBR +# define _CET_NOTRACK notrack +#else +# define _CET_ENDBR +# define _CET_NOTRACK +#endif + +/* Define an entry point visible from C. */ +#define ENTRY_P2ALIGN(name, alignment) \ + .globl C_SYMBOL_NAME(name); \ + .type C_SYMBOL_NAME(name),@function; \ + .align ALIGNARG(alignment); \ + C_LABEL(name) \ + cfi_startproc; \ + _CET_ENDBR; \ + CALL_MCOUNT + /* This macro is for setting proper CFI with DW_CFA_expression describing the register as saved relative to %rsp instead of relative to the CFA. Expression is DW_OP_drop, DW_OP_breg7 (%rsp is register 7), sleb128 offset