| Message ID | 20180522174254.27551-3-julien.grall@arm.com |
|---|---|
| State | Superseded |
| Series | xen/arm: SSBD (aka Spectre-v4) mitigation (XSA-263) |
On Tue, 22 May 2018, Julien Grall wrote:
> This will improve readability for future changes.
>
> This is part of XSA-263.
>
> Signed-off-by: Julien Grall <julien.grall@arm.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

> ---
>  xen/arch/arm/arm64/entry.S | 8 ++++----
>  1 file changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
> index ffa9a1c492..e2344e565f 100644
> --- a/xen/arch/arm/arm64/entry.S
> +++ b/xen/arch/arm/arm64/entry.S
> @@ -226,11 +226,11 @@ guest_sync:
>          mrs     x1, esr_el2
>          lsr     x1, x1, #HSR_EC_SHIFT /* x1 = ESR_EL2.EC */
>          cmp     x1, #HSR_EC_HVC64
> -        b.ne    1f                    /* Not a HVC skip fastpath. */
> +        b.ne    guest_sync_slowpath   /* Not a HVC skip fastpath. */
>
>          mrs     x1, esr_el2
>          and     x1, x1, #0xffff       /* Check the immediate [0:16] */
> -        cbnz    x1, 1f                /* should be 0 for HVC #0 */
> +        cbnz    x1, guest_sync_slowpath /* should be 0 for HVC #0 */
>
>          /*
>           * Fastest path possible for ARM_SMCCC_ARCH_WORKAROUND_1.
> @@ -241,7 +241,7 @@ guest_sync:
>           * be encoded as an immediate for cmp.
>           */
>          eor     w0, w0, #ARM_SMCCC_ARCH_WORKAROUND_1_FID
> -        cbnz    w0, 1f
> +        cbnz    w0, guest_sync_slowpath
>
>          /*
>           * Clobber both x0 and x1 to prevent leakage. Note that thanks
> @@ -250,7 +250,7 @@ guest_sync:
>          mov     x1, xzr
>          eret
>
> -1:
> +guest_sync_slowpath:
>          /*
>           * x0/x1 may have been scratch by the fast path above, so avoid
>           * to save them.
> --
> 2.11.0
>
diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index ffa9a1c492..e2344e565f 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -226,11 +226,11 @@ guest_sync:
         mrs     x1, esr_el2
         lsr     x1, x1, #HSR_EC_SHIFT /* x1 = ESR_EL2.EC */
         cmp     x1, #HSR_EC_HVC64
-        b.ne    1f                    /* Not a HVC skip fastpath. */
+        b.ne    guest_sync_slowpath   /* Not a HVC skip fastpath. */
 
         mrs     x1, esr_el2
         and     x1, x1, #0xffff       /* Check the immediate [0:16] */
-        cbnz    x1, 1f                /* should be 0 for HVC #0 */
+        cbnz    x1, guest_sync_slowpath /* should be 0 for HVC #0 */
 
         /*
          * Fastest path possible for ARM_SMCCC_ARCH_WORKAROUND_1.
@@ -241,7 +241,7 @@ guest_sync:
          * be encoded as an immediate for cmp.
          */
         eor     w0, w0, #ARM_SMCCC_ARCH_WORKAROUND_1_FID
-        cbnz    w0, 1f
+        cbnz    w0, guest_sync_slowpath
 
         /*
          * Clobber both x0 and x1 to prevent leakage. Note that thanks
@@ -250,7 +250,7 @@ guest_sync:
         mov     x1, xzr
         eret
 
-1:
+guest_sync_slowpath:
         /*
          * x0/x1 may have been scratch by the fast path above, so avoid
          * to save them.
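The fast path above checks the SMCCC function ID with `eor` + `cbnz` rather than a plain `cmp` because, as the in-code comment notes, the function ID cannot be encoded as a `cmp` immediate. Below is a minimal standalone sketch of that pattern; the concrete value 0x80008000 (the ARM_SMCCC_ARCH_WORKAROUND_1 function ID) and the `not_workaround_1` label are assumptions added for illustration, not part of the patch.

```asm
        /*
         * Sketch only: an AArch64 "cmp Wn, #imm" immediate is limited to
         * 12 bits (optionally shifted left by 12), so 0x80008000 cannot
         * be encoded there. It is a valid logical (bitmask) immediate,
         * however, so eor performs the comparison in place: after the
         * eor, w0 is zero if and only if the function ID matched.
         */
        eor     w0, w0, #0x80008000      /* w0 == 0 iff ID matched      */
        cbnz    w0, not_workaround_1     /* hypothetical slow-path label */
        /* fast path continues here, with w0 conveniently already zero */
not_workaround_1:
        /* slow path continues here */
```

The same property is what the patch's "Clobber both x0 and x1" comment relies on: when the `eor` matches, x0 is already zero, so no extra instruction is needed to scrub it before `eret`.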
This will improve readability for future changes.

This is part of XSA-263.

Signed-off-by: Julien Grall <julien.grall@arm.com>
---
 xen/arch/arm/arm64/entry.S | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
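For context on the rename itself, here is a minimal sketch (not taken from the patch) contrasting a GNU assembler numeric local label with a named one; the surrounding instructions are placeholders.

```asm
        /* Numeric local label: "1f" means "the nearest label named 1,
         * searching forward". The branch site says nothing about where
         * it goes or why, which hurts readability in long functions. */
        cbnz    x1, 1f
        /* ... fast path ... */
1:
        /* ... slow path ... */

        /* Named label: the branch target documents itself at the
         * branch site, which is the readability gain the patch cites. */
        cbnz    x1, guest_sync_slowpath
        /* ... fast path ... */
guest_sync_slowpath:
        /* ... slow path ... */
```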