Message ID | 20191209222956.239798-3-ndesaulniers@google.com |
---|---|
State | Accepted |
Commit | 780a0cfda9006a9a22d6473c2d4c527f5c68eb2e |
Series | Hexagon fixes |
> -----Original Message-----
> From: Nick Desaulniers <ndesaulniers@google.com>
> Sent: Monday, December 9, 2019 4:30 PM
> To: bcain@codeaurora.org
> Cc: Nick Desaulniers <ndesaulniers@google.com>; lee.jones@linaro.org;
> andriy.shevchenko@linux.intel.com; ztuowen@gmail.com;
> mika.westerberg@linux.intel.com; mcgrof@kernel.org;
> gregkh@linuxfoundation.org; alexios.zavras@intel.com;
> allison@lohutok.net; will@kernel.org; rfontana@redhat.com;
> tglx@linutronix.de; peterz@infradead.org; boqun.feng@gmail.com;
> mingo@redhat.com; akpm@linux-foundation.org; geert@linux-m68k.org;
> linux-hexagon@vger.kernel.org; clang-built-linux@googlegroups.com;
> linux-kernel@vger.kernel.org; Sid Manning <sidneym@codeaurora.org>
> Subject: [PATCH 2/2] hexagon: parenthesize registers in asm predicates
>
> Hexagon requires that register predicates in assembly be parenthesized.
>
> Link: https://github.com/ClangBuiltLinux/linux/issues/754
> Suggested-by: Sid Manning <sidneym@codeaurora.org>
> Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
>
> [quoted patch body snipped; the full diff is reproduced below]

Acked-by: Brian Cain <bcain@codeaurora.org>
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 12cd9231c4b8..0231d69c8bf2 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -91,7 +91,7 @@ static inline void atomic_##op(int i, atomic_t *v)	\
 	"1:	%0 = memw_locked(%1);\n"	\
 	"	%0 = "#op "(%0,%2);\n"		\
 	"	memw_locked(%1,P3)=%0;\n"	\
-	"	if !P3 jump 1b;\n"		\
+	"	if (!P3) jump 1b;\n"		\
 	: "=&r" (output)			\
 	: "r" (&v->counter), "r" (i)		\
 	: "memory", "p3"			\
@@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 	"1:	%0 = memw_locked(%1);\n"	\
 	"	%0 = "#op "(%0,%2);\n"		\
 	"	memw_locked(%1,P3)=%0;\n"	\
-	"	if !P3 jump 1b;\n"		\
+	"	if (!P3) jump 1b;\n"		\
 	: "=&r" (output)			\
 	: "r" (&v->counter), "r" (i)		\
 	: "memory", "p3"			\
@@ -124,7 +124,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)	\
 	"1:	%0 = memw_locked(%2);\n"	\
 	"	%1 = "#op "(%0,%3);\n"		\
 	"	memw_locked(%2,P3)=%1;\n"	\
-	"	if !P3 jump 1b;\n"		\
+	"	if (!P3) jump 1b;\n"		\
 	: "=&r" (output), "=&r" (val)		\
 	: "r" (&v->counter), "r" (i)		\
 	: "memory", "p3"			\
@@ -173,7 +173,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 		"	}"
 		"	memw_locked(%2, p3) = %1;"
 		"	{"
-		"		if !p3 jump 1b;"
+		"		if (!p3) jump 1b;"
 		"	}"
 		"2:"
 		: "=&r" (__oldval), "=&r" (tmp)
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 47384b094b94..71429f756af0 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
 	"1:	R12 = memw_locked(R10);\n"
 	"	{ P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
 	"	memw_locked(R10,P1) = R12;\n"
-	"	{if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
 	: "=&r" (oldval)
 	: "r" (addr), "r" (nr)
 	: "r10", "r11", "r12", "p0", "p1", "memory"
@@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
 	"1:	R12 = memw_locked(R10);\n"
 	"	{ P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
 	"	memw_locked(R10,P1) = R12;\n"
-	"	{if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
 	: "=&r" (oldval)
 	: "r" (addr), "r" (nr)
 	: "r10", "r11", "r12", "p0", "p1", "memory"
@@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
 	"1:	R12 = memw_locked(R10);\n"
 	"	{ P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
 	"	memw_locked(R10,P1) = R12;\n"
-	"	{if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
 	: "=&r" (oldval)
 	: "r" (addr), "r" (nr)
 	: "r10", "r11", "r12", "p0", "p1", "memory"
@@ -223,7 +223,7 @@ static inline int ffs(int x)
 	int r;
 
 	asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
-	    "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
+	    "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
 	    : "=&r" (r)
 	    : "r" (x)
 	    : "p0");
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index 6091322c3af9..92b8a02e588a 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 	__asm__ __volatile__ (
 	"1:	%0 = memw_locked(%1);\n"    /*  load into retval */
 	"	memw_locked(%1,P0) = %2;\n" /*  store into memory */
-	"	if !P0 jump 1b;\n"
+	"	if (!P0) jump 1b;\n"
 	: "=&r" (retval)
 	: "r" (ptr), "r" (x)
 	: "memory", "p0"
diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
index cb635216a732..0191f7c7193e 100644
--- a/arch/hexagon/include/asm/futex.h
+++ b/arch/hexagon/include/asm/futex.h
@@ -16,7 +16,7 @@
 	/* For example: %1 = %4 */ \
 	insn \
 	"2: memw_locked(%3,p2) = %1;\n" \
-	"   if !p2 jump 1b;\n" \
+	"   if (!p2) jump 1b;\n" \
 	"   %1 = #0;\n" \
 	"3:\n" \
 	".section .fixup,\"ax\"\n" \
@@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
 	"1: %1 = memw_locked(%3)\n"
 	"   {\n"
 	"      p2 = cmp.eq(%1,%4)\n"
-	"      if !p2.new jump:NT 3f\n"
+	"      if (!p2.new) jump:NT 3f\n"
 	"   }\n"
 	"2: memw_locked(%3,p2) = %5\n"
-	"   if !p2 jump 1b\n"
+	"   if (!p2) jump 1b\n"
 	"3:\n"
 	".section .fixup,\"ax\"\n"
 	"4: %0 = #%6\n"
diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h
index bfe07d842ff3..ef103b73bec8 100644
--- a/arch/hexagon/include/asm/spinlock.h
+++ b/arch/hexagon/include/asm/spinlock.h
@@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
 	__asm__ __volatile__(
 	"1:	R6 = memw_locked(%0);\n"
 	"	{ P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	"	memw_locked(%0,P3) = R6;\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	:
 	: "r" (&lock->lock)
 	: "memory", "r6", "p3"
@@ -46,7 +46,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
 	"1:	R6 = memw_locked(%0);\n"
 	"	R6 = add(R6,#-1);\n"
 	"	memw_locked(%0,P3) = R6\n"
-	"	if !P3 jump 1b;\n"
+	"	if (!P3) jump 1b;\n"
 	:
 	: "r" (&lock->lock)
 	: "memory", "r6", "p3"
@@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
 	__asm__ __volatile__(
 	"	R6 = memw_locked(%1);\n"
 	"	{ %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-	"	{ if !P3 jump 1f; }\n"
+	"	{ if (!P3) jump 1f; }\n"
 	"	memw_locked(%1,P3) = R6;\n"
 	"	{ %0 = P3 }\n"
 	"1:\n"
@@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
 	__asm__ __volatile__(
 	"1:	R6 = memw_locked(%0)\n"
 	"	{ P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	"	memw_locked(%0,P3) = R6;\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	:
 	: "r" (&lock->lock)
 	: "memory", "r6", "p3"
@@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
 	__asm__ __volatile__(
 	"	R6 = memw_locked(%1)\n"
 	"	{ %0 = #0; P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
-	"	{ if !P3 jump 1f; }\n"
+	"	{ if (!P3) jump 1f; }\n"
 	"	memw_locked(%1,P3) = R6;\n"
 	"	%0 = P3;\n"
 	"1:\n"
@@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
 	"1:	R6 = memw_locked(%0);\n"
 	"	P3 = cmp.eq(R6,#0);\n"
-	"	{ if !P3 jump 1b; R6 = #1; }\n"
+	"	{ if (!P3) jump 1b; R6 = #1; }\n"
 	"	memw_locked(%0,P3) = R6;\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	:
 	: "r" (&lock->lock)
 	: "memory", "r6", "p3"
@@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
 	"	R6 = memw_locked(%1);\n"
 	"	P3 = cmp.eq(R6,#0);\n"
-	"	{ if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
+	"	{ if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
 	"	memw_locked(%1,P3) = R6;\n"
 	"	%0 = P3;\n"
 	"1:\n"
diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S
index 65a1ea0eed2f..554371d92bed 100644
--- a/arch/hexagon/kernel/vm_entry.S
+++ b/arch/hexagon/kernel/vm_entry.S
@@ -369,7 +369,7 @@ ret_from_fork:
 		R26.L = #LO(do_work_pending);
 		R0 = #VM_INT_DISABLE;
 	}
-	if P0 jump check_work_pending
+	if (P0) jump check_work_pending
 	{
 		R0 = R25;
 		callr R24
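For context, nearly every hunk above touches the same shape of code: Hexagon's load-locked/store-conditional retry loop, where a predicate register records whether the conditional store succeeded. The sketch below is modeled on the patched __xchg(); the function name and scaffolding are illustrative rather than kernel code, and it assumes a Hexagon-targeting compiler such as clang --target=hexagon.

	/*
	 * Illustrative sketch only -- modeled on __xchg() from the diff.
	 * Atomically swaps *ptr with x and returns the old value.
	 */
	static inline unsigned long example_xchg(volatile unsigned long *ptr,
						 unsigned long x)
	{
		unsigned long old;

		__asm__ __volatile__(
		"1:	%0 = memw_locked(%1);\n"    /* load-locked: take reservation */
		"	memw_locked(%1,P0) = %2;\n" /* store-conditional: P0 = success */
		"	if (!P0) jump 1b;\n"        /* reservation lost: retry */
		: "=&r" (old)
		: "r" (ptr), "r" (x)
		: "memory", "p0");

		return old;
	}

If another thread writes the word between the locked load and the locked store, the store fails, P0 is cleared, and the parenthesized predicated jump retries from label 1.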
Hexagon requires that register predicates in assembly be parenthesized.

Link: https://github.com/ClangBuiltLinux/linux/issues/754
Suggested-by: Sid Manning <sidneym@codeaurora.org>
Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
---
 arch/hexagon/include/asm/atomic.h   |  8 ++++----
 arch/hexagon/include/asm/bitops.h   |  8 ++++----
 arch/hexagon/include/asm/cmpxchg.h  |  2 +-
 arch/hexagon/include/asm/futex.h    |  6 +++---
 arch/hexagon/include/asm/spinlock.h | 20 ++++++++++----------
 arch/hexagon/kernel/vm_entry.S      |  2 +-
 6 files changed, 23 insertions(+), 23 deletions(-)
--
2.24.0.393.g34dc348eaf-goog
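As a standalone illustration of the rule the commit message states, here is the patched ffs() pattern lifted into a self-contained helper; the name example_ffs is hypothetical, but the assembly is taken from the bitops.h hunk above. The second instruction packet predicates both assignments on P0, each predicate parenthesized as Hexagon requires.

	/*
	 * Illustrative helper (hypothetical name) mirroring the patched ffs():
	 * returns one plus the index of the least significant set bit of x,
	 * or 0 if x is 0. Both conditional assignments use (P0)/(!P0).
	 */
	static inline int example_ffs(int x)
	{
		int r;

		asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n" /* ct0: count trailing zeros */
		    "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
		    : "=&r" (r)
		    : "r" (x)
		    : "p0");

		return r;
	}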