diff mbox series

[10/13] atomics/powerpc: define atomic64_fetch_add_unless()

Message ID 20180523133533.1076-11-mark.rutland@arm.com
State Superseded
Headers show
Series atomics: API cleanups | expand

Commit Message

Mark Rutland May 23, 2018, 1:35 p.m. UTC
As a step towards unifying the atomic/atomic64/atomic_long APIs, this
patch converts the arch/powerpc implementation of atomic64_add_unless()
into an implementation of atomic64_fetch_add_unless().

A wrapper in <linux/atomic.h> will build atomic_add_unless() atop of
this, provided it is given a preprocessor definition.

No functional change is intended as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>

Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/atomic.h | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

-- 
2.11.0

Comments

Michael Ellerman May 24, 2018, 1:50 a.m. UTC | #1
Hi Mark,

Mark Rutland <mark.rutland@arm.com> writes:
> As a step towards unifying the atomic/atomic64/atomic_long APIs, this
> patch converts the arch/powerpc implementation of atomic64_add_unless()
> into an implementation of atomic64_fetch_add_unless().
>
> A wrapper in <linux/atomic.h> will build atomic_add_unless() atop of
> this, provided it is given a preprocessor definition.
>
> No functional change is intended as a result of this patch.
>
> Signed-off-by: Mark Rutland <mark.rutland@arm.com>
> Cc: Boqun Feng <boqun.feng@gmail.com>
> Cc: Peter Zijlstra <peterz@infradead.org>
> Cc: Will Deacon <will.deacon@arm.com>
> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
> Cc: Paul Mackerras <paulus@samba.org>
> Cc: Michael Ellerman <mpe@ellerman.id.au>
> ---
>  arch/powerpc/include/asm/atomic.h | 9 +++++----
>  1 file changed, 5 insertions(+), 4 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
> index b5646c079c16..233dbf31911c 100644
> --- a/arch/powerpc/include/asm/atomic.h
> +++ b/arch/powerpc/include/asm/atomic.h
> @@ -525,7 +525,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
>  #define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
>  
>  /**
> - * atomic64_add_unless - add unless the number is a given value
> + * atomic64_fetch_add_unless - add unless the number is a given value
>   * @v: pointer of type atomic64_t
>   * @a: the amount to add to v...
>   * @u: ...unless v is equal to u.
> @@ -533,13 +533,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
>   * Atomically adds @a to @v, so long as it was not @u.
>   * Returns the old value of @v.


Comment was wrong, but is right now. Win.

>   */
> -static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
> +static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
>  {
>  	long t;
>  
>  	__asm__ __volatile__ (
>  	PPC_ATOMIC_ENTRY_BARRIER
> -"1:	ldarx	%0,0,%1		# atomic_fetch_add_unless\n\
> +"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
>  	cmpd	0,%0,%3 \n\
>  	beq	2f \n\
>  	add	%0,%2,%0 \n"

We overwrite t here with the new value ...

> @@ -552,8 +552,9 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)


But then in the context above here we do:

"	subf	%0,%2,%0 \n\

Which puts the old value back into t.

>  	: "r" (&v->counter), "r" (a), "r" (u)
>  	: "cc", "memory");
>  
> -	return t != u;
> +	return t;


ie. this is correct.

I'm not sure why we wrote it that way, to add and then subtract, but
that's not your problem.

So LGTM.

Acked-by: Michael Ellerman <mpe@ellerman.id.au>


cheers
diff mbox series

Patch

diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index b5646c079c16..233dbf31911c 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -525,7 +525,7 @@  static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
 
 /**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -533,13 +533,13 @@  static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
 {
 	long t;
 
 	__asm__ __volatile__ (
 	PPC_ATOMIC_ENTRY_BARRIER
-"1:	ldarx	%0,0,%1		# atomic_fetch_add_unless\n\
+"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
 	cmpd	0,%0,%3 \n\
 	beq	2f \n\
 	add	%0,%2,%0 \n"
@@ -552,8 +552,9 @@  static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 	: "r" (&v->counter), "r" (a), "r" (u)
 	: "cc", "memory");
 
-	return t != u;
+	return t;
 }
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 /**
  * atomic_inc64_not_zero - increment unless the number is zero