
[Xen-devel] xen: arm: bitops take unsigned int (Was: Re: [PATCH] xen/arm64: disable alignment check)

Message ID 1399641896.561.9.camel@kazak.uk.xensource.com
State Accepted
Commit cd338e967c598bf747b03dcfd9d8d45dc40bac1a

Commit Message

Ian Campbell May 9, 2014, 1:24 p.m. UTC
On Tue, 2014-04-29 at 09:58 +0100, Ian Campbell wrote:
> On Tue, 2014-04-29 at 08:38 +0100, Vladimir Murzin wrote:
> > On Mon, Apr 28, 2014 at 11:43 AM, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> > > But I also wanted confirmation that the problematic instruction was
> > > generated by gcc and not by some handcoded asm somewhere which we hadn't
> > > properly fixed.
> 
> > I believe it comes from test_bit (xen/include/asm-arm/bitops.h).
> 
> Ah, then I think this code needs fixing too. Probably switching to
> unsigned int * throughout would work, what do you think?

I finally managed to upgrade to a new enough kernel to trigger this.

This Works For Me(tm), along with the Linux patch "xen/events/fifo:
correctly align bitops" which is queued for 3.15 Linus (but not sent
yet?)

8<-------------------

From aa6afe6520ea22241fb0ce430ef315c49a73867f Mon Sep 17 00:00:00 2001
From: Ian Campbell <ian.campbell@citrix.com>
Date: Thu, 8 May 2014 16:13:55 +0100
Subject: [PATCH] xen: arm: bitops take unsigned int

Xen bitmaps can be 4 rather than 8 byte aligned, so use the appropriate type.
Otherwise the compiler can generate unaligned 8 byte accesses and cause traps.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
 xen/include/asm-arm/bitops.h |   37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)
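
For context, a minimal C sketch of the failure mode the commit message
describes. The structure layout and helper names below are illustrative
only (they are not taken from the Xen or Linux sources); the point is
that a bitmap which is only 4-byte aligned can always be accessed as
unsigned int, but not, on arm64, as unsigned long:

#include <stdint.h>

/* Hypothetical layout: 'bitmap' is 4-byte aligned but sits at offset 4,
 * so it is not 8-byte aligned when the struct starts on an 8-byte
 * boundary. */
struct demo {
    uint32_t flags;
    uint32_t bitmap[4];
} __attribute__((aligned(8)));

/* Pre-patch shape: with 'bitmap' at offset 4 the load below is an
 * unaligned 8-byte access on arm64 and can trap; that alignment fault
 * is what this thread started from. */
static int test_bit_long(int nr, const volatile void *addr)
{
    const volatile unsigned long *p = addr;
    /* On arm64 BITS_PER_LONG is 64, so this indexes 8-byte words. */
    return 1UL & (p[nr / 64] >> (nr & 63));
}

/* Post-patch shape: 4-byte loads, naturally aligned for any uint32_t
 * bitmap. */
static int test_bit_word(int nr, const volatile void *addr)
{
    const volatile unsigned int *p = addr;
    return 1U & (p[nr / 32] >> (nr & 31));
}

With the patched helper, test_bit_word() on the 'bitmap' member above
only ever issues 32-bit accesses, so 4-byte alignment is sufficient.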

Comments

Ian Campbell May 9, 2014, 4:19 p.m. UTC | #1
(Just adding the other ARM guys...)

On Fri, 2014-05-09 at 14:24 +0100, Ian Campbell wrote:
> On Tue, 2014-04-29 at 09:58 +0100, Ian Campbell wrote:
> > On Tue, 2014-04-29 at 08:38 +0100, Vladimir Murzin wrote:
> > > On Mon, Apr 28, 2014 at 11:43 AM, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> > > > But I also wanted confirmation that the problematic instruction was
> > > > generated by gcc and not by some handcoded asm somewhere which we hadn't
> > > > properly fixed.
> > 
> > > I believe it comes from test_bit (xen/include/asm-arm/bitops.h).
> > 
> > Ah, then I think this code needs fixing too. Probably switching to
> > unsigned int * throughout would work, what do you think?
> 
> I finally managed to upgrade to a new enough kernel to trigger this.
> 
> This Works For Me(tm), along with the Linux patch "xen/events/fifo:
> correctly align bitops" which is queued for 3.15 Linus (but not sent
> yet?)
> 
> 8<-------------------
> 
> From aa6afe6520ea22241fb0ce430ef315c49a73867f Mon Sep 17 00:00:00 2001
> From: Ian Campbell <ian.campbell@citrix.com>
> Date: Thu, 8 May 2014 16:13:55 +0100
> Subject: [PATCH] xen: arm: bitops take unsigned int
> 
> Xen bitmaps can be 4 rather than 8 byte aligned, so use the appropriate type.
> Otherwise the compiler can generate unaligned 8 byte accesses and cause traps.
> 
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> ---
>  xen/include/asm-arm/bitops.h |   37 +++++++++++++++++++------------------
>  1 file changed, 19 insertions(+), 18 deletions(-)
> 
> diff --git a/xen/include/asm-arm/bitops.h b/xen/include/asm-arm/bitops.h
> index 0a7caee..25f96c8 100644
> --- a/xen/include/asm-arm/bitops.h
> +++ b/xen/include/asm-arm/bitops.h
> @@ -18,13 +18,14 @@
>  #define __set_bit(n,p)            set_bit(n,p)
>  #define __clear_bit(n,p)          clear_bit(n,p)
>  
> +#define BITS_PER_WORD           32
>  #define BIT(nr)                 (1UL << (nr))
> -#define BIT_MASK(nr)            (1UL << ((nr) % BITS_PER_LONG))
> -#define BIT_WORD(nr)            ((nr) / BITS_PER_LONG)
> +#define BIT_MASK(nr)            (1UL << ((nr) % BITS_PER_WORD))
> +#define BIT_WORD(nr)            ((nr) / BITS_PER_WORD)
>  #define BITS_PER_BYTE           8
>  
> -#define ADDR (*(volatile long *) addr)
> -#define CONST_ADDR (*(const volatile long *) addr)
> +#define ADDR (*(volatile int *) addr)
> +#define CONST_ADDR (*(const volatile int *) addr)
>  
>  #if defined(CONFIG_ARM_32)
>  # include <asm/arm32/bitops.h>
> @@ -45,10 +46,10 @@
>   */
>  static inline int __test_and_set_bit(int nr, volatile void *addr)
>  {
> -        unsigned long mask = BIT_MASK(nr);
> -        volatile unsigned long *p =
> -                ((volatile unsigned long *)addr) + BIT_WORD(nr);
> -        unsigned long old = *p;
> +        unsigned int mask = BIT_MASK(nr);
> +        volatile unsigned int *p =
> +                ((volatile unsigned int *)addr) + BIT_WORD(nr);
> +        unsigned int old = *p;
>  
>          *p = old | mask;
>          return (old & mask) != 0;
> @@ -65,10 +66,10 @@ static inline int __test_and_set_bit(int nr, volatile void *addr)
>   */
>  static inline int __test_and_clear_bit(int nr, volatile void *addr)
>  {
> -        unsigned long mask = BIT_MASK(nr);
> -        volatile unsigned long *p =
> -                ((volatile unsigned long *)addr) + BIT_WORD(nr);
> -        unsigned long old = *p;
> +        unsigned int mask = BIT_MASK(nr);
> +        volatile unsigned int *p =
> +                ((volatile unsigned int *)addr) + BIT_WORD(nr);
> +        unsigned int old = *p;
>  
>          *p = old & ~mask;
>          return (old & mask) != 0;
> @@ -78,10 +79,10 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr)
>  static inline int __test_and_change_bit(int nr,
>                                              volatile void *addr)
>  {
> -        unsigned long mask = BIT_MASK(nr);
> -        volatile unsigned long *p =
> -                ((volatile unsigned long *)addr) + BIT_WORD(nr);
> -        unsigned long old = *p;
> +        unsigned int mask = BIT_MASK(nr);
> +        volatile unsigned int *p =
> +                ((volatile unsigned int *)addr) + BIT_WORD(nr);
> +        unsigned int old = *p;
>  
>          *p = old ^ mask;
>          return (old & mask) != 0;
> @@ -94,8 +95,8 @@ static inline int __test_and_change_bit(int nr,
>   */
>  static inline int test_bit(int nr, const volatile void *addr)
>  {
> -        const volatile unsigned long *p = (const volatile unsigned long *)addr;
> -        return 1UL & (p[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
> +        const volatile unsigned int *p = (const volatile unsigned int *)addr;
> +        return 1UL & (p[BIT_WORD(nr)] >> (nr & (BITS_PER_WORD-1)));
>  }
>  
>  static inline int constant_fls(int x)
Stefano Stabellini May 11, 2014, 6:34 p.m. UTC | #2
On Fri, 9 May 2014, Ian Campbell wrote:
> (Just adding the other ARM guys...)
> 
> On Fri, 2014-05-09 at 14:24 +0100, Ian Campbell wrote:
> > On Tue, 2014-04-29 at 09:58 +0100, Ian Campbell wrote:
> > > On Tue, 2014-04-29 at 08:38 +0100, Vladimir Murzin wrote:
> > > > On Mon, Apr 28, 2014 at 11:43 AM, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> > > > > But I also wanted confirmation that the problematic instruction was
> > > > > generated by gcc and not by some handcoded asm somewhere which we hadn't
> > > > > properly fixed.
> > > 
> > > > I believe it comes from test_bit (xen/include/asm-arm/bitops.h).
> > > 
> > > Ah, then I think this code needs fixing too. Probably switching to
> > > unsigned int * throughout would work, what do you think?
> > 
> > I finally managed to upgrade to a new enough kernel to trigger this.
> > 
> > This Works For Me(tm), along with the Linux patch "xen/events/fifo:
> > correctly align bitops" which is queued for 3.15 Linus (but not sent
> > yet?)

The change looks good to me.


> > 8<-------------------
> > 
> > From aa6afe6520ea22241fb0ce430ef315c49a73867f Mon Sep 17 00:00:00 2001
> > From: Ian Campbell <ian.campbell@citrix.com>
> > Date: Thu, 8 May 2014 16:13:55 +0100
> > Subject: [PATCH] xen: arm: bitops take unsigned int
> > 
> > Xen bitmaps can be 4 rather than 8 byte aligned, so use the appropriate type.
> > Otherwise the compiler can generate unaligned 8 byte accesses and cause traps.
> > 
> > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> > ---
> >  xen/include/asm-arm/bitops.h |   37 +++++++++++++++++++------------------
> >  1 file changed, 19 insertions(+), 18 deletions(-)
> > 
> > diff --git a/xen/include/asm-arm/bitops.h b/xen/include/asm-arm/bitops.h
> > index 0a7caee..25f96c8 100644
> > --- a/xen/include/asm-arm/bitops.h
> > +++ b/xen/include/asm-arm/bitops.h
> > @@ -18,13 +18,14 @@
> >  #define __set_bit(n,p)            set_bit(n,p)
> >  #define __clear_bit(n,p)          clear_bit(n,p)
> >  
> > +#define BITS_PER_WORD           32
> >  #define BIT(nr)                 (1UL << (nr))
> > -#define BIT_MASK(nr)            (1UL << ((nr) % BITS_PER_LONG))
> > -#define BIT_WORD(nr)            ((nr) / BITS_PER_LONG)
> > +#define BIT_MASK(nr)            (1UL << ((nr) % BITS_PER_WORD))
> > +#define BIT_WORD(nr)            ((nr) / BITS_PER_WORD)
> >  #define BITS_PER_BYTE           8
> >  
> > -#define ADDR (*(volatile long *) addr)
> > -#define CONST_ADDR (*(const volatile long *) addr)
> > +#define ADDR (*(volatile int *) addr)
> > +#define CONST_ADDR (*(const volatile int *) addr)
> >  
> >  #if defined(CONFIG_ARM_32)
> >  # include <asm/arm32/bitops.h>
> > @@ -45,10 +46,10 @@
> >   */
> >  static inline int __test_and_set_bit(int nr, volatile void *addr)
> >  {
> > -        unsigned long mask = BIT_MASK(nr);
> > -        volatile unsigned long *p =
> > -                ((volatile unsigned long *)addr) + BIT_WORD(nr);
> > -        unsigned long old = *p;
> > +        unsigned int mask = BIT_MASK(nr);
> > +        volatile unsigned int *p =
> > +                ((volatile unsigned int *)addr) + BIT_WORD(nr);
> > +        unsigned int old = *p;
> >  
> >          *p = old | mask;
> >          return (old & mask) != 0;
> > @@ -65,10 +66,10 @@ static inline int __test_and_set_bit(int nr, volatile void *addr)
> >   */
> >  static inline int __test_and_clear_bit(int nr, volatile void *addr)
> >  {
> > -        unsigned long mask = BIT_MASK(nr);
> > -        volatile unsigned long *p =
> > -                ((volatile unsigned long *)addr) + BIT_WORD(nr);
> > -        unsigned long old = *p;
> > +        unsigned int mask = BIT_MASK(nr);
> > +        volatile unsigned int *p =
> > +                ((volatile unsigned int *)addr) + BIT_WORD(nr);
> > +        unsigned int old = *p;
> >  
> >          *p = old & ~mask;
> >          return (old & mask) != 0;
> > @@ -78,10 +79,10 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr)
> >  static inline int __test_and_change_bit(int nr,
> >                                              volatile void *addr)
> >  {
> > -        unsigned long mask = BIT_MASK(nr);
> > -        volatile unsigned long *p =
> > -                ((volatile unsigned long *)addr) + BIT_WORD(nr);
> > -        unsigned long old = *p;
> > +        unsigned int mask = BIT_MASK(nr);
> > +        volatile unsigned int *p =
> > +                ((volatile unsigned int *)addr) + BIT_WORD(nr);
> > +        unsigned int old = *p;
> >  
> >          *p = old ^ mask;
> >          return (old & mask) != 0;
> > @@ -94,8 +95,8 @@ static inline int __test_and_change_bit(int nr,
> >   */
> >  static inline int test_bit(int nr, const volatile void *addr)
> >  {
> > -        const volatile unsigned long *p = (const volatile unsigned long *)addr;
> > -        return 1UL & (p[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
> > +        const volatile unsigned int *p = (const volatile unsigned int *)addr;
> > +        return 1UL & (p[BIT_WORD(nr)] >> (nr & (BITS_PER_WORD-1)));
> >  }
> >  
> >  static inline int constant_fls(int x)
> 
>
Ian Campbell May 12, 2014, 8:36 a.m. UTC | #3
On Sun, 2014-05-11 at 19:34 +0100, Stefano Stabellini wrote:
> On Fri, 9 May 2014, Ian Campbell wrote:
> > (Just adding the other ARM guys...)
> > 
> > On Fri, 2014-05-09 at 14:24 +0100, Ian Campbell wrote:
> > > On Tue, 2014-04-29 at 09:58 +0100, Ian Campbell wrote:
> > > > On Tue, 2014-04-29 at 08:38 +0100, Vladimir Murzin wrote:
> > > > > On Mon, Apr 28, 2014 at 11:43 AM, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> > > > > > But I also wanted confirmation that the problematic instruction was
> > > > > > generated by gcc and not by some handcoded asm somewhere which we hadn't
> > > > > > properly fixed.
> > > > 
> > > > > I believe it comes from test_bit (xen/include/asm-arm/bitops.h).
> > > > 
> > > > Ah, then I think this code needs fixing too. Probably switching to
> > > > unsigned int * throughout would work, what do you think?
> > > 
> > > I finally managed to upgrade to a new enough kernel to trigger this.
> > > 
> > > This Works For Me(tm), along with the Linux patch "xen/events/fifo:
> > > correctly align bitops" which is queued for 3.15 Linus (but not sent
> > > yet?)
> 
> The change looks good to me.

Are you acking it?
Stefano Stabellini May 12, 2014, 9:45 a.m. UTC | #4
On Mon, 12 May 2014, Ian Campbell wrote:
> On Sun, 2014-05-11 at 19:34 +0100, Stefano Stabellini wrote:
> > On Fri, 9 May 2014, Ian Campbell wrote:
> > > (Just adding the other ARM guys...)
> > > 
> > > On Fri, 2014-05-09 at 14:24 +0100, Ian Campbell wrote:
> > > > On Tue, 2014-04-29 at 09:58 +0100, Ian Campbell wrote:
> > > > > On Tue, 2014-04-29 at 08:38 +0100, Vladimir Murzin wrote:
> > > > > > On Mon, Apr 28, 2014 at 11:43 AM, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> > > > > > > But I also wanted confirmation that the problematic instruction was
> > > > > > > generated by gcc and not by some handcoded asm somewhere which we hadn't
> > > > > > > properly fixed.
> > > > > 
> > > > > > I believe it comes from test_bit (xen/include/asm-arm/bitops.h).
> > > > > 
> > > > > Ah, then I think this code needs fixing too. Probably switching to
> > > > > unsigned int * throughout would work, what do you think?
> > > > 
> > > > I finally managed to upgrade to a new enough kernel to trigger this.
> > > > 
> > > > This Works For Me(tm), along with the Linux patch "xen/events/fifo:
> > > > correctly align bitops" which is queued for 3.15 Linus (but not sent
> > > > yet?)
> > 
> > The change looks good to me.
> 
> Are you acking it?

Yes
Julien Grall May 12, 2014, 12:15 p.m. UTC | #5
Hi Ian,

On 05/09/2014 05:19 PM, Ian Campbell wrote:
> (Just adding the other ARM guys...)
> 
> On Fri, 2014-05-09 at 14:24 +0100, Ian Campbell wrote:
>> On Tue, 2014-04-29 at 09:58 +0100, Ian Campbell wrote:
>>> On Tue, 2014-04-29 at 08:38 +0100, Vladimir Murzin wrote:
>>>> On Mon, Apr 28, 2014 at 11:43 AM, Ian Campbell <Ian.Campbell@citrix.com> wrote:
>>>>> But I also wanted confirmation that the problematic instruction was
>>>>> generated by gcc and not by some handcoded asm somewhere which we hadn't
>>>>> properly fixed.
>>>
>>>> I believe it comes from test_bit (xen/include/asm-arm/bitops.h).
>>>
>>> Ah, then I think this code needs fixing too. Probably switching to
>>> unsigned int * throughout would work, what do you think?
>>
>> I finally managed to upgrade to a new enough kernel to trigger this.
>>
>> This Works For Me(tm), along with the Linux patch "xen/events/fifo:
>> correctly align bitops" which is queued for 3.15 Linus (but not sent
>> yet?)
>>
>> 8<-------------------
>>
>> From aa6afe6520ea22241fb0ce430ef315c49a73867f Mon Sep 17 00:00:00 2001
>> From: Ian Campbell <ian.campbell@citrix.com>
>> Date: Thu, 8 May 2014 16:13:55 +0100
>> Subject: [PATCH] xen: arm: bitops take unsigned int
>>
>> Xen bitmaps can be 4 rather than 8 byte aligned, so use the appropriate type.
>> Otherwise the compiler can generate unaligned 8 byte accesses and cause traps.
>>
>> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
>> ---
>>  xen/include/asm-arm/bitops.h |   37 +++++++++++++++++++------------------
>>  1 file changed, 19 insertions(+), 18 deletions(-)
>>
>> diff --git a/xen/include/asm-arm/bitops.h b/xen/include/asm-arm/bitops.h
>> index 0a7caee..25f96c8 100644
>> --- a/xen/include/asm-arm/bitops.h
>> +++ b/xen/include/asm-arm/bitops.h
>> @@ -18,13 +18,14 @@
>>  #define __set_bit(n,p)            set_bit(n,p)
>>  #define __clear_bit(n,p)          clear_bit(n,p)
>>  
>> +#define BITS_PER_WORD           32

Can you define BITS_PER_WORD in asm-arm/config.h rather than here?

Regards,
Ian Campbell May 12, 2014, 12:18 p.m. UTC | #6
On Mon, 2014-05-12 at 13:15 +0100, Julien Grall wrote:
> Hi Ian,
> 
> On 05/09/2014 05:19 PM, Ian Campbell wrote:
> > (Just adding the other ARM guys...)
> > 
> > On Fri, 2014-05-09 at 14:24 +0100, Ian Campbell wrote:
> >> On Tue, 2014-04-29 at 09:58 +0100, Ian Campbell wrote:
> >>> On Tue, 2014-04-29 at 08:38 +0100, Vladimir Murzin wrote:
> >>>> On Mon, Apr 28, 2014 at 11:43 AM, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> >>>>> But I also wanted confirmation that the problematic instruction was
> >>>>> generated by gcc and not by some handcoded asm somewhere which we hadn't
> >>>>> properly fixed.
> >>>
> >>>> I believe it comes from test_bit (xen/include/asm-arm/bitops.h).
> >>>
> >>> Ah, then I think this code needs fixing too. Probably switching to
> >>> unsigned int * throughout would work, what do you think?
> >>
> >> I finally managed to upgrade to a new enough kernel to trigger this.
> >>
> >> This Works For Me(tm), along with the Linux patch "xen/events/fifo:
> >> correctly align bitops" which is queued for 3.15 Linus (but not sent
> >> yet?)
> >>
> >> 8<-------------------
> >>
> >> From aa6afe6520ea22241fb0ce430ef315c49a73867f Mon Sep 17 00:00:00 2001
> >> From: Ian Campbell <ian.campbell@citrix.com>
> >> Date: Thu, 8 May 2014 16:13:55 +0100
> >> Subject: [PATCH] xen: arm: bitops take unsigned int
> >>
> >> Xen bitmaps can be 4 rather than 8 byte aligned, so use the appropriate type.
> >> Otherwise the compiler can generate unaligned 8 byte accesses and cause traps.
> >>
> >> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> >> ---
> >>  xen/include/asm-arm/bitops.h |   37 +++++++++++++++++++------------------
> >>  1 file changed, 19 insertions(+), 18 deletions(-)
> >>
> >> diff --git a/xen/include/asm-arm/bitops.h b/xen/include/asm-arm/bitops.h
> >> index 0a7caee..25f96c8 100644
> >> --- a/xen/include/asm-arm/bitops.h
> >> +++ b/xen/include/asm-arm/bitops.h
> >> @@ -18,13 +18,14 @@
> >>  #define __set_bit(n,p)            set_bit(n,p)
> >>  #define __clear_bit(n,p)          clear_bit(n,p)
> >>  
> >> +#define BITS_PER_WORD           32
> 
> Can you define BITS_PER_WORD in asm-arm/config.h rather than here?

For better or worse BITS_PER_BYTE is already defined in bitops.h and
since I've already run the majority of my pre-push commit checks on a
branch containing this fix (along with some other bits and bobs) I'm not
inclined to restart that process just for this change.

Ian.
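
For reference only, a hypothetical sketch of Julien's suggestion; this is
not what was committed, the define stayed in bitops.h as posted above:

/* xen/include/asm-arm/config.h: hypothetical placement, not applied */
#define BITS_PER_WORD           32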
Julien Grall May 12, 2014, 1:44 p.m. UTC | #7
On 05/12/2014 01:18 PM, Ian Campbell wrote:
> On Mon, 2014-05-12 at 13:15 +0100, Julien Grall wrote:
>> Hi Ian,
>>
>> On 05/09/2014 05:19 PM, Ian Campbell wrote:
>>> (Just adding the other ARM guys...)
>>>
>>> On Fri, 2014-05-09 at 14:24 +0100, Ian Campbell wrote:
>>>> On Tue, 2014-04-29 at 09:58 +0100, Ian Campbell wrote:
>>>>> On Tue, 2014-04-29 at 08:38 +0100, Vladimir Murzin wrote:
>>>>>> On Mon, Apr 28, 2014 at 11:43 AM, Ian Campbell <Ian.Campbell@citrix.com> wrote:
>>>>>>> But I also wanted confirmation that the problematic instruction was
>>>>>>> generated by gcc and not by some handcoded asm somewhere which we hadn't
>>>>>>> properly fixed.
>>>>>
>>>>>> I believe it comes from test_bit (xen/include/asm-arm/bitops.h).
>>>>>
>>>>> Ah, then I think this code needs fixing too. Probably switching to
>>>>> unsigned int * throughout would work, what do you think?
>>>>
>>>> I finally managed to upgrade to a new enough kernel to trigger this.
>>>>
>>>> This Works For Me(tm), along with the Linux patch "xen/events/fifo:
>>>> correctly align bitops" which is queued for 3.15 Linus (but not sent
>>>> yet?)
>>>>
>>>> 8<-------------------
>>>>
>>>> From aa6afe6520ea22241fb0ce430ef315c49a73867f Mon Sep 17 00:00:00 2001
>>>> From: Ian Campbell <ian.campbell@citrix.com>
>>>> Date: Thu, 8 May 2014 16:13:55 +0100
>>>> Subject: [PATCH] xen: arm: bitops take unsigned int
>>>>
>>>> Xen bitmaps can be 4 rather than 8 byte aligned, so use the appropriate type.
>>>> Otherwise the compiler can generate unaligned 8 byte accesses and cause traps.
>>>>
>>>> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
>>>> ---
>>>>  xen/include/asm-arm/bitops.h |   37 +++++++++++++++++++------------------
>>>>  1 file changed, 19 insertions(+), 18 deletions(-)
>>>>
>>>> diff --git a/xen/include/asm-arm/bitops.h b/xen/include/asm-arm/bitops.h
>>>> index 0a7caee..25f96c8 100644
>>>> --- a/xen/include/asm-arm/bitops.h
>>>> +++ b/xen/include/asm-arm/bitops.h
>>>> @@ -18,13 +18,14 @@
>>>>  #define __set_bit(n,p)            set_bit(n,p)
>>>>  #define __clear_bit(n,p)          clear_bit(n,p)
>>>>  
>>>> +#define BITS_PER_WORD           32
>>
>> Can you define BITS_PER_WORD in asm-arm/config.h rather than here?
> 
> For better or worse BITS_PER_BYTE is already defined in bitops.h and
> since I've already run the majority of my pre-push commit checks on a
> branch containing this fix (along with some other bits and bobs) I'm not
> inclined to restart that process just for this change.

No problem. I guess you plan to backport this patch for Xen 4.4?

Regards,
Ian Campbell May 12, 2014, 2:08 p.m. UTC | #8
On Mon, 2014-05-12 at 14:44 +0100, Julien Grall wrote:
> I guess you plan to backport this patch for Xen 4.4?

Yes.
Ian.

Patch

diff --git a/xen/include/asm-arm/bitops.h b/xen/include/asm-arm/bitops.h
index 0a7caee..25f96c8 100644
--- a/xen/include/asm-arm/bitops.h
+++ b/xen/include/asm-arm/bitops.h
@@ -18,13 +18,14 @@ 
 #define __set_bit(n,p)            set_bit(n,p)
 #define __clear_bit(n,p)          clear_bit(n,p)
 
+#define BITS_PER_WORD           32
 #define BIT(nr)                 (1UL << (nr))
-#define BIT_MASK(nr)            (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr)            ((nr) / BITS_PER_LONG)
+#define BIT_MASK(nr)            (1UL << ((nr) % BITS_PER_WORD))
+#define BIT_WORD(nr)            ((nr) / BITS_PER_WORD)
 #define BITS_PER_BYTE           8
 
-#define ADDR (*(volatile long *) addr)
-#define CONST_ADDR (*(const volatile long *) addr)
+#define ADDR (*(volatile int *) addr)
+#define CONST_ADDR (*(const volatile int *) addr)
 
 #if defined(CONFIG_ARM_32)
 # include <asm/arm32/bitops.h>
@@ -45,10 +46,10 @@ 
  */
 static inline int __test_and_set_bit(int nr, volatile void *addr)
 {
-        unsigned long mask = BIT_MASK(nr);
-        volatile unsigned long *p =
-                ((volatile unsigned long *)addr) + BIT_WORD(nr);
-        unsigned long old = *p;
+        unsigned int mask = BIT_MASK(nr);
+        volatile unsigned int *p =
+                ((volatile unsigned int *)addr) + BIT_WORD(nr);
+        unsigned int old = *p;
 
         *p = old | mask;
         return (old & mask) != 0;
@@ -65,10 +66,10 @@  static inline int __test_and_set_bit(int nr, volatile void *addr)
  */
 static inline int __test_and_clear_bit(int nr, volatile void *addr)
 {
-        unsigned long mask = BIT_MASK(nr);
-        volatile unsigned long *p =
-                ((volatile unsigned long *)addr) + BIT_WORD(nr);
-        unsigned long old = *p;
+        unsigned int mask = BIT_MASK(nr);
+        volatile unsigned int *p =
+                ((volatile unsigned int *)addr) + BIT_WORD(nr);
+        unsigned int old = *p;
 
         *p = old & ~mask;
         return (old & mask) != 0;
@@ -78,10 +79,10 @@  static inline int __test_and_clear_bit(int nr, volatile void *addr)
 static inline int __test_and_change_bit(int nr,
                                             volatile void *addr)
 {
-        unsigned long mask = BIT_MASK(nr);
-        volatile unsigned long *p =
-                ((volatile unsigned long *)addr) + BIT_WORD(nr);
-        unsigned long old = *p;
+        unsigned int mask = BIT_MASK(nr);
+        volatile unsigned int *p =
+                ((volatile unsigned int *)addr) + BIT_WORD(nr);
+        unsigned int old = *p;
 
         *p = old ^ mask;
         return (old & mask) != 0;
@@ -94,8 +95,8 @@  static inline int __test_and_change_bit(int nr,
  */
 static inline int test_bit(int nr, const volatile void *addr)
 {
-        const volatile unsigned long *p = (const volatile unsigned long *)addr;
-        return 1UL & (p[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+        const volatile unsigned int *p = (const volatile unsigned int *)addr;
+        return 1UL & (p[BIT_WORD(nr)] >> (nr & (BITS_PER_WORD-1)));
 }
 
 static inline int constant_fls(int x)
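
A hedged, standalone usage sketch (not part of the patch): the helpers
below mirror the patched word-based definitions so they can be compiled
and exercised outside Xen. The demo_ names and the main() harness are
illustrative only.

#include <assert.h>
#include <stdint.h>

#define BITS_PER_WORD 32
#define BIT_MASK(nr)  (1UL << ((nr) % BITS_PER_WORD))
#define BIT_WORD(nr)  ((nr) / BITS_PER_WORD)

/* Same shape as the patched __test_and_set_bit(): 32-bit accesses only. */
static inline int demo_test_and_set_bit(int nr, volatile void *addr)
{
    unsigned int mask = BIT_MASK(nr);
    volatile unsigned int *p = ((volatile unsigned int *)addr) + BIT_WORD(nr);
    unsigned int old = *p;

    *p = old | mask;
    return (old & mask) != 0;
}

/* Same shape as the patched test_bit(). */
static inline int demo_test_bit(int nr, const volatile void *addr)
{
    const volatile unsigned int *p = (const volatile unsigned int *)addr;
    return 1U & (p[BIT_WORD(nr)] >> (nr & (BITS_PER_WORD - 1)));
}

int main(void)
{
    uint32_t bitmap[4] = { 0 };   /* only 4-byte alignment is required */

    assert(demo_test_and_set_bit(33, bitmap) == 0);  /* bit 1 of word 1 */
    assert(demo_test_bit(33, bitmap) == 1);
    assert(bitmap[1] == 0x2u);
    return 0;
}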