
arm64: LLVMLinux: Fix inline arm64 assembly for use with clang

Message ID 1409959460-15989-1-git-send-email-behanw@converseincode.com
State New

Commit Message

Behan Webster Sept. 5, 2014, 11:24 p.m. UTC
From: Mark Charlebois <charlebm@gmail.com>

Fix variable types for 64-bit inline assembly.

This patch now works with both gcc and clang.

Signed-off-by: Mark Charlebois <charlebm@gmail.com>
Signed-off-by: Behan Webster <behanw@converseincode.com>
---
 arch/arm64/include/asm/arch_timer.h | 26 +++++++++++++++-----------
 arch/arm64/include/asm/uaccess.h    |  2 +-
 arch/arm64/kernel/debug-monitors.c  |  8 ++++----
 arch/arm64/kernel/perf_event.c      | 34 +++++++++++++++++-----------------
 arch/arm64/mm/mmu.c                 |  2 +-
 5 files changed, 38 insertions(+), 34 deletions(-)

Comments

Olof Johansson Sept. 8, 2014, 9:30 a.m. UTC | #1
On Fri, Sep 05, 2014 at 04:24:20PM -0700, behanw@converseincode.com wrote:
> From: Mark Charlebois <charlebm@gmail.com>
> 
> Fix variable types for 64-bit inline assembly.
> 
> This patch now works with both gcc and clang.
> 
> Signed-off-by: Mark Charlebois <charlebm@gmail.com>
> Signed-off-by: Behan Webster <behanw@converseincode.com>
> ---
>  arch/arm64/include/asm/arch_timer.h | 26 +++++++++++++++-----------
>  arch/arm64/include/asm/uaccess.h    |  2 +-
>  arch/arm64/kernel/debug-monitors.c  |  8 ++++----
>  arch/arm64/kernel/perf_event.c      | 34 +++++++++++++++++-----------------
>  arch/arm64/mm/mmu.c                 |  2 +-
>  5 files changed, 38 insertions(+), 34 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
> index 9400596..c1f87e0 100644
> --- a/arch/arm64/include/asm/arch_timer.h
> +++ b/arch/arm64/include/asm/arch_timer.h
> @@ -37,19 +37,23 @@ void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
>  	if (access == ARCH_TIMER_PHYS_ACCESS) {
>  		switch (reg) {
>  		case ARCH_TIMER_REG_CTRL:
> -			asm volatile("msr cntp_ctl_el0,  %0" : : "r" (val));
> +			asm volatile("msr cntp_ctl_el0,  %0"
> +				: : "r" ((u64)val));

Ick. Care to elaborate in the patch description why this is needed with
LLVM? It's really messy and very annoying having to cast register values
every time they're passed in, instead of the compiler handling it for you.

Is there a way to catch this with GCC? If not, I expect you to get broken
all the time on this by people who don't notice.



-Olof
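
One way to keep the casts out of the call sites would be to widen the value
once inside a helper. A minimal sketch, purely for illustration (msr_write
and mrs_read are made-up names, not kernel API, and not part of this patch):

/* Hypothetical helpers: do the u64 widening in one place, so callers
 * can pass u32 values without casting at every asm statement. */
#define msr_write(reg, v) do {					\
	u64 __val = (u64)(v);					\
	asm volatile("msr " #reg ", %0" : : "r" (__val));	\
} while (0)

#define mrs_read(reg) ({					\
	u64 __val;						\
	asm volatile("mrs %0, " #reg : "=r" (__val));		\
	__val;							\
})

With something along those lines, arch_timer_set_cntkctl() would shrink to
msr_write(cntkctl_el1, cntkctl), and the (u64) cast would live in exactly
one place where a reviewer can see why it is there.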
Will Deacon Sept. 8, 2014, 10:53 a.m. UTC | #2
On Sat, Sep 06, 2014 at 12:24:20AM +0100, behanw@converseincode.com wrote:
> From: Mark Charlebois <charlebm@gmail.com>
> 
> Fix variable types for 64-bit inline assembly.
> 
> This patch now works with both gcc and clang.

Really? This looks like something clang needs to do better on, as I
really don't see people adding these casts to future code. They're ugly and
redundant (for GCC).

This hunk:

> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index c555672..6894ef3 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -94,7 +94,7 @@ static int __init early_cachepolicy(char *p)
>  	 */
>  	asm volatile(
>  	"	mrs	%0, mair_el1\n"
> -	"	bfi	%0, %1, #%2, #8\n"
> +	"	bfi	%0, %1, %2, #8\n"
>  	"	msr	mair_el1, %0\n"
>  	"	isb\n"
>  	: "=&r" (tmp)

also looks fishy. Does gas accept that without the '#' prefix?

Will
Mark Charlebois Sept. 8, 2014, 6:35 p.m. UTC | #3
When I compile

int main()
{
        u64 foo, tmp;

        // This works for both clang and gcc
        asm volatile(
        "       mrs     %0, mair_el1\n"
        "       bfi     %0, %1, %2, #8\n"
        "       msr     mair_el1, %0\n"
        "       isb\n"
        : "=&r" (tmp)
        : "r" (foo), "i" (MT_NORMAL * 8));
}

with clang I get:

00000000004004f0 <main>:
  4004f0: d538a208 mrs x8, mair_el1
  4004f4: b3601d08 bfi x8, x8, #32, #8
  4004f8: d518a208 msr mair_el1, x8
  4004fc: d5033fdf isb
  400500: 2a1f03e0 mov w0, wzr
  400504: d65f03c0 ret

When I compile it with GCC I get:

0000000000400510 <main>:
  400510: d10043ff sub sp, sp, #0x10
  400514: f94003e1 ldr x1, [sp]
  400518: d538a200 mrs x0, mair_el1
  40051c: b3601c20 bfi x0, x1, #32, #8
  400520: d518a200 msr mair_el1, x0
  400524: d5033fdf isb
  400528: f90007e0 str x0, [sp,#8]
  40052c: 910043ff add sp, sp, #0x10
  400530: d65f03c0 ret

When I compile

int main()
{
        u64 foo, tmp;

       // This fails for clang but not gcc
        asm volatile(
        "       mrs     %0, mair_el1\n"
        "       bfi     %0, %1, #%2, #8\n"
        "       msr     mair_el1, %0\n"
        "       isb\n"
        : "=&r" (tmp)
        : "r" (foo), "i" (MT_NORMAL * 8));
}

Clang fails and GCC generates:

00000000004004f0 <main>:
  4004f0: d538a208 mrs x8, mair_el1
  4004f4: b3601d08 bfi x8, x8, #32, #8
  4004f8: d518a208 msr mair_el1, x8
  4004fc: d5033fdf isb
  400500: 2a1f03e0 mov w0, wzr
  400504: d65f03c0 ret
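
For completeness, the test programs above build stand-alone given two
definitions that kernel headers normally provide (MT_NORMAL = 4 is implied
by the #32 lsb in the bfi disassembly):

typedef unsigned long long u64;
#define MT_NORMAL 4	/* so MT_NORMAL * 8 == 32, matching the bfi lsb above */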

On Mon, Sep 8, 2014 at 3:53 AM, Will Deacon <will.deacon@arm.com> wrote:
> On Sat, Sep 06, 2014 at 12:24:20AM +0100, behanw@converseincode.com wrote:
>> From: Mark Charlebois <charlebm@gmail.com>
>>
>> Fix variable types for 64-bit inline assembly.
>>
>> This patch now works with both gcc and clang.
>
> Really? This looks like something clang needs to do better on, as I
> really don't see people adding these casts to future code. They're ugly and
> redundant (for GCC).
>
> This hunk:
>
>> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
>> index c555672..6894ef3 100644
>> --- a/arch/arm64/mm/mmu.c
>> +++ b/arch/arm64/mm/mmu.c
>> @@ -94,7 +94,7 @@ static int __init early_cachepolicy(char *p)
>>        */
>>       asm volatile(
>>       "       mrs     %0, mair_el1\n"
>> -     "       bfi     %0, %1, #%2, #8\n"
>> +     "       bfi     %0, %1, %2, #8\n"
>>       "       msr     mair_el1, %0\n"
>>       "       isb\n"
>>       : "=&r" (tmp)
>
> also looks fishy. Does gas accept that without the '#' prefix?
>
> Will
Will Deacon Sept. 9, 2014, 10:15 a.m. UTC | #4
On Mon, Sep 08, 2014 at 07:35:47PM +0100, Mark Charlebois wrote:
> When I compile
> 
> int main()
> {
>         u64 foo, tmp;
> 
>        // This fails for clang but not gcc
>         asm volatile(
>         "       mrs     %0, mair_el1\n"
>         "       bfi     %0, %1, #%2, #8\n"
>         "       msr     mair_el1, %0\n"
>         "       isb\n"
>         : "=&r" (tmp)
>         : "r" (foo), "i" (MT_NORMAL * 8));
> }
> 
> Clang fails and GCC generates:
> 
> 00000000004004f0 <main>:
>   4004f0: d538a208 mrs x8, mair_el1
>   4004f4: b3601d08 bfi x8, x8, #32, #8
>   4004f8: d518a208 msr mair_el1, x8
>   4004fc: d5033fdf isb
>   400500: 2a1f03e0 mov w0, wzr
>   400504: d65f03c0 ret

Ok, so we can just drop the '#' prefix as it probably shouldn't be there
anyway. Can you send a patch making just that change, please?

Will
Will Deacon Sept. 10, 2014, 5:38 p.m. UTC | #5
On Mon, Sep 08, 2014 at 10:30:51AM +0100, Olof Johansson wrote:
> On Fri, Sep 05, 2014 at 04:24:20PM -0700, behanw@converseincode.com wrote:
> > From: Mark Charlebois <charlebm@gmail.com>
> > 
> > Fix variable types for 64-bit inline assembly.
> > 
> > This patch now works with both gcc and clang.
> > 
> > Signed-off-by: Mark Charlebois <charlebm@gmail.com>
> > Signed-off-by: Behan Webster <behanw@converseincode.com>
> > ---
> >  arch/arm64/include/asm/arch_timer.h | 26 +++++++++++++++-----------
> >  arch/arm64/include/asm/uaccess.h    |  2 +-
> >  arch/arm64/kernel/debug-monitors.c  |  8 ++++----
> >  arch/arm64/kernel/perf_event.c      | 34 +++++++++++++++++-----------------
> >  arch/arm64/mm/mmu.c                 |  2 +-
> >  5 files changed, 38 insertions(+), 34 deletions(-)
> > 
> > diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
> > index 9400596..c1f87e0 100644
> > --- a/arch/arm64/include/asm/arch_timer.h
> > +++ b/arch/arm64/include/asm/arch_timer.h
> > @@ -37,19 +37,23 @@ void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
> >  	if (access == ARCH_TIMER_PHYS_ACCESS) {
> >  		switch (reg) {
> >  		case ARCH_TIMER_REG_CTRL:
> > -			asm volatile("msr cntp_ctl_el0,  %0" : : "r" (val));
> > +			asm volatile("msr cntp_ctl_el0,  %0"
> > +				: : "r" ((u64)val));
> 
> Ick. Care to elaborate in the patch description why this is needed with
> LLVM? It's really messy and very annoying having to cast register values
> every time they're passed in, instead of the compiler handling it for you.
> 
> Is there a way to catch this with GCC? If not, I expect you to get broken
> all the time on this by people who don't notice.

Question to the clang people (Clangers?): what happens if the %0 above is
rewritten as %x0 and the cast on val is dropped? I could stomach a change
adding that, but it's still likely to regress without regular build testing.

Will
Ard Biesheuvel Sept. 10, 2014, 5:49 p.m. UTC | #6
On 10 September 2014 19:38, Will Deacon <will.deacon@arm.com> wrote:
> On Mon, Sep 08, 2014 at 10:30:51AM +0100, Olof Johansson wrote:
>> On Fri, Sep 05, 2014 at 04:24:20PM -0700, behanw@converseincode.com wrote:
>> > From: Mark Charlebois <charlebm@gmail.com>
>> >
>> > Fix variable types for 64-bit inline assembly.
>> >
>> > This patch now works with both gcc and clang.
>> >
>> > Signed-off-by: Mark Charlebois <charlebm@gmail.com>
>> > Signed-off-by: Behan Webster <behanw@converseincode.com>
>> > ---
>> >  arch/arm64/include/asm/arch_timer.h | 26 +++++++++++++++-----------
>> >  arch/arm64/include/asm/uaccess.h    |  2 +-
>> >  arch/arm64/kernel/debug-monitors.c  |  8 ++++----
>> >  arch/arm64/kernel/perf_event.c      | 34 +++++++++++++++++-----------------
>> >  arch/arm64/mm/mmu.c                 |  2 +-
>> >  5 files changed, 38 insertions(+), 34 deletions(-)
>> >
>> > diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
>> > index 9400596..c1f87e0 100644
>> > --- a/arch/arm64/include/asm/arch_timer.h
>> > +++ b/arch/arm64/include/asm/arch_timer.h
>> > @@ -37,19 +37,23 @@ void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
>> >     if (access == ARCH_TIMER_PHYS_ACCESS) {
>> >             switch (reg) {
>> >             case ARCH_TIMER_REG_CTRL:
>> > -                   asm volatile("msr cntp_ctl_el0,  %0" : : "r" (val));
>> > +                   asm volatile("msr cntp_ctl_el0,  %0"
>> > +                           : : "r" ((u64)val));
>>
>> Ick. Care to elaborate in the patch description why this is needed with
>> LLVM? It's really messy and very annoying having to cast register values
>> every time they're passed in, instead of the compiler handling it for you.
>>
>> Is there a way to catch this with GCC? If not, I expect you to get broken
>> all the time on this by people who don't notice.
>
> Question to the clang people (Clangers?): what happens if the %0 above is
> rewritten as %x0 and the cast on val is dropped? I could stomach a change
> adding that, but it's still likely to regress without regular build testing.
>

I did a quick test with Clang, and indeed, it infers the register width
from the size of the operand, but it also supports the explicit %x0 and
%w0 operand modifiers.
So in this particular case, we could work around it in this way.

However, I think this uncovers a much more serious issue: while we
catch the problem here because msr simply does not support 32-bit
operands, most other instructions do, and we have no idea how the
Clang-generated code deviates from what GCC produces. Or am I being
paranoid here?
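
Concretely, the %x0 workaround would look something like this at one call
site (a sketch, not part of the posted patch):

static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
	/* The x modifier forces the 64-bit register name, so the
	 * u32 operand needs no (u64) cast under either compiler. */
	asm volatile("msr	cntkctl_el1, %x0" : : "r" (cntkctl));
}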

Patch

diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 9400596..c1f87e0 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -37,19 +37,23 @@  void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
 	if (access == ARCH_TIMER_PHYS_ACCESS) {
 		switch (reg) {
 		case ARCH_TIMER_REG_CTRL:
-			asm volatile("msr cntp_ctl_el0,  %0" : : "r" (val));
+			asm volatile("msr cntp_ctl_el0,  %0"
+				: : "r" ((u64)val));
 			break;
 		case ARCH_TIMER_REG_TVAL:
-			asm volatile("msr cntp_tval_el0, %0" : : "r" (val));
+			asm volatile("msr cntp_tval_el0, %0"
+				: : "r" ((u64)val));
 			break;
 		}
 	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
 		switch (reg) {
 		case ARCH_TIMER_REG_CTRL:
-			asm volatile("msr cntv_ctl_el0,  %0" : : "r" (val));
+			asm volatile("msr cntv_ctl_el0,  %0"
+				: : "r" ((u64)val));
 			break;
 		case ARCH_TIMER_REG_TVAL:
-			asm volatile("msr cntv_tval_el0, %0" : : "r" (val));
+			asm volatile("msr cntv_tval_el0, %0"
+				: : "r" ((u64)val));
 			break;
 		}
 	}
@@ -60,7 +64,7 @@  void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
 static __always_inline
 u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
 {
-	u32 val;
+	u64 val;
 
 	if (access == ARCH_TIMER_PHYS_ACCESS) {
 		switch (reg) {
@@ -82,26 +86,26 @@  u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
 		}
 	}
 
-	return val;
+	return (u32)val;
 }
 
 static inline u32 arch_timer_get_cntfrq(void)
 {
-	u32 val;
+	u64 val;
 	asm volatile("mrs %0,   cntfrq_el0" : "=r" (val));
-	return val;
+	return (u32)val;
 }
 
 static inline u32 arch_timer_get_cntkctl(void)
 {
-	u32 cntkctl;
+	u64 cntkctl;
 	asm volatile("mrs	%0, cntkctl_el1" : "=r" (cntkctl));
-	return cntkctl;
+	return (u32)cntkctl;
 }
 
 static inline void arch_timer_set_cntkctl(u32 cntkctl)
 {
-	asm volatile("msr	cntkctl_el1, %0" : : "r" (cntkctl));
+	asm volatile("msr	cntkctl_el1, %0" : : "r" ((u64)cntkctl));
 }
 
 static inline void arch_counter_set_user_access(void)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 3bf8f4e..104719b 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -93,7 +93,7 @@  static inline void set_fs(mm_segment_t fs)
 	__chk_user_ptr(addr);						\
 	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
 		: "=&r" (flag), "=&r" (roksum)				\
-		: "1" (addr), "Ir" (size),				\
+		: "1" (addr), "r" ((u64)size),				\
 		  "r" (current_thread_info()->addr_limit)		\
 		: "cc");						\
 	flag;								\
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index b056369..695a18f 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -43,15 +43,15 @@  static void mdscr_write(u32 mdscr)
 {
 	unsigned long flags;
 	local_dbg_save(flags);
-	asm volatile("msr mdscr_el1, %0" :: "r" (mdscr));
+	asm volatile("msr mdscr_el1, %0" : : "r" ((u64)mdscr));
 	local_dbg_restore(flags);
 }
 
 static u32 mdscr_read(void)
 {
-	u32 mdscr;
+	u64 mdscr;
 	asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr));
-	return mdscr;
+	return (u32)mdscr;
 }
 
 /*
@@ -127,7 +127,7 @@  void disable_debug_monitors(enum debug_el el)
  */
 static void clear_os_lock(void *unused)
 {
-	asm volatile("msr oslar_el1, %0" : : "r" (0));
+	asm volatile("msr oslar_el1, %0" : : "r" ((u64)0));
 }
 
 static int os_lock_notify(struct notifier_block *self,
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index baf5afb..f2e399c 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -844,16 +844,16 @@  static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 
 static inline u32 armv8pmu_pmcr_read(void)
 {
-	u32 val;
+	u64 val;
 	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
-	return val;
+	return (u32)val;
 }
 
 static inline void armv8pmu_pmcr_write(u32 val)
 {
 	val &= ARMV8_PMCR_MASK;
 	isb();
-	asm volatile("msr pmcr_el0, %0" :: "r" (val));
+	asm volatile("msr pmcr_el0, %0" : : "r" ((u64)val));
 }
 
 static inline int armv8pmu_has_overflowed(u32 pmovsr)
@@ -893,7 +893,7 @@  static inline int armv8pmu_select_counter(int idx)
 	}
 
 	counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmselr_el0, %0" :: "r" (counter));
+	asm volatile("msr pmselr_el0, %0" : : "r" ((u64)counter));
 	isb();
 
 	return idx;
@@ -901,7 +901,7 @@  static inline int armv8pmu_select_counter(int idx)
 
 static inline u32 armv8pmu_read_counter(int idx)
 {
-	u32 value = 0;
+	u64 value = 0;
 
 	if (!armv8pmu_counter_valid(idx))
 		pr_err("CPU%u reading wrong counter %d\n",
@@ -911,7 +911,7 @@  static inline u32 armv8pmu_read_counter(int idx)
 	else if (armv8pmu_select_counter(idx) == idx)
 		asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));
 
-	return value;
+	return (u32)value;
 }
 
 static inline void armv8pmu_write_counter(int idx, u32 value)
@@ -920,16 +920,16 @@  static inline void armv8pmu_write_counter(int idx, u32 value)
 		pr_err("CPU%u writing wrong counter %d\n",
 			smp_processor_id(), idx);
 	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
-		asm volatile("msr pmccntr_el0, %0" :: "r" (value));
+		asm volatile("msr pmccntr_el0, %0" : : "r" ((u64)value));
 	else if (armv8pmu_select_counter(idx) == idx)
-		asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
+		asm volatile("msr pmxevcntr_el0, %0" : : "r" ((u64)value));
 }
 
 static inline void armv8pmu_write_evtype(int idx, u32 val)
 {
 	if (armv8pmu_select_counter(idx) == idx) {
 		val &= ARMV8_EVTYPE_MASK;
-		asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
+		asm volatile("msr pmxevtyper_el0, %0" : : "r" ((u64)val));
 	}
 }
 
@@ -944,7 +944,7 @@  static inline int armv8pmu_enable_counter(int idx)
 	}
 
 	counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
+	asm volatile("msr pmcntenset_el0, %0" : : "r" ((u64)BIT(counter)));
 	return idx;
 }
 
@@ -959,7 +959,7 @@  static inline int armv8pmu_disable_counter(int idx)
 	}
 
 	counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
+	asm volatile("msr pmcntenclr_el0, %0" : : "r" ((u64)BIT(counter)));
 	return idx;
 }
 
@@ -974,7 +974,7 @@  static inline int armv8pmu_enable_intens(int idx)
 	}
 
 	counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
+	asm volatile("msr pmintenset_el1, %0" : : "r" ((u64)BIT(counter)));
 	return idx;
 }
 
@@ -989,17 +989,17 @@  static inline int armv8pmu_disable_intens(int idx)
 	}
 
 	counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
+	asm volatile("msr pmintenclr_el1, %0" : : "r" ((u64)BIT(counter)));
 	isb();
 	/* Clear the overflow flag in case an interrupt is pending. */
-	asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
+	asm volatile("msr pmovsclr_el0, %0" : : "r" ((u64)BIT(counter)));
 	isb();
 	return idx;
 }
 
 static inline u32 armv8pmu_getreset_flags(void)
 {
-	u32 value;
+	u64 value;
 
 	/* Read */
 	asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));
@@ -1008,7 +1008,7 @@  static inline u32 armv8pmu_getreset_flags(void)
 	value &= ARMV8_OVSR_MASK;
 	asm volatile("msr pmovsclr_el0, %0" :: "r" (value));
 
-	return value;
+	return (u32)value;
 }
 
 static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx)
@@ -1217,7 +1217,7 @@  static void armv8pmu_reset(void *info)
 	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
 
 	/* Disable access from userspace. */
-	asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
+	asm volatile("msr pmuserenr_el0, %0" : : "r" ((u64)0));
 }
 
 static int armv8_pmuv3_map_event(struct perf_event *event)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c555672..6894ef3 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -94,7 +94,7 @@  static int __init early_cachepolicy(char *p)
 	 */
 	asm volatile(
 	"	mrs	%0, mair_el1\n"
-	"	bfi	%0, %1, #%2, #8\n"
+	"	bfi	%0, %1, %2, #8\n"
 	"	msr	mair_el1, %0\n"
 	"	isb\n"
 	: "=&r" (tmp)