
ARM: signal: fix armv7-m build issue in sigreturn_codes.S

Message ID 1384157427-31613-2-git-send-email-victor.kamensky@linaro.org
State New

Commit Message

vkamensky Nov. 11, 2013, 8:10 a.m. UTC
On the ARMv7-M architecture ARM instructions are not allowed, and
for this architecture CONFIG_CPU_THUMBONLY is set. Use this macro to
conditionally emit either the ARM instructions or NOPs in Thumb mode.

Signed-off-by: Victor Kamensky <victor.kamensky@linaro.org>
---
 arch/arm/kernel/sigreturn_codes.S | 25 ++++++++++++++++++-------
 1 file changed, 18 insertions(+), 7 deletions(-)

Comments

Will Deacon Nov. 11, 2013, 10:51 a.m. UTC | #1
On Mon, Nov 11, 2013 at 08:10:27AM +0000, Victor Kamensky wrote:
> In case of armv7-m architecture arm instructions are not allowed.
> For this architecture CONFIG_CPU_THUMBONLY is set. Use this macro
> to emit conditionally arm instructions or nops in thumb mode.
> 
> Signed-off-by: Victor Kamensky <victor.kamensky@linaro.org>
> ---
>  arch/arm/kernel/sigreturn_codes.S | 25 ++++++++++++++++++-------
>  1 file changed, 18 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
> index 3c5d0f2..899fb86 100644
> --- a/arch/arm/kernel/sigreturn_codes.S
> +++ b/arch/arm/kernel/sigreturn_codes.S
> @@ -41,17 +41,29 @@
>  	.arch armv4t
>  #endif
>  
> +/*
> + * In CPU_THUMBONLY kernel case arm opcodes are not allowed
> + */
> +#ifndef CONFIG_CPU_THUMBONLY

Is this THUMBONLY stuff actually destined for mainline?

> +#define ARM_INSTR(code...) .arm ; \
> +			   code
> +#else
> +#define ARM_INSTR(code...) .thumb ; \
> +			   nop ;    \
> +			   nop ;
> +#endif

Why can't you solve this with the ARM(...) and THUMB(...) macros, like we do
in places like head.S?

Will
vkamensky Nov. 12, 2013, 6:51 a.m. UTC | #2
On 11 November 2013 02:51, Will Deacon <will.deacon@arm.com> wrote:
> On Mon, Nov 11, 2013 at 08:10:27AM +0000, Victor Kamensky wrote:
>> In case of armv7-m architecture arm instructions are not allowed.
>> For this architecture CONFIG_CPU_THUMBONLY is set. Use this macro
>> to emit conditionally arm instructions or nops in thumb mode.
>>
>> Signed-off-by: Victor Kamensky <victor.kamensky@linaro.org>
>> ---
>>  arch/arm/kernel/sigreturn_codes.S | 25 ++++++++++++++++++-------
>>  1 file changed, 18 insertions(+), 7 deletions(-)
>>
>> diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
>> index 3c5d0f2..899fb86 100644
>> --- a/arch/arm/kernel/sigreturn_codes.S
>> +++ b/arch/arm/kernel/sigreturn_codes.S
>> @@ -41,17 +41,29 @@
>>       .arch armv4t
>>  #endif
>>
>> +/*
>> + * In CPU_THUMBONLY kernel case arm opcodes are not allowed
>> + */
>> +#ifndef CONFIG_CPU_THUMBONLY
>
> Is this THUMBONLY stuff actually destined for mainline?
>
>> +#define ARM_INSTR(code...) .arm ; \
>> +                        code
>> +#else
>> +#define ARM_INSTR(code...) .thumb ; \
>> +                        nop ;    \
>> +                        nop ;
>> +#endif
>
> Why can't you solve this with the ARM(...) and THUMB(...) macros, like we do
> in places like head.S?

If we pursue this route (otherwise see Dave's suggestion in [1]):
sigreturn_codes is an array of instruction snippets, indexed by
signal.c, so its layout is very important. I don't see how ARM() and
THUMB() can help us here: they conditionally include an instruction
or nothing at all, so using them would change the layout of
sigreturn_codes. The local macro I introduced emits the .arm
instructions when the kernel is not THUMBONLY, and otherwise emits
two thumb nops to fill the space, so sigreturn_codes keeps the same
layout.
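
(For reference, the ARM()/THUMB() helpers in asm/unified.h follow
roughly this pattern -- quoting from memory, so treat it as a sketch
rather than the exact definitions:

#ifdef CONFIG_THUMB2_KERNEL
#define ARM(x...)
#define THUMB(x...)	x
#else
#define ARM(x...)	x
#define THUMB(x...)
#endif

On a Thumb-only kernel the ARM() lines would simply disappear, so
every following snippet would shift and the fixed offsets signal.c
uses to index sigreturn_codes would no longer line up.)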

Thanks,
Victor

[1] http://lists.infradead.org/pipermail/linux-arm-kernel/2013-November/210631.html

> Will
Dave Martin Nov. 18, 2013, 1:39 p.m. UTC | #3
On Mon, Nov 11, 2013 at 12:10:27AM -0800, Victor Kamensky wrote:
> In case of armv7-m architecture arm instructions are not allowed.
> For this architecture CONFIG_CPU_THUMBONLY is set. Use this macro
> to emit conditionally arm instructions or nops in thumb mode.
> 
> Signed-off-by: Victor Kamensky <victor.kamensky@linaro.org>
> ---
>  arch/arm/kernel/sigreturn_codes.S | 25 ++++++++++++++++++-------
>  1 file changed, 18 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
> index 3c5d0f2..899fb86 100644
> --- a/arch/arm/kernel/sigreturn_codes.S
> +++ b/arch/arm/kernel/sigreturn_codes.S
> @@ -41,17 +41,29 @@
>  	.arch armv4t
>  #endif
>  
> +/*
> + * In CPU_THUMBONLY kernel case arm opcodes are not allowed
> + */
> +#ifndef CONFIG_CPU_THUMBONLY
> +#define ARM_INSTR(code...) .arm ; \
> +			   code
> +#else
> +#define ARM_INSTR(code...) .thumb ; \
> +			   nop ;    \
> +			   nop ;
> +#endif
> +

Maybe use .org to force the layout, instead of nop-padding,
and don't emit the ARM instructions at all in the THUMBONLY
case.

So, this becomes something like:

sigreturn_codes:

#ifndef CONFIG_CPU_THUMBONLY
	.arm
	mov	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
	swi	#(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)
#endif
	.org	sigreturn_codes + 8 + 12 * 0
	.thumb
        movs	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
        swi	#0

#ifndef CONFIG_CPU_THUMBONLY
	.arm
	mov	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
	swi	#(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)
#endif
	.org	sigreturn_codes + 8 + 12 * 1
	.thumb
	movs	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
	swi	#0

...etc.

This will just leave gaps in the right places without needing
to worry about what to fill them with (which shouldn't matter).
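
(If I remember the gas semantics right, the bytes skipped by .org are
zero-filled by default, and .org also takes an optional fill value,
e.g. something like:

	.org	sigreturn_codes + 8 + 12 * 0, 0

so whatever ends up in the gaps is at least well defined.)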

Ideally the ifdefs can be hidden with macros to make things
look a bit less ugly.  You could keep ARM_INSTR() unmodified,
and define separate macros that spit out the .org and related
stuff, like:

.macro arm_slot n
		.org	sigreturn_codes + 12 * (\n)
 ARM_INSTR(	.arm	)
.endm

.macro thumb_slot n
		.org	sigreturn_codes + 12 * (\n) + 8
		.thumb
.endm


Then you get

sigreturn_codes:

arm_slot 0
 ARM_INSTR(	mov	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)	)
 ARM_INSTR(	swi	#(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)	)
thumb_slot 0
	        movs	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
	        swi	#0

arm_slot 1
 ARM_INSTR(	mov	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)	)
 ARM_INSTR(	swi	#(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)	)
thumb_slot 1
		movs	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
		swi	#0

Still not ideal, but it might work.
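
(A further point, assuming the usual gas behaviour: .org refuses to
move the location counter backwards, so a slot overflow gets caught
at build time.  If, say, the ARM snippet before

	.org	sigreturn_codes + 8 + 12 * 0

ever grew past its 8 bytes, the assembler would error out with
something like "attempt to move .org backwards" instead of silently
shifting the layout.)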

Cheers
---Dave

>  	.section .rodata
>  	.global sigreturn_codes
>  	.type	sigreturn_codes, #object
>  
> -	.arm
> +	.align
>  
>  sigreturn_codes:
>  
>  	/* ARM sigreturn syscall code snippet */
> -	mov	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
> -	swi	#(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)
> +ARM_INSTR(mov	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE))
> +ARM_INSTR(swi	#(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
>  
>  	/* Thumb sigreturn syscall code snippet */
>  	.thumb
> @@ -59,9 +71,8 @@ sigreturn_codes:
>  	swi	#0
>  
>  	/* ARM sigreturn_rt syscall code snippet */
> -	.arm
> -	mov	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
> -	swi	#(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)
> +ARM_INSTR(mov	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE))
> +ARM_INSTR(swi	#(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
>  
>  	/* Thumb sigreturn_rt syscall code snippet */
>  	.thumb
> @@ -74,7 +85,7 @@ sigreturn_codes:
>  	 * it is thumb case or not, so we need additional
>  	 * word after real last entry.
>  	 */
> -	.arm
> +	.align
>  	.space	4
>  
>  	.size	sigreturn_codes, . - sigreturn_codes
> -- 
> 1.8.1.4
> 
> 
> _______________________________________________
> linaro-kernel mailing list
> linaro-kernel@lists.linaro.org
> http://lists.linaro.org/mailman/listinfo/linaro-kernel
vkamensky Nov. 19, 2013, 6:55 a.m. UTC | #4
On 18 November 2013 05:39, Dave Martin <Dave.Martin@arm.com> wrote:
> On Mon, Nov 11, 2013 at 12:10:27AM -0800, Victor Kamensky wrote:
>> In case of armv7-m architecture arm instructions are not allowed.
>> For this architecture CONFIG_CPU_THUMBONLY is set. Use this macro
>> to emit conditionally arm instructions or nops in thumb mode.
>>
>> Signed-off-by: Victor Kamensky <victor.kamensky@linaro.org>
>> ---
>>  arch/arm/kernel/sigreturn_codes.S | 25 ++++++++++++++++++-------
>>  1 file changed, 18 insertions(+), 7 deletions(-)
>>
>> diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
>> index 3c5d0f2..899fb86 100644
>> --- a/arch/arm/kernel/sigreturn_codes.S
>> +++ b/arch/arm/kernel/sigreturn_codes.S
>> @@ -41,17 +41,29 @@
>>       .arch armv4t
>>  #endif
>>
>> +/*
>> + * In CPU_THUMBONLY kernel case arm opcodes are not allowed
>> + */
>> +#ifndef CONFIG_CPU_THUMBONLY
>> +#define ARM_INSTR(code...) .arm ; \
>> +                        code
>> +#else
>> +#define ARM_INSTR(code...) .thumb ; \
>> +                        nop ;    \
>> +                        nop ;
>> +#endif
>> +
>
> Maybe use .org to force the layout, instead of nop-padding,
> and don't emit the ARM instructions at all in the THUMBONLY
> case.
>
> So, this becomes something like:
>
> sigreturn_codes:
>
> #ifndef CONFIG_CPU_THUMBONLY
>         .arm
>         mov     r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
>         swi     #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)
> #endif
>         .org    sigreturn_codes + 8 + 12 * 0
>         .thumb
>         movs    r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
>         swi     #0
>
> #ifndef CONFIG_CPU_THUMBONLY
>         .arm
>         mov     r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
>         swi     #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)
> #endif
>         .org    sigreturn_codes + 8 + 12 * 1
>         .thumb
>         movs    r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
>         swi     #0
>
> ...etc.
>
> This will just leave gaps in the right places without needing
> to worry about what to fill them with (which shouldn't matter).
>
> Ideally the ifdefs can be hidden with macros to make things
> look a bit less ugly.  You could keep ARM_INSTR() unmodified,
> and define separate macros that spit out the .org and related
> stuff, like:
>
> .macro arm_slot n
>                 .org    sigreturn_codes + 12 * (\n)
>  ARM_INSTR(     .arm    )
> .endm
>
> .macro thumb_slot n
>                 .org    sigreturn_codes + 12 * (\n) + 8
>                 .thumb
> .endm
>
>
> Then you get
>
> sigreturn_codes:
>
> arm_slot 0
>  ARM_INSTR(     mov     r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)       )
>  ARM_INSTR(     mov     r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)       )
> thumb_slot 0
>                 movs    r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
>                 swi     #0
>
> arm_slot 1
>  ARM_INSTR(     mov     r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)    )
>  ARM_INSTR(     swi     #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)   )
> thumb_slot 1
>                 movs    r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
>                 swi     #0
>
> Still not ideal, but it might work.

Thanks, Dave! I've tried the above and it works. I've sent version 3
of the patch as [1]. Please take a look at it.

Thanks,
Victor

[1] http://lists.infradead.org/pipermail/linux-arm-kernel/2013-November/212307.html

> Cheers
> ---Dave
>
>>       .section .rodata
>>       .global sigreturn_codes
>>       .type   sigreturn_codes, #object
>>
>> -     .arm
>> +     .align
>>
>>  sigreturn_codes:
>>
>>       /* ARM sigreturn syscall code snippet */
>> -     mov     r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
>> -     swi     #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)
>> +ARM_INSTR(mov        r7, #(__NR_sigreturn - __NR_SYSCALL_BASE))
>> +ARM_INSTR(swi        #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
>>
>>       /* Thumb sigreturn syscall code snippet */
>>       .thumb
>> @@ -59,9 +71,8 @@ sigreturn_codes:
>>       swi     #0
>>
>>       /* ARM sigreturn_rt syscall code snippet */
>> -     .arm
>> -     mov     r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
>> -     swi     #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)
>> +ARM_INSTR(mov        r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE))
>> +ARM_INSTR(swi        #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
>>
>>       /* Thumb sigreturn_rt syscall code snippet */
>>       .thumb
>> @@ -74,7 +85,7 @@ sigreturn_codes:
>>        * it is thumb case or not, so we need additional
>>        * word after real last entry.
>>        */
>> -     .arm
>> +     .align
>>       .space  4
>>
>>       .size   sigreturn_codes, . - sigreturn_codes
>> --
>> 1.8.1.4
>>
>>
>> _______________________________________________
>> linaro-kernel mailing list
>> linaro-kernel@lists.linaro.org
>> http://lists.linaro.org/mailman/listinfo/linaro-kernel
>

Patch

diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
index 3c5d0f2..899fb86 100644
--- a/arch/arm/kernel/sigreturn_codes.S
+++ b/arch/arm/kernel/sigreturn_codes.S
@@ -41,17 +41,29 @@ 
 	.arch armv4t
 #endif
 
+/*
+ * In CPU_THUMBONLY kernel case arm opcodes are not allowed
+ */
+#ifndef CONFIG_CPU_THUMBONLY
+#define ARM_INSTR(code...) .arm ; \
+			   code
+#else
+#define ARM_INSTR(code...) .thumb ; \
+			   nop ;    \
+			   nop ;
+#endif
+
 	.section .rodata
 	.global sigreturn_codes
 	.type	sigreturn_codes, #object
 
-	.arm
+	.align
 
 sigreturn_codes:
 
 	/* ARM sigreturn syscall code snippet */
-	mov	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
-	swi	#(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+ARM_INSTR(mov	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE))
+ARM_INSTR(swi	#(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
 
 	/* Thumb sigreturn syscall code snippet */
 	.thumb
@@ -59,9 +71,8 @@  sigreturn_codes:
 	swi	#0
 
 	/* ARM sigreturn_rt syscall code snippet */
-	.arm
-	mov	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
-	swi	#(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+ARM_INSTR(mov	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE))
+ARM_INSTR(swi	#(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
 
 	/* Thumb sigreturn_rt syscall code snippet */
 	.thumb
@@ -74,7 +85,7 @@  sigreturn_codes:
 	 * it is thumb case or not, so we need additional
 	 * word after real last entry.
 	 */
-	.arm
+	.align
 	.space	4
 
 	.size	sigreturn_codes, . - sigreturn_codes