@@ -180,10 +180,7 @@ ENTRY(vfp_support_entry)
@ always subtract 4 from the following
@ instruction address.
 
-local_bh_enable_and_ret:
- adr r0, .
- mov r1, #SOFTIRQ_DISABLE_OFFSET
- b __local_bh_enable_ip @ tail call
+ b vfp_exit @ tail call
 
look_for_VFP_exceptions:
@ Check for synchronous or asynchronous exception
@@ -206,7 +203,7 @@ ENTRY(vfp_support_entry)
@ not recognised by VFP
 
DBGSTR "not VFP"
- b local_bh_enable_and_ret
+ b vfp_exit @ tail call
 
process_exception:
DBGSTR "bounce"
@@ -267,7 +267,7 @@ static void vfp_panic(char *reason, u32 inst)
/*
* Process bitmask of exception conditions.
*/
-static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
+static int vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr)
{
int si_code = 0;
 
@@ -275,8 +275,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
 
if (exceptions == VFP_EXCEPTION_ERROR) {
vfp_panic("unhandled bounce", inst);
- vfp_raise_sigfpe(FPE_FLTINV, regs);
- return;
+ return FPE_FLTINV;
}
 
/*
@@ -304,8 +303,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);
 
- if (si_code)
- vfp_raise_sigfpe(si_code, regs);
+ return si_code;
}
 
/*
@@ -350,6 +348,8 @@ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
u32 fpscr, orig_fpscr, fpsid, exceptions;
+ int si_code2 = 0;
+ int si_code = 0;
 
pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
 
@@ -397,7 +397,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
* unallocated VFP instruction but with FPSCR.IXE set and not
* on VFP subarch 1.
*/
- vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
+ si_code = vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr);
goto exit;
}
 
@@ -422,7 +422,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
*/
exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
if (exceptions)
- vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+ si_code2 = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
 
/*
* If there isn't a second FP instruction, exit now. Note that
@@ -441,9 +441,14 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
emulate:
exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
if (exceptions)
- vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+ si_code = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
+
exit:
- local_bh_enable();
+ vfp_unlock();
+ if (si_code2)
+ vfp_raise_sigfpe(si_code2, regs);
+ if (si_code)
+ vfp_raise_sigfpe(si_code, regs);
}
 
static void vfp_enable(void *unused)
@@ -684,10 +689,15 @@ asmlinkage void vfp_entry(u32 trigger, struct thread_info *ti, u32 resume_pc,
if (unlikely(!have_vfp))
return;
 
- local_bh_disable();
+ vfp_lock();
vfp_support_entry(trigger, ti, resume_pc, resume_return_address);
}
 
+asmlinkage void vfp_exit(void)
+{
+ vfp_unlock();
+}
+
#ifdef CONFIG_KERNEL_MODE_NEON
 
static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr)

vfp_entry() is invoked from the exception handler and is fully
preemptible. It uses local_bh_disable() to remain uninterrupted while
checking the VFP state. This does not work on PREEMPT_RT because
local_bh_disable() synchronizes the relevant section, but the context
remains fully preemptible. Use vfp_lock() for uninterrupted access.

VFP_bounce() is invoked from within vfp_entry() and may send a signal.
Sending a signal uses spinlock_t, which becomes a sleeping lock on
PREEMPT_RT and must not be acquired within a preempt-disabled section.
Move the vfp_raise_sigfpe() block outside of the preempt-disabled
section.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 arch/arm/vfp/vfphw.S     |  7 ++-----
 arch/arm/vfp/vfpmodule.c | 30 ++++++++++++++++++++----------
 2 files changed, 22 insertions(+), 15 deletions(-)
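
For reference, vfp_lock()/vfp_unlock() are introduced by an earlier
patch in this series and are not part of the diff above. A minimal
sketch of such helpers, assuming the usual PREEMPT_RT pattern (the
exact body in the series may differ):

static void vfp_lock(void)
{
	/*
	 * On PREEMPT_RT, softirqs are handled in thread context and
	 * local_bh_disable() no longer disables preemption, so it cannot
	 * provide uninterrupted access to the per-CPU VFP state. Disable
	 * preemption directly in that case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	else
		local_bh_disable();
}

static void vfp_unlock(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
	else
		local_bh_enable();
}

With helpers along these lines, vfp_unlock() in VFP_bounce() ends the
preempt-disabled section before the deferred vfp_raise_sigfpe() calls
run, so the sleeping spinlock_t in the signal path is only acquired
from a fully preemptible context.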