Message ID | 20190509060246.4031-2-richard.henderson@linaro.org |
---|---|
State | Superseded |
Series | tcg: Add CPUClass::tlb_fill |
On Wed, May 8, 2019 at 11:03 PM Richard Henderson <richard.henderson@linaro.org> wrote:
>
> This hook will replace the (user-only mode specific) handle_mmu_fault
> hook, and the (system mode specific) tlb_fill function.
>
> The handle_mmu_fault hook was written as if there was a valid
> way to recover from an mmu fault, and had 3 possible return states.
> In reality, the only valid action is to raise an exception,
> return to the main loop, and deliver the SIGSEGV to the guest.
>
> Note that all of the current implementations of handle_mmu_fault
> for guests which support linux-user do in fact only ever return 1,
> which is the signal to return to the main loop.
>
> Using the hook for system mode requires that all targets be converted,
> so for now the hook is (optionally) used only from user-only mode.
>
> Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
> Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>

Alistair

> ---
>  include/qom/cpu.h     |  9 +++++++++
>  accel/tcg/user-exec.c | 39 ++++++++++++++-------------------------
>  2 files changed, 23 insertions(+), 25 deletions(-)
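The commit message's point that a user-only fault can only end in a raised exception maps directly onto how a target would provide the new hook. Purely as an illustrative sketch (not part of this patch or series: the "foo" target, FooCPU, FOO_CPU(), env.fault_address and FOO_EXCP_SEGV are hypothetical stand-ins), a user-only tlb_fill implementation never returns to its caller:

```c
/*
 * Illustrative sketch only -- hypothetical "foo" target, not part of
 * this series.  In user-only mode the hook must raise the guest
 * exception and longjmp back to the main loop; it never returns.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"

static bool foo_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                             MMUAccessType access_type, int mmu_idx,
                             bool probe, uintptr_t retaddr)
{
    FooCPU *cpu = FOO_CPU(cs);            /* hypothetical CPU type */

    /* Record the fault for the guest signal delivery path to see.  */
    cs->exception_index = FOO_EXCP_SEGV;  /* hypothetical exception number */
    cpu->env.fault_address = address;     /* hypothetical fault register */

    /*
     * Unwind the CPU state to the faulting guest instruction and
     * longjmp to the main loop; this call does not return, which is
     * what the g_assert_not_reached() in handle_cpu_signal() relies on.
     */
    cpu_loop_exit_restore(cs, retaddr);
}
```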
```diff
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 08abcbd3fe..c1f267b4e0 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -118,6 +118,12 @@ struct TranslationBlock;
  * will need to do more. If this hook is not implemented then the
  * default is to call @set_pc(tb->pc).
  * @handle_mmu_fault: Callback for handling an MMU fault.
+ * @tlb_fill: Callback for handling a softmmu tlb miss or user-only
+ *       address fault.  For system mode, if the access is valid, call
+ *       tlb_set_page and return true; if the access is invalid, and
+ *       probe is true, return false; otherwise raise an exception and
+ *       do not return.  For user-only mode, always raise an exception
+ *       and do not return.
  * @get_phys_page_debug: Callback for obtaining a physical address.
  * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
  *       associated memory transaction attributes to use for the access.
@@ -191,6 +197,9 @@ typedef struct CPUClass {
     void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
     int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int size, int rw,
                             int mmu_index);
+    bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
+                     MMUAccessType access_type, int mmu_idx,
+                     bool probe, uintptr_t retaddr);
     hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
     hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
                                         MemTxAttrs *attrs);
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 0789984fe6..199f88c826 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -65,6 +65,7 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
     CPUClass *cc;
     int ret;
     unsigned long address = (unsigned long)info->si_addr;
+    MMUAccessType access_type;
 
     /* We must handle PC addresses from two different sources:
      * a call return address and a signal frame address.
@@ -147,35 +148,23 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
        are still valid segv ones */
     address = h2g_nocheck(address);
 
-    cc = CPU_GET_CLASS(cpu);
-    /* see if it is an MMU fault */
-    g_assert(cc->handle_mmu_fault);
-    ret = cc->handle_mmu_fault(cpu, address, 0, is_write, MMU_USER_IDX);
-
-    if (ret == 0) {
-        /* The MMU fault was handled without causing real CPU fault.
-         * Retain helper_retaddr for a possible second fault.
-         */
-        return 1;
-    }
-
-    /* All other paths lead to cpu_exit; clear helper_retaddr
-     * for next execution.
+    /*
+     * There is no way the target can handle this other than raising
+     * an exception.  Undo signal and retaddr state prior to longjmp.
      */
+    sigprocmask(SIG_SETMASK, old_set, NULL);
     helper_retaddr = 0;
 
-    if (ret < 0) {
-        return 0; /* not an MMU fault */
+    cc = CPU_GET_CLASS(cpu);
+    if (cc->tlb_fill) {
+        access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
+        cc->tlb_fill(cpu, address, 0, access_type, MMU_USER_IDX, false, pc);
+        g_assert_not_reached();
+    } else {
+        ret = cc->handle_mmu_fault(cpu, address, 0, is_write, MMU_USER_IDX);
+        g_assert(ret > 0);
+        cpu_loop_exit_restore(cpu, pc);
     }
-
-    /* Now we have a real cpu fault. */
-    cpu_restore_state(cpu, pc, true);
-
-    sigprocmask(SIG_SETMASK, old_set, NULL);
-    cpu_loop_exit(cpu);
-
-    /* never comes here */
-    return 1;
 }
 
 #if defined(__i386__)
```
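The doc comment added to qom/cpu.h also spells out the softmmu side of the contract, even though this patch only wires the hook up for user-only mode. A minimal sketch of what a converted system-mode implementation and its class registration might look like, again using the hypothetical "foo" target (foo_mmu_translate() and FOO_EXCP_MMU_FAULT are invented stand-ins; tlb_set_page() is the real interface the doc comment names, and cpu_loop_exit_restore() is the same helper the patch uses in handle_cpu_signal()):

```c
/* Illustrative sketch only -- softmmu counterpart for the hypothetical "foo" target.  */
static bool foo_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                             MMUAccessType access_type, int mmu_idx,
                             bool probe, uintptr_t retaddr)
{
    hwaddr phys;
    int prot;

    /* Hypothetical page-table walk for the "foo" MMU.  */
    if (foo_mmu_translate(cs, address, access_type, mmu_idx, &phys, &prot)) {
        /* Valid access: install the mapping and report success.  */
        tlb_set_page(cs, address & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }

    if (probe) {
        /* Caller only wants to know whether the access would fault.  */
        return false;
    }

    /* Invalid access and not probing: raise the fault; does not return.  */
    cs->exception_index = FOO_EXCP_MMU_FAULT;
    cpu_loop_exit_restore(cs, retaddr);
}

static void foo_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    cc->tlb_fill = foo_cpu_tlb_fill;
    /*
     * handle_mmu_fault may stay in place during the transition; the
     * user-only handle_cpu_signal() above prefers tlb_fill whenever
     * it is non-NULL.
     */
}
```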