Message ID | 20191216221158.29572-9-richard.henderson@linaro.org |
---|---|
State | Superseded |
Headers | show |
Series | cputlb: Remove support for MMU_MODE*_SUFFIX | expand |
Richard Henderson <richard.henderson@linaro.org> writes: > Do not use exec/cpu_ldst_{,useronly_}template.h directly, > but instead use the functional interface. > > Cc: Eduardo Habkost <ehabkost@redhat.com> > Acked-by: Paolo Bonzini <pbonzini@redhat.com> > Signed-off-by: Richard Henderson <richard.henderson@linaro.org> > --- > target/i386/seg_helper.c | 56 ++++++++++++++++++++-------------------- > 1 file changed, 28 insertions(+), 28 deletions(-) > > diff --git a/target/i386/seg_helper.c b/target/i386/seg_helper.c > index 87a627f9dc..b96de068ca 100644 > --- a/target/i386/seg_helper.c > +++ b/target/i386/seg_helper.c > @@ -37,37 +37,37 @@ > # define LOG_PCALL_STATE(cpu) do { } while (0) > #endif > > -#ifdef CONFIG_USER_ONLY > -#define MEMSUFFIX _kernel > -#define DATA_SIZE 1 > -#include "exec/cpu_ldst_useronly_template.h" > +/* > + * TODO: Convert callers to compute cpu_mmu_index_kernel once > + * and use *_mmuidx_ra directly. > + */ I guess this would only be if it was a significant performance impact? They seem to be mainly called for (I assume) infrequently called helpers. 
Anyway lgtm: Reviewed-by: Alex Bennée <alex.bennee@linaro.org> > +#define cpu_ldub_kernel_ra(e, p, r) \ + cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) > +#define cpu_lduw_kernel_ra(e, p, r) \ + cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) > +#define cpu_ldl_kernel_ra(e, p, r) \ + cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) > +#define cpu_ldq_kernel_ra(e, p, r) \ + cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) > > -#define DATA_SIZE 2 > -#include "exec/cpu_ldst_useronly_template.h" > +#define cpu_stb_kernel_ra(e, p, v, r) \ + cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) > +#define cpu_stw_kernel_ra(e, p, v, r) \ + cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) > +#define cpu_stl_kernel_ra(e, p, v, r) \ + cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) > +#define cpu_stq_kernel_ra(e, p, v, r) \ + cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) > > -#define DATA_SIZE 4 > -#include "exec/cpu_ldst_useronly_template.h" > +#define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0) > +#define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0) > +#define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0) > +#define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0) > > -#define DATA_SIZE 8 > -#include "exec/cpu_ldst_useronly_template.h" > -#undef MEMSUFFIX > -#else > -#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env)) > -#define MEMSUFFIX _kernel > -#define DATA_SIZE 1 > -#include "exec/cpu_ldst_template.h" > - > -#define DATA_SIZE 2 > -#include "exec/cpu_ldst_template.h" > - > -#define DATA_SIZE 4 > -#include "exec/cpu_ldst_template.h" > - > -#define DATA_SIZE 8 > -#include "exec/cpu_ldst_template.h" > -#undef CPU_MMU_INDEX > -#undef MEMSUFFIX > -#endif > +#define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0) > +#define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0) > +#define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0) > +#define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0) > > /* return non zero if error */ > static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr, -- Alex Bennée
On 12/21/19 4:42 AM, Alex Bennée wrote: >> +/* >> + * TODO: Convert callers to compute cpu_mmu_index_kernel once >> + * and use *_mmuidx_ra directly. >> + */ > > I guess this would only be if it was a significant performance impact? > They seem to be mainly called for (I assume) infrequently called > helpers. Yes, they are infrequent. But if you expose all of the hidden cpu_mmu_index_kernel() invocations, one should immediately ask why we're not caching the result in a local variable. If for nothing else, readability. r~
diff --git a/target/i386/seg_helper.c b/target/i386/seg_helper.c index 87a627f9dc..b96de068ca 100644 --- a/target/i386/seg_helper.c +++ b/target/i386/seg_helper.c @@ -37,37 +37,37 @@ # define LOG_PCALL_STATE(cpu) do { } while (0) #endif -#ifdef CONFIG_USER_ONLY -#define MEMSUFFIX _kernel -#define DATA_SIZE 1 -#include "exec/cpu_ldst_useronly_template.h" +/* + * TODO: Convert callers to compute cpu_mmu_index_kernel once + * and use *_mmuidx_ra directly. + */ +#define cpu_ldub_kernel_ra(e, p, r) \ + cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) +#define cpu_lduw_kernel_ra(e, p, r) \ + cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) +#define cpu_ldl_kernel_ra(e, p, r) \ + cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) +#define cpu_ldq_kernel_ra(e, p, r) \ + cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) -#define DATA_SIZE 2 -#include "exec/cpu_ldst_useronly_template.h" +#define cpu_stb_kernel_ra(e, p, v, r) \ + cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) +#define cpu_stw_kernel_ra(e, p, v, r) \ + cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) +#define cpu_stl_kernel_ra(e, p, v, r) \ + cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) +#define cpu_stq_kernel_ra(e, p, v, r) \ + cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) -#define DATA_SIZE 4 -#include "exec/cpu_ldst_useronly_template.h" +#define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0) +#define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0) +#define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0) +#define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0) -#define DATA_SIZE 8 -#include "exec/cpu_ldst_useronly_template.h" -#undef MEMSUFFIX -#else -#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env)) -#define MEMSUFFIX _kernel -#define DATA_SIZE 1 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 2 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 4 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 8 -#include "exec/cpu_ldst_template.h" 
-#undef CPU_MMU_INDEX -#undef MEMSUFFIX -#endif +#define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0) +#define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0) +#define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0) +#define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0) /* return non zero if error */ static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,