diff --git a/target/i386/gdbstub.c b/target/i386/gdbstub.c
--- a/target/i386/gdbstub.c
+++ b/target/i386/gdbstub.c
@@ -89,10 +89,10 @@ static int gdb_read_reg_cs64(uint32_t hflags, GByteArray *buf, target_ulong val)
static int gdb_write_reg_cs64(uint32_t hflags, uint8_t *buf, target_ulong *val)
{
if (hflags & HF_CS64_MASK) {
- *val = ldq_p(buf);
+ *val = ldq_le_p(buf);
return 8;
}
- *val = ldl_p(buf);
+ *val = ldl_le_p(buf);
return 4;
}
@@ -221,7 +221,7 @@ int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
static int x86_cpu_gdb_load_seg(X86CPU *cpu, X86Seg sreg, uint8_t *mem_buf)
{
CPUX86State *env = &cpu->env;
- uint16_t selector = ldl_p(mem_buf);
+ uint16_t selector = ldl_le_p(mem_buf);
if (selector != env->segs[sreg].selector) {
#if defined(CONFIG_USER_ONLY)
@@ -262,15 +262,15 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
if (n < CPU_NB_REGS) {
if (TARGET_LONG_BITS == 64) {
if (env->hflags & HF_CS64_MASK) {
- env->regs[gpr_map[n]] = ldtul_p(mem_buf);
+ env->regs[gpr_map[n]] = ldtul_le_p(mem_buf);
} else if (n < CPU_NB_REGS32) {
- env->regs[gpr_map[n]] = ldtul_p(mem_buf) & 0xffffffffUL;
+ env->regs[gpr_map[n]] = ldtul_le_p(mem_buf) & 0xffffffffUL;
}
return sizeof(target_ulong);
} else if (n < CPU_NB_REGS32) {
n = gpr_map32[n];
env->regs[n] &= ~0xffffffffUL;
- env->regs[n] |= (uint32_t)ldl_p(mem_buf);
+ env->regs[n] |= (uint32_t)ldl_le_p(mem_buf);
return 4;
}
} else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
@@ -281,8 +281,8 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
} else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
n -= IDX_XMM_REGS;
if (n < CPU_NB_REGS32 || TARGET_LONG_BITS == 64) {
- env->xmm_regs[n].ZMM_Q(0) = ldq_p(mem_buf);
- env->xmm_regs[n].ZMM_Q(1) = ldq_p(mem_buf + 8);
+ env->xmm_regs[n].ZMM_Q(0) = ldq_le_p(mem_buf);
+ env->xmm_regs[n].ZMM_Q(1) = ldq_le_p(mem_buf + 8);
return 16;
}
} else {
@@ -290,18 +290,18 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
case IDX_IP_REG:
if (TARGET_LONG_BITS == 64) {
if (env->hflags & HF_CS64_MASK) {
- env->eip = ldq_p(mem_buf);
+ env->eip = ldq_le_p(mem_buf);
} else {
- env->eip = ldq_p(mem_buf) & 0xffffffffUL;
+ env->eip = ldq_le_p(mem_buf) & 0xffffffffUL;
}
return 8;
} else {
env->eip &= ~0xffffffffUL;
- env->eip |= (uint32_t)ldl_p(mem_buf);
+ env->eip |= (uint32_t)ldl_le_p(mem_buf);
return 4;
}
case IDX_FLAGS_REG:
- env->eflags = ldl_p(mem_buf);
+ env->eflags = ldl_le_p(mem_buf);
return 4;
case IDX_SEG_REGS:
@@ -327,10 +327,10 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return 4;
case IDX_FP_REGS + 8:
- cpu_set_fpuc(env, ldl_p(mem_buf));
+ cpu_set_fpuc(env, ldl_le_p(mem_buf));
return 4;
case IDX_FP_REGS + 9:
- tmp = ldl_p(mem_buf);
+ tmp = ldl_le_p(mem_buf);
env->fpstt = (tmp >> 11) & 7;
env->fpus = tmp & ~0x3800;
return 4;
@@ -348,7 +348,7 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return 4;
case IDX_MXCSR_REG:
- cpu_set_mxcsr(env, ldl_p(mem_buf));
+ cpu_set_mxcsr(env, ldl_le_p(mem_buf));
return 4;
case IDX_CTL_CR0_REG:
diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c
--- a/target/i386/tcg/sysemu/excp_helper.c
+++ b/target/i386/tcg/sysemu/excp_helper.c
@@ -86,7 +86,7 @@ static bool ptw_translate(PTETranslate *inout, hwaddr addr, uint64_t ra)
static inline uint32_t ptw_ldl(const PTETranslate *in, uint64_t ra)
{
if (likely(in->haddr)) {
- return ldl_p(in->haddr);
+ return ldl_le_p(in->haddr);
}
return cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, ra);
}
@@ -94,7 +94,7 @@ static inline uint32_t ptw_ldl(const PTETranslate *in, uint64_t ra)
static inline uint64_t ptw_ldq(const PTETranslate *in, uint64_t ra)
{
if (likely(in->haddr)) {
- return ldq_p(in->haddr);
+ return ldq_le_p(in->haddr);
}
return cpu_ldq_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, ra);
}
diff --git a/target/i386/xsave_helper.c b/target/i386/xsave_helper.c
--- a/target/i386/xsave_helper.c
+++ b/target/i386/xsave_helper.c
@@ -43,8 +43,8 @@ void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
for (i = 0; i < CPU_NB_REGS; i++) {
uint8_t *xmm = legacy->xmm_regs[i];
- stq_p(xmm, env->xmm_regs[i].ZMM_Q(0));
- stq_p(xmm + 8, env->xmm_regs[i].ZMM_Q(1));
+ stq_le_p(xmm, env->xmm_regs[i].ZMM_Q(0));
+ stq_le_p(xmm + 8, env->xmm_regs[i].ZMM_Q(1));
}
header->xstate_bv = env->xstate_bv;
@@ -58,8 +58,8 @@ void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
for (i = 0; i < CPU_NB_REGS; i++) {
uint8_t *ymmh = avx->ymmh[i];
- stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
- stq_p(ymmh + 8, env->xmm_regs[i].ZMM_Q(3));
+ stq_le_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
+ stq_le_p(ymmh + 8, env->xmm_regs[i].ZMM_Q(3));
}
}
@@ -101,10 +101,10 @@ void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
for (i = 0; i < CPU_NB_REGS; i++) {
uint8_t *zmmh = zmm_hi256->zmm_hi256[i];
- stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
- stq_p(zmmh + 8, env->xmm_regs[i].ZMM_Q(5));
- stq_p(zmmh + 16, env->xmm_regs[i].ZMM_Q(6));
- stq_p(zmmh + 24, env->xmm_regs[i].ZMM_Q(7));
+ stq_le_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
+ stq_le_p(zmmh + 8, env->xmm_regs[i].ZMM_Q(5));
+ stq_le_p(zmmh + 16, env->xmm_regs[i].ZMM_Q(6));
+ stq_le_p(zmmh + 24, env->xmm_regs[i].ZMM_Q(7));
}
#ifdef TARGET_X86_64
@@ -177,8 +177,8 @@ void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen)
for (i = 0; i < CPU_NB_REGS; i++) {
const uint8_t *xmm = legacy->xmm_regs[i];
- env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
- env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm + 8);
+ env->xmm_regs[i].ZMM_Q(0) = ldq_le_p(xmm);
+ env->xmm_regs[i].ZMM_Q(1) = ldq_le_p(xmm + 8);
}
env->xstate_bv = header->xstate_bv;
@@ -191,8 +191,8 @@ void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen)
for (i = 0; i < CPU_NB_REGS; i++) {
const uint8_t *ymmh = avx->ymmh[i];
- env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
- env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh + 8);
+ env->xmm_regs[i].ZMM_Q(2) = ldq_le_p(ymmh);
+ env->xmm_regs[i].ZMM_Q(3) = ldq_le_p(ymmh + 8);
}
}
@@ -241,10 +241,10 @@ void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen)
for (i = 0; i < CPU_NB_REGS; i++) {
const uint8_t *zmmh = zmm_hi256->zmm_hi256[i];
- env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
- env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh + 8);
- env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh + 16);
- env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh + 24);
+ env->xmm_regs[i].ZMM_Q(4) = ldq_le_p(zmmh);
+ env->xmm_regs[i].ZMM_Q(5) = ldq_le_p(zmmh + 8);
+ env->xmm_regs[i].ZMM_Q(6) = ldq_le_p(zmmh + 16);
+ env->xmm_regs[i].ZMM_Q(7) = ldq_le_p(zmmh + 24);
}
#ifdef TARGET_X86_64
The x86 architecture uses little endianness. Directly use the little-endian LD/ST API. Mechanical change using: $ end=le; \ for acc in uw w l q tul; do \ sed -i -e "s/ld${acc}_p(/ld${acc}_${end}_p(/" \ -e "s/st${acc}_p(/st${acc}_${end}_p(/" \ $(git grep -wlE '(ld|st)t?u?[wlq]_p' target/i386/); \ done Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org> --- target/i386/gdbstub.c | 30 +++++++++++++------------- target/i386/tcg/sysemu/excp_helper.c | 4 ++-- target/i386/xsave_helper.c | 32 ++++++++++++++-------------- 3 files changed, 33 insertions(+), 33 deletions(-)