Message ID: 20210303024423.3125722-1-richard.henderson@linaro.org
State:      New
Series:     [v3] target/s390x: Implement the MVPG condition-code-option bit

On 03/03/2021 03.44, Richard Henderson wrote:
> If the CCO bit is set, MVPG should not generate an exception but
> report page translation faults via a CC code.
>
> Create a new helper, access_prepare_nf, which can use probe_access_flags
> in non-faulting mode, and then handle watchpoints.
[...]
> +    ok = access_prepare_nf(&srca, env, cco, r2, TARGET_PAGE_SIZE,
> +                           MMU_DATA_LOAD, mmu_idx, ra);
> +    if (!ok) {
> +        return 2;
> +    }
> +    ok = access_prepare_nf(&desta, env, cco, r1, TARGET_PAGE_SIZE,
> +                           MMU_DATA_STORE, mmu_idx, ra);
> +    if (!ok) {
> +        return 1;
> +    }

Thanks, this looks promising, but one of the MVPG kvm-unit-tests is still
failing with this patch - the one that checks for an exception if the
destination page is marked as read-only. MVPG only returns CC1 for invalid
page table entries - but if the page is write-protected, it still causes a
protection exception. That's why I've been checking "if (exc && exc !=
PGM_PROTECTION)" in my version of the patch.

 Thomas

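A minimal sketch of the check described above for the destination operand,
assuming a hypothetical variant of the prepare helper that hands back the
program-interruption code instead of raising it (translate_pages_nf() and
the exc variable are illustrative names, not existing QEMU API):

    /* Hypothetical helper: returns 0 on success or a PGM_* code on failure. */
    exc = translate_pages_nf(env, r1, TARGET_PAGE_SIZE, MMU_DATA_STORE,
                             mmu_idx, &desta, ra);
    if (exc && exc != PGM_PROTECTION) {
        /* Invalid page-table entry: with CCO set, report CC 1 instead of faulting. */
        return 1;
    }
    if (exc == PGM_PROTECTION) {
        /* Write-protected destination: MVPG still raises a protection exception. */
        tcg_s390_program_interrupt(env, PGM_PROTECTION, ra);
    }
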
On 03/03/2021 07.25, Thomas Huth wrote:
> On 03/03/2021 03.44, Richard Henderson wrote:
>> If the CCO bit is set, MVPG should not generate an exception but
>> report page translation faults via a CC code.
[...]
>> +    ok = access_prepare_nf(&desta, env, cco, r1, TARGET_PAGE_SIZE,
>> +                           MMU_DATA_STORE, mmu_idx, ra);
>> +    if (!ok) {
>> +        return 1;
>> +    }
>
> Thanks, this looks promising, but one of the MVPG kvm-unit-tests is still
> failing with this patch - the one that checks for an exception if the
> destination page is marked as read-only. MVPG only returns CC1 for invalid
> page table entries - but if the page is write-protected, it still causes a
> protection exception. That's why I've been checking "if (exc && exc !=
> PGM_PROTECTION)" in my version of the patch.

FWIW, I can get the MVPG kvm-unit-test working with your patch if I add
this on top:

diff a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -904,7 +904,14 @@ uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
     ok = access_prepare_nf(&desta, env, cco, r1, TARGET_PAGE_SIZE,
                            MMU_DATA_STORE, mmu_idx, ra);
     if (!ok) {
-        return 1;
+        ok = access_prepare_nf(&desta, env, cco, r1, TARGET_PAGE_SIZE,
+                               MMU_DATA_LOAD, mmu_idx, ra);
+        if (!ok) {
+            return 1;
+        }
+        /* If reading was ok, then the page must be protected. */
+        /* TODO: Set a translation exception code in lowcore? */
+        tcg_s390_program_interrupt(env, PGM_PROTECTION, ra);
     }
     access_memmove(env, &desta, &srca, ra);
     return 0; /* data moved */

... yeah, it's ugly to call access_prepare_nf() again with MMU_DATA_LOAD,
and it's still missing the translation exception code ... but at least
the kvm-unit-test is happy that way...

 Thomas

On 03.03.21 12:36, Thomas Huth wrote:
> On 03/03/2021 07.25, Thomas Huth wrote:
>> On 03/03/2021 03.44, Richard Henderson wrote:
>>> If the CCO bit is set, MVPG should not generate an exception but
>>> report page translation faults via a CC code.
[...]
>> Thanks, this looks promising, but one of the MVPG kvm-unit-tests is still
>> failing with this patch - the one that checks for an exception if the
>> destination page is marked as read-only. MVPG only returns CC1 for invalid
>> page table entries - but if the page is write-protected, it still causes a
>> protection exception. That's why I've been checking "if (exc && exc !=
>> PGM_PROTECTION)" in my version of the patch.
>
> FWIW, I can get the MVPG kvm-unit-test working with your patch if I add
> this on top:
[...]
> ... yeah, it's ugly to call access_prepare_nf() again with MMU_DATA_LOAD,
> and it's still missing the translation exception code ... but at least
> the kvm-unit-test is happy that way...

As I said, can't we store the last exception we had during tlb_fill and use
that in case access_prepare_nf() returns an error, to identify the actual
exception?

-- 
Thanks,

David / dhildenb

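A minimal sketch of that suggestion, assuming a hypothetical tlb_fill_exc
field added to CPUS390XState and filled in by s390_cpu_tlb_fill() before a
non-faulting probe returns (the field name and the exact flow are
illustrative, not existing code):

    /* In s390_cpu_tlb_fill(), remember the would-be program interruption: */
    env->tlb_fill_exc = excp;   /* hypothetical field; excp = computed PGM_* code */

    /* In HELPER(mvpg), after a failed non-faulting prepare of the destination: */
    ok = access_prepare_nf(&desta, env, cco, r1, TARGET_PAGE_SIZE,
                           MMU_DATA_STORE, mmu_idx, ra);
    if (!ok) {
        if (env->tlb_fill_exc == PGM_PROTECTION) {
            /* Protection exceptions are not suppressed by the CC option. */
            tcg_s390_program_interrupt(env, PGM_PROTECTION, ra);
        }
        return 1;   /* destination page-translation fault -> CC 1 */
    }
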
On 03/03/2021 12.40, David Hildenbrand wrote:
> On 03.03.21 12:36, Thomas Huth wrote:
>> On 03/03/2021 07.25, Thomas Huth wrote:
>>> On 03/03/2021 03.44, Richard Henderson wrote:
>>>> If the CCO bit is set, MVPG should not generate an exception but
>>>> report page translation faults via a CC code.
[...]
>> FWIW, I can get the MVPG kvm-unit-test working with your patch if I add
>> this on top:
[...]
>> ... yeah, it's ugly to call access_prepare_nf() again with MMU_DATA_LOAD,
>> and it's still missing the translation exception code ... but at least
>> the kvm-unit-test is happy that way...
>
> As I said, can't we store the last exception we had during tlb_fill and use
> that in case access_prepare_nf() returns an error, to identify the actual
> exception?

Ah, right, thanks. That's likely the best way to do it. I just gave it a
try and it seems to work ... I'll send out that modified version as a v4...

 Thomas

diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index 25cfede806..b397333c0b 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -130,28 +130,62 @@ typedef struct S390Access {
     int mmu_idx;
 } S390Access;
 
+static bool access_prepare_nf(S390Access *access, CPUS390XState *env,
+                              bool nofault, vaddr vaddr1, int size,
+                              MMUAccessType access_type,
+                              int mmu_idx, uintptr_t ra)
+{
+    void *haddr1, *haddr2 = NULL;
+    int size1, size2;
+    vaddr vaddr2 = 0;
+    int flags;
+
+    assert(size > 0 && size <= 4096);
+
+    size1 = MIN(size, -(vaddr1 | TARGET_PAGE_MASK)),
+    size2 = size - size1;
+
+    flags = probe_access_flags(env, vaddr1, access_type, mmu_idx,
+                               nofault, &haddr1, ra);
+    if (unlikely(size2)) {
+        /* The access crosses page boundaries. */
+        vaddr2 = wrap_address(env, vaddr1 + size1);
+        flags |= probe_access_flags(env, vaddr2, access_type, mmu_idx,
+                                    nofault, &haddr2, ra);
+    }
+
+    if (unlikely(flags & TLB_INVALID_MASK)) {
+        return false;
+    }
+    if (unlikely(flags & TLB_WATCHPOINT)) {
+        /* S390 does not presently use transaction attributes. */
+        cpu_check_watchpoint(env_cpu(env), vaddr1, size,
+                             MEMTXATTRS_UNSPECIFIED,
+                             (access_type == MMU_DATA_STORE
+                              ? BP_MEM_WRITE : BP_MEM_READ), ra);
+    }
+
+    *access = (S390Access) {
+        .vaddr1 = vaddr1,
+        .vaddr2 = vaddr2,
+        .haddr1 = haddr1,
+        .haddr2 = haddr2,
+        .size1 = size1,
+        .size2 = size2,
+        .mmu_idx = mmu_idx
+    };
+    return true;
+}
+
 static S390Access access_prepare(CPUS390XState *env, vaddr vaddr, int size,
                                  MMUAccessType access_type, int mmu_idx,
                                  uintptr_t ra)
 {
-    S390Access access = {
-        .vaddr1 = vaddr,
-        .size1 = MIN(size, -(vaddr | TARGET_PAGE_MASK)),
-        .mmu_idx = mmu_idx,
-    };
-
-    g_assert(size > 0 && size <= 4096);
-    access.haddr1 = probe_access(env, access.vaddr1, access.size1, access_type,
-                                 mmu_idx, ra);
-
-    if (unlikely(access.size1 != size)) {
-        /* The access crosses page boundaries. */
-        access.vaddr2 = wrap_address(env, vaddr + access.size1);
-        access.size2 = size - access.size1;
-        access.haddr2 = probe_access(env, access.vaddr2, access.size2,
-                                     access_type, mmu_idx, ra);
-    }
-    return access;
+    S390Access ret;
+    bool ok = access_prepare_nf(&ret, env, false, vaddr, size,
+                                access_type, mmu_idx, ra);
+    assert(ok);
+    return ret;
 }
 
 /* Helper to handle memset on a single page. */
@@ -845,8 +879,10 @@ uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
     const int mmu_idx = cpu_mmu_index(env, false);
     const bool f = extract64(r0, 11, 1);
     const bool s = extract64(r0, 10, 1);
+    const bool cco = extract64(r0, 8, 1);
     uintptr_t ra = GETPC();
     S390Access srca, desta;
+    bool ok;
 
     if ((f && s) || extract64(r0, 12, 4)) {
         tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
@@ -858,13 +894,18 @@ uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
     /*
      * TODO:
      *   - Access key handling
-     *   - CC-option with surpression of page-translation exceptions
      *   - Store r1/r2 register identifiers at real location 162
      */
-    srca = access_prepare(env, r2, TARGET_PAGE_SIZE, MMU_DATA_LOAD, mmu_idx,
-                          ra);
-    desta = access_prepare(env, r1, TARGET_PAGE_SIZE, MMU_DATA_STORE, mmu_idx,
-                           ra);
+    ok = access_prepare_nf(&srca, env, cco, r2, TARGET_PAGE_SIZE,
+                           MMU_DATA_LOAD, mmu_idx, ra);
+    if (!ok) {
+        return 2;
+    }
+    ok = access_prepare_nf(&desta, env, cco, r1, TARGET_PAGE_SIZE,
+                           MMU_DATA_STORE, mmu_idx, ra);
+    if (!ok) {
+        return 1;
+    }
     access_memmove(env, &desta, &srca, ra);
     return 0; /* data moved */
 }

If the CCO bit is set, MVPG should not generate an exception but
report page translation faults via a CC code.

Create a new helper, access_prepare_nf, which can use probe_access_flags
in non-faulting mode, and then handle watchpoints.

Cc: David Hildenbrand <david@redhat.com>
Reported-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/s390x/mem_helper.c | 87 ++++++++++++++++++++++++++++-----------
 1 file changed, 64 insertions(+), 23 deletions(-)

-- 
2.25.1
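
For context, a guest-side sketch of how the condition-code option could be
exercised, in the style of a kvm-unit-test (the helper below is an
illustration, not the actual test code; the CCO bit corresponds to
extract64(r0, 8, 1) in the patch above, i.e. the value 0x100 in general
register 0):

    #include <stdint.h>

    /* Run MVPG with the condition-code-option bit set and return the CC. */
    static inline int mvpg_cco(unsigned long dest, unsigned long src)
    {
        register unsigned long r0 asm("0") = 0x100;    /* CCO bit */
        uint32_t cc;

        asm volatile("mvpg    %[dest],%[src]\n"
                     "ipm     %[cc]\n"
                     "srl     %[cc],28\n"
                     : [cc] "=&d" (cc)
                     : [dest] "a" (dest), [src] "a" (src), "d" (r0)
                     : "cc", "memory");
        return cc;  /* 0 = data moved, 1 = destination fault, 2 = source fault */
    }

Checking the returned CC against mappings that are invalid or read-only is
roughly the kind of behaviour the kvm-unit-test mentioned in the thread
exercises.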