@@ -301,7 +301,7 @@ CPUArchState *cpu_copy(CPUArchState *env);
* be signaled by probe_access_flags().
*/
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
-#define TLB_MMIO 0
+#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 2))
#define TLB_WATCHPOINT 0
#else
@@ -983,6 +983,23 @@ void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
#endif
+/**
+ * cpu_plugin_mem_cbs_enabled() - are plugin memory callbacks enabled?
+ * @cpu: CPUState pointer
+ *
+ * The memory callbacks are installed if a plugin has instrumented an
+ * instruction for memory. This is useful to know when you want to
+ * force a slow path for a series of memory accesses.
+ */
+static inline bool cpu_plugin_mem_cbs_enabled(const CPUState *cpu)
+{
+#ifdef CONFIG_PLUGIN
+ return !!cpu->plugin_mem_cbs;
+#else
+ return false;
+#endif
+}
+
/**
* cpu_get_address_space:
* @cpu: CPU to get address space from
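[Not part of the patch, for context: cpu->plugin_mem_cbs is only populated while an instruction that a plugin has instrumented for memory is executing. A minimal sketch of such a plugin, following the pattern of contrib/plugins/mem.c; the callback body is intentionally empty, the point is just that registering a per-instruction memory callback is what makes cpu_plugin_mem_cbs_enabled() return true for those instructions.]

    #include <stddef.h>
    #include <qemu-plugin.h>

    QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

    /* Runs for every load/store of an instrumented instruction. */
    static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t info,
                         uint64_t vaddr, void *udata)
    {
    }

    /* Instrument every instruction of each translated block for memory. */
    static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    {
        size_t n_insns = qemu_plugin_tb_n_insns(tb);

        for (size_t i = 0; i < n_insns; i++) {
            struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);

            qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem,
                                             QEMU_PLUGIN_CB_NO_REGS,
                                             QEMU_PLUGIN_MEM_RW, NULL);
        }
    }

    QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                               const qemu_plugin_info_t *info,
                                               int argc, char **argv)
    {
        qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
        return 0;
    }

[Loaded with -plugin, every guest load/store is then instrumented, which is exactly the situation the hunks below must force onto the slow path.]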
@@ -1540,7 +1540,9 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
*pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
- if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
+    if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))
+        || (access_type != MMU_INST_FETCH
+            && cpu_plugin_mem_cbs_enabled(env_cpu(env)))) {
*phost = NULL;
return TLB_MMIO;
}
@@ -745,6 +745,10 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
if (guest_addr_valid_untagged(addr)) {
int page_flags = page_get_flags(addr);
if (page_flags & acc_flag) {
+ if ((acc_flag == PAGE_READ || acc_flag == PAGE_WRITE)
+ && cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
+ return TLB_MMIO;
+ }
return 0; /* success */
}
maperr = !(page_flags & PAGE_VALID);
@@ -767,7 +771,7 @@ int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
- *phost = flags ? NULL : g2h(env_cpu(env), addr);
+ *phost = (flags & TLB_INVALID_MASK) ? NULL : g2h(env_cpu(env), addr);
return flags;
}
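[A hedged sketch, not from this series, of what the revised contract means for a probe_access_flags() caller: after this change a non-zero return no longer implies *phost == NULL, so only TLB_INVALID_MASK should be read as "no host mapping", while TLB_MMIO (including the case forced by plugin instrumentation) arrives together with a usable host pointer in user mode but still tells the caller to avoid direct host access. The do_access_slow()/do_access_fast() helpers are hypothetical, and the snippet assumes the usual QEMU cputlb/exec-all headers.]

    /* Hypothetical helpers, declared only for the sketch. */
    void do_access_slow(CPUArchState *env, target_ulong addr, int size,
                        uintptr_t ra);
    void do_access_fast(void *host, int size);

    static void hypothetical_probe_then_access(CPUArchState *env,
                                               target_ulong addr, int size,
                                               int mmu_idx, uintptr_t ra)
    {
        void *host;
        int flags = probe_access_flags(env, addr, size, MMU_DATA_LOAD,
                                       mmu_idx, true, &host, ra);

        if (flags & TLB_INVALID_MASK) {
            /* Nonfault probe failed: no access is possible here. */
            return;
        }
        if (unlikely(flags)) {
            /* MMIO, watchpoint, notdirty or plugin instrumentation. */
            do_access_slow(env, addr, size, ra);
            return;
        }
        /* Plain RAM on a valid page: direct host access is fine. */
        do_access_fast(host, size);
    }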
@@ -5688,9 +5688,6 @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
flags = info.page[0].flags | info.page[1].flags;
if (unlikely(flags != 0)) {
-#ifdef CONFIG_USER_ONLY
- g_assert_not_reached();
-#else
/*
* At least one page includes MMIO.
* Any bus operation can fail with cpu_transaction_failed,
@@ -5727,7 +5724,6 @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
memcpy(&env->vfp.zregs[(rd + i) & 31], &scratch[i], reg_max);
}
return;
-#endif
}
/* The entire operation is in RAM, on valid pages. */
@@ -80,6 +80,14 @@ sha512-vector: sha512.c
TESTS += sha512-vector
+ifneq ($(CROSS_CC_HAS_SVE),)
+sha512-sve: CFLAGS=-O3 -march=armv8.1-a+sve
+sha512-sve: sha512.c
+ $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS)
+
+TESTS += sha512-sve
+endif
+
ifeq ($(HOST_GDB_SUPPORTS_ARCH),y)
GDB_SCRIPT=$(SRC_PATH)/tests/guest-debug/run-test.py