@@ -910,6 +910,7 @@ struct ARMCPU {
#ifdef CONFIG_USER_ONLY
bool guarded_pages;
+ bool tagged_pages;
#endif
QLIST_HEAD(, ARMELChangeHook) pre_el_change_hooks;
@@ -293,6 +293,18 @@ static void aarch64_cpu_set_guarded_pages(Object *obj, bool val, Error **errp)
ARMCPU *cpu = ARM_CPU(obj);
cpu->guarded_pages = val;
}
+
+static bool aarch64_cpu_get_tagged_pages(Object *obj, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ return cpu->tagged_pages;
+}
+
+static void aarch64_cpu_set_tagged_pages(Object *obj, bool val, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ cpu->tagged_pages = val;
+}
#endif
/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
@@ -380,6 +392,12 @@ static void aarch64_max_initfn(Object *obj)
aarch64_cpu_set_guarded_pages, NULL);
object_property_set_description(obj, "x-guarded-pages",
"Set on/off GuardPage bit for all pages", NULL);
+
+ object_property_add_bool(obj, "x-tagged-pages",
+ aarch64_cpu_get_tagged_pages,
+ aarch64_cpu_set_tagged_pages, NULL);
+ object_property_set_description(obj, "x-tagged-pages",
+ "Set on/off MemAttr Tagged for all pages", NULL);
#endif
cpu->sve_max_vq = ARM_MAX_VQ;
@@ -53,8 +53,45 @@ static uint64_t strip_tbi(CPUARMState *env, uint64_t ptr)
static uint8_t *allocation_tag_mem(CPUARMState *env, uint64_t ptr,
bool write, uintptr_t ra)
{
+#ifdef CONFIG_USER_ONLY
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ uint64_t clean_ptr = strip_tbi(env, ptr);
+ uint8_t *tags;
+ uintptr_t index;
+ int flags;
+
+ flags = page_get_flags(clean_ptr);
+
+ if (!(flags & PAGE_VALID) || !(flags & (write ? PAGE_WRITE : PAGE_READ))) {
+ /* SIGSEGV */
+ env->exception.vaddress = ptr;
+ cpu_restore_state(CPU(cpu), ra, true);
+ raise_exception(env, EXCP_DATA_ABORT, 0, 1);
+ }
+
+ if (!cpu->tagged_pages) {
+ /* Tag storage is disabled. */
+ return NULL;
+ }
+ if (flags & PAGE_SHARED) {
+ /* There may be multiple mappings; pretend not implemented. */
+ return NULL;
+ }
+
+ tags = page_get_target_data(clean_ptr);
+ if (tags == NULL) {
+ size_t alloc_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1);
+ tags = page_alloc_target_data(clean_ptr, alloc_size);
+ assert(tags != NULL);
+ }
+
+ index = extract32(clean_ptr, LOG2_TAG_GRANULE + 1,
+ TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
+ return tags + index;
+#else
/* Tag storage not implemented. */
return NULL;
+#endif
}
static int get_allocation_tag(CPUARMState *env, uint64_t ptr, uintptr_t ra)
Control this with x-tagged-pages, which is off by default. The
limitation to non-shared pages is not part of a future kernel API, but
a limitation of linux-user not being able to map virtual pages back to
physical pages.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v2: Add the x-tagged-pages cpu property
---
 target/arm/cpu.h        |  1 +
 target/arm/cpu64.c      | 18 ++++++++++++++++++
 target/arm/mte_helper.c | 37 +++++++++++++++++++++++++++++++++++++
 3 files changed, 56 insertions(+)

-- 
2.17.2