diff --git a/configs/targets/aarch64-linux-user.mak b/configs/targets/aarch64-linux-user.mak
--- a/configs/targets/aarch64-linux-user.mak
+++ b/configs/targets/aarch64-linux-user.mak
@@ -1,6 +1,6 @@
TARGET_ARCH=aarch64
TARGET_BASE_ARCH=arm
-TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml
+TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/aarch64-pauth.xml
TARGET_HAS_BFLT=y
CONFIG_SEMIHOSTING=y
CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y
diff --git a/configs/targets/aarch64-softmmu.mak b/configs/targets/aarch64-softmmu.mak
--- a/configs/targets/aarch64-softmmu.mak
+++ b/configs/targets/aarch64-softmmu.mak
@@ -1,5 +1,5 @@
TARGET_ARCH=aarch64
TARGET_BASE_ARCH=arm
TARGET_SUPPORTS_MTTCG=y
-TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml
+TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml gdb-xml/aarch64-pauth.xml
TARGET_NEED_FDT=y
diff --git a/configs/targets/aarch64_be-linux-user.mak b/configs/targets/aarch64_be-linux-user.mak
--- a/configs/targets/aarch64_be-linux-user.mak
+++ b/configs/targets/aarch64_be-linux-user.mak
@@ -1,7 +1,7 @@
TARGET_ARCH=aarch64
TARGET_BASE_ARCH=arm
TARGET_BIG_ENDIAN=y
-TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml
+TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/aarch64-pauth.xml
TARGET_HAS_BFLT=y
CONFIG_SEMIHOSTING=y
CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y
diff --git a/target/arm/internals.h b/target/arm/internals.h
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1345,6 +1345,8 @@ int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg);
+int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg);
+int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
--- a/target/arm/gdbstub.c
+++ b/target/arm/gdbstub.c
@@ -355,6 +355,11 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
aarch64_gdb_set_fpu_reg,
34, "aarch64-fpu.xml", 0);
}
+ if (isar_feature_aa64_pauth(&cpu->isar)) {
+ gdb_register_coprocessor(cs, aarch64_gdb_get_pauth_reg,
+ aarch64_gdb_set_pauth_reg,
+ 4, "aarch64-pauth.xml", 0);
+ }
#endif
} else {
if (arm_feature(env, ARM_FEATURE_NEON)) {
diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c
--- a/target/arm/gdbstub64.c
+++ b/target/arm/gdbstub64.c
@@ -210,6 +210,35 @@ int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg)
return 0;
}
+int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg)
+{
+ switch (reg) {
+ case 0: /* pauth_dmask */
+ case 1: /* pauth_cmask */
+ case 2: /* pauth_dmask_high */
+ case 3: /* pauth_cmask_high */
+ /*
+ * Note that older versions of this feature only contained
+ * pauth_{d,c}mask, for use with Linux user processes, and
+ * thus exclusively in the low half of the address space.
+ *
+ * To support system mode, and to debug kernels, two new regs
+ * were added to cover the high half of the address space.
+ * For the purpose of pauth_ptr_mask, we can use any well-formed
+ * address within the address space half -- here, 0 and -2.
+ */
+ return gdb_get_reg64(buf, pauth_ptr_mask(env, -(reg & 2), ~reg & 1));
+ default:
+ return 0;
+ }
+}
+
+int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ /* All pseudo registers are read-only. */
+ return 0;
+}
+
static void output_vector_union_type(GString *s, int reg_width,
const char *name)
{
diff --git a/gdb-xml/aarch64-pauth.xml b/gdb-xml/aarch64-pauth.xml
new file mode 100644
--- /dev/null
+++ b/gdb-xml/aarch64-pauth.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2018-2022 Free Software Foundation, Inc.
+
+ Copying and distribution of this file, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. -->
+
+<!DOCTYPE feature SYSTEM "gdb-target.dtd">
+<feature name="org.gnu.gdb.aarch64.pauth">
+ <reg name="pauth_dmask" bitsize="64"/>
+ <reg name="pauth_cmask" bitsize="64"/>
+ <reg name="pauth_dmask_high" bitsize="64"/>
+ <reg name="pauth_cmask_high" bitsize="64"/>
+</feature>
+
The extension is primarily defined by the Linux kernel NT_ARM_PAC_MASK
ptrace register set.

The original gdb feature consists of two masks, data and code, which are
used to mask out the authentication code within a pointer.  Following
discussion with Luis Machado, add two more masks in order to support
pointers within the high half of the address space (i.e. TTBR1 vs TTBR0).

Cc: Luis Machado <luis.machado@arm.com>
Cc: Thiago Jung Bauermann <thiago.bauermann@linaro.org>
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1105
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 configs/targets/aarch64-linux-user.mak    |  2 +-
 configs/targets/aarch64-softmmu.mak       |  2 +-
 configs/targets/aarch64_be-linux-user.mak |  2 +-
 target/arm/internals.h                    |  2 ++
 target/arm/gdbstub.c                      |  5 ++++
 target/arm/gdbstub64.c                    | 29 +++++++++++++++++++++++
 gdb-xml/aarch64-pauth.xml                 | 15 ++++++++++++
 7 files changed, 54 insertions(+), 3 deletions(-)
 create mode 100644 gdb-xml/aarch64-pauth.xml
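As an aside, to illustrate how a consumer of these pseudo-registers is
expected to use them: the mask for the relevant half of the address space
is applied to a pointer to strip the authentication code, following the
usual convention that bit 55 selects between the TTBR0 and TTBR1 halves.
The helper below is only an illustrative sketch, not part of this patch,
and strip_pac is a made-up name:

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative sketch only: remove the pointer authentication code from
 * PTR, given the data or code mask read from pauth_{d,c}mask (low half)
 * and pauth_{d,c}mask_high (high half).
 *
 * Bit 55 of the pointer selects the address-space half: for a TTBR0
 * (low) pointer the PAC bits are cleared, for a TTBR1 (high) pointer
 * they are set, so the result stays a canonical address in that half.
 */
static uint64_t strip_pac(uint64_t ptr, uint64_t low_mask, uint64_t high_mask)
{
    bool high_half = (ptr >> 55) & 1;

    return high_half ? (ptr | high_mask) : (ptr & ~low_mask);
}

A debugger would feed this the dmask pair for data addresses and the
cmask pair for code addresses (e.g. return addresses while unwinding).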