@@ -1,4 +1,5 @@
#include <asm/asm_defns.h>
+#include <asm/sysregs.h>
#include <asm/regs.h>
#include <asm/alternative.h>
#include <public/xen.h>
@@ -19,6 +19,7 @@
#include <asm/asm_defns.h>
#include <asm/arm32/processor.h>
+#include <asm/sysregs.h>
ca15mp_init:
ca7mp_init:
@@ -30,6 +30,7 @@
#include <asm/gic_v3_its.h>
#include <asm/io.h>
#include <asm/page.h>
+#include <asm/sysregs.h>
/*
* There could be a lot of LPIs on the host side, and they always go to
@@ -42,6 +42,7 @@
#include <asm/gic_v3_defs.h>
#include <asm/gic_v3_its.h>
#include <asm/io.h>
+#include <asm/sysregs.h>
/* Global state */
static struct {
@@ -1,8 +1,6 @@
#ifndef __ASM_ARM_ARM32_PROCESSOR_H
#define __ASM_ARM_ARM32_PROCESSOR_H
-#include <asm/cpregs.h>
-
#define ACTLR_CAXX_SMP (1<<6)
#ifndef __ASSEMBLY__
@@ -60,66 +58,6 @@ struct cpu_user_regs
#endif
-/* Layout as used in assembly, with src/dest registers mixed in */
-#define __CP32(r, coproc, opc1, crn, crm, opc2) coproc, opc1, r, crn, crm, opc2
-#define __CP64(r1, r2, coproc, opc, crm) coproc, opc, r1, r2, crm
-#define CP32(r, name...) __CP32(r, name)
-#define CP64(r, name...) __CP64(r, name)
-
-/* Stringified for inline assembly */
-#define LOAD_CP32(r, name...) "mrc " __stringify(CP32(%r, name)) ";"
-#define STORE_CP32(r, name...) "mcr " __stringify(CP32(%r, name)) ";"
-#define LOAD_CP64(r, name...) "mrrc " __stringify(CP64(%r, %H##r, name)) ";"
-#define STORE_CP64(r, name...) "mcrr " __stringify(CP64(%r, %H##r, name)) ";"
-
-/* Issue a CP operation which takes no argument,
- * uses r0 as a placeholder register. */
-#define CMD_CP32(name...) "mcr " __stringify(CP32(r0, name)) ";"
-
-#ifndef __ASSEMBLY__
-
-/* C wrappers */
-#define READ_CP32(name...) ({ \
- register uint32_t _r; \
- asm volatile(LOAD_CP32(0, name) : "=r" (_r)); \
- _r; })
-
-#define WRITE_CP32(v, name...) do { \
- register uint32_t _r = (v); \
- asm volatile(STORE_CP32(0, name) : : "r" (_r)); \
-} while (0)
-
-#define READ_CP64(name...) ({ \
- register uint64_t _r; \
- asm volatile(LOAD_CP64(0, name) : "=r" (_r)); \
- _r; })
-
-#define WRITE_CP64(v, name...) do { \
- register uint64_t _r = (v); \
- asm volatile(STORE_CP64(0, name) : : "r" (_r)); \
-} while (0)
-
-/*
- * C wrappers for accessing system registers.
- *
- * Registers come in 3 types:
- * - those which are always 32-bit regardless of AArch32 vs AArch64
- * (use {READ,WRITE}_SYSREG32).
- * - those which are always 64-bit regardless of AArch32 vs AArch64
- * (use {READ,WRITE}_SYSREG64).
- * - those which vary between AArch32 and AArch64 (use {READ,WRITE}_SYSREG).
- */
-#define READ_SYSREG32(R...) READ_CP32(R)
-#define WRITE_SYSREG32(V, R...) WRITE_CP32(V, R)
-
-#define READ_SYSREG64(R...) READ_CP64(R)
-#define WRITE_SYSREG64(V, R...) WRITE_CP64(V, R)
-
-#define READ_SYSREG(R...) READ_SYSREG32(R)
-#define WRITE_SYSREG(V, R...) WRITE_SYSREG32(V, R)
-
-#endif /* __ASSEMBLY__ */
-
#endif /* __ASM_ARM_ARM32_PROCESSOR_H */
/*
* Local variables:
new file mode 100644
@@ -0,0 +1,74 @@
+#ifndef __ASM_ARM_ARM32_SYSREGS_H
+#define __ASM_ARM_ARM32_SYSREGS_H
+
+#include <asm/cpregs.h>
+
+/* Layout as used in assembly, with src/dest registers mixed in */
+#define __CP32(r, coproc, opc1, crn, crm, opc2) coproc, opc1, r, crn, crm, opc2
+#define __CP64(r1, r2, coproc, opc, crm) coproc, opc, r1, r2, crm
+#define CP32(r, name...) __CP32(r, name)
+#define CP64(r, name...) __CP64(r, name)
+
+/* Stringified for inline assembly */
+#define LOAD_CP32(r, name...) "mrc " __stringify(CP32(%r, name)) ";"
+#define STORE_CP32(r, name...) "mcr " __stringify(CP32(%r, name)) ";"
+#define LOAD_CP64(r, name...) "mrrc " __stringify(CP64(%r, %H##r, name)) ";"
+#define STORE_CP64(r, name...) "mcrr " __stringify(CP64(%r, %H##r, name)) ";"
+
+/* Issue a CP operation which takes no argument,
+ * uses r0 as a placeholder register. */
+#define CMD_CP32(name...) "mcr " __stringify(CP32(r0, name)) ";"
+
+#ifndef __ASSEMBLY__
+
+/* C wrappers */
+#define READ_CP32(name...) ({ \
+ register uint32_t _r; \
+ asm volatile(LOAD_CP32(0, name) : "=r" (_r)); \
+ _r; })
+
+#define WRITE_CP32(v, name...) do { \
+ register uint32_t _r = (v); \
+ asm volatile(STORE_CP32(0, name) : : "r" (_r)); \
+} while (0)
+
+#define READ_CP64(name...) ({ \
+ register uint64_t _r; \
+ asm volatile(LOAD_CP64(0, name) : "=r" (_r)); \
+ _r; })
+
+#define WRITE_CP64(v, name...) do { \
+ register uint64_t _r = (v); \
+ asm volatile(STORE_CP64(0, name) : : "r" (_r)); \
+} while (0)
+
+/*
+ * C wrappers for accessing system registers.
+ *
+ * Registers come in 3 types:
+ * - those which are always 32-bit regardless of AArch32 vs AArch64
+ * (use {READ,WRITE}_SYSREG32).
+ * - those which are always 64-bit regardless of AArch32 vs AArch64
+ * (use {READ,WRITE}_SYSREG64).
+ * - those which vary between AArch32 and AArch64 (use {READ,WRITE}_SYSREG).
+ */
+#define READ_SYSREG32(R...) READ_CP32(R)
+#define WRITE_SYSREG32(V, R...) WRITE_CP32(V, R)
+
+#define READ_SYSREG64(R...) READ_CP64(R)
+#define WRITE_SYSREG64(V, R...) WRITE_CP64(V, R)
+
+#define READ_SYSREG(R...) READ_SYSREG32(R)
+#define WRITE_SYSREG(V, R...) WRITE_SYSREG32(V, R)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ARM_ARM32_SYSREGS_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
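Illustrative only, not part of the patch: a minimal usage sketch of the AArch32 wrappers above, assuming the MIDR (CP32) and CNTPCT (CP64) encodings provided by <asm/cpregs.h>; the helper names are made up for the example.

    #include <xen/types.h>
    #include <asm/sysregs.h>

    static inline uint32_t example_read_midr(void)
    {
        /* READ_SYSREG32 -> READ_CP32, roughly
         * asm volatile("mrc p15, 0, %0, c0, c0, 0;" : "=r" (_r)); */
        return READ_SYSREG32(MIDR);
    }

    static inline uint64_t example_read_cntpct(void)
    {
        /* READ_SYSREG64 -> READ_CP64, i.e. an mrrc returning a 64-bit value */
        return READ_SYSREG64(CNTPCT);
    }

The WRITE_* variants are symmetrical (mcr/mcrr), and CMD_CP32 issues an operation whose value is ignored, with r0 used only as a placeholder register.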
@@ -3,8 +3,6 @@
#include <xen/stringify.h>
-#include <asm/arm64/sysregs.h>
-
#ifndef __ASSEMBLY__
/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). */
@@ -89,29 +87,6 @@ struct cpu_user_regs
#undef __DECL_REG
-/* Access to system registers */
-
-#define READ_SYSREG32(name) ({ \
- uint32_t _r; \
- asm volatile("mrs %0, "__stringify(name) : "=r" (_r)); \
- _r; })
-#define WRITE_SYSREG32(v, name) do { \
- uint32_t _r = v; \
- asm volatile("msr "__stringify(name)", %0" : : "r" (_r)); \
-} while (0)
-
-#define WRITE_SYSREG64(v, name) do { \
- uint64_t _r = v; \
- asm volatile("msr "__stringify(name)", %0" : : "r" (_r)); \
-} while (0)
-#define READ_SYSREG64(name) ({ \
- uint64_t _r; \
- asm volatile("mrs %0, "__stringify(name) : "=r" (_r)); \
- _r; })
-
-#define READ_SYSREG(name) READ_SYSREG64(name)
-#define WRITE_SYSREG(v, name) WRITE_SYSREG64(v, name)
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARM_ARM64_PROCESSOR_H */
@@ -57,6 +57,29 @@
#define ICH_AP1R2_EL2 __AP1Rx_EL2(2)
#define ICH_AP1R3_EL2 __AP1Rx_EL2(3)
+/* Access to system registers */
+
+#define READ_SYSREG32(name) ({ \
+ uint32_t _r; \
+ asm volatile("mrs %0, "__stringify(name) : "=r" (_r)); \
+ _r; })
+#define WRITE_SYSREG32(v, name) do { \
+ uint32_t _r = v; \
+ asm volatile("msr "__stringify(name)", %0" : : "r" (_r)); \
+} while (0)
+
+#define WRITE_SYSREG64(v, name) do { \
+ uint64_t _r = v; \
+ asm volatile("msr "__stringify(name)", %0" : : "r" (_r)); \
+} while (0)
+#define READ_SYSREG64(name) ({ \
+ uint64_t _r; \
+ asm volatile("mrs %0, "__stringify(name) : "=r" (_r)); \
+ _r; })
+
+#define READ_SYSREG(name) READ_SYSREG64(name)
+#define WRITE_SYSREG(v, name) WRITE_SYSREG64(v, name)
+
#endif /* _ASM_ARM_ARM64_SYSREGS_H */
/*
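Illustrative only, not part of the patch: on AArch64 the same wrapper names expand directly to mrs/msr on architectural register names, e.g. (hypothetical helper):

    #include <xen/types.h>
    #include <asm/sysregs.h>

    static inline uint64_t example_read_cntpct_el0(void)
    {
        /* Expands to asm volatile("mrs %0, CNTPCT_EL0" : "=r" (_r)); */
        return READ_SYSREG64(CNTPCT_EL0);
    }

Common code can therefore use READ_SYSREG/WRITE_SYSREG with the *_EL* names on both architectures, with <asm/cpregs.h> providing the AArch32 aliases and <asm/sysregs.h> selecting the per-architecture implementation.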
@@ -4,6 +4,7 @@
#include <public/xen.h>
#include <asm/processor.h>
#include <asm/lpae.h>
+#include <asm/sysregs.h>
#ifdef CONFIG_ARM_64
#define PADDR_BITS 48
@@ -4,13 +4,7 @@
#ifndef __ASSEMBLY__
#include <xen/types.h>
-#if defined(CONFIG_ARM_32)
-# include <asm/arm32/processor.h>
-#elif defined(CONFIG_ARM_64)
-# include <asm/arm64/processor.h>
-#else
-# error "unknown ARM variant"
-#endif
+#include <asm/sysregs.h>
extern char __per_cpu_start[], __per_cpu_data_end[];
extern unsigned long __per_cpu_offset[NR_CPUS];
new file mode 100644
@@ -0,0 +1,20 @@
+#ifndef __ASM_ARM_SYSREGS_H
+#define __ASM_ARM_SYSREGS_H
+
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/sysregs.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/sysregs.h>
+#else
+# error "unknown ARM variant"
+#endif
+
+#endif /* __ASM_ARM_SYSREGS_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
@@ -1,7 +1,7 @@
#ifndef __ARM_TIME_H__
#define __ARM_TIME_H__
-#include <asm/processor.h>
+#include <asm/sysregs.h>
#define DT_MATCH_TIMER \
DT_MATCH_COMPATIBLE("arm,armv7-timer"), \