@@ -8,7 +8,7 @@
#ifndef SYSEMU_TCG_H
#define SYSEMU_TCG_H
-void tcg_exec_init(unsigned long tb_size);
+void tcg_exec_init(unsigned long tb_size, bool mirror_rwx);
#ifdef CONFIG_TCG
extern bool tcg_allowed;
#define tcg_enabled() (tcg_allowed)
@@ -39,6 +39,7 @@ struct TCGState {
bool mttcg_enabled;
unsigned long tb_size;
+ bool mirror_rwx;
};
typedef struct TCGState TCGState;
@@ -94,6 +95,7 @@ static void tcg_accel_instance_init(Object *obj)
TCGState *s = TCG_STATE(obj);
s->mttcg_enabled = default_mttcg_enabled();
+ s->mirror_rwx = false;
}
bool mttcg_enabled;
@@ -102,7 +104,7 @@ static int tcg_init(MachineState *ms)
{
TCGState *s = TCG_STATE(current_accel());
- tcg_exec_init(s->tb_size * 1024 * 1024);
+ tcg_exec_init(s->tb_size * 1024 * 1024, s->mirror_rwx);
mttcg_enabled = s->mttcg_enabled;
cpus_register_accel(&tcg_cpus);
@@ -168,6 +170,22 @@ static void tcg_set_tb_size(Object *obj, Visitor *v,
s->tb_size = value;
}
+#ifdef CONFIG_IOS_JIT
+static bool tcg_get_mirror_rwx(Object *obj, Error **errp)
+{
+ TCGState *s = TCG_STATE(obj);
+
+ return s->mirror_rwx;
+}
+
+static void tcg_set_mirror_rwx(Object *obj, bool value, Error **errp)
+{
+ TCGState *s = TCG_STATE(obj);
+
+ s->mirror_rwx = value;
+}
+#endif
+
static void tcg_accel_class_init(ObjectClass *oc, void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
@@ -185,6 +203,13 @@ static void tcg_accel_class_init(ObjectClass *oc, void *data)
object_class_property_set_description(oc, "tb-size",
"TCG translation block cache size");
+
+#ifdef CONFIG_IOS_JIT
+ object_class_property_add_bool(oc, "mirror-rwx",
+ tcg_get_mirror_rwx, tcg_set_mirror_rwx);
+ object_class_property_set_description(oc, "mirror-rwx",
+ "mirror map executable pages for TCG on iOS");
+#endif
}
static const TypeInfo tcg_accel_type = {
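For context, the sub-option "-accel tcg,mirror-rwx=on" is applied to the
accelerator object as a QOM property, so the setter above is what ultimately
runs. A minimal sketch of flipping the same property from C, as hypothetical
caller code (not part of this patch), assuming the QEMU 5.2+
object_property_set_bool() argument order and qapi/error.h for error_fatal:

    /* hypothetical caller: enable mirror mapping on the current accelerator */
    Object *accel = OBJECT(current_accel());
    object_property_set_bool(accel, "mirror-rwx", true, &error_fatal);
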
@@ -1042,12 +1042,15 @@ static inline void *split_cross_256mb(void *buf1, size_t size1)
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
__attribute__((aligned(CODE_GEN_ALIGN)));
-static inline void *alloc_code_gen_buffer(void)
+static inline void *alloc_code_gen_buffer(bool no_rwx_pages)
{
void *buf = static_code_gen_buffer;
void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
size_t size;
+ /* not applicable */
+ assert(!no_rwx_pages);
+
/* page-align the beginning and end of the buffer */
buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
@@ -1076,24 +1079,32 @@ static inline void *alloc_code_gen_buffer(void)
return buf;
}
#elif defined(_WIN32)
-static inline void *alloc_code_gen_buffer(void)
+static inline void *alloc_code_gen_buffer(bool no_rwx_pages)
{
size_t size = tcg_ctx->code_gen_buffer_size;
+ assert(!no_rwx_pages); /* not applicable */
return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
PAGE_EXECUTE_READWRITE);
}
#else
-static inline void *alloc_code_gen_buffer(void)
+static inline void *alloc_code_gen_buffer(bool no_rwx_pages)
{
-#if defined(CONFIG_IOS_JIT)
int prot = PROT_READ | PROT_EXEC;
-#else
- int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
-#endif
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
size_t size = tcg_ctx->code_gen_buffer_size;
void *buf;
+#if defined(CONFIG_DARWIN) /* applies to both iOS and macOS (Apple Silicon) */
+ if (!no_rwx_pages) {
+ prot |= PROT_WRITE;
+ flags |= MAP_JIT;
+ }
+#else
+ /* not applicable */
+ assert(!no_rwx_pages);
+ prot |= PROT_WRITE;
+#endif
+
buf = mmap(NULL, size, prot, flags, -1, 0);
if (buf == MAP_FAILED) {
return NULL;
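
The alloc_jit_rw_mirror() helper referenced in the next hunk is introduced
earlier in this series and its body is not shown here. As a minimal sketch of
the idea, assuming a Mach-based implementation (an assumption about the
implementation, not this patch's actual code), the RX code buffer can be
remapped at a second address and that alias made writable:

    /* sketch only: mirror 'base' at a new RW address, or return NULL */
    /* assumes <stdbool.h>, <mach/mach.h> and <mach/mach_vm.h> */
    static void *alloc_jit_rw_mirror_sketch(void *base, size_t size)
    {
        mach_vm_address_t mirror = 0;
        vm_prot_t cur_prot, max_prot;

        if (mach_vm_remap(mach_task_self(), &mirror, size, 0,
                          VM_FLAGS_ANYWHERE,
                          mach_task_self(),
                          (mach_vm_address_t)(uintptr_t)base,
                          false, /* share the pages rather than copy them */
                          &cur_prot, &max_prot,
                          VM_INHERIT_NONE) != KERN_SUCCESS) {
            return NULL;
        }
        /* the remapped alias inherits RX; make it readable and writable */
        if (mach_vm_protect(mach_task_self(), mirror, size, false,
                            VM_PROT_READ | VM_PROT_WRITE) != KERN_SUCCESS) {
            return NULL;
        }
        return (void *)(uintptr_t)mirror;
    }
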
@@ -1173,10 +1184,10 @@ static inline void *alloc_jit_rw_mirror(void *base, size_t size)
}
#endif /* CONFIG_IOS_JIT */
-static inline void code_gen_alloc(size_t tb_size)
+static inline void code_gen_alloc(size_t tb_size, bool mirror_rwx)
{
tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
- tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
+ tcg_ctx->code_gen_buffer = alloc_code_gen_buffer(mirror_rwx);
if (tcg_ctx->code_gen_buffer == NULL) {
fprintf(stderr, "Could not allocate dynamic translator buffer\n");
exit(1);
@@ -1184,13 +1195,18 @@ static inline void code_gen_alloc(size_t tb_size)
#if defined(CONFIG_IOS_JIT)
void *mirror;
- /* For iOS JIT we need a mirror mapping for code execution */
- mirror = alloc_jit_rw_mirror(tcg_ctx->code_gen_buffer,
- tcg_ctx->code_gen_buffer_size
- );
- if (mirror == NULL) {
- fprintf(stderr, "Could not remap code buffer mirror\n");
- exit(1);
+ if (mirror_rwx) {
+ /* For iOS JIT without entitlements we need a writable mirror of the code buffer */
+ mirror = alloc_jit_rw_mirror(tcg_ctx->code_gen_buffer,
+ tcg_ctx->code_gen_buffer_size
+ );
+ if (mirror == NULL) {
+ fprintf(stderr, "Could not remap code buffer mirror\n");
+ exit(1);
+ }
+ } else {
+ /* with JIT entitlements the buffer is directly writable, so no mirror is needed */
+ mirror = tcg_ctx->code_gen_buffer;
}
tcg_ctx->code_rw_mirror_diff = mirror - tcg_ctx->code_gen_buffer;
#endif /* CONFIG_IOS_JIT */
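
With code_rw_mirror_diff in place, every write of generated code has to go
through the RW alias while execution keeps using the RX address. A
hypothetical helper (name and placement are illustrative, not part of this
patch) showing the address translation this field enables:

    /* hypothetical: convert an RX code pointer into its writable alias */
    static inline void *tcg_rw_ptr(const void *rx_ptr)
    {
    #if defined(CONFIG_IOS_JIT)
        /* diff is zero when mirror_rwx is off, so this is always safe */
        return (void *)((uintptr_t)rx_ptr + tcg_ctx->code_rw_mirror_diff);
    #else
        return (void *)(uintptr_t)rx_ptr;
    #endif
    }

A store such as "*(uint32_t *)tcg_rw_ptr(code_ptr) = insn;" then lands in the
mirror mapping, while code_ptr itself remains valid for execution and for
relative-branch arithmetic.
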
@@ -1217,16 +1233,18 @@ static void tb_htable_init(void)
qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}
-/* Must be called before using the QEMU cpus. 'tb_size' is the size
- (in bytes) allocated to the translation buffer. Zero means default
- size. */
-void tcg_exec_init(unsigned long tb_size)
+/*
+ * Must be called before using the QEMU cpus. 'tb_size' is the size
+ * (in bytes) allocated to the translation buffer. Zero means default
+ * size. 'mirror_rwx' is only applicable on iOS hosts.
+ */
+void tcg_exec_init(unsigned long tb_size, bool mirror_rwx)
{
tcg_allowed = true;
cpu_gen_init();
page_init();
tb_htable_init();
- code_gen_alloc(tb_size);
+ code_gen_alloc(tb_size, mirror_rwx);
#if defined(CONFIG_SOFTMMU)
/* There's no guest base to take into account, so go ahead and
initialize the prologue now. */
@@ -910,7 +910,7 @@ int main(int argc, char **argv)
}
/* init tcg before creating CPUs and to get qemu_host_page_size */
- tcg_exec_init(0);
+ tcg_exec_init(0, false);
cpu_type = parse_cpu_option(cpu_model);
cpu = cpu_create(cpu_type);
@@ -705,7 +705,7 @@ int main(int argc, char **argv, char **envp)
cpu_type = parse_cpu_option(cpu_model);
/* init tcg before creating CPUs and to get qemu_host_page_size */
- tcg_exec_init(0);
+ tcg_exec_init(0, false);
cpu = cpu_create(cpu_type);
env = cpu->env_ptr;
@@ -123,6 +123,9 @@ DEF("accel", HAS_ARG, QEMU_OPTION_accel,
" igd-passthru=on|off (enable Xen integrated Intel graphics passthrough, default=off)\n"
" kernel-irqchip=on|off|split controls accelerated irqchip support (default=on)\n"
" kvm-shadow-mem=size of KVM shadow MMU in bytes\n"
+#ifdef CONFIG_IOS_JIT
+ " mirror-rwx=on|off (mirror map executable pages for TCG on iOS)\n"
+#endif
" tb-size=n (TCG translation block cache size)\n"
" thread=single|multi (enable multi-threaded TCG)\n", QEMU_ARCH_ALL)
SRST
@@ -148,6 +151,14 @@ SRST
``kvm-shadow-mem=size``
Defines the size of the KVM shadow MMU.
+#ifdef CONFIG_IOS_JIT
+
+ ``mirror-rwx=on|off``
+ Only applicable to TCG running on iOS hosts. When enabled, TB code is
+ written to a mirror-mapped address that is separate from the address
+ from which it is executed. Disabled by default.
+#endif
+
``tb-size=n``
Controls the size (in MiB) of the TCG translation block cache.
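
As a usage illustration (not part of the diff), on an iOS host build the new
option is given on the command line like any other accelerator property and
can be combined with the existing ones:

    qemu-system-aarch64 -accel tcg,mirror-rwx=on,tb-size=256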