diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -11,7 +11,7 @@
 ARCH_REL_TYPE_ABS := R_AARCH64_JUMP_SLOT|R_AARCH64_GLOB_DAT|R_AARCH64_ABS64
 include $(srctree)/lib/vdso/Makefile
 
-obj-vdso := vgettimeofday.o note.o sigreturn.o
+obj-vdso := vgettimeofday.o note.o sigreturn.o vgetcpu.o
 
 # Build rules
 targets := $(obj-vdso) vdso.so vdso.so.dbg
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
--- a/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -80,6 +80,7 @@ VERSION
 		__kernel_gettimeofday;
 		__kernel_clock_gettime;
 		__kernel_clock_getres;
+		__kernel_getcpu;
 	local: *;
 	};
 }
diff --git a/arch/arm64/kernel/vdso/vgetcpu.c b/arch/arm64/kernel/vdso/vgetcpu.c
new file mode 100644
--- /dev/null
+++ b/arch/arm64/kernel/vdso/vgetcpu.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM64 userspace implementations of getcpu()
+ *
+ * Copyright (C) 2020 ARM Limited
+ *
+ */
+
+#include <asm/unistd.h>
+#include <asm/vdso/datapage.h>
+
+struct getcpucache;
+
+static __always_inline
+int getcpu_fallback(unsigned int *_cpu, unsigned int *_node,
+		    struct getcpucache *_c)
+{
+	register unsigned int *cpu asm("x0") = _cpu;
+	register unsigned int *node asm("x1") = _node;
+	register struct getcpucache *c asm("x2") = _c;
+	register long ret asm ("x0");
+	register long nr asm("x8") = __NR_getcpu;
+
+	asm volatile(
+	" svc #0\n"
+	: "=r" (ret)
+	: "r" (cpu), "r" (node), "r" (c), "r" (nr)
+	: "memory");
+
+	return ret;
+}
+
+int __kernel_getcpu(unsigned int *cpu, unsigned int *node,
+		    struct getcpucache *c)
+{
+	struct vdso_cpu_data *cpu_data = __vdso_cpu_data();
+
+	if (cpu_data) {
+		if (cpu)
+			*cpu = cpu_data->cpu;
+		if (node)
+			*node = cpu_data->node;
+
+		return 0;
+	}
+
+	return getcpu_fallback(cpu, node, c);
+}
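
Not part of the patch: a minimal userspace sketch of how the new __kernel_getcpu entry could be exercised once the symbol is exported. It assumes glibc's dlopen() will hand back a handle to the already-mapped vDSO under the soname "linux-vdso.so.1", and it assumes "LINUX_2.6.39" as the version node the symbol is added to (the vdso.lds.S hunk above does not show the node name); both are assumptions, not something the patch states.

/* Hypothetical test program, not part of this series: resolve and call
 * __kernel_getcpu directly from the vDSO.  Build with: gcc -O2 getcpu.c -ldl
 */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>

/* Mirrors the prototype above; the getcpu cache argument is unused by the
 * vDSO implementation and may be passed as NULL. */
typedef int (*getcpu_fn)(unsigned int *cpu, unsigned int *node, void *cache);

int main(void)
{
	unsigned int cpu = 0, node = 0;
	getcpu_fn vdso_getcpu;
	void *vdso;

	/* The vDSO is already mapped by the kernel; RTLD_NOLOAD only asks
	 * the dynamic linker for a handle to the existing mapping. */
	vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_NOLOAD);
	if (!vdso) {
		fprintf(stderr, "vDSO not found: %s\n", dlerror());
		return 1;
	}

	/* "LINUX_2.6.39" is assumed to be the version node the symbol is
	 * exported under; adjust to match the actual linker script. */
	vdso_getcpu = (getcpu_fn)dlvsym(vdso, "__kernel_getcpu", "LINUX_2.6.39");
	if (!vdso_getcpu) {
		fprintf(stderr, "__kernel_getcpu not exported\n");
		return 1;
	}

	if (vdso_getcpu(&cpu, &node, NULL) == 0)
		printf("running on cpu %u, node %u\n", cpu, node);

	return 0;
}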
Some applications, especially tracing ones, benefit from avoiding the
syscall overhead of getcpu() calls, so provide a vDSO implementation
of it.

Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/kernel/vdso/Makefile   |  2 +-
 arch/arm64/kernel/vdso/vdso.lds.S |  1 +
 arch/arm64/kernel/vdso/vgetcpu.c  | 48 +++++++++++++++++++++++++++++++
 3 files changed, 50 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm64/kernel/vdso/vgetcpu.c
--
2.20.1
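
As a rough illustration of the syscall overhead the changelog refers to, the sketch below times raw getcpu() system calls; the vDSO pointer resolved in the earlier snippet can be dropped into the same loop for comparison. The iteration count and timing method are arbitrary choices for illustration, not taken from the patch or any posted numbers.

/* Hypothetical micro-benchmark, not part of this series: per-call cost of
 * getcpu() via the syscall path.  Build with: gcc -O2 bench.c
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

#define ITERS 1000000L

int main(void)
{
	unsigned int cpu, node;
	struct timespec a, b;
	long i;

	clock_gettime(CLOCK_MONOTONIC, &a);
	for (i = 0; i < ITERS; i++)
		syscall(SYS_getcpu, &cpu, &node, NULL);
	clock_gettime(CLOCK_MONOTONIC, &b);

	/* Report the average cost of one getcpu() syscall in nanoseconds. */
	printf("getcpu() via syscall: %.1f ns/call\n",
	       ((b.tv_sec - a.tv_sec) * 1e9 + (b.tv_nsec - a.tv_nsec)) / ITERS);
	return 0;
}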