@@ -193,4 +193,12 @@ lr .req x30 // link register
str \src, [\tmp, :lo12:\sym]
.endm
+/*
+ * Annotate a function as position independent, i.e., safe to be called before
+ * the kernel virtual mapping is activated.
+ */
+#define ENDPIPROC(x) \
+ .globl __pi_##x; .set __pi_##x, x; \
+ ENDPROC(x)
+
#endif /* __ASM_ASSEMBLER_H */
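
For illustration, a minimal sketch of what ENDPIPROC(memset) would expand to
(ENDPROC() itself comes from <linux/linkage.h> and emits the usual .type and
.size directives for the function symbol):

	.globl	__pi_memset		// export the position-independent alias
	.set	__pi_memset, memset	// alias resolves to the same address
	ENDPROC(memset)			// regular end-of-function annotation
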
@@ -41,4 +41,4 @@ ENTRY(memchr)
ret
2: mov x0, #0
ret
-ENDPROC(memchr)
+ENDPIPROC(memchr)
@@ -255,4 +255,4 @@ CPU_LE( rev data2, data2 )
.Lret0:
mov result, #0
ret
-ENDPROC(memcmp)
+ENDPIPROC(memcmp)
@@ -198,4 +198,4 @@ ENTRY(memcpy)
tst count, #0x3f
b.ne .Ltail63
ret
-ENDPROC(memcpy)
+ENDPIPROC(memcpy)
@@ -194,4 +194,4 @@ ENTRY(memmove)
tst count, #0x3f
b.ne .Ltail63
ret
-ENDPROC(memmove)
+ENDPIPROC(memmove)
@@ -213,4 +213,4 @@ ENTRY(memset)
ands count, count, zva_bits_x
b.ne .Ltail_maybe_long
ret
-ENDPROC(memset)
+ENDPIPROC(memset)
@@ -123,4 +123,4 @@ CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
csinv data1, data1, xzr, le
csel data2, data2, data2a, le
b .Lrealigned
-ENDPROC(strlen)
+ENDPIPROC(strlen)
@@ -307,4 +307,4 @@ CPU_BE( orr syndrome, diff, has_nul )
.Lret0:
mov result, #0
ret
-ENDPROC(strncmp)
+ENDPIPROC(strncmp)
@@ -98,7 +98,7 @@ ENTRY(__flush_dcache_area)
b.lo 1b
dsb sy
ret
-ENDPROC(__flush_dcache_area)
+ENDPIPROC(__flush_dcache_area)
/*
* __inval_cache_range(start, end)
@@ -131,7 +131,7 @@ __dma_inv_range:
b.lo 2b
dsb sy
ret
-ENDPROC(__inval_cache_range)
+ENDPIPROC(__inval_cache_range)
ENDPROC(__dma_inv_range)
/*
@@ -171,7 +171,7 @@ ENTRY(__dma_flush_range)
b.lo 1b
dsb sy
ret
-ENDPROC(__dma_flush_range)
+ENDPIPROC(__dma_flush_range)
/*
* __dma_map_area(start, size, dir)
@@ -184,7 +184,7 @@ ENTRY(__dma_map_area)
cmp w2, #DMA_FROM_DEVICE
b.eq __dma_inv_range
b __dma_clean_range
-ENDPROC(__dma_map_area)
+ENDPIPROC(__dma_map_area)
/*
* __dma_unmap_area(start, size, dir)
@@ -197,4 +197,4 @@ ENTRY(__dma_unmap_area)
cmp w2, #DMA_TO_DEVICE
b.ne __dma_inv_range
ret
-ENDPROC(__dma_unmap_area)
+ENDPIPROC(__dma_unmap_area)
For more control over which functions are called with the MMU off or with
the UEFI 1:1 mapping active, annotate some assembler routines as position
independent. This is done by introducing ENDPIPROC(), which replaces the
ENDPROC() declaration of those routines.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 arch/arm64/include/asm/assembler.h |  8 ++++++++
 arch/arm64/lib/memchr.S            |  2 +-
 arch/arm64/lib/memcmp.S            |  2 +-
 arch/arm64/lib/memcpy.S            |  2 +-
 arch/arm64/lib/memmove.S           |  2 +-
 arch/arm64/lib/memset.S            |  2 +-
 arch/arm64/lib/strlen.S            |  2 +-
 arch/arm64/lib/strncmp.S           |  2 +-
 arch/arm64/mm/cache.S              | 10 +++++-----
 9 files changed, 20 insertions(+), 12 deletions(-)
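
As a hedged sketch of the intended use (not part of this patch): a caller
running with the MMU off, or under the UEFI 1:1 mapping, would reach one of
these routines through its __pi_ alias using only PC-relative addressing.
The copy_dest/copy_src labels and the COPY_SIZE constant below are made up
for the example:

	adrp	x0, copy_dest		// PC-relative: valid at any load address
	add	x0, x0, :lo12:copy_dest
	adrp	x1, copy_src
	add	x1, x1, :lo12:copy_src
	mov	x2, #COPY_SIZE
	bl	__pi_memcpy		// alias emitted by ENDPIPROC(memcpy)

Using the __pi_ prefix at the call site makes it explicit that only routines
annotated as position independent are entered before the kernel virtual
mapping is activated.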