@@ -498,6 +498,7 @@ dtb_check_done:
bic r9, r9, #31 @ ... of 32 bytes
add r6, r9, r5
add r9, r9, r10
+ stmdb sp!, {r9 - r10} @ preserve target region address
#ifdef DEBUG
sub r10, r6, r5
@@ -521,6 +522,8 @@ dtb_check_done:
/* Preserve offset to relocated code. */
sub r6, r9, r6
+ ldr r1, [sp], #4 @ end of target region
+ ldr r0, [sp], #4 @ start of target region
#ifndef CONFIG_ZBOOT_ROM
/* cache_clean_flush may use the stack, so relocate it */
add sp, sp, r6
@@ -622,6 +625,21 @@ not_relocated: mov r0, #0
add r2, sp, #0x10000 @ 64k max
mov r3, r7
bl decompress_kernel
+
+ mov r0, r4 @ base of inflated image
+ adr r1, LC0 @ actual LC0
+ ldr r2, [r1] @ linktime LC0
+ sub r2, r1, r2 @ LC0 delta
+ ldr r1, [r1, #16] @ link-time address of inflated size word
+ ldr r1, [r1, r2] @ actual inflated size (LE)
+#ifdef __ARMEB__
+ /* size is stored little-endian; byte swap it for big-endian CPUs */
+ eor r2, r1, r1, ror #16
+ bic r2, r2, #0x00ff0000
+ mov r1, r1, ror #8
+ eor r1, r1, r2, lsr #8
+#endif
+ add r1, r1, r0 @ end of inflated image
bl cache_clean_flush
bl cache_off
@@ -1173,6 +1191,9 @@ __armv7_mmu_cache_off:
/*
* Clean and flush the cache to maintain consistency.
*
+ * On entry,
+ * r0 = start address
+ * r1 = end address (exclusive)
* On exit,
* r1, r2, r3, r9, r10, r11, r12 corrupted
* This routine must preserve:
In preparation for turning the decompressor's cache clean/flush operations
into proper by-VA maintenance for v7 cores, pass the start and end addresses
of the regions that need cache maintenance into cache_clean_flush in
registers r0 and r1. Currently, all implementations of cache_clean_flush
ignore these values, so no functional change is expected as a result of
this patch.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm/boot/compressed/head.S | 21 ++++++++++++++++++++
 1 file changed, 21 insertions(+)
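
For context, a minimal sketch of the kind of by-VA clean/invalidate loop that
a follow-up change could hang off the new r0/r1 arguments on v7 cores is shown
below. It is illustrative only and not part of this patch: the 64-byte cache
line size and the register/label choices are assumptions, and a real
implementation would derive the line size from CTR and integrate with the
existing cache_clean_flush code paths.

		@ r0 = start address, r1 = end address (exclusive), as passed in
	0:	mcr	p15, 0, r0, c7, c14, 1	@ DCCIMVAC: clean+invalidate D-line by VA to PoC
		add	r0, r0, #64		@ assumed 64-byte line; read CTR in practice
		cmp	r0, r1
		blo	0b
		dsb				@ complete the D-side maintenance
		mov	r3, #0
		mcr	p15, 0, r3, c7, c5, 0	@ ICIALLU: invalidate entire I-cache
		dsb
		isb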