@@ -29,6 +29,7 @@ endif

ifdef CONFIG_TARGET_SOCFPGA_STRATIX10
obj-y += clock_manager_s10.o
+obj-y += lowlevel_init_64.o
obj-y += mailbox_s10.o
obj-y += misc_s10.o
obj-y += mmu-arm64_s10.o
@@ -41,6 +42,7 @@ endif

ifdef CONFIG_TARGET_SOCFPGA_AGILEX
obj-y += clock_manager_agilex.o
+obj-y += lowlevel_init_64.o
obj-y += mailbox_s10.o
obj-y += misc_s10.o
obj-y += mmu-arm64_s10.o
new file mode 100644
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019, Intel Corporation
+ */
+
+#include <asm-offsets.h>
+#include <config.h>
+#include <linux/linkage.h>
+#include <asm/macro.h>
+
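+/*
+ * Secure GIC setup and secondary-CPU parking (spin-table protocol)
+ * shared by the 64-bit SoCFPGA targets (Stratix 10 and Agilex).
+ */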
+ENTRY(lowlevel_init)
+ mov x29, lr /* Save LR */
+
+#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
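+ /*
+ * Master falls through to init the distributor; slaves skip GIC
+ * setup entirely (CONFIG_SPL_ATF) or do only the per-CPU init at 1.
+ */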
+#ifdef CONFIG_SPL_ATF
+ branch_if_slave x0, 2f
+#else
+ branch_if_slave x0, 1f
+#endif
+
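+ /* Master: secure init of the GIC distributor */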
+ ldr x0, =GICD_BASE
+ bl gic_init_secure
+ b 2f
+
+1:
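+ /* Per-CPU init: GICv3 redistributor, or GICv2 CPU interface */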
+#if defined(CONFIG_GICV3)
+ ldr x0, =GICR_BASE
+ bl gic_init_secure_percpu
+#elif defined(CONFIG_GICV2)
+ ldr x0, =GICD_BASE
+ ldr x1, =GICC_BASE
+ bl gic_init_secure_percpu
+#endif
+#endif
+
+#ifdef CONFIG_ARMV8_MULTIENTRY
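+ /* Master returns to the caller via label 3; slaves park below */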
+ branch_if_master x0, x1, 3f
+
+ /*
+ * Slaves should wait for the master to set up the spin table.
+ * This sync prevents slaves from observing a stale spin-table
+ * value and jumping to the wrong place.
+ */
+#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
+#ifdef CONFIG_GICV2
+ ldr x0, =GICC_BASE
+#endif
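+ /* Slaves sleep in WFI until woken by an interrupt from the master */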
+ bl gic_wait_for_interrupt
+#endif
+
+ /*
+ * All slaves drop to EL2 (and optionally to EL1);
+ * armv8_switch_to_el2 resumes at the entry point in x4.
+ */
+ adr x4, lowlevel_in_el2
+ ldr x5, =ES_TO_AARCH64
+ bl armv8_switch_to_el2
+
+lowlevel_in_el2:
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
+ adr x4, lowlevel_in_el1
+ ldr x5, =ES_TO_AARCH64
+ bl armv8_switch_to_el1
+
+lowlevel_in_el1:
+#endif
+#endif /* CONFIG_ARMV8_MULTIENTRY */
+
+2:
+#ifdef CONFIG_SPL_BUILD
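+ /*
+ * Spin-table handoff: jump to a non-zero CPU_RELEASE_ADDR entry;
+ * otherwise slaves loop back to 2 while the master returns.
+ */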
+ ldr x4, =CPU_RELEASE_ADDR
+ ldr x5, [x4]
+ cbz x5, checkslavecpu
+ br x5
+checkslavecpu:
+ branch_if_slave x0, 2b
+#endif
+
+3:
+ mov lr, x29 /* Restore LR */
+ ret
+ENDPROC(lowlevel_init)