From patchwork Tue Jan 21 10:19:20 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Alex Nemirovsky
X-Patchwork-Id: 239859
List-Id: U-Boot discussion
From: Alex Nemirovsky <alex.nemirovsky@cortina-access.com>
Date: Tue, 21 Jan 2020 10:19:20 +0000
Subject: [PATCH v2 2/7] cortina: common: armv8: add custom init for CA ARMv8 based SoCs
In-Reply-To: <1579601912-27737-1-git-send-email-alex.nemirovsky@cortina-access.com>
References: <1579601912-27737-1-git-send-email-alex.nemirovsky@cortina-access.com>
Message-ID: <1579601912-27737-3-git-send-email-alex.nemirovsky@cortina-access.com>

Cortina Access ARMv8 boards share common custom ARMv8 init routines.
Add common board init code for Cortina Access ARMv8-based SoCs.

Signed-off-by: Alex Nemirovsky <alex.nemirovsky@cortina-access.com>
---

Changes in v2: None

 board/cortina/common/armv8/lowlevel_init.S | 87 ++++++++++++++++++++++++++++++
 1 file changed, 87 insertions(+)
 create mode 100644 board/cortina/common/armv8/lowlevel_init.S

diff --git a/board/cortina/common/armv8/lowlevel_init.S b/board/cortina/common/armv8/lowlevel_init.S
new file mode 100644
index 0000000..702611b
--- /dev/null
+++ b/board/cortina/common/armv8/lowlevel_init.S
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2020 Cortina-Access
+ *
+ */
+
+
+#include <config.h>
+#include <linux/linkage.h>
+#include <asm/gic.h>
+#include <asm/macro.h>
+#include <asm/system.h>
+
+	.globl lowlevel_init
+lowlevel_init:
+	mov	x29, lr			/* Save LR */
+
+#if defined(CONFIG_CA77XX)
+	/* Enable SMPEN in CPUECTLR */
+	mrs	x0, s3_1_c15_c2_1
+	tst	x0, #0x40
+	b.ne	skip_smp_setup
+	orr	x0, x0, #0x40
+	msr	s3_1_c15_c2_1, x0
+skip_smp_setup:
+#endif
+
+#if defined(CONFIG_CA8277B)
+	/* Enable CPU Timer */
+	ldr	x0, =CONFIG_SYS_TIMER_BASE
+	mov	x1, #1
+	str	w1, [x0]
+#endif
+
+#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
+	branch_if_slave x0, 1f
+#ifndef CONFIG_TARGET_VENUS
+	ldr	x0, =GICD_BASE
+	bl	gic_init_secure
+#endif
+1:
+#if defined(CONFIG_GICV3)
+	ldr	x0, =GICR_BASE
+	bl	gic_init_secure_percpu
+#elif defined(CONFIG_GICV2)
+	ldr	x0, =GICD_BASE
+	ldr	x1, =GICC_BASE
+	bl	gic_init_secure_percpu
+#endif
+#endif
+
+#ifdef CONFIG_ARMV8_MULTIENTRY
+	branch_if_master x0, x1, 2f
+
+	/*
+	 * Slaves should wait for the master to clear the spin table.
+	 * This sync prevents slaves from observing a stale spin-table
+	 * value and jumping to the wrong place.
+	 */
+#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
+#ifdef CONFIG_GICV2
+	ldr	x0, =GICC_BASE
+#endif
+	bl	gic_wait_for_interrupt
+#endif
+
+	/*
+	 * All slaves will enter EL2 and optionally EL1.
+	 */
+	adr	x4, lowlevel_in_el2
+	ldr	x5, =ES_TO_AARCH64
+	bl	armv8_switch_to_el2
+
+lowlevel_in_el2:
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
+	adr	x4, lowlevel_in_el1
+	ldr	x5, =ES_TO_AARCH64
+	bl	armv8_switch_to_el1
+
+lowlevel_in_el1:
+#endif
+
+#endif /* CONFIG_ARMV8_MULTIENTRY */
+
+2:
+	mov	lr, x29			/* Restore LR */
+	ret
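
For context: lowlevel_init is reached via "bl lowlevel_init" from the common
ARMv8 reset path in arch/arm/cpu/armv8/start.S, which is why LR is stashed in
x29 across the gic_init_secure*() calls. The routine also expects the platform
config header to supply the GIC and timer base addresses it references. Below
is a minimal sketch of such a header for a GICv2 build; the file path and every
address in it are illustrative assumptions, not values taken from this series:

    /* include/configs/ca_board_example.h - illustrative sketch only;
     * the path and all addresses below are assumed, not from this patch.
     */
    #ifndef __CA_BOARD_EXAMPLE_H
    #define __CA_BOARD_EXAMPLE_H

    /* CPU timer block written by lowlevel_init when CONFIG_CA8277B is set */
    #define CONFIG_SYS_TIMER_BASE	0xf4321000	/* assumed address */

    /* GICv2 distributor/CPU-interface bases used by gic_init_secure*() */
    #define GICD_BASE			0xf7011000	/* assumed address */
    #define GICC_BASE			0xf7012000	/* assumed address */

    #endif /* __CA_BOARD_EXAMPLE_H */

A GICv3 build would instead provide GICR_BASE (the redistributor base) in place
of GICC_BASE, matching the CONFIG_GICV3 branch of the per-CPU init above.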