From: Ard Biesheuvel <ardb@google.com>

Implement the bare minimum needed to discover the size of the
text/rodata region of the loaded image, and use it to remap this
region read-only so that we can execute it while WXN protections are
enabled. Then, boot the loaded image by jumping to the start of it.
---
 src/cmo.rs    | 43 ++++++++++++++++++++++
 src/main.rs   | 25 +++++++++++++
 src/pecoff.rs | 26 +++++++++++++
 3 files changed, 94 insertions(+)

diff --git a/src/cmo.rs b/src/cmo.rs
new file mode 100644
--- /dev/null
+++ b/src/cmo.rs
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+use core::arch::asm;
+
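+// CTR_EL0.IDC: set if d-cache cleaning to the PoU is not needed for i/d coherence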
+const CTR_IDC: u64 = 1 << 28;
+
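+// CTR_EL0.DminLine: log2 of the smallest d-cache line size, in 4-byte words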
+const CTR_DMINLINE_SHIFT: u64 = 16;
+const CTR_DMINLINE_MASK: u64 = 0xf;
+
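+/// Clean the data cache to the point of unification (PoU) for the given
+/// range, so that instruction fetches from it observe the cleaned data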
+pub fn dcache_clean_to_pou(base: *const u8, size: isize) {
+    let ctr = unsafe {
+        let mut l: u64;
+        asm!("mrs {reg}, ctr_el0", // CTR: cache type register
+             reg = out(reg) l,
+             options(pure, nomem, nostack, preserves_flags),
+        );
+        l
+    };
+
+    // Perform the clean only if needed for coherency with the I side
+    if (ctr & CTR_IDC) == 0 {
+        let line_shift = 2 + ((ctr >> CTR_DMINLINE_SHIFT) & CTR_DMINLINE_MASK);
+        let line_size: isize = 1 << line_shift;
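+        // e.g., DminLine == 4 gives line_shift == 6, i.e., 64-byte lines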
+        let num_lines = (size + line_size - 1) >> line_shift;
+        let mut offset: isize = 0;
+
+        for _ in 1..=num_lines {
+            unsafe {
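+                // DC CVAU: clean d-cache line containing the VA to the PoU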
+ asm!("dc cvau, {reg}",
+ reg = in(reg) base.offset(offset),
+ options(nomem, nostack, preserves_flags),
+ );
+ }
+ offset += line_size;
+ }
+ }
+}
diff --git a/src/main.rs b/src/main.rs
@@ -6,11 +6,13 @@
#![allow(incomplete_features)]
#![feature(specialization)]
+mod cmo;
mod console;
mod cstring;
mod fwcfg;
mod pagealloc;
mod paging;
+mod pecoff;
use core::{arch::global_asm, panic::PanicInfo};
use linked_list_allocator::LockedHeap;
@@ -29,6 +31,9 @@ extern "C" {
    static _dtb_end: u8;
}
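+// Linux arm64 boot protocol: x0 carries the FDT pointer, x1..x3 must be zero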
+type EntryFn = unsafe extern "C" fn(*const u8, u64, u64, u64) -> !;
+
const LOAD_ADDRESS: *mut u8 = 0x43210000 as _;
#[no_mangle]
@@ -97,9 +102,29 @@ extern "C" fn efilite_main(base: usize, mapped: usize, used: usize) {
        .load_kernel_image(LOAD_ADDRESS)
        .expect("Failed to load kernel image");
+    let pe_image = pecoff::Parser::from_ptr(LOAD_ADDRESS);
+
+    // Clean the code region of the loaded image to the PoU so we
+    // can safely fetch instructions from it once the PXN/UXN
+    // attributes are cleared
+    let code_size = pe_image.get_code_size();
+    cmo::dcache_clean_to_pou(LOAD_ADDRESS, code_size as isize);
+
    // Switch back to the initial ID map so we can remap
    // the loaded kernel image with different permissions
    paging::deactivate();
+
+    // Remap the text/rodata part of the image read-only so we will
+    // be able to execute it with WXN protections enabled
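+    // (nor_flags, the read-only mapping attributes, is defined outside this hunk)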
+    paging::map_range(LOAD_ADDRESS as u64, code_size, nor_flags);
+    paging::activate();
+
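+    // The arm64 boot protocol enters the image at its first instruction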
+    unsafe {
+        let entrypoint: EntryFn = core::mem::transmute(LOAD_ADDRESS);
+        entrypoint(&_dtb as *const _, 0, 0, 0);
+    }
}
#[no_mangle]
diff --git a/src/pecoff.rs b/src/pecoff.rs
new file mode 100644
--- /dev/null
+++ b/src/pecoff.rs
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+pub struct Parser {
+    base_of_code: u64,
+    size_of_code: u64,
+}
+
+impl Parser {
+    pub fn from_ptr(ptr: *const u8) -> Parser {
+        // TODO check magic number, arch, etc
+        // TODO deal with variable PE header offset
+        let pehdr_offset = 64;
+
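+        // The PE32+ optional header follows the 4-byte signature and the 20-byte
+        // COFF header: SizeOfCode lives at offset 28, BaseOfCode at offset 44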
+        Parser {
+            base_of_code: unsafe { *(ptr.offset(pehdr_offset + 44) as *const u32) } as u64,
+            size_of_code: unsafe { *(ptr.offset(pehdr_offset + 28) as *const u32) } as u64,
+        }
+ }
+
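+    /// End of the code region relative to the image base (headers included)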
+    pub fn get_code_size(&self) -> u64 {
+        self.base_of_code + self.size_of_code
+    }
+}