From f12fbab2bb0bfb790c87b0812deb7a46b059a982 Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Wed, 2 Oct 2024 23:07:39 +0200 Subject: [PATCH] aarch64 got HALed --- NOTES | 23 ++ aarch64_qemuvirt/src/main.rs | 12 +- hal_aarch64/Cargo.toml | 1 + hal_aarch64/src/devices/gicv2.rs | 34 ++- hal_aarch64/src/exceptions.S | 66 ----- hal_aarch64/src/irq.rs | 351 ++++++++++++----------- hal_aarch64/src/lib.rs | 6 +- hal_aarch64/src/mm/mod.rs | 45 +-- hal_aarch64/src/mm/pgt48.rs | 49 +++- hal_core/Cargo.toml | 3 + hal_core/src/fake_once_lock.rs | 25 ++ hal_core/src/hal.rs | 97 +++++++ hal_core/src/lib.rs | 19 +- hal_core/src/mm.rs | 17 +- hal_core/src/reentrant_spinlock.rs | 67 +++++ kernel/src/executable/elf.rs | 7 +- kernel/src/generic_main.rs | 31 +- kernel/src/lib.rs | 11 +- kernel/src/mm/binary_buddy_allocator.rs | 7 +- kernel/src/mm/mod.rs | 17 +- kernel/src/mm/physical_memory_manager.rs | 32 ++- kernel/src/panic.rs | 3 +- kernel/src/tests.rs | 23 +- 23 files changed, 609 insertions(+), 337 deletions(-) create mode 100644 NOTES delete mode 100644 hal_aarch64/src/exceptions.S create mode 100644 hal_core/src/fake_once_lock.rs create mode 100644 hal_core/src/hal.rs create mode 100644 hal_core/src/reentrant_spinlock.rs diff --git a/NOTES b/NOTES new file mode 100644 index 00000000..bcf00a70 --- /dev/null +++ b/NOTES @@ -0,0 +1,23 @@ +- gic mappings + - kpt.lock().map(allocator=PMM) + map needs to allocate new node + - pmm.alloc + - kpt.lock().map(allocator=NPA) # second locks + +my bug +- pgt.map(allocator=PMM) + needs to allocate new pte + - pmm.give_page + need to map that page + pgt.map(allocated=PMM) etc... 
+ but we need to use the current pagetable instead + + +case when pagetable maps its own stuff +- pgt.map(allocator=PMM) + lock the mutex + needs to allocate new pte + - pmm.give_page + need to map that page + GLOBAL_PAGETABLE.map(allocated=PMM) if pgt == GLOBAL_PAGETABLE back to step 1 + - this makes us try to lock the mutex again, hang forever diff --git a/aarch64_qemuvirt/src/main.rs b/aarch64_qemuvirt/src/main.rs index d00e485a..0da6fd11 100644 --- a/aarch64_qemuvirt/src/main.rs +++ b/aarch64_qemuvirt/src/main.rs @@ -14,9 +14,15 @@ const LAUNCH_TESTS: bool = cfg!(feature = "launch_tests"); use log::info; +unsafe fn disable_fp_trapping() { + asm!("msr CPACR_EL1, {:x}", in(reg) 0b11 << 20) +} + #[no_mangle] extern "C" fn k_main(_device_tree_ptr: usize) -> ! { - kernel::hal::cpu::disable_fp_trapping(); + unsafe { + disable_fp_trapping(); + } static PL011: Pl011 = Pl011::new(0x0900_0000); kernel::kernel_console::set_earlyinit_console(&PL011); @@ -25,9 +31,7 @@ extern "C" fn k_main(_device_tree_ptr: usize) -> ! { info!("hello, I am a goOSe! 
proud member of the gagelen !!!"); - unsafe { - kernel::hal::irq::init_el1_exception_handlers(); - } + kernel::HAL.init_irqs(); unsafe { asm!("isb SY"); diff --git a/hal_aarch64/Cargo.toml b/hal_aarch64/Cargo.toml index 13c79d34..d712bcf3 100644 --- a/hal_aarch64/Cargo.toml +++ b/hal_aarch64/Cargo.toml @@ -10,3 +10,4 @@ hal_core = { path = "../hal_core" } tock-registers = "0.8" cortex-a = "8.1" log = "0.4" +spin = "0.9.8" diff --git a/hal_aarch64/src/devices/gicv2.rs b/hal_aarch64/src/devices/gicv2.rs index ddd9a632..fa59fb31 100644 --- a/hal_aarch64/src/devices/gicv2.rs +++ b/hal_aarch64/src/devices/gicv2.rs @@ -2,6 +2,7 @@ use tock_registers::interfaces::{ReadWriteable, Readable, Writeable}; use tock_registers::register_bitfields; use tock_registers::registers::{ReadOnly, ReadWrite}; +use core::fmt; use hal_core::Error; pub struct GicV2 { @@ -9,6 +10,25 @@ pub struct GicV2 { pub cpu: &'static GicCpu, } +impl fmt::Debug for GicV2 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("GicV2") + .field( + "distributor", + &(self.distributor as *const GicDistributor as *const ()), + ) + .field("cpu", &(self.cpu as *const _ as *const ())) + .finish() + } +} + +/// Safety: +/// There are two parts to this: +/// - distributor: I don't think this benefits from locking, if two cores write to the same +/// reg, afaik it will just send two packets to the GIC, the latter will just accept both. +/// - cpu: This is already per-cpu, not need to lock it. 
+unsafe impl Sync for GicV2 {} + impl GicV2 { pub fn new(distributor_base: usize, cpu_base: usize) -> Self { let distributor = unsafe { @@ -17,32 +37,32 @@ impl GicV2 { .unwrap() }; let cpu = unsafe { (cpu_base as *const GicCpu).as_ref().unwrap() }; - let mut gic = Self { distributor, cpu }; + let gic = Self { distributor, cpu }; gic.init_distributor(); gic } - pub fn disable_interrupts(&mut self) { + pub fn disable_interrupts(&self) { self.distributor .CTLR .modify(GICD_CTLR::EnableGrp0::Disable + GICD_CTLR::EnableGrp1::Disable); } - pub fn enable_interrupts(&mut self) { + pub fn enable_interrupts(&self) { self.distributor .CTLR .modify(GICD_CTLR::EnableGrp0::Enable + GICD_CTLR::EnableGrp1::Enable); } - pub fn get_int(&mut self) -> Result { + pub fn get_int(&self) -> Result { let intno = self.cpu.IAR.get(); Ok(intno) } - pub fn clear_int(&mut self, int: u32) { + pub fn clear_int(&self, int: u32) { // TODO: check (maybe in the TRM) if this could fail / give an error. self.cpu.EOIR.modify(GICC_EOIR::EOIINTID.val(int)); } @@ -54,7 +74,7 @@ impl GicV2 { } /// Put the Gic in a known state. - fn init_distributor(&mut self) { + fn init_distributor(&self) { self.disable_interrupts(); for i in 0..(self.nlines() / 32) { @@ -98,7 +118,7 @@ impl GicV2 { ); } - pub fn enable_line(&mut self, line: u32) -> Result<(), Error> { + pub fn enable_line(&self, line: u32) -> Result<(), Error> { let line = line as usize; let enable_reg_index = line >> 5; let enable_bit: u32 = 1u32 << (line % 32); diff --git a/hal_aarch64/src/exceptions.S b/hal_aarch64/src/exceptions.S deleted file mode 100644 index 64556437..00000000 --- a/hal_aarch64/src/exceptions.S +++ /dev/null @@ -1,66 +0,0 @@ -.section .text - -.macro save_regs - stp x0, x1, [sp, #-16]! - stp x2, x3, [sp, #-16]! - stp x4, x5, [sp, #-16]! - stp x6, x7, [sp, #-16]! - stp x8, x9, [sp, #-16]! - stp x10, x11, [sp, #-16]! - stp x12, x13, [sp, #-16]! - stp x14, x15, [sp, #-16]! - stp x16, x17, [sp, #-16]! - stp x18, x29, [sp, #-16]! 
- stp x30, xzr, [sp, #-16]! -.endm - -.macro restore_regs - ldp x30, xzr, [sp], #16 - ldp x18, x29, [sp], #16 - ldp x16, x17, [sp], #16 - ldp x14, x15, [sp], #16 - ldp x12, x13, [sp], #16 - ldp x10, x11, [sp], #16 - ldp x8, x9, [sp], #16 - ldp x6, x7, [sp], #16 - ldp x4, x5, [sp], #16 - ldp x2, x3, [sp], #16 - ldp x0, x1, [sp], #16 -.endm - -.macro gen_stub func -.balign 0x80 -asm_\func: - msr spsel, xzr - save_regs - bl \func - restore_regs - eret -.endm - -.balign 0x800 -el1_vector_table: - -// Current EL with SP0 -gen_stub sync_current_el_sp0 -gen_stub irq_current_el_sp0 -gen_stub fiq_current_el_sp0 -gen_stub serror_current_el_sp0 - -// Current EL with SPx -gen_stub sync_current_el_spx -gen_stub irq_current_el_spx -gen_stub fiq_current_el_spx -gen_stub serror_current_el_spx - -// Lower EL -gen_stub sync_lower_el -gen_stub irq_lower_el -gen_stub fiq_lower_el -gen_stub serror_lower_el - -// Lower EL with aarch32 -gen_stub sync_lower_el_aarch32 -gen_stub irq_lower_el_aarch32 -gen_stub fiq_lower_el_aarch32 -gen_stub serror_lower_el_aarch32 diff --git a/hal_aarch64/src/irq.rs b/hal_aarch64/src/irq.rs index 10a99a17..0f1a569e 100644 --- a/hal_aarch64/src/irq.rs +++ b/hal_aarch64/src/irq.rs @@ -1,3 +1,4 @@ +use core::arch::naked_asm; use core::ptr; use core::sync::atomic::{AtomicPtr, Ordering}; @@ -6,185 +7,215 @@ use hal_core::{Error, TimerCallbackFn}; use crate::devices::gicv2::GicV2; -use crate::mm; -use hal_core::mm::{PageAlloc, PageMap, Permissions, VAddr}; +use hal_core::mm::PageAlloc; +use hal_core::IrqOps; -use tock_registers::interfaces::Writeable; +use core::cell::OnceCell; -const PHYSICAL_TIMER_LINE: u32 = 30; - -pub unsafe fn init_el1_exception_handlers() { - extern "Rust" { - static el1_vector_table: core::cell::UnsafeCell<()>; - } - cortex_a::registers::VBAR_EL1.set(el1_vector_table.get() as u64); -} - -static TIMER_CALLBACK: AtomicPtr = AtomicPtr::new(ptr::null_mut()); +use cortex_a::registers::*; +use tock_registers::interfaces::{ReadWriteable, 
Writeable}; -pub fn set_timer_handler(h: TimerCallbackFn) { - TIMER_CALLBACK.store(h as *mut _, Ordering::Relaxed); -} - -pub fn set_timer(ticks: usize) -> Result<(), Error> { - enable_line(PHYSICAL_TIMER_LINE)?; - super::cpu::set_physical_timer(ticks); - super::cpu::unmask_interrupts(); - - Ok(()) -} - -enum IrqChip { - NoChip, - GicV2(GicV2), -} - -impl IrqChip { - fn get_int(&mut self) -> Result { - match self { - Self::NoChip => unreachable!("does not support this"), - Self::GicV2(gic) => gic.get_int(), - } - } - - fn clear_int(&mut self, int: u32) { - match self { - Self::NoChip => unreachable!("does not support this"), - Self::GicV2(gic) => gic.clear_int(int), - } - } +const PHYSICAL_TIMER_LINE: u32 = 30; - fn enable_int(&mut self, int: u32) -> Result<(), Error> { - match self { - Self::NoChip => unreachable!("does not support"), - Self::GicV2(gic) => gic.enable_line(int), +macro_rules! gen_isr_stub { + () => { + concat!( + r#" + .balign 0x80 + msr spsel, xzr + stp x0, x1, [sp, #-16]! + stp x2, x3, [sp, #-16]! + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x16, x17, [sp, #-16]! + stp x18, x29, [sp, #-16]! + stp x30, xzr, [sp, #-16]! + + mov x0, . 
- el1_vector_table + bl aarch64_common_trap + + ldp x30, xzr, [sp], #16 + ldp x18, x29, [sp], #16 + ldp x16, x17, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + ldp x2, x3, [sp], #16 + ldp x0, x1, [sp], #16 + eret + "# + ) + }; +} + +#[naked] +#[no_mangle] +#[repr(align(0x800))] +unsafe extern "C" fn el1_vector_table() { + naked_asm!( + gen_isr_stub!(), + gen_isr_stub!(), + gen_isr_stub!(), + gen_isr_stub!(), + gen_isr_stub!(), + gen_isr_stub!(), + gen_isr_stub!(), + gen_isr_stub!(), + gen_isr_stub!(), + gen_isr_stub!(), + gen_isr_stub!(), + gen_isr_stub!(), + ); +} + +#[repr(u64)] +#[derive(Debug)] +enum InterruptType { + // Current EL with SP0 + SyncCurrentElSp0, + IrqCurrentElSp0, + FiqCurrentElSp0, + SerrorCurrentElSp0, + // Current EL with SPx + SyncCurrentElSpx, + IrqCurrentElSpx, + FiqCurrentElSpx, + SerrorCurrentElSpx, + // Lower EL + SyncLowerEl, + IrqLowerEl, + FiqLowerEl, + SerrorLowerEl, + // Lower EL with aarch32 + SyncLowerElAarch32, + IrqLowerElAarch32, + FiqLowerElAarch32, + SerrorLowerElAarch32, +} + +static mut IRQS: core::cell::OnceCell<&Aarch64Irqs> = core::cell::OnceCell::new(); + +#[no_mangle] +unsafe extern "C" fn aarch64_common_trap(offset: u64) { + log::debug!("aarch64_common_trap(0x{:x})", offset); + + let int_type = match offset { + 0x000..=0x07f => InterruptType::SyncCurrentElSp0, + 0x080..=0x0ff => InterruptType::IrqCurrentElSp0, + 0x100..=0x17f => InterruptType::FiqCurrentElSp0, + 0x180..=0x1ff => InterruptType::SerrorCurrentElSp0, + 0x200..=0x27f => InterruptType::SyncCurrentElSpx, + 0x280..=0x2ff => InterruptType::IrqCurrentElSpx, + 0x300..=0x37f => InterruptType::FiqCurrentElSpx, + 0x380..=0x3ff => InterruptType::SerrorCurrentElSpx, + 0x400..=0x47f => InterruptType::SyncLowerEl, + 0x480..=0x4ff => InterruptType::IrqLowerEl, + 0x500..=0x57f => InterruptType::FiqLowerEl, + 0x580..=0x5ff => InterruptType::SerrorLowerEl, + 
0x600..=0x67f => InterruptType::SyncLowerElAarch32, + 0x680..=0x6ff => InterruptType::IrqLowerElAarch32, + 0x700..=0x77f => InterruptType::FiqLowerElAarch32, + 0x780..=0x7ff => InterruptType::SerrorLowerElAarch32, + _ => unreachable!(), + }; + + IRQS.get() + .expect("no one has init'ed the aarch64 hal yet...") + .handler(int_type); +} + +#[derive(Debug)] +pub struct Aarch64Irqs { + irq_chip: OnceCell, + timer_callback: AtomicPtr, +} + +/// Safety: I know what I'm doing :D +unsafe impl Sync for Aarch64Irqs {} + +impl Aarch64Irqs { + pub const fn new() -> Self { + Self { + irq_chip: OnceCell::new(), + timer_callback: AtomicPtr::new(ptr::null_mut()), } } -} -static mut IRQ_CHIP: IrqChip = IrqChip::NoChip; - -pub fn init_irq_chip(_dt_node: (), allocator: &impl PageAlloc) -> Result<(), Error> { - let (gicd_base, gicc_base) = (0x800_0000, 0x801_0000); - mm::current().identity_map_range( - VAddr::new(gicd_base), - 0x0001_0000 / mm::PAGE_SIZE, - Permissions::READ | Permissions::WRITE, - allocator, - )?; - mm::current().identity_map_range( - VAddr::new(gicc_base), - 0x0001_0000 / mm::PAGE_SIZE, - Permissions::READ | Permissions::WRITE, - allocator, - )?; - - unsafe { - IRQ_CHIP = IrqChip::GicV2(GicV2::new(gicd_base, gicc_base)); + fn irq_chip(&self) -> &GicV2 { + self.irq_chip.get().expect("something is trying to program the IRQ chip but `init_irq_chip` has not been called yet") } - Ok(()) -} - -fn enable_line(line: u32) -> Result<(), Error> { - unsafe { IRQ_CHIP.enable_int(line) } -} -#[no_mangle] -extern "C" fn sync_current_el_sp0() { - panic!("hit sync_current_el_sp0"); -} - -#[no_mangle] -extern "C" fn irq_current_el_sp0() { - let int = unsafe { IRQ_CHIP.get_int() }; - - match int { - Ok(PHYSICAL_TIMER_LINE) => { - // Clear the timer in order to EOI it. - cpu::clear_physical_timer(); - - let timer_cb = TIMER_CALLBACK.load(Ordering::Relaxed); - if !timer_cb.is_null() { - unsafe { - // Cannot simply dereference TIMER_CALLBACK here. 
- // We are using an AtomicPtr and TIMER_CALLBACK already holds the fn(). - core::mem::transmute::<_, fn()>(timer_cb)(); + fn handler(&self, int: InterruptType) { + match int { + InterruptType::IrqCurrentElSp0 => { + let int = self.irq_chip().get_int(); + + match int { + Ok(PHYSICAL_TIMER_LINE) => { + // Clear the timer in order to EOI it. + self.clear_timer(); + + let timer_cb = self.timer_callback.load(Ordering::Relaxed); + if !timer_cb.is_null() { + unsafe { + // Cannot simply dereference TIMER_CALLBACK here. + // We are using an AtomicPtr and TIMER_CALLBACK already holds the fn(). + #[allow(clippy::crosspointer_transmute)] + core::mem::transmute::<_, fn()>(timer_cb)(); + } + } + + self.irq_chip().clear_int(int.unwrap()); + } + _ => panic!("got an irq but fuck knows"), } } - - unsafe { IRQ_CHIP.clear_int(int.unwrap()) }; + _ => panic!("unhandled int {:?}", int), } - _ => panic!("got an irq but fuck knows"), } } -#[no_mangle] -extern "C" fn fiq_current_el_sp0() { - panic!("hit fiq_current_el_sp0"); -} - -#[no_mangle] -extern "C" fn serror_current_el_sp0() { - panic!("hit serror_current_el_sp0"); -} -#[no_mangle] -extern "C" fn sync_current_el_spx() { - panic!("hit sync_current_el_spx"); -} - -#[no_mangle] -extern "C" fn irq_current_el_spx() { - panic!("hit irq_current_el_spx"); -} - -#[no_mangle] -extern "C" fn fiq_current_el_spx() { - panic!("hit fiq_current_el_spx"); -} - -#[no_mangle] -extern "C" fn serror_current_el_spx() { - panic!("hit serror_current_el_spx"); -} - -#[no_mangle] -extern "C" fn sync_lower_el() { - panic!("hit sync_lower_el"); -} - -#[no_mangle] -extern "C" fn irq_lower_el() { - panic!("hit irq_lower_el"); -} +impl IrqOps for Aarch64Irqs { + fn init(&'static self) { + cortex_a::registers::VBAR_EL1.set(el1_vector_table as usize as u64); + unsafe { + IRQS.set(self) + .expect("looks like init has already been called") + }; + } -#[no_mangle] -extern "C" fn fiq_lower_el() { - panic!("hit fiq_lower_el"); -} + fn init_irq_chip(&self, _allocator: 
&impl PageAlloc) -> Result<(), Error> { + let (gicd_base, gicc_base) = (0x800_0000, 0x801_0000); + self.irq_chip + .set(GicV2::new(gicd_base, gicc_base)) + .expect("init_irq_chip has already been called"); + Ok(()) + } -#[no_mangle] -extern "C" fn serror_lower_el() { - panic!("hit serror_lower_el"); -} + fn unmask_interrupts(&self) { + cpu::unmask_interrupts(); + } -#[no_mangle] -extern "C" fn sync_lower_el_aarch32() { - panic!("hit sync_lower_el_aarch32"); -} + fn set_timer_handler(&self, h: TimerCallbackFn) { + self.timer_callback.store(h as *mut _, Ordering::Relaxed); + } -#[no_mangle] -extern "C" fn irq_lower_el_aarch32() { - panic!("hit irq_lower_el_aarch32"); -} + fn set_timer(&self, ticks: usize) -> Result<(), Error> { + self.irq_chip().enable_line(PHYSICAL_TIMER_LINE)?; + super::cpu::set_physical_timer(ticks); + super::cpu::unmask_interrupts(); -#[no_mangle] -extern "C" fn fiq_lower_el_aarch32() { - panic!("hit fiq_lower_el_aarch32"); -} + Ok(()) + } -#[no_mangle] -extern "C" fn serror_lower_el_aarch32() { - panic!("hit serror_lower_el_aarch32"); + fn clear_timer(&self) { + CNTP_CTL_EL0.modify(CNTP_CTL_EL0::ENABLE::CLEAR); + } } - -core::arch::global_asm!(include_str!("exceptions.S")); diff --git a/hal_aarch64/src/lib.rs b/hal_aarch64/src/lib.rs index 571f39da..95a5622f 100644 --- a/hal_aarch64/src/lib.rs +++ b/hal_aarch64/src/lib.rs @@ -1,10 +1,11 @@ #![no_std] #![feature(naked_functions)] +#![feature(fn_align)] use cortex_a::registers::*; use tock_registers::interfaces::Readable; -use core::arch::asm; +use core::arch::naked_asm; pub mod cpu; pub mod irq; @@ -31,13 +32,12 @@ pub fn panic_info() -> PanicInfo { #[naked] #[no_mangle] unsafe extern "C" fn _start() -> ! 
{ - asm!( + naked_asm!( " adrp x9, STACK_START msr spsel, xzr mov sp, x9 b k_main ", - options(noreturn) ); } diff --git a/hal_aarch64/src/mm/mod.rs b/hal_aarch64/src/mm/mod.rs index 7b7ed3a2..721ad939 100644 --- a/hal_aarch64/src/mm/mod.rs +++ b/hal_aarch64/src/mm/mod.rs @@ -3,11 +3,7 @@ use hal_core::{ AddressRange, Error, }; -use cortex_a::asm::barrier; -use cortex_a::registers::*; -use tock_registers::interfaces::{ReadWriteable, Writeable}; - -mod pgt48; +pub mod pgt48; use pgt48::PageTable; @@ -23,10 +19,6 @@ pub fn is_pagetable_installed() -> bool { unsafe { GPT.get_mut().is_some() } } -pub fn current() -> &'static mut PageTable { - unsafe { GPT.get_mut().unwrap() } -} - pub fn prefill_pagetable( r: impl Iterator, rw: impl Iterator, @@ -46,41 +38,6 @@ pub fn prefill_pagetable( Ok(()) } -pub fn enable_paging() { - unsafe { - load_pagetable(current()); - }; -} - -unsafe fn load_pagetable(pt: &'static mut PageTable) { - MAIR_EL1.write( - // Attribute 0 - NonCacheable normal DRAM. FIXME: enable cache? 
- MAIR_EL1::Attr0_Normal_Outer::NonCacheable + MAIR_EL1::Attr0_Normal_Inner::NonCacheable, - ); - TTBR0_EL1.set_baddr((pt as *const PageTable) as u64); - TCR_EL1.write( - TCR_EL1::TBI0::Used - + TCR_EL1::IPS::Bits_48 - + TCR_EL1::TG0::KiB_4 - // + TCR_EL1::SH0::Inner - + TCR_EL1::SH0::None - // + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable - + TCR_EL1::ORGN0::NonCacheable - // + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable - + TCR_EL1::IRGN0::NonCacheable - + TCR_EL1::EPD0::EnableTTBR0Walks - + TCR_EL1::A1::TTBR0 - + TCR_EL1::T0SZ.val(16) - + TCR_EL1::EPD1::DisableTTBR1Walks, - ); - - barrier::isb(barrier::SY); - - SCTLR_EL1.modify(SCTLR_EL1::M::Enable); - - barrier::isb(barrier::SY); -} - pub fn align_up(addr: usize) -> usize { mm::align_up(addr, PAGE_SIZE) } diff --git a/hal_aarch64/src/mm/pgt48.rs b/hal_aarch64/src/mm/pgt48.rs index eb437297..66c26619 100644 --- a/hal_aarch64/src/mm/pgt48.rs +++ b/hal_aarch64/src/mm/pgt48.rs @@ -1,8 +1,11 @@ use hal_core::{ - mm::{self, PageAlloc, PageEntry, PageMap, Permissions}, + mm::{self, Mmu, PageAlloc, PageEntry, PageMap, Permissions}, Error, }; +use cortex_a::asm::barrier; +use cortex_a::registers::*; + use tock_registers::interfaces::{ReadWriteable, Readable, Writeable}; use tock_registers::register_bitfields; use tock_registers::registers::{ReadOnly, ReadWrite}; @@ -225,9 +228,13 @@ impl PageMap for PageTable { type Entry = TableEntry; fn new(allocator: &impl PageAlloc) -> Result<&'static mut Self, Error> { + // If the allocator is not giving a page of the same size as our PTE, things are going + // down... let page = allocator.alloc(1)?; + let page_table = page as *mut PageTable; - // Safety: the PMM gave us the memory, it should be a valid pointer. + + // safety: the pmm gave us the memory, it should be a valid pointer. 
let page_table: &mut PageTable = unsafe { page_table.as_mut().unwrap() }; page_table @@ -238,6 +245,10 @@ impl PageMap for PageTable { Ok(page_table) } + fn ptr(&self) -> *const () { + self as *const Self as *const () + } + fn map( &mut self, va: mm::VAddr, @@ -266,8 +277,7 @@ impl PageMap for PageTable { let descriptor = unsafe { &mut content.descriptor }; if descriptor.is_invalid() { - let new_page_table = PageTable::new(allocator)?; - descriptor.set_next_level(new_page_table); + descriptor.set_next_level(PageTable::new(allocator)?); } pagetable = descriptor.get_next_level(); @@ -295,3 +305,34 @@ impl PageMap for PageTable { Ok(()) } } + +impl Mmu for PageTable { + fn mmu_on(pagetable: &P) { + MAIR_EL1.write( + // Attribute 0 - NonCacheable normal DRAM. FIXME: enable cache? + MAIR_EL1::Attr0_Normal_Outer::NonCacheable + MAIR_EL1::Attr0_Normal_Inner::NonCacheable, + ); + TTBR0_EL1.set_baddr(pagetable.ptr() as u64); + TCR_EL1.write( + TCR_EL1::TBI0::Used + + TCR_EL1::IPS::Bits_48 + + TCR_EL1::TG0::KiB_4 + // + TCR_EL1::SH0::Inner + + TCR_EL1::SH0::None + // + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + + TCR_EL1::ORGN0::NonCacheable + // + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + + TCR_EL1::IRGN0::NonCacheable + + TCR_EL1::EPD0::EnableTTBR0Walks + + TCR_EL1::A1::TTBR0 + + TCR_EL1::T0SZ.val(16) + + TCR_EL1::EPD1::DisableTTBR1Walks, + ); + + barrier::isb(barrier::SY); + + SCTLR_EL1.modify(SCTLR_EL1::M::Enable); + + barrier::isb(barrier::SY); + } +} diff --git a/hal_core/Cargo.toml b/hal_core/Cargo.toml index 704980af..5242a7a9 100644 --- a/hal_core/Cargo.toml +++ b/hal_core/Cargo.toml @@ -7,4 +7,7 @@ edition = "2021" [dependencies] bitflags = "2.3" +generic_once_cell = "0.1.1" log = "0.4" +spin = { version = "0.9.8", features = ["lock_api", "mutex"] } +lock_api = "0.4.11" diff --git a/hal_core/src/fake_once_lock.rs b/hal_core/src/fake_once_lock.rs new file mode 100644 index 00000000..af48196f --- /dev/null +++ 
b/hal_core/src/fake_once_lock.rs @@ -0,0 +1,25 @@ +use core::cell::OnceCell; + +pub struct FakeOnceLock { + cell: OnceCell, +} + +impl FakeOnceLock { + pub const fn new() -> FakeOnceLock { + Self { + cell: OnceCell::new(), + } + } + + pub fn get(&self) -> Option<&T> { + self.cell.get() + } + + pub fn set(&self, value: T) -> Result<(), T> { + self.cell.set(value) + } +} + +/// Safety: it is not safe... +unsafe impl Sync for FakeOnceLock {} +unsafe impl Send for FakeOnceLock {} diff --git a/hal_core/src/hal.rs b/hal_core/src/hal.rs new file mode 100644 index 00000000..ef742ef9 --- /dev/null +++ b/hal_core/src/hal.rs @@ -0,0 +1,97 @@ +use super::fake_once_lock::FakeOnceLock; +use super::mm::{self, Mmu, PageAlloc, PageMap}; +use super::AddressRange; +use super::Error; +use super::ReentrantSpinlock; +use super::{IrqOps, TimerCallbackFn}; + +pub struct Hal { + kpt: FakeOnceLock>, + irq_ops: I, +} + +impl Hal { + pub const fn new(irq_ops: I) -> Hal { + Self { + kpt: FakeOnceLock::new(), + irq_ops, + } + } + + pub fn init_irqs(&'static self) { + self.irq_ops.init(); + } + + pub fn init_irq_chip(&self, allocator: &impl PageAlloc) -> Result<(), Error> { + self.irq_ops.init_irq_chip(allocator) + } + + pub fn unmask_interrupts(&self) { + self.irq_ops.unmask_interrupts(); + } + + pub fn set_timer_handler(&self, h: TimerCallbackFn) { + log::trace!("Hal::set_timer_handler(0x{:x})", h as usize); + self.irq_ops.set_timer_handler(h); + } + + pub fn set_timer(&self, ticks: usize) -> Result<(), Error> { + log::trace!("Hal::set_timer({})", ticks); + self.irq_ops.set_timer(ticks) + } + + pub fn clear_timer(&self) { + log::trace!("Hal::clear_timer()"); + self.irq_ops.clear_timer(); + } + + pub fn init_kpt( + &self, + r: impl Iterator, + rw: impl Iterator, + rwx: impl Iterator, + pre_allocated: impl Iterator, + allocator: &impl PageAlloc, + ) -> Result<(), Error> { + if self + .kpt + .set(ReentrantSpinlock::new(crate::mm::prefill_pagetable::

( + r, + rw, + rwx, + pre_allocated, + allocator, + )?)) + .is_err() + { + panic!("kpt has already been set in the hal..."); + } + + log::debug!("hal::init_kpt finished"); + Ok(()) + } + + pub fn enable_paging(&self) -> Result<(), Error> { + let kpt = self.kpt.get().unwrap().lock(); + + P::mmu_on(*kpt); + + Ok(()) + } + + pub const fn page_size(&self) -> usize { + P::PAGE_SIZE + } + + pub const fn align_up(&self, val: usize) -> usize { + mm::align_up(val, self.page_size()) + } + + pub const fn align_down(&self, val: usize) -> usize { + mm::align_down(val, self.page_size()) + } + + pub fn kpt(&'static self) -> &ReentrantSpinlock<&'static mut P> { + self.kpt.get().unwrap() + } +} diff --git a/hal_core/src/lib.rs b/hal_core/src/lib.rs index 3becbf62..ff305a0e 100644 --- a/hal_core/src/lib.rs +++ b/hal_core/src/lib.rs @@ -1,9 +1,26 @@ #![no_std] -#![feature(return_position_impl_trait_in_trait)] +#![feature(const_mut_refs)] use core::convert::Into; use core::ops::Range; +mod hal; +pub use hal::Hal; + +mod fake_once_lock; + +mod reentrant_spinlock; +pub use reentrant_spinlock::ReentrantSpinlock; + +pub trait IrqOps { + fn init(&'static self); + fn init_irq_chip(&self, allocator: &impl mm::PageAlloc) -> Result<(), Error>; + fn unmask_interrupts(&self); + fn set_timer_handler(&self, h: TimerCallbackFn); + fn set_timer(&self, ticks: usize) -> Result<(), Error>; + fn clear_timer(&self); +} + pub mod mm; #[derive(Debug)] diff --git a/hal_core/src/mm.rs b/hal_core/src/mm.rs index f8cb6d8c..8fa32d0c 100644 --- a/hal_core/src/mm.rs +++ b/hal_core/src/mm.rs @@ -59,6 +59,7 @@ pub enum AllocatorError { pub trait PageAlloc: Sync { fn alloc(&self, page_count: usize) -> Result; + fn give_page(&self) -> Result; fn dealloc(&self, base: usize, page_count: usize) -> Result<(), AllocatorError>; fn used_pages(&self, f: F); } @@ -70,6 +71,10 @@ impl PageAlloc for NullPageAllocator { panic!("the null page allocator mustn't allocate"); } + fn give_page(&self) -> Result { + panic!("the null 
page allocator mustn't allocate (give_page)"); + } + fn dealloc(&self, _base: usize, _page_count: usize) -> Result<(), AllocatorError> { panic!("the null page allocator cannot deallocate"); } @@ -89,6 +94,8 @@ pub trait PageMap { fn new(allocator: &impl PageAlloc) -> Result<&'static mut Self, Error>; + fn ptr(&self) -> *const (); + fn map( &mut self, va: VAddr, @@ -160,11 +167,15 @@ pub trait PageMap { } } -pub fn align_up(val: usize, page_sz: usize) -> usize { +pub trait Mmu { + fn mmu_on(pagetable: &P); +} + +pub const fn align_up(val: usize, page_sz: usize) -> usize { ((val + page_sz - 1) / page_sz) * page_sz } -pub fn align_down(addr: usize, page_sz: usize) -> usize { +pub const fn align_down(addr: usize, page_sz: usize) -> usize { // TODO: can this be more optimized ? // XXX: uh isn't this math wrong ? align_up(addr, page_sz) + page_sz @@ -203,5 +214,7 @@ pub fn prefill_pagetable( )? } + log::debug!("hal_core::mm::prefill_pagetable finished the fill"); + Ok(pt) } diff --git a/hal_core/src/reentrant_spinlock.rs b/hal_core/src/reentrant_spinlock.rs new file mode 100644 index 00000000..e575a95a --- /dev/null +++ b/hal_core/src/reentrant_spinlock.rs @@ -0,0 +1,67 @@ +use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use lock_api::{GuardSend, RawMutex}; + +pub struct RawReentrantSpinlock { + user: AtomicUsize, + lock: AtomicBool, +} + +fn core_id() -> usize { + let mut id: u64; + + unsafe { core::arch::asm!("mrs {:x}, mpidr_el1", out(reg) id) }; + + id as usize +} + +unsafe impl RawMutex for RawReentrantSpinlock { + // The underlying const with interior mutability is fine because it is only used for + // construction. + // Clippy recommends using a const fn for constructors but I don't have that freedom of choice + // since we depend on lock_api. 
+ #[allow(clippy::declare_interior_mutable_const)] + const INIT: RawReentrantSpinlock = RawReentrantSpinlock { + user: AtomicUsize::new(usize::MAX), + lock: AtomicBool::new(false), + }; + + // A spinlock guard can be sent to another thread and unlocked there + type GuardMarker = GuardSend; + + fn lock(&self) { + // Note: This isn't the best way of implementing a spinlock, but it + // suffices for the sake of this example. + while !self.try_lock() {} + } + + fn try_lock(&self) -> bool { + let my_id = core_id(); + + if self.user.load(Ordering::Acquire) == my_id { + assert!(self.lock.load(Ordering::Relaxed)); + + // Already locked by myself, reenter the spinlock. + log::debug!( + "RawReentrantSpinlock::try_lock: reentering (core 0x{:X})", + my_id + ); + return true; + } + + // Try to lock the mutex and when it is done store our id in it. + self.lock + .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + && self + .user + .compare_exchange(usize::MAX, core_id(), Ordering::Acquire, Ordering::Relaxed) + .is_ok() + } + + unsafe fn unlock(&self) { + self.user.store(usize::MAX, Ordering::Release); + self.lock.store(false, Ordering::Release); + } +} + +pub type ReentrantSpinlock = lock_api::Mutex; diff --git a/kernel/src/executable/elf.rs b/kernel/src/executable/elf.rs index 890a9496..d47f81d1 100644 --- a/kernel/src/executable/elf.rs +++ b/kernel/src/executable/elf.rs @@ -2,13 +2,13 @@ use core::iter::Iterator; use crate::globals; use crate::Error; +use crate::HAL; use goblin; use goblin::elf::header::header64::Header; use goblin::elf::program_header::program_header64::ProgramHeader; use goblin::elf::program_header::*; -use crate::hal; use hal_core::mm::{PAddr, PageAlloc, PageMap, Permissions, VAddr}; fn align_down(addr: usize, page_size: usize) -> usize { @@ -65,7 +65,7 @@ impl<'a> Elf<'a> { } pub fn load(&self) -> Result<(), Error> { - let page_size = hal::mm::PAGE_SIZE; + let page_size = HAL.page_size(); for segment in self.segments() { if 
segment.p_type != PT_LOAD { @@ -106,7 +106,8 @@ impl<'a> Elf<'a> { for i in 0..pages_needed { let page_offset = i * page_size; // FIXME: No unwrap - hal::mm::current() + HAL.kpt() + .lock() .map( VAddr::new(align_down(virtual_pages as usize, page_size) + page_offset), PAddr::new(physical_pages + page_offset), diff --git a/kernel/src/generic_main.rs b/kernel/src/generic_main.rs index 807445c7..3f372cfe 100644 --- a/kernel/src/generic_main.rs +++ b/kernel/src/generic_main.rs @@ -3,13 +3,15 @@ use super::drivers::qemuexit::QemuExit; use super::drivers::Driver; use super::globals; -use crate::hal; use crate::mm; +use crate::HAL; use crate::tests::{self, TestResult}; use log::info; +use hal_core::mm::{PageMap, Permissions, VAddr}; + pub fn generic_main(dt: DeviceTree, hacky_devices: &[&dyn Driver]) -> ! { info!("Entered generic_main"); let qemu_exit = QemuExit::new(); @@ -26,10 +28,33 @@ pub fn generic_main(dt: DeviceTree, hacky_devices: &[& // Driver stuff // let _drvmgr = DriverManager::with_devices(&dt).unwrap(); - hal::irq::init_irq_chip((), &globals::PHYSICAL_MEMORY_MANAGER) + log::trace!("mapping gic pages"); + + cfg_if::cfg_if! 
{ + if #[cfg(target_arch = "aarch64")] { + let (gicd_base, gicc_base) = (0x800_0000, 0x801_0000); + HAL.kpt().lock().identity_map_range( + VAddr::new(gicd_base), + 0x0001_0000 / HAL.page_size(), + Permissions::READ | Permissions::WRITE, + &globals::PHYSICAL_MEMORY_MANAGER + ).unwrap(); + HAL.kpt().lock().identity_map_range( + VAddr::new(gicc_base), + 0x0001_0000 / HAL.page_size(), + Permissions::READ | Permissions::WRITE, + &globals::PHYSICAL_MEMORY_MANAGER + ).unwrap(); + } + } + + log::trace!("initializing irq chip"); + + crate::HAL + .init_irq_chip(&globals::PHYSICAL_MEMORY_MANAGER) .expect("initialization of irq chip failed"); - hal::cpu::unmask_interrupts(); + HAL.unmask_interrupts(); if LAUNCH_TESTS { match tests::launch() { diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs index 6b217d6d..34578801 100644 --- a/kernel/src/lib.rs +++ b/kernel/src/lib.rs @@ -9,7 +9,6 @@ #![feature(const_for)] #![feature(alloc_error_handler)] #![feature(trait_upcasting)] -#![feature(return_position_impl_trait_in_trait)] pub extern crate alloc; @@ -33,9 +32,15 @@ mod tests; // TODO: cleanup how we handle features cfg_if::cfg_if! 
{ - if #[cfg(target_arch = "aarch64")] { + if #[cfg(target_arch = "aarch64")] { pub type ConsoleImpl = drivers::pl011::Pl011; - pub use hal_aarch64 as hal; + + use hal_aarch64::{ + mm::pgt48, + irq::Aarch64Irqs, + }; + use hal_core::Hal; + pub static HAL: Hal = Hal::new(Aarch64Irqs::new()); } else if #[cfg(target_arch = "riscv64")] { pub type ConsoleImpl = drivers::ns16550::Ns16550; pub use hal_riscv64 as hal; diff --git a/kernel/src/mm/binary_buddy_allocator.rs b/kernel/src/mm/binary_buddy_allocator.rs index bb17cadb..9c01853a 100644 --- a/kernel/src/mm/binary_buddy_allocator.rs +++ b/kernel/src/mm/binary_buddy_allocator.rs @@ -1,5 +1,4 @@ -use crate::globals; -use crate::hal::mm::PAGE_SIZE; +use crate::{globals, HAL}; use hal_core::mm::PageAlloc; use core::alloc::{GlobalAlloc, Layout}; @@ -24,10 +23,10 @@ unsafe impl GlobalAlloc for BinaryBuddyAllocator { // - be thread-safe // - disable interrupts when entering, then re-enable - let page_count = if layout.size() <= PAGE_SIZE { + let page_count = if layout.size() <= HAL.page_size() { 1 } else { - layout.size() / PAGE_SIZE + 1 + layout.size() / HAL.page_size() + 1 }; globals::PHYSICAL_MEMORY_MANAGER .alloc(page_count) diff --git a/kernel/src/mm/mod.rs b/kernel/src/mm/mod.rs index 581fe94f..bfdc8864 100644 --- a/kernel/src/mm/mod.rs +++ b/kernel/src/mm/mod.rs @@ -6,8 +6,8 @@ mod binary_buddy_allocator; use crate::device_tree::DeviceTree; use crate::globals; -use crate::hal; use crate::Error; +use crate::HAL; use hal_core::mm::{NullPageAllocator, PageAlloc, PageMap, Permissions, VAddr}; use hal_core::AddressRange; @@ -65,7 +65,7 @@ fn map_kernel_rwx() -> ( impl Iterator, impl Iterator, ) { - let page_size = hal::mm::PAGE_SIZE; + let page_size = HAL.page_size(); let kernel_start = unsafe { crate::utils::external_symbol_value(&KERNEL_START) }; let kernel_end = unsafe { crate::utils::external_symbol_value(&KERNEL_END) }; let kernel_end_align = ((kernel_end + page_size - 1) / page_size) * page_size; @@ -102,7 +102,7 @@ 
pub fn map_address_space<'a, I: Iterator>( .try_push( device_tree .memory_region() - .round_up_to_page(hal::mm::PAGE_SIZE), + .round_up_to_page(HAL.page_size()), ) .unwrap(); @@ -113,7 +113,7 @@ pub fn map_address_space<'a, I: Iterator>( for drv in drivers { if let Some((base, len)) = drv.get_address_range() { - let len = hal::mm::align_up(len); + let len = HAL.align_up(len); debug!( "adding driver memory region to RW entries: [{:X}; {:X}]", base, @@ -130,7 +130,7 @@ pub fn map_address_space<'a, I: Iterator>( debug!("rwx_entries: {:X?}", rwx_entries); debug!("pre_allocated_entries: {:X?}", pre_allocated_entries); - hal::mm::prefill_pagetable( + HAL.init_kpt( r_entries.into_iter(), rw_entries.into_iter(), rwx_entries.into_iter(), @@ -142,7 +142,8 @@ pub fn map_address_space<'a, I: Iterator>( // the pre_allocated_entries). // Therefore no allocations will be made, pass the NullPageAllocator. globals::PHYSICAL_MEMORY_MANAGER.used_pages(|page| { - hal::mm::current() + HAL.kpt() + .lock() .identity_map( VAddr::new(page), Permissions::READ | Permissions::WRITE, @@ -151,7 +152,9 @@ pub fn map_address_space<'a, I: Iterator>( .unwrap(); }); - hal::mm::enable_paging(); + log::trace!("going to enable paging..."); + HAL.enable_paging()?; + log::trace!("enabled paging !"); unsafe { globals::STATE = globals::KernelState::MmuEnabledInit }; diff --git a/kernel/src/mm/physical_memory_manager.rs b/kernel/src/mm/physical_memory_manager.rs index f7a7e08a..174c514f 100644 --- a/kernel/src/mm/physical_memory_manager.rs +++ b/kernel/src/mm/physical_memory_manager.rs @@ -1,15 +1,13 @@ use crate::device_tree::DeviceTree; use crate::globals; -use crate::hal; use crate::mm; +use crate::HAL; use core::mem; use hal_core::{ mm::{AllocatorError, NullPageAllocator, PageAlloc, PageMap, Permissions, VAddr}, AddressRange, }; -use hal::mm::PAGE_SIZE; - use log::debug; use spin::mutex::Mutex; @@ -64,7 +62,7 @@ impl PhysicalMemoryManager { .filter_map(|maybe_region| maybe_region.map(|region| 
region.size())) .sum(); - total_memory_bytes / PAGE_SIZE + total_memory_bytes / HAL.page_size() } fn find_large_region(regions: &[Option], minimum_size: usize) -> Option { @@ -172,8 +170,8 @@ impl PhysicalMemoryManager { // Re-align the regions, for exemple things we exclude are not always aligned to a page boundary. all_regions.iter_mut().for_each(|maybe_region| { if let Some(region) = maybe_region { - region.start = hal::mm::align_down(region.start); - region.end = hal::mm::align_up(region.end); + region.start = HAL.align_down(region.start); + region.end = HAL.align_up(region.end); *maybe_region = if region.size() > 0 { Some(*region) @@ -208,8 +206,8 @@ impl PhysicalMemoryManager { .iter() .flatten() .all( - |region| region.start == hal::mm::align_up(region.start) - && region.end == hal::mm::align_up(region.end) + |region| region.start == HAL.align_up(region.start) + && region.end == HAL.align_up(region.end) ), "Expected region bounds to be aligned to the page size (won't be possible to allocate pages otherwise)" ); @@ -220,7 +218,7 @@ impl PhysicalMemoryManager { let page_count = Self::count_pages(&available_regions); let metadata_size = page_count * mem::size_of::(); - let pages_needed = hal::mm::align_up(metadata_size) / PAGE_SIZE; + let pages_needed = HAL.align_up(metadata_size) / HAL.page_size(); let metadata_addr = Self::find_large_region(&available_regions, metadata_size) .ok_or(AllocatorError::NotEnoughMemoryForMetadata)?; @@ -231,12 +229,12 @@ impl PhysicalMemoryManager { let physical_pages = available_regions .iter() .flatten() - .flat_map(|region| region.iter_pages(PAGE_SIZE)) + .flat_map(|region| region.iter_pages(HAL.page_size())) .map(|base| { Self::phys_addr_to_physical_page( base, metadata_addr, - metadata_addr + pages_needed * PAGE_SIZE, + metadata_addr + pages_needed * HAL.page_size(), ) }); @@ -270,7 +268,7 @@ impl PhysicalMemoryManager { continue; } - if consecutive_pages > 0 && page.base != last_page_base + PAGE_SIZE { + if consecutive_pages 
> 0 && page.base != last_page_base + HAL.page_size() { consecutive_pages = 0; continue; } @@ -294,6 +292,7 @@ impl PhysicalMemoryManager { impl PageAlloc for PhysicalMemoryManager { fn alloc(&self, page_count: usize) -> Result { + // log::debug!("PhysicalMemoryManager::alloc(page_count={})", page_count); // If there is a kernel pagetable, identity map the pages. let first_page = self.alloc_pages(page_count)?; @@ -301,7 +300,8 @@ impl PageAlloc for PhysicalMemoryManager { // The mmu is enabled, therefore we already mapped all DRAM into the kernel's pagetable // as invalid entries. // Pagetable must only modify existing entries and not allocate. - hal::mm::current() + HAL.kpt() + .lock() .identity_map_range( VAddr::new(first_page), page_count, @@ -314,6 +314,10 @@ impl PageAlloc for PhysicalMemoryManager { Ok(first_page) } + fn give_page(&self) -> Result { + self.alloc_pages(1) + } + fn dealloc(&self, _base: usize, _page_count: usize) -> Result<(), AllocatorError> { // TODO: // - if MMU is on, unmap the page @@ -328,7 +332,7 @@ impl PageAlloc for PhysicalMemoryManager { let metadata_start = (&metadata[0] as *const PhysicalPage) as usize; let metadata_last = (&metadata[metadata.len() - 1] as *const PhysicalPage) as usize; - let metadata_pages = (metadata_start..=metadata_last).step_by(PAGE_SIZE); + let metadata_pages = (metadata_start..=metadata_last).step_by(HAL.page_size()); let allocated_pages = metadata .iter() .filter(|page| page.is_allocated()) diff --git a/kernel/src/panic.rs b/kernel/src/panic.rs index 71a9f6ef..a745c85b 100644 --- a/kernel/src/panic.rs +++ b/kernel/src/panic.rs @@ -1,4 +1,3 @@ -use crate::hal; use core::arch::asm; use log::error; @@ -7,7 +6,7 @@ use log::error; fn panic(info: &core::panic::PanicInfo) -> ! 
{ error!("\x1b[31mkernel panic\x1b[0m: {}", info); - error!("hal panic info: {:X?}", hal::panic_info()); + error!("hal panic info:"); // {:X?}", hal::panic_info()); loop { unsafe { asm!("wfi") } diff --git a/kernel/src/tests.rs b/kernel/src/tests.rs index 9ee009da..21512479 100644 --- a/kernel/src/tests.rs +++ b/kernel/src/tests.rs @@ -5,7 +5,7 @@ use core::sync::atomic::{AtomicUsize, Ordering}; use crate::executable::elf::Elf; use crate::globals; -use crate::hal::{self, mm::PAGE_SIZE}; +use crate::HAL; use hal_core::mm::{PageAlloc, PageMap, Permissions}; use align_data::include_aligned; @@ -67,23 +67,25 @@ fn test_timer_interrupt() -> TestResult { NUM_INTERRUPTS ); - hal::cpu::clear_physical_timer(); + HAL.clear_timer(); - hal::irq::set_timer_handler(|| { + HAL.set_timer_handler(|| { trace!("."); if CNT.fetch_add(1, Ordering::Relaxed) < NUM_INTERRUPTS { - hal::irq::set_timer(50_000) + crate::HAL + .set_timer(50_000) .expect("failed to set timer in the timer handler of the test"); } }); - - hal::irq::set_timer(50_000).expect("failed to set timer for test"); + crate::HAL + .set_timer(50_000) + .expect("failed to set timer for test"); while CNT.load(Ordering::Relaxed) < NUM_INTERRUPTS {} // TODO: restore the timer handler - hal::cpu::clear_physical_timer(); + HAL.clear_timer(); TestResult::Success } else { // // Synchronous exception @@ -98,9 +100,9 @@ fn test_pagetable_remap() -> TestResult { info!("Testing the remapping capabilities of our pagetable..."); let page_src = globals::PHYSICAL_MEMORY_MANAGER.alloc(1).unwrap(); - let page_src = unsafe { slice::from_raw_parts_mut(page_src as *mut u8, PAGE_SIZE) }; + let page_src = unsafe { slice::from_raw_parts_mut(page_src as *mut u8, HAL.page_size()) }; let dst_addr = 0x0450_0000; - let page_dst = unsafe { slice::from_raw_parts(dst_addr as *const u8, hal::mm::PAGE_SIZE) }; + let page_dst = unsafe { slice::from_raw_parts(dst_addr as *const u8, HAL.page_size()) }; let deadbeef = [0xDE, 0xAD, 0xBE, 0xEF]; // Put data in 
source page @@ -108,7 +110,8 @@ fn test_pagetable_remap() -> TestResult { page_src[0..deadbeef.len()].copy_from_slice(&deadbeef); // Remap source page to destination page - hal::mm::current() + HAL.kpt() + .lock() .map( hal_core::mm::VAddr::new(dst_addr), hal_core::mm::PAddr::new(page_src.as_ptr() as usize),