diff --git a/.gitmodules b/.gitmodules index fa662cb6f2..c38d5c565b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -22,3 +22,6 @@ path = libs/core2 url = https://github.com/theseus-os/core2.git shallow = true +[submodule "libs/trusted_chunk"] + path = libs/trusted_chunk + url = https://github.com/Ramla-I/trusted_chunk diff --git a/Cargo.lock b/Cargo.lock index 74a748d42b..9810e804ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1186,8 +1186,25 @@ dependencies = [ "kernel_config", "log", "memory_structs", + "range_inclusive", "spin 0.9.4", "static_assertions", + "trusted_chunk", +] + +[[package]] +name = "frame_range_callbacks" +version = "0.1.0" +dependencies = [ + "core2", + "frame_allocator", + "log", + "memory_structs", + "page_table_entry", + "range_inclusive", + "spin 0.9.4", + "trusted_chunk", + "x86_64", ] [[package]] @@ -1987,6 +2004,7 @@ dependencies = [ "bitflags", "boot_info", "frame_allocator", + "frame_range_callbacks", "irq_safety", "kernel_config", "lazy_static", @@ -2042,6 +2060,7 @@ dependencies = [ "derive_more", "kernel_config", "paste", + "range_inclusive", "zerocopy", ] @@ -2822,6 +2841,21 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "prusti-contracts" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9775a28190bfc0dda44516b542082746cb43545016a27f2010cb0433ae668e20" +dependencies = [ + "prusti-contracts-proc-macros", +] + +[[package]] +name = "prusti-contracts-proc-macros" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc5a49e19cc2309c2f7babcceeed80dacf3e69c8ebfb03355c76d89a0f1d1ddd" + [[package]] name = "ps" version = "0.1.0" @@ -2933,6 +2967,11 @@ dependencies = [ "tsc", ] +[[package]] +name = "range_inclusive" +version = "0.1.0" +source = "git+https://github.com/Ramla-I/range_inclusive#7998070408bc72a226c5a025f7b7df0f29a0a3c9" + [[package]] name = "rangemap" version = "1.3.0" @@ -4200,6 +4239,16 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ee8fba06c1f4d0b396ef61a54530bb6b28f0dc61c38bc8bc5a5a48161e6282e" +[[package]] +name = "trusted_chunk" +version = "0.1.0" +dependencies = [ + "cfg-if 1.0.0", + "prusti-contracts", + "range_inclusive", + "spin 0.9.4", +] + [[package]] name = "tsc" version = "0.1.0" diff --git a/kernel/frame_allocator/Cargo.toml b/kernel/frame_allocator/Cargo.toml index d589b41e8b..b8c2a9ce70 100644 --- a/kernel/frame_allocator/Cargo.toml +++ b/kernel/frame_allocator/Cargo.toml @@ -8,6 +8,7 @@ version = "0.1.0" spin = "0.9.4" intrusive-collections = "0.9.0" static_assertions = "1.1.0" +range_inclusive = {git = "https://github.com/Ramla-I/range_inclusive"} [dependencies.log] version = "0.4.8" @@ -18,5 +19,9 @@ path = "../kernel_config" [dependencies.memory_structs] path = "../memory_structs" +[dependencies.trusted_chunk] +path = "../../libs/trusted_chunk" + + [lib] crate-type = ["rlib"] diff --git a/kernel/frame_allocator/src/frames.rs b/kernel/frame_allocator/src/frames.rs new file mode 100644 index 0000000000..fedf398895 --- /dev/null +++ b/kernel/frame_allocator/src/frames.rs @@ -0,0 +1,505 @@ +//! A range of unmapped frames that stores a verified `TrustedChunk`. +//! A `Frames` object is uncloneable and is the only way to access the range of frames it references. 
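+//!
+//! Verification overview (as used throughout this module): every `Frames` object wraps a
+//! `TrustedChunk` handed out by the global `CHUNK_ALLOCATOR`, whose chunk-creation, split,
+//! and merge operations are formally verified to never produce two overlapping chunks.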
+
+use kernel_config::memory::PAGE_SIZE;
+use memory_structs::{FrameRange, Frame, PhysicalAddress};
+use range_inclusive::RangeInclusive;
+use crate::{MemoryRegionType, contains_any, RESERVED_REGIONS, FREE_GENERAL_FRAMES_LIST, FREE_RESERVED_FRAMES_LIST};
+use core::{borrow::Borrow, cmp::Ordering, ops::{Deref, DerefMut}, fmt};
+use spin::Mutex;
+use trusted_chunk::trusted_chunk::*;
+
+pub type AllocatedFrames = Frames<{FrameState::Unmapped}>;
+
+static CHUNK_ALLOCATOR: Mutex<TrustedChunkAllocator> = Mutex::new(TrustedChunkAllocator::new());
+
+pub(crate) fn switch_chunk_allocator_to_heap_structure() {
+    CHUNK_ALLOCATOR.lock().switch_to_heap_allocated()
+        .expect("BUG: Failed to switch the chunk allocator to heap allocated. May have been called twice.");
+}
+
+#[derive(PartialEq, Eq)]
+pub enum FrameState {
+    Unmapped,
+    Mapped
+}
+
+/// A range of contiguous frames.
+/// Owning a `Frames` object gives ownership of the range of frames it references.
+/// The `verified_chunk` field is a verified `TrustedChunk` that stores the actual frames,
+/// and has the invariant that it does not overlap with any other `TrustedChunk` created by the
+/// `CHUNK_ALLOCATOR`.
+///
+/// The frames can be in an unmapped or mapped state. In the unmapped state, the frames are not
+/// immediately accessible because they're not yet mapped by any virtual memory pages.
+/// They are converted into a mapped state once they are used to create a `MappedPages` object.
+///
+/// When a `Frames` object in an unmapped state is dropped, it is deallocated and returned to the free frames list.
+/// We expect that `Frames` in a mapped state will never be dropped, but instead will be forgotten.
+///
+/// # Ordering and Equality
+///
+/// `Frames` implements the `Ord` trait, and its total ordering is ONLY based on
+/// its **starting** `Frame`. This is useful so we can store `Frames` in a sorted collection.
+///
+/// Similarly, `Frames` implements equality traits, `Eq` and `PartialEq`,
+/// both of which are also based ONLY on the **starting** `Frame` of the `Frames`.
+/// Thus, comparing two `Frames` with the `==` or `!=` operators may not work as expected,
+/// since it ignores their actual range of frames.
+#[derive(Eq)]
+pub struct Frames<const S: FrameState> {
+    /// The type of this memory chunk, e.g., whether it's in a free or reserved region.
+    typ: MemoryRegionType,
+    /// The Frames covered by this chunk, an inclusive range. Equal to the frames in the verified chunk.
+    /// Needed because verification fails on a trusted chunk that stores a `FrameRange` or `RangeInclusive<Frame>`,
+    /// but succeeds with `RangeInclusive<usize>`.
+    frames: FrameRange,
+    /// The actual verified chunk
+    verified_chunk: TrustedChunk
+}
+
+// Frames must not be Cloneable, and it must not expose its inner frames as mutable.
+assert_not_impl_any!(Frames<{FrameState::Unmapped}>: DerefMut, Clone);
+assert_not_impl_any!(Frames<{FrameState::Mapped}>: DerefMut, Clone);
+
+
+impl Frames<{FrameState::Unmapped}> {
+    /// Creates a new `Frames` object in an unmapped state.
+    /// An error is returned if `frames` is empty, if there is no space to store the new `Frames`
+    /// information pre-heap initialization, or if a `TrustedChunk` already exists that overlaps
+    /// with the given `frames`.
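+    ///
+    /// Illustrative sketch of intended usage (not a doctest: this function is crate-private,
+    /// and the address range below is made up):
+    /// ```ignore
+    /// let range = FrameRange::new(
+    ///     Frame::containing_address(PhysicalAddress::new_canonical(0x1000)),
+    ///     Frame::containing_address(PhysicalAddress::new_canonical(0x4000)),
+    /// );
+    /// let frames = Frames::new(MemoryRegionType::Free, range)?;
+    /// // A second call over any overlapping range would fail with
+    /// // `ChunkCreationError::Overlap`, enforced by the verified allocator.
+    /// ```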
+    pub(crate) fn new(typ: MemoryRegionType, frames: FrameRange) -> Result<Self, &'static str> {
+        let verified_chunk = CHUNK_ALLOCATOR.lock().create_chunk(frames.to_range_inclusive())
+            .map(|(chunk, _)| chunk)
+            .map_err(|chunk_error| {
+                match chunk_error {
+                    ChunkCreationError::Overlap(_idx) => "Failed to create a verified chunk due to an overlap",
+                    ChunkCreationError::NoSpace => "Before the heap is initialized, requested more chunks than there is space for (64)",
+                    ChunkCreationError::InvalidRange => "Could not create a chunk for an empty range, use the empty() function"
+                }
+            })?;
+
+        // assert!(frames.start().number() == verified_chunk.start());
+        // assert!(frames.end().number() == verified_chunk.end());
+
+        let f = Frames {
+            typ,
+            frames,
+            verified_chunk
+        };
+        // warn!("NEW FRAMES: {:?}", f);
+        Ok(f)
+    }
+
+    /// Creates a new `Frames` object from a `TrustedChunk` and a `FrameRange`.
+    /// It is expected that the range of `verified_chunk` is equal to `frames`.
+    /// Only used within the allocated frames callback function.
+    pub(crate) fn from_trusted_chunk(verified_chunk: TrustedChunk, frames: FrameRange, typ: MemoryRegionType) -> Self {
+        let f = Frames {
+            typ,
+            frames,
+            verified_chunk
+        };
+
+        // assert!(f.frames.start().number() == f.verified_chunk.start());
+        // assert!(f.frames.end().number() == f.verified_chunk.end());
+        // warn!("FROM TRUSTED CHUNK: {:?}", f);
+        f
+    }
+
+    /// Consumes the `Frames` in an unmapped state and converts them to `Frames` in a mapped state.
+    /// This should only be called once a `MappedPages` object has been created from the `Frames`.
+    pub fn into_mapped_frames(mut self) -> Frames<{FrameState::Mapped}> {
+        let typ = self.typ;
+        let (frame_range, chunk) = self.replace_with_empty();
+        core::mem::forget(self);
+
+        Frames {
+            typ,
+            frames: frame_range,
+            verified_chunk: chunk
+        }
+    }
+
+    /// Returns an `UnmappedFrame` if this `Frames<{FrameState::Unmapped}>` object contains only one frame.
+    ///
+    /// ## Panic
+    /// Panics if this `Frames` object contains multiple frames or zero frames.
+    pub fn as_unmapped_frame(&self) -> UnmappedFrame {
+        assert!(self.size_in_frames() == 1);
+        UnmappedFrame {
+            frame: *self.start(),
+            _phantom: core::marker::PhantomData,
+        }
+    }
+}
+
+
+/// This function is a callback used to convert `UnmappedFrames` into `Frames<{FrameState::Unmapped}>`.
+/// `UnmappedFrames` represents frames that have been unmapped from a page that had
+/// exclusively mapped them, indicating that no other pages have been mapped
+/// to those same frames, and thus, they can be safely deallocated.
+///
+/// This exists to break the cyclic dependency between this crate and
+/// the `page_table_entry` crate, since `page_table_entry` must depend on types
+/// from this crate in order to enforce safety when modifying page table entries.
+pub(crate) fn into_allocated_frames(tc: TrustedChunk, frames: FrameRange) -> Frames<{FrameState::Unmapped}> {
+    let typ = if contains_any(&RESERVED_REGIONS.lock(), &frames) {
+        MemoryRegionType::Reserved
+    } else {
+        MemoryRegionType::Free
+    };
+    Frames::from_trusted_chunk(tc, frames, typ)
+}
+
+impl<const S: FrameState> Drop for Frames<S> {
+    fn drop(&mut self) {
+        match S {
+            FrameState::Unmapped => {
+                if self.size_in_frames() == 0 { return; }
+                // trace!("FRAMES DROP {:?}", self);
+
+                let (frames, verified_chunk) = self.replace_with_empty();
+                let unmapped_frames: Frames<{FrameState::Unmapped}> = Frames {
+                    typ: self.typ,
+                    frames,
+                    verified_chunk,
+                };
+
+                // Should we remove these lines, since we store the typ in Frames?
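+                // Determine which free list this chunk belongs to by consulting the
+                // canonical `RESERVED_REGIONS` bookkeeping list.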
+                let (list, _typ) = if contains_any(&RESERVED_REGIONS.lock(), &unmapped_frames) {
+                    (&FREE_RESERVED_FRAMES_LIST, MemoryRegionType::Reserved)
+                } else {
+                    (&FREE_GENERAL_FRAMES_LIST, MemoryRegionType::Free)
+                };
+
+                // Simply add the newly-deallocated chunk to the free frames list.
+                let mut locked_list = list.lock();
+                let res = locked_list.insert(unmapped_frames);
+                match res {
+                    Ok(_inserted_free_chunk) => (),
+                    Err(c) => error!("BUG: couldn't insert deallocated chunk {:?} into free frame list", c),
+                }
+
+                // Here, we could optionally use the above `_inserted_free_chunk` to merge the adjacent (contiguous) chunks
+                // before or after the newly-inserted free chunk.
+                // However, there's no *need* to do so until we actually run out of address space or until
+                // a requested address is in a chunk that needs to be merged.
+                // Thus, for performance, we save that for those future situations.
+            }
+            FrameState::Mapped => panic!("We should never drop a mapped frame! It should be forgotten instead."),
+        }
+    }
+}
+
+impl<'f> IntoIterator for &'f Frames<{FrameState::Unmapped}> {
+    type IntoIter = UnmappedFramesIter<'f>;
+    type Item = UnmappedFrame<'f>;
+    fn into_iter(self) -> Self::IntoIter {
+        UnmappedFramesIter {
+            _owner: self,
+            range: self.frames.clone().into_iter(),
+        }
+    }
+}
+
+/// An iterator over each [`UnmappedFrame`] in a range of [`Frames<{FrameState::Unmapped}>`].
+///
+/// To Do: the description below is no longer valid, since we now have an iterator for `RangeInclusive`,
+/// but it is still useful to have a `Frames<{FrameState::Unmapped}>` iterator that ties the lifetime
+/// of the `UnmappedFrame` to the original object.
+///
+/// We must implement our own iterator type here in order to tie the lifetime `'f`
+/// of a returned `UnmappedFrame<'f>` type to the lifetime of its containing `Frames<{FrameState::Unmapped}>`.
+/// This is because the underlying type of `Frames<{FrameState::Unmapped}>` is a [`FrameRange`],
+/// which itself is a [`core::ops::RangeInclusive`] of [`Frame`]s, and unfortunately the
+/// `RangeInclusive` type doesn't implement an immutable iterator.
+///
+/// Iterating through a `RangeInclusive` actually modifies its own internal range,
+/// so we must avoid doing that because it would break the semantics of a `FrameRange`.
+/// In fact, this is why [`FrameRange`] only implements `IntoIterator` but
+/// does not implement [`Iterator`] itself.
+pub struct UnmappedFramesIter<'f> {
+    _owner: &'f Frames<{FrameState::Unmapped}>,
+    range: range_inclusive::RangeInclusiveIterator<Frame>,
+}
+impl<'f> Iterator for UnmappedFramesIter<'f> {
+    type Item = UnmappedFrame<'f>;
+    fn next(&mut self) -> Option<Self::Item> {
+        self.range.next().map(|frame|
+            UnmappedFrame {
+                frame, _phantom: core::marker::PhantomData,
+            }
+        )
+    }
+}
+
+/// A reference to a single frame within a range of `Frames<{FrameState::Unmapped}>`.
+///
+/// The lifetime of this type is tied to the lifetime of its owning `Frames<{FrameState::Unmapped}>`.
+#[derive(Debug)]
+pub struct UnmappedFrame<'f> {
+    frame: Frame,
+    _phantom: core::marker::PhantomData<&'f Frame>,
+}
+impl<'f> Deref for UnmappedFrame<'f> {
+    type Target = Frame;
+    fn deref(&self) -> &Self::Target {
+        &self.frame
+    }
+}
+assert_not_impl_any!(UnmappedFrame: DerefMut, Clone);
+
+
+impl<const S: FrameState> Frames<S> {
+    #[allow(dead_code)]
+    pub(crate) fn frames(&self) -> FrameRange {
+        self.frames.clone()
+    }
+
+    pub(crate) fn typ(&self) -> MemoryRegionType {
+        self.typ
+    }
+
+    /// Returns a new `Frames` object with an empty range of frames.
+    /// Can be used as a placeholder, but will not permit any real usage.
+    pub const fn empty() -> Frames<S> {
+        Frames {
+            typ: MemoryRegionType::Unknown,
+            frames: FrameRange::empty(),
+            verified_chunk: TrustedChunk::empty()
+        }
+    }
+
+    /// Returns the `frames` and `verified_chunk` fields of this `Frames` object,
+    /// and replaces them with an empty range of frames and an empty `TrustedChunk`.
+    /// It's a convenience function to make sure these two fields are always changed together.
+    fn replace_with_empty(&mut self) -> (FrameRange, TrustedChunk) {
+        let chunk = core::mem::replace(&mut self.verified_chunk, TrustedChunk::empty());
+        let frame_range = core::mem::replace(&mut self.frames, FrameRange::empty());
+        (frame_range, chunk)
+    }
+
+    /// Merges the given `Frames` object `other` into this `Frames` object (`self`).
+    /// This is just for convenience and usability purposes; it performs no allocation or remapping.
+    ///
+    /// The given `other` must be physically contiguous with `self`, i.e., come immediately before or after `self`.
+    /// That is, either `self.start == other.end + 1` or `self.end + 1 == other.start` must be true.
+    ///
+    /// If either of those conditions is met, `self` is modified and `Ok(())` is returned,
+    /// otherwise `Err(other)` is returned.
+    pub fn merge(&mut self, mut other: Self) -> Result<(), Self> {
+        // To Do: check whether we actually need this, or whether the verified merge function takes care of this condition.
+        if self.is_empty() || other.is_empty() {
+            return Err(other);
+        }
+
+        // take out the TrustedChunk from other
+        let (other_frame_range, other_verified_chunk) = other.replace_with_empty();
+
+        // merge the other TrustedChunk with self;
+        // failure here means that the chunks cannot be merged
+        match self.verified_chunk.merge(other_verified_chunk) {
+            Ok(_) => {
+                // use the newly merged TrustedChunk to update the frame range
+                self.frames = into_frame_range(&self.verified_chunk.frames());
+                core::mem::forget(other);
+                // assert!(self.frames.start().number() == self.verified_chunk.start());
+                // assert!(self.frames.end().number() == self.verified_chunk.end());
+                // warn!("merge: {:?}", self);
+                Ok(())
+            },
+            Err(other_verified_chunk) => {
+                other.frames = other_frame_range;
+                other.verified_chunk = other_verified_chunk;
+
+                // assert!(self.frames.start().number() == self.verified_chunk.start());
+                // assert!(self.frames.end().number() == self.verified_chunk.end());
+
+                // assert!(other.frames.start().number() == other.verified_chunk.start());
+                // assert!(other.frames.end().number() == other.verified_chunk.end());
+                Err(other)
+            }
+        }
+    }
+
+    /// Splits up the given `Frames` into multiple smaller `Frames`.
+    ///
+    /// Returns a tuple of three `Frames`:
+    /// 1. The `Frames` containing the requested range of frames starting at `start_frame`.
+    /// 2. The range of frames in `self` that came before the beginning of the requested frame range.
+    /// 3. The range of frames in `self` that came after the end of the requested frame range.
+    ///
+    /// If `start_frame` is not contained within `self`, or if `num_frames` results in an end frame greater than the end of `self`,
+    /// then `self` is not changed and `(self, None, None)` is returned.
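+    ///
+    /// Illustrative sketch (not a doctest; the frame numbers are made up):
+    /// ```ignore
+    /// // Suppose `frames` covers frames 100..=109, and `frame_102` is the Frame numbered 102.
+    /// let (allocated, before, after) = frames.split(frame_102, 4);
+    /// // `allocated` covers 102..=105, `before` covers 100..=101, `after` covers 106..=109.
+    /// ```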
+    pub fn split(
+        mut self,
+        start_frame: Frame,
+        num_frames: usize,
+    ) -> (Self, Option<Self>, Option<Self>) {
+        if self.is_empty() {
+            return (self, None, None);
+        }
+
+        // take out the TrustedChunk
+        let (frame_range, verified_chunk) = self.replace_with_empty();
+
+        let (before, new_allocation, after) = match verified_chunk.split(start_frame.number(), num_frames) {
+            Ok(x) => x,
+            Err(vchunk) => {
+                self.frames = frame_range;
+                self.verified_chunk = vchunk;
+
+                // assert!(self.frames.start().number() == self.verified_chunk.start());
+                // assert!(self.frames.end().number() == self.verified_chunk.end());
+                return (self, None, None);
+            }
+        };
+
+        let c1 = Self {
+            typ: self.typ,
+            frames: into_frame_range(&new_allocation.frames()),
+            verified_chunk: new_allocation
+        };
+        let c2 = before.map(|vchunk|
+            Self {
+                typ: self.typ,
+                frames: into_frame_range(&vchunk.frames()),
+                verified_chunk: vchunk
+            }
+        );
+        let c3 = after.map(|vchunk|
+            Self {
+                typ: self.typ,
+                frames: into_frame_range(&vchunk.frames()),
+                verified_chunk: vchunk
+            }
+        );
+
+        // assert!(c1.frames.start().number() == c1.verified_chunk.start());
+        // assert!(c1.frames.end().number() == c1.verified_chunk.end());
+
+        // if let Some(c) = &c2 {
+        //     assert!(c.frames.start().number() == c.verified_chunk.start());
+        //     assert!(c.frames.end().number() == c.verified_chunk.end());
+        // }
+
+        // if let Some(c) = &c3 {
+        //     assert!(c.frames.start().number() == c.verified_chunk.start());
+        //     assert!(c.frames.end().number() == c.verified_chunk.end());
+        // }
+        // warn!("split: {:?} {:?} {:?}", c1, c2, c3);
+        core::mem::forget(self);
+
+        (c1, c2, c3)
+    }
+
+    /// Splits this `Frames` into two separate `Frames` objects:
+    /// * `[beginning : at_frame - 1]`
+    /// * `[at_frame : end]`
+    ///
+    /// This function follows the behavior of [`core::slice::split_at()`],
+    /// thus, either one of the returned `Frames` objects may be empty.
+    /// * If `at_frame == self.start`, the first returned `Frames` object will be empty.
+    /// * If `at_frame == self.end + 1`, the second returned `Frames` object will be empty.
+    ///
+    /// Returns an `Err` containing this `Frames` if `at_frame` is otherwise out of bounds.
+    ///
+    /// [`core::slice::split_at()`]: https://doc.rust-lang.org/core/primitive.slice.html#method.split_at
+    pub fn split_at(mut self, at_frame: Frame) -> Result<(Self, Self), Self> {
+        if self.is_empty() {
+            return Err(self);
+        }
+
+        // take out the TrustedChunk
+        let (frame_range, verified_chunk) = self.replace_with_empty();
+
+        let (first, second) = match verified_chunk.split_at(at_frame.number()) {
+            Ok((first, second)) => (first, second),
+            Err(vchunk) => {
+                self.frames = frame_range;
+                self.verified_chunk = vchunk;
+
+                // assert!(self.frames.start().number() == self.verified_chunk.start());
+                // assert!(self.frames.end().number() == self.verified_chunk.end());
+                return Err(self);
+            }
+        };
+
+        let c1 = Self {
+            typ: self.typ,
+            frames: into_frame_range(&first.frames()),
+            verified_chunk: first
+        };
+        let c2 = Self {
+            typ: self.typ,
+            frames: into_frame_range(&second.frames()),
+            verified_chunk: second
+        };
+
+        // assert!(c1.frames.start().number() == c1.verified_chunk.start());
+        // assert!(c1.frames.end().number() == c1.verified_chunk.end());
+
+        // assert!(c2.frames.start().number() == c2.verified_chunk.start());
+        // assert!(c2.frames.end().number() == c2.verified_chunk.end());
+
+        // warn!("split at: {:?} {:?}", c1, c2);
+        core::mem::forget(self);
+
+        Ok((c1, c2))
+    }
+}
+
+impl<const S: FrameState> Deref for Frames<S> {
+    type Target = FrameRange;
+    fn deref(&self) -> &FrameRange {
+        &self.frames
+    }
+}
+impl<const S: FrameState> Ord for Frames<S> {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.frames.start().cmp(other.frames.start())
+    }
+}
+impl<const S: FrameState> PartialOrd for Frames<S> {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+// To Do: will this be an issue, now that this applies to `Chunk` as well as `AllocatedFrames`?
+#[cfg(not(test))]
+impl<const S: FrameState> PartialEq for Frames<S> {
+    fn eq(&self, other: &Self) -> bool {
+        self.frames.start() == other.frames.start()
+    }
+}
+#[cfg(test)]
+impl<const S: FrameState> PartialEq for Frames<S> {
+    fn eq(&self, other: &Self) -> bool {
+        self.frames == other.frames
+    }
+}
+impl<const S: FrameState> Borrow<Frame> for &'_ Frames<S> {
+    fn borrow(&self) -> &Frame {
+        self.frames.start()
+    }
+}
+impl<const S: FrameState> fmt::Debug for Frames<S> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "Frames({:?}, {:?}, TrustedChunk: {{ start: {:#X}, end: {:#X} }})", self.typ, self.frames,
+            self.verified_chunk.frames().start() * PAGE_SIZE, self.verified_chunk.frames().end() * PAGE_SIZE)
+    }
+}
+
+fn into_frame_range(frames: &RangeInclusive<usize>) -> FrameRange {
+    let start = into_frame(*frames.start())
+        .expect("Verified chunk start was not a valid frame");
+
+    let end = into_frame(*frames.end())
+        .expect("Verified chunk end was not a valid frame");
+    FrameRange::new(start, end)
+}
+
+fn into_frame(frame_num: usize) -> Option<Frame> {
+    PhysicalAddress::new(frame_num * PAGE_SIZE)
+        .map(Frame::containing_address)
+}
diff --git a/kernel/frame_allocator/src/lib.rs b/kernel/frame_allocator/src/lib.rs
index 1ddfb52475..14711be342 100644
--- a/kernel/frame_allocator/src/lib.rs
+++ b/kernel/frame_allocator/src/lib.rs
@@ -20,6 +20,9 @@
 #![allow(clippy::blocks_in_if_conditions)]
 #![no_std]
+#![feature(box_into_inner)]
+#![allow(incomplete_features)]
+#![feature(adt_const_params)]
 extern crate alloc;
 #[macro_use] extern crate log;
@@ -28,41 +31,50 @@ extern crate memory_structs;
 extern crate spin;
 #[macro_use] extern crate static_assertions;
 extern crate intrusive_collections;
-
+extern crate range_inclusive;
+extern crate trusted_chunk;
 #[cfg(test)]
 mod test;
 mod static_array_rb_tree;
 // mod static_array_linked_list;
+mod frames;
 
-
-use core::{borrow::Borrow, cmp::{Ordering, min, max}, fmt, ops::{Deref, DerefMut}, marker::PhantomData};
+use core::{borrow::Borrow, cmp::{Ordering, min, max}, ops::Deref};
+use frames::*;
 use kernel_config::memory::*;
 use memory_structs::{PhysicalAddress, Frame, FrameRange};
 use spin::Mutex;
 use intrusive_collections::Bound;
 use static_array_rb_tree::*;
+use trusted_chunk::trusted_chunk::TrustedChunk;
+use range_inclusive::RangeInclusive;
+pub use frames::{AllocatedFrames, UnmappedFrame};
 
 const FRAME_SIZE: usize = PAGE_SIZE;
+#[allow(dead_code)]
 const MIN_FRAME: Frame = Frame::containing_address(PhysicalAddress::zero());
+#[allow(dead_code)]
 const MAX_FRAME: Frame = Frame::containing_address(PhysicalAddress::new_canonical(usize::MAX));
 
 // Note: we keep separate lists for "free, general-purpose" areas and "reserved" areas, as it's much faster.
 
 /// The single, system-wide list of free physical memory frames available for general usage.
-static FREE_GENERAL_FRAMES_LIST: Mutex<StaticArrayRBTree<Chunk>> = Mutex::new(StaticArrayRBTree::empty());
+static FREE_GENERAL_FRAMES_LIST: Mutex<StaticArrayRBTree<Frames<{FrameState::Unmapped}>>> = Mutex::new(StaticArrayRBTree::empty());
 /// The single, system-wide list of free physical memory frames reserved for specific usage.
-static FREE_RESERVED_FRAMES_LIST: Mutex<StaticArrayRBTree<Chunk>> = Mutex::new(StaticArrayRBTree::empty());
+static FREE_RESERVED_FRAMES_LIST: Mutex<StaticArrayRBTree<Frames<{FrameState::Unmapped}>>> = Mutex::new(StaticArrayRBTree::empty());
 /// The fixed list of all known regions that are available for general use.
 /// This does not indicate whether these regions are currently allocated,
 /// rather just where they exist and which regions are known to this allocator.
-static GENERAL_REGIONS: Mutex<StaticArrayRBTree<Chunk>> = Mutex::new(StaticArrayRBTree::empty());
+static GENERAL_REGIONS: Mutex<StaticArrayRBTree<Region>> = Mutex::new(StaticArrayRBTree::empty());
 /// The fixed list of all known regions that are reserved for specific purposes.
 /// This does not indicate whether these regions are currently allocated,
 /// rather just where they exist and which regions are known to this allocator.
-static RESERVED_REGIONS: Mutex<StaticArrayRBTree<Chunk>> = Mutex::new(StaticArrayRBTree::empty());
+static RESERVED_REGIONS: Mutex<StaticArrayRBTree<Region>> = Mutex::new(StaticArrayRBTree::empty());
 
+type IntoTrustedChunkFn = fn(RangeInclusive<usize>) -> TrustedChunk;
+type IntoAllocatedFramesFn = fn(TrustedChunk, FrameRange) -> Frames<{FrameState::Unmapped}>;
 
 /// Initialize the frame allocator with the given list of available and reserved physical memory regions.
 ///
@@ -76,11 +88,11 @@ static RESERVED_REGIONS: Mutex<StaticArrayRBTree<Region>> = Mutex::new(StaticArra
 /// ## Return
 /// Upon success, this function returns a callback function that allows the caller
 /// (the memory subsystem init function) to convert a range of unmapped frames
-/// back into an [`AllocatedFrames`] object.
+/// back into a [`Frames<{FrameState::Unmapped}>`] object.
 pub fn init<F, R, P>(
     free_physical_memory_areas: F,
     reserved_physical_memory_areas: R,
-) -> Result<fn(FrameRange) -> AllocatedFrames, &'static str>
+) -> Result<(IntoTrustedChunkFn, IntoAllocatedFramesFn), &'static str>
     where P: Borrow<PhysicalMemRegion>,
           F: IntoIterator<Item = P>,
          R: IntoIterator<Item = P> + Clone,
@@ -93,9 +105,10 @@ pub fn init(
         return Err("BUG: Frame allocator was already initialized, cannot be initialized twice.");
     }
 
-    let mut free_list: [Option<Chunk>; 32] = Default::default();
+    // start with all lists using the `Region` type so we can merge and manipulate until we're sure we have non-overlapping regions
+    let mut free_list: [Option<Region>; 32] = Default::default();
     let mut free_list_idx = 0;
-
+
     // Populate the list of free regions for general-purpose usage.
     for area in free_physical_memory_areas.into_iter() {
         let area = area.borrow();
@@ -107,11 +120,11 @@
             reserved_physical_memory_areas.clone(),
         );
     }
+
-
-    let mut reserved_list: [Option<Chunk>; 32] = Default::default();
+    let mut reserved_list: [Option<Region>; 32] = Default::default();
     for (i, area) in reserved_physical_memory_areas.into_iter().enumerate() {
-        reserved_list[i] = Some(Chunk {
+        reserved_list[i] = Some(Region {
             typ: MemoryRegionType::Reserved,
             frames: area.borrow().frames.clone(),
         });
@@ -119,9 +132,9 @@
     let mut changed = true;
     while changed {
-        let mut temp_reserved_list: [Option<Chunk>; 32] = Default::default();
+        let mut temp_reserved_list: [Option<Region>; 32] = Default::default();
         changed = false;
-
+
         let mut temp_reserved_list_idx = 0;
         for i in 0..temp_reserved_list.len() {
             if let Some(mut current) = reserved_list[i].clone() {
@@ -142,31 +155,49 @@
                 temp_reserved_list_idx += 1;
             }
         }
-
+
        reserved_list = temp_reserved_list;
    }
-
-
-    // Finally, one last sanity check -- ensure no two regions overlap.
-    let all_areas = free_list[..free_list_idx].iter().flatten()
-        .chain(reserved_list.iter().flatten());
-    for (i, elem) in all_areas.clone().enumerate() {
-        let next_idx = i + 1;
-        for other in all_areas.clone().skip(next_idx) {
-            if let Some(overlap) = elem.overlap(other) {
-                panic!("BUG: frame allocator free list had overlapping ranges: \n \t {:?} and {:?} overlap at {:?}",
-                    elem, other, overlap,
-                );
-            }
-        }
-    }
-
-    *FREE_GENERAL_FRAMES_LIST.lock() = StaticArrayRBTree::new(free_list.clone());
-    *FREE_RESERVED_FRAMES_LIST.lock() = StaticArrayRBTree::new(reserved_list.clone());
+
+    // We can remove this sanity check because the following code uses formally verified functions to ensure no two regions overlap.
+    // // Finally, one last sanity check -- ensure no two regions overlap.
+    // let all_areas = free_list[..free_list_idx].iter().flatten()
+    //     .chain(reserved_list.iter().flatten());
+    // for (i, elem) in all_areas.clone().enumerate() {
+    //     let next_idx = i + 1;
+    //     for other in all_areas.clone().skip(next_idx) {
+    //         if let Some(overlap) = elem.overlap(other) {
+    //             panic!("BUG: frame allocator free list had overlapping ranges: \n \t {:?} and {:?} overlap at {:?}",
+    //                 elem, other, overlap,
+    //             );
+    //         }
+    //     }
+    // }
+
+    // Here, since we're sure we now have a list of regions that don't overlap, we can create lists of formally verified Chunks.
+    let mut free_list_w_chunks: [Option<Frames<{FrameState::Unmapped}>>; 32] = Default::default();
+    let mut reserved_list_w_chunks: [Option<Frames<{FrameState::Unmapped}>>; 32] = Default::default();
+    for (i, elem) in reserved_list.iter().flatten().enumerate() {
+        reserved_list_w_chunks[i] = Some(Frames::new(
+            MemoryRegionType::Reserved,
+            elem.frames.clone()
+        )?);
+    }
+
+    for (i, elem) in free_list.iter().flatten().enumerate() {
+        free_list_w_chunks[i] = Some(Frames::new(
+            MemoryRegionType::Free,
+            elem.frames.clone()
+        )?);
+    }
+
+    *FREE_GENERAL_FRAMES_LIST.lock() = StaticArrayRBTree::new(free_list_w_chunks);
+    *FREE_RESERVED_FRAMES_LIST.lock() = StaticArrayRBTree::new(reserved_list_w_chunks);
     *GENERAL_REGIONS.lock() = StaticArrayRBTree::new(free_list);
     *RESERVED_REGIONS.lock() = StaticArrayRBTree::new(reserved_list);
 
-    Ok(into_allocated_frames)
+    // Register the callbacks that create a TrustedChunk and AllocatedFrames from an unmapped PTE.
+    Ok((trusted_chunk::init()?, frames::into_allocated_frames))
 }
 
@@ -178,7 +209,7 @@
 /// the given list of `reserved_physical_memory_areas`.
 fn check_and_add_free_region<P, R>(
     area: &FrameRange,
-    free_list: &mut [Option<Chunk>; 32],
+    free_list: &mut [Option<Region>; 32],
     free_list_idx: &mut usize,
     reserved_physical_memory_areas: R,
 )
@@ -224,7 +255,7 @@ fn check_and_add_free_region<P, R>(
     let new_area = FrameRange::new(current_start, current_end);
     if new_area.size_in_frames() > 0 {
-        free_list[*free_list_idx] = Some(Chunk {
+        free_list[*free_list_idx] = Some(Region {
             typ: MemoryRegionType::Free,
             frames: new_area,
         });
@@ -266,283 +297,64 @@ pub enum MemoryRegionType {
     Unknown,
 }
 
-/// A range of contiguous frames.
+/// A region of contiguous frames.
+/// Only used for bookkeeping, not for allocation.
 ///
 /// # Ordering and Equality
 ///
-/// `Chunk` implements the `Ord` trait, and its total ordering is ONLY based on
-/// its **starting** `Frame`. This is useful so we can store `Chunk`s in a sorted collection.
+/// `Region` implements the `Ord` trait, and its total ordering is ONLY based on
+/// its **starting** `Frame`. This is useful so we can store `Region`s in a sorted collection.
 ///
-/// Similarly, `Chunk` implements equality traits, `Eq` and `PartialEq`,
-/// both of which are also based ONLY on the **starting** `Frame` of the `Chunk`.
-/// Thus, comparing two `Chunk`s with the `==` or `!=` operators may not work as expected.
+/// Similarly, `Region` implements equality traits, `Eq` and `PartialEq`,
+/// both of which are also based ONLY on the **starting** `Frame` of the `Region`.
+/// Thus, comparing two `Region`s with the `==` or `!=` operators may not work as expected,
 /// since it ignores their actual range of frames.
 #[derive(Debug, Clone, Eq)]
-struct Chunk {
-    /// The type of this memory chunk, e.g., whether it's in a free or reserved region.
+#[allow(dead_code)]
+pub struct Region {
+    /// The type of this memory region, e.g., whether it's in a free or reserved region.
     typ: MemoryRegionType,
-    /// The Frames covered by this chunk, an inclusive range.
+    /// The Frames covered by this region, an inclusive range.
     frames: FrameRange,
 }
 
-impl Chunk {
-    fn as_allocated_frames(&self) -> AllocatedFrames {
-        AllocatedFrames {
-            frames: self.frames.clone(),
-        }
-    }
-
-    /// Returns a new `Chunk` with an empty range of frames.
-    const fn empty() -> Chunk {
-        Chunk {
+impl Region {
+    /// Returns a new `Region` with an empty range of frames.
+    pub fn empty() -> Region {
+        Region {
            typ: MemoryRegionType::Unknown,
            frames: FrameRange::empty(),
        }
    }
}
 
-impl Deref for Chunk {
+
+impl Deref for Region {
     type Target = FrameRange;
     fn deref(&self) -> &FrameRange {
         &self.frames
     }
 }
-impl Ord for Chunk {
+impl Ord for Region {
     fn cmp(&self, other: &Self) -> Ordering {
         self.frames.start().cmp(other.frames.start())
     }
 }
-impl PartialOrd for Chunk {
+impl PartialOrd for Region {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
         Some(self.cmp(other))
     }
 }
-impl PartialEq for Chunk {
+impl PartialEq for Region {
     fn eq(&self, other: &Self) -> bool {
         self.frames.start() == other.frames.start()
     }
 }
-impl Borrow<Frame> for &'_ Chunk {
+impl Borrow<Frame> for &'_ Region {
     fn borrow(&self) -> &Frame {
         self.frames.start()
     }
 }
 
-/// Represents a range of allocated physical memory [`Frame`]s; derefs to [`FrameRange`].
-///
-/// These frames are not immediately accessible because they're not yet mapped
-/// by any virtual memory pages.
-/// You must do that separately in order to create a `MappedPages` type,
-/// which can then be used to access the contents of these frames.
-/// -/// This object represents ownership of the range of allocated physical frames; -/// if this object falls out of scope, its allocated frames will be auto-deallocated upon drop. -pub struct AllocatedFrames { - frames: FrameRange, -} - -// AllocatedFrames must not be Cloneable, and it must not expose its inner frames as mutable. -assert_not_impl_any!(AllocatedFrames: DerefMut, Clone); - -impl Deref for AllocatedFrames { - type Target = FrameRange; - fn deref(&self) -> &FrameRange { - &self.frames - } -} -impl fmt::Debug for AllocatedFrames { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "AllocatedFrames({:?})", self.frames) - } -} - -impl AllocatedFrames { - /// Returns an empty AllocatedFrames object that performs no frame allocation. - /// Can be used as a placeholder, but will not permit any real usage. - pub const fn empty() -> AllocatedFrames { - AllocatedFrames { - frames: FrameRange::empty() - } - } - - /// Merges the given `AllocatedFrames` object `other` into this `AllocatedFrames` object (`self`). - /// This is just for convenience and usability purposes, it performs no allocation or remapping. - /// - /// The given `other` must be physically contiguous with `self`, i.e., come immediately before or after `self`. - /// That is, either `self.start == other.end + 1` or `self.end + 1 == other.start` must be true. - /// - /// If either of those conditions are met, `self` is modified and `Ok(())` is returned, - /// otherwise `Err(other)` is returned. - pub fn merge(&mut self, other: AllocatedFrames) -> Result<(), AllocatedFrames> { - if *self.start() == *other.end() + 1 { - // `other` comes contiguously before `self` - self.frames = FrameRange::new(*other.start(), *self.end()); - } - else if *self.end() + 1 == *other.start() { - // `self` comes contiguously before `other` - self.frames = FrameRange::new(*self.start(), *other.end()); - } - else { - // non-contiguous - return Err(other); - } - - // ensure the now-merged AllocatedFrames doesn't run its drop handler and free its frames. - core::mem::forget(other); - Ok(()) - } - - /// Splits this `AllocatedFrames` into two separate `AllocatedFrames` objects: - /// * `[beginning : at_frame - 1]` - /// * `[at_frame : end]` - /// - /// This function follows the behavior of [`core::slice::split_at()`], - /// thus, either one of the returned `AllocatedFrames` objects may be empty. - /// * If `at_frame == self.start`, the first returned `AllocatedFrames` object will be empty. - /// * If `at_frame == self.end + 1`, the second returned `AllocatedFrames` object will be empty. - /// - /// Returns an `Err` containing this `AllocatedFrames` if `at_frame` is otherwise out of bounds. 
- /// - /// [`core::slice::split_at()`]: https://doc.rust-lang.org/core/primitive.slice.html#method.split_at - pub fn split(self, at_frame: Frame) -> Result<(AllocatedFrames, AllocatedFrames), AllocatedFrames> { - let end_of_first = at_frame - 1; - - let (first, second) = if at_frame == *self.start() && at_frame <= *self.end() { - let first = FrameRange::empty(); - let second = FrameRange::new(at_frame, *self.end()); - (first, second) - } - else if at_frame == (*self.end() + 1) && end_of_first >= *self.start() { - let first = FrameRange::new(*self.start(), *self.end()); - let second = FrameRange::empty(); - (first, second) - } - else if at_frame > *self.start() && end_of_first <= *self.end() { - let first = FrameRange::new(*self.start(), end_of_first); - let second = FrameRange::new(at_frame, *self.end()); - (first, second) - } - else { - return Err(self); - }; - - // ensure the original AllocatedFrames doesn't run its drop handler and free its frames. - core::mem::forget(self); - Ok(( - AllocatedFrames { frames: first }, - AllocatedFrames { frames: second }, - )) - } - - /// Returns an `AllocatedFrame` if this `AllocatedFrames` object contains only one frame. - /// - /// ## Panic - /// Panics if this `AllocatedFrame` contains multiple frames or zero frames. - pub fn as_allocated_frame(&self) -> AllocatedFrame { - assert!(self.size_in_frames() == 1); - AllocatedFrame { - frame: *self.start(), - _phantom: PhantomData, - } - } -} - -/// This function is a callback used to convert `UnmappedFrames` into `AllocatedFrames`. -/// `UnmappedFrames` represents frames that have been unmapped from a page that had -/// exclusively mapped them, indicating that no others pages have been mapped -/// to those same frames, and thus, they can be safely deallocated. -/// -/// This exists to break the cyclic dependency cycle between this crate and -/// the `page_table_entry` crate, since `page_table_entry` must depend on types -/// from this crate in order to enforce safety when modifying page table entries. -fn into_allocated_frames(frames: FrameRange) -> AllocatedFrames { - AllocatedFrames { frames } -} - -impl Drop for AllocatedFrames { - fn drop(&mut self) { - if self.size_in_frames() == 0 { return; } - - let (list, typ) = if contains_any(&RESERVED_REGIONS.lock(), &self.frames) { - (&FREE_RESERVED_FRAMES_LIST, MemoryRegionType::Reserved) - } else { - (&FREE_GENERAL_FRAMES_LIST, MemoryRegionType::Free) - }; - // trace!("frame_allocator: deallocating {:?}, typ {:?}", self, typ); - - // Simply add the newly-deallocated chunk to the free frames list. - let mut locked_list = list.lock(); - let res = locked_list.insert(Chunk { - typ, - frames: self.frames.clone(), - }); - match res { - Ok(_inserted_free_chunk) => (), - Err(c) => error!("BUG: couldn't insert deallocated chunk {:?} into free frame list", c), - } - - // Here, we could optionally use above `_inserted_free_chunk` to merge the adjacent (contiguous) chunks - // before or after the newly-inserted free chunk. - // However, there's no *need* to do so until we actually run out of address space or until - // a requested address is in a chunk that needs to be merged. - // Thus, for performance, we save that for those future situations. 
-    }
-}
-
-impl<'f> IntoIterator for &'f AllocatedFrames {
-    type IntoIter = AllocatedFramesIter<'f>;
-    type Item = AllocatedFrame<'f>;
-    fn into_iter(self) -> Self::IntoIter {
-        AllocatedFramesIter {
-            _owner: self,
-            range: self.frames.clone(),
-        }
-    }
-}
-
-/// An iterator over each [`AllocatedFrame`] in a range of [`AllocatedFrames`].
-///
-/// We must implement our own iterator type here in order to tie the lifetime `'f`
-/// of a returned `AllocatedFrame<'f>` type to the lifetime of its containing `AllocatedFrames`.
-/// This is because the underlying type of `AllocatedFrames` is a [`FrameRange`],
-/// which itself is a [`core::ops::RangeInclusive`] of [`Frame`]s, and unfortunately the
-/// `RangeInclusive` type doesn't implement an immutable iterator.
-///
-/// Iterating through a `RangeInclusive` actually modifies its own internal range,
-/// so we must avoid doing that because it would break the semantics of a `FrameRange`.
-/// In fact, this is why [`FrameRange`] only implements `IntoIterator` but
-/// does not implement [`Iterator`] itself.
-pub struct AllocatedFramesIter<'f> {
-    _owner: &'f AllocatedFrames,
-    range: FrameRange,
-}
-impl<'f> Iterator for AllocatedFramesIter<'f> {
-    type Item = AllocatedFrame<'f>;
-    fn next(&mut self) -> Option<Self::Item> {
-        self.range.next().map(|frame|
-            AllocatedFrame {
-                frame, _phantom: PhantomData,
-            }
-        )
-    }
-}
-
-/// A reference to a single frame within a range of `AllocatedFrames`.
-///
-/// The lifetime of this type is tied to the lifetime of its owning `AllocatedFrames`.
-#[derive(Debug)]
-pub struct AllocatedFrame<'f> {
-    frame: Frame,
-    _phantom: PhantomData<&'f Frame>,
-}
-impl<'f> Deref for AllocatedFrame<'f> {
-    type Target = Frame;
-    fn deref(&self) -> &Self::Target {
-        &self.frame
-    }
-}
-assert_not_impl_any!(AllocatedFrame: DerefMut, Clone);
-
-
 /// A series of pending actions related to frame allocator bookkeeping,
 /// which may result in heap allocation.
 ///
@@ -557,21 +369,21 @@
 /// with a `let _ = ...` binding to instantly drop it.
 pub struct DeferredAllocAction<'list> {
     /// A reference to the list into which we will insert the free general-purpose `Chunk`s.
-    free_list: &'list Mutex<StaticArrayRBTree<Chunk>>,
+    free_list: &'list Mutex<StaticArrayRBTree<Frames<{FrameState::Unmapped}>>>,
     /// A reference to the list into which we will insert the free "reserved" `Chunk`s.
-    reserved_list: &'list Mutex<StaticArrayRBTree<Chunk>>,
     /// A free chunk that needs to be added back to the free list.
-    free1: Chunk,
+    reserved_list: &'list Mutex<StaticArrayRBTree<Frames<{FrameState::Unmapped}>>>,
+    /// A free chunk that needs to be added back to the free list.
+    free1: Frames<{FrameState::Unmapped}>,
    /// Another free chunk that needs to be added back to the free list.
-    free2: Chunk,
+    free2: Frames<{FrameState::Unmapped}>,
 }
 impl<'list> DeferredAllocAction<'list> {
     fn new<F1, F2>(free1: F1, free2: F2) -> DeferredAllocAction<'list>
-        where F1: Into<Option<Chunk>>,
-              F2: Into<Option<Chunk>>,
+        where F1: Into<Option<Frames<{FrameState::Unmapped}>>>,
+              F2: Into<Option<Frames<{FrameState::Unmapped}>>>,
     {
-        let free1 = free1.into().unwrap_or_else(Chunk::empty);
-        let free2 = free2.into().unwrap_or_else(Chunk::empty);
+        let free1 = free1.into().unwrap_or_else(Frames::empty);
+        let free2 = free2.into().unwrap_or_else(Frames::empty);
         DeferredAllocAction {
             free_list: &FREE_GENERAL_FRAMES_LIST,
             reserved_list: &FREE_RESERVED_FRAMES_LIST,
@@ -582,19 +394,22 @@ impl<'list> DeferredAllocAction<'list> {
 }
 impl<'list> Drop for DeferredAllocAction<'list> {
     fn drop(&mut self) {
+        let chunk1 = core::mem::replace(&mut self.free1, Frames::empty());
+        let chunk2 = core::mem::replace(&mut self.free2, Frames::empty());
+
         // Insert all of the chunks, both allocated and free ones, into the list.
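+        // Each taken chunk is routed to the free list matching its stored `typ`,
+        // rather than re-deriving its type from the region bookkeeping lists.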
-        if self.free1.size_in_frames() > 0 {
-            match self.free1.typ {
-                MemoryRegionType::Free => { self.free_list.lock().insert(self.free1.clone()).unwrap(); }
-                MemoryRegionType::Reserved => { self.reserved_list.lock().insert(self.free1.clone()).unwrap(); }
-                _ => error!("BUG likely: DeferredAllocAction encountered free1 chunk {:?} of a type Unknown", self.free1),
+        if chunk1.size_in_frames() > 0 {
+            match chunk1.typ() {
+                MemoryRegionType::Free => { self.free_list.lock().insert(chunk1).unwrap(); }
+                MemoryRegionType::Reserved => { self.reserved_list.lock().insert(chunk1).unwrap(); }
+                _ => error!("BUG likely: DeferredAllocAction encountered free1 chunk {:?} of a type Unknown", chunk1),
             }
         }
-        if self.free2.size_in_frames() > 0 {
-            match self.free2.typ {
-                MemoryRegionType::Free => { self.free_list.lock().insert(self.free2.clone()).unwrap(); }
-                MemoryRegionType::Reserved => { self.reserved_list.lock().insert(self.free2.clone()).unwrap(); }
-                _ => error!("BUG likely: DeferredAllocAction encountered free2 chunk {:?} of a type Unknown", self.free2),
+        if chunk2.size_in_frames() > 0 {
+            match chunk2.typ() {
+                MemoryRegionType::Free => { self.free_list.lock().insert(chunk2).unwrap(); }
+                MemoryRegionType::Reserved => { self.reserved_list.lock().insert(chunk2).unwrap(); }
+                _ => error!("BUG likely: DeferredAllocAction encountered free2 chunk {:?} of a type Unknown", chunk2),
             };
         }
     }
 }
 
@@ -612,7 +427,11 @@ enum AllocationError {
     /// or enough remaining chunks that could satisfy the requested allocation size.
     OutOfAddressSpace(usize),
     /// The starting address was found, but not all successive contiguous frames were available.
-    ContiguousChunkNotFound(Frame, usize)
+    ContiguousChunkNotFound(Frame, usize),
+    /// Failed to remove a chunk from the free list given a reference to it.
+    ChunkRemovalFailed,
+    /// Failed to merge or split a chunk.
+    ChunkOperationFailed,
 }
 impl From<AllocationError> for &'static str {
     fn from(alloc_err: AllocationError) -> &'static str {
@@ -621,6 +440,8 @@
             AllocationError::AddressNotFound(..) => "requested address was outside of this frame allocator's range",
             AllocationError::OutOfAddressSpace(..) => "out of physical address space",
             AllocationError::ContiguousChunkNotFound(..) => "only some of the requested frames were available",
+            AllocationError::ChunkRemovalFailed => "failed to remove a chunk from the free list; this is most likely due to a logical error",
+            AllocationError::ChunkOperationFailed => "a verified chunk function returned an error; this is most likely due to a logical error",
         }
     }
 }
 
 /// Searches the given `list` for the chunk that contains the range of frames from
 /// `requested_frame` to `requested_frame + num_frames`.
 fn find_specific_chunk(
-    list: &mut StaticArrayRBTree<Chunk>,
+    list: &mut StaticArrayRBTree<Frames<{FrameState::Unmapped}>>,
     requested_frame: Frame,
     num_frames: usize
-) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> {
+) -> Result<(Frames<{FrameState::Unmapped}>, DeferredAllocAction<'static>), AllocationError> {
 
     // The end frame is an inclusive bound, hence the -1. Parentheses are needed to avoid overflow.
     let requested_end_frame = requested_frame + (num_frames - 1);
 
@@ -643,17 +464,17 @@
             if let Some(chunk) = elem {
                 if requested_frame >= *chunk.start() && requested_end_frame <= *chunk.end() {
                     // Here: `chunk` was big enough and did contain the requested address.
-                    return Ok(allocate_from_chosen_chunk(requested_frame, num_frames, &chunk.clone(), ValueRefMut::Array(elem)));
+                    return allocate_from_chosen_chunk(requested_frame, num_frames, ValueRefMut::Array(elem));
                 }
             }
         }
     }
     Inner::RBTree(ref mut tree) => {
-        let mut cursor_mut = tree.upper_bound_mut(Bound::Included(&requested_frame));
-        if let Some(chunk) = cursor_mut.get().map(|w| w.deref().clone()) {
+        let cursor_mut = tree.upper_bound_mut(Bound::Included(&requested_frame));
+        if let Some(chunk) = cursor_mut.get().map(|w| w.deref()) {
            if chunk.contains(&requested_frame) {
                if requested_end_frame <= *chunk.end() {
-                    return Ok(allocate_from_chosen_chunk(requested_frame, num_frames, &chunk, ValueRefMut::RBTree(cursor_mut)));
+                    return allocate_from_chosen_chunk(requested_frame, num_frames, ValueRefMut::RBTree(cursor_mut));
                } else {
                    // We found the chunk containing the requested address, but it was too small to cover all of the requested frames.
                    // Let's try to merge the next-highest contiguous chunk to see if those two chunks together
                    // can cover the entire requested range.
                    //
                    // trace!("Frame allocator: found chunk containing requested address, but it was too small. \
                    //     Requested address: {:?}, num_frames: {}, chunk: {:?}",
                    //     requested_frame, num_frames, chunk,
                    // );
-                    let next_contiguous_chunk: Option<Chunk> = {
+                    let initial_chunk_ref: Option<ValueRefMut<Frames<{FrameState::Unmapped}>>> = {
                        let next_cursor = cursor_mut.peek_next();
                        if let Some(next_chunk) = next_cursor.get().map(|w| w.deref()) {
                            if *chunk.end() + 1 == *next_chunk.start() {
                                // Here: next chunk was contiguous with the original chunk.
                                if requested_end_frame <= *next_chunk.end() {
                                    // trace!("Frame allocator: found suitably-large contiguous next {:?} after initial too-small {:?}", next_chunk, chunk);
-                                    Some(next_chunk.clone())
+                                    // We cannot clone a `Frames` object, so we return a reference to the initial chunk
+                                    // so that it can be removed first; then we can find and remove the next chunk.
+                                    Some(ValueRefMut::RBTree(cursor_mut))
                                } else {
                                    todo!("Frame allocator: found chunk containing requested address, but it was too small. \
                                        Theseus does not yet support merging more than two chunks during an allocation request. \
                                        (...)");
                                }
                            } else {
                                (...)
                            }
                        } else {
                            (...)
                            return Err(AllocationError::ContiguousChunkNotFound(*chunk.end() + 1, requested_end_frame.number() - chunk.end().number()));
                        }
                    };
-                    if let Some(mut next_chunk) = next_contiguous_chunk {
-                        // We found a suitable chunk that came contiguously after the initial too-small chunk.
-                        // Remove the initial chunk (since we have a cursor pointing to it already)
-                        // and "merge" it into this `next_chunk`.
-                        let _removed_initial_chunk = cursor_mut.remove();
-                        // trace!("Frame allocator: removed suitably-large contiguous next {:?} after initial too-small {:?}", _removed_initial_chunk, chunk);
-                        // Here, `cursor_mut` has been moved forward to point to the `next_chunk` now.
-                        next_chunk.frames = FrameRange::new(*chunk.start(), *next_chunk.end());
-                        return Ok(allocate_from_chosen_chunk(requested_frame, num_frames, &next_chunk, ValueRefMut::RBTree(cursor_mut)));
+
+                    if let Some(initial_chunk_ref) = initial_chunk_ref {
+                        // remove the first chunk
+                        let initial_chunk = retrieve_chunk_from_ref(initial_chunk_ref).ok_or(AllocationError::ChunkRemovalFailed)?;
+
+                        // now search for the next contiguous chunk, which we already know exists
+                        let requested_contiguous_frame = *initial_chunk.end() + 1;
+                        let cursor_mut = tree.upper_bound_mut(Bound::Included(&requested_contiguous_frame));
+                        if let Some(next_chunk) = cursor_mut.get().map(|w| w.deref()) {
+                            if next_chunk.contains(&requested_contiguous_frame) {
+                                // merge the next chunk into the initial chunk
+                                return adjust_chosen_chunk_contiguous(requested_frame, num_frames, initial_chunk, ValueRefMut::RBTree(cursor_mut));
+                            } else {
+                                trace!("This should never fail, since we've already found a contiguous chunk.");
+                            }
+                        }
                    }
                }
            }
        }
    }
@@ -712,20 +542,20 @@
 /// Searches the given `list` for any chunk large enough to hold at least `num_frames`.
 fn find_any_chunk(
-    list: &mut StaticArrayRBTree<Chunk>,
+    list: &mut StaticArrayRBTree<Frames<{FrameState::Unmapped}>>,
     num_frames: usize
-) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), AllocationError> {
+) -> Result<(Frames<{FrameState::Unmapped}>, DeferredAllocAction<'static>), AllocationError> {
     // During the first pass, we ignore designated regions.
     match list.0 {
         Inner::Array(ref mut arr) => {
             for elem in arr.iter_mut() {
                 if let Some(chunk) = elem {
                     // Skip chunks that are too-small or in the designated regions.
-                    if chunk.size_in_frames() < num_frames || chunk.typ != MemoryRegionType::Free {
+                    if chunk.size_in_frames() < num_frames || chunk.typ() != MemoryRegionType::Free {
                         continue;
                     }
                     else {
-                        return Ok(allocate_from_chosen_chunk(*chunk.start(), num_frames, &chunk.clone(), ValueRefMut::Array(elem)));
+                        return allocate_from_chosen_chunk(*chunk.start(), num_frames, ValueRefMut::Array(elem));
                     }
                 }
             }
         }
         Inner::RBTree(ref mut tree) => {
             // Because we allocate new frames by peeling them off from the beginning part of a chunk,
             // it's MUCH faster to start the search for free frames from higher addresses moving down.
             // This results in an O(1) allocation time in the general case, until all address ranges are already in use.
-            let mut cursor = tree.upper_bound_mut(Bound::<&Chunk>::Unbounded);
+            let mut cursor = tree.upper_bound_mut(Bound::<&Frames<{FrameState::Unmapped}>>::Unbounded);
             while let Some(chunk) = cursor.get().map(|w| w.deref()) {
-                if num_frames <= chunk.size_in_frames() && chunk.typ == MemoryRegionType::Free {
-                    return Ok(allocate_from_chosen_chunk(*chunk.start(), num_frames, &chunk.clone(), ValueRefMut::RBTree(cursor)));
+                if num_frames <= chunk.size_in_frames() && chunk.typ() == MemoryRegionType::Free {
+                    return allocate_from_chosen_chunk(*chunk.start(), num_frames, ValueRefMut::RBTree(cursor));
                 }
                 warn!("Frame allocator: inefficient scenario: had to search multiple chunks \
                     (skipping {:?}) while trying to allocate {} frames at any address.",
                    ...
 }
 
+
+/// Removes a chunk from the RBTree.
+/// `chosen_chunk_ref` is a wrapper over the cursor that stores the position of the chosen chunk.
+fn retrieve_chunk_from_ref(mut chosen_chunk_ref: ValueRefMut<Frames<{FrameState::Unmapped}>>) -> Option<Frames<{FrameState::Unmapped}>> {
+    // Remove the chosen chunk from the free frame list.
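+    // Note on the `RemovedValue` handled below: for the array-backed list, `remove()` returns
+    // the taken `Option<Frames<..>>` slot directly, whereas for the RBTree it returns the owned
+    // tree node, which must be unwrapped with `into_inner()` to recover the `Frames` object.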
+    let removed_val = chosen_chunk_ref.remove();
+
+    match removed_val {
+        RemovedValue::Array(c) => c,
+        RemovedValue::RBTree(option_chunk) => {
+            option_chunk.map(|c| c.into_inner())
+        }
+    }
+}
 
 /// The final part of the main allocation routine that splits the given chosen chunk
 /// into multiple smaller chunks, thereby "allocating" frames from it.
 ///
-/// This function breaks up that chunk into multiple ones and returns an `AllocatedFrames`
+/// This function breaks up that chunk into multiple ones and returns a `Frames<{FrameState::Unmapped}>`
 /// from (part of) that chunk, ranging from `start_frame` to `start_frame + num_frames`.
 fn allocate_from_chosen_chunk(
     start_frame: Frame,
     num_frames: usize,
-    chosen_chunk: &Chunk,
-    mut chosen_chunk_ref: ValueRefMut<Chunk>,
-) -> (AllocatedFrames, DeferredAllocAction<'static>) {
-    let (new_allocation, before, after) = split_chosen_chunk(start_frame, num_frames, chosen_chunk);
-
+    chosen_chunk_ref: ValueRefMut<Frames<{FrameState::Unmapped}>>,
+) -> Result<(Frames<{FrameState::Unmapped}>, DeferredAllocAction<'static>), AllocationError> {
     // Remove the chosen chunk from the free frame list.
-    let _removed_chunk = chosen_chunk_ref.remove();
+    let chosen_chunk = retrieve_chunk_from_ref(chosen_chunk_ref).ok_or(AllocationError::ChunkRemovalFailed)?;
+
+    let (new_allocation, before, after) = chosen_chunk.split(start_frame, num_frames);
 
     // TODO: Re-use the allocated wrapper if possible, rather than allocate a new one entirely.
     // if let RemovedValue::RBTree(Some(wrapper_adapter)) = _removed_chunk { ... }
 
-    (
-        new_allocation.as_allocated_frames(),
+    Ok((
+        new_allocation, //.into_allocated_frames(),
         DeferredAllocAction::new(before, after),
-    )
+    ))
 }
 
-/// An inner function that breaks up the given chunk into multiple smaller chunks.
-///
-/// Returns a tuple of three chunks:
-/// 1. The `Chunk` containing the requested range of frames starting at `start_frame`.
-/// 2. The range of frames in the `chosen_chunk` that came before the beginning of the requested frame range.
-/// 3. The range of frames in the `chosen_chunk` that came after the end of the requested frame range.
-fn split_chosen_chunk(
+/// Merges the contiguous chunk given by `contiguous_chunk_ref` into the given `initial_chunk`,
+/// then allocates from the newly merged chunk.
+fn adjust_chosen_chunk_contiguous(
     start_frame: Frame,
     num_frames: usize,
-    chosen_chunk: &Chunk,
-) -> (Chunk, Option<Chunk>, Option<Chunk>) {
-    // The new allocated chunk might start in the middle of an existing chunk,
-    // so we need to break up that existing chunk into 3 possible chunks: before, newly-allocated, and after.
-    //
-    // Because Frames and PhysicalAddresses use saturating add/subtract, we need to double-check that
-    // we don't create overlapping duplicate Chunks at either the very minimum or the very maximum of the address space.
-    let new_allocation = Chunk {
-        typ: chosen_chunk.typ,
-        // The end frame is an inclusive bound, hence the -1. Parentheses are needed to avoid overflow.
-        frames: FrameRange::new(start_frame, start_frame + (num_frames - 1)),
-    };
-    let before = if start_frame == MIN_FRAME {
-        None
-    } else {
-        Some(Chunk {
-            typ: chosen_chunk.typ,
-            frames: FrameRange::new(*chosen_chunk.start(), *new_allocation.start() - 1),
-        })
-    };
-    let after = if new_allocation.end() == &MAX_FRAME {
-        None
-    } else {
-        Some(Chunk {
-            typ: chosen_chunk.typ,
-            frames: FrameRange::new(*new_allocation.end() + 1, *chosen_chunk.end()),
-        })
-    };
+    mut initial_chunk: Frames<{FrameState::Unmapped}>,
+    contiguous_chunk_ref: ValueRefMut<Frames<{FrameState::Unmapped}>>,
+) -> Result<(Frames<{FrameState::Unmapped}>, DeferredAllocAction<'static>), AllocationError> {
+    let contiguous_chunk = retrieve_chunk_from_ref(contiguous_chunk_ref).ok_or(AllocationError::ChunkRemovalFailed)?;
 
-    // some sanity checks -- these can be removed or disabled for better performance
-    if let Some(ref b) = before {
-        assert!(!new_allocation.contains(b.end()));
-        assert!(!b.contains(new_allocation.start()));
-    }
-    if let Some(ref a) = after {
-        assert!(!new_allocation.contains(a.start()));
-        assert!(!a.contains(new_allocation.end()));
-    }
+    initial_chunk.merge(contiguous_chunk).map_err(|_| {
+        trace!("contiguous chunks couldn't be merged, despite previous checks");
+        // To Do: should we re-insert the chunk into the list here?
+        AllocationError::ChunkOperationFailed
+    })?;
+    let (new_allocation, before, after) = initial_chunk.split(start_frame, num_frames);
 
-    (new_allocation, before, after)
-}
+    Ok((
+        new_allocation, //.into_allocated_frames(),
+        DeferredAllocAction::new(before, after),
+    ))
+}
+
 
 /// Returns `true` if the given list contains *any* of the given `frames`.
 fn contains_any(
-    list: &StaticArrayRBTree<Chunk>,
+    list: &StaticArrayRBTree<Region>,
     frames: &FrameRange,
 ) -> bool {
     match &list.0 {
@@ -876,8 +691,62 @@
 /// Currently, this function adds no new frames at all if any frames within the given `frames` list
 /// overlap any existing regions at all.
 /// TODO: handle partially-overlapping regions by extending existing regions on either end.
-fn add_reserved_region(
-    list: &mut StaticArrayRBTree<Chunk>,
+fn add_reserved_region_to_chunk_list(
+    list: &mut StaticArrayRBTree<Frames<{FrameState::Unmapped}>>,
     frames: FrameRange,
 ) -> Result<FrameRange, &'static str> {
+    // We can remove this check because creating a chunk will check for overlaps.
+
+    // // Check whether the reserved region overlaps any existing regions.
+    // match &mut list.0 {
+    //     Inner::Array(ref mut arr) => {
+    //         for chunk in arr.iter().flatten() {
+    //             if let Some(_overlap) = chunk.overlap(&frames) {
+    //                 // trace!("Failed to add reserved region {:?} due to overlap {:?} with existing chunk {:?}",
+    //                 //     frames, _overlap, chunk
+    //                 // );
+    //                 return Err("Failed to add reserved region that overlapped with existing reserved regions (array).");
+    //             }
+    //         }
+    //     }
+    //     Inner::RBTree(ref mut tree) => {
+    //         let mut cursor_mut = tree.upper_bound_mut(Bound::Included(frames.start()));
+    //         while let Some(chunk) = cursor_mut.get().map(|w| w.deref()) {
+    //             if chunk.start() > frames.end() {
+    //                 // We're iterating in ascending order over a sorted tree,
    //                 // so we can stop looking for overlapping regions once we pass the end of the new frames to add.
+    //                 break;
+    //             }
+    //             if let Some(_overlap) = chunk.overlap(&frames) {
+    //                 // trace!("Failed to add reserved region {:?} due to overlap {:?} with existing chunk {:?}",
+    //                 //     frames, _overlap, chunk
+    //                 // );
+    //                 return Err("Failed to add reserved region that overlapped with existing reserved regions (RBTree).");
+    //             }
+    //             cursor_mut.move_next();
+    //         }
+    //     }
+    // }
+
+    list.insert(Frames::new(
+        MemoryRegionType::Reserved,
+        frames.clone(),
+    )?).map_err(|_c| "BUG: Failed to insert non-overlapping frames into list.")?;
+
+    Ok(frames)
+}
+
+
+/// Adds the given `frames` to the given `list` as a `Region` of reserved frames.
+///
+/// Returns the range of **new** frames that were added to the list,
+/// which will be a subset of the given input `frames`.
+///
+/// Currently, this function adds no new frames at all if any frames within the given `frames` list
+/// overlap any existing regions at all.
+/// TODO: handle partially-overlapping regions by extending existing regions on either end.
+fn add_reserved_region_to_region_list(
+    list: &mut StaticArrayRBTree<Region>,
     frames: FrameRange,
 ) -> Result<FrameRange, &'static str> {
@@ -912,7 +781,7 @@
         }
     }
 
-    list.insert(Chunk {
+    list.insert(Region {
         typ: MemoryRegionType::Reserved,
         frames: frames.clone(),
     }).map_err(|_c| "BUG: Failed to insert non-overlapping frames into list.")?;
@@ -925,14 +794,14 @@
 /// optionally at the requested starting `PhysicalAddress`.
 ///
 /// This simply reserves a range of frames; it does not perform any memory mapping.
-/// Thus, the memory represented by the returned `AllocatedFrames` isn't directly accessible
+/// Thus, the memory represented by the returned `Frames<{FrameState::Unmapped}>` isn't directly accessible
 /// until you map virtual pages to them.
 ///
 /// Allocation is based on a red-black tree and is thus `O(log(n))`.
 /// Fragmentation isn't cleaned up until we're out of address space, but that's not really a big deal.
 ///
 /// # Arguments
-/// * `requested_paddr`: if `Some`, the returned `AllocatedFrames` will start at the `Frame`
+/// * `requested_paddr`: if `Some`, the returned `Frames<{FrameState::Unmapped}>` will start at the `Frame`
 /// containing this `PhysicalAddress`.
 /// If `None`, the first available `Frame` range will be used, starting at any random physical address.
 /// * `num_frames`: the number of `Frame`s to be allocated.
@@ -946,7 +815,7 @@
 pub fn allocate_frames_deferred(
     requested_paddr: Option<PhysicalAddress>,
     num_frames: usize,
-) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), &'static str> {
+) -> Result<(Frames<{FrameState::Unmapped}>, DeferredAllocAction<'static>), &'static str> {
     if num_frames == 0 {
         warn!("frame_allocator: requested an allocation of 0 frames... stupid!");
         return Err("cannot allocate zero frames");
     }
@@ -984,11 +853,10 @@
     // but ONLY if those frames are *NOT* in the general-purpose region.
     let requested_frames = FrameRange::new(requested_start_frame, requested_start_frame + (requested_num_frames - 1));
     if !contains_any(&GENERAL_REGIONS.lock(), &requested_frames) {
-        let new_reserved_frames = add_reserved_region(&mut RESERVED_REGIONS.lock(), requested_frames)?;
-        // If we successfully added a new reserved region,
-        // then add those frames to the actual list of *available* reserved regions.
-        let _new_free_reserved_frames = add_reserved_region(&mut free_reserved_frames_list, new_reserved_frames.clone())?;
-        assert_eq!(new_reserved_frames, _new_free_reserved_frames);
+        // If we successfully create a new `Frames` chunk via the verified functions, then add a new reserved region.
+        let new_free_reserved_frames = add_reserved_region_to_chunk_list(&mut free_reserved_frames_list, requested_frames)?;
+        let _new_reserved_frames = add_reserved_region_to_region_list(&mut RESERVED_REGIONS.lock(), new_free_reserved_frames.clone())?;
+        assert_eq!(_new_reserved_frames, new_free_reserved_frames);
 
         find_specific_chunk(&mut free_reserved_frames_list, start_frame, num_frames)
     } else {
@@ -1007,7 +875,7 @@
 pub fn allocate_frames_by_bytes_deferred(
     requested_paddr: Option<PhysicalAddress>,
     num_bytes: usize,
-) -> Result<(AllocatedFrames, DeferredAllocAction<'static>), &'static str> {
+) -> Result<(Frames<{FrameState::Unmapped}>, DeferredAllocAction<'static>), &'static str> {
     let actual_num_bytes = if let Some(paddr) = requested_paddr {
         num_bytes + (paddr.value() % FRAME_SIZE)
     } else {
@@ -1021,7 +889,7 @@
 /// Allocates the given number of frames with no constraints on the starting physical address.
 ///
 /// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details.
-pub fn allocate_frames(num_frames: usize) -> Option<AllocatedFrames> {
+pub fn allocate_frames(num_frames: usize) -> Option<Frames<{FrameState::Unmapped}>> {
     allocate_frames_deferred(None, num_frames)
         .map(|(af, _action)| af)
         .ok()
@@ -1033,7 +901,7 @@ pub fn allocate_frames(num_frames: usize) -> Option<AllocatedFrames> {
 ///
 /// This function still allocates whole frames by rounding up the number of bytes.
 /// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details.
-pub fn allocate_frames_by_bytes(num_bytes: usize) -> Option<AllocatedFrames> {
+pub fn allocate_frames_by_bytes(num_bytes: usize) -> Option<Frames<{FrameState::Unmapped}>> {
     allocate_frames_by_bytes_deferred(None, num_bytes)
         .map(|(af, _action)| af)
         .ok()
@@ -1044,7 +912,7 @@ pub fn allocate_frames_by_bytes(num_bytes: usize) -> Option<AllocatedFrames> {
 ///
 /// This function still allocates whole frames by rounding up the number of bytes.
 /// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details.
-pub fn allocate_frames_by_bytes_at(paddr: PhysicalAddress, num_bytes: usize) -> Result<AllocatedFrames, &'static str> {
+pub fn allocate_frames_by_bytes_at(paddr: PhysicalAddress, num_bytes: usize) -> Result<Frames<{FrameState::Unmapped}>, &'static str> {
     allocate_frames_by_bytes_deferred(Some(paddr), num_bytes)
         .map(|(af, _action)| af)
 }
@@ -1053,7 +921,7 @@ pub fn allocate_frames_by_bytes_at(paddr: PhysicalAddress, num_bytes: usize) ->
 /// Allocates the given number of frames starting at (inclusive of) the frame containing the given `PhysicalAddress`.
 ///
 /// See [`allocate_frames_deferred()`](fn.allocate_frames_deferred.html) for more details.
-pub fn allocate_frames_at(paddr: PhysicalAddress, num_frames: usize) -> Result<AllocatedFrames, &'static str> {
+pub fn allocate_frames_at(paddr: PhysicalAddress, num_frames: usize) -> Result<Frames<{FrameState::Unmapped}>, &'static str> {
     allocate_frames_deferred(Some(paddr), num_frames)
         .map(|(af, _action)| af)
 }
@@ -1065,6 +933,7 @@ pub fn allocate_frames_at(paddr: PhysicalAddress, num_frames: usize) -> Result<
             Wrapper {
                 inner: value,
             })
     }
+
+    /// Returns the inner value, consuming this wrapper.
+    pub(crate) fn into_inner(self) -> T {
+        self.inner
+    }
 }
diff --git a/kernel/frame_allocator/src/test.rs b/kernel/frame_allocator/src/test.rs
index d68ced8356..8a4518dcd2 100644
--- a/kernel/frame_allocator/src/test.rs
+++ b/kernel/frame_allocator/src/test.rs
@@ -1,4 +1,6 @@
 //! Tests for the AllocatedFrames type, mainly the `split` method.
+//! These tests must be run individually, because running them all at once leads to
+//! overlaps between `TrustedChunk`s, which causes errors.
 
 extern crate std;
 
@@ -6,19 +8,11 @@ use self::std::dbg;
 
 use super::*;
 
-impl PartialEq for AllocatedFrames {
-    fn eq(&self, other: &Self) -> bool {
-        self.frames == other.frames
-    }
-}
-
 fn from_addr(start_addr: usize, end_addr: usize) -> AllocatedFrames {
-    AllocatedFrames {
-        frames: FrameRange::new(
+    AllocatedFrames::new(MemoryRegionType::Free, FrameRange::new(
         Frame::containing_address(PhysicalAddress::new_canonical(start_addr)),
         Frame::containing_address(PhysicalAddress::new_canonical(end_addr)),
-        )
-    }
+    )).unwrap()
 }
 
 fn frame_addr(addr: usize) -> Frame {
@@ -30,7 +24,7 @@ fn split_before_beginning() {
     let original = from_addr( 0x4275000, 0x4285000);
     let split_at = frame_addr(0x4274000);
 
-    let result = original.split(split_at);
+    let result = original.split_at(split_at);
     dbg!(&result);
     assert!(result.is_err());
 }
@@ -39,14 +33,14 @@ fn split_before_beginning() {
 fn split_at_beginning() {
     let original = from_addr( 0x4275000, 0x4285000);
     let split_at = frame_addr(0x4275000);
-    let first = AllocatedFrames::empty();
-    let second = from_addr( 0x4275000, 0x4285000);
+    let first = FrameRange::empty();
+    let second = FrameRange::new(frame_addr(0x4275000), frame_addr(0x4285000));
 
-    let result = original.split(split_at);
+    let result = original.split_at(split_at);
     dbg!(&result);
     let (result1, result2) = result.unwrap();
-    assert_eq!(result1, first);
-    assert_eq!(result2, second);
+    assert_eq!(result1.deref().clone(), first);
+    assert_eq!(result2.deref().clone(), second);
 }
 
@@ -54,28 +48,28 @@ fn split_at_beginning() {
 fn split_at_middle() {
     let original = from_addr( 0x4275000, 0x4285000);
     let split_at = frame_addr( 0x427D000);
-    let first = from_addr( 0x4275000, 0x427C000);
-    let second = from_addr( 0x427D000, 0x4285000);
+    let first = FrameRange::new(frame_addr(0x4275000), frame_addr(0x427C000));
+    let second = FrameRange::new(frame_addr(0x427D000), frame_addr(0x4285000));
 
-    let result = original.split(split_at);
+    let result = original.split_at(split_at);
     dbg!(&result);
     let (result1, result2) = result.unwrap();
-    assert_eq!(result1, first);
-    assert_eq!(result2, second);
+    assert_eq!(result1.deref().clone(), first);
+    assert_eq!(result2.deref().clone(), second);
 }
 
 #[test]
 fn split_at_end() {
     let original = from_addr( 0x4275000, 0x4285000);
     let split_at = frame_addr( 0x4285000);
-    let first = from_addr( 0x4275000, 0x4284000);
-    let second = from_addr( 0x4285000, 0x4285000);
+    let first = FrameRange::new(frame_addr(0x4275000), frame_addr(0x4284000));
+    let second = FrameRange::new(frame_addr(0x4285000), frame_addr(0x4285000));
 
-    let result = original.split(split_at);
+    let result = original.split_at(split_at);
     dbg!(&result);
     let (result1, result2) = result.unwrap();
-    assert_eq!(result1, first);
-    assert_eq!(result2, second);
+    assert_eq!(result1.deref().clone(), first);
+    assert_eq!(result2.deref().clone(), second);
 }
 
@@ -83,14 +77,14 @@ fn split_at_end() {
 fn split_after_end() {
     let original = from_addr( 0x4275000, 0x4285000);
     let split_at = frame_addr( 0x4286000);
-    let first = from_addr( 0x4275000, 0x4285000);
-    let second = AllocatedFrames::empty();
+    let first = FrameRange::new(frame_addr(0x4275000), frame_addr(0x4285000));
+    let second = FrameRange::empty();
 
-    let result = original.split(split_at);
+    let result = original.split_at(split_at);
     dbg!(&result);
     let (result1, result2) = result.unwrap();
-    assert_eq!(result1, first);
-    assert_eq!(result2, second);
+    assert_eq!(result1.deref().clone(), first);
+    assert_eq!(result2.deref().clone(), second);
 }
 
@@ -99,7 +93,7 @@ fn split_empty_at_zero() {
     let original = AllocatedFrames::empty();
     let split_at = frame_addr(0x0000);
 
-    let result = original.split(split_at);
+    let result = original.split_at(split_at);
     dbg!(&result);
     assert!(result.is_err());
 }
@@ -109,7 +103,7 @@ fn split_empty_at_one() {
     let original = AllocatedFrames::empty();
     let split_at = frame_addr(0x1000);
 
-    let result = original.split(split_at);
+    let result = original.split_at(split_at);
     dbg!(&result);
     assert!(result.is_err());
 }
@@ -119,7 +113,7 @@ fn split_empty_at_two() {
     let original = AllocatedFrames::empty();
     let split_at = frame_addr(0x2000);
 
-    let result = original.split(split_at);
+    let result = original.split_at(split_at);
     dbg!(&result);
     assert!(result.is_err());
 }
 
@@ -129,57 +123,57 @@ fn split_empty_at_two() {
 #[test]
 fn split_at_beginning_zero() {
     let original = from_addr( 0x0, 0x5000);
-    let split_at = frame_addr(0x0);
-    let first = AllocatedFrames::empty();
-    let second = from_addr(0x0, 0x5000);
+    let split_at = frame_addr(0x0); // leads to attempt to subtract with overflow
+    let first = FrameRange::empty();
+    let second = FrameRange::new(frame_addr(0x0), frame_addr(0x5000));
 
-    let result = original.split(split_at);
+    let result = original.split_at(split_at);
     dbg!(&result);
     let (result1, result2) = result.unwrap();
-    assert_eq!(result1, first);
-    assert_eq!(result2, second);
+    assert_eq!(result1.deref().clone(), first);
+    assert_eq!(result2.deref().clone(), second);
 }
 
 #[test]
 fn split_at_beginning_one() {
     let original = from_addr( 0x0000, 0x5000);
     let split_at = frame_addr(0x1000);
-    let first = from_addr( 0x0000, 0x0000);
-    let second = from_addr( 0x1000, 0x5000);
+    let first = FrameRange::new(frame_addr(0x0000), frame_addr(0x0000));
+    let second = FrameRange::new(frame_addr(0x1000), frame_addr(0x5000));
 
-    let result = original.split(split_at);
+    let result = original.split_at(split_at);
     dbg!(&result);
     let (result1, result2) = result.unwrap();
-    assert_eq!(result1, first);
-    assert_eq!(result2, second);
+    assert_eq!(result1.deref().clone(), first);
+    assert_eq!(result2.deref().clone(), second);
 }
 
 #[test]
 fn split_at_beginning_max_length_one() {
     let original = from_addr( 0xFFFF_FFFF_FFFF_F000, 0xFFFF_FFFF_FFFF_F000);
     let split_at = frame_addr(0xFFFF_FFFF_FFFF_F000);
-    let first = AllocatedFrames::empty();
-    let second = from_addr(0xFFFF_FFFF_FFFF_F000, 0xFFFF_FFFF_FFFF_F000);
+    let first = FrameRange::empty();
+    let second = FrameRange::new(frame_addr(0xFFFF_FFFF_FFFF_F000), frame_addr(0xFFFF_FFFF_FFFF_F000));
 
-    let result = original.split(split_at);
+    let result = original.split_at(split_at);
     dbg!(&result);
     let (result1, result2) = result.unwrap();
-    assert_eq!(result1, first);
-    assert_eq!(result2, second);
+    assert_eq!(result1.deref().clone(), first);
+    assert_eq!(result2.deref().clone(), second);
 }
 
 #[test]
 fn split_at_end_max_length_two() {
     let original = from_addr( 0xFFFF_FFFF_FFFF_E000, 0xFFFF_FFFF_FFFF_F000);
     let split_at = frame_addr( 0xFFFF_FFFF_FFFF_F000);
-    let first = from_addr( 0xFFFF_FFFF_FFFF_E000, 0xFFFF_FFFF_FFFF_E000);
-    let second = from_addr( 0xFFFF_FFFF_FFFF_F000, 0xFFFF_FFFF_FFFF_F000);
+    let first = FrameRange::new(frame_addr(0xFFFF_FFFF_FFFF_E000), frame_addr(0xFFFF_FFFF_FFFF_E000));
+    let second = FrameRange::new(frame_addr(0xFFFF_FFFF_FFFF_F000), frame_addr(0xFFFF_FFFF_FFFF_F000));
 
-    let result = original.split(split_at);
+    let result = original.split_at(split_at);
     dbg!(&result);
     let (result1, result2) = result.unwrap();
-    assert_eq!(result1, first);
-    assert_eq!(result2, second);
+    assert_eq!(result1.deref().clone(), first);
+    assert_eq!(result2.deref().clone(), second);
 }
 
@@ -187,26 +181,26 @@ fn split_at_end_max_length_two() {
 fn split_after_end_max() {
     let original = from_addr( 0xFFFF_FFFF_FFFF_E000, 0xFFFF_FFFF_FFFF_E000);
     let split_at = frame_addr(0xFFFF_FFFF_FFFF_F000);
-    let first = from_addr( 0xFFFF_FFFF_FFFF_E000, 0xFFFF_FFFF_FFFF_E000);
-    let second = AllocatedFrames::empty();
+    let first = FrameRange::new(frame_addr(0xFFFF_FFFF_FFFF_E000), frame_addr(0xFFFF_FFFF_FFFF_E000));
+    let second = FrameRange::empty();
 
-    let result = original.split(split_at);
+    let result = original.split_at(split_at);
     dbg!(&result);
     let (result1, result2) = result.unwrap();
-    assert_eq!(result1, first);
-    assert_eq!(result2, second);
+    assert_eq!(result1.deref().clone(), first);
+    assert_eq!(result2.deref().clone(), second);
 }
 
 #[test]
 fn split_at_beginning_max() {
     let original = from_addr( 0xFFFF_FFFF_FFFF_E000, 0xFFFF_FFFF_FFFF_E000);
     let split_at = frame_addr(0xFFFF_FFFF_FFFF_E000);
-    let first = AllocatedFrames::empty();
-    let second = from_addr(0xFFFF_FFFF_FFFF_E000, 0xFFFF_FFFF_FFFF_E000);
+    let first = FrameRange::empty();
+    let second = FrameRange::new(frame_addr(0xFFFF_FFFF_FFFF_E000), frame_addr(0xFFFF_FFFF_FFFF_E000));
 
-    let result = original.split(split_at);
+    let result = original.split_at(split_at);
     dbg!(&result);
     let (result1, result2) = result.unwrap();
-    assert_eq!(result1, first);
-    assert_eq!(result2, second);
+    assert_eq!(result1.deref().clone(), first);
+    assert_eq!(result2.deref().clone(), second);
 }
diff --git a/kernel/frame_range_callbacks/Cargo.toml b/kernel/frame_range_callbacks/Cargo.toml
new file mode 100644
index 0000000000..f0437a452f
--- /dev/null
+++ b/kernel/frame_range_callbacks/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+authors = ["Ramla Ijaz"]
+name = "frame_range_callbacks"
+description = "trusted callbacks to recreate memory ranges from an unmapped PTE"
+version = "0.1.0"
+
+[dependencies]
+log = "0.4.8"
+spin = "0.9.4"
+core2 = { version = "0.4.0", default-features = false, features = ["alloc", "nightly"] }
+x86_64 = "0.14.8"
+range_inclusive = { git = "https://github.com/Ramla-I/range_inclusive" }
+
+[dependencies.page_table_entry]
+path = "../page_table_entry"
+
+[dependencies.frame_allocator]
+path = "../frame_allocator"
+
+[dependencies.trusted_chunk]
+path = "../../libs/trusted_chunk"
+
+[dependencies.memory_structs]
+path = "../memory_structs"
+
+
+[lib]
+crate-type = ["rlib"]
\ No newline at end of file
diff --git a/kernel/frame_range_callbacks/src/lib.rs b/kernel/frame_range_callbacks/src/lib.rs
new file mode 100644
index 0000000000..bb45e5a3be
--- /dev/null
+++ b/kernel/frame_range_callbacks/src/lib.rs
@@ -0,0 +1,64 @@
+#![no_std]
+//! This crate contains callbacks to create `TrustedChunk` objects and then `AllocatedFrames` objects from an `UnmappedFrames`.
+//! It's required to avoid a cyclic dependency between the `frame_allocator` and `page_table_entry` crates.
+//!
+//! The public `from_unmapped()` function ensures that an `UnmappedFrames` object has to be consumed to run the callbacks,
+//! making sure that they can only be called when a PTE has been unmapped.
+
+extern crate page_table_entry;
+extern crate frame_allocator;
+extern crate trusted_chunk;
+extern crate memory_structs;
+extern crate spin;
+extern crate range_inclusive;
+
+use core::ops::Deref;
+use page_table_entry::UnmappedFrames;
+use frame_allocator::AllocatedFrames;
+use trusted_chunk::trusted_chunk::TrustedChunk;
+use memory_structs::FrameRange;
+use spin::Once;
+use range_inclusive::RangeInclusive;
+
+/// This is a private callback used to convert `UnmappedFrames` into a `TrustedChunk`.
+/// The `TrustedChunk` is then used to create an `AllocatedFrames`.
+///
+/// This is safe because the init function in the `trusted_chunk` crate returns this callback only once,
+/// and only this crate has access to the callback. The callback function has been verified with the
+/// invariant that the new `TrustedChunk` has the same bounds as the range passed as an argument.
+static INTO_TRUSTED_CHUNK_FUNC: Once<fn(RangeInclusive<usize>) -> TrustedChunk> = Once::new();
+
+/// This is a private callback used to convert `UnmappedFrames` into `AllocatedFrames`.
+///
+/// This exists to break the dependency cycle between `page_table_entry` and
+/// `frame_allocator`, which depend on each other as follows:
+/// * `frame_allocator` needs to `impl Into<AllocatedFrames> for UnmappedFrames`
+/// in order to allow unmapped exclusive frames to be safely deallocated
+/// * `page_table_entry` needs to use the `AllocatedFrames` type in order to allow
+/// page table entry values to be set safely to a real physical frame that is owned and exists.
+///
+/// To get around that, the `frame_allocator::init()` function returns a callback
+/// that allows converting a range of unmapped frames back into `AllocatedFrames`,
+/// which then allows them to be dropped and thus deallocated.
+///
+/// This is safe because the frame allocator can only be initialized once, and also because
+/// only this crate has access to that function callback and can thus guarantee
+/// that it is only invoked for `UnmappedFrames`.
+static INTO_ALLOCATED_FRAMES_FUNC: Once<fn(TrustedChunk, FrameRange) -> AllocatedFrames> = Once::new();
+
+pub fn init(
+    into_trusted_chunk_fn: fn(RangeInclusive<usize>) -> TrustedChunk,
+    into_alloc_frames_fn: fn(TrustedChunk, FrameRange) -> AllocatedFrames,
+) {
+    INTO_TRUSTED_CHUNK_FUNC.call_once(|| into_trusted_chunk_fn);
+    INTO_ALLOCATED_FRAMES_FUNC.call_once(|| into_alloc_frames_fn);
+}
+
+pub fn from_unmapped(unmapped_frames: UnmappedFrames) -> Result<AllocatedFrames, &'static str> {
+    let frames = unmapped_frames.deref().clone();
+    let tc = INTO_TRUSTED_CHUNK_FUNC.get()
+        .ok_or("BUG: Mapper::unmap(): the `INTO_TRUSTED_CHUNK_FUNC` callback was not initialized")
+        .map(|into_func| into_func(unmapped_frames.deref().to_range_inclusive()))?;
+
+    INTO_ALLOCATED_FRAMES_FUNC.get()
+        .ok_or("BUG: Mapper::unmap(): the `INTO_ALLOCATED_FRAMES_FUNC` callback was not initialized")
+        .map(|into_func| into_func(tc, frames))
+}
diff --git a/kernel/memory/Cargo.toml b/kernel/memory/Cargo.toml
index d40940c9dc..08efb803c3 100644
--- a/kernel/memory/Cargo.toml
+++ b/kernel/memory/Cargo.toml
@@ -24,6 +24,7 @@ page_allocator = { path = "../page_allocator" }
 frame_allocator = { path = "../frame_allocator" }
 no_drop = { path = "../no_drop" }
 owned_borrowed_trait = { path = "../../libs/owned_borrowed_trait" }
+frame_range_callbacks = { path = "../frame_range_callbacks" }
 
 irq_safety = { git = "https://github.com/theseus-os/irq_safety" }
 
diff --git a/kernel/memory/src/lib.rs b/kernel/memory/src/lib.rs
index bf847c75d3..fae1fc80e5 100644
--- a/kernel/memory/src/lib.rs
+++ b/kernel/memory/src/lib.rs
@@ -14,6 +14,7 @@
 #![feature(ptr_internals)]
 
 extern crate alloc;
+extern crate frame_range_callbacks;
 
 mod paging;
 pub use self::paging::{
@@ -251,7 +252,7 @@ pub fn init(
         reserved_index += 1;
     }
 
-    let into_alloc_frames_fn = frame_allocator::init(free_regions.iter().flatten(), reserved_regions.iter().flatten())?;
+    let (into_trusted_chunk_fn, into_alloc_frames_fn) = frame_allocator::init(free_regions.iter().flatten(), reserved_regions.iter().flatten())?;
     debug!("Initialized new frame allocator!");
     frame_allocator::dump_frame_allocator_state();
 
@@ -270,8 +271,10 @@ pub fn init(
     debug!("Initialized new page allocator!");
     page_allocator::dump_page_allocator_state();
 
+    frame_range_callbacks::init(into_trusted_chunk_fn, into_alloc_frames_fn);
+
     // Initialize paging, which creates a new page table and maps all of the current code/data sections into it.
-    paging::init(boot_info, kernel_stack_start, into_alloc_frames_fn)
+    paging::init(boot_info, kernel_stack_start)
 }
 
 /// Finishes initializing the memory management system after the heap is ready.
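
Review note on the two `Once` statics above: they hold plain function pointers that `memory::init()` registers exactly once, which is what lets this intermediate crate sit between `page_table_entry` and `frame_allocator` without either crate importing the other. A minimal, self-contained sketch of that registration pattern follows; the names `Token`, `Resource`, `CONVERT`, and `convert` are illustrative stand-ins for `UnmappedFrames`, `AllocatedFrames`, the statics, and `from_unmapped`, not actual Theseus APIs.

// Sketch of the `spin::Once`-based callback pattern (illustrative names only).
use spin::Once;

struct Token(usize);     // stands in for `UnmappedFrames` (produced by the lower crate)
struct Resource(usize);  // stands in for `AllocatedFrames` (produced by the upper crate)

// The intermediate crate stores a function pointer exactly once at init time,
// so the two end crates never need to depend on each other directly.
static CONVERT: Once<fn(Token) -> Resource> = Once::new();

pub fn init(f: fn(Token) -> Resource) {
    CONVERT.call_once(|| f);
}

pub fn convert(t: Token) -> Result<Resource, &'static str> {
    // Fails loudly if init() was never called, mirroring the "BUG: ..." errors above.
    CONVERT.get()
        .ok_or("BUG: the conversion callback was not initialized")
        .map(|f| f(t))
}

Because `convert()` takes its token by value, the caller must give up ownership of the input to obtain the output, which is the same consume-to-convert guarantee that `from_unmapped()` relies on.
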
diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs
index 5387e3aa81..957738b650 100644
--- a/kernel/memory/src/paging/mapper.rs
+++ b/kernel/memory/src/paging/mapper.rs
@@ -18,14 +18,13 @@ use core::{
     slice,
 };
 use log::{error, warn, debug, trace};
-use crate::{BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page, Frame, FrameRange, AllocatedPages, AllocatedFrames};
+use crate::{BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page, Frame, AllocatedPages, AllocatedFrames};
 use crate::paging::{
     get_current_p4,
     PageRange,
     table::{P4, UPCOMING_P4, Table, Level4},
 };
 use pte_flags::PteFlagsArch;
-use spin::Once;
 use kernel_config::memory::PAGE_SIZE;
 use super::tlb_flush_virt_addr;
 use zerocopy::FromBytes;
@@ -35,24 +34,6 @@ use owned_borrowed_trait::{OwnedOrBorrowed, Owned, Borrowed};
 #[cfg(target_arch = "x86_64")]
 use kernel_config::memory::ENTRIES_PER_PAGE_TABLE;
 
-/// This is a private callback used to convert `UnmappedFrames` into `AllocatedFrames`.
-///
-/// This exists to break the cyclic dependency cycle between `page_table_entry` and
-/// `frame_allocator`, which depend on each other as such:
-/// * `frame_allocator` needs to `impl Into<AllocatedFrames> for UnmappedFrames`
-/// in order to allow unmapped exclusive frames to be safely deallocated
-/// * `page_table_entry` needs to use the `AllocatedFrames` type in order to allow
-/// page table entry values to be set safely to a real physical frame that is owned and exists.
-///
-/// To get around that, the `frame_allocator::init()` function returns a callback
-/// to its function that allows converting a range of unmapped frames back into `AllocatedFrames`,
-/// which then allows them to be dropped and thus deallocated.
-///
-/// This is safe because the frame allocator can only be initialized once, and also because
-/// only this crate has access to that function callback and can thus guarantee
-/// that it is only invoked for `UnmappedFrames`.
-pub(super) static INTO_ALLOCATED_FRAMES_FUNC: Once<fn(FrameRange) -> AllocatedFrames> = Once::new();
-
 /// A convenience function to translate the given virtual address into a
 /// physical address using the currently-active page table.
 pub fn translate(virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
@@ -259,7 +240,8 @@ impl Mapper {
         // there is no easy/efficient way to store a dynamic list of non-contiguous frames (would require Vec).
         // This is okay because we will deallocate each of these frames when this MappedPages object is dropped
         // and each of the page table entries for its pages are cleared.
-        core::mem::forget(frames);
+        let mapped_frames = frames.into_mapped_frames(); // mark the frames as mapped
+        core::mem::forget(mapped_frames);
 
         Ok(mapped_pages)
     }
@@ -296,8 +278,9 @@ impl Mapper {
                 return Err("map_allocated_pages(): page was already in use");
             }
 
-            p1[page.p1_index()].set_entry(af.as_allocated_frame(), actual_flags);
-            core::mem::forget(af); // we currently forget frames allocated here since we don't yet have a way to track them.
+            p1[page.p1_index()].set_entry(af.as_unmapped_frame(), actual_flags);
+            let mapped_frames = af.into_mapped_frames(); // mark the frame as mapped
+            core::mem::forget(mapped_frames); // we currently forget frames allocated here since we don't yet have a way to track them.
         }
 
         Ok(MappedPages {
@@ -631,9 +614,7 @@ impl MappedPages {
             // freed from the newly-unmapped P1 PTE entry above.
            match unmapped_frames {
                UnmapResult::Exclusive(newly_unmapped_frames) => {
-                    let newly_unmapped_frames = INTO_ALLOCATED_FRAMES_FUNC.get()
-                        .ok_or("BUG: Mapper::unmap(): the `INTO_ALLOCATED_FRAMES_FUNC` callback was not initialized")
-                        .map(|into_func| into_func(newly_unmapped_frames.deref().clone()))?;
+                    let newly_unmapped_frames = frame_range_callbacks::from_unmapped(newly_unmapped_frames)?;
 
                     if let Some(mut curr_frames) = current_frame_range.take() {
                         match curr_frames.merge(newly_unmapped_frames) {
diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs
index 1761c52cb9..b4bc0b82b8 100644
--- a/kernel/memory/src/paging/mod.rs
+++ b/kernel/memory/src/paging/mod.rs
@@ -29,7 +29,7 @@ use core::{
 };
 use log::debug;
 use super::{
-    Frame, FrameRange, PageRange, VirtualAddress, PhysicalAddress,
+    Frame, PageRange, VirtualAddress, PhysicalAddress,
     AllocatedPages, allocate_pages, AllocatedFrames, PteFlags,
     InitialMemoryMappings, tlb_flush_all, tlb_flush_virt_addr,
     get_p4, find_section_memory_bounds,
@@ -110,7 +110,7 @@ impl PageTable {
         temporary_page.with_table_and_frame(|new_table, frame| {
             new_table.zero();
             new_table[RECURSIVE_P4_INDEX].set_entry(
-                frame.as_allocated_frame(),
+                frame.as_unmapped_frame(),
                 PteFlagsArch::new().valid(true).writable(true),
             );
         })?;
@@ -153,11 +153,11 @@ impl PageTable {
         // Overwrite upcoming page table recursive mapping.
         temporary_page.with_table_and_frame(|table, frame| {
             self.p4_mut()[UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry(
-                frame.as_allocated_frame(),
+                frame.as_unmapped_frame(),
                 PteFlagsArch::new().valid(true).writable(true),
             );
             table[UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry(
-                frame.as_allocated_frame(),
+                frame.as_unmapped_frame(),
                 PteFlagsArch::new().valid(true).writable(true),
             );
         })?;
@@ -223,12 +223,7 @@ pub fn get_current_p4() -> Frame {
 pub fn init(
     boot_info: &impl BootInformation,
     stack_start_virt: VirtualAddress,
-    into_alloc_frames_fn: fn(FrameRange) -> AllocatedFrames,
 ) -> Result<InitialMemoryMappings, &'static str> {
-    // Store the callback from `frame_allocator::init()` that allows the `Mapper` to convert
-    // `page_table_entry::UnmappedFrames` back into `AllocatedFrames`.
-    mapper::INTO_ALLOCATED_FRAMES_FUNC.call_once(|| into_alloc_frames_fn);
-
     // bootstrap a PageTable from the currently-loaded page table
     let mut page_table = PageTable::from_current()
         .map_err(|_| "Failed to allocate frame for initial page table; is it merged with another section?")?;
diff --git a/kernel/memory/src/paging/table.rs b/kernel/memory/src/paging/table.rs
index 95a70c5353..a74800ef1a 100644
--- a/kernel/memory/src/paging/table.rs
+++ b/kernel/memory/src/paging/table.rs
@@ -140,7 +140,7 @@ impl Table {
         assert!(!is_huge(&self[index].flags()), "mapping code does not support huge pages");
         let af = frame_allocator::allocate_frames(1).expect("next_table_create(): no frames available");
         self[index].set_entry(
-            af.as_allocated_frame(),
+            af.as_unmapped_frame(),
             flags.valid(true).writable(true), // must be valid and writable on x86_64
         );
         self.next_table_mut(index).unwrap().zero();
diff --git a/kernel/memory_structs/Cargo.toml b/kernel/memory_structs/Cargo.toml
index bf1787139c..317859d922 100644
--- a/kernel/memory_structs/Cargo.toml
+++ b/kernel/memory_structs/Cargo.toml
@@ -12,6 +12,7 @@ derive_more = "0.99.0"
 paste = "1.0.5"
 
 kernel_config = { path = "../kernel_config" }
+range_inclusive = { git = "https://github.com/Ramla-I/range_inclusive" }
 
 [lib]
 crate-type = ["rlib"]
diff --git a/kernel/memory_structs/src/lib.rs b/kernel/memory_structs/src/lib.rs
index 923ff73590..85838a74bc 100644
--- a/kernel/memory_structs/src/lib.rs
+++ b/kernel/memory_structs/src/lib.rs
@@ -12,12 +12,13 @@ use core::{
     cmp::{min, max},
     fmt,
     iter::Step,
-    ops::{Add, AddAssign, Deref, DerefMut, RangeInclusive, Sub, SubAssign}
+    ops::{Add, AddAssign, Deref, DerefMut, Sub, SubAssign}
 };
 use kernel_config::memory::{MAX_PAGE_NUMBER, PAGE_SIZE};
 use zerocopy::FromBytes;
 use paste::paste;
 use derive_more::*;
+use range_inclusive::{RangeInclusive, RangeInclusiveIterator};
 
 /// A macro for defining `VirtualAddress` and `PhysicalAddress` structs
 /// and implementing their common traits, which are generally identical.
@@ -439,6 +440,11 @@ macro_rules! implement_page_frame_range {
                     None
                 }
             }
+
+            #[doc = "Returns a `RangeInclusive<usize>` with the same bounds."]
+            pub fn to_range_inclusive(&self) -> RangeInclusive<usize> {
+                RangeInclusive::new(self.start().number(), self.end().number())
+            }
         }
         impl fmt::Debug for $TypeName {
             fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@@ -458,9 +464,9 @@ macro_rules! implement_page_frame_range {
         }
         impl IntoIterator for $TypeName {
             type Item = $chunk;
-            type IntoIter = RangeInclusive<$chunk>;
+            type IntoIter = RangeInclusiveIterator<$chunk>;
             fn into_iter(self) -> Self::IntoIter {
-                self.0
+                self.0.into_iter()
             }
         }
diff --git a/kernel/page_table_entry/src/lib.rs b/kernel/page_table_entry/src/lib.rs
index a9116b6c3c..1829a03309 100644
--- a/kernel/page_table_entry/src/lib.rs
+++ b/kernel/page_table_entry/src/lib.rs
@@ -15,7 +15,7 @@
 use core::ops::Deref;
 use memory_structs::{Frame, FrameRange, PhysicalAddress};
 use zerocopy::FromBytes;
-use frame_allocator::AllocatedFrame;
+use frame_allocator::UnmappedFrame;
 use pte_flags::{PteFlagsArch, PTE_FRAME_MASK};
 
 /// A page table entry, which is a `u64` value under the hood.
@@ -90,7 +90,7 @@ impl PageTableEntry {
     /// This is the actual mapping action that informs the MMU of a new mapping.
     ///
     /// Note: this performs no checks about the current value of this page table entry.
-    pub fn set_entry(&mut self, frame: AllocatedFrame, flags: PteFlagsArch) {
+    pub fn set_entry(&mut self, frame: UnmappedFrame, flags: PteFlagsArch) {
         self.0 = (frame.start_address().value() as u64) | flags.bits();
     }
 
diff --git a/libs/trusted_chunk b/libs/trusted_chunk
new file mode 160000
index 0000000000..b1dcda3ed7
--- /dev/null
+++ b/libs/trusted_chunk
@@ -0,0 +1 @@
+Subproject commit b1dcda3ed72b14e3a248c5c10f2cb89e3251b56e
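
A closing note for reviewers: throughout this diff, `AllocatedFrames` return types become `Frames<{FrameState::Unmapped}>`, a typestate built on nightly const generics over a custom enum. A reduced sketch of the pattern follows, with illustrative field and method names rather than the PR's exact code, to show how the unmapped-to-mapped transition is enforced at compile time.

#![feature(adt_const_params)]
#![allow(incomplete_features)]
// Sketch only: recent nightlies additionally require the enum to derive
// `core::marker::ConstParamTy`; older toolchains need only `PartialEq + Eq`.

#[derive(PartialEq, Eq)]
pub enum FrameState { Unmapped, Mapped }

pub struct Frames<const S: FrameState> {
    start: usize,  // first frame number in the range (illustrative field)
    count: usize,  // number of frames in the range (illustrative field)
}

impl Frames<{FrameState::Unmapped}> {
    /// The state transition consumes `self`, so the type system guarantees
    /// that a given range can never be usable as both unmapped and mapped.
    pub fn into_mapped_frames(self) -> Frames<{FrameState::Mapped}> {
        Frames { start: self.start, count: self.count }
    }
}

Because the transition consumes the unmapped value, call sites in this diff such as `Mapper::map_allocated_pages()` convert and then `core::mem::forget` the resulting mapped frames, matching the expectation that mapped frames are forgotten rather than dropped.
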