3 changes: 3 additions & 0 deletions src/fakes.rs
@@ -31,6 +31,9 @@ static bootblock: usize = 65536 + 4096;
static __eloader: usize = 65536 + 8192;
/// Defined in assembly.
#[no_mangle]
static MMIO_BASE: usize = 65536 + 16384;
/// Defined in assembly.
#[no_mangle]
static STACK_SIZE: u64 = 8 * 4096;
/// Defined in assembly.
#[no_mangle]
3 changes: 1 addition & 2 deletions src/loader.rs
@@ -129,8 +129,7 @@ fn load_segment(
page_table
.map_region(region.clone(), mem::Attrs::new_data(), pa)
.expect("mapped region {region:#x?} read-write");
const NULL: *mut u8 = core::ptr::null_mut();
let p = NULL.with_addr(start.addr());
let p = page_table.try_with_addr(start.addr()).unwrap();
let len = end.addr() - start.addr();
core::ptr::write_bytes(p, 0, len);
core::slice::from_raw_parts_mut(p, len)
62 changes: 39 additions & 23 deletions src/mmu.rs
@@ -58,8 +58,8 @@
//! radix tree. The `Table` trait describes behaviors at a
//! particular level in the tree. Concrete types exist for
//! tables at each tree level.
//! * `TableInner` --- An interior node in the tree can either,
//! depending on its specific type, either map to a large page
//! * `TableInner` --- An interior node in the tree can,
//! depending on its specific type, either map to a "big" page
//! or point to a next-level page table. This trait describes
//! behaviors of table types that can point to other nodes.
//! * `Mapping` and the `Mapping(1|2|3|4)` enumerations ---
@@ -112,7 +112,10 @@ use alloc::vec::Vec;
use bitstruct::bitstruct;
use core::ops::Range;

const NULL: *const () = core::ptr::null();
#[derive(Clone, Copy, Debug)]
pub(super) enum Error {
BadPointer,
}

/// We start with basic page and frame types.

@@ -131,7 +134,6 @@ trait Frame {
}

/// An aligned 4KiB span of physical address space.
/// XXX(cross): Provide an invariant enforcing implementation.
#[derive(Clone, Copy, Debug)]
#[repr(transparent)]
struct PFN4K(u64);
@@ -150,7 +152,6 @@ impl Frame for PFN4K {
}

/// An aligned 2MiB span of physical address space.
/// XXX(cross): Provide an invariant enforcing implementation.
#[derive(Clone, Copy, Debug)]
#[repr(transparent)]
struct PFN2M(u64);
@@ -169,7 +170,6 @@ impl Frame for PFN2M {
}

/// An aligned 1GiB span of physical address space.
/// XXX(cross): Provide an invariant enforcing implementation.
#[derive(Clone, Copy, Debug)]
#[repr(transparent)]
struct PFN1G(u64);
@@ -189,7 +189,6 @@ impl Frame for PFN1G {

/// Represents a 4KiB page of virtual memory, aligned on a 4KiB
/// boundary.
/// XXX(cross): Provide an invariant enforcing implementation.
#[derive(Clone)]
struct Page4K(usize);
impl Page4K {
@@ -201,7 +200,6 @@ impl Page4K {

/// Represents a 2MiB page of virtual memory, aligned on a 2MiB
/// boundary.
/// XXX(cross): Provide an invariant enforcing implementation.
struct Page2M(usize);
impl Page2M {
fn new(va: usize) -> Self {
@@ -212,7 +210,6 @@ impl Page2M {

/// Represents a 1GiB page of virtual memory, aligned on a 1GiB
/// boundary.
/// XXX(cross): Provide an invariant enforcing implementation.
struct Page1G(usize);
impl Page1G {
fn new(va: usize) -> Self {
@@ -235,7 +232,7 @@ enum Mapping1 {
impl Mapping for Mapping1 {
fn virt_addr(&self) -> *const () {
match self {
Mapping1::Map4K(page, _, _) => NULL.with_addr(page.addr()),
Mapping1::Map4K(page, _, _) => core::ptr::invalid(page.addr()),
}
}
}
@@ -249,7 +246,7 @@ enum Mapping2 {
impl Mapping for Mapping2 {
fn virt_addr(&self) -> *const () {
match self {
Mapping2::Map2M(page, _, _) => NULL.with_addr(page.addr()),
Mapping2::Map2M(page, _, _) => core::ptr::invalid(page.addr()),
Mapping2::Next(mapping1) => mapping1.virt_addr(),
}
}
@@ -264,7 +261,7 @@ enum Mapping3 {
impl Mapping for Mapping3 {
fn virt_addr(&self) -> *const () {
match self {
Mapping3::Map1G(page, _, _) => NULL.with_addr(page.addr()),
Mapping3::Map1G(page, _, _) => core::ptr::invalid(page.addr()),
Mapping3::Next(mapping2) => mapping2.virt_addr(),
}
}
@@ -435,8 +432,7 @@ impl PTE {
/// Tables are taken from the identity mapped region of the
/// address space.
unsafe fn virt_addr(self) -> *const () {
const NIL: *const () = core::ptr::null();
NIL.with_addr(self.phys_addr() as usize)
core::ptr::invalid(self.phys_addr() as usize)
}
}

@@ -903,6 +899,25 @@ impl PageTable {
self.pml4.map(P::mapping(page, frame, attrs));
}
}

/// XXX(cross): Make this actually walk the table to make sure
/// the VA is really mapped.
fn is_mapped(&self, va: usize) -> bool {
va != 0
}

/// Returns a raw pointer to a virtual address mapped by
/// this table.
pub(crate) fn try_with_addr<T>(&self, va: usize) -> Result<*mut T, Error> {
if !self.is_mapped(va) {
return Err(Error::BadPointer);
}
let ptr = core::ptr::from_exposed_addr_mut::<()>(va);
if !ptr.is_aligned_to(core::mem::align_of::<T>()) {
return Err(Error::BadPointer);
}
Ok(ptr as *mut T)
}
}
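
For context, a minimal usage sketch of the new PageTable::try_with_addr (illustrative only: zero_mapped_page, the address, and the element type are made up here, but the pattern mirrors what src/loader.rs now does when zeroing a freshly mapped segment):

fn zero_mapped_page(page_table: &PageTable, va: usize) {
    // try_with_addr returns Err(Error::BadPointer) if the VA is unmapped
    // (today just a non-zero check) or misaligned for the target type.
    let p: *mut u8 = page_table.try_with_addr(va).expect("va is mapped");
    // Safety: the region backing `va` must be mapped writable for 4 KiB.
    unsafe { core::ptr::write_bytes(p, 0, 4096) };
}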

#[cfg(test)]
@@ -961,7 +976,7 @@ mod tests {
// Examine the PML3 entries. There should be a single
// entry pointing to a PML2 for the loader, and two huge
// pages for MMIO space.
let pml3 = pml4.next_mut(NULL.with_addr(0x8000_0000)).unwrap();
let pml3 = pml4.next_mut(core::ptr::invalid(0x8000_0000)).unwrap();
let n = pml3.entries.iter().filter(|&e| e.p()).count();
assert_eq!(n, 3);
let l0g = pml3.entries[0];
@@ -993,7 +1008,7 @@

// Check the PML2 entries. The PML2 maps a gigabyte of
// address space from 0 to 0x4000_0000.
let pml2 = pml3.next_mut(NULL.with_addr(0x1000_0000)).unwrap();
let pml2 = pml3.next_mut(core::ptr::invalid(0x1000_0000)).unwrap();
let n = pml2.entries.iter().filter(|&e| e.p()).count();
assert_eq!(n, 512 - 512 / 4);
// The lower quarter of the PML2 should be empty.
@@ -1029,7 +1044,7 @@
// Check the 4KiB PML1 entries. There should be one
// text page, two RO data pages, and a bunch of RW
// data pages.
let pml1 = pml2.next_mut(NULL.with_addr(0x1000_0000)).unwrap();
let pml1 = pml2.next_mut(core::ptr::invalid(0x1000_0000)).unwrap();
// Text.
assert!(pml1.entries[0].p());
assert!(!pml1.entries[0].w());
@@ -1133,6 +1148,12 @@ impl LoaderPageTable {
})
}

/// Returns a pointer from a virtual address mapped by this
/// table.
pub(crate) fn try_with_addr<T>(&self, va: usize) -> Result<*mut T, Error> {
self.page_table.try_with_addr(va)
}

/// Returns the physical address of the page table root.
pub(crate) fn phys_addr(&self) -> u64 {
self.page_table.phys_addr()
@@ -1181,7 +1202,7 @@ mod loader_page_table_tests {
mod arena {
extern crate alloc;

use super::Table;
use super::{Error, Table};
use crate::allocator::BumpAlloc;
use alloc::alloc::{AllocError, Allocator, Layout};
use core::ptr;
@@ -1202,11 +1223,6 @@
/// size and aligned.
pub(super) struct TableAlloc;

#[derive(Clone, Copy, Debug)]
pub(super) enum Error {
BadPointer,
}

impl TableAlloc {
/// Try and convert an integer to a pointer.
pub(super) fn try_with_addr<T: Table>(
10 changes: 4 additions & 6 deletions src/uart.rs
@@ -441,8 +441,8 @@ impl Device {
}

fn reset<'a>(self) -> &'a mut ConfigMmio {
const NULL: *mut ConfigMmio = core::ptr::null_mut();
let uart = unsafe { &mut *NULL.with_addr(self.addr()) };
let regs = core::ptr::from_exposed_addr_mut::<ConfigMmio>(self.addr());
let uart = unsafe { &mut *regs };
Comment on lines +444 to +445
@saethlin saethlin Oct 17, 2022

The pointer from from_exposed_addr(something.addr()) would be UB to deref if this were an address inside the AM. And this pattern in general is very lintable and trips the linter between my ears. I'd suggest you change it if I could come up with a better name instead of addr, but just FYI at some point a new nightly clippy might be upset by this.

And maybe soon we will have offset_of! in order to do this address computation for the write_volatile in a way that for sure doesn't have the aliasing implications of &mut. I hope.

Collaborator Author

Oophf, sorry, I've been trying to reply for two days but have been a bit under the weather and haven't gotten it done.

If I'm interpreting this correctly, this is probably fine for now because the MMIO addresses for the UART fall outside of the AM, with the caveat that at some point things might be less happy with the overall pattern. This seems like something to revisit at some point; perhaps using something like core::ptr::addr_of_mut!?

I remain curious about what to do about memory that's been freshly mapped. It occurs to me that this can probably be approximated in a hosted environment by playing around with mmap.
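
As a rough illustration of that addr_of_mut! suggestion (not part of this change; reset_raw and SRR_RESET are made-up names), the volatile write could compute the register address without ever materializing a &mut to MMIO space:

fn reset_raw(addr: usize) {
    // Same exposed-provenance conversion as in the diff above, but the
    // field address is taken with addr_of_mut!, so no reference to
    // ConfigMmio is ever created.
    let regs = core::ptr::from_exposed_addr_mut::<ConfigMmio>(addr);
    unsafe {
        let srr = core::ptr::addr_of_mut!((*regs).srr);
        core::ptr::write_volatile(srr, SRR_RESET);
    }
}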

unsafe {
ptr::write_volatile(
&mut uart.srr,
@@ -470,8 +470,7 @@ impl Uart {
}

fn write_mmio_mut(&mut self) -> &mut MmioWrite {
const NULL: *mut MmioWrite = core::ptr::null_mut();
let regs = NULL.with_addr(self.0.addr());
let regs = core::ptr::from_exposed_addr_mut::<MmioWrite>(self.0.addr());
unsafe { &mut *regs }
}

@@ -480,8 +479,7 @@
// it is mutually exclusive with a write MMIO structure,
// as the two share the same register space.
fn read_mmio_mut(&mut self) -> &mut MmioRead {
const NULL: *mut MmioRead = core::ptr::null_mut();
let regs = NULL.with_addr(self.0.addr());
let regs = core::ptr::from_exposed_addr_mut::<MmioRead>(self.0.addr());
unsafe { &mut *regs }
}
