Commit 0871bc4
Bootloader+Kernel: Load initfs into kernel's page mapping
corigan01 committed Jan 26, 2025
1 parent 8fadee2 commit 0871bc4
Showing 13 changed files with 342 additions and 39 deletions.
2 changes: 1 addition & 1 deletion bootloader/src/lib.rs
@@ -70,7 +70,7 @@ pub struct Stage32toStage64 {
}

/// # `Stage64` to `Kernel` Info Block
#[derive(Debug)]
#[derive(Debug, Clone, Copy)]
pub struct KernelBootHeader {
pub phys_mem_map: &'static PhysMemoryMap<MEMORY_REGIONS>,
pub video_mode: Option<(VesaModeId, VesaMode)>,
3 changes: 3 additions & 0 deletions bootloader/stage-16bit/src/main.rs
@@ -223,6 +223,9 @@ fn main(disk_id: u16) -> ! {

let stack_region = unsafe { alloc.allocate(1024 * 1024) }.unwrap();

// The initfs needs to be 2 MiB page aligned
alloc.align_ptr_to(1024 * 1024);

// Initfs region
let mut initfs_file = fatfs
.open(qconfig.initfs)
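Note: the 2 MiB requirement comes from the 64-bit stage, which maps the initfs with 2 MiB pages and asserts that its physical start is 2 MiB aligned (see the changes to bootloader/stage-64bit below). A minimal sketch of the align-up arithmetic this relies on, assuming `align_ptr_to` rounds the allocator's bump pointer up to the next multiple of its argument (that function is not shown in this diff):

```rust
/// Round `addr` up to the next multiple of `align` (which must be a power of two).
/// Assumed to mirror what `align_ptr_to` does to the bootloader's bump pointer.
fn align_up(addr: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (addr + align - 1) & !(align - 1)
}

fn main() {
    const PAGE_2M: usize = 2 * 1024 * 1024;
    // After the 1 MiB stack allocation the bump pointer may only be 1 MiB aligned...
    let bump: usize = 0x30_0000; // 3 MiB
    // ...so it must be rounded up to the next 2 MiB boundary before the initfs is
    // loaded, or the `is_align_to(initfs_start, PAGE_2M)` assert in stage-64bit fires.
    assert_eq!(align_up(bump, PAGE_2M), 0x40_0000); // 4 MiB
}
```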
31 changes: 19 additions & 12 deletions bootloader/stage-64bit/src/main.rs
@@ -42,7 +42,8 @@ use serial::{Serial, baud::SerialBaud};
use util::{
align_to,
bytes::HumanBytes,
consts::{MIB, PAGE_2M},
consts::{MIB, PAGE_2M, PAGE_4K},
(GitHub Actions / Build OS warning on line 45: unused import: `PAGE_4K`)
is_align_to,
};

mod paging;
@@ -130,12 +131,12 @@ fn main(stage_to_stage: &Stage32toStage64) {
(virt_info.stack_end_virt - virt_info.stack_start_virt) as usize,
),
kernel_init_heap: (
virt_info.init_start_virt,
(virt_info.init_end_virt - virt_info.init_start_virt) as usize,
virt_info.heap_start_virt,
(virt_info.heap_end_virt - virt_info.heap_start_virt) as usize,
),
initfs_ptr: (
stage_to_stage.initfs_ptr.0,
stage_to_stage.initfs_ptr.1 as usize,
virt_info.initfs_start_virt,
(virt_info.initfs_end_virt - virt_info.initfs_start_virt) as usize,
),
});

@@ -208,7 +209,12 @@ fn build_memory_map(
})
.expect("Unable to add elf to memory map");

let (initfs_start, initfs_len) = s2s.initfs_ptr;
let (initfs_start, mut initfs_len) = s2s.initfs_ptr;
assert!(
is_align_to(initfs_start, PAGE_2M),
"INITFS is not 2Mib page aligned, please ensure initfs is page aligned!"
);
initfs_len = align_to(initfs_len, PAGE_2M);
mm.add_region(PhysMemoryEntry {
kind: PhysMemoryKind::InitFs,
start: (initfs_start as usize).into(),
@@ -261,7 +267,7 @@ fn build_memory_map(
.expect("Unable to find region for kernel's stack pages");
mm.add_region(kernels_stack_pages).unwrap();

let kernels_init_pages = mm
let kernels_heap_pages = mm
.find_continuous_of(
PhysMemoryKind::Free,
PAGE_2M,
@@ -272,8 +278,8 @@
kind: PhysMemoryKind::KernelHeap,
..p
})
.expect("Unable to find region for kernel's stack pages");
mm.add_region(kernels_init_pages).unwrap();
.expect("Unable to find region for kernel's heap pages");
mm.add_region(kernels_heap_pages).unwrap();

logln!("{}", mm);

@@ -287,10 +293,11 @@
kernels_stack_pages.len() as usize,
),
kernel_virt: kernel_exe_ptr,
kernel_init_phys: (
kernels_init_pages.start.addr() as u64,
kernels_init_pages.len() as usize,
kernel_heap_phys: (
kernels_heap_pages.start.addr() as u64,
kernels_heap_pages.len() as usize,
),
kernel_initfs_phys: (initfs_start, initfs_len as usize),
}
}
}
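Note: with this change, `initfs_ptr` in the boot header now carries the kernel-virtual range produced by `build_page_tables` rather than the raw physical pointer from `Stage32toStage64`, so the kernel can read the archive through its own mapping. A hypothetical kernel-side consumer, not part of this diff, assuming the tuple is `(virtual start, length in bytes)` as shown above:

```rust
// Hypothetical kernel-side use of the new `initfs_ptr` field (not in this diff).
fn initfs_bytes(kbh: &KernelBootHeader) -> &'static [u8] {
    let (virt_start, len) = kbh.initfs_ptr;
    // Sound only because stage-64bit mapped `len` bytes of the initfs at
    // `virt_start` with 2 MiB pages before handing control to the kernel.
    unsafe { core::slice::from_raw_parts(virt_start as usize as *const u8, len) }
}
```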
59 changes: 49 additions & 10 deletions bootloader/stage-64bit/src/paging.rs
@@ -53,7 +53,8 @@ static TABLE_LVL2_KERN: SyncUnsafeCell<PageMapLvl2> = SyncUnsafeCell::new(PageMa
pub struct PageTableConfig {
pub kernel_exe_phys: (u64, usize),
pub kernel_stack_phys: (u64, usize),
pub kernel_init_phys: (u64, usize),
pub kernel_heap_phys: (u64, usize),
pub kernel_initfs_phys: (u64, usize),
pub kernel_virt: u64,
}

@@ -63,8 +64,10 @@ pub struct KernelVirtInfo {
pub exe_end_virt: u64,
pub stack_start_virt: u64,
pub stack_end_virt: u64,
pub init_start_virt: u64,
pub init_end_virt: u64,
pub heap_start_virt: u64,
pub heap_end_virt: u64,
pub initfs_start_virt: u64,
pub initfs_end_virt: u64,
}

impl KernelVirtInfo {
@@ -137,7 +140,8 @@ pub fn build_page_tables(c: PageTableConfig) -> KernelVirtInfo {

let exe_pages = ((c.kernel_exe_phys.1 - 1) / PAGE_2M) + 1;
let stack_pages = ((c.kernel_stack_phys.1 - 1) / PAGE_2M) + 1;
let init_pages = ((c.kernel_init_phys.1 - 1) / PAGE_2M) + 1;
let heap_pages = ((c.kernel_heap_phys.1 - 1) / PAGE_2M) + 1;
let initfs_pages = ((c.kernel_initfs_phys.1 - 1) / PAGE_2M) + 1;

for mb2 in 0..exe_pages {
let phy_addr = c.kernel_exe_phys.0 + (mb2 * PAGE_2M) as u64;
@@ -162,9 +166,9 @@ pub fn build_page_tables(c: PageTableConfig) -> KernelVirtInfo {
unsafe { (*TABLE_LVL2_KERN.get()).store(lvl2_entry, mb2 + exe_pages + 1 + tbl2_offset) };
}

// KERNEL MAP (INIT)
for mb2 in 0..init_pages {
let phy_addr = c.kernel_init_phys.0 + (mb2 * PAGE_2M) as u64;
// KERNEL MAP (HEAP)
for mb2 in 0..heap_pages {
let phy_addr = c.kernel_heap_phys.0 + (mb2 * PAGE_2M) as u64;

let lvl2_entry = PageEntry2M::new()
.set_present_flag(true)
@@ -191,14 +195,49 @@

unsafe { (*TABLE_LVL4.get()).store(lvl4_entry, tbl4_offset) };

// KERNEL MAP (INITFS)
for mb2 in 0..initfs_pages {
let phy_addr = c.kernel_initfs_phys.0 + (mb2 * PAGE_2M) as u64;

let lvl2_entry = PageEntry2M::new()
.set_present_flag(true)
.set_read_write_flag(true)
.set_phy_address(phy_addr);

unsafe {
(*TABLE_LVL2_KERN.get()).store(
lvl2_entry,
mb2 + exe_pages + 3 + stack_pages + tbl2_offset + heap_pages,
)
};
}

let lvl3_kernel_entry = PageEntryLvl3::new()
.set_present_flag(true)
.set_read_write_flag(true)
.set_next_entry_phy_address(unsafe { (*TABLE_LVL2_KERN.get()).table_ptr() });

unsafe { (*TABLE_LVL3_KERN.get()).store(lvl3_kernel_entry, tbl3_offset) };

let lvl4_entry = PageEntryLvl4::new()
.set_present_flag(true)
.set_read_write_flag(true)
.set_next_entry_phy_address(unsafe { (*TABLE_LVL3_KERN.get()).table_ptr() });

unsafe { (*TABLE_LVL4.get()).store(lvl4_entry, tbl4_offset) };

KernelVirtInfo {
exe_start_virt: c.kernel_virt,
exe_end_virt: c.kernel_virt + (exe_pages * PAGE_2M) as u64,
stack_start_virt: c.kernel_virt + ((exe_pages + 1) * PAGE_2M) as u64,
stack_end_virt: c.kernel_virt + ((exe_pages + stack_pages + 1) * PAGE_2M) as u64,
init_start_virt: c.kernel_virt + ((exe_pages + stack_pages + 2) * PAGE_2M) as u64,
init_end_virt: c.kernel_virt
+ ((exe_pages + stack_pages + init_pages + 2) * PAGE_2M) as u64,
heap_start_virt: c.kernel_virt + ((exe_pages + stack_pages + 2) * PAGE_2M) as u64,
heap_end_virt: c.kernel_virt
+ ((exe_pages + stack_pages + heap_pages + 2) * PAGE_2M) as u64,
initfs_start_virt: c.kernel_virt
+ ((exe_pages + stack_pages + heap_pages + 3) * PAGE_2M) as u64,
initfs_end_virt: c.kernel_virt
+ ((exe_pages + stack_pages + heap_pages + 3 + initfs_pages) * PAGE_2M) as u64,
}
}

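Note: reading the slot offsets together, the kernel's level-2 table now lays out exe, stack, heap, and initfs back to back, with one unmapped 2 MiB slot between neighbouring regions (the +1/+2/+3 terms), presumably acting as guard gaps. A standalone sketch that reproduces the same layout arithmetic for illustration:

```rust
const PAGE_2M: u64 = 2 * 1024 * 1024;

/// Number of 2 MiB pages needed to cover `len` bytes (same formula as in the diff).
fn pages_2m(len: u64) -> u64 {
    ((len - 1) / PAGE_2M) + 1
}

/// Recompute the region starts that `build_page_tables` returns, assuming one
/// unmapped 2 MiB guard slot between neighbouring regions.
fn kernel_layout(kernel_virt: u64, exe_len: u64, stack_len: u64, heap_len: u64) -> (u64, u64, u64, u64) {
    let (e, s, h) = (pages_2m(exe_len), pages_2m(stack_len), pages_2m(heap_len));
    (
        kernel_virt,                             // exe_start_virt
        kernel_virt + (e + 1) * PAGE_2M,         // stack_start_virt
        kernel_virt + (e + s + 2) * PAGE_2M,     // heap_start_virt
        kernel_virt + (e + s + h + 3) * PAGE_2M, // initfs_start_virt
    )
}
```

For example, a 4 MiB kernel image with a 2 MiB stack and 2 MiB heap would place the initfs 7 slots (14 MiB) above the kernel's virtual base.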
10 changes: 7 additions & 3 deletions crates/arch/src/lib.rs
@@ -60,20 +60,24 @@ pub mod interrupts {

#[macro_export]
macro_rules! critcal_section {
($($tt:tt)*) => {
($($tt:tt)*) => {{
let _priv_interrupt_before_state =
::arch::registers::eflags::is_interrupts_enable_set();

if _priv_interrupt_before_state {
unsafe { ::arch::interrupts::disable_interrupts() };
}

$($tt)*;
let r = {
$($tt)*
};

if _priv_interrupt_before_state {
unsafe { ::arch::interrupts::enable_interrupts() };
}
};

r
}};
}
}

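Note: the macro body is now evaluated inside an inner block whose value is bound to `r` before interrupts are re-enabled, so `critcal_section!` can be used in expression position. A hypothetical call site, not part of this diff; `TICKS` is an invented example static and the `arch` crate must be available for the macro's internals:

```rust
static mut TICKS: u64 = 0;

fn read_ticks() -> u64 {
    // The whole read happens with interrupts disabled, and the body's value is
    // handed back after the previous interrupt state has been restored.
    let ticks = critcal_section! {
        unsafe { TICKS }
    };
    ticks
}
```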
2 changes: 1 addition & 1 deletion crates/boolvec/src/lib.rs
@@ -30,7 +30,7 @@ use alloc::vec::Vec;

type BackingType = u64;

#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct BoolVec(Vec<BackingType>);

impl BoolVec {
2 changes: 2 additions & 0 deletions crates/mem/src/phys.rs
@@ -225,6 +225,8 @@ impl<const N: usize> PhysMemoryMap<N> {
})
}

// FIXME: This function should be remade; it was written quickly and I just wanted it to work.
// I think at one point it failed to de-overlap some regions, so that may still be possible.
pub fn add_region(&mut self, region: impl MemoryDesc) -> Result<(), crate::MemoryError> {
let kind = region.memory_kind();
let start = region.memory_start();
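Note on the FIXME: "de-overlap" here means that when a new region is added on top of an existing one (for example InitFs carved out of Free memory), the existing region must be split so the map never contains overlapping entries. A minimal, self-contained sketch of that splitting step, using simplified invented types rather than the crate's actual `MemoryDesc`/`PhysMemoryEntry` API:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Region {
    start: u64,
    end: u64, // exclusive
}

/// Carve `new` out of `existing`, returning whatever of `existing` survives
/// (zero, one, or two pieces). This is the invariant `add_region` is meant to keep.
fn deoverlap(existing: Region, new: Region) -> Vec<Region> {
    let mut out = Vec::new();
    if new.start > existing.start {
        out.push(Region { start: existing.start, end: new.start.min(existing.end) });
    }
    if new.end < existing.end {
        out.push(Region { start: new.end.max(existing.start), end: existing.end });
    }
    // Drop empty pieces produced when `new` does not actually overlap.
    out.retain(|r| r.start < r.end);
    out
}

fn main() {
    let free = Region { start: 0x0, end: 0x80_0000 };
    let initfs = Region { start: 0x40_0000, end: 0x60_0000 };
    // The free region is split into the pieces below and above the initfs.
    assert_eq!(
        deoverlap(free, initfs),
        vec![
            Region { start: 0x0, end: 0x40_0000 },
            Region { start: 0x60_0000, end: 0x80_0000 },
        ]
    );
}
```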
35 changes: 32 additions & 3 deletions crates/mem/src/vm.rs
@@ -107,7 +107,7 @@ pub enum PopulationReponse {
PageTablesNotLoaded,
}

pub trait VmInjectFillAction: core::fmt::Debug {
pub trait VmInjectFillAction: core::fmt::Debug + Sync + Send {
/// Populate this page with content from this content's provider
fn populate_page(
&mut self,
@@ -273,6 +273,17 @@ pub struct VmObject {
pub fill_action: RwLock<VmFillAction>,
}

impl Clone for VmObject {
fn clone(&self) -> Self {
Self {
region: self.region,
mappings: self.mappings.clone(),
permissions: self.permissions,
fill_action: RwLock::new(self.fill_action.read().clone()),
}
}
}

/// The type of error given when making a new page
#[derive(Debug)]
pub enum NewVmObjectError {
@@ -324,6 +335,7 @@ impl VmObject {
region: VmRegion,
permissions: VmPermissions,
fill_action: VmFillAction,
override_and_fill: bool,
) -> Result<Arc<RwLock<Self>>, NewVmObjectError> {
let mut new_self = Self {
region,
@@ -337,6 +349,7 @@
.fill_action
.read()
.requests_all_pages_filled(&new_self)
|| override_and_fill
{
for vpage in new_self.region.pages_iter() {
new_self
@@ -501,6 +514,15 @@ pub struct VmProcess {
pub page_tables: Virt2PhysMapping,
}

impl Clone for VmProcess {
fn clone(&self) -> Self {
Self {
objects: RwLock::new(self.objects.read().clone()),
page_tables: self.page_tables.clone(),
}
}
}

impl VmProcess {
/// Init an empty ProcessVM (const fn)
pub const fn new() -> Self {
@@ -562,6 +584,7 @@ impl VmProcess {
region: VmRegion,
permissions: VmPermissions,
fill_action: VmFillAction,
override_and_fill_now: bool,
) -> Result<Arc<RwLock<VmObject>>, InsertVmObjectError> {
// If there is already a region that exists on that virtual address
//
@@ -576,8 +599,14 @@
}

// Construct the object
let obj = VmObject::new(self, region, permissions, fill_action)
.map_err(|obj_err| InsertVmObjectError::VmObjectError(obj_err))?;
let obj = VmObject::new(
self,
region,
permissions,
fill_action,
override_and_fill_now,
)
.map_err(|obj_err| InsertVmObjectError::VmObjectError(obj_err))?;

// Insert the object
self.insert_vm_object(obj.clone())?;
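Note: `VmObject` and `VmProcess` need hand-written `Clone` impls because `RwLock<T>` is not `Clone`; each impl read-locks the field and clones the inner value into a fresh lock. A self-contained analogue of the same pattern, using `spin::RwLock` as these crates do; the struct here is invented purely for illustration:

```rust
use spin::RwLock;

#[derive(Debug)]
struct Tracked {
    name: &'static str,
    flags: RwLock<Vec<u32>>, // RwLock<T> is not Clone, so #[derive(Clone)] won't work
}

impl Clone for Tracked {
    fn clone(&self) -> Self {
        Self {
            name: self.name,
            // Read-lock, clone the inner value, and wrap it in a fresh lock,
            // mirroring the VmObject/VmProcess impls above.
            flags: RwLock::new((*self.flags.read()).clone()),
        }
    }
}
```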
1 change: 1 addition & 0 deletions kernel/Cargo.toml
@@ -16,3 +16,4 @@ arch = {workspace = true}
spin = "0.9.8"
elf = {workspace = true, features = ["alloc"]}
tar = { workspace = true }
boolvec = {workspace = true}
6 changes: 5 additions & 1 deletion kernel/src/context.rs
@@ -28,6 +28,8 @@ use core::{
mem::offset_of,
};

use crate::process::Process;

/// CPUs context
#[repr(C)]
#[derive(Clone, Copy, Debug)]
@@ -84,7 +86,9 @@ impl ProcessContext {
}
}

pub static mut KERNEL_RSP_PTR: u64 = 0x200000000000;
/// The kernel's syscall entry stack
pub static mut KERNEL_RSP_PTR: u64 = Process::KERNEL_SYSCALL_STACK_ADDR.addr() as u64;
/// A tmp for userspace's stack ptr while in kernel land
pub static mut USERSPACE_RSP_PTR: u64 = 0x121212;

#[naked]
6 changes: 5 additions & 1 deletion kernel/src/main.rs
@@ -79,7 +79,7 @@ fn main(kbh: &KernelBootHeader) {
logln!("Running on a(n) '{:?}' processor.", cpu_vender());

gdt::init_kernel_gdt();
gdt::set_stack_for_privl(0x300000000000 as *mut u8, Ring0);
gdt::set_stack_for_privl(Process::KERNEL_IRQ_STACK_ADDR.as_mut_ptr(), Ring0);
unsafe { gdt::load_tss() };
int::attach_interrupts();
int::attach_syscall();
@@ -137,6 +137,7 @@ fn main(kbh: &KernelBootHeader) {
.set_read_flag(true)
.set_write_flag(true)
.set_user_flag(true),
false,
)
.unwrap();
process
@@ -150,6 +151,7 @@
.set_read_flag(true)
.set_write_flag(true)
.set_user_flag(false),
true,
)
.unwrap();
process
@@ -163,6 +165,7 @@
.set_read_flag(true)
.set_write_flag(true)
.set_user_flag(false),
true,
)
.unwrap();
process
@@ -176,6 +179,7 @@
.set_read_flag(true)
.set_write_flag(true)
.set_user_flag(false),
true,
)
.unwrap();
unsafe { process.load_tables() };