Rework paging

Mathieu Strypsteen 2024-07-09 13:13:03 +02:00
parent 1ee034683a
commit feb4675f41

@@ -1,6 +1,9 @@
-use core::panic;
+use core::{
+    panic,
+    sync::atomic::{AtomicBool, Ordering},
+};
-use alloc::boxed::Box;
+use alloc::{boxed::Box, vec::Vec};
 use bitvec::{bitvec, order::Lsb0, vec::BitVec};
 use kernel_common::{
     loader_struct::LoaderStruct,
@@ -20,9 +23,10 @@ extern "C" {
     static _bss_end: u8;
 }
+static PAGING_ACTIVE: AtomicBool = AtomicBool::new(false);
 static HEAP_PHYS_START: Once<u64> = Once::new();
-static CURRENT_PML4: Mutex<Option<&mut PageTable>> = Mutex::new(None);
 static PHYSICAL_FRAMES: Mutex<Option<BitVec<u64>>> = Mutex::new(None);
+static HEAP_PHYS_MAPPING: Mutex<Vec<u64>> = Mutex::new(Vec::new());
 fn _get_free_frame() -> u64 {
     let frames_vec = PHYSICAL_FRAMES.lock();
@@ -33,6 +37,15 @@ fn _get_free_frame() -> u64 {
     }
     panic!("No free memory left");
 }
+fn virt_to_phys(virt: u64) -> u64 {
+    if !PAGING_ACTIVE.load(Ordering::Relaxed) {
+        return virt - KERNEL_HEAP_START + HEAP_PHYS_START.get().unwrap();
+    }
+    assert!(virt >= KERNEL_VIRT_START);
+    assert!(virt < KERNEL_HEAP_START + KERNEL_HEAP_INITIAL_SIZE as u64);
+    let heap_map = HEAP_PHYS_MAPPING.lock();
+    return heap_map[(virt as usize - KERNEL_HEAP_START as usize) / 0x1000] + virt % 0x1000;
+}
 fn get_table_entry(table: &mut PageTable, i: usize) -> &mut PageTable {
     if table.entries_virt[i].is_none() {
         const NONE: Option<Box<PageTable>> = None;
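
The new `virt_to_phys` no longer walks the page tables: `setup_paging` records the physical frame backing each heap page in `HEAP_PHYS_MAPPING` in mapping order, so translation becomes a constant-time vector lookup plus the in-page offset. A minimal sketch of that arithmetic, using a hypothetical `KERNEL_HEAP_START` value since the real constant lives elsewhere in the kernel:

// Sketch only: KERNEL_HEAP_START is a stand-in value, not the kernel's real constant.
const KERNEL_HEAP_START: u64 = 0xFFFF_A000_0000_0000;

// Mirrors the HEAP_PHYS_MAPPING lookup: index by heap page, add the page offset.
fn virt_to_phys_via_table(heap_map: &[u64], virt: u64) -> u64 {
    heap_map[((virt - KERNEL_HEAP_START) / 0x1000) as usize] + virt % 0x1000
}

fn main() {
    // Suppose heap page 0 is backed by frame 0x20_0000 and page 1 by 0x30_3000.
    let heap_map = [0x20_0000, 0x30_3000];
    assert_eq!(virt_to_phys_via_table(&heap_map, KERNEL_HEAP_START + 0x10), 0x20_0010);
    assert_eq!(virt_to_phys_via_table(&heap_map, KERNEL_HEAP_START + 0x1008), 0x30_3008);
}

The `PAGING_ACTIVE` check preserves the old fallback: before the tables are live, the heap is still one linear range at a fixed offset from `HEAP_PHYS_START`.
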
@@ -48,40 +61,27 @@ fn get_table_entry(table: &mut PageTable, i: usize) -> &mut PageTable {
     }
     return table.entries_virt[i].as_mut().unwrap();
 }
-fn get_page(pml4: &mut PageTable, virt: u64) -> Option<&mut PageEntry> {
+fn _get_page(pml4: &PageTable, virt: u64) -> Option<&PageEntry> {
     let virt_page = virt as usize / 0x1000;
     let table_i = virt_page % 512;
     let directory_i = virt_page / 512 % 512;
     let pdpt_i = virt_page / 512 / 512 % 512;
     let pml4_i = virt_page / 512 / 512 / 512 % 512;
-    let pdpt = &mut pml4.entries_virt[pml4_i];
+    let pdpt = &pml4.entries_virt[pml4_i];
     if pdpt.is_some() {
-        let pdpt = pdpt.as_mut().unwrap();
-        let directory = &mut pdpt.entries_virt[pdpt_i];
+        let pdpt = pdpt.as_ref().unwrap();
+        let directory = &pdpt.entries_virt[pdpt_i];
         if directory.is_some() {
-            let directory = directory.as_mut().unwrap();
-            let table = &mut directory.entries_virt[directory_i];
+            let directory = directory.as_ref().unwrap();
+            let table = &directory.entries_virt[directory_i];
             if table.is_some() {
-                let table = table.as_mut().unwrap();
-                return Some(&mut table.entries_phys[table_i]);
+                let table = table.as_ref().unwrap();
+                return Some(&table.entries_phys[table_i]);
             }
         }
     }
     None
 }
-fn virt_to_phys(virt: u64) -> u64 {
-    let mut current_pml4 = CURRENT_PML4.lock();
-    match current_pml4.as_mut() {
-        None => virt - KERNEL_HEAP_START + HEAP_PHYS_START.get().unwrap(),
-        Some(pml4) => {
-            let page = get_page(pml4, virt);
-            match page {
-                None => 0,
-                Some(page) => page.address() * 0x1000 + virt % 0x1000,
-            }
-        }
-    }
-}
 fn map(pml4: &mut PageTable, virt: u64, phys: u64, user: bool, write: bool, exec: bool) {
     if virt < 0x1000 {
         panic!("First page shouldn't be mapped");
@@ -167,10 +167,11 @@ pub fn setup_paging(loader_struct: &LoaderStruct, phys_start: u64, heap_start: u
     }
     for i in 0..KERNEL_HEAP_INITIAL_SIZE / 0x1000 {
         map(pml4, KERNEL_HEAP_START + i as u64 * 0x1000, heap_start + i as u64 * 0x1000, false, true, false);
+        let mut heap_map = HEAP_PHYS_MAPPING.lock();
+        heap_map.push(heap_start + i as u64 * 0x1000);
     }
     unsafe {
         load_cr3(virt_to_phys(pml4 as *const PageTable as u64));
     }
-    let mut current_pml4 = CURRENT_PML4.lock();
-    *current_pml4 = Some(pml4);
+    PAGING_ACTIVE.store(true, Ordering::Relaxed);
 }
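
One ordering detail in `setup_paging` is worth noting: `load_cr3` translates the PML4's address while `PAGING_ACTIVE` is still false, so that call goes through the linear `HEAP_PHYS_START` offset path; only after CR3 is loaded does the flag flip and route later translations through `HEAP_PHYS_MAPPING`. A runnable sketch of that two-phase behavior, with stand-in constants and `std` sync types in place of the kernel's own (`HEAP_PHYS_START` is passed as a parameter here):

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Mutex;

const KERNEL_HEAP_START: u64 = 0xFFFF_A000_0000_0000; // stand-in value
static PAGING_ACTIVE: AtomicBool = AtomicBool::new(false);
static HEAP_PHYS_MAPPING: Mutex<Vec<u64>> = Mutex::new(Vec::new());

// Same shape as the commit's virt_to_phys: offset path before the flag
// flips, table path after.
fn virt_to_phys(heap_phys_start: u64, virt: u64) -> u64 {
    if !PAGING_ACTIVE.load(Ordering::Relaxed) {
        return virt - KERNEL_HEAP_START + heap_phys_start;
    }
    let heap_map = HEAP_PHYS_MAPPING.lock().unwrap();
    heap_map[((virt - KERNEL_HEAP_START) / 0x1000) as usize] + virt % 0x1000
}

fn main() {
    let heap_phys_start = 0x10_0000;
    // setup_paging records each heap page's frame while mapping it...
    HEAP_PHYS_MAPPING.lock().unwrap().extend([0x10_0000, 0x10_1000]);
    // ...translates the PML4 via the offset path while the flag is still false...
    let before = virt_to_phys(heap_phys_start, KERNEL_HEAP_START + 0x1000);
    // ...and only then marks paging active.
    PAGING_ACTIVE.store(true, Ordering::Relaxed);
    let after = virt_to_phys(heap_phys_start, KERNEL_HEAP_START + 0x1000);
    assert_eq!(before, after); // the initial mapping is linear, so both paths agree
}
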