This commit is contained in: parent 1c796d75b5, commit 5e934e54fc.
9 changed files with 169 additions and 166 deletions.
@@ -25,14 +25,130 @@ extern "C" {
     static _bss_end: u8;
 }
 
+pub struct AddressSpace {
+    pub pml4: Option<Box<PageTable>>,
+}
+
 static PAGING_ACTIVE: AtomicBool = AtomicBool::new(false);
-pub static CURRENT_PML4: Spinlock<Option<&mut PageTable>> = Spinlock::new(None);
+pub static CURRENT_ADDRESS_SPACE: Spinlock<AddressSpace> = Spinlock::new(AddressSpace { pml4: None });
 static HEAP_PHYS_START: AtomicU64 = AtomicU64::new(0);
 static PHYSICAL_FRAMES: Spinlock<Option<BitVec<u64>>> = Spinlock::new(None);
 static HEAP_PHYS_MAPPING: Spinlock<Vec<u64>> = Spinlock::new(Vec::new());
 const KERNEL_MAPPINGS_START: u64 = 0xfffffffd00000000;
 const KERNEL_MAPPINGS_END: u64 = 0xfffffffe00000000;
 
+impl AddressSpace {
+    fn get_page(&mut self, virt: u64) -> Option<&mut PageEntry> {
+        // Split the virtual page number into the four 9-bit table indices.
+        let virt_page = virt as usize / 0x1000;
+        let table_i = virt_page % 512;
+        let directory_i = virt_page / 512 % 512;
+        let pdpt_i = virt_page / 512 / 512 % 512;
+        let pml4_i = virt_page / 512 / 512 / 512 % 512;
+        let pdpt = &mut self.pml4.as_mut().unwrap().entries_virt[pml4_i];
+        if let Some(pdpt) = pdpt {
+            let directory = &mut pdpt.entries_virt[pdpt_i];
+            if let Some(directory) = directory {
+                let table = &mut directory.entries_virt[directory_i];
+                if let Some(table) = table {
+                    if table.entries_phys[table_i].present() == 0 {
+                        return None;
+                    }
+                    return Some(&mut table.entries_phys[table_i]);
+                }
+            }
+        }
+        None
+    }
+
+    pub fn find_free_virt_range(&mut self, mut start: u64, end: u64, size: u64) -> u64 {
+        assert_eq!(start % 0x1000, 0);
+        assert_eq!(end % 0x1000, 0);
+        assert_eq!(size % 0x1000, 0);
+        while start < end - size {
+            let mut free = true;
+            // Scan one page at a time; on a collision, restart just past it.
+            for i in 0..size / 0x1000 {
+                if self.get_page(start + i * 0x1000).is_some() {
+                    free = false;
+                    start += (i + 1) * 0x1000;
+                    break;
+                }
+            }
+            if free {
+                return start;
+            }
+        }
+        panic!("No free range found");
+    }
+
+    fn map(&mut self, virt: u64, phys: u64, user: bool, write: bool, exec: bool, cache_disable: bool) {
+        assert!(virt >= 0x1000, "First page shouldn't be mapped");
+        assert!(!write || !exec || virt == 0x1000);
+        {
+            // Mark the backing frame as used in the physical frame bitmap.
+            let mut frames_vec = PHYSICAL_FRAMES.lock();
+            let frame = phys as usize / 0x1000;
+            if frame < frames_vec.as_ref().unwrap().len() {
+                frames_vec.as_mut().unwrap().set(frame, true);
+            }
+        }
+        let virt_page = virt as usize / 0x1000;
+        let table_i = virt_page % 512;
+        let directory_i = virt_page / 512 % 512;
+        let pdpt_i = virt_page / 512 / 512 % 512;
+        let pml4_i = virt_page / 512 / 512 / 512 % 512;
+        let pdpt = get_table_entry(self.pml4.as_mut().unwrap(), pml4_i);
+        let directory = get_table_entry(pdpt, pdpt_i);
+        let table = get_table_entry(directory, directory_i);
+        let should_invalidate = table.entries_phys[table_i].present() == 1;
+        table.entries_phys[table_i].set_address(phys / 0x1000);
+        table.entries_phys[table_i].set_user(user as u64);
+        table.entries_phys[table_i].set_write(write as u64);
+        table.entries_phys[table_i].set_execute_disable(!exec as u64);
+        table.entries_phys[table_i].set_cache_disable(cache_disable as u64);
+        table.entries_phys[table_i].set_present(1);
+        if should_invalidate {
+            invlpg(virt);
+        }
+    }
+
+    pub unsafe fn unmap(&mut self, address: u64) {
+        let page = self.get_page(address);
+        assert!(page.is_some(), "Page isn't mapped");
+        if let Some(page) = page {
+            page.set_present(0);
+            page.set_address(0);
+            invlpg(address);
+        }
+    }
+
+    pub unsafe fn map_range(&mut self, virt_start: u64, phys_start: u64, size: u64, user: bool, write: bool, exec: bool, cache_disable: bool) {
+        assert_eq!(virt_start % 0x1000, 0);
+        assert_eq!(phys_start % 0x1000, 0);
+        assert_eq!(size % 0x1000, 0);
+        for i in 0..size / 0x1000 {
+            self.map(virt_start + i * 0x1000, phys_start + i * 0x1000, user, write, exec, cache_disable);
+        }
+    }
+
+    pub unsafe fn map_physical(&mut self, phys: u64, mut size: u64, write_combining: bool) -> u64 {
+        // TODO: Implement WC
+        // Round the range out to page boundaries, keeping the sub-page
+        // offset so the caller gets back an equivalent pointer.
+        let phys_offset = phys % 0x1000;
+        let phys_end = (phys + size + 0xfff) / 0x1000 * 0x1000;
+        let phys_start = phys - phys_offset;
+        size = phys_end - phys_start;
+        let virt_start = self.find_free_virt_range(KERNEL_MAPPINGS_START, KERNEL_MAPPINGS_END, size);
+        let cache_disable = !write_combining;
+        unsafe {
+            self.map_range(virt_start, phys_start, size, false, true, false, cache_disable);
+        }
+        virt_start + phys_offset
+    }
+
+    pub unsafe fn unmap_physical(&mut self, address: u64, size: u64) {
+        let end = (address + size + 0xfff) / 0x1000 * 0x1000;
+        let start = address / 0x1000 * 0x1000;
+        let size = end - start;
+        for i in 0..size / 0x1000 {
+            unsafe {
+                self.unmap(start + i * 0x1000);
+            }
+        }
+    }
+}
+
 fn _get_free_frame() -> u64 {
     let frames_vec = PHYSICAL_FRAMES.lock();
     for i in 0..frames_vec.as_ref().unwrap().len() {
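The index arithmetic in `get_page` and `map` mirrors x86-64 four-level translation: each level consumes 9 bits of the virtual page number, so repeated division by 512 with `% 512` yields the PT, PD, PDPT, and PML4 indices. A standalone sketch of the same decomposition (the `split_indices` helper is illustrative, not part of this commit):

fn split_indices(virt: u64) -> (usize, usize, usize, usize) {
    let page = (virt / 0x1000) as usize;
    let pt = page % 512;                     // bits 12..21
    let pd = page / 512 % 512;               // bits 21..30
    let pdpt = page / 512 / 512 % 512;       // bits 30..39
    let pml4 = page / 512 / 512 / 512 % 512; // bits 39..48
    (pml4, pdpt, pd, pt)
}

fn main() {
    // KERNEL_MAPPINGS_START from the hunk above lands at PML4 511, PDPT 500.
    assert_eq!(split_indices(0xfffffffd00000000), (511, 500, 0, 0));
}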
@@ -49,20 +165,19 @@ fn invlpg(addr: u64) {
     smp_invalidate_tlb();
 }
 
 pub fn virt_to_phys(virt: u64) -> u64 {
     assert!(virt >= KERNEL_HEAP_START);
     assert!(virt < KERNEL_HEAP_START + KERNEL_HEAP_INITIAL_SIZE as u64);
     if !PAGING_ACTIVE.load(Ordering::SeqCst) {
         return virt - KERNEL_HEAP_START + HEAP_PHYS_START.load(Ordering::SeqCst);
     }
     assert!(virt >= KERNEL_HEAP_START);
     assert!(virt < KERNEL_HEAP_START + KERNEL_HEAP_INITIAL_SIZE as u64);
     let heap_map = HEAP_PHYS_MAPPING.lock();
     heap_map[(virt as usize - KERNEL_HEAP_START as usize) / 0x1000] + virt % 0x1000
 }
 
 fn get_table_entry(table: &mut PageTable, i: usize) -> &mut PageTable {
     if table.entries_virt[i].is_none() {
-        const NONE: Option<Box<PageTable>> = None;
         let new_table = Box::new(PageTable {
             entries_phys: [PageEntry(0); 512],
-            entries_virt: [NONE; 512],
+            entries_virt: [const { None }; 512],
         });
         table.entries_phys[i].set_address(virt_to_phys(new_table.as_ref() as *const PageTable as u64) / 0x1000);
         table.entries_virt[i] = Some(new_table);
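The `[const { None }; 512]` change works because array-repeat expressions require `Copy` unless the operand is a constant; `Option<Box<PageTable>>` is not `Copy`, so the old code routed through a named `const` item, which inline `const` blocks (stable since Rust 1.79) make unnecessary. A minimal illustration outside this codebase:

struct Node; // stand-in for a non-Copy element type

fn build() -> [Option<Box<Node>>; 4] {
    // `[None; 4]` alone would fail: Option<Box<Node>> is not Copy.
    const NONE: Option<Box<Node>> = None; // old workaround: a named const
    let _old = [NONE; 4];
    [const { None }; 4] // new form, identical semantics
}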
@@ -72,118 +187,6 @@ fn get_table_entry(table: &mut PageTable, i: usize) -> &mut PageTable {
     }
     table.entries_virt[i].as_mut().unwrap()
 }
 
-fn get_page(pml4: &mut PageTable, virt: u64) -> Option<&mut PageEntry> {
-    let virt_page = virt as usize / 0x1000;
-    let table_i = virt_page % 512;
-    let directory_i = virt_page / 512 % 512;
-    let pdpt_i = virt_page / 512 / 512 % 512;
-    let pml4_i = virt_page / 512 / 512 / 512 % 512;
-    let pdpt = &mut pml4.entries_virt[pml4_i];
-    if let Some(pdpt) = pdpt {
-        let directory = &mut pdpt.entries_virt[pdpt_i];
-        if let Some(directory) = directory {
-            let table = &mut directory.entries_virt[directory_i];
-            if let Some(table) = table {
-                if table.entries_phys[table_i].present() == 0 {
-                    return None;
-                }
-                return Some(&mut table.entries_phys[table_i]);
-            }
-        }
-    }
-    None
-}
-
-pub fn find_free_virt_range(mut start: u64, end: u64, size: u64) -> u64 {
-    assert_eq!(start % 0x1000, 0);
-    assert_eq!(end % 0x1000, 0);
-    assert_eq!(size % 0x1000, 0);
-    let mut current_pml4 = CURRENT_PML4.lock();
-    while start < end - size {
-        let mut free = true;
-        for i in 0..size {
-            if get_page(current_pml4.as_mut().unwrap(), start + i).is_some() {
-                free = false;
-                start += (i + 1) * 0x1000;
-                break;
-            }
-        }
-        if free {
-            return start;
-        }
-    }
-    panic!("No free range found");
-}
-
-fn map(pml4: &mut PageTable, virt: u64, phys: u64, user: bool, write: bool, exec: bool, cache_disable: bool) {
-    assert!(virt >= 0x1000, "First page shouldn't be mapped");
-    assert!(!write || !exec || virt == 0x1000);
-    {
-        let mut frames_vec = PHYSICAL_FRAMES.lock();
-        let frame = phys as usize / 0x1000;
-        if frame < frames_vec.as_ref().unwrap().len() {
-            frames_vec.as_mut().unwrap().set(frame, true);
-        }
-    }
-    let virt_page = virt as usize / 0x1000;
-    let table_i = virt_page % 512;
-    let directory_i = virt_page / 512 % 512;
-    let pdpt_i = virt_page / 512 / 512 % 512;
-    let pml4_i = virt_page / 512 / 512 / 512 % 512;
-    let pdpt = get_table_entry(pml4, pml4_i);
-    let directory = get_table_entry(pdpt, pdpt_i);
-    let table = get_table_entry(directory, directory_i);
-    let should_invalidate = table.entries_phys[table_i].present() == 1;
-    table.entries_phys[table_i].set_address(phys / 0x1000);
-    table.entries_phys[table_i].set_user(user as u64);
-    table.entries_phys[table_i].set_write(write as u64);
-    table.entries_phys[table_i].set_execute_disable(!exec as u64);
-    table.entries_phys[table_i].set_cache_disable(cache_disable as u64);
-    table.entries_phys[table_i].set_present(1);
-    if should_invalidate {
-        invlpg(virt);
-    }
-}
-
-pub unsafe fn unmap(address: u64) {
-    let mut current_pml4 = CURRENT_PML4.lock();
-    let page = get_page(current_pml4.as_mut().unwrap(), address);
-    assert!(page.is_some(), "Page isn't mapped");
-    if let Some(page) = page {
-        page.set_present(0);
-        page.set_address(0);
-        invlpg(address);
-    }
-}
-
-pub unsafe fn map_range(virt_start: u64, phys_start: u64, size: u64, user: bool, write: bool, exec: bool, cache_disable: bool) {
-    assert_eq!(virt_start % 0x1000, 0);
-    assert_eq!(phys_start % 0x1000, 0);
-    assert_eq!(size % 0x1000, 0);
-    let mut current_pml4 = CURRENT_PML4.lock();
-    for i in 0..size / 0x1000 {
-        map(current_pml4.as_mut().unwrap(), virt_start + i * 0x1000, phys_start + i * 0x1000, user, write, exec, cache_disable);
-    }
-}
-
-pub unsafe fn map_physical(phys: u64, mut size: u64, write_combining: bool) -> u64 {
-    // TODO: Implement WC
-    let phys_offset = phys % 0x1000;
-    let phys_end = (phys + size + 0xfff) / 0x1000 * 0x1000;
-    let phys_start = phys - phys_offset;
-    size = phys_end - phys_start;
-    let virt_start = find_free_virt_range(KERNEL_MAPPINGS_START, KERNEL_MAPPINGS_END, size);
-    let cache_disable = !write_combining;
-    unsafe {
-        map_range(virt_start, phys_start, size, false, true, false, cache_disable);
-    }
-    virt_start + phys_offset
-}
-
-pub unsafe fn unmap_physical(address: u64, size: u64) {
-    let end = (address + size + 0xfff) / 0x1000 * 0x1000;
-    let start = address / 0x1000 * 0x1000;
-    let size = end - start;
-    for i in 0..size / 0x1000 {
-        unsafe {
-            unmap(start + i * 0x1000);
-        }
-    }
-}
 
 pub fn setup_paging(loader_struct: &LoaderStruct, phys_start: u64, heap_start: u64) {
     HEAP_PHYS_START.store(heap_start, Ordering::SeqCst);
     let mut memory_size = 0;
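The seven free functions above are deleted outright rather than kept as wrappers, so every caller in the remaining files switches from a direct call to a method behind the `CURRENT_ADDRESS_SPACE` lock. The migration pattern, sketched with a placeholder address (`MMIO_PHYS` is illustrative):

// Before: free function, global PML4 taken implicitly.
// let virt = unsafe { map_physical(MMIO_PHYS, 0x1000, false) };

// After: method on the globally locked address space.
let virt = unsafe { CURRENT_ADDRESS_SPACE.lock().map_physical(MMIO_PHYS, 0x1000, false) };

One consequence worth noting: the guard returned by `lock()` lives only for the statement, so code that already holds the lock (as `setup_paging` and `start_aps` below do) must call the methods on the held guard rather than locking again, or a spinlock of this shape would deadlock.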
@@ -224,11 +227,13 @@ pub fn setup_paging(loader_struct: &LoaderStruct, phys_start: u64, heap_start: u
         bss_start = &_bss_start as *const u8 as u64 / 0x1000;
         bss_end = (&_bss_end as *const u8 as u64 + 0xfff) / 0x1000;
     }
-    const NONE: Option<Box<PageTable>> = None;
-    let pml4 = Box::leak(Box::new(PageTable {
+    let mut address_space = CURRENT_ADDRESS_SPACE.lock();
+    address_space.pml4 = Some(Box::new(PageTable {
         entries_phys: [PageEntry(0); 512],
-        entries_virt: [NONE; 512],
+        entries_virt: [const { None }; 512],
     }));
+    let pml4 = address_space.pml4.as_mut().unwrap();
+    let pml4_address = pml4.as_ref() as *const PageTable as u64;
     for i in 256..512 {
         get_table_entry(pml4, i);
         pml4.entries_phys[i].set_user(0);

@@ -237,26 +242,26 @@ pub fn setup_paging(loader_struct: &LoaderStruct, phys_start: u64, heap_start: u
         }
     }
     for i in text_start..text_end {
-        map(pml4, i * 0x1000, i * 0x1000 - KERNEL_VIRT_START + phys_start, false, false, true, false);
+        address_space.map(i * 0x1000, i * 0x1000 - KERNEL_VIRT_START + phys_start, false, false, true, false);
     }
     for i in rodata_start..rodata_end {
-        map(pml4, i * 0x1000, i * 0x1000 - KERNEL_VIRT_START + phys_start, false, false, false, false);
+        address_space.map(i * 0x1000, i * 0x1000 - KERNEL_VIRT_START + phys_start, false, false, false, false);
     }
     for i in data_start..data_end {
-        map(pml4, i * 0x1000, i * 0x1000 - KERNEL_VIRT_START + phys_start, false, true, false, false);
+        address_space.map(i * 0x1000, i * 0x1000 - KERNEL_VIRT_START + phys_start, false, true, false, false);
     }
     for i in bss_start..bss_end {
-        map(pml4, i * 0x1000, i * 0x1000 - KERNEL_VIRT_START + phys_start, false, true, false, false);
+        address_space.map(i * 0x1000, i * 0x1000 - KERNEL_VIRT_START + phys_start, false, true, false, false);
     }
-    for i in 0..KERNEL_HEAP_INITIAL_SIZE / 0x1000 {
-        map(pml4, KERNEL_HEAP_START + i as u64 * 0x1000, heap_start + i as u64 * 0x1000, false, true, false, false);
     {
         let mut heap_map = HEAP_PHYS_MAPPING.lock();
-        heap_map.push(heap_start + i as u64 * 0x1000);
+        for i in 0..KERNEL_HEAP_INITIAL_SIZE / 0x1000 {
+            address_space.map(KERNEL_HEAP_START + i as u64 * 0x1000, heap_start + i as u64 * 0x1000, false, true, false, false);
+            heap_map.push(heap_start + i as u64 * 0x1000);
         }
     }
     unsafe {
-        load_cr3(virt_to_phys(pml4 as *const PageTable as u64));
+        load_cr3(virt_to_phys(pml4_address));
     }
-    let mut current_pml4 = CURRENT_PML4.lock();
-    *current_pml4 = Some(pml4);
     PAGING_ACTIVE.store(true, Ordering::SeqCst);
 }
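`map_physical` and `unmap_physical` lean on two rounding identities used throughout this file: `(x + 0xfff) / 0x1000 * 0x1000` rounds up to the next page boundary and `x / 0x1000 * 0x1000` rounds down. A self-contained check (the register-block address is a made-up example):

const PAGE: u64 = 0x1000;

fn round_up(x: u64) -> u64 { (x + PAGE - 1) / PAGE * PAGE }
fn round_down(x: u64) -> u64 { x / PAGE * PAGE }

fn main() {
    // A hypothetical 0x20-byte register block at an unaligned address
    // expands to one whole page; the sub-page offset is preserved.
    let (phys, size) = (0xfee0_02c4u64, 0x20u64);
    let start = round_down(phys);
    let end = round_up(phys + size);
    assert_eq!(start, 0xfee0_0000);
    assert_eq!(end, 0xfee0_1000);
    assert_eq!((end - start) / PAGE, 1); // pages actually mapped
}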
@@ -7,7 +7,7 @@ use embedded_graphics::{
 };
 use kernel_common::loader_struct::FramebufferInfo;
 
-use crate::{cpu::paging::map_physical, misc::draw_target::FramebufferTarget, sys::locks::Spinlock};
+use crate::{cpu::paging::CURRENT_ADDRESS_SPACE, misc::draw_target::FramebufferTarget, sys::locks::Spinlock};
 
 pub struct Display {
     framebuffer: FramebufferTarget,

@@ -63,7 +63,7 @@ impl Display {
     }
 }
 
 pub fn setup_display(info: FramebufferInfo) {
-    let addr = unsafe { map_physical(info.address, info.height * info.stride * 4, true) };
+    let addr = unsafe { CURRENT_ADDRESS_SPACE.lock().map_physical(info.address, info.height * info.stride * 4, true) };
     let fb = vec![0; info.height as usize * info.stride as usize * 4];
     let display = Display {
         framebuffer: FramebufferTarget {
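The mapped size is `height * stride * 4`: `stride` is the scanline length in pixels, which can exceed the visible width, and the `* 4` assumes 32-bit pixels. Under those assumptions, addressing a pixel in the mapped region looks like this sketch (the `put_pixel` helper is illustrative, not part of this commit):

// Illustrative only: write one 32-bit pixel into a mapped framebuffer.
unsafe fn put_pixel(base: *mut u32, stride: u64, x: u64, y: u64, color: u32) {
    // One scanline is `stride` pixels, even if only `width` are visible.
    unsafe { base.add((y * stride + x) as usize).write_volatile(color) };
}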
@@ -13,7 +13,7 @@ use kernel_common::{
 };
 
 use crate::{
-    cpu::paging::{map_physical, unmap_physical},
+    cpu::paging::CURRENT_ADDRESS_SPACE,
     misc::wrapped_alloc::{wrapped_alloc, wrapped_dealloc},
     sys::ioapic::{register_irq_handler, set_irq_override},
     RSDP_ADDRESS,

@@ -117,7 +117,7 @@ extern "C" fn AcpiOsInstallInterruptHandler(gsi: UINT32, handler: ACPI_OSD_HANDL
 }
 
 #[no_mangle]
 extern "C" fn AcpiOsMapMemory(phys: ACPI_PHYSICAL_ADDRESS, size: ACPI_SIZE) -> *mut c_void {
-    unsafe { map_physical(phys, size, false) as *mut c_void }
+    unsafe { CURRENT_ADDRESS_SPACE.lock().map_physical(phys, size, false) as *mut c_void }
 }
 
 #[no_mangle]
 extern "C" fn AcpiOsPhysicalTableOverride(_existing: *mut ACPI_TABLE_HEADER, new_address: *mut ACPI_PHYSICAL_ADDRESS, new_length: *mut UINT32) -> ACPI_STATUS {

@@ -200,7 +200,7 @@ extern "C" fn AcpiOsTerminate() -> ACPI_STATUS {
 #[no_mangle]
 extern "C" fn AcpiOsUnmapMemory(address: *mut c_void, size: ACPI_SIZE) {
     unsafe {
-        unmap_physical(address as u64, size);
+        CURRENT_ADDRESS_SPACE.lock().unmap_physical(address as u64, size);
     }
 }
 
 #[no_mangle]
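ACPICA hands `AcpiOsMapMemory` arbitrary, often unaligned physical addresses; `map_physical` preserving the sub-page offset is what makes the returned pointer usable directly, and `unmap_physical` re-derives the page range by rounding both ends back out. A self-contained check of that round-trip (the addresses are made up):

fn main() {
    let (phys, size) = (0x7fe1_5a34u64, 0x30u64);
    let offset = phys % 0x1000;
    let pages = ((phys + size + 0xfff) / 0x1000 * 0x1000 - (phys - offset)) / 0x1000;
    assert_eq!(offset, 0xa34); // preserved in the returned pointer
    assert_eq!(pages, 1);      // one page covers the whole unaligned range
}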
@@ -2,7 +2,7 @@ use core::ptr::NonNull;
 
 use acpi::{AcpiHandler, PhysicalMapping};
 
-use crate::cpu::paging::{map_physical, unmap_physical};
+use crate::cpu::paging::CURRENT_ADDRESS_SPACE;
 
 #[derive(Clone, Copy)]
 pub struct EarlyACPIHandler {}

@@ -10,13 +10,13 @@ pub struct EarlyACPIHandler {}
 impl AcpiHandler for EarlyACPIHandler {
     unsafe fn map_physical_region<T>(&self, phys: usize, size: usize) -> PhysicalMapping<Self, T> {
         unsafe {
-            let virt = map_physical(phys as u64, size as u64, false);
+            let virt = CURRENT_ADDRESS_SPACE.lock().map_physical(phys as u64, size as u64, false);
             PhysicalMapping::new(phys, NonNull::new(virt as *mut T).unwrap(), size, size, *self)
         }
     }
 
     fn unmap_physical_region<T>(region: &PhysicalMapping<Self, T>) {
         unsafe {
-            unmap_physical(region.virtual_start().as_ptr() as u64, region.mapped_length() as u64);
+            CURRENT_ADDRESS_SPACE.lock().unmap_physical(region.virtual_start().as_ptr() as u64, region.mapped_length() as u64);
         }
     }
 }
@@ -7,7 +7,7 @@ use acpi::{AcpiTables, HpetInfo};
 use alloc::vec::Vec;
 use kernel_common::instructions::pause;
 
-use crate::cpu::paging::map_physical;
+use crate::cpu::paging::CURRENT_ADDRESS_SPACE;
 
 use super::{
     early_acpi::EarlyACPIHandler,

@@ -118,7 +118,7 @@ pub fn sleep_internal(task: Task) {
 }
 
 pub fn setup_hpet(tables: &AcpiTables<EarlyACPIHandler>) {
     let hpet_info = HpetInfo::new(tables).unwrap();
-    let address = unsafe { map_physical(hpet_info.base_address as u64, 0x200, false) } as *mut u64;
+    let address = unsafe { CURRENT_ADDRESS_SPACE.lock().map_physical(hpet_info.base_address as u64, 0x200, false) } as *mut u64;
     ADDRESS.store(address, Ordering::SeqCst);
     let period = unsafe { address.add(REGISTER_CAPABILITIES).read_volatile() >> 32 } as usize;
     PERIOD.store(period, Ordering::SeqCst);
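Per the HPET specification, bits 63:32 of the capabilities register hold the main-counter tick period in femtoseconds, which is what the `>> 32` extracts. Converting a period to a counter frequency, for illustration:

fn main() {
    let period_fs: u64 = 100_000_000; // 100 ns tick, i.e. a 10 MHz HPET
    let freq_hz = 1_000_000_000_000_000 / period_fs; // 1e15 fs per second
    assert_eq!(freq_hz, 10_000_000);
}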
@@ -6,7 +6,7 @@ use core::{
 use bitfield::bitfield;
 
 use crate::{
-    cpu::{isr::ISR_HANDLERS, paging::map_physical},
+    cpu::{isr::ISR_HANDLERS, paging::CURRENT_ADDRESS_SPACE},
     sys::lapic::BSP_LAPIC_ID,
 };

@@ -98,7 +98,7 @@ pub fn register_irq_handler(vector: usize, handler: fn()) {
     }
 }
 
 pub fn setup_ioapic(phys: u64, gsi_base: usize) {
-    let address = unsafe { map_physical(phys, 0x14, false) as *mut u32 };
+    let address = unsafe { CURRENT_ADDRESS_SPACE.lock().map_physical(phys, 0x14, false) as *mut u32 };
     let next_id = NEXT_IOAPIC_ID.fetch_add(1, Ordering::SeqCst);
     IOAPICS[next_id].address.store(address, Ordering::SeqCst);
     IOAPICS[next_id].start_gsi.store(gsi_base, Ordering::SeqCst);
@@ -5,7 +5,7 @@ use core::{
 
 use kernel_common::instructions::get_rflags;
 
-use crate::cpu::{isr::ISR_SCHEDULER, paging::map_physical};
+use crate::cpu::{isr::ISR_SCHEDULER, paging::CURRENT_ADDRESS_SPACE};
 
 use super::hpet::sleep;

@@ -49,7 +49,7 @@ pub fn get_current_lapic_id() -> usize {
 }
 
 pub fn setup_lapic(phys: u64) {
     if phys != 0 {
-        let address = unsafe { map_physical(phys, 0x400, false) as *mut u32 };
+        let address = unsafe { CURRENT_ADDRESS_SPACE.lock().map_physical(phys, 0x400, false) as *mut u32 };
         ADDRESS.store(address, Ordering::SeqCst);
         BSP_LAPIC_ID.store(get_current_lapic_id(), Ordering::SeqCst);
     }
@@ -6,7 +6,7 @@ use kernel_common::{instructions::pause, paging::PageTable};
 use crate::{
     cpu::{
         isr::ISR_INVALIDATE_TLB,
-        paging::{map_range, unmap, virt_to_phys, CURRENT_PML4},
+        paging::{virt_to_phys, CURRENT_ADDRESS_SPACE},
     },
     BROADCASTED_PANIC,
 };

@@ -34,23 +34,23 @@ pub fn start_aps() {
     let stack: Vec<u8> = vec![0; STACK_SIZE];
     let pml4_phys_addr;
     {
-        let pml4 = CURRENT_PML4.lock();
-        let pml4 = pml4.as_ref().unwrap();
-        let pml4_phys_addr_u64 = virt_to_phys(*pml4 as *const PageTable as u64);
+        let mut address_space = CURRENT_ADDRESS_SPACE.lock();
+        let pml4 = &address_space.pml4;
+        let pml4_phys_addr_u64 = virt_to_phys(pml4.as_ref().unwrap().as_ref() as *const PageTable as u64);
         pml4_phys_addr = u32::try_from(pml4_phys_addr_u64).unwrap();
-    }
-    unsafe {
-        map_range(0x1000, 0x1000, 0x1000, false, true, false, false);
-        let dest_ptr = 0x1000 as *mut u8;
-        let src_ptr = ap_trampoline as *const u8;
-        copy(src_ptr, dest_ptr, 0x1000);
-        let pml4_offset = (&raw const trampoline_pml4).offset_from(src_ptr);
-        let pml4_addr = (0x1000 + pml4_offset) as *mut u32;
-        *pml4_addr = pml4_phys_addr;
-        let stack_offset = (&raw const trampoline_stack).offset_from(src_ptr);
-        let stack_addr = (0x1000 + stack_offset) as *mut u64;
-        *stack_addr = stack.as_ptr() as u64 + STACK_SIZE as u64;
-        map_range(0x1000, 0x1000, 0x1000, false, true, true, false);
+        unsafe {
+            address_space.map_range(0x1000, 0x1000, 0x1000, false, true, false, false);
+            let dest_ptr = 0x1000 as *mut u8;
+            let src_ptr = ap_trampoline as *const u8;
+            copy(src_ptr, dest_ptr, 0x1000);
+            let pml4_offset = (&raw const trampoline_pml4).offset_from(src_ptr);
+            let pml4_addr = (0x1000 + pml4_offset) as *mut u32;
+            *pml4_addr = pml4_phys_addr;
+            let stack_offset = (&raw const trampoline_stack).offset_from(src_ptr);
+            let stack_addr = (0x1000 + stack_offset) as *mut u64;
+            *stack_addr = stack.as_ptr() as u64 + STACK_SIZE as u64;
+            address_space.map_range(0x1000, 0x1000, 0x1000, false, true, true, false);
+        }
     }
     for i in 0..NEXT_LAPIC_ID.load(Ordering::SeqCst) {
         let lapic_id = LAPICS[i].lapic_id.load(Ordering::SeqCst);

@@ -67,7 +67,7 @@ pub fn start_aps() {
     }
     ALL_APS_STARTED.store(true, Ordering::SeqCst);
     unsafe {
-        unmap(0x1000);
+        CURRENT_ADDRESS_SPACE.lock().unmap(0x1000);
     }
 }
 
 pub fn smp_invalidate_tlb() {
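The AP trampoline page is the one place where the `virt == 0x1000` escape hatch in `map`'s `assert!(!write || !exec || virt == 0x1000)` matters: the page is first mapped writable (not executable) so the trampoline code, PML4 address, and stack pointer can be copied in, then remapped with both write and execute set so the APs can run it, and finally unmapped once all APs have started. The lifecycle, as an illustrative summary:

// 1. map_range(0x1000, ..., write=true, exec=false)  -> copy + patch trampoline
// 2. map_range(0x1000, ..., write=true, exec=true)   -> remap; the present bit
//    was already set, so `should_invalidate` triggers invlpg
// 3. unmap(0x1000) after ALL_APS_STARTED              -> page is free again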
@@ -9,10 +9,9 @@ use uefi::{
 
 fn get_table_entry(table: &mut PageTable, i: usize) -> &mut PageTable {
     if table.entries_virt[i].is_none() {
-        const NONE: Option<Box<PageTable>> = None;
         let new_table = Box::new(PageTable {
             entries_phys: [PageEntry(0); 512],
-            entries_virt: [NONE; 512],
+            entries_virt: [const { None }; 512],
         });
         table.entries_phys[i].set_address(new_table.as_ref() as *const PageTable as u64 / 0x1000);
         table.entries_virt[i] = Some(new_table);

@@ -36,10 +35,9 @@ pub fn map(pml4: &mut PageTable, virt: u64, phys: u64, write: bool, exec: bool)
     table.entries_phys[table_i].set_present(1);
 }
 
 pub fn setup_paging(memory_map: &mut MemoryMapOwned, heap_start: u64) -> &mut PageTable {
-    const NONE: Option<Box<PageTable>> = None;
     let pml4 = Box::leak(Box::new(PageTable {
         entries_phys: [PageEntry(0); 512],
-        entries_virt: [NONE; 512],
+        entries_virt: [const { None }; 512],
     }));
     for i in memory_map.entries() {
         if i.ty == MemoryType::LOADER_CODE
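Note the difference from the kernel's `get_table_entry`: the bootloader stores the new table's address directly, since UEFI boot services run identity-mapped, while the kernel must translate its heap virtual address through `virt_to_phys` first. Side by side (illustrative, with `table_addr` standing in for the `new_table` pointer cast):

// Bootloader (identity-mapped): physical address == virtual address.
entry.set_address(table_addr / 0x1000);
// Kernel (higher-half heap): translate the virtual address first.
entry.set_address(virt_to_phys(table_addr) / 0x1000);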