Refactor

Mathieu Strypsteen 2024-12-13 16:21:39 +01:00
parent 3cc8aa848a
commit c6e6d3886b
5 changed files with 113 additions and 103 deletions


@@ -13,7 +13,7 @@ use kernel_common::{
 use log::info;
 use spin::Mutex;
-use crate::sys::lapic::smp_invalidate_tlb;
+use crate::sys::smp::smp_invalidate_tlb;
 extern "C" {
     static _text_start: u8;


@@ -29,9 +29,10 @@ use sys::{
     acpica_osl::AE_OK,
     early_acpi::EarlyACPIHandler,
     hpet::setup_hpet,
-    lapic::{get_current_lapic_id, setup_lapic_timer, smp_broadcast_panic, start_aps},
+    lapic::{get_current_lapic_id, setup_lapic_timer},
     madt::{parse_madt, INTERRUPTS_SETUP},
     pic::disable_pic,
+    smp::{smp_broadcast_panic, start_aps},
     sync::LOCKS_HELD,
     task::setup_multitasking,
 };


@@ -1,36 +1,16 @@
 use core::{
-    ptr::{copy, null_mut},
+    ptr::null_mut,
     sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering},
 };
-use alloc::{vec, vec::Vec};
-use kernel_common::{
-    instructions::{get_rflags, pause},
-    paging::PageTable,
-};
-use crate::{
-    cpu::{
-        isr::{ISR_INVALIDATE_TLB, ISR_SCHEDULER},
-        paging::{map_physical, map_range, unmap, virt_to_phys, CURRENT_PML4},
-    },
-    BROADCASTED_PANIC,
-};
-use super::{
-    hpet::sleep,
-    sync::Spinlock,
-    task::{ALL_APS_STARTED, STACK_SIZE, STARTING_AP_ID},
-};
-extern "C" {
-    fn ap_trampoline();
-    static trampoline_pml4: u8;
-    static trampoline_stack: u8;
-}
-struct LAPIC {
-    lapic_id: AtomicUsize,
+use kernel_common::instructions::get_rflags;
+use crate::cpu::{isr::ISR_SCHEDULER, paging::map_physical};
+use super::hpet::sleep;
+pub struct LAPIC {
+    pub lapic_id: AtomicUsize,
     present: AtomicBool,
 }
@@ -43,13 +23,10 @@ const REGISTER_TIMER_LVT: usize = 0xc8;
 const REGISTER_TIMER_INITIAL_COUNT: usize = 0xe0;
 const REGISTER_TIMER_CURRENT_COUNT: usize = 0xe4;
 const REGISTER_TIMER_DIVIDE: usize = 0xf8;
-const IPI_NMI: u32 = 0x400;
-const IPI_INIT: u32 = 0x500;
-const IPI_STARTUP: u32 = 0x600;
 static ADDRESS: AtomicPtr<u32> = AtomicPtr::new(null_mut());
 pub static BSP_LAPIC_ID: AtomicUsize = AtomicUsize::new(0);
-static LAPICS: [LAPIC; 256] = [const {
+pub static LAPICS: [LAPIC; 256] = [const {
     LAPIC {
         lapic_id: AtomicUsize::new(0),
         present: AtomicBool::new(false),
@@ -57,7 +34,6 @@ static LAPICS: [LAPIC; 256] = [const {
 }; 256];
 static TICKS_PER_MS: AtomicUsize = AtomicUsize::new(0);
 pub static NEXT_LAPIC_ID: AtomicUsize = AtomicUsize::new(0);
-static INVALIDATE_TLB_LOCK: Spinlock = Spinlock::new();
 pub fn send_eoi() {
     let address = ADDRESS.load(Ordering::SeqCst);
@@ -92,7 +68,7 @@ fn calibrate_timer() {
     let ticks_in_10ms = 0xffffffff - unsafe { address.add(REGISTER_TIMER_CURRENT_COUNT).read_volatile() };
     TICKS_PER_MS.store(ticks_in_10ms as usize / 10, Ordering::SeqCst);
 }
-fn send_ipi(lapic_id: usize, data: u32) {
+pub fn send_ipi(lapic_id: usize, data: u32) {
     let address = ADDRESS.load(Ordering::SeqCst);
     unsafe {
         address.add(REGISTER_ICR_2).write_volatile((lapic_id << 24) as u32);
@@ -124,72 +100,3 @@ pub fn add_lapic(lapic_id: usize) {
     LAPICS[next_id].lapic_id.store(lapic_id, Ordering::SeqCst);
     LAPICS[next_id].present.store(true, Ordering::SeqCst);
 }
-pub fn start_aps() {
-    let stack: Vec<u8> = vec![0; STACK_SIZE];
-    let pml4_phys_addr;
-    {
-        let pml4 = CURRENT_PML4.lock();
-        let pml4 = pml4.as_ref().unwrap();
-        let pml4_phys_addr_u64 = virt_to_phys(*pml4 as *const PageTable as u64);
-        pml4_phys_addr = u32::try_from(pml4_phys_addr_u64).unwrap();
-    }
-    unsafe {
-        map_range(0x1000, 0x1000, 0x1000, false, true, false, false);
-        let dest_ptr = 0x1000 as *mut u8;
-        let src_ptr = ap_trampoline as *const u8;
-        copy(src_ptr, dest_ptr, 0x1000);
-        let pml4_offset = (&raw const trampoline_pml4).offset_from(src_ptr);
-        let pml4_addr = (0x1000 + pml4_offset) as *mut u32;
-        *pml4_addr = pml4_phys_addr;
-        let stack_offset = (&raw const trampoline_stack).offset_from(src_ptr);
-        let stack_addr = (0x1000 + stack_offset) as *mut u64;
-        *stack_addr = stack.as_ptr() as u64 + STACK_SIZE as u64;
-        map_range(0x1000, 0x1000, 0x1000, false, true, true, false);
-    }
-    for i in 0..NEXT_LAPIC_ID.load(Ordering::SeqCst) {
-        let lapic_id = LAPICS[i].lapic_id.load(Ordering::SeqCst);
-        if lapic_id == BSP_LAPIC_ID.load(Ordering::SeqCst) {
-            continue;
-        }
-        STARTING_AP_ID.store(lapic_id as i64, Ordering::SeqCst);
-        send_ipi(lapic_id, IPI_INIT);
-        sleep(10000);
-        send_ipi(lapic_id, IPI_STARTUP | 1);
-        while STARTING_AP_ID.load(Ordering::SeqCst) != -1 {
-            pause();
-        }
-    }
-    ALL_APS_STARTED.store(true, Ordering::SeqCst);
-    unsafe {
-        unmap(0x1000);
-    }
-}
-pub fn smp_invalidate_tlb() {
-    if !ALL_APS_STARTED.load(Ordering::SeqCst) {
-        return;
-    }
-    INVALIDATE_TLB_LOCK.lock();
-    let current_lapic_id = get_current_lapic_id();
-    for i in 0..NEXT_LAPIC_ID.load(Ordering::SeqCst) {
-        let lapic_id = LAPICS[i].lapic_id.load(Ordering::SeqCst);
-        if lapic_id == current_lapic_id {
-            continue;
-        }
-        send_ipi(lapic_id, ISR_INVALIDATE_TLB as u32);
-    }
-    INVALIDATE_TLB_LOCK.unlock();
-}
-pub fn smp_broadcast_panic() {
-    BROADCASTED_PANIC.store(true, Ordering::SeqCst);
-    if !ALL_APS_STARTED.load(Ordering::SeqCst) {
-        return;
-    }
-    let current_lapic_id = get_current_lapic_id();
-    for i in 0..NEXT_LAPIC_ID.load(Ordering::SeqCst) {
-        let lapic_id = LAPICS[i].lapic_id.load(Ordering::SeqCst);
-        if lapic_id == current_lapic_id {
-            continue;
-        }
-        send_ipi(lapic_id, IPI_NMI);
-    }
-}


@@ -6,5 +6,6 @@ pub mod lapic;
 pub mod madt;
 pub mod pic;
 pub mod scheduler;
+pub mod smp;
 pub mod sync;
 pub mod task;

kernel/src/sys/smp.rs (new file, 101 lines added)

@@ -0,0 +1,101 @@
+use core::{ptr::copy, sync::atomic::Ordering};
+use alloc::{vec, vec::Vec};
+use kernel_common::{instructions::pause, paging::PageTable};
+use crate::{
+    cpu::{
+        isr::ISR_INVALIDATE_TLB,
+        paging::{map_range, unmap, virt_to_phys, CURRENT_PML4},
+    },
+    BROADCASTED_PANIC,
+};
+use super::{
+    hpet::sleep,
+    lapic::{get_current_lapic_id, send_ipi, BSP_LAPIC_ID, LAPICS, NEXT_LAPIC_ID},
+    sync::Spinlock,
+    task::{ALL_APS_STARTED, STACK_SIZE, STARTING_AP_ID},
+};
+extern "C" {
+    fn ap_trampoline();
+    static trampoline_pml4: u8;
+    static trampoline_stack: u8;
+}
+const IPI_NMI: u32 = 0x400;
+const IPI_INIT: u32 = 0x500;
+const IPI_STARTUP: u32 = 0x600;
+static INVALIDATE_TLB_LOCK: Spinlock = Spinlock::new();
+pub fn start_aps() {
+    let stack: Vec<u8> = vec![0; STACK_SIZE];
+    let pml4_phys_addr;
+    {
+        let pml4 = CURRENT_PML4.lock();
+        let pml4 = pml4.as_ref().unwrap();
+        let pml4_phys_addr_u64 = virt_to_phys(*pml4 as *const PageTable as u64);
+        pml4_phys_addr = u32::try_from(pml4_phys_addr_u64).unwrap();
+    }
+    unsafe {
+        map_range(0x1000, 0x1000, 0x1000, false, true, false, false);
+        let dest_ptr = 0x1000 as *mut u8;
+        let src_ptr = ap_trampoline as *const u8;
+        copy(src_ptr, dest_ptr, 0x1000);
+        let pml4_offset = (&raw const trampoline_pml4).offset_from(src_ptr);
+        let pml4_addr = (0x1000 + pml4_offset) as *mut u32;
+        *pml4_addr = pml4_phys_addr;
+        let stack_offset = (&raw const trampoline_stack).offset_from(src_ptr);
+        let stack_addr = (0x1000 + stack_offset) as *mut u64;
+        *stack_addr = stack.as_ptr() as u64 + STACK_SIZE as u64;
+        map_range(0x1000, 0x1000, 0x1000, false, true, true, false);
+    }
+    for i in 0..NEXT_LAPIC_ID.load(Ordering::SeqCst) {
+        let lapic_id = LAPICS[i].lapic_id.load(Ordering::SeqCst);
+        if lapic_id == BSP_LAPIC_ID.load(Ordering::SeqCst) {
+            continue;
+        }
+        STARTING_AP_ID.store(lapic_id as i64, Ordering::SeqCst);
+        send_ipi(lapic_id, IPI_INIT);
+        sleep(10000);
+        send_ipi(lapic_id, IPI_STARTUP | 1);
+        while STARTING_AP_ID.load(Ordering::SeqCst) != -1 {
+            pause();
+        }
+    }
+    ALL_APS_STARTED.store(true, Ordering::SeqCst);
+    unsafe {
+        unmap(0x1000);
+    }
+}
+pub fn smp_invalidate_tlb() {
+    if !ALL_APS_STARTED.load(Ordering::SeqCst) {
+        return;
+    }
+    INVALIDATE_TLB_LOCK.lock();
+    let current_lapic_id = get_current_lapic_id();
+    for i in 0..NEXT_LAPIC_ID.load(Ordering::SeqCst) {
+        let lapic_id = LAPICS[i].lapic_id.load(Ordering::SeqCst);
+        if lapic_id == current_lapic_id {
+            continue;
+        }
+        send_ipi(lapic_id, ISR_INVALIDATE_TLB as u32);
+    }
+    INVALIDATE_TLB_LOCK.unlock();
+}
+pub fn smp_broadcast_panic() {
+    BROADCASTED_PANIC.store(true, Ordering::SeqCst);
+    if !ALL_APS_STARTED.load(Ordering::SeqCst) {
+        return;
+    }
+    let current_lapic_id = get_current_lapic_id();
+    for i in 0..NEXT_LAPIC_ID.load(Ordering::SeqCst) {
+        let lapic_id = LAPICS[i].lapic_id.load(Ordering::SeqCst);
+        if lapic_id == current_lapic_id {
+            continue;
+        }
+        send_ipi(lapic_id, IPI_NMI);
+    }
+}
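
Aside (not part of the commit): the AP bring-up in start_aps() is the usual INIT/SIPI sequence, and the words it pushes through send_ipi() can be illustrated with a small standalone sketch. This is a minimal sketch assuming the conventional xAPIC ICR layout (destination LAPIC ID in bits 24-31 of the high ICR word, delivery mode and vector in the low word), which matches the shift and constants shown in the diff; the function and variable names below are illustrative only and do not exist in the codebase.

// Standalone sketch, not kernel code: models the IPI words used by
// start_aps()/send_ipi() under the assumptions stated above.
const IPI_INIT: u32 = 0x500; // delivery mode INIT (as in smp.rs)
const IPI_STARTUP: u32 = 0x600; // delivery mode Startup/SIPI (as in smp.rs)

/// Returns (high ICR word, low ICR word) for an IPI to the given LAPIC.
fn encode_ipi(lapic_id: u32, command: u32) -> (u32, u32) {
    (lapic_id << 24, command)
}

fn main() {
    let target = 3; // hypothetical AP LAPIC ID
    // INIT first, then a SIPI with vector 1: the SIPI vector selects the
    // real-mode start page, so vector 1 means the AP begins executing at
    // physical 0x1000, which is where the trampoline was copied.
    let (hi, lo) = encode_ipi(target, IPI_INIT);
    println!("INIT: ICR2={hi:#010x} ICR1={lo:#010x}");
    let (hi, lo) = encode_ipi(target, IPI_STARTUP | 1);
    println!("SIPI: ICR2={hi:#010x} ICR1={lo:#010x}");
}

That vector-to-address relationship is why start_aps() maps and patches the page at 0x1000 before sending any IPIs.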