From 343babb5c48e4910e3484030e58db4f26225ef1f Mon Sep 17 00:00:00 2001 From: Mathieu Strypsteen Date: Fri, 13 Dec 2024 17:50:35 +0100 Subject: [PATCH] Add wrapped spinlock type --- Cargo.lock | 32 +------------- kernel/Cargo.toml | 3 +- kernel/src/cpu/isr.rs | 9 ++-- kernel/src/cpu/paging.rs | 9 ++-- kernel/src/main.rs | 4 +- kernel/src/misc/display.rs | 11 +++-- kernel/src/sys/acpica_osl.rs | 40 ++++++++---------- kernel/src/sys/hpet.rs | 26 ++++++------ kernel/src/sys/ioapic.rs | 7 +--- kernel/src/sys/locks.rs | 28 +++++++++++++ kernel/src/sys/madt.rs | 2 +- kernel/src/sys/mod.rs | 1 + kernel/src/sys/scheduler.rs | 22 +++++----- kernel/src/sys/smp.rs | 8 ++-- kernel/src/sys/sync.rs | 81 ++++++++++++++++++++---------------- kernel/src/sys/task.rs | 32 +++++++------- 16 files changed, 152 insertions(+), 163 deletions(-) create mode 100644 kernel/src/sys/locks.rs diff --git a/Cargo.lock b/Cargo.lock index 89ea2f7..7d48845 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -35,12 +35,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b2d54853319fd101b8dd81de382bcbf3e03410a64d8928bbee85a3e7dcde483" -[[package]] -name = "allocator-api2" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" - [[package]] name = "autocfg" version = "1.4.0" @@ -179,12 +173,6 @@ dependencies = [ "byteorder", ] -[[package]] -name = "equivalent" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" - [[package]] name = "float-cmp" version = "0.9.0" @@ -194,12 +182,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "foldhash" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" - [[package]] name = "funty" version = "2.0.0" @@ -212,17 +194,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" -[[package]] -name = "hashbrown" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" -dependencies = [ - "allocator-api2", - "equivalent", - "foldhash", -] - [[package]] name = "itertools" version = "0.13.0" @@ -242,10 +213,9 @@ dependencies = [ "bitvec", "buddy_system_allocator", "embedded-graphics", - "hashbrown", "kernel-common", + "lock_api", "log", - "spin", ] [[package]] diff --git a/kernel/Cargo.toml b/kernel/Cargo.toml index 1666592..46dd45b 100644 --- a/kernel/Cargo.toml +++ b/kernel/Cargo.toml @@ -11,10 +11,9 @@ bitfield = "0.17.0" bitvec = {version = "1.0.1", default-features = false, features = ["alloc", "atomic"]} buddy_system_allocator = "0.11.0" embedded-graphics = "0.8.1" -hashbrown = "0.15.2" kernel-common = {path = "../lib/kernel-common"} +lock_api = "0.4.12" log = "0.4.22" -spin = "0.9.8" [lints.clippy] missing_safety_doc = "allow" diff --git a/kernel/src/cpu/isr.rs b/kernel/src/cpu/isr.rs index 233ad47..f0e5070 100644 --- a/kernel/src/cpu/isr.rs +++ b/kernel/src/cpu/isr.rs @@ -4,12 +4,12 @@ use core::{ }; use log::warn; -use spin::Mutex; use crate::sys::{ lapic::{get_current_lapic_id, send_eoi}, + locks::Spinlock, scheduler::scheduler, - sync::{Spinlock, IN_ISR_HANDLER, LOCKS_HELD}, + sync::{IN_ISR_HANDLER, 
LOCKS_HELD}, }; global_asm!(include_str!("isr.s"), options(att_syntax)); @@ -78,8 +78,7 @@ const EXCEPTIONS: [&str; 32] = [ pub const ISR_INVALIDATE_TLB: u64 = 252; pub const ISR_SCHEDULER: u64 = 254; -pub static ISR_HANDLERS: Mutex<[Option; 256]> = Mutex::new([None; 256]); -pub static ISR_HANDLERS_LOCK: Spinlock = Spinlock::new(); +pub static ISR_HANDLERS: Spinlock<[Option; 256]> = Spinlock::new([None; 256]); #[no_mangle] extern "C" fn isr_handler(state: &mut ISRState) { @@ -112,9 +111,7 @@ extern "C" fn isr_handler(state: &mut ISRState) { } let handler; { - ISR_HANDLERS_LOCK.lock(); handler = ISR_HANDLERS.lock()[state.isr as usize]; - ISR_HANDLERS_LOCK.unlock(); } if let Some(handler) = handler { handler(); diff --git a/kernel/src/cpu/paging.rs b/kernel/src/cpu/paging.rs index 15d44f5..9bc3d6e 100644 --- a/kernel/src/cpu/paging.rs +++ b/kernel/src/cpu/paging.rs @@ -11,9 +11,8 @@ use kernel_common::{ paging::{load_cr3, PageEntry, PageTable, KERNEL_HEAP_INITIAL_SIZE, KERNEL_HEAP_START, KERNEL_VIRT_START}, }; use log::info; -use spin::Mutex; -use crate::sys::smp::smp_invalidate_tlb; +use crate::sys::{locks::Spinlock, smp::smp_invalidate_tlb}; extern "C" { static _text_start: u8; @@ -27,10 +26,10 @@ extern "C" { } static PAGING_ACTIVE: AtomicBool = AtomicBool::new(false); -pub static CURRENT_PML4: Mutex> = Mutex::new(None); +pub static CURRENT_PML4: Spinlock> = Spinlock::new(None); static HEAP_PHYS_START: AtomicU64 = AtomicU64::new(0); -static PHYSICAL_FRAMES: Mutex>> = Mutex::new(None); -static HEAP_PHYS_MAPPING: Mutex> = Mutex::new(Vec::new()); +static PHYSICAL_FRAMES: Spinlock>> = Spinlock::new(None); +static HEAP_PHYS_MAPPING: Spinlock> = Spinlock::new(Vec::new()); const KERNEL_MAPPINGS_START: u64 = 0xfffffffd00000000; const KERNEL_MAPPINGS_END: u64 = 0xfffffffe00000000; diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 4b3454e..caa34d3 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -24,12 +24,12 @@ use kernel_common::{ }; use log::{error, info}; use misc::display::{display_print, setup_display}; -use spin::Mutex; use sys::{ acpica_osl::AE_OK, early_acpi::EarlyACPIHandler, hpet::setup_hpet, lapic::{get_current_lapic_id, setup_lapic_timer}, + locks::Spinlock, madt::{parse_madt, INTERRUPTS_SETUP}, pic::disable_pic, smp::{smp_broadcast_panic, start_aps}, @@ -44,7 +44,7 @@ mod sys; #[global_allocator] static ALLOC: LockedHeap<32> = LockedHeap::empty(); pub static RSDP_ADDRESS: AtomicU64 = AtomicU64::new(0); -static LOADER_STRUCT: Mutex = Mutex::new(LoaderStruct { +static LOADER_STRUCT: Spinlock = Spinlock::new(LoaderStruct { magic: 0, phys_kernel_start: 0, phys_heap_start: 0, diff --git a/kernel/src/misc/display.rs b/kernel/src/misc/display.rs index a825524..656cfc4 100644 --- a/kernel/src/misc/display.rs +++ b/kernel/src/misc/display.rs @@ -11,16 +11,15 @@ use embedded_graphics::{ text::Text, }; use kernel_common::loader_struct::FramebufferInfo; -use spin::Mutex; use crate::{ cpu::paging::map_physical, misc::draw_target::FramebufferTarget, - sys::{madt::INTERRUPTS_SETUP, sync::Spinlock}, + sys::{locks::Spinlock, madt::INTERRUPTS_SETUP, sync::RawSpinlock}, }; -static FRAMEBUFFER: Mutex> = Mutex::new(None); -static FRAMEBUFFER_LOCK: Spinlock = Spinlock::new(); +static FRAMEBUFFER: Spinlock> = Spinlock::new(None); +static FRAMEBUFFER_LOCK: RawSpinlock = RawSpinlock::new(); static FRAMEBUFFER_ADDR: AtomicPtr = AtomicPtr::new(null_mut()); static WIDTH: AtomicUsize = AtomicUsize::new(0); static HEIGHT: AtomicUsize = AtomicUsize::new(0); @@ -58,7 +57,7 @@ pub fn 
display_print(str: &str) { return; } if INTERRUPTS_SETUP.load(Ordering::SeqCst) { - FRAMEBUFFER_LOCK.lock(); + FRAMEBUFFER_LOCK.raw_lock(); } let mut current_x = CURRENT_X.load(Ordering::SeqCst); let mut current_y = CURRENT_Y.load(Ordering::SeqCst); @@ -85,7 +84,7 @@ pub fn display_print(str: &str) { CURRENT_Y.store(current_y, Ordering::SeqCst); copy_to_fb(); if INTERRUPTS_SETUP.load(Ordering::SeqCst) { - FRAMEBUFFER_LOCK.unlock(); + FRAMEBUFFER_LOCK.raw_unlock(); } } pub fn setup_display(info: FramebufferInfo) { diff --git a/kernel/src/sys/acpica_osl.rs b/kernel/src/sys/acpica_osl.rs index 8f08e5a..1091d14 100644 --- a/kernel/src/sys/acpica_osl.rs +++ b/kernel/src/sys/acpica_osl.rs @@ -11,7 +11,6 @@ use kernel_common::{ ioports::{inb, inl, inw, outb, outl, outw}, log::log_raw, }; -use spin::Mutex; use crate::{ cpu::paging::{map_physical, unmap_physical}, @@ -20,13 +19,14 @@ use crate::{ RSDP_ADDRESS, }; -static SCI_HANDLER: Mutex UINT32>> = Mutex::new(None); +static SCI_HANDLER: Spinlock UINT32>> = Spinlock::new(None); static SCI_CONTEXT: AtomicPtr = AtomicPtr::new(null_mut()); use super::{ hpet::get_current_time, lapic::get_current_lapic_id, - sync::{create_semaphore, lock_semaphore, unlock_semaphore, Semaphore, Spinlock}, + locks::Spinlock, + sync::{create_semaphore, lock_semaphore, unlock_semaphore, RawSemaphore, RawSpinlock}, task::{CURRENT_TASKS, CURRENT_TASK_LOCK, MULTITASKING_ENABLED}, }; @@ -34,22 +34,20 @@ pub const AE_OK: ACPI_STATUS = 0; #[no_mangle] extern "C" fn AcpiOsAcquireLock(handle: *mut c_void) -> ACPI_SIZE { - let spinlock = unsafe { (handle as *const Spinlock).as_ref().unwrap() }; - spinlock.lock(); + let spinlock = unsafe { (handle as *const RawSpinlock).as_ref().unwrap() }; + spinlock.raw_lock(); 0 } #[no_mangle] extern "C" fn AcpiOsAllocate(size: ACPI_SIZE) -> *mut c_void { let layout = Layout::from_size_align(size as usize, 16).unwrap(); - unsafe { - wrapped_alloc(layout) as *mut c_void - } + unsafe { wrapped_alloc(layout) as *mut c_void } } #[no_mangle] extern "C" fn AcpiOsCreateLock(out: *mut *mut c_void) -> ACPI_STATUS { - let spinlock = Box::leak(Box::new(Spinlock::new())); + let spinlock = Box::leak(Box::new(RawSpinlock::new())); unsafe { - *out = spinlock as *mut Spinlock as *mut c_void; + *out = spinlock as *mut RawSpinlock as *mut c_void; } AE_OK } @@ -94,11 +92,11 @@ extern "C" fn AcpiOsGetThreadId() -> UINT64 { return 1; } let task_id; - CURRENT_TASK_LOCK.lock(); + CURRENT_TASK_LOCK.raw_lock(); { - task_id = CURRENT_TASKS.lock().get(&get_current_lapic_id()).unwrap().id; + task_id = CURRENT_TASKS.lock()[get_current_lapic_id()].as_ref().unwrap().id; } - CURRENT_TASK_LOCK.unlock(); + CURRENT_TASK_LOCK.raw_unlock(); task_id as UINT64 } #[no_mangle] @@ -125,9 +123,7 @@ extern "C" fn AcpiOsInstallInterruptHandler(gsi: UINT32, handler: ACPI_OSD_HANDL } #[no_mangle] extern "C" fn AcpiOsMapMemory(phys: ACPI_PHYSICAL_ADDRESS, size: ACPI_SIZE) -> *mut c_void { - unsafe { - map_physical(phys, size, false) as *mut c_void - } + unsafe { map_physical(phys, size, false) as *mut c_void } } #[no_mangle] extern "C" fn AcpiOsPhysicalTableOverride(_existing: *mut ACPI_TABLE_HEADER, new_address: *mut ACPI_PHYSICAL_ADDRESS, new_length: *mut UINT32) -> ACPI_STATUS { @@ -167,8 +163,8 @@ extern "C" fn AcpiOsReadPort(address: ACPI_IO_ADDRESS, value: *mut UINT32, width } #[no_mangle] extern "C" fn AcpiOsReleaseLock(handle: *mut c_void, _cpu_flags: ACPI_SIZE) { - let spinlock = unsafe { (handle as *const Spinlock).as_ref().unwrap() }; - spinlock.unlock(); + let spinlock = unsafe { 
(handle as *const RawSpinlock).as_ref().unwrap() };
+    spinlock.raw_unlock();
 }
 #[no_mangle]
 extern "C" fn AcpiOsStall() {
@@ -184,10 +180,10 @@ extern "C" fn AcpiOsSignal() {
 }
 #[no_mangle]
 extern "C" fn AcpiOsSignalSemaphore(handle: *mut c_void, units: UINT32) -> ACPI_STATUS {
-    let semaphore: Arc<Semaphore>;
+    let semaphore: Arc<RawSemaphore>;
     unsafe {
         Arc::increment_strong_count(handle);
-        semaphore = Arc::from_raw(handle as *const Semaphore);
+        semaphore = Arc::from_raw(handle as *const RawSemaphore);
     }
     unlock_semaphore(semaphore, units as usize);
     AE_OK
@@ -225,10 +221,10 @@ extern "C" fn AcpiOsWaitEventsComplete() {
 #[no_mangle]
 extern "C" fn AcpiOsWaitSemaphore(handle: *mut c_void, units: UINT32, _timeout: UINT16) -> ACPI_STATUS {
     // TODO: Handle timeout
-    let semaphore: Arc<Semaphore>;
+    let semaphore: Arc<RawSemaphore>;
     unsafe {
         Arc::increment_strong_count(handle);
-        semaphore = Arc::from_raw(handle as *const Semaphore);
+        semaphore = Arc::from_raw(handle as *const RawSemaphore);
     }
     lock_semaphore(semaphore, units as usize);
     AE_OK
diff --git a/kernel/src/sys/hpet.rs b/kernel/src/sys/hpet.rs
index 9d1f5ef..e99374e 100644
--- a/kernel/src/sys/hpet.rs
+++ b/kernel/src/sys/hpet.rs
@@ -6,7 +6,6 @@ use core::{
 use acpi::{AcpiTables, HpetInfo};
 use alloc::vec::Vec;
 use kernel_common::instructions::pause;
-use spin::Mutex;

 use crate::cpu::paging::map_physical;

@@ -14,8 +13,9 @@ use super::{
     early_acpi::EarlyACPIHandler,
     ioapic::register_irq_handler,
     lapic::get_current_lapic_id,
+    locks::Spinlock,
     scheduler::{schedule_task, yield_task, SCHEDULER_LOCK},
-    sync::Spinlock,
+    sync::RawSpinlock,
     task::{Task, TaskState, CURRENT_TASKS, CURRENT_TASK_LOCK, MULTITASKING_ENABLED},
 };

@@ -31,8 +31,8 @@ const TIMER_CONFIG_ENABLE: u64 = 4;
 static ADDRESS: AtomicPtr = AtomicPtr::new(null_mut());
 static PERIOD: AtomicUsize = AtomicUsize::new(0);
 static EARLY_SLEEP: AtomicBool = AtomicBool::new(false);
-static SLEEPING_LIST: Mutex<Vec<Task>> = Mutex::new(Vec::new());
-static SLEEP_LOCK: Spinlock = Spinlock::new();
+static SLEEPING_LIST: Spinlock<Vec<Task>> = Spinlock::new(Vec::new());
+static SLEEP_LOCK: RawSpinlock = RawSpinlock::new();

 fn ticks_to_us(ticks: usize) -> usize {
     let period = PERIOD.load(Ordering::SeqCst);
@@ -67,7 +67,7 @@ fn handler() {
         EARLY_SLEEP.store(false, Ordering::SeqCst);
     }
     if MULTITASKING_ENABLED.load(Ordering::SeqCst) {
-        SLEEP_LOCK.lock();
+        SLEEP_LOCK.raw_lock();
         {
             let mut sleeping_list = SLEEPING_LIST.lock();
             let current_time = get_current_time();
@@ -76,9 +76,9 @@ fn handler() {
                     let mut task = sleeping_list.remove(0);
                     task.sleep_until_us = 0;
                     task.task_state = TaskState::Ready;
-                    SCHEDULER_LOCK.lock();
+                    SCHEDULER_LOCK.raw_lock();
                     schedule_task(task);
-                    SCHEDULER_LOCK.unlock();
+                    SCHEDULER_LOCK.raw_unlock();
                 } else {
                     break;
                 }
@@ -87,7 +87,7 @@ fn handler() {
                 schedule_hpet_interrupt(task.sleep_until_us);
             }
         }
-        SLEEP_LOCK.unlock();
+        SLEEP_LOCK.raw_unlock();
     }
 }
 pub fn get_current_time() -> usize {
@@ -97,14 +97,14 @@ pub fn get_current_time() -> usize {
 }
 pub fn sleep(us: usize) {
     if MULTITASKING_ENABLED.load(Ordering::SeqCst) {
-        CURRENT_TASK_LOCK.lock();
+        CURRENT_TASK_LOCK.raw_lock();
         {
             let mut _current_task = CURRENT_TASKS.lock();
-            let current_task = _current_task.get_mut(&get_current_lapic_id()).unwrap();
+            let current_task = _current_task[get_current_lapic_id()].as_mut().unwrap();
             current_task.sleep_until_us = get_current_time() + us;
             current_task.task_state = TaskState::Sleeping;
         }
-        CURRENT_TASK_LOCK.unlock();
+        CURRENT_TASK_LOCK.raw_unlock();
         yield_task();
     } else {
         EARLY_SLEEP.store(true, Ordering::SeqCst);
@@ -121,14 +121,14 @@ pub fn sleep(us: usize) {
     }
 }
 pub fn sleep_internal(task: Task) {
-    SLEEP_LOCK.lock();
+    SLEEP_LOCK.raw_lock();
     {
         let mut sleeping_list = SLEEPING_LIST.lock();
         sleeping_list.push(task);
         sleeping_list.sort_by(|a, b| a.sleep_until_us.cmp(&b.sleep_until_us));
         schedule_hpet_interrupt(sleeping_list.first().unwrap().sleep_until_us);
     }
-    SLEEP_LOCK.unlock();
+    SLEEP_LOCK.raw_unlock();
 }
 pub fn setup_hpet(tables: &AcpiTables) {
     let hpet_info = HpetInfo::new(tables).unwrap();
diff --git a/kernel/src/sys/ioapic.rs b/kernel/src/sys/ioapic.rs
index 60cee1b..47bc769 100644
--- a/kernel/src/sys/ioapic.rs
+++ b/kernel/src/sys/ioapic.rs
@@ -6,10 +6,7 @@ use core::{
 use bitfield::bitfield;

 use crate::{
-    cpu::{
-        isr::{ISR_HANDLERS, ISR_HANDLERS_LOCK},
-        paging::map_physical,
-    },
+    cpu::{isr::ISR_HANDLERS, paging::map_physical},
     sys::lapic::BSP_LAPIC_ID,
 };

@@ -86,9 +83,7 @@ pub fn set_irq_override(gsi: usize, vector: usize, polarity: u8, trigger: u8) {
 pub fn register_irq_handler(vector: usize, handler: fn()) {
     assert!(ISR_HANDLERS.lock()[vector].is_none());
     {
-        ISR_HANDLERS_LOCK.lock();
         ISR_HANDLERS.lock()[vector] = Some(handler);
-        ISR_HANDLERS_LOCK.unlock();
     }
     for i in 0..NEXT_IOAPIC_ID.load(Ordering::SeqCst) {
         let start = IOAPICS[i].start_gsi.load(Ordering::SeqCst);
diff --git a/kernel/src/sys/locks.rs b/kernel/src/sys/locks.rs
new file mode 100644
index 0000000..7c59187
--- /dev/null
+++ b/kernel/src/sys/locks.rs
@@ -0,0 +1,28 @@
+use core::sync::atomic::{AtomicBool, AtomicUsize};
+
+use lock_api::{GuardSend, Mutex, RawMutex};
+
+use super::sync::RawSpinlock;
+
+unsafe impl RawMutex for RawSpinlock {
+    const INIT: Self = RawSpinlock {
+        locked: AtomicBool::new(false),
+        lapic_id: AtomicUsize::new(0),
+    };
+
+    type GuardMarker = GuardSend;
+
+    fn lock(&self) {
+        self.raw_lock();
+    }
+
+    fn try_lock(&self) -> bool {
+        unimplemented!();
+    }
+
+    unsafe fn unlock(&self) {
+        self.raw_unlock();
+    }
+}
+
+pub type Spinlock<T> = Mutex<RawSpinlock, T>;
diff --git a/kernel/src/sys/madt.rs b/kernel/src/sys/madt.rs
index 50e26b6..9875dc4 100644
--- a/kernel/src/sys/madt.rs
+++ b/kernel/src/sys/madt.rs
@@ -43,8 +43,8 @@ pub fn parse_madt(tables: &AcpiTables) {
             );
         }
     }
+    INTERRUPTS_SETUP.store(true, Ordering::SeqCst);
     unsafe {
         sti();
     }
-    INTERRUPTS_SETUP.store(true, Ordering::SeqCst);
 }
diff --git a/kernel/src/sys/mod.rs b/kernel/src/sys/mod.rs
index d3a923a..bf363c6 100644
--- a/kernel/src/sys/mod.rs
+++ b/kernel/src/sys/mod.rs
@@ -3,6 +3,7 @@ pub mod early_acpi;
 pub mod hpet;
 mod ioapic;
 pub mod lapic;
+pub mod locks;
 pub mod madt;
 pub mod pic;
 pub mod scheduler;
diff --git a/kernel/src/sys/scheduler.rs b/kernel/src/sys/scheduler.rs
index 7fdcfea..f3f8fd8 100644
--- a/kernel/src/sys/scheduler.rs
+++ b/kernel/src/sys/scheduler.rs
@@ -1,13 +1,13 @@
 use core::{arch::asm, sync::atomic::Ordering};

 use alloc::{collections::vec_deque::VecDeque, vec::Vec};
-use spin::Mutex;

 use crate::cpu::isr::ISRState;

 use super::{
     lapic::{get_current_lapic_id, schedule_timer_interrupt},
-    sync::Spinlock,
+    locks::Spinlock,
+    sync::RawSpinlock,
     task::{switch_task, Task, TaskState, CURRENT_TASKS, CURRENT_TASK_LOCK, MULTITASKING_ENABLED},
 };

@@ -16,16 +16,16 @@ use super::sync::{IN_ISR_HANDLER, LOCKS_HELD};
 #[cfg(debug_assertions)]
 use kernel_common::instructions::{cli, sti};

-static SCHEDULER_LIST: Mutex<VecDeque<Task>> = Mutex::new(VecDeque::new());
-pub static IDLE_TASKS: Mutex<Vec<Task>> = Mutex::new(Vec::new());
-pub static SCHEDULER_LOCK: Spinlock = Spinlock::new();
+static SCHEDULER_LIST: Spinlock<VecDeque<Task>> = Spinlock::new(VecDeque::new());
+pub static IDLE_TASKS: Spinlock<Vec<Task>> = Spinlock::new(Vec::new());
+pub static SCHEDULER_LOCK: RawSpinlock = RawSpinlock::new();

 pub fn scheduler(state: &mut ISRState) {
     if !MULTITASKING_ENABLED.load(Ordering::SeqCst) {
         return;
     }
-    CURRENT_TASK_LOCK.lock();
-    SCHEDULER_LOCK.lock();
+    CURRENT_TASK_LOCK.raw_lock();
+    SCHEDULER_LOCK.raw_lock();
     let mut switch_to_task = None;
     {
         let mut scheduler_list = SCHEDULER_LIST.lock();
@@ -34,16 +34,16 @@ pub fn scheduler(state: &mut ISRState) {
             switch_to_task = Some(task);
         }
     }
-    SCHEDULER_LOCK.unlock();
+    SCHEDULER_LOCK.raw_unlock();
     if let Some(task) = switch_to_task {
         switch_task(state, task);
-        CURRENT_TASK_LOCK.unlock();
+        CURRENT_TASK_LOCK.raw_unlock();
         return;
     }
     let mut switch_idle = false;
     {
         let _current_task = CURRENT_TASKS.lock();
-        let current_task = _current_task.get(&get_current_lapic_id());
+        let current_task = _current_task[get_current_lapic_id()].as_ref();
         if current_task.is_none() {
             switch_idle = true;
         }
@@ -57,7 +57,7 @@ pub fn scheduler(state: &mut ISRState) {
     if switch_idle {
         switch_task(state, IDLE_TASKS.lock().pop().unwrap());
     }
-    CURRENT_TASK_LOCK.unlock();
+    CURRENT_TASK_LOCK.raw_unlock();
 }
 pub fn schedule_task(task: Task) {
     debug_assert!(SCHEDULER_LOCK.is_locked() || !MULTITASKING_ENABLED.load(Ordering::SeqCst));
diff --git a/kernel/src/sys/smp.rs b/kernel/src/sys/smp.rs
index 32db39b..8ef32d2 100644
--- a/kernel/src/sys/smp.rs
+++ b/kernel/src/sys/smp.rs
@@ -14,7 +14,7 @@ use crate::{
 use super::{
     hpet::sleep,
     lapic::{get_current_lapic_id, send_ipi, BSP_LAPIC_ID, LAPICS, NEXT_LAPIC_ID},
-    sync::Spinlock,
+    sync::RawSpinlock,
     task::{ALL_APS_STARTED, STACK_SIZE, STARTING_AP_ID},
 };

@@ -28,7 +28,7 @@ const IPI_NMI: u32 = 0x400;
 const IPI_INIT: u32 = 0x500;
 const IPI_STARTUP: u32 = 0x600;

-static INVALIDATE_TLB_LOCK: Spinlock = Spinlock::new();
+static INVALIDATE_TLB_LOCK: RawSpinlock = RawSpinlock::new();

 pub fn start_aps() {
     let stack: Vec = vec![0; STACK_SIZE];
@@ -74,7 +74,7 @@ pub fn smp_invalidate_tlb() {
     if !ALL_APS_STARTED.load(Ordering::SeqCst) {
         return;
     }
-    INVALIDATE_TLB_LOCK.lock();
+    INVALIDATE_TLB_LOCK.raw_lock();
     let current_lapic_id = get_current_lapic_id();
     for i in 0..NEXT_LAPIC_ID.load(Ordering::SeqCst) {
         let lapic_id = LAPICS[i].lapic_id.load(Ordering::SeqCst);
@@ -83,7 +83,7 @@ pub fn smp_invalidate_tlb() {
         }
         send_ipi(lapic_id, ISR_INVALIDATE_TLB as u32);
     }
-    INVALIDATE_TLB_LOCK.unlock();
+    INVALIDATE_TLB_LOCK.raw_unlock();
 }
 pub fn smp_broadcast_panic() {
     BROADCASTED_PANIC.store(true, Ordering::SeqCst);
diff --git a/kernel/src/sys/sync.rs b/kernel/src/sys/sync.rs
index 201048c..858c7eb 100644
--- a/kernel/src/sys/sync.rs
+++ b/kernel/src/sys/sync.rs
@@ -2,12 +2,12 @@ use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

 use alloc::{collections::vec_deque::VecDeque, sync::Arc};
 use kernel_common::instructions::{cli, sti};
-use spin::Mutex;

 use crate::sys::madt::INTERRUPTS_SETUP;

 use super::{
     lapic::get_current_lapic_id,
+    locks::Spinlock,
     scheduler::{schedule_task, yield_task, SCHEDULER_LOCK},
     task::{Task, TaskState, CURRENT_TASKS, CURRENT_TASK_LOCK},
 };
@@ -15,41 +15,48 @@ use super::{
 pub static IN_ISR_HANDLER: [AtomicBool; 256] = [const { AtomicBool::new(false) }; 256];
 pub static LOCKS_HELD: [AtomicUsize; 256] = [const { AtomicUsize::new(0) }; 256];

-pub struct Semaphore {
-    spinlock: Spinlock,
+pub struct RawSemaphore {
+    spinlock: RawSpinlock,
     max_count: usize,
     current_count: AtomicUsize,
-    blocked_list: Mutex<VecDeque<Task>>,
+    blocked_list: Spinlock<VecDeque<Task>>,
 }
-pub struct Spinlock {
-    locked: AtomicBool,
-    lapic_id: AtomicUsize,
+pub struct RawSpinlock {
+    pub locked: AtomicBool,
+    pub lapic_id: AtomicUsize,
 }
-impl Spinlock {
+impl RawSpinlock {
     pub const fn new() -> Self {
         Self {
             locked: AtomicBool::new(false),
             lapic_id: AtomicUsize::new(0),
         }
     }
-    pub fn lock(&self) {
-        debug_assert!(INTERRUPTS_SETUP.load(Ordering::SeqCst));
+    pub fn raw_lock(&self) {
         cli();
         while self.locked.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst).is_err() {}
-        let lapic_id = get_current_lapic_id();
-        LOCKS_HELD[lapic_id].fetch_add(1, Ordering::SeqCst);
-        self.lapic_id.store(lapic_id, Ordering::SeqCst);
+        if INTERRUPTS_SETUP.load(Ordering::SeqCst) {
+            let lapic_id = get_current_lapic_id();
+            LOCKS_HELD[lapic_id].fetch_add(1, Ordering::SeqCst);
+            self.lapic_id.store(lapic_id, Ordering::SeqCst);
+        }
     }
-    pub fn unlock(&self) {
+    pub fn raw_unlock(&self) {
         debug_assert!(self.locked.load(Ordering::SeqCst));
-        let lapic_id = self.lapic_id.load(Ordering::SeqCst);
-        debug_assert_eq!(lapic_id, get_current_lapic_id());
+        let mut lapic_id = 0;
+        let interrupts_setup = INTERRUPTS_SETUP.load(Ordering::SeqCst);
+        if interrupts_setup {
+            lapic_id = self.lapic_id.load(Ordering::SeqCst);
+            debug_assert_eq!(lapic_id, get_current_lapic_id());
+        }
         self.locked.store(false, Ordering::SeqCst);
-        LOCKS_HELD[lapic_id].fetch_sub(1, Ordering::SeqCst);
-        if !IN_ISR_HANDLER[lapic_id].load(Ordering::SeqCst) && LOCKS_HELD[lapic_id].load(Ordering::SeqCst) == 0 {
-            unsafe {
-                sti();
+        if interrupts_setup {
+            LOCKS_HELD[lapic_id].fetch_sub(1, Ordering::SeqCst);
+            if !IN_ISR_HANDLER[lapic_id].load(Ordering::SeqCst) && LOCKS_HELD[lapic_id].load(Ordering::SeqCst) == 0 {
+                unsafe {
+                    sti();
+                }
             }
         }
     }
@@ -58,15 +65,15 @@ impl Spinlock {
     }
 }

-pub fn create_semaphore(max_count: usize, initial_count: usize) -> Arc<Semaphore> {
-    Arc::new(Semaphore {
-        spinlock: Spinlock::new(),
+pub fn create_semaphore(max_count: usize, initial_count: usize) -> Arc<RawSemaphore> {
+    Arc::new(RawSemaphore {
+        spinlock: RawSpinlock::new(),
         max_count,
         current_count: AtomicUsize::new(initial_count),
-        blocked_list: Mutex::new(VecDeque::new()),
+        blocked_list: Spinlock::new(VecDeque::new()),
     })
 }
-pub fn lock_semaphore(semaphore: Arc<Semaphore>, count: usize) {
+pub fn lock_semaphore(semaphore: Arc<RawSemaphore>, count: usize) {
     loop {
         let mut success = false;
         let current_count = semaphore.current_count.load(Ordering::SeqCst);
@@ -79,35 +86,35 @@ pub fn lock_semaphore(semaphore: Arc<Semaphore>, count: usize) {
         if success {
             return;
         }
-        CURRENT_TASK_LOCK.lock();
+        CURRENT_TASK_LOCK.raw_lock();
         {
             let mut current_task = CURRENT_TASKS.lock();
-            let current_task = current_task.get_mut(&get_current_lapic_id()).unwrap();
+            let current_task = current_task[get_current_lapic_id()].as_mut().unwrap();
             current_task.task_state = TaskState::SemaphoreBlocked;
             current_task.block_on_semaphore = Some(semaphore.clone());
             current_task.semaphore_requested_count = count;
         }
-        CURRENT_TASK_LOCK.unlock();
+        CURRENT_TASK_LOCK.raw_unlock();
         yield_task();
     }
 }
 pub fn lock_semaphore_internal(mut task: Task) {
     let semaphore = task.block_on_semaphore.as_ref().unwrap().clone();
-    semaphore.spinlock.lock();
+    semaphore.spinlock.raw_lock();
     if task.semaphore_requested_count > semaphore.current_count.load(Ordering::SeqCst) {
         semaphore.blocked_list.lock().push_back(task);
     } else {
         task.block_on_semaphore = None;
         task.semaphore_requested_count = 0;
         task.task_state = TaskState::Ready;
-        SCHEDULER_LOCK.lock();
+        SCHEDULER_LOCK.raw_lock();
         schedule_task(task);
-        SCHEDULER_LOCK.unlock();
+        SCHEDULER_LOCK.raw_unlock();
     }
-    semaphore.spinlock.unlock();
+    semaphore.spinlock.raw_unlock();
 }
-pub fn unlock_semaphore(semaphore: Arc<Semaphore>, count: usize) {
-    semaphore.spinlock.lock();
+pub fn unlock_semaphore(semaphore: Arc<RawSemaphore>, count: usize) {
+    semaphore.spinlock.raw_lock();
     {
         semaphore.current_count.fetch_add(count, Ordering::SeqCst);
         debug_assert!(semaphore.current_count.load(Ordering::SeqCst) <= semaphore.max_count);
@@ -115,10 +122,10 @@ pub fn unlock_semaphore(semaphore: Arc<Semaphore>, count: usize) {
             task.block_on_semaphore = None;
             task.semaphore_requested_count = 0;
             task.task_state = TaskState::Ready;
-            SCHEDULER_LOCK.lock();
+            SCHEDULER_LOCK.raw_lock();
             schedule_task(task);
-            SCHEDULER_LOCK.unlock();
+            SCHEDULER_LOCK.raw_unlock();
         }
     }
-    semaphore.spinlock.unlock();
+    semaphore.spinlock.raw_unlock();
 }
diff --git a/kernel/src/sys/task.rs b/kernel/src/sys/task.rs
index 6d7cfbd..592b63d 100644
--- a/kernel/src/sys/task.rs
+++ b/kernel/src/sys/task.rs
@@ -1,9 +1,7 @@
 use core::sync::atomic::{AtomicBool, AtomicI64, AtomicU64, AtomicUsize, Ordering};

 use alloc::{sync::Arc, vec, vec::Vec};
-use hashbrown::HashMap;
 use kernel_common::instructions::{cli, get_rflags, hlt, pause, sti};
-use spin::{Lazy, Mutex};

 use crate::{
     cpu::isr::ISRState,
@@ -17,8 +15,9 @@ use crate::{
 use super::{
     hpet::sleep_internal,
     lapic::{get_current_lapic_id, schedule_timer_interrupt},
+    locks::Spinlock,
     scheduler::yield_task,
-    sync::{lock_semaphore_internal, Semaphore, Spinlock},
+    sync::{lock_semaphore_internal, RawSemaphore, RawSpinlock},
 };

 #[derive(PartialEq)]
@@ -37,14 +36,14 @@ pub struct Task {
     initial_func: fn(),
     pub task_state: TaskState,
     pub sleep_until_us: usize,
-    pub block_on_semaphore: Option<Arc<Semaphore>>,
+    pub block_on_semaphore: Option<Arc<RawSemaphore>>,
     pub semaphore_requested_count: usize,
 }

 pub const STACK_SIZE: usize = 64 * 1024;
 static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(2);
-pub static CURRENT_TASKS: Lazy<Mutex<HashMap<usize, Task>>> = Lazy::new(|| Mutex::new(HashMap::new()));
-pub static CURRENT_TASK_LOCK: Spinlock = Spinlock::new();
+pub static CURRENT_TASKS: Spinlock<[Option<Task>; 256]> = Spinlock::new([const { None }; 256]);
+pub static CURRENT_TASK_LOCK: RawSpinlock = RawSpinlock::new();
 static RFLAGS: AtomicU64 = AtomicU64::new(0);
 pub static MULTITASKING_ENABLED: AtomicBool = AtomicBool::new(false);
 pub static STARTING_AP_ID: AtomicI64 = AtomicI64::new(-1);
@@ -57,13 +56,13 @@ pub fn allocate_stack() -> u64 {
 pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
     debug_assert!(CURRENT_TASK_LOCK.is_locked());
     let mut _current_task = CURRENT_TASKS.lock();
-    if let Some(mut current_task) = _current_task.remove(&get_current_lapic_id()) {
+    if let Some(mut current_task) = _current_task[get_current_lapic_id()].take() {
         current_task.state = *current_state;
         match current_task.task_state {
             TaskState::Ready => {
-                SCHEDULER_LOCK.lock();
+                SCHEDULER_LOCK.raw_lock();
                 schedule_task(current_task);
-                SCHEDULER_LOCK.unlock();
+                SCHEDULER_LOCK.raw_unlock();
             }
             TaskState::Idle => IDLE_TASKS.lock().push(current_task),
             TaskState::Terminated => {}
@@ -72,8 +71,7 @@ pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
         }
     }
     *current_state = new_task.state;
-    let result = _current_task.insert(get_current_lapic_id(), new_task);
-    assert!(result.is_none());
+    _current_task[get_current_lapic_id()] = Some(new_task);
     schedule_timer_interrupt();
 }
 pub fn create_task(func: fn()) -> Task {
@@ -120,19 +118,19 @@ fn create_idle_task() {
     }
 }
 extern "C" fn task_entry() -> ! {
-    CURRENT_TASK_LOCK.lock();
+    CURRENT_TASK_LOCK.raw_lock();
     let func;
     {
         let task = CURRENT_TASKS.lock();
-        func = task.get(&get_current_lapic_id()).unwrap().initial_func;
+        func = task[get_current_lapic_id()].as_ref().unwrap().initial_func;
     }
-    CURRENT_TASK_LOCK.unlock();
+    CURRENT_TASK_LOCK.raw_unlock();
     func();
-    CURRENT_TASK_LOCK.lock();
+    CURRENT_TASK_LOCK.raw_lock();
     {
-        CURRENT_TASKS.lock().get_mut(&get_current_lapic_id()).unwrap().task_state = TaskState::Terminated;
+        CURRENT_TASKS.lock()[get_current_lapic_id()].as_mut().unwrap().task_state = TaskState::Terminated;
     }
-    CURRENT_TASK_LOCK.unlock();
+    CURRENT_TASK_LOCK.raw_unlock();
     yield_task();
     panic!("Failed to terminate task");
 }
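
For reference, a minimal hosted-Rust sketch of the pattern this patch introduces in kernel/src/sys/locks.rs: a raw lock implementing lock_api::RawMutex is handed to lock_api::Mutex, so callers get RAII guards instead of the manual lock()/unlock() pairs removed throughout the diff. This is a simplified standalone example, not the kernel code: the cli()/sti() interrupt masking, LAPIC-ID bookkeeping, and the unimplemented try_lock of the kernel's RawSpinlock are omitted, and only the lock_api crate added in kernel/Cargo.toml is assumed.

    use core::sync::atomic::{AtomicBool, Ordering};

    use lock_api::{GuardSend, Mutex, RawMutex};

    // Simplified raw spinlock: just an atomic flag, no interrupt handling.
    pub struct RawSpinlock {
        locked: AtomicBool,
    }

    unsafe impl RawMutex for RawSpinlock {
        const INIT: Self = RawSpinlock {
            locked: AtomicBool::new(false),
        };
        type GuardMarker = GuardSend;

        fn lock(&self) {
            // Spin until the flag is acquired.
            while self
                .locked
                .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
                .is_err()
            {
                core::hint::spin_loop();
            }
        }

        fn try_lock(&self) -> bool {
            // Single acquisition attempt (the kernel patch leaves this unimplemented).
            self.locked
                .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
                .is_ok()
        }

        unsafe fn unlock(&self) {
            self.locked.store(false, Ordering::SeqCst);
        }
    }

    // Same shape as the alias added in locks.rs: the wrapped, data-carrying lock.
    pub type Spinlock<T> = Mutex<RawSpinlock, T>;

    static COUNTER: Spinlock<u32> = Spinlock::new(0);

    fn main() {
        // The guard releases the lock when it goes out of scope, replacing the
        // separate FOO.lock()/FOO_LOCK.unlock() pairs used before this patch.
        *COUNTER.lock() += 1;
        assert_eq!(*COUNTER.lock(), 1);
    }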