Add wrapped spinlock type

Mathieu Strypsteen 2024-12-13 17:50:35 +01:00
parent c6e6d3886b
commit 343babb5c4
16 changed files with 152 additions and 163 deletions

Cargo.lock (generated)

@@ -35,12 +35,6 @@ version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6b2d54853319fd101b8dd81de382bcbf3e03410a64d8928bbee85a3e7dcde483"
-[[package]]
-name = "allocator-api2"
-version = "0.2.21"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
 [[package]]
 name = "autocfg"
 version = "1.4.0"
@@ -179,12 +173,6 @@ dependencies = [
 "byteorder",
 ]
-[[package]]
-name = "equivalent"
-version = "1.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
 [[package]]
 name = "float-cmp"
 version = "0.9.0"
@@ -194,12 +182,6 @@ dependencies = [
 "num-traits",
 ]
-[[package]]
-name = "foldhash"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2"
 [[package]]
 name = "funty"
 version = "2.0.0"
@@ -212,17 +194,6 @@ version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
-[[package]]
-name = "hashbrown"
-version = "0.15.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
-dependencies = [
-"allocator-api2",
-"equivalent",
-"foldhash",
-]
 [[package]]
 name = "itertools"
 version = "0.13.0"
@@ -242,10 +213,9 @@ dependencies = [
 "bitvec",
 "buddy_system_allocator",
 "embedded-graphics",
-"hashbrown",
 "kernel-common",
+"lock_api",
 "log",
-"spin",
 ]
 [[package]]


@@ -11,10 +11,9 @@ bitfield = "0.17.0"
 bitvec = {version = "1.0.1", default-features = false, features = ["alloc", "atomic"]}
 buddy_system_allocator = "0.11.0"
 embedded-graphics = "0.8.1"
-hashbrown = "0.15.2"
 kernel-common = {path = "../lib/kernel-common"}
+lock_api = "0.4.12"
 log = "0.4.22"
-spin = "0.9.8"
 [lints.clippy]
 missing_safety_doc = "allow"


@@ -4,12 +4,12 @@ use core::{
 };
 use log::warn;
-use spin::Mutex;
 use crate::sys::{
 lapic::{get_current_lapic_id, send_eoi},
+locks::Spinlock,
 scheduler::scheduler,
-sync::{Spinlock, IN_ISR_HANDLER, LOCKS_HELD},
+sync::{IN_ISR_HANDLER, LOCKS_HELD},
 };
 global_asm!(include_str!("isr.s"), options(att_syntax));
@@ -78,8 +78,7 @@ const EXCEPTIONS: [&str; 32] = [
 pub const ISR_INVALIDATE_TLB: u64 = 252;
 pub const ISR_SCHEDULER: u64 = 254;
-pub static ISR_HANDLERS: Mutex<[Option<fn()>; 256]> = Mutex::new([None; 256]);
-pub static ISR_HANDLERS_LOCK: Spinlock = Spinlock::new();
+pub static ISR_HANDLERS: Spinlock<[Option<fn()>; 256]> = Spinlock::new([None; 256]);
 #[no_mangle]
 extern "C" fn isr_handler(state: &mut ISRState) {
@@ -112,9 +111,7 @@ extern "C" fn isr_handler(state: &mut ISRState) {
 }
 let handler;
 {
-ISR_HANDLERS_LOCK.lock();
 handler = ISR_HANDLERS.lock()[state.isr as usize];
-ISR_HANDLERS_LOCK.unlock();
 }
 if let Some(handler) = handler {
 handler();


@@ -11,9 +11,8 @@ use kernel_common::{
 paging::{load_cr3, PageEntry, PageTable, KERNEL_HEAP_INITIAL_SIZE, KERNEL_HEAP_START, KERNEL_VIRT_START},
 };
 use log::info;
-use spin::Mutex;
-use crate::sys::smp::smp_invalidate_tlb;
+use crate::sys::{locks::Spinlock, smp::smp_invalidate_tlb};
 extern "C" {
 static _text_start: u8;
@@ -27,10 +26,10 @@ extern "C" {
 }
 static PAGING_ACTIVE: AtomicBool = AtomicBool::new(false);
-pub static CURRENT_PML4: Mutex<Option<&mut PageTable>> = Mutex::new(None);
+pub static CURRENT_PML4: Spinlock<Option<&mut PageTable>> = Spinlock::new(None);
 static HEAP_PHYS_START: AtomicU64 = AtomicU64::new(0);
-static PHYSICAL_FRAMES: Mutex<Option<BitVec<u64>>> = Mutex::new(None);
-static HEAP_PHYS_MAPPING: Mutex<Vec<u64>> = Mutex::new(Vec::new());
+static PHYSICAL_FRAMES: Spinlock<Option<BitVec<u64>>> = Spinlock::new(None);
+static HEAP_PHYS_MAPPING: Spinlock<Vec<u64>> = Spinlock::new(Vec::new());
 const KERNEL_MAPPINGS_START: u64 = 0xfffffffd00000000;
 const KERNEL_MAPPINGS_END: u64 = 0xfffffffe00000000;


@@ -24,12 +24,12 @@ use kernel_common::{
 };
 use log::{error, info};
 use misc::display::{display_print, setup_display};
-use spin::Mutex;
 use sys::{
 acpica_osl::AE_OK,
 early_acpi::EarlyACPIHandler,
 hpet::setup_hpet,
 lapic::{get_current_lapic_id, setup_lapic_timer},
+locks::Spinlock,
 madt::{parse_madt, INTERRUPTS_SETUP},
 pic::disable_pic,
 smp::{smp_broadcast_panic, start_aps},
@@ -44,7 +44,7 @@ mod sys;
 #[global_allocator]
 static ALLOC: LockedHeap<32> = LockedHeap::empty();
 pub static RSDP_ADDRESS: AtomicU64 = AtomicU64::new(0);
-static LOADER_STRUCT: Mutex<LoaderStruct> = Mutex::new(LoaderStruct {
+static LOADER_STRUCT: Spinlock<LoaderStruct> = Spinlock::new(LoaderStruct {
 magic: 0,
 phys_kernel_start: 0,
 phys_heap_start: 0,


@@ -11,16 +11,15 @@ use embedded_graphics::{
 text::Text,
 };
 use kernel_common::loader_struct::FramebufferInfo;
-use spin::Mutex;
 use crate::{
 cpu::paging::map_physical,
 misc::draw_target::FramebufferTarget,
-sys::{madt::INTERRUPTS_SETUP, sync::Spinlock},
+sys::{locks::Spinlock, madt::INTERRUPTS_SETUP, sync::RawSpinlock},
 };
-static FRAMEBUFFER: Mutex<Option<FramebufferTarget>> = Mutex::new(None);
-static FRAMEBUFFER_LOCK: Spinlock = Spinlock::new();
+static FRAMEBUFFER: Spinlock<Option<FramebufferTarget>> = Spinlock::new(None);
+static FRAMEBUFFER_LOCK: RawSpinlock = RawSpinlock::new();
 static FRAMEBUFFER_ADDR: AtomicPtr<u8> = AtomicPtr::new(null_mut());
 static WIDTH: AtomicUsize = AtomicUsize::new(0);
 static HEIGHT: AtomicUsize = AtomicUsize::new(0);
@@ -58,7 +57,7 @@ pub fn display_print(str: &str) {
 return;
 }
 if INTERRUPTS_SETUP.load(Ordering::SeqCst) {
-FRAMEBUFFER_LOCK.lock();
+FRAMEBUFFER_LOCK.raw_lock();
 }
 let mut current_x = CURRENT_X.load(Ordering::SeqCst);
 let mut current_y = CURRENT_Y.load(Ordering::SeqCst);
@@ -85,7 +84,7 @@ pub fn display_print(str: &str) {
 CURRENT_Y.store(current_y, Ordering::SeqCst);
 copy_to_fb();
 if INTERRUPTS_SETUP.load(Ordering::SeqCst) {
-FRAMEBUFFER_LOCK.unlock();
+FRAMEBUFFER_LOCK.raw_unlock();
 }
 }
 pub fn setup_display(info: FramebufferInfo) {


@@ -11,7 +11,6 @@ use kernel_common::{
 ioports::{inb, inl, inw, outb, outl, outw},
 log::log_raw,
 };
-use spin::Mutex;
 use crate::{
 cpu::paging::{map_physical, unmap_physical},
@@ -20,13 +19,14 @@ use crate::{
 RSDP_ADDRESS,
 };
-static SCI_HANDLER: Mutex<Option<unsafe extern "C" fn(context: *mut c_void) -> UINT32>> = Mutex::new(None);
+static SCI_HANDLER: Spinlock<Option<unsafe extern "C" fn(context: *mut c_void) -> UINT32>> = Spinlock::new(None);
 static SCI_CONTEXT: AtomicPtr<c_void> = AtomicPtr::new(null_mut());
 use super::{
 hpet::get_current_time,
 lapic::get_current_lapic_id,
-sync::{create_semaphore, lock_semaphore, unlock_semaphore, Semaphore, Spinlock},
+locks::Spinlock,
+sync::{create_semaphore, lock_semaphore, unlock_semaphore, RawSemaphore, RawSpinlock},
 task::{CURRENT_TASKS, CURRENT_TASK_LOCK, MULTITASKING_ENABLED},
 };
@@ -34,22 +34,20 @@ pub const AE_OK: ACPI_STATUS = 0;
 #[no_mangle]
 extern "C" fn AcpiOsAcquireLock(handle: *mut c_void) -> ACPI_SIZE {
-let spinlock = unsafe { (handle as *const Spinlock).as_ref().unwrap() };
-spinlock.lock();
+let spinlock = unsafe { (handle as *const RawSpinlock).as_ref().unwrap() };
+spinlock.raw_lock();
 0
 }
 #[no_mangle]
 extern "C" fn AcpiOsAllocate(size: ACPI_SIZE) -> *mut c_void {
 let layout = Layout::from_size_align(size as usize, 16).unwrap();
-unsafe {
-wrapped_alloc(layout) as *mut c_void
-}
+unsafe { wrapped_alloc(layout) as *mut c_void }
 }
 #[no_mangle]
 extern "C" fn AcpiOsCreateLock(out: *mut *mut c_void) -> ACPI_STATUS {
-let spinlock = Box::leak(Box::new(Spinlock::new()));
+let spinlock = Box::leak(Box::new(RawSpinlock::new()));
 unsafe {
-*out = spinlock as *mut Spinlock as *mut c_void;
+*out = spinlock as *mut RawSpinlock as *mut c_void;
 }
 AE_OK
 }
@@ -94,11 +92,11 @@ extern "C" fn AcpiOsGetThreadId() -> UINT64 {
 return 1;
 }
 let task_id;
-CURRENT_TASK_LOCK.lock();
+CURRENT_TASK_LOCK.raw_lock();
 {
-task_id = CURRENT_TASKS.lock().get(&get_current_lapic_id()).unwrap().id;
+task_id = CURRENT_TASKS.lock()[get_current_lapic_id()].as_ref().unwrap().id;
 }
-CURRENT_TASK_LOCK.unlock();
+CURRENT_TASK_LOCK.raw_unlock();
 task_id as UINT64
 }
 #[no_mangle]
@@ -125,9 +123,7 @@ extern "C" fn AcpiOsInstallInterruptHandler(gsi: UINT32, handler: ACPI_OSD_HANDL
 }
 #[no_mangle]
 extern "C" fn AcpiOsMapMemory(phys: ACPI_PHYSICAL_ADDRESS, size: ACPI_SIZE) -> *mut c_void {
-unsafe {
-map_physical(phys, size, false) as *mut c_void
-}
+unsafe { map_physical(phys, size, false) as *mut c_void }
 }
 #[no_mangle]
 extern "C" fn AcpiOsPhysicalTableOverride(_existing: *mut ACPI_TABLE_HEADER, new_address: *mut ACPI_PHYSICAL_ADDRESS, new_length: *mut UINT32) -> ACPI_STATUS {
@@ -167,8 +163,8 @@ extern "C" fn AcpiOsReadPort(address: ACPI_IO_ADDRESS, value: *mut UINT32, width
 }
 #[no_mangle]
 extern "C" fn AcpiOsReleaseLock(handle: *mut c_void, _cpu_flags: ACPI_SIZE) {
-let spinlock = unsafe { (handle as *const Spinlock).as_ref().unwrap() };
-spinlock.unlock();
+let spinlock = unsafe { (handle as *const RawSpinlock).as_ref().unwrap() };
+spinlock.raw_unlock();
 }
 #[no_mangle]
 extern "C" fn AcpiOsStall() {
@@ -184,10 +180,10 @@ extern "C" fn AcpiOsSignal() {
 }
 #[no_mangle]
 extern "C" fn AcpiOsSignalSemaphore(handle: *mut c_void, units: UINT32) -> ACPI_STATUS {
-let semaphore: Arc<Semaphore>;
+let semaphore: Arc<RawSemaphore>;
 unsafe {
 Arc::increment_strong_count(handle);
-semaphore = Arc::from_raw(handle as *const Semaphore);
+semaphore = Arc::from_raw(handle as *const RawSemaphore);
 }
 unlock_semaphore(semaphore, units as usize);
 AE_OK
@@ -225,10 +221,10 @@ extern "C" fn AcpiOsWaitEventsComplete() {
 #[no_mangle]
 extern "C" fn AcpiOsWaitSemaphore(handle: *mut c_void, units: UINT32, _timeout: UINT16) -> ACPI_STATUS {
 // TODO: Handle timeout
-let semaphore: Arc<Semaphore>;
+let semaphore: Arc<RawSemaphore>;
 unsafe {
 Arc::increment_strong_count(handle);
-semaphore = Arc::from_raw(handle as *const Semaphore);
+semaphore = Arc::from_raw(handle as *const RawSemaphore);
 }
 lock_semaphore(semaphore, units as usize);
 AE_OK


@@ -6,7 +6,6 @@ use core::{
 };
 use acpi::{AcpiTables, HpetInfo};
 use alloc::vec::Vec;
 use kernel_common::instructions::pause;
-use spin::Mutex;
 use crate::cpu::paging::map_physical;
@@ -14,8 +13,9 @@ use super::{
 early_acpi::EarlyACPIHandler,
 ioapic::register_irq_handler,
 lapic::get_current_lapic_id,
+locks::Spinlock,
 scheduler::{schedule_task, yield_task, SCHEDULER_LOCK},
-sync::Spinlock,
+sync::RawSpinlock,
 task::{Task, TaskState, CURRENT_TASKS, CURRENT_TASK_LOCK, MULTITASKING_ENABLED},
 };
@@ -31,8 +31,8 @@ const TIMER_CONFIG_ENABLE: u64 = 4;
 static ADDRESS: AtomicPtr<u64> = AtomicPtr::new(null_mut());
 static PERIOD: AtomicUsize = AtomicUsize::new(0);
 static EARLY_SLEEP: AtomicBool = AtomicBool::new(false);
-static SLEEPING_LIST: Mutex<Vec<Task>> = Mutex::new(Vec::new());
-static SLEEP_LOCK: Spinlock = Spinlock::new();
+static SLEEPING_LIST: Spinlock<Vec<Task>> = Spinlock::new(Vec::new());
+static SLEEP_LOCK: RawSpinlock = RawSpinlock::new();
 fn ticks_to_us(ticks: usize) -> usize {
 let period = PERIOD.load(Ordering::SeqCst);
@@ -67,7 +67,7 @@ fn handler() {
 EARLY_SLEEP.store(false, Ordering::SeqCst);
 }
 if MULTITASKING_ENABLED.load(Ordering::SeqCst) {
-SLEEP_LOCK.lock();
+SLEEP_LOCK.raw_lock();
 {
 let mut sleeping_list = SLEEPING_LIST.lock();
 let current_time = get_current_time();
@@ -76,9 +76,9 @@ fn handler() {
 let mut task = sleeping_list.remove(0);
 task.sleep_until_us = 0;
 task.task_state = TaskState::Ready;
-SCHEDULER_LOCK.lock();
+SCHEDULER_LOCK.raw_lock();
 schedule_task(task);
-SCHEDULER_LOCK.unlock();
+SCHEDULER_LOCK.raw_unlock();
 } else {
 break;
 }
@@ -87,7 +87,7 @@ fn handler() {
 schedule_hpet_interrupt(task.sleep_until_us);
 }
 }
-SLEEP_LOCK.unlock();
+SLEEP_LOCK.raw_unlock();
 }
 }
 pub fn get_current_time() -> usize {
@@ -97,14 +97,14 @@ pub fn get_current_time() -> usize {
 }
 pub fn sleep(us: usize) {
 if MULTITASKING_ENABLED.load(Ordering::SeqCst) {
-CURRENT_TASK_LOCK.lock();
+CURRENT_TASK_LOCK.raw_lock();
 {
 let mut _current_task = CURRENT_TASKS.lock();
-let current_task = _current_task.get_mut(&get_current_lapic_id()).unwrap();
+let current_task = _current_task[get_current_lapic_id()].as_mut().unwrap();
 current_task.sleep_until_us = get_current_time() + us;
 current_task.task_state = TaskState::Sleeping;
 }
-CURRENT_TASK_LOCK.unlock();
+CURRENT_TASK_LOCK.raw_unlock();
 yield_task();
 } else {
 EARLY_SLEEP.store(true, Ordering::SeqCst);
@@ -121,14 +121,14 @@ pub fn sleep(us: usize) {
 }
 }
 pub fn sleep_internal(task: Task) {
-SLEEP_LOCK.lock();
+SLEEP_LOCK.raw_lock();
 {
 let mut sleeping_list = SLEEPING_LIST.lock();
 sleeping_list.push(task);
 sleeping_list.sort_by(|a, b| a.sleep_until_us.cmp(&b.sleep_until_us));
 schedule_hpet_interrupt(sleeping_list.first().unwrap().sleep_until_us);
 }
-SLEEP_LOCK.unlock();
+SLEEP_LOCK.raw_unlock();
 }
 pub fn setup_hpet(tables: &AcpiTables<EarlyACPIHandler>) {
 let hpet_info = HpetInfo::new(tables).unwrap();


@@ -6,10 +6,7 @@ use core::{
 use bitfield::bitfield;
 use crate::{
-cpu::{
-isr::{ISR_HANDLERS, ISR_HANDLERS_LOCK},
-paging::map_physical,
-},
+cpu::{isr::ISR_HANDLERS, paging::map_physical},
 sys::lapic::BSP_LAPIC_ID,
 };
@@ -86,9 +83,7 @@ pub fn set_irq_override(gsi: usize, vector: usize, polarity: u8, trigger: u8) {
 pub fn register_irq_handler(vector: usize, handler: fn()) {
 assert!(ISR_HANDLERS.lock()[vector].is_none());
 {
-ISR_HANDLERS_LOCK.lock();
 ISR_HANDLERS.lock()[vector] = Some(handler);
-ISR_HANDLERS_LOCK.unlock();
 }
 for i in 0..NEXT_IOAPIC_ID.load(Ordering::SeqCst) {
 let start = IOAPICS[i].start_gsi.load(Ordering::SeqCst);

kernel/src/sys/locks.rs (new file)

@@ -0,0 +1,28 @@
+use core::sync::atomic::{AtomicBool, AtomicUsize};
+use lock_api::{GuardSend, Mutex, RawMutex};
+use super::sync::RawSpinlock;
+unsafe impl RawMutex for RawSpinlock {
+const INIT: Self = RawSpinlock {
+locked: AtomicBool::new(false),
+lapic_id: AtomicUsize::new(0),
+};
+type GuardMarker = GuardSend;
+fn lock(&self) {
+self.raw_lock();
+}
+fn try_lock(&self) -> bool {
+unimplemented!();
+}
+unsafe fn unlock(&self) {
+self.raw_unlock();
+}
+}
+pub type Spinlock<T> = Mutex<RawSpinlock, T>;
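For context, a minimal usage sketch of the wrapped type (not part of this commit): lock_api's Mutex wrapper acquires the lock through RawSpinlock::raw_lock() and returns an RAII guard, which is why the call sites above can drop their manual lock()/unlock() pairs around the protected data. The COUNTER static and bump() function below are hypothetical examples for illustration only.

use crate::sys::locks::Spinlock;

// Hypothetical example data, not taken from the repository.
static COUNTER: Spinlock<u64> = Spinlock::new(0);

fn bump() -> u64 {
    // lock() spins via RawSpinlock::raw_lock() and returns a guard;
    // raw_unlock() runs automatically when the guard is dropped.
    let mut guard = COUNTER.lock();
    *guard += 1;
    *guard
}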


@@ -43,8 +43,8 @@ pub fn parse_madt(tables: &AcpiTables<EarlyACPIHandler>) {
 );
 }
 }
-INTERRUPTS_SETUP.store(true, Ordering::SeqCst);
 unsafe {
 sti();
 }
+INTERRUPTS_SETUP.store(true, Ordering::SeqCst);
 }


@@ -3,6 +3,7 @@ pub mod early_acpi;
 pub mod hpet;
 mod ioapic;
 pub mod lapic;
+pub mod locks;
 pub mod madt;
 pub mod pic;
 pub mod scheduler;


@@ -1,13 +1,13 @@
 use core::{arch::asm, sync::atomic::Ordering};
 use alloc::{collections::vec_deque::VecDeque, vec::Vec};
-use spin::Mutex;
 use crate::cpu::isr::ISRState;
 use super::{
 lapic::{get_current_lapic_id, schedule_timer_interrupt},
-sync::Spinlock,
+locks::Spinlock,
+sync::RawSpinlock,
 task::{switch_task, Task, TaskState, CURRENT_TASKS, CURRENT_TASK_LOCK, MULTITASKING_ENABLED},
 };
@@ -16,16 +16,16 @@ use super::sync::{IN_ISR_HANDLER, LOCKS_HELD};
 #[cfg(debug_assertions)]
 use kernel_common::instructions::{cli, sti};
-static SCHEDULER_LIST: Mutex<VecDeque<Task>> = Mutex::new(VecDeque::new());
-pub static IDLE_TASKS: Mutex<Vec<Task>> = Mutex::new(Vec::new());
-pub static SCHEDULER_LOCK: Spinlock = Spinlock::new();
+static SCHEDULER_LIST: Spinlock<VecDeque<Task>> = Spinlock::new(VecDeque::new());
+pub static IDLE_TASKS: Spinlock<Vec<Task>> = Spinlock::new(Vec::new());
+pub static SCHEDULER_LOCK: RawSpinlock = RawSpinlock::new();
 pub fn scheduler(state: &mut ISRState) {
 if !MULTITASKING_ENABLED.load(Ordering::SeqCst) {
 return;
 }
-CURRENT_TASK_LOCK.lock();
-SCHEDULER_LOCK.lock();
+CURRENT_TASK_LOCK.raw_lock();
+SCHEDULER_LOCK.raw_lock();
 let mut switch_to_task = None;
 {
 let mut scheduler_list = SCHEDULER_LIST.lock();
@@ -34,16 +34,16 @@ pub fn scheduler(state: &mut ISRState) {
 switch_to_task = Some(task);
 }
 }
-SCHEDULER_LOCK.unlock();
+SCHEDULER_LOCK.raw_unlock();
 if let Some(task) = switch_to_task {
 switch_task(state, task);
-CURRENT_TASK_LOCK.unlock();
+CURRENT_TASK_LOCK.raw_unlock();
 return;
 }
 let mut switch_idle = false;
 {
 let _current_task = CURRENT_TASKS.lock();
-let current_task = _current_task.get(&get_current_lapic_id());
+let current_task = _current_task[get_current_lapic_id()].as_ref();
 if current_task.is_none() {
 switch_idle = true;
 }
@@ -57,7 +57,7 @@ pub fn scheduler(state: &mut ISRState) {
 if switch_idle {
 switch_task(state, IDLE_TASKS.lock().pop().unwrap());
 }
-CURRENT_TASK_LOCK.unlock();
+CURRENT_TASK_LOCK.raw_unlock();
 }
 pub fn schedule_task(task: Task) {
 debug_assert!(SCHEDULER_LOCK.is_locked() || !MULTITASKING_ENABLED.load(Ordering::SeqCst));


@@ -14,7 +14,7 @@ use crate::{
 use super::{
 hpet::sleep,
 lapic::{get_current_lapic_id, send_ipi, BSP_LAPIC_ID, LAPICS, NEXT_LAPIC_ID},
-sync::Spinlock,
+sync::RawSpinlock,
 task::{ALL_APS_STARTED, STACK_SIZE, STARTING_AP_ID},
 };
@@ -28,7 +28,7 @@ const IPI_NMI: u32 = 0x400;
 const IPI_INIT: u32 = 0x500;
 const IPI_STARTUP: u32 = 0x600;
-static INVALIDATE_TLB_LOCK: Spinlock = Spinlock::new();
+static INVALIDATE_TLB_LOCK: RawSpinlock = RawSpinlock::new();
 pub fn start_aps() {
 let stack: Vec<u8> = vec![0; STACK_SIZE];
@@ -74,7 +74,7 @@ pub fn smp_invalidate_tlb() {
 if !ALL_APS_STARTED.load(Ordering::SeqCst) {
 return;
 }
-INVALIDATE_TLB_LOCK.lock();
+INVALIDATE_TLB_LOCK.raw_lock();
 let current_lapic_id = get_current_lapic_id();
 for i in 0..NEXT_LAPIC_ID.load(Ordering::SeqCst) {
 let lapic_id = LAPICS[i].lapic_id.load(Ordering::SeqCst);
@@ -83,7 +83,7 @@ pub fn smp_invalidate_tlb() {
 }
 send_ipi(lapic_id, ISR_INVALIDATE_TLB as u32);
 }
-INVALIDATE_TLB_LOCK.unlock();
+INVALIDATE_TLB_LOCK.raw_unlock();
 }
 pub fn smp_broadcast_panic() {
 BROADCASTED_PANIC.store(true, Ordering::SeqCst);


@@ -2,12 +2,12 @@ use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use alloc::{collections::vec_deque::VecDeque, sync::Arc};
 use kernel_common::instructions::{cli, sti};
-use spin::Mutex;
 use crate::sys::madt::INTERRUPTS_SETUP;
 use super::{
 lapic::get_current_lapic_id,
+locks::Spinlock,
 scheduler::{schedule_task, yield_task, SCHEDULER_LOCK},
 task::{Task, TaskState, CURRENT_TASKS, CURRENT_TASK_LOCK},
 };
@@ -15,41 +15,48 @@ use super::{
 pub static IN_ISR_HANDLER: [AtomicBool; 256] = [const { AtomicBool::new(false) }; 256];
 pub static LOCKS_HELD: [AtomicUsize; 256] = [const { AtomicUsize::new(0) }; 256];
-pub struct Semaphore {
-spinlock: Spinlock,
+pub struct RawSemaphore {
+spinlock: RawSpinlock,
 max_count: usize,
 current_count: AtomicUsize,
-blocked_list: Mutex<VecDeque<Task>>,
+blocked_list: Spinlock<VecDeque<Task>>,
 }
-pub struct Spinlock {
-locked: AtomicBool,
-lapic_id: AtomicUsize,
+pub struct RawSpinlock {
+pub locked: AtomicBool,
+pub lapic_id: AtomicUsize,
 }
-impl Spinlock {
+impl RawSpinlock {
 pub const fn new() -> Self {
 Self {
 locked: AtomicBool::new(false),
 lapic_id: AtomicUsize::new(0),
 }
 }
-pub fn lock(&self) {
-debug_assert!(INTERRUPTS_SETUP.load(Ordering::SeqCst));
+pub fn raw_lock(&self) {
 cli();
 while self.locked.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst).is_err() {}
-let lapic_id = get_current_lapic_id();
-LOCKS_HELD[lapic_id].fetch_add(1, Ordering::SeqCst);
-self.lapic_id.store(lapic_id, Ordering::SeqCst);
+if INTERRUPTS_SETUP.load(Ordering::SeqCst) {
+let lapic_id = get_current_lapic_id();
+LOCKS_HELD[lapic_id].fetch_add(1, Ordering::SeqCst);
+self.lapic_id.store(lapic_id, Ordering::SeqCst);
+}
 }
-pub fn unlock(&self) {
+pub fn raw_unlock(&self) {
 debug_assert!(self.locked.load(Ordering::SeqCst));
-let lapic_id = self.lapic_id.load(Ordering::SeqCst);
-debug_assert_eq!(lapic_id, get_current_lapic_id());
+let mut lapic_id = 0;
+let interrupts_setup = INTERRUPTS_SETUP.load(Ordering::SeqCst);
+if interrupts_setup {
+lapic_id = self.lapic_id.load(Ordering::SeqCst);
+debug_assert_eq!(lapic_id, get_current_lapic_id());
+}
 self.locked.store(false, Ordering::SeqCst);
-LOCKS_HELD[lapic_id].fetch_sub(1, Ordering::SeqCst);
-if !IN_ISR_HANDLER[lapic_id].load(Ordering::SeqCst) && LOCKS_HELD[lapic_id].load(Ordering::SeqCst) == 0 {
-unsafe {
-sti();
+if interrupts_setup {
+LOCKS_HELD[lapic_id].fetch_sub(1, Ordering::SeqCst);
+if !IN_ISR_HANDLER[lapic_id].load(Ordering::SeqCst) && LOCKS_HELD[lapic_id].load(Ordering::SeqCst) == 0 {
+unsafe {
+sti();
+}
 }
 }
 }
@@ -58,15 +65,15 @@ impl Spinlock {
 }
 }
-pub fn create_semaphore(max_count: usize, initial_count: usize) -> Arc<Semaphore> {
-Arc::new(Semaphore {
-spinlock: Spinlock::new(),
+pub fn create_semaphore(max_count: usize, initial_count: usize) -> Arc<RawSemaphore> {
+Arc::new(RawSemaphore {
+spinlock: RawSpinlock::new(),
 max_count,
 current_count: AtomicUsize::new(initial_count),
-blocked_list: Mutex::new(VecDeque::new()),
+blocked_list: Spinlock::new(VecDeque::new()),
 })
 }
-pub fn lock_semaphore(semaphore: Arc<Semaphore>, count: usize) {
+pub fn lock_semaphore(semaphore: Arc<RawSemaphore>, count: usize) {
 loop {
 let mut success = false;
 let current_count = semaphore.current_count.load(Ordering::SeqCst);
@@ -79,35 +86,35 @@ pub fn lock_semaphore(semaphore: Arc<Semaphore>, count: usize) {
 if success {
 return;
 }
-CURRENT_TASK_LOCK.lock();
+CURRENT_TASK_LOCK.raw_lock();
 {
 let mut current_task = CURRENT_TASKS.lock();
-let current_task = current_task.get_mut(&get_current_lapic_id()).unwrap();
+let current_task = current_task[get_current_lapic_id()].as_mut().unwrap();
 current_task.task_state = TaskState::SemaphoreBlocked;
 current_task.block_on_semaphore = Some(semaphore.clone());
 current_task.semaphore_requested_count = count;
 }
-CURRENT_TASK_LOCK.unlock();
+CURRENT_TASK_LOCK.raw_unlock();
 yield_task();
 }
 }
 pub fn lock_semaphore_internal(mut task: Task) {
 let semaphore = task.block_on_semaphore.as_ref().unwrap().clone();
-semaphore.spinlock.lock();
+semaphore.spinlock.raw_lock();
 if task.semaphore_requested_count > semaphore.current_count.load(Ordering::SeqCst) {
 semaphore.blocked_list.lock().push_back(task);
 } else {
 task.block_on_semaphore = None;
 task.semaphore_requested_count = 0;
 task.task_state = TaskState::Ready;
-SCHEDULER_LOCK.lock();
+SCHEDULER_LOCK.raw_lock();
 schedule_task(task);
-SCHEDULER_LOCK.unlock();
+SCHEDULER_LOCK.raw_unlock();
 }
-semaphore.spinlock.unlock();
+semaphore.spinlock.raw_unlock();
 }
-pub fn unlock_semaphore(semaphore: Arc<Semaphore>, count: usize) {
-semaphore.spinlock.lock();
+pub fn unlock_semaphore(semaphore: Arc<RawSemaphore>, count: usize) {
+semaphore.spinlock.raw_lock();
 {
 semaphore.current_count.fetch_add(count, Ordering::SeqCst);
 debug_assert!(semaphore.current_count.load(Ordering::SeqCst) <= semaphore.max_count);
@@ -115,10 +122,10 @@ pub fn unlock_semaphore(semaphore: Arc<Semaphore>, count: usize) {
 task.block_on_semaphore = None;
 task.semaphore_requested_count = 0;
 task.task_state = TaskState::Ready;
-SCHEDULER_LOCK.lock();
+SCHEDULER_LOCK.raw_lock();
 schedule_task(task);
-SCHEDULER_LOCK.unlock();
+SCHEDULER_LOCK.raw_unlock();
 }
 }
-semaphore.spinlock.unlock();
+semaphore.spinlock.raw_unlock();
 }


@@ -1,9 +1,7 @@
 use core::sync::atomic::{AtomicBool, AtomicI64, AtomicU64, AtomicUsize, Ordering};
 use alloc::{sync::Arc, vec, vec::Vec};
-use hashbrown::HashMap;
 use kernel_common::instructions::{cli, get_rflags, hlt, pause, sti};
-use spin::{Lazy, Mutex};
 use crate::{
 cpu::isr::ISRState,
@@ -17,8 +15,9 @@ use crate::{
 use super::{
 hpet::sleep_internal,
 lapic::{get_current_lapic_id, schedule_timer_interrupt},
+locks::Spinlock,
 scheduler::yield_task,
-sync::{lock_semaphore_internal, Semaphore, Spinlock},
+sync::{lock_semaphore_internal, RawSemaphore, RawSpinlock},
 };
 #[derive(PartialEq)]
@@ -37,14 +36,14 @@ pub struct Task {
 initial_func: fn(),
 pub task_state: TaskState,
 pub sleep_until_us: usize,
-pub block_on_semaphore: Option<Arc<Semaphore>>,
+pub block_on_semaphore: Option<Arc<RawSemaphore>>,
 pub semaphore_requested_count: usize,
 }
 pub const STACK_SIZE: usize = 64 * 1024;
 static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(2);
-pub static CURRENT_TASKS: Lazy<Mutex<HashMap<usize, Task>>> = Lazy::new(|| Mutex::new(HashMap::new()));
-pub static CURRENT_TASK_LOCK: Spinlock = Spinlock::new();
+pub static CURRENT_TASKS: Spinlock<[Option<Task>; 256]> = Spinlock::new([const { None }; 256]);
+pub static CURRENT_TASK_LOCK: RawSpinlock = RawSpinlock::new();
 static RFLAGS: AtomicU64 = AtomicU64::new(0);
 pub static MULTITASKING_ENABLED: AtomicBool = AtomicBool::new(false);
 pub static STARTING_AP_ID: AtomicI64 = AtomicI64::new(-1);
@@ -57,13 +56,13 @@ pub fn allocate_stack() -> u64 {
 pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
 debug_assert!(CURRENT_TASK_LOCK.is_locked());
 let mut _current_task = CURRENT_TASKS.lock();
-if let Some(mut current_task) = _current_task.remove(&get_current_lapic_id()) {
+if let Some(mut current_task) = _current_task[get_current_lapic_id()].take() {
 current_task.state = *current_state;
 match current_task.task_state {
 TaskState::Ready => {
-SCHEDULER_LOCK.lock();
+SCHEDULER_LOCK.raw_lock();
 schedule_task(current_task);
-SCHEDULER_LOCK.unlock();
+SCHEDULER_LOCK.raw_unlock();
 }
 TaskState::Idle => IDLE_TASKS.lock().push(current_task),
 TaskState::Terminated => {}
@@ -72,8 +71,7 @@ pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
 }
 }
 *current_state = new_task.state;
-let result = _current_task.insert(get_current_lapic_id(), new_task);
-assert!(result.is_none());
+_current_task[get_current_lapic_id()] = Some(new_task);
 schedule_timer_interrupt();
 }
 pub fn create_task(func: fn()) -> Task {
@@ -120,19 +118,19 @@ fn create_idle_task() {
 }
 }
 extern "C" fn task_entry() -> ! {
-CURRENT_TASK_LOCK.lock();
+CURRENT_TASK_LOCK.raw_lock();
 let func;
 {
 let task = CURRENT_TASKS.lock();
-func = task.get(&get_current_lapic_id()).unwrap().initial_func;
+func = task[get_current_lapic_id()].as_ref().unwrap().initial_func;
 }
-CURRENT_TASK_LOCK.unlock();
+CURRENT_TASK_LOCK.raw_unlock();
 func();
-CURRENT_TASK_LOCK.lock();
+CURRENT_TASK_LOCK.raw_lock();
 {
-CURRENT_TASKS.lock().get_mut(&get_current_lapic_id()).unwrap().task_state = TaskState::Terminated;
+CURRENT_TASKS.lock()[get_current_lapic_id()].as_mut().unwrap().task_state = TaskState::Terminated;
 }
-CURRENT_TASK_LOCK.unlock();
+CURRENT_TASK_LOCK.raw_unlock();
 yield_task();
 panic!("Failed to terminate task");
 }