diff --git a/kernel/src/sys/acpica_osl.rs b/kernel/src/sys/acpica_osl.rs index 4e09ccd..3435ab8 100644 --- a/kernel/src/sys/acpica_osl.rs +++ b/kernel/src/sys/acpica_osl.rs @@ -16,9 +16,8 @@ use crate::{ }; use super::{ - scheduler::SCHEDULER_LOCK, sync::{create_semaphore, lock_semaphore, unlock_semaphore, Semaphore, Spinlock}, - task::{CURRENT_TASK, MULTITASKING_ENABLED}, + task::{CURRENT_TASK, CURRENT_TASK_LOCK, MULTITASKING_ENABLED}, }; pub const AE_OK: ACPI_STATUS = 0; @@ -81,11 +80,11 @@ extern "C" fn AcpiOsGetThreadId() -> UINT64 { return 1; } let task_id; - SCHEDULER_LOCK.lock(); + CURRENT_TASK_LOCK.lock(); { task_id = CURRENT_TASK.lock().as_ref().unwrap().id; } - SCHEDULER_LOCK.unlock(); + CURRENT_TASK_LOCK.unlock(); task_id as UINT64 } #[no_mangle] diff --git a/kernel/src/sys/hpet.rs b/kernel/src/sys/hpet.rs index b1e2f23..81ba7cd 100644 --- a/kernel/src/sys/hpet.rs +++ b/kernel/src/sys/hpet.rs @@ -15,7 +15,7 @@ use super::{ ioapic::register_isa_irq_handler, scheduler::{schedule_task, yield_task, SCHEDULER_LOCK}, sync::Spinlock, - task::{Task, TaskState, CURRENT_TASK, MULTITASKING_ENABLED}, + task::{Task, TaskState, CURRENT_TASK, CURRENT_TASK_LOCK, MULTITASKING_ENABLED}, }; const REGISTER_CAPABILITIES: usize = 0; @@ -66,7 +66,6 @@ fn handler() { EARLY_SLEEP.store(false, Ordering::Relaxed); } if MULTITASKING_ENABLED.load(Ordering::Relaxed) { - SCHEDULER_LOCK.lock(); SLEEP_LOCK.lock(); { let mut sleeping_list = SLEEPING_LIST.lock(); @@ -76,7 +75,9 @@ fn handler() { let mut task = sleeping_list.remove(0); task.sleep_until_us = 0; task.task_state = TaskState::Ready; + SCHEDULER_LOCK.lock(); schedule_task(task); + SCHEDULER_LOCK.unlock(); } else { break; } @@ -86,7 +87,6 @@ fn handler() { } } SLEEP_LOCK.unlock(); - SCHEDULER_LOCK.unlock(); } } fn get_current_time() -> usize { @@ -96,14 +96,14 @@ fn get_current_time() -> usize { } pub fn sleep(us: usize) { if MULTITASKING_ENABLED.load(Ordering::Relaxed) { - SCHEDULER_LOCK.lock(); + CURRENT_TASK_LOCK.lock(); { 
let mut _current_task = CURRENT_TASK.lock(); let current_task = _current_task.as_mut().unwrap(); current_task.sleep_until_us = get_current_time() + us; current_task.task_state = TaskState::Sleeping; } - SCHEDULER_LOCK.unlock(); + CURRENT_TASK_LOCK.unlock(); yield_task(); } else { EARLY_SLEEP.store(true, Ordering::Relaxed); diff --git a/kernel/src/sys/scheduler.rs b/kernel/src/sys/scheduler.rs index d10b9e0..01289c7 100644 --- a/kernel/src/sys/scheduler.rs +++ b/kernel/src/sys/scheduler.rs @@ -29,9 +29,9 @@ pub fn scheduler(state: &mut ISRState) { switch_to_task = Some(task); } } + SCHEDULER_LOCK.unlock(); if let Some(task) = switch_to_task { switch_task(state, task); - SCHEDULER_LOCK.unlock(); CURRENT_TASK_LOCK.unlock(); return; } @@ -46,10 +46,10 @@ pub fn scheduler(state: &mut ISRState) { if switch_idle { switch_task(state, IDLE_TASK.lock().take().unwrap()); } - SCHEDULER_LOCK.unlock(); CURRENT_TASK_LOCK.unlock(); } pub fn schedule_task(task: Task) { + assert!(SCHEDULER_LOCK.is_locked() || !MULTITASKING_ENABLED.load(Ordering::Relaxed)); let mut scheduler_list = SCHEDULER_LIST.lock(); if scheduler_list.is_empty() { schedule_timer_interrupt(); diff --git a/kernel/src/sys/sync.rs b/kernel/src/sys/sync.rs index 75cd338..b63e0d5 100644 --- a/kernel/src/sys/sync.rs +++ b/kernel/src/sys/sync.rs @@ -41,6 +41,9 @@ impl Spinlock { } } } + pub fn is_locked(&self) -> bool { + self.locked.load(Ordering::Relaxed) + } } pub fn create_semaphore(max_count: usize, initial_count: usize) -> Arc<Semaphore> { @@ -85,12 +88,13 @@ pub fn lock_semaphore_internal(mut task: Task) { task.block_on_semaphore = None; task.semaphore_requested_count = 0; task.task_state = TaskState::Ready; + SCHEDULER_LOCK.lock(); schedule_task(task); + SCHEDULER_LOCK.unlock(); } semaphore.spinlock.unlock(); } pub fn unlock_semaphore(semaphore: Arc<Semaphore>, count: usize) { - SCHEDULER_LOCK.lock(); semaphore.spinlock.lock(); { semaphore.current_count.fetch_add(count, Ordering::Relaxed); @@ -99,9 +103,10 @@ pub fn
unlock_semaphore(semaphore: Arc<Semaphore>, count: usize) { task.block_on_semaphore = None; task.semaphore_requested_count = 0; task.task_state = TaskState::Ready; + SCHEDULER_LOCK.lock(); schedule_task(task); + SCHEDULER_LOCK.unlock(); } } semaphore.spinlock.unlock(); - SCHEDULER_LOCK.unlock(); } diff --git a/kernel/src/sys/task.rs b/kernel/src/sys/task.rs index 1185789..b7435ff 100644 --- a/kernel/src/sys/task.rs +++ b/kernel/src/sys/task.rs @@ -56,11 +56,16 @@ pub fn allocate_stack() -> u64 { Box::leak(Box::new(Stack([0; STACK_SIZE]))) as *mut Stack as u64 + STACK_SIZE as u64 } pub fn switch_task(current_state: &mut ISRState, new_task: Task) { + assert!(CURRENT_TASK_LOCK.is_locked()); let mut _current_task = CURRENT_TASK.lock(); if let Some(mut current_task) = _current_task.take() { current_task.state = *current_state; match current_task.task_state { - TaskState::Ready => schedule_task(current_task), + TaskState::Ready => { + SCHEDULER_LOCK.lock(); + schedule_task(current_task); + SCHEDULER_LOCK.unlock(); + } TaskState::Idle => *IDLE_TASK.lock() = Some(current_task), TaskState::Terminated => {} TaskState::Sleeping => sleep_internal(current_task), @@ -110,19 +115,19 @@ pub fn create_task(func: fn()) -> Task { task } extern "C" fn task_entry() -> ! { - SCHEDULER_LOCK.lock(); + CURRENT_TASK_LOCK.lock(); let func; { let task = CURRENT_TASK.lock(); func = task.as_ref().unwrap().initial_func; } - SCHEDULER_LOCK.unlock(); + CURRENT_TASK_LOCK.unlock(); func(); - SCHEDULER_LOCK.lock(); + CURRENT_TASK_LOCK.lock(); { CURRENT_TASK.lock().as_mut().unwrap().task_state = TaskState::Terminated; } - SCHEDULER_LOCK.unlock(); + CURRENT_TASK_LOCK.unlock(); yield_task(); panic!("Failed to terminate task"); }