Add Spinlock type
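This commit replaces the global kernel lock (the free functions lock_kernel()/unlock_kernel() backed by the KERNEL_LOCKED counter) with a dedicated Spinlock type in sync. A single shared instance, SCHEDULER_LOCK, now guards the scheduler, the HPET sleep path, the semaphores, and the ACPI OS layer; the count of held locks moves to LOCKS_HELD.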

Mathieu Strypsteen 2024-11-09 18:25:51 +01:00
parent 264b58e6f3
commit b6df353b2d
5 changed files with 66 additions and 57 deletions

View file

@@ -15,7 +15,7 @@ use crate::{
 };
 use super::{
-    sync::{lock_kernel, unlock_kernel},
+    scheduler::SCHEDULER_LOCK,
     task::{CURRENT_TASK, MULTITASKING_ENABLED},
 };
@@ -68,11 +68,11 @@ extern "C" fn AcpiOsGetThreadId() -> UINT64 {
         return 1;
     }
     let task_id;
-    lock_kernel();
+    SCHEDULER_LOCK.lock();
     {
         task_id = CURRENT_TASK.lock().as_ref().unwrap().id;
     }
-    unlock_kernel();
+    SCHEDULER_LOCK.unlock();
     task_id as UINT64
 }
 #[no_mangle]

View file

@@ -13,8 +13,7 @@ use crate::cpu::paging::map_physical;
 use super::{
     early_acpi::EarlyACPIHandler,
     ioapic::register_isa_irq_handler,
-    scheduler::{schedule_task, yield_task},
-    sync::{lock_kernel, unlock_kernel},
+    scheduler::{schedule_task, yield_task, SCHEDULER_LOCK},
     task::{Task, TaskState, CURRENT_TASK, MULTITASKING_ENABLED},
 };
@@ -65,23 +64,25 @@ fn handler() {
         EARLY_SLEEP.store(false, Ordering::Relaxed);
     }
     if MULTITASKING_ENABLED.load(Ordering::Relaxed) {
-        lock_kernel();
-        let mut sleeping_list = SLEEPING_LIST.lock();
-        let current_time = get_current_time();
-        while let Some(task) = sleeping_list.first() {
-            if task.sleep_until_us <= current_time {
-                let mut task = sleeping_list.remove(0);
-                task.sleep_until_us = 0;
-                task.task_state = TaskState::Ready;
-                schedule_task(task);
-            } else {
-                break;
+        SCHEDULER_LOCK.lock();
+        {
+            let mut sleeping_list = SLEEPING_LIST.lock();
+            let current_time = get_current_time();
+            while let Some(task) = sleeping_list.first() {
+                if task.sleep_until_us <= current_time {
+                    let mut task = sleeping_list.remove(0);
+                    task.sleep_until_us = 0;
+                    task.task_state = TaskState::Ready;
+                    schedule_task(task);
+                } else {
+                    break;
+                }
             }
+            if let Some(task) = sleeping_list.first() {
+                schedule_hpet_interrupt(task.sleep_until_us);
+            }
         }
-        if let Some(task) = sleeping_list.first() {
-            schedule_hpet_interrupt(task.sleep_until_us);
-        }
-        unlock_kernel();
+        SCHEDULER_LOCK.unlock();
     }
 }
 fn get_current_time() -> usize {
@@ -91,14 +92,14 @@ fn get_current_time() -> usize {
 }
 pub fn sleep(us: usize) {
     if MULTITASKING_ENABLED.load(Ordering::Relaxed) {
-        lock_kernel();
+        SCHEDULER_LOCK.lock();
         {
             let mut _current_task = CURRENT_TASK.lock();
             let current_task = _current_task.as_mut().unwrap();
             current_task.sleep_until_us = get_current_time() + us;
             current_task.task_state = TaskState::Sleeping;
         }
-        unlock_kernel();
+        SCHEDULER_LOCK.unlock();
         yield_task();
     } else {
         EARLY_SLEEP.store(true, Ordering::Relaxed);
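The braces added around the sleeping-list work in handler() appear to serve one purpose: the spin::Mutex guard on SLEEPING_LIST is dropped at the end of the inner block, before SCHEDULER_LOCK.unlock() can re-enable interrupts. A minimal standalone sketch of that guard-scoping pattern (names hypothetical, not from the commit):

use spin::Mutex;

static SLEEPERS: Mutex<Vec<u64>> = Mutex::new(Vec::new());

fn wake_expired(now: u64) {
    // ...outer spinlock taken here, interrupts off...
    {
        let mut sleepers = SLEEPERS.lock(); // guard lives only in this block
        sleepers.retain(|&deadline| deadline > now);
    } // guard dropped here, so the list is unlocked before the outer unlock
    // ...outer spinlock released here...
}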

View file

@@ -7,18 +7,19 @@ use crate::cpu::isr::ISRState;
 use super::{
     lapic::schedule_timer_interrupt,
-    sync::{lock_kernel, unlock_kernel, IN_ISR_HANDLER, KERNEL_LOCKED},
+    sync::{Spinlock, IN_ISR_HANDLER, LOCKS_HELD},
     task::{switch_task, Task, TaskState, CURRENT_TASK, MULTITASKING_ENABLED},
 };
 static SCHEDULER_LIST: Mutex<VecDeque<Task>> = Mutex::new(VecDeque::new());
 pub static IDLE_TASK: Mutex<Option<Task>> = Mutex::new(None);
+pub static SCHEDULER_LOCK: Spinlock = Spinlock::new();
 pub fn scheduler(state: &mut ISRState) {
     if !MULTITASKING_ENABLED.load(Ordering::Relaxed) {
         return;
     }
-    lock_kernel();
+    SCHEDULER_LOCK.lock();
     let mut switch_to_task = None;
     {
         let mut scheduler_list = SCHEDULER_LIST.lock();
@@ -29,7 +30,7 @@ pub fn scheduler(state: &mut ISRState) {
     }
     if let Some(task) = switch_to_task {
         switch_task(state, task);
-        unlock_kernel();
+        SCHEDULER_LOCK.unlock();
         return;
     }
     let mut switch_idle = false;
@@ -43,21 +44,17 @@ pub fn scheduler(state: &mut ISRState) {
     if switch_idle {
         switch_task(state, IDLE_TASK.lock().take().unwrap());
     }
-    unlock_kernel();
+    SCHEDULER_LOCK.unlock();
 }
 pub fn schedule_task(task: Task) {
-    lock_kernel();
-    {
-        let mut scheduler_list = SCHEDULER_LIST.lock();
-        if scheduler_list.is_empty() {
-            schedule_timer_interrupt();
-        }
-        scheduler_list.push_back(task);
+    let mut scheduler_list = SCHEDULER_LIST.lock();
+    if scheduler_list.is_empty() {
+        schedule_timer_interrupt();
     }
-    unlock_kernel();
+    scheduler_list.push_back(task);
 }
 pub fn yield_task() {
-    if IN_ISR_HANDLER.load(Ordering::Relaxed) || KERNEL_LOCKED.load(Ordering::Relaxed) > 0 {
+    if IN_ISR_HANDLER.load(Ordering::Relaxed) || LOCKS_HELD.load(Ordering::Relaxed) > 0 {
         panic!("Unsafe use of yield_task()");
     }
     unsafe {
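Two invariants are visible in this file after the change: schedule_task() no longer takes any lock itself, so its callers hold SCHEDULER_LOCK around it (the HPET handler and unlock_semaphore() both do), and yield_task() panics when invoked from an ISR or while LOCKS_HELD is nonzero. A hedged sketch of the expected call pattern, with `task` standing in for a ready-to-run Task:

SCHEDULER_LOCK.lock();       // also disables interrupts via cli()
schedule_task(task);         // touches SCHEDULER_LIST under the lock
SCHEDULER_LOCK.unlock();     // sti() once no spinlocks remain held
yield_task();                // legal only now; would panic under a lock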

View file

@@ -5,31 +5,42 @@ use kernel_common::instructions::{cli, sti};
 use spin::Mutex;
 use super::{
-    scheduler::{schedule_task, yield_task},
+    scheduler::{schedule_task, yield_task, SCHEDULER_LOCK},
     task::{Task, TaskState, CURRENT_TASK},
 };
-pub static KERNEL_LOCKED: AtomicUsize = AtomicUsize::new(0);
 pub static IN_ISR_HANDLER: AtomicBool = AtomicBool::new(false);
+pub static LOCKS_HELD: AtomicUsize = AtomicUsize::new(0);
 pub struct Semaphore {
     max_count: usize,
     current_count: usize,
     blocked_list: VecDeque<Task>,
 }
-pub fn lock_kernel() {
-    cli();
-    KERNEL_LOCKED.fetch_add(1, Ordering::Relaxed);
+pub struct Spinlock {
+    locked: AtomicBool,
 }
-pub fn unlock_kernel() {
-    KERNEL_LOCKED.fetch_sub(1, Ordering::Relaxed);
-    if !IN_ISR_HANDLER.load(Ordering::Relaxed) && KERNEL_LOCKED.load(Ordering::Relaxed) == 0 {
-        unsafe {
-            sti();
+impl Spinlock {
+    pub const fn new() -> Self {
+        Self { locked: AtomicBool::new(false) }
+    }
+    pub fn lock(&self) {
+        cli();
+        while !self.locked.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed).is_ok() {}
+        LOCKS_HELD.fetch_add(1, Ordering::Relaxed);
+    }
+    pub fn unlock(&self) {
+        self.locked.store(false, Ordering::Relaxed);
+        LOCKS_HELD.fetch_sub(1, Ordering::Relaxed);
+        if !IN_ISR_HANDLER.load(Ordering::Relaxed) && LOCKS_HELD.load(Ordering::Relaxed) == 0 {
+            unsafe {
+                sti();
+            }
         }
     }
 }
 pub fn create_semaphore(max_count: usize) -> Arc<Mutex<Semaphore>> {
     Arc::new(Mutex::new(Semaphore {
         max_count,
@@ -39,7 +50,7 @@ pub fn create_semaphore(max_count: usize) -> Arc<Mutex<Semaphore>> {
 }
 pub fn lock_semaphore(semaphore: Arc<Mutex<Semaphore>>, count: usize) {
     loop {
-        lock_kernel();
+        SCHEDULER_LOCK.lock();
         let mut success = false;
         {
             let mut semaphore = semaphore.lock();
@@ -49,7 +60,7 @@ pub fn lock_semaphore(semaphore: Arc<Mutex<Semaphore>>, count: usize) {
             }
         }
         if success {
-            unlock_kernel();
+            SCHEDULER_LOCK.unlock();
             return;
         }
         {
@@ -59,7 +70,7 @@ pub fn lock_semaphore(semaphore: Arc<Mutex<Semaphore>>, count: usize) {
             current_task.block_on_semaphore = Some(semaphore.clone());
             current_task.semaphore_requested_count = count;
         }
-        unlock_kernel();
+        SCHEDULER_LOCK.unlock();
         yield_task();
     }
 }
@@ -76,7 +87,7 @@ pub fn lock_semaphore_internal(mut task: Task) {
     }
 }
 pub fn unlock_semaphore(semaphore: Arc<Mutex<Semaphore>>, count: usize) {
-    lock_kernel();
+    SCHEDULER_LOCK.lock();
     {
         let mut semaphore = semaphore.lock();
         semaphore.current_count -= count;
@@ -87,5 +98,5 @@ pub fn unlock_semaphore(semaphore: Arc<Mutex<Semaphore>>, count: usize) {
             schedule_task(task);
         }
     }
-    unlock_kernel();
+    SCHEDULER_LOCK.unlock();
 }
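The new Spinlock ties mutual exclusion to interrupt state: lock() runs cli() before spinning, and unlock() only executes sti() once LOCKS_HELD reaches zero outside an ISR, so nested locks keep interrupts masked until the outermost unlock. One caveat worth noting: compare_exchange with Ordering::Relaxed on both success and failure imposes no acquire/release ordering, so the lock does not by itself order the memory accesses it protects. A minimal sketch of the conventional ordering (illustrative only, without the cli()/sti() and LOCKS_HELD bookkeeping from the commit):

use core::sync::atomic::{AtomicBool, Ordering};

pub struct Spinlock {
    locked: AtomicBool,
}

impl Spinlock {
    pub const fn new() -> Self {
        Self { locked: AtomicBool::new(false) }
    }
    pub fn lock(&self) {
        // Acquire on success: reads/writes in the critical section
        // cannot be reordered before the lock is taken.
        while self
            .locked
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            core::hint::spin_loop();
        }
    }
    pub fn unlock(&self) {
        // Release: writes in the critical section are published
        // before the lock is observed as free.
        self.locked.store(false, Ordering::Release);
    }
}

Relaxed may happen to work on a single x86 core with interrupts already masked, but the compiler is still free to reorder around it, and it breaks outright once a second CPU spins on the same lock.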

View file

@@ -10,14 +10,14 @@ use spin::Mutex;
 use crate::{
     cpu::isr::ISRState,
     main,
-    sys::scheduler::{schedule_task, IDLE_TASK},
+    sys::scheduler::{schedule_task, IDLE_TASK, SCHEDULER_LOCK},
 };
 use super::{
     hpet::sleep_internal,
     lapic::schedule_timer_interrupt,
     scheduler::yield_task,
-    sync::{lock_kernel, lock_semaphore_internal, unlock_kernel, Semaphore},
+    sync::{lock_semaphore_internal, Semaphore},
 };
 #[derive(PartialEq)]
@@ -109,19 +109,19 @@ pub fn create_task(func: fn()) -> Task {
     task
 }
 extern "C" fn task_entry() -> ! {
-    lock_kernel();
+    SCHEDULER_LOCK.lock();
     let func;
     {
         let task = CURRENT_TASK.lock();
         func = task.as_ref().unwrap().initial_func;
     }
-    unlock_kernel();
+    SCHEDULER_LOCK.unlock();
     func();
-    lock_kernel();
+    SCHEDULER_LOCK.lock();
     {
         CURRENT_TASK.lock().as_mut().unwrap().task_state = TaskState::Terminated;
     }
-    unlock_kernel();
+    SCHEDULER_LOCK.unlock();
     yield_task();
     panic!("Failed to terminate task");
 }
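task_entry() now brackets each touch of CURRENT_TASK with the scheduler lock and releases it before calling into the task body and before the final yield_task(), which would otherwise trip the LOCKS_HELD panic. A sketch of the resulting lifecycle, with worker as a hypothetical task body:

fn worker() {
    // Runs with no spinlocks held and interrupts enabled.
    sleep(1000); // may block; sleep() takes SCHEDULER_LOCK internally
}
// create_task(worker) builds the Task; once it is scheduled, task_entry():
//   1. locks SCHEDULER_LOCK, reads initial_func, unlocks
//   2. calls worker()
//   3. locks again, marks the task Terminated, unlocks
//   4. yield_task() hands the CPU away and never returns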