Implement semaphores

Mathieu Strypsteen 2024-11-09 09:09:59 +01:00
parent ba4adfe0bb
commit e648dab896
4 changed files with 107 additions and 27 deletions

hpet.rs

@@ -64,12 +64,14 @@ fn handler() {
         EARLY_SLEEP.store(false, Ordering::Relaxed);
     }
     if MULTITASKING_ENABLED.load(Ordering::Relaxed) {
+        lock_kernel();
         let mut sleeping_list = SLEEPING_LIST.lock();
         let current_time = get_current_time();
         while let Some(task) = sleeping_list.first() {
             if task.sleep_until_us <= current_time {
                 let mut task = sleeping_list.remove(0);
-                task.task_state = TaskState::READY;
+                task.sleep_until_us = 0;
+                task.task_state = TaskState::Ready;
                 schedule_task(task);
             } else {
                 break;
@@ -78,6 +80,7 @@ fn handler() {
         if let Some(task) = sleeping_list.first() {
             schedule_hpet_interrupt(task.sleep_until_us);
         }
+        unlock_kernel();
     }
 }
 fn get_current_time() -> usize {
@@ -92,7 +95,7 @@ pub fn sleep(us: usize) {
         let mut _current_task = CURRENT_TASK.lock();
         let current_task = _current_task.as_mut().unwrap();
         current_task.sleep_until_us = get_current_time() + us;
-        current_task.task_state = TaskState::SLEEPING;
+        current_task.task_state = TaskState::Sleeping;
     }
     unlock_kernel();
     yield_task();
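
For context, lock_kernel()/unlock_kernel() now bracket the sleep-queue walk in the timer handler. A minimal sketch of that interrupt-lock pattern, assuming the counter semantics from sync.rs below (the cli()/sti() bodies and the `== 1` unlock condition are stand-ins; unlock_kernel's body is not fully visible in this diff):

```rust
use core::sync::atomic::{AtomicUsize, Ordering};

// Stand-ins for kernel_common::instructions::{cli, sti}.
fn cli() { /* disable interrupts */ }
fn sti() { /* re-enable interrupts */ }

static KERNEL_LOCKED: AtomicUsize = AtomicUsize::new(0);

// Re-entrant interrupt lock: every lock_kernel() bumps the counter,
// and interrupts return only when the outermost unlock_kernel() runs.
fn lock_kernel() {
    cli();
    KERNEL_LOCKED.fetch_add(1, Ordering::Relaxed);
}

fn unlock_kernel() {
    // Assumed condition: fetch_sub returns the previous value,
    // so == 1 means the counter just hit zero.
    if KERNEL_LOCKED.fetch_sub(1, Ordering::Relaxed) == 1 {
        sti();
    }
}

fn main() {
    lock_kernel();
    lock_kernel(); // nesting is fine; the counter is now 2
    unlock_kernel();
    unlock_kernel(); // interrupts would come back on here
}
```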

scheduler.rs

@@ -7,7 +7,7 @@ use crate::cpu::isr::ISRState;
 
 use super::{
     lapic::schedule_timer_interrupt,
-    sync::{lock_kernel, unlock_kernel},
+    sync::{lock_kernel, unlock_kernel, IN_ISR_HANDLER, KERNEL_LOCKED},
     task::{switch_task, Task, TaskState, CURRENT_TASK, MULTITASKING_ENABLED},
 };
@@ -36,7 +36,7 @@ pub fn scheduler(state: &mut ISRState) {
     {
         let _current_task = CURRENT_TASK.lock();
         let current_task = _current_task.as_ref().unwrap();
-        if current_task.task_state != TaskState::READY && current_task.task_state != TaskState::IDLE {
+        if current_task.task_state != TaskState::Ready && current_task.task_state != TaskState::Idle {
             switch_idle = true;
         }
     }
@@ -57,6 +57,9 @@ pub fn schedule_task(task: Task) {
     unlock_kernel();
 }
 pub fn yield_task() {
+    if IN_ISR_HANDLER.load(Ordering::Relaxed) || KERNEL_LOCKED.load(Ordering::Relaxed) > 0 {
+        panic!("Unsafe use of yield_task()");
+    }
     unsafe {
         asm!("int $254");
     }
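
The new guard in yield_task() makes misuse loud: yielding fires the scheduler through `int $254`, so doing it from an ISR or with the kernel lock held would run the scheduler with interrupts masked and the lock counter nonzero. Callers are expected to release the lock first, as sleep() in hpet.rs does. A runnable sketch of that convention with stubbed primitives (block_current_task is a hypothetical caller; the cli()/sti() side effects are elided):

```rust
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

static KERNEL_LOCKED: AtomicUsize = AtomicUsize::new(0);
static IN_ISR_HANDLER: AtomicBool = AtomicBool::new(false);

fn lock_kernel() { KERNEL_LOCKED.fetch_add(1, Ordering::Relaxed); } // cli() elided
fn unlock_kernel() { KERNEL_LOCKED.fetch_sub(1, Ordering::Relaxed); } // sti() elided

fn yield_task() {
    // Same check as the diff: refuse to yield in an unsafe context.
    if IN_ISR_HANDLER.load(Ordering::Relaxed) || KERNEL_LOCKED.load(Ordering::Relaxed) > 0 {
        panic!("Unsafe use of yield_task()");
    }
    // The real version triggers the scheduler with `int $254` here.
}

// Hypothetical blocking primitive following the enforced convention:
// mutate task state under the lock, drop the lock, then yield.
fn block_current_task() {
    lock_kernel();
    // ... mark the current task as blocked ...
    unlock_kernel(); // counter back to 0
    yield_task();    // safe: no lock held, not in an ISR
}

fn main() {
    block_current_task();
}
```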

sync.rs

@@ -1,10 +1,23 @@
 use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+
+use alloc::{collections::vec_deque::VecDeque, sync::Arc};
 use kernel_common::instructions::{cli, sti};
+use spin::Mutex;
 
-static KERNEL_LOCKED: AtomicUsize = AtomicUsize::new(0);
+use super::{
+    scheduler::{schedule_task, yield_task},
+    task::{Task, TaskState, CURRENT_TASK},
+};
+
+pub static KERNEL_LOCKED: AtomicUsize = AtomicUsize::new(0);
+pub static IN_ISR_HANDLER: AtomicBool = AtomicBool::new(false);
+
+pub struct Semaphore {
+    max_count: usize,
+    current_count: usize,
+    blocked_list: VecDeque<Task>,
+}
 
 pub fn lock_kernel() {
     cli();
     KERNEL_LOCKED.fetch_add(1, Ordering::Relaxed);
@@ -17,3 +30,62 @@ pub fn unlock_kernel() {
         }
     }
 }
+
+pub fn create_semaphore(max_count: usize) -> Arc<Mutex<Semaphore>> {
+    Arc::new(Mutex::new(Semaphore {
+        max_count,
+        current_count: 0,
+        blocked_list: VecDeque::new(),
+    }))
+}
+
+pub fn lock_semaphore(semaphore: Arc<Mutex<Semaphore>>, count: usize) {
+    loop {
+        lock_kernel();
+        let mut success = false;
+        {
+            let mut semaphore = semaphore.lock();
+            if semaphore.current_count + count <= semaphore.max_count {
+                semaphore.current_count += count;
+                success = true;
+            }
+        }
+        if success {
+            unlock_kernel();
+            return;
+        }
+        {
+            let mut current_task = CURRENT_TASK.lock();
+            let current_task = current_task.as_mut().unwrap();
+            current_task.task_state = TaskState::SemaphoreBlocked;
+            current_task.block_on_semaphore = Some(semaphore.clone());
+            current_task.semaphore_requested_count = count;
+        }
+        unlock_kernel();
+        yield_task();
+    }
+}
+
+pub fn lock_semaphore_internal(mut task: Task) {
+    let semaphore = task.block_on_semaphore.as_ref().unwrap().clone();
+    let mut semaphore = semaphore.lock();
+    if semaphore.current_count + task.semaphore_requested_count > semaphore.max_count {
+        semaphore.blocked_list.push_back(task);
+    } else {
+        task.block_on_semaphore = None;
+        task.semaphore_requested_count = 0;
+        task.task_state = TaskState::Ready;
+        schedule_task(task);
+    }
+}
+
+pub fn unlock_semaphore(semaphore: Arc<Mutex<Semaphore>>, count: usize) {
+    lock_kernel();
+    {
+        let mut semaphore = semaphore.lock();
+        semaphore.current_count -= count;
+        while let Some(mut task) = semaphore.blocked_list.pop_front() {
+            task.block_on_semaphore = None;
+            task.semaphore_requested_count = 0;
+            task.task_state = TaskState::Ready;
+            schedule_task(task);
+        }
+    }
+    unlock_kernel();
+}
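
Taken together this is a counting semaphore: lock_semaphore(sem, n) reserves n units out of max_count or blocks the calling task, and unlock_semaphore(sem, n) returns n units and requeues every blocked task, each of which retries its own lock_semaphore loop. A hypothetical usage sketch against the functions above (the capacity of 4 and the surrounding example function are invented):

```rust
// Hypothetical use of the new API, e.g. to bound concurrent access
// to some resource from kernel tasks.
fn example_task() {
    let sem = create_semaphore(4); // at most 4 units held at once

    lock_semaphore(sem.clone(), 1); // take 1 unit; blocks once all 4 are taken
    // ... critical work ...
    unlock_semaphore(sem, 1); // return the unit; blocked tasks get rescheduled
}
```

One design note: unlock_semaphore wakes the whole blocked_list rather than just enough tasks to cover the freed units; a task that still does not fit simply re-blocks through lock_semaphore_internal on its next context switch.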

task.rs

@@ -3,7 +3,7 @@ use core::{
     sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
 };
 
-use alloc::boxed::Box;
+use alloc::{boxed::Box, sync::Arc};
 use kernel_common::instructions::hlt;
 use spin::Mutex;
@@ -17,24 +17,27 @@ use super::{
     hpet::sleep_internal,
     lapic::schedule_timer_interrupt,
     scheduler::yield_task,
-    sync::{lock_kernel, unlock_kernel},
+    sync::{lock_kernel, lock_semaphore_internal, unlock_kernel, Semaphore},
 };
 
 #[derive(PartialEq)]
 pub enum TaskState {
-    READY,
-    IDLE,
-    TERMINATED,
-    SLEEPING,
+    Ready,
+    Idle,
+    Terminated,
+    Sleeping,
+    SemaphoreBlocked,
 }
 
 pub struct Task {
     pub id: usize,
     state: ISRState,
-    kernel_stack: u64,
+    _kernel_stack: Box<Stack>,
     initial_func: fn(),
     pub task_state: TaskState,
     pub sleep_until_us: usize,
+    pub block_on_semaphore: Option<Arc<Mutex<Semaphore>>>,
+    pub semaphore_requested_count: usize,
 }
 
 const STACK_SIZE: usize = 64 * 1024;
@@ -51,20 +54,16 @@ pub static MULTITASKING_ENABLED: AtomicBool = AtomicBool::new(false);
 pub fn allocate_stack() -> u64 {
     Box::leak(Box::new(Stack([0; STACK_SIZE]))) as *mut Stack as u64 + STACK_SIZE as u64
 }
-fn destroy_task(task: Task) {
-    unsafe {
-        drop(Box::from_raw((task.kernel_stack - STACK_SIZE as u64) as *mut Stack));
-    }
-}
 pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
     let mut _current_task = CURRENT_TASK.lock();
     if let Some(mut current_task) = _current_task.take() {
         current_task.state = *current_state;
         match current_task.task_state {
-            TaskState::READY => schedule_task(current_task),
-            TaskState::IDLE => *IDLE_TASK.lock() = Some(current_task),
-            TaskState::TERMINATED => destroy_task(current_task),
-            TaskState::SLEEPING => sleep_internal(current_task),
+            TaskState::Ready => schedule_task(current_task),
+            TaskState::Idle => *IDLE_TASK.lock() = Some(current_task),
+            TaskState::Terminated => {}
+            TaskState::Sleeping => sleep_internal(current_task),
+            TaskState::SemaphoreBlocked => lock_semaphore_internal(current_task),
         }
     }
     *current_state = new_task.state;
@@ -72,7 +71,8 @@ pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
     schedule_timer_interrupt();
 }
 pub fn create_task(func: fn()) -> Task {
-    let stack_address = allocate_stack();
+    let stack = Box::new(Stack([0; STACK_SIZE]));
+    let stack_address: *const Stack = &*stack;
     let task = Task {
         id: NEXT_TASK_ID.fetch_add(1, Ordering::Relaxed),
         state: ISRState {
@@ -96,13 +96,15 @@ pub fn create_task(func: fn()) -> Task {
             rip: task_entry as u64,
             cs: 8,
             rflags: RFLAGS.load(Ordering::Relaxed),
-            rsp: stack_address,
+            rsp: stack_address as u64 + STACK_SIZE as u64,
             ss: 16,
         },
-        kernel_stack: stack_address,
+        _kernel_stack: stack,
         initial_func: func,
-        task_state: TaskState::READY,
+        task_state: TaskState::Ready,
         sleep_until_us: 0,
+        block_on_semaphore: None,
+        semaphore_requested_count: 0,
     };
     task
 }
@@ -117,7 +119,7 @@ extern "C" fn task_entry() -> ! {
     func();
     lock_kernel();
     {
-        CURRENT_TASK.lock().as_mut().unwrap().task_state = TaskState::TERMINATED;
+        CURRENT_TASK.lock().as_mut().unwrap().task_state = TaskState::Terminated;
    }
     unlock_kernel();
     yield_task();
@@ -135,7 +137,7 @@ pub fn setup_multitasking() -> ! {
     }
     RFLAGS.store(rflags, core::sync::atomic::Ordering::Relaxed);
     let mut idle_task = create_task(idle);
-    idle_task.task_state = TaskState::IDLE;
+    idle_task.task_state = TaskState::Idle;
     {
         *IDLE_TASK.lock() = Some(idle_task);
     }
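
The stack handling change deserves a note: create_task previously leaked the stack via allocate_stack() and destroy_task() rebuilt a Box from the raw address to free it; now the Task owns its stack as _kernel_stack: Box<Stack>, so the Terminated arm can be empty and dropping the Task frees the stack. A simplified model of that ownership change (types reduced for illustration):

```rust
// Simplified model: the stack Box travels inside the Task, so cleanup
// is ordinary Drop instead of a manual Box::from_raw round trip.
struct Stack([u8; 64 * 1024]);

struct Task {
    _kernel_stack: Box<Stack>, // freed automatically when the Task drops
}

fn create_task() -> Task {
    let stack = Box::new(Stack([0u8; 64 * 1024]));
    // The real code derives rsp from the Box:
    // stack_address as u64 + STACK_SIZE as u64 (top of the stack).
    Task { _kernel_stack: stack }
}

fn main() {
    let task = create_task();
    drop(task); // replaces the old destroy_task() raw-pointer dance
}
```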