commit b6df353b2d
parent 264b58e6f3
5 changed files with 66 additions and 57 deletions
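In brief, as read from the hunks below: the global kernel lock (lock_kernel/unlock_kernel, backed by the KERNEL_LOCKED counter) becomes a dedicated SCHEDULER_LOCK spinlock, the counter is renamed to LOCKS_HELD, and every call site switches to explicit lock()/unlock() calls on the new Spinlock type. A minimal before/after sketch of the call-site pattern:

    // Before: free functions bumping a global counter.
    lock_kernel();
    // ... critical section over scheduler state ...
    unlock_kernel();

    // After: a named spinlock owned by the scheduler module.
    SCHEDULER_LOCK.lock();
    // ... critical section over scheduler state ...
    SCHEDULER_LOCK.unlock();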
@@ -15,7 +15,7 @@ use crate::{
 };
 
 use super::{
-    sync::{lock_kernel, unlock_kernel},
+    scheduler::SCHEDULER_LOCK,
     task::{CURRENT_TASK, MULTITASKING_ENABLED},
 };
 
@@ -68,11 +68,11 @@ extern "C" fn AcpiOsGetThreadId() -> UINT64 {
         return 1;
     }
     let task_id;
-    lock_kernel();
+    SCHEDULER_LOCK.lock();
     {
         task_id = CURRENT_TASK.lock().as_ref().unwrap().id;
     }
-    unlock_kernel();
+    SCHEDULER_LOCK.unlock();
     task_id as UINT64
 }
 #[no_mangle]
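Reconstructed from the + side of the two hunks above, the resulting function would read roughly as follows (the guard condition on the first line is assumed, since the hunk begins just below it):

    extern "C" fn AcpiOsGetThreadId() -> UINT64 {
        // Assumed guard: before multitasking is up there is only one context.
        if !MULTITASKING_ENABLED.load(Ordering::Relaxed) {
            return 1;
        }
        let task_id;
        SCHEDULER_LOCK.lock();
        {
            // The inner block scopes the CURRENT_TASK mutex guard so it is
            // dropped before the spinlock is released.
            task_id = CURRENT_TASK.lock().as_ref().unwrap().id;
        }
        SCHEDULER_LOCK.unlock();
        task_id as UINT64
    }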
@@ -13,8 +13,7 @@ use crate::cpu::paging::map_physical;
 use super::{
     early_acpi::EarlyACPIHandler,
     ioapic::register_isa_irq_handler,
-    scheduler::{schedule_task, yield_task},
-    sync::{lock_kernel, unlock_kernel},
+    scheduler::{schedule_task, yield_task, SCHEDULER_LOCK},
     task::{Task, TaskState, CURRENT_TASK, MULTITASKING_ENABLED},
 };
 
@@ -65,7 +64,8 @@ fn handler() {
         EARLY_SLEEP.store(false, Ordering::Relaxed);
     }
     if MULTITASKING_ENABLED.load(Ordering::Relaxed) {
-        lock_kernel();
+        SCHEDULER_LOCK.lock();
+        {
            let mut sleeping_list = SLEEPING_LIST.lock();
            let current_time = get_current_time();
            while let Some(task) = sleeping_list.first() {
@@ -81,7 +81,8 @@ fn handler() {
            if let Some(task) = sleeping_list.first() {
                schedule_hpet_interrupt(task.sleep_until_us);
            }
-        unlock_kernel();
+        }
+        SCHEDULER_LOCK.unlock();
     }
 }
 fn get_current_time() -> usize {
@@ -91,14 +92,14 @@ fn get_current_time() -> usize {
 }
 pub fn sleep(us: usize) {
     if MULTITASKING_ENABLED.load(Ordering::Relaxed) {
-        lock_kernel();
+        SCHEDULER_LOCK.lock();
         {
             let mut _current_task = CURRENT_TASK.lock();
             let current_task = _current_task.as_mut().unwrap();
             current_task.sleep_until_us = get_current_time() + us;
             current_task.task_state = TaskState::Sleeping;
         }
-        unlock_kernel();
+        SCHEDULER_LOCK.unlock();
         yield_task();
     } else {
         EARLY_SLEEP.store(true, Ordering::Relaxed);
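The extra { ... } block introduced in handler() is worth noting: it scopes the SLEEPING_LIST mutex guard so it is dropped before SCHEDULER_LOCK.unlock() can re-enable interrupts. The new sleep() follows the same shape; reconstructed from the + side of its hunk (the tail of the early-boot else branch lies outside the hunk and is left elided):

    pub fn sleep(us: usize) {
        if MULTITASKING_ENABLED.load(Ordering::Relaxed) {
            SCHEDULER_LOCK.lock();
            {
                // Mark the current task as sleeping under the scheduler lock.
                let mut _current_task = CURRENT_TASK.lock();
                let current_task = _current_task.as_mut().unwrap();
                current_task.sleep_until_us = get_current_time() + us;
                current_task.task_state = TaskState::Sleeping;
            }
            SCHEDULER_LOCK.unlock();
            // Yield only after the lock is released; see yield_task's check.
            yield_task();
        } else {
            // Early-boot path (continues past the end of the hunk).
            EARLY_SLEEP.store(true, Ordering::Relaxed);
        }
    }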
@@ -7,18 +7,19 @@ use crate::cpu::isr::ISRState;
 
 use super::{
     lapic::schedule_timer_interrupt,
-    sync::{lock_kernel, unlock_kernel, IN_ISR_HANDLER, KERNEL_LOCKED},
+    sync::{Spinlock, IN_ISR_HANDLER, LOCKS_HELD},
     task::{switch_task, Task, TaskState, CURRENT_TASK, MULTITASKING_ENABLED},
 };
 
 static SCHEDULER_LIST: Mutex<VecDeque<Task>> = Mutex::new(VecDeque::new());
 pub static IDLE_TASK: Mutex<Option<Task>> = Mutex::new(None);
+pub static SCHEDULER_LOCK: Spinlock = Spinlock::new();
 
 pub fn scheduler(state: &mut ISRState) {
     if !MULTITASKING_ENABLED.load(Ordering::Relaxed) {
         return;
     }
-    lock_kernel();
+    SCHEDULER_LOCK.lock();
     let mut switch_to_task = None;
     {
         let mut scheduler_list = SCHEDULER_LIST.lock();
@@ -29,7 +30,7 @@ pub fn scheduler(state: &mut ISRState) {
     }
     if let Some(task) = switch_to_task {
         switch_task(state, task);
-        unlock_kernel();
+        SCHEDULER_LOCK.unlock();
         return;
     }
     let mut switch_idle = false;
@@ -43,21 +44,17 @@ pub fn scheduler(state: &mut ISRState) {
     if switch_idle {
         switch_task(state, IDLE_TASK.lock().take().unwrap());
     }
-    unlock_kernel();
+    SCHEDULER_LOCK.unlock();
 }
 pub fn schedule_task(task: Task) {
-    lock_kernel();
-    {
     let mut scheduler_list = SCHEDULER_LIST.lock();
     if scheduler_list.is_empty() {
         schedule_timer_interrupt();
     }
     scheduler_list.push_back(task);
-    }
-    unlock_kernel();
 }
 pub fn yield_task() {
-    if IN_ISR_HANDLER.load(Ordering::Relaxed) || KERNEL_LOCKED.load(Ordering::Relaxed) > 0 {
+    if IN_ISR_HANDLER.load(Ordering::Relaxed) || LOCKS_HELD.load(Ordering::Relaxed) > 0 {
         panic!("Unsafe use of yield_task()");
     }
     unsafe {
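Two details in this file: schedule_task no longer takes any lock itself; callers such as unlock_semaphore (below) now hold SCHEDULER_LOCK around the call. And yield_task's guard now checks LOCKS_HELD, panicking if a task tries to yield while any spinlock is held, presumably because the switched-to task would otherwise resume with interrupts still disabled and the lock still taken. A sketch of the safe caller pattern, mirroring sleep() and task_entry():

    SCHEDULER_LOCK.lock();
    {
        // Mutate shared scheduler/task state here.
        CURRENT_TASK.lock().as_mut().unwrap().task_state = TaskState::Sleeping;
    }
    SCHEDULER_LOCK.unlock(); // LOCKS_HELD returns to 0; sti() re-enables interrupts
    yield_task();            // now safe: no locks held, not inside an ISR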
@@ -5,31 +5,42 @@ use kernel_common::instructions::{cli, sti};
 use spin::Mutex;
 
 use super::{
-    scheduler::{schedule_task, yield_task},
+    scheduler::{schedule_task, yield_task, SCHEDULER_LOCK},
     task::{Task, TaskState, CURRENT_TASK},
 };
 
-pub static KERNEL_LOCKED: AtomicUsize = AtomicUsize::new(0);
 pub static IN_ISR_HANDLER: AtomicBool = AtomicBool::new(false);
+pub static LOCKS_HELD: AtomicUsize = AtomicUsize::new(0);
 
 pub struct Semaphore {
     max_count: usize,
     current_count: usize,
     blocked_list: VecDeque<Task>,
 }
-pub fn lock_kernel() {
-    cli();
-    KERNEL_LOCKED.fetch_add(1, Ordering::Relaxed);
-}
-pub fn unlock_kernel() {
-    KERNEL_LOCKED.fetch_sub(1, Ordering::Relaxed);
-    if !IN_ISR_HANDLER.load(Ordering::Relaxed) && KERNEL_LOCKED.load(Ordering::Relaxed) == 0 {
-        unsafe {
-            sti();
-        }
-    }
-}
+pub struct Spinlock {
+    locked: AtomicBool,
+}
+
+impl Spinlock {
+    pub const fn new() -> Self {
+        Self { locked: AtomicBool::new(false) }
+    }
+    pub fn lock(&self) {
+        cli();
+        while !self.locked.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed).is_ok() {}
+        LOCKS_HELD.fetch_add(1, Ordering::Relaxed);
+    }
+    pub fn unlock(&self) {
+        self.locked.store(false, Ordering::Relaxed);
+        LOCKS_HELD.fetch_sub(1, Ordering::Relaxed);
+        if !IN_ISR_HANDLER.load(Ordering::Relaxed) && LOCKS_HELD.load(Ordering::Relaxed) == 0 {
+            unsafe {
+                sti();
+            }
+        }
+    }
+}
 
 pub fn create_semaphore(max_count: usize) -> Arc<Mutex<Semaphore>> {
     Arc::new(Mutex::new(Semaphore {
         max_count,
@@ -39,7 +50,7 @@ pub fn create_semaphore(max_count: usize) -> Arc<Mutex<Semaphore>> {
 }
 pub fn lock_semaphore(semaphore: Arc<Mutex<Semaphore>>, count: usize) {
     loop {
-        lock_kernel();
+        SCHEDULER_LOCK.lock();
         let mut success = false;
         {
             let mut semaphore = semaphore.lock();
@@ -49,7 +60,7 @@ pub fn lock_semaphore(semaphore: Arc<Mutex<Semaphore>>, count: usize) {
             }
         }
         if success {
-            unlock_kernel();
+            SCHEDULER_LOCK.unlock();
             return;
         }
         {
@@ -59,7 +70,7 @@ pub fn lock_semaphore(semaphore: Arc<Mutex<Semaphore>>, count: usize) {
             current_task.block_on_semaphore = Some(semaphore.clone());
             current_task.semaphore_requested_count = count;
         }
-        unlock_kernel();
+        SCHEDULER_LOCK.unlock();
         yield_task();
     }
 }
@@ -76,7 +87,7 @@ pub fn lock_semaphore_internal(mut task: Task) {
     }
 }
 pub fn unlock_semaphore(semaphore: Arc<Mutex<Semaphore>>, count: usize) {
-    lock_kernel();
+    SCHEDULER_LOCK.lock();
     {
         let mut semaphore = semaphore.lock();
         semaphore.current_count -= count;
@@ -87,5 +98,5 @@ pub fn unlock_semaphore(semaphore: Arc<Mutex<Semaphore>>, count: usize) {
             schedule_task(task);
         }
     }
-    unlock_kernel();
+    SCHEDULER_LOCK.unlock();
 }
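The new Spinlock couples mutual exclusion with interrupt gating: lock() executes cli() before spinning on a compare-exchange, and unlock() re-enables interrupts via sti() only when this was the outermost lock (LOCKS_HELD back to 0) and execution is not inside an ISR handler, where the interrupt-frame restore presumably handles the flags instead. A minimal usage sketch under those rules (MY_LOCK and with_lock are hypothetical names, not part of the commit):

    static MY_LOCK: Spinlock = Spinlock::new();

    fn with_lock() {
        MY_LOCK.lock();   // cli(), spin until acquired, LOCKS_HELD += 1
        // ... critical section: interrupts stay off on this CPU ...
        MY_LOCK.unlock(); // release; sti() only if no spinlock remains held
    }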
@@ -10,14 +10,14 @@ use spin::Mutex;
 use crate::{
     cpu::isr::ISRState,
     main,
-    sys::scheduler::{schedule_task, IDLE_TASK},
+    sys::scheduler::{schedule_task, IDLE_TASK, SCHEDULER_LOCK},
 };
 
 use super::{
     hpet::sleep_internal,
     lapic::schedule_timer_interrupt,
     scheduler::yield_task,
-    sync::{lock_kernel, lock_semaphore_internal, unlock_kernel, Semaphore},
+    sync::{lock_semaphore_internal, Semaphore},
 };
 
 #[derive(PartialEq)]
@@ -109,19 +109,19 @@ pub fn create_task(func: fn()) -> Task {
     task
 }
 extern "C" fn task_entry() -> ! {
-    lock_kernel();
+    SCHEDULER_LOCK.lock();
     let func;
     {
         let task = CURRENT_TASK.lock();
         func = task.as_ref().unwrap().initial_func;
     }
-    unlock_kernel();
+    SCHEDULER_LOCK.unlock();
     func();
-    lock_kernel();
+    SCHEDULER_LOCK.lock();
     {
         CURRENT_TASK.lock().as_mut().unwrap().task_state = TaskState::Terminated;
     }
-    unlock_kernel();
+    SCHEDULER_LOCK.unlock();
     yield_task();
     panic!("Failed to terminate task");
 }