parent 40696ce142
commit 52c31ccc61

4 changed files with 41 additions and 26 deletions
@@ -6,8 +6,9 @@ use raw_cpuid::CpuId;
 use crate::{
     cpu::{gdt::setup_gdt, idt::setup_idt},
     sys::{
-        lapic::{get_current_lapic_id, setup_lapic, setup_lapic_timer},
+        lapic::{setup_lapic, setup_lapic_timer},
         scheduler::yield_task,
+        sync::{decrease_lock_count, increase_lock_count},
         task::{CPUData, CPUDATA},
     },
 };
@@ -54,7 +55,11 @@ pub fn set_cpu_flags() {
         // Clear IF and DF
         asm!("wrmsr", in("rcx") 0xc0000084_u64, in("rax") 1 << 9 | 1 << 10, in("rdx") 0);
     }
-    let cpudata_address = &CPUDATA.lock()[get_current_lapic_id()] as *const CPUData as u64;
+    let lapic_id = increase_lock_count();
+    let cpudata_ref = &CPUDATA[lapic_id].lock();
+    let cpudata_ref: &CPUData = &cpudata_ref;
+    let cpudata_address = cpudata_ref as *const CPUData as u64;
+    decrease_lock_count(lapic_id);
     unsafe {
         asm!("wrmsr", in("rcx") 0xc0000102_u64, in("rax") cpudata_address, in("rdx") cpudata_address >> 32);
     }
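Note: MSR 0xc0000084 is the syscall SFMASK MSR (bits 9 and 10 mask IF and DF on syscall entry), and 0xc0000102 is KERNEL_GS_BASE, which here receives the address of this CPU's CPUData. The new code brackets the address computation with the lock-count helpers so no rescheduling interrupt can land between reading the LAPIC id and using it. A minimal sketch of the bracket, with a hypothetical helper name:

    // Hypothetical helper (not in the patch): read this CPU's CPUData address
    // while interrupts are masked by the lock-count machinery.
    fn current_cpudata_address() -> u64 {
        let lapic_id = increase_lock_count();   // cli() + bump LOCKS_HELD
        let guard = CPUDATA[lapic_id].lock();   // lock only this CPU's slot
        let address = &*guard as *const CPUData as u64;
        drop(guard);                            // release the slot lock
        decrease_lock_count(lapic_id);          // sti() once the count hits 0
        address
    }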
@@ -46,13 +46,12 @@ pub fn scheduler(state: &mut ISRState) {
     }
     let mut switch_idle = false;
     {
-        let current_tasks = CURRENT_TASKS.lock();
-        let current_task = current_tasks[get_current_lapic_id()].as_ref();
+        let current_task = CURRENT_TASKS[get_current_lapic_id()].lock();
         if current_task.is_none() {
             switch_idle = true;
         }
         if !switch_idle {
-            let current_task = current_task.unwrap();
+            let current_task = current_task.as_ref().unwrap();
             if current_task.task_state != TaskState::Ready && current_task.task_state != TaskState::Idle {
                 switch_idle = true;
             }
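With CURRENT_TASKS now an array of per-CPU slots (see the statics hunk below), the scheduler locks only its own CPU's slot instead of the whole table, and the guard now wraps an Option<Task>, hence the as_ref() before unwrap(). The access pattern in isolation, as a sketch:

    // Sketch: inspect the running task under this CPU's slot lock only.
    let slot = CURRENT_TASKS[get_current_lapic_id()].lock();
    if let Some(task) = slot.as_ref() {
        // read-only inspection while the slot is locked
        let runnable = task.task_state == TaskState::Ready
            || task.task_state == TaskState::Idle;
        let _ = runnable;
    }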
@@ -27,6 +27,24 @@ pub struct RawSpinlock {
     early_lock: AtomicBool,
 }
 
+pub fn increase_lock_count() -> usize {
+    cli();
+    if INTERRUPTS_SETUP.load(Ordering::SeqCst) {
+        let lapic_id = get_current_lapic_id();
+        LOCKS_HELD[lapic_id].fetch_add(1, Ordering::SeqCst);
+        return lapic_id;
+    } else {
+        return 0;
+    }
+}
+pub fn decrease_lock_count(lapic_id: usize) {
+    LOCKS_HELD[lapic_id].fetch_sub(1, Ordering::SeqCst);
+    if !IN_ISR_HANDLER[lapic_id].load(Ordering::SeqCst) && LOCKS_HELD[lapic_id].load(Ordering::SeqCst) == 0 {
+        unsafe {
+            sti();
+        }
+    }
+}
 impl RawSpinlock {
     pub const fn new() -> Self {
         Self {
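These two helpers centralize the interrupt bookkeeping: increase_lock_count() disables interrupts and records one more held lock for this CPU; decrease_lock_count() re-enables interrupts only when the last lock is released outside an ISR. Returning the LAPIC id from the increase side pins the counter to the CPU that took it, since the id cannot change while interrupts are off. Usage sketch for an interrupt-safe critical section:

    // Sketch: the bracket the rest of this commit uses around per-CPU state.
    let lapic_id = increase_lock_count();  // cli(); LOCKS_HELD[id] += 1
    // ... touch per-CPU data; no reschedule can occur here ...
    decrease_lock_count(lapic_id);         // LOCKS_HELD[id] -= 1; sti() at 0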
@@ -36,11 +54,9 @@ impl RawSpinlock {
         }
     }
     pub fn raw_lock(&self) {
-        cli();
+        let lapic_id = increase_lock_count();
         while self.locked.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst).is_err() {}
         if INTERRUPTS_SETUP.load(Ordering::SeqCst) {
-            let lapic_id = get_current_lapic_id();
-            LOCKS_HELD[lapic_id].fetch_add(1, Ordering::SeqCst);
             self.lapic_id.store(lapic_id, Ordering::SeqCst);
         } else {
             self.early_lock.store(true, Ordering::SeqCst);
@@ -57,12 +73,7 @@ impl RawSpinlock {
         }
         self.locked.store(false, Ordering::SeqCst);
         if interrupts_setup && !early_lock {
-            LOCKS_HELD[lapic_id].fetch_sub(1, Ordering::SeqCst);
-            if !IN_ISR_HANDLER[lapic_id].load(Ordering::SeqCst) && LOCKS_HELD[lapic_id].load(Ordering::SeqCst) == 0 {
-                unsafe {
-                    sti();
-                }
-            }
+            decrease_lock_count(lapic_id);
         }
     }
 }
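The lock and unlock paths now delegate to the shared helpers instead of duplicating the cli()/sti() logic, and the lapic_id captured at lock time is stored on the lock so the release can decrement the matching counter. Note that increase_lock_count() runs before the spin loop, so interrupts stay masked while waiting. A sketch of the resulting pairing, assuming the release method shown above is named raw_unlock():

    // Sketch (the name raw_unlock is an assumption mirroring raw_lock):
    static LOCK: RawSpinlock = RawSpinlock::new();
    LOCK.raw_lock();     // increase_lock_count() + spin on `locked`
    // ... critical section ...
    // LOCK.raw_unlock(): locked.store(false) + decrease_lock_count(lapic_id)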
@@ -21,7 +21,7 @@ use super::{
     locks::Spinlock,
     process::{get_kernel_process, Process, PROCESSES},
     scheduler::yield_task,
-    sync::{lock_semaphore_internal, RawSemaphore},
+    sync::{decrease_lock_count, increase_lock_count, lock_semaphore_internal, RawSemaphore},
 };
 
 #[derive(PartialEq)]
@@ -61,12 +61,12 @@ pub struct CPUData {
 
 const STACK_SIZE: usize = 64 * 1024;
 static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(2);
-pub static CURRENT_TASKS: Spinlock<[Option<Task>; 256]> = Spinlock::new([const { None }; 256]);
+pub static CURRENT_TASKS: [Spinlock<Option<Task>>; 256] = [const { Spinlock::new(None) }; 256];
 static RFLAGS: AtomicU64 = AtomicU64::new(0);
 pub static MULTITASKING_ENABLED: AtomicBool = AtomicBool::new(false);
 pub static STARTING_AP_ID: AtomicI64 = AtomicI64::new(-1);
 pub static ALL_APS_STARTED: AtomicBool = AtomicBool::new(false);
-pub static CPUDATA: Spinlock<[CPUData; 256]> = Spinlock::new([const { CPUData { kernel_stack: 0, user_stack: 0 } }; 256]);
+pub static CPUDATA: [Spinlock<CPUData>; 256] = [const { Spinlock::new(CPUData { kernel_stack: 0, user_stack: 0 }) }; 256];
 
 impl Drop for Stack {
     fn drop(&mut self) {
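This is the core data-structure change of the commit: the lock moves inside the array, so each CPU contends only on its own slot rather than on one table-wide lock. The two declarations side by side:

    // before: one lock serializes all 256 slots
    pub static CURRENT_TASKS: Spinlock<[Option<Task>; 256]> = Spinlock::new([const { None }; 256]);
    // after: 256 independent locks, one per LAPIC id
    pub static CURRENT_TASKS: [Spinlock<Option<Task>>; 256] = [const { Spinlock::new(None) }; 256];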
@@ -99,9 +99,8 @@ pub fn allocate_stack() -> Stack {
 }
 pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
     let lapic_id = get_current_lapic_id();
-    let mut current_tasks = CURRENT_TASKS.lock();
-    let cpu_data = &mut CPUDATA.lock()[lapic_id];
-    if let Some(mut current_task) = current_tasks[lapic_id].take() {
+    let cpu_data = &mut CPUDATA[lapic_id].lock();
+    if let Some(mut current_task) = CURRENT_TASKS[lapic_id].lock().take() {
         current_task.state = *current_state;
         current_task.user_stack = cpu_data.user_stack;
         unsafe {
@@ -136,7 +135,7 @@ pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
     unsafe {
         _fxrstor64(new_task.fxsave_region.0.as_ptr());
     }
-    current_tasks[get_current_lapic_id()] = Some(new_task);
+    *CURRENT_TASKS[lapic_id].lock() = Some(new_task);
     schedule_timer_interrupt();
 }
 pub fn create_task(process: Arc<Spinlock<Process>>, func: fn()) -> Task {
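switch_task() now takes two short lock windows on its own CPU's slot (remove the old task, later install the new one) instead of holding a table-wide guard across the entire context switch, and it reuses the lapic_id read once at entry rather than calling get_current_lapic_id() again. The shape of the pattern:

    // Sketch: two brief per-slot lock windows around the switch itself.
    let _old = CURRENT_TASKS[lapic_id].lock().take();   // window 1: remove old
    // ... save old task state, restore new task's registers and FPU state ...
    *CURRENT_TASKS[lapic_id].lock() = Some(new_task);   // window 2: install new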
@@ -198,8 +197,9 @@ pub fn terminate_current_task() -> ! {
 extern "C" fn task_entry() -> ! {
     let func;
     {
-        let task = CURRENT_TASKS.lock();
-        func = task[get_current_lapic_id()].as_ref().unwrap().initial_func;
+        let lapic_id = increase_lock_count();
+        func = CURRENT_TASKS[lapic_id].lock().as_ref().unwrap().initial_func;
+        decrease_lock_count(lapic_id);
     }
     func();
     terminate_current_task();
@@ -220,9 +220,9 @@ fn idle_main() {
     }
 }
 pub fn with_current_task<F: FnOnce(&mut Task)>(func: F) {
-    let mut current_tasks = CURRENT_TASKS.lock();
-    let lapic_id = get_current_lapic_id();
-    func(current_tasks[lapic_id].as_mut().unwrap());
+    let lapic_id = increase_lock_count();
+    func(CURRENT_TASKS[lapic_id].lock().as_mut().unwrap());
+    decrease_lock_count(lapic_id);
 }
 pub fn setup_multitasking() -> ! {
     let task;
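task_entry() and with_current_task() both adopt the same bracket: pin the CPU with increase_lock_count(), do one short locked access to this CPU's slot, then decrease_lock_count(). If the pattern keeps spreading, it could be factored into a helper; a hypothetical sketch (not in the patch):

    // Hypothetical generalization of the repeated bracket.
    fn with_lock_count<R>(f: impl FnOnce(usize) -> R) -> R {
        let lapic_id = increase_lock_count();
        let result = f(lapic_id);        // runs with interrupts masked
        decrease_lock_count(lapic_id);
        result
    }

    // e.g. with_current_task's body could then become:
    // with_lock_count(|id| func(CURRENT_TASKS[id].lock().as_mut().unwrap()));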