Rework locking and call AcpiLoadTables
All checks were successful
Build / build (push) Successful in 3m12s
parent 71be253f5f
commit e32f4590b8
6 changed files with 56 additions and 39 deletions
@@ -12,7 +12,7 @@ use core::{
 };
 
 use acpi::AcpiTables;
-use acpica_rs::{AcpiInitializeSubsystem, AcpiInitializeTables};
+use acpica_rs::{AcpiInitializeSubsystem, AcpiInitializeTables, AcpiLoadTables};
 use buddy_system_allocator::LockedHeap;
 use cpu::{gdt::setup_gdt, idt::setup_idt, paging::setup_paging};
 use kernel_common::{
@@ -74,6 +74,8 @@ fn main() {
     assert_eq!(status, AE_OK);
     status = unsafe { AcpiInitializeTables(null_mut(), 0, 0) };
     assert_eq!(status, AE_OK);
+    status = unsafe { AcpiLoadTables() };
+    assert_eq!(status, AE_OK);
 }
 #[panic_handler]
 fn panic(info: &PanicInfo) -> ! {
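The two added lines complete ACPICA's three-stage bring-up: after AcpiInitializeSubsystem and AcpiInitializeTables, AcpiLoadTables parses the DSDT and SSDTs into the ACPI namespace. A minimal host-side sketch of the call order and status checking; the lowercase functions are hypothetical stand-ins for the real extern "C" bindings so the sketch compiles on its own:

// Hypothetical stubs standing in for the acpica-rs FFI bindings.
type AcpiStatus = u32;
const AE_OK: AcpiStatus = 0;

fn acpi_initialize_subsystem() -> AcpiStatus { AE_OK } // stub
fn acpi_initialize_tables() -> AcpiStatus { AE_OK }    // stub
fn acpi_load_tables() -> AcpiStatus { AE_OK }          // stub

fn main() {
    // 1. Set up ACPICA's internal state.
    assert_eq!(acpi_initialize_subsystem(), AE_OK);
    // 2. Locate the RSDT/XSDT so the tables can be enumerated.
    assert_eq!(acpi_initialize_tables(), AE_OK);
    // 3. Added by this commit: load the DSDT/SSDTs into the namespace.
    assert_eq!(acpi_load_tables(), AE_OK);
}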
@@ -8,7 +8,6 @@ use core::{
 use acpica_rs::{ACPI_PHYSICAL_ADDRESS, ACPI_PREDEFINED_NAMES, ACPI_SIZE, ACPI_STATUS, ACPI_STRING, ACPI_TABLE_HEADER, UINT16, UINT32, UINT64};
 use alloc::{boxed::Box, sync::Arc};
 use kernel_common::log::log_raw;
-use spin::Mutex;
 
 use crate::{
     cpu::paging::{map_physical, unmap_physical},
@@ -153,10 +152,10 @@ extern "C" fn AcpiOsSignal() {
 }
 #[no_mangle]
 extern "C" fn AcpiOsSignalSemaphore(handle: *mut c_void, units: UINT32) -> ACPI_STATUS {
-    let semaphore: Arc<Mutex<Semaphore>>;
+    let semaphore: Arc<Semaphore>;
     unsafe {
         Arc::increment_strong_count(handle);
-        semaphore = Arc::from_raw(handle as *const Mutex<Semaphore>);
+        semaphore = Arc::from_raw(handle as *const Semaphore);
     }
     unlock_semaphore(semaphore, units as usize);
     AE_OK
@@ -194,10 +193,10 @@ extern "C" fn AcpiOsWaitEventsComplete() {
 #[no_mangle]
 extern "C" fn AcpiOsWaitSemaphore(handle: *mut c_void, units: UINT32, _timeout: UINT16) -> ACPI_STATUS {
     // TODO: Handle timeout
-    let semaphore: Arc<Mutex<Semaphore>>;
+    let semaphore: Arc<Semaphore>;
     unsafe {
         Arc::increment_strong_count(handle);
-        semaphore = Arc::from_raw(handle as *const Mutex<Semaphore>);
+        semaphore = Arc::from_raw(handle as *const Semaphore);
     }
     lock_semaphore(semaphore, units as usize);
     AE_OK
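Both OSL shims recover the semaphore from the opaque handle ACPICA holds; only the pointee type changes here (Semaphore instead of Mutex<Semaphore>). The pattern itself — bump the strong count, then Arc::from_raw — rebuilds an Arc without stealing the reference the handle owns. A standalone sketch (std types; create_handle and handle_to_arc are illustrative names, not the kernel's API):

use std::ffi::c_void;
use std::sync::Arc;

struct Semaphore {} // payload elided

// Hand an owned Arc to C as an opaque handle (roughly what an
// AcpiOsCreateSemaphore shim would do).
fn create_handle(sem: Arc<Semaphore>) -> *mut c_void {
    Arc::into_raw(sem) as *mut c_void
}

// Recover an Arc from the handle without consuming the handle's reference:
// bump the strong count first, then rebuild the Arc, as both shims do.
fn handle_to_arc(handle: *mut c_void) -> Arc<Semaphore> {
    unsafe {
        Arc::increment_strong_count(handle as *const Semaphore);
        Arc::from_raw(handle as *const Semaphore)
    }
}

fn main() {
    let handle = create_handle(Arc::new(Semaphore {}));
    let sem = handle_to_arc(handle);
    // One reference is still owned by the raw handle, one by `sem`.
    assert_eq!(Arc::strong_count(&sem), 2);
    // Balance the handle's reference so nothing leaks in this sketch.
    drop(unsafe { Arc::from_raw(handle as *const Semaphore) });
}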
@@ -14,6 +14,7 @@ use super::{
     early_acpi::EarlyACPIHandler,
     ioapic::register_isa_irq_handler,
     scheduler::{schedule_task, yield_task, SCHEDULER_LOCK},
+    sync::Spinlock,
     task::{Task, TaskState, CURRENT_TASK, MULTITASKING_ENABLED},
 };
 
@@ -30,6 +31,7 @@ static ADDRESS: AtomicPtr<u64> = AtomicPtr::new(null_mut());
 static PERIOD: AtomicUsize = AtomicUsize::new(0);
 static EARLY_SLEEP: AtomicBool = AtomicBool::new(false);
 static SLEEPING_LIST: Mutex<Vec<Task>> = Mutex::new(Vec::new());
+static SLEEP_LOCK: Spinlock = Spinlock::new();
 
 fn ticks_to_us(ticks: usize) -> usize {
     let period = PERIOD.load(Ordering::Relaxed);
@@ -65,6 +67,7 @@ fn handler() {
     }
     if MULTITASKING_ENABLED.load(Ordering::Relaxed) {
         SCHEDULER_LOCK.lock();
+        SLEEP_LOCK.lock();
         {
             let mut sleeping_list = SLEEPING_LIST.lock();
             let current_time = get_current_time();
@@ -82,6 +85,7 @@ fn handler() {
                 schedule_hpet_interrupt(task.sleep_until_us);
             }
         }
+        SLEEP_LOCK.unlock();
         SCHEDULER_LOCK.unlock();
     }
 }
@@ -116,10 +120,14 @@ pub fn sleep(us: usize) {
     }
 }
 pub fn sleep_internal(task: Task) {
-    let mut sleeping_list = SLEEPING_LIST.lock();
-    sleeping_list.push(task);
-    sleeping_list.sort_by(|a, b| a.sleep_until_us.cmp(&b.sleep_until_us));
-    schedule_hpet_interrupt(sleeping_list.first().unwrap().sleep_until_us);
+    SLEEP_LOCK.lock();
+    {
+        let mut sleeping_list = SLEEPING_LIST.lock();
+        sleeping_list.push(task);
+        sleeping_list.sort_by(|a, b| a.sleep_until_us.cmp(&b.sleep_until_us));
+        schedule_hpet_interrupt(sleeping_list.first().unwrap().sleep_until_us);
+    }
+    SLEEP_LOCK.unlock();
 }
 pub fn setup_hpet(tables: &AcpiTables<EarlyACPIHandler>) {
    let hpet_info = HpetInfo::new(tables).unwrap();
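SLEEP_LOCK now brackets every access to SLEEPING_LIST, and the timer handler takes it only after SCHEDULER_LOCK; sleep_internal takes SLEEP_LOCK alone, so there is a single global acquisition order and no cycle can form between the tick path and the sleep path. A host-side sketch of that ordering rule (std mutexes, toy wake loop; the names mirror the diff but nothing else is taken from it):

use std::sync::Mutex;

// Fixed acquisition order: SCHEDULER_LOCK first, then SLEEP_LOCK.
static SCHEDULER_LOCK: Mutex<()> = Mutex::new(());
static SLEEP_LOCK: Mutex<()> = Mutex::new(());
static SLEEPING_LIST: Mutex<Vec<u64>> = Mutex::new(Vec::new());

// Toy stand-in for the handler's wake loop; deadlines in microseconds.
fn timer_tick(now_us: u64) {
    let _sched = SCHEDULER_LOCK.lock().unwrap(); // 1st
    let _sleep = SLEEP_LOCK.lock().unwrap();     // 2nd
    let mut list = SLEEPING_LIST.lock().unwrap();
    // The list is kept sorted, so expired deadlines sit at the front.
    while list.first().is_some_and(|&t| t <= now_us) {
        list.remove(0);
    }
}

// Toy stand-in for sleep_internal: insert under SLEEP_LOCK, keep sorted.
fn sleep_internal(deadline_us: u64) {
    let _sleep = SLEEP_LOCK.lock().unwrap();
    let mut list = SLEEPING_LIST.lock().unwrap();
    list.push(deadline_us);
    list.sort_unstable();
}

fn main() {
    sleep_internal(20);
    sleep_internal(10);
    timer_tick(15); // wakes the 10us sleeper only
    assert_eq!(*SLEEPING_LIST.lock().unwrap(), vec![20]);
}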
@@ -8,7 +8,7 @@ use crate::cpu::isr::ISRState;
 use super::{
     lapic::schedule_timer_interrupt,
     sync::{Spinlock, IN_ISR_HANDLER, LOCKS_HELD},
-    task::{switch_task, Task, TaskState, CURRENT_TASK, MULTITASKING_ENABLED},
+    task::{switch_task, Task, TaskState, CURRENT_TASK, CURRENT_TASK_LOCK, MULTITASKING_ENABLED},
 };
 
 static SCHEDULER_LIST: Mutex<VecDeque<Task>> = Mutex::new(VecDeque::new());
@@ -19,6 +19,7 @@ pub fn scheduler(state: &mut ISRState) {
     if !MULTITASKING_ENABLED.load(Ordering::Relaxed) {
         return;
     }
+    CURRENT_TASK_LOCK.lock();
     SCHEDULER_LOCK.lock();
     let mut switch_to_task = None;
     {
@@ -31,6 +32,7 @@ pub fn scheduler(state: &mut ISRState) {
     if let Some(task) = switch_to_task {
         switch_task(state, task);
         SCHEDULER_LOCK.unlock();
+        CURRENT_TASK_LOCK.unlock();
         return;
     }
     let mut switch_idle = false;
@@ -45,6 +47,7 @@ pub fn scheduler(state: &mut ISRState) {
         switch_task(state, IDLE_TASK.lock().take().unwrap());
     }
     SCHEDULER_LOCK.unlock();
+    CURRENT_TASK_LOCK.unlock();
 }
 pub fn schedule_task(task: Task) {
     let mut scheduler_list = SCHEDULER_LIST.lock();
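scheduler() now has to release CURRENT_TASK_LOCK (in reverse order of acquisition, after SCHEDULER_LOCK) on both exit paths, which is easy to miss as the function grows. A drop-based guard is one conventional alternative; the sketch below is a self-contained illustration under that assumption, not the kernel's actual Spinlock API, which uses explicit lock/unlock:

use std::sync::atomic::{AtomicBool, Ordering};

pub struct Spinlock {
    locked: AtomicBool,
}

impl Spinlock {
    pub const fn new() -> Self {
        Spinlock { locked: AtomicBool::new(false) }
    }
    // Returns a guard instead of requiring a matching unlock() call.
    pub fn lock(&self) -> SpinlockGuard<'_> {
        // Spin until we flip `locked` from false to true.
        while self
            .locked
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            std::hint::spin_loop();
        }
        SpinlockGuard { lock: self }
    }
}

pub struct SpinlockGuard<'a> {
    lock: &'a Spinlock,
}

impl Drop for SpinlockGuard<'_> {
    fn drop(&mut self) {
        self.lock.locked.store(false, Ordering::Release);
    }
}

static CURRENT_TASK_LOCK: Spinlock = Spinlock::new();

fn scheduler(early_exit: bool) {
    let _guard = CURRENT_TASK_LOCK.lock();
    if early_exit {
        return; // guard drops here: lock released on the early path
    }
    // ... pick the next task ...
} // guard drops here on the fall-through path

fn main() {
    scheduler(true);
    scheduler(false);
}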
@@ -6,16 +6,17 @@ use spin::Mutex;
 
 use super::{
     scheduler::{schedule_task, yield_task, SCHEDULER_LOCK},
-    task::{Task, TaskState, CURRENT_TASK},
+    task::{Task, TaskState, CURRENT_TASK, CURRENT_TASK_LOCK},
 };
 
 pub static IN_ISR_HANDLER: AtomicBool = AtomicBool::new(false);
 pub static LOCKS_HELD: AtomicUsize = AtomicUsize::new(0);
 
 pub struct Semaphore {
+    spinlock: Spinlock,
     max_count: usize,
-    current_count: usize,
-    blocked_list: VecDeque<Task>,
+    current_count: AtomicUsize,
+    blocked_list: Mutex<VecDeque<Task>>,
 }
 pub struct Spinlock {
     locked: AtomicBool,
@@ -31,6 +32,7 @@ impl Spinlock {
         LOCKS_HELD.fetch_add(1, Ordering::Relaxed);
     }
     pub fn unlock(&self) {
+        assert!(self.locked.load(Ordering::Relaxed));
         self.locked.store(false, Ordering::Relaxed);
         LOCKS_HELD.fetch_sub(1, Ordering::Relaxed);
         if !IN_ISR_HANDLER.load(Ordering::Relaxed) && LOCKS_HELD.load(Ordering::Relaxed) == 0 {
@@ -41,28 +43,28 @@ impl Spinlock {
     }
 }
 
-pub fn create_semaphore(max_count: usize, initial_count: usize) -> Arc<Mutex<Semaphore>> {
-    Arc::new(Mutex::new(Semaphore {
+pub fn create_semaphore(max_count: usize, initial_count: usize) -> Arc<Semaphore> {
+    Arc::new(Semaphore {
+        spinlock: Spinlock::new(),
         max_count,
-        current_count: initial_count,
-        blocked_list: VecDeque::new(),
-    }))
+        current_count: AtomicUsize::new(initial_count),
+        blocked_list: Mutex::new(VecDeque::new()),
+    })
 }
-pub fn lock_semaphore(semaphore: Arc<Mutex<Semaphore>>, count: usize) {
+pub fn lock_semaphore(semaphore: Arc<Semaphore>, count: usize) {
     loop {
-        SCHEDULER_LOCK.lock();
         let mut success = false;
-        {
-            let mut semaphore = semaphore.lock();
-            if semaphore.current_count >= count {
-                semaphore.current_count -= count;
-                success = true;
-            }
+        let current_count = semaphore.current_count.load(Ordering::Relaxed);
+        if current_count >= count {
+            success = semaphore
+                .current_count
+                .compare_exchange(current_count, current_count - count, Ordering::Relaxed, Ordering::Relaxed)
+                .is_ok();
         }
         if success {
-            SCHEDULER_LOCK.unlock();
             return;
         }
+        CURRENT_TASK_LOCK.lock();
         {
             let mut current_task = CURRENT_TASK.lock();
             let current_task = current_task.as_mut().unwrap();
@@ -70,34 +72,36 @@ pub fn lock_semaphore(semaphore: Arc<Mutex<Semaphore>>, count: usize) {
             current_task.block_on_semaphore = Some(semaphore.clone());
             current_task.semaphore_requested_count = count;
         }
-        SCHEDULER_LOCK.unlock();
+        CURRENT_TASK_LOCK.unlock();
         yield_task();
     }
 }
 pub fn lock_semaphore_internal(mut task: Task) {
     let semaphore = task.block_on_semaphore.as_ref().unwrap().clone();
-    let mut semaphore = semaphore.lock();
-    if task.semaphore_requested_count > semaphore.current_count {
-        semaphore.blocked_list.push_back(task);
+    semaphore.spinlock.lock();
+    if task.semaphore_requested_count > semaphore.current_count.load(Ordering::Relaxed) {
+        semaphore.blocked_list.lock().push_back(task);
     } else {
         task.block_on_semaphore = None;
         task.semaphore_requested_count = 0;
        task.task_state = TaskState::Ready;
         schedule_task(task);
     }
+    semaphore.spinlock.unlock();
 }
-pub fn unlock_semaphore(semaphore: Arc<Mutex<Semaphore>>, count: usize) {
+pub fn unlock_semaphore(semaphore: Arc<Semaphore>, count: usize) {
     SCHEDULER_LOCK.lock();
+    semaphore.spinlock.lock();
     {
-        let mut semaphore = semaphore.lock();
-        semaphore.current_count += count;
-        assert!(semaphore.current_count <= semaphore.max_count);
-        while let Some(mut task) = semaphore.blocked_list.pop_front() {
+        semaphore.current_count.fetch_add(count, Ordering::Relaxed);
+        assert!(semaphore.current_count.load(Ordering::Relaxed) <= semaphore.max_count);
+        while let Some(mut task) = semaphore.blocked_list.lock().pop_front() {
             task.block_on_semaphore = None;
             task.semaphore_requested_count = 0;
             task.task_state = TaskState::Ready;
             schedule_task(task);
         }
     }
+    semaphore.spinlock.unlock();
     SCHEDULER_LOCK.unlock();
 }
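The reworked lock_semaphore no longer takes SCHEDULER_LOCK on its fast path: current_count is an AtomicUsize, and the decrement only commits if compare_exchange still sees the value that was read, so a racing acquirer forces a clean retry instead of corrupting the count. The acquire step in isolation (standalone sketch, Relaxed ordering as in the diff):

use std::sync::atomic::{AtomicUsize, Ordering};

// Lock-free "try acquire": commit the decrement only if no other CPU
// changed the count between our load and our compare_exchange.
fn try_acquire(current_count: &AtomicUsize, count: usize) -> bool {
    let observed = current_count.load(Ordering::Relaxed);
    observed >= count
        && current_count
            .compare_exchange(observed, observed - count, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
}

fn main() {
    let count = AtomicUsize::new(2);
    assert!(try_acquire(&count, 2));  // 2 -> 0
    assert!(!try_acquire(&count, 1)); // exhausted: the real code blocks and retries
}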
@@ -17,7 +17,7 @@ use super::{
     hpet::sleep_internal,
     lapic::schedule_timer_interrupt,
     scheduler::yield_task,
-    sync::{lock_semaphore_internal, Semaphore},
+    sync::{lock_semaphore_internal, Semaphore, Spinlock},
 };
 
 #[derive(PartialEq)]
@@ -36,7 +36,7 @@ pub struct Task {
     initial_func: fn(),
     pub task_state: TaskState,
     pub sleep_until_us: usize,
-    pub block_on_semaphore: Option<Arc<Mutex<Semaphore>>>,
+    pub block_on_semaphore: Option<Arc<Semaphore>>,
     pub semaphore_requested_count: usize,
 }
 
@@ -48,6 +48,7 @@ static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(2);
 struct Stack([u8; STACK_SIZE]);
 
 pub static CURRENT_TASK: Mutex<Option<Task>> = Mutex::new(None);
+pub static CURRENT_TASK_LOCK: Spinlock = Spinlock::new();
 static RFLAGS: AtomicU64 = AtomicU64::new(0);
 pub static MULTITASKING_ENABLED: AtomicBool = AtomicBool::new(false);
 
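Task's handle type changes from Arc<Mutex<Semaphore>> to Arc<Semaphore>: the outer mutex disappears because each Semaphore field now carries its own synchronization (an atomic counter plus an internally locked blocked list). A compiling sketch of that shape, with u64 task ids standing in for Task:

use std::collections::VecDeque;
use std::sync::atomic::AtomicUsize;
use std::sync::{Arc, Mutex};

// Before: Arc<Mutex<Semaphore>> — every access took the outer mutex.
// After: Arc<Semaphore> with per-field interior mutability.
struct Semaphore {
    max_count: usize,
    current_count: AtomicUsize,         // lock-free fast path
    blocked_list: Mutex<VecDeque<u64>>, // waiters, locked on its own
}

struct Task {
    block_on_semaphore: Option<Arc<Semaphore>>,
}

fn main() {
    let sem = Arc::new(Semaphore {
        max_count: 4,
        current_count: AtomicUsize::new(4),
        blocked_list: Mutex::new(VecDeque::new()),
    });
    let task = Task { block_on_semaphore: Some(sem.clone()) };
    // The task shares ownership of the semaphore without any outer lock.
    assert_eq!(Arc::strong_count(task.block_on_semaphore.as_ref().unwrap()), 2);
    assert!(task.block_on_semaphore.as_ref().unwrap().max_count >= 1);
}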