Improve locking

Mathieu Strypsteen 2024-12-14 10:08:07 +01:00
parent 343babb5c4
commit 1c796d75b5
7 changed files with 115 additions and 174 deletions
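The theme of the diff: raw lock/unlock pairs (`RawSpinlock`) guarding data that lived in separate statics and atomics are replaced by data-owning `Spinlock<T>` wrappers, so the lock is taken and released through an RAII guard. A minimal sketch of the guard pattern, assuming a bare-bones spinlock (an illustrative re-creation, not the kernel's actual `sys::locks::Spinlock`):

```rust
use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicBool, Ordering};

// Illustrative data-owning spinlock: the only way to reach T is
// through lock(), and the lock is released when the guard drops.
pub struct Spinlock<T> {
    locked: AtomicBool,
    data: UnsafeCell<T>,
}

unsafe impl<T: Send> Sync for Spinlock<T> {}

impl<T> Spinlock<T> {
    pub const fn new(data: T) -> Self {
        Self { locked: AtomicBool::new(false), data: UnsafeCell::new(data) }
    }
    pub fn lock(&self) -> SpinlockGuard<'_, T> {
        // Spin until we win the compare-exchange.
        while self.locked.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
            core::hint::spin_loop();
        }
        SpinlockGuard { lock: self }
    }
}

pub struct SpinlockGuard<'a, T> {
    lock: &'a Spinlock<T>,
}

impl<T> Deref for SpinlockGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        unsafe { &*self.lock.data.get() }
    }
}

impl<T> DerefMut for SpinlockGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.lock.data.get() }
    }
}

impl<T> Drop for SpinlockGuard<'_, T> {
    fn drop(&mut self) {
        // Unlock happens automatically; no raw_unlock call to forget.
        self.lock.locked.store(false, Ordering::Release);
    }
}
```

Because the data is only reachable through `lock()`, forgetting to take the lock, or to release it on an early return, becomes a type error rather than a latent race.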

File: main.rs

@@ -23,7 +23,7 @@ use kernel_common::{
     paging::{KERNEL_HEAP_INITIAL_SIZE, KERNEL_HEAP_START},
 };
 use log::{error, info};
-use misc::display::{display_print, setup_display};
+use misc::display::{setup_display, DISPLAY};
 use sys::{
     acpica_osl::AE_OK,
     early_acpi::EarlyACPIHandler,
@@ -115,7 +115,10 @@ fn panic(info: &PanicInfo) -> ! {
         }
         error!("{}", info);
         let str = format!("{}", info);
-        display_print(&str);
+        let mut display = DISPLAY.lock();
+        if let Some(display) = display.as_mut() {
+            display.print(&str);
+        }
     }
     loop {
         hlt();
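One subtlety in the new panic path: `DISPLAY.lock()` spins, so a panic raised while the same CPU already holds the display lock would hang here instead of printing. A hypothetical guard against that, assuming the kernel's Spinlock ever grows a `try_lock` method (no such method is part of this commit):

```rust
// Hypothetical: try_lock() is NOT in this codebase; sketch only.
if let Some(mut display) = DISPLAY.try_lock() {
    if let Some(display) = display.as_mut() {
        display.print(&str);
    }
}
```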

File: misc/display.rs

@@ -1,8 +1,3 @@
-use core::{
-    ptr::null_mut,
-    sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
-};
 use alloc::{string::ToString, vec};
 use embedded_graphics::{
     mono_font::{iso_8859_13::FONT_10X20, MonoTextStyle},
@@ -12,91 +7,76 @@ use embedded_graphics::{
 };
 use kernel_common::loader_struct::FramebufferInfo;
-use crate::{
-    cpu::paging::map_physical,
-    misc::draw_target::FramebufferTarget,
-    sys::{locks::Spinlock, madt::INTERRUPTS_SETUP, sync::RawSpinlock},
-};
+use crate::{cpu::paging::map_physical, misc::draw_target::FramebufferTarget, sys::locks::Spinlock};
-static FRAMEBUFFER: Spinlock<Option<FramebufferTarget>> = Spinlock::new(None);
-static FRAMEBUFFER_LOCK: RawSpinlock = RawSpinlock::new();
-static FRAMEBUFFER_ADDR: AtomicPtr<u8> = AtomicPtr::new(null_mut());
-static WIDTH: AtomicUsize = AtomicUsize::new(0);
-static HEIGHT: AtomicUsize = AtomicUsize::new(0);
-static CURRENT_X: AtomicUsize = AtomicUsize::new(0);
-static CURRENT_Y: AtomicUsize = AtomicUsize::new(0);
+pub struct Display {
+    framebuffer: FramebufferTarget,
+    framebuffer_addr: u64,
+    width: usize,
+    height: usize,
+    current_x: usize,
+    current_y: usize,
+}
-fn write_char(x: usize, y: usize, c: char) {
-    let fb = &mut *FRAMEBUFFER.lock();
+pub static DISPLAY: Spinlock<Option<Display>> = Spinlock::new(None);
+impl Display {
+    fn write_char(&mut self, x: usize, y: usize, c: char) {
         let str = c.to_string();
         let pos = Point::new(x as i32 * 10 + 9, y as i32 * 20 + 19);
         let style = MonoTextStyle::new(&FONT_10X20, Bgr888::WHITE);
         let text = Text::new(&str, pos, style);
-    text.draw(fb.as_mut().unwrap()).unwrap();
+        text.draw(&mut self.framebuffer).unwrap();
     }
-fn scroll() {
-    let fb = &mut *FRAMEBUFFER.lock();
-    let fb = fb.as_mut().unwrap();
-    let height = HEIGHT.load(Ordering::SeqCst);
-    let line_size = fb.stride * 20 * 4;
-    fb.framebuffer.copy_within(line_size..line_size * height, 0);
-    fb.framebuffer[line_size * (height - 1)..].fill(0);
-    CURRENT_Y.store(CURRENT_Y.load(Ordering::SeqCst) - 1, Ordering::SeqCst);
+    fn scroll(&mut self) {
+        let line_size = self.framebuffer.stride * 20 * 4;
+        self.framebuffer.framebuffer.copy_within(line_size..line_size * self.height, 0);
+        self.framebuffer.framebuffer[line_size * (self.height - 1)..].fill(0);
+        self.current_y -= 1;
     }
-fn copy_to_fb() {
-    let fb = &mut *FRAMEBUFFER.lock();
-    let fb = fb.as_mut().unwrap();
-    let addr = FRAMEBUFFER_ADDR.load(Ordering::SeqCst);
-    let size = fb.stride * fb.height * 4;
+    fn copy_to_fb(&mut self) {
+        let addr = self.framebuffer_addr as *mut u8;
+        let size = self.framebuffer.stride * self.framebuffer.height * 4;
         unsafe {
-        addr.copy_from_nonoverlapping(fb.framebuffer.as_ptr(), size);
+            addr.copy_from_nonoverlapping(self.framebuffer.framebuffer.as_ptr(), size);
         }
     }
-pub fn display_print(str: &str) {
-    if FRAMEBUFFER_ADDR.load(Ordering::SeqCst).is_null() {
-        return;
-    }
-    if INTERRUPTS_SETUP.load(Ordering::SeqCst) {
-        FRAMEBUFFER_LOCK.raw_lock();
-    }
-    let mut current_x = CURRENT_X.load(Ordering::SeqCst);
-    let mut current_y = CURRENT_Y.load(Ordering::SeqCst);
-    let width = WIDTH.load(Ordering::SeqCst);
-    let height = HEIGHT.load(Ordering::SeqCst);
+    pub fn print(&mut self, str: &str) {
         for c in str.chars() {
             if c == '\n' {
-            current_x = 0;
-            current_y += 1;
+                self.current_x = 0;
+                self.current_y += 1;
                 continue;
             }
-        if current_x == width {
-            current_x = 0;
-            current_y += 1;
+            if self.current_x == self.width {
+                self.current_x = 0;
+                self.current_y += 1;
             }
-        if current_y == height {
-            scroll();
-            current_y -= 1;
+            if self.current_y == self.height {
+                self.scroll();
+                self.current_y -= 1;
             }
-        write_char(current_x, current_y, c);
-        current_x += 1;
+            self.write_char(self.current_x, self.current_y, c);
+            self.current_x += 1;
         }
-    CURRENT_X.store(current_x, Ordering::SeqCst);
-    CURRENT_Y.store(current_y, Ordering::SeqCst);
-    copy_to_fb();
-    if INTERRUPTS_SETUP.load(Ordering::SeqCst) {
-        FRAMEBUFFER_LOCK.raw_unlock();
+        self.copy_to_fb();
     }
 }
 pub fn setup_display(info: FramebufferInfo) {
     let addr = unsafe { map_physical(info.address, info.height * info.stride * 4, true) };
-    FRAMEBUFFER_ADDR.store(addr as *mut u8, Ordering::SeqCst);
     let fb = vec![0; info.height as usize * info.stride as usize * 4];
-    *FRAMEBUFFER.lock() = Some(FramebufferTarget {
+    let display = Display {
+        framebuffer: FramebufferTarget {
             framebuffer: fb,
             width: info.width as usize,
             height: info.height as usize,
             stride: info.stride as usize,
-    });
-    WIDTH.store(info.width as usize / 10 - 1, Ordering::SeqCst);
-    HEIGHT.store(info.height as usize / 20 - 1, Ordering::SeqCst);
+        },
+        framebuffer_addr: addr,
+        width: info.width as usize / 10 - 1,
+        height: info.height as usize / 20 - 1,
+        current_x: 0,
+        current_y: 0,
+    };
+    *DISPLAY.lock() = Some(display);
 }
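The net effect in this file: six atomics plus a separate framebuffer spinlock and raw lock collapse into one `Display` struct behind a single `Spinlock<Option<Display>>`. The old `display_print` checked `FRAMEBUFFER_ADDR` for null and then locked separately, and the cursor atomics could be torn apart by racing writers between loads and stores; now one lock acquisition covers the initialized-check and every field. Usage, as in the new panic handler:

```rust
// One guard protects the Option (is the display set up yet?) and,
// if it is, all of the display state behind it.
let mut guard = DISPLAY.lock();
if let Some(display) = guard.as_mut() {
    display.print("hello\n");
} // guard dropped here: lock released
```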

File: sys/acpica_osl.rs

@@ -27,7 +27,7 @@ use super::{
     lapic::get_current_lapic_id,
     locks::Spinlock,
     sync::{create_semaphore, lock_semaphore, unlock_semaphore, RawSemaphore, RawSpinlock},
-    task::{CURRENT_TASKS, CURRENT_TASK_LOCK, MULTITASKING_ENABLED},
+    task::{CURRENT_TASKS, MULTITASKING_ENABLED},
 };
 pub const AE_OK: ACPI_STATUS = 0;
@@ -91,13 +91,7 @@ extern "C" fn AcpiOsGetThreadId() -> UINT64 {
     if !MULTITASKING_ENABLED.load(Ordering::SeqCst) {
         return 1;
     }
-    let task_id;
-    CURRENT_TASK_LOCK.raw_lock();
-    {
-        task_id = CURRENT_TASKS.lock()[get_current_lapic_id()].as_ref().unwrap().id;
-    }
-    CURRENT_TASK_LOCK.raw_unlock();
-    task_id as UINT64
+    CURRENT_TASKS.lock()[get_current_lapic_id()].as_ref().unwrap().id as UINT64
 }
 #[no_mangle]
 extern "C" fn AcpiOsGetTimer() -> UINT64 {

File: sys/hpet.rs

@@ -14,9 +14,8 @@ use super::{
     ioapic::register_irq_handler,
     lapic::get_current_lapic_id,
     locks::Spinlock,
-    scheduler::{schedule_task, yield_task, SCHEDULER_LOCK},
-    sync::RawSpinlock,
-    task::{Task, TaskState, CURRENT_TASKS, CURRENT_TASK_LOCK, MULTITASKING_ENABLED},
+    scheduler::{yield_task, SCHEDULER},
+    task::{Task, TaskState, CURRENT_TASKS, MULTITASKING_ENABLED},
 };
 const REGISTER_CAPABILITIES: usize = 0;
@@ -32,7 +31,6 @@ static ADDRESS: AtomicPtr<u64> = AtomicPtr::new(null_mut());
 static PERIOD: AtomicUsize = AtomicUsize::new(0);
 static EARLY_SLEEP: AtomicBool = AtomicBool::new(false);
 static SLEEPING_LIST: Spinlock<Vec<Task>> = Spinlock::new(Vec::new());
-static SLEEP_LOCK: RawSpinlock = RawSpinlock::new();
 fn ticks_to_us(ticks: usize) -> usize {
     let period = PERIOD.load(Ordering::SeqCst);
@@ -67,8 +65,6 @@ fn handler() {
         EARLY_SLEEP.store(false, Ordering::SeqCst);
     }
     if MULTITASKING_ENABLED.load(Ordering::SeqCst) {
-        SLEEP_LOCK.raw_lock();
-        {
         let mut sleeping_list = SLEEPING_LIST.lock();
         let current_time = get_current_time();
         while let Some(task) = sleeping_list.first() {
@@ -76,9 +72,7 @@ fn handler() {
                 let mut task = sleeping_list.remove(0);
                 task.sleep_until_us = 0;
                 task.task_state = TaskState::Ready;
-                    SCHEDULER_LOCK.raw_lock();
-                    schedule_task(task);
-                    SCHEDULER_LOCK.raw_unlock();
+                SCHEDULER.lock().add_task(task);
             } else {
                 break;
             }
@@ -87,8 +81,6 @@ fn handler() {
             schedule_hpet_interrupt(task.sleep_until_us);
         }
     }
-        SLEEP_LOCK.raw_unlock();
-    }
 }
 pub fn get_current_time() -> usize {
     let address = ADDRESS.load(Ordering::SeqCst);
@@ -97,14 +89,12 @@ pub fn get_current_time() -> usize {
 }
 pub fn sleep(us: usize) {
     if MULTITASKING_ENABLED.load(Ordering::SeqCst) {
-        CURRENT_TASK_LOCK.raw_lock();
         {
             let mut _current_task = CURRENT_TASKS.lock();
             let current_task = _current_task[get_current_lapic_id()].as_mut().unwrap();
             current_task.sleep_until_us = get_current_time() + us;
             current_task.task_state = TaskState::Sleeping;
         }
-        CURRENT_TASK_LOCK.raw_unlock();
         yield_task();
     } else {
         EARLY_SLEEP.store(true, Ordering::SeqCst);
@@ -121,15 +111,11 @@ pub fn sleep(us: usize) {
     }
 }
 pub fn sleep_internal(task: Task) {
-    SLEEP_LOCK.raw_lock();
-    {
     let mut sleeping_list = SLEEPING_LIST.lock();
     sleeping_list.push(task);
     sleeping_list.sort_by(|a, b| a.sleep_until_us.cmp(&b.sleep_until_us));
     schedule_hpet_interrupt(sleeping_list.first().unwrap().sleep_until_us);
-    }
-    SLEEP_LOCK.raw_unlock();
 }
 pub fn setup_hpet(tables: &AcpiTables<EarlyACPIHandler>) {
     let hpet_info = HpetInfo::new(tables).unwrap();
     let address = unsafe { map_physical(hpet_info.base_address as u64, 0x200, false) } as *mut u64;
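Worth noting in `sleep`: the braces around the `CURRENT_TASKS.lock()` block are load-bearing. The guard must drop before `yield_task()`, otherwise the CPU would enter the scheduler while still holding `CURRENT_TASKS`, which `switch_task` itself locks. A distilled sketch of the rule:

```rust
// Scope the guard so it drops before yielding; the scheduler path
// (scheduler -> switch_task) takes CURRENT_TASKS itself.
{
    let mut tasks = CURRENT_TASKS.lock();
    // ... mark the current task Sleeping, record wake time ...
} // guard dropped: CURRENT_TASKS released
yield_task();
```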

File: sys/scheduler.rs

@@ -7,8 +7,7 @@ use crate::cpu::isr::ISRState;
 use super::{
     lapic::{get_current_lapic_id, schedule_timer_interrupt},
     locks::Spinlock,
-    sync::RawSpinlock,
-    task::{switch_task, Task, TaskState, CURRENT_TASKS, CURRENT_TASK_LOCK, MULTITASKING_ENABLED},
+    task::{switch_task, Task, TaskState, CURRENT_TASKS, MULTITASKING_ENABLED},
 };
 #[cfg(debug_assertions)]
@@ -16,28 +15,33 @@ use super::sync::{IN_ISR_HANDLER, LOCKS_HELD};
 #[cfg(debug_assertions)]
 use kernel_common::instructions::{cli, sti};
-static SCHEDULER_LIST: Spinlock<VecDeque<Task>> = Spinlock::new(VecDeque::new());
 pub static IDLE_TASKS: Spinlock<Vec<Task>> = Spinlock::new(Vec::new());
-pub static SCHEDULER_LOCK: RawSpinlock = RawSpinlock::new();
+pub struct Scheduler {
+    tasks: VecDeque<Task>,
+}
+pub static SCHEDULER: Spinlock<Scheduler> = Spinlock::new(Scheduler { tasks: VecDeque::new() });
+impl Scheduler {
+    pub fn add_task(&mut self, task: Task) {
+        if self.tasks.is_empty() {
+            schedule_timer_interrupt();
+        }
+        self.tasks.push_back(task);
+    }
+    fn get_task(&mut self) -> Option<Task> {
+        self.tasks.pop_front()
+    }
+}
 pub fn scheduler(state: &mut ISRState) {
     if !MULTITASKING_ENABLED.load(Ordering::SeqCst) {
         return;
     }
-    CURRENT_TASK_LOCK.raw_lock();
-    SCHEDULER_LOCK.raw_lock();
-    let mut switch_to_task = None;
-    {
-        let mut scheduler_list = SCHEDULER_LIST.lock();
-        if !scheduler_list.is_empty() {
-            let task = scheduler_list.pop_front().unwrap();
-            switch_to_task = Some(task);
-        }
-    }
-    SCHEDULER_LOCK.raw_unlock();
+    let switch_to_task = SCHEDULER.lock().get_task();
     if let Some(task) = switch_to_task {
         switch_task(state, task);
-        CURRENT_TASK_LOCK.raw_unlock();
         return;
     }
     let mut switch_idle = false;
@@ -57,15 +61,6 @@ pub fn scheduler(state: &mut ISRState) {
     if switch_idle {
         switch_task(state, IDLE_TASKS.lock().pop().unwrap());
     }
-    CURRENT_TASK_LOCK.raw_unlock();
 }
-pub fn schedule_task(task: Task) {
-    debug_assert!(SCHEDULER_LOCK.is_locked() || !MULTITASKING_ENABLED.load(Ordering::SeqCst));
-    let mut scheduler_list = SCHEDULER_LIST.lock();
-    if scheduler_list.is_empty() {
-        schedule_timer_interrupt();
-    }
-    scheduler_list.push_back(task);
-}
 pub fn yield_task() {
     #[cfg(debug_assertions)]
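The removed `schedule_task` needed `debug_assert!(SCHEDULER_LOCK.is_locked() || ...)` because nothing tied the free function to its lock. In the new shape, `add_task` takes `&mut self`, and the only way to get a `&mut Scheduler` is through `SCHEDULER.lock()`, so the invariant the assert checked at runtime is now enforced by the type system:

```rust
// Old: lock discipline by convention, checked only in debug builds.
// SCHEDULER_LOCK.raw_lock();
// schedule_task(task);
// SCHEDULER_LOCK.raw_unlock();

// New: the queue is unreachable without the guard; the temporary
// guard returned by lock() drops at the end of the statement.
SCHEDULER.lock().add_task(task);
```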

File: sys/sync.rs

@@ -8,8 +8,8 @@ use crate::sys::madt::INTERRUPTS_SETUP;
 use super::{
     lapic::get_current_lapic_id,
     locks::Spinlock,
-    scheduler::{schedule_task, yield_task, SCHEDULER_LOCK},
-    task::{Task, TaskState, CURRENT_TASKS, CURRENT_TASK_LOCK},
+    scheduler::{yield_task, SCHEDULER},
+    task::{Task, TaskState, CURRENT_TASKS},
 };
 pub static IN_ISR_HANDLER: [AtomicBool; 256] = [const { AtomicBool::new(false) }; 256];
@@ -60,9 +60,6 @@ impl RawSpinlock {
             }
         }
     }
-    pub fn is_locked(&self) -> bool {
-        self.locked.load(Ordering::SeqCst)
-    }
 }
@@ -86,7 +83,6 @@ pub fn lock_semaphore(semaphore: Arc<RawSemaphore>, count: usize) {
         if success {
             return;
         }
-        CURRENT_TASK_LOCK.raw_lock();
         {
             let mut current_task = CURRENT_TASKS.lock();
             let current_task = current_task[get_current_lapic_id()].as_mut().unwrap();
@@ -94,7 +90,6 @@ pub fn lock_semaphore(semaphore: Arc<RawSemaphore>, count: usize) {
             current_task.block_on_semaphore = Some(semaphore.clone());
             current_task.semaphore_requested_count = count;
         }
-        CURRENT_TASK_LOCK.raw_unlock();
         yield_task();
     }
 }
@@ -107,9 +102,7 @@ pub fn lock_semaphore_internal(mut task: Task) {
         task.block_on_semaphore = None;
         task.semaphore_requested_count = 0;
         task.task_state = TaskState::Ready;
-        SCHEDULER_LOCK.raw_lock();
-        schedule_task(task);
-        SCHEDULER_LOCK.raw_unlock();
+        SCHEDULER.lock().add_task(task);
     }
     semaphore.spinlock.raw_unlock();
 }
@@ -122,9 +115,7 @@ pub fn unlock_semaphore(semaphore: Arc<RawSemaphore>, count: usize) {
             task.block_on_semaphore = None;
             task.semaphore_requested_count = 0;
             task.task_state = TaskState::Ready;
-            SCHEDULER_LOCK.raw_lock();
-            schedule_task(task);
-            SCHEDULER_LOCK.raw_unlock();
+            SCHEDULER.lock().add_task(task);
         }
     }
     semaphore.spinlock.raw_unlock();

File: sys/task.rs

@@ -8,7 +8,7 @@ use crate::{
     main,
     sys::{
         lapic::NEXT_LAPIC_ID,
-        scheduler::{schedule_task, IDLE_TASKS, SCHEDULER_LOCK},
+        scheduler::{IDLE_TASKS, SCHEDULER},
     },
 };
@@ -17,7 +17,7 @@ use super::{
     lapic::{get_current_lapic_id, schedule_timer_interrupt},
     locks::Spinlock,
     scheduler::yield_task,
-    sync::{lock_semaphore_internal, RawSemaphore, RawSpinlock},
+    sync::{lock_semaphore_internal, RawSemaphore},
 };
 #[derive(PartialEq)]
@@ -43,7 +43,6 @@ pub struct Task {
 pub const STACK_SIZE: usize = 64 * 1024;
 static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(2);
 pub static CURRENT_TASKS: Spinlock<[Option<Task>; 256]> = Spinlock::new([const { None }; 256]);
-pub static CURRENT_TASK_LOCK: RawSpinlock = RawSpinlock::new();
 static RFLAGS: AtomicU64 = AtomicU64::new(0);
 pub static MULTITASKING_ENABLED: AtomicBool = AtomicBool::new(false);
 pub static STARTING_AP_ID: AtomicI64 = AtomicI64::new(-1);
@@ -54,15 +53,12 @@ pub fn allocate_stack() -> u64 {
     stack.leak().as_mut_ptr() as u64 + STACK_SIZE as u64
 }
 pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
-    debug_assert!(CURRENT_TASK_LOCK.is_locked());
     let mut _current_task = CURRENT_TASKS.lock();
     if let Some(mut current_task) = _current_task[get_current_lapic_id()].take() {
         current_task.state = *current_state;
         match current_task.task_state {
             TaskState::Ready => {
-                SCHEDULER_LOCK.raw_lock();
-                schedule_task(current_task);
-                SCHEDULER_LOCK.raw_unlock();
+                SCHEDULER.lock().add_task(current_task);
             }
             TaskState::Idle => IDLE_TASKS.lock().push(current_task),
             TaskState::Terminated => {}
@@ -118,19 +114,15 @@ fn create_idle_task() {
     }
 }
 extern "C" fn task_entry() -> ! {
-    CURRENT_TASK_LOCK.raw_lock();
     let func;
     {
         let task = CURRENT_TASKS.lock();
         func = task[get_current_lapic_id()].as_ref().unwrap().initial_func;
     }
-    CURRENT_TASK_LOCK.raw_unlock();
     func();
-    CURRENT_TASK_LOCK.raw_lock();
     {
         CURRENT_TASKS.lock()[get_current_lapic_id()].as_mut().unwrap().task_state = TaskState::Terminated;
     }
-    CURRENT_TASK_LOCK.raw_unlock();
     yield_task();
     panic!("Failed to terminate task");
 }
@@ -156,7 +148,7 @@ pub fn setup_multitasking() -> ! {
         create_idle_task();
     }
     let task = create_task(main);
-    schedule_task(task);
+    SCHEDULER.lock().add_task(task);
     MULTITASKING_ENABLED.store(true, Ordering::SeqCst);
     yield_task();
     panic!("Setting up multitasking failed");
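`task_entry` shows the same guard-scoping discipline as `sleep` in sys/hpet.rs: the task's entry function is copied out of `CURRENT_TASKS` inside a block so the guard drops before `func()` runs, since the task body can itself take `CURRENT_TASKS` (via sleep, semaphores, or a yield):

```rust
// Copy what you need out of the locked structure, drop the guard,
// then call into arbitrary task code.
let func;
{
    let task = CURRENT_TASKS.lock();
    func = task[get_current_lapic_id()].as_ref().unwrap().initial_func;
} // guard dropped before running the task body
func();
```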