Improve multitasking
All checks were successful
Build / build (push) Successful in 2m49s

This commit is contained in:
Mathieu Strypsteen 2024-12-28 20:11:08 +01:00
parent 2b147549ee
commit 7db94e628e
9 changed files with 145 additions and 78 deletions

30
Cargo.lock generated
View file

@ -35,6 +35,12 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b2d54853319fd101b8dd81de382bcbf3e03410a64d8928bbee85a3e7dcde483"
[[package]]
name = "allocator-api2"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
[[package]]
name = "autocfg"
version = "1.4.0"
@ -170,6 +176,12 @@ dependencies = [
"byteorder",
]
[[package]]
name = "equivalent"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "float-cmp"
version = "0.9.0"
@ -179,6 +191,12 @@ dependencies = [
"num-traits",
]
[[package]]
name = "foldhash"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f"
[[package]]
name = "funty"
version = "2.0.0"
@ -191,6 +209,17 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "hashbrown"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
dependencies = [
"allocator-api2",
"equivalent",
"foldhash",
]
[[package]]
name = "init"
version = "0.0.1"
@ -215,6 +244,7 @@ dependencies = [
"buddy_system_allocator",
"elf",
"embedded-graphics",
"hashbrown",
"kernel-common",
"lock_api",
"log",

View file

@ -12,6 +12,7 @@ bitvec = { version = "1.0.1", default-features = false, features = ["alloc", "at
buddy_system_allocator = { version = "0.11.0", default-features = false }
elf = { version = "0.7.4", default-features = false }
embedded-graphics = "0.8.1"
hashbrown = "0.15.2"
kernel-common = { path = "../lib/kernel-common" }
lock_api = "0.4.12"
log = "0.4.22"

View file

@ -73,6 +73,7 @@ const INIT_BINARY: &[u8] = include_bytes!("../../target/x86_64-unknown-os/releas
#[no_mangle]
extern "C" fn early_main(temp_loader_struct: *const LoaderStruct) -> ! {
{
unsafe {
ALLOCATOR.heap.lock().init(KERNEL_HEAP_START as usize, KERNEL_HEAP_INITIAL_SIZE);
}
@ -97,6 +98,7 @@ extern "C" fn early_main(temp_loader_struct: *const LoaderStruct) -> ! {
setup_hpet(&early_acpi_tables);
set_cpu_flags();
setup_lapic_timer(true);
}
setup_multitasking();
}
fn main() {

View file

@ -17,13 +17,14 @@ extern "C" {
// TODO: Proper error handling
pub fn load_binary() {
let entry;
{
let process;
{
let mut current_tasks = CURRENT_TASKS.lock();
let task = current_tasks[get_current_lapic_id()].as_mut().unwrap();
process = task.process.clone();
}
let entry;
{
let mut process = process.lock();
let binary = process.binary.unwrap();
@ -58,6 +59,7 @@ pub fn load_binary() {
assert!(entry < USER_END);
}
info!("Starting init...");
}
unsafe {
jump_usermode(entry);
}

View file

@ -1,15 +1,9 @@
use core::sync::atomic::{AtomicBool, AtomicUsize};
use lock_api::{GuardSend, Mutex, RawMutex};
use super::sync::RawSpinlock;
unsafe impl RawMutex for RawSpinlock {
const INIT: Self = RawSpinlock {
locked: AtomicBool::new(false),
lapic_id: AtomicUsize::new(0),
};
const INIT: Self = RawSpinlock::new();
type GuardMarker = GuardSend;
fn lock(&self) {

View file

@ -1,6 +1,10 @@
use core::cell::{LazyCell, OnceCell};
use core::{
cell::{LazyCell, OnceCell},
sync::atomic::{AtomicUsize, Ordering},
};
use alloc::sync::Arc;
use hashbrown::HashMap;
use kernel_common::paging::PageTable;
use crate::{
@ -12,33 +16,43 @@ use crate::{
use super::{locks::Spinlock, scheduler::SCHEDULER, task::create_task};
/// A process: an address space plus per-process bookkeeping shared by its tasks.
/// Instances are wrapped in `Arc<Spinlock<Process>>` and (for user processes)
/// registered in the global `PROCESSES` map keyed by pid.
pub struct Process {
// Unique process id; 0 is the kernel process, user pids start at 1 (see NEXT_PID).
pub pid: usize,
// Paging structures (PML4) backing this process.
pub address_space: AddressSpace,
// ELF image to load for this process, if any (None for the kernel process).
pub binary: Option<&'static [u8]>,
// Count of live tasks belonging to this process; incremented by create_task and
// decremented in Task::drop, which removes the process from PROCESSES at zero.
pub num_tasks: usize,
}
// The singleton kernel process (pid 0), created lazily on first access.
// Its address space is flagged is_kernel; the PML4 starts as an empty OnceCell
// with a zero physical address and is presumably filled in during early paging
// setup elsewhere — not visible here, confirm against the paging code.
// NOTE(review): unlike processes from create_process, this one is not inserted
// into PROCESSES in the code shown.
static KERNEL_PROCESS: Spinlock<LazyCell<Arc<Spinlock<Process>>>> = Spinlock::new(LazyCell::new(|| {
let process = Process {
pid: 0,
address_space: AddressSpace {
is_kernel: true,
pml4: OnceCell::new(),
pml4_physical_address: 0,
},
binary: None,
num_tasks: 0,
};
Arc::new(Spinlock::new(process))
}));
// Next pid to hand out; starts at 1 because pid 0 is reserved for the kernel process.
static NEXT_PID: AtomicUsize = AtomicUsize::new(1);
// Global pid -> process registry. The outer Spinlock guards the lazy initialization
// (LazyCell), the inner Spinlock guards the map itself — hence the double
// `.lock().lock()` at call sites.
pub static PROCESSES: Spinlock<LazyCell<Spinlock<HashMap<usize, Arc<Spinlock<Process>>>>>> = Spinlock::new(LazyCell::new(|| Spinlock::new(HashMap::new())));
/// Returns a fresh `Arc` handle to the singleton kernel process (pid 0).
/// Takes the outer KERNEL_PROCESS spinlock only long enough to clone the Arc.
pub fn get_kernel_process() -> Arc<Spinlock<Process>> {
KERNEL_PROCESS.lock().clone()
}
pub fn create_process(binary: Option<&'static [u8]>) -> Arc<Spinlock<Process>> {
let pid = NEXT_PID.fetch_add(1, Ordering::SeqCst);
let mut process = Process {
pid,
address_space: AddressSpace {
is_kernel: false,
pml4: OnceCell::new(),
pml4_physical_address: 0,
},
binary,
num_tasks: 0,
};
let mut pml4 = create_page_table();
let kernel_proc = get_kernel_process();
@ -49,7 +63,9 @@ pub fn create_process(binary: Option<&'static [u8]>) -> Arc<Spinlock<Process>> {
}
process.address_space.pml4_physical_address = kernel_proc.address_space.virt_to_phys(pml4.as_ref() as *const PageTable as u64);
process.address_space.pml4.set(pml4).unwrap_or_else(|_| panic!());
Arc::new(Spinlock::new(process))
let process = Arc::new(Spinlock::new(process));
PROCESSES.lock().lock().insert(pid, process.clone());
process
}
pub fn start_init() {
let process = create_process(Some(INIT_BINARY));

View file

@ -24,6 +24,7 @@ pub struct RawSemaphore {
pub struct RawSpinlock {
pub locked: AtomicBool,
pub lapic_id: AtomicUsize,
early_lock: AtomicBool,
}
impl RawSpinlock {
@ -31,6 +32,7 @@ impl RawSpinlock {
Self {
locked: AtomicBool::new(false),
lapic_id: AtomicUsize::new(0),
early_lock: AtomicBool::new(false),
}
}
pub fn raw_lock(&self) {
@ -40,18 +42,21 @@ impl RawSpinlock {
let lapic_id = get_current_lapic_id();
LOCKS_HELD[lapic_id].fetch_add(1, Ordering::SeqCst);
self.lapic_id.store(lapic_id, Ordering::SeqCst);
} else {
self.early_lock.store(true, Ordering::SeqCst);
}
}
pub fn raw_unlock(&self) {
debug_assert!(self.locked.load(Ordering::SeqCst));
let mut lapic_id = 0;
let interrupts_setup = INTERRUPTS_SETUP.load(Ordering::SeqCst);
if interrupts_setup {
let early_lock = self.early_lock.swap(false, Ordering::SeqCst);
if interrupts_setup && !early_lock {
lapic_id = self.lapic_id.load(Ordering::SeqCst);
debug_assert_eq!(lapic_id, get_current_lapic_id());
}
self.locked.store(false, Ordering::SeqCst);
if interrupts_setup {
if interrupts_setup && !early_lock {
LOCKS_HELD[lapic_id].fetch_sub(1, Ordering::SeqCst);
if !IN_ISR_HANDLER[lapic_id].load(Ordering::SeqCst) && LOCKS_HELD[lapic_id].load(Ordering::SeqCst) == 0 {
unsafe {

View file

@ -21,7 +21,7 @@ struct SyscallArgs {
_arg5: usize,
}
const SYSCALL_TABLE: [fn(SyscallArgs) -> Result<usize, Box<dyn Error>>; 3] = [syscall_get_kernel_version, syscall_exit, syscall_debug_print];
const SYSCALL_TABLE: [fn(SyscallArgs) -> Result<usize, Box<dyn Error>>; 4] = [syscall_get_kernel_version, syscall_exit, syscall_debug_print, syscall_get_pid];
fn copy_from_user(start: u64, size: usize) -> Result<Vec<u8>, Box<dyn Error>> {
if size > 16 * 1024 * 1024 {
@ -64,6 +64,11 @@ fn syscall_debug_print(args: SyscallArgs) -> Result<usize, Box<dyn Error>> {
trace!("{}", string);
Ok(0)
}
/// Syscall: returns the pid of the process that owns the task currently
/// running on this CPU. Takes no arguments and cannot fail.
fn syscall_get_pid(_args: SyscallArgs) -> Result<usize, Box<dyn Error>> {
    let current_tasks = CURRENT_TASKS.lock();
    let task = current_tasks[get_current_lapic_id()].as_ref().unwrap();
    let process = task.process.lock();
    Ok(process.pid)
}
#[no_mangle]
extern "C" fn syscall_handler(syscall: u64, arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64) -> u64 {
if syscall as usize >= SYSCALL_TABLE.len() {

View file

@ -19,7 +19,7 @@ use super::{
hpet::sleep_internal,
lapic::{get_current_lapic_id, schedule_timer_interrupt},
locks::Spinlock,
process::{get_kernel_process, Process},
process::{get_kernel_process, Process, PROCESSES},
scheduler::yield_task,
sync::{lock_semaphore_internal, RawSemaphore},
};
@ -76,6 +76,15 @@ impl Drop for Stack {
}
}
}
// Task teardown: decrement the owning process's live-task count and, when the
// last task exits, drop the process from the global registry so it can be freed.
impl Drop for Task {
fn drop(&mut self) {
let mut process = self.process.lock();
process.num_tasks -= 1;
if process.num_tasks == 0 {
// unwrap: the process must have been registered by create_process.
// NOTE(review): the kernel process (pid 0) is not inserted into PROCESSES in
// the code shown — if its task count ever reached 0 this unwrap would panic;
// confirm that kernel tasks can never all exit.
PROCESSES.lock().lock().remove(&process.pid).unwrap();
}
}
}
pub fn allocate_stack() -> Stack {
let kernel_proc = get_kernel_process();
@ -131,6 +140,9 @@ pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
schedule_timer_interrupt();
}
pub fn create_task(process: Arc<Spinlock<Process>>, func: fn()) -> Task {
{
process.lock().num_tasks += 1;
}
let stack = allocate_stack();
Task {
id: NEXT_TASK_ID.fetch_add(1, Ordering::SeqCst),
@ -210,13 +222,13 @@ fn idle_main() {
}
}
pub fn setup_multitasking() -> ! {
let task;
{
let rflags = get_rflags();
RFLAGS.store(rflags, core::sync::atomic::Ordering::SeqCst);
for _ in 0..NEXT_LAPIC_ID.load(Ordering::SeqCst) {
create_idle_task();
}
let task;
{
let kernel_proc = get_kernel_process();
task = create_task(kernel_proc, main);
}