From 91e15d0746ea5739aa63f4babc7ec19c3d872d04 Mon Sep 17 00:00:00 2001
From: Mathieu Strypsteen
Date: Fri, 27 Dec 2024 15:43:46 +0100
Subject: [PATCH] Add stack guard to all kernel stacks

---
 kernel/src/cpu/gdt.rs  | 10 +++++-----
 kernel/src/main.rs     |  2 +-
 kernel/src/sys/task.rs | 35 +++++++++++++++++++++++++++--------
 3 files changed, 33 insertions(+), 14 deletions(-)

diff --git a/kernel/src/cpu/gdt.rs b/kernel/src/cpu/gdt.rs
index 02fd9ed..db296a0 100644
--- a/kernel/src/cpu/gdt.rs
+++ b/kernel/src/cpu/gdt.rs
@@ -55,11 +55,11 @@ pub fn setup_gdt() {
         reserved4: 0,
         iopb_offset: size_of::() as u16,
     }));
-    tss.rsp[0] = allocate_stack();
-    tss.ist[0] = allocate_stack();
-    tss.ist[1] = allocate_stack();
-    tss.ist[2] = allocate_stack();
-    tss.ist[3] = allocate_stack();
+    tss.rsp[0] = Box::leak(Box::new(allocate_stack())).address;
+    tss.ist[0] = Box::leak(Box::new(allocate_stack())).address;
+    tss.ist[1] = Box::leak(Box::new(allocate_stack())).address;
+    tss.ist[2] = Box::leak(Box::new(allocate_stack())).address;
+    tss.ist[3] = Box::leak(Box::new(allocate_stack())).address;
     let gdt = Box::leak(Box::new(GDT {
         entries: [
             GDTEntry {
diff --git a/kernel/src/main.rs b/kernel/src/main.rs
index ad210e7..32332c0 100644
--- a/kernel/src/main.rs
+++ b/kernel/src/main.rs
@@ -83,9 +83,9 @@ extern "C" fn early_main(temp_loader_struct: *const LoaderStruct) -> ! {
     assert_eq!(loader_struct.magic, LOADER_STRUCT_MAGIC);
     init_logger(loader_struct.vm != 0);
     info!("Starting kernel...");
+    setup_paging(&loader_struct, loader_struct.phys_kernel_start, loader_struct.phys_heap_start);
     setup_gdt();
     setup_idt();
-    setup_paging(&loader_struct, loader_struct.phys_kernel_start, loader_struct.phys_heap_start);
     setup_display(loader_struct.framebuffer);
     RSDP_ADDRESS.store(loader_struct.rsdp_address, Ordering::SeqCst);
     disable_pic();
diff --git a/kernel/src/sys/task.rs b/kernel/src/sys/task.rs
index d1e9c4a..7e70b04 100644
--- a/kernel/src/sys/task.rs
+++ b/kernel/src/sys/task.rs
@@ -3,7 +3,7 @@ use core::{
     sync::atomic::{AtomicBool, AtomicI64, AtomicU64, AtomicUsize, Ordering},
 };
 
-use alloc::{boxed::Box, sync::Arc, vec, vec::Vec};
+use alloc::{boxed::Box, sync::Arc};
 use kernel_common::instructions::{cli, get_rflags, hlt, pause, sti};
 
 use crate::{
@@ -36,11 +36,14 @@ pub enum TaskState {
 #[repr(align(16))]
 struct FXSaveRegion([u8; 512]);
 
+pub struct Stack {
+    pub address: u64,
+}
 pub struct Task {
     pub id: usize,
     pub process: Arc<Spinlock<Process>>,
     state: ISRState,
-    kernel_stack: Vec<u8>,
+    kernel_stack: Stack,
     initial_func: fn(),
     pub task_state: TaskState,
     pub sleep_until_us: usize,
@@ -65,9 +68,25 @@
 pub static STARTING_AP_ID: AtomicI64 = AtomicI64::new(-1);
 pub static ALL_APS_STARTED: AtomicBool = AtomicBool::new(false);
 pub static CPUDATA: Spinlock<[CPUData; 256]> = Spinlock::new([const { CPUData { kernel_stack: 0, user_stack: 0 } }; 256]);
-pub fn allocate_stack() -> u64 {
-    let stack: Vec<u8> = vec![0; STACK_SIZE];
-    stack.leak().as_mut_ptr() as u64 + STACK_SIZE as u64
+impl Drop for Stack {
+    fn drop(&mut self) {
+        let kernel_proc = get_kernel_process();
+        unsafe {
+            kernel_proc.lock().address_space.free_range(self.address - STACK_SIZE as u64, STACK_SIZE as u64);
+        }
+    }
+}
+
+pub fn allocate_stack() -> Stack {
+    let kernel_proc = get_kernel_process();
+    let mut kernel_proc = kernel_proc.lock();
+    let stack_start = unsafe { kernel_proc.address_space.allocate_kernel_range(STACK_SIZE as u64, true, false) };
+    unsafe {
+        kernel_proc.address_space.update_flags_range(stack_start, 0x1000, false, false, false);
+    }
+    Stack {
+        address: stack_start + STACK_SIZE as u64,
+    }
 }
 pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
     let lapic_id = get_current_lapic_id();
@@ -102,7+121,7 @@
             process.address_space.switch();
         }
     }
-    let kernel_stack = new_task.kernel_stack.as_ptr() as u64 + STACK_SIZE as u64;
+    let kernel_stack = new_task.kernel_stack.address;
     cpu_data.kernel_stack = kernel_stack;
     cpu_data.user_stack = new_task.user_stack;
     unsafe {
@@ -112,7 +131,7 @@
     schedule_timer_interrupt();
 }
 pub fn create_task(process: Arc<Spinlock<Process>>, func: fn()) -> Task {
-    let stack = vec![0; STACK_SIZE];
+    let stack = allocate_stack();
     Task {
         id: NEXT_TASK_ID.fetch_add(1, Ordering::SeqCst),
         process,
@@ -138,7 +157,7 @@
             rip: task_entry as usize as u64,
             cs: 8,
             rflags: RFLAGS.load(Ordering::SeqCst),
-            rsp: stack.as_ptr() as u64 + STACK_SIZE as u64,
+            rsp: stack.address,
             ss: 16,
         },
         kernel_stack: stack,