Add stack guard to all kernel stacks

Mathieu Strypsteen 2024-12-27 15:43:46 +01:00
parent 809189281a
commit 91e15d0746
3 changed files with 33 additions and 14 deletions
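The change replaces plain heap-allocated kernel stacks with dedicated virtual ranges whose lowest page is left inaccessible, so a stack overflow faults immediately instead of silently overwriting whatever sits below the stack. Purely as an illustration of that guard-page idea (nothing below comes from this repository; it assumes a hosted Linux target and the libc crate), the same effect can be sketched in userspace:

    // Reserve the stack plus one extra page, then revoke all access to the
    // lowest page: any write that runs past the bottom of the stack now
    // faults instead of corrupting adjacent memory.
    use libc::{mmap, mprotect, MAP_ANONYMOUS, MAP_FAILED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE};
    use std::ptr;

    const PAGE_SIZE: usize = 4096;
    const STACK_SIZE: usize = 64 * 1024; // illustrative size, not the kernel's

    fn main() {
        unsafe {
            let total = STACK_SIZE + PAGE_SIZE;
            let base = mmap(ptr::null_mut(), total, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            assert_ne!(base, MAP_FAILED);
            // The guard page sits at the lowest address; the usable stack
            // grows downward from base + total toward it.
            assert_eq!(mprotect(base, PAGE_SIZE, PROT_NONE), 0);
            println!("guard page at {:p}, stack top at {:#x}", base, base as usize + total);
        }
    }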

View file

@@ -55,11 +55,11 @@ pub fn setup_gdt() {
reserved4: 0,
iopb_offset: size_of::<TSS>() as u16,
}));
- tss.rsp[0] = allocate_stack();
- tss.ist[0] = allocate_stack();
- tss.ist[1] = allocate_stack();
- tss.ist[2] = allocate_stack();
- tss.ist[3] = allocate_stack();
+ tss.rsp[0] = Box::leak(Box::new(allocate_stack())).address;
+ tss.ist[0] = Box::leak(Box::new(allocate_stack())).address;
+ tss.ist[1] = Box::leak(Box::new(allocate_stack())).address;
+ tss.ist[2] = Box::leak(Box::new(allocate_stack())).address;
+ tss.ist[3] = Box::leak(Box::new(allocate_stack())).address;
let gdt = Box::leak(Box::new(GDT {
entries: [
GDTEntry {
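In the TSS hunk above, each of these stacks (rsp0 and the four IST entries) is wrapped in Box::leak, which returns a &'static mut reference and never runs Stack's destructor, so these stacks and their guard pages stay mapped for the lifetime of the kernel. A minimal standalone illustration of that leak behaviour (not repository code):

    fn main() {
        // Box::leak converts an owned heap value into a &'static mut reference;
        // the value is never dropped, so any Drop impl it has will not run.
        let leaked: &'static mut u64 = Box::leak(Box::new(41u64));
        *leaked += 1;
        println!("value = {}, lives at {:p}", leaked, leaked);
    }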

View file

@@ -83,9 +83,9 @@ extern "C" fn early_main(temp_loader_struct: *const LoaderStruct) -> ! {
assert_eq!(loader_struct.magic, LOADER_STRUCT_MAGIC);
init_logger(loader_struct.vm != 0);
info!("Starting kernel...");
- setup_paging(&loader_struct, loader_struct.phys_kernel_start, loader_struct.phys_heap_start);
setup_gdt();
setup_idt();
+ setup_paging(&loader_struct, loader_struct.phys_kernel_start, loader_struct.phys_heap_start);
setup_display(loader_struct.framebuffer);
RSDP_ADDRESS.store(loader_struct.rsdp_address, Ordering::SeqCst);
disable_pic();

View file

@@ -3,7 +3,7 @@ use core::{
sync::atomic::{AtomicBool, AtomicI64, AtomicU64, AtomicUsize, Ordering},
};
- use alloc::{boxed::Box, sync::Arc, vec, vec::Vec};
+ use alloc::{boxed::Box, sync::Arc};
use kernel_common::instructions::{cli, get_rflags, hlt, pause, sti};
use crate::{
@@ -36,11 +36,14 @@ pub enum TaskState {
#[repr(align(16))]
struct FXSaveRegion([u8; 512]);
+ pub struct Stack {
+ pub address: u64,
+ }
pub struct Task {
pub id: usize,
pub process: Arc<Spinlock<Process>>,
state: ISRState,
- kernel_stack: Vec<u8>,
+ kernel_stack: Stack,
initial_func: fn(),
pub task_state: TaskState,
pub sleep_until_us: usize,
@@ -65,9 +68,25 @@ pub static STARTING_AP_ID: AtomicI64 = AtomicI64::new(-1);
pub static ALL_APS_STARTED: AtomicBool = AtomicBool::new(false);
pub static CPUDATA: Spinlock<[CPUData; 256]> = Spinlock::new([const { CPUData { kernel_stack: 0, user_stack: 0 } }; 256]);
- pub fn allocate_stack() -> u64 {
- let stack: Vec<u8> = vec![0; STACK_SIZE];
- stack.leak().as_mut_ptr() as u64 + STACK_SIZE as u64
+ impl Drop for Stack {
+ fn drop(&mut self) {
+ let kernel_proc = get_kernel_process();
+ unsafe {
+ kernel_proc.lock().address_space.free_range(self.address - STACK_SIZE as u64, STACK_SIZE as u64);
+ }
+ }
+ }
+ pub fn allocate_stack() -> Stack {
+ let kernel_proc = get_kernel_process();
+ let mut kernel_proc = kernel_proc.lock();
+ let stack_start = unsafe { kernel_proc.address_space.allocate_kernel_range(STACK_SIZE as u64, true, false) };
+ unsafe {
+ kernel_proc.address_space.update_flags_range(stack_start, 0x1000, false, false, false);
+ }
+ Stack {
+ address: stack_start + STACK_SIZE as u64,
+ }
}
pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
let lapic_id = get_current_lapic_id();
@@ -102,7 +121,7 @@ pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
process.address_space.switch();
}
}
- let kernel_stack = new_task.kernel_stack.as_ptr() as u64 + STACK_SIZE as u64;
+ let kernel_stack = new_task.kernel_stack.address;
cpu_data.kernel_stack = kernel_stack;
cpu_data.user_stack = new_task.user_stack;
unsafe {
@@ -112,7 +131,7 @@ pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
schedule_timer_interrupt();
}
pub fn create_task(process: Arc<Spinlock<Process>>, func: fn()) -> Task {
- let stack = vec![0; STACK_SIZE];
+ let stack = allocate_stack();
Task {
id: NEXT_TASK_ID.fetch_add(1, Ordering::SeqCst),
process,
@@ -138,7 +157,7 @@ pub fn create_task(process: Arc<Spinlock<Process>>, func: fn()) -> Task {
rip: task_entry as usize as u64,
cs: 8,
rflags: RFLAGS.load(Ordering::SeqCst),
- rsp: stack.as_ptr() as u64 + STACK_SIZE as u64,
+ rsp: stack.address,
ss: 16,
},
kernel_stack: stack,
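With these changes a Task owns its kernel stack as a Stack value rather than as a heap Vec, so dropping the Task unmaps the whole stack range, guard page included, through Stack's Drop impl. A standalone sketch of that RAII ownership pattern, with a made-up release_range standing in for the kernel's address-space call:

    const STACK_SIZE: u64 = 64 * 1024; // illustrative size, not the kernel's

    // `address` is the top of the usable stack; the mapped range starts
    // STACK_SIZE bytes below it, mirroring the layout used above.
    struct Stack {
        address: u64,
    }

    impl Drop for Stack {
        fn drop(&mut self) {
            // Stand-in for address_space.free_range in the real kernel.
            release_range(self.address - STACK_SIZE, STACK_SIZE);
        }
    }

    fn release_range(start: u64, size: u64) {
        println!("released {:#x}..{:#x}", start, start + size);
    }

    fn main() {
        let stack = Stack { address: 0xffff_9000_0001_0000 };
        println!("stack top: {:#x}", stack.address);
    } // `stack` goes out of scope here; the range is released exactly once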