Start all cores
All checks were successful
Build / build (push) Successful in 1m21s

This commit is contained in:
Mathieu Strypsteen 2024-12-12 16:24:49 +01:00
parent 392572ea80
commit fe1c702a8f
14 changed files with 218 additions and 55 deletions

30
Cargo.lock generated
View file

@ -35,6 +35,12 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b2d54853319fd101b8dd81de382bcbf3e03410a64d8928bbee85a3e7dcde483"
[[package]]
name = "allocator-api2"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
[[package]]
name = "autocfg"
version = "1.4.0"
@ -173,6 +179,12 @@ dependencies = [
"byteorder",
]
[[package]]
name = "equivalent"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "float-cmp"
version = "0.9.0"
@ -182,6 +194,12 @@ dependencies = [
"num-traits",
]
[[package]]
name = "foldhash"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2"
[[package]]
name = "funty"
version = "2.0.0"
@ -194,6 +212,17 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "hashbrown"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
dependencies = [
"allocator-api2",
"equivalent",
"foldhash",
]
[[package]]
name = "itertools"
version = "0.13.0"
@ -213,6 +242,7 @@ dependencies = [
"bitvec",
"buddy_system_allocator",
"embedded-graphics",
"hashbrown",
"kernel-common",
"log",
"spin",

View file

@ -11,6 +11,7 @@ bitfield = "0.17.0"
bitvec = {version = "1.0.1", default-features = false, features = ["alloc", "atomic"]}
buddy_system_allocator = "0.11.0"
embedded-graphics = "0.8.1"
hashbrown = "0.15.2"
kernel-common = {path = "../lib/kernel-common"}
log = "0.4.22"
spin = "0.9.8"

View file

@ -11,6 +11,7 @@ _start:
call rdrand
mov %rax, __stack_chk_guard
call early_main
.section .bss
.align 8
.global __stack_chk_guard

View file

@ -2,3 +2,4 @@ pub mod gdt;
pub mod idt;
pub mod isr;
pub mod paging;
pub mod smp;

View file

@ -25,7 +25,7 @@ extern "C" {
}
static PAGING_ACTIVE: AtomicBool = AtomicBool::new(false);
static CURRENT_PML4: Mutex<Option<&mut PageTable>> = Mutex::new(None);
pub static CURRENT_PML4: Mutex<Option<&mut PageTable>> = Mutex::new(None);
static HEAP_PHYS_START: AtomicU64 = AtomicU64::new(0);
static PHYSICAL_FRAMES: Mutex<Option<BitVec<u64>>> = Mutex::new(None);
static HEAP_PHYS_MAPPING: Mutex<Vec<u64>> = Mutex::new(Vec::new());
@ -42,11 +42,12 @@ fn _get_free_frame() -> u64 {
panic!("No free memory left");
}
fn invlpg(addr: u64) {
// TODO: Broadcast to all cores
unsafe {
asm!("invlpg [{}]", in(reg) addr);
}
}
fn virt_to_phys(virt: u64) -> u64 {
pub fn virt_to_phys(virt: u64) -> u64 {
if !PAGING_ACTIVE.load(Ordering::SeqCst) {
return virt - KERNEL_HEAP_START + HEAP_PHYS_START.load(Ordering::SeqCst);
}
@ -113,7 +114,7 @@ pub fn find_free_virt_range(mut start: u64, end: u64, size: u64) -> u64 {
}
fn map(pml4: &mut PageTable, virt: u64, phys: u64, user: bool, write: bool, exec: bool, cache_disable: bool) {
assert!(virt >= 0x1000, "First page shouldn't be mapped");
assert!(!write || !exec);
assert!(!write || !exec || virt == 0x1000);
{
let mut frames_vec = PHYSICAL_FRAMES.lock();
let frame = phys as usize / 0x1000;
@ -129,12 +130,16 @@ fn map(pml4: &mut PageTable, virt: u64, phys: u64, user: bool, write: bool, exec
let pdpt = get_table_entry(pml4, pml4_i);
let directory = get_table_entry(pdpt, pdpt_i);
let table = get_table_entry(directory, directory_i);
let should_invalidate = table.entries_phys[table_i].present() == 1;
table.entries_phys[table_i].set_address(phys / 0x1000);
table.entries_phys[table_i].set_user(user as u64);
table.entries_phys[table_i].set_write(write as u64);
table.entries_phys[table_i].set_execute_disable(!exec as u64);
table.entries_phys[table_i].set_cache_disable(cache_disable as u64);
table.entries_phys[table_i].set_present(1);
if should_invalidate {
invlpg(virt);
}
}
pub unsafe fn unmap(address: u64) {
let mut current_pml4 = CURRENT_PML4.lock();

23
kernel/src/cpu/smp.rs Normal file
View file

@ -0,0 +1,23 @@
use kernel_common::instructions::sti;
use crate::{
cpu::{gdt::setup_gdt, idt::setup_idt},
sys::{
lapic::{setup_lapic, setup_lapic_timer},
scheduler::yield_task,
},
};
// Rust-side entry point for an application processor (AP). The boot
// assembly trampoline jumps here after switching the core into long mode.
// `#[no_mangle]` + `extern "C"` so the assembly can reference the symbol
// by name and call it with the C ABI.
#[no_mangle]
extern "C" fn ap_main() -> ! {
// Per-core descriptor tables must be loaded before enabling interrupts.
setup_gdt();
setup_idt();
// phys == 0: setup_lapic skips mapping and reuses the BSP-established
// LAPIC mapping (see the `if phys != 0` guard in setup_lapic).
setup_lapic(0);
// TODO: Also calibrate other cores
setup_lapic_timer(false);
// SAFETY: IDT and LAPIC are configured above, so it is now safe to
// take interrupts on this core.
unsafe {
sti();
}
// Hand control to the scheduler; this call is not expected to return.
yield_task();
panic!("Yielding to idle task failed");
}

View file

@ -1,5 +1,46 @@
.text
.section .rodata
.code16
.align 0x1000
.global ap_trampoline
ap_trampoline:
hlt
mov %cr4, %eax
bts $5, %eax // Physical Address Extension
mov %eax, %cr4
mov trampoline_pml4 - ap_trampoline + 0x1000, %eax
mov %eax, %cr3
mov $0xc0000080, %ecx
rdmsr
bts $8, %eax // Long Mode Enable
bts $11, %eax // No Execute Enable
wrmsr
mov %cr0, %eax
bts $0, %eax // Protected Mode Enable
bts $16, %eax // Write Protect
bts $31, %eax // Paging Enable
mov %eax, %cr0
lgdt gdt_descriptor - ap_trampoline + 0x1000
ljmp $8, $ap_trampoline_2 - ap_trampoline + 0x1000
.code64
ap_trampoline_2:
mov trampoline_stack - ap_trampoline + 0x1000, %rsp
mov $ap_main, %rax
call *%rax
.align 4
trampoline_pml4:
.int 0
.align 8
trampoline_stack:
.quad 0
gdt:
.quad 0
.quad 0x209a0000000000
.quad 0x920000000000
gdt_descriptor:
.short . - gdt - 1
.quad gdt - ap_trampoline + 0x1000

View file

@ -85,11 +85,11 @@ extern "C" fn early_main(temp_loader_struct: *const LoaderStruct) -> ! {
}
parse_madt(&early_acpi_tables);
setup_hpet(&early_acpi_tables);
setup_lapic_timer();
setup_lapic_timer(true);
setup_multitasking();
}
fn main() {
info!("Starting main kernel task");
info!("Starting main kernel task...");
let mut status = unsafe { AcpiInitializeSubsystem() };
assert_eq!(status, AE_OK);
status = unsafe { AcpiInitializeTables(null_mut(), 0, 0) };

View file

@ -25,8 +25,9 @@ static SCI_CONTEXT: AtomicPtr<c_void> = AtomicPtr::new(null_mut());
use super::{
hpet::get_current_time,
lapic::get_current_lapic_id,
sync::{create_semaphore, lock_semaphore, unlock_semaphore, Semaphore, Spinlock},
task::{CURRENT_TASK, CURRENT_TASK_LOCK, MULTITASKING_ENABLED},
task::{CURRENT_TASKS, CURRENT_TASK_LOCK, MULTITASKING_ENABLED},
};
pub const AE_OK: ACPI_STATUS = 0;
@ -95,7 +96,7 @@ extern "C" fn AcpiOsGetThreadId() -> UINT64 {
let task_id;
CURRENT_TASK_LOCK.lock();
{
task_id = CURRENT_TASK.lock().as_ref().unwrap().id;
task_id = CURRENT_TASKS.lock().get(&get_current_lapic_id()).unwrap().id;
}
CURRENT_TASK_LOCK.unlock();
task_id as UINT64

View file

@ -13,9 +13,10 @@ use crate::cpu::paging::map_physical;
use super::{
early_acpi::EarlyACPIHandler,
ioapic::register_irq_handler,
lapic::get_current_lapic_id,
scheduler::{schedule_task, yield_task, SCHEDULER_LOCK},
sync::Spinlock,
task::{Task, TaskState, CURRENT_TASK, CURRENT_TASK_LOCK, MULTITASKING_ENABLED},
task::{Task, TaskState, CURRENT_TASKS, CURRENT_TASK_LOCK, MULTITASKING_ENABLED},
};
const REGISTER_CAPABILITIES: usize = 0;
@ -98,8 +99,8 @@ pub fn sleep(us: usize) {
if MULTITASKING_ENABLED.load(Ordering::SeqCst) {
CURRENT_TASK_LOCK.lock();
{
let mut _current_task = CURRENT_TASK.lock();
let current_task = _current_task.as_mut().unwrap();
let mut _current_task = CURRENT_TASKS.lock();
let current_task = _current_task.get_mut(&get_current_lapic_id()).unwrap();
current_task.sleep_until_us = get_current_time() + us;
current_task.task_state = TaskState::Sleeping;
}

View file

@ -3,12 +3,20 @@ use core::{
sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering},
};
use crate::cpu::paging::{map_physical, map_range, unmap};
use alloc::{vec, vec::Vec};
use kernel_common::{instructions::pause, paging::PageTable};
use super::hpet::sleep;
use crate::cpu::paging::{map_physical, map_range, unmap, virt_to_phys, CURRENT_PML4};
use super::{
hpet::sleep,
task::{ALL_APS_STARTED, STACK_SIZE, STARTING_AP_ID},
};
extern "C" {
fn ap_trampoline();
static trampoline_pml4: u8;
static trampoline_stack: u8;
}
struct LAPIC {
@ -37,7 +45,7 @@ static ADDRESS: AtomicPtr<u32> = AtomicPtr::new(null_mut());
pub static BSP_LAPIC_ID: AtomicUsize = AtomicUsize::new(0);
static LAPICS: [LAPIC; 256] = [EMPTY_LAPIC; 256];
static TICKS_PER_MS: AtomicUsize = AtomicUsize::new(0);
static NEXT_LAPIC_ID: AtomicUsize = AtomicUsize::new(0);
pub static NEXT_LAPIC_ID: AtomicUsize = AtomicUsize::new(0);
pub fn send_eoi() {
let address = ADDRESS.load(Ordering::SeqCst);
@ -45,11 +53,18 @@ pub fn send_eoi() {
address.add(REGISTER_EOI).write_volatile(0);
}
}
/// Returns the local APIC ID of the core this code is running on.
///
/// Reads the memory-mapped LAPIC ID register; the ID lives in the top
/// byte of the 32-bit register value.
pub fn get_current_lapic_id() -> usize {
    let base = ADDRESS.load(Ordering::SeqCst);
    // SAFETY: `base` points at the mapped LAPIC register page; the ID
    // register is read with a volatile access as required for MMIO.
    let id_register = unsafe { base.add(REGISTER_ID).read_volatile() };
    (id_register >> 24) as usize
}
pub fn setup_lapic(phys: u64) {
let address = unsafe { map_physical(phys, 0x400, false) as *mut u32 };
ADDRESS.store(address, Ordering::SeqCst);
if phys != 0 {
let address = unsafe { map_physical(phys, 0x400, false) as *mut u32 };
ADDRESS.store(address, Ordering::SeqCst);
BSP_LAPIC_ID.store(get_current_lapic_id(), Ordering::SeqCst);
}
let address = ADDRESS.load(Ordering::SeqCst);
unsafe {
BSP_LAPIC_ID.store(address.add(REGISTER_ID).read_volatile() as usize, Ordering::SeqCst);
address.add(REGISTER_SPURIOUS_INT).write_volatile(0x1ff);
}
send_eoi();
@ -62,9 +77,6 @@ fn calibrate_timer() {
sleep(10000);
let ticks_in_10ms = 0xffffffff - unsafe { address.add(REGISTER_TIMER_CURRENT_COUNT).read_volatile() };
TICKS_PER_MS.store(ticks_in_10ms as usize / 10, Ordering::SeqCst);
unsafe {
address.add(REGISTER_TIMER_INITIAL_COUNT).write_volatile(0);
}
}
fn send_ipi(lapic_id: usize, data: u32) {
let address = ADDRESS.load(Ordering::SeqCst);
@ -79,13 +91,16 @@ pub fn schedule_timer_interrupt() {
address.add(REGISTER_TIMER_INITIAL_COUNT).write_volatile(10 * TICKS_PER_MS.load(Ordering::SeqCst) as u32);
}
}
pub fn setup_lapic_timer() {
pub fn setup_lapic_timer(calibrate: bool) {
let address = ADDRESS.load(Ordering::SeqCst);
unsafe {
address.add(REGISTER_TIMER_DIVIDE).write_volatile(3);
}
calibrate_timer();
if calibrate {
calibrate_timer();
}
unsafe {
address.add(REGISTER_TIMER_INITIAL_COUNT).write_volatile(0);
address.add(REGISTER_TIMER_LVT).write_volatile(254);
}
}
@ -95,22 +110,42 @@ pub fn add_lapic(lapic_id: usize) {
LAPICS[next_id].present.store(true, Ordering::SeqCst);
}
// Boots every application processor (AP) known to the LAPIC table, one at
// a time, via the INIT/STARTUP IPI sequence, then tears the trampoline
// page back down.
pub fn start_aps() {
// Temporary boot stack shared by each AP in turn; by the time this
// function returns (and the Vec is freed) every AP has switched to its
// own idle-task stack -- the STARTING_AP_ID handshake below enforces that.
let stack: Vec<u8> = vec![0; STACK_SIZE];
let pml4_phys_addr;
{
let pml4 = CURRENT_PML4.lock();
let pml4 = pml4.as_ref().unwrap();
let pml4_phys_addr_u64 = virt_to_phys(*pml4 as *const PageTable as u64);
// The trampoline loads CR3 from a 32-bit slot, so the PML4 must live
// below 4 GiB; try_from asserts that.
pml4_phys_addr = u32::try_from(pml4_phys_addr_u64).unwrap();
}
unsafe {
// Identity-map page 0x1000 (writable) and copy the 16-bit trampoline
// there: STARTUP IPI vector 1 makes the AP begin execution at 0x1000.
map_range(0x1000, 0x1000, 0x1000, false, true, false, false);
let dest_ptr = 0x1000 as *mut u8;
let src_ptr = ap_trampoline as *const u8;
copy(src_ptr, dest_ptr, 0x1000);
// Patch the copied trampoline's data slots with the real PML4 physical
// address and the top of the boot stack.
let pml4_offset = (&raw const trampoline_pml4).offset_from(src_ptr);
let pml4_addr = (0x1000 + pml4_offset) as *mut u32;
*pml4_addr = pml4_phys_addr;
let stack_offset = (&raw const trampoline_stack).offset_from(src_ptr);
let stack_addr = (0x1000 + stack_offset) as *mut u64;
*stack_addr = stack.as_ptr() as u64 + STACK_SIZE as u64;
// Remap the page executable now that patching is done (write+exec is
// specially permitted for 0x1000 in map()).
map_range(0x1000, 0x1000, 0x1000, false, true, true, false);
}
for i in 0..NEXT_LAPIC_ID.load(Ordering::SeqCst) {
let lapic_id = LAPICS[i].lapic_id.load(Ordering::SeqCst);
// The bootstrap processor is already running -- skip it.
if lapic_id == BSP_LAPIC_ID.load(Ordering::SeqCst) {
continue;
}
// Publish which AP we are starting; the AP's idle task resets this to
// -1 once it is up, which is our signal to move on to the next one.
STARTING_AP_ID.store(lapic_id as i64, Ordering::SeqCst);
send_ipi(lapic_id, IPI_INIT);
sleep(10000);
send_ipi(lapic_id, IPI_STARTUP | 1);
while STARTING_AP_ID.load(Ordering::SeqCst) != -1 {
pause();
}
}
unsafe {
// Trampoline no longer needed; unmap the identity page.
unmap(0x1000);
}
ALL_APS_STARTED.store(true, Ordering::SeqCst);
}

View file

@ -1,18 +1,18 @@
use core::{arch::asm, sync::atomic::Ordering};
use alloc::collections::vec_deque::VecDeque;
use alloc::{collections::vec_deque::VecDeque, vec::Vec};
use spin::Mutex;
use crate::cpu::isr::ISRState;
use super::{
lapic::schedule_timer_interrupt,
lapic::{get_current_lapic_id, schedule_timer_interrupt},
sync::{Spinlock, IN_ISR_HANDLER, LOCKS_HELD},
task::{switch_task, Task, TaskState, CURRENT_TASK, CURRENT_TASK_LOCK, MULTITASKING_ENABLED},
task::{switch_task, Task, TaskState, CURRENT_TASKS, CURRENT_TASK_LOCK, MULTITASKING_ENABLED},
};
static SCHEDULER_LIST: Mutex<VecDeque<Task>> = Mutex::new(VecDeque::new());
pub static IDLE_TASK: Mutex<Option<Task>> = Mutex::new(None);
pub static IDLE_TASKS: Mutex<Vec<Task>> = Mutex::new(Vec::new());
pub static SCHEDULER_LOCK: Spinlock = Spinlock::new();
pub fn scheduler(state: &mut ISRState) {
@ -37,14 +37,20 @@ pub fn scheduler(state: &mut ISRState) {
}
let mut switch_idle = false;
{
let _current_task = CURRENT_TASK.lock();
let current_task = _current_task.as_ref().unwrap();
if current_task.task_state != TaskState::Ready && current_task.task_state != TaskState::Idle {
let _current_task = CURRENT_TASKS.lock();
let current_task = _current_task.get(&get_current_lapic_id());
if current_task.is_none() {
switch_idle = true;
}
if !switch_idle {
let current_task = current_task.unwrap();
if current_task.task_state != TaskState::Ready && current_task.task_state != TaskState::Idle {
switch_idle = true;
}
}
}
if switch_idle {
switch_task(state, IDLE_TASK.lock().take().unwrap());
switch_task(state, IDLE_TASKS.lock().pop().unwrap());
}
CURRENT_TASK_LOCK.unlock();
}

View file

@ -7,8 +7,9 @@ use spin::Mutex;
use crate::sys::task::MULTITASKING_ENABLED;
use super::{
lapic::get_current_lapic_id,
scheduler::{schedule_task, yield_task, SCHEDULER_LOCK},
task::{Task, TaskState, CURRENT_TASK, CURRENT_TASK_LOCK},
task::{Task, TaskState, CURRENT_TASKS, CURRENT_TASK_LOCK},
};
pub static IN_ISR_HANDLER: AtomicBool = AtomicBool::new(false);
@ -72,8 +73,8 @@ pub fn lock_semaphore(semaphore: Arc<Semaphore>, count: usize) {
}
CURRENT_TASK_LOCK.lock();
{
let mut current_task = CURRENT_TASK.lock();
let current_task = current_task.as_mut().unwrap();
let mut current_task = CURRENT_TASKS.lock();
let current_task = current_task.get_mut(&get_current_lapic_id()).unwrap();
current_task.task_state = TaskState::SemaphoreBlocked;
current_task.block_on_semaphore = Some(semaphore.clone());
current_task.semaphore_requested_count = count;

View file

@ -1,21 +1,25 @@
use core::{
arch::asm,
sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
sync::atomic::{AtomicBool, AtomicI64, AtomicU64, AtomicUsize, Ordering},
};
use alloc::{sync::Arc, vec, vec::Vec};
use kernel_common::instructions::hlt;
use spin::Mutex;
use hashbrown::HashMap;
use kernel_common::instructions::{hlt, pause};
use spin::{Lazy, Mutex};
use crate::{
cpu::isr::ISRState,
main,
sys::scheduler::{schedule_task, IDLE_TASK, SCHEDULER_LOCK},
sys::{
lapic::NEXT_LAPIC_ID,
scheduler::{schedule_task, IDLE_TASKS, SCHEDULER_LOCK},
},
};
use super::{
hpet::sleep_internal,
lapic::schedule_timer_interrupt,
lapic::{get_current_lapic_id, schedule_timer_interrupt},
scheduler::yield_task,
sync::{lock_semaphore_internal, Semaphore, Spinlock},
};
@ -40,13 +44,14 @@ pub struct Task {
pub semaphore_requested_count: usize,
}
const STACK_SIZE: usize = 64 * 1024;
pub const STACK_SIZE: usize = 64 * 1024;
static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(2);
pub static CURRENT_TASK: Mutex<Option<Task>> = Mutex::new(None);
pub static CURRENT_TASKS: Lazy<Mutex<HashMap<usize, Task>>> = Lazy::new(|| Mutex::new(HashMap::new()));
pub static CURRENT_TASK_LOCK: Spinlock = Spinlock::new();
static RFLAGS: AtomicU64 = AtomicU64::new(0);
pub static MULTITASKING_ENABLED: AtomicBool = AtomicBool::new(false);
pub static STARTING_AP_ID: AtomicI64 = AtomicI64::new(-1);
pub static ALL_APS_STARTED: AtomicBool = AtomicBool::new(false);
pub fn allocate_stack() -> u64 {
let stack: Vec<u8> = vec![0; STACK_SIZE];
@ -54,8 +59,8 @@ pub fn allocate_stack() -> u64 {
}
pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
assert!(CURRENT_TASK_LOCK.is_locked());
let mut _current_task = CURRENT_TASK.lock();
if let Some(mut current_task) = _current_task.take() {
let mut _current_task = CURRENT_TASKS.lock();
if let Some(mut current_task) = _current_task.remove(&get_current_lapic_id()) {
current_task.state = *current_state;
match current_task.task_state {
TaskState::Ready => {
@ -63,14 +68,15 @@ pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
schedule_task(current_task);
SCHEDULER_LOCK.unlock();
}
TaskState::Idle => *IDLE_TASK.lock() = Some(current_task),
TaskState::Idle => IDLE_TASKS.lock().push(current_task),
TaskState::Terminated => {}
TaskState::Sleeping => sleep_internal(current_task),
TaskState::SemaphoreBlocked => lock_semaphore_internal(current_task),
}
}
*current_state = new_task.state;
*_current_task = Some(new_task);
let result = _current_task.insert(get_current_lapic_id(), new_task);
assert!(result.is_none());
schedule_timer_interrupt();
}
pub fn create_task(func: fn()) -> Task {
@ -110,24 +116,37 @@ pub fn create_task(func: fn()) -> Task {
};
task
}
/// Creates one idle task (running `idle_main`) and registers it in the
/// global idle-task pool so a core can pick it up when it has no work.
fn create_idle_task() {
    let mut task = create_task(idle_main);
    task.task_state = TaskState::Idle;
    IDLE_TASKS.lock().push(task);
}
extern "C" fn task_entry() -> ! {
CURRENT_TASK_LOCK.lock();
let func;
{
let task = CURRENT_TASK.lock();
func = task.as_ref().unwrap().initial_func;
let task = CURRENT_TASKS.lock();
func = task.get(&get_current_lapic_id()).unwrap().initial_func;
}
CURRENT_TASK_LOCK.unlock();
func();
CURRENT_TASK_LOCK.lock();
{
CURRENT_TASK.lock().as_mut().unwrap().task_state = TaskState::Terminated;
CURRENT_TASKS.lock().get_mut(&get_current_lapic_id()).unwrap().task_state = TaskState::Terminated;
}
CURRENT_TASK_LOCK.unlock();
yield_task();
panic!("Failed to terminate task");
}
fn idle() {
fn idle_main() {
while !ALL_APS_STARTED.load(Ordering::SeqCst) {
if STARTING_AP_ID.load(Ordering::SeqCst) == get_current_lapic_id() as i64 {
let _ = STARTING_AP_ID.compare_exchange(get_current_lapic_id() as i64, -1, Ordering::SeqCst, Ordering::SeqCst);
}
pause();
}
loop {
hlt();
}
@ -138,10 +157,8 @@ pub fn setup_multitasking() -> ! {
asm!("pushf; pop {0:r}", out(reg) rflags);
}
RFLAGS.store(rflags, core::sync::atomic::Ordering::SeqCst);
let mut idle_task = create_task(idle);
idle_task.task_state = TaskState::Idle;
{
*IDLE_TASK.lock() = Some(idle_task);
for _ in 0..NEXT_LAPIC_ID.load(Ordering::SeqCst) {
create_idle_task();
}
let task = create_task(main);
schedule_task(task);