Switch to Ordering::SeqCst

Mathieu Strypsteen 2024-11-10 22:44:21 +01:00
parent 38167102dc
commit 38993dc42d
11 changed files with 70 additions and 70 deletions
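This commit replaces every Ordering::Relaxed atomic access in the kernel and bootloader (interrupt handling, paging, HPET and LAPIC timers, I/O APIC setup, scheduler, spinlock and semaphore code) with Ordering::SeqCst, so these loads, stores, and read-modify-write operations participate in one global total order instead of guaranteeing atomicity alone. The sketch below is a minimal, self-contained illustration of the spinlock pattern the commit touches, not the kernel's exact code: the real Spinlock also disables interrupts via cli()/sti() and tracks LOCKS_HELD, both omitted here.

// Simplified, hypothetical stand-alone sketch of the spinlock pattern changed
// by this commit, with every atomic access using SeqCst.
use core::sync::atomic::{AtomicBool, Ordering};

pub struct Spinlock {
    locked: AtomicBool,
}

impl Spinlock {
    pub const fn new() -> Self {
        Spinlock { locked: AtomicBool::new(false) }
    }

    pub fn lock(&self) {
        // Spin until this thread wins the false -> true transition; SeqCst is
        // used for both the success and failure orderings, as in the commit.
        while self
            .locked
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
            .is_err()
        {
            core::hint::spin_loop();
        }
    }

    pub fn unlock(&self) {
        assert!(self.locked.load(Ordering::SeqCst));
        self.locked.store(false, Ordering::SeqCst);
    }
}

SeqCst is the strongest ordering Rust offers, so it is never weaker than the Relaxed accesses it replaces here, at the cost of extra fencing on some architectures.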


@@ -85,10 +85,10 @@ extern "C" fn isr_handler(state: &mut ISRState) {
     if 48 <= state.isr && state.isr <= 254 {
         send_eoi(); // APIC interrupt
     }
-    IN_ISR_HANDLER.store(true, Ordering::Relaxed);
+    IN_ISR_HANDLER.store(true, Ordering::SeqCst);
     if state.isr == 254 {
         scheduler(state);
-        IN_ISR_HANDLER.store(false, Ordering::Relaxed);
+        IN_ISR_HANDLER.store(false, Ordering::SeqCst);
         return;
     }
     let handler;
@@ -100,5 +100,5 @@ extern "C" fn isr_handler(state: &mut ISRState) {
     } else {
         warn!("Unhandled interrupt: {}", state.isr);
     }
-    IN_ISR_HANDLER.store(false, Ordering::Relaxed);
+    IN_ISR_HANDLER.store(false, Ordering::SeqCst);
 }


@@ -47,8 +47,8 @@ fn invlpg(addr: u64) {
     }
 }
 fn virt_to_phys(virt: u64) -> u64 {
-    if !PAGING_ACTIVE.load(Ordering::Relaxed) {
-        return virt - KERNEL_HEAP_START + HEAP_PHYS_START.load(Ordering::Relaxed);
+    if !PAGING_ACTIVE.load(Ordering::SeqCst) {
+        return virt - KERNEL_HEAP_START + HEAP_PHYS_START.load(Ordering::SeqCst);
     }
     assert!(virt >= KERNEL_HEAP_START);
     assert!(virt < KERNEL_HEAP_START + KERNEL_HEAP_INITIAL_SIZE as u64);
@@ -175,7 +175,7 @@ pub unsafe fn unmap_physical(address: u64, size: u64) {
     }
 }
 pub fn setup_paging(loader_struct: &LoaderStruct, phys_start: u64, heap_start: u64) {
-    HEAP_PHYS_START.store(heap_start, Ordering::Relaxed);
+    HEAP_PHYS_START.store(heap_start, Ordering::SeqCst);
     let mut memory_size = 0;
     for i in loader_struct.available_memory {
         if i.initial_page + i.page_count > memory_size {
@@ -245,5 +245,5 @@ pub fn setup_paging(loader_struct: &LoaderStruct, phys_start: u64, heap_start: u
     }
     let mut current_pml4 = CURRENT_PML4.lock();
     *current_pml4 = Some(pml4);
-    PAGING_ACTIVE.store(true, Ordering::Relaxed);
+    PAGING_ACTIVE.store(true, Ordering::SeqCst);
 }


@@ -58,7 +58,7 @@ extern "C" fn early_main(temp_loader_struct: *const LoaderStruct) -> ! {
     setup_idt();
     setup_paging(&loader_struct, loader_struct.phys_kernel_start, loader_struct.phys_heap_start);
     disable_pic();
-    RSDP_ADDRESS.store(loader_struct.rsdp_address, Ordering::Relaxed);
+    RSDP_ADDRESS.store(loader_struct.rsdp_address, Ordering::SeqCst);
     let early_acpi_tables;
     unsafe {
         early_acpi_tables = AcpiTables::from_rsdp(EarlyACPIHandler {}, loader_struct.rsdp_address as usize).unwrap();


@@ -84,11 +84,11 @@ extern "C" fn AcpiOsFree(memory: *mut c_void) {
 }
 #[no_mangle]
 extern "C" fn AcpiOsGetRootPointer() -> ACPI_PHYSICAL_ADDRESS {
-    RSDP_ADDRESS.load(Ordering::Relaxed)
+    RSDP_ADDRESS.load(Ordering::SeqCst)
 }
 #[no_mangle]
 extern "C" fn AcpiOsGetThreadId() -> UINT64 {
-    if !MULTITASKING_ENABLED.load(Ordering::Relaxed) {
+    if !MULTITASKING_ENABLED.load(Ordering::SeqCst) {
         return 1;
     }
     let task_id;
@@ -110,13 +110,13 @@ extern "C" fn AcpiOsInitialize() -> ACPI_STATUS {
 fn sci_handler() {
     let handler = SCI_HANDLER.lock().unwrap();
     unsafe {
-        handler(SCI_CONTEXT.load(Ordering::Relaxed));
+        handler(SCI_CONTEXT.load(Ordering::SeqCst));
     }
 }
 #[no_mangle]
 extern "C" fn AcpiOsInstallInterruptHandler(gsi: UINT32, handler: ACPI_OSD_HANDLER, context: *mut c_void) -> ACPI_STATUS {
     *SCI_HANDLER.lock() = handler;
-    SCI_CONTEXT.store(context, Ordering::Relaxed);
+    SCI_CONTEXT.store(context, Ordering::SeqCst);
     set_irq_override(gsi as usize, 253, 1, 1);
     register_irq_handler(253, sci_handler);
     AE_OK


@@ -34,16 +34,16 @@ static SLEEPING_LIST: Mutex<Vec<Task>> = Mutex::new(Vec::new());
 static SLEEP_LOCK: Spinlock = Spinlock::new();
 fn ticks_to_us(ticks: usize) -> usize {
-    let period = PERIOD.load(Ordering::Relaxed);
+    let period = PERIOD.load(Ordering::SeqCst);
     ticks / (usize::pow(10, 9) / period)
 }
 fn us_to_ticks(us: usize) -> usize {
-    let period = PERIOD.load(Ordering::Relaxed);
+    let period = PERIOD.load(Ordering::SeqCst);
     usize::pow(10, 9) / period * us
 }
 fn schedule_hpet_interrupt(sleep_until_us: usize) {
     let current_time = get_current_time();
-    let address = ADDRESS.load(Ordering::Relaxed);
+    let address = ADDRESS.load(Ordering::SeqCst);
     let mut sleep_for_us = sleep_until_us - current_time;
     if sleep_for_us < 10 {
         sleep_for_us = 10;
@@ -57,15 +57,15 @@ fn schedule_hpet_interrupt(sleep_until_us: usize) {
     }
 }
 fn handler() {
-    let address = ADDRESS.load(Ordering::Relaxed);
+    let address = ADDRESS.load(Ordering::SeqCst);
     unsafe {
         let current_config = address.add(REGISTER_TIMER0_CONFIG).read_volatile();
         address.add(REGISTER_TIMER0_CONFIG).write_volatile(current_config & !TIMER_CONFIG_ENABLE);
     }
-    if EARLY_SLEEP.load(Ordering::Relaxed) {
-        EARLY_SLEEP.store(false, Ordering::Relaxed);
+    if EARLY_SLEEP.load(Ordering::SeqCst) {
+        EARLY_SLEEP.store(false, Ordering::SeqCst);
     }
-    if MULTITASKING_ENABLED.load(Ordering::Relaxed) {
+    if MULTITASKING_ENABLED.load(Ordering::SeqCst) {
         SLEEP_LOCK.lock();
         {
             let mut sleeping_list = SLEEPING_LIST.lock();
@@ -90,12 +90,12 @@ fn handler() {
     }
 }
 fn get_current_time() -> usize {
-    let address = ADDRESS.load(Ordering::Relaxed);
+    let address = ADDRESS.load(Ordering::SeqCst);
     let current_counter = unsafe { address.add(REGISTER_COUNTER).read_volatile() };
     ticks_to_us(current_counter as usize)
 }
 pub fn sleep(us: usize) {
-    if MULTITASKING_ENABLED.load(Ordering::Relaxed) {
+    if MULTITASKING_ENABLED.load(Ordering::SeqCst) {
         CURRENT_TASK_LOCK.lock();
         {
             let mut _current_task = CURRENT_TASK.lock();
@@ -106,15 +106,15 @@ pub fn sleep(us: usize) {
         CURRENT_TASK_LOCK.unlock();
         yield_task();
     } else {
-        EARLY_SLEEP.store(true, Ordering::Relaxed);
-        let address = ADDRESS.load(Ordering::Relaxed);
+        EARLY_SLEEP.store(true, Ordering::SeqCst);
+        let address = ADDRESS.load(Ordering::SeqCst);
         unsafe {
             let current_counter = address.add(REGISTER_COUNTER).read_volatile();
             address.add(REGISTER_TIMER0_COMPARATOR).write_volatile(current_counter + us_to_ticks(us) as u64);
             let current_config = address.add(REGISTER_TIMER0_CONFIG).read_volatile();
             address.add(REGISTER_TIMER0_CONFIG).write_volatile(current_config | TIMER_CONFIG_ENABLE);
         }
-        while EARLY_SLEEP.load(Ordering::Relaxed) {
+        while EARLY_SLEEP.load(Ordering::SeqCst) {
             pause();
         }
     }
@@ -132,9 +132,9 @@ pub fn sleep_internal(task: Task) {
 pub fn setup_hpet(tables: &AcpiTables<EarlyACPIHandler>) {
     let hpet_info = HpetInfo::new(tables).unwrap();
     let address = unsafe { map_physical(hpet_info.base_address as u64, 0x200) } as *mut u64;
-    ADDRESS.store(address, Ordering::Relaxed);
+    ADDRESS.store(address, Ordering::SeqCst);
     let period = unsafe { address.add(REGISTER_CAPABILITIES).read_volatile() >> 32 } as usize;
-    PERIOD.store(period, Ordering::Relaxed);
+    PERIOD.store(period, Ordering::SeqCst);
     unsafe {
         let current_config = address.add(REGISTER_CONFIG).read_volatile();
         address.add(REGISTER_CONFIG).write_volatile(current_config | CONFIG_ENABLE | CONFIG_LEGACY_REPLACEMENT);


@@ -39,19 +39,19 @@ static IOAPICS: [IOAPIC; 32] = [EMPTY_IOAPIC; 32];
 fn read_register(apic_i: usize, reg_i: u8) -> u32 {
     unsafe {
-        IOAPICS[apic_i].address.load(Ordering::Relaxed).write_volatile(reg_i as u32);
-        return IOAPICS[apic_i].address.load(Ordering::Relaxed).add(4).read_volatile();
+        IOAPICS[apic_i].address.load(Ordering::SeqCst).write_volatile(reg_i as u32);
+        return IOAPICS[apic_i].address.load(Ordering::SeqCst).add(4).read_volatile();
     }
 }
 fn write_register(apic_i: usize, reg_i: u8, val: u32) {
     unsafe {
-        IOAPICS[apic_i].address.load(Ordering::Relaxed).write_volatile(reg_i as u32);
-        IOAPICS[apic_i].address.load(Ordering::Relaxed).add(4).write_volatile(val);
+        IOAPICS[apic_i].address.load(Ordering::SeqCst).write_volatile(reg_i as u32);
+        IOAPICS[apic_i].address.load(Ordering::SeqCst).add(4).write_volatile(val);
     }
 }
 fn get_apic_for_gsi(gsi: usize) -> usize {
     for i in 0..32 {
-        if gsi >= IOAPICS[i].start_gsi.load(Ordering::Relaxed) as usize && gsi < IOAPICS[i].end_gsi.load(Ordering::Relaxed) as usize {
+        if gsi >= IOAPICS[i].start_gsi.load(Ordering::SeqCst) as usize && gsi < IOAPICS[i].end_gsi.load(Ordering::SeqCst) as usize {
             return i;
         }
     }
@@ -59,14 +59,14 @@ fn get_apic_for_gsi(gsi: usize) -> usize {
 }
 fn read_redirection(mut gsi: usize) -> RedirectionEntry {
     let apic_i = get_apic_for_gsi(gsi);
-    gsi -= IOAPICS[apic_i].start_gsi.load(Ordering::Relaxed) as usize;
+    gsi -= IOAPICS[apic_i].start_gsi.load(Ordering::SeqCst) as usize;
     let mut redirection_int = read_register(apic_i, REGISTER_REDIRECTION + gsi as u8 * 2) as u64;
     redirection_int |= (read_register(apic_i, REGISTER_REDIRECTION + gsi as u8 * 2 + 1) as u64) << 32;
     return RedirectionEntry(redirection_int);
 }
 fn write_redirection(mut gsi: usize, redirection: RedirectionEntry) {
     let apic_i = get_apic_for_gsi(gsi);
-    gsi -= IOAPICS[apic_i].start_gsi.load(Ordering::Relaxed) as usize;
+    gsi -= IOAPICS[apic_i].start_gsi.load(Ordering::SeqCst) as usize;
     write_register(apic_i, REGISTER_REDIRECTION + gsi as u8 * 2, redirection.0 as u32);
     write_register(apic_i, REGISTER_REDIRECTION + gsi as u8 * 2 + 1, (redirection.0 >> 32) as u32);
 }
@@ -81,8 +81,8 @@ pub fn register_irq_handler(vector: usize, handler: fn()) {
     assert!(ISR_HANDLERS.lock()[vector].is_none());
     ISR_HANDLERS.lock()[vector] = Some(handler);
     for i in 0..32 {
-        let start = IOAPICS[i].start_gsi.load(Ordering::Relaxed);
-        let end = IOAPICS[i].end_gsi.load(Ordering::Relaxed);
+        let start = IOAPICS[i].start_gsi.load(Ordering::SeqCst);
+        let end = IOAPICS[i].end_gsi.load(Ordering::SeqCst);
         for j in start..end {
             let mut redirection = read_redirection(j as usize);
             if redirection.vector() == vector as u64 {
@@ -95,16 +95,16 @@ pub fn register_irq_handler(vector: usize, handler: fn()) {
 pub fn setup_ioapic(apic_i: u8, phys: u64, gsi_base: u32) {
     let apic_i = apic_i as usize;
     let address = unsafe { map_physical(phys, 0x14) as *mut u32 };
-    IOAPICS[apic_i].address.store(address, Ordering::Relaxed);
-    IOAPICS[apic_i].start_gsi.store(gsi_base, Ordering::Relaxed);
+    IOAPICS[apic_i].address.store(address, Ordering::SeqCst);
+    IOAPICS[apic_i].start_gsi.store(gsi_base, Ordering::SeqCst);
     let max_ints = (read_register(apic_i, REGISTER_VERSION) >> 16) & 0xff;
-    IOAPICS[apic_i].end_gsi.store(gsi_base + max_ints, Ordering::Relaxed);
+    IOAPICS[apic_i].end_gsi.store(gsi_base + max_ints, Ordering::SeqCst);
     assert!(gsi_base + max_ints < 128);
     for i in gsi_base..gsi_base + max_ints {
         let mut redirection = RedirectionEntry(0);
         redirection.set_mask(1);
         redirection.set_vector(48 + i as u64);
-        redirection.set_destination(BSP_LAPIC_ID.load(Ordering::Relaxed) as u64);
+        redirection.set_destination(BSP_LAPIC_ID.load(Ordering::SeqCst) as u64);
         write_redirection(i as usize, redirection);
     }
 }


@@ -20,40 +20,40 @@ pub static BSP_LAPIC_ID: AtomicU8 = AtomicU8::new(0);
 static TICKS_PER_MS: AtomicUsize = AtomicUsize::new(0);
 pub fn send_eoi() {
-    let address = ADDRESS.load(Ordering::Relaxed);
+    let address = ADDRESS.load(Ordering::SeqCst);
     unsafe {
         address.add(REGISTER_EOI).write_volatile(0);
     }
 }
 pub fn setup_lapic(phys: u64) {
     let address = unsafe { map_physical(phys, 0x400) as *mut u32 };
-    ADDRESS.store(address, Ordering::Relaxed);
+    ADDRESS.store(address, Ordering::SeqCst);
     unsafe {
-        BSP_LAPIC_ID.store(address.add(REGISTER_ID).read_volatile() as u8, Ordering::Relaxed);
+        BSP_LAPIC_ID.store(address.add(REGISTER_ID).read_volatile() as u8, Ordering::SeqCst);
         address.add(REGISTER_SPURIOUS_INT).write_volatile(0x1ff);
     }
     send_eoi();
 }
 fn calibrate_timer() {
-    let address = ADDRESS.load(Ordering::Relaxed);
+    let address = ADDRESS.load(Ordering::SeqCst);
     unsafe {
         address.add(REGISTER_TIMER_INITIAL_COUNT).write_volatile(0xffffffff);
     }
     sleep(10000);
     let ticks_in_10ms = 0xffffffff - unsafe { address.add(REGISTER_TIMER_CURRENT_COUNT).read_volatile() };
-    TICKS_PER_MS.store(ticks_in_10ms as usize / 10, Ordering::Relaxed);
+    TICKS_PER_MS.store(ticks_in_10ms as usize / 10, Ordering::SeqCst);
     unsafe {
         address.add(REGISTER_TIMER_INITIAL_COUNT).write_volatile(0);
     }
 }
 pub fn schedule_timer_interrupt() {
-    let address = ADDRESS.load(Ordering::Relaxed);
+    let address = ADDRESS.load(Ordering::SeqCst);
     unsafe {
-        address.add(REGISTER_TIMER_INITIAL_COUNT).write_volatile(10 * TICKS_PER_MS.load(Ordering::Relaxed) as u32);
+        address.add(REGISTER_TIMER_INITIAL_COUNT).write_volatile(10 * TICKS_PER_MS.load(Ordering::SeqCst) as u32);
     }
 }
 pub fn setup_lapic_timer() {
-    let address = ADDRESS.load(Ordering::Relaxed);
+    let address = ADDRESS.load(Ordering::SeqCst);
     unsafe {
         address.add(REGISTER_TIMER_DIVIDE).write_volatile(3);
     }


@@ -16,7 +16,7 @@ pub static IDLE_TASK: Mutex<Option<Task>> = Mutex::new(None);
 pub static SCHEDULER_LOCK: Spinlock = Spinlock::new();
 pub fn scheduler(state: &mut ISRState) {
-    if !MULTITASKING_ENABLED.load(Ordering::Relaxed) {
+    if !MULTITASKING_ENABLED.load(Ordering::SeqCst) {
         return;
     }
     CURRENT_TASK_LOCK.lock();
@@ -49,7 +49,7 @@ pub fn scheduler(state: &mut ISRState) {
     CURRENT_TASK_LOCK.unlock();
 }
 pub fn schedule_task(task: Task) {
-    assert!(SCHEDULER_LOCK.is_locked() || !MULTITASKING_ENABLED.load(Ordering::Relaxed));
+    assert!(SCHEDULER_LOCK.is_locked() || !MULTITASKING_ENABLED.load(Ordering::SeqCst));
     let mut scheduler_list = SCHEDULER_LIST.lock();
     if scheduler_list.is_empty() {
         schedule_timer_interrupt();
@@ -57,8 +57,8 @@ pub fn schedule_task(task: Task) {
     scheduler_list.push_back(task);
 }
 pub fn yield_task() {
-    assert!(!IN_ISR_HANDLER.load(Ordering::Relaxed));
-    assert_eq!(LOCKS_HELD.load(Ordering::Relaxed), 0);
+    assert!(!IN_ISR_HANDLER.load(Ordering::SeqCst));
+    assert_eq!(LOCKS_HELD.load(Ordering::SeqCst), 0);
     unsafe {
         asm!("int $254");
     }


@@ -28,21 +28,21 @@ impl Spinlock {
     }
     pub fn lock(&self) {
         cli();
-        while !self.locked.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed).is_ok() {}
-        LOCKS_HELD.fetch_add(1, Ordering::Relaxed);
+        while !self.locked.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst).is_ok() {}
+        LOCKS_HELD.fetch_add(1, Ordering::SeqCst);
     }
     pub fn unlock(&self) {
-        assert!(self.locked.load(Ordering::Relaxed));
-        self.locked.store(false, Ordering::Relaxed);
-        LOCKS_HELD.fetch_sub(1, Ordering::Relaxed);
-        if !IN_ISR_HANDLER.load(Ordering::Relaxed) && LOCKS_HELD.load(Ordering::Relaxed) == 0 {
+        assert!(self.locked.load(Ordering::SeqCst));
+        self.locked.store(false, Ordering::SeqCst);
+        LOCKS_HELD.fetch_sub(1, Ordering::SeqCst);
+        if !IN_ISR_HANDLER.load(Ordering::SeqCst) && LOCKS_HELD.load(Ordering::SeqCst) == 0 {
             unsafe {
                 sti();
             }
         }
     }
     pub fn is_locked(&self) -> bool {
-        self.locked.load(Ordering::Relaxed)
+        self.locked.load(Ordering::SeqCst)
     }
 }
@@ -57,11 +57,11 @@ pub fn create_semaphore(max_count: usize, initial_count: usize) -> Arc<Semaphore
 pub fn lock_semaphore(semaphore: Arc<Semaphore>, count: usize) {
     loop {
         let mut success = false;
-        let current_count = semaphore.current_count.load(Ordering::Relaxed);
+        let current_count = semaphore.current_count.load(Ordering::SeqCst);
         if current_count >= count {
             success = semaphore
                 .current_count
-                .compare_exchange(current_count, current_count - count, Ordering::Relaxed, Ordering::Relaxed)
+                .compare_exchange(current_count, current_count - count, Ordering::SeqCst, Ordering::SeqCst)
                 .is_ok();
         }
         if success {
@@ -82,7 +82,7 @@ pub fn lock_semaphore(semaphore: Arc<Semaphore>, count: usize) {
 pub fn lock_semaphore_internal(mut task: Task) {
     let semaphore = task.block_on_semaphore.as_ref().unwrap().clone();
     semaphore.spinlock.lock();
-    if task.semaphore_requested_count > semaphore.current_count.load(Ordering::Relaxed) {
+    if task.semaphore_requested_count > semaphore.current_count.load(Ordering::SeqCst) {
         semaphore.blocked_list.lock().push_back(task);
     } else {
         task.block_on_semaphore = None;
@@ -97,8 +97,8 @@ pub fn lock_semaphore_internal(mut task: Task) {
 pub fn unlock_semaphore(semaphore: Arc<Semaphore>, count: usize) {
     semaphore.spinlock.lock();
     {
-        semaphore.current_count.fetch_add(count, Ordering::Relaxed);
-        assert!(semaphore.current_count.load(Ordering::Relaxed) <= semaphore.max_count);
+        semaphore.current_count.fetch_add(count, Ordering::SeqCst);
+        assert!(semaphore.current_count.load(Ordering::SeqCst) <= semaphore.max_count);
         while let Some(mut task) = semaphore.blocked_list.lock().pop_front() {
             task.block_on_semaphore = None;
             task.semaphore_requested_count = 0;


@@ -80,7 +80,7 @@ pub fn create_task(func: fn()) -> Task {
     let stack = Box::new(Stack([0; STACK_SIZE]));
     let stack_address: *const Stack = &*stack;
     let task = Task {
-        id: NEXT_TASK_ID.fetch_add(1, Ordering::Relaxed),
+        id: NEXT_TASK_ID.fetch_add(1, Ordering::SeqCst),
         state: ISRState {
             rax: 0,
             rbx: 0,
@@ -101,7 +101,7 @@ pub fn create_task(func: fn()) -> Task {
             error_code: 0,
             rip: task_entry as u64,
             cs: 8,
-            rflags: RFLAGS.load(Ordering::Relaxed),
+            rflags: RFLAGS.load(Ordering::SeqCst),
             rsp: stack_address as u64 + STACK_SIZE as u64,
             ss: 16,
         },
@@ -141,7 +141,7 @@ pub fn setup_multitasking() -> ! {
     unsafe {
         asm!("pushf; pop {0:r}", out(reg) rflags);
     }
-    RFLAGS.store(rflags, core::sync::atomic::Ordering::Relaxed);
+    RFLAGS.store(rflags, core::sync::atomic::Ordering::SeqCst);
     let mut idle_task = create_task(idle);
     idle_task.task_state = TaskState::Idle;
     {
@@ -149,7 +149,7 @@ pub fn setup_multitasking() -> ! {
     }
     let task = create_task(main);
     schedule_task(task);
-    MULTITASKING_ENABLED.store(true, Ordering::Relaxed);
+    MULTITASKING_ENABLED.store(true, Ordering::SeqCst);
     yield_task();
     panic!("Setting up multitasking failed");
 }


@@ -49,16 +49,16 @@ fn main() -> Status {
     with_config_table(|tables| {
         for i in tables {
             if i.guid == uefi::table::cfg::ACPI2_GUID {
-                rsdp.store(i.address as u64, Ordering::Relaxed);
+                rsdp.store(i.address as u64, Ordering::SeqCst);
                 break;
             }
         }
     });
-    assert_ne!(rsdp.load(Ordering::Relaxed), 0, "RSDP not found");
+    assert_ne!(rsdp.load(Ordering::SeqCst), 0, "RSDP not found");
     let memory_map = unsafe { exit_boot_services(MemoryType::LOADER_DATA) };
     let pml4 = setup_paging(&memory_map, heap_start);
     map_kernel(KERNEL, pml4, kernel_start);
-    let loader_struct = generate_loader_struct(&memory_map, kernel_start, heap_start, rsdp.load(Ordering::Relaxed));
+    let loader_struct = generate_loader_struct(&memory_map, kernel_start, heap_start, rsdp.load(Ordering::SeqCst));
     info!("Jumping to kernel...");
     unsafe {
         (mem::transmute::<_, extern "C" fn(&LoaderStruct) -> !>(kernel_entry))(&loader_struct);