Run clippy
All checks were successful
Build / build (push) Successful in 2m43s

Mathieu Strypsteen 2024-12-13 13:35:40 +01:00
parent 9dd71885d3
commit 3cc8aa848a
11 changed files with 32 additions and 33 deletions

@@ -18,3 +18,6 @@ spin = "0.9.8"
[lints.clippy]
missing_safety_doc = "allow"
+needless_range_loop = "allow"
+type_complexity = "allow"
+upper_case_acronyms = "allow"
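
These three allows cover lints the commit chooses not to fix; the [lints.clippy] table in Cargo.toml (available since Rust 1.74) applies them package-wide. For reference, a crate-level attribute sketch with the same effect, which is not what this repo does but shows the alternative:

    // Equivalent allow attributes at the top of the crate root (sketch only).
    #![allow(clippy::missing_safety_doc)]
    #![allow(clippy::needless_range_loop)]
    #![allow(clippy::type_complexity)]
    #![allow(clippy::upper_case_acronyms)]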

@@ -283,7 +283,7 @@ extern "C" {
}
fn set_address(entry: &mut IDTEntry, func: unsafe extern "C" fn()) {
-let address = func as u64;
+let address = func as usize as u64;
entry.address_low = address as u16;
entry.address_middle = (address >> 16) as u16;
entry.address_high = (address >> 32) as u32;
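
Casting a function item straight to u64 trips clippy::fn_to_numeric_cast; routing the cast through usize first is the usual fix and keeps the pointer-width step explicit. A standalone sketch of the same address split (the function and tuple layout here are illustrative, not the kernel's IDTEntry):

    // Split a 64-bit handler address into the low/middle/high parts an IDT entry needs.
    fn split_handler_address(handler: unsafe extern "C" fn()) -> (u16, u16, u32) {
        let address = handler as usize as u64; // fn -> usize -> u64
        (address as u16, (address >> 16) as u16, (address >> 32) as u32)
    }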

@@ -56,7 +56,7 @@ pub fn virt_to_phys(virt: u64) -> u64 {
assert!(virt >= KERNEL_HEAP_START);
assert!(virt < KERNEL_HEAP_START + KERNEL_HEAP_INITIAL_SIZE as u64);
let heap_map = HEAP_PHYS_MAPPING.lock();
-return heap_map[(virt as usize - KERNEL_HEAP_START as usize) / 0x1000] + virt % 0x1000;
+heap_map[(virt as usize - KERNEL_HEAP_START as usize) / 0x1000] + virt % 0x1000
}
fn get_table_entry(table: &mut PageTable, i: usize) -> &mut PageTable {
if table.entries_virt[i].is_none() {
@@ -71,7 +71,7 @@ fn get_table_entry(table: &mut PageTable, i: usize) -> &mut PageTable {
table.entries_phys[i].set_user(1);
table.entries_phys[i].set_present(1);
}
-return table.entries_virt[i].as_mut().unwrap();
+table.entries_virt[i].as_mut().unwrap()
}
fn get_page(pml4: &mut PageTable, virt: u64) -> Option<&mut PageEntry> {
let virt_page = virt as usize / 0x1000;
@@ -147,14 +147,11 @@ pub unsafe fn unmap(address: u64) {
let mut current_pml4 = CURRENT_PML4.lock();
let page = get_page(current_pml4.as_mut().unwrap(), address);
assert!(page.is_some(), "Page isn't mapped");
-match page {
-Some(page) => {
+if let Some(page) = page {
page.set_present(0);
page.set_address(0);
invlpg(address);
}
-None => {}
-}
}
pub unsafe fn map_range(virt_start: u64, phys_start: u64, size: u64, user: bool, write: bool, exec: bool, cache_disable: bool) {
assert_eq!(virt_start % 0x1000, 0);
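
Two lints drive the changes in this diff: clippy::needless_return (a trailing `return expr;` becomes a plain tail expression) and clippy::single_match (a match with one meaningful arm plus an empty `None => {}` arm becomes `if let`). A minimal sketch of both rewrites on a plain Option, independent of the paging code:

    // Tail expression instead of `return page_index * 0x1000;`.
    fn page_base(page_index: u64) -> u64 {
        page_index * 0x1000
    }

    // `if let` instead of `match entry { Some(entry) => { ... } None => {} }`.
    fn clear_if_present(entry: Option<&mut u64>) {
        if let Some(entry) = entry {
            *entry = 0;
        }
    }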

@@ -54,7 +54,7 @@ fn copy_to_fb() {
}
}
pub fn display_print(str: &str) {
-if FRAMEBUFFER_ADDR.load(Ordering::SeqCst) == null_mut() {
+if FRAMEBUFFER_ADDR.load(Ordering::SeqCst).is_null() {
return;
}
if INTERRUPTS_SETUP.load(Ordering::SeqCst) {
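
Comparing a raw pointer against null_mut() is what clippy::cmp_null flags; raw pointers provide is_null(). A self-contained sketch with an AtomicPtr shaped like FRAMEBUFFER_ADDR (the static and element type here are assumptions):

    use core::ptr::null_mut;
    use core::sync::atomic::{AtomicPtr, Ordering};

    static FRAMEBUFFER: AtomicPtr<u32> = AtomicPtr::new(null_mut());

    fn framebuffer_ready() -> bool {
        // `.is_null()` instead of `== null_mut()`.
        !FRAMEBUFFER.load(Ordering::SeqCst).is_null()
    }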

@@ -42,7 +42,7 @@ extern "C" fn AcpiOsAcquireLock(handle: *mut c_void) -> ACPI_SIZE {
extern "C" fn AcpiOsAllocate(size: ACPI_SIZE) -> *mut c_void {
let layout = Layout::from_size_align(size as usize, 16).unwrap();
unsafe {
-return wrapped_alloc(layout) as *mut c_void;
+wrapped_alloc(layout) as *mut c_void
}
}
#[no_mangle]
@@ -126,7 +126,7 @@ extern "C" fn AcpiOsInstallInterruptHandler(gsi: UINT32, handler: ACPI_OSD_HANDL
#[no_mangle]
extern "C" fn AcpiOsMapMemory(phys: ACPI_PHYSICAL_ADDRESS, size: ACPI_SIZE) -> *mut c_void {
unsafe {
-return map_physical(phys, size, false) as *mut c_void;
+map_physical(phys, size, false) as *mut c_void
}
}
#[no_mangle]

@@ -11,7 +11,7 @@ impl AcpiHandler for EarlyACPIHandler {
unsafe fn map_physical_region<T>(&self, phys: usize, size: usize) -> PhysicalMapping<Self, T> {
unsafe {
let virt = map_physical(phys as u64, size as u64, false);
-return PhysicalMapping::new(phys, NonNull::new(virt as *mut T).unwrap(), size, size, *self);
+PhysicalMapping::new(phys, NonNull::new(virt as *mut T).unwrap(), size, size, *self)
}
}
fn unmap_physical_region<T>(region: &PhysicalMapping<Self, T>) {

@@ -46,7 +46,7 @@ static NEXT_IOAPIC_ID: AtomicUsize = AtomicUsize::new(0);
fn read_register(apic_i: usize, reg_i: u8) -> u32 {
unsafe {
IOAPICS[apic_i].address.load(Ordering::SeqCst).write_volatile(reg_i as u32);
-return IOAPICS[apic_i].address.load(Ordering::SeqCst).add(4).read_volatile();
+IOAPICS[apic_i].address.load(Ordering::SeqCst).add(4).read_volatile()
}
}
fn write_register(apic_i: usize, reg_i: u8, val: u32) {
@@ -57,7 +57,7 @@ fn write_register(apic_i: usize, reg_i: u8, val: u32) {
}
fn get_apic_for_gsi(gsi: usize) -> usize {
for i in 0..NEXT_IOAPIC_ID.load(Ordering::SeqCst) {
-if gsi >= IOAPICS[i].start_gsi.load(Ordering::SeqCst) as usize && gsi < IOAPICS[i].end_gsi.load(Ordering::SeqCst) as usize {
+if gsi >= IOAPICS[i].start_gsi.load(Ordering::SeqCst) && gsi < IOAPICS[i].end_gsi.load(Ordering::SeqCst) {
return i;
}
}
@@ -65,14 +65,14 @@ fn get_apic_for_gsi(gsi: usize) -> usize {
}
fn read_redirection(mut gsi: usize) -> RedirectionEntry {
let apic_i = get_apic_for_gsi(gsi);
-gsi -= IOAPICS[apic_i].start_gsi.load(Ordering::SeqCst) as usize;
+gsi -= IOAPICS[apic_i].start_gsi.load(Ordering::SeqCst);
let mut redirection_int = read_register(apic_i, REGISTER_REDIRECTION + gsi as u8 * 2) as u64;
redirection_int |= (read_register(apic_i, REGISTER_REDIRECTION + gsi as u8 * 2 + 1) as u64) << 32;
-return RedirectionEntry(redirection_int);
+RedirectionEntry(redirection_int)
}
fn write_redirection(mut gsi: usize, redirection: RedirectionEntry) {
let apic_i = get_apic_for_gsi(gsi);
-gsi -= IOAPICS[apic_i].start_gsi.load(Ordering::SeqCst) as usize;
+gsi -= IOAPICS[apic_i].start_gsi.load(Ordering::SeqCst);
write_register(apic_i, REGISTER_REDIRECTION + gsi as u8 * 2, redirection.0 as u32);
write_register(apic_i, REGISTER_REDIRECTION + gsi as u8 * 2 + 1, (redirection.0 >> 32) as u32);
}
@@ -94,10 +94,10 @@ pub fn register_irq_handler(vector: usize, handler: fn()) {
let start = IOAPICS[i].start_gsi.load(Ordering::SeqCst);
let end = IOAPICS[i].end_gsi.load(Ordering::SeqCst);
for j in start..end {
-let mut redirection = read_redirection(j as usize);
+let mut redirection = read_redirection(j);
if redirection.vector() == vector as u64 {
redirection.set_mask(0);
-write_redirection(j as usize, redirection);
+write_redirection(j, redirection);
}
}
}
@@ -115,6 +115,6 @@ pub fn setup_ioapic(phys: u64, gsi_base: usize) {
redirection.set_mask(1);
redirection.set_vector(48 + i as u64);
redirection.set_destination(BSP_LAPIC_ID.load(Ordering::SeqCst) as u64);
-write_redirection(i as usize, redirection);
+write_redirection(i, redirection);
}
}
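
Most edits in this diff drop casts flagged by clippy::unnecessary_cast: start_gsi and end_gsi appear to be AtomicUsize, so .load() already yields usize, and the loop variables i and j are usize as well. A small sketch under that assumption (the statics and values are made up):

    use core::sync::atomic::{AtomicUsize, Ordering};

    static START_GSI: AtomicUsize = AtomicUsize::new(0);
    static END_GSI: AtomicUsize = AtomicUsize::new(24);

    fn handles_gsi(gsi: usize) -> bool {
        // No `as usize` needed: both loads already return usize.
        gsi >= START_GSI.load(Ordering::SeqCst) && gsi < END_GSI.load(Ordering::SeqCst)
    }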

@@ -36,7 +36,7 @@ impl Spinlock {
pub fn lock(&self) {
debug_assert!(INTERRUPTS_SETUP.load(Ordering::SeqCst));
cli();
-while !self.locked.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst).is_ok() {}
+while self.locked.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst).is_err() {}
let lapic_id = get_current_lapic_id();
LOCKS_HELD[lapic_id].fetch_add(1, Ordering::SeqCst);
self.lapic_id.store(lapic_id, Ordering::SeqCst);
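
The spin loop now asks for is_err() instead of negating is_ok(), which is the simplification clippy's nonminimal_bool lint tends to suggest. A stripped-down sketch of just the acquire/release shape, without the interrupt and LAPIC bookkeeping of the real Spinlock:

    use core::sync::atomic::{AtomicBool, Ordering};

    struct RawSpinlock {
        locked: AtomicBool,
    }

    impl RawSpinlock {
        fn lock(&self) {
            // Spin until the CAS from false to true succeeds.
            while self.locked.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst).is_err() {}
        }

        fn unlock(&self) {
            self.locked.store(false, Ordering::SeqCst);
        }
    }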

@@ -78,7 +78,7 @@ pub fn switch_task(current_state: &mut ISRState, new_task: Task) {
}
pub fn create_task(func: fn()) -> Task {
let stack = vec![0; STACK_SIZE];
-let task = Task {
+Task {
id: NEXT_TASK_ID.fetch_add(1, Ordering::SeqCst),
state: ISRState {
rax: 0,
@@ -98,7 +98,7 @@ pub fn create_task(func: fn()) -> Task {
r15: 0,
isr: 0,
error_code: 0,
-rip: task_entry as u64,
+rip: task_entry as usize as u64,
cs: 8,
rflags: RFLAGS.load(Ordering::SeqCst),
rsp: stack.as_ptr() as u64 + STACK_SIZE as u64,
@@ -110,8 +110,7 @@ pub fn create_task(func: fn()) -> Task {
sleep_until_us: 0,
block_on_semaphore: None,
semaphore_requested_count: 0,
-};
-task
+}
}
fn create_idle_task() {
let mut idle_task = create_task(idle_main);
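
Binding the Task literal to a local only to return it on the next line is clippy::let_and_return; the struct literal itself can be the tail expression. The rip field also gains the same fn-as-usize-as-u64 cast as the IDT change earlier. A tiny sketch of both points with a hypothetical two-field type:

    struct MiniTask {
        id: u64,
        rip: u64,
    }

    fn create_mini_task(entry: fn()) -> MiniTask {
        // Struct literal as tail expression, no `let task = ...; task`.
        MiniTask {
            id: 0,
            rip: entry as usize as u64,
        }
    }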

@@ -71,13 +71,13 @@ fn main() -> Status {
});
assert_ne!(rsdp.load(Ordering::SeqCst), 0, "RSDP not found");
let framebuffer_info = setup_display();
-let memory_map = unsafe { exit_boot_services(MemoryType::LOADER_DATA) };
-let pml4 = setup_paging(&memory_map, heap_start);
+let mut memory_map = unsafe { exit_boot_services(MemoryType::LOADER_DATA) };
+let pml4 = setup_paging(&mut memory_map, heap_start);
map_kernel(KERNEL, pml4, kernel_start);
let loader_struct = generate_loader_struct(&memory_map, kernel_start, heap_start, rsdp.load(Ordering::SeqCst), features.has_hypervisor(), framebuffer_info);
info!("Jumping to kernel...");
unsafe {
-(mem::transmute::<_, extern "C" fn(&LoaderStruct) -> !>(kernel_entry))(&loader_struct);
+(mem::transmute::<u64, extern "C" fn(&LoaderStruct) -> !>(kernel_entry))(&loader_struct);
}
}
#[panic_handler]
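
Two things change in the loader's main: the memory map is now passed as &mut, matching the new setup_paging signature in the diff below, and the transmute spells out its source type, which is what clippy::missing_transmute_annotations asks for. A self-contained sketch of the annotated transmute (function name and signature are illustrative, for an x86_64 target where u64 and a function pointer have the same size):

    // Turn a raw entry-point address into a callable function pointer.
    unsafe fn jump_to_entry(entry_address: u64) -> ! {
        // Safety: the caller must guarantee the address points at code with this exact ABI.
        let entry = unsafe { core::mem::transmute::<u64, extern "C" fn() -> !>(entry_address) };
        entry()
    }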

@@ -19,7 +19,7 @@ fn get_table_entry(table: &mut PageTable, i: usize) -> &mut PageTable {
table.entries_phys[i].set_write(1);
table.entries_phys[i].set_present(1);
}
-return table.entries_virt[i].as_mut().unwrap();
+table.entries_virt[i].as_mut().unwrap()
}
pub fn map(pml4: &mut PageTable, virt: u64, phys: u64, write: bool, exec: bool) {
let virt_page = virt as usize / 0x1000;
@@ -35,7 +35,7 @@ pub fn map(pml4: &mut PageTable, virt: u64, phys: u64, write: bool, exec: bool)
table.entries_phys[table_i].set_execute_disable(!exec as u64);
table.entries_phys[table_i].set_present(1);
}
-pub fn setup_paging(memory_map: &MemoryMapOwned, heap_start: u64) -> &mut PageTable {
+pub fn setup_paging(memory_map: &mut MemoryMapOwned, heap_start: u64) -> &mut PageTable {
const NONE: Option<Box<PageTable>> = None;
let pml4 = Box::leak(Box::new(PageTable {
entries_phys: [PageEntry(0); 512],
@@ -70,7 +70,7 @@ pub fn setup_paging(memory_map: &MemoryMapOwned, heap_start: u64) -> &mut PageTa
// Write Protect
asm!("mov rax, cr0; bts rax, 16; mov cr0, rax", out("rax") _);
// No-Execute Enable
-asm!("rdmsr; bts rax, 11; wrmsr", in("rcx") 0xc0000080 as u64, out("rax") _, out("rdx") _);
+asm!("rdmsr; bts rax, 11; wrmsr", in("rcx") 0xc0000080_u64, out("rax") _, out("rdx") _);
}
pml4
}
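
`0xc0000080 as u64` is a cast on an integer literal, which clippy::unnecessary_cast rewrites as a suffixed literal. For reference, the same value pulled out as a named constant (the names are assumptions; 0xc0000080 is the EFER MSR and bit 11 is its NXE flag, which the asm block sets):

    // EFER MSR index, written with a type suffix instead of an `as u64` cast.
    const EFER_MSR: u64 = 0xc0000080_u64;
    const EFER_NXE_BIT: u32 = 11;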