Auto merge of #104101 - betrusted-io:xous-libstd-initial, r=bjorn3

Add initial libstd support for Xous

This patchset adds minimal `std` support for the tier-3 target `riscv32imac-unknown-xous-elf`. The following features are supported:

* alloc
* thread creation and joining
* thread sleeping
* thread_local
* panic_abort
* mutex
* condvar
* stdout

Additionally, internal support for the various Xous primitives surrounding IPC has been added as part of the Xous FFI. These may be exposed as part of `std::os::xous::ffi` in the future; for now, however, they are not public.
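
As a rough sketch of how these internal primitives fit together (simplified from the thread-sleep path added in this patch; `blocking_scalar`, `ticktimer_server`, and `TicktimerScalar` are the internal items introduced below and are not public API, and the code only compiles inside `std` itself):

```rust
use crate::os::xous::ffi::blocking_scalar;
use crate::os::xous::services::{ticktimer_server, TicktimerScalar};
use crate::time::Duration;

// Simplified sketch of the sleep path: ask the ticktimer server to block
// this thread for the requested number of milliseconds.
pub fn sleep(dur: Duration) {
    blocking_scalar(
        ticktimer_server(),
        TicktimerScalar::SleepMs(dur.as_millis() as usize).into(),
    )
    .expect("failed to send SleepMs command to the ticktimer server");
}
```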

This represents the minimum viable product. A future patchset will add networking and filesystem support.
bors 2023-09-19 07:38:20 +00:00
commit ae9c330629
32 changed files with 2484 additions and 10 deletions

View file

@ -644,9 +644,9 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "compiler_builtins"
-version = "0.1.100"
+version = "0.1.101"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d6c0f24437059853f0fa64afc51f338f93647a3de4cf3358ba1bb4171a199775"
+checksum = "01a6d58e9c3408138099a396a98fd0d0e6cfb25d723594d2ae48b5004513fd5b"
dependencies = [
 "cc",
 "rustc-std-workspace-core",

View file

@ -43,7 +43,8 @@ pub unsafe fn __rust_start_panic(_payload: &mut dyn BoxMeUp) -> u32 {
            libc::abort();
        }
    } else if #[cfg(any(target_os = "hermit",
-                       all(target_vendor = "fortanix", target_env = "sgx")
+                       all(target_vendor = "fortanix", target_env = "sgx"),
+                       target_os = "xous"
    ))] {
        unsafe fn abort() -> ! {
            // call std::sys::abort_internal

View file

@ -36,8 +36,8 @@ object = { version = "0.32.0", default-features = false, optional = true, featur
rand = { version = "0.8.5", default-features = false, features = ["alloc"] }
rand_xorshift = "0.3.0"
-[target.'cfg(any(all(target_family = "wasm", target_os = "unknown"), all(target_vendor = "fortanix", target_env = "sgx")))'.dependencies]
+[target.'cfg(any(all(target_family = "wasm", target_os = "unknown"), target_os = "xous", all(target_vendor = "fortanix", target_env = "sgx")))'.dependencies]
-dlmalloc = { version = "0.2.3", features = ['rustc-dep-of-std'] }
+dlmalloc = { version = "0.2.4", features = ['rustc-dep-of-std'] }
[target.x86_64-fortanix-unknown-sgx.dependencies]
fortanix-sgx-abi = { version = "0.5.0", features = ['rustc-dep-of-std'], public = true }

View file

@ -37,6 +37,7 @@ fn main() {
        || target.contains("nintendo-3ds")
        || target.contains("vita")
        || target.contains("nto")
+       || target.contains("xous")
        // See src/bootstrap/synthetic_targets.rs
        || env::var("RUSTC_BOOTSTRAP_SYNTHETIC_TARGET").is_ok()
    {

View file

@ -8,7 +8,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
#![deny(unsafe_op_in_unsafe_fn)]
-#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx"))))]
+#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx", target_os = "xous"))))]
mod tests;
use crate::ffi::OsString;

View file

@ -260,6 +260,7 @@
    feature(slice_index_methods, coerce_unsized, sgx_platform)
)]
#![cfg_attr(windows, feature(round_char_boundary))]
+#![cfg_attr(target_os = "xous", feature(slice_ptr_len))]
//
// Language features:
// tidy-alphabetical-start

View file

@ -1,6 +1,6 @@
#![deny(unsafe_op_in_unsafe_fn)]
-#[cfg(all(test, not(target_os = "emscripten")))]
+#[cfg(all(test, not(any(target_os = "emscripten", target_os = "xous"))))]
mod tests;
use crate::io::prelude::*;

View file

@ -1,4 +1,4 @@
-#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx"))))]
+#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx", target_os = "xous"))))]
mod tests;
use crate::fmt;

View file

@ -146,6 +146,8 @@ pub mod vita;
pub mod vxworks;
#[cfg(target_os = "watchos")]
pub(crate) mod watchos;
+#[cfg(target_os = "xous")]
+pub mod xous;
#[cfg(any(unix, target_os = "wasi", doc))]
pub mod fd;

View file

@ -0,0 +1,647 @@
#![allow(dead_code)]
#![allow(unused_variables)]
#![stable(feature = "rust1", since = "1.0.0")]
#[path = "../unix/ffi/os_str.rs"]
mod os_str;
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::os_str::{OsStrExt, OsStringExt};
mod definitions;
#[stable(feature = "rust1", since = "1.0.0")]
pub use definitions::*;
fn lend_mut_impl(
connection: Connection,
opcode: usize,
data: &mut [u8],
arg1: usize,
arg2: usize,
blocking: bool,
) -> Result<(usize, usize), Error> {
let mut a0 = if blocking { Syscall::SendMessage } else { Syscall::TrySendMessage } as usize;
let mut a1: usize = connection.try_into().unwrap();
let mut a2 = InvokeType::LendMut as usize;
let a3 = opcode;
let a4 = data.as_mut_ptr() as usize;
let a5 = data.len();
let a6 = arg1;
let a7 = arg2;
unsafe {
core::arch::asm!(
"ecall",
inlateout("a0") a0,
inlateout("a1") a1,
inlateout("a2") a2,
inlateout("a3") a3 => _,
inlateout("a4") a4 => _,
inlateout("a5") a5 => _,
inlateout("a6") a6 => _,
inlateout("a7") a7 => _,
)
};
let result = a0;
if result == SyscallResult::MemoryReturned as usize {
Ok((a1, a2))
} else if result == SyscallResult::Error as usize {
Err(a1.into())
} else {
Err(Error::InternalError)
}
}
pub(crate) fn lend_mut(
connection: Connection,
opcode: usize,
data: &mut [u8],
arg1: usize,
arg2: usize,
) -> Result<(usize, usize), Error> {
lend_mut_impl(connection, opcode, data, arg1, arg2, true)
}
pub(crate) fn try_lend_mut(
connection: Connection,
opcode: usize,
data: &mut [u8],
arg1: usize,
arg2: usize,
) -> Result<(usize, usize), Error> {
lend_mut_impl(connection, opcode, data, arg1, arg2, false)
}
fn lend_impl(
connection: Connection,
opcode: usize,
data: &[u8],
arg1: usize,
arg2: usize,
blocking: bool,
) -> Result<(usize, usize), Error> {
let mut a0 = if blocking { Syscall::SendMessage } else { Syscall::TrySendMessage } as usize;
let a1: usize = connection.try_into().unwrap();
let a2 = InvokeType::Lend as usize;
let a3 = opcode;
let a4 = data.as_ptr() as usize;
let a5 = data.len();
let mut a6 = arg1;
let mut a7 = arg2;
unsafe {
core::arch::asm!(
"ecall",
inlateout("a0") a0,
inlateout("a1") a1 => _,
inlateout("a2") a2 => _,
inlateout("a3") a3 => _,
inlateout("a4") a4 => _,
inlateout("a5") a5 => _,
inlateout("a6") a6,
inlateout("a7") a7,
)
};
let result = a0;
if result == SyscallResult::MemoryReturned as usize {
Ok((a6, a7))
} else if result == SyscallResult::Error as usize {
Err(a1.into())
} else {
Err(Error::InternalError)
}
}
pub(crate) fn lend(
connection: Connection,
opcode: usize,
data: &[u8],
arg1: usize,
arg2: usize,
) -> Result<(usize, usize), Error> {
lend_impl(connection, opcode, data, arg1, arg2, true)
}
pub(crate) fn try_lend(
connection: Connection,
opcode: usize,
data: &[u8],
arg1: usize,
arg2: usize,
) -> Result<(usize, usize), Error> {
lend_impl(connection, opcode, data, arg1, arg2, false)
}
fn scalar_impl(connection: Connection, args: [usize; 5], blocking: bool) -> Result<(), Error> {
let mut a0 = if blocking { Syscall::SendMessage } else { Syscall::TrySendMessage } as usize;
let mut a1: usize = connection.try_into().unwrap();
let a2 = InvokeType::Scalar as usize;
let a3 = args[0];
let a4 = args[1];
let a5 = args[2];
let a6 = args[3];
let a7 = args[4];
unsafe {
core::arch::asm!(
"ecall",
inlateout("a0") a0,
inlateout("a1") a1,
inlateout("a2") a2 => _,
inlateout("a3") a3 => _,
inlateout("a4") a4 => _,
inlateout("a5") a5 => _,
inlateout("a6") a6 => _,
inlateout("a7") a7 => _,
)
};
let result = a0;
if result == SyscallResult::Ok as usize {
Ok(())
} else if result == SyscallResult::Error as usize {
Err(a1.into())
} else {
Err(Error::InternalError)
}
}
pub(crate) fn scalar(connection: Connection, args: [usize; 5]) -> Result<(), Error> {
scalar_impl(connection, args, true)
}
pub(crate) fn try_scalar(connection: Connection, args: [usize; 5]) -> Result<(), Error> {
scalar_impl(connection, args, false)
}
fn blocking_scalar_impl(
connection: Connection,
args: [usize; 5],
blocking: bool,
) -> Result<[usize; 5], Error> {
let mut a0 = if blocking { Syscall::SendMessage } else { Syscall::TrySendMessage } as usize;
let mut a1: usize = connection.try_into().unwrap();
let mut a2 = InvokeType::BlockingScalar as usize;
let mut a3 = args[0];
let mut a4 = args[1];
let mut a5 = args[2];
let a6 = args[3];
let a7 = args[4];
unsafe {
core::arch::asm!(
"ecall",
inlateout("a0") a0,
inlateout("a1") a1,
inlateout("a2") a2,
inlateout("a3") a3,
inlateout("a4") a4,
inlateout("a5") a5,
inlateout("a6") a6 => _,
inlateout("a7") a7 => _,
)
};
let result = a0;
if result == SyscallResult::Scalar1 as usize {
Ok([a1, 0, 0, 0, 0])
} else if result == SyscallResult::Scalar2 as usize {
Ok([a1, a2, 0, 0, 0])
} else if result == SyscallResult::Scalar5 as usize {
Ok([a1, a2, a3, a4, a5])
} else if result == SyscallResult::Error as usize {
Err(a1.into())
} else {
Err(Error::InternalError)
}
}
pub(crate) fn blocking_scalar(
connection: Connection,
args: [usize; 5],
) -> Result<[usize; 5], Error> {
blocking_scalar_impl(connection, args, true)
}
pub(crate) fn try_blocking_scalar(
connection: Connection,
args: [usize; 5],
) -> Result<[usize; 5], Error> {
blocking_scalar_impl(connection, args, false)
}
fn connect_impl(address: ServerAddress, blocking: bool) -> Result<Connection, Error> {
let a0 = if blocking { Syscall::Connect } else { Syscall::TryConnect } as usize;
let address: [u32; 4] = address.into();
let a1: usize = address[0].try_into().unwrap();
let a2: usize = address[1].try_into().unwrap();
let a3: usize = address[2].try_into().unwrap();
let a4: usize = address[3].try_into().unwrap();
let a5 = 0;
let a6 = 0;
let a7 = 0;
let mut result: usize;
let mut value: usize;
unsafe {
core::arch::asm!(
"ecall",
inlateout("a0") a0 => result,
inlateout("a1") a1 => value,
inlateout("a2") a2 => _,
inlateout("a3") a3 => _,
inlateout("a4") a4 => _,
inlateout("a5") a5 => _,
inlateout("a6") a6 => _,
inlateout("a7") a7 => _,
)
};
if result == SyscallResult::ConnectionId as usize {
Ok(value.try_into().unwrap())
} else if result == SyscallResult::Error as usize {
Err(value.into())
} else {
Err(Error::InternalError)
}
}
/// Connect to a Xous server represented by the specified `address`.
///
/// The current thread will block until the server is available. Returns
/// an error if the server cannot accept any more connections.
pub(crate) fn connect(address: ServerAddress) -> Result<Connection, Error> {
connect_impl(address, true)
}
/// Attempt to connect to a Xous server represented by the specified `address`.
///
/// If the server does not exist then None is returned.
pub(crate) fn try_connect(address: ServerAddress) -> Result<Option<Connection>, Error> {
match connect_impl(address, false) {
Ok(conn) => Ok(Some(conn)),
Err(Error::ServerNotFound) => Ok(None),
Err(e) => Err(e),
}
}
/// Terminate the current process and return the specified code to the parent process.
pub(crate) fn exit(return_code: u32) -> ! {
let a0 = Syscall::TerminateProcess as usize;
let a1 = return_code as usize;
let a2 = 0;
let a3 = 0;
let a4 = 0;
let a5 = 0;
let a6 = 0;
let a7 = 0;
unsafe {
core::arch::asm!(
"ecall",
in("a0") a0,
in("a1") a1,
in("a2") a2,
in("a3") a3,
in("a4") a4,
in("a5") a5,
in("a6") a6,
in("a7") a7,
)
};
unreachable!();
}
/// Suspend the current thread and allow another thread to run. This thread may
/// continue executing again immediately if there are no other threads available
/// to run on the system.
pub(crate) fn do_yield() {
let a0 = Syscall::Yield as usize;
let a1 = 0;
let a2 = 0;
let a3 = 0;
let a4 = 0;
let a5 = 0;
let a6 = 0;
let a7 = 0;
unsafe {
core::arch::asm!(
"ecall",
inlateout("a0") a0 => _,
inlateout("a1") a1 => _,
inlateout("a2") a2 => _,
inlateout("a3") a3 => _,
inlateout("a4") a4 => _,
inlateout("a5") a5 => _,
inlateout("a6") a6 => _,
inlateout("a7") a7 => _,
)
};
}
/// Allocate memory from the system. An optional physical and/or virtual address
/// may be specified in order to ensure memory is allocated at specific offsets,
/// otherwise the kernel will select an address.
///
/// # Safety
///
/// This function is safe unless a virtual address is specified. In that case,
/// the kernel will return an alias to the existing range. This violates Rust's
/// pointer uniqueness guarantee.
pub(crate) unsafe fn map_memory<T>(
phys: Option<core::ptr::NonNull<T>>,
virt: Option<core::ptr::NonNull<T>>,
count: usize,
flags: MemoryFlags,
) -> Result<&'static mut [T], Error> {
let mut a0 = Syscall::MapMemory as usize;
let mut a1 = phys.map(|p| p.as_ptr() as usize).unwrap_or_default();
let mut a2 = virt.map(|p| p.as_ptr() as usize).unwrap_or_default();
let a3 = count * core::mem::size_of::<T>();
let a4 = flags.bits();
let a5 = 0;
let a6 = 0;
let a7 = 0;
unsafe {
core::arch::asm!(
"ecall",
inlateout("a0") a0,
inlateout("a1") a1,
inlateout("a2") a2,
inlateout("a3") a3 => _,
inlateout("a4") a4 => _,
inlateout("a5") a5 => _,
inlateout("a6") a6 => _,
inlateout("a7") a7 => _,
)
};
let result = a0;
if result == SyscallResult::MemoryRange as usize {
let start = core::ptr::from_exposed_addr_mut::<T>(a1);
let len = a2 / core::mem::size_of::<T>();
let end = unsafe { start.add(len) };
Ok(unsafe { core::slice::from_raw_parts_mut(start, len) })
} else if result == SyscallResult::Error as usize {
Err(a1.into())
} else {
Err(Error::InternalError)
}
}
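// Illustrative sketch only: the thread-spawning code later in this patch uses
// `map_memory` to allocate a stack plus guard pages as one contiguous region,
// roughly:
//
//     let stack_plus_guard_pages: &mut [u8] = unsafe {
//         map_memory(
//             None,
//             None,
//             GUARD_PAGE_SIZE + stack_size + GUARD_PAGE_SIZE,
//             MemoryFlags::R | MemoryFlags::W | MemoryFlags::X,
//         )
//     }
//     .map_err(|code| io::Error::from_raw_os_error(code as i32))?;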
/// Destroy the given memory, returning it to the kernel.
///
/// Safety: The memory pointed to by `range` should not be used after this
/// function returns, even if this function returns Err().
pub(crate) unsafe fn unmap_memory<T>(range: *mut [T]) -> Result<(), Error> {
let mut a0 = Syscall::UnmapMemory as usize;
let mut a1 = range.as_mut_ptr() as usize;
let a2 = range.len();
let a3 = 0;
let a4 = 0;
let a5 = 0;
let a6 = 0;
let a7 = 0;
unsafe {
core::arch::asm!(
"ecall",
inlateout("a0") a0,
inlateout("a1") a1,
inlateout("a2") a2 => _,
inlateout("a3") a3 => _,
inlateout("a4") a4 => _,
inlateout("a5") a5 => _,
inlateout("a6") a6 => _,
inlateout("a7") a7 => _,
)
};
let result = a0;
if result == SyscallResult::Ok as usize {
Ok(())
} else if result == SyscallResult::Error as usize {
Err(a1.into())
} else {
Err(Error::InternalError)
}
}
/// Adjust the memory flags for the given range. This can be used to remove flags
/// from a given region in order to harden memory access. Note that flags may
/// only be removed and may never be added.
///
/// Safety: The memory pointed to by `range` may become inaccessible or have its
/// mutability removed. It is up to the caller to ensure that the flags specified
/// by `new_flags` are upheld, otherwise the program will crash.
pub(crate) unsafe fn update_memory_flags<T>(
range: *mut [T],
new_flags: MemoryFlags,
) -> Result<(), Error> {
let mut a0 = Syscall::UpdateMemoryFlags as usize;
let mut a1 = range.as_mut_ptr() as usize;
let a2 = range.len();
let a3 = new_flags.bits();
let a4 = 0; // Process ID is currently None
let a5 = 0;
let a6 = 0;
let a7 = 0;
unsafe {
core::arch::asm!(
"ecall",
inlateout("a0") a0,
inlateout("a1") a1,
inlateout("a2") a2 => _,
inlateout("a3") a3 => _,
inlateout("a4") a4 => _,
inlateout("a5") a5 => _,
inlateout("a6") a6 => _,
inlateout("a7") a7 => _,
)
};
let result = a0;
if result == SyscallResult::Ok as usize {
Ok(())
} else if result == SyscallResult::Error as usize {
Err(a1.into())
} else {
Err(Error::InternalError)
}
}
/// Create a thread with a given stack and up to four arguments
pub(crate) fn create_thread(
start: *mut usize,
stack: *mut [u8],
arg0: usize,
arg1: usize,
arg2: usize,
arg3: usize,
) -> Result<ThreadId, Error> {
let mut a0 = Syscall::CreateThread as usize;
let mut a1 = start as usize;
let a2 = stack.as_mut_ptr() as usize;
let a3 = stack.len();
let a4 = arg0;
let a5 = arg1;
let a6 = arg2;
let a7 = arg3;
unsafe {
core::arch::asm!(
"ecall",
inlateout("a0") a0,
inlateout("a1") a1,
inlateout("a2") a2 => _,
inlateout("a3") a3 => _,
inlateout("a4") a4 => _,
inlateout("a5") a5 => _,
inlateout("a6") a6 => _,
inlateout("a7") a7 => _,
)
};
let result = a0;
if result == SyscallResult::ThreadId as usize {
Ok(a1.into())
} else if result == SyscallResult::Error as usize {
Err(a1.into())
} else {
Err(Error::InternalError)
}
}
/// Wait for the given thread to terminate and return the exit code from that thread.
pub(crate) fn join_thread(thread_id: ThreadId) -> Result<usize, Error> {
let mut a0 = Syscall::JoinThread as usize;
let mut a1 = thread_id.into();
let a2 = 0;
let a3 = 0;
let a4 = 0;
let a5 = 0;
let a6 = 0;
let a7 = 0;
unsafe {
core::arch::asm!(
"ecall",
inlateout("a0") a0,
inlateout("a1") a1,
inlateout("a2") a2 => _,
inlateout("a3") a3 => _,
inlateout("a4") a4 => _,
inlateout("a5") a5 => _,
inlateout("a6") a6 => _,
inlateout("a7") a7 => _,
)
};
let result = a0;
if result == SyscallResult::Scalar1 as usize {
Ok(a1)
} else if result == SyscallResult::Scalar2 as usize {
Ok(a1)
} else if result == SyscallResult::Scalar5 as usize {
Ok(a1)
} else if result == SyscallResult::Error as usize {
Err(a1.into())
} else {
Err(Error::InternalError)
}
}
/// Get the current thread's ID
pub(crate) fn thread_id() -> Result<ThreadId, Error> {
let mut a0 = Syscall::GetThreadId as usize;
let mut a1 = 0;
let a2 = 0;
let a3 = 0;
let a4 = 0;
let a5 = 0;
let a6 = 0;
let a7 = 0;
unsafe {
core::arch::asm!(
"ecall",
inlateout("a0") a0,
inlateout("a1") a1,
inlateout("a2") a2 => _,
inlateout("a3") a3 => _,
inlateout("a4") a4 => _,
inlateout("a5") a5 => _,
inlateout("a6") a6 => _,
inlateout("a7") a7 => _,
)
};
let result = a0;
if result == SyscallResult::ThreadId as usize {
Ok(a1.into())
} else if result == SyscallResult::Error as usize {
Err(a1.into())
} else {
Err(Error::InternalError)
}
}
/// Adjust the given `knob` limit to match the new value `new`. The current value must
/// match the `current` in order for this to take effect.
///
/// The new value is returned as a result of this call. If the call fails, then the old
/// value is returned. In either case, this function returns successfully.
///
/// An error is generated if the `knob` is not a valid limit, or if the call
/// would not succeed.
pub(crate) fn adjust_limit(knob: Limits, current: usize, new: usize) -> Result<usize, Error> {
let mut a0 = Syscall::AdjustProcessLimit as usize;
let mut a1 = knob as usize;
let a2 = current;
let a3 = new;
let a4 = 0;
let a5 = 0;
let a6 = 0;
let a7 = 0;
unsafe {
core::arch::asm!(
"ecall",
inlateout("a0") a0,
inlateout("a1") a1,
inlateout("a2") a2 => _,
inlateout("a3") a3 => _,
inlateout("a4") a4 => _,
inlateout("a5") a5 => _,
inlateout("a6") a6 => _,
inlateout("a7") a7 => _,
)
};
let result = a0;
if result == SyscallResult::Scalar2 as usize && a1 == knob as usize {
Ok(a2)
} else if result == SyscallResult::Scalar5 as usize && a1 == knob as usize {
Ok(a1)
} else if result == SyscallResult::Error as usize {
Err(a1.into())
} else {
Err(Error::InternalError)
}
}

View file

@ -0,0 +1,283 @@
mod memoryflags;
pub(crate) use memoryflags::*;
#[stable(feature = "rust1", since = "1.0.0")]
/// Indicates a particular syscall number as used by the Xous kernel.
#[derive(Copy, Clone)]
#[repr(usize)]
pub enum Syscall {
MapMemory = 2,
Yield = 3,
UpdateMemoryFlags = 12,
ReceiveMessage = 15,
SendMessage = 16,
Connect = 17,
CreateThread = 18,
UnmapMemory = 19,
ReturnMemory = 20,
TerminateProcess = 22,
TrySendMessage = 24,
TryConnect = 25,
GetThreadId = 32,
JoinThread = 36,
AdjustProcessLimit = 38,
ReturnScalar = 40,
}
#[stable(feature = "rust1", since = "1.0.0")]
/// Copies of these invocation types are kept here for when we're running
/// in environments without libxous.
#[derive(Copy, Clone)]
#[repr(usize)]
pub enum SyscallResult {
Ok = 0,
Error = 1,
MemoryRange = 3,
ConnectionId = 7,
Message = 9,
ThreadId = 10,
Scalar1 = 14,
Scalar2 = 15,
MemoryReturned = 18,
Scalar5 = 20,
}
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone)]
/// A list of all known errors that may be returned by the Xous kernel.
#[repr(usize)]
pub enum Error {
NoError = 0,
BadAlignment = 1,
BadAddress = 2,
OutOfMemory = 3,
MemoryInUse = 4,
InterruptNotFound = 5,
InterruptInUse = 6,
InvalidString = 7,
ServerExists = 8,
ServerNotFound = 9,
ProcessNotFound = 10,
ProcessNotChild = 11,
ProcessTerminated = 12,
Timeout = 13,
InternalError = 14,
ServerQueueFull = 15,
ThreadNotAvailable = 16,
UnhandledSyscall = 17,
InvalidSyscall = 18,
ShareViolation = 19,
InvalidThread = 20,
InvalidPid = 21,
UnknownError = 22,
AccessDenied = 23,
UseBeforeInit = 24,
DoubleFree = 25,
DebugInProgress = 26,
InvalidLimit = 27,
}
#[stable(feature = "rust1", since = "1.0.0")]
impl From<usize> for Error {
fn from(src: usize) -> Self {
match src {
0 => Self::NoError,
1 => Self::BadAlignment,
2 => Self::BadAddress,
3 => Self::OutOfMemory,
4 => Self::MemoryInUse,
5 => Self::InterruptNotFound,
6 => Self::InterruptInUse,
7 => Self::InvalidString,
8 => Self::ServerExists,
9 => Self::ServerNotFound,
10 => Self::ProcessNotFound,
11 => Self::ProcessNotChild,
12 => Self::ProcessTerminated,
13 => Self::Timeout,
14 => Self::InternalError,
15 => Self::ServerQueueFull,
16 => Self::ThreadNotAvailable,
17 => Self::UnhandledSyscall,
18 => Self::InvalidSyscall,
19 => Self::ShareViolation,
20 => Self::InvalidThread,
21 => Self::InvalidPid,
23 => Self::AccessDenied,
24 => Self::UseBeforeInit,
25 => Self::DoubleFree,
26 => Self::DebugInProgress,
27 => Self::InvalidLimit,
22 | _ => Self::UnknownError,
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl From<i32> for Error {
fn from(src: i32) -> Self {
let Ok(src) = core::convert::TryInto::<usize>::try_into(src) else {
return Self::UnknownError;
};
src.into()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl core::fmt::Display for Error {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(
f,
"{}",
match self {
Error::NoError => "no error occurred",
Error::BadAlignment => "memory was not properly aligned",
Error::BadAddress => "an invalid address was supplied",
Error::OutOfMemory => "the process or service has run out of memory",
Error::MemoryInUse => "the requested address is in use",
Error::InterruptNotFound =>
"the requested interrupt does not exist on this platform",
Error::InterruptInUse => "the requested interrupt is currently in use",
Error::InvalidString => "the specified string was not formatted correctly",
Error::ServerExists => "a server with that address already exists",
Error::ServerNotFound => "the requested server could not be found",
Error::ProcessNotFound => "the target process does not exist",
Error::ProcessNotChild =>
"the requested operation can only be done on child processes",
Error::ProcessTerminated => "the target process has crashed",
Error::Timeout => "the requested operation timed out",
Error::InternalError => "an internal error occurred",
Error::ServerQueueFull => "the server has too many pending messages",
Error::ThreadNotAvailable => "the specified thread does not exist",
Error::UnhandledSyscall => "the kernel did not recognize that syscall",
Error::InvalidSyscall => "the syscall had incorrect parameters",
Error::ShareViolation => "an attempt was made to share memory twice",
Error::InvalidThread => "tried to resume a thread that was not ready",
Error::InvalidPid => "kernel attempted to use a pid that was not valid",
Error::AccessDenied => "no permission to perform the requested operation",
Error::UseBeforeInit => "attempt to use a service before initialization finished",
Error::DoubleFree => "the requested resource was freed twice",
Error::DebugInProgress => "kernel attempted to activate a thread being debugged",
Error::InvalidLimit => "process attempted to adjust an invalid limit",
Error::UnknownError => "an unknown error occurred",
}
)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl core::fmt::Debug for Error {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "{}", self)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl crate::error::Error for Error {}
/// Indicates the type of Message that is sent when making a `SendMessage` syscall.
#[derive(Copy, Clone)]
#[repr(usize)]
pub(crate) enum InvokeType {
LendMut = 1,
Lend = 2,
Move = 3,
Scalar = 4,
BlockingScalar = 5,
}
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug, Copy, Clone)]
/// A representation of a connection to a Xous service.
pub struct Connection(u32);
#[stable(feature = "rust1", since = "1.0.0")]
impl From<u32> for Connection {
fn from(src: u32) -> Connection {
Connection(src)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl TryFrom<usize> for Connection {
type Error = core::num::TryFromIntError;
fn try_from(src: usize) -> Result<Self, Self::Error> {
Ok(Connection(src.try_into()?))
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Into<u32> for Connection {
fn into(self) -> u32 {
self.0
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl TryInto<usize> for Connection {
type Error = core::num::TryFromIntError;
fn try_into(self) -> Result<usize, Self::Error> {
self.0.try_into()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub enum ServerAddressError {
InvalidLength,
}
#[stable(feature = "rust1", since = "1.0.0")]
pub struct ServerAddress([u32; 4]);
#[stable(feature = "rust1", since = "1.0.0")]
impl TryFrom<&str> for ServerAddress {
type Error = ServerAddressError;
fn try_from(value: &str) -> Result<Self, Self::Error> {
let b = value.as_bytes();
if b.len() == 0 || b.len() > 16 {
return Err(Self::Error::InvalidLength);
}
let mut this_temp = [0u8; 16];
for (dest, src) in this_temp.iter_mut().zip(b.iter()) {
*dest = *src;
}
let mut this = [0u32; 4];
for (dest, src) in this.iter_mut().zip(this_temp.chunks_exact(4)) {
*dest = u32::from_le_bytes(src.try_into().unwrap());
}
Ok(ServerAddress(this))
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Into<[u32; 4]> for ServerAddress {
fn into(self) -> [u32; 4] {
self.0
}
}
#[derive(Debug, Copy, Clone)]
pub(crate) struct ThreadId(usize);
impl From<usize> for ThreadId {
fn from(src: usize) -> ThreadId {
ThreadId(src)
}
}
impl Into<usize> for ThreadId {
fn into(self) -> usize {
self.0
}
}
#[derive(Copy, Clone)]
#[repr(usize)]
/// Limits that can be passed to `AdjustLimit`
pub(crate) enum Limits {
HeapMaximum = 1,
HeapSize = 2,
}

View file

@ -0,0 +1,176 @@
/// Flags to be passed to the MapMemory syscall.
/// Note that it is an error to have memory be
/// writable and not readable.
#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct MemoryFlags {
bits: usize,
}
impl MemoryFlags {
/// Free this memory
#[stable(feature = "rust1", since = "1.0.0")]
pub const FREE: Self = Self { bits: 0b0000_0000 };
/// Immediately allocate this memory. Otherwise it will
/// be demand-paged. This is implicitly set when `phys`
/// is not 0.
#[stable(feature = "rust1", since = "1.0.0")]
pub const RESERVE: Self = Self { bits: 0b0000_0001 };
/// Allow the CPU to read from this page.
#[stable(feature = "rust1", since = "1.0.0")]
pub const R: Self = Self { bits: 0b0000_0010 };
/// Allow the CPU to write to this page.
#[stable(feature = "rust1", since = "1.0.0")]
pub const W: Self = Self { bits: 0b0000_0100 };
/// Allow the CPU to execute from this page.
#[stable(feature = "rust1", since = "1.0.0")]
pub const X: Self = Self { bits: 0b0000_1000 };
#[stable(feature = "rust1", since = "1.0.0")]
pub fn bits(&self) -> usize {
self.bits
}
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_bits(raw: usize) -> Option<MemoryFlags> {
if raw > 15 { None } else { Some(MemoryFlags { bits: raw }) }
}
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_empty(&self) -> bool {
self.bits == 0
}
#[stable(feature = "rust1", since = "1.0.0")]
pub fn empty() -> MemoryFlags {
MemoryFlags { bits: 0 }
}
#[stable(feature = "rust1", since = "1.0.0")]
pub fn all() -> MemoryFlags {
MemoryFlags { bits: 15 }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl core::fmt::Binary for MemoryFlags {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
core::fmt::Binary::fmt(&self.bits, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl core::fmt::Octal for MemoryFlags {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
core::fmt::Octal::fmt(&self.bits, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl core::fmt::LowerHex for MemoryFlags {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
core::fmt::LowerHex::fmt(&self.bits, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl core::fmt::UpperHex for MemoryFlags {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
core::fmt::UpperHex::fmt(&self.bits, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl core::ops::BitOr for MemoryFlags {
type Output = Self;
/// Returns the union of the two sets of flags.
#[inline]
fn bitor(self, other: MemoryFlags) -> Self {
Self { bits: self.bits | other.bits }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl core::ops::BitOrAssign for MemoryFlags {
/// Adds the set of flags.
#[inline]
fn bitor_assign(&mut self, other: Self) {
self.bits |= other.bits;
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl core::ops::BitXor for MemoryFlags {
type Output = Self;
/// Returns the left flags, but with all the right flags toggled.
#[inline]
fn bitxor(self, other: Self) -> Self {
Self { bits: self.bits ^ other.bits }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl core::ops::BitXorAssign for MemoryFlags {
/// Toggles the set of flags.
#[inline]
fn bitxor_assign(&mut self, other: Self) {
self.bits ^= other.bits;
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl core::ops::BitAnd for MemoryFlags {
type Output = Self;
/// Returns the intersection between the two sets of flags.
#[inline]
fn bitand(self, other: Self) -> Self {
Self { bits: self.bits & other.bits }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl core::ops::BitAndAssign for MemoryFlags {
/// Disables all flags disabled in the set.
#[inline]
fn bitand_assign(&mut self, other: Self) {
self.bits &= other.bits;
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl core::ops::Sub for MemoryFlags {
type Output = Self;
/// Returns the set difference of the two sets of flags.
#[inline]
fn sub(self, other: Self) -> Self {
Self { bits: self.bits & !other.bits }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl core::ops::SubAssign for MemoryFlags {
/// Disables all flags enabled in the set.
#[inline]
fn sub_assign(&mut self, other: Self) {
self.bits &= !other.bits;
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl core::ops::Not for MemoryFlags {
type Output = Self;
/// Returns the complement of this set of flags.
#[inline]
fn not(self) -> Self {
Self { bits: !self.bits } & MemoryFlags { bits: 15 }
}
}

View file

@ -0,0 +1,17 @@
#![stable(feature = "rust1", since = "1.0.0")]
#![doc(cfg(target_os = "xous"))]
pub mod ffi;
#[stable(feature = "rust1", since = "1.0.0")]
pub mod services;
/// A prelude for conveniently writing platform-specific code.
///
/// Includes all extension traits, and some important type definitions.
#[stable(feature = "rust1", since = "1.0.0")]
pub mod prelude {
#[doc(no_inline)]
#[stable(feature = "rust1", since = "1.0.0")]
pub use super::ffi::{OsStrExt, OsStringExt};
}

View file

@ -0,0 +1,132 @@
use crate::os::xous::ffi::Connection;
use core::sync::atomic::{AtomicU32, Ordering};
mod log;
pub(crate) use log::*;
mod systime;
pub(crate) use systime::*;
mod ticktimer;
pub(crate) use ticktimer::*;
mod ns {
const NAME_MAX_LENGTH: usize = 64;
use crate::os::xous::ffi::{lend_mut, Connection};
// By making this repr(C), the layout of this struct becomes well-defined
// and no longer shifts around.
// By marking it as `align(4096)` we define that it will be page-aligned,
// meaning it can be sent between processes. We make sure to pad out the
// entire struct so that memory isn't leaked to the name server.
#[repr(C, align(4096))]
struct ConnectRequest {
data: [u8; 4096],
}
impl ConnectRequest {
pub fn new(name: &str) -> Self {
let mut cr = ConnectRequest { data: [0u8; 4096] };
let name_bytes = name.as_bytes();
// Copy the string into our backing store.
for (&src_byte, dest_byte) in name_bytes.iter().zip(&mut cr.data[0..NAME_MAX_LENGTH]) {
*dest_byte = src_byte;
}
// Set the string length to the length of the passed-in String,
// or the maximum possible length, whichever is smaller.
for (&src_byte, dest_byte) in (name.len().min(NAME_MAX_LENGTH) as u32)
.to_le_bytes()
.iter()
.zip(&mut cr.data[NAME_MAX_LENGTH..])
{
*dest_byte = src_byte;
}
cr
}
}
pub fn connect_with_name_impl(name: &str, blocking: bool) -> Option<Connection> {
let mut request = ConnectRequest::new(name);
let opcode = if blocking {
6 /* BlockingConnect */
} else {
7 /* TryConnect */
};
let cid = if blocking { super::name_server() } else { super::try_name_server()? };
lend_mut(cid, opcode, &mut request.data, 0, name.len().min(NAME_MAX_LENGTH))
.expect("unable to perform lookup");
// Read the result code back from the nameserver
let result = u32::from_le_bytes(request.data[0..4].try_into().unwrap());
if result == 0 {
// If the result was successful, then the CID is stored in the next 4 bytes
Some(u32::from_le_bytes(request.data[4..8].try_into().unwrap()).into())
} else {
None
}
}
pub fn connect_with_name(name: &str) -> Option<Connection> {
connect_with_name_impl(name, true)
}
pub fn try_connect_with_name(name: &str) -> Option<Connection> {
connect_with_name_impl(name, false)
}
}
/// Attempt to connect to a server by name. If the server does not exist, this will
/// block until the server is created.
///
/// Note that this is different from connecting to a server by address. Server
/// addresses are always 16 bytes long, whereas server names are arbitrary-length
/// strings up to 64 bytes in length.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn connect(name: &str) -> Option<Connection> {
ns::connect_with_name(name)
}
/// Attempt to connect to a server by name. If the server does not exist, this will
/// immediately return `None`.
///
/// Note that this is different from connecting to a server by address. Server
/// addresses are always 16 bytes long, whereas server names are arbitrary-length
/// strings.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn try_connect(name: &str) -> Option<Connection> {
ns::try_connect_with_name(name)
}
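// Illustrative usage sketch: `stdio.rs` later in this patch uses this
// name-based lookup to reach the graphics panic handler, roughly:
//
//     let gfx = try_connect("panic-to-screen!");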
static NAME_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0);
/// Return a `Connection` to the name server. If the name server has not been started,
/// then this call will block until the name server has been started. The `Connection`
/// will be shared among all threads in a process, so it is safe to call this
/// multiple times.
pub(crate) fn name_server() -> Connection {
let cid = NAME_SERVER_CONNECTION.load(Ordering::Relaxed);
if cid != 0 {
return cid.into();
}
let cid = crate::os::xous::ffi::connect("xous-name-server".try_into().unwrap()).unwrap();
NAME_SERVER_CONNECTION.store(cid.into(), Ordering::Relaxed);
cid
}
fn try_name_server() -> Option<Connection> {
let cid = NAME_SERVER_CONNECTION.load(Ordering::Relaxed);
if cid != 0 {
return Some(cid.into());
}
if let Ok(Some(cid)) = crate::os::xous::ffi::try_connect("xous-name-server".try_into().unwrap())
{
NAME_SERVER_CONNECTION.store(cid.into(), Ordering::Relaxed);
Some(cid)
} else {
None
}
}

View file

@ -0,0 +1,63 @@
use crate::os::xous::ffi::Connection;
use core::sync::atomic::{AtomicU32, Ordering};
/// Group `size_of::<usize>()` bytes into a `usize` and return it, beginning
/// from `offset` * sizeof(usize) bytes from the start. For example,
/// `group_or_null([1,2,3,4,5,6,7,8], 1)` on a 32-bit system will
/// return a usize with the bytes 5, 6, 7, and 8 packed into it.
fn group_or_null(data: &[u8], offset: usize) -> usize {
let start = offset * core::mem::size_of::<usize>();
let mut out_array = [0u8; core::mem::size_of::<usize>()];
if start < data.len() {
for (dest, src) in out_array.iter_mut().zip(&data[start..]) {
*dest = *src;
}
}
usize::from_le_bytes(out_array)
}
pub(crate) enum LogScalar<'a> {
/// A panic occurred, and a panic log is forthcoming
BeginPanic,
/// Some number of bytes will be appended to the log message
AppendPanicMessage(&'a [u8]),
}
impl<'a> Into<[usize; 5]> for LogScalar<'a> {
fn into(self) -> [usize; 5] {
match self {
LogScalar::BeginPanic => [1000, 0, 0, 0, 0],
LogScalar::AppendPanicMessage(c) =>
// Text is grouped into 4x `usize` words. The id is 1100 plus
// the number of characters in this message.
// Ignore errors since we're already panicking.
{
[
1100 + c.len(),
group_or_null(&c, 0),
group_or_null(&c, 1),
group_or_null(&c, 2),
group_or_null(&c, 3),
]
}
}
}
}
/// Return a `Connection` to the log server, which is used for printing messages to
/// the console and reporting panics. If the log server has not yet started, this
/// will block until the server is running. It is safe to call this multiple times,
/// because the address is shared among all threads in a process.
pub(crate) fn log_server() -> Connection {
static LOG_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0);
let cid = LOG_SERVER_CONNECTION.load(Ordering::Relaxed);
if cid != 0 {
return cid.into();
}
let cid = crate::os::xous::ffi::connect("xous-log-server ".try_into().unwrap()).unwrap();
LOG_SERVER_CONNECTION.store(cid.into(), Ordering::Relaxed);
cid
}

View file

@ -0,0 +1,28 @@
use crate::os::xous::ffi::{connect, Connection};
use core::sync::atomic::{AtomicU32, Ordering};
pub(crate) enum SystimeScalar {
GetUtcTimeMs,
}
impl Into<[usize; 5]> for SystimeScalar {
fn into(self) -> [usize; 5] {
match self {
SystimeScalar::GetUtcTimeMs => [3, 0, 0, 0, 0],
}
}
}
/// Return a `Connection` to the systime server. This server is used for reporting the
/// realtime clock.
pub(crate) fn systime_server() -> Connection {
static SYSTIME_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0);
let cid = SYSTIME_SERVER_CONNECTION.load(Ordering::Relaxed);
if cid != 0 {
return cid.into();
}
let cid = connect("timeserverpublic".try_into().unwrap()).unwrap();
SYSTIME_SERVER_CONNECTION.store(cid.into(), Ordering::Relaxed);
cid
}

View file

@ -0,0 +1,42 @@
use crate::os::xous::ffi::Connection;
use core::sync::atomic::{AtomicU32, Ordering};
pub(crate) enum TicktimerScalar {
ElapsedMs,
SleepMs(usize),
LockMutex(usize /* cookie */),
UnlockMutex(usize /* cookie */),
WaitForCondition(usize /* cookie */, usize /* timeout (ms) */),
NotifyCondition(usize /* cookie */, usize /* count */),
FreeMutex(usize /* cookie */),
FreeCondition(usize /* cookie */),
}
impl Into<[usize; 5]> for TicktimerScalar {
fn into(self) -> [usize; 5] {
match self {
TicktimerScalar::ElapsedMs => [0, 0, 0, 0, 0],
TicktimerScalar::SleepMs(msecs) => [1, msecs, 0, 0, 0],
TicktimerScalar::LockMutex(cookie) => [6, cookie, 0, 0, 0],
TicktimerScalar::UnlockMutex(cookie) => [7, cookie, 0, 0, 0],
TicktimerScalar::WaitForCondition(cookie, timeout_ms) => [8, cookie, timeout_ms, 0, 0],
TicktimerScalar::NotifyCondition(cookie, count) => [9, cookie, count, 0, 0],
TicktimerScalar::FreeMutex(cookie) => [10, cookie, 0, 0, 0],
TicktimerScalar::FreeCondition(cookie) => [11, cookie, 0, 0, 0],
}
}
}
/// Return a `Connection` to the ticktimer server. This server is used for synchronization
/// primitives such as sleep, Mutex, and Condvar.
pub(crate) fn ticktimer_server() -> Connection {
static TICKTIMER_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0);
let cid = TICKTIMER_SERVER_CONNECTION.load(Ordering::Relaxed);
if cid != 0 {
return cid.into();
}
let cid = crate::os::xous::ffi::connect("ticktimer-server".try_into().unwrap()).unwrap();
TICKTIMER_SERVER_CONNECTION.store(cid.into(), Ordering::Relaxed);
cid
}

View file

@ -101,7 +101,7 @@
#![stable(feature = "process", since = "1.0.0")]
#![deny(unsafe_op_in_unsafe_fn)]
-#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx"))))]
+#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx", target_os = "xous"))))]
mod tests;
use crate::io::prelude::*;

View file

@ -44,6 +44,9 @@ cfg_if::cfg_if! {
    } else if #[cfg(target_family = "wasm")] {
        mod wasm;
        pub use self::wasm::*;
+   } else if #[cfg(target_os = "xous")] {
+       mod xous;
+       pub use self::xous::*;
    } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] {
        mod sgx;
        pub use self::sgx::*;

View file

@ -0,0 +1,62 @@
use crate::alloc::{GlobalAlloc, Layout, System};
static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::Dlmalloc::new();
#[stable(feature = "alloc_system_type", since = "1.28.0")]
unsafe impl GlobalAlloc for System {
#[inline]
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
// Calling malloc() is safe because preconditions on this function match the trait method preconditions.
let _lock = lock::lock();
unsafe { DLMALLOC.malloc(layout.size(), layout.align()) }
}
#[inline]
unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
// SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
// Calling calloc() is safe because preconditions on this function match the trait method preconditions.
let _lock = lock::lock();
unsafe { DLMALLOC.calloc(layout.size(), layout.align()) }
}
#[inline]
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
// Calling free() is safe because preconditions on this function match the trait method preconditions.
let _lock = lock::lock();
unsafe { DLMALLOC.free(ptr, layout.size(), layout.align()) }
}
#[inline]
unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
// SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
// Calling realloc() is safe because preconditions on this function match the trait method preconditions.
let _lock = lock::lock();
unsafe { DLMALLOC.realloc(ptr, layout.size(), layout.align(), new_size) }
}
}
mod lock {
use crate::sync::atomic::{AtomicI32, Ordering::SeqCst};
static LOCKED: AtomicI32 = AtomicI32::new(0);
pub struct DropLock;
pub fn lock() -> DropLock {
loop {
if LOCKED.swap(1, SeqCst) == 0 {
return DropLock;
}
crate::os::xous::ffi::do_yield();
}
}
impl Drop for DropLock {
fn drop(&mut self) {
let r = LOCKED.swap(0, SeqCst);
debug_assert_eq!(r, 1);
}
}
}

View file

@ -0,0 +1,111 @@
use super::mutex::Mutex;
use crate::os::xous::ffi::{blocking_scalar, scalar};
use crate::os::xous::services::ticktimer_server;
use crate::sync::Mutex as StdMutex;
use crate::time::Duration;
// The implementation is inspired by Andrew D. Birrell's paper
// "Implementing Condition Variables with Semaphores"
pub struct Condvar {
counter: StdMutex<usize>,
}
unsafe impl Send for Condvar {}
unsafe impl Sync for Condvar {}
impl Condvar {
#[inline]
#[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Condvar {
Condvar { counter: StdMutex::new(0) }
}
pub fn notify_one(&self) {
let mut counter = self.counter.lock().unwrap();
if *counter <= 0 {
return;
} else {
*counter -= 1;
}
let result = blocking_scalar(
ticktimer_server(),
crate::os::xous::services::TicktimerScalar::NotifyCondition(self.index(), 1).into(),
);
drop(counter);
result.expect("failure to send NotifyCondition command");
}
pub fn notify_all(&self) {
let mut counter = self.counter.lock().unwrap();
if *counter <= 0 {
return;
}
let result = blocking_scalar(
ticktimer_server(),
crate::os::xous::services::TicktimerScalar::NotifyCondition(self.index(), *counter)
.into(),
);
*counter = 0;
drop(counter);
result.expect("failure to send NotifyCondition command");
}
fn index(&self) -> usize {
self as *const Condvar as usize
}
pub unsafe fn wait(&self, mutex: &Mutex) {
let mut counter = self.counter.lock().unwrap();
*counter += 1;
unsafe { mutex.unlock() };
drop(counter);
let result = blocking_scalar(
ticktimer_server(),
crate::os::xous::services::TicktimerScalar::WaitForCondition(self.index(), 0).into(),
);
unsafe { mutex.lock() };
result.expect("Ticktimer: failure to send WaitForCondition command");
}
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
let mut counter = self.counter.lock().unwrap();
*counter += 1;
unsafe { mutex.unlock() };
drop(counter);
let mut millis = dur.as_millis() as usize;
if millis == 0 {
millis = 1;
}
let result = blocking_scalar(
ticktimer_server(),
crate::os::xous::services::TicktimerScalar::WaitForCondition(self.index(), millis)
.into(),
);
unsafe { mutex.lock() };
let result = result.expect("Ticktimer: failure to send WaitForCondition command")[0] == 0;
// If we awoke due to a timeout, decrement the wake count, as that would not have
// been done in the `notify()` call.
if !result {
*self.counter.lock().unwrap() -= 1;
}
result
}
}
impl Drop for Condvar {
fn drop(&mut self) {
scalar(
ticktimer_server(),
crate::os::xous::services::TicktimerScalar::FreeCondition(self.index()).into(),
)
.ok();
}
}

View file

@ -0,0 +1,7 @@
mod condvar;
mod mutex;
mod rwlock;
pub use condvar::*;
pub use mutex::*;
pub use rwlock::*;

View file

@ -0,0 +1,116 @@
use crate::os::xous::ffi::{blocking_scalar, do_yield, scalar};
use crate::os::xous::services::ticktimer_server;
use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering::Relaxed, Ordering::SeqCst};
pub struct Mutex {
/// The "locked" value indicates how many threads are waiting on this
/// Mutex. Possible values are:
/// 0: The lock is unlocked
/// 1: The lock is locked and uncontended
/// >=2: The lock is locked and contended
///
/// A lock is "contended" when there is more than one thread waiting
/// for a lock, or it is locked for long periods of time. Rather than
/// spinning, these locks send a Message to the ticktimer server
/// requesting that they be woken up when a lock is unlocked.
locked: AtomicUsize,
/// Whether this Mutex ever was contended, and therefore made a trip
/// to the ticktimer server. If this was never set, then we were never
/// on the slow path and can skip deregistering the mutex.
contended: AtomicBool,
}
impl Mutex {
#[inline]
#[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Mutex {
Mutex { locked: AtomicUsize::new(0), contended: AtomicBool::new(false) }
}
fn index(&self) -> usize {
self as *const Mutex as usize
}
#[inline]
pub unsafe fn lock(&self) {
// Try multiple times to acquire the lock without resorting to the ticktimer
// server. For locks that are held for a short amount of time, this will
// result in the ticktimer server never getting invoked. The `locked` value
// will be either 0 or 1.
for _attempts in 0..3 {
if unsafe { self.try_lock() } {
return;
}
do_yield();
}
// Try one more time to lock. If the lock is released between the previous code and
// here, then the inner `locked` value will be 1 at the end of this. If it was still
// locked, then the value will be more than 1, for example if there are multiple other
// threads waiting on this lock.
if unsafe { self.try_lock_or_poison() } {
return;
}
// When this mutex is dropped, we will need to deregister it with the server.
self.contended.store(true, Relaxed);
// The lock is now "contended". When the lock is released, a Message will get sent to the
// ticktimer server to wake it up. Note that this may already have happened, so the actual
// value of `lock` may be anything (0, 1, 2, ...).
blocking_scalar(
ticktimer_server(),
crate::os::xous::services::TicktimerScalar::LockMutex(self.index()).into(),
)
.expect("failure to send LockMutex command");
}
#[inline]
pub unsafe fn unlock(&self) {
let prev = self.locked.fetch_sub(1, SeqCst);
// If the previous value was 1, then this was a "fast path" unlock, so no
// need to involve the Ticktimer server
if prev == 1 {
return;
}
// If it was 0, then something has gone seriously wrong and the counter
// has just wrapped around.
if prev == 0 {
panic!("mutex lock count underflowed");
}
// Unblock one thread that is waiting on this message.
scalar(
ticktimer_server(),
crate::os::xous::services::TicktimerScalar::UnlockMutex(self.index()).into(),
)
.expect("failure to send UnlockMutex command");
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
}
#[inline]
pub unsafe fn try_lock_or_poison(&self) -> bool {
self.locked.fetch_add(1, SeqCst) == 0
}
}
impl Drop for Mutex {
fn drop(&mut self) {
// If there was Mutex contention, then we involved the ticktimer. Free
// the resources associated with this Mutex as it is deallocated.
if self.contended.load(Relaxed) {
scalar(
ticktimer_server(),
crate::os::xous::services::TicktimerScalar::FreeMutex(self.index()).into(),
)
.ok();
}
}
}

View file

@ -0,0 +1,72 @@
use crate::os::xous::ffi::do_yield;
use crate::sync::atomic::{AtomicIsize, Ordering::SeqCst};
pub struct RwLock {
/// The "mode" value indicates how many threads are waiting on this
/// Mutex. Possible values are:
/// -1: The lock is locked for writing
/// 0: The lock is unlocked
/// >=1: The lock is locked for reading
///
/// This currently spins waiting for the lock to be freed. An
/// optimization would be to involve the ticktimer server to
/// coordinate unlocks.
mode: AtomicIsize,
}
unsafe impl Send for RwLock {}
unsafe impl Sync for RwLock {}
impl RwLock {
#[inline]
#[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> RwLock {
RwLock { mode: AtomicIsize::new(0) }
}
#[inline]
pub unsafe fn read(&self) {
while !unsafe { self.try_read() } {
do_yield();
}
}
#[inline]
pub unsafe fn try_read(&self) -> bool {
// Non-atomically determine the current value.
let current = self.mode.load(SeqCst);
// If it's currently locked for writing, then we cannot read.
if current < 0 {
return false;
}
// Attempt to lock. If the `current` value has changed, then this
// operation will fail and we will not obtain the lock even if we
// could potentially keep it.
let new = current + 1;
self.mode.compare_exchange(current, new, SeqCst, SeqCst).is_ok()
}
#[inline]
pub unsafe fn write(&self) {
while !unsafe { self.try_write() } {
do_yield();
}
}
#[inline]
pub unsafe fn try_write(&self) -> bool {
self.mode.compare_exchange(0, -1, SeqCst, SeqCst).is_ok()
}
#[inline]
pub unsafe fn read_unlock(&self) {
self.mode.fetch_sub(1, SeqCst);
}
#[inline]
pub unsafe fn write_unlock(&self) {
assert_eq!(self.mode.compare_exchange(-1, 0, SeqCst, SeqCst), Ok(-1));
}
}

View file

@ -0,0 +1,37 @@
#![deny(unsafe_op_in_unsafe_fn)]
pub mod alloc;
#[path = "../unsupported/args.rs"]
pub mod args;
#[path = "../unix/cmath.rs"]
pub mod cmath;
#[path = "../unsupported/env.rs"]
pub mod env;
#[path = "../unsupported/fs.rs"]
pub mod fs;
#[path = "../unsupported/io.rs"]
pub mod io;
pub mod locks;
#[path = "../unsupported/net.rs"]
pub mod net;
#[path = "../unsupported/once.rs"]
pub mod once;
pub mod os;
#[path = "../unix/os_str.rs"]
pub mod os_str;
#[path = "../unix/path.rs"]
pub mod path;
#[path = "../unsupported/pipe.rs"]
pub mod pipe;
#[path = "../unsupported/process.rs"]
pub mod process;
pub mod stdio;
pub mod thread;
pub mod thread_local_key;
#[path = "../unsupported/thread_parking.rs"]
pub mod thread_parking;
pub mod time;
#[path = "../unsupported/common.rs"]
mod common;
pub use common::*;

View file

@ -0,0 +1,147 @@
use super::unsupported;
use crate::error::Error as StdError;
use crate::ffi::{OsStr, OsString};
use crate::fmt;
use crate::io;
use crate::marker::PhantomData;
use crate::os::xous::ffi::Error as XousError;
use crate::path::{self, PathBuf};
#[cfg(not(test))]
mod c_compat {
use crate::os::xous::ffi::exit;
extern "C" {
fn main() -> u32;
}
#[no_mangle]
pub extern "C" fn abort() {
exit(1);
}
#[no_mangle]
pub extern "C" fn _start() {
exit(unsafe { main() });
}
// This function is needed by the panic runtime. The symbol is named in
// pre-link args for the target specification, so keep that in sync.
#[no_mangle]
// NB. used by both libunwind and libpanic_abort
pub extern "C" fn __rust_abort() -> ! {
exit(101);
}
}
pub fn errno() -> i32 {
0
}
pub fn error_string(errno: i32) -> String {
Into::<XousError>::into(errno).to_string()
}
pub fn getcwd() -> io::Result<PathBuf> {
unsupported()
}
pub fn chdir(_: &path::Path) -> io::Result<()> {
unsupported()
}
pub struct SplitPaths<'a>(!, PhantomData<&'a ()>);
pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> {
panic!("unsupported")
}
impl<'a> Iterator for SplitPaths<'a> {
type Item = PathBuf;
fn next(&mut self) -> Option<PathBuf> {
self.0
}
}
#[derive(Debug)]
pub struct JoinPathsError;
pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError>
where
I: Iterator<Item = T>,
T: AsRef<OsStr>,
{
Err(JoinPathsError)
}
impl fmt::Display for JoinPathsError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
"not supported on this platform yet".fmt(f)
}
}
impl StdError for JoinPathsError {
#[allow(deprecated)]
fn description(&self) -> &str {
"not supported on this platform yet"
}
}
pub fn current_exe() -> io::Result<PathBuf> {
unsupported()
}
pub struct Env(!);
impl Env {
// FIXME(https://github.com/rust-lang/rust/issues/114583): Remove this when <OsStr as Debug>::fmt matches <str as Debug>::fmt.
pub fn str_debug(&self) -> impl fmt::Debug + '_ {
let Self(inner) = self;
match *inner {}
}
}
impl fmt::Debug for Env {
fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self(inner) = self;
match *inner {}
}
}
impl Iterator for Env {
type Item = (OsString, OsString);
fn next(&mut self) -> Option<(OsString, OsString)> {
self.0
}
}
pub fn env() -> Env {
panic!("not supported on this platform")
}
pub fn getenv(_: &OsStr) -> Option<OsString> {
None
}
pub fn setenv(_: &OsStr, _: &OsStr) -> io::Result<()> {
Err(io::const_io_error!(io::ErrorKind::Unsupported, "cannot set env vars on this platform"))
}
pub fn unsetenv(_: &OsStr) -> io::Result<()> {
Err(io::const_io_error!(io::ErrorKind::Unsupported, "cannot unset env vars on this platform"))
}
pub fn temp_dir() -> PathBuf {
panic!("no filesystem on this platform")
}
pub fn home_dir() -> Option<PathBuf> {
None
}
pub fn exit(code: i32) -> ! {
crate::os::xous::ffi::exit(code as u32);
}
pub fn getpid() -> u32 {
panic!("no pids on this platform")
}

View file

@ -0,0 +1,131 @@
use crate::io;
pub struct Stdin;
pub struct Stdout {}
pub struct Stderr;
use crate::os::xous::ffi::{lend, try_lend, try_scalar, Connection};
use crate::os::xous::services::{log_server, try_connect, LogScalar};
impl Stdin {
pub const fn new() -> Stdin {
Stdin
}
}
impl io::Read for Stdin {
fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> {
Ok(0)
}
}
impl Stdout {
pub const fn new() -> Stdout {
Stdout {}
}
}
impl io::Write for Stdout {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
#[repr(align(4096))]
struct LendBuffer([u8; 4096]);
let mut lend_buffer = LendBuffer([0u8; 4096]);
let connection = log_server();
for chunk in buf.chunks(lend_buffer.0.len()) {
for (dest, src) in lend_buffer.0.iter_mut().zip(chunk) {
*dest = *src;
}
lend(connection, 1, &lend_buffer.0, 0, chunk.len()).unwrap();
}
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl Stderr {
pub const fn new() -> Stderr {
Stderr
}
}
impl io::Write for Stderr {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
#[repr(align(4096))]
struct LendBuffer([u8; 4096]);
let mut lend_buffer = LendBuffer([0u8; 4096]);
let connection = log_server();
for chunk in buf.chunks(lend_buffer.0.len()) {
for (dest, src) in lend_buffer.0.iter_mut().zip(chunk) {
*dest = *src;
}
lend(connection, 1, &lend_buffer.0, 0, chunk.len()).unwrap();
}
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
pub const STDIN_BUF_SIZE: usize = 0;
pub fn is_ebadf(_err: &io::Error) -> bool {
true
}
#[derive(Copy, Clone)]
pub struct PanicWriter {
log: Connection,
gfx: Option<Connection>,
}
impl io::Write for PanicWriter {
fn write(&mut self, s: &[u8]) -> core::result::Result<usize, io::Error> {
for c in s.chunks(core::mem::size_of::<usize>() * 4) {
// Text is sent in chunks of four `usize` words. The message id is 1100
// plus the number of characters in this chunk.
// Ignore errors since we're already panicking.
try_scalar(self.log, LogScalar::AppendPanicMessage(&c).into()).ok();
}
// Serialize the text to the graphics panic handler, but only if we were
// able to acquire a connection to it. The text length is encoded in the
// `valid` field and the data itself in the buffer. Several messages are
// typically required to transmit the entire panic message.
if let Some(gfx) = self.gfx {
#[repr(C, align(4096))]
struct Request([u8; 4096]);
let mut request = Request([0u8; 4096]);
for (&s, d) in s.iter().zip(request.0.iter_mut()) {
*d = s;
}
try_lend(gfx, 0 /* AppendPanicText */, &request.0, 0, s.len()).ok();
}
Ok(s.len())
}
// Tests show that this is not reliably called at the end of a panic
// print, so we can't rely on it to e.g. trigger a graphics update.
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
pub fn panic_output() -> Option<impl io::Write> {
// This is unlikely to fail: the connection to the log server is already
// established by the time anything panics.
let log = log_server();
// Send the "We're panicking" message (1000).
try_scalar(log, LogScalar::BeginPanic.into()).ok();
// This will fail if the connection table is full or if the graphics
// server is not running. Most processes do not already hold this connection.
let gfx = try_connect("panic-to-screen!");
Some(PanicWriter { log, gfx })
}
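
To make the page-lending pattern in `Stdout::write` concrete, here is a standalone sketch (not part of the patch) of the same chunking logic: the caller's buffer is copied, one page at a time, into a 4096-byte-aligned scratch buffer before each page is handed to the log server. The `send_page` closure stands in for the `lend(connection, 1, &lend_buffer.0, 0, chunk.len())` call.

```rust
// Page-aligned scratch buffer, mirroring `LendBuffer` above.
#[repr(align(4096))]
struct LendBuffer([u8; 4096]);

fn write_all(buf: &[u8], mut send_page: impl FnMut(&[u8; 4096], usize)) {
    let mut scratch = LendBuffer([0u8; 4096]);
    for chunk in buf.chunks(scratch.0.len()) {
        // Copy the chunk into the aligned buffer, then lend the whole page,
        // telling the server how many bytes are valid.
        scratch.0[..chunk.len()].copy_from_slice(chunk);
        send_page(&scratch.0, chunk.len());
    }
}

fn main() {
    let message = vec![b'x'; 10_000];
    let mut pages = 0;
    write_all(&message, |_page, valid| {
        // In the real implementation this is `lend(log_server(), 1, page, 0, valid)`.
        pages += 1;
        assert!(valid <= 4096);
    });
    assert_eq!(pages, 3); // 10,000 bytes span three 4096-byte pages
}
```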

View file

@ -0,0 +1,144 @@
use crate::ffi::CStr;
use crate::io;
use crate::num::NonZeroUsize;
use crate::os::xous::ffi::{
blocking_scalar, create_thread, do_yield, join_thread, map_memory, update_memory_flags,
MemoryFlags, Syscall, ThreadId,
};
use crate::os::xous::services::{ticktimer_server, TicktimerScalar};
use crate::time::Duration;
use core::arch::asm;
pub struct Thread {
tid: ThreadId,
}
pub const DEFAULT_MIN_STACK_SIZE: usize = 131072;
const MIN_STACK_SIZE: usize = 4096;
pub const GUARD_PAGE_SIZE: usize = 4096;
impl Thread {
// unsafe: see thread::Builder::spawn_unchecked for safety requirements
pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
let p = Box::into_raw(Box::new(p));
let mut stack_size = crate::cmp::max(stack, MIN_STACK_SIZE);
if (stack_size & 4095) != 0 {
stack_size = (stack_size + 4095) & !4095;
}
// Allocate the whole thing, then divide it up after the fact. This ensures that
// even if there's a context switch during this function, the whole stack plus
// guard pages will remain contiguous.
let stack_plus_guard_pages: &mut [u8] = unsafe {
map_memory(
None,
None,
GUARD_PAGE_SIZE + stack_size + GUARD_PAGE_SIZE,
MemoryFlags::R | MemoryFlags::W | MemoryFlags::X,
)
}
.map_err(|code| io::Error::from_raw_os_error(code as i32))?;
// Make the lower guard page inaccessible. Note: write-only pages are
// illegal and will cause an access violation, so requesting only `W`
// means any access to this page faults.
unsafe {
update_memory_flags(&mut stack_plus_guard_pages[0..GUARD_PAGE_SIZE], MemoryFlags::W)
.map_err(|code| io::Error::from_raw_os_error(code as i32))?
};
// Likewise make the upper guard page inaccessible, using the same
// write-only trick as above.
unsafe {
update_memory_flags(
&mut stack_plus_guard_pages[(GUARD_PAGE_SIZE + stack_size)..],
MemoryFlags::W,
)
.map_err(|code| io::Error::from_raw_os_error(code as i32))?
};
let guard_page_pre = stack_plus_guard_pages.as_ptr() as usize;
let tid = create_thread(
thread_start as *mut usize,
&mut stack_plus_guard_pages[GUARD_PAGE_SIZE..(stack_size + GUARD_PAGE_SIZE)],
p as usize,
guard_page_pre,
stack_size,
0,
)
.map_err(|code| io::Error::from_raw_os_error(code as i32))?;
extern "C" fn thread_start(main: *mut usize, guard_page_pre: usize, stack_size: usize) {
unsafe {
// Finally, let's run some code.
Box::from_raw(main as *mut Box<dyn FnOnce()>)();
}
// Destroy TLS, which will free the TLS page and run the destructors of
// any thread-local values.
unsafe {
crate::sys::thread_local_key::destroy_tls();
}
// Deallocate the stack memory, along with the guard pages. Afterwards,
// exit the thread by returning to the magic address 0xff80_3000usize,
// which tells the kernel to deallocate this thread.
let mapped_memory_base = guard_page_pre;
let mapped_memory_length = GUARD_PAGE_SIZE + stack_size + GUARD_PAGE_SIZE;
unsafe {
asm!(
"ecall",
"ret",
in("a0") Syscall::UnmapMemory as usize,
in("a1") mapped_memory_base,
in("a2") mapped_memory_length,
in("ra") 0xff80_3000usize,
options(nomem, nostack, noreturn)
);
}
}
Ok(Thread { tid })
}
pub fn yield_now() {
do_yield();
}
pub fn set_name(_name: &CStr) {
// Thread names are not supported on Xous.
}
pub fn sleep(dur: Duration) {
// Because the sleep server takes its duration in `usize` milliseconds,
// split longer sleeps into multiple messages. On a 32-bit system each
// message can cover at most roughly 49 days.
let mut millis = dur.as_millis();
while millis > 0 {
let sleep_duration =
if millis > (usize::MAX as _) { usize::MAX } else { millis as usize };
blocking_scalar(ticktimer_server(), TicktimerScalar::SleepMs(sleep_duration).into())
.expect("failed to send message to ticktimer server");
millis -= sleep_duration as u128;
}
}
pub fn join(self) {
join_thread(self.tid).unwrap();
}
}
pub fn available_parallelism() -> io::Result<NonZeroUsize> {
// Xous currently runs on a single core.
Ok(unsafe { NonZeroUsize::new_unchecked(1) })
}
pub mod guard {
pub type Guard = !;
pub unsafe fn current() -> Option<Guard> {
None
}
pub unsafe fn init() -> Option<Guard> {
None
}
}
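
For illustration, a small sketch (not part of the patch) of the address arithmetic performed by `Thread::new` above: the requested stack size is rounded up to a whole 4096-byte page, and one guard page is placed on each side of the usable stack, so the usable region sits between `GUARD_PAGE_SIZE` and `GUARD_PAGE_SIZE + stack_size` within the mapped allocation.

```rust
use std::ops::Range;

const PAGE: usize = 4096;
const GUARD_PAGE_SIZE: usize = 4096;
const MIN_STACK_SIZE: usize = 4096;

/// Returns the total mapping size and the byte range occupied by the stack.
fn stack_layout(requested: usize) -> (usize, Range<usize>) {
    let mut stack_size = requested.max(MIN_STACK_SIZE);
    if stack_size % PAGE != 0 {
        // Round up to the next page boundary, as in `Thread::new`.
        stack_size = (stack_size + PAGE - 1) & !(PAGE - 1);
    }
    let total = GUARD_PAGE_SIZE + stack_size + GUARD_PAGE_SIZE;
    (total, GUARD_PAGE_SIZE..GUARD_PAGE_SIZE + stack_size)
}

fn main() {
    // DEFAULT_MIN_STACK_SIZE is already page-aligned.
    let (total, usable) = stack_layout(131_072);
    assert_eq!(total, 131_072 + 2 * 4096);
    assert_eq!(usable, 4096..4096 + 131_072);

    // An odd request is rounded up to the next page (8192 bytes here).
    let (total, _) = stack_layout(5000);
    assert_eq!(total, 8192 + 2 * 4096);
}
```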

View file

@ -0,0 +1,190 @@
use crate::mem::ManuallyDrop;
use crate::ptr;
use crate::sync::atomic::AtomicPtr;
use crate::sync::atomic::AtomicUsize;
use crate::sync::atomic::Ordering::SeqCst;
use core::arch::asm;
use crate::os::xous::ffi::{map_memory, unmap_memory, MemoryFlags};
/// Thread Local Storage
///
/// Currently, we are limited to 1023 TLS entries. The entries
/// live in a page of memory that's unique to each thread, and whose
/// address is stored in the `$tp` register. If this register is 0, then
/// TLS has not been initialized and thread cleanup can be skipped.
///
/// The index into this page is the `key`. The key is identical
/// across all threads, but because each thread has its own page it
/// refers to a different slot in each thread.
pub type Key = usize;
pub type Dtor = unsafe extern "C" fn(*mut u8);
const TLS_MEMORY_SIZE: usize = 4096;
/// TLS keys start at `1` to mimic POSIX.
static TLS_KEY_INDEX: AtomicUsize = AtomicUsize::new(1);
fn tls_ptr_addr() -> *mut usize {
let mut tp: usize;
unsafe {
asm!(
"mv {}, tp",
out(reg) tp,
);
}
core::ptr::from_exposed_addr_mut::<usize>(tp)
}
/// Return this thread's TLS area, creating it on first use. This area
/// holds all of the thread-local pointers.
fn tls_ptr() -> *mut usize {
let mut tp = tls_ptr_addr();
// If the TP register is `0`, then this thread hasn't initialized
// its TLS yet. Allocate a new page to store this memory.
if tp.is_null() {
tp = unsafe {
map_memory(
None,
None,
TLS_MEMORY_SIZE / core::mem::size_of::<usize>(),
MemoryFlags::R | MemoryFlags::W,
)
}
.expect("Unable to allocate memory for thread local storage")
.as_mut_ptr();
unsafe {
// Key #0 is currently unused.
(tp).write_volatile(0);
// Set the thread's `$tp` register
asm!(
"mv tp, {}",
in(reg) tp as usize,
);
}
}
tp
}
/// Allocate a new TLS key. These keys are shared among all threads.
fn tls_alloc() -> usize {
TLS_KEY_INDEX.fetch_add(1, SeqCst)
}
#[inline]
pub unsafe fn create(dtor: Option<Dtor>) -> Key {
let key = tls_alloc();
if let Some(f) = dtor {
unsafe { register_dtor(key, f) };
}
key
}
#[inline]
pub unsafe fn set(key: Key, value: *mut u8) {
assert!((key < 1022) && (key >= 1));
unsafe { tls_ptr().add(key).write_volatile(value as usize) };
}
#[inline]
pub unsafe fn get(key: Key) -> *mut u8 {
assert!((key < 1022) && (key >= 1));
core::ptr::from_exposed_addr_mut::<u8>(unsafe { tls_ptr().add(key).read_volatile() })
}
#[inline]
pub unsafe fn destroy(_key: Key) {
panic!("can't destroy keys on Xous");
}
// -------------------------------------------------------------------------
// Dtor registration (stolen from Windows)
//
// Xous has no native support for running destructors so we manage our own
// list of destructors to keep track of how to destroy keys. We then install a
// callback later to get invoked whenever a thread exits, running all
// appropriate destructors.
//
// Currently, unregistration from this list is not supported. A destructor can
// be registered but cannot be unregistered. There are various simplifying
// reasons for doing this, the big ones being:
//
// 1. Currently we don't even support deallocating TLS keys, so normal operation
// doesn't need to deallocate a destructor.
// 2. There is no point in time where we know we can unregister a destructor
// because it could always be getting run by some remote thread.
//
// Typically a process has a small, statically known set of TLS keys, and
// we'd want to keep this memory alive for the whole process anyway.
//
// Perhaps one day we can fold the `Box` here into a static allocation,
// expanding the `StaticKey` structure to contain not only a slot for the TLS
// key but also a slot for the destructor queue, as on Windows. An optimization
// for another day!
static DTORS: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());
struct Node {
dtor: Dtor,
key: Key,
next: *mut Node,
}
unsafe fn register_dtor(key: Key, dtor: Dtor) {
let mut node = ManuallyDrop::new(Box::new(Node { key, dtor, next: ptr::null_mut() }));
let mut head = DTORS.load(SeqCst);
loop {
node.next = head;
match DTORS.compare_exchange(head, &mut **node, SeqCst, SeqCst) {
Ok(_) => return, // nothing to drop, we successfully added the node to the list
Err(cur) => head = cur,
}
}
}
pub unsafe fn destroy_tls() {
let tp = tls_ptr_addr();
// If the pointer address is 0, then this thread has no TLS.
if tp.is_null() {
return;
}
unsafe { run_dtors() };
// Finally, free the TLS array
unsafe {
unmap_memory(core::slice::from_raw_parts_mut(
tp,
TLS_MEMORY_SIZE / core::mem::size_of::<usize>(),
))
.unwrap()
};
}
unsafe fn run_dtors() {
let mut any_run = true;
for _ in 0..5 {
if !any_run {
break;
}
any_run = false;
let mut cur = DTORS.load(SeqCst);
while !cur.is_null() {
let ptr = unsafe { get((*cur).key) };
if !ptr.is_null() {
unsafe { set((*cur).key, ptr::null_mut()) };
unsafe { ((*cur).dtor)(ptr as *mut _) };
any_run = true;
}
unsafe { cur = (*cur).next };
}
}
}
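
The destructor list above is a lock-free, prepend-only linked list rooted in an `AtomicPtr`. Here is a standalone sketch (not part of the patch) of that push pattern in isolation, with the node reduced to a bare key so the example stays small.

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::SeqCst};

struct Node {
    key: usize,
    next: *mut Node,
}

static LIST: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());

fn push(key: usize) {
    // Leak the node on purpose: like the dtor list, entries are never removed.
    let node = Box::into_raw(Box::new(Node { key, next: ptr::null_mut() }));
    let mut head = LIST.load(SeqCst);
    loop {
        unsafe { (*node).next = head };
        match LIST.compare_exchange(head, node, SeqCst, SeqCst) {
            Ok(_) => return,        // we installed the new head
            Err(cur) => head = cur, // another thread won the race; retry
        }
    }
}

fn main() {
    push(1);
    push(2);
    // The most recently pushed node sits at the head of the list.
    let head = LIST.load(SeqCst);
    assert_eq!(unsafe { (*head).key }, 2);
}
```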

View file

@ -0,0 +1,57 @@
use crate::os::xous::ffi::blocking_scalar;
use crate::os::xous::services::{
systime_server, ticktimer_server, SystimeScalar::GetUtcTimeMs, TicktimerScalar::ElapsedMs,
};
use crate::time::Duration;
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
pub struct Instant(Duration);
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
pub struct SystemTime(Duration);
pub const UNIX_EPOCH: SystemTime = SystemTime(Duration::from_secs(0));
impl Instant {
pub fn now() -> Instant {
let result = blocking_scalar(ticktimer_server(), ElapsedMs.into())
.expect("failed to request elapsed_ms");
let lower = result[0];
let upper = result[1];
Instant { 0: Duration::from_millis(lower as u64 | (upper as u64) << 32) }
}
pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
self.0.checked_sub(other.0)
}
pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
self.0.checked_add(*other).map(Instant)
}
pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
self.0.checked_sub(*other).map(Instant)
}
}
impl SystemTime {
pub fn now() -> SystemTime {
let result = blocking_scalar(systime_server(), GetUtcTimeMs.into())
.expect("failed to request utc time in ms");
let lower = result[0];
let upper = result[1];
SystemTime { 0: Duration::from_millis((upper as u64) << 32 | lower as u64) }
}
pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0)
}
pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
Some(SystemTime(self.0.checked_add(*other)?))
}
pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
Some(SystemTime(self.0.checked_sub(*other)?))
}
}
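
Both clocks above receive their millisecond count as two 32-bit scalar values. A tiny sketch (not part of the patch) of how the halves are reassembled into a `Duration`:

```rust
use std::time::Duration;

// Reassemble a 64-bit millisecond count from the two 32-bit halves returned
// by the scalar call, as `Instant::now` and `SystemTime::now` do above.
fn combine(lower: usize, upper: usize) -> Duration {
    Duration::from_millis(lower as u64 | (upper as u64) << 32)
}

fn main() {
    // 1000 ms in the lower half plus 1 << 32 ms in the upper half.
    let d = combine(1000, 1);
    assert_eq!(d.as_millis(), (1u128 << 32) + 1000);
}
```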

View file

@ -46,6 +46,7 @@ cfg_if::cfg_if! {
if #[cfg(any(target_os = "l4re", if #[cfg(any(target_os = "l4re",
feature = "restricted-std", feature = "restricted-std",
all(target_family = "wasm", not(target_os = "emscripten")), all(target_family = "wasm", not(target_os = "emscripten")),
target_os = "xous",
all(target_vendor = "fortanix", target_env = "sgx")))] { all(target_vendor = "fortanix", target_env = "sgx")))] {
pub use crate::sys::net; pub use crate::sys::net;
} else { } else {

View file

@ -1640,7 +1640,10 @@ impl<'a> Builder<'a> {
// flesh out rpath support more fully in the future.
rustflags.arg("-Zosx-rpath-install-name");
Some(format!("-Wl,-rpath,@loader_path/../{libdir}"))
} else if !target.contains("windows")
&& !target.contains("aix")
&& !target.contains("xous")
{
rustflags.arg("-Clink-args=-Wl,-z,origin");
Some(format!("-Wl,-rpath,$ORIGIN/../{libdir}"))
} else {