From bd73f64a12d9d2e79924f6b73cd125b93799399d Mon Sep 17 00:00:00 2001
From: Lawrence Esswood
Date: Tue, 2 Jul 2024 21:15:21 +0000
Subject: [PATCH] WIP All changes squashed

An early squashed commit for all the changes made to tock that contains
CHERI changes and more. This needs to be reworked into individual features
and properly config-ed / feature-gated, or relegated to downstream only
where applicable.

Changes here:
* Generic CHERI support
* Generic RV64 support
* A CHERI virt QEMU board
* Changes to the grant access interface for zero-copy/dma
  (ARef/PRef/safe_buf/resettable_iterator etc)
* Some example zero-copy capsules/drivers (a uart)
* Static init pattern (StaticComponent and helpers)
* A dynamic process loading capsule
* Non-execute-in-place ("contiguous loading")

Change-Id: Ia68e0846ec856f0789ba38cb9fa77f56d55647c3
---
 Cargo.toml | 4 +
 arch/cheri/Cargo.toml | 9 +
 arch/cheri/src/cheri_mpu.rs | 514 ++++
 arch/cheri/src/lib.rs | 7 +
 arch/cortex-m/src/mpu.rs | 13 +-
 arch/riscv/Cargo.toml | 3 +
 arch/riscv/src/csr/mcause.rs | 21 +-
 arch/riscv/src/csr/mod.rs | 87 +-
 arch/riscv/src/csr/mtval.rs | 52 +-
 arch/riscv/src/csr/satp.rs | 40 +
 arch/riscv/src/lib.rs | 762 +++++-
 arch/riscv/src/plic.rs | 170 ++
 arch/{rv32i => riscv}/src/pmp.rs | 302 ++-
 arch/{rv32i => riscv}/src/support.rs | 38 +-
 arch/{rv32i => riscv}/src/syscall.rs | 423 ++-
 arch/rv32i/src/epmp.rs | 14 +-
 arch/rv32i/src/lib.rs | 653 +---
 arch/rv32i/src/machine_timer.rs | 23 +-
 boards/acd52832/src/main.rs | 4 +-
 boards/arty_e21/src/main.rs | 4 +-
 boards/clue_nrf52840/src/main.rs | 4 +-
 boards/components/src/alarm.rs | 43 +-
 boards/components/src/console.rs | 47 +-
 boards/components/src/debug_writer.rs | 92 +-
 boards/components/src/lib.rs | 6 +
 boards/components/src/lldb.rs | 29 +
 boards/components/src/sched/cooperative.rs | 9 +-
 boards/components/src/sched/mlfq.rs | 9 +-
 boards/components/src/sched/round_robin.rs | 37 +-
 boards/esp32-c3-devkitM-1/src/main.rs | 4 +-
 boards/hail/src/main.rs | 4 +-
 boards/hifive1/src/main.rs | 4 +-
 boards/imix/src/main.rs | 4 +-
 boards/imxrt1050-evkb/src/main.rs | 4 +-
 boards/kernel_layout.ld | 57 +-
 boards/litex/arty/src/main.rs | 4 +-
 boards/litex/sim/src/main.rs | 4 +-
 boards/microbit_v2/src/main.rs | 4 +-
 boards/msp_exp432p401r/src/main.rs | 4 +-
 boards/nano33ble/src/main.rs | 4 +-
 boards/nano_rp2040_connect/src/main.rs | 4 +-
 boards/nordic/nrf52840_dongle/src/main.rs | 4 +-
 boards/nordic/nrf52840dk/src/main.rs | 4 +-
 boards/nordic/nrf52dk/src/main.rs | 3 +-
 boards/nucleo_f429zi/src/main.rs | 4 +-
 boards/nucleo_f446re/src/main.rs | 4 +-
 boards/opentitan/src/main.rs | 4 +-
 boards/pico_explorer_base/src/main.rs | 4 +-
 boards/qemu_cheri_virt/Cargo.toml | 16 +
 boards/qemu_cheri_virt/Makefile | 84 +
 boards/qemu_cheri_virt/build.rs | 4 +
 boards/qemu_cheri_virt/layout.ld | 15 +
 boards/qemu_cheri_virt/src/main.rs | 544 ++++
 boards/raspberry_pi_pico/src/main.rs | 4 +-
 boards/redboard_artemis_nano/src/main.rs | 3 +-
 boards/redboard_redv/src/main.rs | 4 +-
 boards/stm32f3discovery/src/main.rs | 4 +-
 boards/stm32f412gdiscovery/src/main.rs | 4 +-
 boards/stm32f429idiscovery/src/main.rs | 4 +-
 boards/swervolf/src/main.rs | 4 +-
 boards/teensy40/src/main.rs | 4 +-
 boards/weact_f401ccu6/src/main.rs | 4 +-
 capsules/Cargo.toml | 1 +
 capsules/examples/traitobj_list.rs | 8 +-
 capsules/src/alarm.rs | 9 +-
 capsules/src/console.rs | 23 +-
 capsules/src/console_zero.rs | 236 ++
 capsules/src/driver.rs | 2 +
 capsules/src/dyn_proc_loader.rs | 167 ++
 capsules/src/ieee802154/virtual_mac.rs | 2 +-
 capsules/src/kv_store.rs | 2 +-
capsules/src/lib.rs | 10 + capsules/src/low_level_debug/mod.rs | 260 +- capsules/src/net/sixlowpan/sixlowpan_state.rs | 2 +- capsules/src/net/udp/udp_recv.rs | 2 +- capsules/src/net/udp/udp_send.rs | 2 +- capsules/src/virtual_adc.rs | 2 +- capsules/src/virtual_aes_ccm.rs | 2 +- capsules/src/virtual_alarm.rs | 6 +- capsules/src/virtual_digest.rs | 2 +- capsules/src/virtual_flash.rs | 2 +- capsules/src/virtual_hmac.rs | 2 +- capsules/src/virtual_i2c.rs | 4 +- capsules/src/virtual_pwm.rs | 2 +- capsules/src/virtual_rng.rs | 2 +- capsules/src/virtual_sha.rs | 2 +- capsules/src/virtual_spi.rs | 2 +- capsules/src/virtual_timer.rs | 2 +- capsules/src/virtual_uart.rs | 11 +- capsules/src/virtual_uart_zero.rs | 247 ++ cheri_tock_recipe.md | 200 ++ chips/lowrisc/src/virtual_otbn.rs | 2 +- chips/sifive/src/clint.rs | 16 +- chips/sifive/src/lib.rs | 4 + chips/uarts/Cargo.toml | 13 + chips/uarts/src/lib.rs | 15 + chips/uarts/src/ns16550.rs | 239 ++ chips/uarts/src/primecell.rs | 76 + chips/uarts/src/uart.rs | 168 ++ chips/uarts/src/uart_zero.rs | 151 ++ kernel/Cargo.toml | 3 + kernel/src/capabilities.rs | 11 + kernel/src/cheri.rs | 619 +++++ kernel/src/collections/list.rs | 638 ++++- kernel/src/collections/mod.rs | 2 + kernel/src/collections/resettable_iterator.rs | 789 ++++++ kernel/src/collections/ring_buffer.rs | 764 +++++- kernel/src/collections/safe_buf.rs | 812 ++++++ kernel/src/component.rs | 651 +++++ kernel/src/config.rs | 301 +++ kernel/src/debug.rs | 163 +- kernel/src/dynamic_deferred_call.rs | 131 +- kernel/src/easm.rs | 53 + kernel/src/errorcode.rs | 13 + kernel/src/grant.rs | 2296 ++++++++++++++--- kernel/src/hil/time.rs | 49 + kernel/src/hil/uart.rs | 145 ++ kernel/src/introspection.rs | 11 +- kernel/src/kernel.rs | 433 +++- kernel/src/lib.rs | 31 +- kernel/src/memop.rs | 33 +- kernel/src/platform/chip.rs | 6 + kernel/src/platform/mpu.rs | 80 +- kernel/src/platform/scheduler_timer.rs | 2 +- kernel/src/process.rs | 102 +- kernel/src/process_printer.rs | 5 +- kernel/src/process_standard.rs | 759 +++--- kernel/src/process_utilities.rs | 314 ++- kernel/src/processbuffer.rs | 472 +++- kernel/src/scheduler/cooperative.rs | 9 +- kernel/src/scheduler/mlfq.rs | 10 +- kernel/src/scheduler/round_robin.rs | 25 +- kernel/src/syscall.rs | 285 +- kernel/src/syscall_driver.rs | 12 + kernel/src/upcall.rs | 93 +- kernel/src/utilities/helpers.rs | 67 + kernel/src/utilities/leased_buffer.rs | 170 ++ kernel/src/utilities/mod.rs | 4 +- kernel/src/utilities/singleton_checker.rs | 94 + kernel/src/utilities/static_init.rs | 2 +- kernel/src/utilities/static_ref.rs | 59 +- libraries/misc/Cargo.toml | 14 + libraries/misc/src/const_env.rs | 185 ++ libraries/misc/src/default_array.rs | 92 + libraries/misc/src/divorce.rs | 628 +++++ libraries/misc/src/lib.rs | 18 + libraries/misc/src/misc_macros.rs | 12 + libraries/misc/src/never.rs | 8 + libraries/misc/src/overload_impl.rs | 59 + libraries/misc/src/potatoes.rs | 90 + libraries/misc/src/take_borrow.rs | 128 + libraries/misc/src/tpanic.rs | 24 + libraries/misc/src/trait_alias.rs | 144 ++ libraries/misc/src/unsigned_allocators.rs | 306 +++ libraries/riscv-csr/src/csr.rs | 4 + libraries/tock-cells/src/lib.rs | 1 + libraries/tock-cells/src/map_cell.rs | 261 +- libraries/tock-cells/src/optional_cell.rs | 7 + libraries/tock-cells/src/take_cell.rs | 4 +- .../tock-register-interface/src/fields.rs | 2 +- libraries/tock-register-interface/src/lib.rs | 1 + .../tock-register-interface/src/macros.rs | 169 +- libraries/tock-tbf/src/parse.rs | 13 +- libraries/tock-tbf/src/types.rs | 
37 +- rust-toolchain | 2 +- tools/run_cargo_fmt.sh | 8 +- 166 files changed, 16123 insertions(+), 2755 deletions(-) create mode 100644 arch/cheri/Cargo.toml create mode 100644 arch/cheri/src/cheri_mpu.rs create mode 100644 arch/cheri/src/lib.rs create mode 100644 arch/riscv/src/csr/satp.rs create mode 100644 arch/riscv/src/plic.rs rename arch/{rv32i => riscv}/src/pmp.rs (74%) rename arch/{rv32i => riscv}/src/support.rs (56%) rename arch/{rv32i => riscv}/src/syscall.rs (60%) create mode 100644 boards/qemu_cheri_virt/Cargo.toml create mode 100644 boards/qemu_cheri_virt/Makefile create mode 100644 boards/qemu_cheri_virt/build.rs create mode 100644 boards/qemu_cheri_virt/layout.ld create mode 100644 boards/qemu_cheri_virt/src/main.rs create mode 100644 capsules/src/console_zero.rs create mode 100644 capsules/src/dyn_proc_loader.rs create mode 100644 capsules/src/virtual_uart_zero.rs create mode 100644 cheri_tock_recipe.md create mode 100644 chips/uarts/Cargo.toml create mode 100644 chips/uarts/src/lib.rs create mode 100644 chips/uarts/src/ns16550.rs create mode 100644 chips/uarts/src/primecell.rs create mode 100644 chips/uarts/src/uart.rs create mode 100644 chips/uarts/src/uart_zero.rs create mode 100644 kernel/src/cheri.rs create mode 100644 kernel/src/collections/resettable_iterator.rs create mode 100644 kernel/src/collections/safe_buf.rs create mode 100644 kernel/src/easm.rs create mode 100644 kernel/src/utilities/leased_buffer.rs create mode 100644 kernel/src/utilities/singleton_checker.rs create mode 100644 libraries/misc/Cargo.toml create mode 100644 libraries/misc/src/const_env.rs create mode 100644 libraries/misc/src/default_array.rs create mode 100644 libraries/misc/src/divorce.rs create mode 100644 libraries/misc/src/lib.rs create mode 100644 libraries/misc/src/misc_macros.rs create mode 100644 libraries/misc/src/never.rs create mode 100644 libraries/misc/src/overload_impl.rs create mode 100644 libraries/misc/src/potatoes.rs create mode 100644 libraries/misc/src/take_borrow.rs create mode 100644 libraries/misc/src/tpanic.rs create mode 100644 libraries/misc/src/trait_alias.rs create mode 100644 libraries/misc/src/unsigned_allocators.rs diff --git a/Cargo.toml b/Cargo.toml index 04cb39334..1520cf301 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,7 @@ members = [ "arch/cortex-m7", "arch/riscv", "arch/rv32i", + "arch/cheri", "boards/acd52832", "boards/nano_rp2040_connect", "boards/arty_e21", @@ -37,6 +38,7 @@ members = [ "boards/teensy40", "boards/nano33ble", "boards/qemu_rv32_virt", + "boards/qemu_cheri_virt", "boards/swervolf", "boards/weact_f401ccu6/", "capsules", @@ -68,12 +70,14 @@ members = [ "chips/stm32f4xx", "chips/swerv", "chips/swervolf-eh1", + "chips/uarts", "kernel", "libraries/enum_primitive", "libraries/riscv-csr", "libraries/tock-cells", "libraries/tock-register-interface", "libraries/tickv", + "libraries/misc", ] exclude = [ "tools/alert_codes", diff --git a/arch/cheri/Cargo.toml b/arch/cheri/Cargo.toml new file mode 100644 index 000000000..ea75c3a6e --- /dev/null +++ b/arch/cheri/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "cheri" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +kernel = { path = "../../kernel" } diff --git a/arch/cheri/src/cheri_mpu.rs b/arch/cheri/src/cheri_mpu.rs new file mode 100644 index 000000000..e1e899b84 --- /dev/null +++ b/arch/cheri/src/cheri_mpu.rs @@ -0,0 +1,514 @@ +//! 
CHERI platforms do not need a separate MPU to enforce access to the address space.
+//! Access is granted by merit of passing capabilities.
+//! The kernel is responsible for correctly bounding / limiting access on process creation.
+//! It should be:
+//! 1) zeroing any memory accessible to the process. This should be done in a few places:
+//!    When a process is created (up to the brk)
+//!    When a process calls brk/sbrk (from the old break to the new one)
+//!    When a process is restarted TODO (part of b/266802576)
+//! 2) Either zero or bound capabilities so they only cover the process on process startup (DONE)
+//! 3) limiting flow of capabilities between processes via IPC TODO (b/247192904)
+//! 4) Performing revocation in some cases TODO (b/280575362)
+//! This module implements the MPU trait to satisfy the kernel, but it mostly does nothing. What
+//! it does do is report what access control CHERI _could_ provide, assuming the kernel takes
+//! the other steps.
+
+use core::cell::Cell;
+use core::fmt::{Display, Formatter};
+use core::ops::Deref;
+use core::ptr::slice_from_raw_parts;
+use kernel::capabilities::ProcessManagementCapability;
+use kernel::cheri::{cptr, cram};
+use kernel::grant::{revoke_allows, AllowRoCount, AllowRwCount, Grant, UpcallCount};
+use kernel::platform::mpu::{Permissions, Region, RemoveRegionResult, MPU};
+use kernel::process::{Error, Process};
+use kernel::processbuffer::ReadableProcessByte;
+use kernel::syscall::{CommandReturn, CommandReturnResult, SyscallDriver};
+use kernel::ErrorCode::{FAIL, NOMEM};
+use kernel::{ErrorCode, Kernel, ProcessId};
+
+pub struct CheriMPU {
+    grant: CheriMPUGrant,
+    kernel: &'static Kernel,
+    proc_manage: &'static dyn ProcessManagementCapability,
+}
+
+/// How many ranges can be revoked simultaneously.
+/// If a process tries to share then unshare multiple regions at once with another process, this
+/// may need to be increased.
+const MAX_REVOKE_RANGES: usize = 2;
+
+#[derive(Default, Copy, Clone)]
+struct RevokeRange {
+    start: usize,
+    end: usize,
+}
+
+impl RevokeRange {
+    pub fn contains(self, base: usize) -> bool {
+        // Notes on the strictly less than end:
+        // Capabilities can be zero length. Objects can be adjacent. A zero-length capability at
+        // the boundary between two objects could therefore refer to either object.
+        // Doing the inequality this way might result in a zero-length capability to an object
+        // not being revoked because it is indistinguishable from the next object.
+        // This is fine, as it cannot be used to access memory as it is zero length.
+        // Users should be aware.
+        base >= self.start && base < self.end
+    }
+    pub fn is_empty(&self) -> bool {
+        self.start == self.end
+    }
+}
+
+pub mod upcalls {
+    pub const ON_EPOCH: usize = 0;
+}
+
+pub mod allow_ro {
+    pub const SHADOW_MAP: usize = 0;
+}
+
+pub mod allow_rw {
+    pub const CTR: usize = 0;
+}
+
+pub mod commands {
+    pub const SET_BASE: usize = 1;
+    pub const DO_SWEEP: usize = 2;
+}
+
+#[derive(Default)]
+pub struct CheriMPUGrantData {
+    // base address that revocation map shadows
+    revoke_base: Cell<usize>,
+}
+
+pub type CheriMPUGrant =
+    Grant<CheriMPUGrantData, UpcallCount<1>, AllowRoCount<1>, AllowRwCount<1>>;
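An illustrative aside, not part of the patch: the two membership tests that drive revocation are the half-open `RevokeRange::contains` above and the `UserShadowMap` bitmap lookup defined just below. Here is the same logic in standalone form, with a plain `&[u8]` standing in for the allowed `ReadableProcessByte` slice:

```rust
/// Sketch only: mirrors RevokeRange::contains and the shadow-map lookup.
const GRANULE_POW_2: usize = 4; // one shadow bit per 16-byte granule

fn range_contains(start: usize, end: usize, base: usize) -> bool {
    // Half-open [start, end): a zero-length capability sitting exactly at
    // `end` is indistinguishable from the next object, so it is spared.
    base >= start && base < end
}

fn shadow_map_contains(map: &[u8], offset_base: usize, base: usize) -> bool {
    let granule = (base - offset_base) >> GRANULE_POW_2; // which bit overall
    let map_index = granule >> 3; // which byte of the map
    let byte_mask = 1u8 << (granule & 0x7); // which bit of that byte
    // Out of range => default to NOT revoking, as in the real code.
    map.get(map_index).map_or(false, |b| b & byte_mask != 0)
}

fn main() {
    assert!(range_contains(0x1000, 0x2000, 0x1000));
    assert!(!range_contains(0x1000, 0x2000, 0x2000)); // boundary is spared

    let map = [0b0000_0100u8]; // bit 2 set: the third granule is revoked
    assert!(shadow_map_contains(&map, 0x8000, 0x8020)); // 0x20 >> 4 == 2
    assert!(!shadow_map_contains(&map, 0x8000, 0x8010)); // granule 1: clear
}
```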
+// Without revocation, we can't have an application decrease its brk again
+#[derive(Default)]
+pub struct CheriMPUConfig {
+    // The largest _effective_ break we have seen.
+    // Because DDC/PCC bounds setting are not exact, this is the true application break.
+    // The kernel may think the break is a little less than this, but because it asks this code
+    // before moving the kernel break we can still reject.
+    rounded_app_brk: usize,
+    // The base of the application.
+    app_base: usize,
+    // Ranges that need revoking
+    revocation_ranges: [RevokeRange; MAX_REVOKE_RANGES],
+}
+
+impl Display for CheriMPUConfig {
+    fn fmt(&self, _f: &mut Formatter<'_>) -> core::fmt::Result {
+        Ok(())
+    }
+}
+
+/// This alignment is the alignment of a capability, which malloc aligns to anyway.
+const GRANULE_POW_2: usize = 4;
+const GRANULE_SIZE: usize = 1 << GRANULE_POW_2;
+
+/// 8 bits in a byte
+const BYTES_PER_BITMAP_BYTE: usize = GRANULE_SIZE * 8;
+const BITMAP_ADDRESS_MASK: usize = BYTES_PER_BITMAP_BYTE - 1;
+
+#[derive(Copy, Clone)]
+struct UserShadowMap<'a> {
+    offset_base: usize,
+    map: &'a [ReadableProcessByte],
+}
+
+impl<'a> UserShadowMap<'a> {
+    pub fn contains(&self, base: usize) -> bool {
+        // The shadow map covers a range starting at offset_base, so offset by that much
+        let base = base - self.offset_base;
+        // Each bit of shadow map covers GRANULE_SIZE bytes, so scale by that
+        let shifted = base >> GRANULE_POW_2;
+        // `shifted` indexes into the bits of the map, so split into an index into the map...
+        let map_index = shifted >> 3;
+        // ...and an index into that byte
+        let byte_index = (shifted & 0x7) as u8;
+        // convert byte index to mask
+        let byte_mask = 1u8 << byte_index;
+        // now check if the bit is set. If the map does not cover the range, default to _not_
+        // revoking.
+        self.map
+            .get(map_index)
+            .map(|byte| (byte.get() & byte_mask) != 0)
+            .unwrap_or(false)
+    }
+}
+
+type RevocationRange = [Cell<cptr>];
+
+fn should_revoke(map: UserShadowMap, invalid_ranges: &[RevokeRange], base: usize) -> bool {
+    map.contains(base) || invalid_ranges.iter().any(|range| range.contains(base))
+}
+
+/// Sweep a specific range
+fn sweep_range(_range: &RevocationRange, _map: UserShadowMap, _invalid_ranges: &[RevokeRange]) {
+    #[cfg(target_feature = "xcheri")]
+    {
+        for cap in _range {
+            let (base, tagged) = cap.get().get_base_and_tag();
+            if tagged {
+                if should_revoke(_map, _invalid_ranges, base) {
+                    cptr::invalidate_shared(cap)
+                }
+            }
+        }
+    }
+}
+
+fn revocation_range_from_parts(base: usize, top: usize) -> *const RevocationRange {
+    let mask = core::mem::size_of::<cptr>() - 1;
+    // Round down base
+    let base = base & !mask;
+    // Round up top
+    let top = (top + mask) & !mask;
+    slice_from_raw_parts(
+        base as *const Cell<cptr>,
+        (top - base) / core::mem::size_of::<cptr>(),
+    )
+}
+
+impl CheriMPU {
+    // This aligns UP base and length to the next representable range that is at least length
+    // long.
+ fn align_region(base: usize, length: usize) -> (usize, usize) { + let mask = cram(length); + let inv_mask = !mask; + ((base + inv_mask) & mask, (length + inv_mask) & mask) + } + + pub const fn new( + grant: CheriMPUGrant, + kernel: &'static Kernel, + proc_manage: &'static dyn ProcessManagementCapability, + ) -> Self { + Self { + grant, + kernel, + proc_manage, + } + } + + pub fn set_shadow_map_base( + &self, + data: &CheriMPUGrantData, + base: usize, + ) -> Result<(), ErrorCode> { + if base & BITMAP_ADDRESS_MASK != 0 { + return Err(ErrorCode::INVAL); + } + data.revoke_base.set(base); + Ok(()) + } + + // TODO: some way of breaking this loop if a process suddenly wants scheduling + /// Safety: the process memory that this would sweep must be valid for reads and writes + /// Safety: no LiveARef or LivePRef may exist to any memory that might be revoked, + /// Nor may any grants be entered via the legacy mechanism. + pub fn revoke_sweep( + &self, + config: &mut CheriMPUConfig, + proc: &dyn Process, + ) -> Result<(), ErrorCode> { + // NOTE: we cannot fail to revoke just because we cannot allocate a grant. + // If we do, the user does not have enough memory to provide a shadow map, but we can + // still revoke other capabilities that the kernel wants revoking. + // The user will be aware revocation is not working for them as they will get a failure + // to allow the map. + let proc_grant = self.grant.get_for(proc.processid()); + + let kern_data = + proc_grant.map(|proc_grant| (proc_grant.get_kern_data(), proc_grant.get_grant_data())); + + // NOTE: This ARef could break the safety condition of this function. + // that is, a user could ask us to revoke their shadow map, and at the end of this function + // we would still have a reference to it even though revocation has finished. + // However, because we drop this ARef at the end of this function, by the time we actually + // return it will once again be the case that no LiveARef s exist. + + let (allow, data, ctr) = match &kern_data { + Ok((kern_data, grant)) => ( + kern_data.get_readonly_aref(allow_ro::SHADOW_MAP), + grant.get_pref(), + kern_data.get_readwrite_aref(allow_rw::CTR), + ), + Err(_) => (Err(ErrorCode::FAIL), None, Err(ErrorCode::FAIL)), + }; + + let mut map_range = match &allow { + Ok(live) => live.deref().deref(), + Err(_) => [].as_slice(), + }; + + let mut base = data.map_or(0, |data| data.revoke_base.get()); + + let allowed_base = config.app_base & !BITMAP_ADDRESS_MASK; + let allowed_top = config.rounded_app_brk & !BITMAP_ADDRESS_MASK; + let allowed_len = allowed_top - allowed_base; + + // Clamp bottom + let delta_bytes = if base < allowed_base { + base = allowed_base; + allowed_base - base + } else { + 0 + }; + map_range = map_range + .get(delta_bytes / BYTES_PER_BITMAP_BYTE..) + .unwrap_or(&[]); + + // Clamp top + let new_len = core::cmp::min(map_range.len(), allowed_len / BYTES_PER_BITMAP_BYTE); + map_range = map_range.get(..new_len).unwrap_or(&[]); + + let map = UserShadowMap { + offset_base: base, + map: map_range, + }; + + let ranges = &mut config.revocation_ranges; + // First, sweep processes own memory. + // This includes the process itself, grants, AND the process header (including saved + // register file) + let proc_mem = revocation_range_from_parts(config.app_base, config.rounded_app_brk); + + sweep_range(unsafe { &*proc_mem }, map, ranges.as_slice()); + + // Sweep allow-ed capabilities to see if any should be revoked. 
+ // NOTE: this would not be needed if cptr was stored in grants, but we strip this info + // Safety: same requirement as safety of this function + + unsafe { + revoke_allows(self.kernel, proc, |slice| { + should_revoke(map, ranges, slice as *const u8 as usize) + })? + } + + // TODO: sweep register file, grant allows, and CLUT entries + + // Notify epoch has ticked. + // We do so by both writing to the shared mem and sending an upcall. + + if let Ok(ctr) = ctr { + if let Some(ctr) = ctr.align_to_u32().1.get(0) { + let epoch = ctr.get().wrapping_add(1); + ctr.set(epoch); + if let Ok((kern_data, _)) = kern_data { + // Ignore the process being too busy to get this upcall, they will get one on next epoch + let _ = kern_data.schedule_upcall(upcalls::ON_EPOCH, (epoch as usize, 0, 0)); + } + } + } + + Ok(()) + } + + fn handle_command( + &self, + command_number: usize, + arg2: usize, + _arg3: usize, + appid: ProcessId, + ) -> CommandReturnResult { + match command_number { + commands::SET_BASE => { + let kern_data = self.grant.get_for(appid)?; + let new_data = kern_data.get_grant_data(); + let grant_data = new_data.get_pref().ok_or(FAIL)?; + self.set_shadow_map_base(&*grant_data, arg2)?; + } + commands::DO_SWEEP => { + self.kernel.process_map_or_external( + Err(ErrorCode::FAIL), + appid, + // Safety: we have created no livepref in this scope, which should be called + // directly from syscalls. + // The fact that this is called from the main kernel loop should be a safety + // invariant of the kernel. + |app| unsafe { app.revoke_regions() }, + self.proc_manage, + )?; + } + _ => {} + } + + Ok(CommandReturn::success()) + } +} + +impl MPU for CheriMPU { + // All MPU state is stored implicitly in the saved state of the process + type MpuConfig = CheriMPUConfig; + // CHERI can support alignments as precise as 1 + const MIN_MPUALIGN: usize = 1; + + // This is the overly permissive aligner that finds a representable range that covers an already + // allocated object [base,base+length) + fn align_range(base: usize, length: usize) -> (usize, usize) { + // The mask we get from cram is sufficient to round both length and base such that they + // would be representable. However, this align function is not designed to allocate an + // object, but cover one that already exists. Rounding down the base requires us to + // increase the length by the same amount. This increase in length might change the + // alignment requirement. Adding on a fudge factor ensures this can't happen. + // The exact shift used here depends on the number of precision bits in the capability + // format. 10 was chosen conservatively. It must be less than number of precision bits, + // but can be any value smaller. Too small a value would just over-align. 
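An editorial aside, not part of the patch: the representability rounding that `align_range` describes just above is easier to see with concrete numbers. The sketch below uses `mock_cram`, a software stand-in for the CHERI `cram` instruction (the real mask comes from the hardware, and the 14-bit precision here is an assumption), and reproduces the simple round-up-to-mask step that `align_region` performs:

```rust
// A rough, self-contained model of representable-range alignment. The
// precision is an assumption; real CHERI formats fix it in hardware.
const PRECISION_BITS: u32 = 14;

// Software stand-in for `cram`: all ones in the top, zeros in the bottom,
// with more low zero bits the longer the requested length is.
fn mock_cram(length: usize) -> usize {
    let bits = usize::BITS - length.leading_zeros(); // significant bits of length
    let align = bits.saturating_sub(PRECISION_BITS); // excess bits force alignment
    !((1usize << align) - 1)
}

// Mirrors CheriMPU::align_region: round base and length up to the mask.
fn align_region(base: usize, length: usize) -> (usize, usize) {
    let mask = mock_cram(length);
    let inv_mask = !mask;
    ((base + inv_mask) & mask, (length + inv_mask) & mask)
}

fn main() {
    let len = (1 << 20) + 1; // 21 significant bits => 128-byte alignment here
    let (base, rounded) = align_region(0x1234, len);
    assert_eq!(base % 128, 0); // base rounded up to a representable boundary
    assert!(rounded >= len); // rounded length still covers the request
}
```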
+ let length_and_some = length + (length >> 10); + // The mask given by cram is all 1's in the top, and 0's in the bottom + let mask = cram(length_and_some); + let new_base = base & mask; + let new_length = (length + (base - new_base) + !(mask)) & mask; + (new_base, new_length) + } + + fn number_total_regions(&self) -> usize { + usize::MAX + } + + fn allocate_region( + &self, + unallocated_memory_start: *const u8, + unallocated_memory_size: usize, + min_region_size: usize, + _permissions: Permissions, + _config: &mut Self::MpuConfig, + ) -> Option { + let (aligned_base, aligned_length) = + Self::align_region(unallocated_memory_start as usize, min_region_size); + if aligned_base - (unallocated_memory_start as usize) + aligned_length + > unallocated_memory_size + { + None + } else { + Some(Region::new(aligned_base as *const u8, aligned_length)) + } + } + + fn allocate_app_memory_region( + &self, + unallocated_memory_start: *const u8, + unallocated_memory_size: usize, + min_memory_size: usize, + initial_app_memory_size: usize, + initial_kernel_memory_size: usize, + permissions: Permissions, + config: &mut Self::MpuConfig, + ) -> Option<(*const u8, usize)> { + // CHERI can always represent smaller regions as (if not more) precisely than larger ones. + // This means it is sound to just try allocate this much memory, and then assume we could + // put a kernel/app break somewhere sensible within it, although not anywhere. + let memory_size = core::cmp::max( + min_memory_size, + initial_app_memory_size + initial_kernel_memory_size, + ); + let result = self.allocate_region( + unallocated_memory_start, + unallocated_memory_size, + memory_size, + permissions, + config, + ); + + result.map(|result| { + config.app_base = result.start_address() as usize; + let (initial_base, initial_break_len) = + Self::align_region(config.app_base, initial_app_memory_size); + config.rounded_app_brk = config.app_base + initial_break_len; + // After rounding, the base of the app region should not move and should not overlap the kernel + assert!( + initial_base == config.app_base + && (initial_break_len + initial_kernel_memory_size) <= result.size() + ); + + (result.start_address(), result.size()) + }) + } + + fn remove_memory_region( + &self, + region: Region, + config: &mut Self::MpuConfig, + ) -> Result { + // If we have space in our list, add it in. + for range in config.revocation_ranges.iter_mut() { + if range.is_empty() { + *range = RevokeRange { + start: region.start_address() as usize, + end: region.start_address() as usize + region.size(), + }; + return Ok(RemoveRegionResult::Async(Default::default())); + } + } + Err(NOMEM) + } + + #[allow(unused_variables)] + fn update_app_memory_region( + &self, + app_memory_break: *const u8, + kernel_memory_break: *const u8, + permissions: Permissions, + config: &mut Self::MpuConfig, + ) -> Result<(), ()> { + // What is being requested + let (target_base, target_break) = (config.app_base, app_memory_break as usize); + // What imprecise bounds setting would result in + let (rounded_base, rounded_break_length) = + Self::align_region(target_base, target_break - target_base); + + let rounded_break = rounded_base + rounded_break_length; + + let max_rounded_break = core::cmp::max(rounded_break, config.rounded_app_brk); + + // Because we might have released a capability to the application, we don't allow a kernel + // memory break that crosses the largest rounded app brk. + // We also don't allow the base of the capability to ever move. 
+ if target_base != rounded_base || max_rounded_break > (kernel_memory_break as usize) { + Err(()) + } else { + config.rounded_app_brk = max_rounded_break; + Ok(()) + } + } + + #[inline] + unsafe fn revoke_regions( + &self, + config: &mut Self::MpuConfig, + proc: &dyn Process, + ) -> Result<(), ErrorCode> { + let result = self.revoke_sweep(config, proc); + + // Ranges can be used again + if result.is_ok() { + for region in &mut config.revocation_ranges { + *region = Default::default() + } + } + + result + } +} + +impl SyscallDriver for CheriMPU { + fn command( + &self, + command_num: usize, + r2: usize, + r3: usize, + process_id: ProcessId, + ) -> CommandReturn { + self.handle_command(command_num, r2, r3, process_id).into() + } + + fn allocate_grant(&self, process_id: ProcessId) -> Result<(), Error> { + self.grant.enter(process_id, |_, _| {}) + } +} + +kernel::very_simple_component!(impl for CheriMPU, new(CheriMPUGrant, &'static Kernel, &'static dyn ProcessManagementCapability)); diff --git a/arch/cheri/src/lib.rs b/arch/cheri/src/lib.rs new file mode 100644 index 000000000..5395e99ba --- /dev/null +++ b/arch/cheri/src/lib.rs @@ -0,0 +1,7 @@ +//! Generalised support for CHERI architectures + +#![no_std] +#![feature(const_trait_impl, const_mut_refs, const_slice_split_at_mut)] +#![feature(macro_metavar_expr)] + +pub mod cheri_mpu; diff --git a/arch/cortex-m/src/mpu.rs b/arch/cortex-m/src/mpu.rs index 2a4595852..1517e8a2b 100644 --- a/arch/cortex-m/src/mpu.rs +++ b/arch/cortex-m/src/mpu.rs @@ -7,12 +7,13 @@ use core::fmt; use kernel; use kernel::platform::mpu; +use kernel::platform::mpu::RemoveRegionResult; use kernel::utilities::cells::OptionalCell; use kernel::utilities::math; use kernel::utilities::registers::interfaces::{Readable, Writeable}; use kernel::utilities::registers::{register_bitfields, FieldValue, ReadOnly, ReadWrite}; use kernel::utilities::StaticRef; -use kernel::ProcessId; +use kernel::{ErrorCode, ProcessId}; /// MPU Registers for the Cortex-M3, Cortex-M4 and Cortex-M7 families /// Described in section 4.5 of @@ -373,6 +374,8 @@ impl mpu::MPU { type MpuConfig = CortexMConfig; + const MIN_MPUALIGN: usize = MIN_REGION_SIZE; + fn clear_mpu(&self) { self.registers.ctrl.write(Control::ENABLE::CLEAR); } @@ -526,22 +529,22 @@ impl mpu::MPU &self, region: mpu::Region, config: &mut Self::MpuConfig, - ) -> Result<(), ()> { + ) -> Result { let (idx, _r) = config .regions .iter() .enumerate() .find(|(_idx, r)| **r == region) - .ok_or(())?; + .ok_or(ErrorCode::FAIL)?; if idx == APP_MEMORY_REGION_NUM { - return Err(()); + return Err(ErrorCode::INVAL); } config.regions[idx] = CortexMRegion::empty(idx); config.is_dirty.set(true); - Ok(()) + Ok(RemoveRegionResult::Sync) } fn allocate_app_memory_region( diff --git a/arch/riscv/Cargo.toml b/arch/riscv/Cargo.toml index a53fc3cfb..7dc6f5aea 100644 --- a/arch/riscv/Cargo.toml +++ b/arch/riscv/Cargo.toml @@ -8,4 +8,7 @@ edition = "2021" kernel = { path = "../../kernel" } tock-registers = { path = "../../libraries/tock-register-interface" } riscv-csr = { path = "../../libraries/riscv-csr" } +misc = { path = "../../libraries/misc" } +[features] +page_align_pmp = [] diff --git a/arch/riscv/src/csr/mcause.rs b/arch/riscv/src/csr/mcause.rs index 80a31c98e..5ea49d9c6 100644 --- a/arch/riscv/src/csr/mcause.rs +++ b/arch/riscv/src/csr/mcause.rs @@ -5,12 +5,13 @@ register_bitfields![usize, is_interrupt OFFSET(crate::XLEN - 1) NUMBITS(1) [], reason OFFSET(0) NUMBITS(crate::XLEN - 1) [] ], - // Per the spec, implementations are allowed to use the higher bits 
of the - // interrupt/exception reason for their own purposes. For regular parsing, - // we only concern ourselves with the "standard" values. + // Although the base spec only defines the first 4 bits, some patterns in the first 6 bits + // are designated for custom use so we should include them here. + // I don't like the truncation here and as nothing is actually checking that reserved is 0 + // and this can lead to weird errors. reason [ - reserved OFFSET(4) NUMBITS(crate::XLEN - 5) [], - std OFFSET(0) NUMBITS(4) [] + reserved OFFSET(6) NUMBITS(crate::XLEN - 7) [], + std OFFSET(0) NUMBITS(6) [] ] ]; @@ -53,7 +54,7 @@ pub enum Interrupt { } /// Exception -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, PartialEq)] pub enum Exception { InstructionMisaligned, InstructionFault, @@ -69,6 +70,10 @@ pub enum Exception { InstructionPageFault, LoadPageFault, StorePageFault, + #[cfg(target_feature = "xcheri")] + CHERIPageException, + #[cfg(target_feature = "xcheri")] + CHERIException, Unknown, } @@ -108,6 +113,10 @@ impl Exception { 12 => Exception::InstructionPageFault, 13 => Exception::LoadPageFault, 15 => Exception::StorePageFault, + #[cfg(target_feature = "xcheri")] + 0x1b => Exception::CHERIPageException, + #[cfg(target_feature = "xcheri")] + 0x1c => Exception::CHERIException, _ => Exception::Unknown, } } diff --git a/arch/riscv/src/csr/mod.rs b/arch/riscv/src/csr/mod.rs index 1c6c1185b..9843a6a79 100644 --- a/arch/riscv/src/csr/mod.rs +++ b/arch/riscv/src/csr/mod.rs @@ -1,19 +1,26 @@ //! Tock Register interface for using CSR registers. use riscv_csr::csr::{ - ReadWriteRiscvCsr, MCAUSE, MCYCLE, MCYCLEH, MEPC, MIE, MINSTRET, MINSTRETH, MIP, MSCRATCH, - MSECCFG, MSECCFGH, MSTATUS, MTVAL, MTVEC, PMPADDR0, PMPADDR1, PMPADDR10, PMPADDR11, PMPADDR12, - PMPADDR13, PMPADDR14, PMPADDR15, PMPADDR16, PMPADDR17, PMPADDR18, PMPADDR19, PMPADDR2, - PMPADDR20, PMPADDR21, PMPADDR22, PMPADDR23, PMPADDR24, PMPADDR25, PMPADDR26, PMPADDR27, - PMPADDR28, PMPADDR29, PMPADDR3, PMPADDR30, PMPADDR31, PMPADDR32, PMPADDR33, PMPADDR34, - PMPADDR35, PMPADDR36, PMPADDR37, PMPADDR38, PMPADDR39, PMPADDR4, PMPADDR40, PMPADDR41, - PMPADDR42, PMPADDR43, PMPADDR44, PMPADDR45, PMPADDR46, PMPADDR47, PMPADDR48, PMPADDR49, - PMPADDR5, PMPADDR50, PMPADDR51, PMPADDR52, PMPADDR53, PMPADDR54, PMPADDR55, PMPADDR56, - PMPADDR57, PMPADDR58, PMPADDR59, PMPADDR6, PMPADDR60, PMPADDR61, PMPADDR62, PMPADDR63, - PMPADDR7, PMPADDR8, PMPADDR9, PMPCFG0, PMPCFG1, PMPCFG10, PMPCFG11, PMPCFG12, PMPCFG13, - PMPCFG14, PMPCFG15, PMPCFG2, PMPCFG3, PMPCFG4, PMPCFG5, PMPCFG6, PMPCFG7, PMPCFG8, PMPCFG9, + ReadWriteRiscvCsr, HMPCOUNTER_BASE, MCAUSE, MCYCLE, MEPC, MIE, MINSTRET, MIP, MSCRATCH, + MSECCFG, MSTATUS, MTVAL, MTVEC, PMPADDR0, PMPADDR1, PMPADDR10, PMPADDR11, PMPADDR12, PMPADDR13, + PMPADDR14, PMPADDR15, PMPADDR16, PMPADDR17, PMPADDR18, PMPADDR19, PMPADDR2, PMPADDR20, + PMPADDR21, PMPADDR22, PMPADDR23, PMPADDR24, PMPADDR25, PMPADDR26, PMPADDR27, PMPADDR28, + PMPADDR29, PMPADDR3, PMPADDR30, PMPADDR31, PMPADDR32, PMPADDR33, PMPADDR34, PMPADDR35, + PMPADDR36, PMPADDR37, PMPADDR38, PMPADDR39, PMPADDR4, PMPADDR40, PMPADDR41, PMPADDR42, + PMPADDR43, PMPADDR44, PMPADDR45, PMPADDR46, PMPADDR47, PMPADDR48, PMPADDR49, PMPADDR5, + PMPADDR50, PMPADDR51, PMPADDR52, PMPADDR53, PMPADDR54, PMPADDR55, PMPADDR56, PMPADDR57, + PMPADDR58, PMPADDR59, PMPADDR6, PMPADDR60, PMPADDR61, PMPADDR62, PMPADDR63, PMPADDR7, PMPADDR8, + PMPADDR9, PMPCFG0, PMPCFG10, PMPCFG12, PMPCFG14, PMPCFG2, PMPCFG4, PMPCFG6, PMPCFG8, SATP, STVEC, UTVEC, }; + +#[cfg(any(target_arch = 
"riscv32", not(target_os = "none")))] +use riscv_csr::csr::{ + MCYCLEH, MINSTRETH, MSECCFGH, PMPCFG1, PMPCFG11, PMPCFG13, PMPCFG15, PMPCFG3, PMPCFG5, PMPCFG7, + PMPCFG9, +}; + +use core::mem; use tock_registers::fields::FieldValue; use tock_registers::interfaces::{ReadWriteable, Readable, Writeable}; @@ -30,6 +37,7 @@ pub mod mtval; pub mod mtvec; pub mod pmpaddr; pub mod pmpconfig; +pub mod satp; pub mod stvec; pub mod utvec; @@ -47,7 +55,6 @@ pub struct CSR { pub mcycleh: ReadWriteRiscvCsr, pub mcycle: ReadWriteRiscvCsr, - #[cfg(any(target_arch = "riscv32", not(target_os = "none")))] pub pmpcfg0: ReadWriteRiscvCsr, #[cfg(any(target_arch = "riscv32", not(target_os = "none")))] pub pmpcfg1: ReadWriteRiscvCsr, @@ -138,6 +145,20 @@ pub struct CSR { pub pmpaddr62: ReadWriteRiscvCsr, pub pmpaddr63: ReadWriteRiscvCsr, + pub hpmcounter3: ReadWriteRiscvCsr, + pub hpmcounter4: ReadWriteRiscvCsr, + pub hpmcounter5: ReadWriteRiscvCsr, + pub hpmcounter6: ReadWriteRiscvCsr, + pub hpmcounter7: ReadWriteRiscvCsr, + pub hpmcounter8: ReadWriteRiscvCsr, + pub hpmcounter9: ReadWriteRiscvCsr, + pub hpmcounter10: ReadWriteRiscvCsr, + pub hpmcounter11: ReadWriteRiscvCsr, + pub hpmcounter12: ReadWriteRiscvCsr, + pub hpmcounter13: ReadWriteRiscvCsr, + pub hpmcounter14: ReadWriteRiscvCsr, + pub hpmcounter15: ReadWriteRiscvCsr, + pub mie: ReadWriteRiscvCsr, pub mscratch: ReadWriteRiscvCsr, pub mepc: ReadWriteRiscvCsr, @@ -153,6 +174,8 @@ pub struct CSR { pub utvec: ReadWriteRiscvCsr, pub stvec: ReadWriteRiscvCsr, + + pub satp: ReadWriteRiscvCsr, } // Define the "addresses" of each CSR register. @@ -255,6 +278,20 @@ pub const CSR: &CSR = &CSR { pmpaddr62: ReadWriteRiscvCsr::new(), pmpaddr63: ReadWriteRiscvCsr::new(), + hpmcounter3: ReadWriteRiscvCsr::new(), + hpmcounter4: ReadWriteRiscvCsr::new(), + hpmcounter5: ReadWriteRiscvCsr::new(), + hpmcounter6: ReadWriteRiscvCsr::new(), + hpmcounter7: ReadWriteRiscvCsr::new(), + hpmcounter8: ReadWriteRiscvCsr::new(), + hpmcounter9: ReadWriteRiscvCsr::new(), + hpmcounter10: ReadWriteRiscvCsr::new(), + hpmcounter11: ReadWriteRiscvCsr::new(), + hpmcounter12: ReadWriteRiscvCsr::new(), + hpmcounter13: ReadWriteRiscvCsr::new(), + hpmcounter14: ReadWriteRiscvCsr::new(), + hpmcounter15: ReadWriteRiscvCsr::new(), + mie: ReadWriteRiscvCsr::new(), mscratch: ReadWriteRiscvCsr::new(), mepc: ReadWriteRiscvCsr::new(), @@ -270,6 +307,8 @@ pub const CSR: &CSR = &CSR { utvec: ReadWriteRiscvCsr::new(), stvec: ReadWriteRiscvCsr::new(), + + satp: ReadWriteRiscvCsr::new(), }; impl CSR { @@ -309,7 +348,7 @@ impl CSR { // reads the cycle counter #[cfg(target_arch = "riscv64")] pub fn read_cycle_counter(&self) -> u64 { - CSR.mcycle.read(mcycle::mcycle::mcycle) + CSR.mcycle.read(mcycle::mcycle::mcycle) as u64 } pub fn pmpconfig_get(&self, index: usize) -> usize { @@ -545,4 +584,26 @@ impl CSR { _ => unreachable!(), } } + + /// Index is horribly confusing here because somebody thought that having multiple registers + /// with the name pmpcfgX with fields called pmpYcfg was a perfectly sensible idea. + /// This converts the 'Y' index above to the relevant 'X' index. + pub fn pmp_index_to_cfg_index(index: usize) -> usize { + // On RV32, there are 4 per register + #[cfg(target_arch = "riscv32")] + { + index / 4 + } + + // On RV64 there are 8 per register, but only every other exists + #[cfg(any(target_arch = "riscv64", not(target_os = "none")))] + { + (index / 4) & !1usize + } + } + + /// And this one gives the sub-index within the pmpcfgX register for a Y in pmpYcfg. 
+    pub fn pmp_index_to_cfg_sub_index(index: usize) -> usize {
+        index % mem::size_of::<usize>()
+    }
 }
diff --git a/arch/riscv/src/csr/mtval.rs b/arch/riscv/src/csr/mtval.rs
index 1b75736b8..8de7724ce 100644
--- a/arch/riscv/src/csr/mtval.rs
+++ b/arch/riscv/src/csr/mtval.rs
@@ -1,8 +1,58 @@
 use kernel::utilities::registers::register_bitfields;
 
 // mtval contains the address of an exception
+// On CHERI, in the event of CHERI exceptions, it has a different format
 register_bitfields![usize,
     pub mtval [
-        exception_addr OFFSET(0) NUMBITS(crate::XLEN) []
+        exception_addr OFFSET(0) NUMBITS(crate::XLEN) [],
+        cause OFFSET(0) NUMBITS(5) [
+            NONE = 0x00,
+            LENGTH = 0x01,
+            TAG = 0x02,
+            SEAL = 0x03,
+            TYPE = 0x04,
+            PERM_SOFT = 0x08,
+            REPRESENT = 0x0a,
+            UNALIGNED = 0x0b,
+            GLOBAL = 0x10,
+            PERM_EXECUTE = 0x11,
+            PERM_LOAD = 0x12,
+            PERM_STORE = 0x13,
+            PERM_LOAD_CAP = 0x14,
+            PERM_STORE_CAP = 0x15,
+            PERM_STORE_LOCAL_CAP = 0x16,
+            PERM_SEAL = 0x17,
+            PERM_ASR = 0x18,
+            PERM_CINVOKE = 0x19,
+            PERM_CINVOKE_IDC = 0x1a,
+            PERM_UNSEAL = 0x1b,
+            PERM_SET_CID = 0x1c,
+        ],
+        cap_idx OFFSET(5) NUMBITS(6) [
+            // All the bit patterns from 0-31 are the GPRs as you would expect.
+            // The CSRs are as follows:
+            PCC = 0b100000,
+            DDC = 0b100001,
+
+            UTCC = 0b100100,
+            UTDC = 0b100101,
+            UScratchC = 0b100110,
+            UEPCC = 0b100111,
+
+            STCC = 0b101100,
+            STDC = 0b101101,
+            SScratchC = 0b101110,
+            SEPCC = 0b101111,
+
+            MTCC = 0b111100,
+            MTDC = 0b111101,
+            MScratchC = 0b111110,
+            MEPCC = 0b111111,
+        ],
+        // Top bit indicates CSR vs GPR
+        cap_idx_type OFFSET(10) NUMBITS(1) [
+            GPR = 0,
+            CSR = 1,
+        ],
     ]
 ];
diff --git a/arch/riscv/src/csr/satp.rs b/arch/riscv/src/csr/satp.rs
new file mode 100644
index 000000000..33b5d9722
--- /dev/null
+++ b/arch/riscv/src/csr/satp.rs
@@ -0,0 +1,40 @@
+use kernel::utilities::registers::{register_bitfields, LocalRegisterCopy};
+
+// satp contains the root PTE
+
+#[cfg(target_arch = "riscv32")]
+register_bitfields![usize,
+    pub satp [
+        ppn OFFSET(0) NUMBITS(22) [],
+        asid OFFSET(22) NUMBITS(9) [],
+        mode OFFSET(31) NUMBITS(1) [
+            BARE = 0,
+            Sv32 = 1,
+        ]
+    ]
+];
+
+#[cfg(any(target_arch = "riscv64", not(target_os = "none")))]
+register_bitfields![usize,
+    pub satp [
+        ppn OFFSET(0) NUMBITS(44) [],
+        asid OFFSET(44) NUMBITS(16) [],
+        mode OFFSET(60) NUMBITS(4) [
+            BARE = 0,
+            Sv39 = 8,
+            Sv48 = 9,
+            Sv57 = 10,
+            Sv64 = 11,
+        ],
+    ]
+];
+
+trait PPNAddr {
+    fn get_ppn_as_addr(&self) -> usize;
+}
+
+impl PPNAddr for LocalRegisterCopy<usize, satp::Register> {
+    fn get_ppn_as_addr(&self) -> usize {
+        self.read(satp::ppn) << 12
+    }
+}
diff --git a/arch/riscv/src/lib.rs b/arch/riscv/src/lib.rs
index bac6438c9..b9f4f3e73 100644
--- a/arch/riscv/src/lib.rs
+++ b/arch/riscv/src/lib.rs
@@ -2,16 +2,768 @@
 #![crate_name = "riscv"]
 #![crate_type = "rlib"]
+#![feature(asm_const, naked_functions, split_array)]
+#![feature(const_trait_impl, const_mut_refs, const_slice_split_at_mut)]
+#![feature(const_type_id)]
+#![feature(const_default_impls)]
+#![feature(macro_metavar_expr)]
 #![no_std]
 
 pub mod csr;
+pub mod plic;
+pub mod pmp;
+pub mod support;
+pub mod syscall;
+
+// Is there a case when sizeof(usize) is not XLEN?
+// There is some confusion in the code base as to which one to use.
 
 #[cfg(target_arch = "riscv32")]
 pub const XLEN: usize = 32;
-#[cfg(target_arch = "riscv64")]
+#[cfg(any(target_arch = "riscv64", not(target_os = "none")))]
 pub const XLEN: usize = 64;
 
-// Default to 32 bit if no architecture is specified of if this is being
-// compiled for testing on a different architecture.
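A brief aside on the pmpcfg helpers added earlier in this hunk: the pmpYcfg -> pmpcfgX mapping is easy to get wrong, so here are the two variants as a standalone sketch (function names hypothetical) with worked cases:

```rust
// Worked examples of the pmpYcfg -> pmpcfgX mapping. On RV32, each pmpcfg
// register holds 4 pmpYcfg fields; on RV64 each holds 8 fields and only
// the even-numbered pmpcfg registers exist.
fn cfg_index_rv32(y: usize) -> usize {
    y / 4
}

fn cfg_index_rv64(y: usize) -> usize {
    (y / 4) & !1usize
}

fn main() {
    assert_eq!(cfg_index_rv32(5), 1); // pmp5cfg lives in pmpcfg1 on RV32
    assert_eq!(cfg_index_rv64(5), 0); // ...but in pmpcfg0 on RV64
    assert_eq!(cfg_index_rv64(9), 2); // pmp9cfg -> pmpcfg2 (pmpcfg1 absent)
}
```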
-#[cfg(not(any(target_arch = "riscv32", target_arch = "riscv64", target_os = "none")))] -pub const XLEN: usize = 32; +#[cfg(not(any( + target_arch = "riscv32", + target_arch = "riscv64", + not(target_os = "none") +)))] +compile_error!("No target architecture defined"); + +#[cfg(target_feature = "xcheri")] +pub const CLEN_BYTES: usize = 2 * XLEN; + +// CLEN_BYTES is not really defined on non CHERI. On non-CHERI this just means XLEN. +#[cfg(not(target_feature = "xcheri"))] +pub const CLEN_BYTES: usize = XLEN; + +use core::fmt::Write; + +use kernel::utilities::registers::interfaces::{Readable, Writeable}; + +extern "C" { + // Where the end of the stack region is (and hence where the stack should + // start). + static _estack: usize; + + // Boundaries of the .bss section. + static mut _szero: usize; + static mut _ezero: usize; + + // Where the .data section is stored in flash. + static mut _etext: usize; + + // Boundaries of the .data section. + static mut _srelocate: usize; + static mut _erelocate: usize; + + // The global pointer, value set in the linker script + static __global_pointer: usize; +} + +/// Entry point of all programs (`_start`). +/// +/// This assembly does three functions: +/// +/// 1. It initializes the stack pointer, the frame pointer (needed for closures +/// to work in start_rust) and the global pointer. +/// 2. It initializes the .bss and .data RAM segments. This must be done before +/// any Rust code runs. See for more +/// information. +/// 3. Finally it calls `main()`, the main entry point for Tock boards. +#[cfg(target_os = "none")] +#[link_section = ".riscv.start"] +#[export_name = "_start"] +#[naked] +pub extern "C" fn _start() { + use kernel::*; + unsafe { + kernel::easm! (" + // Set the global pointer register using the variable defined in the + // linker script. This register is only set once. The global pointer + // is a method for sharing state between the linker and the CPU so + // that the linker can emit code with offsets that are relative to + // the gp register, and the CPU can successfully execute them. + // + // https://gnu-mcu-eclipse.github.io/arch/riscv/programmer/#the-gp-global-pointer-register + // https://groups.google.com/a/groups.riscv.org/forum/#!msg/sw-dev/60IdaZj27dY/5MydPLnHAQAJ + // https://www.sifive.com/blog/2017/08/28/all-aboard-part-3-linker-relaxation-in-riscv-toolchain/ + // Likely not a good idea to allow the linker to use global pointer to derive global pointer + .option push + .option norelax + la gp, {gp}$ // Set the global pointer. Value set in linker script. + .option pop + // Initialize the stack pointer register. This comes directly from + // the linker script. + la sp, {estack} + + // Set s0 (the frame pointer) to the start of the stack. + add s0, sp, zero + + // Initialize mscratch to 0 so that we know that we are currently + // in the kernel. This is used for the check in the trap handler. + csrw 0x340, zero // CSR=0x340=mscratch + + // INITIALIZE MEMORY + + // Start by initializing .bss memory. The Tock linker script defines + // `_szero` and `_ezero` to mark the .bss segment. + la a0, {sbss} // a0 = first address of .bss + la a1, {ebss} // a1 = first address after .bss + + 100: // bss_init_loop + beq a0, a1, 101f // If a0 == a1, we are done.", + ; stx!() " zero, 0(a0) // *a0 = 0. Write 0 to the memory location in a0. + addi a0, a0, {XLEN_BYTES} // a0 = a0 + XLEN_BYTES. Increment pointer to next word. + j 100b // Continue the loop. + + 101: // bss_init_done + + + // Now initialize .data memory. 
This involves copying the values right at the
+    // end of the .text section (in flash) into the .data section (in RAM).
+    la a0, {sdata} // a0 = first address of data section in RAM
+    la a1, {edata} // a1 = first address after data section in RAM
+    la a2, {etext} // a2 = address of stored data initial values
+
+  200: // data_init_loop
+    beq a0, a1, 201f // If we have reached the end of the .data
+                     // section then we are done.",
+    ; ldx!() " a3, 0(a2) // a3 = *a2. Load value from initial values into a3.
+    " stx!() " a3, 0(a0) // *a0 = a3. Store initial value into
+                         // next place in .data.
+    addi a0, a0, {XLEN_BYTES} // a0 = a0 + XLEN_BYTES. Increment to next word in memory.
+    addi a2, a2, {XLEN_BYTES} // a2 = a2 + XLEN_BYTES. Increment to next word in flash.
+    j 200b // Continue the loop.
+
+  201: // data_init_done
+
+    // With that initial setup out of the way, we now branch to the main
+    // code, likely defined in a board's main.rs.
+    j main
+    ",
+    gp = sym __global_pointer,
+    estack = sym _estack,
+    sbss = sym _szero,
+    ebss = sym _ezero,
+    sdata = sym _srelocate,
+    edata = sym _erelocate,
+    etext = sym _etext,
+    XLEN_BYTES = const XLEN / 8,
+    options(noreturn)
+    );
+  }
+}
+
+/// The various privilege levels in RISC-V.
+pub enum PermissionMode {
+    User = 0x0,
+    Supervisor = 0x1,
+    Reserved = 0x2,
+    Machine = 0x3,
+}
+
+/// Tell the MCU what address the trap handler is located at.
+///
+/// This is a generic implementation. There may be board-specific versions as
+/// some platforms have added more bits to the `mtvec` register.
+///
+/// The trap handler is called on exceptions and for interrupts.
+pub unsafe fn configure_trap_handler(mode: PermissionMode) {
+    match mode {
+        PermissionMode::Machine => csr::CSR.mtvec.write(
+            csr::mtvec::mtvec::trap_addr.val(_start_trap as usize >> 2)
+                + csr::mtvec::mtvec::mode::CLEAR,
+        ),
+        PermissionMode::Supervisor => csr::CSR.stvec.write(
+            csr::stvec::stvec::trap_addr.val(_start_trap as usize >> 2)
+                + csr::stvec::stvec::mode::CLEAR,
+        ),
+        PermissionMode::User => csr::CSR.utvec.write(
+            csr::utvec::utvec::trap_addr.val(_start_trap as usize >> 2)
+                + csr::utvec::utvec::mode::CLEAR,
+        ),
+        PermissionMode::Reserved => (
+            // TODO some sort of error handling?
+        ),
+    }
+}
+
+// Mock implementation for tests on Travis-CI.
+#[cfg(not(any(target_os = "none")))]
+pub extern "C" fn _start_trap() {
+    unimplemented!()
+}
+
+/// This is the trap handler function. This code is called on all traps,
+/// including interrupts, exceptions, and system calls from applications.
+///
+/// Tock uses only the single trap handler, and does not use any vectored
+/// interrupts or other exception handling. The trap handler has to determine
+/// why the trap handler was called, and respond accordingly. Generally, there
+/// are two reasons the trap handler gets called: an interrupt occurred or an
+/// application called a syscall.
+///
+/// In the case of an interrupt while the kernel was executing we only need to
+/// save the kernel registers and then run whatever interrupt handling code we
+/// need to. If the trap happens while an application was executing, we have to
+/// save the application state and then resume the `switch_to()` function to
+/// correctly return to the kernel.
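As an aside, the mscratch discipline the doc comment above describes can be modelled in a few lines of hypothetical, illustrative Rust: the kernel parks 0 in mscratch while it runs (see `csrw 0x340, zero` in `_start`), and the kernel stack pointer while an app runs, so the trap handler can tell where it came from without clobbering any register:

```rust
// Sketch of the mscratch convention; not the patch's code.
enum TrapOrigin {
    Kernel,
    App { kernel_sp: usize },
}

fn classify(mscratch: usize) -> TrapOrigin {
    if mscratch == 0 {
        TrapOrigin::Kernel // kernel always keeps 0 in mscratch
    } else {
        TrapOrigin::App { kernel_sp: mscratch } // app: mscratch holds kernel sp
    }
}

fn main() {
    assert!(matches!(classify(0), TrapOrigin::Kernel));
    assert!(matches!(
        classify(0x8000_0000),
        TrapOrigin::App { kernel_sp: 0x8000_0000 }
    ));
}
```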
+#[cfg(all(target_os = "none"))] +#[link_section = ".riscv.trap"] +#[export_name = "_start_trap"] +#[naked] +pub extern "C" fn _start_trap() { + use kernel::*; + unsafe { + kernel::easm!( + " + // The first thing we have to do is determine if we came from user + // mode or kernel mode, as we need to save state and proceed + // differently. We cannot, however, use any registers because we do + // not want to lose their contents. So, we rely on `mscratch`. If + // mscratch is 0, then we came from the kernel. If it is >0, then it + // contains the kernel's stack pointer and we came from an app. + // + // We use the csrrw instruction to save the current stack pointer + // so we can retrieve it if necessary. + // + // If we could enter this trap handler twice (for example, + // handling an interrupt while an exception is being + // handled), storing a non-zero value in mscratch + // temporarily could cause a race condition similar to the + // one of PR 2308[1]. + // However, as indicated in section 3.1.6.1 of the RISC-V + // Privileged Spec[2], MIE will be set to 0 when taking a + // trap into machine mode. Therefore, this can only happen + // when causing an exception in the trap handler itself. + // + // [1] https://github.com/tock/tock/pull/2308 + // [2] https://github.com/riscv/riscv-isa-manual/releases/download/draft-20201222-42dc13a/riscv-privileged.pdf + // Even though this may be a hybrid kernel, we still need to use mcscratch + // in case ths user cared about their CSP", + csr_op!("sp" <- "mscratch" <- "sp"), + "bnez sp, 300f // If sp != 0 then we must have come from an app. + + + // _from_kernel: + // Swap back the zero value for the stack pointer in mscratch", + csr_op!("sp" <- "mscratch" <- "sp"), + "// Now, since we want to use the stack to save kernel registers, we + // first need to make sure that the trap wasn't the result of a + // stack overflow, in which case we can't use the current stack + // pointer. We also, however, cannot modify any of the current + // registers until we save them, and we cannot save them to the + // stack until we know the stack is valid. So, we use the mscratch + // trick again to get one register we can use. + + // Save t0's contents to mscratch", + csr_op!("mscratch" <- "t0"), + "// Load the address of the bottom of the stack (`_sstack`) into our + // newly freed-up t0 register. + .type _sstack, object + la t0, _sstack + + // Compare the kernel stack pointer to the bottom of the stack. If + // the stack pointer is above the bottom of the stack, then continue + // handling the fault as normal. + bgtu sp, t0, 100f // branch if sp > t0 + + // If we get here, then we did encounter a stack overflow. We are + // going to panic at this point, but for that to work we need a + // valid stack to run the panic code. We do this by just starting + // over with the kernel stack and placing the stack pointer at the + // top of the original stack. + la sp, _estack + + + 100: // _from_kernel_continue + + // Restore t0, and make sure mscratch is set back to 0 (our flag + // tracking that the kernel is executing).", + "li t0, 0", + csr_op!("t0" <- "mscratch" <- "t0"),"// t0=mscratch, mscratch=0 + + // Make room for the caller saved registers we need to restore after + // running any trap handler code. 
+ addi sp, sp, -16*{CLEN_BYTES} + + // Save all of the caller saved registers.", + FOR_EACH("Reg" in ["ra", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7"] : + stptr!() ptrreg!() "\\()\\Reg, FOR_N*{CLEN_BYTES}(sp)" + ), " + + // Jump to board-specific trap handler code. Likely this was an + // interrupt and we want to disable a particular interrupt, but each + // board/chip can customize this as needed. + jal ra, _start_trap_rust_from_kernel + + // Restore the registers from the stack.", + FOR_EACH("Reg" in ["ra", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7"] : + ldptr!() ptrreg!() "\\()\\Reg, FOR_N*{CLEN_BYTES}(sp)" + ), " + + // Reset the stack pointer. + addi sp, sp, 16*{CLEN_BYTES} + + // mret returns from the trap handler. The PC is set to what is in + // mepc and execution proceeds from there. Since we did not modify + // mepc we will return to where the exception occurred. + mret + + + + // Handle entering the trap handler from an app differently. + 300: // _from_app + + // At this point all we know is that we entered the trap handler + // from an app. We don't know _why_ we got a trap, it could be from + // an interrupt, syscall, or fault (or maybe something else). + // Therefore we have to be very careful not to overwrite any + // registers before we have saved them. + // + // We ideally want to save registers in the per-process stored state + // struct. However, we don't have a pointer to that yet, and we need + // to use a temporary register to get that address. So, we save s0 + // to the kernel stack before we can it to the proper spot.", + + ; ".if " is_cheri!() " + // On CHERI, no loads and stores can happen until we switch DDC. + // Do a little juggle to swap mtdc and ddc (does not clobber ct0). + // When we have some registers spare we will restore mtdc + cspecialrw ct0, mtdc, ct0 + cspecialrw ct0, ddc, ct0 + cspecialrw ct0, mtdc, ct0 + .endif", + + ; stptr!() ptrreg!("s0") ", 0*{CLEN_BYTES}(sp) + // Ideally it would be better to save all of the app registers once + // we return back to the `switch_to_process()` code. However, we + // also potentially need to disable an interrupt in case the app was + // interrupted, so it is safer to just immediately save all of the + // app registers. + // + // We do this by retrieving the stored state pointer from the kernel + // stack and storing the necessary values in it.", + ; ldx!() " s0, 1*{CLEN_BYTES}(sp) // Load the stored state pointer into s0.", + ; stptr!() ptrregn!("1") ", 0*{CLEN_BYTES}(s0) // ra", + FOR_RANGE("regn" in 3 .. 32 : + ".if \\regn != 8 // s0 + " stptr!() ptrregn!() "\\()\\regn, (\\regn-1)*{CLEN_BYTES}(s0) + .endif" + ), + // Now retrieve the original value of s0 and save that as well.", + ;ldptr!() ptrreg!("t0") ", 0*{CLEN_BYTES}(sp)", + ;stptr!() ptrreg!("t0") ", 7*{CLEN_BYTES}(s0) // s0,fp + // We also need to store the app stack pointer, mcause, and mepc. We + // need to store mcause because we use that to determine why the app + // stopped executing and returned to the kernel. We store mepc + // because it is where we need to return to in the app at some + // point. 
We need to store mtval in case the app faulted and we need + // mtval to help with debugging.", + ; ".if " is_cheri!() " + // We now need to save the trapped DDC (which is in mtdc) + // and restore mtdc it to what it was for the next trap + cspecialr ct0, ddc + cspecialrw ct0, mtdc, ct0 + sc ct0, 32*{CLEN_BYTES}(s0) + .endif", + csr_op!("mscratch" -> "t0"), + ;stptr!() ptrreg!("t0") ", 1*{CLEN_BYTES}(s0) // Save the app sp to the stored state struct", + csr_op!("mepc" -> "t0"), + ;stptr!() ptrreg!("t0") ", 31*{CLEN_BYTES}(s0) // Save the PC to the stored state struct + csrr t0, 0x343 // CSR=0x343=mtval", + ;stx!() " t0, ({CAUSE_OFFSET} + {XLEN_BYTES})(s0) // Save mtval to the stored state struct + + // Save mcause last, as we depend on it being loaded in t0 below + csrr t0, 0x342 // CSR=0x342=mcause", + ;stx!() " t0, ({CAUSE_OFFSET})(s0) // Save mcause to the stored state struct, leave in t0 + + // Now we need to check if this was an interrupt, and if it was, + // then we need to disable the interrupt before returning from this + // trap handler so that it does not fire again. If mcause is greater + // than or equal to zero this was not an interrupt (i.e. the most + // significant bit is not 1). + bge t0, zero, 200f + // Copy mcause into a0 and then call the interrupt disable function. + mv a0, t0 + jal ra, _disable_interrupt_trap_rust_from_app + + 200: // _from_app_continue + // Now determine the address of _return_to_kernel and resume the + // context switching code. We need to load _return_to_kernel into + // mepc so we can use it to return to the context switch code.", + ;ldptr!() ptrreg!("t0") ", 2 * {CLEN_BYTES}(sp) // Load _return_to_kernel into t0.", + csr_op!("mepc" <- "t0"), + // Ensure that mscratch is 0. This makes sure that we know that on + // a future trap that we came from the kernel. + "li t0, 0", + csr_op!("mscratch" <- "t0")," + + // Need to set mstatus.MPP to 0b11 so that we stay in machine mode. + csrr t0, 0x300 // CSR=0x300=mstatus + li t1, 0x1800 // Load 0b11 to the MPP bits location in t1 + or t0, t0, t1 // Set the MPP bits to one + csrw 0x300, t0 // CSR=0x300=mstatus + + // Use mret to exit the trap handler and return to the context + // switching code. + mret + ", + CLEN_BYTES = const core::mem::size_of::(), + XLEN_BYTES = const XLEN / 8, + CAUSE_OFFSET = const crate::syscall::CAUSE_OFFSET, + options(noreturn) + ); + } +} + +/// RISC-V semihosting needs three exact instructions in uncompressed form. +/// +/// See https://github.com/riscv/riscv-semihosting-spec/blob/main/riscv-semihosting-spec.adoc#11-semihosting-trap-instruction-sequence +/// for more details on the three instructions. +/// +/// In order to work with semihosting we include the assembly here +/// where we are able to disable compressed instruction support. This +/// follows the example used in the Linux kernel: +/// https://elixir.bootlin.com/linux/v5.12.10/source/arch/riscv/include/asm/jump_label.h#L21 +/// as suggested by the RISC-V developers: +/// https://groups.google.com/a/groups.riscv.org/g/isa-dev/c/XKkYacERM04/m/CdpOcqtRAgAJ +#[cfg(all(target_os = "none"))] +pub unsafe fn semihost_command(command: usize, arg0: usize, arg1: usize) -> usize { + use core::arch::asm; + let res; + asm!( + " + .option push + .option norelax + .option norvc + slli x0, x0, 0x1f + ebreak + srai x0, x0, 7 + .option pop + ", + in("a0") command, + in("a1") arg0, + in("a2") arg1, + lateout("a0") res, + ); + res +} + +// Mock implementation for tests on Travis-CI. 
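Stepping back from the assembly for a moment: the `bge t0, zero, 200f` test in the handler above works because mcause's most significant bit is the `is_interrupt` flag (see the mcause bitfields earlier in this patch), so "not an interrupt" is just a signed comparison. A minimal sketch:

```rust
// mcause's top bit distinguishes interrupts from exceptions, so
// "mcause >= 0 as a signed integer" means "not an interrupt".
fn is_interrupt(mcause: usize) -> bool {
    (mcause as isize) < 0
}

fn main() {
    assert!(!is_interrupt(8)); // UserEnvCall: an exception
    let mtimer = (1usize << (usize::BITS - 1)) | 7; // machine timer encoding
    assert!(is_interrupt(mtimer));
}
```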
+#[cfg(not(any(target_os = "none")))] +pub unsafe fn semihost_command(_command: usize, _arg0: usize, _arg1: usize) -> usize { + unimplemented!() +} + +/// Print a readable string for an mcause reason. +pub unsafe fn print_mcause(mcval: csr::mcause::Trap, writer: &mut dyn Write) { + match mcval { + csr::mcause::Trap::Interrupt(interrupt) => match interrupt { + csr::mcause::Interrupt::UserSoft => { + let _ = writer.write_fmt(format_args!("User software interrupt")); + } + csr::mcause::Interrupt::SupervisorSoft => { + let _ = writer.write_fmt(format_args!("Supervisor software interrupt")); + } + csr::mcause::Interrupt::MachineSoft => { + let _ = writer.write_fmt(format_args!("Machine software interrupt")); + } + csr::mcause::Interrupt::UserTimer => { + let _ = writer.write_fmt(format_args!("User timer interrupt")); + } + csr::mcause::Interrupt::SupervisorTimer => { + let _ = writer.write_fmt(format_args!("Supervisor timer interrupt")); + } + csr::mcause::Interrupt::MachineTimer => { + let _ = writer.write_fmt(format_args!("Machine timer interrupt")); + } + csr::mcause::Interrupt::UserExternal => { + let _ = writer.write_fmt(format_args!("User external interrupt")); + } + csr::mcause::Interrupt::SupervisorExternal => { + let _ = writer.write_fmt(format_args!("Supervisor external interrupt")); + } + csr::mcause::Interrupt::MachineExternal => { + let _ = writer.write_fmt(format_args!("Machine external interrupt")); + } + csr::mcause::Interrupt::Unknown => { + let _ = writer.write_fmt(format_args!("Reserved/Unknown")); + } + }, + csr::mcause::Trap::Exception(exception) => match exception { + csr::mcause::Exception::InstructionMisaligned => { + let _ = writer.write_fmt(format_args!("Instruction access misaligned")); + } + csr::mcause::Exception::InstructionFault => { + let _ = writer.write_fmt(format_args!("Instruction access fault")); + } + csr::mcause::Exception::IllegalInstruction => { + let _ = writer.write_fmt(format_args!("Illegal instruction")); + } + csr::mcause::Exception::Breakpoint => { + let _ = writer.write_fmt(format_args!("Breakpoint")); + } + csr::mcause::Exception::LoadMisaligned => { + let _ = writer.write_fmt(format_args!("Load address misaligned")); + } + csr::mcause::Exception::LoadFault => { + let _ = writer.write_fmt(format_args!("Load access fault")); + } + csr::mcause::Exception::StoreMisaligned => { + let _ = writer.write_fmt(format_args!("Store/AMO address misaligned")); + } + csr::mcause::Exception::StoreFault => { + let _ = writer.write_fmt(format_args!("Store/AMO access fault")); + } + csr::mcause::Exception::UserEnvCall => { + let _ = writer.write_fmt(format_args!("Environment call from U-mode")); + } + csr::mcause::Exception::SupervisorEnvCall => { + let _ = writer.write_fmt(format_args!("Environment call from S-mode")); + } + csr::mcause::Exception::MachineEnvCall => { + let _ = writer.write_fmt(format_args!("Environment call from M-mode")); + } + csr::mcause::Exception::InstructionPageFault => { + let _ = writer.write_fmt(format_args!("Instruction page fault")); + } + csr::mcause::Exception::LoadPageFault => { + let _ = writer.write_fmt(format_args!("Load page fault")); + } + csr::mcause::Exception::StorePageFault => { + let _ = writer.write_fmt(format_args!("Store/AMO page fault")); + } + #[cfg(target_feature = "xcheri")] + csr::mcause::Exception::CHERIPageException => { + let _ = writer.write_fmt(format_args!("CHERI page exception")); + } + #[cfg(target_feature = "xcheri")] + csr::mcause::Exception::CHERIException => { + let _ = 
writer.write_fmt(format_args!("CHERI Exception")); + } + csr::mcause::Exception::Unknown => { + let _ = writer.write_fmt(format_args!("Reserved")); + } + }, + } +} + +/// Print a readable string for an mcause reason. +pub unsafe fn print_mtval(_mcval: csr::mcause::Trap, _mtval: usize, _writer: &mut dyn Write) { + // Cheri exceptions have more information in the xtval register + #[cfg(target_feature = "xcheri")] + { + use crate::csr::mtval::mtval; + if let csr::mcause::Trap::Exception(ex) = _mcval { + if ex == csr::mcause::Exception::CHERIPageException + || ex == csr::mcause::Exception::CHERIException + { + let mtval = + tock_registers::LocalRegisterCopy::::new(_mtval); + + let _ = _writer.write_fmt(format_args!("Cause = ",)); + + // Print CHERI cause + match mtval.read_as_enum(mtval::cause) { + Some(mtval::cause::Value::NONE) => { + let _ = _writer.write_fmt(format_args!("none")); + } + Some(mtval::cause::Value::LENGTH) => { + let _ = _writer.write_fmt(format_args!("length")); + } + Some(mtval::cause::Value::TAG) => { + let _ = _writer.write_fmt(format_args!("tag")); + } + Some(mtval::cause::Value::SEAL) => { + let _ = _writer.write_fmt(format_args!("seal")); + } + Some(mtval::cause::Value::TYPE) => { + let _ = _writer.write_fmt(format_args!("type")); + } + Some(mtval::cause::Value::PERM_SOFT) => { + let _ = _writer.write_fmt(format_args!("permit software defined")); + } + Some(mtval::cause::Value::REPRESENT) => { + let _ = _writer.write_fmt(format_args!("representability")); + } + Some(mtval::cause::Value::UNALIGNED) => { + let _ = _writer.write_fmt(format_args!("unaligned base")); + } + Some(mtval::cause::Value::GLOBAL) => { + let _ = _writer.write_fmt(format_args!("global")); + } + Some(mtval::cause::Value::PERM_EXECUTE) => { + let _ = _writer.write_fmt(format_args!("permit execute")); + } + Some(mtval::cause::Value::PERM_LOAD) => { + let _ = _writer.write_fmt(format_args!("permit load")); + } + Some(mtval::cause::Value::PERM_STORE) => { + let _ = _writer.write_fmt(format_args!("permit store")); + } + Some(mtval::cause::Value::PERM_LOAD_CAP) => { + let _ = _writer.write_fmt(format_args!("permit load cap")); + } + Some(mtval::cause::Value::PERM_STORE_CAP) => { + let _ = _writer.write_fmt(format_args!("permit store cap")); + } + Some(mtval::cause::Value::PERM_STORE_LOCAL_CAP) => { + let _ = _writer.write_fmt(format_args!("permit store local cap")); + } + Some(mtval::cause::Value::PERM_SEAL) => { + let _ = _writer.write_fmt(format_args!("permit seal")); + } + Some(mtval::cause::Value::PERM_ASR) => { + let _ = _writer.write_fmt(format_args!("permit access system registers")); + } + Some(mtval::cause::Value::PERM_CINVOKE) => { + let _ = _writer.write_fmt(format_args!("permit cinvoke")); + } + Some(mtval::cause::Value::PERM_CINVOKE_IDC) => { + let _ = _writer.write_fmt(format_args!("permit cinvoke IDC access")); + } + Some(mtval::cause::Value::PERM_UNSEAL) => { + let _ = _writer.write_fmt(format_args!("permit unseal")); + } + Some(mtval::cause::Value::PERM_SET_CID) => { + let _ = _writer.write_fmt(format_args!("permit set compartment ID")); + } + None => { + let _ = _writer.write_fmt(format_args!("invalid unknown")); + } + } + + let _ = _writer.write_fmt(format_args!(", reg = ",)); + + if let Some(mtval::cap_idx_type::Value::GPR) = + mtval.read_as_enum(mtval::cap_idx_type) + { + // Just print GPR as a number + let _ = _writer.write_fmt(format_args!("C{}", mtval.read(mtval::cap_idx))); + } else { + // CSRs have less formulaic names + match mtval.read_as_enum(mtval::cap_idx) { + 
Some(mtval::cap_idx::Value::PCC) => { + let _ = _writer.write_fmt(format_args!("PCC")); + } + Some(mtval::cap_idx::Value::DDC) => { + let _ = _writer.write_fmt(format_args!("DDC")); + } + Some(_) => { + let _ = _writer.write_fmt(format_args!("valid unknown")); + } + None => { + let _ = _writer.write_fmt(format_args!("invalid unknown")); + } + } + } + } + } + } +} + +/// Prints out RISCV machine state, including basic system registers +/// (mcause, mstatus, mepc, mtval, interrupt status). +pub unsafe fn print_riscv_state(writer: &mut dyn Write) { + let mcval: csr::mcause::Trap = core::convert::From::from(csr::CSR.mcause.extract()); + let _ = writer.write_fmt(format_args!("\r\n---| RISC-V Machine State |---\r\n")); + let _ = writer.write_fmt(format_args!("Last cause (mcause): ")); + print_mcause(mcval, writer); + let interrupt = csr::CSR.mcause.read(csr::mcause::mcause::is_interrupt); + let code = csr::CSR.mcause.read(csr::mcause::mcause::reason); + let _ = writer.write_fmt(format_args!( + " (interrupt={}, exception code={:#010X})", + interrupt, code + )); + let _ = writer.write_fmt(format_args!( + "\r\nLast value (mtval): {:#010X}\ + \r\n\ + \r\nSystem register dump:\ + \r\n mepc: {:#010X} mstatus: {:#010X}\ + \r\n mcycle: {:#010X} minstret: {:#010X}", + csr::CSR.mtval.get(), + csr::CSR.mepc.get(), + csr::CSR.mstatus.get(), + csr::CSR.mcycle.get(), + csr::CSR.minstret.get(), + )); + let mstatus = csr::CSR.mstatus.extract(); + let uie = mstatus.is_set(csr::mstatus::mstatus::uie); + let sie = mstatus.is_set(csr::mstatus::mstatus::sie); + let mie = mstatus.is_set(csr::mstatus::mstatus::mie); + let upie = mstatus.is_set(csr::mstatus::mstatus::upie); + let spie = mstatus.is_set(csr::mstatus::mstatus::spie); + let mpie = mstatus.is_set(csr::mstatus::mstatus::mpie); + let spp = mstatus.is_set(csr::mstatus::mstatus::spp); + let mpp = mstatus.read(csr::mstatus::mstatus::mpp); + let _ = writer.write_fmt(format_args!( + "\r\n mstatus: {:#010X}\ + \r\n uie: {:5} upie: {}\ + \r\n sie: {:5} spie: {}\ + \r\n mie: {:5} mpie: {}\ + \r\n spp: {}\ + \r\n mpp: {}", + mstatus.get(), + uie, + upie, + sie, + spie, + mie, + mpie, + spp, + mpp, + )); + let e_usoft = csr::CSR.mie.is_set(csr::mie::mie::usoft); + let e_ssoft = csr::CSR.mie.is_set(csr::mie::mie::ssoft); + let e_msoft = csr::CSR.mie.is_set(csr::mie::mie::msoft); + let e_utimer = csr::CSR.mie.is_set(csr::mie::mie::utimer); + let e_stimer = csr::CSR.mie.is_set(csr::mie::mie::stimer); + let e_mtimer = csr::CSR.mie.is_set(csr::mie::mie::mtimer); + let e_uext = csr::CSR.mie.is_set(csr::mie::mie::uext); + let e_sext = csr::CSR.mie.is_set(csr::mie::mie::sext); + let e_mext = csr::CSR.mie.is_set(csr::mie::mie::mext); + + let p_usoft = csr::CSR.mip.is_set(csr::mip::mip::usoft); + let p_ssoft = csr::CSR.mip.is_set(csr::mip::mip::ssoft); + let p_msoft = csr::CSR.mip.is_set(csr::mip::mip::msoft); + let p_utimer = csr::CSR.mip.is_set(csr::mip::mip::utimer); + let p_stimer = csr::CSR.mip.is_set(csr::mip::mip::stimer); + let p_mtimer = csr::CSR.mip.is_set(csr::mip::mip::mtimer); + let p_uext = csr::CSR.mip.is_set(csr::mip::mip::uext); + let p_sext = csr::CSR.mip.is_set(csr::mip::mip::sext); + let p_mext = csr::CSR.mip.is_set(csr::mip::mip::mext); + let _ = writer.write_fmt(format_args!( + "\r\n mie: {:#010X} mip: {:#010X}\ + \r\n usoft: {:6} {:6}\ + \r\n ssoft: {:6} {:6}\ + \r\n msoft: {:6} {:6}\ + \r\n utimer: {:6} {:6}\ + \r\n stimer: {:6} {:6}\ + \r\n mtimer: {:6} {:6}\ + \r\n uext: {:6} {:6}\ + \r\n sext: {:6} {:6}\ + \r\n mext: {:6} {:6}\r\n", + 
csr::CSR.mie.get(), + csr::CSR.mip.get(), + e_usoft, + p_usoft, + e_ssoft, + p_ssoft, + e_msoft, + p_msoft, + e_utimer, + p_utimer, + e_stimer, + p_stimer, + e_mtimer, + p_mtimer, + e_uext, + p_uext, + e_sext, + p_sext, + e_mext, + p_mext + )); +} diff --git a/arch/riscv/src/plic.rs b/arch/riscv/src/plic.rs new file mode 100644 index 000000000..5de57802d --- /dev/null +++ b/arch/riscv/src/plic.rs @@ -0,0 +1,170 @@ +//! Platform Level Interrupt Control peripheral driver. + +// TODO I found several copies of this. Hopefully this can become canonical, +// I have written it to be a superset. + +use crate::csr; +use core::cell::Cell; +use kernel::utilities::registers::ReadWrite; +use kernel::utilities::StaticRef; +use kernel::very_simple_component; +use tock_registers::fields::FieldValue; +use tock_registers::interfaces::{ReadWriteable, Readable, Writeable}; +use tock_registers::{register_bitfields, register_structs}; + +// Can actually be fewer, we don't use priorities for anything. +pub const N_PRIO_BITS: u32 = 3; + +register_structs! { + pub PlicRegisters test_defaults<1,1> { + /// Interrupt Priority Registers + (0x000 => pad priority: [ReadWrite; N_IRQS]), + /// Interrupt Pending Register + (0x1000 => pad pending: [ReadWrite; PLIC_REGS]), + /// Interrupt Enable Register + (0x2000 => pad enable: [ReadWrite; PLIC_REGS]), + /// Priority Threshold Register + (0x200000 => threshold: ReadWrite), + /// Claim/Complete Register + (0x200004 => claim: ReadWrite), + (0x200008 => _reserved3), + /// MSIP Register + (0x4000000 => msip: ReadWrite), + (0x4000004 => _reserved4), + (0x4004000 => alert_test: ReadWrite), + (0x4004004 => @END), + } +} + +register_bitfields![u32, + priority [ + Priority OFFSET(0) NUMBITS(crate::plic::N_PRIO_BITS) [] + ] +]; + +// 32 IRQs per register (need to round up) +pub struct Plic { + registers: StaticRef>, + pending: Cell, +} + +pub const fn plic_regs_for_n_irqs(n_irqs: usize) -> usize { + (n_irqs + 31) / 32 +} + +impl Plic { + pub const fn new(base: StaticRef>) -> Self { + Plic { + registers: base, + pending: Cell::new(false), + } + } + + pub fn split_index(index: u32) -> (usize, u32) { + let res = ((index / 32) as usize, index % 32); + if res.0 >= PLIC_REGS { + panic!("Invalid IRQ: {}", index); + } + res + } + + /// Clear all pending interrupts. + pub fn clear_all_pending(&self) { + for pending in self.registers.pending.iter() { + pending.set(0); + } + } + + /// Enable all interrupts. + pub fn enable_all(&self) { + for enable in self.registers.enable.iter() { + enable.set(0xFFFF_FFFF); + } + + // Set some default priority for each interrupt. This is not really used + // at this point. We set here the maximum value. + for priority in self.registers.priority.iter() { + priority.write(priority::Priority::SET); + } + + // Accept all interrupts. + self.registers.threshold.write(priority::Priority.val(0)); + } + + /// Disable all interrupts. + pub fn disable_all(&self) { + for enable in self.registers.enable.iter() { + enable.set(0); + } + } + + /// Disable a specific interrupt + pub fn disable_interrupt(&self, index: u32) { + let (index, shift) = Self::split_index(index); + self.registers.enable[index].modify(FieldValue::::new(1, shift as usize, 0)); + } + + /// Save the current interrupt to be handled later + /// Interrupts must be disabled before this is called. + /// Saved interrupts can be retrieved by calling `get_saved_interrupts()`. + /// Saved interrupts are cleared when `'complete()` is called. 
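+    ///
+    /// A typical chip-level dispatch loop (editor's illustration, not part
+    /// of this patch) drains the saved interrupts and completes each one:
+    ///
+    ///     while let Some(index) = plic.get_saved_interrupts() {
+    ///         // ... service the peripheral for `index` ...
+    ///         unsafe { plic.complete(index) };
+    ///     }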
+ pub fn save_pending(&self) { + self.pending.set(true) + } + + pub fn has_pending(&self) -> bool { + self.pending.get() + } + + /// Get the next pending interrupt, if there are any + pub fn get_saved_interrupts(&self) -> Option { + let claim = self.registers.claim.get(); + if claim == 0 { + self.pending.set(false); + None + } else { + Some(claim) + } + } + + /// Mark the interrupt as handled (internally) without a signal to disable it + pub unsafe fn disable_and_complete(&self, index: u32) { + self.disable_interrupt(index); + self.complete(index); + } + + /// Signal that an interrupt is finished being handled. In Tock, this should be + /// called from the normal main loop (not the interrupt handler). + /// Interrupts must be disabled before this is called. + pub unsafe fn complete(&self, index: u32) { + self.registers.claim.set(index); + } + + /// This is a generic implementation. There may be board specific versions as + /// some platforms have added more bits to the `mtvec` register. + pub fn suppress_all(&self) { + // Accept all interrupts. + self.registers.threshold.write(priority::Priority.val(0)); + } + + pub fn fin(&self, enable_interrupts: bool) { + // Reset interrupts at PLIC level + self.disable_all(); + self.clear_all_pending(); + self.enable_all(); + + // Then enable interrupts globally if required + if enable_interrupts { + csr::CSR.mie.modify( + csr::mie::mie::mext::SET + csr::mie::mie::msoft::SET + csr::mie::mie::mtimer::SET, + ); + + csr::CSR.mstatus.modify(csr::mstatus::mstatus::mie::SET); + } + } +} + +very_simple_component!(impl<{const N_IRQS : usize, const PLIC_REGS : usize}> for Plic::, + new(StaticRef>), + fin(bool) +); diff --git a/arch/rv32i/src/pmp.rs b/arch/riscv/src/pmp.rs similarity index 74% rename from arch/rv32i/src/pmp.rs rename to arch/riscv/src/pmp.rs index 77e2ebab2..b16fdde0b 100644 --- a/arch/rv32i/src/pmp.rs +++ b/arch/riscv/src/pmp.rs @@ -10,15 +10,19 @@ //! PMP regions. use core::cell::Cell; -use core::cmp; use core::fmt; +use core::{cmp, mem}; use kernel::utilities::cells::OptionalCell; use crate::csr; use kernel::platform::mpu; +use kernel::platform::mpu::RemoveRegionResult; use kernel::utilities::cells::MapCell; use kernel::utilities::registers::{self, register_bitfields}; -use kernel::ProcessId; +use kernel::utilities::singleton_checker::SingletonChecker; +use kernel::ErrorCode::INVAL; +use kernel::{ErrorCode, ProcessId}; +use tock_registers::interfaces::Writeable; // Generic PMP config register_bitfields![u8, @@ -36,6 +40,44 @@ register_bitfields![u8, ] ]; +// This is to handle a QEMU bug. QEMU tries not to do a PMP check for every instruction, +// but when a PMP check is performed, the access size is not known because QEMU +// does not know how many instructions it will translate for one basic block. +// Instead, it just checks if an entire page worth could be translated. +// This requires us to over align to the size QEMU assumes is a page. +#[cfg(feature = "page_align_pmp")] +const PMP_ALIGN: usize = 0x1000; + +// I know PMP align is actually 4, but by making it 8 I can get rid of the whole "sizes less than +// 8 need to be rounded up to 8, but sizes greater than 8 to the nearest 4" nonsense. 
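 // (Editor's illustration, not part of the patch:) with the default
 // PMP_ALIGN = 8, `align_region` below rounds a region outward, e.g.:
 //
 //     let (mut start, mut size) = (0x1003usize, 9usize);
 //     align_region(&mut start, &mut size);
 //     assert_eq!((start, size), (0x1000, 16));
 //
 // One caveat: `align_up` computes `(0usize - *an_addr) % PMP_ALIGN`, which
 // relies on wrapping semantics and would panic in an overflow-checked
 // (debug) build whenever the address is non-zero; writing it as
 // `(*an_addr).wrapping_neg() % PMP_ALIGN` expresses the same rounding safely.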
+ +#[cfg(not(feature = "page_align_pmp"))] +const PMP_ALIGN: usize = 8; + +// Align down, return difference +fn align_down_diff(an_addr: &mut usize) -> usize { + let diff = *an_addr % PMP_ALIGN; + *an_addr -= diff; + diff +} + +fn align_up(an_addr: &mut usize) { + *an_addr = *an_addr + ((0usize - *an_addr) % PMP_ALIGN) +} + +fn align_region(start: &mut usize, size: &mut usize) { + // Region start round up + *size += align_down_diff(start); + + // Region size round up + align_up(size); + + // Regions must be at least 8 bytes + if *size < 8 { + *size = if 8 > PMP_ALIGN { 8 } else { PMP_ALIGN }; + } +} + /// Main PMP struct. /// /// Tock will ignore locked PMP regions. Note that Tock will not make any @@ -65,11 +107,66 @@ pub struct PMP { /// This is the total number of available regions. /// This will be between 0 and MAX_AVAILABLE_REGIONS_OVER_TWO * 2 depending /// on the hardware and previous boot stages. - num_regions: usize, + num_regions: Cell, +} + +fn set_pmp_region(region: &Option, with_index: usize) { + match region { + Some(r) => { + let cfg_val = r.cfg.value as usize; + let start = r.location.0 as usize; + let size = r.location.1; + + let disable_val = (csr::pmpconfig::pmpcfg::r0::CLEAR + + csr::pmpconfig::pmpcfg::w0::CLEAR + + csr::pmpconfig::pmpcfg::x0::CLEAR + + csr::pmpconfig::pmpcfg::a0::OFF) + .value; + let index = csr::CSR::pmp_index_to_cfg_index(2 * with_index); + // Second region has sub_index + 1 + let sub_index = csr::CSR::pmp_index_to_cfg_sub_index(2 * with_index); + let region_shift = sub_index * 8; + let other_region_mask: usize = !(0xFFFFusize << region_shift); + + csr::CSR.pmpaddr_set(with_index * 2, (start) >> 2); + csr::CSR.pmpaddr_set((with_index * 2) + 1, (start + size) >> 2); + + csr::CSR.pmpconfig_set( + index, + (disable_val | cfg_val << 8) << region_shift + | (csr::CSR.pmpconfig_get(index) & other_region_mask), + ); + } + None => {} + }; } impl PMP { + pub const fn const_new(chk: &mut SingletonChecker) -> Self { + kernel::assert_single!(chk); + Self { + last_configured_for: MapCell::empty(), + num_regions: Cell::new(0), + locked_region_mask: Cell::new(0), + } + } + + // Safety: singleton pub unsafe fn new() -> Self { + let pmp = Self { + last_configured_for: MapCell::empty(), + num_regions: Cell::new(0), + locked_region_mask: Cell::new(0), + }; + pmp.init(); + pmp + } + + pub fn init(&self) { + // This scan will clear the PMP so cannot be run if we are configured + if self.last_configured_for.is_some() { + return; + } // RISC-V PMP can support from 0 to 64 PMP regions // Let's figure out how many are supported. // We count any regions that are locked as unsupported @@ -77,15 +174,18 @@ impl PMP PMP 0 { + if pmpcfg_og & ((1 << 7) << shift) > 0 { // The bit is locked. 
Mark this regions as not usable locked_region_mask |= 1 << i; } else { @@ -108,14 +208,11 @@ impl PMP kernel::platform::mpu::MPU for PMP { type MpuConfig = PMPConfig; + const MIN_MPUALIGN: usize = PMP_ALIGN; fn clear_mpu(&self) { // We want to disable all of the hardware entries, so we use `NUM_REGIONS` here, @@ -299,10 +397,10 @@ impl kernel::platform::mpu::MPU for x in 1..(MAX_AVAILABLE_REGIONS_OVER_TWO * 2) { csr::CSR.pmpaddr_set(x, 0x0); } - for x in 1..(MAX_AVAILABLE_REGIONS_OVER_TWO * 2 / 4) { - csr::CSR.pmpconfig_set(x, 0); + for x in 1..(MAX_AVAILABLE_REGIONS_OVER_TWO * 2 / mem::size_of::()) { + csr::CSR.pmpconfig_set(x * (mem::size_of::() / 4), 0); } - csr::CSR.pmpaddr_set(0, 0xFFFF_FFFF); + csr::CSR.pmpaddr_set(0, usize::MAX); // enable R W X fields csr::CSR.pmpconfig_set( 0, @@ -324,7 +422,7 @@ impl kernel::platform::mpu::MPU } fn number_total_regions(&self) -> usize { - self.num_regions / 2 + self.num_regions.get() / 2 } fn allocate_region( @@ -352,20 +450,7 @@ impl kernel::platform::mpu::MPU let mut start = unallocated_memory_start as usize; let mut size = min_region_size; - // Region start always has to align to 4 bytes - if start % 4 != 0 { - start += 4 - (start % 4); - } - - // Region size always has to align to 4 bytes - if size % 4 != 0 { - size += 4 - (size % 4); - } - - // Regions must be at least 8 bytes - if size < 8 { - size = 8; - } + align_region(&mut start, &mut size); let region = PMPRegion::new(start as *const u8, size, permissions); @@ -379,22 +464,22 @@ impl kernel::platform::mpu::MPU &self, region: mpu::Region, config: &mut Self::MpuConfig, - ) -> Result<(), ()> { + ) -> Result { let (index, _r) = config .regions .iter() .enumerate() .find(|(_idx, r)| r.map_or(false, |r| r == region)) - .ok_or(())?; + .ok_or(INVAL)?; if config.is_index_locked_or_app(self.locked_region_mask.get(), index) { - return Err(()); + return Err(INVAL); } config.regions[index] = None; config.is_dirty.set(true); - Ok(()) + Ok(RemoveRegionResult::Sync) } fn allocate_app_memory_region( @@ -428,9 +513,8 @@ impl kernel::platform::mpu::MPU // App memory size is what we actual set the region to. So this region // has to be aligned to 4 bytes. let mut initial_app_memory_size: usize = initial_app_memory_size; - if initial_app_memory_size % 4 != 0 { - initial_app_memory_size += 4 - (initial_app_memory_size % 4); - } + + align_up(&mut initial_app_memory_size); // Make sure there is enough memory for app memory and kernel memory. let mut region_size = cmp::max( @@ -438,13 +522,10 @@ impl kernel::platform::mpu::MPU initial_app_memory_size + initial_kernel_memory_size, ) as usize; - // Region size always has to align to 4 bytes - if region_size % 4 != 0 { - region_size += 4 - (region_size % 4); - } - // The region should start as close as possible to the start of the unallocated memory. - let region_start = unallocated_memory_start as usize; + let mut region_start = unallocated_memory_start as usize; + + align_region(&mut region_start, &mut region_size); // Make sure the region fits in the unallocated memory. if region_start + region_size @@ -487,14 +568,20 @@ impl kernel::platform::mpu::MPU let app_memory_break = app_memory_break as usize; let kernel_memory_break = kernel_memory_break as usize; + let mut region_start = region_start as usize; + let mut region_size = app_memory_break - region_start as usize; + + // It is OK to be overly permissive in setting up the PMP as long as we do not cross the + // kernel memory break. This is mostly to fix the QEMU bug. 
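 // (Editor's illustration:) with the QEMU workaround enabled
 // (PMP_ALIGN = 0x1000), an aligned region start of 0x8000_0000 and an app
 // break of 0x8000_1234 round the region end up to 0x8000_2000; the
 // out-of-memory check below still rejects the update if that rounded
 // break crosses `kernel_memory_break`.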
+ align_region(&mut region_start, &mut region_size); + + let app_memory_break = region_start + region_size; + // Out of memory if app_memory_break > kernel_memory_break { return Err(()); } - // Get size of updated region - let region_size = app_memory_break - region_start as usize; - let region = PMPRegion::new(region_start as *const u8, region_size, permissions); config.regions[region_num] = Some(region); @@ -513,32 +600,7 @@ impl kernel::platform::mpu::MPU // configuration of this app has not changed. if !last_configured_for_this_app || config.is_dirty.get() { for (x, region) in config.regions.iter().enumerate() { - match region { - Some(r) => { - let cfg_val = r.cfg.value as usize; - let start = r.location.0 as usize; - let size = r.location.1; - - let disable_val = (csr::pmpconfig::pmpcfg::r0::CLEAR - + csr::pmpconfig::pmpcfg::w0::CLEAR - + csr::pmpconfig::pmpcfg::x0::CLEAR - + csr::pmpconfig::pmpcfg::a0::OFF) - .value; - let (region_shift, other_region_mask) = if x % 2 == 0 { - (0, 0xFFFF_0000) - } else { - (16, 0x0000_FFFF) - }; - csr::CSR.pmpconfig_set( - x / 2, - (disable_val | cfg_val << 8) << region_shift - | (csr::CSR.pmpconfig_get(x / 2) & other_region_mask), - ); - csr::CSR.pmpaddr_set(x * 2, (start) >> 2); - csr::CSR.pmpaddr_set((x * 2) + 1, (start + size) >> 2); - } - None => {} - }; + set_pmp_region(region, x); } config.is_dirty.set(false); self.last_configured_for.put(*app_id); @@ -579,22 +641,12 @@ impl kernel::platform::mpu::KernelM let mut start = memory_start as usize; let mut size = memory_size; - // Region start always has to align to 4 bytes - if start % 4 != 0 { - start += 4 - (start % 4); - } + align_region(&mut start, &mut size); - // Region size always has to align to 4 bytes - if size % 4 != 0 { - size += 4 - (size % 4); - } - - // Regions must be at least 8 bytes - if size < 8 { - size = 8; - } - - let region = PMPRegion::new(start as *const u8, size, permissions); + let mut region = PMPRegion::new(start as *const u8, size, permissions); + // Once set this should be locked. + // NOTE: Locking a TOR region also locks the one before it + region.cfg += pmpcfg::l::SET; config.regions[region_num] = Some(region); @@ -608,57 +660,29 @@ impl kernel::platform::mpu::KernelM fn enable_kernel_mpu(&self, config: &mut Self::KernelMpuConfig) { for (i, region) in config.regions.iter().rev().enumerate() { + // Kernel use the highest regions first as when we switch we use the lowest first + // FIXME: Should this not be num_regions? MAX_AVAILABLE_REGIONS_OVER_TWO may not be supported. 
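 // (Editor's illustration:) with MAX_AVAILABLE_REGIONS_OVER_TWO = 4,
 // i.e. eight pmpaddr entries forming four TOR pairs, kernel region i = 0
 // maps to x = 3 and lands in pmpaddr6/pmpaddr7, leaving the low pairs for
 // the per-process regions, which are installed from index 0 upward in
 // `configure_mpu()` above.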
let x = MAX_AVAILABLE_REGIONS_OVER_TWO - i - 1; - match region { - Some(r) => { - let cfg_val = r.cfg.value as usize; - let start = r.location.0 as usize; - let size = r.location.1; - - match x % 2 { - 0 => { - csr::CSR.pmpaddr_set((x * 2) + 1, (start + size) >> 2); - // Disable access up to the start address - csr::CSR.pmpconfig_modify( - x / 2, - csr::pmpconfig::pmpcfg::r0::CLEAR - + csr::pmpconfig::pmpcfg::w0::CLEAR - + csr::pmpconfig::pmpcfg::x0::CLEAR - + csr::pmpconfig::pmpcfg::a0::CLEAR, - ); - csr::CSR.pmpaddr_set(x * 2, start >> 2); - - // Set access to end address - csr::CSR - .pmpconfig_set(x / 2, cfg_val << 8 | csr::CSR.pmpconfig_get(x / 2)); - // Lock the CSR - csr::CSR.pmpconfig_modify(x / 2, csr::pmpconfig::pmpcfg::l1::SET); - } - 1 => { - csr::CSR.pmpaddr_set((x * 2) + 1, (start + size) >> 2); - // Disable access up to the start address - csr::CSR.pmpconfig_modify( - x / 2, - csr::pmpconfig::pmpcfg::r2::CLEAR - + csr::pmpconfig::pmpcfg::w2::CLEAR - + csr::pmpconfig::pmpcfg::x2::CLEAR - + csr::pmpconfig::pmpcfg::a2::CLEAR, - ); - csr::CSR.pmpaddr_set(x * 2, start >> 2); - - // Set access to end address - csr::CSR.pmpconfig_set( - x / 2, - cfg_val << 24 | csr::CSR.pmpconfig_get(x / 2), - ); - // Lock the CSR - csr::CSR.pmpconfig_modify(x / 2, csr::pmpconfig::pmpcfg::l3::SET); - } - _ => break, - } - } - None => {} - }; + set_pmp_region(®ion, x); } } } + +// If we have a PMP and are not using it, we still need to enable it or all accesses will fail +// It does raise the question why you are using, say, the MMU, rather than the PMP. Not enough PMP entries? +pub fn pmp_permit_all() { + // With NAPOT, the more ones, the bigger the range. + csr::CSR.pmpaddr0.set(usize::MAX); + csr::CSR.pmpcfg0.write( + csr::pmpconfig::pmpcfg::r0::SET + + csr::pmpconfig::pmpcfg::w0::SET + + csr::pmpconfig::pmpcfg::x0::SET + + csr::pmpconfig::pmpcfg::a0::NAPOT, + ); +} + +kernel::very_simple_component!( + impl<{const MAX_AVAILABLE_REGIONS_OVER_TWO: usize}> for PMP::, + const_new(&'a mut SingletonChecker), + init() +); diff --git a/arch/rv32i/src/support.rs b/arch/riscv/src/support.rs similarity index 56% rename from arch/rv32i/src/support.rs rename to arch/riscv/src/support.rs index f652e4d3d..c00f3d709 100644 --- a/arch/rv32i/src/support.rs +++ b/arch/riscv/src/support.rs @@ -2,8 +2,9 @@ use crate::csr::{mstatus::mstatus, CSR}; use core::ops::FnOnce; +use core::ptr::NonNull; -#[cfg(all(target_arch = "riscv32", target_os = "none"))] +#[cfg(all(target_os = "none"))] #[inline(always)] /// NOP instruction pub fn nop() { @@ -13,7 +14,7 @@ pub fn nop() { } } -#[cfg(all(target_arch = "riscv32", target_os = "none"))] +#[cfg(all(target_os = "none"))] #[inline(always)] /// WFI instruction pub unsafe fn wfi() { @@ -21,6 +22,27 @@ pub unsafe fn wfi() { asm!("wfi", options(nomem, nostack)); } +#[inline(always)] +/// sfence.vma instruction +pub fn sfence_vma() { + use core::arch::asm; + unsafe { + asm!("sfence.vma", options(nomem, nostack)); + } +} + +/// sfence.vma instruction with arguments to invalidate a single ASID +pub fn sfence_vma_asid(_asid: usize) { + // First argument is address, second is ASID. An argument with _register_ zero applies to all + // addresses / ASIDs. Another register with a _value_ of 0 will select the first page or + // ASID 0. 
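 // For example (editor's note): `sfence.vma x0, x0` flushes translations
 // for every address and every ASID, whereas the form below keeps rs1 as
 // the x0 *register* and passes the ASID by value in rs2, so it flushes
 // all addresses for exactly one ASID - even when that ASID's value is 0.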
+    #[cfg(target_os = "none")]
+    unsafe {
+        use core::arch::asm;
+        asm!("sfence.vma x0, {asid}", asid = in(reg) _asid, options(nomem, nostack));
+    }
+}
+
 pub unsafe fn atomic<F, R>(f: F) -> R
 where
     F: FnOnce() -> R,
@@ -47,14 +69,22 @@ where
 }
 
 // Mock implementations for tests on Travis-CI.
-#[cfg(not(any(target_arch = "riscv32", target_os = "none")))]
+#[cfg(not(any(target_os = "none")))]
 /// NOP instruction (mock)
 pub fn nop() {
     unimplemented!()
 }
 
-#[cfg(not(any(target_arch = "riscv32", target_os = "none")))]
+#[cfg(not(any(target_os = "none")))]
 /// WFI instruction (mock)
 pub unsafe fn wfi() {
     unimplemented!()
 }
+
+// TODO: Cache ops for RISC-V
+
+pub unsafe fn prepare_dma(_range: NonNull<[u8]>) {}
+
+pub unsafe fn finish_dma(_range: NonNull<[u8]>) {}
+
+pub fn executable_memory_changed(_range: NonNull<[u8]>) {}
diff --git a/arch/rv32i/src/syscall.rs b/arch/riscv/src/syscall.rs
similarity index 60%
rename from arch/rv32i/src/syscall.rs
rename to arch/riscv/src/syscall.rs
index 8dbfdd5ac..304d2ef2b 100644
--- a/arch/rv32i/src/syscall.rs
+++ b/arch/riscv/src/syscall.rs
@@ -1,38 +1,74 @@
 //! Kernel-userland system call interface for RISC-V architecture.
 
 use core::convert::TryInto;
-use core::fmt::Write;
+use core::fmt::{Display, Formatter, Write};
 use core::mem::size_of;
 use core::ops::Range;
 
 use crate::csr::mcause;
 use kernel;
+use kernel::cheri::*;
 use kernel::errorcode::ErrorCode;
 use kernel::syscall::ContextSwitchReason;
+// Sadly, the CHERI macros are not namespaced
+use kernel::utilities::singleton_checker::SingletonChecker;
+use kernel::*;
+
 /// This holds all of the state that the kernel must keep for the process when
 /// the process is not executing.
 #[derive(Default)]
 #[repr(C)]
-pub struct Riscv32iStoredState {
+pub struct RiscvStoredState {
     /// Store all of the app registers.
-    regs: [u32; 31],
+    regs: [cptr; 31],
 
     /// This holds the PC value of the app when the exception/syscall/interrupt
     /// occurred. We also use this to set the PC that the app should start
     /// executing at when it is resumed/started.
-    pc: u32,
+    pc: cptr,
+
+    /// This holds the default data capability. It is swapped with the kernel
+    /// DDC when we trap from an app.
+    #[cfg(target_feature = "xcheri")]
+    ddc: cptr,
 
     /// We need to store the mcause CSR between when the trap occurs and after
     /// we exit the trap handler and resume the context switching code.
-    mcause: u32,
+    mcause: usize,
 
     /// We need to store the mtval CSR for the process in case the mcause
     /// indicates a fault. In that case, the mtval contains useful debugging
    /// information.
-    mtval: u32,
+    mtval: usize,
+}
+
+pub struct DdcDisplay<'a> {
+    _state: &'a RiscvStoredState,
 }
 
+impl<'a> Display for DdcDisplay<'a> {
+    fn fmt(&self, _f: &mut Formatter<'_>) -> core::fmt::Result {
+        #[cfg(target_feature = "xcheri")]
+        {
+            return _f.write_fmt(format_args!("DDC: {:#010X}", self._state.ddc));
+        }
+        #[cfg(not(target_feature = "xcheri"))]
+        core::fmt::Result::Ok(())
+    }
+}
+
+impl RiscvStoredState {
+    pub fn get_ddc_display(&self) -> DdcDisplay {
+        DdcDisplay { _state: self }
+    }
+}
+
+// Because who would ever need offsetof? Hand-computed: every field before
+// mcause (31 regs + pc, plus ddc on CHERI) occupies one CLEN-sized slot.
+#[cfg(target_feature = "xcheri")]
+pub const CAUSE_OFFSET: usize = size_of::<cptr>() * 33;
+#[cfg(not(target_feature = "xcheri"))]
+pub const CAUSE_OFFSET: usize = size_of::<cptr>() * 32;
+
 // Named offsets into the stored state registers. These need to be kept in
 // sync with the register save logic in _start_trap() as well as the register
 // restore logic in switch_to_process() below.
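 // (Editor's aside, not part of the patch:) on toolchains with
 // `core::mem::offset_of!` (stable since Rust 1.77), the CAUSE_OFFSET layout
 // assumption above could be checked at compile time. A simplified non-CHERI
 // stand-in for the stored-state struct:
 //
 //     use core::mem::{offset_of, size_of};
 //
 //     #[repr(C)]
 //     struct Demo {
 //         regs: [usize; 31],
 //         pc: usize,
 //         mcause: usize,
 //         mtval: usize,
 //     }
 //
 //     // 31 regs + pc precede mcause, each one register-sized slot.
 //     const _: () = assert!(offset_of!(Demo, mcause) == size_of::<usize>() * 32);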
@@ -45,11 +81,16 @@ const R_A3: usize = 12; const R_A4: usize = 13; /// Values for encoding the stored state buffer in a binary slice. -const VERSION: u32 = 1; -const STORED_STATE_SIZE: u32 = size_of::() as u32; +const VERSION: usize = 1; +const STORED_STATE_SIZE: usize = size_of::() as usize; +#[cfg(any(target_arch = "riscv32"))] const TAG: [u8; 4] = [b'r', b'v', b'5', b'i']; +#[cfg(any(target_arch = "riscv64", not(target_os = "none")))] +const TAG: [u8; 8] = [b'r', b'v', b'5', b'i', b'r', b'v', b'5', b'i']; const METADATA_LEN: usize = 3; +// TODO: CHERI. This seems to be for swap or some such. Needs thinking about. + const VERSION_IDX: usize = 0; const SIZE_IDX: usize = 1; const TAG_IDX: usize = 2; @@ -59,14 +100,14 @@ const MTVAL_IDX: usize = 5; const REGS_IDX: usize = 6; const REGS_RANGE: Range = REGS_IDX..REGS_IDX + 31; -const U32_SZ: usize = size_of::(); -fn u32_byte_range(index: usize) -> Range { - index * U32_SZ..(index + 1) * U32_SZ +const USIZE_SZ: usize = size_of::(); +fn usize_byte_range(index: usize) -> Range { + index * USIZE_SZ..(index + 1) * USIZE_SZ } -fn u32_from_u8_slice(slice: &[u8], index: usize) -> Result { - let range = u32_byte_range(index); - Ok(u32::from_le_bytes( +fn usize_from_u8_slice(slice: &[u8], index: usize) -> Result { + let range = usize_byte_range(index); + Ok(usize::from_le_bytes( slice .get(range) .ok_or(ErrorCode::SIZE)? @@ -75,27 +116,29 @@ fn u32_from_u8_slice(slice: &[u8], index: usize) -> Result { )) } -fn write_u32_to_u8_slice(val: u32, slice: &mut [u8], index: usize) { - let range = u32_byte_range(index); +fn write_usize_to_u8_slice(val: usize, slice: &mut [u8], index: usize) { + let range = usize_byte_range(index); slice[range].copy_from_slice(&val.to_le_bytes()); } -impl core::convert::TryFrom<&[u8]> for Riscv32iStoredState { +impl core::convert::TryFrom<&[u8]> for RiscvStoredState { type Error = ErrorCode; - fn try_from(ss: &[u8]) -> Result { - if ss.len() == size_of::() + METADATA_LEN * U32_SZ - && u32_from_u8_slice(ss, VERSION_IDX)? == VERSION - && u32_from_u8_slice(ss, SIZE_IDX)? == STORED_STATE_SIZE - && u32_from_u8_slice(ss, TAG_IDX)? == u32::from_le_bytes(TAG) + fn try_from(ss: &[u8]) -> Result { + if ss.len() == size_of::() + METADATA_LEN * USIZE_SZ + && usize_from_u8_slice(ss, VERSION_IDX)? == VERSION + && usize_from_u8_slice(ss, SIZE_IDX)? == STORED_STATE_SIZE + && usize_from_u8_slice(ss, TAG_IDX)? == usize::from_le_bytes(TAG) { - let mut res = Riscv32iStoredState { - regs: [0; 31], - pc: u32_from_u8_slice(ss, PC_IDX)?, - mcause: u32_from_u8_slice(ss, MCAUSE_IDX)?, - mtval: u32_from_u8_slice(ss, MTVAL_IDX)?, + let mut res = RiscvStoredState { + regs: [0usize.into(); 31], + pc: (usize_from_u8_slice(ss, PC_IDX)? as usize).into(), + #[cfg(target_feature = "xcheri")] + ddc: 0usize.into(), + mcause: usize_from_u8_slice(ss, MCAUSE_IDX)?, + mtval: usize_from_u8_slice(ss, MTVAL_IDX)?, }; for (i, v) in (REGS_RANGE).enumerate() { - res.regs[i] = u32_from_u8_slice(ss, v)?; + res.regs[i] = (usize_from_u8_slice(ss, v)? 
as usize).into(); } Ok(res) } else { @@ -111,10 +154,16 @@ impl SysCall { pub const unsafe fn new() -> SysCall { SysCall(()) } + pub const fn const_new(chk: &mut SingletonChecker) -> Self { + assert_single!(chk); + Self(()) + } } +kernel::very_simple_component!(impl for SysCall, const_new(&'a mut SingletonChecker)); + impl kernel::syscall::UserspaceKernelBoundary for SysCall { - type StoredState = Riscv32iStoredState; + type StoredState = RiscvStoredState; fn initial_process_app_brk_size(&self) -> usize { // The RV32I UKB implementation does not use process memory for any @@ -131,20 +180,45 @@ impl kernel::syscall::UserspaceKernelBoundary for SysCall { state: &mut Self::StoredState, ) -> Result<(), ()> { // Need to clear the stored state when initializing. - state.regs.iter_mut().for_each(|x| *x = 0); - state.pc = 0; + state.regs.iter_mut().for_each(|x| *x = usize::into(0)); + // CHERI note: this PC cannot be executed. It will always be replaced with an initial fn. + state.pc = usize::into(0); + #[cfg(target_feature = "xcheri")] + { + let start = accessible_memory_start as usize; + + state + .ddc + .set_addr_from_ddc_restricted(start, start, (_app_brk as usize) - start); + } + state.mcause = 0; // The first time the process runs we need to set the initial stack // pointer in the sp register. // // We do not pre-allocate any stack for RV32I processes. - state.regs[R_SP] = accessible_memory_start as u32; + state.regs[R_SP] = usize::into(accessible_memory_start as usize); // We do not use memory for UKB, so just return ok. Ok(()) } + unsafe fn get_extra_syscall_arg( + &self, + ndx: usize, + _accessible_memory_start: *const u8, + _app_brk: *const u8, + state: &Self::StoredState, + ) -> Option { + // A4 was the last argument used by the standard syscall. We can get at least another 3, + // and then we might want to go to the stack. + if ndx >= 3 { + return None; + } + Some(state.regs[R_A4 + 1 + ndx].into()) + } + unsafe fn set_syscall_return_value( &self, _accessible_memory_start: *const u8, @@ -170,7 +244,9 @@ impl kernel::syscall::UserspaceKernelBoundary for SysCall { let (a1slice, r) = r.split_at_mut(R_A2 - R_A1); let (a2slice, a3slice) = r.split_at_mut(R_A3 - R_A2); - return_value.encode_syscall_return( + // Really we need to write out own version of this that zeros other bits + // Then the above ugly coerce would not be needed + return_value.encode_syscall_return_cptr( &mut a0slice[0], &mut a1slice[0], &mut a2slice[0], @@ -185,15 +261,15 @@ impl kernel::syscall::UserspaceKernelBoundary for SysCall { &self, _accessible_memory_start: *const u8, _app_brk: *const u8, - state: &mut Riscv32iStoredState, + state: &mut RiscvStoredState, callback: kernel::process::FunctionCall, ) -> Result<(), ()> { // Set the register state for the application when it starts // executing. These are the argument registers. - state.regs[R_A0] = callback.argument0 as u32; - state.regs[R_A1] = callback.argument1 as u32; - state.regs[R_A2] = callback.argument2 as u32; - state.regs[R_A3] = callback.argument3 as u32; + state.regs[R_A0] = callback.argument0.into(); + state.regs[R_A1] = callback.argument1.into(); + state.regs[R_A2] = callback.argument2.into(); + state.regs[R_A3] = callback.argument3.into(); // We also need to set the return address (ra) register so that the new // function that the process is running returns to the correct location. 
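 // (Editor's illustration, not part of the patch:) `get_extra_syscall_arg`
 // above maps index 0..=2 onto a5..a7, the registers directly after a4 in
 // `regs[]`; index 3 and beyond return None. With plain usize registers
 // standing in for `cptr`:
 //
 //     const R_A4: usize = 13;
 //
 //     fn extra_arg(regs: &[usize; 31], ndx: usize) -> Option<usize> {
 //         if ndx >= 3 {
 //             return None;
 //         }
 //         // a5 sits at R_A4 + 1, a6 at R_A4 + 2, a7 at R_A4 + 3.
 //         Some(regs[R_A4 + 1 + ndx])
 //     }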
@@ -201,21 +277,22 @@ impl kernel::syscall::UserspaceKernelBoundary for SysCall { // process is executing then `state.pc` is invalid/useless, but the // application must ignore it anyway since there is nothing logically // for it to return to. So this doesn't hurt anything. - state.regs[R_RA] = state.pc as u32; + state.regs[R_RA] = state.pc; // Save the PC we expect to execute. - state.pc = callback.pc as u32; + // On CHERI we are basically forcing a jump, so caller better have the correct bounds. + state.pc = callback.pc; Ok(()) } // Mock implementation for tests on Travis-CI. - #[cfg(not(any(target_arch = "riscv32", target_os = "none")))] + #[cfg(not(any(target_os = "none")))] unsafe fn switch_to_process( &self, _accessible_memory_start: *const u8, _app_brk: *const u8, - _state: &mut Riscv32iStoredState, + _state: &mut RiscvStoredState, ) -> (ContextSwitchReason, Option<*const u8>) { // Convince lint that 'mcause' and 'R_A4' are used during test build let _cause = mcause::Trap::from(_state.mcause as usize); @@ -223,14 +300,13 @@ impl kernel::syscall::UserspaceKernelBoundary for SysCall { unimplemented!() } - #[cfg(all(target_arch = "riscv32", target_os = "none"))] + #[cfg(all(target_os = "none"))] unsafe fn switch_to_process( &self, _accessible_memory_start: *const u8, _app_brk: *const u8, - state: &mut Riscv32iStoredState, + state: &mut RiscvStoredState, ) -> (ContextSwitchReason, Option<*const u8>) { - use core::arch::asm; // We need to ensure that the compiler does not reorder // kernel memory writes to after the userspace context switch // to ensure we provide a consistent memory view of @@ -241,7 +317,7 @@ impl kernel::syscall::UserspaceKernelBoundary for SysCall { // is not set, hence the compiler has to assume the assembly // will issue arbitrary memory accesses (acting as a compiler // fence). - asm!(" + kernel::easm!(" // Before switching to the app we need to save the kernel registers to // the kernel stack. We then save the stack pointer in the mscratch // CSR (0x340) so we can retrieve it after returning to the kernel @@ -252,77 +328,19 @@ impl kernel::syscall::UserspaceKernelBoundary for SysCall { // memory map to make it easier to keep track: // // ``` - // 34*4(sp): <- original stack pointer - // 33*4(sp): - // 32*4(sp): x31 - // 31*4(sp): x30 - // 30*4(sp): x29 - // 29*4(sp): x28 - // 28*4(sp): x27 - // 27*4(sp): x26 - // 26*4(sp): x25 - // 25*4(sp): x24 - // 24*4(sp): x23 - // 23*4(sp): x22 - // 22*4(sp): x21 - // 21*4(sp): x20 - // 20*4(sp): x19 - // 19*4(sp): x18 - // 18*4(sp): x17 - // 17*4(sp): x16 - // 16*4(sp): x15 - // 15*4(sp): x14 - // 14*4(sp): x13 - // 13*4(sp): x12 - // 12*4(sp): x11 - // 11*4(sp): x10 - // 10*4(sp): x9 - // 9*4(sp): x8 - // 8*4(sp): x7 - // 7*4(sp): x6 - // 6*4(sp): x5 - // 5*4(sp): x4 - // 4*4(sp): x3 - // 3*4(sp): x1 - // 2*4(sp): _return_to_kernel (100) (address to resume after trap) - // 1*4(sp): *state (Per-process StoredState struct) - // 0*4(sp): app s0 <- new stack pointer + // 34 * SZ(sp): <- original stack pointer + // 3..32 * SZ(sp) : saved registers + // 2 * SZ(sp): _return_to_kernel (100) (address to resume after trap) + // 1 * SZ(sp): *state (Per-process StoredState struct) + // 0 * SZ(sp): app s0 <- new stack pointer // ``` - addi sp, sp, -34*4 // Move the stack pointer down to make room. - - sw x1, 3*4(sp) // Save all of the registers on the kernel stack. 
- sw x3, 4*4(sp) - sw x4, 5*4(sp) - sw x5, 6*4(sp) - sw x6, 7*4(sp) - sw x7, 8*4(sp) - sw x8, 9*4(sp) - sw x9, 10*4(sp) - sw x10, 11*4(sp) - sw x11, 12*4(sp) - sw x12, 13*4(sp) - sw x13, 14*4(sp) - sw x14, 15*4(sp) - sw x15, 16*4(sp) - sw x16, 17*4(sp) - sw x17, 18*4(sp) - sw x18, 19*4(sp) - sw x19, 20*4(sp) - sw x20, 21*4(sp) - sw x21, 22*4(sp) - sw x22, 23*4(sp) - sw x23, 24*4(sp) - sw x24, 25*4(sp) - sw x25, 26*4(sp) - sw x26, 27*4(sp) - sw x27, 28*4(sp) - sw x28, 29*4(sp) - sw x29, 30*4(sp) - sw x30, 31*4(sp) - sw x31, 32*4(sp) - - sw a0, 1*4(sp) // Store process state pointer on stack as well. + addi sp, sp, -34*{CLEN_BYTES} // Move the stack pointer down to make room.", + ;stptr!() ptrregn!("1") ", 3*{CLEN_BYTES}(sp) // Save all of the registers on the kernel stack.", + FOR_RANGE("regn" in 3 .. 32 : + stptr!() ptrregn!() "\\()\\regn, (\\regn+1)*{CLEN_BYTES}(sp)" + ), + ; stx!() " a0, 1*{CLEN_BYTES}(sp) // Store process state pointer on stack as well. // We need to have this available for after the app // returns to the kernel so we can store its // registers. @@ -352,15 +370,16 @@ impl kernel::syscall::UserspaceKernelBoundary for SysCall { // Store the address to jump back to on the stack so that the trap // handler knows where to return to after the app stops executing. - // - // In asm!() we can't use the shorthand `li` pseudo-instruction, as it - // complains about _return_to_kernel (100) not being a constant in the - // required range. - lui t0, %hi(100f) - addi t0, t0, %lo(100f) - sw t0, 2*4(sp) - - csrw 0x340, sp // Save stack pointer in mscratch. This allows + + la t0, 100f", + ; ".if " is_cheri!() " + // On CHERI, we must add some bounds information + cspecialr ct1, pcc + csetaddr ct0, ct1, t0 + .endif + " stptr!() ptrreg!("t0") ", 2*{CLEN_BYTES}(sp)", + csr_op!("mscratch" <- "sp"), + " // Save stack pointer in mscratch. This allows // us to find it when the app returns back to // the kernel. @@ -368,46 +387,37 @@ impl kernel::syscall::UserspaceKernelBoundary for SysCall { // executing at. This has been saved in Riscv32iStoredState for us // (either when the app returned back to the kernel or in the // `set_process_function()` function). - lw t0, 31*4(a0) // Retrieve the PC from Riscv32iStoredState - csrw 0x341, t0 // Set mepc CSR. This is the PC we want to go to. + // Retrieve the PC from Riscv32iStoredState", + ; ldptr!() ptrreg!("t0") ", 31*{CLEN_BYTES}(a0)", + csr_op!("mepc" <- "t0"), "// Set mepc CSR. This is the PC we want to go to. // Restore all of the app registers from what we saved. If this is the // first time running the app then most of these values are // irrelevant, However we do need to set the four arguments to the // `_start_ function in the app. If the app has been executing then this // allows the app to correctly resume. - mv t0, a0 // Save the state pointer to a specific register. 
- lw x1, 0*4(t0) // ra - lw x2, 1*4(t0) // sp - lw x3, 2*4(t0) // gp - lw x4, 3*4(t0) // tp - lw x6, 5*4(t0) // t1 - lw x7, 6*4(t0) // t2 - lw x8, 7*4(t0) // s0,fp - lw x9, 8*4(t0) // s1 - lw x10, 9*4(t0) // a0 - lw x11, 10*4(t0) // a1 - lw x12, 11*4(t0) // a2 - lw x13, 12*4(t0) // a3 - lw x14, 13*4(t0) // a4 - lw x15, 14*4(t0) // a5 - lw x16, 15*4(t0) // a6 - lw x17, 16*4(t0) // a7 - lw x18, 17*4(t0) // s2 - lw x19, 18*4(t0) // s3 - lw x20, 19*4(t0) // s4 - lw x21, 20*4(t0) // s5 - lw x22, 21*4(t0) // s6 - lw x23, 22*4(t0) // s7 - lw x24, 23*4(t0) // s8 - lw x25, 24*4(t0) // s9 - lw x26, 25*4(t0) // s10 - lw x27, 26*4(t0) // s11 - lw x28, 27*4(t0) // t3 - lw x29, 28*4(t0) // t4 - lw x30, 29*4(t0) // t5 - lw x31, 30*4(t0) // t6 - lw x5, 4*4(t0) // t0. Do last since we overwrite our pointer. + mv t0, a0 // Save the state pointer to a specific register.", + FOR_RANGE("regn" in 1 .. 32 : + ".if \\regn != 5 + " ldptr!() ptrregn!() "\\()\\regn, (\\regn-1)*{CLEN_BYTES}(t0) + .endif" + ), + ; ".if " is_cheri!() " + // Load processes DDC. We cannot restore it before the last load has happened. + // We can use mtdc as a scratch register (have it hold ct1), so ct1 can hold ddc. + // DDC should currently hold the kernel DDC, which should eventually go in mtdc + cspecialw mtdc, ct1 + " ldptr!() ptrreg!("t1") ", 32*{CLEN_BYTES}(t0) + .endif + " ldptr!() ptrregn!(5) ", 4*{CLEN_BYTES}(t0) // t0. Do last since we overwrite our pointer. + .if " is_cheri!() " + // Currently: + // mtdc holds ct1 + // ct1 holds ddc + // ddc holds mdtc + cspecialrw ct1, ddc, ct1 + cspecialrw ct1, mtdc, ct1 + .endif // Call mret to jump to where mepc points, switch to user mode, and // start running the app. @@ -421,46 +431,20 @@ impl kernel::syscall::UserspaceKernelBoundary for SysCall { 100: // _return_to_kernel // We have already stored the app registers in the trap handler. We - // can restore the kernel registers before resuming kernel code. - lw x1, 3*4(sp) - lw x3, 4*4(sp) - lw x4, 5*4(sp) - lw x5, 6*4(sp) - lw x6, 7*4(sp) - lw x7, 8*4(sp) - lw x8, 9*4(sp) - lw x9, 10*4(sp) - lw x10, 11*4(sp) - lw x11, 12*4(sp) - lw x12, 13*4(sp) - lw x13, 14*4(sp) - lw x14, 15*4(sp) - lw x15, 16*4(sp) - lw x16, 17*4(sp) - lw x17, 18*4(sp) - lw x18, 19*4(sp) - lw x19, 20*4(sp) - lw x20, 21*4(sp) - lw x21, 22*4(sp) - lw x22, 23*4(sp) - lw x23, 24*4(sp) - lw x24, 25*4(sp) - lw x25, 26*4(sp) - lw x26, 27*4(sp) - lw x27, 28*4(sp) - lw x28, 29*4(sp) - lw x29, 30*4(sp) - lw x30, 31*4(sp) - lw x31, 32*4(sp) - - addi sp, sp, 34*4 // Reset kernel stack pointer - ", + // can restore the kernel registers before resuming kernel code.", + ; ldptr!() ptrregn!(1) ", 3*{CLEN_BYTES}(sp)", + FOR_RANGE("regn" in 3 .. 
32 : + ldptr!() ptrregn!() "\\()\\regn, (\\regn+1)*{CLEN_BYTES}(sp)" + ), + + "addi sp, sp, 34*{CLEN_BYTES} // Reset kernel stack pointer", // The register to put the state struct pointer in is not // particularly relevant, however we must avoid using t0 // as that is overwritten prior to being accessed // (although stored and later restored) in the assembly - in("a0") state as *mut Riscv32iStoredState, + CLEN_BYTES = const size_of::(), + in("a0") state as *mut RiscvStoredState, ); let ret = match mcause::Trap::from(state.mcause as usize) { @@ -478,11 +462,11 @@ impl kernel::syscall::UserspaceKernelBoundary for SysCall { state.pc += 4; let syscall = kernel::syscall::Syscall::from_register_arguments( - state.regs[R_A4] as u8, - state.regs[R_A0] as usize, - state.regs[R_A1] as usize, - state.regs[R_A2] as usize, - state.regs[R_A3] as usize, + usize::from(state.regs[R_A4]) as u8, + usize::from(state.regs[R_A0]), + state.regs[R_A1], + state.regs[R_A2], + state.regs[R_A3], ); match syscall { @@ -498,14 +482,14 @@ impl kernel::syscall::UserspaceKernelBoundary for SysCall { } }; let new_stack_pointer = state.regs[R_SP]; - (ret, Some(new_stack_pointer as *const u8)) + (ret, Some(new_stack_pointer.as_ptr() as *const u8)) } unsafe fn print_context( &self, _accessible_memory_start: *const u8, _app_brk: *const u8, - state: &Riscv32iStoredState, + state: &RiscvStoredState, writer: &mut dyn Write, ) { let _ = writer.write_fmt(format_args!( @@ -526,10 +510,10 @@ impl kernel::syscall::UserspaceKernelBoundary for SysCall { \r\n R13: {:#010X} R29: {:#010X}\ \r\n R14: {:#010X} R30: {:#010X}\ \r\n R15: {:#010X} R31: {:#010X}\ - \r\n PC : {:#010X}\ + \r\n PC : {:#010X} {} \ \r\n\ \r\n mcause: {:#010X} (", - 0, + >::from(0usize), state.regs[15], state.regs[0], state.regs[16], @@ -562,35 +546,34 @@ impl kernel::syscall::UserspaceKernelBoundary for SysCall { state.regs[14], state.regs[30], state.pc, + state.get_ddc_display(), state.mcause, )); - crate::print_mcause(mcause::Trap::from(state.mcause as usize), writer); + let cause = mcause::Trap::from(state.mcause as usize); + crate::print_mcause(cause, writer); let _ = writer.write_fmt(format_args!( ")\ - \r\n mtval: {:#010X}\ - \r\n\r\n", + \r\n mtval: {:#010X} (", state.mtval, )); + crate::print_mtval(cause, state.mtval, writer); + let _ = writer.write_fmt(format_args!(")\r\n\r\n",)); } - fn store_context( - &self, - state: &Riscv32iStoredState, - out: &mut [u8], - ) -> Result { - const U32_SZ: usize = size_of::(); - if out.len() >= size_of::() + METADATA_LEN * U32_SZ { - write_u32_to_u8_slice(VERSION, out, VERSION_IDX); - write_u32_to_u8_slice(STORED_STATE_SIZE, out, SIZE_IDX); - write_u32_to_u8_slice(u32::from_le_bytes(TAG), out, TAG_IDX); - write_u32_to_u8_slice(state.pc, out, PC_IDX); - write_u32_to_u8_slice(state.mcause, out, MCAUSE_IDX); - write_u32_to_u8_slice(state.mtval, out, MTVAL_IDX); + fn store_context(&self, state: &RiscvStoredState, out: &mut [u8]) -> Result { + const USIZE_SZ: usize = size_of::(); + if out.len() >= size_of::() + METADATA_LEN * USIZE_SZ { + write_usize_to_u8_slice(VERSION, out, VERSION_IDX); + write_usize_to_u8_slice(STORED_STATE_SIZE, out, SIZE_IDX); + write_usize_to_u8_slice(usize::from_le_bytes(TAG), out, TAG_IDX); + write_usize_to_u8_slice(usize::from(state.pc), out, PC_IDX); + write_usize_to_u8_slice(state.mcause, out, MCAUSE_IDX); + write_usize_to_u8_slice(state.mtval, out, MTVAL_IDX); for (i, v) in state.regs.iter().enumerate() { - write_u32_to_u8_slice(*v, out, REGS_IDX + i); + write_usize_to_u8_slice(usize::from(*v), out, 
REGS_IDX + i); } // +3 for pc, mcause, mtval - Ok((state.regs.len() + 3 + METADATA_LEN) * U32_SZ) + Ok((state.regs.len() + 3 + METADATA_LEN) * USIZE_SZ) } else { Err(ErrorCode::SIZE) } diff --git a/arch/rv32i/src/epmp.rs b/arch/rv32i/src/epmp.rs index 45132b04e..bad92cec9 100644 --- a/arch/rv32i/src/epmp.rs +++ b/arch/rv32i/src/epmp.rs @@ -13,10 +13,12 @@ use crate::csr; use core::cell::Cell; use core::{cmp, fmt}; use kernel::platform::mpu; +use kernel::platform::mpu::RemoveRegionResult; use kernel::utilities::cells::{MapCell, OptionalCell}; use kernel::utilities::registers::interfaces::ReadWriteable; use kernel::utilities::registers::{self, register_bitfields}; -use kernel::ProcessId; +use kernel::ErrorCode::INVAL; +use kernel::{ErrorCode, ProcessId}; // Generic PMP config register_bitfields![u8, @@ -410,6 +412,8 @@ impl kernel::platform::mpu::MPU { type MpuConfig = PMPConfig; + const MIN_MPUALIGN: usize = 4; + fn clear_mpu(&self) { // We want to disable all of the hardware entries, so we use `NUM_REGIONS` here, // and not `NUM_REGIONS / 2`. @@ -515,22 +519,22 @@ impl kernel::platform::mpu::MPU &self, region: mpu::Region, config: &mut Self::MpuConfig, - ) -> Result<(), ()> { + ) -> Result { let (index, _r) = config .regions .iter() .enumerate() .find(|(_idx, r)| r.map_or(false, |r| r == region)) - .ok_or(())?; + .ok_or(INVAL)?; if config.is_index_locked_or_app(self.locked_region_mask.get(), index) { - return Err(()); + return Err(ErrorCode::INVAL); } config.regions[index] = None; config.is_dirty.set(true); - Ok(()) + Ok(RemoveRegionResult::Sync) } fn allocate_app_memory_region( diff --git a/arch/rv32i/src/lib.rs b/arch/rv32i/src/lib.rs index 7639fd38b..6fb18b1ec 100644 --- a/arch/rv32i/src/lib.rs +++ b/arch/rv32i/src/lib.rs @@ -2,654 +2,21 @@ #![crate_name = "rv32i"] #![crate_type = "rlib"] -#![feature(asm_sym, naked_functions)] +#![feature(asm_const, naked_functions)] #![no_std] -use core::fmt::Write; - -use kernel::utilities::registers::interfaces::{Readable, Writeable}; - +#[cfg(any(target_arch = "riscv32", not(target_os = "none")))] pub mod clic; +#[cfg(any(target_arch = "riscv32", not(target_os = "none")))] pub mod epmp; pub mod machine_timer; -pub mod pmp; -pub mod support; -pub mod syscall; -// Re-export the shared CSR library so that dependent crates do not have to have +// Re-export some shared libraries so that dependent crates do not have to have // both rv32i and riscv as dependencies. +pub use riscv::configure_trap_handler; pub use riscv::csr; - -extern "C" { - // Where the end of the stack region is (and hence where the stack should - // start). - static _estack: usize; - - // Boundaries of the .bss section. - static mut _szero: usize; - static mut _ezero: usize; - - // Where the .data section is stored in flash. - static mut _etext: usize; - - // Boundaries of the .data section. - static mut _srelocate: usize; - static mut _erelocate: usize; - - // The global pointer, value set in the linker script - static __global_pointer: usize; -} - -/// Entry point of all programs (`_start`). -/// -/// This assembly does three functions: -/// -/// 1. It initializes the stack pointer, the frame pointer (needed for closures -/// to work in start_rust) and the global pointer. -/// 2. It initializes the .bss and .data RAM segments. This must be done before -/// any Rust code runs. See https://github.com/tock/tock/issues/2222 for more -/// information. -/// 3. Finally it calls `main()`, the main entry point for Tock boards. 
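 // (Editor's note:) the `_start` entry point and trap handler removed below
 // are not deleted outright - per the re-exports above and the diffstat, they
 // move, generalized over XLEN/CLEN, into the shared `riscv` crate.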
-#[cfg(all(target_arch = "riscv32", target_os = "none"))] -#[link_section = ".riscv.start"] -#[export_name = "_start"] -#[naked] -pub extern "C" fn _start() { - use core::arch::asm; - unsafe { - asm! (" - // Set the global pointer register using the variable defined in the - // linker script. This register is only set once. The global pointer - // is a method for sharing state between the linker and the CPU so - // that the linker can emit code with offsets that are relative to - // the gp register, and the CPU can successfully execute them. - // - // https://gnu-mcu-eclipse.github.io/arch/riscv/programmer/#the-gp-global-pointer-register - // https://groups.google.com/a/groups.riscv.org/forum/#!msg/sw-dev/60IdaZj27dY/5MydPLnHAQAJ - // https://www.sifive.com/blog/2017/08/28/all-aboard-part-3-linker-relaxation-in-riscv-toolchain/ - // - lui gp, %hi({gp}$) // Set the global pointer. - addi gp, gp, %lo({gp}$) // Value set in linker script. - - // Initialize the stack pointer register. This comes directly from - // the linker script. - lui sp, %hi({estack}) // Set the initial stack pointer. - addi sp, sp, %lo({estack}) // Value from the linker script. - - // Set s0 (the frame pointer) to the start of the stack. - add s0, sp, zero - - // Initialize mscratch to 0 so that we know that we are currently - // in the kernel. This is used for the check in the trap handler. - csrw 0x340, zero // CSR=0x340=mscratch - - // INITIALIZE MEMORY - - // Start by initializing .bss memory. The Tock linker script defines - // `_szero` and `_ezero` to mark the .bss segment. - la a0, {sbss} // a0 = first address of .bss - la a1, {ebss} // a1 = first address after .bss - - 100: // bss_init_loop - beq a0, a1, 101f // If a0 == a1, we are done. - sw zero, 0(a0) // *a0 = 0. Write 0 to the memory location in a0. - addi a0, a0, 4 // a0 = a0 + 4. Increment pointer to next word. - j 100b // Continue the loop. - - 101: // bss_init_done - - - // Now initialize .data memory. This involves coping the values right at the - // end of the .text section (in flash) into the .data section (in RAM). - la a0, {sdata} // a0 = first address of data section in RAM - la a1, {edata} // a1 = first address after data section in RAM - la a2, {etext} // a2 = address of stored data initial values - - 200: // data_init_loop - beq a0, a1, 201f // If we have reached the end of the .data - // section then we are done. - lw a3, 0(a2) // a3 = *a2. Load value from initial values into a3. - sw a3, 0(a0) // *a0 = a3. Store initial value into - // next place in .data. - addi a0, a0, 4 // a0 = a0 + 4. Increment to next word in memory. - addi a2, a2, 4 // a2 = a2 + 4. Increment to next word in flash. - j 200b // Continue the loop. - - 201: // data_init_done - - // With that initial setup out of the way, we now branch to the main - // code, likely defined in a board's main.rs. - j main - ", - gp = sym __global_pointer, - estack = sym _estack, - sbss = sym _szero, - ebss = sym _ezero, - sdata = sym _srelocate, - edata = sym _erelocate, - etext = sym _etext, - options(noreturn) - ); - } -} - -/// The various privilege levels in RISC-V. -pub enum PermissionMode { - User = 0x0, - Supervisor = 0x1, - Reserved = 0x2, - Machine = 0x3, -} - -/// Tell the MCU what address the trap handler is located at. -/// -/// This is a generic implementation. There may be board specific versions as -/// some platforms have added more bits to the `mtvec` register. -/// -/// The trap handler is called on exceptions and for interrupts. 
-pub unsafe fn configure_trap_handler(mode: PermissionMode) {
-    match mode {
-        PermissionMode::Machine => csr::CSR.mtvec.write(
-            csr::mtvec::mtvec::trap_addr.val(_start_trap as usize >> 2)
-                + csr::mtvec::mtvec::mode::CLEAR,
-        ),
-        PermissionMode::Supervisor => csr::CSR.stvec.write(
-            csr::stvec::stvec::trap_addr.val(_start_trap as usize >> 2)
-                + csr::stvec::stvec::mode::CLEAR,
-        ),
-        PermissionMode::User => csr::CSR.utvec.write(
-            csr::utvec::utvec::trap_addr.val(_start_trap as usize >> 2)
-                + csr::utvec::utvec::mode::CLEAR,
-        ),
-        PermissionMode::Reserved => (
-            // TODO some sort of error handling?
-        ),
-    }
-}
-
-// Mock implementation for tests on Travis-CI.
-#[cfg(not(any(target_arch = "riscv32", target_os = "none")))]
-pub extern "C" fn _start_trap() {
-    unimplemented!()
-}
-
-/// This is the trap handler function. This code is called on all traps,
-/// including interrupts, exceptions, and system calls from applications.
-///
-/// Tock uses only the single trap handler, and does not use any vectored
-/// interrupts or other exception handling. The trap handler has to determine
-/// why it was called, and respond accordingly. Generally, there
-/// are two reasons the trap handler gets called: an interrupt occurred or an
-/// application called a syscall.
-///
-/// In the case of an interrupt while the kernel was executing we only need to
-/// save the kernel registers and then run whatever interrupt handling code we
-/// need to. If the trap happens while an application was executing, we have to
-/// save the application state and then resume the `switch_to()` function to
-/// correctly return back to the kernel.
-#[cfg(all(target_arch = "riscv32", target_os = "none"))]
-#[link_section = ".riscv.trap"]
-#[export_name = "_start_trap"]
-#[naked]
-pub extern "C" fn _start_trap() {
-    use core::arch::asm;
-    unsafe {
-        asm!(
-            "
-            // The first thing we have to do is determine if we came from user
-            // mode or kernel mode, as we need to save state and proceed
-            // differently. We cannot, however, use any registers because we do
-            // not want to lose their contents. So, we rely on `mscratch`. If
-            // mscratch is 0, then we came from the kernel. If it is >0, then it
-            // contains the kernel's stack pointer and we came from an app.
-            //
-            // We use the csrrw instruction to save the current stack pointer
-            // so we can retrieve it if necessary.
-            //
-            // If we could enter this trap handler twice (for example,
-            // handling an interrupt while an exception is being
-            // handled), storing a non-zero value in mscratch
-            // temporarily could cause a race condition similar to the
-            // one of PR 2308[1].
-            // However, as indicated in section 3.1.6.1 of the RISC-V
-            // Privileged Spec[2], MIE will be set to 0 when taking a
-            // trap into machine mode. Therefore, this can only happen
-            // when causing an exception in the trap handler itself.
-            //
-            // [1] https://github.com/tock/tock/pull/2308
-            // [2] https://github.com/riscv/riscv-isa-manual/releases/download/draft-20201222-42dc13a/riscv-privileged.pdf
-            csrrw sp, 0x340, sp // CSR=0x340=mscratch
-            bnez sp, 300f       // If sp != 0 then we must have come from an app.
-
-
-        // _from_kernel:
-            // Swap back the zero value for the stack pointer in mscratch
-            csrrw sp, 0x340, sp // CSR=0x340=mscratch
-
-            // Now, since we want to use the stack to save kernel registers, we
-            // first need to make sure that the trap wasn't the result of a
-            // stack overflow, in which case we can't use the current stack
-            // pointer. We also, however, cannot modify any of the current
-            // registers until we save them, and we cannot save them to the
-            // stack until we know the stack is valid. So, we use the mscratch
-            // trick again to get one register we can use.
-
-            // Save t0's contents to mscratch
-            csrw 0x340, t0 // CSR=0x340=mscratch
-
-            // Load the address of the bottom of the stack (`_sstack`) into our
-            // newly freed-up t0 register.
-            lui t0, %hi(_sstack) // t0 = _sstack
-            addi t0, t0, %lo(_sstack)
-
-            // Compare the kernel stack pointer to the bottom of the stack. If
-            // the stack pointer is above the bottom of the stack, then continue
-            // handling the fault as normal.
-            bgtu sp, t0, 100f // branch if sp > t0
-
-            // If we get here, then we did encounter a stack overflow. We are
-            // going to panic at this point, but for that to work we need a
-            // valid stack to run the panic code. We do this by just starting
-            // over with the kernel stack and placing the stack pointer at the
-            // top of the original stack.
-            lui sp, %hi(_estack) // sp = _estack
-            addi sp, sp, %lo(_estack)
-
-
-        100: // _from_kernel_continue
-
-            // Restore t0, and make sure mscratch is set back to 0 (our flag
-            // tracking that the kernel is executing).
-            csrrw t0, 0x340, zero // t0=mscratch, mscratch=0
-
-            // Make room for the caller saved registers we need to restore after
-            // running any trap handler code.
-            addi sp, sp, -16*4
-
-            // Save all of the caller saved registers.
-            sw ra, 0*4(sp)
-            sw t0, 1*4(sp)
-            sw t1, 2*4(sp)
-            sw t2, 3*4(sp)
-            sw t3, 4*4(sp)
-            sw t4, 5*4(sp)
-            sw t5, 6*4(sp)
-            sw t6, 7*4(sp)
-            sw a0, 8*4(sp)
-            sw a1, 9*4(sp)
-            sw a2, 10*4(sp)
-            sw a3, 11*4(sp)
-            sw a4, 12*4(sp)
-            sw a5, 13*4(sp)
-            sw a6, 14*4(sp)
-            sw a7, 15*4(sp)
-
-            // Jump to board-specific trap handler code. Likely this was an
-            // interrupt and we want to disable a particular interrupt, but each
-            // board/chip can customize this as needed.
-            jal ra, _start_trap_rust_from_kernel
-
-            // Restore the registers from the stack.
-            lw ra, 0*4(sp)
-            lw t0, 1*4(sp)
-            lw t1, 2*4(sp)
-            lw t2, 3*4(sp)
-            lw t3, 4*4(sp)
-            lw t4, 5*4(sp)
-            lw t5, 6*4(sp)
-            lw t6, 7*4(sp)
-            lw a0, 8*4(sp)
-            lw a1, 9*4(sp)
-            lw a2, 10*4(sp)
-            lw a3, 11*4(sp)
-            lw a4, 12*4(sp)
-            lw a5, 13*4(sp)
-            lw a6, 14*4(sp)
-            lw a7, 15*4(sp)
-
-            // Reset the stack pointer.
-            addi sp, sp, 16*4
-
-            // mret returns from the trap handler. The PC is set to what is in
-            // mepc and execution proceeds from there. Since we did not modify
-            // mepc we will return to where the exception occurred.
-            mret
-
-
-
-            // Handle entering the trap handler from an app differently.
-        300: // _from_app
-
-            // At this point all we know is that we entered the trap handler
-            // from an app. We don't know _why_ we got a trap, it could be from
-            // an interrupt, syscall, or fault (or maybe something else).
-            // Therefore we have to be very careful not to overwrite any
-            // registers before we have saved them.
-            //
-            // We ideally want to save registers in the per-process stored state
-            // struct. However, we don't have a pointer to that yet, and we need
-            // to use a temporary register to get that address. So, we save s0
-            // to the kernel stack before we can move it to the proper spot.
-            sw s0, 0*4(sp)
-
-            // Ideally it would be better to save all of the app registers once
-            // we return back to the `switch_to_process()` code. However, we
-            // also potentially need to disable an interrupt in case the app was
-            // interrupted, so it is safer to just immediately save all of the
-            // app registers.
-            //
-            // We do this by retrieving the stored state pointer from the kernel
-            // stack and storing the necessary values in it.
-            lw s0, 1*4(sp)   // Load the stored state pointer into s0.
-            sw x1, 0*4(s0)   // ra
-            sw x3, 2*4(s0)   // gp
-            sw x4, 3*4(s0)   // tp
-            sw x5, 4*4(s0)   // t0
-            sw x6, 5*4(s0)   // t1
-            sw x7, 6*4(s0)   // t2
-            sw x9, 8*4(s0)   // s1
-            sw x10, 9*4(s0)  // a0
-            sw x11, 10*4(s0) // a1
-            sw x12, 11*4(s0) // a2
-            sw x13, 12*4(s0) // a3
-            sw x14, 13*4(s0) // a4
-            sw x15, 14*4(s0) // a5
-            sw x16, 15*4(s0) // a6
-            sw x17, 16*4(s0) // a7
-            sw x18, 17*4(s0) // s2
-            sw x19, 18*4(s0) // s3
-            sw x20, 19*4(s0) // s4
-            sw x21, 20*4(s0) // s5
-            sw x22, 21*4(s0) // s6
-            sw x23, 22*4(s0) // s7
-            sw x24, 23*4(s0) // s8
-            sw x25, 24*4(s0) // s9
-            sw x26, 25*4(s0) // s10
-            sw x27, 26*4(s0) // s11
-            sw x28, 27*4(s0) // t3
-            sw x29, 28*4(s0) // t4
-            sw x30, 29*4(s0) // t5
-            sw x31, 30*4(s0) // t6
-            // Now retrieve the original value of s0 and save that as well.
-            lw t0, 0*4(sp)
-            sw t0, 7*4(s0)  // s0,fp
-
-            // We also need to store the app stack pointer, mcause, and mepc. We
-            // need to store mcause because we use that to determine why the app
-            // stopped executing and returned to the kernel. We store mepc
-            // because it is where we need to return to in the app at some
-            // point. We need to store mtval in case the app faulted and we need
-            // mtval to help with debugging.
-            csrr t0, 0x340  // CSR=0x340=mscratch
-            sw t0, 1*4(s0)  // Save the app sp to the stored state struct
-            csrr t0, 0x341  // CSR=0x341=mepc
-            sw t0, 31*4(s0) // Save the PC to the stored state struct
-            csrr t0, 0x343  // CSR=0x343=mtval
-            sw t0, 33*4(s0) // Save mtval to the stored state struct
-
-            // Save mcause last, as we depend on it being loaded in t0 below
-            csrr t0, 0x342  // CSR=0x342=mcause
-            sw t0, 32*4(s0) // Save mcause to the stored state struct, leave in t0
-
-            // Now we need to check if this was an interrupt, and if it was,
-            // then we need to disable the interrupt before returning from this
-            // trap handler so that it does not fire again. If mcause is greater
-            // than or equal to zero this was not an interrupt (i.e. the most
-            // significant bit is not 1).
-            bge t0, zero, 200f
-            // Copy mcause into a0 and then call the interrupt disable function.
-            mv a0, t0
-            jal ra, _disable_interrupt_trap_rust_from_app
-
-        200: // _from_app_continue
-            // Now determine the address of _return_to_kernel and resume the
-            // context switching code. We need to load _return_to_kernel into
-            // mepc so we can use it to return to the context switch code.
-            lw t0, 2*4(sp) // Load _return_to_kernel into t0.
-            csrw 0x341, t0 // CSR=0x341=mepc
-
-            // Ensure that mscratch is 0. This makes sure that we know that on
-            // a future trap that we came from the kernel.
-            csrw 0x340, zero // CSR=0x340=mscratch
-
-            // Need to set mstatus.MPP to 0b11 so that we stay in machine mode.
-            csrr t0, 0x300 // CSR=0x300=mstatus
-            li t1, 0x1800  // Load 0b11 to the MPP bits location in t1
-            or t0, t0, t1  // Set the MPP bits to one
-            csrw 0x300, t0 // CSR=0x300=mstatus
-
-            // Use mret to exit the trap handler and return to the context
-            // switching code.
-            mret
-            ",
-            options(noreturn)
-        );
-    }
-}
-
-/// RISC-V semihosting needs three exact instructions in uncompressed form.
-///
-/// See https://github.com/riscv/riscv-semihosting-spec/blob/main/riscv-semihosting-spec.adoc#11-semihosting-trap-instruction-sequence
-/// for more details on the three instructions.
-/// -/// In order to work with semihosting we include the assembly here -/// where we are able to disable compressed instruction support. This -/// follows the example used in the Linux kernel: -/// https://elixir.bootlin.com/linux/v5.12.10/source/arch/riscv/include/asm/jump_label.h#L21 -/// as suggested by the RISC-V developers: -/// https://groups.google.com/a/groups.riscv.org/g/isa-dev/c/XKkYacERM04/m/CdpOcqtRAgAJ -#[cfg(all(target_arch = "riscv32", target_os = "none"))] -pub unsafe fn semihost_command(command: usize, arg0: usize, arg1: usize) -> usize { - use core::arch::asm; - let res; - asm!( - " - .option push - .option norelax - .option norvc - slli x0, x0, 0x1f - ebreak - srai x0, x0, 7 - .option pop - ", - in("a0") command, - in("a1") arg0, - in("a2") arg1, - lateout("a0") res, - ); - res -} - -// Mock implementation for tests on Travis-CI. -#[cfg(not(any(target_arch = "riscv32", target_os = "none")))] -pub unsafe fn semihost_command(_command: usize, _arg0: usize, _arg1: usize) -> usize { - unimplemented!() -} - -/// Print a readable string for an mcause reason. -pub unsafe fn print_mcause(mcval: csr::mcause::Trap, writer: &mut dyn Write) { - match mcval { - csr::mcause::Trap::Interrupt(interrupt) => match interrupt { - csr::mcause::Interrupt::UserSoft => { - let _ = writer.write_fmt(format_args!("User software interrupt")); - } - csr::mcause::Interrupt::SupervisorSoft => { - let _ = writer.write_fmt(format_args!("Supervisor software interrupt")); - } - csr::mcause::Interrupt::MachineSoft => { - let _ = writer.write_fmt(format_args!("Machine software interrupt")); - } - csr::mcause::Interrupt::UserTimer => { - let _ = writer.write_fmt(format_args!("User timer interrupt")); - } - csr::mcause::Interrupt::SupervisorTimer => { - let _ = writer.write_fmt(format_args!("Supervisor timer interrupt")); - } - csr::mcause::Interrupt::MachineTimer => { - let _ = writer.write_fmt(format_args!("Machine timer interrupt")); - } - csr::mcause::Interrupt::UserExternal => { - let _ = writer.write_fmt(format_args!("User external interrupt")); - } - csr::mcause::Interrupt::SupervisorExternal => { - let _ = writer.write_fmt(format_args!("Supervisor external interrupt")); - } - csr::mcause::Interrupt::MachineExternal => { - let _ = writer.write_fmt(format_args!("Machine external interrupt")); - } - csr::mcause::Interrupt::Unknown => { - let _ = writer.write_fmt(format_args!("Reserved/Unknown")); - } - }, - csr::mcause::Trap::Exception(exception) => match exception { - csr::mcause::Exception::InstructionMisaligned => { - let _ = writer.write_fmt(format_args!("Instruction access misaligned")); - } - csr::mcause::Exception::InstructionFault => { - let _ = writer.write_fmt(format_args!("Instruction access fault")); - } - csr::mcause::Exception::IllegalInstruction => { - let _ = writer.write_fmt(format_args!("Illegal instruction")); - } - csr::mcause::Exception::Breakpoint => { - let _ = writer.write_fmt(format_args!("Breakpoint")); - } - csr::mcause::Exception::LoadMisaligned => { - let _ = writer.write_fmt(format_args!("Load address misaligned")); - } - csr::mcause::Exception::LoadFault => { - let _ = writer.write_fmt(format_args!("Load access fault")); - } - csr::mcause::Exception::StoreMisaligned => { - let _ = writer.write_fmt(format_args!("Store/AMO address misaligned")); - } - csr::mcause::Exception::StoreFault => { - let _ = writer.write_fmt(format_args!("Store/AMO access fault")); - } - csr::mcause::Exception::UserEnvCall => { - let _ = writer.write_fmt(format_args!("Environment call from 
U-mode")); - } - csr::mcause::Exception::SupervisorEnvCall => { - let _ = writer.write_fmt(format_args!("Environment call from S-mode")); - } - csr::mcause::Exception::MachineEnvCall => { - let _ = writer.write_fmt(format_args!("Environment call from M-mode")); - } - csr::mcause::Exception::InstructionPageFault => { - let _ = writer.write_fmt(format_args!("Instruction page fault")); - } - csr::mcause::Exception::LoadPageFault => { - let _ = writer.write_fmt(format_args!("Load page fault")); - } - csr::mcause::Exception::StorePageFault => { - let _ = writer.write_fmt(format_args!("Store/AMO page fault")); - } - csr::mcause::Exception::Unknown => { - let _ = writer.write_fmt(format_args!("Reserved")); - } - }, - } -} - -/// Prints out RISCV machine state, including basic system registers -/// (mcause, mstatus, mtvec, mepc, mtval, interrupt status). -pub unsafe fn print_riscv_state(writer: &mut dyn Write) { - let mcval: csr::mcause::Trap = core::convert::From::from(csr::CSR.mcause.extract()); - let _ = writer.write_fmt(format_args!("\r\n---| RISC-V Machine State |---\r\n")); - let _ = writer.write_fmt(format_args!("Last cause (mcause): ")); - print_mcause(mcval, writer); - let interrupt = csr::CSR.mcause.read(csr::mcause::mcause::is_interrupt); - let code = csr::CSR.mcause.read(csr::mcause::mcause::reason); - let _ = writer.write_fmt(format_args!( - " (interrupt={}, exception code={:#010X})", - interrupt, code - )); - let _ = writer.write_fmt(format_args!( - "\r\nLast value (mtval): {:#010X}\ - \r\n\ - \r\nSystem register dump:\ - \r\n mepc: {:#010X} mstatus: {:#010X}\ - \r\n mcycle: {:#010X} minstret: {:#010X}\ - \r\n mtvec: {:#010X}", - csr::CSR.mtval.get(), - csr::CSR.mepc.get(), - csr::CSR.mstatus.get(), - csr::CSR.mcycle.get(), - csr::CSR.minstret.get(), - csr::CSR.mtvec.get() - )); - let mstatus = csr::CSR.mstatus.extract(); - let uie = mstatus.is_set(csr::mstatus::mstatus::uie); - let sie = mstatus.is_set(csr::mstatus::mstatus::sie); - let mie = mstatus.is_set(csr::mstatus::mstatus::mie); - let upie = mstatus.is_set(csr::mstatus::mstatus::upie); - let spie = mstatus.is_set(csr::mstatus::mstatus::spie); - let mpie = mstatus.is_set(csr::mstatus::mstatus::mpie); - let spp = mstatus.is_set(csr::mstatus::mstatus::spp); - let _ = writer.write_fmt(format_args!( - "\r\n mstatus: {:#010X}\ - \r\n uie: {:5} upie: {}\ - \r\n sie: {:5} spie: {}\ - \r\n mie: {:5} mpie: {}\ - \r\n spp: {}", - mstatus.get(), - uie, - upie, - sie, - spie, - mie, - mpie, - spp - )); - let e_usoft = csr::CSR.mie.is_set(csr::mie::mie::usoft); - let e_ssoft = csr::CSR.mie.is_set(csr::mie::mie::ssoft); - let e_msoft = csr::CSR.mie.is_set(csr::mie::mie::msoft); - let e_utimer = csr::CSR.mie.is_set(csr::mie::mie::utimer); - let e_stimer = csr::CSR.mie.is_set(csr::mie::mie::stimer); - let e_mtimer = csr::CSR.mie.is_set(csr::mie::mie::mtimer); - let e_uext = csr::CSR.mie.is_set(csr::mie::mie::uext); - let e_sext = csr::CSR.mie.is_set(csr::mie::mie::sext); - let e_mext = csr::CSR.mie.is_set(csr::mie::mie::mext); - - let p_usoft = csr::CSR.mip.is_set(csr::mip::mip::usoft); - let p_ssoft = csr::CSR.mip.is_set(csr::mip::mip::ssoft); - let p_msoft = csr::CSR.mip.is_set(csr::mip::mip::msoft); - let p_utimer = csr::CSR.mip.is_set(csr::mip::mip::utimer); - let p_stimer = csr::CSR.mip.is_set(csr::mip::mip::stimer); - let p_mtimer = csr::CSR.mip.is_set(csr::mip::mip::mtimer); - let p_uext = csr::CSR.mip.is_set(csr::mip::mip::uext); - let p_sext = csr::CSR.mip.is_set(csr::mip::mip::sext); - let p_mext = 
csr::CSR.mip.is_set(csr::mip::mip::mext); - let _ = writer.write_fmt(format_args!( - "\r\n mie: {:#010X} mip: {:#010X}\ - \r\n usoft: {:6} {:6}\ - \r\n ssoft: {:6} {:6}\ - \r\n msoft: {:6} {:6}\ - \r\n utimer: {:6} {:6}\ - \r\n stimer: {:6} {:6}\ - \r\n mtimer: {:6} {:6}\ - \r\n uext: {:6} {:6}\ - \r\n sext: {:6} {:6}\ - \r\n mext: {:6} {:6}\r\n", - csr::CSR.mie.get(), - csr::CSR.mip.get(), - e_usoft, - p_usoft, - e_ssoft, - p_ssoft, - e_msoft, - p_msoft, - e_utimer, - p_utimer, - e_stimer, - p_stimer, - e_mtimer, - p_mtimer, - e_uext, - p_uext, - e_sext, - p_sext, - e_mext, - p_mext - )); -} +pub use riscv::pmp; +pub use riscv::print_riscv_state; +pub use riscv::support; +pub use riscv::syscall; +pub use riscv::PermissionMode; diff --git a/arch/rv32i/src/machine_timer.rs b/arch/rv32i/src/machine_timer.rs index 1f649b7e1..7a7b6b401 100644 --- a/arch/rv32i/src/machine_timer.rs +++ b/arch/rv32i/src/machine_timer.rs @@ -1,29 +1,28 @@ //! RISC-V Generic Machine Timer +use core::marker::PhantomData; +use core::ops::Deref; use kernel::hil::time::{Ticks, Ticks64}; use kernel::utilities::registers::interfaces::{Readable, Writeable}; use kernel::utilities::registers::ReadWrite; use kernel::ErrorCode; -pub struct MachineTimer<'a> { - compare_low: &'a ReadWrite, - compare_high: &'a ReadWrite, - value_low: &'a ReadWrite, - value_high: &'a ReadWrite, +pub struct MachineTimer<'a, T: 'a + Deref> = &'a ReadWrite> { + compare_low: T, + compare_high: T, + value_low: T, + value_high: T, + p: PhantomData<&'a ReadWrite>, } -impl<'a> MachineTimer<'a> { - pub const fn new( - compare_low: &'a ReadWrite, - compare_high: &'a ReadWrite, - value_low: &'a ReadWrite, - value_high: &'a ReadWrite, - ) -> Self { +impl<'a, T: 'a + Deref>> MachineTimer<'a, T> { + pub const fn new(compare_low: T, compare_high: T, value_low: T, value_high: T) -> Self { MachineTimer { compare_low, compare_high, value_low, value_high, + p: PhantomData, } } diff --git a/boards/acd52832/src/main.rs b/boards/acd52832/src/main.rs index a682bee7f..2174fdea5 100644 --- a/boards/acd52832/src/main.rs +++ b/boards/acd52832/src/main.rs @@ -49,8 +49,7 @@ const FAULT_RESPONSE: kernel::process::PanicFaultPolicy = kernel::process::Panic // Number of concurrent processes this platform supports. const NUM_PROCS: usize = 4; -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); /// Dummy buffer that causes the linker to reserve enough space for the stack. #[no_mangle] @@ -595,7 +594,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/arty_e21/src/main.rs b/boards/arty_e21/src/main.rs index ac924d72d..b11790ef3 100644 --- a/boards/arty_e21/src/main.rs +++ b/boards/arty_e21/src/main.rs @@ -29,8 +29,7 @@ const NUM_PROCS: usize = 4; const FAULT_RESPONSE: kernel::process::PanicFaultPolicy = kernel::process::PanicFaultPolicy {}; // Actual memory for holding the active process structures. -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None, None, None, None]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); // Reference to the chip for panic dumps. 
static mut CHIP: Option<&'static arty_e21_chip::chip::ArtyExx> = None; @@ -286,7 +285,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_mgmt_cap, ) diff --git a/boards/clue_nrf52840/src/main.rs b/boards/clue_nrf52840/src/main.rs index 08f479909..1f6ce60d0 100644 --- a/boards/clue_nrf52840/src/main.rs +++ b/boards/clue_nrf52840/src/main.rs @@ -105,8 +105,7 @@ const FAULT_RESPONSE: kernel::process::StopWithDebugFaultPolicy = // Number of concurrent processes this platform supports. const NUM_PROCS: usize = 8; -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); static mut CHIP: Option<&'static nrf52840::chip::NRF52> = None; static mut PROCESS_PRINTER: Option<&'static kernel::process::ProcessPrinterText> = None; @@ -791,7 +790,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/components/src/alarm.rs b/boards/components/src/alarm.rs index 9a113ebc2..045396f73 100644 --- a/boards/components/src/alarm.rs +++ b/boards/components/src/alarm.rs @@ -18,15 +18,16 @@ // Author: Philip Levis // Last modified: 12/21/2019 +use core::marker::PhantomData; use core::mem::MaybeUninit; use capsules::alarm::AlarmDriver; use capsules::virtual_alarm::{MuxAlarm, VirtualMuxAlarm}; -use kernel::capabilities; use kernel::component::Component; use kernel::create_capability; use kernel::hil::time::{self, Alarm}; use kernel::static_init_half; +use kernel::{capabilities, simple_static_component}; // Setup static space for the objects. 
#[macro_export] @@ -129,3 +130,43 @@ impl> Component for AlarmDriverComponent { alarm } } + +simple_static_component!(impl<{A: 'static + time::Alarm<'static>}> for AlarmMuxComponent::, + Output = MuxAlarm<'static, A>, + NewInput = &'static A, + FinInput = &'static A, + | _slf, input | {MuxAlarm::new(input)}, + | slf, input | {input.set_alarm_client(slf)} +); + +pub struct AlarmMuxClient>(PhantomData); + +simple_static_component!(impl<{A: 'static + time::Alarm<'static>}> for AlarmMuxClient::, + Output = VirtualMuxAlarm<'static, A>, + NewInput = &'static MuxAlarm<'static, A>, + FinInput = (), + | _slf, input | {VirtualMuxAlarm::new(input)}, + | slf, _input | {slf.setup();} +); + +simple_static_component!(impl<{A: 'static + time::Alarm<'static>}> for AlarmDriverComponent::, + Inherit = AlarmMuxClient, + Output = AlarmDriver<'static, VirtualMuxAlarm<'static, A>>, + NewInput = (&'static MuxAlarm<'static, A>, capsules::alarm::AlarmGrant), + FinInput = (), + | _slf, input, supe | super{input.0} {AlarmDriver::new(supe, input.1)}, + | slf, _input, supe | super{()} {supe.set_alarm_client(slf)} +); + +pub struct VirtualSchedulerTimerComponent>(PhantomData); + +use kernel::platform::scheduler_timer::VirtualSchedulerTimer; + +simple_static_component!(impl<{A: 'static + time::Alarm<'static>}> for VirtualSchedulerTimerComponent::, + Inherit = AlarmMuxClient, + Output = VirtualSchedulerTimer>, + NewInput = &'static MuxAlarm<'static, A>, + FinInput = (), + | _slf, input, supe | super{input} {VirtualSchedulerTimer::new(supe)}, + | _slf, _input, _supe | super{()} {} +); diff --git a/boards/components/src/console.rs b/boards/components/src/console.rs index 94f425d1f..b896b7ccd 100644 --- a/boards/components/src/console.rs +++ b/boards/components/src/console.rs @@ -21,16 +21,17 @@ use capsules::console; use capsules::virtual_uart::{MuxUart, UartDevice}; -use kernel::capabilities; use kernel::component::Component; use kernel::create_capability; -use kernel::dynamic_deferred_call::DynamicDeferredCall; +use kernel::dynamic_deferred_call::{DynamicDeferredCall, ProtoDynamicDeferredCall}; use kernel::hil; use kernel::hil::uart; use kernel::static_init; use kernel::utilities::static_init::StaticUninitializedBuffer; +use kernel::{capabilities, simple_static_component}; -use capsules::console::DEFAULT_BUF_SIZE; +use capsules::console::{Console, DEFAULT_BUF_SIZE}; +use kernel::hil::uart::{Receive, Transmit}; pub struct UartMuxComponent { uart: &'static dyn uart::Uart<'static>, @@ -64,6 +65,7 @@ impl Component for UartMuxComponent { &mut capsules::virtual_uart::RX_BUF, self.baud_rate, self.deferred_caller, + None, ) ); uart_mux.initialize_callback_handle( @@ -144,3 +146,42 @@ impl Component for ConsoleComponent { console } } + +pub struct UartMuxClientComponent(); + +simple_static_component!(impl for UartMuxClientComponent, + Output = UartDevice<'static>, + NewInput = (&'static MuxUart<'static>, bool), + FinInput = (), + |_slf, input| {UartDevice::new(input.0, input.1)}, + |slf, _input| {slf.setup()} +); + +simple_static_component!(impl for ConsoleComponent, + Inherit = UartMuxClientComponent, + Output = Console<'static>, + BUFFER_BYTES = 2 * DEFAULT_BUF_SIZE, + NewInput = (&'static MuxUart<'static>, capsules::console::ConsoleGrant), + FinInput = (), + |_slf, input, buf, supe | super{(input.0, true)} { + let (b1, b2) : (&mut [u8; DEFAULT_BUF_SIZE], &mut [u8; DEFAULT_BUF_SIZE]) = kernel::component::split_array_mut::(buf); + Console::new(supe, b1, b2, input.1) + }, + |slf, _input, supe | super{()} { + 
supe.set_transmit_client(slf); + supe.set_receive_client(slf); + } +); + +simple_static_component!(impl for UartMuxComponent, + Output = MuxUart<'static>, + BUFFER_BYTES = capsules::virtual_uart::RX_BUF_LEN, + NewInput = (&'static dyn uart::Uart<'static>, u32, &'static DynamicDeferredCall, &'a mut ProtoDynamicDeferredCall), + FinInput = &'static dyn uart::Uart<'static>, + |slf, input, buf | {MuxUart::new(input.0, buf, input.1, input.2, input.3.register(slf))}, + |slf, input | { + slf.initialize(); + input.set_transmit_client(slf); + input.set_receive_client(slf); + } +); diff --git a/boards/components/src/debug_writer.rs b/boards/components/src/debug_writer.rs index 5665ff0ab..82f49ea40 100644 --- a/boards/components/src/debug_writer.rs +++ b/boards/components/src/debug_writer.rs @@ -21,9 +21,11 @@ use capsules::virtual_uart::{MuxUart, UartDevice}; use kernel::capabilities; use kernel::collections::ring_buffer::RingBuffer; -use kernel::component::Component; +use kernel::component::{Component, StaticComponent, StaticComponentFinalize}; +use kernel::debug::{DebugWriter, DebugWriterWrapper}; use kernel::hil; use kernel::hil::uart; +use kernel::hil::uart::Transmit; use kernel::static_init; // The sum of the output_buf and internal_buf is set to a multiple of 1024 bytes in order to avoid excessive @@ -31,11 +33,11 @@ use kernel::static_init; // least a 1 KiB boundary). This is not _semantically_ critical, but helps keep buffers on 1 KiB // boundaries in some cases. Of course, these definitions are only advisory, and individual boards // can choose to pass in their own buffers with different lengths. -const DEBUG_BUFFER_KBYTE: usize = 1; +pub const DEBUG_BUFFER_KBYTE: usize = 1; // Bytes [0, DEBUG_BUFFER_SPLIT) are used for output_buf while bytes // [DEBUG_BUFFER_SPLIT, DEBUG_BUFFER_KBYTE * 1024) are used for internal_buf. -const DEBUG_BUFFER_SPLIT: usize = 64; +pub const DEBUG_BUFFER_SPLIT: usize = 64; pub struct DebugWriterComponent { uart_mux: &'static MuxUart<'static>, @@ -125,3 +127,87 @@ impl + uart::Transmit<'static> + 'static> Component }); } } + +impl StaticComponentFinalize for DebugWriterComponent { + type FinaliseInput = (); + + fn component_finalize( + slf: &'static Self::Output, + state: &'static Self::StaticState, + _input: Self::FinaliseInput, + ) { + state.setup(); + state.set_transmit_client(slf); + } +} + +impl const StaticComponent for DebugWriterComponent { + type Output = DebugWriter; + type StaticState = UartDevice<'static>; + type StaticStateMut = (RingBuffer<'static, u8>, DebugWriterWrapper); + + type BufferBytes = [u8; 1024 * DEBUG_BUFFER_KBYTE]; + + type NewInput<'a> = &'static MuxUart<'static>; + + fn component_new<'a>( + slf: &'static Self::Output, + state: &'static Self::StaticState, + state_mut: &'static mut Self::StaticStateMut, + buffer: &'static mut Self::BufferBytes, + input: Self::NewInput<'a>, + ) -> (Self::Output, Self::StaticState, Self::StaticStateMut) { + let (output_buf, internal_buf) = buffer.split_at_mut(DEBUG_BUFFER_SPLIT); + let debug_writer = DebugWriter::new(state, output_buf, &mut state_mut.0); + ( + debug_writer, + UartDevice::new(input, false), + (RingBuffer::new(internal_buf), DebugWriterWrapper::new(slf)), + ) + } +} + +// A version of the debug writer component without a uart device mux. 
+pub struct LegacyDebugWriterComponent(); + +impl const StaticComponent for LegacyDebugWriterComponent { + type Output = DebugWriter; + type StaticState = DebugWriterWrapper; + type StaticStateMut = RingBuffer<'static, u8>; + type BufferBytes = [u8; 1024 * DEBUG_BUFFER_KBYTE]; + + type NewInput<'a> = &'static dyn hil::uart::Transmit<'static>; + + fn component_new<'a>( + slf: &'static Self::Output, + _state: &'static Self::StaticState, + state_mut: &'static mut Self::StaticStateMut, + buffer: &'static mut Self::BufferBytes, + input: Self::NewInput<'a>, + ) -> (Self::Output, Self::StaticState, Self::StaticStateMut) { + let (output_buf, internal_buf) = buffer.split_at_mut(DEBUG_BUFFER_SPLIT); + let debug_writer = DebugWriter::new(input, output_buf, state_mut); + ( + debug_writer, + DebugWriterWrapper::new(slf), + RingBuffer::new(internal_buf), + ) + } +} + +impl StaticComponentFinalize for LegacyDebugWriterComponent { + type FinaliseInput = (); + + fn component_finalize( + _slf: &'static Self::Output, + state: &'static Self::StaticState, + _input: Self::FinaliseInput, + ) { + // Safety: nothing is unsafe about set_debug_writer_wrapper afaict. If it turns out + // calling this more than once is unsafe, we can always add a singleton check to the + // constructor for DebugWriterWrapper. + unsafe { + kernel::debug::set_debug_writer_wrapper(state); + } + } +} diff --git a/boards/components/src/lib.rs b/boards/components/src/lib.rs index 120403652..785888773 100644 --- a/boards/components/src/lib.rs +++ b/boards/components/src/lib.rs @@ -1,3 +1,9 @@ +#![feature(const_trait_impl)] +#![feature(const_mut_refs)] +#![feature(const_slice_split_at_mut)] +#![feature(maybe_uninit_array_assume_init)] +#![feature(const_maybe_uninit_array_assume_init)] +#![feature(macro_metavar_expr)] #![no_std] pub mod adc; diff --git a/boards/components/src/lldb.rs b/boards/components/src/lldb.rs index 2e6e8d858..5d8cc146f 100644 --- a/boards/components/src/lldb.rs +++ b/boards/components/src/lldb.rs @@ -14,12 +14,15 @@ // Author: Amit Levy // Last modified: 12/04/2019 +use crate::console::UartMuxClientComponent; use capsules::low_level_debug; +use capsules::low_level_debug::{LowLevelDebug, LowLevelDebugZero}; use capsules::virtual_uart::{MuxUart, UartDevice}; use kernel::capabilities; use kernel::component::Component; use kernel::create_capability; use kernel::hil; +use kernel::hil::uart::Transmit; use kernel::static_init; pub struct LowLevelDebugComponent { @@ -68,3 +71,29 @@ impl Component for LowLevelDebugComponent { lldb } } + +// This version of the LLDB uses the legacy interface +kernel::simple_static_component!(impl for LowLevelDebugComponent, + Inherit = UartMuxClientComponent, + Output = LowLevelDebug<'static, UartDevice<'static>>, + BUFFER_BYTES = low_level_debug::BUF_LEN, + NewInput = (&'static MuxUart<'static>, low_level_debug::GrantType), + FinInput = (), + |_slf, input, buf, supe | super{(input.0, true)} { + LowLevelDebug::new(buf, supe, input.1) + }, + |slf, _input, supe | super{()} { + supe.set_transmit_client(slf); + } +); + +pub struct LowLevelDebugZeroComponent(); + +kernel::simple_static_component!(impl for LowLevelDebugZeroComponent, + Output = LowLevelDebugZero, + BUFFER_BYTES = low_level_debug::BUF_LEN, + NewInput = low_level_debug::GrantType, + FinInput = (), + |_slf, input, buf | { LowLevelDebugZero::new(buf, input)}, + |_slf, _input | {} +); diff --git a/boards/components/src/sched/cooperative.rs b/boards/components/src/sched/cooperative.rs index be01f2189..7293e7b10 100644 --- 
a/boards/components/src/sched/cooperative.rs +++ b/boards/components/src/sched/cooperative.rs @@ -13,9 +13,8 @@ use core::mem::MaybeUninit; use kernel::component::Component; -use kernel::process::Process; use kernel::scheduler::cooperative::{CoopProcessNode, CooperativeSched}; -use kernel::{static_init, static_init_half}; +use kernel::{static_init, static_init_half, ProcEntry}; #[macro_export] macro_rules! coop_component_helper { @@ -30,11 +29,11 @@ macro_rules! coop_component_helper { } pub struct CooperativeComponent { - processes: &'static [Option<&'static dyn Process>], + processes: &'static [ProcEntry], } impl CooperativeComponent { - pub fn new(processes: &'static [Option<&'static dyn Process>]) -> CooperativeComponent { + pub fn new(processes: &'static [ProcEntry]) -> CooperativeComponent { CooperativeComponent { processes } } } @@ -50,7 +49,7 @@ impl Component for CooperativeComponent { let init_node = static_init_half!( node, CoopProcessNode<'static>, - CoopProcessNode::new(&self.processes[i]) + CoopProcessNode::new(&self.processes[i].proc_ref) ); scheduler.processes.push_head(init_node); } diff --git a/boards/components/src/sched/mlfq.rs b/boards/components/src/sched/mlfq.rs index aba4a7036..052d525ea 100644 --- a/boards/components/src/sched/mlfq.rs +++ b/boards/components/src/sched/mlfq.rs @@ -10,9 +10,8 @@ use core::mem::MaybeUninit; use capsules::virtual_alarm::{MuxAlarm, VirtualMuxAlarm}; use kernel::component::Component; use kernel::hil::time; -use kernel::process::Process; use kernel::scheduler::mlfq::{MLFQProcessNode, MLFQSched}; -use kernel::static_init_half; +use kernel::{static_init_half, ProcEntry}; #[macro_export] macro_rules! mlfq_component_helper { @@ -31,13 +30,13 @@ macro_rules! mlfq_component_helper { pub struct MLFQComponent> { alarm_mux: &'static MuxAlarm<'static, A>, - processes: &'static [Option<&'static dyn Process>], + processes: &'static [ProcEntry], } impl> MLFQComponent { pub fn new( alarm_mux: &'static MuxAlarm<'static, A>, - processes: &'static [Option<&'static dyn Process>], + processes: &'static [ProcEntry], ) -> MLFQComponent { MLFQComponent { alarm_mux, @@ -72,7 +71,7 @@ impl> Component for MLFQComponent { let init_node = static_init_half!( node, MLFQProcessNode<'static>, - MLFQProcessNode::new(&self.processes[i]) + MLFQProcessNode::new(&self.processes[i].proc_ref) ); scheduler.processes[0].push_head(init_node); } diff --git a/boards/components/src/sched/round_robin.rs b/boards/components/src/sched/round_robin.rs index c29412c7d..4fefe0d1a 100644 --- a/boards/components/src/sched/round_robin.rs +++ b/boards/components/src/sched/round_robin.rs @@ -13,10 +13,11 @@ // Last modified: 03/31/2020 use core::mem::MaybeUninit; +use kernel::collections::list::ListLink; use kernel::component::Component; -use kernel::process::Process; +use kernel::new_const_array; use kernel::scheduler::round_robin::{RoundRobinProcessNode, RoundRobinSched}; -use kernel::{static_init, static_init_half}; +use kernel::{simple_static_component, static_init, static_init_half, ProcEntry}; #[macro_export] macro_rules! rr_component_helper { @@ -31,11 +32,11 @@ macro_rules! 
rr_component_helper { } pub struct RoundRobinComponent { - processes: &'static [Option<&'static dyn Process>], + processes: &'static [ProcEntry], } impl RoundRobinComponent { - pub fn new(processes: &'static [Option<&'static dyn Process>]) -> RoundRobinComponent { + pub fn new(processes: &'static [ProcEntry]) -> RoundRobinComponent { RoundRobinComponent { processes } } } @@ -51,10 +52,36 @@ impl Component for RoundRobinComponent { let init_node = static_init_half!( node, RoundRobinProcessNode<'static>, - RoundRobinProcessNode::new(&self.processes[i]) + RoundRobinProcessNode::new(&self.processes[i].proc_ref) ); scheduler.processes.push_head(init_node); } scheduler } } + +pub struct RoundRobinSchedWithQueue { + sched: RoundRobinSched<'static>, + queue: [RoundRobinProcessNode<'static>; N_PROCS], +} + +impl RoundRobinSchedWithQueue { + pub fn get_sched(&self) -> &RoundRobinSched<'static> { + &self.sched + } +} + +pub struct StaticRoundRobinComponent(); + +simple_static_component!(impl<{const N_PROCS : usize}> for StaticRoundRobinComponent, + Output = RoundRobinSchedWithQueue, + NewInput = &'static [ProcEntry], + FinInput = (), + | slf, input | { + let (hd, proc_list) = new_const_array!([RoundRobinProcessNode<'static>; N_PROCS], ListLink::empty(), | link, i | { + (RoundRobinProcessNode::new_with_next(&input[i].proc_ref, link), ListLink::new(&slf.queue[i])) + }); + RoundRobinSchedWithQueue{ sched :RoundRobinSched::new_with_head(hd), queue : proc_list } + }, + | _slf, _input | {} +); diff --git a/boards/esp32-c3-devkitM-1/src/main.rs b/boards/esp32-c3-devkitM-1/src/main.rs index 57a1868f6..1f1d11826 100644 --- a/boards/esp32-c3-devkitM-1/src/main.rs +++ b/boards/esp32-c3-devkitM-1/src/main.rs @@ -30,8 +30,7 @@ const NUM_PROCS: usize = 4; // // Actual memory for holding the active process structures. Need an empty list // at least. -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); // Reference to the chip for panic dumps. static mut CHIP: Option<&'static esp32_c3::chip::Esp32C3> = None; @@ -307,7 +306,6 @@ unsafe fn setup() -> ( &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_mgmt_cap, ) diff --git a/boards/hail/src/main.rs b/boards/hail/src/main.rs index efd29cfc5..0c4095136 100644 --- a/boards/hail/src/main.rs +++ b/boards/hail/src/main.rs @@ -39,8 +39,7 @@ mod test_take_map_cell; const NUM_PROCS: usize = 20; // Actual memory for holding the active process structures. -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); static mut CHIP: Option<&'static sam4l::chip::Sam4l> = None; static mut PROCESS_PRINTER: Option<&'static kernel::process::ProcessPrinterText> = None; @@ -579,7 +578,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, fault_policy, &process_management_capability, ) diff --git a/boards/hifive1/src/main.rs b/boards/hifive1/src/main.rs index e52d399ec..71b0e8d30 100644 --- a/boards/hifive1/src/main.rs +++ b/boards/hifive1/src/main.rs @@ -29,8 +29,7 @@ pub const NUM_PROCS: usize = 4; // // Actual memory for holding the active process structures. Need an empty list // at least. 
-static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); // Reference to the chip for panic dumps. static mut CHIP: Option<&'static e310x::chip::E310x> = None; @@ -296,7 +295,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_mgmt_cap, ) diff --git a/boards/imix/src/main.rs b/boards/imix/src/main.rs index 8d547dde3..7e466e567 100644 --- a/boards/imix/src/main.rs +++ b/boards/imix/src/main.rs @@ -92,8 +92,7 @@ const PAN_ID: u16 = 0xABCD; // how should the kernel respond when a process faults const FAULT_RESPONSE: kernel::process::PanicFaultPolicy = kernel::process::PanicFaultPolicy {}; -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); static mut CHIP: Option<&'static sam4l::chip::Sam4l> = None; static mut PROCESS_PRINTER: Option<&'static kernel::process::ProcessPrinterText> = None; @@ -733,7 +732,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_mgmt_cap, ) diff --git a/boards/imxrt1050-evkb/src/main.rs b/boards/imxrt1050-evkb/src/main.rs index a4658a09f..a2ebf77fa 100644 --- a/boards/imxrt1050-evkb/src/main.rs +++ b/boards/imxrt1050-evkb/src/main.rs @@ -44,8 +44,7 @@ pub mod boot_header; const NUM_PROCS: usize = 4; // Actual memory for holding the active process structures. -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); type Chip = imxrt1050::chip::Imxrt10xx; static mut CHIP: Option<&'static Chip> = None; @@ -526,7 +525,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/kernel_layout.ld b/boards/kernel_layout.ld index bf6f7bbd6..6b9d83de7 100644 --- a/boards/kernel_layout.ld +++ b/boards/kernel_layout.ld @@ -37,6 +37,11 @@ */ PAGE_SIZE = DEFINED(PAGE_SIZE) ? PAGE_SIZE : 512; +WORD_SIZE = DEFINED(WORD_SIZE) ? WORD_SIZE : 4; + +/* If we have a bootloader (or something like QEMU) then the elf entry point + * might matter */ +ENTRY(_start) SECTIONS { @@ -70,10 +75,13 @@ SECTIONS /* STATIC ELEMENTS FOR TOCK KERNEL */ .text : { - . = ALIGN(4); + . = ALIGN(WORD_SIZE); _textstart = .; /* Symbol expected by some MS build toolchains */ _stext = .; /* First of standard s,e (start/end) pair */ + /* This must come first on platforms that have it */ + KEEP(*(asm_trampolines)); + /* Place vector table at the beginning of ROM. * * The first 16 entries in the ARM vector table are defined by ARM and @@ -135,29 +143,29 @@ SECTIONS - ctors/dtors Symbols used by the C++ runtime for initialization / termination */ - . = ALIGN(4); + . = ALIGN(WORD_SIZE); KEEP(*(.init)) - . = ALIGN(4); + . = ALIGN(WORD_SIZE); __preinit_array_start = .; KEEP (*(.preinit_array)) __preinit_array_end = .; - . = ALIGN(4); + . = ALIGN(WORD_SIZE); __init_array_start = .; KEEP (*(SORT(.init_array.*))) KEEP (*(.init_array)) __init_array_end = .; - . = ALIGN(4); + . 
= ALIGN(WORD_SIZE);
        KEEP (*crtbegin.o(.ctors))
        KEEP (*(EXCLUDE_FILE (*crtend.o) .ctors))
        KEEP (*(SORT(.ctors.*)))
        KEEP (*crtend.o(.ctors))

-        . = ALIGN(4);
+        . = ALIGN(WORD_SIZE);
        KEEP(*(.fini))

-        . = ALIGN(4);
+        . = ALIGN(WORD_SIZE);
        __fini_array_start = .;
        KEEP (*(.fini_array))
        KEEP (*(SORT(.fini_array.*)))
@@ -203,12 +211,6 @@ SECTIONS
    } > rom

    . = ALIGN(PAGE_SIZE);

-    /* Mark the end of static elements */
-    . = ALIGN(4);
-    _erodata = .;
-    _etext = .;
-    _textend = .;   /* alias for _etext expected by some MS toolchains */
-
    /* Customer configuration is most often located at the end of the rom. It is
     * conditional, and won't be written if not specified in the board specific
@@ -229,7 +231,7 @@ SECTIONS
    .apps :
    {
        /* _sapps symbol used by Tock to look for first application. */
-        . = ALIGN(4);
+        . = ALIGN(WORD_SIZE);
        _sapps = .;

        /* Include placeholder bytes in this section so that the linker
         * and openocd fails to write it.
         *
         * An issue has been submitted https://github.com/raspberrypi/openocd/issues/25
+         * These bytes break QEMU which will overwrite flash if this is done.
         */
-        BYTE(0xFF)
-        BYTE(0xFF)
-        BYTE(0xFF)
-        BYTE(0xFF)
    } > prog
    /* _eapps symbol used by tock to calculate the length of app flash */
    _eapps = _sapps + LENGTH(prog);
@@ -264,7 +263,7 @@ SECTIONS
     */
    .relocate :
    {
-        . = ALIGN(4);
+        . = ALIGN(WORD_SIZE);
        _srelocate = .;

        /* The Global Pointer is used by the RISC-V architecture to provide
        *(.sdata .sdata.* .gnu.linkonce.r.*)
        *(.data .data.*);

-        . = ALIGN(4);
+        . = ALIGN(WORD_SIZE);
        _erelocate = .;
    } > ram AT>rom

+    /* NOTE: sections in .data might have high alignment, resulting in padding
+     * between the end of text and the relocatable data.
+     * Because we assume that relocatable data comes immediately after text,
+     * data will end up at the wrong address relative to _srelocate if there
+     * is any.
+     * This is why we put the calculation of etext here, so we can explicitly
+     * make it the start of .relocate.
+     * Arguably, a different symbol would be better, but this would be annoying
+     * for all the startup ASM */
+
+    /* Mark the end of static elements */
+    _erodata = LOADADDR(.relocate);
+    _etext = LOADADDR(.relocate);
+    _textend = LOADADDR(.relocate);   /* alias for _etext expected by some MS toolchains */

    .sram (NOLOAD) :
    {
@@ -306,7 +319,7 @@
     * Elements placed in the .bss and .COMMON sections are simply used to
     * measure amount of memory to zero out.
     */
-    . = ALIGN(4);
+    . = ALIGN(WORD_SIZE);
    _szero = .;

    /* In addition to the traditional .bss section, RISC-V splits out a "small data" section
    *(.sbss .sbss.* .bss .bss.*);
    *(COMMON)

-    . = ALIGN(4);
+    . = ALIGN(WORD_SIZE);
    _ezero = .;

diff --git a/boards/litex/arty/src/main.rs b/boards/litex/arty/src/main.rs
index 186d2cb2b..6f5df624c 100644
--- a/boards/litex/arty/src/main.rs
+++ b/boards/litex/arty/src/main.rs
@@ -75,8 +75,7 @@ const NUM_PROCS: usize = 4;

 // Actual memory for holding the active process structures. Need an
 // empty list at least.
-static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] =
-    [None; NUM_PROCS];
+static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array();

 // Reference to the chip, led controller, UART hardware, and process printer for
 // panic dumps.
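The `PROCESSES` rewrite repeated across these board files is one mechanical pattern: the open-coded `[Option<&'static dyn Process>; NUM_PROCS]` array becomes a `kernel::ProcessArray` built by a const constructor, and the `&mut PROCESSES` argument to `load_processes` is dropped, presumably because the kernel is now constructed over the same array. A hedged sketch of the before/after shape; any type parameters on `ProcessArray` (e.g. over `NUM_PROCS`) are not visible in this capture and are assumed away here:

```rust
// Before: each board spelled out the array and passed it to load_processes.
// static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] =
//     [None; NUM_PROCS];

// After: a const constructor provided by the kernel crate. The separate
// `&mut PROCESSES` argument to kernel::process::load_processes() simply
// disappears, as in the hunks above and below.
static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array();
```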
@@ -561,7 +560,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_mgmt_cap, ) diff --git a/boards/litex/sim/src/main.rs b/boards/litex/sim/src/main.rs index 21480620a..f74a568f4 100644 --- a/boards/litex/sim/src/main.rs +++ b/boards/litex/sim/src/main.rs @@ -81,8 +81,7 @@ const NUM_PROCS: usize = 4; // Actual memory for holding the active process structures. Need an // empty list at least. -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); // Reference to the chip and UART hardware for panic dumps struct LiteXSimPanicReferences { @@ -634,7 +633,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_mgmt_cap, ) diff --git a/boards/microbit_v2/src/main.rs b/boards/microbit_v2/src/main.rs index 7faed9663..b6e9fd350 100644 --- a/boards/microbit_v2/src/main.rs +++ b/boards/microbit_v2/src/main.rs @@ -65,8 +65,7 @@ const FAULT_RESPONSE: kernel::process::PanicFaultPolicy = kernel::process::Panic // Number of concurrent processes this platform supports. const NUM_PROCS: usize = 4; -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); static mut CHIP: Option<&'static nrf52833::chip::NRF52> = None; static mut PROCESS_PRINTER: Option<&'static kernel::process::ProcessPrinterText> = None; @@ -701,7 +700,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/msp_exp432p401r/src/main.rs b/boards/msp_exp432p401r/src/main.rs index a5b07b837..6d2fe701d 100644 --- a/boards/msp_exp432p401r/src/main.rs +++ b/boards/msp_exp432p401r/src/main.rs @@ -25,8 +25,7 @@ pub mod io; const NUM_PROCS: usize = 4; /// Actual memory for holding the active process structures. -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); /// Static reference to chip for panic dumps. static mut CHIP: Option<&'static msp432::chip::Msp432> = @@ -468,7 +467,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/nano33ble/src/main.rs b/boards/nano33ble/src/main.rs index 729a203ef..96c48e2ea 100644 --- a/boards/nano33ble/src/main.rs +++ b/boards/nano33ble/src/main.rs @@ -88,8 +88,7 @@ const FAULT_RESPONSE: kernel::process::StopWithDebugFaultPolicy = const NUM_PROCS: usize = 8; // State for loading and holding applications. 
-static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); static mut CHIP: Option<&'static nrf52840::chip::NRF52> = None; static mut PROCESS_PRINTER: Option<&'static kernel::process::ProcessPrinterText> = None; @@ -694,7 +693,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/nano_rp2040_connect/src/main.rs b/boards/nano_rp2040_connect/src/main.rs index 707b26da5..5519d99fc 100644 --- a/boards/nano_rp2040_connect/src/main.rs +++ b/boards/nano_rp2040_connect/src/main.rs @@ -57,8 +57,7 @@ const FAULT_RESPONSE: kernel::process::PanicFaultPolicy = kernel::process::Panic // Number of concurrent processes this platform supports. const NUM_PROCS: usize = 4; -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); static mut CHIP: Option<&'static Rp2040> = None; static mut PROCESS_PRINTER: Option<&'static kernel::process::ProcessPrinterText> = None; @@ -549,7 +548,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/nordic/nrf52840_dongle/src/main.rs b/boards/nordic/nrf52840_dongle/src/main.rs index 28848c65c..7f81e0211 100644 --- a/boards/nordic/nrf52840_dongle/src/main.rs +++ b/boards/nordic/nrf52840_dongle/src/main.rs @@ -58,8 +58,7 @@ const FAULT_RESPONSE: kernel::process::PanicFaultPolicy = kernel::process::Panic // Number of concurrent processes this platform supports. const NUM_PROCS: usize = 8; -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); // Static reference to chip for panic dumps static mut CHIP: Option<&'static nrf52840::chip::NRF52> = None; @@ -447,7 +446,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/nordic/nrf52840dk/src/main.rs b/boards/nordic/nrf52840dk/src/main.rs index 17e6dbcd4..8f4ce8b4a 100644 --- a/boards/nordic/nrf52840dk/src/main.rs +++ b/boards/nordic/nrf52840dk/src/main.rs @@ -143,8 +143,7 @@ const FAULT_RESPONSE: kernel::process::PanicFaultPolicy = kernel::process::Panic // Number of concurrent processes this platform supports. 
const NUM_PROCS: usize = 8; -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); static mut CHIP: Option<&'static nrf52840::chip::NRF52> = None; static mut PROCESS_PRINTER: Option<&'static kernel::process::ProcessPrinterText> = None; @@ -717,7 +716,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/nordic/nrf52dk/src/main.rs b/boards/nordic/nrf52dk/src/main.rs index 3319e8045..063abaacc 100644 --- a/boards/nordic/nrf52dk/src/main.rs +++ b/boards/nordic/nrf52dk/src/main.rs @@ -119,7 +119,7 @@ const FAULT_RESPONSE: kernel::process::PanicFaultPolicy = kernel::process::Panic // Number of concurrent processes this platform supports. const NUM_PROCS: usize = 4; -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = [None; 4]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); // Static reference to chip for panic dumps static mut CHIP: Option<&'static nrf52832::chip::NRF52> = None; @@ -479,7 +479,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/nucleo_f429zi/src/main.rs b/boards/nucleo_f429zi/src/main.rs index 630d0c103..818e765da 100644 --- a/boards/nucleo_f429zi/src/main.rs +++ b/boards/nucleo_f429zi/src/main.rs @@ -28,8 +28,7 @@ pub mod io; const NUM_PROCS: usize = 4; // Actual memory for holding the active process structures. -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None, None, None, None]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); static mut CHIP: Option<&'static stm32f429zi::chip::Stm32f4xx> = None; @@ -620,7 +619,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/nucleo_f446re/src/main.rs b/boards/nucleo_f446re/src/main.rs index 8185ccd70..888261388 100644 --- a/boards/nucleo_f446re/src/main.rs +++ b/boards/nucleo_f446re/src/main.rs @@ -32,8 +32,7 @@ mod virtual_uart_rx_test; const NUM_PROCS: usize = 4; // Actual memory for holding the active process structures. -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None, None, None, None]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); // Static reference to chip for panic dumps. static mut CHIP: Option<&'static stm32f446re::chip::Stm32f4xx> = @@ -543,7 +542,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/opentitan/src/main.rs b/boards/opentitan/src/main.rs index 19021d2e3..a0c04e9a1 100644 --- a/boards/opentitan/src/main.rs +++ b/boards/opentitan/src/main.rs @@ -49,7 +49,8 @@ const NUM_PROCS: usize = 4; // // Actual memory for holding the active process structures. Need an empty list // at least. 
-static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; 4] = [None; NUM_PROCS]; +static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; 4] = + kernel::Kernel::init_process_array(); // Test access to the peripherals #[cfg(test)] @@ -768,7 +769,6 @@ unsafe fn setup() -> ( &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_mgmt_cap, ) diff --git a/boards/pico_explorer_base/src/main.rs b/boards/pico_explorer_base/src/main.rs index 7fcf7fc85..1d275f161 100644 --- a/boards/pico_explorer_base/src/main.rs +++ b/boards/pico_explorer_base/src/main.rs @@ -59,8 +59,7 @@ const FAULT_RESPONSE: kernel::process::PanicFaultPolicy = kernel::process::Panic // Number of concurrent processes this platform supports. const NUM_PROCS: usize = 4; -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); static mut CHIP: Option<&'static Rp2040> = None; static mut PROCESS_PRINTER: Option<&'static kernel::process::ProcessPrinterText> = None; @@ -568,7 +567,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/qemu_cheri_virt/Cargo.toml b/boards/qemu_cheri_virt/Cargo.toml new file mode 100644 index 000000000..cd31cb4be --- /dev/null +++ b/boards/qemu_cheri_virt/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "qemu_cheri_virt" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +components = { path = "../components" } +capsules = { path = "../../capsules" } +kernel = { path = "../../kernel" } +misc = { path = "../../libraries/misc" } +uarts = { path = "../../chips/uarts", features = ["ns16550_u8"]} +riscv = { path = "../../arch/riscv"} +cheri = { path = "../../arch/cheri" } +sifive = { path = "../../chips/sifive" } \ No newline at end of file diff --git a/boards/qemu_cheri_virt/Makefile b/boards/qemu_cheri_virt/Makefile new file mode 100644 index 000000000..8ce3bfbcc --- /dev/null +++ b/boards/qemu_cheri_virt/Makefile @@ -0,0 +1,84 @@ +# Makefile for building qemu CHERI virt board + +# If not provided, assume cheri sdk installed in default location +CHERI_SDK ?= $(abspath ${HOME}/cheri/output/sdk) + +# Default 64-bit +BITS ?= 64 + +# Default cheri on (pure/hybrid controls only userspace) +CHERI ?= pure + +# Other config +export NUM_PROCS ?= 4 +export PLIC_BASE_ADDR ?= 0xc000000 +export BAUD_RATE ?=115200 +export N_REGIONS ?= 4 +export STACK_SIZE ?= 0x2000 +export UART0_BASE_ADDR ?= 0x10000000 +export UART0_IRQ ?=10 + +TARGET=riscv${BITS}imac-unknown-none-cheri-hybrid-elf + +# When using "no_std" (which Tock does), rust builds its own compiler_builtins. +# memcpy is broken using upstream rust based compiler builtins because it +# uses usize, not a pointer type (as well as going out of bounds) +# I have a rewritten version, but can't work out how to add that as +# dependency while not also building the upstream compiler builtins. 
diff --git a/boards/qemu_cheri_virt/build.rs b/boards/qemu_cheri_virt/build.rs
new file mode 100644
index 000000000..3051054fc
--- /dev/null
+++ b/boards/qemu_cheri_virt/build.rs
@@ -0,0 +1,4 @@
+fn main() {
+    println!("cargo:rerun-if-changed=layout.ld");
+    println!("cargo:rerun-if-changed=../kernel_layout.ld");
+}
diff --git a/boards/qemu_cheri_virt/layout.ld b/boards/qemu_cheri_virt/layout.ld
new file mode 100644
index 000000000..aead65c48
--- /dev/null
+++ b/boards/qemu_cheri_virt/layout.ld
@@ -0,0 +1,15 @@
+/* The QEMU virt board */
+
+MEMORY
+{
+    rom (rx)  : ORIGIN = 0x20000000, LENGTH = 0x400000
+    prog (rx) : ORIGIN = 0x20400000, LENGTH = 0x4000000-0x400000
+    ram (rwx) : ORIGIN = 0x80000000, LENGTH = 0x800000
+}
+
+MPU_MIN_ALIGN = 1;
+WORD_SIZE = 16;
+
+INCLUDE ../kernel_layout.ld
diff --git a/boards/qemu_cheri_virt/src/main.rs b/boards/qemu_cheri_virt/src/main.rs
new file mode 100644
index 000000000..979c2d3af
--- /dev/null
+++ b/boards/qemu_cheri_virt/src/main.rs
@@ -0,0 +1,544 @@
+//! Board file for the qemu CHERI virt board
+
+#![no_std]
+#![feature(macro_metavar_expr, const_mut_refs, const_trait_impl)]
+#![cfg_attr(not(doc), no_main)]
+
+use capsules::{
+    console_zero::{self, Console},
+    virtual_alarm::VirtualMuxAlarm,
+    virtual_uart_zero::mux3,
+    virtual_uart_zero::mux3::UartMuxComponent,
+};
+use cheri::cheri_mpu::CheriMPU;
+use components::{
+    alarm::{AlarmDriverComponent, AlarmMuxComponent, VirtualSchedulerTimerComponent},
+    debug_writer::LegacyDebugWriterComponent,
+    sched::round_robin::StaticRoundRobinComponent,
+};
+use core::fmt::Write;
+use core::panic::PanicInfo;
+use kernel::{
+    capabilities, create_capability, create_static_capability, debug,
+    debug::IoWrite,
+    dynamic_deferred_call::ProtoDynamicDeferredCallSized,
+    hil::{uart, uart::LegacyTransmitComponent, uart::ZeroTransmitLegacyWrapper},
+    platform::chip::Chip,
+    platform::chip::InterruptService,
+    platform::scheduler_timer::VirtualSchedulerTimer,
+    platform::KernelResources,
+    process::ProcessPrinterText,
+    scheduler::round_robin::RoundRobinSched,
+    utilities::singleton_checker::SingletonChecker,
+    utilities::StaticRef,
+    Kernel, ProtoKernel,
+};
+use misc::const_env_int;
+use riscv::plic::Plic;
+use riscv::plic::PlicRegisters;
+use uarts::ns16550::UartRegisters;
+use uarts::ns16550::ZeroUartComponent;
+
+// Config
+const_env_int!(PLIC_BASE_ADDR: usize);
+const_env_int!(pub NUM_PROCS : usize);
+const_env_int!(UART0_BASE_ADDR: usize);
+const_env_int!(BAUD_RATE: u32);
+const_env_int!(STACK_SIZE: usize);
+const_env_int!(pub UART0_IRQ : u32);
+
+static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array();
+
+/// Placeholder buffer that causes the linker to reserve enough space for the stack.
+#[no_mangle]
+#[link_section = ".stack_buffer"]
+pub static mut STACK_MEMORY: [u8; STACK_SIZE] = [0; STACK_SIZE];
+
+// How the kernel should respond when a process faults.
+const FAULT_RESPONSE: kernel::process::PanicFaultPolicy = kernel::process::PanicFaultPolicy {};
+
+pub const UART0_BASE: StaticRef<UartRegisters> =
+    unsafe { StaticRef::new(UART0_BASE_ADDR as *const UartRegisters) };
+pub const PLIC_BASE: StaticRef<PlicRegisters<PLIC_REGS>> =
+    unsafe { StaticRef::new(PLIC_BASE_ADDR as *const PlicRegisters<PLIC_REGS>) };
+
+const N_IRQS: usize = 128;
+const PLIC_REGS: usize = riscv::plic::plic_regs_for_n_irqs(N_IRQS);
+
+const MAX_DEFERRED_CALLERS: usize = 2;
+
+// TODO: use the CLINT rather than no timer at all
+type TimerComponent = kernel::component::NoComponent;
+type Timer = ();
+
+kernel::define_components!(structs Components, CState, CStateMut {
+    // Core kernel things
+    kernel : Kernel,
+    mpu : CheriMPU,
+    process_printer: ProcessPrinterText,
+    scheduler : StaticRoundRobinComponent::<NUM_PROCS>,
+    scheduler_timer : VirtualSchedulerTimerComponent::<Timer>,
+    userspace_kernel_boundary: riscv::syscall::SysCall,
+    timer : TimerComponent,
+    // Alarms
+    alarm_mux : AlarmMuxComponent<Timer>,
+    alarm_capsule : AlarmDriverComponent<Timer>,
+    // Printing things
+    uart_and_console : ZeroUartComponent + >>,
+    // PLIC. Also good to come pretty late, as it will enable interrupts.
+    plic : Plic,
+    // Deferred calls. Construct last to make sure they have all been registered.
+    dyn_def : kernel::dynamic_deferred_call::DynamicDeferredCallComponent::<MAX_DEFERRED_CALLERS>,
+});
+
+/// Root structure
+struct Platform {
+    /// All components
+    components: Components,
+    /// Shared state
+    state: CState,
+    /// Owned state
+    state_mut: CStateMut,
+}
+
+create_static_capability!(static PROC_MNG_CAP : MCap = capabilities::ProcessManagementCapability);
+
+static mut BOARD: Platform = {
+    // Safety: this is the only time in init we take the reference to the platform
+    let slf = unsafe { &mut BOARD };
+
+    // Safety: we never take mut references to processes
+    let procs = unsafe { &PROCESSES };
+
+    // Make references to the individual elements; we don't need the top-level one
+    // (and it would alias with the mutable part anyway)
+    let c_state_mut = &mut slf.state_mut;
+    let c_state = &slf.state;
+    let components = &slf.components;
+    let board_kernel = &components.kernel;
+
+    // Safety: only one instance of this checker should be made. We do so only in
+    // top-level board logic.
+    let mut checker = unsafe { SingletonChecker::new_sized::<100>() };
+    let chk = checker.as_unsized();
+
+    // This builds the kernel object
+    let (proto, counter) = ProtoKernel::new(chk);
+
+    // This builds the dynamic deferred object
+    let deferred = ProtoDynamicDeferredCallSized::<MAX_DEFERRED_CALLERS>::new();
+
+    // Capabilities that we only need for construction (the ones that get held are globals)
+    let memory_allocation_cap = create_capability!(capabilities::MemoryAllocationCapability);
+
+    // Grants for capsules
+    kernel::construct_grants!(board_kernel, proto, counter, &memory_allocation_cap, {
+        alarm_grant : capsules::alarm::DRIVER_NUM,
+        console_grant : capsules::console::DRIVER_NUM,
+        lldb_grant : capsules::low_level_debug::DRIVER_NUM,
+        cheri_grant : capsules::driver::NUM::MMU as usize,
+    });
+
+    // This gunk builds a non-zero-copy bridge for the debug writer
+    let bridge =
+        ZeroTransmitLegacyWrapper::transmitter_as_legacy(mux3::c(&slf.components.uart_and_console));
+
+    // Build all components
+    construct_components!(
+        let components, component_state, component_state_mut = &slf.components, c_state, c_state_mut,
+        {
+            (procs, counter), // kernel
+            (cheri_grant, board_kernel, &PROC_MNG_CAP), // MPU,
+            (), // printer
+            procs, // sched
+            &slf.components.alarm_mux, // sched_timer
+            chk, // boundary
+            (), // timer
+            &slf.components.timer, // alarm mux
+            (&slf.components.alarm_mux, alarm_grant), // alarm capsule
+            ( // uart
+                ( // mux
+                    console_grant, // console
+                    lldb_grant, // lldb
+                    bridge, // debug
+                    (), // mux
+                ),
+                UART0_BASE // uart
+            ),
+            PLIC_BASE, // PLIC
+            (deferred, chk), // dynamic deferred calls
+        }
+    );
+
+    Platform {
+        components,
+        state: component_state,
+        state_mut: component_state_mut,
+    }
+};
+
+#[no_mangle]
+fn main() {
+    // Machine mode only
+    unsafe {
+        riscv::configure_trap_handler(riscv::PermissionMode::Machine);
+    }
+
+    // If we have a PMP, we must enable at least one entry, even if another protection
+    // mechanism is in use. This is because RISC-V defines that, when no PMP entry
+    // matches, an access fails in every mode but M-mode.
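+    // (Sketch of what such a helper presumably programs, assuming the obvious
+    //  implementation: pmpaddr0 = all ones and pmpcfg0 = TOR | R | W | X, i.e. a
+    //  single entry spanning the whole address space that permits everything, so
+    //  S/U-mode accesses are not rejected by an otherwise empty PMP.)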
+    riscv::pmp::pmp_permit_all();
+
+    let components = unsafe { &BOARD.components };
+    let state = unsafe { &BOARD.state };
+    let board_kernel = &components.kernel;
+
+    // initialize capabilities
+    let process_mgmt_cap = create_capability!(capabilities::ProcessManagementCapability);
+    let main_loop_cap = create_capability!(capabilities::MainLoopCapability);
+
+    finalize_components!(components, state, {
+        (), // kernel
+        (), // mpu,
+        (), // process printer
+        (), // sched
+        (), // sched_timer
+        (), // boundary
+        (), // timer,
+        &components.timer, // alarm mux
+        (), // alarm capsule
+        ( // uart+clients
+            (
+                (), // console
+                (), // lldb
+                (), // debug
+                (), // mux
+            ),
+            uart::Parameters { // uart
+                baud_rate: BAUD_RATE,
+                width: uart::Width::Eight,
+                stop_bits: uart::StopBits::One,
+                parity: uart::Parity::None,
+                hw_flow_control: false,
+            },
+        ),
+        true, // plic,
+        (), // dyn_def
+    });
+
+    debug!("CHERI platform initialization complete.");
+
+    let (flash, mem) = unsafe { kernel::process::get_mems() };
+
+    let _ = kernel::process::load_processes_advanced(
+        board_kernel,
+        components,
+        flash,
+        mem,
+        &FAULT_RESPONSE,
+        true,
+        &process_mgmt_cap,
+    )
+    .unwrap_or_else(|err| {
+        debug!("Error loading processes!");
+        debug!("{:?}", err);
+        None
+    });
+
+    debug!("Entering main loop.");
+    board_kernel.kernel_loop::<_, _, { NUM_PROCS as u8 }>(
+        components,
+        components,
+        None,
+        &main_loop_cap,
+    );
+}
+
+impl kernel::platform::SyscallDriverLookup for Components {
+    fn with_driver<F, R>(&self, driver_num: usize, f: F) -> R
+    where
+        F: FnOnce(Option<&dyn kernel::syscall::SyscallDriver>) -> R,
+    {
+        use capsules::low_level_debug::LowLevelDebugZero;
+        const MMU_NUM: usize = capsules::driver::NUM::MMU as usize;
+        f(match driver_num {
+            capsules::console::DRIVER_NUM => {
+                Some(Console::get_syscall_driver(mux3::a(&self.uart_and_console)))
+            }
+            capsules::alarm::DRIVER_NUM => Some(&self.alarm_capsule),
+            capsules::low_level_debug::DRIVER_NUM => Some(LowLevelDebugZero::get_syscall_driver(
+                mux3::b(&self.uart_and_console),
+            )),
+            MMU_NUM => Some(&self.mpu),
+            _ => None,
+        })
+    }
+}
+
+impl kernel::platform::chip::InterruptService<()> for Components {
+    unsafe fn service_interrupt(&self, interrupt: u32) -> bool {
+        match interrupt {
+            UART0_IRQ => self.uart_and_console.handle_interrupt(),
+            _ => return false,
+        }
+        true
+    }
+
+    unsafe fn service_deferred_call(&self, _: ()) -> bool {
+        false
+    }
+}
+
+use kernel::utilities::registers::interfaces::{ReadWriteable, Readable};
+use riscv::csr::mie::mie;
+use riscv::csr::{mcause, CSR};
+
+impl KernelResources<Components> for Components {
+    type SyscallDriverLookup = Self;
+    type SyscallFilter = ();
+    type ProcessFault = ();
+    type Scheduler = RoundRobinSched<'static>;
+    type SchedulerTimer = VirtualSchedulerTimer<VirtualMuxAlarm<'static, Timer>>;
+    type WatchDog = ();
+    type ContextSwitchCallback = ();
+
+    fn syscall_driver_lookup(&self) -> &Self::SyscallDriverLookup {
+        &self
+    }
+    fn syscall_filter(&self) -> &Self::SyscallFilter {
+        &()
+    }
+    fn process_fault(&self) -> &Self::ProcessFault {
+        &()
+    }
+    fn scheduler(&self) -> &Self::Scheduler {
+        self.scheduler.get_sched()
+    }
+    fn scheduler_timer(&self) -> &Self::SchedulerTimer {
+        &self.scheduler_timer
+    }
+    fn watchdog(&self) -> &Self::WatchDog {
+        &()
+    }
+    fn context_switch_callback(&self) -> &Self::ContextSwitchCallback {
+        &()
+    }
+}
+
+impl Chip for Components {
+    type MPU = CheriMPU;
+
+    type UserspaceKernelBoundary = riscv::syscall::SysCall;
+
+    fn service_pending_interrupts(&self) {
+        use riscv::csr::mip::mip;
+
+        while self.has_pending_interrupts() {
+            let mip = CSR.mip.extract();
+
+            if mip.is_set(mip::mtimer) {
+                //self.timer.handle_interrupt();
+            }
+
+            if self.plic.has_pending() {
+                unsafe {
+                    let mut ctr = 0;
+                    while let Some(interrupt) = self.plic.get_saved_interrupts() {
+                        ctr += 1;
+                        if ctr == 100 {
+                            debug!("WARN: Interrupt storm detected. Possibly one of your devices is not acking interrupts properly");
+                        }
+                        if !self.service_interrupt(interrupt) {
+                            debug!("Unexpected IRQ: {}", interrupt);
+                            self.plic.disable_and_complete(interrupt);
+                        } else {
+                            self.atomic(|| {
+                                self.plic.complete(interrupt);
+                            });
+                        }
+                    }
+                }
+            }
+        }
+
+        // Re-enable all MIE interrupts that we care about. Since we looped
+        // until we handled them all, we can re-enable all of them.
+        CSR.mie.modify(mie::mext::SET + mie::mtimer::SET);
+    }
+
+    fn has_pending_interrupts(&self) -> bool {
+        use riscv::csr::mip::mip;
+        // First check if the global machine timer interrupt is set.
+        // We would also need to check additional global interrupt bits
+        // if they were to be used for anything in the future.
+        if CSR.mip.is_set(mip::mtimer) {
+            return true;
+        }
+
+        // Then we can check the PLIC.
+        self.plic.has_pending()
+    }
+
+    fn mpu(&self) -> &Self::MPU {
+        &self.mpu
+    }
+
+    fn userspace_kernel_boundary(&self) -> &riscv::syscall::SysCall {
+        &self.userspace_kernel_boundary
+    }
+
+    fn sleep(&self) {
+        unsafe {
+            riscv::support::wfi();
+        }
+    }
+
+    unsafe fn atomic<F, R>(&self, f: F) -> R
+    where
+        F: FnOnce() -> R,
+    {
+        riscv::support::atomic(f)
+    }
+
+    unsafe fn print_state(&self, writer: &mut dyn Write) {
+        unsafe {
+            riscv::print_riscv_state(writer);
+        }
+    }
+}
+
+struct Writer {}
+
+static mut WRITER: Writer = Writer {};
+
+impl Write for Writer {
+    fn write_str(&mut self, s: &str) -> ::core::fmt::Result {
+        self.write(s.as_bytes());
+        Ok(())
+    }
+}
+
+impl IoWrite for Writer {
+    fn write(&mut self, buf: &[u8]) {
+        let uart = unsafe { &BOARD.components.uart_and_console };
+        uart.transmit_sync(buf);
+    }
+}
+
+pub fn panic_reset() -> ! {
+    #[allow(dead_code)]
+    pub enum SifiveShutdownStatus {
+        FinisherFail = 0x3333,
+        FinisherPass = 0x5555,
+        FinisherReset = 0x7777,
+    }
+
+    pub fn reset_sifive(status: SifiveShutdownStatus, code: u16) -> ! {
+        unsafe {
+            let val: u32 = (status as u32) | ((code as u32) << 16);
+            let reset_ptr: *mut u32 = 0x100000 as *mut u32;
+            *reset_ptr = val;
+
+            loop {
+                riscv::support::wfi()
+            }
+        }
+    }
+
+    reset_sifive(SifiveShutdownStatus::FinisherFail, 1)
+}
+
+/// Panic handler.
+#[cfg(not(test))]
+#[no_mangle]
+#[panic_handler]
+pub unsafe extern "C" fn panic_fmt(pi: &PanicInfo) -> !
{ + let writer = &mut WRITER; + + debug::panic_print_2( + writer, + pi, + &riscv::support::nop, + &PROCESSES, + Some(&BOARD.components), + Some(&BOARD.components.process_printer), + ); + + panic_reset() +} + +fn handle_exception(exception: mcause::Exception) { + match exception { + mcause::Exception::UserEnvCall | mcause::Exception::SupervisorEnvCall => (), + _ => { + panic!("fatal exception {}", exception as u32); + } + } +} + +unsafe fn handle_interrupt(intr: mcause::Interrupt) { + match intr { + mcause::Interrupt::UserSoft + | mcause::Interrupt::UserTimer + | mcause::Interrupt::UserExternal => { + panic!("unexpected user-mode interrupt"); + } + mcause::Interrupt::SupervisorExternal + | mcause::Interrupt::SupervisorTimer + | mcause::Interrupt::SupervisorSoft => { + panic!("unexpected supervisor-mode interrupt"); + } + + mcause::Interrupt::MachineSoft => { + CSR.mie.modify(mie::msoft::CLEAR); + } + mcause::Interrupt::MachineTimer => { + CSR.mie.modify(mie::mtimer::CLEAR); + } + mcause::Interrupt::MachineExternal => { + // We received an interrupt, disable interrupts while we handle them + CSR.mie.modify(mie::mext::CLEAR); + + // Set to handle later + BOARD.components.plic.save_pending(); + } + + mcause::Interrupt::Unknown => { + panic!("interrupt of unknown cause"); + } + } +} + +/// Trap handler for board/chip specific code. +/// +#[export_name = "_start_trap_rust_from_kernel"] +pub unsafe extern "C" fn start_trap_rust() { + match mcause::Trap::from(CSR.mcause.extract()) { + mcause::Trap::Interrupt(interrupt) => { + handle_interrupt(interrupt); + } + mcause::Trap::Exception(exception) => { + handle_exception(exception); + } + } +} + +/// Function that gets called if an interrupt occurs while an app was running. +/// mcause is passed in, and this function should correctly handle disabling the +/// interrupt that fired so that it does not trigger again. +#[export_name = "_disable_interrupt_trap_rust_from_app"] +pub unsafe extern "C" fn disable_interrupt_trap_handler(mcause_val: usize) { + match mcause::Trap::from(mcause_val as usize) { + mcause::Trap::Interrupt(interrupt) => { + handle_interrupt(interrupt); + } + _ => { + panic!("unexpected non-interrupt mcause: {}", mcause_val); + } + } +} diff --git a/boards/raspberry_pi_pico/src/main.rs b/boards/raspberry_pi_pico/src/main.rs index 73a845adf..34aba68f0 100644 --- a/boards/raspberry_pi_pico/src/main.rs +++ b/boards/raspberry_pi_pico/src/main.rs @@ -62,8 +62,7 @@ const FAULT_RESPONSE: kernel::process::PanicFaultPolicy = kernel::process::Panic // Number of concurrent processes this platform supports. const NUM_PROCS: usize = 4; -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); static mut CHIP: Option<&'static Rp2040> = None; static mut PROCESS_PRINTER: Option<&'static kernel::process::ProcessPrinterText> = None; @@ -529,7 +528,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/redboard_artemis_nano/src/main.rs b/boards/redboard_artemis_nano/src/main.rs index 7b1d8132c..13b217dea 100644 --- a/boards/redboard_artemis_nano/src/main.rs +++ b/boards/redboard_artemis_nano/src/main.rs @@ -38,7 +38,7 @@ mod tests; const NUM_PROCS: usize = 4; // Actual memory for holding the active process structures. 
-static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = [None; 4]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); // Static reference to chip for panic dumps. static mut CHIP: Option<&'static apollo3::chip::Apollo3> = None; @@ -385,7 +385,6 @@ unsafe fn setup() -> ( &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_mgmt_cap, ) diff --git a/boards/redboard_redv/src/main.rs b/boards/redboard_redv/src/main.rs index 4e2174e05..c80ea2c96 100644 --- a/boards/redboard_redv/src/main.rs +++ b/boards/redboard_redv/src/main.rs @@ -29,8 +29,7 @@ pub const NUM_PROCS: usize = 4; // // Actual memory for holding the active process structures. Need an empty list // at least. -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); // Reference to the chip for panic dumps. static mut CHIP: Option<&'static e310x::chip::E310x> = None; @@ -292,7 +291,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_mgmt_cap, ) diff --git a/boards/stm32f3discovery/src/main.rs b/boards/stm32f3discovery/src/main.rs index 784e59091..5a1032b2d 100644 --- a/boards/stm32f3discovery/src/main.rs +++ b/boards/stm32f3discovery/src/main.rs @@ -35,8 +35,7 @@ mod virtual_uart_rx_test; const NUM_PROCS: usize = 4; // Actual memory for holding the active process structures. -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None, None, None, None]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); // Static reference to chip for panic dumps. static mut CHIP: Option<&'static stm32f303xc::chip::Stm32f3xx> = None; @@ -828,7 +827,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/stm32f412gdiscovery/src/main.rs b/boards/stm32f412gdiscovery/src/main.rs index 8e9986dc1..40f74134b 100644 --- a/boards/stm32f412gdiscovery/src/main.rs +++ b/boards/stm32f412gdiscovery/src/main.rs @@ -28,8 +28,7 @@ pub mod io; const NUM_PROCS: usize = 4; // Actual memory for holding the active process structures. -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None, None, None, None]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); static mut CHIP: Option<&'static stm32f412g::chip::Stm32f4xx> = None; static mut PROCESS_PRINTER: Option<&'static kernel::process::ProcessPrinterText> = None; @@ -817,7 +816,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/stm32f429idiscovery/src/main.rs b/boards/stm32f429idiscovery/src/main.rs index a8c80db64..92d54b2f9 100644 --- a/boards/stm32f429idiscovery/src/main.rs +++ b/boards/stm32f429idiscovery/src/main.rs @@ -28,8 +28,7 @@ pub mod io; const NUM_PROCS: usize = 4; // Actual memory for holding the active process structures. 
-static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None, None, None, None]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); static mut CHIP: Option<&'static stm32f429zi::chip::Stm32f4xx> = None; @@ -626,7 +625,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/swervolf/src/main.rs b/boards/swervolf/src/main.rs index 76152abc5..26cb399c0 100644 --- a/boards/swervolf/src/main.rs +++ b/boards/swervolf/src/main.rs @@ -26,8 +26,7 @@ pub const NUM_PROCS: usize = 4; // // Actual memory for holding the active process structures. Need an empty list // at least. -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); // Reference to the chip for panic dumps. static mut CHIP: Option<&'static swervolf_eh1::chip::SweRVolf> = None; @@ -244,7 +243,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_mgmt_cap, ) diff --git a/boards/teensy40/src/main.rs b/boards/teensy40/src/main.rs index 199ca6210..db232c598 100644 --- a/boards/teensy40/src/main.rs +++ b/boards/teensy40/src/main.rs @@ -26,8 +26,7 @@ use kernel::{create_capability, static_init}; const NUM_PROCS: usize = 4; /// Actual process memory -static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None; NUM_PROCS]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); /// What should we do if a process faults? const FAULT_RESPONSE: kernel::process::PanicFaultPolicy = kernel::process::PanicFaultPolicy {}; @@ -355,7 +354,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/boards/weact_f401ccu6/src/main.rs b/boards/weact_f401ccu6/src/main.rs index 4a4db4114..02d9eb375 100644 --- a/boards/weact_f401ccu6/src/main.rs +++ b/boards/weact_f401ccu6/src/main.rs @@ -27,8 +27,7 @@ pub mod io; const NUM_PROCS: usize = 4; // Actual memory for holding the active process structures. 
-static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = - [None, None, None, None]; +static mut PROCESSES: kernel::ProcessArray = kernel::Kernel::init_process_array(); static mut CHIP: Option<&'static stm32f401cc::chip::Stm32f4xx> = None; @@ -483,7 +482,6 @@ pub unsafe fn main() { &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), - &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) diff --git a/capsules/Cargo.toml b/capsules/Cargo.toml index 65301bcf1..448b3ea59 100644 --- a/capsules/Cargo.toml +++ b/capsules/Cargo.toml @@ -7,4 +7,5 @@ edition = "2021" [dependencies] kernel = { path = "../kernel" } enum_primitive = { path = "../libraries/enum_primitive" } +misc = { path = "../libraries/misc" } tickv = { path = "../libraries/tickv" } diff --git a/capsules/examples/traitobj_list.rs b/capsules/examples/traitobj_list.rs index 9792a0106..d06b8d083 100644 --- a/capsules/examples/traitobj_list.rs +++ b/capsules/examples/traitobj_list.rs @@ -24,11 +24,11 @@ use kernel::debug; pub trait Funky<'a>: 'a { fn name(&self) -> &'static str; - fn next_funky_thing(&'a self) -> &'a ListLink<'a, dyn Funky<'a>>; + fn next_funky_thing(&self) -> &ListLink<'a, dyn Funky<'a>>; } impl<'a> ListNode<'a, dyn Funky<'a>> for dyn Funky<'a> { - fn next(&'a self) -> &'a ListLink<'a, dyn Funky<'a>> { + fn next(&self) -> &ListLink<'a, dyn Funky<'a>> { &self.next_funky_thing() } } @@ -74,7 +74,7 @@ impl<'a> Funky<'a> for Jazz<'a> { "Jazz" } - fn next_funky_thing(&'a self) -> &'a ListLink<'a, dyn Funky<'a>> { + fn next_funky_thing(&self) -> &ListLink<'a, dyn Funky<'a>> { &self.next } } @@ -96,7 +96,7 @@ impl<'a> Funky<'a> for Cheese<'a> { fn name(&self) -> &'static str { "Cheese" } - fn next_funky_thing(&'a self) -> &'a ListLink<'a, dyn Funky<'a>> { + fn next_funky_thing(&self) -> &ListLink<'a, dyn Funky<'a>> { &self.next } } diff --git a/capsules/src/alarm.rs b/capsules/src/alarm.rs index f22e7ed00..bcc1ae4b1 100644 --- a/capsules/src/alarm.rs +++ b/capsules/src/alarm.rs @@ -34,18 +34,17 @@ impl Default for AlarmData { } } +pub type AlarmGrant = Grant, AllowRoCount<0>, AllowRwCount<0>>; + pub struct AlarmDriver<'a, A: Alarm<'a>> { alarm: &'a A, num_armed: Cell, - app_alarms: Grant, AllowRoCount<0>, AllowRwCount<0>>, + app_alarms: AlarmGrant, next_alarm: Cell, } impl<'a, A: Alarm<'a>> AlarmDriver<'a, A> { - pub const fn new( - alarm: &'a A, - grant: Grant, AllowRoCount<0>, AllowRwCount<0>>, - ) -> AlarmDriver<'a, A> { + pub const fn new(alarm: &'a A, grant: AlarmGrant) -> AlarmDriver<'a, A> { AlarmDriver { alarm: alarm, num_armed: Cell::new(0), diff --git a/capsules/src/console.rs b/capsules/src/console.rs index d470f8fce..7e58c81a1 100644 --- a/capsules/src/console.rs +++ b/capsules/src/console.rs @@ -80,14 +80,16 @@ pub struct App { read_len: usize, } +pub type ConsoleGrant = Grant< + App, + UpcallCount<3>, + AllowRoCount<{ ro_allow::COUNT }>, + AllowRwCount<{ rw_allow::COUNT }>, +>; + pub struct Console<'a> { uart: &'a dyn uart::UartData<'a>, - apps: Grant< - App, - UpcallCount<3>, - AllowRoCount<{ ro_allow::COUNT }>, - AllowRwCount<{ rw_allow::COUNT }>, - >, + apps: ConsoleGrant, tx_in_progress: OptionalCell, tx_buffer: TakeCell<'static, [u8]>, rx_in_progress: OptionalCell, @@ -95,16 +97,11 @@ pub struct Console<'a> { } impl<'a> Console<'a> { - pub fn new( + pub const fn new( uart: &'a dyn uart::UartData<'a>, tx_buffer: &'static mut [u8], rx_buffer: &'static mut [u8], - grant: Grant< - App, - UpcallCount<3>, - AllowRoCount<{ 
ro_allow::COUNT }>,
-            AllowRwCount<{ rw_allow::COUNT }>,
-        >,
+        grant: ConsoleGrant,
     ) -> Console<'a> {
         Console {
             uart: uart,
diff --git a/capsules/src/console_zero.rs b/capsules/src/console_zero.rs
new file mode 100644
index 000000000..675639dcf
--- /dev/null
+++ b/capsules/src/console_zero.rs
@@ -0,0 +1,236 @@
+use core::cell::Cell;
+use kernel::collections::list::{GenListNode, ListLinkGen, PRefListLink, PRefQueue};
+use kernel::grant::{ARefNoClone, AllowRoCount, AllowRwCount, Grant, LivePRef, PRef, UpcallCount};
+use kernel::hil::uart::{ZeroTransmit, ZeroTransmitClient};
+use kernel::processbuffer::ReadableProcessSlice;
+use kernel::syscall::{CommandReturn, SyscallDriver};
+use kernel::upcall::PUpcall;
+use kernel::{very_simple_component, ErrorCode, ProcessId};
+
+/// Syscall driver number.
+use crate::driver;
+pub const DRIVER_NUM: usize = driver::NUM::Console as usize;
+
+/// Ids for read-only allow buffers
+mod ro_allow {
+    pub const WRITE: usize = 1;
+    /// The number of allow buffers the kernel stores for this grant
+    pub const COUNT: u8 = 2;
+}
+
+/// Ids for read-write allow buffers
+mod rw_allow {
+    /// The number of allow buffers the kernel stores for this grant
+    pub const COUNT: u8 = 2;
+}
+
+// Prefer NoClone as it transmutes more freely with owned buffers from other clients that
+// might be muxed with this one.
+pub type Buffer = ARefNoClone<ReadableProcessSlice>;
+
+#[derive(Default)]
+pub struct App {
+    // Link for queue
+    link: PRefListLink<App>,
+    // Callback
+    transmit_callback: Cell<PUpcall>,
+    // Data
+    transmit_data: Cell<Buffer>,
+}
+
+// TODO the intrusive linked list above needs to remove the link when dropped.
+// this requires finishing the app closing mechanism, which should
+// (a) call a pre-drop method on each grant to let the owner know it is being dropped
+// (b) call drop on the type
+
+impl GenListNode<PRef<App>> for App {
+    fn next(&self) -> &ListLinkGen<PRef<App>> {
+        &self.link
+    }
+}
+
+type ConsoleGrant = Grant<
+    App,
+    UpcallCount<3>,
+    AllowRoCount<{ ro_allow::COUNT }>,
+    AllowRwCount<{ rw_allow::COUNT }>,
+>;
+
+pub struct Console {
+    apps: ConsoleGrant,
+    transmit_queue: PRefQueue<App>,
+}
+
+impl ZeroTransmitClient for Console {
+    type Buf = Buffer;
+
+    fn transmit_finish<Transmit: ZeroTransmit<Self>>(
+        transmitter: &Transmit,
+        buf: Self::Buf,
+        res: Result<(), ErrorCode>,
+    ) {
+        Console::finish_put(transmitter, buf, res)
+    }
+}
+
+impl Console {
+    pub const fn new(grant: ConsoleGrant) -> Self {
+        Console {
+            apps: grant,
+            transmit_queue: PRefQueue::<App>::new(),
+        }
+    }
+
+    fn buf_len(buf: Buffer) -> usize {
+        buf.with_live(|x| match x {
+            None => 0,
+            Some(buf) => buf.len(),
+        })
+    }
+
+    fn finish_put<Transmit: ZeroTransmit<Self>>(
+        transmitter: &Transmit,
+        buf: Buffer,
+        result: Result<(), ErrorCode>,
+    ) {
+        let slf = transmitter.get_client();
+        // Callback on head of list
+        if let Some(head) = slf.transmit_queue.pop_head() {
+            if let Some(live) = head.try_into_live() {
+                let result = match result {
+                    Ok(_) => Self::buf_len(buf),
+                    Err(_) => kernel::errorcode::into_statuscode(result),
+                };
+                // Should we really ignore this? What is meant to happen on this kind of
+                // pushback? Kernel error?
+                let _ = live.transmit_callback.get().schedule(result, 0, 0);
+            }
+        }
+        // Peek to see if there is another
+        if let Some(head) = slf.transmit_queue.peek_head() {
+            if let Some(live) = head.try_into_live() {
+                Self::start_print(transmitter, live)
+            }
+        }
+    }
+
+    fn start_print<Transmit: ZeroTransmit<Self>>(transmitter: &Transmit, app: LivePRef<App>) {
+        let data = app.transmit_data.take();
+        match transmitter.transmit(data) {
+            Ok(Some(data)) => {
+                // Call finish ourselves
+                Self::finish_put(transmitter, data, Ok(()))
+            }
+            Ok(None) => {
+                // Nothing to do, handled on callback
+            }
+            Err((buf, er)) => Self::finish_put(transmitter, buf, Err(er)),
+        }
+    }
+
+    fn do_command<Transmit: ZeroTransmit<Self>>(
+        transmitter: &Transmit,
+        cmd_num: usize,
+        arg1: usize,
+        _: usize,
+        appid: ProcessId,
+    ) -> Result<CommandReturn, ErrorCode> {
+        let slf = transmitter.get_client();
+        let process_grant = slf.apps.get_for(appid)?;
+        let grant_data = process_grant.get_grant_data();
+        let kern_data = process_grant.get_kern_data();
+        let app_data = grant_data.get_pref().ok_or(ErrorCode::BUSY)?;
+        match cmd_num {
+            0 => {}
+            1 => {
+                // putstr
+                if app_data.link.is_some() {
+                    // Already in queue
+                    return Err(ErrorCode::BUSY);
+                }
+
+                let buf = kern_data.get_readonly_aref(ro_allow::WRITE)?;
+
+                if arg1 != buf.len() {
+                    return Err(ErrorCode::SIZE);
+                }
+
+                // Store previous allows
+                app_data.transmit_callback.set(kern_data.get_upcall(1));
+                app_data.transmit_data.set(buf.as_noclone().into());
+                // Then enqueue
+                let some = slf.transmit_queue.is_some();
+                slf.transmit_queue.push_tail(app_data.into());
+                if !some {
+                    // Handle now as nothing else was in the queue
+                    Self::start_print(transmitter, app_data);
+                }
+            }
+            2 => {
+                // getnstr
+                return Err(ErrorCode::NOSUPPORT);
+            }
+            3 => {
+                // Abort RX
+                return Err(ErrorCode::NOSUPPORT);
+            }
+            _ => return Err(ErrorCode::NOSUPPORT),
+        }
+        Ok(CommandReturn::success())
+    }
+
+    pub const fn get_syscall_driver<Transmitter: ZeroTransmit<Self>>(
+        t: &Transmitter,
+    ) -> &ConsoleSyscallDriver<Transmitter> {
+        ConsoleSyscallDriver::get(t)
+    }
+}
+
+misc::overload_impl!(ConsoleSyscallDriver);
+
+impl<Transmit: ZeroTransmit<Console>> SyscallDriver for ConsoleSyscallDriver<Transmit> {
+    /// Setup shared buffers.
+    ///
+    /// ### `allow_num`
+    ///
+    /// - `1`: Writeable buffer for read buffer
+
+    /// Setup shared buffers.
+    ///
+    /// ### `allow_num`
+    ///
+    /// - `1`: Readonly buffer for write buffer
+
+    // Setup callbacks.
+    //
+    // ### `subscribe_num`
+    //
+    // - `1`: Write buffer completed callback
+    // - `2`: Read buffer completed callback
+
+    /// Initiate serial transfers
+    ///
+    /// ### `command_num`
+    ///
+    /// - `0`: Driver check.
+    /// - `1`: Transmits a buffer passed via `allow`, up to the length
+    ///        passed in `arg1`
+    /// - `2`: Receives into a buffer passed via `allow`, up to the length
+    ///        passed in `arg1`
+    /// - `3`: Cancel any in progress receives and return (via callback)
+    ///        what has been received so far.
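+    ///
+    /// A typical userspace sequence for `putstr` (sketch; only the numbers that
+    /// appear in `do_command` above are assumed):
+    ///
+    /// 1. `allow_ro(DRIVER_NUM, 1, buf)`   — share the bytes to print
+    /// 2. `subscribe(DRIVER_NUM, 1, cb)`   — register the completion upcall
+    /// 3. `command(DRIVER_NUM, 1, len, 0)` — start the transmit; `len` must equal
+    ///    the allowed buffer's length or SIZE is returned
+    /// 4. Wait for the upcall; its first argument is the number of bytes written
+    ///    on success, or an error status code otherwise.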
+ fn command(&self, cmd_num: usize, arg1: usize, arg2: usize, appid: ProcessId) -> CommandReturn { + match Console::do_command(&self.inner, cmd_num, arg1, arg2, appid) { + Ok(command) => command, + Err(er) => CommandReturn::failure(er), + } + } + + fn allocate_grant(&self, processid: ProcessId) -> Result<(), kernel::process::Error> { + self.inner.get_client().apps.enter(processid, |_, _| {}) + } +} + +very_simple_component!(impl for Console, + new(ConsoleGrant) +); diff --git a/capsules/src/driver.rs b/capsules/src/driver.rs index f189dfd09..832b26a30 100644 --- a/capsules/src/driver.rs +++ b/capsules/src/driver.rs @@ -21,6 +21,8 @@ pub enum NUM { // Kernel Ipc = 0x10000, + ProcLoader = 0x10002, + MMU = 0x10003, // HW Buses Spi = 0x20001, diff --git a/capsules/src/dyn_proc_loader.rs b/capsules/src/dyn_proc_loader.rs new file mode 100644 index 000000000..dda3f738b --- /dev/null +++ b/capsules/src/dyn_proc_loader.rs @@ -0,0 +1,167 @@ +//! Dynamically loads processes from allowed buffers +//! +//! Setup +//! ----- +//! +//! ``` +//! // Create the loader +//! let loader = capsules::dyn_proc_loader::ProcLoader::new( +//! board_kernel.create_grant( +//! capsules::dyn_proc_loader::DRIVER_NUM, +//! &memory_allocation_cap, +//! ), +//! board_kernel, +//! chip, +//! &FAULT_RESPONSE, +//! &process_mgmt_cap, +//! ); +//! // Then load your initial processes, keeping the returned memory +//! let mem = kernel::process::load_processes_advanced( +//! board_kernel, +//! chip, +//! flash, +//! mem, +//! &FAULT_RESPONSE, +//! true, +//! &process_mgmt_cap, +//! )?; +//! +//! // Then pass to the loader +//! +//! loader.provide_mem(mem); +//! ``` +//! +//! Note, this can only be used with contiguously loaded processes as otherwise they would try +//! execute from memory located inside the loader. +//! +//! +//! Usage (from userspace) +//! The general flow is as follows: +//! User allows a single buffer (slot 0) containing a tbf +//! User registers a callback +//! User calls command 1 to load the process +//! User waits for callback +//! Capsule loads process +//! Capsule notifies user via callback +//! 
User can un-allow callback and buffer (if desired) + +use crate::driver; +use core::cell::Cell; +use core::ops::Deref; +use kernel::capabilities::ProcessManagementCapability; +use kernel::grant::{AllowRoCount, AllowRwCount, Grant, UpcallCount}; +use kernel::platform::chip::Chip; +use kernel::process::{try_load_process_pub, Error, ProcessFaultPolicy}; +use kernel::syscall::{CommandReturn, CommandReturnResult, SyscallDriver}; +use kernel::{very_simple_component, ErrorCode, Kernel, ProcessId}; + +pub const DRIVER_NUM: usize = driver::NUM::ProcLoader as usize; + +type GrantT = Grant<(), UpcallCount<1>, AllowRoCount<1>, AllowRwCount<1>>; + +pub struct ProcLoader<'a, C: Chip + 'static, const CHECK_VERSION: bool> { + grant: GrantT, + kernel: &'static Kernel, + chip: &'static C, + mem: Cell>, + fault_policy: &'static dyn ProcessFaultPolicy, + capability: &'a dyn ProcessManagementCapability, +} + +very_simple_component!(impl<{C : Chip + 'static, const CHECK_VERSION : bool}> for ProcLoader::<'static, C, CHECK_VERSION>, + new(GrantT, &'static Kernel, &'static C, &'static dyn ProcessFaultPolicy, &'static dyn ProcessManagementCapability) +); + +impl<'a, C: Chip + 'static, const CHECK_VERSION: bool> ProcLoader<'a, C, CHECK_VERSION> { + pub const fn new( + grant: GrantT, + kernel: &'static Kernel, + chip: &'static C, + fault_policy: &'static dyn ProcessFaultPolicy, + capability: &'a dyn ProcessManagementCapability, + ) -> Self { + Self { + grant, + kernel, + chip, + mem: Cell::new(None), + fault_policy, + capability, + } + } + + /// Provide remaining unallocated memory to the loader. + /// Not a part of new as this happens relatively late. + pub fn provide_mem(&self, free_memory: Option<&'static mut [u8]>) { + self.mem.set(free_memory) + } + + /// ### `command_num` + /// - `0` Driver check, return OK() + /// - `1` Load process + fn handle_command( + &self, + command_number: usize, + _arg2: usize, + _arg3: usize, + appid: ProcessId, + ) -> CommandReturnResult { + match command_number { + 0 => Ok(CommandReturn::success()), + 1 => { + let proc_grant = self.grant.get_for(appid)?; + let kern_data = proc_grant.get_kern_data(); + let tbf = kern_data.get_readonly_aref(0)?.as_noclone(); + + // Fail early + if kern_data.could_schedule_upcall(0).is_err() { + return Err(ErrorCode::BUSY); + } + + // Current process loading is really synchronous, but we still adopt the async + // model in case it ever changes + // SAFETY: we only use this temporarily to load the process. + // Still, we should still refactor. 
+ let mut flash = tbf.deref().to_byte_slice(); + let mut mem = self.mem.take(); + let load_result = try_load_process_pub::( + self.kernel, + self.chip, + self.fault_policy, + CHECK_VERSION, + &mut flash, + &mut mem, + self.capability, + ); + self.mem.set(mem); + // Handle errors + load_result?; + + // Otherwise the process should now be loaded, create the upcall + kern_data.schedule_upcall(0, (0, 0, 0)).unwrap(); + + // Return success + Ok(CommandReturn::success()) + } + _ => Err(ErrorCode::NOSUPPORT), + } + } +} + +impl<'a, C: Chip + 'static, const CHECK_VERSION: bool> SyscallDriver + for ProcLoader<'a, C, CHECK_VERSION> +{ + fn command( + &self, + command_num: usize, + r2: usize, + r3: usize, + process_id: ProcessId, + ) -> CommandReturn { + self.handle_command(command_num, r2, r3, process_id).into() + } + + fn allocate_grant(&self, process_id: ProcessId) -> Result<(), Error> { + self.grant.enter(process_id, |_, _| {}) + } +} diff --git a/capsules/src/ieee802154/virtual_mac.rs b/capsules/src/ieee802154/virtual_mac.rs index 7e436ff4a..e249d7296 100644 --- a/capsules/src/ieee802154/virtual_mac.rs +++ b/capsules/src/ieee802154/virtual_mac.rs @@ -223,7 +223,7 @@ impl MacUser<'_> { } impl<'a> ListNode<'a, MacUser<'a>> for MacUser<'a> { - fn next(&'a self) -> &'a ListLink<'a, MacUser<'a>> { + fn next(&self) -> &ListLink<'a, MacUser<'a>> { &self.next } } diff --git a/capsules/src/kv_store.rs b/capsules/src/kv_store.rs index 9de810d25..2e6410e5e 100644 --- a/capsules/src/kv_store.rs +++ b/capsules/src/kv_store.rs @@ -96,7 +96,7 @@ pub struct KVStore<'a, K: KVSystem<'a> + KVSystem<'a, K = T>, T: 'static + kv_sy impl<'a, K: KVSystem<'a, K = T>, T: kv_system::KeyType> ListNode<'a, KVStore<'a, K, T>> for KVStore<'a, K, T> { - fn next(&self) -> &'a ListLink> { + fn next(&self) -> &ListLink<'a, KVStore<'a, K, T>> { &self.next } } diff --git a/capsules/src/lib.rs b/capsules/src/lib.rs index 4691d74f4..bce98108c 100644 --- a/capsules/src/lib.rs +++ b/capsules/src/lib.rs @@ -1,4 +1,11 @@ +#![feature(const_mut_refs)] +#![feature(type_alias_impl_trait)] +#![feature(array_methods)] +#![feature(macro_metavar_expr)] +#![feature(const_trait_impl)] +#![feature(const_precise_live_drops)] #![forbid(unsafe_code)] +#![feature(const_convert)] #![no_std] pub mod test; @@ -23,11 +30,13 @@ pub mod button; pub mod buzzer_driver; pub mod ccs811; pub mod console; +pub mod console_zero; pub mod crc; pub mod ctap; pub mod dac; pub mod debug_process_restart; pub mod driver; +pub mod dyn_proc_loader; pub mod fm25cl; pub mod ft6x06; pub mod fxos8700cq; @@ -107,3 +116,4 @@ pub mod virtual_sha; pub mod virtual_spi; pub mod virtual_timer; pub mod virtual_uart; +pub mod virtual_uart_zero; diff --git a/capsules/src/low_level_debug/mod.rs b/capsules/src/low_level_debug/mod.rs index cf70a4a4c..4c2db054c 100644 --- a/capsules/src/low_level_debug/mod.rs +++ b/capsules/src/low_level_debug/mod.rs @@ -4,20 +4,47 @@ mod fmt; use core::cell::Cell; +use core::ops::Deref; use kernel::grant::{AllowRoCount, AllowRwCount, Grant, UpcallCount}; -use kernel::hil::uart::{Transmit, TransmitClient}; -use kernel::syscall::CommandReturn; +use kernel::hil::uart::{Transmit, TransmitClient, ZeroTransmit, ZeroTransmitClient}; +use kernel::process::Error; +use kernel::syscall::{CommandReturn, SyscallDriver}; +use kernel::utilities::leased_buffer::LeasedBufferCell; use kernel::{ErrorCode, ProcessId}; // LowLevelDebug requires a &mut [u8] buffer of length at least BUF_LEN. 
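The `misc::overload_impl!` macro used by these zero-copy drivers (`ConsoleSyscallDriver` above, `LLDBSyscallDriver` below) is not shown in this section. From how it is used (`Wrapper::get(t)`, the `.inner` field, and deref back to the wrapped transmitter), a plausible shape is a transparent newtype that lets one object take on an extra trait personality; a sketch under that assumption, with all names hypothetical:

```
// Hypothetical expansion of misc::overload_impl!(Wrapper).
#[repr(transparent)]
pub struct Wrapper<T> {
    pub inner: T,
}

impl<T> Wrapper<T> {
    // Reinterpret a &T as a &Wrapper<T>.
    // Safety: sound because of #[repr(transparent)].
    pub const fn get(t: &T) -> &Wrapper<T> {
        unsafe { &*(t as *const T as *const Wrapper<T>) }
    }
}

impl<T> core::ops::Deref for Wrapper<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &self.inner
    }
}
```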
pub use fmt::BUF_LEN; +pub type GrantType = Grant, AllowRoCount<0>, AllowRwCount<0>>; + pub const DRIVER_NUM: usize = crate::driver::NUM::LowLevelDebug as usize; -pub struct LowLevelDebug<'u, U: Transmit<'u>> { +// Length of the debug queue for each app. Each queue entry takes 3 words (tag +// and 2 usizes to print). The queue will be allocated in an app's grant region +// when that app first uses the debug driver. +const QUEUE_SIZE: usize = 4; + +// ----------------------------------------------------------------------------- +// Implementation details below +// ----------------------------------------------------------------------------- + +#[derive(Default)] +pub struct AppData { + queue: [Option; QUEUE_SIZE], +} + +#[derive(Clone, Copy)] +pub(crate) enum DebugEntry { + Dropped(usize), // Some debug messages were dropped + AlertCode(usize), // Display a predefined alert code + Print1(usize), // Print a single number + Print2(usize, usize), // Print two numbers +} + +struct BaseLowLevelDebug { buffer: Cell>, - grant: Grant, AllowRoCount<0>, AllowRwCount<0>>, + grant: GrantType, // grant_failed is set to true when LowLevelDebug fails to allocate an app's // grant region. When it has a chance, LowLevelDebug will print a message // indicating a grant initialization has failed, then set this back to @@ -25,26 +52,42 @@ pub struct LowLevelDebug<'u, U: Transmit<'u>> { // using grant storage, it will at least output an error indicating some // application's message was dropped. grant_failed: Cell, +} + +impl BaseLowLevelDebug { + pub const fn new(buffer: &'static mut [u8], grant: GrantType) -> Self { + Self { + buffer: Cell::new(Some(buffer)), + grant, + grant_failed: Cell::new(false), + } + } +} + +pub struct LowLevelDebug<'u, U: Transmit<'u>> { + base: BaseLowLevelDebug, uart: &'u U, } impl<'u, U: Transmit<'u>> LowLevelDebug<'u, U> { - pub fn new( + pub const fn new( buffer: &'static mut [u8], uart: &'u U, - grant: Grant, AllowRoCount<0>, AllowRwCount<0>>, + grant: GrantType, ) -> LowLevelDebug<'u, U> { LowLevelDebug { - buffer: Cell::new(Some(buffer)), - grant, - grant_failed: Cell::new(false), + base: BaseLowLevelDebug::new(buffer, grant), uart, } } } -impl<'u, U: Transmit<'u>> kernel::syscall::SyscallDriver for LowLevelDebug<'u, U> { - fn command( +// Trait shared by both implementations that has most of the logic. + +trait LLDB { + fn transmit_str(&self, tx_buffer: &'static mut [u8], len: usize); + fn get_base(&self) -> &BaseLowLevelDebug; + fn do_command( &self, minor_num: usize, r2: usize, @@ -61,36 +104,25 @@ impl<'u, U: Transmit<'u>> kernel::syscall::SyscallDriver for LowLevelDebug<'u, U CommandReturn::success() } - fn allocate_grant(&self, processid: ProcessId) -> Result<(), kernel::process::Error> { - self.grant.enter(processid, |_, _| {}) - } -} - -impl<'u, U: Transmit<'u>> TransmitClient for LowLevelDebug<'u, U> { - fn transmitted_buffer( - &self, - tx_buffer: &'static mut [u8], - _tx_len: usize, - _rval: Result<(), ErrorCode>, - ) { + fn transmit_finish(&self, tx_buffer: &'static mut [u8]) { // Identify and transmit the next queued entry. If there are no queued // entries remaining, store buffer. + let base = self.get_base(); + // Prioritize printing the "grant init failed" message over per-app // debug entries. 
- if self.grant_failed.take() { + + if base.grant_failed.take() { const MESSAGE: &[u8] = b"LowLevelDebug: grant init failed\n"; tx_buffer[..MESSAGE.len()].copy_from_slice(MESSAGE); - let _ = self.uart.transmit_buffer(tx_buffer, MESSAGE.len()).map_err( - |(_, returned_buffer)| { - self.buffer.set(Some(returned_buffer)); - }, - ); + self.transmit_str(tx_buffer, MESSAGE.len()); + return; } - for process_grant in self.grant.iter() { + for process_grant in base.grant.iter() { let appid = process_grant.processid(); let (app_num, first_entry) = process_grant.enter(|owned_app_data, _| { owned_app_data.queue.rotate_left(1); @@ -100,29 +132,31 @@ impl<'u, U: Transmit<'u>> TransmitClient for LowLevelDebug<'u, U> { None => continue, Some(to_print) => to_print, }; - self.transmit_entry(tx_buffer, app_num, to_print); + + let msg_len = fmt::format_entry(app_num, to_print, tx_buffer); + + self.transmit_str(tx_buffer, msg_len); + return; } - self.buffer.set(Some(tx_buffer)); - } -} -// ----------------------------------------------------------------------------- -// Implementation details below -// ----------------------------------------------------------------------------- + base.buffer.set(Some(tx_buffer)); + } -impl<'u, U: Transmit<'u>> LowLevelDebug<'u, U> { // If the UART is not busy (the buffer is available), transmits the entry. // Otherwise, adds it to the app's queue. fn push_entry(&self, entry: DebugEntry, appid: ProcessId) { - use DebugEntry::Dropped; + let base = self.get_base(); - if let Some(buffer) = self.buffer.take() { - self.transmit_entry(buffer, appid.id(), entry); + if let Some(buffer) = base.buffer.take() { + let msg_len = fmt::format_entry(appid.id(), entry, buffer); + self.transmit_str(buffer, msg_len); return; } - let result = self.grant.enter(appid, |borrow, _| { + use DebugEntry::Dropped; + + let result = base.grant.enter(appid, |borrow, _| { for queue_entry in &mut borrow.queue { if queue_entry.is_none() { *queue_entry = Some(entry); @@ -143,38 +177,138 @@ impl<'u, U: Transmit<'u>> LowLevelDebug<'u, U> { // message. This gives the user a chance of figuring out what happened // when LowLevelDebug fails. if result.is_err() { - self.grant_failed.set(true); + base.grant_failed.set(true); } } +} - // Immediately prints the provided entry to the UART. - fn transmit_entry(&self, buffer: &'static mut [u8], app_num: usize, entry: DebugEntry) { - let msg_len = fmt::format_entry(app_num, entry, buffer); - // The uart's error message is ignored because we cannot do anything if - // it fails anyway. +impl<'u, U: Transmit<'u>> LLDB for LowLevelDebug<'u, U> { + fn transmit_str(&self, tx_buffer: &'static mut [u8], len: usize) { let _ = self .uart - .transmit_buffer(buffer, msg_len) + .transmit_buffer(tx_buffer, len) .map_err(|(_, returned_buffer)| { - self.buffer.set(Some(returned_buffer)); + self.base.buffer.set(Some(returned_buffer)); }); } + fn get_base(&self) -> &BaseLowLevelDebug { + &self.base + } } -// Length of the debug queue for each app. Each queue entry takes 3 words (tag -// and 2 usizes to print). The queue will be allocated in an app's grant region -// when that app first uses the debug driver. 
-const QUEUE_SIZE: usize = 4; +impl<'u, U: Transmit<'u>> kernel::syscall::SyscallDriver for LowLevelDebug<'u, U> { + fn command( + &self, + minor_num: usize, + r2: usize, + r3: usize, + caller_id: ProcessId, + ) -> CommandReturn { + self.do_command(minor_num, r2, r3, caller_id) + } -#[derive(Default)] -pub struct AppData { - queue: [Option; QUEUE_SIZE], + fn allocate_grant(&self, processid: ProcessId) -> Result<(), kernel::process::Error> { + self.base.grant.enter(processid, |_, _| {}) + } } -#[derive(Clone, Copy)] -pub(crate) enum DebugEntry { - Dropped(usize), // Some debug messages were dropped - AlertCode(usize), // Display a predefined alert code - Print1(usize), // Print a single number - Print2(usize, usize), // Print two numbers +impl<'u, U: Transmit<'u>> TransmitClient for LowLevelDebug<'u, U> { + fn transmitted_buffer( + &self, + tx_buffer: &'static mut [u8], + _tx_len: usize, + _rval: Result<(), ErrorCode>, + ) { + self.transmit_finish(tx_buffer) + } +} + +// The zero-copy version +pub struct LowLevelDebugZero { + base: BaseLowLevelDebug, + leased: LeasedBufferCell<'static, u8>, +} + +impl LowLevelDebugZero { + // Immediately prints the provided string to the UART + fn transmit_buf>( + transmit: &Transmit, + buffer: &'static mut [u8], + msg_len: usize, + ) { + let slf = transmit.get_client(); + let lease = slf.leased.set_lease(buffer, 0..msg_len); + match transmit.transmit(lease) { + Ok(Some(buf)) => { + // Short circuit + Self::transmit_finish(transmit, buf, Ok(())) + } + Err((b, _)) => { + // There is nothing we can do on error apart from restore the buffer + slf.leased.take_buf(b); + } + _ => { + // waiting for callback + } + } + } + + pub const fn new(buffer: &'static mut [u8], grant: GrantType) -> Self { + Self { + base: BaseLowLevelDebug::new(buffer, grant), + leased: LeasedBufferCell::new(), + } + } + + pub const fn get_syscall_driver>( + t: &Transmitter, + ) -> &LLDBSyscallDriver { + LLDBSyscallDriver::get(t) + } +} + +misc::overload_impl!(LLDBSyscallDriver); + +impl ZeroTransmitClient for LowLevelDebugZero { + type Buf = &'static mut [u8]; + + fn transmit_finish>( + transmitter: &Transmit, + buf: Self::Buf, + _res: Result<(), ErrorCode>, + ) { + let slf = transmitter.get_client(); + LLDBSyscallDriver::::transmit_finish( + LLDBSyscallDriver::::get(transmitter), + slf.leased.take_buf(buf), + ); + } +} + +impl> LLDB for LLDBSyscallDriver { + fn transmit_str(&self, tx_buffer: &'static mut [u8], len: usize) { + LowLevelDebugZero::transmit_buf(self.deref(), tx_buffer, len) + } + + fn get_base(&self) -> &BaseLowLevelDebug { + &self.get_client().base + } +} + +impl> SyscallDriver + for LLDBSyscallDriver +{ + fn command( + &self, + command_num: usize, + r2: usize, + r3: usize, + process_id: ProcessId, + ) -> CommandReturn { + self.do_command(command_num, r2, r3, process_id) + } + + fn allocate_grant(&self, process_id: ProcessId) -> Result<(), Error> { + self.get_client().base.grant.enter(process_id, |_, _| {}) + } } diff --git a/capsules/src/net/sixlowpan/sixlowpan_state.rs b/capsules/src/net/sixlowpan/sixlowpan_state.rs index 8184c2ce5..bf5d07001 100644 --- a/capsules/src/net/sixlowpan/sixlowpan_state.rs +++ b/capsules/src/net/sixlowpan/sixlowpan_state.rs @@ -642,7 +642,7 @@ pub struct RxState<'a> { } impl<'a> ListNode<'a, RxState<'a>> for RxState<'a> { - fn next(&'a self) -> &'a ListLink> { + fn next(&self) -> &ListLink<'a, RxState<'a>> { &self.next } } diff --git a/capsules/src/net/udp/udp_recv.rs b/capsules/src/net/udp/udp_recv.rs index f5d3e383a..27f00b33b 100644 --- 
a/capsules/src/net/udp/udp_recv.rs
+++ b/capsules/src/net/udp/udp_recv.rs
@@ -118,7 +118,7 @@ pub struct UDPReceiver<'a> {
 }
 
 impl<'a> ListNode<'a, UDPReceiver<'a>> for UDPReceiver<'a> {
-    fn next(&'a self) -> &'a ListLink<'a, UDPReceiver<'a>> {
+    fn next(&self) -> &ListLink<'a, UDPReceiver<'a>> {
         &self.next
     }
 }
diff --git a/capsules/src/net/udp/udp_send.rs b/capsules/src/net/udp/udp_send.rs
index c04dc5d3f..5ecfb3e42 100644
--- a/capsules/src/net/udp/udp_send.rs
+++ b/capsules/src/net/udp/udp_send.rs
@@ -252,7 +252,7 @@ pub struct UDPSendStruct<'a, T: IP6Sender<'a>> {
 }
 
 impl<'a, T: IP6Sender<'a>> ListNode<'a, UDPSendStruct<'a, T>> for UDPSendStruct<'a, T> {
-    fn next(&'a self) -> &'a ListLink<'a, UDPSendStruct<'a, T>> {
+    fn next(&self) -> &ListLink<'a, UDPSendStruct<'a, T>> {
         &self.next
     }
 }
diff --git a/capsules/src/virtual_adc.rs b/capsules/src/virtual_adc.rs
index f382b3c1f..2fa4d800a 100644
--- a/capsules/src/virtual_adc.rs
+++ b/capsules/src/virtual_adc.rs
@@ -100,7 +100,7 @@ impl<'a, A: hil::adc::Adc> AdcDevice<'a, A> {
 }
 
 impl<'a, A: hil::adc::Adc> ListNode<'a, AdcDevice<'a, A>> for AdcDevice<'a, A> {
-    fn next(&'a self) -> &'a ListLink<'a, AdcDevice<'a, A>> {
+    fn next(&self) -> &ListLink<'a, AdcDevice<'a, A>> {
         &self.next
     }
 }
diff --git a/capsules/src/virtual_aes_ccm.rs b/capsules/src/virtual_aes_ccm.rs
index ff50def4f..b392858d9 100644
--- a/capsules/src/virtual_aes_ccm.rs
+++ b/capsules/src/virtual_aes_ccm.rs
@@ -914,7 +914,7 @@ impl<'a, A: AES128<'a> + AES128Ctr + AES128CBC + AES128ECB> symmetric_encryption
 impl<'a, A: AES128<'a> + AES128Ctr + AES128CBC + AES128ECB> ListNode<'a, VirtualAES128CCM<'a, A>>
     for VirtualAES128CCM<'a, A>
 {
-    fn next(&'a self) -> &'a ListLink<'a, VirtualAES128CCM<'a, A>> {
+    fn next(&self) -> &ListLink<'a, VirtualAES128CCM<'a, A>> {
         &self.next
     }
 }
diff --git a/capsules/src/virtual_alarm.rs b/capsules/src/virtual_alarm.rs
index b3897fb15..bcf5375b1 100644
--- a/capsules/src/virtual_alarm.rs
+++ b/capsules/src/virtual_alarm.rs
@@ -45,15 +45,15 @@ pub struct VirtualMuxAlarm<'a, A: Alarm<'a>> {
 }
 
 impl<'a, A: Alarm<'a>> ListNode<'a, VirtualMuxAlarm<'a, A>> for VirtualMuxAlarm<'a, A> {
-    fn next(&self) -> &'a ListLink<'a, VirtualMuxAlarm<'a, A>> {
+    fn next(&self) -> &ListLink<'a, VirtualMuxAlarm<'a, A>> {
         &self.next
     }
 }
 
 impl<'a, A: Alarm<'a>> VirtualMuxAlarm<'a, A> {
     /// After calling new, always call setup()
-    pub fn new(mux_alarm: &'a MuxAlarm<'a, A>) -> VirtualMuxAlarm<'a, A> {
-        let zero = A::Ticks::from(0);
+    pub const fn new(mux_alarm: &'a MuxAlarm<'a, A>) -> VirtualMuxAlarm<'a, A> {
+        let zero = A::Ticks::ZERO;
         VirtualMuxAlarm {
             mux: mux_alarm,
             dt_reference: Cell::new(TickDtReference {
diff --git a/capsules/src/virtual_digest.rs b/capsules/src/virtual_digest.rs
index 97caf7dfe..cb90518d6 100644
--- a/capsules/src/virtual_digest.rs
+++ b/capsules/src/virtual_digest.rs
@@ -43,7 +43,7 @@ pub struct VirtualMuxDigest<'a, A: digest::Digest<'a, L>, const L: usize> {
 impl<'a, A: digest::Digest<'a, L>, const L: usize> ListNode<'a, VirtualMuxDigest<'a, A, L>>
     for VirtualMuxDigest<'a, A, L>
 {
-    fn next(&self) -> &'a ListLink<'a, VirtualMuxDigest<'a, A, L>> {
+    fn next(&self) -> &ListLink<'a, VirtualMuxDigest<'a, A, L>> {
         &self.next
     }
 }
diff --git a/capsules/src/virtual_flash.rs b/capsules/src/virtual_flash.rs
index cf326eb3e..861639edd 100644
--- a/capsules/src/virtual_flash.rs
+++ b/capsules/src/virtual_flash.rs
@@ -181,7 +181,7 @@ impl<'a, F: hil::flash::Flash> hil::flash::Client<F> for FlashUser<'a, F> {
 }
 
 impl<'a, F: hil::flash::Flash> ListNode<'a, FlashUser<'a, F>> for FlashUser<'a, F> {
-    fn next(&'a self) -> &'a ListLink<'a, FlashUser<'a, F>> {
+    fn next(&self) -> &ListLink<'a, FlashUser<'a, F>> {
         &self.next
     }
 }
diff --git a/capsules/src/virtual_hmac.rs b/capsules/src/virtual_hmac.rs
index 8ebc21151..be5ac3058 100644
--- a/capsules/src/virtual_hmac.rs
+++ b/capsules/src/virtual_hmac.rs
@@ -29,7 +29,7 @@ pub struct VirtualMuxHmac<'a, A: digest::Digest<'a, L>, const L: usize> {
 impl<'a, A: digest::Digest<'a, L>, const L: usize> ListNode<'a, VirtualMuxHmac<'a, A, L>>
     for VirtualMuxHmac<'a, A, L>
 {
-    fn next(&self) -> &'a ListLink<'a, VirtualMuxHmac<'a, A, L>> {
+    fn next(&self) -> &ListLink<'a, VirtualMuxHmac<'a, A, L>> {
         &self.next
     }
 }
diff --git a/capsules/src/virtual_i2c.rs b/capsules/src/virtual_i2c.rs
index 89d168065..f182fe2c9 100644
--- a/capsules/src/virtual_i2c.rs
+++ b/capsules/src/virtual_i2c.rs
@@ -246,7 +246,7 @@ impl I2CClient for I2CDevice<'_> {
 }
 
 impl<'a> ListNode<'a, I2CDevice<'a>> for I2CDevice<'a> {
-    fn next(&'a self) -> &'a ListLink<'a, I2CDevice<'a>> {
+    fn next(&self) -> &ListLink<'a, I2CDevice<'a>> {
         &self.next
     }
 }
@@ -347,7 +347,7 @@ impl<'a> I2CClient for SMBusDevice<'a> {
 }
 
 impl<'a> ListNode<'a, SMBusDevice<'a>> for SMBusDevice<'a> {
-    fn next(&'a self) -> &'a ListLink<'a, SMBusDevice<'a>> {
+    fn next(&self) -> &ListLink<'a, SMBusDevice<'a>> {
         &self.next
     }
 }
diff --git a/capsules/src/virtual_pwm.rs b/capsules/src/virtual_pwm.rs
index 28275fc0e..1300ad294 100644
--- a/capsules/src/virtual_pwm.rs
+++ b/capsules/src/virtual_pwm.rs
@@ -127,7 +127,7 @@ impl<'a, P: hil::pwm::Pwm> PwmPinUser<'a, P> {
 }
 
 impl<'a, P: hil::pwm::Pwm> ListNode<'a, PwmPinUser<'a, P>> for PwmPinUser<'a, P> {
-    fn next(&'a self) -> &'a ListLink<'a, PwmPinUser<'a, P>> {
+    fn next(&self) -> &ListLink<'a, PwmPinUser<'a, P>> {
         &self.next
     }
 }
diff --git a/capsules/src/virtual_rng.rs b/capsules/src/virtual_rng.rs
index f45f0cd0c..6bac4aadd 100644
--- a/capsules/src/virtual_rng.rs
+++ b/capsules/src/virtual_rng.rs
@@ -98,7 +98,7 @@ pub struct VirtualRngMasterDevice<'a> {
 
 // Implement ListNode trait for virtual rng device
 impl<'a> ListNode<'a, VirtualRngMasterDevice<'a>> for VirtualRngMasterDevice<'a> {
-    fn next(&self) -> &'a ListLink<'a, VirtualRngMasterDevice<'a>> {
+    fn next(&self) -> &ListLink<'a, VirtualRngMasterDevice<'a>> {
         &self.next
     }
 }
diff --git a/capsules/src/virtual_sha.rs b/capsules/src/virtual_sha.rs
index b0335321d..9738f1db3 100644
--- a/capsules/src/virtual_sha.rs
+++ b/capsules/src/virtual_sha.rs
@@ -29,7 +29,7 @@ pub struct VirtualMuxSha<'a, A: digest::Digest<'a, L>, const L: usize> {
 impl<'a, A: digest::Digest<'a, L>, const L: usize> ListNode<'a, VirtualMuxSha<'a, A, L>>
     for VirtualMuxSha<'a, A, L>
 {
-    fn next(&self) -> &'a ListLink<'a, VirtualMuxSha<'a, A, L>> {
+    fn next(&self) -> &ListLink<'a, VirtualMuxSha<'a, A, L>> {
         &self.next
     }
 }
diff --git a/capsules/src/virtual_spi.rs b/capsules/src/virtual_spi.rs
index cde9a5b64..5dd929a8d 100644
--- a/capsules/src/virtual_spi.rs
+++ b/capsules/src/virtual_spi.rs
@@ -222,7 +222,7 @@ impl hil::spi::SpiMasterClient for VirtualSpiMasterDev
 impl<'a, Spi: hil::spi::SpiMaster> ListNode<'a, VirtualSpiMasterDevice<'a, Spi>>
     for VirtualSpiMasterDevice<'a, Spi>
 {
-    fn next(&'a self) -> &'a ListLink<'a, VirtualSpiMasterDevice<'a, Spi>> {
+    fn next(&self) -> &ListLink<'a, VirtualSpiMasterDevice<'a, Spi>> {
         &self.next
     }
 }
diff --git a/capsules/src/virtual_timer.rs b/capsules/src/virtual_timer.rs
index 4ab499713..53c55decb 100644
--- a/capsules/src/virtual_timer.rs
+++ b/capsules/src/virtual_timer.rs
@@ -35,7 +35,7 @@ pub struct VirtualTimer<'a, A: Alarm<'a>> {
 }
 
 impl<'a, A: Alarm<'a>> ListNode<'a, VirtualTimer<'a, A>> for VirtualTimer<'a, A> {
-    fn next(&self) -> &'a ListLink<'a, VirtualTimer<'a, A>> {
+    fn next(&self) -> &ListLink<'a, VirtualTimer<'a, A>> {
         &self.next
     }
 }
diff --git a/capsules/src/virtual_uart.rs b/capsules/src/virtual_uart.rs
index 6dae1c3be..79c3a442f 100644
--- a/capsules/src/virtual_uart.rs
+++ b/capsules/src/virtual_uart.rs
@@ -52,7 +52,7 @@ use kernel::hil::uart;
 use kernel::utilities::cells::{OptionalCell, TakeCell};
 use kernel::ErrorCode;
 
-const RX_BUF_LEN: usize = 64;
+pub const RX_BUF_LEN: usize = 64;
 pub static mut RX_BUF: [u8; RX_BUF_LEN] = [0; RX_BUF_LEN];
 
 pub struct MuxUart<'a> {
@@ -190,11 +190,12 @@ impl<'a> uart::ReceiveClient for MuxUart<'a> {
 }
 
 impl<'a> MuxUart<'a> {
-    pub fn new(
+    pub const fn new(
         uart: &'a dyn uart::Uart<'a>,
         buffer: &'static mut [u8],
         speed: u32,
         deferred_caller: &'a DynamicDeferredCall,
+        handle: Option<DeferredCallHandle>,
     ) -> MuxUart<'a> {
         MuxUart {
             uart: uart,
@@ -204,7 +205,7 @@ impl<'a> MuxUart<'a> {
             buffer: TakeCell::new(buffer),
             completing_read: Cell::new(false),
             deferred_caller: deferred_caller,
-            handle: OptionalCell::empty(),
+            handle: OptionalCell::new_option(handle),
         }
     }
 
@@ -339,7 +340,7 @@ pub struct UartDevice<'a> {
 }
 
 impl<'a> UartDevice<'a> {
-    pub fn new(mux: &'a MuxUart<'a>, receiver: bool) -> UartDevice<'a> {
+    pub const fn new(mux: &'a MuxUart<'a>, receiver: bool) -> UartDevice<'a> {
         UartDevice {
             state: Cell::new(UartDeviceReceiveState::Idle),
             mux: mux,
@@ -398,7 +399,7 @@ impl<'a> uart::ReceiveClient for UartDevice<'a> {
 }
 
 impl<'a> ListNode<'a, UartDevice<'a>> for UartDevice<'a> {
-    fn next(&'a self) -> &'a ListLink<'a, UartDevice<'a>> {
+    fn next(&self) -> &ListLink<'a, UartDevice<'a>> {
         &self.next
     }
 }
diff --git a/capsules/src/virtual_uart_zero.rs b/capsules/src/virtual_uart_zero.rs
new file mode 100644
index 000000000..bc3e2c2ba
--- /dev/null
+++ b/capsules/src/virtual_uart_zero.rs
@@ -0,0 +1,247 @@
+//! Virtualize a UART bus with the new interface.
+//! Can handle a (statically) variable number of ZeroTransmitClients.
+//! Clients will have to agree on a type they can all convert their buffers into/from.
+//! The mux will take care of calling into() on both paths.
+//! At worst, this will be an enum over all their buffer types.
+//! The policy implements an in-order queue.
+
+use core::cell::Cell;
+use kernel::collections::list::{ListLink, ListNode};
+
+/// A node in the queue (monomorphic with respect to the client)
+struct PerClientStateNode<Buf> {
+    /// Buffer this client has queued
+    queue: Cell<Option<Buf>>,
+    /// Link to next in queue (if any)
+    link: ListLink<'static, PerClientStateNode<Buf>>,
+    /// The value to set in_progress to when popping this from the queue.
+    /// Although this is known statically in most cases, having it here
+    /// helps for the de-queue case where we don't care about the
+    /// type of the client.
+    ndx: u8,
+}
+
+impl<Buf: 'static> ListNode<'static, PerClientStateNode<Buf>> for PerClientStateNode<Buf> {
+    fn next(&self) -> &ListLink<'static, PerClientStateNode<Buf>> {
+        &self.link
+    }
+}
+
+// A node in the queue plus the client state (different type per client)
+struct PerClientState<Client, Buf> {
+    // Client state
+    client: Client,
+    // Node for list
+    node: PerClientStateNode<Buf>,
+}
+
+macro_rules!
DeclareMux { + ($mux_name : ident, $(($name : ident, $ids : ident, $overload : ident, $n : tt)),+) => { + +use crate::virtual_uart_zero::PerClientState; +use core::marker::PhantomData; +use kernel::hil::uart::{ZeroTransmitClient, ZeroTransmit}; +use kernel::ErrorCode; +use core::cell::Cell; +use kernel::collections::list::{Queue, ListLink}; +use crate::virtual_uart_zero::PerClientStateNode; + +pub struct $mux_name +{ + $($name : PerClientState<$ids, Buf>,)+ + // The in progress transmission + in_progress : Cell, + // A queue (through the parts of the client state that are the same) + queue : Queue<'static, PerClientStateNode>, + // Ugly hack: + // If something can think of a way around this I am open to suggestion. The problem is this: + // In order to support modifying the intrusive list though this mux, we need a reference + // with a lifetime that lives as long as the struct itself (in this case static). + // The "transmit" method in the uart hil gives a possibly shorter reference. + // I don't want to modify the transmit hil to require a lifetime that is a parameter to the + // type as this would be ugly as well. + // We need some way to get to a handle with a longer lifetime again. The safe way of doing this + // is to have this weird self reference. + static_self : &'static Self, +} + +// Create a set of types that act as transmitters for each client. +macro_rules! DeclareMuxRefHelper { + ($$one_id : ident, $$one_n : tt, $$one_name: ident, $$one_overload : ident) => { + + misc::overload_impl!($$one_overload); + + impl>> ZeroTransmit<$$one_id> for + $$one_overload + where $($ids::Buf : Into, Buf : Into<$ids::Buf>),+ + { + #[inline] + fn transmit(&self, buf: $$one_id::Buf) -> Result, ($$one_id::Buf, ErrorCode)> { + let transmit = &self.inner; + let all_state = transmit.get_client(); + let buf = buf.into(); + let state = &all_state.$$one_name; + if all_state.in_progress.get() == 0 { + // Nothing in progress, do now + match transmit.transmit(buf) { + Ok(None) => { + all_state.in_progress.set($$one_n); // in progress + Ok(None) + }, + Ok(Some(buf)) => Ok(Some(buf.into())), // short-circuit + Err((buf, er)) => Err((buf.into(), er)) // error + } + } else { + // Have to queue: + match state.node.queue.replace(Some(buf)) { + None => { + all_state.queue.push_head(&all_state.static_self.$$one_name.node); + Ok(None) + } + Some(buf) => { Err((buf.into(), ErrorCode::BUSY))} + } + } + } + #[inline] + fn get_client(&self) -> &$$one_id { + &self.inner.get_client().$$one_name.client + } + } + + /// Get a client reference given the reference to the total transmitter + pub const fn $$one_name>> + (t : &Transmitter) -> &$$one_overload:: + where $($ids::Buf : Into, Buf : Into<$ids::Buf>),+ { + $$one_overload::::get(t) + } + } +} + +$( + DeclareMuxRefHelper!($ids, $n, $name, $overload); +)* + + + +// The Mux is also a client +impl ZeroTransmitClient for $mux_name + where $($ids::Buf : Into, Buf : Into<$ids::Buf>),+ { + type Buf = Buf; + + fn transmit_finish>(transmitter: &Transmit, + mut buf: Self::Buf, + mut res: Result<(), ErrorCode>) { + // transmit_finish is only called from our uart's callback path, never internally to + // this module, so there should be no capsules on the stack. + // We can therefore handle as many items in the queue as possible now. + // This is in contrast to the other virtual_uart, where the corresponding function + // was at risk at being called from the transmit path. 
+        // On our transmit path, we have a completely separate call to transmit.transmit() that
+        // will never call this function.
+
+        let slf = transmitter.get_client();
+        let mut just_finished = slf.in_progress.take(); // take clears in progress
+        loop {
+            // Call client callback
+            match just_finished {
+                $(
+                    $n => $ids::transmit_finish($name(transmitter), buf.into(), res),
+                )+
+                _ => { debug_assert!(false) }
+            }
+
+            // It is possible that a capsule jumped in and took the in_progress slot. I don't
+            // really mind, but if we ever get starvation this is why.
+            if slf.in_progress.get() != 0 {
+                return;
+            }
+
+            // Try to pop something off the queue
+            if let Some(next) = slf.queue.pop_head() {
+                if let Some(next_buf) = next.queue.take() {
+                    just_finished = next.ndx;
+                    (buf, res) = match transmitter.transmit(next_buf) {
+                        Ok(None) => {
+                            slf.in_progress.set(just_finished);
+                            return; // in progress, just return and we will get a callback
+                        },
+                        Ok(Some(buf)) => { (buf, Ok(())) },  // short circuit
+                        Err((buf, er)) => { (buf, Err(er)) } // error
+                    };
+                    // Go around the loop and call the next client callback
+                } else {
+                    debug_assert!(false);
+                    return;
+                }
+            } else {
+                return;
+            }
+        }
+    }
+}
+
+misc::overload_impl!(MuxDeferredCall);
+
+// Declare a constructor for the virtual uart (only really for the component to use)
+impl<Buf, $($ids : ZeroTransmitClient),+> $mux_name<Buf, $($ids),+>
+    where $($ids::Buf : Into<Buf>, Buf : Into<$ids::Buf>),+ {
+
+    pub const fn new($($name : $ids,)* static_self : &'static Self) -> Self {
+        Self {
+            $($name : PerClientState {
+                client : $name,
+                node : PerClientStateNode {
+                    queue : Cell::new(None),
+                    link : ListLink::empty(),
+                    ndx : $n,
+                },
+            },)*
+            in_progress : Cell::new(0),
+            queue : Queue::new(),
+            static_self,
+        }
+    }
+}
+
+/// Constructs a uart mux component. Should be parameterised by a buffer type that can be
+/// converted to/from each client's buffer type, and component factories for each client.
+pub struct UartMuxComponent<Buf, $($ids),+>(
+    PhantomData::<(Buf, $($ids),+)>
+);
+
+kernel::simple_static_component!(impl<{Buf : 'static, $($ids),+}> for UartMuxComponent::<Buf, $($ids),+>
+    where {
+        $(
+            $ids::Output : ZeroTransmitClient + 'static,
+            <$ids::Output as ZeroTransmitClient>::Buf : Into<Buf>,
+            Buf: Into<<$ids::Output as ZeroTransmitClient>::Buf>,
+        )+
+    },
+    Contain = ($($name.client : $ids),+),
+    Output = $mux_name<Buf, $($ids::Output),+>,
+    NewInput = (),
+    FinInput = (),
+    |slf, _input | {
+        $mux_name::new($($name,)+ slf)
+    },
+    |_slf, _input| {}
+);
+
+};}
+
+pub mod mux2 {
+    DeclareMux!(UartMux, (a, A, ARef, 1), (b, B, BRef, 2));
+}
+pub mod mux3 {
+    DeclareMux!(UartMux, (a, A, ARef, 1), (b, B, BRef, 2), (c, C, CRef, 3));
+}
+pub mod mux4 {
+    DeclareMux!(
+        UartMux,
+        (a, A, ARef, 1),
+        (b, B, BRef, 2),
+        (c, C, CRef, 3),
+        (d, D, DRef, 4)
+    );
+}
diff --git a/cheri_tock_recipe.md b/cheri_tock_recipe.md
new file mode 100644
index 000000000..03dfeb940
--- /dev/null
+++ b/cheri_tock_recipe.md
@@ -0,0 +1,200 @@
+# Get a toolchain
+
+## Cheribuild
+
+First, check out cheribuild:
+
+```
+mkdir ~/cheri && cd ~/cheri
+git clone https://github.com/CTSRD-CHERI/cheribuild.git
+```
+
+Cheribuild will tell you, but you will probably need a few packages. If your
+system has apt:
+
+```
+sudo apt install cmake ninja-build libpixman-1-dev libglib2.0-dev samba
+```
+
+It will help you build everything but rustc.
+
+You will need (at minimum) a clang/llvm/libc/qemu.
+
+libc is needed even for tock because we need CHERI-compatible compiler builtins.
+
+To build these:
+
+```
+cd ~/cheri/cheribuild
+./cheribuild.py llvm qemu compiler-rt-builtins-baremetal-riscv64-hybrid
+CCC_OVERRIDE_OPTIONS=+-Wno-error=implicit-function-declaration ./cheribuild.py newlib-baremetal-riscv64-hybrid --reconfigure --clean
+```
+
+Note: other useful targets to build with cheribuild are the builtins/newlib with
+riscv32 for 32-bit, -purecap for purecap, or without -hybrid for non-CHERI.
+
+LLVM, especially, will take a while. As it is progressing, start with getting a
+CHERI rustc.
+
+## Rustc
+
+Sadly, nobody has taught cheribuild this recipe yet. Check out:
+
+```
+cd ~
+git clone -b cheri-hybrid-1.67.1+rv32 https://github.com/arichardson/rust.git
+```
+
+You will then need to add your own config.toml inside the root of this project.
+Here is mine (I presume you are also on an x86 host):
+
+```
+[target.x86_64-unknown-linux-gnu]
+llvm-config = "[HOME]/cheri/output/sdk/bin/llvm-config"
+# crt-static = true
+
+[build]
+extended = true
+docs = false
+tools = [
+    "cargo",
+    "clippy",
+    "rustdoc",
+    "rustfmt",
+    "rust-analyzer",
+    "rust-analyzer-proc-macro-srv",
+    "analysis",
+]
+sanitizers = true
+profiler = true
+
+[install]
+prefix = "[HOME]/cheri/output/sdk/rust-sdk"
+sysconfdir = "etc"
+
+[rust]
+codegen-tests = false
+channel = "nightly"
+llvm-tools = true
+```
+
+You might not care to build all the tools / sanitizers / profiler.
+Obviously, replace [HOME] with something appropriate. I think this file does
+NOT expand environment variables.
+
+Note that building this requires llvm to have already been built, so wait for that.
+
+For some reason rustc expects this directory to exist even though we are not
+using the llvm-project from rustc (because we use the CHERI one), so:
+
+```
+mkdir ~/rust/src/llvm-project/libunwind
+```
+
+Then, build and install:
+
+```
+cd ~/rust
+./x.py build
+./x.py install
+```
+
+Finally, make a custom rustup toolchain. If you don't actually have rustup:
+
+```
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+```
+
+To make a toolchain called cheri (this matches the name in the toolchain file in tock)
+linked to where rustc is installed:
+
+```
+rustup toolchain link cheri ${HOME}/cheri/output/sdk/rust-sdk
+```
+
+# Build and run CHERI tock
+
+You can check out the CHERI tock here:
+
+```
+cd ~
+git clone -b all_squashed https://github.com/tock/tock-cheri.git tock
+```
+
+And, finally, build/run the QEMU CHERI virt board:
+
+```
+make -C ~/tock/boards/qemu_cheri_virt run
+```
+
+If you used the paths in this file then this should just work, although running
+it will be completely uninteresting without any processes.
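+
+Note: QEMU with serial on stdio is exited with `Ctrl-A` then `x`. If you want
+to launch QEMU by hand rather than through `make run`, the command is roughly
+of the following shape (a sketch only; the authoritative flags live in
+`boards/qemu_cheri_virt/Makefile`, and the path assumes the cheribuild QEMU
+from earlier):
+
+```
+~/cheri/output/sdk/bin/qemu-system-riscv64cheri -M virt -nographic \
+    -bios <path-to-the-tock-kernel-elf>
+```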
+
+# Build and run CHERI C applications
+
+Now is a good time to also build a purecap libc:
+
+```
+cd ~/cheri/cheribuild
+./cheribuild.py compiler-rt-builtins-baremetal-riscv64-purecap
+CCC_OVERRIDE_OPTIONS=+-Wno-error=implicit-function-declaration ./cheribuild.py newlib-baremetal-riscv64-purecap
+```
+
+And, for whatever reason, builtins and libc end up not being in the same directory,
+so copy all the libraries from the builtins into the respective libc:
+
+```
+cp ~/cheri/output/sdk/baremetal/baremetal-riscv64-purecap/lib/* ~/cheri/output/sdk/baremetal/baremetal-newlib-riscv64-purecap/riscv64-unknown-elf/lib/
+cp ~/cheri/output/sdk/baremetal/baremetal-riscv64-hybrid/lib/* ~/cheri/output/sdk/baremetal/baremetal-newlib-riscv64-hybrid/riscv64-unknown-elf/lib/
+```
+
+Check out libtock-c:
+
+```
+cd ~
+git clone -b all_squashed https://github.com/tock/libtock-c-cheri.git libtock-c
+```
+
+Note: the makefile rules in libtock-c expect both the cheri sdk and tock to
+be in the same directory as libtock-c. If they are not, you will likely have
+to invoke make with CHERI_SDK and TOCK_DIR set to something sensible.
+
+You can now run some examples:
+
+```
+cd ~/libtock-c/examples/c_hello
+make CLANG=1 RISCV=1 CHERI=1 run_pure
+```
+
+Each target will be built, but `run_pure` runs the purecap binary and `run_hybrid`
+runs the hybrid binary.
+
+You might also try running the revocation test, which interacts more heavily
+with CHERI features in the kernel:
+
+```
+cd ~/libtock-c/examples/revoke_test
+make CLANG=1 RISCV=1 CHERI=1 run_pure
+```
+
+There is also `examples/vun` with the simple buffer overflow example presented
+in the tockworld7 cheri talk.
+
+# Build and run CHERI Rust applications
+
+Check out libtock-rs:
+
+```
+cd ~
+git clone -b all_squashed https://github.com/tock/libtock-rs-cheri.git libtock-rs
+```
+
+Note: this branch expects to have tock rooted under it, so you will either have
+to fiddle with git submodules or just remove the submodule and symlink tock.
+
+Now you can run an example:
+
+```
+EXAMPLE=console make -C ~/libtock-rs run_qemu_rv64xcheri
+```
diff --git a/chips/lowrisc/src/virtual_otbn.rs b/chips/lowrisc/src/virtual_otbn.rs
index 2cea387b4..626be9dcf 100644
--- a/chips/lowrisc/src/virtual_otbn.rs
+++ b/chips/lowrisc/src/virtual_otbn.rs
@@ -15,7 +15,7 @@ pub struct VirtualMuxAccel<'a> {
 }
 
 impl<'a> ListNode<'a, VirtualMuxAccel<'a>> for VirtualMuxAccel<'a> {
-    fn next(&self) -> &'a ListLink<'a, VirtualMuxAccel<'a>> {
+    fn next(&self) -> &ListLink<'a, VirtualMuxAccel<'a>> {
         &self.next
     }
 }
diff --git a/chips/sifive/src/clint.rs b/chips/sifive/src/clint.rs
index ebc41919c..875443367 100644
--- a/chips/sifive/src/clint.rs
+++ b/chips/sifive/src/clint.rs
@@ -5,7 +5,7 @@ use kernel::utilities::cells::OptionalCell;
 use kernel::utilities::registers::interfaces::Writeable;
 use kernel::utilities::registers::{register_structs, ReadWrite};
 use kernel::utilities::StaticRef;
-use kernel::ErrorCode;
+use kernel::{ErrorCode, StaticRefGEP};
 use rv32i::machine_timer::MachineTimer;
 
 register_structs! {
@@ -24,19 +24,19 @@ register_structs!
{ pub struct Clint<'a> { registers: StaticRef, client: OptionalCell<&'a dyn time::AlarmClient>, - mtimer: MachineTimer<'a>, + mtimer: MachineTimer<'a, StaticRef>>, } impl<'a> Clint<'a> { - pub fn new(base: &'a StaticRef) -> Self { + pub const fn new(base: &'a StaticRef) -> Self { Self { registers: *base, client: OptionalCell::empty(), mtimer: MachineTimer::new( - &base.compare_low, - &base.compare_high, - &base.value_low, - &base.value_high, + StaticRefGEP!(base, compare_low), + StaticRefGEP!(base, compare_high), + StaticRefGEP!(base, value_low), + StaticRefGEP!(base, value_high), ), } } @@ -137,3 +137,5 @@ impl kernel::platform::scheduler_timer::SchedulerTimer for Clint<'_> { //csr::CSR.mie.modify(csr::mie::mie::mtimer::CLEAR); } } + +kernel::very_simple_component!(impl for Clint<'static>, new(&'static StaticRef)); diff --git a/chips/sifive/src/lib.rs b/chips/sifive/src/lib.rs index 0de3b1bb8..53503aa26 100644 --- a/chips/sifive/src/lib.rs +++ b/chips/sifive/src/lib.rs @@ -3,6 +3,10 @@ #![no_std] #![crate_name = "sifive"] #![crate_type = "rlib"] +#![feature(const_refs_to_cell)] +#![feature(const_trait_impl)] +#![feature(macro_metavar_expr)] +#![feature(const_mut_refs)] pub mod clint; pub mod gpio; diff --git a/chips/uarts/Cargo.toml b/chips/uarts/Cargo.toml new file mode 100644 index 000000000..a02595290 --- /dev/null +++ b/chips/uarts/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "uarts" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +tock-registers = { path = "../../libraries/tock-register-interface" } +kernel = { path = "../../kernel" } + +[features] +ns16550_u8 = [] diff --git a/chips/uarts/src/lib.rs b/chips/uarts/src/lib.rs new file mode 100644 index 000000000..1802d2b9f --- /dev/null +++ b/chips/uarts/src/lib.rs @@ -0,0 +1,15 @@ +#![crate_name = "uarts"] +#![crate_type = "rlib"] +#![no_std] +#![feature( + macro_metavar_expr, + const_trait_impl, + const_mut_refs, + const_slice_split_at_mut +)] +#![feature(const_precise_live_drops)] + +pub mod ns16550; +pub mod primecell; +mod uart; +mod uart_zero; diff --git a/chips/uarts/src/ns16550.rs b/chips/uarts/src/ns16550.rs new file mode 100644 index 000000000..13528c20f --- /dev/null +++ b/chips/uarts/src/ns16550.rs @@ -0,0 +1,239 @@ +//! ns16550 compatible UART driver. + +use core::cell::Cell; +use core::marker::PhantomData; +use kernel::hil::time::Frequency; +use kernel::hil::uart::{Parameters, Parity, StopBits, Width}; +use kernel::utilities::registers::interfaces::{Readable, Writeable}; +use kernel::utilities::registers::{register_bitfields, register_structs, Aliased, ReadWrite}; +use kernel::utilities::StaticRef; +use kernel::ErrorCode; +use kernel::{debug, hil}; + +use crate::uart::{UartRegConstruct, UartRegistersIF}; + +// All ns165550 registers are really 8bit, but an ns16550 has a "register shift" that defines how far +// apart they are. We default to 32-bit registers here, with an option for 8-bit. + +// The register_* macros cannot handle a type alias, so factoring out the type is hard to make generic +// It is mostly the fault of repr(X) that they cannot take a type alias. +// We could define two different types, but the generics get ugly because they dont share +// a common trait. Conditional compilation to the rescue... + +macro_rules! define_uart_regs { + {$Ty : tt, $scale : tt} => { + register_structs! 
{ + pub UartRegisters { + ( 0b000 * $scale => pub(crate) brdl: ReadWrite<$Ty>), // DLL when DLAB=1 + ( 0b001 * $scale => pub(crate) ier: ReadWrite<$Ty, IER::Register>), // DLH when DLAB=1 + ( 0b010 * $scale => pub(crate) fcr_iir: Aliased<$Ty, IIR::Register, FCR::Register>), + ( 0b011 * $scale => pub(crate) lcr: ReadWrite<$Ty, LCR::Register>), + ( 0b100 * $scale => pub(crate) mcr: ReadWrite<$Ty>), + ( 0b101 * $scale => pub(crate) lsr: ReadWrite<$Ty, LSR::Register>), + ( 0b110 * $scale => pub(crate) msr: ReadWrite<$Ty>), + ( 0b111 * $scale => @END), + } + } + register_bitfields![$Ty, + pub(crate) IER [ + ALL OFFSET(0) NUMBITS(4) [], + MODEM OFFSET(3) NUMBITS(1) [], + RCV_LINE OFFSET(2) NUMBITS(1) [], + TX_EMPTY OFFSET(1) NUMBITS(1) [], + RX_RDY OFFSET(0) NUMBITS(1) [], + ], + pub(crate) FCR [ + FIFO_ENABLE OFFSET(0) NUMBITS(1) [], + CLEAR_RX OFFSET(1) NUMBITS(1) [], + CLEAR_TX OFFSET(2) NUMBITS(1) [], + FIFO_REC_TRIG_LVL OFFSET(6) NUMBITS(2) [ + ONE_BYTE = 0, + FOUR_BYTE = 1, + EIGHT_BYTE = 2, + FOURTEEN_BYTE = 3, + ], + ], + pub(crate) IIR [ + STATUS OFFSET(0) NUMBITS(1) [ + NO_INTERRUPT = 0b1, + PENDING = 0b0, + ], + IIR_CODE OFFSET(0) NUMBITS(4) [ + NO_INTERRUPT = 0b0001, + REC_STATUS = 0b0110, + REC_READY = 0b0100, + TIMEOUT = 0b1100, + TRANS_EMPTY = 0b0010, + ], + FIFOs_ENABLED OFFSET(6) NUMBITS(2) [ + DISABLED = 0b00, + ENABLED = 0b11, + ] + ], + pub(crate) LCR [ + WORD_LENGTH OFFSET(0) NUMBITS(2) [ + FIVE = 0b00, + SIX = 0b01, + SEVEN = 0b10, + EIGHT = 0b11, + ], + STOP_BITS OFFSET(2) NUMBITS(1) [ + ONE = 0b0, + TWO = 0b1, + ], + PARITY_ENABLE OFFSET(3) NUMBITS(1) [], + EVEN_PARITY OFFSET(4) NUMBITS(1) [], + FORCE_PARITY OFFSET(5) NUMBITS(1) [], + SET_BREAK OFFSET(6) NUMBITS(1) [], + DLAB OFFSET(7) NUMBITS(1) [], + ], + pub(crate) LSR [ + // 1 only if transmitters FIFO is _completely_ empty + THR_EMPTY OFFSET(5) NUMBITS(1), + // 1 only if both transmitters FIFO AND TSR + TRANS_EMPTY OFFSET(6) NUMBITS(1), + ] + ]; + } +} + +const FIFO_DEPTH: usize = 16; + +#[cfg(feature = "ns16550_u8")] +define_uart_regs!(u8, 1); + +#[cfg(not(feature = "ns16550_u8"))] +define_uart_regs!(u32, 4); + +pub struct UartRegs { + pub registers: StaticRef, + // If true, then BOTH fifos are operational. + // The depth of the FIFOs are assumed to be 16 words. + pub fifos_enabled: Cell, + pub(crate) phantom_f: core::marker::PhantomData, +} + +impl UartRegistersIF for UartRegs { + fn get_transmit_space(&self) -> usize { + let block_max = if self.fifos_enabled.get() { + FIFO_DEPTH + } else { + 1 + }; + if self.registers.lsr.is_set(LSR::THR_EMPTY) { + block_max + } else { + 0 + } + } + + fn transmit_byte(&self, val: u8) { + self.registers.brdl.set(val.into()) + } + + fn enable_interrupts(&self) { + self.registers.ier.write(IER::TX_EMPTY::SET); + } + + fn disable_interrupts(&self) { + self.registers.ier.set(0); + } + + fn interrupt_expected(&self) -> bool { + // Read what interrupt we got. This also acks the interrupt if it was a THR empty. + let iir = self.registers.fcr_iir.extract(); + + if iir.matches_all(IIR::IIR_CODE::TRANS_EMPTY) { + true + } else { + debug!("Unimplemented UART code {}", iir.read(IIR::IIR_CODE)); + false + } + } +} + +impl hil::uart::Configure for UartRegs { + fn configure(&self, params: Parameters) -> Result<(), ErrorCode> { + // This chip does not support these features. 
+ if params.parity != hil::uart::Parity::None { + return Err(ErrorCode::NOSUPPORT); + } + if params.hw_flow_control != false { + return Err(ErrorCode::NOSUPPORT); + } + + // Put this in a known state so we write to the registers we expect + self.registers.lcr.write(LCR::DLAB::CLEAR); + + // Do not get any spurious interrupts + self.disable_interrupts(); + + // Set divisor reg. This controls the BAUD rate. + + // First set DLAB in LCR. This makes brdl and ier into divisor low and high. + self.registers.lcr.write(LCR::DLAB::SET); + + // Formula is: dl = (SYSTEM_CLK_FREQ + 8 * BAUD) / (16 * BAUD) + // e.g.: BAUD 115200 and CLK 25MHz, gives 14: dl = (25 * 1000000 + (8 * 115200)) / (16 * 115200) + let dl: u16 = + ((F::frequency() + (8u32 * params.baud_rate)) / (16u32 * params.baud_rate)) as u16; + + self.registers.brdl.set(((dl & 0xFF) as u8).into()); // actually dl low byte + self.registers.ier.set((((dl >> 8) & 0xFF) as u8).into()); // actually dl high byte + + // Configure params. I've been a bit lazy here, add as needed. + assert!( + params.parity == Parity::None + && params.stop_bits == StopBits::One + && params.width == Width::Eight + ); + + self.registers.lcr.write( + LCR::PARITY_ENABLE::CLEAR + + LCR::STOP_BITS::ONE + + LCR::WORD_LENGTH::EIGHT + + LCR::DLAB::CLEAR, + ); + + // Writing to enable also resets the fifos + self.registers + .fcr_iir + .write(FCR::FIFO_REC_TRIG_LVL::EIGHT_BYTE + FCR::FIFO_ENABLE::SET); + + self.registers.mcr.set(0); + + // This might clear some pending interrupts, so why not. + self.registers.lsr.get(); + self.registers.brdl.get(); + self.registers.fcr_iir.get(); + self.registers.msr.get(); + + // Check if the UART supports FIFO operation + self.fifos_enabled.set( + self.registers + .fcr_iir + .matches_any(IIR::FIFOs_ENABLED::ENABLED), + ); + + self.enable_interrupts(); + + Ok(()) + } +} + +impl const UartRegConstruct for UartRegs { + type MemoryMapped = UartRegisters; + + fn construct(base: StaticRef) -> Self { + UartRegs { + registers: base, + fifos_enabled: Cell::new(false), + phantom_f: PhantomData, + } + } +} + +pub type Uart<'a, F> = crate::uart::Uart<'a, UartRegs>; +pub type UartZero = crate::uart_zero::ZeroUart, Client>; +pub type ZeroUartComponent = + crate::uart_zero::ZeroUartComponent, ClientFactory>; diff --git a/chips/uarts/src/primecell.rs b/chips/uarts/src/primecell.rs new file mode 100644 index 000000000..32cf14bfb --- /dev/null +++ b/chips/uarts/src/primecell.rs @@ -0,0 +1,76 @@ +//! ARM PrimeCell compatible uart +//! This is mostly incomplete as it was intended to test QEMU, which never +//! blocks. + +use crate::uart::{UartRegConstruct, UartRegistersIF}; +use kernel::hil::time::Frequency; +use kernel::hil::uart::Parameters; +use kernel::utilities::registers::ReadWrite; +use kernel::utilities::StaticRef; +use kernel::{hil, ErrorCode}; +use tock_registers::interfaces::Writeable; +use tock_registers::register_structs; + +const FIFO_DEPTH: usize = 8; + +register_structs! 
{ + pub UartRegisters { + ( 0x00 => pub(crate) dr: ReadWrite), + ( 0x04 => pub(crate) rsr_ecr: ReadWrite), + ( 0x08 => pub(crate) lcr_h : ReadWrite), + ( 0x0C => pub(crate) lcr_m : ReadWrite), + ( 0x10 => pub(crate) lcr_l : ReadWrite), + ( 0x14 => pub(crate) cr : ReadWrite), + ( 0x18 => pub(crate) fr : ReadWrite), + ( 0x1c => pub(crate) iir_icr : ReadWrite), + ( 0x20 => pub(crate) lpr : ReadWrite), + ( 0x24 => @END), + } + +} + +pub struct UartRegs { + pub registers: StaticRef, + pub(crate) phantom_f: core::marker::PhantomData, +} + +impl UartRegistersIF for UartRegs { + fn get_transmit_space(&self) -> usize { + FIFO_DEPTH + } + + fn transmit_byte(&self, val: u8) { + self.registers.dr.set(val.into()) + } + + fn enable_interrupts(&self) {} + + fn disable_interrupts(&self) {} + + fn interrupt_expected(&self) -> bool { + true + } +} + +impl hil::uart::Configure for UartRegs { + fn configure(&self, _params: Parameters) -> Result<(), ErrorCode> { + // Note: this was just for a qemu test, so I have not bothered. + Ok(()) + } +} + +impl const UartRegConstruct for UartRegs { + type MemoryMapped = UartRegisters; + + fn construct(base: StaticRef) -> Self { + UartRegs { + registers: base, + phantom_f: core::marker::PhantomData, + } + } +} + +pub type Uart<'a, F> = crate::uart::Uart<'a, UartRegs>; +pub type UartZero = crate::uart_zero::ZeroUart, Client>; +pub type ZeroUartComponent = + crate::uart_zero::ZeroUartComponent, ClientFactory>; diff --git a/chips/uarts/src/uart.rs b/chips/uarts/src/uart.rs new file mode 100644 index 000000000..109be626f --- /dev/null +++ b/chips/uarts/src/uart.rs @@ -0,0 +1,168 @@ +//! Generic uart implementation + +use core::cell::Cell; +use kernel::hil::uart::Configure; +use kernel::utilities::cells::{OptionalCell, TakeCell}; +use kernel::utilities::StaticRef; +use kernel::{hil, ErrorCode}; + +/// A low level trait that uarts in this crate can provide +pub trait UartRegistersIF: Configure { + /// Either how many words can be written, or 0 if unknown + fn get_transmit_space(&self) -> usize; + /// Transmit a byte + fn transmit_byte(&self, val: u8); + /// Enable all needed interrupts + fn enable_interrupts(&self); + /// Disable all interrupts + fn disable_interrupts(&self); + /// Was the interrupt an expected one + fn interrupt_expected(&self) -> bool; +} + +#[const_trait] +pub trait UartRegConstruct { + type MemoryMapped; + fn construct(base: StaticRef) -> Self; +} + +pub struct Uart<'a, R: UartRegistersIF + UartRegConstruct> { + regs: R, + tx_client: OptionalCell<&'a dyn hil::uart::TransmitClient>, + rx_client: OptionalCell<&'a dyn hil::uart::ReceiveClient>, + buffer: TakeCell<'static, [u8]>, + len: Cell, + index: Cell, +} + +impl<'a, R: UartRegistersIF + UartRegConstruct> Uart<'a, R> { + pub fn new(base: StaticRef) -> Self { + Uart { + regs: R::construct(base), + tx_client: OptionalCell::empty(), + rx_client: OptionalCell::empty(), + buffer: TakeCell::empty(), + len: Cell::new(0), + index: Cell::new(0), + } + } + + fn transmit(&self, tx_data: &[u8], start: usize, end: usize, allow_block: bool) -> usize { + // Always 0. We might be calling this shortly after a previous transmission. + let mut space = 0; + + // Fill the TX buffer until it reports full. + for i in start..end { + while space == 0 { + space = self.regs.get_transmit_space(); + if space == 0 && !allow_block { + return i; + } + } + + // Write the byte from the array to the tx register. 
+ self.regs.transmit_byte(tx_data[i].into()); + space -= 1; + } + end + } + + fn transmit_stored(&self, allow_block: bool, allow_callback: bool) { + // Try transmit anything left + if self.len.get() != self.index.get() { + self.buffer.map(|tx_data| { + self.index.set(self.transmit( + tx_data, + self.index.get(), + self.len.get(), + allow_block, + )) + }); + } + // And if we are done, call back + if self.len.get() == self.index.get() { + // Signal client write done only if this came from an interrupt + // Could use a deferred call here (would allow us to disable interrupts) + if allow_callback { + self.tx_client.map(|client| { + self.buffer.take().map(|buffer| { + client.transmitted_buffer(buffer, self.len.get(), Ok(())); + }); + }); + } + } + } + + pub fn handle_interrupt(&self) { + if self.regs.interrupt_expected() { + // Now handle the interrupt + self.transmit_stored(false, true); + } + } + + pub fn transmit_sync(&self, bytes: &[u8]) { + // Fill the TX buffer and spin if it is full + self.transmit(bytes, 0, bytes.len(), true); + } +} + +impl hil::uart::Configure for Uart<'_, R> { + fn configure(&self, params: hil::uart::Parameters) -> Result<(), ErrorCode> { + self.regs.configure(params) + } +} + +impl<'a, R: UartRegistersIF + UartRegConstruct> hil::uart::Transmit<'a> for Uart<'a, R> { + fn set_transmit_client(&self, client: &'a dyn hil::uart::TransmitClient) { + self.tx_client.set(client); + } + + fn transmit_buffer( + &self, + tx_data: &'static mut [u8], + tx_len: usize, + ) -> Result<(), (ErrorCode, &'static mut [u8])> { + if tx_len == 0 { + return Err((ErrorCode::SIZE, tx_data)); + } + + // Save the buffer so we can keep sending it. + self.buffer.replace(tx_data); + self.len.set(tx_len); + self.index.set(0); + + self.transmit_stored(false, false); + + Ok(()) + } + + fn transmit_abort(&self) -> Result<(), ErrorCode> { + Err(ErrorCode::FAIL) + } + + fn transmit_word(&self, _word: u32) -> Result<(), ErrorCode> { + Err(ErrorCode::FAIL) + } +} + +impl<'a, R: UartRegistersIF + UartRegConstruct> hil::uart::Receive<'a> for Uart<'a, R> { + fn set_receive_client(&self, client: &'a dyn hil::uart::ReceiveClient) { + self.rx_client.set(client); + } + + fn receive_buffer( + &self, + rx_buffer: &'static mut [u8], + _rx_len: usize, + ) -> Result<(), (ErrorCode, &'static mut [u8])> { + Err((ErrorCode::FAIL, rx_buffer)) + } + + fn receive_abort(&self) -> Result<(), ErrorCode> { + Err(ErrorCode::FAIL) + } + + fn receive_word(&self) -> Result<(), ErrorCode> { + Err(ErrorCode::FAIL) + } +} diff --git a/chips/uarts/src/uart_zero.rs b/chips/uarts/src/uart_zero.rs new file mode 100644 index 000000000..145313c19 --- /dev/null +++ b/chips/uarts/src/uart_zero.rs @@ -0,0 +1,151 @@ +//! General zero-copy UART driver. 
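+//!
+//! The flow, as implemented below: a client hands its buffer to
+//! `ZeroTransmit::transmit`; the driver converts it into a resettable iterator
+//! and drains it into the TX FIFO. If the FIFO absorbs everything, the buffer
+//! is handed straight back (`Ok(Some(buf))`). Otherwise the iterator is parked
+//! in `in_progress` and drained from `handle_interrupt`, which returns the
+//! buffer through `ZeroTransmitClient::transmit_finish`. At no point is the
+//! data copied to an intermediate buffer.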
+ +use core::marker::PhantomData; +use kernel::collections::resettable_iterator::{IntoResettableIterator, ResettableIterator}; +use kernel::collections::safe_buf::{GetByte, IntoResettableByteReadIterator}; +use kernel::component::StaticComponentFinalize; +use kernel::hil; +use kernel::hil::uart::{ZeroTransmit, ZeroTransmitClient}; +use kernel::utilities::cells::OptionalCell; +use kernel::utilities::StaticRef; +use kernel::{simple_static_component, ErrorCode}; + +pub struct ZeroUart +where + Client::Buf: IntoResettableByteReadIterator, +{ + client: Client, + regs: R, + // The currently transmitting data + in_progress: OptionalCell<::ResetIterType>, +} + +impl ZeroUart +where + Client::Buf: IntoResettableByteReadIterator, +{ + pub const fn new(client: Client, base: StaticRef) -> Self { + Self { + client, + regs: R::construct(base), + in_progress: OptionalCell::empty(), + } + } + + // Returns whether the operation completed + fn transmit>( + &self, + iter: &mut Iter, + allow_block: bool, + ) -> bool { + // We might be calling this shortly after a previous transmission. + let mut space = 0; + + loop { + while space == 0 { + space = self.regs.get_transmit_space(); + if space == 0 && !allow_block { + return false; + } + } + for b in &mut *iter { + // Write the byte from the array to the tx register. + let b = b.get_byte(); + self.regs.transmit_byte(b.into()); + space -= 1; + if space == 0 { + break; + } + } + if space != 0 { + break; + } + } + + return true; + } + + pub fn handle_interrupt(&self) { + if self.regs.interrupt_expected() { + // If there is a transmit in progress, continue it + if let Some(mut iter) = self.in_progress.take() { + if self.transmit(&mut iter.iter(), false) { + // If finished, pass back to callback + Client::transmit_finish(self, iter.reset(), Ok(())); + } else { + // Otherwise save progress + self.in_progress.set(iter) + } + } + } + } + + pub fn transmit_sync(&self, bytes: &[u8]) { + // Fill the TX buffer and spin if it is full + self.transmit(&mut bytes.into_iter(), true); + } +} + +impl hil::uart::Configure + for ZeroUart +where + Client::Buf: IntoResettableByteReadIterator, +{ + fn configure(&self, params: hil::uart::Parameters) -> Result<(), ErrorCode> { + self.regs.configure(params) + } +} + +impl ZeroTransmit + for ZeroUart +where + Client::Buf: IntoResettableByteReadIterator, +{ + fn transmit(&self, buf: Client::Buf) -> Result, (Client::Buf, ErrorCode)> { + if self.in_progress.is_some() { + return Err((buf, ErrorCode::BUSY)); + } + + let mut iter = buf.into_resettable_iterator(); + + if self.transmit(&mut iter.iter(), false) { + Ok(Some(iter.reset())) + } else { + self.in_progress.set(iter); + Ok(None) + } + } + + fn get_client(&self) -> &Client { + &self.client + } +} + +pub struct ZeroUartComponent< + R: UartRegistersIF + ~const UartRegConstruct, + ClientFactory: StaticComponentFinalize, +> where + ClientFactory::Output: ZeroTransmitClient, + ::Buf: IntoResettableByteReadIterator, +{ + _phantom: PhantomData<(R, ClientFactory)>, +} + +use crate::uart::{UartRegConstruct, UartRegistersIF}; +use kernel::hil::uart::Configure; + +simple_static_component!(impl<{R : UartRegistersIF + ~const UartRegConstruct, ClientFactory : StaticComponentFinalize}> for + ZeroUartComponent:: where + {ClientFactory::Output : ZeroTransmitClient, + ::Buf : IntoResettableByteReadIterator}, + Contain = (client : ClientFactory), + Output = ZeroUart, + NewInput = StaticRef, + FinInput = hil::uart::Parameters, + |slf, input| { + ZeroUart::new(client, input) + }, + |slf, input | { + let _ = 
slf.configure(input); + } +); diff --git a/kernel/Cargo.toml b/kernel/Cargo.toml index ff40705f0..be0bf4e17 100644 --- a/kernel/Cargo.toml +++ b/kernel/Cargo.toml @@ -8,6 +8,7 @@ edition = "2021" tock-registers = { path = "../libraries/tock-register-interface" } tock-cells = { path = "../libraries/tock-cells" } tock-tbf = { path = "../libraries/tock-tbf" } +misc = { path = "../libraries/misc" } # In general, Tock discourages the use of cargo features. However for certain # kernel crate configuration, we have not found reasonable alternatives to @@ -29,3 +30,5 @@ tock-tbf = { path = "../libraries/tock-tbf" } trace_syscalls = [] debug_load_processes = [] no_debug_panics = [] +counted_grant_refs = [] +use_static_init = [] diff --git a/kernel/src/capabilities.rs b/kernel/src/capabilities.rs index b14edb9d6..baeb3dff0 100644 --- a/kernel/src/capabilities.rs +++ b/kernel/src/capabilities.rs @@ -90,3 +90,14 @@ pub unsafe trait CreatePortTableCapability {} /// of the networking stack. A capsule would never hold this capability although /// it may hold capabilities created via this capability. pub unsafe trait NetworkCapabilityCreationCapability {} + +/// The `HoldAllowReferencesCapability` allows the bearer to hold reference counted references +/// to buffers allowed to the kernel by users. +/// Until these references are dropped, the process cannot allow another buffer in the same slot, +/// or be restarted if it crashes. +pub unsafe trait HoldAllowReferencesCapability {} +/// The 'HoldGrantReferencesCapability' allows the bearer to hold reference counted references +/// to the grant region. +/// Until these references are dropped, the grant region cannot be entered normally, and the +/// process cannot be restarted if it crashes. +pub unsafe trait HoldGrantReferencesCapability {} diff --git a/kernel/src/cheri.rs b/kernel/src/cheri.rs new file mode 100644 index 000000000..5b5b343cf --- /dev/null +++ b/kernel/src/cheri.rs @@ -0,0 +1,619 @@ +//! CHERI helpers for capabilities and inline asm for mostly CHERI-unaware rustc. +//! This still requires a rustc compiled with a CHERI-aware llvm. + +#[cfg(target_feature = "xcheri")] +use crate::debug; +#[cfg(target_feature = "xcheri")] +use core::arch::asm; +#[cfg(target_feature = "xcheri")] +use core::fmt::Debug; +#[cfg(target_feature = "xcheri")] +use core::fmt::{Formatter, LowerHex, UpperHex}; +use core::mem; +#[cfg(target_feature = "xcheri")] +use core::ops::AddAssign; + +#[cfg(target_feature = "xcheri")] +pub const CPTR_ALIGN: usize = 2 * mem::size_of::(); +#[cfg(not(target_feature = "xcheri"))] +pub const CPTR_ALIGN: usize = mem::size_of::(); + +#[cfg(target_pointer_width = "64")] +#[repr(align(16))] +#[derive(Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +struct CptrAlign(); + +#[cfg(not(target_pointer_width = "64"))] +#[repr(align(8))] +#[derive(Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +struct CptrAlign(); + +/// On CHERI this is meant to be a the same as C/C++'s __capability void* +/// On non-CHERI this is just a usize. +/// Just use *mut () if you want a non-capability in hybrid mode +// TODO: Remove me when there is compiler support +#[cfg(target_feature = "xcheri")] +#[repr(C)] +#[allow(non_camel_case_types)] +#[derive(Default, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct cptr { + // FIXME: There is nothing stopping the compiler from using two usize moves which are not + // tag preserving, apart from using a capability move being more efficient. + // CHERI memcpy does understand this rule. 
+ as_ints: [usize; 2], + align: CptrAlign, +} + +#[cfg(target_feature = "xcheri")] +impl Debug for cptr { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + self.fmt_cap(f) + } +} + +#[cfg(not(target_feature = "xcheri"))] +#[allow(non_camel_case_types)] +pub type cptr = usize; + +#[cfg(target_feature = "xcheri")] +macro_rules! inplace_cheri_asm { + ($self : expr, $code : literal, $($body : tt)*) => { + unsafe { + asm!( "lc ct0, 0({sptr})", + $code, + "sc ct0, 0({sptr})", + sptr = in(reg) $self, + $($body)*, + out("t0") _, + options(preserves_flags, nostack) + ); + } + } +} + +pub mod cheri_perms { + pub const GLOBAL: usize = 1 << 0; + pub const EXECUTE: usize = 1 << 1; + pub const LOAD: usize = 1 << 2; + pub const STORE: usize = 1 << 3; + pub const LOAD_CAP: usize = 1 << 4; + pub const STORE_CAP: usize = 1 << 5; + pub const STORE_CAP_LOCAL: usize = 1 << 6; + pub const SEAL: usize = 1 << 7; + pub const CINVOKE: usize = 1 << 8; + pub const UNSEAL: usize = 1 << 9; + pub const ACCESS_SYS: usize = 1 << 10; + pub const SET_CID: usize = 1 << 11; + + pub const DEFAULT_RWX: usize = + EXECUTE | LOAD | STORE | LOAD_CAP | STORE_CAP | GLOBAL | STORE_CAP_LOCAL; + pub const DEFAULT_RW: usize = LOAD | STORE | LOAD_CAP | STORE_CAP | GLOBAL | STORE_CAP_LOCAL; + pub const DEFAULT_RX: usize = EXECUTE | LOAD | LOAD_CAP | GLOBAL | STORE_CAP_LOCAL; + pub const DEFAULT_R: usize = LOAD | LOAD_CAP | GLOBAL | STORE_CAP_LOCAL; +} + +#[cfg(target_feature = "xcheri")] +macro_rules! cheri_get_asm { + ($self : expr, $op : literal) => { + unsafe { + let res : usize; + asm!( "lc ct0, 0({sptr})", + concat!($op, " {res}, ct0"), + sptr = in(reg) $self, + res = out(reg) res, + out("t0") _, + options(preserves_flags, pure, readonly, nostack), + ); + res + } + } +} + +pub trait CPtrOps { + fn as_ptr(&self) -> *const (); + + fn is_valid_for_operation(&self, _length: usize, _perms: usize) -> bool { + true + } + + fn as_ptr_checked(&self, length: usize, perms: usize) -> *const () { + if self.is_valid_for_operation(length, perms) { + self.as_ptr() + } else { + core::ptr::null() + } + } + + fn set_addr_from_ddc(&mut self, _addr: usize); + fn set_addr_from_pcc(&mut self, _addr: usize); + + fn set_addr_from_ddc_restricted(&mut self, addr: usize, base: usize, len: usize) { + self.set_addr_from_ddc(base); + // Justification for why this is not exact: + // cheri_mpu.rs ensures that the true range of DDC (rounded_app_brk) will not cross the + // the kernel break. + self.set_bounds(len); + self.set_addr(addr); + self.and_perms(cheri_perms::DEFAULT_RW); + } + + fn set_addr_from_pcc_restricted(&mut self, addr: usize, base: usize, len: usize) { + self.set_addr_from_pcc(base); + // This is not exact for the same reason. 
+ self.set_bounds(len); + self.set_addr(addr); + self.and_perms(cheri_perms::DEFAULT_RX); + } + + fn set_addr(&mut self, _addr: usize); + fn as_mut_usize(&mut self) -> &mut usize; + // cptr can be treated like an Option> + fn map_or(&self, default: U, f: F) -> U + where + F: FnOnce(&Self) -> U, + { + if self.as_ptr() as usize == 0usize { + default + } else { + f(self) + } + } + fn set_bounds(&mut self, _length: usize) {} + fn set_bounds_exact(&mut self, _length: usize) {} + fn and_perms(&mut self, _perms: usize) {} + fn seal_entry(&mut self) {} + fn set_flags(&mut self, _flags: usize) {} +} + +pub const TYPE_BITS_START: usize = 27; +pub const TYPE_BITS_LEN: usize = 18; + +#[cfg(target_feature = "xcheri")] +impl cptr { + pub fn fmt_cap(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!( + f, + "{:#018X} (b:{:#018X},t:{:#018X},v:{},p:{:2X}", + self.as_ptr() as usize, + self.get_base(), + self.get_top_unclamped(), + self.get_tag(), + self.get_perms(), + ) + } + + pub fn get_offset(&self) -> usize { + cheri_get_asm!(self, "cgetoffset") + } + pub fn get_len(&self) -> usize { + cheri_get_asm!(self, "cgetlen") + } + pub fn get_perms(&self) -> usize { + cheri_get_asm!(self, "cgetperm") + } + pub fn get_tag(&self) -> usize { + cheri_get_asm!(self, "cgettag") + } + pub fn get_type(&self) -> usize { + cheri_get_asm!(self, "cgettype") + } + pub fn get_base(&self) -> usize { + cheri_get_asm!(self, "cgetbase") + } + + /// Slighter more efficient than doing get_base() and get_tag() separately + #[inline] + pub fn get_base_and_tag(&self) -> (usize, bool) { + unsafe { + let base: usize; + let tag: usize; + asm!( + "lc ct0, 0({sptr})", + "cgetbase {base}, ct0", + "cgettag {tag}, ct0", + sptr = in(reg) self, + base = out(reg) base, + tag = out(reg) tag, + out("t0") _, + options(preserves_flags, pure, readonly, nostack), + ); + (base, tag != 0) + } + } + + #[inline] + pub fn invalidate_shared(shared: &core::cell::Cell) { + unsafe { + asm!( + "lw t0, 0({sptr})", + "sw t0, 0({sptr})", + sptr = in(reg) (shared as *const core::cell::Cell), + out("t0") _, + options(preserves_flags, nostack), + ) + } + } + + pub fn get_top_unclamped(&self) -> usize { + self.get_base() + self.get_len() + } + pub fn get_high(&self) -> usize { + self.as_ints[1] + } +} + +#[cfg(target_feature = "xcheri")] +impl CPtrOps for cptr { + fn as_ptr(&self) -> *const () { + usize::from(self) as *const () + } + + fn is_valid_for_operation(&self, length: usize, perms: usize) -> bool { + let coffset: usize = self.get_offset(); + let clen: usize = self.get_len(); + let cperms: usize = self.get_perms(); + let tag: usize = self.get_tag(); + let ctype: usize = self.get_type(); + + // Must be tagged + let mut checks_pass = tag != 0; + + // Must be unsealed + checks_pass &= ctype == !0usize; + + // Have all specified permissions + checks_pass &= (cperms & perms) == perms; + + // Now check length. We want to check that [coffset, coffset + length] fits (non-strictly) + // within [0, clen] + + // First, check offset is in [0, clen], i.e., the capability is in its bounds. + // NOTE: If the offset is negative, this will still be false as we do an unsigned comparison + // NOTE: cgetlen is saturating, not truncating. + checks_pass &= coffset <= clen; + + // Second, Check offset + length (the end of the offset the user is asking us to access) + // <= + // clen. 
(the largest end the capability would allow) + // NOTE: the user controls length and so can overflow the calculation offset + length + // However, as we have already checked offset is in the range [0, clen] we can use + // this arrangement: + checks_pass &= length <= clen - coffset; + + if !checks_pass && length != usize::MAX && length != 1 { + debug!( + "Capability {:?} not valid for operation of length {}. perms: {:x}.", + self, length, perms + ); + } + + checks_pass + } + + fn set_addr_from_ddc(&mut self, _addr: usize) { + unsafe { + asm!( "cspecialr ct0, ddc", + "csetaddr ct0, ct0, {val}", + "sc ct0, 0({sptr})", + sptr = in(reg) self, + val = in(reg) _addr, + out("t0") _ + ); + } + } + + fn set_addr_from_pcc(&mut self, _addr: usize) { + unsafe { + asm!( "cspecialr ct0, pcc", + "csetaddr ct0, ct0, {val}", + "sc ct0, 0({sptr})", + sptr = in(reg) self, + val = in(reg) _addr, + out("t0") _ + ); + } + } + + fn set_addr(&mut self, _addr: usize) { + inplace_cheri_asm!(self, "csetaddr ct0, ct0, {val}", val = in(reg) _addr) + } + + fn as_mut_usize(&mut self) -> &mut usize { + return &mut self.as_ints[0]; + } + + fn set_bounds(&mut self, length: usize) { + inplace_cheri_asm!(self, "csetbounds ct0, ct0, {val}", val = in(reg) length) + } + + fn set_bounds_exact(&mut self, length: usize) { + inplace_cheri_asm!(self, "csetboundsexact ct0, ct0, {val}", val = in(reg) length) + } + + fn and_perms(&mut self, perms: usize) { + inplace_cheri_asm!(self, "candperm ct0, ct0, {val}", val = in(reg) perms) + } + + fn set_flags(&mut self, flags: usize) { + inplace_cheri_asm!(self, "csetflags ct0, ct0, {val}", val = in(reg) flags) + } +} + +#[cfg(target_feature = "xcheri")] +impl Clone for cptr { + fn clone(&self) -> Self { + let mut x: cptr = cptr { + as_ints: [0, 0], + align: CptrAlign(), + }; + x.clone_from(self); + x + } + + // The compiler is still getting moves wrong for capabilities + // This version gets it right, I might make the type not copy, + // and then use this everywhere + + fn clone_from(&mut self, source: &Self) { + unsafe { + asm!( "lc ct0, 0({src})", + "sc ct0, 0({dst})", + src = in(reg) source, + dst = in(reg) self, + out("t0") _ + ); + } + } +} + +#[cfg(not(target_feature = "xcheri"))] +impl CPtrOps for usize { + fn as_ptr(&self) -> *const () { + *self as *const () + } + + fn set_addr_from_ddc(&mut self, _addr: usize) { + *self = _addr; + } + fn set_addr_from_pcc(&mut self, _addr: usize) { + *self = _addr; + } + + fn set_addr(&mut self, _addr: usize) { + *self = _addr; + } + + fn as_mut_usize(&mut self) -> &mut usize { + self + } +} + +#[cfg(target_feature = "xcheri")] +impl AddAssign for cptr { + fn add_assign(&mut self, rhs: usize) { + inplace_cheri_asm!(self, "cincoffset ct0, ct0, {val}", val = in(reg) rhs) + } +} + +// For printing the address as hex +#[cfg(target_feature = "xcheri")] +impl UpperHex for cptr { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + self.fmt_cap(f) + } +} + +#[cfg(target_feature = "xcheri")] +impl LowerHex for cptr { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + self.fmt_cap(f) + } +} + +// A provenance free cast +#[cfg(target_feature = "xcheri")] +impl From for cptr { + fn from(val: usize) -> Self { + let mut res: cptr = cptr::default(); + res.as_ints[0] = val; + res + } +} + +// Cast back to usize +#[cfg(target_feature = "xcheri")] +impl From for usize { + fn from(ptr: cptr) -> Self { + ptr.as_ints[0] + } +} +#[cfg(target_feature = "xcheri")] +impl From<&cptr> for usize { + fn from(ptr: &cptr) -> Self { + ptr.as_ints[0] + } +} 
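+
+// Example (a sketch of intended use, mirroring the checks above): validating a
+// user-supplied capability before dereferencing it as a `len`-byte buffer:
+//
+//     let p = user_cptr.as_ptr_checked(len, cheri_perms::DEFAULT_RW);
+//     if p.is_null() {
+//         // capability was untagged, sealed, too small, or lacked permissions
+//     }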
+ +// Trace on / off on QEMU +pub fn trace_on() { + #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] + unsafe { + core::arch::asm!("slti zero, zero, 0x1b") + } +} + +pub fn trace_off() { + #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] + unsafe { + core::arch::asm!("slti zero, zero, 0x1e") + } +} + +// Macros to help asm. Might just move these to easm + +/// Gives the name of the pointer-width register +#[cfg(target_feature = "xcheri")] +#[macro_export] +macro_rules! ptrreg { + (zero) => {"cnull"}; + ($($targ: expr)?) => {concat!("c", $($targ)?)} +} + +#[cfg(not(target_feature = "xcheri"))] +#[macro_export] +macro_rules! ptrreg { + (zero) => {"zero"}; + ($($targ: expr)?) => {concat!("", $($targ)?)} +} + +/// Gives the name of the pointer-width register (by number) +#[cfg(target_feature = "xcheri")] +#[macro_export] +macro_rules! ptrregn { + ($($targ: expr)?) => {concat!("c", $($targ)?)} +} + +#[cfg(not(target_feature = "xcheri"))] +#[macro_export] +macro_rules! ptrregn { + ($($targ: expr)?) => {concat!("x", $($targ)?)} +} + +// Loads an XLEN size register +#[cfg(target_arch = "riscv32")] +#[macro_export] +macro_rules! ldx { + () => { + "lw " + }; +} +#[cfg(target_arch = "riscv64")] +#[macro_export] +macro_rules! ldx { + () => { + "ld " + }; +} + +/// Stores a XLEN size register +#[cfg(target_arch = "riscv32")] +#[macro_export] +macro_rules! stx { + () => { + "sw " + }; +} +/// Stores a XLEN size register +#[cfg(target_arch = "riscv64")] +#[macro_export] +macro_rules! stx { + () => { + "sd " + }; +} + +/// Loads a pointer-sized register +#[cfg(target_feature = "xcheri")] +#[macro_export] +macro_rules! ldptr { + () => { + "lc " + }; +} +/// Loads a pointer-sized register +#[cfg(not(target_feature = "xcheri"))] +#[macro_export] +macro_rules! ldptr { + () => { + ldx!() + }; +} + +/// Stores a pointer-sized register +#[cfg(target_feature = "xcheri")] +#[macro_export] +macro_rules! stptr { + () => { + "sc " + }; +} + +/// Stores a pointer-sized register +#[cfg(not(target_feature = "xcheri"))] +#[macro_export] +macro_rules! stptr { + () => { + stx!() + }; +} + +/// Does csr or cspecial depending on platform +#[cfg(target_feature = "xcheri")] +#[macro_export] +macro_rules! csr_ptr { + () => { + "cspecial" + }; +} +/// Does csr or cspecial depending on platform +#[cfg(not(target_feature = "xcheri"))] +#[macro_export] +macro_rules! csr_ptr { + () => { + "csr" + }; +} + +#[macro_export] +macro_rules! csr_op { + {$REG : tt <- $SRC : tt} => + {concat!(csr_ptr!(), "w", " ", $REG, ptrreg!(), ", ", ptrreg!($SRC))}; + {$REG : tt -> $DST : tt} => + {concat!(csr_ptr!(), "r", " ", ptrreg!($DST), ", ", $REG, ptrreg!())}; + {$DST : tt <- $REG : tt <- $SRC : tt} => + {concat!(csr_ptr!(), "rw", " ", ptrreg!($DST), ", ", $REG, ptrreg!(), ", ", ptrreg!($SRC))}; +} + +/// Is there already an assembly level symbol for this? +/// Expands to a string constant 0 or 1 +#[cfg(target_feature = "xcheri")] +#[macro_export] +macro_rules! is_cheri { + () => { + "1" + }; +} + +#[cfg(not(target_feature = "xcheri"))] +#[macro_export] +macro_rules! is_cheri { + () => { + "0" + }; +} + +/// CRAM: Returns a mask (all ones in the top end) that can be used to round bounds and lengths +/// such that they can be represented. Both length, bottom, and top must be rounded using the +/// mask. +/// Increasing length by rounding it up is guaranteed not to change the alignment requirement. +/// Increasing length by any more may change the alignment requirement. 
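+///
+/// Example (a sketch): using the mask to round a `(base, length)` region until
+/// it is representable:
+///
+/// ```ignore
+/// let mask = cram(length);
+/// let base = base & mask;               // round base down
+/// let length = (length + !mask) & mask; // round length up
+/// ```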
+pub fn cram(_length: usize) -> usize { + let result: usize; + + // SAFETY: cram is always safe + #[cfg(target_feature = "xcheri")] + unsafe { + asm!( + "cram {result}, {input}", + input = in(reg) _length, + result = out(reg) result, + options(pure, nomem, preserves_flags, nostack), + ) + } + #[cfg(not(target_feature = "xcheri"))] + { + result = !0; + } + + result +} diff --git a/kernel/src/collections/list.rs b/kernel/src/collections/list.rs index 3e46142c0..929bcfa4f 100644 --- a/kernel/src/collections/list.rs +++ b/kernel/src/collections/list.rs @@ -1,34 +1,118 @@ -//! Linked list implementation. +//! Singly-Linked list implementation. +//! ListLink uses a standard read-only reference to some T as link +//! ListLinkGen uses any reference type as long is it implements Deref. +//! -use core::cell::Cell; +use crate::grant::PRef; +use core::cell::{Cell, Ref}; +use core::ops::Deref; +use misc::take_borrow::TakeBorrow; -pub struct ListLink<'a, T: 'a + ?Sized>(Cell>); +/// Ref does not implement Clone, so we need another trait that is really just Clone but can also +/// be implemented for Ref. Would be nice to automatically implement it for all clone types, but +/// min_specialisation does not seem to be enough? +pub trait RefClone { + fn ref_clone(&self) -> Self; +} -impl<'a, T: ?Sized> ListLink<'a, T> { - pub const fn empty() -> ListLink<'a, T> { - ListLink(Cell::new(None)) +impl RefClone for Option { + #[inline] + fn ref_clone(&self) -> Self { + match &self { + None => None, + Some(v) => Some(v.ref_clone()), + } } } -pub trait ListNode<'a, T: ?Sized> { - fn next(&'a self) -> &'a ListLink<'a, T>; +// Like Deref but can panic. Panic is currently on possible for PRef. +pub trait PanicDeref { + type Target: ?Sized; + + fn panic_deref(&self) -> &Self::Target; } +// Anything that supports deref supports PanicDeref +impl PanicDeref for T { + type Target = ::Target; -pub struct List<'a, T: 'a + ?Sized + ListNode<'a, T>> { - head: ListLink<'a, T>, + fn panic_deref(&self) -> &Self::Target { + self.deref() + } } -pub struct ListIterator<'a, T: 'a + ?Sized + ListNode<'a, T>> { - cur: Option<&'a T>, +pub struct ListLinkGen + RefClone>(Cell>); + +impl + RefClone> ListLinkGen { + #[inline] + pub fn clone_link(&self) -> Option { + self.0.take_borrow().ref_clone() + } + + #[inline] + pub fn points_to(&self, next: &T) -> bool { + match self.0.take_borrow().as_ref() { + None => false, + Some(r) => core::ptr::eq(next as *const T, r.panic_deref() as *const T), + } + } + + #[inline] + pub fn is_some(&self) -> bool { + self.0.take_borrow().is_some() + } + + #[inline] + pub const fn empty() -> Self { + ListLinkGen(Cell::new(None)) + } + + #[inline] + pub const fn new(link: LinkT) -> Self { + ListLinkGen(Cell::new(Some(link))) + } } -impl<'a, T: ?Sized + ListNode<'a, T>> Iterator for ListIterator<'a, T> { - type Item = &'a T; +impl + RefClone> Default for ListLinkGen { + fn default() -> Self { + Self::empty() + } +} + +pub trait GenListNode + RefClone> { + fn next(&self) -> &ListLinkGen; +} - fn next(&mut self) -> Option<&'a T> { - match self.cur { +#[derive(Default)] +pub struct GenList, LinkT: PanicDeref + RefClone> { + head: ListLinkGen, +} + +#[derive(Default)] +pub struct GenListFastTail< + T: ?Sized + GenListNode, + LinkT: PanicDeref + RefClone, +> { + head: GenList, + tail: ListLinkGen, +} + +pub struct GenListIterator< + T: ?Sized + GenListNode, + LinkT: PanicDeref + RefClone, +> { + cur: Option, +} + +impl, LinkT: PanicDeref + RefClone> Iterator + for GenListIterator +{ + type Item = LinkT; + + 
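+
+    // Advance by cloning the next node's link (`RefClone`), so `LinkT` can be
+    // a plain `&T`, a `RefCell` `Ref`, or a `PRef` without the iterator
+    // holding a borrow of the node it just yielded.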
#[inline(always)] + fn next(&mut self) -> Option { + match self.cur.take() { Some(res) => { - self.cur = res.next().0.get(); + self.cur = res.panic_deref().next().clone_link(); Some(res) } None => None, @@ -36,42 +120,522 @@ impl<'a, T: ?Sized + ListNode<'a, T>> Iterator for ListIterator<'a, T> { } } -impl<'a, T: ?Sized + ListNode<'a, T>> List<'a, T> { - pub const fn new() -> List<'a, T> { - List { - head: ListLink(Cell::new(None)), +impl, LinkT: PanicDeref + RefClone> + GenList +{ + /// Construct new empty list + #[inline] + pub const fn new() -> Self { + Self { + head: ListLinkGen::empty(), } } - pub fn head(&self) -> Option<&'a T> { - self.head.0.get() + #[inline] + pub const fn new_with_head(head: ListLinkGen) -> Self { + Self { head } + } + + /// prefer to head().is_some() as clone may be expensive + #[inline] + pub fn is_some(&self) -> bool { + self.head.0.take_borrow().is_some() + } + + /// Get a (copy) of a link to the head element + #[inline] + pub fn head(&self) -> Option { + self.head.clone_link() } - pub fn push_head(&self, node: &'a T) { - node.next().0.set(self.head.0.get()); + #[inline] + pub fn push_head(&self, node: LinkT) { + node.panic_deref().next().0.set(self.head.0.take()); self.head.0.set(Some(node)); } - pub fn push_tail(&self, node: &'a T) { - node.next().0.set(None); - match self.iter().last() { - Some(last) => last.next().0.set(Some(node)), + /// Note this may be expensive as it walks the list. + /// Use the other List type to avoid cost for long lists. + #[inline] + pub fn push_tail(&self, node: LinkT) { + node.panic_deref().next().0.set(None); + match &self.iter().last() { + Some(last) => last.panic_deref().next().0.set(Some(node)), None => self.push_head(node), } } - pub fn pop_head(&self) -> Option<&'a T> { - let remove = self.head.0.get(); - match remove { - Some(node) => self.head.0.set(node.next().0.get()), - None => self.head.0.set(None), + /// Insert node after node_prev. This is not offered as a method on a link as the list tracking + /// structure may need to update itself. + #[inline] + pub fn insert_after(&self, node_prev: &T, node: LinkT) { + // node's next is dropped + let prev_link = &node_prev.next().0; + let last = prev_link.take(); + // node points to what node_prev did + node.panic_deref().next().0.set(last); + // node_prev now points to node + prev_link.set(Some(node)); + } + + #[inline] + pub fn remove_after(&self, node_prev: &ListLinkGen, node: &T) { + node_prev.0.set(node.next().0.take()) + } + + /// Apply a function with a reference to the link that points to a node + #[inline] + pub fn find_prev(&self, node: &T) -> Option { + for prev in self.iter() { + if prev.panic_deref().next().points_to(node) { + return Some(prev); + } + } + None + } + + /// Try to remove from the list. Returns a link to the previous element if there was one. 
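+    /// Note: this is O(n), as it may have to walk the list to find the
+    /// predecessor of `node`.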
+ #[inline] + pub fn try_remove(&self, node: &T) -> Option { + if self.head.points_to(node) { + self.remove_after(&self.head, node); + None + } else { + match self.find_prev(node) { + None => None, + Some(prev) => { + self.remove_after(prev.panic_deref().next(), node); + Some(prev) + } + } + } + } + + #[inline] + pub fn pop_head(&self) -> Option { + let remove = self.head.0.take(); + if let Some(remove) = &remove { + self.head.0.set(remove.panic_deref().next().0.take()); } remove } - pub fn iter(&self) -> ListIterator<'a, T> { - ListIterator { - cur: self.head.0.get(), + #[inline] + pub fn peek_head(&self) -> Option { + self.head.clone_link() + } + + #[inline] + pub fn iter(&self) -> GenListIterator { + GenListIterator { + cur: self.head.clone_link(), + } + } + + /// Filter items out of list. Items are removed if f returns true. + /// Returns a link to the new last item. + #[inline] + pub fn filter bool>(&self, f: F) -> Option { + // This is a link to the node we are finding a successor for. + // The first link is not in a normal node so this is none + let mut last_link = None; + // This is the actual next ptr we will set. + let mut last_link_next_ptr = &self.head; + + // A link to the current item we are filtering + let mut cur_item = self.head.0.take(); + + while let Some(some_cur_item) = cur_item { + // Get the next current item + let cur_item_deref = some_cur_item.panic_deref(); + cur_item = cur_item_deref.next().0.take(); + // Check if we need to filter the current one + if !f(cur_item_deref) { + // clone link and place it + last_link_next_ptr.0.set(Some(some_cur_item.ref_clone())); + // Set the next link to set + last_link = Some(some_cur_item); + last_link_next_ptr = last_link.as_ref().unwrap().panic_deref().next(); + } } + + last_link + } +} + +impl, LinkT: PanicDeref + RefClone> + GenListFastTail +{ + /// Construct new empty list + #[inline] + pub const fn new() -> Self { + Self { + head: GenList::new(), + tail: ListLinkGen::empty(), + } + } + + #[inline] + pub const fn new_with_hd_tl(head: ListLinkGen, tail: ListLinkGen) -> Self { + Self { + head: GenList::new_with_head(head), + tail, + } + } + + /// prefer to head().is_some() as clone may be expensive + #[inline] + pub fn is_some(&self) -> bool { + self.head.is_some() + } + + /// Get a (copy) of a link to the head element + #[inline] + pub fn head(&self) -> Option { + self.head.head() + } + + #[inline] + pub fn push_head(&self, node: LinkT) { + if !self.is_some() { + self.tail.0.set(Some(node.ref_clone())) + } + self.head.push_head(node) + } + + /// This is O(1) + #[inline] + pub fn push_tail(&self, node: LinkT) { + match self.tail.0.take() { + None => self.push_head(node), + Some(old_tail) => { + self.tail.0.set(Some(node.ref_clone())); + self.head.insert_after(old_tail.panic_deref(), node); + } + } + } + + /// Insert node after node_prev. This is not offered as a method on a link as the list tracking + /// structure may need to update itself. 
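An editorial usage sketch of the fast-tail variant through the `Queue` alias defined a little further down (`Queue<'a, T> = GenListFastTail<T, &'a T>`). Names and paths follow this patch's kernel crate; treat it as an illustration rather than vendored code.

```rust
use kernel::collections::list::{ListLink, ListNode, Queue};

struct Task<'a> {
    id: u32,
    next: ListLink<'a, Task<'a>>,
}

impl<'a> ListNode<'a, Task<'a>> for Task<'a> {
    fn next(&self) -> &ListLink<'a, Task<'a>> {
        &self.next
    }
}

fn demo() {
    let t1 = Task { id: 1, next: ListLink::empty() };
    let t2 = Task { id: 2, next: ListLink::empty() };

    let queue: Queue<Task> = Queue::new();
    queue.push_tail(&t1); // O(1): goes straight through the cached tail link
    queue.push_tail(&t2);

    assert_eq!(queue.pop_head().unwrap().id, 1); // FIFO order
    assert_eq!(queue.pop_head().unwrap().id, 2);
}
```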
+ #[inline] + pub fn insert_after(&self, node_prev: &T, node: LinkT) { + if self.tail.points_to(node_prev) { + self.tail.0.set(Some(node.ref_clone())) + } + self.head.insert_after(node_prev, node) + } + + #[inline] + pub fn peek_head(&self) -> Option { + self.head.peek_head() + } + + /// Try to remove from the list + #[inline] + pub fn try_remove(&self, node: &T) { + let prev = self.head.try_remove(node); + if self.tail.points_to(node) { + self.tail.0.set(prev); + } + } + + #[inline] + pub fn pop_head(&self) -> Option { + let result = self.head.pop_head(); + if !self.head.is_some() { + self.tail.0.set(None); + } + result + } + + #[inline] + pub fn iter(&self) -> GenListIterator { + self.head.iter() + } + + /// Filter items out of list. Items are removed if f returns true. + #[inline] + pub fn filter bool>(&self, f: F) { + self.tail.0.set(self.head.filter(f)) + } +} + +// The standard list types that use a reference + +pub type ListLink<'a, T> = ListLinkGen; +pub type List<'a, T> = GenList; +pub type ListIterator<'a, T> = GenListIterator; +pub type Queue<'a, T> = GenListFastTail; + +impl<'a, T: ?Sized> RefClone for &'a T { + #[inline] + fn ref_clone(&self) -> Self { + self.clone() + } +} + +// You can implement the GenListNode rather than this +pub trait ListNode<'a, T: ?Sized> { + fn next(&self) -> &ListLink<'a, T>; +} +// But in order to provide backwards compat, automatic implementation of the generic interface: +impl<'a, T: 'a + ?Sized + ListNode<'a, T>> GenListNode for T { + #[inline(always)] + fn next(&self) -> &ListLinkGen { + >::next(self) + } +} + +// Using Ref (recall 'b is the lifetime of the borrow of the RefCell) + +impl<'a, T: ?Sized> RefClone for Ref<'a, T> { + #[inline] + fn ref_clone(&self) -> Self { + Ref::clone(self) + } +} + +pub type RefListLink<'b, T> = ListLinkGen>; +pub type RefList<'b, T> = GenList>; +pub type RefListIterator<'b, T> = GenListIterator>; +pub type RefQueue<'b, T> = GenListFastTail>; + +// Using Pref + +impl RefClone for PRef { + #[inline] + fn ref_clone(&self) -> Self { + self.clone() + } +} + +pub type PRefListLink = ListLinkGen>; +pub type PRefList = GenList>; +pub type PRefListIterator = GenListIterator>; +pub type PRefQueue = GenListFastTail>; + +#[cfg(test)] +mod tests { + use crate::collections::list::{ + GenList, GenListFastTail, GenListNode, ListLinkGen, PanicDeref, RefClone, + }; + use core::cell::{Cell, Ref, RefCell}; + use misc::leak_thread_local; + + trait GetVal { + fn get_val(&self) -> u8; + } + + // Macro to declare a few different node types + macro_rules! declare_list { + ($node_ty : ident, $link_ty : ty, $list_ty: ident) => { + // A node with a u8 and any type of link so we can test different ones + struct $node_ty<'a> { + next: ListLinkGen<$node_ty<'a>, $link_ty>, + val: Cell, + } + + impl<'a> GenListNode<$node_ty<'a>, $link_ty> for $node_ty<'a> { + fn next(&self) -> &ListLinkGen<$node_ty<'a>, $link_ty> { + &self.next + } + } + + impl<'a> $node_ty<'a> { + const fn new(val: u8) -> Self { + Self { + next: ListLinkGen::<$node_ty<'a>, $link_ty>(Cell::new(None)), + val: Cell::new(val), + } + } + } + + impl<'a> GetVal for $node_ty<'a> { + fn get_val(&self) -> u8 { + self.val.get() + } + } + }; + } + + // Standard references + declare_list!(NormalNode, &'a NormalNode<'a>, ANormalList); + // RefCell based + declare_list!(RefNode, Ref<'a, RefNode<'a>>, ARefList); + + thread_local! 
{ + static NODE1: RefCell> = RefCell::new(RefNode::new(1)); + static NODE2: RefCell> = RefCell::new(RefNode::new(2)); + static NODE3: RefCell> = RefCell::new(RefNode::new(3)); + } + + fn get_node1() -> Ref<'static, RefNode<'static>> { + unsafe { leak_thread_local!(NODE1) }.borrow() + } + fn get_node2() -> Ref<'static, RefNode<'static>> { + unsafe { leak_thread_local!(NODE2) }.borrow() + } + fn get_node3() -> Ref<'static, RefNode<'static>> { + unsafe { leak_thread_local!(NODE3) }.borrow() + } + + #[test] + fn test_is_some() { + // Test for any list + fn do_test, T: GenListNode + GetVal>( + a_node: LinkT, + ) { + let new_list = GenList::::new(); + assert!(!new_list.is_some()); + new_list.push_head(a_node); + assert_eq!(new_list.head().unwrap().panic_deref().get_val(), 1); + assert!(new_list.is_some()); + + let a_node = new_list.pop_head().unwrap(); + + let new_queue = GenListFastTail::::new(); + assert!(!new_queue.is_some()); + new_queue.push_head(a_node); + assert_eq!(new_queue.head().unwrap().panic_deref().get_val(), 1); + assert!(new_queue.is_some()); + } + + // Normal lists + let node = NormalNode::new(1); + do_test(&node); + // Ref lists + do_test(get_node1()); + } + + #[test] + fn test_push_pop() { + fn do_test, T: GenListNode + GetVal>( + node1: LinkT, + node2: LinkT, + node3: LinkT, + ) { + let a_list = GenListFastTail::::new(); + a_list.push_head(node1); + a_list.push_head(node2); + a_list.push_head(node3); + + let node3 = a_list.pop_head().unwrap(); + let node2 = a_list.pop_head().unwrap(); + let node1 = a_list.pop_head().unwrap(); + + assert_eq!(node3.panic_deref().get_val(), 3); + assert_eq!(node2.panic_deref().get_val(), 2); + assert_eq!(node1.panic_deref().get_val(), 1); + } + + let node1 = NormalNode::new(1); + let node2 = NormalNode::new(2); + let node3 = NormalNode::new(3); + + do_test(&node1, &node2, &node3); + do_test(get_node1(), get_node2(), get_node3()); + } + + #[test] + fn test_push_tail() { + fn do_test, T: GenListNode + GetVal>( + node1: LinkT, + node2: LinkT, + node3: LinkT, + ) { + let node1_clone = node1.ref_clone(); + + let a_list = GenListFastTail::::new(); + // [] + a_list.push_head(node1); + // [1] + a_list.push_tail(node2); + // [1,2] + a_list.push_head(node3); + // [3,1,2] + a_list.try_remove(node1_clone.panic_deref()); + // [3,2] + + let node3 = a_list.pop_head().unwrap(); + let node2 = a_list.pop_head().unwrap(); + + assert_eq!(node3.panic_deref().get_val(), 3); + assert_eq!(node2.panic_deref().get_val(), 2); + } + + let node1 = NormalNode::new(1); + let node2 = NormalNode::new(2); + let node3 = NormalNode::new(3); + + do_test(&node1, &node2, &node3); + do_test(get_node1(), get_node2(), get_node3()); + } + + #[test] + fn test_filter() { + fn do_test, T: GenListNode + GetVal>( + node1: LinkT, + node2: LinkT, + node3: LinkT, + ) { + let a_list = GenListFastTail::::new(); + // [] + a_list.push_head(node1); + // [1] + a_list.push_tail(node2); + // [1,2] + a_list.push_head(node3); + // [3,1,2] + + a_list.filter(|item| item.get_val() % 2 == 1); + // [2] + + assert_eq!(a_list.pop_head().unwrap().panic_deref().get_val(), 2); + assert!(!a_list.is_some()); + } + + let node1 = NormalNode::new(1); + let node2 = NormalNode::new(2); + let node3 = NormalNode::new(3); + + do_test(&node1, &node2, &node3); + do_test(get_node1(), get_node2(), get_node3()); + } + + #[test] + fn test_iter() { + fn do_test, T: GenListNode + GetVal>( + node1: LinkT, + node2: LinkT, + node3: LinkT, + ) { + let a_list = GenListFastTail::::new(); + // [] + a_list.push_head(node1); + // [1] + 
a_list.push_tail(node2); + // [1,2] + a_list.push_head(node3); + // [3,1,2] + + let expect = [3, 1, 2]; + + // Iterator should work more than once + for _ in 0..1 { + let mut ndx = 0; + for node in a_list.iter() { + assert_eq!(node.panic_deref().get_val(), expect[ndx]); + ndx += 1; + } + assert_eq!(ndx, 3); + } + } + + let node1 = NormalNode::new(1); + let node2 = NormalNode::new(2); + let node3 = NormalNode::new(3); + + do_test(&node1, &node2, &node3); + do_test(get_node1(), get_node2(), get_node3()); } } diff --git a/kernel/src/collections/mod.rs b/kernel/src/collections/mod.rs index 8c2ae4d5a..5f1fe03a6 100644 --- a/kernel/src/collections/mod.rs +++ b/kernel/src/collections/mod.rs @@ -2,4 +2,6 @@ pub mod list; pub mod queue; +pub mod resettable_iterator; pub mod ring_buffer; +pub mod safe_buf; diff --git a/kernel/src/collections/resettable_iterator.rs b/kernel/src/collections/resettable_iterator.rs new file mode 100644 index 000000000..a93cb9bfe --- /dev/null +++ b/kernel/src/collections/resettable_iterator.rs @@ -0,0 +1,789 @@ +//! Provides traits (and implementations for some collections) for two new iterator-like interfaces +//! Resettable iterators support a reset method to re-obtain the collection from the iterator. +//! This is useful for when a borrow would not suffice because there is no sensible scope in which +//! the borrow could occur. +//! It also adds chasing iterators, which are two iterators through the same collection where one +//! chases the other. The chasing iterator returns none when it has returned every item the first +//! iterator has. If more items are taken from the first iterator, the chasing iterator will have +//! new items. + +use crate::collections::resettable_iterator::private::IntoRawHelper; +use crate::grant::{PRefBase, Track}; +use core::cell::{Ref, RefMut}; +use core::marker::PhantomData; + +mod private { + use core::cell::{Ref, RefMut}; + use core::ptr::NonNull; + + /// Has a raw representation + pub trait IntoRawHelper<'a, T: ?Sized> { + type Output; + type RefT; + /// SAFETY: Convert self into a raw form, and also return a reference. It is up to the caller to ensure + /// that when from_raw_mut is called, nothing derived from the reference exists. + /// It is also up to the caller to ensure that the &'a T does not outlive the Output. + unsafe fn into_raw(self) -> (Self::Output, Self::RefT); + /// SAFETY: See into_raw_mut + unsafe fn from_raw(raw: Self::Output) -> Self; + } + + impl<'a, T: ?Sized> IntoRawHelper<'a, T> for &'a mut T { + type Output = NonNull; + type RefT = &'a mut T; + unsafe fn into_raw(self) -> (Self::Output, &'a mut T) { + (self.into(), self) + } + unsafe fn from_raw(mut raw: Self::Output) -> Self { + raw.as_mut() + } + } + + impl<'a, T: ?Sized> IntoRawHelper<'a, T> for RefMut<'a, T> { + type Output = Self; + type RefT = &'a mut T; + + unsafe fn into_raw(self) -> (Self::Output, Self::RefT) { + let mut ptr: Option> = None; + // Take the reference out the smart pointer. + // Soundness: this is a little dodgy. Internally, Ref and RefMut use raw pointers, + // so it is sound to make an actual rust reference, as long as we don't make another, + // which we won't. 
However, it is worth noting that if the internals of Ref change, + // then this suddenly becomes unsound + let unchanged = RefMut::map(self, |as_ref| { + ptr = Some(as_ref.into()); + as_ref + }); + let mut ptr = ptr.unwrap_unchecked(); + (unchanged, ptr.as_mut()) + } + + unsafe fn from_raw(raw: Self::Output) -> Self { + raw + } + } + + impl<'a, T: ?Sized> IntoRawHelper<'a, T> for Ref<'a, T> { + type Output = Self; + type RefT = &'a T; + unsafe fn into_raw(self) -> (Self::Output, Self::RefT) { + let mut ptr: Option> = None; + // Take the reference out the smart pointer. + // Soundness: see into_raw_mut + let unchanged = Ref::map(self, |as_ref| { + ptr = Some(as_ref.into()); + as_ref + }); + let ptr = ptr.unwrap_unchecked(); + (unchanged, ptr.as_ref()) + } + + unsafe fn from_raw(raw: Self::Output) -> Self { + raw + } + } +} + +/// Boilerplate to declare a useful alias for a resettable iterator where the item obeys some bounds +/// Usage: +/// declare_chasing_resettable_x_iterator!( +/// NewIterator <--- A new trait that is a ResettableIterator plus some bounds on its items +/// SomeUniqueName <--- any type name you have not used before +/// NewIteratorInto <--- A new trait that is a IntoResettableIterator plus some bounds on its items +/// Bounds <--- the bounds (on items) implied by using the new NewIterator and NewIteratorInto traits +/// ); +/// e.g.: +/// declare_chasing_resettable_x_iterator!( +/// FooIterator, +/// BlahBlah, +/// IntoFooIterator, +/// Foo); +/// +/// will create two new traits: FooIterator and IntoFooIterator. These will have supertraits of +/// ResettableIterator and IntoResettableIterator (respectively), with the included bounds on items +/// of Foo. +#[macro_export] +macro_rules! declare_resettable_x_iterator { + ($name : ident, $nameType : ident, $nameInto : ident, $($bounds : tt)*) => { + pub trait $nameType<'a, ImplicitBound = &'a Self> : $crate::collections::resettable_iterator::ResettableIteratorTypes<'a, ImplicitBound, ItemType = Self::ItemTypeBounded> { + type ItemTypeBounded : $($bounds)*; + } + impl<'a, T : $crate::collections::resettable_iterator::ResettableIteratorTypes<'a>> $nameType<'a> for T where >::ItemType : $($bounds)* { + type ItemTypeBounded = >::ItemType; + } + pub trait $name : $crate::collections::resettable_iterator::ResettableIterator + for<'a> $nameType<'a> {} + impl< T : $crate::collections::resettable_iterator::ResettableIterator + for<'a> $nameType<'a>> $name for T {} + pub trait $nameInto : $crate::collections::resettable_iterator::IntoResettableIterator { + type ResetIterTypeBounded : $name; + } + impl< T : $crate::collections::resettable_iterator::IntoResettableIterator> $nameInto for T where ::ResetIterType : $name { + type ResetIterTypeBounded = ::ResetIterType; + } + }; +} + +/// Boilerplate to declare a useful alias for a chasing resettable iterator where the item obeys some bounds +/// See comment on declare_resettable_x_iterator +#[macro_export] +macro_rules! 
declare_chasing_resettable_x_iterator { + ($name : ident, $nameType : ident, $nameInto : ident, $($bounds : tt)*) => { + pub trait $nameType<'a, ImplicitBound = &'a Self> : $crate::collections::resettable_iterator::ResettableIteratorTypes<'a, ImplicitBound, ItemType = Self::ItemTypeBounded> { + type ItemTypeBounded : $($bounds)*; + } + impl<'a, T : $crate::collections::resettable_iterator::ResettableIteratorTypes<'a>> $nameType<'a> for T where >::ItemType : $($bounds)* { + type ItemTypeBounded = >::ItemType; + } + pub trait $name : $crate::collections::resettable_iterator::ResettableIterator + $crate::collections::resettable_iterator::ChasingResettableIterator + for<'a> $nameType<'a> {} + impl< T : $crate::collections::resettable_iterator::ResettableIterator + $crate::collections::resettable_iterator::ChasingResettableIterator + for<'a> $nameType<'a>> $name for T {} + pub trait $nameInto : $crate::collections::resettable_iterator::IntoChasingResettableIterator { + type ChasingResetIterTypeBounded : $name; + } + impl< T : $crate::collections::resettable_iterator::IntoChasingResettableIterator> $nameInto for T where ::ChasingResetIterType : $name { + type ChasingResetIterTypeBounded = ::ChasingResetIterType; + } + }; +} + +/// Can be converted into a resettable iterator +pub trait IntoResettableIterator { + /// The type of the Resettable iterator + type ResetIterType: ResettableIterator; + fn into_resettable_iterator(self) -> Self::ResetIterType; +} + +/// Can be converted into a resettable iterator that has two streams. +/// The chasing iterator returns items only after they have been returned by the first. +pub trait IntoChasingResettableIterator { + /// The type of the Resettable iterator + type ChasingResetIterType: ChasingResettableIterator; + fn into_chasing_resettable_iterator(self) -> Self::ChasingResetIterType; +} + +/// The types of the iterator and item for a &'a mut ResettableIterator. +/// The implicit bound obviates the need for a Self : 'a, which causes problems with GATS, which +/// cannot in general have associated lifetimes. +pub trait ResettableIteratorTypes<'a, ImplicitBound = &'a Self> { + type ItemType; + type IterType: Iterator; +} + +/// An iterator that can be consumed to return the collection that formed it. +/// More specifically, any borrow of this is an iterator, even if possibly a ResettableIterator +/// is also an iterator. +/// The extra layer of indirection is offered in case the items need a shorter lifetime ('a). +pub trait ResettableIterator: for<'a> ResettableIteratorTypes<'a> { + type CollectionType; + fn reset(self) -> Self::CollectionType; + fn iter(&mut self) -> >::IterType; +} + +pub trait ChasingResettableIteratorTypes<'a, ImplicitBound = &'a Self>: + ResettableIteratorTypes<'a, ImplicitBound> +{ + type ChasingIterType: Iterator; +} + +pub trait ChasingResettableIterator: + ResettableIterator + for<'a> ChasingResettableIteratorTypes<'a> +{ + /// Get an iterator that chases the first iterator. + /// Will only returns items that have previously been returned (and then gone out of lifetime) + /// from the lead iterator. 
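An editorial, compilable miniature of the `ImplicitBound = &'a Self` trick these traits use: defaulting the parameter to `&'a Self` makes `Self: 'a` a well-formedness consequence of naming the trait, so `for<'a> Trait<'a>` supertrait bounds stay usable where an explicit `where Self: 'a` on a GAT would not be. Names here are illustrative only.

```rust
trait LendTypes<'a, ImplicitBound = &'a Self> {
    type Item;
}

// The HRTB supertrait works because every use of LendTypes<'a> already
// implies Self: 'a via the defaulted parameter.
trait Lend: for<'a> LendTypes<'a> {
    fn lend<'a>(&'a mut self) -> <Self as LendTypes<'a>>::Item;
}

struct Counter(u32);

impl<'a> LendTypes<'a> for Counter {
    type Item = &'a mut u32;
}

impl Lend for Counter {
    fn lend<'a>(&'a mut self) -> <Self as LendTypes<'a>>::Item {
        self.0 += 1;
        &mut self.0
    }
}

fn main() {
    let mut c = Counter(0);
    assert_eq!(*c.lend(), 1);
    assert_eq!(*c.lend(), 2);
}
```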
+ fn chasing_iter(&mut self) -> >::ChasingIterType; + /// Peek the next item from the chasing iterator + fn chasing_peek(&mut self) -> Option<>::ItemType>; + /// Peek the next item from the lead iterator + fn lead_peek(&mut self) -> Option<>::ItemType>; +} + +pub struct ResettableOnce { + used: bool, + value: T, +} + +impl ResettableOnce { + pub fn new(value: T) -> Self { + Self { used: false, value } + } +} + +impl<'a, T> Iterator for &'a mut ResettableOnce { + type Item = &'a mut T; + + fn next(&mut self) -> Option { + if self.used { + None + } else { + self.used = true; + // Safety: this transmutes the lifetime of the reference of self.value from that of the + // &mut self to that of 'a. Even though self can borrowed several times for different + // lifetimes shorter than 'a, because this iterator only returns the item once, it is + // always fine to do for this longer lifetime. + let item: &mut T = &mut self.value; + unsafe { Some(core::mem::transmute(item)) } + } + } +} + +impl<'a, T> ResettableIteratorTypes<'a> for ResettableOnce { + type ItemType = <&'a mut Self as Iterator>::Item; + type IterType = &'a mut Self; +} + +impl ResettableIterator for ResettableOnce { + type CollectionType = T; + + fn reset(self) -> Self::CollectionType { + self.value + } + + fn iter(&mut self) -> >::IterType { + self + } +} + +enum ChasingResettableOnceState { + /// Neither iterator has returned the item + None, + /// The first iterator has returned the item + First, + /// The second iterator has returned the item + Second, +} + +pub struct ChasingResettableOnce { + state: ChasingResettableOnceState, + value: T, +} + +impl ChasingResettableOnce { + pub fn new(value: T) -> Self { + Self { + state: ChasingResettableOnceState::None, + value, + } + } +} + +pub struct ChasingResettableOnceChaser<'a, T> { + r: &'a mut ChasingResettableOnce, +} + +impl<'a, T> ChasingResettableOnceChaser<'a, T> { + fn next_helper(&mut self, advance: bool) -> Option<&'a mut T> { + match self.r.state { + ChasingResettableOnceState::First => { + if advance { + self.r.state = ChasingResettableOnceState::Second; + } + let item: &mut T = &mut self.r.value; + // Safety : similar to getting it the first time. Constructing this chaser requires + // the other iter to have been dropped, and so the item from it is also out of + // scope. + unsafe { Some(core::mem::transmute(item)) } + } + _ => None, + } + } +} + +impl<'a, T> Iterator for ChasingResettableOnceChaser<'a, T> { + type Item = &'a mut T; + + fn next(&mut self) -> Option { + self.next_helper(true) + } +} + +impl<'a, T> Iterator for &'a mut ChasingResettableOnce { + type Item = &'a mut T; + + fn next(&mut self) -> Option { + match self.state { + ChasingResettableOnceState::None => { + self.state = ChasingResettableOnceState::First; + // Safety: this transmutes the lifetime of the reference of self.value from that of the + // &mut self to that of 'a. Even though self can borrowed several times for different + // lifetimes shorter than 'a, because this iterator only returns the item once, it is + // always fine to do for this longer lifetime. + // It will be returned by the other iterator, but that cannot exist at the same time + // as this time because constructing it requires borrowing the ChasingResettableOnce + // mutably. 
+ let item: &mut T = &mut self.value; + unsafe { Some(core::mem::transmute(item)) } + } + _ => None, + } + } +} + +impl<'a, T> ResettableIteratorTypes<'a> for ChasingResettableOnce { + type ItemType = <&'a mut Self as Iterator>::Item; + type IterType = &'a mut Self; +} + +impl<'a, T> ChasingResettableIteratorTypes<'a> for ChasingResettableOnce { + type ChasingIterType = ChasingResettableOnceChaser<'a, T>; +} + +impl ResettableIterator for ChasingResettableOnce { + type CollectionType = T; + + fn reset(self) -> Self::CollectionType { + self.value + } + + fn iter(&mut self) -> >::IterType { + self + } +} + +impl ChasingResettableIterator for ChasingResettableOnce { + fn chasing_iter<'a>( + &'a mut self, + ) -> >::ChasingIterType { + ChasingResettableOnceChaser::<'a, T> { r: self } + } + + fn chasing_peek<'a>(&'a mut self) -> Option<>::ItemType> { + ChasingResettableOnceChaser::<'a, T> { r: self }.next_helper(false) + } + + fn lead_peek(&mut self) -> Option<>::ItemType> { + match self.state { + ChasingResettableOnceState::None => Some(&mut self.value), + _ => None, + } + } +} + +/// Simple case where the original reference and an iterator can be be stored as a pair, +/// to be used when the RefT can be copied. +pub struct IterReset { + orig_ref: RefT, + the_iter: IterT, +} + +// Sadly I have to copy this implementation for lots of T because negative impls/bounds +// specialisation do not work yet and a different implementation is needed for non-copy types. +macro_rules! define_into_resettable_iterator { + ($T : ty, $($tok:tt)*) => { + // Where we can cheaply copy a IntoIterator T we can just stash that copy + impl<$($tok)*> IntoResettableIterator for $T + where $T : Copy + IntoIterator + { + type ResetIterType = IterReset<$T, <$T as IntoIterator>::IntoIter>; + + fn into_resettable_iterator(self) -> Self::ResetIterType { + Self::ResetIterType {orig_ref: self, the_iter: self.into_iter()} + } + } + } +} + +define_into_resettable_iterator!(&'a T, 'a, T: ? 
Sized + 'a); + +/// IterReset is an iterator itself, so will also automatically support &mut Self : IntoIterator +impl Iterator for IterReset { + type Item = IterT::Item; + + fn next(&mut self) -> Option { + self.the_iter.next() + } +} + +impl<'a, RefT, IterT: Iterator> ResettableIteratorTypes<'a> for IterReset +where + RefT: IntoResettableIterator, +{ + type ItemType = <&'a mut Self as Iterator>::Item; + type IterType = &'a mut Self; +} + +/// Unwrap copy discarding the iterator +impl ResettableIterator for IterReset +where + RefT: IntoResettableIterator, +{ + type CollectionType = RefT; + fn reset(self) -> Self::CollectionType { + self.orig_ref + } + + fn iter(&mut self) -> >::IterType { + self + } +} + +pub struct IterChaseReset +where + IterT::Item: Clone, +{ + orig_ref: RefT, + the_iter: IterT, + the_chaser: IterT, + chase_peeked: Option, + lead_peeked: Option, + count: usize, +} + +pub struct IterChaseResetRef<'a, RefT, IterT: Iterator> +where + IterT::Item: Clone, +{ + the_ref: &'a mut IterChaseReset, +} + +impl IntoChasingResettableIterator for T +where + T: Clone + IntoIterator, + T::Item: Clone, +{ + type ChasingResetIterType = IterChaseReset::IntoIter>; + + fn into_chasing_resettable_iterator(self) -> Self::ChasingResetIterType { + Self::ChasingResetIterType { + orig_ref: self.clone(), + the_iter: self.clone().into_iter(), + the_chaser: self.into_iter(), + chase_peeked: None, + lead_peeked: None, + count: 0, + } + } +} + +impl Iterator for IterChaseReset +where + IterT::Item: Clone, +{ + type Item = IterT::Item; + + fn next(&mut self) -> Option { + if self.lead_peeked.is_some() { + return self.lead_peeked.take(); + } + self.count += 1; + self.the_iter.next() + } +} + +impl<'a, RefT, IterT: Iterator> Iterator for IterChaseResetRef<'a, RefT, IterT> +where + IterT::Item: Clone, +{ + type Item = IterT::Item; + + fn next(&mut self) -> Option { + if self.the_ref.chase_peeked.is_some() { + return self.the_ref.chase_peeked.take(); + } + if self.the_ref.count == 0 { + None + } else { + self.the_ref.count -= 1; + self.the_ref.the_chaser.next() + } + } +} + +impl<'a, RefT, IterT: Iterator> ResettableIteratorTypes<'a> for IterChaseReset +where + RefT: IntoChasingResettableIterator, + IterT::Item: Clone, +{ + type ItemType = <&'a mut Self as Iterator>::Item; + type IterType = &'a mut Self; +} +impl<'a, RefT, IterT: Iterator> ChasingResettableIteratorTypes<'a> for IterChaseReset +where + RefT: IntoChasingResettableIterator, + IterT::Item: Clone, +{ + type ChasingIterType = IterChaseResetRef<'a, RefT, IterT>; +} +impl ResettableIterator for IterChaseReset +where + RefT: IntoChasingResettableIterator, + IterT::Item: Clone, +{ + type CollectionType = RefT; + fn reset(self) -> Self::CollectionType { + self.orig_ref + } + + fn iter(&mut self) -> >::IterType { + self + } +} +impl ChasingResettableIterator for IterChaseReset +where + RefT: IntoChasingResettableIterator, + IterT::Item: Clone, +{ + fn chasing_iter<'a>( + &'a mut self, + ) -> >::ChasingIterType { + IterChaseResetRef::<'a, RefT, IterT> { the_ref: self } + } + + fn chasing_peek(&mut self) -> Option<>::ItemType> { + if !self.chase_peeked.is_some() && self.count != 0 { + self.count -= 1; + self.chase_peeked = self.the_chaser.next(); + } + self.chase_peeked.clone() + } + + fn lead_peek(&mut self) -> Option<>::ItemType> { + if !self.lead_peeked.is_some() { + self.count += 1; + self.lead_peeked = self.the_iter.next(); + } + self.lead_peeked.clone() + } +} + +/// A more complicated wrapper for mutable iterators +pub struct MutIterReset { + /// 
The iterator. It may return items with overly permissive lifetimes ('i). + the_iter: IterT, + /// A sound type to store the original collection (likely as a raw pointer) + /// Even if we don't use it we are not allowed to have any aliased mutable references. + orig: RefTWrapped, + /// In case orig loses any important lifetime information + _phantom: Phantom, +} + +macro_rules! declare_resettable_iter_mut { + ($a: lifetime, $i : lifetime, $c : lifetime, $T : ident, $X : ty, $IT : ty, $SIT : ty) => { + /// Implement for any mutable reference where that mutable reference can be an iterator + impl<$i, $c: $i, $T : ?Sized, ItemT: $i, IterT : Iterator> IntoResettableIterator for $X + where >::RefT : IntoIterator { + type ResetIterType = MutIterReset<>::Output, PhantomData<($X, &$c $T, &$i ItemT)>, IterT>; + + fn into_resettable_iterator(self) -> Self::ResetIterType { + // Safety: the_iter will be dropped first if this type is dropped + // references can only leak via next, which shortens the lifetime of references. + let (orig, reff) = unsafe { self.into_raw() }; + Self::ResetIterType {orig, _phantom : PhantomData, the_iter : reff.into_iter()} + } + } + /// Implement iterator for mutable reference, must shorten the lifetime of items to that of + /// the iterator reference. + impl<$a, $c, $i, $T: ?Sized, ItemT: $i, IterT : Iterator, OrigT> Iterator for + &$a mut MutIterReset, IterT> { + type Item = $SIT; + + fn next(&mut self) -> Option { + // This is a reference with lifetime 'i. + self.the_iter.next() + // Which will shorten to 'a, the lifetime of the borrow of MutIterReset + // this ensure that if we consume MutIterReset, no references will exist. + } + } + }; +} + +declare_resettable_iter_mut!('a, 'i, 'c, T, &'c mut T, &'i mut ItemT, &'a mut ItemT); +declare_resettable_iter_mut!('a, 'i, 'c, T, RefMut<'c, T>, &'i mut ItemT, &'a mut ItemT); +declare_resettable_iter_mut!('a, 'i, 'c, T, Ref<'c, T>, &'i ItemT, &'a ItemT); + +impl<'a, 'c, 'i, T: ?Sized, ItemT: 'i, IterT, X: IntoRawHelper<'c, T>> ResettableIteratorTypes<'a> + for MutIterReset, IterT> +where + &'a mut Self: Iterator, +{ + type ItemType = <&'a mut Self as Iterator>::Item; + type IterType = &'a mut Self; +} + +impl<'c, 'i, T: ?Sized, ItemT: 'i, IterT: Iterator, X: IntoRawHelper<'c, T>> ResettableIterator + for MutIterReset, IterT> +where + for<'a> &'a mut Self: Iterator, +{ + type CollectionType = X; + + fn reset(self) -> Self::CollectionType { + // Just in case dropping the iterator references itself + drop(self.the_iter); + // SAFETY: any items provided via next are bounded by a lifetime from a borrow of self + // because this method consumes self, those will all have gone out of scope by now + // Orig was also cast from a valid reference, and the phantom data ensures that the + // lifetime is still valid. + unsafe { X::from_raw(self.orig) } + } + + fn iter(&mut self) -> >::IterType { + self + } +} + +/// Resettable Iterator for PRef type. Starts returning none if memory is unmapped +/// Clone is whether the original PRef could be cloned or not and will impact the returned PRef +/// when reset() is called. +pub struct PRefResetIter { + /// Original pref. 
+ pref: PRefBase, + /// Iterator, not guarded by the pref + iter: IterT, +} + +/// A reference to an iterator over something from a PRef that has been checked for liveness +pub struct CheckedPrefResetIter<'a, IterT: Iterator> { + checked_ref: Option<&'a mut IterT>, +} + +impl IntoResettableIterator + for PRefBase +where + &'static T: IntoIterator, + PRefResetIter::IntoIter, Trk, CLONE>: + ResettableIterator>, +{ + type ResetIterType = PRefResetIter::IntoIter, Trk, CLONE>; + + fn into_resettable_iterator(self) -> Self::ResetIterType { + // Safety: PRefResetIter will check the pref before returning any items, and will bound + // their lifetime to a more appropriate lifetime than static during which the process + // will not be unmapped. + let raw_ptr = unsafe { self.get_ptr().as_ref() }; + Self::ResetIterType { + pref: self, + iter: raw_ptr.into_iter(), + } + } +} + +impl<'a, 'i: 'a, ItemT: 'i + 'a, IterT: Iterator> Iterator + for CheckedPrefResetIter<'a, IterT> +{ + type Item = &'a ItemT; + + fn next(&mut self) -> Option { + match &mut self.checked_ref { + // None if reference was no longer valid + None => None, + // Otherwise the reference is valid for the duration of the borrow 'a, so as long as we + // rebound the lifetime to 'a it is safe to return. + Some(iter_ref) => iter_ref.next(), + } + } +} + +impl< + 'a, + 'i: 'a, + ItemT: 'i + 'a, + IterT: Iterator, + T: ?Sized, + Trk: Track, + const CLONE: bool, + > ResettableIteratorTypes<'a> for PRefResetIter +{ + type ItemType = as Iterator>::Item; + type IterType = CheckedPrefResetIter<'a, IterT>; +} + +impl< + 'i, + ItemT: 'i, + IterT: Iterator, + T: ?Sized, + Trk: Track, + const CLONE: bool, + > ResettableIterator for PRefResetIter +where + for<'a> PRefResetIter: + ResettableIteratorTypes<'a, IterType = CheckedPrefResetIter<'a, IterT>>, +{ + type CollectionType = PRefBase; + + fn reset(self) -> Self::CollectionType { + self.pref + } + + fn iter(&mut self) -> >::IterType { + CheckedPrefResetIter { + checked_ref: if self.pref.is_still_alive() { + Some(&mut self.iter) + } else { + None + }, + } + } +} + +#[cfg(test)] +mod tests { + use crate::collections::resettable_iterator::{ + ChasingResettableIterator, IntoChasingResettableIterator, IntoResettableIterator, + ResettableIterator, + }; + + #[test] + fn mut_example() { + let mut some_collection = [1, 2, 3, 4]; + let ref_to_collection = &mut some_collection; + + let mut reset_iter = ref_to_collection.into_resettable_iterator(); + + let mut i = 1; + let mut foo: Option<&mut i32> = None; + for item in reset_iter.iter() { + assert_eq!(i, *item); + i += 1; + foo = Some(item); + } + + let _ = *foo.unwrap(); + let _ref_to_collection2 = reset_iter.reset(); + //*foo.unwrap(); // <-- illegal, foo has lifetime that cannot go across reset + } + + #[test] + fn non_mut_example() { + let some_collection = [1, 2, 3, 4]; + let ref_to_collection = &some_collection; + + let mut reset_iter = ref_to_collection.into_resettable_iterator(); + + let mut i = 1; + let mut foo: Option<&i32> = None; + for item in reset_iter.iter() { + assert_eq!(i, *item); + i += 1; + foo = Some(item); + } + + let _ = *foo.unwrap(); + let _ref_to_collection2 = reset_iter.reset(); + + let _ = *foo.unwrap(); // <-- illegal + } + + #[test] + fn chasing_test() { + let some_collection = [1, 2, 3, 4]; + let ref_to_collection = &some_collection; + + let mut chasing = ref_to_collection.into_chasing_resettable_iterator(); + + // iter() gives a pass over each item + assert_eq!(*chasing.iter().next().unwrap(), 1); + 
assert_eq!(*chasing.iter().next().unwrap(), 2); + // chasing_iter() only gives the items already returned by iter(); + assert_eq!(*chasing.chasing_iter().next().unwrap(), 1); + assert_eq!(*chasing.chasing_iter().next().unwrap(), 2); + assert!(chasing.chasing_iter().next().is_none()); + // can go back to the first iterator + assert_eq!(*chasing.iter().next().unwrap(), 3); + assert_eq!(*chasing.iter().next().unwrap(), 4); + assert!(chasing.iter().next().is_none()); + // and then more items will appear in the chasing iterator + assert_eq!(*chasing.chasing_iter().next().unwrap(), 3); + assert_eq!(*chasing.chasing_iter().next().unwrap(), 4); + assert!(chasing.chasing_iter().next().is_none()); + } +} diff --git a/kernel/src/collections/ring_buffer.rs b/kernel/src/collections/ring_buffer.rs index d9672668c..baba0cd80 100644 --- a/kernel/src/collections/ring_buffer.rs +++ b/kernel/src/collections/ring_buffer.rs @@ -1,6 +1,8 @@ //! Implementation of a ring buffer. use crate::collections::queue; +use core::cell::Cell; +use core::mem::MaybeUninit; pub struct RingBuffer<'a, T: 'a> { ring: &'a mut [T], @@ -8,8 +10,446 @@ pub struct RingBuffer<'a, T: 'a> { tail: usize, } +/// Types for head/tail. 64K is plenty for character buffers. Factored to parameterize this later +/// if good reason presents itself. +pub type RingBufferInt = u16; + +/// Statically sized ring buffer. As opposed to the other ring buffer above: +/// Power of two N will properly optimise as it is a constant, not dynamic, value +/// Data is stored inline making it easier to allocate (saves a bunch of unsafe in process standard) +/// Uses Cell so no &mut is needed for most operations getting rid of MapCells +/// Uses MaybeUninit so Default / Zeroing is also not required. +/// Has more efficient push_slice for moving slices with memcpy. +/// Uses unsafe =( / has an unsafer retain implementation. +pub struct StaticSizedRingBuffer { + /// Wrapping counter of how many items pushed + tail: Cell, + /// Wrapping counter of how many items popped + head: Cell, + /// Note: we put the cell around the array because projecting `Cell[;N] -> [Cell<>;N]` exists + /// but the opposite does not. + ring: Cell<[MaybeUninit; N]>, +} + +impl Drop for StaticSizedRingBuffer { + fn drop(&mut self) { + // Make sure to drop every item + self.empty(); + } +} + +// I was hoping to share logic here, but the &mut in Queue makes that hard. +// Also, because we don't use &mut, we have to be REALLY careful about when T::drop happens because +// the drop implementation of T might capture the buffer and try to make concurrent access. +impl StaticSizedRingBuffer { + const Z: MaybeUninit = MaybeUninit::zeroed(); + const U: MaybeUninit = MaybeUninit::uninit(); + + /// A version of new that will result in all zeros. + /// Good for global queues that go in BSS. + #[inline] + pub const fn new_zeros() -> Self { + StaticSizedRingBuffer { + ring: Cell::new([Self::Z; N]), + tail: Cell::new(0), + head: Cell::new(0), + } + } + + /// A version of new that will result in the ring being uninitialized + /// Good for dynamically allocating, e.g. in process headers. 
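The `Cell<[MaybeUninit<T>; N]>` storage leans on the standard library's cell projection: a shared `&Cell<[T]>` can be viewed as `&[Cell<T>]` (identical layout), so individual slots are written without `&mut`. A stable-Rust sketch using `as_slice_of_cells` (the patch uses the array form, `as_array_of_cells`, still nightly-only at the time of writing):

```rust
use std::cell::Cell;

fn main() {
    let storage: Cell<[u8; 4]> = Cell::new([0; 4]);

    // Unsizing coercion &Cell<[u8; 4]> -> &Cell<[u8]>, then project to
    // one Cell per element.
    let unsized_ref: &Cell<[u8]> = &storage;
    let slots: &[Cell<u8>] = unsized_ref.as_slice_of_cells();

    slots[2].set(7); // interior mutability: no &mut required
    assert_eq!(storage.get()[2], 7);
}
```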
+ #[inline] + pub fn new_uninit() -> Self { + StaticSizedRingBuffer { + ring: Cell::new([Self::U; N]), + tail: Cell::new(0), + head: Cell::new(0), + } + } + + /// How many elements are in the queue + #[inline] + pub fn len(&self) -> usize { + (self.tail.get() as usize).wrapping_sub(self.head.get() as usize) + } + + #[inline] + pub fn is_full(&self) -> bool { + self.len() == N + } + + #[inline] + pub fn has_elements(&self) -> bool { + self.tail.get() != self.head.get() + } + + /// How many space is available to write elements + #[inline] + pub fn available_len(&self) -> usize { + (N as usize).wrapping_sub(self.len()) + } + + /// Try to push to the queue, if full returns back the argument + #[inline] + pub fn enqueue(&self, value: T) -> Result<(), T> { + if self.is_full() { + return Err(value); + } + + let tl = self.tail.get(); + + self.ring.as_array_of_cells()[(tl as usize % N)].set(MaybeUninit::new(value)); + + self.tail.set(tl.wrapping_add(1)); + + Ok(()) + } + + /// Try to push to the queue, if full pops an item from the queue + #[inline] + pub fn push(&self, value: T) -> Result<(), T> { + let full = self.is_full(); + let tl = self.tail.get() as usize; + let next_tl = tl.wrapping_add(1); + self.tail.set(next_tl as RingBufferInt); + let old = self.ring.as_array_of_cells()[(tl as usize) % N].replace(MaybeUninit::new(value)); + if full { + self.head.set(next_tl.wrapping_sub(N) as RingBufferInt); + Err(unsafe { old.assume_init() }) + } else { + Ok(()) + } + } + + #[inline] + pub fn dequeue(&self) -> Result { + if !self.has_elements() { + return Err(()); + } + + let hd = self.head.get(); + + let value = unsafe { + // Safety: We only read items we have previously written with push + self.ring.as_array_of_cells()[hd as usize % N] + .replace(MaybeUninit::uninit()) + .assume_init() + }; + + self.head.set(hd.wrapping_add(1)); + + Ok(value) + } + + /// Dequeue multiple elements + #[inline] + pub fn dequeue_many(&self, n: usize) -> Result<(), ()> { + if n > self.len() { + return Err(()); + } + + if core::mem::needs_drop::() { + // Need to drop individual items + for _i in 0..n { + let _ = self.dequeue(); + } + } else { + // Can just increment + self.head + .set(self.head.get().wrapping_add(n as RingBufferInt)) + } + + Ok(()) + } + + /// Get up to as many as len elements from the queues as slices (without popping them). + /// This needs to be &mut because otherwise we will be holding a reference to data that can + /// be ripped out from underneath us by pop() and made un-init + #[inline] + pub fn as_slices_up_to(&mut self, mut len: usize) -> (&mut [T], &mut [T]) { + if len > self.len() { + // Cap len + len = self.len(); + } + + // As we happen to have &mut, we can also get rid of the Cell<> + let ring = self.ring.get_mut(); + + let hd = self.head.get(); + let tl = hd.wrapping_add(len as RingBufferInt); // will be <= the actual tail + + let split_ndx = (hd as usize) % N; + + let (left, right) = ring.split_at_mut(split_ndx); + + let end_ndx = (tl as usize) % N; + + let (left, right) = if (split_ndx < end_ndx) || len == 0 { + // Simple case where there is no wrap-around + (&mut right[..len], &mut left[..0]) + } else { + // Wrap around + (right, &mut left[..end_ndx]) + }; + + // Safety: in the no wrap round case (hd < tl) we have a zero length left and the items + // between hd and tl. + // In the wrap around case, we have all the items after head, and every item up to the + // wrapped tail. 
+ unsafe { + ( + MaybeUninit::slice_assume_init_mut(left), + MaybeUninit::slice_assume_init_mut(right), + ) + } + } + + #[inline] + pub fn as_slices(&mut self) -> (&mut [T], &mut [T]) { + self.as_slices_up_to(self.len()) + } + + /// Get an element at a particular index. + /// Note: ndx should be a wrapping counter. It is a counter from 0..u16::MAX, + /// not from 0...N. + /// If the counter is not between tail and head an error is returned + #[inline] + pub fn get_ndx(&mut self, ndx: RingBufferInt) -> Result<&mut T, ()> { + let hd = self.head.get(); + + // This is the n'th element in the queue + let nth_elem = (ndx as usize).wrapping_sub(hd as usize); + // This is how elements are in the queue + let len = self.len(); + + // If we are asking for an element too far after hd, error + if nth_elem >= len { + return Err(()); + } + + let elem = &mut self.ring.get_mut()[(ndx as usize) % N]; + + // Safety: Elements between hd and tl are init. They cannot be made unnit with a pop + // because the signature of this method owns self. + unsafe { Ok(elem.assume_init_mut()) } + } + + #[inline] + pub fn get_head(&mut self) -> Result<&mut T, ()> { + self.get_ndx(self.head.get()) + } + + #[inline] + pub fn get_tail(&mut self) -> Result<&mut T, ()> { + self.get_ndx(self.tail.get()) + } + + #[inline] + pub fn empty(&self) { + // Drop everything we need to + + if core::mem::needs_drop::() { + while self.has_elements() { + let _ = self.dequeue(); + // Note: the drop of the result above may cause concurrent modification. + // This is O.K. + } + } else { + self.head.set(0); + self.tail.set(0); + } + } + + /// Retain only items for which the predicate is true. + /// Safety: Neither T::Drop or the closure passed to retain may access 'self' in any way. + #[inline] + pub unsafe fn retain(&self, mut f: F) + where + F: FnMut(&T) -> bool, + { + // Index over the elements before the retain operation. + let mut src = self.head.get() as usize; + // Index over the retained elements. + let mut dst = src; + + let end = self.tail.get() as usize; + + let ring = self.ring.as_array_of_cells(); + + while src != end { + let src_ndx = src % N; + + let item = ring[src_ndx].replace(MaybeUninit::uninit()); + + // Safety: all items between head and tail are init + let item = unsafe { item.assume_init() }; + + if f(&item) { + // This sequence is written as replace, f, set for correctness. However, we + // are hoping for f/drop to run in place. + // The first branch of this if is a NOP if f was run in place, which will makes + // it obvious to compiler that the move is conditional. + if src == dst { + ring[src_ndx].set(MaybeUninit::new(item)) + } else { + ring[dst % N].set(MaybeUninit::new(item)) + } + + dst = dst.wrapping_add(1); + } + + src = src.wrapping_add(1); + } + + self.tail.set(dst as RingBufferInt); + } + + /// Safe version of retain + pub fn retain_mut(&mut self, f: F) + where + F: FnMut(&T) -> bool, + { + // Safety: If self is &mut, there is no way for f to modify self. 
+ unsafe { self.retain(f) } + } +} + +pub struct StaticSizedRingBufferIter<'a, T, const N: usize> { + ring_buffer: &'a mut StaticSizedRingBuffer, + ndx: RingBufferInt, +} + +impl<'a, T, const N: usize> IntoIterator for &'a mut StaticSizedRingBuffer { + type Item = &'a mut T; + type IntoIter = StaticSizedRingBufferIter<'a, T, N>; + + fn into_iter(self) -> Self::IntoIter { + let hd = self.head.get(); + StaticSizedRingBufferIter { + ring_buffer: self, + ndx: hd, + } + } +} + +impl<'a, T, const N: usize> Iterator for StaticSizedRingBufferIter<'a, T, N> { + type Item = &'a mut T; + + fn next(&mut self) -> Option { + if self.ndx == self.ring_buffer.tail.get() { + None + } else { + let elem = &mut self.ring_buffer.ring.get_mut()[(self.ndx as usize) % N]; + self.ndx = self.ndx.wrapping_add(1); + unsafe { + // Safety: we started iterating at head, and return none once we hit tail. + // all elements between head and tail are init. + // This iterator owns the collection so it cannot be modified. + // The iterator will never return the item again, so increasing lifetime to 'a + // also safe. + core::mem::transmute(Some(elem.assume_init_mut())) + } + } + } +} + +impl StaticSizedRingBuffer { + #[inline] + pub fn enqueue_slice(&self, values: &[T]) -> Result<(), ()> { + self.push_or_enqueue_slice(values, false) + } + + #[inline] + pub fn push_slice(&self, values: &[T]) -> Result<(), ()> { + self.push_or_enqueue_slice(values, true) + } + + /// Version of enqueue that uses memcpy to more efficiently push a larger range + /// Only ptr::copy seems to correctly generate the memcpy so this uses that internally. + /// If overwrite is true will drop any items overwritten. + /// If false, will return an error if there is no space. + #[inline] + pub fn push_or_enqueue_slice(&self, mut values: &[T], overwrite: bool) -> Result<(), ()> { + let mut len = values.len(); + + // Short circuit this case, makes thinking about the rest of this easier + if len == 0 { + return Ok(()); + } + + // Clamp data to N if data is longer and we are overwriting + if len > N { + if overwrite { + // If we are overwriting, we only take the last N values + values = &values[(len - N)..]; + len = N; + } else { + return Err(()); + } + } + + // Check data would fit in buffer + if self.available_len() < len { + // Copying will overwrite everything from [hd, hd + overlap). + if overwrite { + let overlap = len - self.available_len(); + if core::mem::needs_drop::() { + for _ in 0..overlap { + let _ = self.dequeue(); + // Note: drops happens here which may cause concurrent modification + } + // If the concurrent modification happens in an annoying way, just fail. + if self.available_len() != len { + return Err(()); + } + } else { + // If no dropping needed, just advance head + self.head + .set(self.head.get().wrapping_add(overlap as RingBufferInt)) + } + } else { + return Err(()); + } + } + + let tl = self.tail.get(); + // Might as well advance tl now + self.tail.set(tl.wrapping_add(len as RingBufferInt)); + + // Copy [start_index,stop_index] + let start_index = tl as usize % N; + let stop_index = (tl as usize).wrapping_add(len) % N; + + // Cap first copy at end of array if wrap around case + let len1 = if stop_index < start_index { + N - start_index + } else { + len + }; + + let dst = self.ring.as_array_of_cells()[start_index as usize].as_ptr(); + let mut src = values.as_ptr() as *const MaybeUninit; + unsafe { + core::ptr::copy(src, dst, len1 as usize); + } + + // Wrap around case. 
Note the use of a comparison between lengths (not the stop_index < ...) + // The first would generate a pointless copy of length-zero when stop_index was 0. + if len1 != len { + // Copy [0, stop_index) + // always to the start of the ring if we wrap around + let dst = self.ring.as_array_of_cells()[0].as_ptr(); + // we already copied len1 elements + src = src.wrapping_add(len1 as usize); + unsafe { core::ptr::copy(src, dst, (stop_index) as usize) } + } + + Ok(()) + } +} + impl<'a, T: Copy> RingBuffer<'a, T> { - pub fn new(ring: &'a mut [T]) -> RingBuffer<'a, T> { + pub const fn new(ring: &'a mut [T]) -> RingBuffer<'a, T> { RingBuffer { head: 0, tail: 0, @@ -19,9 +459,9 @@ impl<'a, T: Copy> RingBuffer<'a, T> { /// Returns the number of elements that can be enqueued until the ring buffer is full. pub fn available_len(&self) -> usize { - // The maximum capacity of the queue is ring.len - 1, because head == tail for the empty - // queue. - self.ring.len().saturating_sub(1 + queue::Queue::len(self)) + // Applying the mod at access, rather than increment, means we can distinguish between a + // full and empty buffer. + self.ring.len().wrapping_sub(queue::Queue::len(self)) } /// Returns up to 2 slices that together form the contents of the ring buffer. @@ -34,20 +474,17 @@ impl<'a, T: Copy> RingBuffer<'a, T> { /// contents of the buffer is `[left, right].concat()` (although physically the "left" slice is /// stored after the "right" slice). pub fn as_slices(&'a self) -> (Option<&'a [T]>, Option<&'a [T]>) { - if self.head < self.tail { - (Some(&self.ring[self.head..self.tail]), None) - } else if self.head > self.tail { - let (left, right) = self.ring.split_at(self.head); - ( - Some(right), - if self.tail == 0 { - None - } else { - Some(&left[..self.tail]) - }, - ) - } else { + if self.head == self.tail { (None, None) + } else { + let hd = self.head % self.ring.len(); + let tl = self.tail % self.ring.len(); + if hd < tl { + (Some(&self.ring[hd..tl]), None) + } else { + let (left, right) = self.ring.split_at(hd); + (Some(right), if tl == 0 { None } else { Some(&left[..tl]) }) + } } } } @@ -58,18 +495,11 @@ impl queue::Queue for RingBuffer<'_, T> { } fn is_full(&self) -> bool { - self.head == ((self.tail + 1) % self.ring.len()) + self.len() == self.ring.len() } fn len(&self) -> usize { - if self.tail > self.head { - self.tail - self.head - } else if self.tail < self.head { - (self.ring.len() - self.head) + self.tail - } else { - // head equals tail, length is zero - 0 - } + self.tail.wrapping_sub(self.head) } fn enqueue(&mut self, val: T) -> bool { @@ -77,30 +507,30 @@ impl queue::Queue for RingBuffer<'_, T> { // Incrementing tail will overwrite head false } else { - self.ring[self.tail] = val; - self.tail = (self.tail + 1) % self.ring.len(); + self.ring[self.tail % self.ring.len()] = val; + self.tail = self.tail.wrapping_add(1); true } } fn push(&mut self, val: T) -> Option { let result = if self.is_full() { - let val = self.ring[self.head]; - self.head = (self.head + 1) % self.ring.len(); + let val = self.ring[self.head % self.ring.len()]; + self.head = self.head.wrapping_add(1); Some(val) } else { None }; - self.ring[self.tail] = val; - self.tail = (self.tail + 1) % self.ring.len(); + self.ring[self.tail % self.ring.len()] = val; + self.tail = self.tail.wrapping_add(1); result } fn dequeue(&mut self) -> Option { if self.has_elements() { - let val = self.ring[self.head]; - self.head = (self.head + 1) % self.ring.len(); + let val = self.ring[self.head % self.ring.len()]; + self.head = 
self.head.wrapping_add(1); Some(val) } else { None @@ -123,15 +553,15 @@ impl queue::Queue for RingBuffer<'_, T> { let mut dst = self.head; while src != self.tail { - if f(&self.ring[src]) { + if f(&self.ring[src % len]) { // When the predicate is true, move the current element to the // destination if needed, and increment the destination index. if src != dst { - self.ring[dst] = self.ring[src]; + self.ring[dst % len] = self.ring[src % len]; } - dst = (dst + 1) % len; + dst = dst.wrapping_add(1); } - src = (src + 1) % len; + src = src.wrapping_add(1); } self.tail = dst; @@ -142,14 +572,66 @@ impl queue::Queue for RingBuffer<'_, T> { mod test { use super::super::queue::Queue; use super::RingBuffer; + use crate::collections::ring_buffer::StaticSizedRingBuffer; + + // I didn't really want to implement the queue interface in queue.rs, but it was useful for + // testing. + + impl Queue for StaticSizedRingBuffer { + fn has_elements(&self) -> bool { + Self::has_elements(self) + } + + fn is_full(&self) -> bool { + Self::is_full(self) + } + + fn len(&self) -> usize { + Self::len(self) + } + + fn enqueue(&mut self, val: T) -> bool { + Self::enqueue(self, val).is_ok() + } + + fn push(&mut self, val: T) -> Option { + Self::push(self, val).err() + } + + fn dequeue(&mut self) -> Option { + Self::dequeue(self).ok() + } + + fn empty(&mut self) { + Self::empty(self) + } + + fn retain(&mut self, f: F) + where + F: FnMut(&T) -> bool, + { + Self::retain_mut(self, f) + } + } #[test] fn test_enqueue_dequeue() { const LEN: usize = 10; let mut ring = [0; LEN]; let mut buf = RingBuffer::new(&mut ring); + do_enqueue_dequeue(&mut buf, LEN); + } + + #[test] + fn test_enqueue_dequeue_const() { + const LEN: usize = 10; + let mut buf = StaticSizedRingBuffer::::new_zeros(); + do_enqueue_dequeue(&mut buf, LEN); + } - for _ in 0..2 * LEN { + fn do_enqueue_dequeue>(buf: &mut T, len: usize) { + // Twice length to stress going around ring at least once + for _ in 0..2 * len { assert!(buf.enqueue(42)); assert_eq!(buf.len(), 1); assert!(buf.has_elements()); @@ -163,41 +645,52 @@ mod test { #[test] fn test_push() { const LEN: usize = 10; - const MAX: usize = 100; - let mut ring = [0; LEN + 1]; + let mut ring = [0; LEN]; let mut buf = RingBuffer::new(&mut ring); - for i in 0..LEN { + do_test_push(&mut buf, LEN); + } + + #[test] + fn test_push_const() { + const LEN: usize = 10; + let mut buf = StaticSizedRingBuffer::::new_zeros(); + do_test_push(&mut buf, LEN); + } + + fn do_test_push>(buf: &mut T, len: usize) { + const MAX: usize = 100; + for i in 0..len { assert_eq!(buf.len(), i); assert!(!buf.is_full()); assert_eq!(buf.push(i), None); assert!(buf.has_elements()); } - for i in LEN..MAX { + for i in len..MAX { assert!(buf.is_full()); - assert_eq!(buf.push(i), Some(i - LEN)); + assert_eq!(buf.push(i), Some(i - len)); } - for i in 0..LEN { + for i in 0..len { assert!(buf.has_elements()); - assert_eq!(buf.len(), LEN - i); - assert_eq!(buf.dequeue(), Some(MAX - LEN + i)); + assert_eq!(buf.len(), len - i); + assert_eq!(buf.dequeue(), Some(MAX - len + i)); assert!(!buf.is_full()); } assert!(!buf.has_elements()); } - // Enqueue integers 1 <= n < len, checking that it succeeds and that the + // Enqueue integers 0 <= n < len, checking that it succeeds and that the // queue is full at the end. // See std::iota in C++. 
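A small editorial sketch of the free-running counter scheme both ring buffers now share: head and tail count up forever and wrap, `%` is applied only on access, and the length is `tail - head` (wrapping). Full (`len == N`) and empty (`len == 0`) become distinct states, so no slot is sacrificed; a power-of-two `N` also keeps the `% N` slot mapping consistent across counter wrap-around.

```rust
fn main() {
    const N: u16 = 8;
    let head: u16 = u16::MAX - 1; // counters may wrap past u16::MAX
    let tail: u16 = head.wrapping_add(N); // buffer currently full

    assert_eq!(tail.wrapping_sub(head), N); // len == N: unambiguously full
    assert_eq!(tail % N, head % N); // even though both map to the same slot
}
```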
- fn enqueue_iota(buf: &mut RingBuffer, len: usize) { - for i in 1..len { + fn enqueue_iota>(buf: &mut T, len: usize) { + for i in 0..len { assert!(!buf.is_full()); assert!(buf.enqueue(i)); assert!(buf.has_elements()); - assert_eq!(buf.len(), i); + assert_eq!(buf.len(), i + 1); } assert!(buf.is_full()); @@ -205,11 +698,11 @@ mod test { assert!(buf.has_elements()); } - // Dequeue all elements, expecting integers 1 <= n < len, checking that the + // Dequeue all elements, expecting integers 0 <= n < len, checking that the // queue is empty at the end. // See std::iota in C++. - fn dequeue_iota(buf: &mut RingBuffer, len: usize) { - for i in 1..len { + fn dequeue_iota>(buf: &mut T, len: usize) { + for i in 0..len { assert!(buf.has_elements()); assert_eq!(buf.len(), len - i); assert_eq!(buf.dequeue(), Some(i)); @@ -223,7 +716,7 @@ mod test { // Move the head by `count` elements, by enqueueing/dequeueing `count` // times an element. // This assumes an empty queue at the beginning, and yields an empty queue. - fn move_head(buf: &mut RingBuffer, count: usize) { + fn move_head>(buf: &mut T, count: usize) { assert!(!buf.has_elements()); assert_eq!(buf.len(), 0); @@ -242,11 +735,22 @@ mod test { let mut ring = [0; LEN]; let mut buf = RingBuffer::new(&mut ring); + do_fill_once(&mut buf, LEN); + } + + #[test] + fn test_fill_once_const() { + const LEN: usize = 10; + let mut buf = StaticSizedRingBuffer::::new_zeros(); + do_fill_once(&mut buf, LEN); + } + + fn do_fill_once>(buf: &mut T, len: usize) { assert!(!buf.has_elements()); assert_eq!(buf.len(), 0); - enqueue_iota(&mut buf, LEN); - dequeue_iota(&mut buf, LEN); + enqueue_iota(buf, len); + dequeue_iota(buf, len); } #[test] @@ -255,9 +759,20 @@ mod test { let mut ring = [0; LEN]; let mut buf = RingBuffer::new(&mut ring); + do_test_refill(&mut buf, LEN); + } + + #[test] + fn test_refill_const() { + const LEN: usize = 10; + let mut buf = StaticSizedRingBuffer::::new_zeros(); + do_test_refill(&mut buf, LEN); + } + + fn do_test_refill>(buf: &mut T, len: usize) { for _ in 0..10 { - enqueue_iota(&mut buf, LEN); - dequeue_iota(&mut buf, LEN); + enqueue_iota(buf, len); + dequeue_iota(buf, len); } } @@ -267,11 +782,22 @@ mod test { let mut ring = [0; LEN]; let mut buf = RingBuffer::new(&mut ring); - move_head(&mut buf, LEN - 2); - enqueue_iota(&mut buf, LEN); + do_test_refill(&mut buf, LEN); + } + + #[test] + fn test_retain_const() { + const LEN: usize = 10; + let mut buf = StaticSizedRingBuffer::::new_zeros(); + do_test_refill(&mut buf, LEN); + } + + fn do_test_retain>(buf: &mut T, len: usize) { + move_head(buf, len - 2); + enqueue_iota(buf, len); buf.retain(|x| x % 2 == 1); - assert_eq!(buf.len(), LEN / 2); + assert_eq!(buf.len(), len / 2); assert_eq!(buf.dequeue(), Some(1)); assert_eq!(buf.dequeue(), Some(3)); @@ -280,4 +806,118 @@ mod test { assert_eq!(buf.dequeue(), Some(9)); assert_eq!(buf.dequeue(), None); } + + // This tests the more efficient copy + #[test] + fn test_copy_slice_const() { + const LEN: usize = 100; + let mut buf = StaticSizedRingBuffer::::new_zeros(); + + let mut data: [usize; 70] = [0; 70]; + + for i in 0..70 { + data[i] = i; + } + + move_head(&mut buf, 50); + + assert!(!buf.has_elements()); + + // Push some data + assert!(buf.push_or_enqueue_slice(&data, false).is_ok()); + assert_eq!(buf.len(), 70); + // Push some more without clobbering existing data + assert!(buf.push_or_enqueue_slice(&data, false).is_err()); + // Now with + assert!(buf.push_or_enqueue_slice(&data, true).is_ok()); + assert!(buf.is_full()); + + let mut data_expect: 
[usize; 140] = [0; 140]; + for i in 0..70 { + data_expect[i] = i; + data_expect[i + 70] = i; + } + let data_expect = &data_expect[40..140]; + + for i in 0..100 { + assert_eq!(buf.dequeue(), Ok(data_expect[i])); + } + + assert!(!buf.has_elements()); + } + + #[test] + fn test_dequeue_many() { + #[derive(PartialEq, Debug)] + struct DropInc<'a> { + ctr: &'a std::cell::Cell, + } + impl Drop for DropInc<'_> { + fn drop(&mut self) { + self.ctr.set(self.ctr.get() + 1); + } + } + + let ctr = std::cell::Cell::new(0); + + let buffer = StaticSizedRingBuffer::::new_uninit(); + + for _i in 0..10 { + assert_eq!(buffer.push(DropInc { ctr: &ctr }), Ok(())) + } + + assert_eq!(buffer.len(), 10); + + assert_eq!(buffer.dequeue_many(6), Ok(())); + + assert_eq!(ctr.get(), 6); + + assert_eq!(buffer.dequeue_many(10), Err(())); + + assert_eq!(ctr.get(), 6); + } + + #[test] + fn test_as_slices_up_to() { + let mut buf = StaticSizedRingBuffer::::new_uninit(); + + // Fill the buffer, with the hd in a non-trivial place. + buf.enqueue_slice(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + .expect(""); + buf.dequeue_many(3).expect(""); + buf.enqueue_slice(&[10, 11, 12]).expect(""); + + // Check simple case works: + let (a, b) = buf.as_slices_up_to(4); + assert_eq!(a, [3, 4, 5, 6]); + assert_eq!(b, []); + + // Check the wrap around works: + let (a, b) = buf.as_slices_up_to(10); + assert_eq!(a, [3, 4, 5, 6, 7, 8, 9]); + assert_eq!(b, [10, 11, 12]); + + // And check length cap + let (a, b) = buf.as_slices_up_to(100); + assert_eq!(a, [3, 4, 5, 6, 7, 8, 9]); + assert_eq!(b, [10, 11, 12]); + } + + fn test_iterator() { + let mut buf = StaticSizedRingBuffer::::new_uninit(); + + // Fill the buffer, with the hd in a non-trivial place. + buf.enqueue_slice(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + .expect(""); + buf.dequeue_many(3).expect(""); + buf.enqueue_slice(&[10, 11, 12]).expect(""); + let mut expect: usize = 3; + + for x in &mut buf { + assert_eq!(*x, expect); + expect += 1; + } + + assert_eq!(expect, 13); + } } diff --git a/kernel/src/collections/safe_buf.rs b/kernel/src/collections/safe_buf.rs new file mode 100644 index 000000000..47b82aae9 --- /dev/null +++ b/kernel/src/collections/safe_buf.rs @@ -0,0 +1,812 @@ +use crate::collections::resettable_iterator::{ + ChasingResettableOnce, IntoChasingResettableIterator, +}; +use crate::processbuffer::{ReadableProcessByte, ReadableProcessU32, WriteableProcessU32}; +use crate::{declare_chasing_resettable_x_iterator, declare_resettable_x_iterator}; +use core::cell::{Cell, Ref}; +use core::mem; +use core::ops::{Deref, Index, IndexMut, Range, RangeFrom, RangeTo}; +use core::ptr::NonNull; +use misc::divorce::{ + DivorceLifeless, Divorceable, Divorced, LifelessCast, LifelessRef, LifelessRefMut, + LifelessRefTraits, Reunitable, +}; +use misc::trait_alias; + +/// A single interface for the purpose of (possibly DMA-enabled) IO to (a possible chain of) buffers +/// This interface is meant to bring in line different types that might be used for this purpose +/// Buffers need to be able to contain: +/// Buffers allowed by userspace with allow_ro / allow_rw +/// Buffers allocated in (custom) grants +/// Statically allocated buffers +/// Stack allocated buffers +/// The exact types are likely going to be a blend of +/// `[u8;N]` +/// `[u8]` +/// `[Cell]` +/// `[ReadableProcessByte]` +/// `Ref` of the above to make DMA more feasible +/// It is preferable to be polymorphic in the type over just borrowing to get `& mut [Cell]`, +/// because the lifetime of such a reference will not play nicely with async code where the 
callee +/// needs to store the reference for the duration of an operation, possibly a DMA operation. +/// +/// Also, even though `& mut [Cell]` is likely the intersection of all requirements, it may not +/// optimise as well as, say, `[u8;N]` where bounds checking can be done statically. +/// +/// Currently, there is also wasted effort tracking "progress" through a buffer with a manual index. +/// These are more naturally written as iterators, to avoid extra bounds checking and wasted effort. +/// ResettableIterator is offered to allow the common pattern of being passed a buffer, making an +/// iterator, looping (possibly in batches across interrupts), converting back into the buffer, +/// and then calling a callback. +/// Iterators also have the advantage of walking linked lists better than indexing +/// We will still offer the indexing option, but it should probably be avoided + +/// This type describing a single contiguous buffer that corresponds to a rust type in the address +/// space of the CPU. +pub type CpuDmaRef = LifelessRef<[u8]>; +/// Mutable version of CpuDmaRef +pub type CpuDmaRefMut = LifelessRefMut<[u8]>; + +pub unsafe fn cpu_dma_ref_from_address(base: usize, len: usize) -> CpuDmaRef { + CpuDmaRef::remake(NonNull::slice_from_raw_parts( + NonNull::new_unchecked(base as *mut u8), + len, + )) +} + +pub unsafe fn cpu_dma_ref_mut_from_address(base: usize, len: usize) -> CpuDmaRefMut { + CpuDmaRefMut::remake(NonNull::slice_from_raw_parts( + NonNull::new_unchecked(base as *mut u8), + len, + )) +} + +/// Traits for reading a byte +pub trait GetByte { + /// Get the byte + fn get_byte(&self) -> u8; +} +/// Trait for writing a byte +pub trait SetByte: GetByte { + /// Set the byte + fn set_byte(&mut self, value: u8); +} + +// Just u8's, used for kernel originated buffers and grants +impl GetByte for &mut u8 { + fn get_byte(&self) -> u8 where { + **self + } +} +impl SetByte for &mut u8 { + fn set_byte(&mut self, value: u8) { + **self = value; + } +} +impl GetByte for &u8 { + fn get_byte(&self) -> u8 where { + **self + } +} + +impl GetByte for u8 { + fn get_byte(&self) -> u8 { + *self + } +} + +impl SetByte for u8 { + fn set_byte(&mut self, value: u8) { + *self = value; + } +} + +// Cell used for RW allow buffers +impl<'a> GetByte for &'a Cell { + fn get_byte(&self) -> u8 { + Cell::::get(*self) + } +} +impl<'a> SetByte for &'a Cell { + fn set_byte(&mut self, value: u8) { + Cell::::set(*self, value); + } +} + +impl GetByte for Cell { + fn get_byte(&self) -> u8 { + self.get() + } +} + +impl SetByte for Cell { + fn set_byte(&mut self, value: u8) { + self.set(value) + } +} + +// Used for RO allow buffers +impl GetByte for ReadableProcessByte { + fn get_byte(&self) -> u8 { + ReadableProcessByte::get(self) + } +} + +impl<'a> GetByte for &'a ReadableProcessByte { + fn get_byte(&self) -> u8 { + (*self).get_byte() + } +} + +pub trait GetU32 { + fn get_u32(&self) -> u32; +} + +impl<'a> GetU32 for &'a ReadableProcessU32 { + fn get_u32(&self) -> u32 { + self.get() + } +} + +impl<'a> GetU32 for &'a u32 { + fn get_u32(&self) -> u32 { + **self + } +} + +impl<'a> GetU32 for &'a WriteableProcessU32 { + fn get_u32(&self) -> u32 { + self.get() + } +} + +/// A trait for a collection that has a number of elements +pub trait BufLength { + /// How many elements are in this collection + fn buf_len(&self) -> usize; +} + +impl BufLength for [T] { + fn buf_len(&self) -> usize { + self.len() + } +} + +impl BufLength for [T; N] { + fn buf_len(&self) -> usize { + N + } +} + +impl BufLength for NonNull<[T]> { + fn 
buf_len(&self) -> usize { + self.len() + } +} + +impl BufLength for NonNull<[T; N]> { + fn buf_len(&self) -> usize { + N + } +} + +/// The trait used for DMA +pub trait FragmentDivorce { + /// Divorce a lifeless ref to a slice of u8s, leaving whatever remains behind + fn divorce_fragment(&mut self) -> CpuDmaRef; + /// Reunite them again + fn reunite_fragment(&mut self, lifeless: CpuDmaRef); + /// TODO: Decide if I want a capability to invoke this interface + /// b/271563432 + /// Reunite from a raw pointer that has been read from some CSR / Descriptor + /// This is less safe than the other reunite. Don't stash values from consuming the lifeless ref, + /// always try to read back from hardware to catch errors. + fn reunite_fragment_raw(&mut self, ptr: NonNull<[u8]>); + /// Does the fragment match the divorced type. You should already probably know if it does, + /// this is mostly here for when the LifelessRefs were split in a way that was not tracked. + fn raw_matches(&self, ptr: NonNull<[u8]>) -> bool; +} + +/// Mutable version of FragmentDivorce where the fragments are LifelessRefMut rather than +/// LifelessRef. +pub trait FragmentDivorceMut: FragmentDivorce { + fn divorce_fragment_mut(&mut self) -> CpuDmaRefMut; + fn reunite_fragment_mut(&mut self, lifeless: CpuDmaRefMut); + fn reunite_fragment_mut_raw(&mut self, ptr: NonNull<[u8]>); +} + +/// More generic versions of fragment divorce to avoid a copy and paste for the mutable version +pub trait FragmentDivorceGeneric { + fn divorce_fragment_gen(&mut self) -> Frag; + fn reunite_fragment_gen(&mut self, lifeless: Frag); + fn reunite_fragment_raw_gen(&mut self, ptr: NonNull); +} + +// More helpful trait names for a buffer that can be converted into an iterator over bytes, +// and then back + +// For Reading bytes with the CPU +declare_resettable_x_iterator!( + ResettableByteReadIterator, + ResettableByteReadIteratorTy, + IntoResettableByteReadIterator, + GetByte +); +declare_resettable_x_iterator!( + ResettableU32ReadIterator, + ResettableU32ReadIteratorTy, + IntoResettableU32ReadIterator, + GetU32 +); +// For Writing bytes with the CPU +declare_resettable_x_iterator!( + ResettableByteWriteIterator, + ResettableByteWriteIteratorTy, + IntoResettableByteWriteIterator, + SetByte +); +// For read-only DMA access +declare_chasing_resettable_x_iterator!( + ResettableDivorceIterator, + ResettableDivorceIteratorTy, + IntoResettableDivorceIterator, + FragmentDivorce +); +// For read/write DMA access +declare_chasing_resettable_x_iterator!( + ResettableDivorceMutIterator, + ResettableDivorceMutIteratorTy, + IntoResettableDivorceMutIterator, + FragmentDivorceMut +); + +trait_alias!( + /// Can index to read bytes. Slicing will give the same type. + pub trait ByteReadIndexSelf = Index, Index, Output = Self> + ,Index, Output = Self> + ,Index, Output = Self> + as where Index:::Output as IndexOutputGet: GetByte | ?Sized +); + +trait_alias!( + /// Can index to read bytes. Can also be sub-sliced that (may) give a different type + pub trait ByteReadIndex = Index, Index> + ,Index> + ,Index> + as where Index:::Output as IndexOutputGet: GetByte | ?Sized, + Index>:::Output as IndexR: ByteReadIndexSelf | ?Sized, + Index>:::Output as IndexT: ByteReadIndexSelf | ?Sized, + Index>:::Output as IndexF: ByteReadIndexSelf | ?Sized +); + +trait_alias!( + /// This trait is meant to encapsulate a (possibly chain of) read only data buffers. + /// Try to use the iterator paradigm, but you can also deref to index. 
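+ /// A hedged sketch (not from the original source): a `GenBufRead` can also
+ /// be read through its `Deref` half, with `get_byte` supplied by the
+ /// `GetByte` impls above. Iteration is preferred in real code; indexing is
+ /// shown only because it is the simpler form.
+ ///
+ /// ```ignore
+ /// fn first_byte<B: GenBufRead>(buf: &B) -> u8 {
+ ///     // Deref to the indexable target, then read through GetByte.
+ ///     (**buf)[0].get_byte()
+ /// }
+ /// ```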
+ pub trait GenBufRead = IntoResettableByteReadIterator, Deref + as where Deref:::Target as DerefTarget : ByteReadIndex | ?Sized +); + +trait_alias!( + pub trait ByteWriteIndexSelf = IndexMut, IndexMut, Output = Self> + ,IndexMut, Output = Self> + ,IndexMut, Output = Self> + as where Index:::Output as IndexOutputSet: SetByte | ?Sized +); + +trait_alias!( + pub trait ByteWriteIndex = Index, IndexMut> + ,IndexMut> + ,IndexMut> + as where Index:::Output as IndexOutputGet: SetByte | ?Sized, + Index>:::Output as IndexR: ByteWriteIndexSelf | ?Sized, + Index>:::Output as IndexT: ByteWriteIndexSelf | ?Sized, + Index>:::Output as IndexF: ByteWriteIndexSelf | ?Sized +); + +trait_alias!( + pub trait GenBufWrite = IntoResettableByteWriteIterator, Deref + as where Deref:::Target as DerefTargetWrite : ByteWriteIndex | ?Sized +); + +pub trait DMAFinish { + type From; + fn finish_dma(self) -> Self::From; +} + +// Can be used for DMA (read only) +pub trait GenBufDMARead { + type GenBufDMA: IntoResettableDivorceIterator + DMAFinish; + /// Perform any type conversion / wrapping required to store divorced types + fn prepare_dma(self) -> Self::GenBufDMA; +} + +/// A single, contiguous, DMA buffer. Only use this when hardware/software can really not handle +/// fragmentation as IOMPUs / length-matching may also fragment. +///GenBufDMARead is corresponding non-contiguous version. +pub trait GenBufSingleDMARead { + type GenBufDMA: FragmentDivorce + DMAFinish; + fn prepare_dma_single(self) -> Self::GenBufDMA; +} + +trait_alias!( + pub trait GenBufSingleDMAWrite = GenBufSingleDMARead + as where GenBufSingleDMARead:::GenBufDMA as GenBufDMABounded : FragmentDivorceMut | + DMAFinish +); + +trait_alias!( + pub trait GenBufDMAWrite = GenBufDMARead + as where GenBufDMARead:::GenBufDMA as GenBufDMABounded : IntoResettableDivorceMutIterator | + DMAFinish +); + +/// For types that just create a single fragment we wrap in an enum that allows the divorced type +/// to remain. We have the None state to help in-place modification. +pub enum PayloadOrDivorced { + Payload(P), + Divorced(Divorced), + None(), +} + +// Implement default to set state to none +impl Default for PayloadOrDivorced
<P>
{ + fn default() -> Self { + PayloadOrDivorced::None() + } +} + +// Prepare single buffer for DMA by allocating space for divorced type +impl<P: DivorceLifeless> GenBufDMARead for P +where + PayloadOrDivorced
<P>
: IntoResettableDivorceIterator, +{ + type GenBufDMA = PayloadOrDivorced
<P>
; + + fn prepare_dma(self) -> Self::GenBufDMA { + PayloadOrDivorced::Payload(self) + } +} + +// Same again for GenBufSingleDMARead but without the requirement for an iterator +impl<P: DivorceLifeless> GenBufSingleDMARead for P +where + PayloadOrDivorced
<P>
: FragmentDivorce, +{ + type GenBufDMA = PayloadOrDivorced
<P>
; + + fn prepare_dma_single(self) -> Self::GenBufDMA { + PayloadOrDivorced::Payload(self) + } +} + +// Finish by unwrapping +impl<P: DivorceLifeless> DMAFinish for PayloadOrDivorced
<P>
{ + type From = P; + + #[inline] + fn finish_dma(self) -> Self::From { + match self { + PayloadOrDivorced::Payload(p) => p, + _ => { + panic!() + } + } + } +} + +// PayloadOrDivorced is a once iterator + +impl<P: DivorceLifeless> IntoChasingResettableIterator for PayloadOrDivorced
<P>
{ + type ChasingResetIterType = ChasingResettableOnce<Self>; + + fn into_chasing_resettable_iterator(self) -> Self::ChasingResetIterType { + ChasingResettableOnce::new(self) + } +} + +impl<P: DivorceLifeless> FragmentDivorceGeneric<P::Lifeless> for PayloadOrDivorced
<P>
{ + fn divorce_fragment_gen(&mut self) -> P::Lifeless { + let val = mem::take(self); + let (d, l) = match val { + PayloadOrDivorced::Payload(payload) => payload.divorce(), + _ => panic!(), + }; + *self = PayloadOrDivorced::Divorced(d); + l + } + + fn reunite_fragment_gen(&mut self, lifeless: P::Lifeless) { + let val = mem::take(self); + let payload = match val { + PayloadOrDivorced::Divorced(d) => d.reunite(lifeless), + _ => panic!(), + }; + *self = PayloadOrDivorced::Payload(payload); + } + + fn reunite_fragment_raw_gen(&mut self, ptr: NonNull) { + unsafe { + self.reunite_fragment_gen(P::Lifeless::remake(ptr)); + } + } +} + +// Provide the implementations of the two concrete traits from the more generic ones + +impl<P: DivorceLifeless> FragmentDivorce for PayloadOrDivorced
<P>
+where + P::Lifeless: LifelessCast<CpuDmaRef>, +{ + fn divorce_fragment(&mut self) -> CpuDmaRef { + self.divorce_fragment_gen().cast() + } + + fn reunite_fragment(&mut self, lifeless: CpuDmaRef) { + // Safety: we are possibly casting back in an illegal way but this will be checked by the + // reunite. + unsafe { + self.reunite_fragment_gen(P::Lifeless::cast_back(lifeless)); + } + } + + fn reunite_fragment_raw(&mut self, ptr: NonNull<[u8]>) { + unsafe { + self.reunite_fragment(LifelessRef::remake(ptr)); + } + } + + fn raw_matches(&self, ptr: NonNull<[u8]>) -> bool { + // Safety: we only construct the lifeless ref for the duration of the check + if let Some(l) = unsafe { P::Lifeless::try_cast_back(LifelessRef::remake(ptr)) } { + match &self { + PayloadOrDivorced::Divorced(d) => Divorceable::<P::Lifeless>::matches(d, &l), + _ => panic!(), + } + } else { + false + } + } +} + +impl<P: DivorceLifeless> FragmentDivorceMut for PayloadOrDivorced
<P>
+where + Self: FragmentDivorce, + P::Lifeless: LifelessCast, +{ + fn divorce_fragment_mut(&mut self) -> CpuDmaRefMut { + self.divorce_fragment_gen().cast() + } + + fn reunite_fragment_mut(&mut self, lifeless: CpuDmaRefMut) { + // Safety: we are possibly casting back in an illegal way but this will checked by the + // reunite. + unsafe { + self.reunite_fragment_gen(P::Lifeless::cast_back(lifeless)); + } + } + + fn reunite_fragment_mut_raw(&mut self, ptr: NonNull<[u8]>) { + unsafe { + self.reunite_fragment_mut(LifelessRefMut::remake(ptr)); + } + } +} + +// Also provide on the references + +impl<'a, T: FragmentDivorce> FragmentDivorce for &'a mut T { + fn divorce_fragment(&mut self) -> CpuDmaRef { + (*self).divorce_fragment() + } + + fn reunite_fragment(&mut self, lifeless: CpuDmaRef) { + (*self).reunite_fragment(lifeless) + } + + fn reunite_fragment_raw(&mut self, ptr: NonNull<[u8]>) { + (*self).reunite_fragment_raw(ptr) + } + + fn raw_matches(&self, ptr: NonNull<[u8]>) -> bool { + ::raw_matches(self, ptr) + } +} + +impl<'a, T: FragmentDivorceMut> FragmentDivorceMut for &'a mut T { + fn divorce_fragment_mut(&mut self) -> CpuDmaRefMut { + (*self).divorce_fragment_mut() + } + + fn reunite_fragment_mut(&mut self, lifeless: CpuDmaRefMut) { + (*self).reunite_fragment_mut(lifeless) + } + + fn reunite_fragment_mut_raw(&mut self, ptr: NonNull<[u8]>) { + (*self).reunite_fragment_mut_raw(ptr) + } +} + +/// A type to support DMA to a linked list of buffer fragments. +/// If there is only one buffer to be used at an interface, then it should already support +/// GenBufDMARead/Write. If you need a chain of same typed buffers, use ChainBuf<'a, T> for +/// that type. If chains are being passed through multiple layers which attach different types, +/// ChainBuf<'a, GenPayload<'a>> is designed for that use case. +/// ChainBuf allows editing the chain, and so we wrap things in cells. +/// I chose this over simply a mutable reference because we might need multiple references. 
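+/// A minimal sketch of building and walking a two-fragment chain (this mirrors
+/// the `match_iter_test` at the bottom of this file; the slice payloads are
+/// only illustrative):
+///
+/// ```ignore
+/// let mut a = [0u8; 20];
+/// let mut b = [0u8; 20];
+/// // The tail link has no successor; earlier links point at later ones.
+/// let tail = ChainBuf::new_divorcable(b.as_mut_slice());
+/// let head = ChainBuf::new_divorcable_with_next(a.as_mut_slice(), &tail);
+/// for link in head.get_ref() {
+///     // Each `link` derefs to a ChainBuf whose payload can be divorced for DMA.
+/// }
+/// ```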
+pub struct ChainBuf<'a, PayloadT: 'a> { + next: Option>, + payload: Cell, +} + +impl<'a, PayloadT: 'a + FragmentDivorce + Default> GenBufDMARead for ChainLink<'a, PayloadT> { + type GenBufDMA = Self; + + fn prepare_dma(self) -> Self::GenBufDMA { + self + } +} + +impl<'a, PayloadT: 'a + FragmentDivorce + Default> DMAFinish for ChainLink<'a, PayloadT> { + type From = Self; + + fn finish_dma(self) -> Self::From { + self + } +} + +// Implement the FragmentDivorce/Mut traits for ChainBufs +impl<'a, PayloadT: 'a + FragmentDivorce + Default> FragmentDivorce for ChainLink<'a, PayloadT> { + fn divorce_fragment(&mut self) -> CpuDmaRef { + let mut p = self.payload.take(); + let result = p.divorce_fragment(); + self.payload.set(p); + result + } + + fn reunite_fragment(&mut self, lifeless: CpuDmaRef) { + let mut p = self.payload.take(); + let result = p.reunite_fragment(lifeless); + self.payload.set(p); + result + } + + fn reunite_fragment_raw(&mut self, ptr: NonNull<[u8]>) { + let mut p = self.payload.take(); + let result = p.reunite_fragment_raw(ptr); + self.payload.set(p); + result + } + + fn raw_matches(&self, ptr: NonNull<[u8]>) -> bool { + let p = self.payload.take(); + let result = p.raw_matches(ptr); + self.payload.set(p); + result + } +} + +impl<'a, PayloadT: 'a + FragmentDivorceMut + Default> FragmentDivorceMut + for ChainLink<'a, PayloadT> +{ + fn divorce_fragment_mut(&mut self) -> CpuDmaRefMut { + let mut p = self.payload.take(); + let result = p.divorce_fragment_mut(); + self.payload.set(p); + result + } + + fn reunite_fragment_mut(&mut self, lifeless: CpuDmaRefMut) { + let mut p = self.payload.take(); + let result = p.reunite_fragment_mut(lifeless); + self.payload.set(p); + result + } + + fn reunite_fragment_mut_raw(&mut self, ptr: NonNull<[u8]>) { + let mut p = self.payload.take(); + let result = p.reunite_fragment_mut_raw(ptr); + self.payload.set(p); + result + } +} + +impl<'a, PayloadT: 'a> ChainBuf<'a, PayloadT> { + pub fn new_with_next(payload: PayloadT, next: Option>) -> Self { + ChainBuf { + next, + payload: Cell::new(payload), + } + } + pub fn new(payload: PayloadT) -> Self { + Self::new_with_next(payload, None) + } + pub fn clone_next(&self) -> Option> { + self.next.clone() + } + pub fn replace_next( + &mut self, + val: Option>, + ) -> Option> { + mem::replace(&mut self.next, val) + } + pub fn get_ref(&'a self) -> ChainLink<'a, PayloadT> { + self.into() + } +} + +// Some convenience wrappers for putting in a payload that first needs wrapping +impl<'a, InnerP: 'a + DivorceLifeless> ChainBuf<'a, PayloadOrDivorced> { + pub fn new_divorcable_with_next>>>( + payload: InnerP, + next: NextT, + ) -> Self { + Self::new_with_next(PayloadOrDivorced::Payload(payload), Some(next.into())) + } + pub fn set_next>>>( + &mut self, + next: NextT, + ) -> Option>> { + self.next.replace(next.into()) + } + pub fn new_divorcable(payload: InnerP) -> Self { + Self::new_with_next(PayloadOrDivorced::Payload(payload), None) + } +} + +/// A link for scatter gather DMA lists. Contains the two most common types of reference we expect +/// to be in use. +/// A list with just one variant would not be appendable to a list that used the other. 
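+/// A hedged sketch of the two ways to obtain a link (via the `From` impls
+/// below; `cell` is an assumed `RefCell` holding a `ChainBuf`):
+///
+/// ```ignore
+/// let plain: ChainLink<_> = (&chain_buf).into(); // borrow-backed link
+/// let counted: ChainLink<_> = cell.borrow().into(); // Ref-backed link
+/// // Both deref to the same ChainBuf, so traversal code is agnostic.
+/// ```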
+pub enum ChainLink<'a, PayloadT: 'a> { + // Limited lifetime reference + Ref(&'a ChainBuf<'a, PayloadT>), + // Reference counted reference (might not really be static, but the Ref will always outlive the cell) + RcRef(Ref<'a, ChainBuf<'a, PayloadT>>), +} + +// Can convert between the different types of reference +impl<'a, PayloadT: 'a> From<&'a ChainBuf<'a, PayloadT>> for ChainLink<'a, PayloadT> { + fn from(val: &'a ChainBuf<'a, PayloadT>) -> Self { + ChainLink::Ref(val) + } +} + +impl<'a, PayloadT: 'a> From>> for ChainLink<'a, PayloadT> { + fn from(val: Ref<'a, ChainBuf<'a, PayloadT>>) -> Self { + ChainLink::RcRef(val) + } +} + +impl<'a, PayloadT: 'a> Clone for ChainLink<'a, PayloadT> { + fn clone(&self) -> Self { + match self { + ChainLink::Ref(x) => ChainLink::Ref(x.clone()), + // This clone is only an associated method, so we have to do this ourselves + ChainLink::RcRef(x) => ChainLink::RcRef(Ref::<'_, ChainBuf<'_, PayloadT>>::clone(x)), + } + } +} + +// Allow a chainlink to be used as a ChainBuf reference +impl<'a, PayloadT: 'a> Deref for ChainLink<'a, PayloadT> { + type Target = ChainBuf<'a, PayloadT>; + + fn deref(&self) -> &Self::Target { + match self { + ChainLink::Ref(r) => *r, + ChainLink::RcRef(rc) => rc.deref(), + } + } +} + +pub struct ChainBufIter<'a, P: 'a> { + next: Option>, +} + +// Implement into iter for all of ChainLink or a raw reference +impl<'a, P: 'a> IntoIterator for ChainLink<'a, P> { + type Item = ChainLink<'a, P>; + type IntoIter = ChainBufIter<'a, P>; + + fn into_iter(self) -> Self::IntoIter { + ChainBufIter { next: Some(self) } + } +} +impl<'a, P: 'a> IntoIterator for &'a ChainBuf<'a, P> { + type Item = ChainLink<'a, P>; + type IntoIter = ChainBufIter<'a, P>; + + fn into_iter(self) -> Self::IntoIter { + ChainBufIter { + next: Some(ChainLink::Ref(self)), + } + } +} + +impl<'a, P> Iterator for ChainBufIter<'a, P> { + type Item = ChainLink<'a, P>; + + fn next(&mut self) -> Option { + match self.next.take() { + Some(gn) => { + self.next = gn.clone_next(); + Some(gn) + } + None => None, + } + } +} + +#[cfg(test)] +mod tests { + use crate::collections::resettable_iterator::IntoChasingResettableIterator; + use crate::collections::safe_buf::{ChainBuf, GenBufDMARead}; + use crate::platform::iompu::{DMAMatchIter, DmaRef, DmaRefMut, InOrderIOMPU}; + + type NoMPU = crate::platform::iompu::NoIOMPU<()>; + + #[test] + fn match_iter_test() { + // 60 bytes in two lots of 30 + let src1 = [77u8; 30].as_slice(); + let src2 = [77u8; 30].as_slice(); + + // 60 bytes in three lots of 20 + let mut dst1 = [0u8; 20]; + let mut dst2 = [0u8; 20]; + let mut dst3 = [0u8; 20]; + + // Some chains over them + let dst_chain = ChainBuf::new_divorcable(dst3.as_mut_slice()); + let dst_chain = ChainBuf::new_divorcable_with_next(dst2.as_mut_slice(), &dst_chain); + let dst_chain = ChainBuf::new_divorcable_with_next(dst1.as_mut_slice(), &dst_chain); + + let src_chain = ChainBuf::new_divorcable(src2); + let src_chain = ChainBuf::new_divorcable_with_next(src1, &src_chain); + + // Into iterators + let dst_iter = dst_chain + .get_ref() + .prepare_dma() + .into_chasing_resettable_iterator(); + let src_iter = src_chain + .get_ref() + .prepare_dma() + .into_chasing_resettable_iterator(); + + // Create stream from an IOMPU + let io_mpu = NoMPU::new(); + + let dst_stream = io_mpu.start_dma_stream_mut(dst_iter); + let src_stream = io_mpu.start_dma_stream(src_iter); + + // Zip equal sized parts + let mut match_iter: DMAMatchIter<_, _> = DMAMatchIter::new(dst_stream, src_stream, &io_mpu); + + let mut total = 0; 
+ + let mut pairs: [Option<(DmaRefMut, DmaRef)>; 10] = Default::default(); + let mut i = 0; + + // Iter over equal sized fragments. Get pairs of lifelessRefs to do DMA on + for pair in match_iter.iter(&io_mpu) { + let (dst_frag, src_frag) = pair.ok().unwrap(); + // They should match in size + assert_eq!(dst_frag.len(), src_frag.len()); + total += dst_frag.len(); + pairs[i] = Some((dst_frag, src_frag)); + i += 1; + } + + // And again to return them. Getting the order wrong / forgetting one will cause a panic + for j in 0..i { + if let Some((dst_frag, src_frag)) = pairs[j].take() { + match_iter.reunite_pair((dst_frag.into(), src_frag.into()), &io_mpu) + } + } + + // Should have totalled correctly + assert_eq!(total, 60); + } + + // See gpdma.rs for more complicated tests involving the traits in the file. +} diff --git a/kernel/src/component.rs b/kernel/src/component.rs index 872e85ede..f63a2110d 100644 --- a/kernel/src/component.rs +++ b/kernel/src/component.rs @@ -36,3 +36,654 @@ pub trait Component { /// Output type object. unsafe fn finalize(self, static_memory: Self::StaticInput) -> Self::Output; } + +/// Until issue 90091 clears up, here is a split_array_mut that is both const and has both +/// outputs as arrays, not slices +/// N-M must be provided due to lack of const_generic_expr +pub const fn split_array_mut( + input: &mut [T; N], +) -> (&mut [T; M], &mut [T; N_MINUS_M]) { + if (N - M) != N_MINUS_M { + panic!("Wrong constants") + } + + let (x, y) = input.split_at_mut(M); + // SAFETY: M is checked by split_at_mut + unsafe { + ( + &mut *(x.as_mut_ptr() as *mut [T; M]), + &mut *(y.as_mut_ptr() as *mut [T; N_MINUS_M]), + ) + } +} + +// It looks feature(const_precise_live_drops) might fix the issue that made me need this. + +/// Similar to Component, StaticComponent encapsulates construction and initialisation of devices +/// and capsules, but it is intended to be used in a way such that it works with const +/// initialization. +/// +/// The constructor should allocate both StaticState/StaticStateMut for the component. +/// They can initialize them with the values returned from component_new. +/// +/// Then, the StaticComponent can be created using references to the aforementioned state +/// +/// During boot, component_finalize should be called. +/// +/// See kernel::define_components for a helper macro that does all this automatically. +/// +/// See kernel::simple_static_component for a macro to implement this interface for you. +/// See kernel::very_simple_component if you type just has a new() and finalize() without any +/// inheritance / dependant state. +#[const_trait] +pub trait StaticComponent { + /// The type this factory produces + type Output: Sized; + + /// Should be statically allocated by constructor with static storage. + type StaticState: Sized; + type StaticStateMut: Sized; + + /// A special case where StaticStateMut would contain large buffers of the type [u8; N] + /// This is kept separate from the rest of StaticStateMut to ensure that the linker can + /// place it in BSS. + /// The intent was for this to be an associated constant, but the arithmetic on those makes + /// the compiler sad. So instead this an associated type of [u8; N]. + /// Nothing is stopping you from setting it to something else, but try to keep it all 0. + /// CheckedNewZero should perform enough checks such that you can't make a mistake. 
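+ /// A hedged example: a component that owns a 64-byte scratch buffer would
+ /// write
+ ///
+ /// ```ignore
+ /// type BufferBytes = [u8; 64]; // stays all-zero so the linker can keep it in BSS
+ /// ```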
+ type BufferBytes: ~const CheckedNewZero + Sized; + + /// Should also be provided when calling new + /// The lifetime parameter here is for state that needs borrowing for component_new. + /// It should not be a lifetime of the output, which is almost certainly static. + type NewInput<'a>; + + /// Construct initial state for this component and the state associated with it. + /// slf is the reference to the static where eventually the result from this will get assigned. + /// Trying to access any member of slf inside this method will almost certainly cause a cycle + /// and break compilation. + /// You can take a reference to them. + fn component_new<'a>( + slf: &'static Self::Output, + state: &'static Self::StaticState, + state_mut: &'static mut Self::StaticStateMut, + buffer: &'static mut Self::BufferBytes, + input: Self::NewInput<'a>, + ) -> (Self::Output, Self::StaticState, Self::StaticStateMut); +} + +/// Rust does not like calling const associated methods directly, here is a helper to call the +/// associated method on a StaticComponent. +pub const fn call_component_new<'a, T: ~const StaticComponent + ?Sized>( + slf: &'static T::Output, + state: &'static T::StaticState, + state_mut: &'static mut T::StaticStateMut, + buffer: &'static mut T::BufferBytes, + input: T::NewInput<'a>, +) -> (T::Output, T::StaticState, T::StaticStateMut) { + T::component_new(slf, state, state_mut, buffer, input) +} + +/// Everything that implements StaticComponent should probably also implement this. +/// There is no actual requirement, but the helper macros will break if you don't. +/// We keep it as separate trait as it is not const. +pub trait StaticComponentFinalize: ~const StaticComponent { + type FinaliseInput; + fn component_finalize( + _slf: &'static Self::Output, + _state: &'static Self::StaticState, + _input: Self::FinaliseInput, + ) { + } +} + +/// A version of NewZero that will also check the result. +#[const_trait] +pub trait CheckedNewZero: ~const NewZero + Sized { + const SZ: usize = core::mem::size_of::(); + + fn new_zero_checked() -> Self { + let result = Self::new_zero(); + + let as_bytes = core::ptr::addr_of!(result) as *const u8; + let as_bytes = unsafe { core::slice::from_raw_parts(as_bytes, Self::SZ) }; + + let mut i = 0; + + while i != Self::SZ { + if as_bytes[i] != 0 { + panic!("Result non-zero"); + } + i += 1; + } + + result + } +} +impl const CheckedNewZero for T {} + +pub const fn call_new_zero_checked() -> T { + T::new_zero_checked() +} + +/// Constructor that returns all zeros. +/// It is not unsafe to return non-zeros, just bad for performance. +#[const_trait] +pub trait NewZero { + fn new_zero() -> Self; +} + +/// Implement NewZero for arrays of u8s: +impl const NewZero for [u8; N] { + fn new_zero() -> Self { + [0u8; N] + } +} + +/// And for unit +impl const NewZero for () { + fn new_zero() -> Self { + () + } +} + +/// And tuples (recursively), up to 8 elements. Feel free to bump the number if need be. +macro_rules! implement_new_zero { + ($($t : ident)*) => { + impl<$($t : ~const NewZero,)*> const NewZero for ($($t,)*) { + fn new_zero() -> Self { + ($($t::new_zero(),)*) + } + } + }; +} +macro_rules! implement_new_zero_rec { + ($t : ident) => (implement_new_zero!($t);); + ($t : ident $($ts : ident)*) => ( + implement_new_zero!($t $($ts)*); + implement_new_zero_rec!($($ts)*); + ) +} +implement_new_zero_rec!(A B C D E F G H); + +#[macro_export] +macro_rules! 
define_components_no_helpers { + ( + structs $components : ident, $non_mut: ident, $mut : ident { + $($id : ident : $t : path,)* + } + ) => { + pub struct $components { + $(pub $id : <$t as $crate::component::StaticComponent>::Output),* + } + pub struct $non_mut { + $(pub $id : <$t as $crate::component::StaticComponent>::StaticState),* + } + pub struct $mut { + $(pub $id : <$t as $crate::component::StaticComponent>::StaticStateMut),* + } + } +} + +/// A helper macro to declare, init, and finalize components for your board. +/// Will declare three structs for you, all of which should have static storage. +/// Only the last type should have mutable reference taken to it. You should take care not to +/// take more than one. +/// Calling this will define two other macros you can call in your init and finalize logic. +/// +/// Usage: +/// ```ignore +/// // Define three types (in a scope your board logic can see) +/// kernel::define_components!(structs Components, CState, CStateMut { +/// component1 : Type1, +/// component2 : Type2, +/// }); +/// +/// // Construct an instance of each (use this within your const initializer). args will be extra +/// // arguments to construct each of the componants and depends on their type. +/// kernel::construct_components!(let componants, state, state_mut = +/// ref_to_components, ref_to_state, ref_to_mut_state, { +/// (args1), +/// (args2), +/// }); +/// +/// // And finally finalise (probably inside main) +/// kernel::finalize_components!(ref_to_components, ref_to_state { +/// (extra_args), +/// (extra_args), +/// }); +/// ``` +/// +/// NOTE: you will likely need `const_precise_live_drops` to call this macro. +#[macro_export] +macro_rules! define_components { + ( + structs $components : ident, $non_mut: ident, $mut : ident { + $($id : ident : $t : path,)* + } + ) => { + $crate::define_components_no_helpers!(structs $components, $non_mut, $mut { + $($id : $t,)* + }); + + struct componant_buffers + { + $(pub $id : <$t as $crate::component::StaticComponent>::BufferBytes),* + } + use $crate::component::CheckedNewZero; + static mut COMPONANT_BUFFER : componant_buffers = componant_buffers { + $($id : $crate::component::call_new_zero_checked::<<$t as $crate::component::StaticComponent>::BufferBytes>()),* + }; + + /// A helper macro to init the structs described by define_components. + /// See define_components. + #[macro_export] + macro_rules! construct_components { + ($$($$t:tt)*) => ( + $crate :: construct_components_helper!( + {$components, $non_mut, $mut {$($id : $t,)*}}, + $$($$t)*) + ) + } + + /// A helper macro finalize the structs described by define_components. + /// See define_components. + #[macro_export] + macro_rules! finalize_components { + ($$($$t:tt)*) => ( + $crate :: finalize_components_helper!( + {$components, $non_mut, $mut {$($id : $t,)*}}, + $$($$t)*) + ) + } + } +} + +/// A version of `define_components` that also creates a single component that can be used to +/// construct this set of components. +/// Usage is the same as define_components (see that) preceded by a `component = IDENT,` +/// `IDENT` will be a new component that will define everything in this set. + +#[macro_export] +macro_rules! 
define_components_as_component { + ( + component = $fac : ident, structs $components : ident, $non_mut: ident, $mut : ident { + $($id : ident : $t : path,)* + } + ) => { + // First define the components + $crate::define_components_no_helpers!(structs $components, $non_mut, $mut { $($id : $t,)* }); + // Then declare the factory + pub struct $fac(); + // Then implement component + impl const $crate::component::StaticComponent for $fac { + type Output = $components; + type StaticState = $non_mut; + type StaticStateMut = $mut; + type BufferBytes = ($(<$t as $crate::component::StaticComponent>::BufferBytes,)*); + type NewInput<'a> = ($(<$t as $crate::component::StaticComponent>::NewInput<'a>,)*); + + fn component_new<'a>(slf: &'static Self::Output, state: &'static Self::StaticState, state_mut: &'static mut Self::StaticStateMut, buffer: &'static mut Self::BufferBytes, input: Self::NewInput<'a>) -> + (Self::Output, Self::StaticState, Self::StaticStateMut) { + $( + let $id = $crate::component::call_component_new::<$t>(&slf.$id, &state.$id, &mut state_mut.$id, &mut buffer.${index()}, input.${index()}); + )* + + let components = $components { + $($id : $id.0,)* + }; + let component_state = $non_mut { + $($id : $id.1,)* + }; + let component_state_mut = $mut { + $($id : $id.2,)* + }; + ( + components, + component_state, + component_state_mut + ) + } + } + // And finalize + impl $crate::component::StaticComponentFinalize for $fac { + type FinaliseInput = ($(<$t as $crate::component::StaticComponentFinalize>::FinaliseInput,)*); + + fn component_finalize(slf: &'static Self::Output, state: &'static Self::StaticState, input: Self::FinaliseInput) { + // Just finalize every member + $(<$t as $crate::component::StaticComponentFinalize>::component_finalize(&slf.$id, &state.$id, input.${index()});)* + } + } + } +} + +/// Helper for construct_components. Call that, not this. +#[macro_export] +macro_rules! construct_components_helper { + ({$components_t : ident, $non_mut_t: ident, $mut_t : ident {$($id : ident : $t : path,)*}}, + let $components : ident, $component_state : ident, $component_state_mut : ident = $c_r : expr, $s_r : expr, $m_r : expr, { + $($arg : expr,)* + }) => { + let cbuffer = unsafe {&mut COMPONANT_BUFFER}; + // First call methods one-by-one, assigning the triples to a variable with a name + // we can use again + $( + let $id = $crate::component::call_component_new::<$t>(&$c_r.$id, &$s_r.$id, &mut$m_r.$id, &mut cbuffer.$id, $arg); + )* + // Then split out each of the threes into groups + + let $components = $components_t { + $($id : $id.0,)* + }; + let $component_state = $non_mut_t { + $($id : $id.1,)* + }; + let $component_state_mut = $mut_t { + $($id : $id.2,)* + }; + } +} + +/// Helper for construct_components. Call that, not this, +#[macro_export] +macro_rules! finalize_components_helper { + ({$components_t : ident, $non_mut_t: ident, $mut_t : ident {$($id : ident : $t : path,)*}}, + $c_r : expr, $s_r : expr, { + $($arg : expr,)* + }) => { + { + $(<$t as $crate::component::StaticComponentFinalize>::component_finalize(&$c_r.$id, &$s_r.$id, $arg);)* + } + } +} + +/// const construct a number of grants. +/// Usage: +/// ```ignore +/// use kernel::construct_grants; +/// construct_grants!(kernel_ref, proto_kern, counter, mem_alloc_cap, { +/// grant1 : NUM1, +/// grant2 : NUM2, +/// ... +/// }); +/// ``` +#[macro_export] +macro_rules! 
construct_grants { + ($kernel : expr, $proto : expr, $counter : ident, $cap : expr, { + $($id : ident : $p : expr,)* + }) => { + $(let ($id, $counter) = $proto.create_grant($kernel, $p, $counter, $cap);)* + } +} + +/// A StaticComponent factory for () +pub struct NoComponent; +impl const StaticComponent for NoComponent { + type Output = (); + type StaticState = (); + type StaticStateMut = (); + type BufferBytes = (); + type NewInput<'a> = (); + + fn component_new<'a>( + _slf: &'static Self::Output, + _state: &'static Self::StaticState, + _state_mut: &'static mut Self::StaticStateMut, + _buffer: &'static mut Self::BufferBytes, + _input: Self::NewInput<'a>, + ) -> (Self::Output, Self::StaticState, Self::StaticStateMut) { + ((), (), ()) + } +} +impl StaticComponentFinalize for NoComponent { + type FinaliseInput = (); +} + +/// The StaticComponent trait is quite verbose as it is meant to handle lots of cases. +/// This macro implements the interface. +/// Usage: +/// ```ignore +/// use kernel::simple_static_component; +/// simple_static_component!( +/// // Implement for a factory type +/// impl for MyFactory, +/// // Can inherit from another factory (by reference to the parent) +/// // call the constructor / finalize with the super{} block +/// // the static reference to this parent is accessible via the supr argument. +/// (Inherit = ...)?, +/// // Or inherit by value from other factories (by composition). +/// // These will be constructed automatically (theirs inputs will just prepended to the inputs +/// // this component specifies) +/// // Finalize will also just be called automatically, again just prepended arguments together. +/// // x, y, z etc should be the field names from within your struct. +/// // the actual contained state will be accessed as self.x(.member)?. +/// // X, Y, Z etc should be _factories_ for those fields. +/// // The constructed values will be available in the scope of your closure with the same names. +/// (Contain = (x : X, y : Y, z : Z...) (member)? )?, +/// // The type that will result +/// Output = ..., +/// // How many (mutable) u8's are owned by this output type +/// (BUFFER_BYTES = ...)?, +/// // Other inputs to constructor (runs at compile time) +/// NewInput = ..., +/// // Other inputs to finalize (runs during init) +/// FinInput = ..., +/// // The function for construction. super{...} passes arguments to the inherited type +/// // slf is a reference to where this result will eventually be placed. You can do pointer +/// // arithmetic on it, but cannot read/write via it as the object as not exist at this point. +/// // supr is the 'slf' argument for the super type. +/// // the x, y, x fields will also be available in this scope. +/// | slf, input (,buf)? (,supr)? | (super{...})? {...}, +/// // The function for construction. super{...} passes arguments to the inherited type's +/// // finalize +/// | slf, input (,supr)? | (super{...})? {...} +/// ) +/// ``` +/// +/// If NewInput lifetime argument is needed, use 'a. +#[macro_export] +macro_rules! simple_static_component { + // If no Inherit is defined, add in NoComponent as the parent. + (impl $(<{$($ts:tt)*}>)? for $t : ty $(where {$($wheres: tt)*})?, + $(Contain = ($($field : ident $(.$member:ident)? : $factory : path),*),)? + Output = $ot : ty, + $(BUFFER_BYTES = $bb : expr,)? + NewInput = $it : ty, + FinInput = $ift : ty, + |$slf : ident, $input : ident $(,$b : ident)? 
| {$($new:tt)*}, + |$slf2 : ident, $input2 : ident | { $($finalize:tt)* } + ) => { + $crate::simple_static_component!( + impl $(<{$($ts)*}>)? for $t $(where {$($wheres)*})?, + Inherit = $crate::component::NoComponent, + $(Contain = ($($field $(.$member)? : $factory),*),)? + Output = $ot, + $(BUFFER_BYTES = $bb,)? + NewInput = $it, + FinInput = $ift, + |$slf, $input $(,$b)?, _sup | super{()} {$($new)*}, + |$slf2, $input2, _sup | super{()} {$($finalize)*}); + }; + // If buffer bytes is not specified, add in 0 as the amount + (impl $(<{$($ts:tt)*}>)? for $t : ty $(where {$($wheres :tt)*})?, + Inherit = $inherit : ty, + $(Contain = ($($field : ident $(.$member:ident)? : $factory : path),*),)? + Output = $ot : ty, + NewInput = $it : ty, + FinInput = $ift : ty, + |$slf : ident, $input : ident, $sup : ident | super {$($super : tt)*} {$($new:tt)*}, + |$slf2 : ident, $input2 : ident, $sup2 : ident | super {$($super2 : tt)*} {$($finalize:tt)*} + ) => { + $crate::simple_static_component!( + impl $(<{$($ts)*}>)? for $t $(where {$($wheres)*})?, + Inherit = $inherit, + $(Contain = ($($field $(.$member)? : $factory),*),)? + Output = $ot, + BUFFER_BYTES = 0, + NewInput = $it, + FinInput = $ift, + |$slf, $input, _buffer, $sup | super {$($super)*} {$($new)*}, + |$slf2, $input2, $sup2 | super {$($super2)*} {$($finalize)*} + ); + }; + // Main case. + (impl $(<{$($ts:tt)*}>)? for $t : ty $(where {$($wheres: tt)*})?, + Inherit = $inherit : ty, + $(Contain = ($($field : ident $(.$member:ident)? : $factory : path),*),)? + Output = $ot : ty, + BUFFER_BYTES = $bb : expr, + NewInput = $it : ty, + FinInput = $ift : ty, + |$slf : ident, $input : ident, $b : ident, $sup : ident | super {$($super : tt)*} {$($new:tt)*}, + |$slf2 : ident, $input2 : ident, $sup2 : ident | super {$($super2 : tt)*} {$($finalize:tt)*} + ) => { + impl $(<$($ts)*>)? const $crate::component::StaticComponent for $t where + //$inherit : ~const $crate::component::StaticComponent, + //$inherit : $crate::component::StaticComponentFinalize, + $($($factory : ~const $crate::component::StaticComponent, )*)? + $($($factory : $crate::component::StaticComponentFinalize, )*)? + $( $($wheres)*)? + { + type Output = $ot; + type StaticState = ( + <$inherit as $crate::component::StaticComponent>::Output, // output from inherit + <$inherit as $crate::component::StaticComponent>::StaticState, // its state recursively + $( + // recursive state from contained things + ($(<$factory as $crate::component::StaticComponent>::StaticState,)*) + )? + ); + type StaticStateMut = ( + // TODO: might want a way to set this via the macro + (), + // + <$inherit as $crate::component::StaticComponent>::StaticStateMut, + // recursive state from contained things + $( + ($(<$factory as $crate::component::StaticComponent>::StaticStateMut,)*) + )? + ); + // If we have any contained things, we just inherit their inputs + #[allow(unused_parens)] + type NewInput<'a> = ( + $($(<$factory as $crate::component::StaticComponent>::NewInput<'a>,)*)? // and those for things it contains + $it // this things inputs + ); + + type BufferBytes = ( + [u8; $bb], // our bytes + <$inherit as $crate::component::StaticComponent>::BufferBytes, // for inherited + $( + ($(<$factory as $crate::component::StaticComponent>::BufferBytes,)*) + )? 
+ ); + + fn component_new<'a>($slf: &'static Self::Output, state: &'static Self::StaticState, state_mut: &'static mut Self::StaticStateMut, $b: &'static mut Self::BufferBytes, $input: Self::NewInput<'a>) -> (Self::Output, Self::StaticState, Self::StaticStateMut) { + let inherit_new = <$inherit as $crate::component::StaticComponent>::component_new(&state.0, &state.1, &mut state_mut.1, &mut $b.1, $($super)*); + let $sup = &state.0; + // Also do a similar thing to construct everything this thing will contain + $( + let sub_state_ref = &state.2; + let sub_mut_ref = &mut state_mut.2; + let sub_buf = &mut $b.2; + $( + let $field = <$factory as $crate::component::StaticComponent>::component_new(&$slf.$field$(.$member)?, + &sub_state_ref.${index()}, &mut sub_mut_ref.${index()}, &mut sub_buf.${index()}, $input.${index()}); + let last_input = $input.${length()}; + )* + let $input = last_input; + // currently, $field is a triple of (slf, state, state_mut). + // we need to put together the state / state_mut into one tuple + let sub_state = ($($field.1 ,)*); + let sub_mut = ($($field.2 ,)*); + // And then shadow field to just be slf + $( + let $field = $field.0; + )* + )? + let $b = &mut $b.0; + + ( + {$($new)*}, + (inherit_new.0, inherit_new.1 + $(, sub_state ${ignore(field)})? + ), + ((), inherit_new.2 $(, sub_mut ${ignore(field)})?) + ) + } + } + impl $(<$($ts)*>)? $crate::component::StaticComponentFinalize for $t where + //$inherit : ~const $crate::component::StaticComponent, + //$inherit : $crate::component::StaticComponentFinalize, + $($($factory : ~const $crate::component::StaticComponent, )*)? + $($($factory : $crate::component::StaticComponentFinalize, )*)? + $( $($wheres)*)? + { + #[allow(unused_parens)] + type FinaliseInput = ( + $($(<$factory as $crate::component::StaticComponentFinalize>::FinaliseInput,)*)? // and those for things it contains + $ift + ); + + fn component_finalize($slf2: &'static Self::Output, state: &'static Self::StaticState, $input2: Self::FinaliseInput) { + // First finalize parent + <$inherit as $crate::component::StaticComponentFinalize>::component_finalize(&state.0, &state.1, $($super2)*); + let $sup2 = &state.0; + // Call finalize on all contained components + $( + let sub_state = &state.2; + $( + <$factory as $crate::component::StaticComponentFinalize>::component_finalize( + &$slf2.$field$(.$member)?, &sub_state.${index()}, $input2.${index()}); + let last_input = $input2.${length()}; + )* + let $input2 = last_input; + )? + + { + $($finalize)* + } + } + } + }; +} + +/// Another even less verbose helper for static components. +/// +/// For components which just have a new(...) and (maybe) a fin(&self, ...) where all +/// input arguments just need propagation to the respective method (in order with no extras). +/// +/// Usage: +/// ```ignore +/// kernel::very_simple_component!( impl(<{X,Y, Z}>)? Type, new_method(...) (, fin_method(...))?); +/// // examples +/// kernel::very_simple_component!(impl for Foo, new()); +/// kernel::very_simple_component!(impl for Bar, new(), fin()); +/// kernel::very_simple_component!(impl<{T, V}> for Foo, new()); +/// kernel::very_simple_component!(impl for Bax, new(u8, &'a mut usize), fin(u8,u8)); +/// ``` +/// If your new method needs a lifetime argument, use 'a +#[macro_export] +macro_rules! very_simple_component { + (impl $(<{$($impl:tt)*}>)? for $t : path $(where {$($wheres: tt)*})?, $new : ident ($($new_arg : ty),*) $(, $fin : ident ($($fin_arg : ty),*))? ) => { + $crate::simple_static_component!(impl $(<{$($impl)*}>)? 
for $t $(where {$($wheres)*})?, + Output = Self, + NewInput = ($($new_arg),*), + FinInput = ($($($fin_arg),*)?), + |_slf, _input| { + $crate::very_simple_component!(@call_helper {Self::$new}, _input, $($new_arg)*) + }, + |_slf, _input| { + $(let _ = $crate::very_simple_component!(@call_helper {_slf.$fin}, _input, $($fin_arg)*);)? + } + ); + }; + // One argument is passed directly + (@call_helper {$($f : tt)*}, $v : ident, $t : ty) => { + $($f)*($v) + }; + // Other numbers are broken out of a tuple + (@call_helper {$($f : tt)*}, $v : ident, $($t : ty)*) => { + $($f)*($(${ignore(t)} $v.${index()},)*) + } +} diff --git a/kernel/src/config.rs b/kernel/src/config.rs index 4bf217779..cb29e11c8 100644 --- a/kernel/src/config.rs +++ b/kernel/src/config.rs @@ -67,6 +67,17 @@ pub(crate) struct Config { // is identified, using configuration constants is the most effective // option. pub(crate) debug_panics: bool, + + pub(crate) counted_grant_refs: bool, + + pub(crate) is_cheri: bool, + + pub(crate) contiguous_load_procs: bool, + + pub(crate) static_init: bool, + + /// Whether or not the MMU requires asynchronous configuration + pub(crate) async_mpu_config: bool, } /// A unique instance of `Config` where compile-time configuration options are @@ -78,4 +89,294 @@ pub(crate) const CONFIG: Config = Config { trace_syscalls: cfg!(feature = "trace_syscalls"), debug_load_processes: cfg!(feature = "debug_load_processes"), debug_panics: !cfg!(feature = "no_debug_panics"), + counted_grant_refs: cfg!(feature = "counted_grant_refs"), + is_cheri: cfg!(target_feature = "xcheri"), + static_init: cfg!(feature = "use_static_init"), + contiguous_load_procs: true, + async_mpu_config: cfg!(target_feature = "xcheri"), }; + +/// Trait allows selecting type based on a const param +pub trait CfgControl { + type Out: ?Sized; +} + +impl CfgControl for (*const T, *const U) { + type Out = T; +} +impl CfgControl for (*const T, *const U) { + type Out = U; +} + +/// Selects between T and U based on condition +pub type IfElseT = + <(*const T, *const U) as CfgControl>::Out; + +/// These types are for situations where a feature would change what type is in use. This is better +/// than conditional compilation as a single compilation run can type check all combinations of +/// features. +/// +/// Using NOT_COND (not !COND) as const generics still don't like expressions. 
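+///
+/// A hedged illustration of the selection itself (this mirrors the unit tests
+/// at the bottom of this file):
+///
+/// ```ignore
+/// type Counter = IfElseCfg<u32, (), true>; // u32 when enabled, zero-sized otherwise
+/// let mut c = Counter::new_true(0);
+/// *c.get_true_mut() += 1;
+/// assert_eq!(c.consume_true(), 1);
+/// ```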
+/// +/// Usage: type MyType = IfElseCfg +/// +/// If coming from C and you are used to the pattern of +/// +/// struct Foo { +/// #if SOME_FLAG +/// T1 field_t1; +/// #else +/// T2 field_t2; +/// #endif +/// } +/// +/// Instead write: +/// +/// struct Foo { +/// field : IfElseCfg, +/// } +/// +/// Then, rather than +/// +/// Foo myFoo = ...; +/// #if SOME_FLAG +/// bar(&myFoo.field_t1); +/// #else +/// baz(&myFoo.field_t2); +/// #endif +/// +/// Do +/// +/// let myFoo : Foo = ...; +/// if SOME_FLAG { +/// bar(myFoo.get_true_ref()) +/// } else { +/// baz(myFoo.get_false_ref()) +/// } +/// +/// Or more cleanly: +/// +/// let myFoo : Foo = ...; +/// myFoo.mapRef(bar, baz); +/// +pub struct IfElseCfg(IfElseT) +where + (*const T, *const U): CfgControl; + +pub type TrueCfg = IfElseCfg; +pub type FalseCfg = IfElseCfg; + +impl Clone for IfElseCfg +where + (*const T, *const U): CfgControl, + IfElseT: Clone, +{ + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +impl Copy for IfElseCfg +where + (*const T, *const U): CfgControl, + IfElseT: Copy, +{ +} + +impl Default for IfElseCfg { + fn default() -> Self { + Self::new_true(Default::default()) + } +} + +impl Default for IfElseCfg { + fn default() -> Self { + Self::new_false(Default::default()) + } +} + +impl IfElseCfg { + pub const fn new_true(value: T) -> Self { + Self(value) + } + pub const fn new_false(_value: U) -> Self { + panic!() + } + pub fn map_ref(&self, true_f: FT, _false_f: FF) -> R + where + FT: FnOnce(&T) -> R, + FF: FnOnce(&U) -> R, + { + true_f(&self.0) + } + pub fn map_mut(&mut self, true_f: FT, _false_f: FF) -> R + where + FT: FnOnce(&mut T) -> R, + FF: FnOnce(&mut U) -> R, + { + true_f(&mut self.0) + } + pub const fn get_true_ref(&self) -> &T { + &self.0 + } + pub fn get_true_mut(&mut self) -> &mut T { + &mut self.0 + } + pub fn get_false_ref(&self) -> &U { + panic!() + } + pub fn get_false_mut(&mut self) -> &mut U { + panic!() + } + pub fn consume_true(self) -> T { + self.0 + } + pub fn consume_false(self) -> U { + panic!() + } +} + +impl IfElseCfg { + pub const fn new_true(_value: T) -> Self { + panic!() + } + pub const fn new_false(value: U) -> Self { + Self(value) + } + pub fn map_ref(&self, _true_f: FT, false_f: FF) -> R + where + FT: FnOnce(&T) -> R, + FF: FnOnce(&U) -> R, + { + false_f(&self.0) + } + pub fn map_mut(&mut self, _true_f: FT, false_f: FF) -> R + where + FT: FnOnce(&mut T) -> R, + FF: FnOnce(&mut U) -> R, + { + false_f(&mut self.0) + } + pub fn get_true_ref(&self) -> &T { + panic!() + } + pub fn get_true_mut(&mut self) -> &mut T { + panic!() + } + pub const fn get_false_ref(&self) -> &U { + &self.0 + } + pub fn get_false_mut(&mut self) -> &mut U { + &mut self.0 + } + pub fn consume_true(self) -> T { + panic!() + } + pub fn consume_false(self) -> U { + self.0 + } +} + +/// Correctly uses the IfElseCfg type depending on one of the config options in the global CONFIG +/// struct. +/// If no false type is provided, it will be unit. +/// +/// Usage: type MyType = TIfCfg!(FeatureFlag, TrueType [, FalseType]?); +/// +/// e.g.: +/// +/// type TracingType = TIfCfg!(trace_syscalls, TypeNeedIfTracing); +/// +/// Expands to +/// +/// type TracingType = IfElseCfg +/// +/// Usage in conjunction with ! 
(or never::Never), +/// +/// If you want to eliminate a enum variant in certain configurations, do: +/// +/// See OnlyInCfg and NotInCfg for shorter forms +/// +/// ```text +/// use kernel::TIfCfg; +/// use misc::never::Never; +/// enum E { +/// Var1, +/// Var2(TIfCfg!(debug_panics, (), Never)), // variant only exists with config debug_panics +/// Var3(TIfCfg!(debug_panics, Never, ())), // variant only exists without config debug_panics +/// } +/// ``` +/// +#[macro_export] +macro_rules! TIfCfg { + ($feature : ident, $TIf : ty, $TElse : ty) => + ($crate::config::IfElseCfg<$TIf, $TElse, {$crate::config::CONFIG. $feature}>); + ($feature : ident, $T : ty) => + ($crate::TIfCfg!($feature, $T, ())); +} + +#[macro_export] +macro_rules! OnlyInCfg { + ($feature : ident, $t : ty) => { + $crate::TIfCfg!($feature, $t, misc::never::Never) + }; + ($feature : ident) => { + $crate::TIfCfg!($feature, (), misc::never::Never) + }; +} + +#[macro_export] +macro_rules! NotInCfg { + ($feature : ident, $t : ty) => { + $crate::TIfCfg!($feature, misc::never::Never, $t) + }; + ($feature : ident) => { + $crate::TIfCfg!($feature, misc::never::Never, ()) + }; +} + +#[cfg(test)] +mod tests { + use crate::config::IfElseCfg; + use core::mem::size_of; + // For use as, e.g., a counter that we don't need in some situations. + // A more normal use would be type ConfigU32 = IfElseCfg + type ConfigU32 = IfElseCfg; + type AsU32 = ConfigU32; + type AsUnit = ConfigU32; + + #[test] + fn test_true() { + // Check size + assert_eq!(size_of::(), size_of::()); + // Check use + let mut val: AsU32 = AsU32::new_true(77); + *val.get_true_mut() = 66; + assert_eq!(val.consume_true(), 66); + } + + #[test] + fn test_false() { + // Check size + assert_eq!(size_of::(), size_of::<()>()); + // Check use + let val: AsUnit = AsUnit::new_false(()); + assert_eq!(val.consume_false(), ()); + } + + #[test] + #[should_panic] + fn test_wrong_true() { + let val: AsU32 = AsU32::new_true(77); + val.get_false_ref(); + } + + #[test] + #[should_panic] + fn test_wrong_false() { + let val: AsUnit = AsUnit::new_false(()); + val.get_true_ref(); + } +} diff --git a/kernel/src/debug.rs b/kernel/src/debug.rs index 3b350597a..bc3e5a3a0 100644 --- a/kernel/src/debug.rs +++ b/kernel/src/debug.rs @@ -56,14 +56,13 @@ use core::str; use crate::collections::queue::Queue; use crate::collections::ring_buffer::RingBuffer; -use crate::hil; use crate::platform::chip::Chip; -use crate::process::Process; use crate::process::ProcessPrinter; use crate::utilities::binary_write::BinaryToWriteWrapper; use crate::utilities::cells::NumericCellExt; use crate::utilities::cells::{MapCell, TakeCell}; use crate::ErrorCode; +use crate::{hil, ProcEntry}; /// This trait is similar to std::io::Write in that it takes bytes instead of a string (contrary to /// core::fmt::Write), but io::Write isn't available in no_std (due to std::io::Error not being @@ -105,16 +104,34 @@ pub unsafe fn panic_print( writer: &mut W, panic_info: &PanicInfo, nop: &dyn Fn(), - processes: &'static [Option<&'static dyn Process>], + processes: &'static [ProcEntry], chip: &'static Option<&'static C>, process_printer: &'static Option<&'static PP>, +) { + panic_print_2( + writer, + panic_info, + nop, + processes, + chip.map(|c| c), + process_printer.map(|pp| pp), + ); +} + +pub unsafe fn panic_print_2( + writer: &mut W, + panic_info: &PanicInfo, + nop: &dyn Fn(), + processes: &'static [ProcEntry], + chip: Option<&'static C>, + process_printer: Option<&'static PP>, ) { panic_begin(nop); panic_banner(writer, panic_info); // Flush 
     flush(writer);
-    panic_cpu_state(chip, writer);
-    panic_process_info(processes, process_printer, writer);
+    panic_cpu_state_2(chip, writer);
+    panic_process_info_2(processes, process_printer, writer);
 }
 
 /// Tock default panic routine.
@@ -125,7 +142,7 @@ pub unsafe fn panic<L: hil::led::Led, W: Write + IoWrite, C: Chip, PP: ProcessPrinter>(
-    processes: &'static [Option<&'static dyn Process>],
+    processes: &'static [ProcEntry],
     chip: &'static Option<&'static C>,
     process_printer: &'static Option<&'static PP>,
 ) -> ! {
@@ -172,6 +189,10 @@ pub unsafe fn panic_cpu_state<W: Write, C: Chip>(
     chip: &'static Option<&'static C>,
     writer: &mut W,
 ) {
+    panic_cpu_state_2(chip.map(|c| c), writer);
+}
+
+pub unsafe fn panic_cpu_state_2<W: Write, C: Chip>(chip: Option<&'static C>, writer: &mut W) {
     chip.map(|c| {
         c.print_state(writer);
     });
@@ -181,15 +202,23 @@ pub unsafe fn panic_cpu_state<W: Write, C: Chip>(
 ///
 /// **NOTE:** The supplied `writer` must be synchronous.
 pub unsafe fn panic_process_info<PP: ProcessPrinter, W: Write>(
-    procs: &'static [Option<&'static dyn Process>],
+    procs: &'static [ProcEntry],
     process_printer: &'static Option<&'static PP>,
     writer: &mut W,
+) {
+    panic_process_info_2(procs, process_printer.map(|pp| pp), writer)
+}
+
+pub unsafe fn panic_process_info_2<PP: ProcessPrinter, W: Write>(
+    procs: &'static [ProcEntry],
+    process_printer: Option<&'static PP>,
+    writer: &mut W,
 ) {
     process_printer.map(|printer| {
         // print data about each process
         let _ = writer.write_fmt(format_args!("\r\n---| App Status |---\r\n"));
         for idx in 0..procs.len() {
-            procs[idx].map(|process| {
+            procs[idx].proc_ref.get().map(|process| {
                 // Print the memory map and basic process info.
                 //
                 // Because we are using a synchronous printer we do not need to
@@ -322,7 +351,7 @@ pub fn debug_enqueue_fmt(args: Arguments) {
 }
 
 pub fn debug_flush_queue_() {
-    let writer = unsafe { get_debug_writer() };
+    let mut writer = get_debug_writer();
 
     unsafe { DEBUG_QUEUE.as_deref_mut() }.map(|buffer| {
         buffer.dw.map(|dw| {
@@ -363,8 +392,9 @@ macro_rules! debug_flush_queue {
 
 /// Wrapper type that we need a mutable reference to for the core::fmt::Write
 /// interface.
+#[derive(Copy, Clone)]
 pub struct DebugWriterWrapper {
-    dw: MapCell<&'static DebugWriter>,
+    dw: &'static DebugWriter,
 }
 
 /// Main type that we need an immutable reference to so we can share it with
@@ -382,31 +412,30 @@ pub struct DebugWriter {
 
 /// Static variable that holds the kernel's reference to the debug tool. This is
 /// needed so the debug!() macros have a reference to the object to use.
-static mut DEBUG_WRITER: Option<&'static mut DebugWriterWrapper> = None;
+static mut DEBUG_WRITER: Option<DebugWriterWrapper> = None;
 
-unsafe fn try_get_debug_writer() -> Option<&'static mut DebugWriterWrapper> {
-    DEBUG_WRITER.as_deref_mut()
+fn try_get_debug_writer() -> Option<DebugWriterWrapper> {
+    unsafe { DEBUG_WRITER }
 }
 
-unsafe fn get_debug_writer() -> &'static mut DebugWriterWrapper {
+fn get_debug_writer() -> DebugWriterWrapper {
     try_get_debug_writer().unwrap() // Unwrap fail = Must call `set_debug_writer_wrapper` in board initialization.
 }
 
 /// Function used by board main.rs to set a reference to the writer.
-pub unsafe fn set_debug_writer_wrapper(debug_writer: &'static mut DebugWriterWrapper) {
-    DEBUG_WRITER = Some(debug_writer);
+/// TODO: This should really pass DebugWriterWrapper by value as it already hides a reference.
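+///
+/// A minimal board-setup sketch (hedged: the statics and their construction are
+/// illustrative, not the exact component wiring a real board uses):
+///
+/// ```ignore
+/// // DebugWriterWrapper::new is const in this patch, so the wrapper can be a static.
+/// static WRAPPER: DebugWriterWrapper = DebugWriterWrapper::new(&DEBUG_WRITER);
+/// // Safety: called once during board init, before any debug!() output.
+/// unsafe { kernel::debug::set_debug_writer_wrapper(&WRAPPER) };
+/// ```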
+pub unsafe fn set_debug_writer_wrapper(debug_writer: &'static DebugWriterWrapper) { + DEBUG_WRITER = Some(*debug_writer); } impl DebugWriterWrapper { - pub fn new(dw: &'static DebugWriter) -> DebugWriterWrapper { - DebugWriterWrapper { - dw: MapCell::new(dw), - } + pub const fn new(dw: &'static DebugWriter) -> DebugWriterWrapper { + DebugWriterWrapper { dw } } } impl DebugWriter { - pub fn new( + pub const fn new( uart: &'static dyn hil::uart::Transmit, out_buffer: &'static mut [u8], internal_buffer: &'static mut RingBuffer<'static, u8>, @@ -433,7 +462,7 @@ impl DebugWriter { // Can only publish if we have the output_buffer. If we don't that is // fine, we will do it when the transmit done callback happens. self.internal_buffer.map(|ring_buffer| { - if let Some(out_buffer) = self.output_buffer.take() { + while let Some(out_buffer) = self.output_buffer.take() { let mut count = 0; for dst in out_buffer.iter_mut() { @@ -455,6 +484,9 @@ impl DebugWriter { } else { self.output_buffer.put(None); } + } else { + self.output_buffer.put(Some(out_buffer)); + break; } } }); @@ -486,49 +518,42 @@ impl hil::uart::TransmitClient for DebugWriter { /// Pass through functions. impl DebugWriterWrapper { fn increment_count(&self) { - self.dw.map(|dw| { - dw.increment_count(); - }); + self.dw.increment_count() } fn get_count(&self) -> usize { - self.dw.map_or(0, |dw| dw.get_count()) + self.dw.get_count() } fn publish_bytes(&self) { - self.dw.map(|dw| { - dw.publish_bytes(); - }); + self.dw.publish_bytes() } fn extract(&self) -> Option<&mut RingBuffer<'static, u8>> { - self.dw.map_or(None, |dw| dw.extract()) + self.dw.extract() } } impl IoWrite for DebugWriterWrapper { fn write(&mut self, bytes: &[u8]) { const FULL_MSG: &[u8] = b"\n*** DEBUG BUFFER FULL ***\n"; - self.dw.map(|dw| { - dw.internal_buffer.map(|ring_buffer| { - let available_len_for_msg = - ring_buffer.available_len().saturating_sub(FULL_MSG.len()); + self.dw.internal_buffer.map(|ring_buffer| { + let available_len_for_msg = ring_buffer.available_len().saturating_sub(FULL_MSG.len()); - if available_len_for_msg >= bytes.len() { - for &b in bytes { - ring_buffer.enqueue(b); - } - } else { - for &b in &bytes[..available_len_for_msg] { - ring_buffer.enqueue(b); - } - // When the buffer is close to full, print a warning and drop the current - // string. - for &b in FULL_MSG { - ring_buffer.enqueue(b); - } + if available_len_for_msg >= bytes.len() { + for &b in bytes { + ring_buffer.enqueue(b); } - }); + } else { + for &b in &bytes[..available_len_for_msg] { + ring_buffer.enqueue(b); + } + // When the buffer is close to full, print a warning and drop the current + // string. 
+ for &b in FULL_MSG { + ring_buffer.enqueue(b); + } + } }); } } @@ -541,39 +566,39 @@ impl Write for DebugWriterWrapper { } pub fn debug_print(args: Arguments) { - let writer = unsafe { get_debug_writer() }; + let mut writer = get_debug_writer(); - let _ = write(writer, args); + let _ = write(&mut writer, args); writer.publish_bytes(); } pub fn debug_println(args: Arguments) { - let writer = unsafe { get_debug_writer() }; + let mut writer = get_debug_writer(); - let _ = write(writer, args); + let _ = write(&mut writer, args); let _ = writer.write_str("\r\n"); writer.publish_bytes(); } -fn write_header(writer: &mut DebugWriterWrapper, (file, line): &(&'static str, u32)) -> Result { +fn write_header(mut writer: DebugWriterWrapper, (file, line): &(&'static str, u32)) -> Result { writer.increment_count(); let count = writer.get_count(); writer.write_fmt(format_args!("TOCK_DEBUG({}): {}:{}: ", count, file, line)) } pub fn debug_verbose_print(args: Arguments, file_line: &(&'static str, u32)) { - let writer = unsafe { get_debug_writer() }; + let mut writer = get_debug_writer(); let _ = write_header(writer, file_line); - let _ = write(writer, args); + let _ = write(&mut writer, args); writer.publish_bytes(); } pub fn debug_verbose_println(args: Arguments, file_line: &(&'static str, u32)) { - let writer = unsafe { get_debug_writer() }; + let mut writer = get_debug_writer(); let _ = write_header(writer, file_line); - let _ = write(writer, args); + let _ = write(&mut writer, args); let _ = writer.write_str("\r\n"); writer.publish_bytes(); } @@ -586,10 +611,10 @@ macro_rules! debug { debug!("") }); ($msg:expr $(,)?) => ({ - $crate::debug::debug_println(format_args!($msg)) + $crate::debug::debug_println(format_args!($msg)) }); ($fmt:expr, $($arg:tt)+) => ({ - $crate::debug::debug_println(format_args!($fmt, $($arg)+)) + $crate::debug::debug_println(format_args!($fmt, $($arg)+)) }); } @@ -601,18 +626,18 @@ macro_rules! debug_verbose { debug_verbose!("") }); ($msg:expr $(,)?) => ({ - $crate::debug::debug_verbose_println(format_args!($msg), { - // TODO: Maybe make opposite choice of panic!, no `static`, more - // runtime code for less static data - static _FILE_LINE: (&'static str, u32) = (file!(), line!()); - &_FILE_LINE - }) + $crate::debug::debug_verbose_println(format_args!($msg), { + // TODO: Maybe make opposite choice of panic!, no `static`, more + // runtime code for less static data + static _FILE_LINE: (&'static str, u32) = (file!(), line!()); + &_FILE_LINE + }) }); ($fmt:expr, $($arg:tt)+) => ({ - $crate::debug::debug_verbose_println(format_args!($fmt, $($arg)+), { - static _FILE_LINE: (&'static str, u32) = (file!(), line!()); - &_FILE_LINE - }) + $crate::debug::debug_verbose_println(format_args!($fmt, $($arg)+), { + static _FILE_LINE: (&'static str, u32) = (file!(), line!()); + &_FILE_LINE + }) }); } diff --git a/kernel/src/dynamic_deferred_call.rs b/kernel/src/dynamic_deferred_call.rs index c8f1b12ac..219501d76 100644 --- a/kernel/src/dynamic_deferred_call.rs +++ b/kernel/src/dynamic_deferred_call.rs @@ -65,9 +65,11 @@ //! ); //! 
``` +use crate::simple_static_component; use core::cell::Cell; use crate::utilities::cells::OptionalCell; +use crate::utilities::singleton_checker::SingletonChecker; /// Kernel-global dynamic deferred call instance /// @@ -80,8 +82,9 @@ pub struct DynamicDeferredCallClientState { scheduled: Cell, client: OptionalCell<&'static dyn DynamicDeferredCallClient>, } -impl Default for DynamicDeferredCallClientState { - fn default() -> DynamicDeferredCallClientState { + +impl DynamicDeferredCallClientState { + pub const fn new() -> Self { DynamicDeferredCallClientState { scheduled: Cell::new(false), client: OptionalCell::empty(), @@ -89,6 +92,12 @@ impl Default for DynamicDeferredCallClientState { } } +impl Default for DynamicDeferredCallClientState { + fn default() -> DynamicDeferredCallClientState { + Self::new() + } +} + /// Dynamic deferred call /// /// This struct manages and calls dynamically (at runtime) registered @@ -102,6 +111,111 @@ pub struct DynamicDeferredCall { call_pending: Cell, } +pub struct ProtoDynamicDeferredCallUnsized { + counter: usize, + client_states: T, +} + +pub type ProtoDynamicDeferredCall = + ProtoDynamicDeferredCallUnsized<[DynamicDeferredCallClientState]>; +pub type ProtoDynamicDeferredCallSized = + ProtoDynamicDeferredCallUnsized<[DynamicDeferredCallClientState; N]>; + +impl ProtoDynamicDeferredCallSized { + /// Create a prototype for a DynamicDeferredCall. You can register calls with this, and then + /// complete it later. + pub const fn new() -> Self { + const DDCS: DynamicDeferredCallClientState = DynamicDeferredCallClientState::new(); + Self { + counter: 0, + client_states: [DDCS; N], + } + } + + /// Complete constructing the DynamicDeferredCall. + /// Once this is done, the completed client_states can be copied to the global. + pub const fn complete( + self, + client_states: &'static [DynamicDeferredCallClientState], + ) -> (DynamicDeferredCall, [DynamicDeferredCallClientState; N]) { + ( + DynamicDeferredCall::new_with_counter(client_states, self.counter), + self.client_states, + ) + } +} + +impl ProtoDynamicDeferredCall { + pub const fn register( + &mut self, + ddc_client: &'static dyn DynamicDeferredCallClient, + ) -> Option { + let ctr = self.counter; + if ctr < self.client_states.len() { + self.client_states[ctr].client = OptionalCell::new(ddc_client); + self.counter = ctr + 1; + Some(DeferredCallHandle(ctr)) + } else { + None + } + } +} + +/// Dynamic deferred calls with the array inline. +pub struct DynamicCallsWithArray { + calls: DynamicDeferredCall, + array: [DynamicDeferredCallClientState; SLOTS], +} + +impl DynamicCallsWithArray { + pub const fn get(&self) -> &DynamicDeferredCall { + &self.calls + } +} + +/// Constructs (and registers) the structure to contain Dynamic Deferred Calls +/// Expects a ProtoDynamicDeferredCall to already have been constructed, and every call to have +/// been registered. +/// Usage: +/// ```ignore +/// #![feature(macro_metavar_expr)] +/// // Inside your kernel::define_components! include this component +/// kernel::define_components!( +/// // ... 
+/// dyn_def : kernel::dynamic_deferred_call::DynamicDeferredCallComponent::<2>, +/// ); +/// // Inside your const-init construct the prototype +/// use kernel::dynamic_deferred_call::ProtoDynamicDeferredCallSized; +/// let mut deferred = ProtoDynamicDeferredCallSized::<2>::new(); +/// construct_components!( +/// // (a reference to the deferred calls might be an argument to other constructors) +/// (&mut deferred), // some other component that uses deferred calls +/// // finally, in construct_components!, pass the prototype to the component +/// (deferred) // The dyn_def +/// ); +/// ``` +pub struct DynamicDeferredCallComponent {} + +simple_static_component!(impl<{const SLOTS: usize}> for DynamicDeferredCallComponent::, + Output = DynamicCallsWithArray::, + NewInput = (ProtoDynamicDeferredCallSized::, &'a mut SingletonChecker), + FinInput = (), + | slf, input | { + crate::assert_single!(input.1); + let (calls, array) = input.0.complete(&slf.array); + DynamicCallsWithArray { + calls, + array + } + }, + | slf, _input | { + unsafe { + // Safety: we used the singleton checker to make sure we didn't construct more than one + DynamicDeferredCall::set_global_instance(&slf.calls); + } + } +); + impl DynamicDeferredCall { /// Construct a new dynamic deferred call implementation /// @@ -111,10 +225,19 @@ impl DynamicDeferredCall { /// /// The `clients` array can be initialized using the implementation of [Default] /// for the [DynamicDeferredCallClientState]. - pub fn new(client_states: &'static [DynamicDeferredCallClientState]) -> DynamicDeferredCall { + pub const fn new( + client_states: &'static [DynamicDeferredCallClientState], + ) -> DynamicDeferredCall { + Self::new_with_counter(client_states, 0) + } + + const fn new_with_counter( + client_states: &'static [DynamicDeferredCallClientState], + counter: usize, + ) -> DynamicDeferredCall { DynamicDeferredCall { client_states, - handle_counter: Cell::new(0), + handle_counter: Cell::new(counter), call_pending: Cell::new(false), } } diff --git a/kernel/src/easm.rs b/kernel/src/easm.rs new file mode 100644 index 000000000..d82a4df29 --- /dev/null +++ b/kernel/src/easm.rs @@ -0,0 +1,53 @@ +//! A slightly better asm context than asm!. +//! Allows inline concat with ';' and provides some FOR_EACH / FOR_RANGE style helpers +//! Some of this has become RISCV specific, probably best to factor that out + +#[macro_export] +macro_rules! easm_help { + // Usage FOR_EACH(Var in [...] : "code") + // If Var were "v" Code should refer to "\\v" + // "FOR_N" will expand to a completely unrolled loop. + {@PROC($ln : expr, FOR_EACH($v : literal in [$($vals:literal),*] : $($code : expr)+), $($ins:tt)*) -> @RES($($out:tt)*)} => + // It is hard to do variable substitution in rust without declaring another macro + // Might as well asm macros here + {$crate::easm_help!(@PROC(concat!($ln,"f"), $($ins)*) -> @RES($($out)* concat!( + ".macro _foreach_help_", $ln, " ", $v, " + ", $($code),+ ," + .endm + .set FOR_N, 0 + ", $("_foreach_help_", $ln, " ", $vals, "; .set FOR_N, FOR_N + 1;"),* + ),))}; + // Usage FOR_RANGE(Var in lower ... upper "code") + // Bit of a hack, will only work with a limited range of 0..32, but gives literals like 3 instead of 1+1+1. + {@PROC($ln : expr, FOR_RANGE($v : literal in $l : literal .. 
$u: literal : $($code : expr)+), $($ins:tt)*) -> @RES($($out:tt)*)} => + {$crate::easm_help!(@PROC($ln, FOR_EACH($v + in ["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28","29","30","31","32"] + : ".if \\" $v " >= " $l " && \\" $v "<" $u "\n" $($code)+ " \n .endif\n"), $($ins)*) -> @RES($($out)*))}; + + // ; starts C style string concat (until next ,) for easier macro insertion + {@PROC($ln : expr, ; $($As : expr)* , $($ins:tt)*) -> @RES($($out:tt)*)} => + {$crate::easm_help!(@PROC($ln, $($ins)*) -> @RES($($out)* concat!($($As),*),))}; + + // Unwrap when nothing left to process + {@PROC($ln : expr,) -> @RES($($out:tt)*)} => + {core::arch::asm!($($out)*)}; + + // Output a common prelude to include helpers. + // Currently not being used. + {@PROC($ln : expr, @PRELUDE, $($ins:tt)*) -> @RES($($out:tt)*)} => + {$crate::easm_help!(@PROC($ln, $($ins)*) -> @RES( + "", $($out)*))}; + + // Not one of the new easm terms, pass it through + {@PROC($ln : expr, $e : tt $($ins:tt)*) -> @RES($($out:tt)*)} => + {$crate::easm_help!(@PROC($ln, $($ins)*) -> @RES($($out)* $e))}; + {@PROC($ln : expr, $e : tt) -> @RES($($out:tt)*)} => + {$crate::easm_help!(@PROC($ln,) -> @RES($($out)* $e))}; +} + +#[macro_export] +macro_rules! easm { + // Wrap in a @PROC(...) -> @RES(...) term to ensure that partial results parse. + {$($tail:tt)*} => + {$crate::easm_help!(@PROC(line!(), @PRELUDE, $($tail)*) -> @RES())}; +} diff --git a/kernel/src/errorcode.rs b/kernel/src/errorcode.rs index 22cdcbb93..19a0701b5 100644 --- a/kernel/src/errorcode.rs +++ b/kernel/src/errorcode.rs @@ -43,6 +43,19 @@ pub enum ErrorCode { NOACK = 13, } +// Implementing a few tuple destruction's makes ? work better + +impl From<(ErrorCode, T)> for ErrorCode { + fn from(value: (ErrorCode, T)) -> Self { + value.0 + } +} +impl From<(ErrorCode, T, U)> for ErrorCode { + fn from(value: (ErrorCode, T, U)) -> Self { + value.0 + } +} + impl From for usize { fn from(err: ErrorCode) -> usize { err as usize diff --git a/kernel/src/grant.rs b/kernel/src/grant.rs index c71ee99ef..821f2681e 100644 --- a/kernel/src/grant.rs +++ b/kernel/src/grant.rs @@ -124,19 +124,30 @@ //! the closure. ▼ //! 
``` +use crate::cheri::CPtrOps; +use core::cell::{Cell, Ref, RefCell, RefMut}; use core::cmp; +use core::convert::Into; use core::marker::PhantomData; +use core::mem::transmute; use core::mem::{align_of, size_of}; use core::ops::{Deref, DerefMut}; use core::ptr::{write, NonNull}; use core::slice; +use misc::take_borrow::TakeBorrow; +use crate::collections::list::PanicDeref; +use crate::collections::safe_buf::BufLength; +use crate::config::CONFIG; use crate::kernel::Kernel; -use crate::process::{Error, Process, ProcessCustomGrantIdentifer, ProcessId}; -use crate::processbuffer::{ReadOnlyProcessBuffer, ReadWriteProcessBuffer}; -use crate::processbuffer::{ReadOnlyProcessBufferRef, ReadWriteProcessBufferRef}; -use crate::upcall::{Upcall, UpcallError, UpcallId}; -use crate::ErrorCode; +use crate::process::{Error, Process, ProcessCustomGrantIdentifier, ProcessId}; +use crate::processbuffer::{ + raw_processbuf_to_roprocessslice, raw_processbuf_to_rwprocessslice, ReadOnlyProcessBuffer, + ReadOnlyProcessBufferRef, ReadWriteProcessBuffer, ReadWriteProcessBufferRef, + ReadableProcessBuffer, ReadableProcessSlice, WriteableProcessSlice, +}; +use crate::upcall::{PUpcall, Upcall, UpcallError, UpcallId}; +use crate::{capabilities, debug, process, ErrorCode, ProcEntry, TIfCfg}; /// Tracks how many upcalls a grant instance supports automatically. pub trait UpcallSize { @@ -179,29 +190,34 @@ impl AllowRwSize for AllowRwCount { /// non-T part of grant). /// /// Example layout of full grant belonging to a single app and driver: +/// (If sizeof:: were 4 and align_of:: were 8. /// /// ```text,ignore /// 0x003FFC8 ┌────────────────────────────────────┐ /// │ T | -/// 0x003FFxx ├ ───────────────────────── ┐ K | -/// │ Padding (ensure T aligns)| e | -/// 0x003FF44 ├ ───────────────────────── | r | -/// │ SavedAllowRwN | n | +/// 0x003FFxx ├ ───────────────────────── ┐ | +/// │ Padding (ensure T aligns)| | +/// 0x003FF70 ├ ───────────────────────── | | +/// │ SavedAllowRwN | K | /// │ ... | e | G -/// │ SavedAllowRw1 | l | r -/// │ SavedAllowRw0 | | a -/// 0x003FF44 ├ ───────────────────────── | O | n -/// │ SavedAllowRoN | w | t -/// │ ... | n | -/// │ SavedAllowRo1 | e | M -/// │ SavedAllowRo0 | d | e -/// 0x003FF30 ├ ───────────────────────── | | m -/// │ SavedUpcallN | D | o -/// │ ... | a | r -/// │ SavedUpcall1 | t | y -/// │ SavedUpcall0 | a | -/// 0x003FF24 ├ ───────────────────────── | | -/// │ Counters (usize) | | +/// │ SavedAllowRw1 | r | r +/// │ SavedAllowRw0 | n | a +/// 0x003FF60 ├ ───────────────────────── | e | n +/// │ SavedAllowRoN | l | t +/// │ ... | | +/// │ SavedAllowRo1 | O | M +/// │ SavedAllowRo0 | w | e +/// 0x003FF50 ├ ───────────────────────── | n | m +/// │ SavedUpcallN | e | o +/// │ ... | d | r +/// │ SavedUpcall1 | | y +/// │ SavedUpcall0 | D | +/// 0x003FF40 ├ ───────────────────────── | a | +/// │ Padding (align upcall) | t | +/// 0x003FF38 ├ ───────────────────────── | a | +/// │ RefCell<> | | (only if feature grant_refs) +/// 0x003FF28 ├ ───────────────────────── | | +/// │ Counters (Cell) | | /// 0x003FF20 └────────────────────────────────────┘ /// ``` /// @@ -228,13 +244,13 @@ struct EnteredGrantKernelManagedLayout<'a> { grant_num: usize, /// The location of the counters structure for the grant. - counters_ptr: *mut usize, + counters_ptr: *const Cell, /// Pointer to the array of saved upcalls. - upcalls_array: *mut SavedUpcall, + upcalls_array: *const SavedUpcall, /// Pointer to the array of saved read-only allows. 
- allow_ro_array: *mut SavedAllowRo, + allow_ro_array: *const SavedAllowRo, /// Pointer to the array of saved read-write allows. - allow_rw_array: *mut SavedAllowRw, + allow_rw_array: *const SavedAllowRw, } /// Represents the number of the upcall elements in the kernel owned section of @@ -256,9 +272,21 @@ struct GrantDataSize(usize); #[derive(Copy, Clone)] struct GrantDataAlign(usize); +/// The RefCell used in the Grant layout that contains the 'T'. The T is stored discontinuously, but +/// nominally belongs to the RefCell. The bool records if a Read-Only reference has been leaked. +type LayoutRefCell = RefCell>; + +/// The fixed-size portion of the kernel managed layout +#[repr(C)] +struct KernelManagedLayoutFixed { + counters: Cell, + // Not RefCell<(bool,T)> in case T has to be very aligned, which would bloat the grant + ref_cell: LayoutRefCell, +} + impl<'a> EnteredGrantKernelManagedLayout<'a> { - /// Reads the specified pointer as the base of the kernel owned grant region - /// that has previously been initialized. + /// Reads the specified pointer as the base of the kernel owned grant + /// region. /// /// # Safety /// @@ -267,22 +295,23 @@ impl<'a> EnteredGrantKernelManagedLayout<'a> { /// `EnteredGrantKernelManagedLayout` for the given `base_ptr` at the same /// time, otherwise multiple mutable references to the same upcall/allow /// slices could be created. + #[inline] unsafe fn read_from_base( base_ptr: NonNull, process: &'a dyn Process, grant_num: usize, ) -> Self { - let counters_ptr = base_ptr.as_ptr() as *mut usize; - let counters_val = counters_ptr.read(); + // Safety: the requirements of as_ref are the same as this function. + let fixed = base_ptr.cast::().as_ref(); + let counters_ptr = base_ptr.as_ptr() as *const Cell; // Parse the counters field for each of the fields - let [_, _, allow_ro_num, upcalls_num] = u32::to_be_bytes(counters_val as u32); + let [_, _, allow_ro_num, upcalls_num] = u32::to_be_bytes(fixed.counters.get() as u32); - // Skip over the counter usize, then the stored array of `SavedAllowRo` - // items and `SavedAllowRw` items. - let upcalls_array = counters_ptr.add(1) as *mut SavedUpcall; - let allow_ro_array = upcalls_array.add(upcalls_num as usize) as *mut SavedAllowRo; - let allow_rw_array = allow_ro_array.add(allow_ro_num as usize) as *mut SavedAllowRw; + let upcalls_array = ((fixed as *const KernelManagedLayoutFixed).add(1) as *const u8) + .add(Self::PADDING_BEFORE_ARRAY) as *const SavedUpcall; + let allow_ro_array = upcalls_array.add(upcalls_num.into()) as *const SavedAllowRo; + let allow_rw_array = allow_ro_array.add(allow_ro_num.into()) as *const SavedAllowRw; Self { process, @@ -294,6 +323,10 @@ impl<'a> EnteredGrantKernelManagedLayout<'a> { } } + // Padding needed after the counts to ensure that array elements are aligned enough + const PADDING_BEFORE_ARRAY: usize = + (size_of::()).wrapping_neg() % align_of::(); + /// Creates a layout from the specified pointer and lengths of arrays and /// initializes the kernel owned portion of the layout. /// @@ -304,6 +337,7 @@ impl<'a> EnteredGrantKernelManagedLayout<'a> { /// not be any other `EnteredGrantKernelManagedLayout` for /// the given `base_ptr` at the same time, otherwise multiple mutable /// references to the same upcall/allow slices could be created. 
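+    ///
+    /// A sketch of the packed counters word this initializes (inferred from the
+    /// `u32::from_be_bytes` call below, for a hypothetical 3-upcall, 2-RO, 1-RW grant):
+    ///
+    /// ```text,ignore
+    /// counter = u32::from_be_bytes([0, allow_rw, allow_ro, upcalls])
+    ///         = u32::from_be_bytes([0, 1, 2, 3])
+    ///         = 0x00010203
+    /// ```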
+ #[inline] unsafe fn initialize_from_counts( base_ptr: NonNull, upcalls_num_val: UpcallItems, @@ -312,7 +346,9 @@ impl<'a> EnteredGrantKernelManagedLayout<'a> { process: &'a dyn Process, grant_num: usize, ) -> Self { - let counters_ptr = base_ptr.as_ptr() as *mut usize; + debug_assert_eq!((base_ptr.as_ptr() as usize) % align_of::(), 0); + let kmlf_ref = base_ptr.cast::().as_ref(); + let counters_ptr = core::ptr::addr_of!((*kmlf_ref).counters) as *mut Cell; // Create the counters usize value by correctly packing the various // counts into 8 bit fields. @@ -320,14 +356,16 @@ impl<'a> EnteredGrantKernelManagedLayout<'a> { u32::from_be_bytes([0, allow_rw_num_val.0, allow_ro_num_val.0, upcalls_num_val.0]) as usize; - let upcalls_array = counters_ptr.add(1) as *mut SavedUpcall; - let allow_ro_array = upcalls_array.add(upcalls_num_val.0.into()) as *mut SavedAllowRo; - let allow_rw_array = allow_ro_array.add(allow_ro_num_val.0.into()) as *mut SavedAllowRw; + let upcalls_array = ((base_ptr.as_ptr() as *const KernelManagedLayoutFixed).add(1) + as *const u8) + .add(Self::PADDING_BEFORE_ARRAY) as *const SavedUpcall; + let allow_ro_array = upcalls_array.add(upcalls_num_val.0.into()) as *const SavedAllowRo; + let allow_rw_array = allow_ro_array.add(allow_ro_num_val.0.into()) as *const SavedAllowRw; - counters_ptr.write(counter.into()); - write_default_array(upcalls_array, upcalls_num_val.0.into()); - write_default_array(allow_ro_array, allow_ro_num_val.0.into()); - write_default_array(allow_rw_array, allow_rw_num_val.0.into()); + counters_ptr.write(Cell::new(counter.into())); + write_default_array(upcalls_array as *mut u8, upcalls_num_val.0.into()); + write_default_array(allow_ro_array as *mut u8, allow_ro_num_val.0.into()); + write_default_array(allow_rw_array as *mut u8, allow_rw_num_val.0.into()); Self { process, @@ -339,9 +377,10 @@ impl<'a> EnteredGrantKernelManagedLayout<'a> { } } - /// Returns the entire grant size including the kernel owned memory, - /// padding, and data for T. Requires that grant_t_align be a power of 2, - /// which is guaranteed from align_of rust calls. + /// Returns the entire grant size including the kernel own memory, padding, + /// and data for T. Requires that grant_t_align be a power of 2, which is + /// guaranteed from align_of rust calls. + #[inline] fn grant_size( upcalls_num: UpcallItems, allow_ro_num: AllowRoItems, @@ -349,38 +388,50 @@ impl<'a> EnteredGrantKernelManagedLayout<'a> { grant_t_size: GrantDataSize, grant_t_align: GrantDataAlign, ) -> usize { - let kernel_managed_size = size_of::() - + upcalls_num.0 as usize * size_of::() + let kernel_managed_size = size_of::() + + Self::PADDING_BEFORE_ARRAY + + upcalls_num.0 as usize * size_of::() // Then the three arrays + allow_ro_num.0 as usize * size_of::() + allow_rw_num.0 as usize * size_of::(); // We know that grant_t_align is a power of 2, so we can make a mask // that will save only the remainder bits. let grant_t_align_mask = grant_t_align.0 - 1; - // Determine padding to get to the next multiple of grant_t_align by - // taking the remainder and subtracting that from the alignment, then - // ensuring a full alignment value maps to 0. - let padding = - (grant_t_align.0 - (kernel_managed_size & grant_t_align_mask)) & grant_t_align_mask; - kernel_managed_size + padding + grant_t_size.0 + + let total_size = kernel_managed_size + grant_t_size.0; + + // Align up + (total_size + grant_t_align_mask) & !grant_t_align_mask } /// Returns the alignment of the entire grant region based on the alignment /// of data T. 
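+    ///
+    /// A sketch of the rule implemented below (hedged: exact types elided; the
+    /// variable-length arrays are laid out in decreasing alignment order):
+    ///
+    /// ```text,ignore
+    /// grant_align = max(align(fixed kernel-managed header),
+    ///                   align(SavedUpcall),
+    ///                   grant_t_align)
+    /// ```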
+ #[inline] fn grant_align(grant_t_align: GrantDataAlign) -> usize { - // The kernel owned memory all aligned to usize. We need to use the - // higher of the two alignment to ensure our padding calculations work - // for any alignment of T. - cmp::max(align_of::(), grant_t_align.0) + // We need the highest alignment of all objects in the grant to ensure our padding + // calculations work for any alignment of T. + // We put the three variable length arrays in alignment order. + assert!( + align_of::() >= align_of::() + && align_of::() >= align_of::() + ); + cmp::max( + cmp::max( + align_of::(), + align_of::(), + ), + grant_t_align.0, + ) } - /// Returns the offset for the grant data t within the entire grant region. + /// Returns the grant data pointer given the base pointer /// /// # Safety /// /// The caller must ensure that the specified base pointer is aligned to at /// least the alignment of T and points to a grant that is of size /// grant_size bytes. - unsafe fn offset_of_grant_data_t( + #[inline] + unsafe fn get_grant_data_t( base_ptr: NonNull, grant_size: usize, grant_t_size: GrantDataSize, @@ -392,6 +443,14 @@ impl<'a> EnteredGrantKernelManagedLayout<'a> { NonNull::new_unchecked(base_ptr.as_ptr().add(grant_size - grant_t_size_usize)) } + /// Returns a pointer to the RefCell for the grant data given the base pointer. + #[inline] + unsafe fn get_grant_data_t_ref(base_ptr: NonNull) -> NonNull { + NonNull::new_unchecked(core::ptr::addr_of_mut!( + (*(base_ptr.as_ptr() as *mut KernelManagedLayoutFixed)).ref_cell + )) + } + /// Read an 8 bit value from the counter field offset by the specified /// number of bits. This is a helper function for reading the counter field. fn get_counter_offset(&self, offset_bits: usize) -> usize { @@ -400,7 +459,7 @@ impl<'a> EnteredGrantKernelManagedLayout<'a> { // Creating a `EnteredGrantKernelManagedLayout` object requires that the // pointers are well aligned and point to valid memory. let counters_val = unsafe { self.counters_ptr.read() }; - (counters_val >> offset_bits) & 0xFF + (counters_val.get() >> offset_bits) & 0xFF } /// Return the number of upcalls stored by the core kernel for this grant. @@ -420,34 +479,49 @@ impl<'a> EnteredGrantKernelManagedLayout<'a> { self.get_counter_offset(16) } - /// Return mutable access to the slice of stored upcalls for this grant. - /// This is necessary for storing a new upcall. - fn get_upcalls_slice(&mut self) -> &mut [SavedUpcall] { + /// Return immutable access to the slice of stored upcalls for this grant. + /// Use .set method for storing a new upcall. + fn get_upcalls_slice(&self) -> &[SavedUpcall] { // # Safety // // Creating a `EnteredGrantKernelManagedLayout` object ensures that the // pointer to the upcall array is valid. - unsafe { slice::from_raw_parts_mut(self.upcalls_array, self.get_upcalls_number()) } + unsafe { + slice::from_raw_parts( + self.upcalls_array as *const SavedUpcall, + self.get_upcalls_number(), + ) + } } - /// Return mutable access to the slice of stored read-only allow buffers for - /// this grant. This is necessary for storing a new read-only allow. - fn get_allow_ro_slice(&mut self) -> &mut [SavedAllowRo] { + /// Return immutable access to the slice of stored read-only allow buffers for + /// this grant. Use .set method for storing a new read-only allow. + fn get_allow_ro_slice(&self) -> &[SavedAllowRo] { // # Safety // // Creating a `EnteredGrantKernelManagedLayout` object ensures that the // pointer to the RO allow array is valid. 
- unsafe { slice::from_raw_parts_mut(self.allow_ro_array, self.get_allow_ro_number()) } + unsafe { + slice::from_raw_parts( + self.allow_ro_array as *const SavedAllowRo, + self.get_allow_ro_number(), + ) + } } - /// Return mutable access to the slice of stored read-write allow buffers - /// for this grant. This is necessary for storing a new read-write allow. - fn get_allow_rw_slice(&mut self) -> &mut [SavedAllowRw] { + /// Return immutable access to the slice of stored read-write allow buffers + /// for this grant. Use .set method for storing a new read-write allow. + fn get_allow_rw_slice(&self) -> &[SavedAllowRw] { // # Safety // // Creating a `EnteredGrantKernelManagedLayout` object ensures that the // pointer to the RW allow array is valid. - unsafe { slice::from_raw_parts_mut(self.allow_rw_array, self.get_allow_rw_number()) } + unsafe { + slice::from_raw_parts( + self.allow_rw_array as *const SavedAllowRw, + self.get_allow_rw_number(), + ) + } } /// Return slices to the kernel managed upcalls and allow buffers. This @@ -479,23 +553,6 @@ impl<'a> EnteredGrantKernelManagedLayout<'a> { } } -// Ensure that we leave the grant once this goes out of scope. -impl Drop for EnteredGrantKernelManagedLayout<'_> { - fn drop(&mut self) { - // ### Safety - // - // To safely call this function we must ensure that no references will - // exist to the grant once `leave_grant()` returns. Because using a - // `EnteredGrantKernelManagedLayout` object is the only only way we - // access the actual memory of a grant, and we are calling - // `leave_grant()` from its `drop()` method, we are sure there will be - // no remaining references to the grant. - unsafe { - self.process.leave_grant(self.grant_num); - } - } -} - /// This GrantData object provides access to the memory allocated for a grant /// for a specific process. /// @@ -505,10 +562,218 @@ impl Drop for EnteredGrantKernelManagedLayout<'_> { /// /// Capsules gain access to a GrantData object by calling `Grant::enter()`. pub struct GrantData<'a, T: 'a + ?Sized> { - /// The mutable reference to the actual object type stored in the grant. + /// The mutable reference to the actual object type stored in the grant, data: &'a mut T, } +/// New grant data wrapper. Can provide one of three interfaces: +/// 1) The legacy interface. Can be borrowed inside a closure to provide a `GrantData` which is +/// short lived smart pointer to a T. +/// 2) The new interface. Provides a `PRef`. `PRef` can be long lived, and can be converted back +/// and forth with a `LivePRef` (also a smart pointer to a T). Converting checks that the process +/// is still live. +/// 3) A reference counted interface for DMA. Provides a `Ref` or `RefMut`. +/// Note taking out a `Pref` will _permanently_ disallow legacy enter and the reference counted +/// RefMut interface. + +pub struct NewGrantData<'a, T: 'a> { + ref_cell: &'a LayoutRefCell, + the_t: NonNull, + proc_entry: &'static ProcEntry, +} + +impl<'a, T: 'a> NewGrantData<'a, T> { + #[inline] + fn new(ref_cell: &'a LayoutRefCell, the_t: NonNull, proc_entry: &'static ProcEntry) -> Self { + Self { + ref_cell, + the_t, + proc_entry, + } + } + + /// Get a RefMut for the grant data. Taking this and not dropping it will mean all future + /// calls will return None. + /// It will also block the grant from being freed if the process dies. + /// Only use this if you _really_ need the reference to outlive a single syscall. + /// If you don't, prefer getting a Pref. 
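+    ///
+    /// A hedged capsule-side sketch via the public wrapper (`DMA_CAP` and the
+    /// method called on the data are hypothetical):
+    ///
+    /// ```ignore
+    /// if let Some(mut data) = new_grant_data.try_get_ref_mut(DMA_CAP) {
+    ///     // RefMut<T> derefs to the grant's T; holding it past this scope keeps
+    ///     // the borrow (and blocks legacy enter) until it is dropped.
+    ///     data.start_dma();
+    /// }
+    /// ```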
+ #[inline] + fn priv_try_get_ref_mut<'b>(&self) -> Option> + where + T: 'b, + { + match self.ref_cell.try_borrow_mut() { + Ok(borrowed) => { + let mut raw_ptr = self.the_t; + Some({ + // SAFETY: The unit in the refcell was always meant to point to this data + let value_short = RefMut::map(borrowed, |_| unsafe { raw_ptr.as_mut() }); + // SAFETY: T lives as long as 'b because of bound on function. + // We ensure elsewhere that the RefCell will not be dropped / moved while any + // of these references still exist. + unsafe { core::mem::transmute::, RefMut<'b, T>>(value_short) } + }) + } + Err(_) => None, + } + } + + /// See try_get_ref_mut. This will allow multiple Refs to exist. + #[inline] + fn priv_try_get_ref<'b>(&self) -> Option> + where + T: 'b, + { + match self.ref_cell.try_borrow() { + Ok(borrowed) => { + let raw_ptr = self.the_t; + Some({ + // SAFETY: The refcell was always meant to include this data. + let value_short = Ref::map(borrowed, |_| unsafe { raw_ptr.as_ref() }); + // SAFETY: T lives as long as 'b because of bound on function. + // We ensure elsewhere that the RefCell will not be dropped / moved while any + // of these references still exist. + unsafe { core::mem::transmute::, Ref<'b, T>>(value_short) } + }) + } + Err(_) => None, + } + } + + /// Get a short-lived reference to the data for the lifetime of a closure. Because the reference + /// cannot leak the closure, it may be difficult for functions of the reference to be stored. + /// Prefer get_pref() if you want to pass data into other capsules / HALs. + #[inline] + pub fn legacy_enter( + &self, + fun: F, + panic_on_reenter: bool, + kern_data: &GrantKernelData, + allocator: &mut GrantRegionAllocator, + ) -> Option + where + F: FnOnce(&mut GrantData, &GrantKernelData, &mut GrantRegionAllocator) -> R, + { + match self.priv_try_get_ref_mut() { + Some(mut re) => Some(fun( + &mut GrantData::new(re.deref_mut()), + kern_data, + allocator, + )), + None => { + // If we get an error it is because the grant is already + // entered. `process.enter_grant()` can fail for several + // reasons, but only the double enter case can happen once a + // grant has been applied. The other errors would be detected + // earlier (i.e. before the grant can be applied). + + // If `panic_on_reenter` is false, we skip this error and do + // nothing with this grant. + if !panic_on_reenter { + return None; + } + + // If `enter_grant` fails, we panic!() to notify the developer + // that they tried to enter the same grant twice which is + // prohibited because it would result in two mutable references + // existing for the same memory. This preserves type correctness + // (but does crash the system). + // + // ## Explanation and Rationale + // + // This panic represents a tradeoff. While it is undesirable to + // have the potential for a runtime crash in this grant region + // code, it balances usability with type correctness. The + // challenge is that calling `self.apps.iter()` is a common + // pattern in capsules to access the grant region of every app + // that is using the capsule, and sometimes it is intuitive to + // call that inside of a `self.apps.enter(app_id, |app| {...})` + // closure. However, `.enter()` means that app's grant region is + // entered, and then a naive `.iter()` would re-enter the grant + // region and cause undefined behavior. We considered different + // options to resolve this. + // + // 1. Have `.iter()` only iterate over grant regions which are + // not entered. 
This avoids the bug, but could lead to + // unexpected behavior, as `self.apps.iter()` will do + // different things depending on where in a capsule it is + // called. + // 2. Have the compiler detect when `.iter()` is called when a + // grant region has already been entered. We don't know of a + // viable way to implement this. + // 3. Panic if `.iter()` is called when a grant is already + // entered. + // + // We decided on option 3 because it balances minimizing + // surprises (`self.apps.iter()` will always iterate all grants) + // while also protecting against the bug. We expect that any + // code that attempts to call `self.apps.iter()` after calling + // `.enter()` will immediately encounter this `panic!()` and + // have to be refactored before any tests will be successful. + // Therefore, this `panic!()` should only occur at + // development/testing time. + // + // ## How to fix this error + // + // If you are seeing this panic, you need to refactor your + // capsule to not call `.iter()` or `.each()` from inside a + // `.enter()` closure. That is, you need to close the grant + // region you are currently in before trying to iterate over all + // grant regions. + panic!("Attempted to re-enter a grant region."); + } + } + } + + /// Get a process-lifetime bound reference to grant data. + /// LivePRef can be converted back to PRef type for storage which ascribes + /// to 'static. To convert back, use `TryInto>`. + #[inline] + pub fn get_pref(&self) -> Option> { + match self.ref_cell.try_borrow() { + Ok(r) => { + if !r.get() { + // PRef leaks a count to block concurrent use by the other interfaces. + // Preferred over PRef containing a Ref as that would add extra code + // generation to the common case of only Pref being in use. + // We only do this once so that repeated use of get_pref does not overflow the + // counter and on terminate can magically resurrect this reference. + r.set(true); + core::mem::forget(r); + } + // Safety: leaking a reference blocks any exclusive references being created + // NewGrantData cannot outlive a process so the process is valid at this point. + unsafe { Some(LivePRef::new(self.the_t, self.proc_entry)) } + } + Err(_) => None, + } + } + + /// Public version of priv_try_get_ref_mut + #[inline] + pub fn try_get_ref_mut<'b>( + &self, + _cap: &'static dyn capabilities::HoldGrantReferencesCapability, + ) -> Option> + where + T: 'b + Sized, + { + self.priv_try_get_ref_mut() + } + + /// Public version of priv_try_get_ref + #[inline] + pub fn try_get_ref<'b>( + &self, + _cap: &'static dyn capabilities::HoldGrantReferencesCapability, + ) -> Option> + where + T: 'b + Sized, + { + self.priv_try_get_ref() + } +} + impl<'a, T: 'a + ?Sized> GrantData<'a, T> { /// Create a `GrantData` object to provide access to the actual object /// allocated for a process. @@ -516,19 +781,26 @@ impl<'a, T: 'a + ?Sized> GrantData<'a, T> { /// Only one can GrantData per underlying object can be created at a time. /// Otherwise, there would be multiple mutable references to the same object /// which is undefined behavior. + #[inline] fn new(data: &'a mut T) -> GrantData<'a, T> { - GrantData { data: data } + GrantData { data } } } +/// This can now panic for the reference counted version. Would be nice to offer a better +/// interface. +/// Maybe users of Grant<> should select which style they prefer, and this should not be offered for +/// the reference counted versions. 
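+//
+// A hedged sketch of the `NewGrantData::get_pref` round-trip described above
+// (conversion spellings assumed from the `TryInto<LivePRef<T>>` note):
+//
+// ```ignore
+// let live: LivePRef<T> = new_grant_data.get_pref()?; // process is live here
+// let stored: PRef<T> = live.into();                  // ascribes to 'static; safe to stash
+// // ...later, re-validate that the process is still alive before use:
+// if let Ok(live) = stored.try_into() { /* deref the LivePRef<T> to use the T */ }
+// ```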
impl<'a, T: 'a + ?Sized> Deref for GrantData<'a, T> { type Target = T; + #[inline] fn deref(&self) -> &T { self.data } } impl<'a, T: 'a + ?Sized> DerefMut for GrantData<'a, T> { + #[inline] fn deref_mut(&mut self) -> &mut T { self.data } @@ -582,6 +854,15 @@ impl<'a> GrantKernelData<'a> { } } + /// Get the reference to the process that this grant data is for. This being available avoids + /// boilerplate of looking up the process again after entering a grant. Should be pub(crate) + /// otherwise the process management cap could be sidestepped. + /// I would like to see more of that interface have caps added, and then make this always + /// available. Or at least give a proxy object. + pub(crate) fn get_process(&self) -> &'a dyn Process { + self.process + } + /// Schedule the specified upcall for the process with r0, r1, r2 as /// provided values. /// @@ -607,14 +888,119 @@ impl<'a> GrantKernelData<'a> { subscribe_num, driver_num: self.driver_num, }, - saved_upcall.appdata, - saved_upcall.fn_ptr, + saved_upcall.appdata.get(), + saved_upcall.fn_ptr.get(), ); upcall.schedule(self.process, r.0, r.1, r.2) }, ) } + pub fn could_schedule_upcall(&self, subscribe_num: usize) -> Result<(), UpcallError> { + self.upcalls.get(subscribe_num).map_or( + Err(UpcallError::InvalidSubscribeNum), + |saved_upcall| { + saved_upcall.fn_ptr.get().map_or(Ok(()), |_| { + self.process.could_enqueue_task().map_err(|er| match er { + ErrorCode::NOMEM => UpcallError::QueueFull, + _ => UpcallError::KernelError, + }) + }) + }, + ) + } + + pub fn get_upcall(&self, subscribe_num: usize) -> PUpcall { + let (tracker, fptr, data) = self.upcalls.get(subscribe_num).map_or( + ( + DualTracker::global_dead(), + Default::default(), + Default::default(), + ), + |saved_upcall| { + let id = self.process.processid(); + ( + DualTracker::new( + id.kernel.get_live_tracker_for(id), + ALiveTracker::new(&saved_upcall.live), + ), + saved_upcall.fn_ptr.get(), + saved_upcall.appdata.get(), + ) + }, + ); + PUpcall::new( + tracker, + data, + fptr, + UpcallId { + driver_num: self.driver_num, + subscribe_num, + }, + ) + } + + pub fn has_nonnull_upcall(&self, subscribe_num: usize) -> bool { + self.upcalls + .get(subscribe_num) + .map_or(false, |saved_upcall| { + saved_upcall.fn_ptr.get().map_or(false, |_| true) + }) + } + + /// Common logic between get_readonly_processbuffer, get_readwrite_processbuffer, + /// and the ARef versions. + fn get_helper( + &self, + allow_num: usize, + ro: bool, + ) -> Result<(*const [u8], &Cell), Error> { + let saved_allow = if ro { &self.allow_ro } else { &self.allow_rw } + .get(allow_num) + .ok_or(crate::process::Error::AddressOutOfBounds)?; + + let ptr = saved_allow.value.0.map_ref( + |true_ref| unsafe { + // Safety: we never get_mut on this refcell. + *true_ref.as_ptr() + }, + |false_ref| false_ref.get(), + ); + + let tracker = &saved_allow.live; + + Ok((ptr, tracker)) + } + + fn get_rc_helper( + &self, + allow_num: usize, + ro: bool, + ) -> Result, crate::process::Error> { + if !CONFIG.counted_grant_refs { + return Err(Error::KernelError); + } + + let saved_allow = if ro { &self.allow_ro } else { &self.allow_rw } + .get(allow_num) + .ok_or(crate::process::Error::AddressOutOfBounds)?; + + // # Safety: + // For as long as the reference counter is non-zero, the RefCell will not be changed + // due to the logic in allow_ro and ... (TODO: don't allow process free while counts). 
+ // So, we can transmute this reference to have static lifetime as the only danger + // of that would it be outliving its Refcell, which we will keep around until + // the count is zero. + // We could have used Rc to have not needed the lifetime at all, + // but Rc does not expose its cell like Ref does with RefCell. + + unsafe { + let as_static: Option> = + core::mem::transmute(saved_allow.value.0.get_true_ref().try_borrow()); + as_static.ok_or(crate::process::Error::AlreadyInUse) + } + } + /// Returns a lifetime limited reference to the requested /// `ReadOnlyProcessBuffer`. /// @@ -629,25 +1015,57 @@ impl<'a> GrantKernelData<'a> { &self, allow_ro_num: usize, ) -> Result { - self.allow_ro.get(allow_ro_num).map_or( - Err(crate::process::Error::AddressOutOfBounds), - |saved_ro| { - // # Safety - // - // This is the saved process buffer data has been validated to - // be wholly contained within this process before it was stored. - // The lifetime of the ReadOnlyProcessBuffer is bound to the - // lifetime of self, which correctly limits dereferencing this - // saved pointer to only when it is valid. - unsafe { - Ok(ReadOnlyProcessBufferRef::new( - saved_ro.ptr, - saved_ro.len, - self.process.processid(), - )) - } - }, - ) + let (ptr, _) = self.get_helper(allow_ro_num, true)?; + + // # Safety + // + // This is the saved process buffer data has been validated to + // be wholly contained within this process before it was stored. + // The lifetime of the ReadOnlyProcessBuffer is bound to the + // lifetime of self, which correctly limits dereferencing this + // saved pointer to only when it is valid. + unsafe { + Ok(ReadOnlyProcessBufferRef::new( + ptr.as_ptr(), + ptr.len(), + self.process.processid(), + )) + } + } + + /// Helper to reduce boilerplate for get + and_then + enter + pub fn enter_readonly_processbuffer( + &self, + allow_ro_num: usize, + fun: F, + ) -> Result + where + F: FnOnce(&ReadableProcessSlice) -> R, + { + self.get_readonly_processbuffer(allow_ro_num) + .and_then(|buf| buf.enter(fun)) + } + + /// Get a reference counted reference to the requested `ReadOnlyProcessBuffer` + /// This reference is a valid for longer, but no new buffer can be allowed while references + /// Still exist. This may cause a loss in stability. + /// To encourage use of the other interface, this one requires a capability. + pub fn get_readonly_processbuffer_rc( + &self, + allow_ro_num: usize, + _cap: &dyn capabilities::HoldAllowReferencesCapability, + ) -> Result, crate::process::Error> { + let as_static = self.get_rc_helper(allow_ro_num, true)?; + + Ok(Ref::map(as_static, |slice| { + // We can also avoid the extra indirection (and any possibly problematic + // changing of the RefCell) by following the pointer through the RefCell and + // converting the slice to something more typesafe. + // This has the benefit of allowing re-use of the slot, and stopping the pointer from + // changing from underneath us. + let slice = *slice; + unsafe { raw_processbuf_to_roprocessslice::<'static>(slice.as_ptr(), slice.len()) } + })) } /// Returns a lifetime limited reference to the requested @@ -664,25 +1082,89 @@ impl<'a> GrantKernelData<'a> { &self, allow_rw_num: usize, ) -> Result { - self.allow_rw.get(allow_rw_num).map_or( - Err(crate::process::Error::AddressOutOfBounds), - |saved_rw| { - // # Safety - // - // This is the saved process buffer data has been validated to - // be wholly contained within this process before it was stored. 
- // The lifetime of the ReadWriteProcessBuffer is bound to the - // lifetime of self, which correctly limits dereferencing this - // saved pointer to only when it is valid. - unsafe { - Ok(ReadWriteProcessBufferRef::new( - saved_rw.ptr, - saved_rw.len, - self.process.processid(), - )) - } - }, - ) + let (ptr, _) = self.get_helper(allow_rw_num, false)?; + + // # Safety + // + // This is the saved process buffer data has been validated to + // be wholly contained within this process before it was stored. + // The lifetime of the ReadWriteProcessBuffer is bound to the + // lifetime of self, which correctly limits dereferencing this + // saved pointer to only when it is valid. + unsafe { + Ok(ReadWriteProcessBufferRef::new( + (ptr as *mut [u8]).as_mut_ptr(), + ptr.len(), + self.process.processid(), + )) + } + } + + /// Get a reference counted reference to the requested `ReadWriteProcessBuffer` + /// This reference is a valid for longer, but no buffer can be allowed while references + /// Still exist. This may cause a loss in stability. + /// Note, this is only a Ref, not RefMut, because WritetableProcessSlice might overlap with + /// other allowed buffers. The interior mutability of the type allows for writing. + pub fn get_readwrite_processbuffer_rc( + &self, + allow_rw_num: usize, + _cap: &dyn capabilities::HoldAllowReferencesCapability, + ) -> Result, crate::process::Error> { + let as_static = self.get_rc_helper(allow_rw_num, false)?; + + Ok(Ref::map(as_static, |slice| { + // We can also avoid the extra indirection (and any possibly problematic + // changing of the RefCell) by following the pointer through the RefCell and + // converting the slice to something more typesafe. + // This has the benefit of allowing re-use of the slot, and stopping the pointer from + // changing from underneath us. + let slice = *slice; + unsafe { + raw_processbuf_to_rwprocessslice::<'static>(slice.as_ptr() as *mut u8, slice.len()) + } + })) + } + + /// Get a LiveARef for an read-only allowed buffer. It is always valid, but has limited + /// lifetime. Convert into ARef which ascribes to static to store. + pub fn get_readonly_aref( + &self, + allow_ro_num: usize, + ) -> Result, ErrorCode> { + let (ptr, tracker) = self.get_helper(allow_ro_num, true)?; + let buf = unsafe { raw_processbuf_to_roprocessslice(ptr.as_ptr(), ptr.len()) }; + let id = self.process.processid(); + unsafe { + // Safety: the existence of this object means the process is currently live. + Ok(LiveARef::new( + buf.into(), + id.kernel.get_live_tracker_for(id), + tracker, + )) + } + } + + /// Get a LiveARef for an read/write allowed buffer. It is always valid, but has limited + /// lifetime. Convert into ARef which ascribes to static to store. + pub fn get_readwrite_aref( + &self, + allow_rw_num: usize, + ) -> Result, ErrorCode> { + let (ptr, tracker) = self.get_helper(allow_rw_num, false)?; + let buf = unsafe { raw_processbuf_to_rwprocessslice(ptr.as_ptr() as *mut u8, ptr.len()) }; + let id = self.process.processid(); + unsafe { + // Safety: the existence of this object means the process is currently live. + Ok(LiveARef::new( + buf.into(), + id.kernel.get_live_tracker_for(id), + tracker, + )) + } + } + + pub fn get_extra_syscall_arg(&self, ndx: usize) -> Option { + self.get_process().get_extra_syscall_arg(ndx) } } @@ -692,45 +1174,88 @@ impl<'a> GrantKernelData<'a> { #[repr(C)] #[derive(Default)] struct SavedUpcall { - appdata: usize, - fn_ptr: Option>, + appdata: Cell, + fn_ptr: Cell, + /// It is up to applications when to rotate these. 
The kernel promises to stop using the ref + /// upon request. + /// For libtock-rs, this is controlled by `scope::share`. + /// At the END of any `scope::share` any buffers shared within that block are un-allowed by + /// rotating the counter. Entering a block does not rotate the counter, so a nested share + /// will not immediately unshare previous buffers. + live: Cell, +} + +/// A wrapper of a raw slice, possibly with a ref count if enabled +/// It doesn't really matter this is *const or *mut for our use cases +type AllowSliceTIf = TIfCfg!(counted_grant_refs, RefCell<*const [u8]>, Cell<*const [u8]>); +/// Wrapping again to implement Default +struct AllowSliceInner(AllowSliceTIf); + +impl AllowSliceInner { + fn new(value: *const [u8]) -> Self { + if CONFIG.counted_grant_refs { + AllowSliceInner(AllowSliceTIf::new_true(RefCell::new(value))) + } else { + AllowSliceInner(AllowSliceTIf::new_false(Cell::new(value))) + } + } + + /// Try replace the slice pointer with another. + /// On failure, returns the input and false. + /// On success, returns the value and true. + /// If unallow_previous is true, then it will give back the last allowed value, otherwise, + /// NULL is returned. + fn try_replace(&self, value: *const [u8], unallow_previous: bool) -> (*const [u8], bool) { + if unallow_previous { + if CONFIG.counted_grant_refs { + match self.0.get_true_ref().try_borrow_mut() { + Ok(mut ref_mut) => (core::mem::replace(ref_mut.deref_mut(), value), true), + Err(_) => (value, false), + } + } else { + (self.0.get_false_ref().replace(value), true) + } + } else { + // If we are not unallowing the previous buffers, then it does not matter if there + // are any reference counts. + if CONFIG.counted_grant_refs { + unsafe { + *self.0.get_true_ref().as_ptr() = value; + } + } else { + self.0.get_false_ref().set(value); + } + (core::ptr::slice_from_raw_parts(0 as *const u8, 0), true) + } + } + + fn new_null() -> Self { + Self::new(core::ptr::slice_from_raw_parts(core::ptr::null(), 0)) + } +} + +impl Default for AllowSliceInner { + fn default() -> Self { + Self::new_null() + } } /// A minimal representation of a read-only allow from app, used for storing a /// read-only allow in a process' kernel managed grant space without wasting /// memory duplicating information such as process ID. #[repr(C)] +#[derive(Default)] struct SavedAllowRo { - ptr: *const u8, - len: usize, -} - -impl Default for SavedAllowRo { - fn default() -> Self { - Self { - ptr: core::ptr::null(), - len: 0, - } - } + value: AllowSliceInner, + /// It is up to applications when to rotate these. For rust, a change corresponds to the END + /// of any lifetime of scope::share + live: Cell, } /// A minimal representation of a read-write allow from app, used for storing a /// read-write allow in a process' kernel managed grant space without wasting /// memory duplicating information such as process ID. -#[repr(C)] -struct SavedAllowRw { - ptr: *mut u8, - len: usize, -} - -impl Default for SavedAllowRw { - fn default() -> Self { - Self { - ptr: core::ptr::null_mut(), - len: 0, - } - } -} +type SavedAllowRw = SavedAllowRo; /// Write the default value of T to every element of the array. /// @@ -742,6 +1267,7 @@ impl Default for SavedAllowRw { /// function is called. The memory does not need to be initialized yet. If it /// already does contain initialized memory, then those contents will be /// overwritten without being `Drop`ed first. 
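+///
+/// A hedged call sketch (mirroring `initialize_from_counts` above; `slots` and
+/// `n` are illustrative):
+///
+/// ```ignore
+/// let slots: *mut SavedUpcall = /* freshly allocated, well-aligned memory */;
+/// // Writes SavedUpcall::default() into each of the n slots.
+/// unsafe { write_default_array(slots, n) };
+/// ```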
+#[inline] unsafe fn write_default_array(base: *mut T, num: usize) { for i in 0..num { base.add(i).write(T::default()); @@ -756,25 +1282,18 @@ fn enter_grant_kernel_managed( driver_num: usize, ) -> Result { let grant_num = process.lookup_grant_from_driver_num(driver_num)?; + let mem = process.get_grant_mem(grant_num)?; - // Check if the grant has been allocated, and if not we cannot enter this - // grant. - match process.grant_is_allocated(grant_num) { - Some(true) => { /* Allocated, nothing to do */ } - Some(false) => return Err(ErrorCode::NOMEM), - None => return Err(ErrorCode::FAIL), - }; - - // Return early if no grant. - let grant_base_ptr = process.enter_grant(grant_num).or(Err(ErrorCode::NOMEM))?; // # Safety // // We know that this pointer is well aligned and initialized with meaningful // data when the grant region was allocated. - let layout = unsafe { - EnteredGrantKernelManagedLayout::read_from_base(grant_base_ptr, process, grant_num) - }; - Ok(layout) + match mem { + None => Err(ErrorCode::NOMEM), + Some(mem) => { + Ok(unsafe { EnteredGrantKernelManagedLayout::read_from_base(mem, process, grant_num) }) + } + } } /// Subscribe to an upcall by saving the upcall in the grant region for the @@ -784,7 +1303,7 @@ pub(crate) fn subscribe( upcall: Upcall, ) -> Result { // Enter grant and keep it open until _grant_open goes out of scope. - let mut layout = match enter_grant_kernel_managed(process, upcall.upcall_id.driver_num) { + let layout = match enter_grant_kernel_managed(process, upcall.upcall_id.driver_num) { Ok(val) => val, Err(e) => return Err((upcall, e)), }; @@ -801,19 +1320,19 @@ pub(crate) fn subscribe( // Index into the saved upcall slice to get the old upcall. Use .get in case // userspace passed us a bad subscribe number. - match saved_upcalls_slice.get_mut(upcall.upcall_id.subscribe_num) { + match saved_upcalls_slice.get(upcall.upcall_id.subscribe_num) { Some(saved_upcall) => { // Create an `Upcall` object with the old saved upcall. let old_upcall = Upcall::new( process.processid(), upcall.upcall_id, - saved_upcall.appdata, - saved_upcall.fn_ptr, + saved_upcall.appdata.get(), + saved_upcall.fn_ptr.get(), ); // Overwrite the saved upcall with the new upcall. - saved_upcall.appdata = upcall.appdata; - saved_upcall.fn_ptr = upcall.fn_ptr; + saved_upcall.appdata.set(upcall.appdata); + saved_upcall.fn_ptr.set(upcall.fn_ptr); // Success! Ok(old_upcall) @@ -822,99 +1341,191 @@ pub(crate) fn subscribe( } } +/// Revoke all allow-ed buffers that match a predicate P. +/// Safety: no LiveARef or LivePRef may exist to anything filtered out, nor may any grants be +/// entered via the legacy mechanism. +pub unsafe fn revoke_allows bool>( + kernel: &Kernel, + process: &dyn Process, + mut p: P, +) -> Result<(), ErrorCode> { + let max = kernel.get_grant_count_and_finalize(); + + for driver_num in 0..max { + let Ok(layout) = enter_grant_kernel_managed(process, driver_num) else { + continue; + }; + + for vector in [layout.get_allow_ro_slice(), layout.get_allow_rw_slice()] { + for allow in vector { + let range = if CONFIG.counted_grant_refs { + // This error happens if this function is called inside of a legacy enter, + // which breaks its safe contract. + *allow + .value + .0 + .get_true_ref() + .try_borrow() + .or(Err(ErrorCode::BUSY))? 
+                } else {
+                    allow.value.0.get_false_ref().get()
+                };
+                if p(range) {
+                    let (_, success) = allow
+                        .value
+                        .try_replace(core::ptr::slice_from_raw_parts(core::ptr::null(), 0), true);
+                    // Attempt to revoke a reference counted allow
+                    if !success {
+                        return Err(ErrorCode::BUSY);
+                    } else {
+                        debug!("We got rid of: {}", range.as_ptr() as usize);
+                    }
+                }
+            }
+        }
+
+        // Note: upcalls will be revoked because they are stored as CHERI capabilities
+    }
+
+    Ok(())
+}
+
+pub(crate) fn try_free_grant(process: &dyn Process) -> Result<(), ErrorCode> {
+    // TODO: open as each driver
+    let driver_num: usize = 0;
+    // Enter grant and keep it open until `_grant_open` goes out of scope.
+    let layout = match enter_grant_kernel_managed(process, driver_num) {
+        Ok(val) => val,
+        Err(e) => match e {
+            // Some of these mean the grant is already free
+            ErrorCode::NOMEM => return Ok(()),
+            // Others, propagate the error
+            _ => return Err(e),
+        },
+    };
+
+    // Now try to take each of the allowed buffers mutably. If any fail, we fail.
+
+    let _saved_allow_ro_slice = layout.get_allow_ro_slice();
+    let _saved_allow_rw_slice = layout.get_allow_rw_slice();
+
+    // TODO: actually loop through the slices
+    // TODO: also check the T in the grant has no references
+    // TODO: also check custom grants have no references
+    todo!();
+}
+
+/// Stores the process buffer in the kernel managed grant.
+/// Safety: ptr and len have been validated and have actually been allowed to us by the process.
+/// If unallow_previous is true then all previously allowed buffers (to this slot) will no longer
+/// be used by the kernel.
+/// Currently, we treat passing a null ptr as the signal that the user intends this, as that
+/// matches the patterns used in userspace.
+/// However, it does not quite match the original Tock interface and so might break some
+/// downstream code. We could also have a separate syscall that explicitly says this is false.
+pub(crate) unsafe fn allow_helper(
+    process: &dyn Process,
+    driver_num: usize,
+    allow_num: usize,
+    ptr: *const u8,
+    len: usize,
+    read_only: bool,
+    unallow_previous: bool,
+) -> (*const u8, usize, Result<(), ErrorCode>) {
+    // Enter grant
+    let layout = match enter_grant_kernel_managed(process, driver_num) {
+        Ok(layout) => layout,
+        Err(e) => return (ptr, len, Err(e)),
+    };
+
+    let saved_slice = if read_only {
+        layout.get_allow_ro_slice()
+    } else {
+        layout.get_allow_rw_slice()
+    };
+
+    // Index into the saved slice to get the old value. Use .get in case
+    // userspace passed us a bad allow number.
+    match saved_slice.get(allow_num) {
+        Some(saved) => {
+            // Replace old values with current buffer.
+            let (old, changed) = saved
+                .value
+                .try_replace(core::ptr::slice_from_raw_parts(ptr, len), unallow_previous);
+
+            // Rotate liveness so any references still in the kernel become invalid
+            if changed && unallow_previous {
+                *saved.live.take_borrow() += 1;
+            }
+
+            (
+                old.as_ptr(),
+                old.len(),
+                if !changed {
+                    Err(ErrorCode::BUSY)
+                } else {
+                    Ok(())
+                },
+            )
+        }
+        None => (ptr, len, Err(ErrorCode::NOSUPPORT)),
+    }
+}
+
 /// Stores the specified read-only process buffer in the kernel managed grant
 /// region for this process and driver. The previous read-only process buffer
 /// stored at the same allow_num id is returned.
+#[inline(always)]
 pub(crate) fn allow_ro(
     process: &dyn Process,
     driver_num: usize,
     allow_num: usize,
     buffer: ReadOnlyProcessBuffer,
 ) -> Result<ReadOnlyProcessBuffer, (ReadOnlyProcessBuffer, ErrorCode)> {
-    // Enter grant and keep it open until `_grant_open` goes out of scope.
- let mut layout = match enter_grant_kernel_managed(process, driver_num) { - Ok(val) => val, - Err(e) => return Err((buffer, e)), - }; - - // Create the saved allow ro slice from the grant memory. - // + let (ptr, len, proc) = buffer.consume(); + let unallow = (ptr as usize) == 0; // # Safety // - // This is safe because of how the grant was initially allocated and that - // because we were able to enter the grant the grant region must be valid - // and initialized. We are also holding the grant open until _grant_open - // goes out of scope. - let saved_allow_ro_slice = layout.get_allow_ro_slice(); - - // Index into the saved slice to get the old value. Use .get in case - // userspace passed us a bad allow number. - match saved_allow_ro_slice.get_mut(allow_num) { - Some(saved) => { - // # Safety - // + // The ReadOnlyProcessBuffer type guarantees validity + unsafe { + let (ptr, len, result) = + allow_helper(process, driver_num, allow_num, ptr, len, true, unallow); + let buffer = ReadOnlyProcessBuffer::new_option(ptr, len, proc); + match result { // The pointer has already been validated to be within application // memory before storing the values in the saved slice. - let old_allow = - unsafe { ReadOnlyProcessBuffer::new(saved.ptr, saved.len, process.processid()) }; - - // Replace old values with current buffer. - let (ptr, len) = buffer.consume(); - saved.ptr = ptr; - saved.len = len; - - // Success! - Ok(old_allow) + Ok(()) => Ok(buffer), + Err(code) => Err((buffer, code)), } - None => Err((buffer, ErrorCode::NOSUPPORT)), } } /// Stores the specified read-write process buffer in the kernel managed grant /// region for this process and driver. The previous read-write process buffer /// stored at the same allow_num id is returned. +#[inline(always)] pub(crate) fn allow_rw( process: &dyn Process, driver_num: usize, allow_num: usize, buffer: ReadWriteProcessBuffer, ) -> Result { - // Enter grant and keep it open until `_grant_open` goes out of scope. - let mut layout = match enter_grant_kernel_managed(process, driver_num) { - Ok(val) => val, - Err(e) => return Err((buffer, e)), - }; + let (ptr, len, proc) = buffer.consume(); + let unallow = (ptr as usize) == 0; - // Create the saved allow rw slice from the grant memory. - // // # Safety // - // This is safe because of how the grant was initially allocated and that - // because we were able to enter the grant the grant region must be valid - // and initialized. We are also holding the grant open until `_grant_open` - // goes out of scope. - let saved_allow_rw_slice = layout.get_allow_rw_slice(); - - // Index into the saved slice to get the old value. Use .get in case - // userspace passed us a bad allow number. - match saved_allow_rw_slice.get_mut(allow_num) { - Some(saved) => { - // # Safety - // + // The ReadWriteProcessBuffer type guarantees validity + unsafe { + let (ptr, len, result) = + allow_helper(process, driver_num, allow_num, ptr, len, false, unallow); + let buffer = ReadWriteProcessBuffer::new_option(ptr as *mut u8, len, proc); + match result { // The pointer has already been validated to be within application // memory before storing the values in the saved slice. - let old_allow = - unsafe { ReadWriteProcessBuffer::new(saved.ptr, saved.len, process.processid()) }; - - // Replace old values with current buffer. - let (ptr, len) = buffer.consume(); - saved.ptr = ptr; - saved.len = len; - - // Success! 
- Ok(old_allow) + Ok(()) => Ok(buffer), + Err(code) => Err((buffer, code)), } - None => Err((buffer, ErrorCode::NOSUPPORT)), } } @@ -926,29 +1537,20 @@ pub(crate) fn allow_rw( /// /// This is created from a `Grant` when that grant is entered for a specific /// process. -pub struct ProcessGrant< - 'a, - T: 'a, - Upcalls: UpcallSize, - AllowROs: AllowRoSize, - AllowRWs: AllowRwSize, -> { - /// The process the grant is applied to. - /// - /// We use a reference here because instances of `ProcessGrant` are very - /// short lived. They only exist while a `Grant` is being entered, so we can - /// be sure the process still exists while a `ProcessGrant` exists. No - /// `ProcessGrant` can be stored. - process: &'a dyn Process, +pub struct ProcessGrant<'a, T, Upcalls: UpcallSize, AllowROs: AllowRoSize, AllowRWs: AllowRwSize> { + /// The process entry for the process the grant is applied to. + /// Must point to a valid entry for the lifetime of this struct + process: &'static ProcEntry, + + /// The grant mem for the process. If this ProcessGrant is successfully created, this memory + /// is initialised and valid for at least the lifetime of this object. + grant_mem: NonNull, /// The syscall driver number this grant is associated with. driver_num: usize, - /// The identifier of the Grant this is applied for. - grant_num: usize, - /// Used to store Rust types for grant. - _phantom: PhantomData<(T, Upcalls, AllowROs, AllowRWs)>, + _phantom: PhantomData<(T, Upcalls, AllowROs, AllowRWs, &'a dyn Process)>, } impl<'a, T: Default, Upcalls: UpcallSize, AllowROs: AllowRoSize, AllowRWs: AllowRwSize> @@ -984,7 +1586,7 @@ impl<'a, T: Default, Upcalls: UpcallSize, AllowROs: AllowRoSize, AllowRWs: Allow num_allow_ros: AllowRoItems, num_allow_rws: AllowRwItems, processid: ProcessId, - ) -> Result<(Option>, &'a dyn Process), Error> { + ) -> Result<(Option>, &'a ProcEntry, NonNull), Error> { // Here is an example of how the grants are laid out in the grant // region of process's memory: // @@ -1014,16 +1616,17 @@ impl<'a, T: Default, Upcalls: UpcallSize, AllowROs: AllowRoSize, AllowRWs: Allow // The grant memory is not allocated until the actual grant region // is actually used. - let process = processid + let process_entry = processid .kernel - .get_process(processid) + .get_process_entry(processid) .ok_or(Error::NoSuchApp)?; - // Check if the grant is allocated. If not, we allocate it process - // memory first. We then create an `ProcessGrant` object for this - // grant. - if let Some(is_allocated) = process.grant_is_allocated(grant_num) { - if !is_allocated { + let process = process_entry.proc_ref.get().ok_or(Error::NoSuchApp)?; + + let grant_ptr = process.get_grant_mem(grant_num)?; + + match grant_ptr { + None => { // Calculate the alignment and size for entire grant region. let alloc_align = EnteredGrantKernelManagedLayout::grant_align(grant_t_align); let alloc_size = EnteredGrantKernelManagedLayout::grant_size( @@ -1035,11 +1638,9 @@ impl<'a, T: Default, Upcalls: UpcallSize, AllowROs: AllowRoSize, AllowRWs: Allow ); // Allocate grant, the memory is still uninitialized though. 
- if !process.allocate_grant(grant_num, driver_num, alloc_size, alloc_align) { - return Err(Error::OutOfMemory); - } - - let grant_ptr = process.enter_grant(grant_num)?; + let grant_ptr = process + .allocate_grant(grant_num, driver_num, alloc_size, alloc_align) + .ok_or(Error::OutOfMemory)?; // Create a layout from the counts we have and initialize // all memory so it is valid in the future to read as a @@ -1063,6 +1664,9 @@ impl<'a, T: Default, Upcalls: UpcallSize, AllowROs: AllowRoSize, AllowRWs: Allow process, grant_num, ); + EnteredGrantKernelManagedLayout::get_grant_data_t_ref(grant_ptr) + .as_ptr() + .write(RefCell::new(Cell::new(false))); } // # Safety @@ -1071,28 +1675,26 @@ impl<'a, T: Default, Upcalls: UpcallSize, AllowROs: AllowRoSize, AllowRWs: Allow // large and is at least as aligned as grant_t_align. unsafe { Ok(( - Some(EnteredGrantKernelManagedLayout::offset_of_grant_data_t( + Some(EnteredGrantKernelManagedLayout::get_grant_data_t( grant_ptr, alloc_size, grant_t_size, )), - process, + process_entry, + grant_ptr, )) } - } else { + } + Some(grant_ptr) => { // T was already allocated, outer function should not // initialize T. - Ok((None, process)) + Ok((None, process_entry, grant_ptr)) } - } else { - // Cannot use the grant region in any way if the process is - // inactive. - Err(Error::InactiveApp) } } // Handle the bulk of the work in a function which is not templated. - let (opt_raw_grant_ptr_nn, process) = new_inner( + let (opt_raw_grant_ptr_nn, process_entry, grant_ptr) = new_inner( grant.grant_num, grant.driver_num, GrantDataSize(size_of::()), @@ -1136,9 +1738,9 @@ impl<'a, T: Default, Upcalls: UpcallSize, AllowROs: AllowRoSize, AllowRWs: Allow // We have ensured the grant is already allocated or was just allocated, // so we can create and return the `ProcessGrant` type. Ok(ProcessGrant { - process: process, + process: process_entry, + grant_mem: grant_ptr, driver_num: grant.driver_num, - grant_num: grant.grant_num, _phantom: PhantomData, }) } @@ -1146,31 +1748,46 @@ impl<'a, T: Default, Upcalls: UpcallSize, AllowROs: AllowRoSize, AllowRWs: Allow /// Return an `ProcessGrant` for a grant in a process if the process is /// valid and that process grant has already been allocated, or `None` /// otherwise. - fn new_if_allocated( + /// SAFETY: Must call with a valid process entry + unsafe fn new_if_allocated( grant: &Grant, - process: &'a dyn Process, + process: &'static ProcEntry, ) -> Option { - if let Some(is_allocated) = process.grant_is_allocated(grant.grant_num) { - if is_allocated { - Some(ProcessGrant { - process: process, - driver_num: grant.driver_num, - grant_num: grant.grant_num, - _phantom: PhantomData, - }) - } else { - // Grant has not been allocated. - None - } + if let Ok(Some(ptr)) = process + .proc_ref + .get() + .unwrap_unchecked() + .get_grant_mem(grant.grant_num) + { + Some(ProcessGrant { + process: process, + grant_mem: ptr, + driver_num: grant.driver_num, + _phantom: PhantomData, + }) } else { - // Process is invalid. + // Grant has not been allocated. + // or Process is invalid. None } } + /// We use a reference here because instances of `ProcessGrant` are very + /// short lived. They only exist while a `Grant` is being entered or borrowed, so we can + /// be sure the process still exists while a `ProcessGrant` exists. No + /// `ProcessGrant` can be stored. 
+ #[inline] + fn process_ref(&self) -> &'a dyn Process { + unsafe { + // Safety: these are only created for valid process entries + self.process.proc_ref.get().unwrap_unchecked() + } + } + /// Return the ProcessId of the process this ProcessGrant is associated with. + #[inline] pub fn processid(&self) -> ProcessId { - self.process.processid() + self.process_ref().processid() } /// Run a function with access to the memory in the related process for the @@ -1304,74 +1921,29 @@ impl<'a, T: Default, Upcalls: UpcallSize, AllowROs: AllowRoSize, AllowRWs: Allow where F: FnOnce(&mut GrantData, &GrantKernelData, &mut GrantRegionAllocator) -> R, { - // Access the grant that is in process memory. This can only fail if - // the grant is already entered. - let grant_ptr = self - .process - .enter_grant(self.grant_num) - .map_err(|_err| { - // If we get an error it is because the grant is already - // entered. `process.enter_grant()` can fail for several - // reasons, but only the double enter case can happen once a - // grant has been applied. The other errors would be detected - // earlier (i.e. before the grant can be applied). + // Setup an allocator in case the capsule needs additional memory in the + // grant space. + let mut allocator = GrantRegionAllocator { + processid: self.processid(), + }; - // If `panic_on_reenter` is false, we skip this error and do - // nothing with this grant. - if !panic_on_reenter { - return; - } + // Get grant data + let grant_data = self.get_grant_data(); + + // Create a wrapped objects that are passed to functor. + let mut kernel_data = self.get_kern_data(); + + // Use legacy enter + grant_data.legacy_enter(fun, panic_on_reenter, &mut kernel_data, &mut allocator) + } + + /// Note: self is borrowed as these structs contain raw process references which must not be + /// used across process lifetines. + pub fn get_grant_data(&self) -> NewGrantData<'a, T> { + // Access the grant T that is in process memory. Cannot fail, as a process grant would not + // be created if the process were not already allocated. + let grant_ptr = self.grant_mem; - // If `enter_grant` fails, we panic!() to notify the developer - // that they tried to enter the same grant twice which is - // prohibited because it would result in two mutable references - // existing for the same memory. This preserves type correctness - // (but does crash the system). - // - // ## Explanation and Rationale - // - // This panic represents a tradeoff. While it is undesirable to - // have the potential for a runtime crash in this grant region - // code, it balances usability with type correctness. The - // challenge is that calling `self.apps.iter()` is a common - // pattern in capsules to access the grant region of every app - // that is using the capsule, and sometimes it is intuitive to - // call that inside of a `self.apps.enter(app_id, |app| {...})` - // closure. However, `.enter()` means that app's grant region is - // entered, and then a naive `.iter()` would re-enter the grant - // region and cause undefined behavior. We considered different - // options to resolve this. - // - // 1. Have `.iter()` only iterate over grant regions which are - // not entered. This avoids the bug, but could lead to - // unexpected behavior, as `self.apps.iter()` will do - // different things depending on where in a capsule it is - // called. - // 2. Have the compiler detect when `.iter()` is called when a - // grant region has already been entered. We don't know of a - // viable way to implement this. - // 3. 
Panic if `.iter()` is called when a grant is already - // entered. - // - // We decided on option 3 because it balances minimizing - // surprises (`self.apps.iter()` will always iterate all grants) - // while also protecting against the bug. We expect that any - // code that attempts to call `self.apps.iter()` after calling - // `.enter()` will immediately encounter this `panic!()` and - // have to be refactored before any tests will be successful. - // Therefore, this `panic!()` should only occur at - // development/testing time. - // - // ## How to fix this error - // - // If you are seeing this panic, you need to refactor your - // capsule to not call `.iter()` or `.each()` from inside a - // `.enter()` closure. That is, you need to close the grant - // region you are currently in before trying to iterate over all - // grant regions. - panic!("Attempted to re-enter a grant region."); - }) - .ok()?; let grant_t_align = GrantDataAlign(align_of::()); let grant_t_size = GrantDataSize(size_of::()); @@ -1383,13 +1955,37 @@ impl<'a, T: Default, Upcalls: UpcallSize, AllowROs: AllowRoSize, AllowRWs: Allow grant_t_align, ); - // Parse layout of entire grant allocation using the known base pointer. + let grant_data_ptr: NonNull = unsafe { + EnteredGrantKernelManagedLayout::get_grant_data_t(grant_ptr, alloc_size, grant_t_size) + } + .cast::(); + + unsafe { + NewGrantData::new( + EnteredGrantKernelManagedLayout::get_grant_data_t_ref(grant_ptr).as_ref(), + grant_data_ptr, + self.process, + ) + } + } + + #[inline] + pub fn get_kern_data(&self) -> GrantKernelData<'a> { + let grant_ptr = self.grant_mem; + let process = self.process_ref(); + let grant_num = unsafe { + process + .lookup_grant_from_driver_num(self.driver_num) + .unwrap_unchecked() + }; + + // Determine layout of entire grant alloc // // # Safety // // Grant pointer is well aligned and points to initialized data. let layout = unsafe { - EnteredGrantKernelManagedLayout::read_from_base(grant_ptr, self.process, self.grant_num) + EnteredGrantKernelManagedLayout::read_from_base(grant_ptr, process, grant_num) }; // Get references to all of the saved upcall data. @@ -1402,39 +1998,21 @@ impl<'a, T: Default, Upcalls: UpcallSize, AllowROs: AllowRoSize, AllowRWs: Allow // is alive. // - Data is accessible for the entire duration of this immutable // reference. - // - No other mutable reference to this memory exists concurrently. - // Mutable reference to this memory are only created through the - // kernel in the syscall interface which is serialized in time with - // this call. - let (saved_upcalls_slice, saved_allow_ro_slice, saved_allow_rw_slice) = - layout.get_resource_slices(); - let grant_data = unsafe { - EnteredGrantKernelManagedLayout::offset_of_grant_data_t( - grant_ptr, - alloc_size, - grant_t_size, - ) - .cast() - .as_mut() - }; - - // Create a wrapped objects that are passed to functor. - let mut grant_data = GrantData::new(grant_data); - let kernel_data = GrantKernelData::new( + let saved_upcalls_slice = + unsafe { slice::from_raw_parts(layout.upcalls_array, Upcalls::COUNT.into()) }; + let saved_allow_ro_slice = + unsafe { slice::from_raw_parts(layout.allow_ro_array, AllowROs::COUNT.into()) }; + let saved_allow_rw_slice = + unsafe { slice::from_raw_parts(layout.allow_rw_array, AllowRWs::COUNT.into()) }; + + GrantKernelData::new( saved_upcalls_slice, saved_allow_ro_slice, saved_allow_rw_slice, self.driver_num, - self.process, - ); - // Setup an allocator in case the capsule needs additional memory in the - // grant space. 
- let mut allocator = GrantRegionAllocator { - processid: self.process.processid(), - }; - - // Call functor and pass back value. - Some(fun(&mut grant_data, &kernel_data, &mut allocator)) + // We would not have created this process grant if the process was NONE. + unsafe { self.process.proc_ref.get().unwrap_unchecked() }, + ) } } @@ -1449,7 +2027,7 @@ pub struct CustomGrant { /// Here, this is an opaque reference that Process uses to access the /// custom grant allocation. This setup ensures that Process owns the grant /// memory. - identifier: ProcessCustomGrantIdentifer, + identifier: ProcessCustomGrantIdentifier, /// Identifier for the process where this custom grant is allocated. processid: ProcessId, @@ -1460,7 +2038,7 @@ pub struct CustomGrant { impl CustomGrant { /// Creates a new `CustomGrant`. - fn new(identifier: ProcessCustomGrantIdentifer, processid: ProcessId) -> Self { + fn new(identifier: ProcessCustomGrantIdentifier, processid: ProcessId) -> Self { CustomGrant { identifier, processid, @@ -1505,9 +2083,9 @@ impl CustomGrant { // other references because the only way to create a reference // is using this `enter()` function, and it can only be called // once (because of the `&mut self` requirement). - let custom_grant = unsafe { &mut *(grant_ptr as *mut T) }; - let borrowed = GrantData::new(custom_grant); - Ok(fun(borrowed)) + let custom_grant = unsafe { GrantData::new(&mut *(grant_ptr as *mut T)) }; + + Ok(fun(custom_grant)) }) } } @@ -1522,6 +2100,8 @@ pub struct GrantRegionAllocator { } impl GrantRegionAllocator { + // FIXME: Include the RefCell here + /// Allocates a new `CustomGrant` initialized using the given closure. /// /// The closure will be called exactly once, and the result will be used to @@ -1589,8 +2169,8 @@ impl GrantRegionAllocator { /// /// The caller must initialize the memory. /// - /// Also returns a ProcessCustomGrantIdentifer to access the memory later. - fn alloc_raw(&mut self) -> Result<(ProcessCustomGrantIdentifer, NonNull), Error> { + /// Also returns a ProcessCustomGrantIdentifier to access the memory later. + fn alloc_raw(&mut self) -> Result<(ProcessCustomGrantIdentifier, NonNull), Error> { self.alloc_n_raw::(1) } @@ -1599,11 +2179,11 @@ impl GrantRegionAllocator { /// The caller is responsible for initializing the returned memory. /// /// Returns memory appropriate for storing `num_items` contiguous instances - /// of `T` and a ProcessCustomGrantIdentifer to access the memory later. + /// of `T` and a ProcessCustomGrantIdentifier to access the memory later. fn alloc_n_raw( &mut self, num_items: usize, - ) -> Result<(ProcessCustomGrantIdentifer, NonNull), Error> { + ) -> Result<(ProcessCustomGrantIdentifier, NonNull), Error> { let (custom_grant_identifier, raw_ptr) = self.alloc_n_raw_inner(num_items, size_of::(), align_of::())?; let typed_ptr = NonNull::cast::(raw_ptr); @@ -1617,7 +2197,7 @@ impl GrantRegionAllocator { num_items: usize, single_alloc_size: usize, alloc_align: usize, - ) -> Result<(ProcessCustomGrantIdentifer, NonNull), Error> { + ) -> Result<(ProcessCustomGrantIdentifier, NonNull), Error> { let alloc_size = single_alloc_size .checked_mul(num_items) .ok_or(Error::OutOfMemory)?; @@ -1668,7 +2248,11 @@ impl Self { + pub(crate) const fn new( + kernel: &'static Kernel, + driver_num: usize, + grant_index: usize, + ) -> Self { Self { kernel: kernel, driver_num: driver_num, @@ -1677,6 +2261,13 @@ impl Result, Error> { + ProcessGrant::new(self, processid) + } + /// Enter the grant for a specific process. 
     ///
     /// This creates a `ProcessGrant` which is a handle for a grant allocated
@@ -1695,6 +2286,26 @@ impl<T: Default, Upcalls: UpcallSize, AllowROs: AllowRoSize, AllowRWs: Allow
+    pub(crate) fn enter_if_not_already<F, R>(
+        &self,
+        processid: ProcessId,
+        processid_inside: ProcessId,
+        grant_data_inside: &mut GrantData<T>,
+        grant_kernel_data_inside: &GrantKernelData,
+        fun: F,
+    ) -> Result<R, Error>
+    where
+        F: FnOnce(&mut GrantData<T>, &GrantKernelData) -> R,
+    {
+        if processid != processid_inside {
+            self.enter(processid, fun)
+        } else {
+            Ok(fun(grant_data_inside, grant_kernel_data_inside))
+        }
+    }
+
     /// Enter the grant for a specific process with access to an allocator.
     ///
     /// This creates a `ProcessGrant` which is a handle for a grant allocated
@@ -1746,10 +2357,12 @@ impl<T: Default, Upcalls: UpcallSize, AllowROs: AllowRoSize, AllowRWs: Allow
-    pub fn iter(&self) -> Iter<T, Upcalls, AllowROs, AllowRWs> {
+    pub fn iter(
+        &self,
+    ) -> Iter<T, Upcalls, AllowROs, AllowRWs, impl Iterator<Item = &'static ProcEntry>> {
         Iter {
             grant: self,
-            subiter: self.kernel.get_process_iter(),
+            subiter: self.kernel.get_proc_entry_iter(),
         }
     }
 }
@@ -1761,19 +2374,23 @@ pub struct Iter<
     Upcalls: UpcallSize,
     AllowROs: AllowRoSize,
     AllowRWs: AllowRwSize,
+    SubIter: Iterator<Item = &'static ProcEntry>,
 > {
     /// The grant type to use.
     grant: &'a Grant<T, Upcalls, AllowROs, AllowRWs>,
 
     /// Iterator over valid processes.
-    subiter: core::iter::FilterMap<
-        core::slice::Iter<'a, Option<&'static dyn Process>>,
-        fn(&Option<&'static dyn Process>) -> Option<&'static dyn Process>,
-    >,
+    subiter: SubIter,
 }
 
-impl<'a, T: Default, Upcalls: UpcallSize, AllowROs: AllowRoSize, AllowRWs: AllowRwSize> Iterator
-    for Iter<'a, T, Upcalls, AllowROs, AllowRWs>
+impl<
+        'a,
+        T: Default,
+        Upcalls: UpcallSize,
+        AllowROs: AllowRoSize,
+        AllowRWs: AllowRwSize,
+        SubIter: Iterator<Item = &'static ProcEntry>,
+    > Iterator for Iter<'a, T, Upcalls, AllowROs, AllowRWs, SubIter>
 {
     type Item = ProcessGrant<'a, T, Upcalls, AllowROs, AllowRWs>;
 
@@ -1782,7 +2399,850 @@ impl<'a, T: Default, Upcalls: UpcallSize, AllowROs: AllowRoSize, AllowRWs: Allow
         // Get the next `ProcessId` from the kernel processes array that is
         // setup to use this grant. Since the iterator itself is saved, calling
         // this function again will start where we left off.
-        self.subiter
-            .find_map(|process| ProcessGrant::new_if_allocated(grant, process))
+        // SAFETY: subiter will only provide valid process entries
+        unsafe {
+            self.subiter
+                .find_map(|process| ProcessGrant::new_if_allocated(grant, process))
+        }
+    }
+}
+
+/// A liveness tracker for a process
+#[derive(Copy, Clone)]
+pub struct PLiveTracker {
+    /// Comparing to the usize in this reference reveals whether 'r' should still exist
+    compare_to: &'static crate::kernel::ProcEntry,
+    /// What the value should be
+    compare: usize,
+}
+
+/// Strictly, this should never roll over in order to ensure safety (for the user).
+/// However, if we are happy with a probabilistic defense we could make this smaller for a given
+/// build.
+type ATrackerInt = usize;
+
+/// A liveness tracker for a single allowed ro/rw buffer or callback.
+/// Must be paired with a PLiveTracker, as grants are no longer valid upon process death.
+#[derive(Copy, Clone)]
+pub struct ALiveTracker {
+    compare_to: NonNull<Cell<ATrackerInt>>,
+    compare: ATrackerInt,
+}
+
+impl PartialEq for PLiveTracker {
+    fn eq(&self, other: &Self) -> bool {
+        (self.compare_to as *const crate::kernel::ProcEntry
+            == other.compare_to as *const crate::kernel::ProcEntry)
+            && (self.compare == other.compare)
+    }
+}
+
+impl PartialEq for ALiveTracker {
+    fn eq(&self, other: &Self) -> bool {
+        self.compare_to.eq(&other.compare_to) && (self.compare == other.compare)
+    }
+}
+
+// To make non-pref types fit in with pref.
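+// For example, `PRef::new_from_static` (further below) tracks against these dummies via
+// `Track::global_live`, so a reference to a static reports live forever without needing a
+// real process entry.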
+static DUMMY_ENTRY: ProcEntry = ProcEntry { + valid_proc_id: Cell::new(0), + proc_ref: Cell::new(None), +}; +struct TrackerIntWrapper(Cell); + +// We can imagine the kernel as one large grant that will never be out of lifetime +static KERNEL_GRANT: TrackerIntWrapper = TrackerIntWrapper(Cell::new(0 as ATrackerInt)); + +// Tock is single threaded +unsafe impl Sync for ProcEntry {} +unsafe impl Sync for TrackerIntWrapper {} + +fn get_dummy_entry() -> &'static ProcEntry { + &DUMMY_ENTRY +} +fn get_kernel_grant() -> &'static Cell { + &KERNEL_GRANT.0 +} + +/// Can verify whether a reference is still valid. +/// If it is, it will continue to be valid in the scope it was verified. +pub trait Track: Copy + Clone + PartialEq { + /// Safety: implementors may have requirements on where this gets called + unsafe fn is_still_alive(&self) -> bool; + + fn global_live() -> Self; + fn global_dead() -> Self; +} + +impl PLiveTracker { + #[inline] + pub fn new(with_proc: &'static crate::kernel::ProcEntry) -> Self { + Self::new_with_id(with_proc, with_proc.valid_proc_id.get()) + } + + #[inline] + pub fn new_with_id(with_proc: &'static crate::kernel::ProcEntry, id: usize) -> Self { + Self { + compare_to: with_proc, + compare: id, + } + } + + #[inline] + pub(crate) fn get_proc(&self) -> Option<&'static dyn process::Process> { + if unsafe { self.is_still_alive() } { + self.compare_to.proc_ref.get() + } else { + None + } + } +} + +impl Track for PLiveTracker { + #[inline] + unsafe fn is_still_alive(&self) -> bool { + self.compare == (*self.compare_to).valid_proc_id.get() + } + + #[inline] + fn global_live() -> Self { + // 0 is the expected value of the entry. + Self::new_with_id(get_dummy_entry(), 0) + } + + #[inline] + fn global_dead() -> Self { + // 0 is the expected value of the entry. Any non-zero value would do here. + Self::new_with_id(get_dummy_entry(), 1) + } +} + +impl ALiveTracker { + #[inline] + pub fn new(track: &Cell) -> Self { + Self::new_with_compare(track, track.get()) + } + + #[inline] + pub fn new_with_compare(track: &Cell, compare: ATrackerInt) -> Self { + Self { + compare_to: NonNull::from(track), + compare, + } + } +} + +impl Track for ALiveTracker { + /// Safety: can only be called AFTER the process has been checked for liveness + #[inline] + unsafe fn is_still_alive(&self) -> bool { + unsafe { self.compare_to.as_ref().get() == self.compare } + } + + #[inline] + fn global_live() -> Self { + Self::new_with_compare(get_kernel_grant(), 0) + } + + #[inline] + fn global_dead() -> Self { + Self::new_with_compare(get_kernel_grant(), 1) + } +} + +/// Both a tracker for process liveness, and allow liveness. 
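+/// A sketch of assembling one by hand (illustrative only; `entry` is some valid
+/// `&'static ProcEntry` and `live` an allow counter `&Cell<ATrackerInt>`):
+///
+/// ```ignore
+/// let t = DualTracker::new(PLiveTracker::new(entry), ALiveTracker::new(live));
+/// // Dead as soon as either the process ID or the allow counter rotates.
+/// assert!(unsafe { t.is_still_alive() });
+/// ```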
+#[derive(Copy, Clone, PartialEq)]
+pub struct DualTracker {
+    ptracker: PLiveTracker,
+    atracker: ALiveTracker,
+}
+
+impl DualTracker {
+    pub fn new(ptracker: PLiveTracker, atracker: ALiveTracker) -> Self {
+        Self { ptracker, atracker }
+    }
+    pub(crate) fn get_proc(&self) -> Option<&'static dyn process::Process> {
+        let result = self.ptracker.get_proc()?;
+        unsafe {
+            if !self.atracker.is_still_alive() {
+                return None;
+            }
+        }
+        Some(result)
+    }
+}
+
+impl Track for DualTracker {
+    #[inline]
+    unsafe fn is_still_alive(&self) -> bool {
+        // Safety: if the process is still alive, it is then safe to check the allow for liveness
+        self.ptracker.is_still_alive() && self.atracker.is_still_alive()
+    }
+
+    fn global_live() -> Self {
+        Self::new(PLiveTracker::global_live(), ALiveTracker::global_live())
+    }
+
+    fn global_dead() -> Self {
+        Self::new(PLiveTracker::global_dead(), ALiveTracker::global_live())
+    }
+}
+
+/// A (shared) reference to a T that exists for as long as a specific tracker does.
+/// We offer a cloneable and a non-cloneable version.
+/// The latter is useful as it can be safely converted to/from `&'static mut`.
+pub struct PRefBase<T: ?Sized, Trk: Track, const CLONE: bool> {
+    r: NonNull<T>,
+    tracker: Trk,
+}
+
+pub type PRef<T> = PRefBase<T, PLiveTracker, true>;
+pub type PRefNoClone<T> = PRefBase<T, PLiveTracker, false>;
+
+pub type ARef<T> = PRefBase<T, DualTracker, true>;
+pub type ARefNoClone<T> = PRefBase<T, DualTracker, false>;
+
+impl<T: ?Sized, Trk: Track> Clone for PRefBase<T, Trk, true> {
+    #[inline]
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+impl<T: ?Sized, Trk: Track> Copy for PRefBase<T, Trk, true> {}
+
+/// Same as a `PRef` but has been checked for liveness in a scope in which a process will never
+/// be de-allocated.
+/// Derefs into `&T`
+#[repr(transparent)]
+pub struct LivePRefBase<'a, T: ?Sized, Trk: Track, const CLONE: bool> {
+    r: PRefBase<T, Trk, CLONE>,
+    _phantom: PhantomData<&'a T>,
+}
+
+pub type LivePRef<'a, T> = LivePRefBase<'a, T, PLiveTracker, true>;
+pub type LivePRefNoClone<'a, T> = LivePRefBase<'a, T, PLiveTracker, false>;
+pub type LiveARef<'a, T> = LivePRefBase<'a, T, DualTracker, true>;
+pub type LiveARefNoClone<'a, T> = LivePRefBase<'a, T, DualTracker, false>;
+
+impl<'a, T: ?Sized, Trk: Track> Clone for LivePRefBase<'a, T, Trk, true> {
+    #[inline]
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+impl<'a, T: ?Sized, Trk: Track> Copy for LivePRefBase<'a, T, Trk, true> {}
+
+impl<T> Default for PRef<T> {
+    fn default() -> Self {
+        Self {
+            r: NonNull::dangling(),
+            tracker: PLiveTracker::global_dead(),
+        }
+    }
+}
+
+fn zero_sized_dangling() -> NonNull<[u8]> {
+    NonNull::slice_from_raw_parts(NonNull::dangling(), 0)
+}
+
+// Defaults for the process slices to make it easier to have values before initialization.
+// The default PRef is global dead to a zero-length slice +// The default LivePRef is a global live to a zero-length slice + +impl Default for PRefBase { + fn default() -> Self { + // SAFETY: safe to transmute from [u8] to ReadableProcessSlice for purpose of a dangling + // reference + unsafe { + Self { + r: core::mem::transmute(zero_sized_dangling()), + tracker: DualTracker::global_dead(), + } + } + } +} + +impl Default for PRefBase { + fn default() -> Self { + // SAFETY: safe to transmute from [u8] to WriteableProcessSlice for purpose of a dangling + // reference + unsafe { + Self { + r: core::mem::transmute(zero_sized_dangling()), + tracker: DualTracker::global_dead(), + } + } + } +} + +impl<'a, const CLONE: bool> Default for LivePRefBase<'a, ReadableProcessSlice, DualTracker, CLONE> { + fn default() -> Self { + // SAFETY: safe to transmute from [u8] to ReadableProcessSlice for purpose of a dangling + // reference + unsafe { + Self { + r: PRefBase:: { + r: core::mem::transmute(zero_sized_dangling()), + tracker: DualTracker::global_live(), + }, + _phantom: Default::default(), + } + } + } +} + +impl<'a, const CLONE: bool> Default + for LivePRefBase<'a, WriteableProcessSlice, DualTracker, CLONE> +{ + fn default() -> Self { + // SAFETY: safe to transmute from [u8] to WriteableProcessSlice for purpose of a dangling + // reference + unsafe { + Self { + r: PRefBase:: { + r: core::mem::transmute(zero_sized_dangling()), + tracker: DualTracker::global_live(), + }, + _phantom: Default::default(), + } + } + } +} + +impl PRefBase { + /// Safety: the caller guarantees this data would be valid to cast to a shared reference as + /// long as the ProcEntry keeps its ID the same. + #[inline] + pub(crate) unsafe fn new( + data: NonNull, + with_proc: &'static crate::kernel::ProcEntry, + ) -> Self { + Self::new_with_tracker(data, PLiveTracker::new(with_proc)) + } + + /// Get a ProcessID for this PRef. + /// This always succeeds, but the resulting ProcessID may be invalid. + /// PRef does not contain a kernel reference to keep them small so the caller must provide it. + #[inline] + pub fn get_process_id(&self, kernel: &'static Kernel) -> ProcessId { + ProcessId::new( + kernel, + self.tracker.compare, + // Note: if this is the DUMMY_ENTRY, then the index will always be + // invalid and so the process ID will return None when index is called. + kernel.index_of_proc_entry(self.tracker.compare_to), + ) + } +} + +impl PRefBase { + #[inline] + pub unsafe fn as_ref_unchecked(&self) -> &T { + self.r.as_ref() + } + + /// Get the raw pointer from this PRef. It is only safe to dereference it if the tracker is + /// valid. + #[inline] + pub fn get_ptr(&self) -> NonNull { + self.r + } + + /// To allow code to be authored once for both user buffers and kernel buffers, we can think + /// of the kernel as process 0 with static lifetime and one large allow + pub fn new_from_static(data: &'static T) -> Self { + // Safety: types that ascribe to static will always be live. + unsafe { Self::new_with_tracker(data.into(), Trk::global_live()) } + } + + pub fn try_unwrap_static(self) -> Result<&'static T, ()> { + if self.tracker.eq(&Trk::global_live()) { + Ok(unsafe { + // Safety: the only time we use the global_live tracker is the method above where + // the reference was originally a static ref. + self.r.as_ref() + }) + } else { + Err(()) + } + } + + /// Safety: the caller guarantees this data would be valid for as long as the tracker returns + /// true. 
+ pub(crate) unsafe fn new_with_tracker(data: NonNull, tracker: Trk) -> Self { + Self { r: data, tracker } + } + + #[inline] + pub fn is_still_alive(&self) -> bool { + unsafe { self.tracker.is_still_alive() } + } + + /// A different version of try_into_live that also works with non-cloneable references + /// Prefer try_into_live(). + #[inline] + pub fn with_live>) -> R>(self, f: F) -> R { + let option_live = if self.is_still_alive() { + Some(LivePRefBase { + r: self, + _phantom: PhantomData, + }) + } else { + None + }; + f(option_live) + } + + /// Unchecked version of try_into_live. The caller guarantees bounding the lifetime 'a + #[inline] + pub unsafe fn into_live_unchecked<'a>(self) -> LivePRefBase<'a, T, Trk, CLONE> { + LivePRefBase { + r: self, + _phantom: PhantomData, + } + } + + /// Are the _pointers_ equal (not what they point to) + #[inline] + pub fn ptr_eq(&self, other: &Self) -> bool { + self.r == other.r && self.tracker == other.tracker + } +} + +impl PRefBase { + /// Get a lifetime bounded version of the reference. Will return None if the process has been + /// freed since this reference was created. + /// This on its own is safe to call, but should only be done so from a context where a process + /// cannot be reclaim-ed. The actual "Unsafe" part is whatever does that, and should take care + /// not to have any LivePRef types AT ALL in scope, nor be callable from any context that does. + /// The way that is guaranteed is to have such reclaims only be done from the main kernel loop, + /// and having calling the main kernel loop be unsafe with that invariant. + #[inline] + pub fn try_into_live(&self) -> Option> { + if self.is_still_alive() { + Some(LivePRefBase { + r: *self, + _phantom: PhantomData, + }) + } else { + None + } + } + + /// Borrow as live without allocating a new object. + /// See try_into_live. + #[inline] + pub fn try_borrow_live(&self) -> Option<&LivePRefBase> { + if self.is_still_alive() { + Some( + // Safety: LivePRefBase is a transparent wrapper around PRefBase with the + // only added invariant that it has been checked for liveness (which we + // have just done) + // Because the type is immutable (through a non-mut reference) this cannot + // change. + unsafe { core::mem::transmute(self) }, + ) + } else { + None + } + } + + pub fn as_noclone(self) -> PRefBase { + PRefBase { + r: self.r, + tracker: self.tracker, + } + } +} + +impl PRefBase { + /// Like new_from_static, we can consider the kernel a special process 0 with static lifetime. + pub fn new_from_static_mut(data: &'static mut T) -> Self { + // Safety: types that ascribe to static will always be live. + // This version of PRef cannot be cloned, so also obeys the requirements. + unsafe { Self::new_with_tracker(data.into(), Trk::global_live()) } + } + + pub fn try_unwrap_static_mut(mut self) -> Result<&'static mut T, ()> { + if self.tracker.eq(&Trk::global_live()) { + Ok(unsafe { + // Safety: the only time we use the global_live tracker is the method above where + // the reference was originally a static ref. + // Because we never allowed this to be cloned, it is OK to still be mut. + self.r.as_mut() + }) + } else { + Err(()) + } + } +} + +// Convert from &'static mut [u8] to a type that is similar to what might come from userspace. 
+// Allows same code path for both userspace and kernel +impl From<&'static mut [u8]> for ARefNoClone { + fn from(value: &'static mut [u8]) -> Self { + let as_pref = ARefNoClone::<[u8]>::new_from_static_mut(value); + // Safety: We originally had a &mut[u8], so a <[Cell]> is safe to transmute to. + unsafe { transmute(as_pref) } + } +} + +// Convert back. If this was not actually a &'static mut ref originally, we get a zero-length +// slice. +impl From> for &'static mut [u8] { + fn from(value: ARefNoClone) -> Self { + match value.try_unwrap_static_mut() { + Ok(proc_slice) => { + // SAFETY: the fact we just unwrapped from a PRefNoClone means that this was constructed + // from a mutable reference using new_from_static_mut. It is therefore safe to upgrade + // to a mut. + unsafe { transmute(proc_slice) } + } + Err(_) => { + // Safety: is valid for all 0 reads a zero slice would allow + unsafe { zero_sized_dangling().as_mut() } + } + } + } +} + +impl From<&'static T> for PRefBase { + #[inline] + fn from(value: &'static T) -> Self { + PRefBase::new_from_static(value) + } +} + +impl From<&'static mut T> for PRefBase { + fn from(value: &'static mut T) -> Self { + PRefBase::new_from_static_mut(value) + } +} + +// We can always convert back to the type that needs checking later +impl<'a, T: ?Sized, Trk: Track, const CLONE: bool> From> + for PRefBase +{ + #[inline] + fn from(live: LivePRefBase<'a, T, Trk, CLONE>) -> Self { + live.r + } +} + +// Converting to a live reference may fail +impl<'a, T: ?Sized, Trk: Track> TryFrom<&'a PRefBase> + for LivePRefBase<'a, T, Trk, true> +{ + type Error = (); + + fn try_from(value: &'a PRefBase) -> Result { + value.try_into_live().ok_or(()) + } +} + +// The live variant can be dereferenced freely +impl<'a, T: ?Sized, Trk: Track, const CLONE: bool> Deref for LivePRefBase<'a, T, Trk, CLONE> { + type Target = T; + + #[inline] + fn deref(&self) -> &Self::Target { + // SAFETY: The lifetime on this reference is guaranteed by the kernel to not cover a + // process being freed. 
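+        // (A process is only reclaimed from the main kernel loop, which must not run while
+        // any `LivePRefBase` is in scope; see the `try_into_live` docs above.)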
+ unsafe { self.r.as_ref_unchecked() } + } +} + +impl<'a, T: ?Sized, Trk: Track, const CLONE: bool> LivePRefBase<'a, T, Trk, CLONE> { + /// Safety: the new reference must have a lifetime less than or equal to the old one + #[inline] + unsafe fn with_new_ref(self, new: &U) -> LivePRefBase<'a, U, Trk, CLONE> { + LivePRefBase { + r: PRefBase::new_with_tracker(NonNull::from(new), self.r.tracker), + _phantom: PhantomData, + } + } + + #[inline] + pub fn map &U>( + orig: Self, + f: F, + ) -> LivePRefBase<'a, U, Trk, CLONE> { + unsafe { + // Safe to cast to reference as this type guarantees the process is still live + let r = orig.r.r.as_ref(); + // Safety: f gives the guarantee for with_new_ref + orig.with_new_ref(f(r)) + } + } +} + +impl<'a, T: ?Sized, const CLONE: bool> LivePRefBase<'a, T, DualTracker, CLONE> { + /// Safety: the caller guarantees this data would be valid to cast to a shared reference as + /// long as the ProcEntry keeps its ID the same and the tracker does not change + #[inline] + pub(crate) unsafe fn new( + data: NonNull, + proc_tracker: PLiveTracker, + allow_tracker: &Cell, + ) -> Self { + Self { + r: PRefBase::new_with_tracker( + data, + DualTracker::new(proc_tracker, ALiveTracker::new(allow_tracker)), + ), + _phantom: PhantomData, + } + } +} + +impl<'a, T: ?Sized, const CLONE: bool> LivePRefBase<'a, T, PLiveTracker, CLONE> { + /// Safety: the caller guarantees this data would be valid to cast to a shared reference as + /// long as the ProcEntry keeps its ID the same AND that the process is currently live. + #[inline] + pub unsafe fn new(data: NonNull, with_proc: &'static crate::kernel::ProcEntry) -> Self { + Self { + r: PRefBase::new(data, with_proc), + _phantom: PhantomData, + } + } + + #[inline] + /// See get_process_id for PRef + pub fn get_process_id(&self, kernel: &'static Kernel) -> ProcessId { + self.r.get_process_id(kernel) + } +} + +impl<'a, T: ?Sized, Trk: Track> LivePRefBase<'a, T, Trk, true> { + #[inline] + pub fn map_split (&U, &V)>( + orig: Self, + f: F, + ) -> ( + LivePRefBase<'a, U, Trk, true>, + LivePRefBase<'a, V, Trk, true>, + ) { + let split = f(orig.deref()); + unsafe { (orig.with_new_ref(split.0), orig.with_new_ref(split.1)) } + } + + #[inline] + pub fn as_noclone(self) -> LivePRefBase<'a, T, Trk, false> { + LivePRefBase { + r: self.r.as_noclone(), + _phantom: self._phantom, + } + } +} + +impl PanicDeref for PRefBase { + type Target = T; + + /// This should be avoided in favour of try_into_live. This is provided + /// for interfaces that assume they are otherwise ensuring that the + /// PRef can only ever be live, but need this checked. + fn panic_deref(&self) -> &Self::Target { + if !self.is_still_alive() { + panic!() + } + unsafe { self.as_ref_unchecked() } + } +} + +// Blanket implementation of important traits + +// Length +impl BufLength for PRefBase +where + NonNull: BufLength, +{ + fn buf_len(&self) -> usize { + self.get_ptr().buf_len() + } +} + +#[cfg(test)] +mod tests { + use crate::collections::list::PanicDeref; + use crate::grant::{LivePRef, PLiveTracker, PRef, Track}; + use crate::kernel::ID_INVALID; + use crate::ProcEntry; + use core::cell::Cell; + use core::ops::Deref; + use core::ptr::NonNull; + use std::mem::transmute; + + // Tests of the PBuf types. NOTE: This does not test the core kernel allocation logic for now as + // that is very tied up with process logic. + // Instead, this tries to test the PRef as standalone types. + + // A proc entry for testing. Normally, this would be from the main process array. + thread_local! 
+    {
+        static TEST_PROC_ENTRY: ProcEntry = const { ProcEntry {
+            valid_proc_id: Cell::new(0),
+            proc_ref: Cell::new(None),
+        }};
+    }
+
+    // This is only for testing. Normally, the kernel would guarantee no LivePRefs exist when this
+    // is changed. Tests should maintain that.
+    fn change_test_id(id: usize) {
+        TEST_PROC_ENTRY.with(|entry| entry.valid_proc_id.set(id));
+    }
+
+    // Construct a PRef for testing. Normally, the NonNull would be provided by the grant
+    // allocator.
+    // This is only for testing. The caller should ensure that ptr never aliases with a mutable
+    // reference and stays live for the duration of the test.
+    fn make_test_pref<T>(ptr: NonNull<T>) -> PRef<T> {
+        TEST_PROC_ENTRY.with(|entry| unsafe { PRef::new(ptr, transmute(entry)) })
+    }
+
+    #[test]
+    fn simple_test() {
+        // Mock process creation:
+        change_test_id(1);
+
+        // Allocation.
+        let some_val: u32 = 123;
+        let r = make_test_pref(NonNull::from(&some_val));
+
+        // Can be converted into a LivePRef
+        {
+            // Returns an option as it might fail
+            let as_live = r.try_into_live();
+            // Unwrap should succeed at this point
+            let as_live = as_live.unwrap();
+            // Can be used as a &u32
+            assert_eq!(*as_live, 123);
+        }
+
+        // Mock process destruction
+        change_test_id(ID_INVALID);
+
+        // We can try converting into live again:
+        {
+            let as_live = r.try_into_live();
+            // But it will fail as the process no longer exists
+            assert!(as_live.is_none());
+        }
+    }
+
+    struct LessBoringType {
+        first: u32,
+        second: u32,
+    }
+
+    #[test]
+    fn test_mapping_and_conversion() {
+        // Mock process creation:
+        change_test_id(1);
+
+        let some_val = LessBoringType {
+            first: 66,
+            second: 77,
+        };
+
+        let r = make_test_pref(NonNull::from(&some_val));
+        let r_first: PRef<u32>;
+        {
+            let as_live = r.try_into_live().unwrap();
+            // Accessing items is easy:
+            assert_eq!(as_live.first, 66);
+            assert_eq!(as_live.second, 77);
+            // However, a reference like:
+            let _normal_ref: &u32 = &as_live.deref().first;
+            // Is legal to construct, but has a limited lifetime that might not be flexible enough.
+            // Instead, using map we can obtain a LivePRef to something like a member
+            let mapped_ref: LivePRef<u32> = LivePRef::map(as_live, |r| &r.first);
+            // And a live pref can be converted back to a PRef, which has a more flexible lifetime
+            r_first = mapped_ref.into();
+        }
+
+        let pair: (PRef<u32>, PRef<u32>);
+
+        // map_split can be used (possibly multiple times) to extract multiple references
+        {
+            let as_live = r.try_into_live().unwrap();
+            let split = LivePRef::map_split(as_live, |r| (&r.first, &r.second));
+            pair = (split.0.into(), split.1.into());
+        }
+
+        // All the PRefs can be used again while the process is live...
+        {
+            assert_eq!(*r_first.try_into_live().unwrap(), 66);
+            assert_eq!(*pair.0.try_into_live().unwrap(), 66);
+            assert_eq!(*pair.1.try_into_live().unwrap(), 77);
+        }
+
+        // Mock process destruction
+        change_test_id(ID_INVALID);
+
+        // ...but stop working after
+        {
+            assert!(r_first.try_into_live().is_none());
+            assert!(pair.0.try_into_live().is_none());
+            assert!(pair.1.try_into_live().is_none());
+        }
+    }
+
+    static SOME_GLOBAL: u32 = 7;
+
+    #[test]
+    fn misc_lifetimes() {
+        // We could consider the kernel as a process that never dies
+        let static_ref = &SOME_GLOBAL;
+        // So if code expects a PRef and you want to pass a global static, this exists:
+        let as_pref = PRef::new_from_static(static_ref);
+        assert_eq!(*as_pref.try_into_live().unwrap(), 7);
+        // Such a PRef is always alive.
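+
+        // (Sketch) A static-backed PRef can also be unwrapped back into the original
+        // `&'static` reference via `try_unwrap_static`:
+        assert_eq!(as_pref.try_unwrap_static(), Ok(static_ref));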
+
+        // Rather than using an Option for a PRef that might not be there, a PRef can start out
+        // dead. This is the default. It will never be alive.
+        let default_pref: PRef<u32> = Default::default();
+        assert!(default_pref.try_into_live().is_none());
+    }
+
+    #[test]
+    fn trackers() {
+        // PRef uses a tracker to check if it is live. If you want to make your own types that
+        // track process liveness, they are available:
+
+        // Always alive
+        let live_tracker = PLiveTracker::global_live();
+        assert!(unsafe { live_tracker.is_still_alive() });
+
+        // Always dead
+        let live_tracker = PLiveTracker::global_dead();
+        assert!(unsafe { !live_tracker.is_still_alive() });
+
+        // Tracks a process:
+        change_test_id(1);
+        unsafe {
+            unsafe fn with_id(id: usize) -> PLiveTracker {
+                TEST_PROC_ENTRY.with(|entry| PLiveTracker::new_with_id(transmute(entry), id))
+            }
+
+            let live_tracker1 = with_id(1);
+            let live_tracker2 = with_id(2);
+
+            assert!(live_tracker1.is_still_alive());
+            assert!(!live_tracker2.is_still_alive());
+
+            change_test_id(ID_INVALID);
+
+            assert!(!live_tracker1.is_still_alive());
+            assert!(!live_tracker2.is_still_alive());
+        }
+    }
+
+    #[test]
+    #[should_panic]
+    fn test_panic() {
+        change_test_id(1);
+        let some_val: u32 = 123;
+        let r = make_test_pref(NonNull::from(&some_val));
+        change_test_id(ID_INVALID);
+        r.panic_deref();
+    }
+}
diff --git a/kernel/src/hil/time.rs b/kernel/src/hil/time.rs
index 694b0ed99..35d1ab029 100644
--- a/kernel/src/hil/time.rs
+++ b/kernel/src/hil/time.rs
@@ -58,6 +58,8 @@ pub trait Ticks: Clone + Copy + From<u32> + fmt::Debug + Ord + PartialOrd + Eq {
     /// Scales the ticks by the specified numerator and denominator. If the resulting value would
     /// be greater than `u32::MAX`, `u32::MAX` is returned instead
     fn saturating_scale(self, numerator: u32, denominator: u32) -> u32;
+
+    const ZERO: Self;
 }
 
 /// Represents a clock's frequency in Hz, allowing code to transform
@@ -68,6 +70,15 @@ pub trait Frequency {
     fn frequency() -> u32;
 }
 
+pub struct Freq<const F: u32>();
+
+impl<const F: u32> Frequency for Freq<F> {
+    #[inline]
+    fn frequency() -> u32 {
+        F
+    }
+}
+
 /// Represents a moment in time, obtained by calling `now`.
 pub trait Time {
     /// The number of ticks per second
@@ -429,6 +440,8 @@ impl Ticks for Ticks32 {
             u32::MAX
         }
     }
+
+    const ZERO: Self = Self { 0: 0 };
 }
 
 impl PartialOrd for Ticks32 {
@@ -510,6 +523,8 @@ impl Ticks for Ticks24 {
             u32::MAX
         }
     }
+
+    const ZERO: Self = Self { 0: 0 };
 }
 
 impl PartialOrd for Ticks24 {
@@ -603,6 +618,8 @@ impl Ticks for Ticks16 {
             u32::MAX
         }
    }
+
+    const ZERO: Self = Self { 0: 0 };
 }
 
 impl PartialOrd for Ticks16 {
@@ -692,6 +709,8 @@ impl Ticks for Ticks64 {
             u32::MAX
         }
     }
+
+    const ZERO: Self = Self { 0: 0 };
 }
 
 impl PartialOrd for Ticks64 {
@@ -714,6 +733,36 @@ impl PartialEq for Ticks64 {
 
 impl Eq for Ticks64 {}
 
+/// Basic implementation
+impl Time for () {
+    type Frequency = Freq1MHz;
+    type Ticks = Ticks32;
+
+    fn now(&self) -> Self::Ticks {
+        0.into()
+    }
+}
+
+/// Basic implementation
+impl<'a> Alarm<'a> for () {
+    fn set_alarm_client(&self, _client: &'a dyn AlarmClient) {}
+    fn set_alarm(&self, _reference: Self::Ticks, _dt: Self::Ticks) {}
+    fn get_alarm(&self) -> Self::Ticks {
+        0.into()
+    }
+    fn disarm(&self) -> Result<(), ErrorCode> {
+        Ok(())
+    }
+
+    fn is_armed(&self) -> bool {
+        false
+    }
+
+    fn minimum_dt(&self) -> Self::Ticks {
+        1.into()
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/kernel/src/hil/uart.rs b/kernel/src/hil/uart.rs
index f229f97cb..14bbfdb94 100644
--- a/kernel/src/hil/uart.rs
+++ b/kernel/src/hil/uart.rs
@@ -2,7 +2,10 @@
 //!
 //!
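+//!
+//! The zero-copy transmit path added below is driven roughly as follows (a sketch for
+//! orientation only, not part of this patch; it assumes the trait's client type parameter
+//! as used in the definitions below):
+//!
+//! ```ignore
+//! fn send<C: ZeroTransmitClient, U: ZeroTransmit<C>>(uart: &U, buf: C::Buf) {
+//!     match uart.transmit(buf) {
+//!         // Queued: completion will arrive later via `C::transmit_finish`.
+//!         Ok(None) => {}
+//!         // Completed early: the buffer comes straight back and no callback follows.
+//!         Ok(Some(_buf)) => {}
+//!         // Failed: the buffer is returned alongside the error.
+//!         Err((_buf, _code)) => {}
+//!     }
+//! }
+//! ```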
+use crate::utilities::leased_buffer::LeasedBufferCell; use crate::ErrorCode; +use core::cell::Cell; +use core::marker::PhantomData; #[derive(Copy, Clone, Debug, PartialEq)] pub enum StopBits { @@ -325,3 +328,145 @@ pub trait ReceiveAdvanced<'a>: Receive<'a> { interbyte_timeout: u8, ) -> Result<(), (ErrorCode, &'static mut [u8])>; } + +/// A zero-copy transmit interface. The client decides on the type of the buffer so the driver +/// can be agnostic. +/// Implementations are free to put bounds on `Client::BufT`, the traits `GenBuf{DMA}?{Read|Write}` +/// are intended to capture most of what would be desired to move around bytes. +pub trait ZeroTransmit { + /// Queue an operation. The callee should call `transmit_finish(buf, Result)` on `Client` + /// iff they return `Ok(None)` from this function. + /// The callee should not short circuit by calling the callback before returning, + /// instead returning `Ok(Some(buf))` if the operation completes early. + /// This is to avoid unbounded stack sizes. + /// TODO: Possibly the short circuit path should be indicated by EALREADY, or some other ERROR? + /// In the event of an error, the buffer is also returned within the `Err(...)`. + fn transmit(&self, buf: Client::Buf) -> Result, (Client::Buf, ErrorCode)>; + + /// Return the client for this transmitter. Transmitters should contain the storage for their + /// clients. + fn get_client(&self) -> &Client; +} + +/// Can act as a client for a zero-copy transmitter. +pub trait ZeroTransmitClient: Sized { + /// The type of buffer the client wishes to pass down to the transmitter. + type Buf; + /// The callback for when a transmission finishes. + /// Passes back the buffer, and the result of the transmission. + /// an `&self` can be reached using `transmitter.get_client()`. + fn transmit_finish>( + transmitter: &Transmit, + buf: Self::Buf, + res: Result<(), ErrorCode>, + ); +} + +/* A wrapper to make things that support the old interface support the new one. 
*/ + +/// A zero transmit client that calls the legacy interface +pub struct ZeroTransmitLegacyWrapper<'a> { + client: Cell>, + leased_buffer: LeasedBufferCell<'static, u8>, +} + +impl<'a> ZeroTransmitLegacyWrapper<'a> { + pub const fn new() -> Self { + Self { + client: Cell::new(None), + leased_buffer: LeasedBufferCell::new(), + } + } + + pub const fn new_with_client(client: &'a dyn TransmitClient) -> Self { + Self { + client: Cell::new(Some(client)), + leased_buffer: LeasedBufferCell::new(), + } + } + + /// Get a reference that can be used by client of the legacy interface + pub const fn transmitter_as_legacy>( + transmitter: &T, + ) -> &dyn Transmit<'a> { + TransmitZeroBridge::get(transmitter) + } +} + +impl<'a> ZeroTransmitClient for ZeroTransmitLegacyWrapper<'a> { + type Buf = &'static mut [u8]; + + fn transmit_finish>( + transmitter: &Transmit, + buf: Self::Buf, + res: Result<(), ErrorCode>, + ) { + let slf = transmitter.get_client(); + let buf = slf.leased_buffer.take_buf(buf); + if let Some(transmitter) = slf.client.get() { + transmitter.transmitted_buffer(buf, buf.len(), res) + } + } +} + +misc::overload_impl!(TransmitZeroBridge); + +impl<'a, Transmitter: ZeroTransmit>> Transmit<'a> + for TransmitZeroBridge +{ + fn set_transmit_client(&self, client: &'a dyn TransmitClient) { + self.inner.get_client().client.set(Some(client)); + } + + fn transmit_buffer( + &self, + tx_buffer: &'static mut [u8], + tx_len: usize, + ) -> Result<(), (ErrorCode, &'static mut [u8])> { + let slf = self.inner.get_client(); + let buf = slf.leased_buffer.set_lease(tx_buffer, 0..tx_len); + match self.inner.transmit(buf) { + Ok(Some(buf)) => { + // FIXME: this should probably be a deferred callback as there is no short + // circuit case supported by the legacy interface. + let buf = slf.leased_buffer.take_buf(buf); + Err((ErrorCode::ALREADY, buf)) + } + Ok(None) => Ok(()), + Err((buf, code)) => { + let buf = slf.leased_buffer.take_buf(buf); + Err((code, buf)) + } + } + } + + fn transmit_word(&self, _word: u32) -> Result<(), ErrorCode> { + todo!() + } + + fn transmit_abort(&self) -> Result<(), ErrorCode> { + todo!() + } +} + +/// A factory for a legacy wrapper. Takes a factory for the legacy client as its argument. +pub struct LegacyTransmitComponent(PhantomData); + +use crate::component::StaticComponent; +use crate::component::StaticComponentFinalize; + +crate::simple_static_component!(impl<{LegacyFactory}> for LegacyTransmitComponent:: where + { + LegacyFactory : ~const StaticComponent, + LegacyFactory : StaticComponentFinalize, + LegacyFactory::Output : TransmitClient + }, + Inherit = LegacyFactory, + Output = ZeroTransmitLegacyWrapper<'static>, + NewInput = LegacyFactory::NewInput<'a>, + FinInput = LegacyFactory::FinaliseInput, + |_slf, input, spr| super{input} { + ZeroTransmitLegacyWrapper::new_with_client(spr) + }, + |_slf, input, _spr| super{input} {} +); diff --git a/kernel/src/introspection.rs b/kernel/src/introspection.rs index fe265ff8b..ed4aba382 100644 --- a/kernel/src/introspection.rs +++ b/kernel/src/introspection.rs @@ -71,14 +71,17 @@ impl KernelInfo { count.get() } - /// Get the name of the process. - pub fn process_name( + /// Get the name of the process in a limited scope. + /// A process name may not be in static memory. 
diff --git a/kernel/src/introspection.rs b/kernel/src/introspection.rs
index fe265ff8b..ed4aba382 100644
--- a/kernel/src/introspection.rs
+++ b/kernel/src/introspection.rs
@@ -71,14 +71,17 @@ impl KernelInfo {
         count.get()
     }
 
-    /// Get the name of the process.
-    pub fn process_name(
+    /// Get the name of the process in a limited scope.
+    /// A process name may not be in static memory.
+    pub fn with_process_name<R, F: FnOnce(&str) -> R>(
         &self,
         app: ProcessId,
         _capability: &dyn ProcessManagementCapability,
-    ) -> &'static str {
-        self.kernel
-            .process_map_or("unknown", app, |process| process.get_process_name())
+        f: F,
+    ) -> R {
+        match self.kernel.get_process(app) {
+            None => f("unknown"),
+            Some(process) => f(process.get_process_name()),
+        }
     }
 
     /// Returns the number of syscalls the app has called.
diff --git a/kernel/src/kernel.rs b/kernel/src/kernel.rs
index a1d081ad7..14f7f1859 100644
--- a/kernel/src/kernel.rs
+++ b/kernel/src/kernel.rs
@@ -6,14 +6,12 @@
 //! selected by a board.
 
 use core::cell::Cell;
-use core::ptr::NonNull;
 
-use crate::capabilities;
-use crate::config;
-use crate::debug;
+use crate::cheri::CPtrOps;
+use crate::config::CONFIG;
 use crate::dynamic_deferred_call::DynamicDeferredCall;
 use crate::errorcode::ErrorCode;
-use crate::grant::{AllowRoSize, AllowRwSize, Grant, UpcallSize};
+use crate::grant::{AllowRoSize, AllowRwSize, Grant, PLiveTracker, Track, UpcallSize};
 use crate::ipc;
 use crate::memop;
 use crate::platform::chip::Chip;
@@ -23,8 +21,8 @@ use crate::platform::platform::KernelResources;
 use crate::platform::platform::{ProcessFault, SyscallDriverLookup, SyscallFilter};
 use crate::platform::scheduler_timer::SchedulerTimer;
 use crate::platform::watchdog::WatchDog;
-use crate::process::ProcessId;
 use crate::process::{self, Task};
+use crate::process::{ProcessId, ProcessLoadError};
 use crate::scheduler::{Scheduler, SchedulingDecision};
 use crate::syscall::SyscallDriver;
 use crate::syscall::{ContextSwitchReason, SyscallReturn};
@@ -32,12 +30,68 @@ use crate::syscall::{Syscall, YieldCall};
 use crate::syscall_driver::CommandReturn;
 use crate::upcall::{Upcall, UpcallId};
 use crate::utilities::cells::NumericCellExt;
+use crate::utilities::singleton_checker::SingletonChecker;
+use crate::{assert_single, capabilities};
+use crate::{config, very_simple_component};
+use crate::{debug, TIfCfg};
 
 /// Threshold in microseconds to consider a process's timeslice to be exhausted.
 /// That is, Tock will skip re-scheduling a process if its remaining timeslice
 /// is less than this threshold.
 pub(crate) const MIN_QUANTA_THRESHOLD_US: u32 = 500;
 
+pub(crate) struct Counter {
+    grant_counter: Cell<usize>,
+
+    /// Flag to mark that grants have been finalized. This means that the kernel
+    /// cannot support creating new grants because processes have already been
+    /// created and the data structures for grants have already been established.
+    /// Initialised only if the config feature "static_init" is disabled.
+    grants_finalized: Cell<bool>,
+}
+
+type StaticInitType = TIfCfg!(static_init, usize, Counter);
+pub(crate) struct StaticInit(StaticInitType);
+
+impl StaticInit {
+    const fn new(value: usize) -> Self {
+        if CONFIG.static_init {
+            StaticInit(StaticInitType::new_true(value))
+        } else {
+            StaticInit(StaticInitType::new_false(Counter {
+                grant_counter: Cell::new(value),
+                grants_finalized: Cell::new(true),
+            }))
+        }
+    }
+
+    fn get_grant_count(&self) -> usize {
+        if CONFIG.static_init {
+            *self.0.get_true_ref()
+        } else {
+            self.0.get_false_ref().grant_counter.get()
+        }
+    }
+
+    fn increment_grant_count(&self) {
+        if !CONFIG.static_init {
+            self.0.get_false_ref().grant_counter.increment();
+        }
+    }
+
+    fn get_grants_finalized(&self) -> bool {
+        if CONFIG.static_init {
+            true
+        } else {
+            self.0.get_false_ref().grants_finalized.get()
+        }
+    }
+
+    fn set_grants_finalized(&self) {
+        self.0.get_false_ref().grants_finalized.set(true);
+    }
+}
+
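To make the shape of the `TIfCfg!` selection concrete: when `static_init` is enabled the whole `Counter` collapses to a bare `usize` fixed before boot, and only the legacy path pays for runtime counting and finalization. A purely illustrative stand-in (not the real macro expansion, which is const-fn based and has no runtime discriminant):

    // Illustration only; assumes the Counter type defined above.
    enum GrantCount {
        Fixed(usize),     // CONFIG.static_init: count decided at build time
        Runtime(Counter), // legacy path: counted, then finalized, at runtime
    }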
 /// Main object for the kernel. Each board will need to create one.
 pub struct Kernel {
     /// How many "to-do" items exist at any given time. These include
@@ -45,7 +99,10 @@ pub struct Kernel {
     work: Cell<usize>,
 
     /// This holds a pointer to the static array of Process pointers.
-    processes: &'static [Option<&'static dyn process::Process>],
+    processes: &'static [ProcEntry],
+
+    /// How many slots are allocated in the processes array
+    processes_allocated: Cell<usize>,
 
     /// A counter which keeps track of how many process identifiers have been
     /// created. This is used to create new unique identifiers for processes.
@@ -54,13 +111,7 @@ pub struct Kernel {
     /// How many grant regions have been setup. This is incremented on every
     /// call to `create_grant()`. We need to explicitly track this so that when
     /// processes are created they can be allocated pointers for each grant.
-    grant_counter: Cell<usize>,
-
-    /// Flag to mark that grants have been finalized. This means that the kernel
-    /// cannot support creating new grants because processes have already been
-    /// created and the data structures for grants have already been
-    /// established.
-    grants_finalized: Cell<bool>,
+    grant_counter: StaticInit,
 }
 
 /// Enum used to inform scheduler why a process stopped executing (aka why
@@ -108,17 +159,134 @@ fn try_allocate_grant(driver: &dyn SyscallDriver, process: &dyn process::Process
     }
 }
 
+/// Prototype of a kernel. Should be used to initialise the main kernel object.
+pub struct ProtoKernel {}
+
+/// The intent is for the count to eventually be a const generic.
+/// `[generic_const_exprs]` was causing issues, so this has been converted back to a dynamic value.
+pub struct GrantCounter(usize);
+
+impl ProtoKernel {
+    /// Construct a prototype of the kernel. Grants can be allocated using this, and then it can
+    /// later be converted into a true instantiation of the kernel.
+    pub const fn new(chk: &mut SingletonChecker) -> (Self, GrantCounter) {
+        assert_single!(chk);
+        (Self {}, GrantCounter(0))
+    }
+
+    pub const fn create_grant<
+        T: Default,
+        Upcalls: UpcallSize,
+        AllowROs: AllowRoSize,
+        AllowRWs: AllowRwSize,
+    >(
+        &self,
+        kernel: &'static Kernel,
+        driver_num: usize,
+        counter: GrantCounter,
+        _capability: &dyn capabilities::MemoryAllocationCapability,
+    ) -> (Grant<T, Upcalls, AllowROs, AllowRWs>, GrantCounter) {
+        (
+            Grant::new(kernel, driver_num, counter.0),
+            GrantCounter(counter.0 + 1),
+        )
+    }
+}
+
+/// Holds both the process ID in a location that can outlive a process, and an optional reference
+/// to that process. `valid_proc_id` needs to outlive `proc_ref` so as not to leave dangling
+/// pointers to grants. The usize is set to ~0 to indicate that accessing process memory is no
+/// longer valid, even if the option is still `Some`.
+#[derive(Clone)]
+pub struct ProcEntry {
+    pub valid_proc_id: Cell<usize>,
+    pub proc_ref: Cell<Option<&'static dyn process::Process>>,
+}
+
+pub(crate) const ID_INVALID: usize = !0usize;
+
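A hedged sketch of the intended board-side flow for `ProtoKernel` (grant data types, driver numbers, and the surrounding statics are stand-ins): grants are created before the real `Kernel` exists, and the `GrantCounter` is threaded through each call so indices stay dense and are known at const-eval time.

    // Sketch only; assumes `chk: SingletonChecker` and a
    // MemoryAllocationCapability `cap` are in scope, and that KERNEL is a
    // static initialised by the component machinery.
    let (proto, counter) = ProtoKernel::new(&mut chk);
    let (console_grant, counter) = proto
        .create_grant::<ConsoleData, ConsoleUpcalls, ConsoleRos, ConsoleRws>(
            &KERNEL, CONSOLE_DRIVER_NUM, counter, &cap);
    // ...one create_grant per capsule, always passing `counter` along...
    let kernel = Kernel::new_from_proto(&PROCESSES, counter);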
+/// The type each board should allocate to hold processes. Boards should use this type, and use
+/// init_process_array to create an array, so they don't need to pay too much attention to what
+/// this type actually is.
+pub type ProcessArray<const NUM_PROCS: usize> = [ProcEntry; NUM_PROCS];
+
+very_simple_component!(impl for Kernel,
+    new_from_proto(&'static [ProcEntry], GrantCounter)
+);
+
 impl Kernel {
-    pub fn new(processes: &'static [Option<&'static dyn process::Process>]) -> Kernel {
+    pub const fn new(processes: &'static [ProcEntry]) -> Kernel {
         Kernel {
             work: Cell::new(0),
             processes,
+            processes_allocated: Cell::new(0),
             process_identifier_max: Cell::new(0),
-            grant_counter: Cell::new(0),
-            grants_finalized: Cell::new(false),
+            grant_counter: StaticInit::new(0),
+        }
+    }
+
+    pub const fn new_from_proto(processes: &'static [ProcEntry], counter: GrantCounter) -> Kernel {
+        Kernel {
+            work: Cell::new(0),
+            processes,
+            processes_allocated: Cell::new(0),
+            process_identifier_max: Cell::new(0),
+            grant_counter: StaticInit::new(counter.0),
+        }
+    }
+
+    /// Create an empty array of processes required to construct a new kernel type
+    pub const fn init_process_array<const NUM_PROCS: usize>() -> ProcessArray<NUM_PROCS> {
+        const INVALID_ENTRY: ProcEntry = ProcEntry {
+            valid_proc_id: Cell::new(ID_INVALID),
+            proc_ref: Cell::new(None),
+        };
+        [INVALID_ENTRY; NUM_PROCS]
+    }
+
+    pub(crate) fn get_next_free_proc_entry(&self) -> Result<usize, ProcessLoadError> {
+        let n = self.processes_allocated.get();
+        if n != self.processes.len() {
+            Ok(n)
+        } else {
+            Err(ProcessLoadError::NotEnoughMemory)
+        }
+    }
+
+    pub(crate) fn set_next_proc_entry_used(
+        &self,
+        proc: &'static dyn process::Process,
+    ) -> Result<(), ProcessLoadError> {
+        let index = self.processes_allocated.get();
+        debug_assert_eq!(index, proc.processid().index);
+
+        let slot = self
+            .processes
+            .get(index)
+            .ok_or(ProcessLoadError::NotEnoughMemory)?;
+
+        // Save the reference to this process in the processes array.
+        slot.proc_ref.set(Some(proc));
+        slot.valid_proc_id.set(proc.processid().id());
+        self.processes_allocated
+            .set(self.processes_allocated.get() + 1);
+        Ok(())
+    }
+
+    pub(crate) fn index_of_proc_entry(&self, entry: &ProcEntry) -> usize {
+        unsafe {
+            (entry as *const ProcEntry).offset_from(&self.processes[0] as *const ProcEntry)
+                as usize
         }
     }
 
+    /// Helper to get an iterator over just the `&dyn Process` part of the process array.
+    /// This will also return the invalid entries.
+    /// Use get_process_iter to get only the valid entries.
+    fn proc_iter(&self) -> impl Iterator<Item = &Cell<Option<&'static dyn process::Process>>> {
+        self.processes.iter().map(|entry| &entry.proc_ref)
+    }
+
     /// Something was scheduled for a process, so there is more work to do.
     ///
     /// This is only exposed in the core kernel crate.
@@ -146,6 +314,10 @@ impl Kernel {
         self.work.decrement();
     }
 
+    pub(crate) fn decrement_work_by(&self, by: usize) {
+        self.work.subtract(by);
+    }
+
     /// Something finished for a process, so we decrement how much work there is
     /// to do.
     ///
@@ -165,6 +337,57 @@ impl Kernel {
         self.work.get() == 0
     }
 
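A minimal board-side sketch of the alias and constructor above (NUM_PROCS = 4 is arbitrary; `static_init!` as in existing boards, and board `main()` is typically an unsafe fn, which the access to the mutable static relies on):

    const NUM_PROCS: usize = 4;
    static mut PROCESSES: kernel::ProcessArray<NUM_PROCS> =
        kernel::Kernel::init_process_array::<NUM_PROCS>();

    // ...later, during board initialisation:
    let board_kernel = static_init!(kernel::Kernel, kernel::Kernel::new(&PROCESSES));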
+    /// Look up a process id from the usize identifier, with an optional hint as to what
+    /// its index is. Returns None if the ID does not exist, or the hint was wrong.
+    pub(crate) fn lookup_process_id(
+        &'static self,
+        id: usize,
+        index_hint: Option<usize>,
+    ) -> Option<ProcessId> {
+        // Check for the special value which is not really a valid process ID
+        if id == ID_INVALID {
+            return None;
+        }
+        match index_hint {
+            Some(ndx) => {
+                let result = ProcessId::new(self, id, ndx);
+                match result.index() {
+                    None => None,
+                    Some(_) => Some(result),
+                }
+            }
+            None => {
+                for entry in self.processes.iter() {
+                    if entry.valid_proc_id.get() == id {
+                        if let Some(proc) = entry.proc_ref.get() {
+                            return Some(proc.processid());
+                        }
+                    }
+                }
+                None
+            }
+        }
+    }
+
+    /// Returns a reference to the process entry, if valid. This is only for grants to have a
+    /// faster path. Other users should use get_process.
+    pub(crate) fn get_process_entry(&self, processid: ProcessId) -> Option<&ProcEntry> {
+        let id = processid.id();
+        let entry = self.processes.get(processid.index)?;
+        if entry.valid_proc_id.get() == id {
+            Some(entry)
+        } else {
+            None
+        }
+    }
+
+    pub(crate) fn get_live_tracker_for(&self, processid: ProcessId) -> PLiveTracker {
+        match self.processes.get(processid.index) {
+            None => PLiveTracker::global_dead(),
+            Some(entry) => PLiveTracker::new_with_id(entry, processid.id()),
+        }
+    }
+
     /// Helper function that moves all non-generic portions of process_map_or
     /// into a non-generic function to reduce code bloat from monomorphization.
     pub(crate) fn get_process(&self, processid: ProcessId) -> Option<&dyn process::Process> {
@@ -172,16 +395,14 @@ impl Kernel {
         // However, we are not guaranteed that the app still exists at that
         // index in the processes array. To avoid additional overhead, we do the
         // lookup and check here, rather than calling `.index()`.
+        let check_id = processid.id();
         match self.processes.get(processid.index) {
-            Some(Some(process)) => {
-                // Check that the process stored here matches the identifier
-                // in the `appid`.
-                if process.processid() == processid {
-                    Some(*process)
-                } else {
-                    None
-                }
-            }
+            // Check that the process stored here matches the identifier
+            // in the `appid`.
+            Some(ProcEntry {
+                valid_proc_id: id,
+                proc_ref: proc,
+            }) if id.get() == check_id => proc.get(),
             _ => None,
         }
     }
@@ -242,29 +463,37 @@ impl Kernel {
     where
         F: FnMut(&dyn process::Process),
     {
-        for process in self.processes.iter() {
-            match process {
+        for process in self.proc_iter() {
+            match process.get() {
                 Some(p) => {
-                    closure(*p);
+                    closure(p);
                 }
                 None => {}
             }
         }
     }
 
+    pub(crate) fn get_proc_entry_iter(&self) -> impl Iterator<Item = &'static ProcEntry> {
+        fn filter(item: &'static ProcEntry) -> Option<&'static ProcEntry> {
+            if item.valid_proc_id.get() != ID_INVALID {
+                Some(item)
+            } else {
+                None
+            }
+        }
+        self.processes.iter().filter_map(filter)
+    }
+
     /// Returns an iterator over all processes loaded by the kernel
     pub(crate) fn get_process_iter(
         &self,
-    ) -> core::iter::FilterMap<
-        core::slice::Iter<'_, Option<&'static dyn process::Process>>,
-        fn(&Option<&'static dyn process::Process>) -> Option<&'static dyn process::Process>,
-    > {
+    ) -> impl Iterator<Item = &'static dyn process::Process> + '_ {
         fn keep_some(
-            &x: &Option<&'static dyn process::Process>,
+            x: &Cell<Option<&'static dyn process::Process>>,
         ) -> Option<&'static dyn process::Process> {
-            x
+            x.get()
         }
-        self.processes.iter().filter_map(keep_some)
+        self.proc_iter().filter_map(keep_some)
     }
 
     /// Run a closure on every valid process.
This will iterate the array of @@ -276,31 +505,24 @@ impl Kernel { pub fn process_each_capability( &'static self, _capability: &dyn capabilities::ProcessManagementCapability, - mut closure: F, + closure: F, ) where F: FnMut(&dyn process::Process), { - for process in self.processes.iter() { - match process { - Some(p) => { - closure(*p); - } - None => {} - } - } + self.process_each(closure) } /// Run a closure on every process, but only continue if the closure returns `None`. That is, /// if the closure returns any non-`None` value, iteration stops and the value is returned from /// this function to the called. - pub(crate) fn process_until(&self, closure: F) -> Option + pub(crate) fn process_until(&self, mut closure: F) -> Option where - F: Fn(&dyn process::Process) -> Option, + F: FnMut(&dyn process::Process) -> Option, { - for process in self.processes.iter() { - match process { + for process in self.proc_iter() { + match process.get() { Some(p) => { - let ret = closure(*p); + let ret = closure(p); if ret.is_some() { return ret; } @@ -318,9 +540,9 @@ impl Kernel { /// This is needed for `ProcessId` itself to implement the `.index()` command to /// verify that the referenced app is still at the correct index. pub(crate) fn processid_is_valid(&self, appid: &ProcessId) -> bool { - self.processes.get(appid.index).map_or(false, |p| { - p.map_or(false, |process| process.processid().id() == appid.id()) - }) + self.processes + .get(appid.index) + .map_or(false, |p| p.valid_proc_id.get() == appid.id()) } /// Create a new grant. This is used in board initialization to setup grants @@ -344,13 +566,13 @@ impl Kernel { driver_num: usize, _capability: &dyn capabilities::MemoryAllocationCapability, ) -> Grant { - if self.grants_finalized.get() { + if self.grant_counter.get_grants_finalized() { panic!("Grants finalized. Cannot create a new grant."); } // Create and return a new grant. - let grant_index = self.grant_counter.get(); - self.grant_counter.increment(); + let grant_index = self.grant_counter.get_grant_count(); + self.grant_counter.increment_grant_count(); Grant::new(self, driver_num, grant_index) } @@ -362,8 +584,8 @@ impl Kernel { /// In practice, this is called when processes are created, and the process /// memory is setup based on the number of current grants. pub(crate) fn get_grant_count_and_finalize(&self) -> usize { - self.grants_finalized.set(true); - self.grant_counter.get() + self.grant_counter.set_grants_finalized(); + self.grant_counter.get_grant_count() } /// Returns the number of grants that have been setup in the system and @@ -403,8 +625,8 @@ impl Kernel { /// function, since capsules should not be able to arbitrarily restart all /// apps. pub fn hardfault_all_apps(&self, _c: &C) { - for p in self.processes.iter() { - p.map(|process| { + for p in self.proc_iter() { + p.get().map(|process| { process.set_fault_state(); }); } @@ -620,7 +842,7 @@ impl Kernel { scheduler_timer.arm(); let context_switch_reason = process.switch_to(); scheduler_timer.disarm(); - chip.mpu().disable_app_mpu(); + process.disable_mmu(); // Now the process has returned back to the kernel. Check // why and handle the process as appropriate. @@ -822,7 +1044,16 @@ impl Kernel { } Syscall::Yield { which, address } => { if config::CONFIG.trace_syscalls { - debug!("[{:?}] yield. which: {}", process.processid(), which); + debug!( + "[{:?}] yield. 
which: {} ({})", + process.processid(), + which, + match which { + 0 => "no wait", + 1 => "wait", + _ => "inval", + } + ); } if which > (YieldCall::Wait as usize) { // Only 0 and 1 are valid, so this is not a valid yield @@ -886,14 +1117,15 @@ impl Kernel { subscribe_num: subdriver_number, }; + // TODO: when the compiler supports capability types bring this back // First check if `upcall_ptr` is null. A null `upcall_ptr` will // result in `None` here and represents the special // "unsubscribe" operation. - let ptr = NonNull::new(upcall_ptr); + // let ptr = NonNull::new(upcall_ptr); // For convenience create an `Upcall` type now. This is just a // data structure and doesn't do any checking or conversion. - let upcall = Upcall::new(process.processid(), upcall_id, appdata, ptr); + let upcall = Upcall::new(process.processid(), upcall_id, appdata, upcall_ptr); // If `ptr` is not null, we must first verify that the upcall // function pointer is within process accessible memory. Per @@ -902,12 +1134,18 @@ impl Kernel { // > If the passed upcall is not valid (is outside process // > executable memory...), the kernel...MUST immediately return // > a failure with a error code of `INVALID`. - let rval1 = ptr.map_or(None, |upcall_ptr_nonnull| { - if !process.is_valid_upcall_function_pointer(upcall_ptr_nonnull) { - Some(ErrorCode::INVAL) - } else { - None - } + + // CHERI note: we don't do any CHERI checks here because the architecture + // does them for us. The checks are only needed if we convert a capability into + // an integer pointer. + let rval1 = upcall_ptr.map_or(None, |upcall_ptr_nonnull| { + if !process + .is_valid_upcall_function_pointer(upcall_ptr_nonnull.as_ptr() as *const u8) + { + Some(ErrorCode::INVAL) + } else { + None + } }); // If the upcall is either null or valid, then we continue @@ -1002,7 +1240,7 @@ impl Kernel { process.processid(), driver_number, subdriver_number, - upcall_ptr as usize, + upcall_ptr, appdata, rval ); @@ -1060,7 +1298,7 @@ impl Kernel { rw_pbuf, ) { Ok(rw_pbuf) => { - let (ptr, len) = rw_pbuf.consume(); + let (ptr, len, _) = rw_pbuf.consume(); SyscallReturn::AllowReadWriteSuccess(ptr, len) } Err((rw_pbuf, err @ ErrorCode::NOMEM)) => { @@ -1078,13 +1316,13 @@ impl Kernel { rw_pbuf, ) { Ok(rw_pbuf) => { - let (ptr, len) = rw_pbuf.consume(); + let (ptr, len, _) = rw_pbuf.consume(); SyscallReturn::AllowReadWriteSuccess( ptr, len, ) } Err((rw_pbuf, err)) => { - let (ptr, len) = rw_pbuf.consume(); + let (ptr, len, _) = rw_pbuf.consume(); SyscallReturn::AllowReadWriteFailure( err, ptr, len, ) @@ -1108,7 +1346,7 @@ impl Kernel { } _ => {} } - let (ptr, len) = rw_pbuf.consume(); + let (ptr, len, _) = rw_pbuf.consume(); SyscallReturn::AllowReadWriteFailure( err, ptr, len, ) @@ -1116,7 +1354,7 @@ impl Kernel { } } Err((rw_pbuf, err)) => { - let (ptr, len) = rw_pbuf.consume(); + let (ptr, len, _) = rw_pbuf.consume(); SyscallReturn::AllowReadWriteFailure(err, ptr, len) } } @@ -1182,7 +1420,7 @@ impl Kernel { // allow operation. Pass the // previous buffer information back // to the process. - let (ptr, len) = returned_pbuf.consume(); + let (ptr, len, _) = returned_pbuf.consume(); SyscallReturn::UserspaceReadableAllowSuccess( ptr, len, ) @@ -1192,7 +1430,7 @@ impl Kernel { // allow operation. Pass the new // buffer information back to the // process. 
- let (ptr, len) = rejected_pbuf.consume(); + let (ptr, len, _) = rejected_pbuf.consume(); SyscallReturn::UserspaceReadableAllowFailure( err, ptr, len, ) @@ -1256,7 +1494,7 @@ impl Kernel { ro_pbuf, ) { Ok(ro_pbuf) => { - let (ptr, len) = ro_pbuf.consume(); + let (ptr, len, _) = ro_pbuf.consume(); SyscallReturn::AllowReadOnlySuccess(ptr, len) } Err((ro_pbuf, err @ ErrorCode::NOMEM)) => { @@ -1274,13 +1512,13 @@ impl Kernel { ro_pbuf, ) { Ok(ro_pbuf) => { - let (ptr, len) = ro_pbuf.consume(); + let (ptr, len, _) = ro_pbuf.consume(); SyscallReturn::AllowReadOnlySuccess( ptr, len, ) } Err((ro_pbuf, err)) => { - let (ptr, len) = ro_pbuf.consume(); + let (ptr, len, _) = ro_pbuf.consume(); SyscallReturn::AllowReadOnlyFailure( err, ptr, len, ) @@ -1304,7 +1542,7 @@ impl Kernel { } _ => {} } - let (ptr, len) = ro_pbuf.consume(); + let (ptr, len, _) = ro_pbuf.consume(); SyscallReturn::AllowReadOnlyFailure( err, ptr, len, ) @@ -1312,7 +1550,7 @@ impl Kernel { } } Err((ro_pbuf, err)) => { - let (ptr, len) = ro_pbuf.consume(); + let (ptr, len, _) = ro_pbuf.consume(); SyscallReturn::AllowReadOnlyFailure(err, ptr, len) } } @@ -1362,15 +1600,26 @@ impl Kernel { Syscall::Exit { which, completion_code, - } => match which { - // The process called the `exit-terminate` system call. - 0 => process.terminate(Some(completion_code as u32)), - // The process called the `exit-restart` system call. - 1 => process.try_restart(Some(completion_code as u32)), - // The process called an invalid variant of the Exit - // system call class. - _ => process.set_syscall_return_value(SyscallReturn::Failure(ErrorCode::NOSUPPORT)), - }, + } => { + if config::CONFIG.trace_syscalls { + debug!( + "[{:?}] syscall EXIT {} {}", + process.processid(), + which, + completion_code + ); + }; + match which { + // The process called the `exit-terminate` system call. + 0 => process.terminate(Some(completion_code as u32)), + // The process called the `exit-restart` system call. + 1 => process.try_restart(Some(completion_code as u32)), + // The process called an invalid variant of the Exit + // system call class. + _ => process + .set_syscall_return_value(SyscallReturn::Failure(ErrorCode::NOSUPPORT)), + } + } } } } diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs index 028f30220..a85c355d6 100644 --- a/kernel/src/lib.rs +++ b/kernel/src/lib.rs @@ -85,20 +85,46 @@ //! this use case. It is likely we will have to create new interfaces as new //! use cases are discovered. +#![feature(macro_metavar_expr)] +#![feature(const_precise_live_drops)] +#![feature(const_trait_impl)] +#![feature(const_mut_refs)] +#![feature(const_slice_split_at_mut)] #![feature(core_intrinsics)] +#![feature(slice_ptr_get)] +#![feature(slice_ptr_len)] +#![feature(nonnull_slice_from_raw_parts)] +#![feature(const_nonnull_slice_from_raw_parts)] +#![feature(const_refs_to_cell)] +#![feature(const_maybe_uninit_zeroed)] #![warn(unreachable_pub)] +#![feature(const_type_id)] +#![feature(as_array_of_cells)] +#![feature(maybe_uninit_slice)] +#![feature(layout_for_ptr)] +#![feature(const_convert)] +// Sometimes utility functions go into / out of use. This warning is annoying. +#![allow(dead_code)] #![no_std] +// This is used to run the tests on a host +#[cfg(test)] +#[macro_use] +extern crate std; + // Define the kernel major and minor versions. 
pub const KERNEL_MAJOR_VERSION: u16 = 2; pub const KERNEL_MINOR_VERSION: u16 = 1; pub mod capabilities; +pub mod cheri; pub mod collections; pub mod component; +pub mod config; pub mod debug; pub mod deferred_call; pub mod dynamic_deferred_call; +pub mod easm; pub mod errorcode; pub mod grant; pub mod hil; @@ -113,7 +139,6 @@ pub mod syscall; pub mod upcall; pub mod utilities; -mod config; mod kernel; mod memop; mod process_policies; @@ -124,6 +149,8 @@ mod syscall_driver; // Core resources exposed as `kernel::Type`. pub use crate::errorcode::ErrorCode; -pub use crate::kernel::Kernel; +pub use crate::kernel::{GrantCounter, Kernel, ProtoKernel}; pub use crate::process::ProcessId; pub use crate::scheduler::Scheduler; +// These types need to be leaked for use by schedulers and board specific type +pub use crate::kernel::{ProcEntry, ProcessArray}; diff --git a/kernel/src/memop.rs b/kernel/src/memop.rs index 51d89f37c..593bbb850 100644 --- a/kernel/src/memop.rs +++ b/kernel/src/memop.rs @@ -8,10 +8,11 @@ use crate::ErrorCode; /// /// ### `memop_num` /// -/// - `0`: BRK. Change the location of the program break and return a -/// SyscallReturn. -/// - `1`: SBRK. Change the location of the program break and return the -/// previous break address. +/// - `0`: BRK. Change the location of the program break and return the +/// PREVIOUS break address. If capabilities are supported, will be +/// bounded from the start of the RW segment to the NEW break. +/// - `1`: SBRK. Change the location of the program break relatively. +/// returns the same thing as BRK. /// - `2`: Get the address of the start of the application's RAM allocation. /// - `3`: Get the address pointing to the first address after the end of the /// application's RAM allocation. @@ -42,43 +43,43 @@ pub(crate) fn memop(process: &dyn Process, op_type: usize, r1: usize) -> Syscall // Op Type 0: BRK 0 /* BRK */ => { process.brk(r1 as *const u8) - .map(|_| SyscallReturn::Success) + .map(|new_region| SyscallReturn::SuccessPtr(new_region)) .unwrap_or(SyscallReturn::Failure(ErrorCode::NOMEM)) }, // Op Type 1: SBRK 1 /* SBRK */ => { process.sbrk(r1 as isize) - .map(|addr| SyscallReturn::SuccessU32(addr as u32)) + .map(|addr| SyscallReturn::SuccessPtr(addr)) .unwrap_or(SyscallReturn::Failure(ErrorCode::NOMEM)) }, // Op Type 2: Process memory start - 2 => SyscallReturn::SuccessU32(process.get_addresses().sram_start as u32), + 2 => SyscallReturn::SuccessUSize(process.get_addresses().sram_start), // Op Type 3: Process memory end - 3 => SyscallReturn::SuccessU32(process.get_addresses().sram_end as u32), + 3 => SyscallReturn::SuccessUSize(process.get_addresses().sram_end), // Op Type 4: Process flash start - 4 => SyscallReturn::SuccessU32(process.get_addresses().flash_start as u32), + 4 => SyscallReturn::SuccessUSize(process.get_addresses().flash_start), // Op Type 5: Process flash end - 5 => SyscallReturn::SuccessU32(process.get_addresses().flash_end as u32), + 5 => SyscallReturn::SuccessUSize(process.get_addresses().flash_end), // Op Type 6: Grant region begin - 6 => SyscallReturn::SuccessU32(process.get_addresses().sram_grant_start as u32), + 6 => SyscallReturn::SuccessUSize(process.get_addresses().sram_grant_start), // Op Type 7: Number of defined writeable regions in the TBF header. - 7 => SyscallReturn::SuccessU32(process.number_writeable_flash_regions() as u32), + 7 => SyscallReturn::SuccessUSize(process.number_writeable_flash_regions()), // Op Type 8: The start address of the writeable region indexed by r1. 
         8 => {
-            let flash_start = process.get_addresses().flash_start as u32;
+            let flash_start = process.get_addresses().flash_start;
             let (offset, size) = process.get_writeable_flash_region(r1);
             if size == 0 {
                 SyscallReturn::Failure(ErrorCode::FAIL)
             } else {
-                SyscallReturn::SuccessU32(flash_start + offset)
+                SyscallReturn::SuccessUSize(flash_start + offset)
             }
         }
 
@@ -86,12 +87,12 @@ pub(crate) fn memop(process: &dyn Process, op_type: usize, r1: usize) -> Syscall
         // Returns (void*) -1 on failure, meaning the selected writeable region
         // does not exist.
         9 => {
-            let flash_start = process.get_addresses().flash_start as u32;
+            let flash_start = process.get_addresses().flash_start;
             let (offset, size) = process.get_writeable_flash_region(r1);
             if size == 0 {
                 SyscallReturn::Failure(ErrorCode::FAIL)
             } else {
-                SyscallReturn::SuccessU32(flash_start + offset + size)
+                SyscallReturn::SuccessUSize(flash_start + offset + size)
             }
         }
 
diff --git a/kernel/src/platform/chip.rs b/kernel/src/platform/chip.rs
index ef0a10d57..10e877dec 100644
--- a/kernel/src/platform/chip.rs
+++ b/kernel/src/platform/chip.rs
@@ -3,6 +3,7 @@
 use crate::platform::mpu;
 use crate::syscall;
 use core::fmt::Write;
+use core::ptr::NonNull;
 
 /// Interface for individual MCUs.
 ///
@@ -64,6 +65,11 @@ pub trait Chip {
     /// the Display trait.
     /// Used by panic.
     unsafe fn print_state(&self, writer: &mut dyn Write);
+
+    /// Should be called whenever executable memory is written,
+    /// before any new execution.
+    /// This can be used, for example, to sync ICache and DCache.
+    fn on_executable_memory_changed(_range: NonNull<[u8]>) {}
 }
 
 /// Interface for handling interrupts and deferred calls on a hardware chip.
diff --git a/kernel/src/platform/mpu.rs b/kernel/src/platform/mpu.rs
index 5a55f72a2..adff708d5 100644
--- a/kernel/src/platform/mpu.rs
+++ b/kernel/src/platform/mpu.rs
@@ -1,6 +1,7 @@
 //! Interface for configuring the Memory Protection Unit.
 
 use crate::process::ProcessId;
+use crate::{ErrorCode, OnlyInCfg};
 use core::cmp;
 use core::fmt::{self, Display};
 
@@ -62,6 +63,13 @@ impl Display for MpuConfigDefault {
     }
 }
 
+pub enum RemoveRegionResult {
+    /// Region was removed synchronously
+    Sync,
+    /// Region will be revoked next time revoke_regions() is called
+    Async(OnlyInCfg!(async_mpu_config)),
+}
+
 /// The generic trait that particular memory protection unit implementations
 /// need to implement.
 ///
@@ -90,6 +98,19 @@ pub trait MPU {
     /// current state to help with debugging.
     type MpuConfig: Default + Display;
 
+    /// The minimum power-of-two alignment this MPU supports.
+    /// Possibly, greater alignment is required for a particular span.
+    const MIN_MPUALIGN: usize;
+
+    /// Align a specific range such that it could be an MPU region. If alignment does not
+    /// depend on the length/location, leave this as the default implementation.
+    fn align_range(base: usize, length: usize) -> (usize, usize) {
+        let mask = Self::MIN_MPUALIGN - 1;
+        let new_base = base & !mask;
+        let new_length = (length + (base - new_base) + mask) & !mask;
+        (new_base, new_length)
+    }
+
     /// Clears the MPU.
     ///
     /// This function will clear any access control enforced by the
@@ -104,6 +125,13 @@ pub trait MPU {
     /// regions protected by the MPU.
     fn enable_app_mpu(&self) {}
 
+    /// Notify the MPU there is a new process.
+    /// We do NOT provide the config argument here
+    /// as doing so blocks the MPU from allocating
+    /// in the grant region for the process.
+    #[allow(unused_variables)]
+    fn new_process(&self, app_id: ProcessId) {}
+
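Since `RemoveRegionResult` splits unmapping into a fast synchronous path and a deferred one, a call-site is expected to branch on the result and later perform bulk revocation for the `Async` case. A hedged sketch of that shape (assumes the error type of `remove_memory_region` is `()` as in the default implementation below, and that the `revoke_regions` safety condition, no live grant references, already holds):

    match mpu.remove_memory_region(region, &mut config) {
        Ok(RemoveRegionResult::Sync) => {} // unmapped immediately
        Ok(RemoveRegionResult::Async(_)) => unsafe {
            // Deferred path: actually tear the mappings down in bulk.
            mpu.revoke_regions(&mut config, proc).unwrap();
        },
        Err(()) => { /* region was not exactly mapped as specified */ }
    }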
     /// Disables the MPU for userspace apps.
     ///
     /// This function must disable any access control that was previously setup
@@ -114,6 +142,17 @@ pub trait MPU {
     /// manage processes.
     fn disable_app_mpu(&self) {}
 
+    /// Same as above but provides a config and process ID.
+    ///
+    /// # Arguments
+    ///
+    /// - `config`: MPU region configuration for the app
+    /// - `app_id`: ProcessId of the process that the MPU was configured for
+    #[allow(unused_variables)]
+    fn disable_app_mpu_config(&self, config: &Self::MpuConfig, app_id: &ProcessId) {
+        self.disable_app_mpu();
+    }
+
     /// Returns the maximum number of regions supported by the MPU.
     fn number_total_regions(&self) -> usize {
         0
@@ -170,8 +209,26 @@ pub trait MPU {
     /// # Return Value
     ///
     /// Returns an error if the specified region is not exactly mapped to the process as specified.
+    /// Returns `Ok(RemoveRegionResult::Sync)` if the region is removed immediately,
+    /// or `Ok(RemoveRegionResult::Async)` if `revoke_regions` must also be called.
     #[allow(unused_variables)]
-    fn remove_memory_region(&self, region: Region, config: &mut Self::MpuConfig) -> Result<(), ()> {
+    fn remove_memory_region(
+        &self,
+        region: Region,
+        config: &mut Self::MpuConfig,
+    ) -> Result<RemoveRegionResult, ()> {
+        Ok(RemoveRegionResult::Sync)
+    }
+
+    /// Actually revoke regions previously requested with remove_memory_region.
+    /// Safety: no LiveARef or LivePRef may exist to any memory that might be revoked,
+    /// nor may any grants be entered via the legacy mechanism if allowed memory might be revoked.
+    #[allow(unused_variables)]
+    unsafe fn revoke_regions(
+        &self,
+        config: &mut Self::MpuConfig,
+        proc: &dyn crate::process::Process,
+    ) -> Result<(), ErrorCode> {
         Ok(())
     }
 
@@ -285,6 +342,7 @@ pub trait MPU {
 /// Implement default MPU trait for unit.
 impl MPU for () {
     type MpuConfig = MpuConfigDefault;
+    const MIN_MPUALIGN: usize = 1;
 }
 
 /// The generic trait that particular kernel level memory protection unit
@@ -360,3 +418,23 @@ pub trait KernelMPU {
     #[allow(unused_variables)]
     fn enable_kernel_mpu(&self, config: &mut Self::KernelMpuConfig);
 }
+
+#[cfg(test)]
+mod tests {
+    use crate::platform::mpu::{MpuConfigDefault, MPU};
+
+    struct FakeMPU<const A: usize> {}
+
+    impl<const A: usize> MPU for FakeMPU<A> {
+        type MpuConfig = MpuConfigDefault;
+        const MIN_MPUALIGN: usize = A;
+    }
+
+    #[test]
+    fn test_alignment_logic() {
+        assert_eq!(FakeMPU::<1>::align_range(0, 100), (0, 100));
+        assert_eq!(FakeMPU::<1>::align_range(123, 100), (123, 100));
+        assert_eq!(FakeMPU::<128>::align_range(130, 100), (128, 128));
+        assert_eq!(FakeMPU::<128>::align_range(200, 100), (128, 256));
+    }
+}
diff --git a/kernel/src/platform/scheduler_timer.rs b/kernel/src/platform/scheduler_timer.rs
index b067b8150..8e13daff2 100644
--- a/kernel/src/platform/scheduler_timer.rs
+++ b/kernel/src/platform/scheduler_timer.rs
@@ -155,7 +155,7 @@ pub struct VirtualSchedulerTimer<A: 'static + time::Alarm<'static>> {
 }
 
 impl<A: 'static + time::Alarm<'static>> VirtualSchedulerTimer<A> {
-    pub fn new(alarm: &'static A) -> Self {
+    pub const fn new(alarm: &'static A) -> Self {
         Self { alarm }
     }
 }
diff --git a/kernel/src/process.rs b/kernel/src/process.rs
index c1794d9bf..a6308f093 100644
--- a/kernel/src/process.rs
+++ b/kernel/src/process.rs
@@ -7,6 +7,7 @@ use core::ptr::NonNull;
 use core::str;
 
 use crate::capabilities;
+use crate::cheri::cptr;
 use crate::errorcode::ErrorCode;
 use crate::ipc;
 use crate::kernel::Kernel;
@@ -24,7 +25,9 @@ pub use crate::process_policies::{
 };
 pub use crate::process_printer::{ProcessPrinter, ProcessPrinterContext, ProcessPrinterText};
 pub use crate::process_standard::ProcessStandard;
-pub use crate::process_utilities::{load_processes,
load_processes_advanced, ProcessLoadError}; +pub use crate::process_utilities::{ + get_mems, load_processes, load_processes_advanced, try_load_process_pub, ProcessLoadError, +}; /// Userspace process identifier. /// @@ -197,6 +200,10 @@ pub trait Process { /// kernel-internal errors. fn enqueue_task(&self, task: Task) -> Result<(), ErrorCode>; + /// Performs the checks for enqueue_task and returns errors in the same cases, + /// or otherwise does nothing and returns unit. + fn could_enqueue_task(&self) -> Result<(), ErrorCode>; + /// Returns whether this process is ready to execute. fn ready(&self) -> bool; @@ -249,7 +256,7 @@ pub trait Process { fn get_restart_count(&self) -> usize; /// Get the name of the process. Used for IPC. - fn get_process_name(&self) -> &'static str; + fn get_process_name(&self) -> &str; /// Get the completion code if the process has previously terminated. /// @@ -263,6 +270,13 @@ pub trait Process { /// this will return `Some(Some(completion_code))`. fn get_completion_code(&self) -> Option>; + /// Try and reclaim all grant memory. + /// This can fail because there still exist grant references within the kernel. + /// The eventual plan is to try restart after some interval (to allow hardware to finish + /// with references naturally). + /// For now, panic is fine if this fails. + fn try_release_grants(&self) -> Result<(), ()>; + /// Stop and clear a process's state, putting it into the `Terminated` /// state. /// @@ -317,7 +331,7 @@ pub trait Process { /// This will fail with an error if the process is no longer active. An /// inactive process will not run again without being reset, and changing /// the memory pointers is not valid at this point. - fn brk(&self, new_break: *const u8) -> Result<*const u8, Error>; + fn brk(&self, new_break: *const u8) -> Result; /// Change the location of the program break, reallocate the MPU region /// covering program memory, and return the previous break address. @@ -325,7 +339,7 @@ pub trait Process { /// This will fail with an error if the process is no longer active. An /// inactive process will not run again without being reset, and changing /// the memory pointers is not valid at this point. - fn sbrk(&self, increment: isize) -> Result<*const u8, Error>; + fn sbrk(&self, increment: isize) -> Result; /// How many writeable flash regions defined in the TBF header for this /// process. @@ -333,7 +347,7 @@ pub trait Process { /// Get the offset from the beginning of flash and the size of the defined /// writeable flash region. - fn get_writeable_flash_region(&self, region_index: usize) -> (u32, u32); + fn get_writeable_flash_region(&self, region_index: usize) -> (usize, usize); /// Debug function to update the kernel on where the stack starts for this /// process. Processes are not required to call this through the memop @@ -422,6 +436,9 @@ pub trait Process { /// the process will not run again). fn setup_mpu(&self); + /// Disable MMU configuration specific to this process. + fn disable_mmu(&self); + /// Allocate a new MPU region for the process that is at least /// `min_region_size` bytes and lies within the specified stretch of /// unallocated memory. @@ -435,12 +452,24 @@ pub trait Process { min_region_size: usize, ) -> Option; + /// Align a region so that the MPU could enforce it + /// FIXME: I hate that this is in process, but ProcessStandard seems to be the only thing + /// that knows about the type of the MPU. 
Really, Kernel should be parameterised in the + /// Chip, rather than its individual methods, so the type does not get lost. + /// b/280426926 + fn align_mpu_region(&self, base: usize, length: usize) -> (usize, usize); + /// Removes an MPU region from the process that has been previouly added with /// `add_mpu_region`. /// /// It is not valid to call this function when the process is inactive (i.e. /// the process will not run again). - fn remove_mpu_region(&self, region: mpu::Region) -> Result<(), ErrorCode>; + fn remove_mpu_region(&self, region: mpu::Region) -> Result; + + /// Actually revoke regions previously requested with remove_memory_region + /// Safety: no LiveARef or LivePRef may exist to any memory that might be revoked, + /// Nor may any grants be entered via the legacy mechanism if allowed memory might be revoked. + unsafe fn revoke_regions(&self) -> Result<(), ErrorCode>; // grants @@ -453,7 +482,7 @@ pub trait Process { /// actual app_brk, as MPU alignment and size constraints may result in the /// MPU enforced region differing from the app_brk. /// - /// This will return `false` and fail if: + /// This will return `None` and fail if: /// - The process is inactive, or /// - There is not enough available memory to do the allocation, or /// - The grant_num is invalid, or @@ -464,13 +493,7 @@ pub trait Process { driver_num: usize, size: usize, align: usize, - ) -> bool; - - /// Check if a given grant for this process has been allocated. - /// - /// Returns `None` if the process is not active. Otherwise, returns `true` - /// if the grant has been allocated, `false` otherwise. - fn grant_is_allocated(&self, grant_num: usize) -> Option; + ) -> Option>; /// Allocate memory from the grant region that is `size` bytes long and /// aligned to `align` bytes. This is used for creating custom grants which @@ -484,18 +507,16 @@ pub trait Process { &self, size: usize, align: usize, - ) -> Option<(ProcessCustomGrantIdentifer, NonNull)>; + ) -> Option<(ProcessCustomGrantIdentifier, NonNull)>; - /// Enter the grant based on `grant_num` for this process. - /// - /// Entering a grant means getting access to the actual memory for the - /// object stored as the grant. + /// Get the grant based on `grant_num` for this process, getting access + /// to the actual memory for the object stored as the grant. /// /// This will return an `Err` if the process is inactive of the `grant_num` - /// is invalid, if the grant has not been allocated, or if the grant is - /// already entered. If this returns `Ok()` then the pointer points to the - /// previously allocated memory for this grant. - fn enter_grant(&self, grant_num: usize) -> Result, Error>; + /// is invalid, if the grant has not been allocated. + /// If this returns `Ok()` then the pointer points to the + /// previously allocated memory for this grant, or NULL. + fn get_grant_mem(&self, grant_num: usize) -> Result>, Error>; /// Enter a custom grant based on the `identifier`. /// @@ -504,25 +525,10 @@ pub trait Process { /// /// This returns an error if the custom grant is no longer accessible, or /// if the process is inactive. - fn enter_custom_grant(&self, identifier: ProcessCustomGrantIdentifer) - -> Result<*mut u8, Error>; - - /// Opposite of `enter_grant()`. Used to signal that the grant is no longer - /// entered. - /// - /// If `grant_num` is valid, this function cannot fail. If `grant_num` is - /// invalid, this function will do nothing. 
If the process is inactive then - /// grants are invalid and are not entered or not entered, and this function - /// will do nothing. - /// - /// ### Safety - /// - /// The caller must ensure that no references to the memory inside the grant - /// exist after calling `leave_grant()`. Otherwise, it would be possible to - /// effectively enter the grant twice (once using the existing reference, - /// once with a new call to `enter_grant()`) which breaks the memory safety - /// requirements of grants. - unsafe fn leave_grant(&self, grant_num: usize); + fn enter_custom_grant( + &self, + identifier: ProcessCustomGrantIdentifier, + ) -> Result<*mut u8, Error>; /// Return the count of the number of allocated grant pointers if the /// process is active. This does not count custom grants. This is used @@ -543,10 +549,14 @@ pub trait Process { /// /// Returns `true` if the upcall function pointer is valid for this process, /// and `false` otherwise. - fn is_valid_upcall_function_pointer(&self, upcall_fn: NonNull<()>) -> bool; + fn is_valid_upcall_function_pointer(&self, upcall_fn: *const u8) -> bool; // functions for processes that are architecture specific + /// Get extra arguments for commands. The value returned is indeterminate if + /// this is not called in the context of a driver handling a command. + fn get_extra_syscall_arg(&self, ndx: usize) -> Option; + /// Set the return value the process should see when it begins executing /// again after the syscall. /// @@ -634,7 +644,7 @@ pub trait Process { /// The fields of this struct are private so only Process can create this /// identifier. #[derive(Copy, Clone)] -pub struct ProcessCustomGrantIdentifer { +pub struct ProcessCustomGrantIdentifier { pub(crate) offset: usize, } @@ -824,8 +834,8 @@ pub struct FunctionCall { pub argument0: usize, pub argument1: usize, pub argument2: usize, - pub argument3: usize, - pub pc: usize, + pub argument3: cptr, + pub pc: cptr, } /// Collection of process state information related to the memory addresses diff --git a/kernel/src/process_printer.rs b/kernel/src/process_printer.rs index 2a8950525..61178a0db 100644 --- a/kernel/src/process_printer.rs +++ b/kernel/src/process_printer.rs @@ -5,6 +5,7 @@ use core::fmt::Write; use crate::process::Process; use crate::utilities::binary_write::BinaryWrite; use crate::utilities::binary_write::WriteToBinaryOffsetWrapper; +use crate::very_simple_component; /// A context token that the caller must pass back to us. This allows us to /// track where we are in the print operation. @@ -63,11 +64,13 @@ pub trait ProcessPrinter { pub struct ProcessPrinterText {} impl ProcessPrinterText { - pub fn new() -> ProcessPrinterText { + pub const fn new() -> ProcessPrinterText { ProcessPrinterText {} } } +very_simple_component!(impl for ProcessPrinterText, new()); + impl ProcessPrinter for ProcessPrinterText { // `print_overview()` must be synchronous, but does not assume a synchronous // writer or an infinite (or very large) underlying buffer in the writer. 
To diff --git a/kernel/src/process_standard.rs b/kernel/src/process_standard.rs index fa76e2c68..844588796 100644 --- a/kernel/src/process_standard.rs +++ b/kernel/src/process_standard.rs @@ -9,24 +9,27 @@ use core::fmt::Write; use core::ptr::NonNull; use core::{mem, ptr, slice, str}; -use crate::collections::queue::Queue; -use crate::collections::ring_buffer::RingBuffer; -use crate::config; +use crate::cheri::{cptr, CPtrOps}; +use crate::collections::ring_buffer::StaticSizedRingBuffer; +use crate::config::CONFIG; use crate::debug; use crate::errorcode::ErrorCode; +use crate::grant::try_free_grant; use crate::kernel::Kernel; use crate::platform::chip::Chip; -use crate::platform::mpu::{self, MPU}; +use crate::platform::mpu::{self, RemoveRegionResult, MPU}; use crate::process::{Error, FunctionCall, FunctionCallSource, Process, State, Task}; -use crate::process::{FaultAction, ProcessCustomGrantIdentifer, ProcessId, ProcessStateCell}; +use crate::process::{FaultAction, ProcessCustomGrantIdentifier, ProcessId, ProcessStateCell}; use crate::process::{ProcessAddresses, ProcessSizes}; use crate::process_policies::ProcessFaultPolicy; +use crate::process_standard::MPURegionState::InUse; use crate::process_utilities::ProcessLoadError; use crate::processbuffer::{ReadOnlyProcessBuffer, ReadWriteProcessBuffer}; use crate::storage_permissions; use crate::syscall::{self, Syscall, SyscallReturn, UserspaceKernelBoundary}; use crate::upcall::UpcallId; use crate::utilities::cells::{MapCell, NumericCellExt, OptionalCell}; +use crate::{config, OnlyInCfg}; use tock_tbf::types::CommandPermissions; /// State for helping with debugging apps. @@ -91,6 +94,16 @@ struct GrantPointerEntry { grant_ptr: *mut u8, } +#[derive(Copy, Clone)] +enum MPURegionState { + // Region can be allocated + Free, + // Region in active use by the process + InUse(mpu::Region), + // Process should not be using the region, but this has not yet been configured + BeingRevoked(OnlyInCfg!(async_mpu_config, mpu::Region)), +} + /// A type for userspace processes in Tock. pub struct ProcessStandard<'a, C: 'static + Chip> { /// Identifier of this process and the index of the process in the process @@ -163,10 +176,12 @@ pub struct ProcessStandard<'a, C: 'static + Chip> { /// Process flash segment. This is the region of nonvolatile flash that /// the process occupies. - flash: &'static [u8], + /// Note, if dynamic process loading is supported, this is where the + /// process was loaded from, but may not still contain the TBF. + flash: *const [u8], /// Collection of pointers to the TBF header in flash. - header: tock_tbf::types::TbfHeader, + header: tock_tbf::types::TbfHeader<'static>, /// State saved on behalf of the process each time the app switches to the /// kernel. @@ -191,11 +206,11 @@ pub struct ProcessStandard<'a, C: 'static + Chip> { mpu_config: MapCell<<::MPU as MPU>::MpuConfig>, /// MPU regions are saved as a pointer-size pair. - mpu_regions: [Cell>; 6], + mpu_regions: [Cell; 6], /// Essentially a list of upcalls that want to call functions in the /// process. - tasks: MapCell>, + tasks: StaticSizedRingBuffer, /// Count of how many times this process has entered the fault condition and /// been restarted. This is used by some `ProcessRestartPolicy`s to @@ -212,7 +227,9 @@ pub struct ProcessStandard<'a, C: 'static + Chip> { /// be stored as `Some(completion code)`. completion_code: OptionalCell>, - /// Name of the app. + /// Name of the app. This may not really be static, but instead live for + /// the lifetime of this header. 
The bound on the accessor for this field + /// ensures it will not live too long. process_name: &'static str, /// Values kept so that we can print useful debug messages when apps fault. @@ -231,19 +248,7 @@ impl Process for ProcessStandard<'_, C> { return Err(ErrorCode::NODEVICE); } - let ret = self.tasks.map_or(Err(ErrorCode::FAIL), |tasks| { - match tasks.enqueue(task) { - true => { - // The task has been successfully enqueued. - Ok(()) - } - false => { - // The task could not be enqueued as there is - // insufficient space in the ring buffer. - Err(ErrorCode::NOMEM) - } - } - }); + let ret = self.tasks.enqueue(task).map_err(|_| ErrorCode::NOMEM); if ret.is_ok() { self.kernel.increment_work(); @@ -258,41 +263,47 @@ impl Process for ProcessStandard<'_, C> { ret } + fn could_enqueue_task(&self) -> Result<(), ErrorCode> { + if !self.is_active() { + return Err(ErrorCode::NODEVICE); + } + if self.tasks.is_full() { + Err(ErrorCode::NOMEM) + } else { + Ok(()) + } + } + fn ready(&self) -> bool { - self.tasks.map_or(false, |ring_buf| ring_buf.has_elements()) - || self.state.get() == State::Running + self.tasks.has_elements() || self.state.get() == State::Running } fn remove_pending_upcalls(&self, upcall_id: UpcallId) { - self.tasks.map(|tasks| { - let count_before = tasks.len(); - tasks.retain(|task| match task { + let count_before = self.tasks.len(); + // Safety: this match does not call any functions and does access self + unsafe { + self.tasks.retain(|task| match task { // Remove only tasks that are function calls with an id equal // to `upcall_id`. Task::FunctionCall(function_call) => match function_call.source { FunctionCallSource::Kernel => true, - FunctionCallSource::Driver(id) => { - if id != upcall_id { - true - } else { - self.kernel.decrement_work(); - false - } - } + FunctionCallSource::Driver(id) => id != upcall_id, }, _ => true, }); - if config::CONFIG.trace_syscalls { - let count_after = tasks.len(); - debug!( - "[{:?}] remove_pending_upcalls[{:#x}:{}] = {} upcall(s) removed", - self.processid(), - upcall_id.driver_num, - upcall_id.subscribe_num, - count_before - count_after, - ); - } - }); + } + let count_after = self.tasks.len(); + self.kernel.decrement_work_by(count_before - count_after); + + if config::CONFIG.trace_syscalls { + debug!( + "[{:?}] remove_pending_upcalls[{:#x}:{}] = {} upcall(s) removed", + self.processid(), + upcall_id.driver_num, + upcall_id.subscribe_num, + count_before - count_after, + ); + } } fn get_state(&self) -> State { @@ -348,6 +359,14 @@ impl Process for ProcessStandard<'_, C> { } fn try_restart(&self, completion_code: Option) { + match self.try_release_grants() { + Ok(_) => {} + Err(_) => { + // TODO: we could also do with a policy here for handling zombies + panic!("") + } + } + // Terminate the process, freeing its state and removing any // pending tasks from the scheduler's queue. self.terminate(completion_code); @@ -360,18 +379,23 @@ impl Process for ProcessStandard<'_, C> { // want to reclaim the process resources. } + /// Try to release all grant memory. If no capsule have been allowed the + /// HoldGrantReferencesCapability or HoldAllowReferencesCapability then this cannot fail. + /// If they are still holding references (for the purpose of DMA) then this process cannot + /// release its memory. + fn try_release_grants(&self) -> Result<(), ()> { + let _ = try_free_grant(self); + Ok(()) + } + fn terminate(&self, completion_code: Option) { // Remove the tasks that were scheduled for the app from the // amount of work queue. 
- let tasks_len = self.tasks.map_or(0, |tasks| tasks.len()); - for _ in 0..tasks_len { - self.kernel.decrement_work(); - } + let tasks_len = self.tasks.len(); + self.kernel.decrement_work_by(tasks_len); // And remove those tasks - self.tasks.map(|tasks| { - tasks.empty(); - }); + self.tasks.empty(); // Clear any grant regions this app has setup with any capsules. unsafe { @@ -390,20 +414,21 @@ impl Process for ProcessStandard<'_, C> { } fn has_tasks(&self) -> bool { - self.tasks.map_or(false, |tasks| tasks.has_elements()) + self.tasks.has_elements() } fn dequeue_task(&self) -> Option { - self.tasks.map_or(None, |tasks| { - tasks.dequeue().map(|cb| { - self.kernel.decrement_work(); - cb - }) - }) + let task = self.tasks.dequeue().ok(); + + if task.is_some() { + self.kernel.decrement_work() + } + + task } fn pending_tasks(&self) -> usize { - self.tasks.map_or(0, |tasks| tasks.len()) + self.tasks.len() as usize } fn get_command_permissions(&self, driver_num: usize, offset: usize) -> CommandPermissions { @@ -436,7 +461,7 @@ impl Process for ProcessStandard<'_, C> { self.header.number_writeable_flash_regions() } - fn get_writeable_flash_region(&self, region_index: usize) -> (u32, u32) { + fn get_writeable_flash_region(&self, region_index: usize) -> (usize, usize) { self.header.get_writeable_flash_region(region_index) } @@ -466,6 +491,14 @@ impl Process for ProcessStandard<'_, C> { }); } + fn disable_mmu(&self) { + self.mpu_config.map(|config| { + self.chip + .mpu() + .disable_app_mpu_config(&config, &self.processid()); + }); + } + fn add_mpu_region( &self, unallocated_memory_start: *const u8, @@ -473,6 +506,12 @@ impl Process for ProcessStandard<'_, C> { min_region_size: usize, ) -> Option { self.mpu_config.and_then(|mut config| { + // Fail early if we would not be able to store in process struct + let region = self.mpu_regions.iter().find(|region| match region.get() { + MPURegionState::Free => true, + _ => false, + })?; + let new_region = self.chip.mpu().allocate_region( unallocated_memory_start, unallocated_memory_size, @@ -481,45 +520,76 @@ impl Process for ProcessStandard<'_, C> { &mut config, ); - if new_region.is_none() { - return None; - } - - for region in self.mpu_regions.iter() { - if region.get().is_none() { - region.set(new_region); - return new_region; - } + if let Some(new_region) = new_region { + region.set(InUse(new_region)); } - // Not enough room in Process struct to store the MPU region. - None + new_region }) } - fn remove_mpu_region(&self, region: mpu::Region) -> Result<(), ErrorCode> { + fn align_mpu_region(&self, base: usize, length: usize) -> (usize, usize) { + C::MPU::align_range(base, length) + } + + fn remove_mpu_region(&self, region: mpu::Region) -> Result { self.mpu_config.map_or(Err(ErrorCode::INVAL), |mut config| { // Find the existing mpu region that we are removing; it needs to match exactly. 
-            if let Some(internal_region) = self
-                .mpu_regions
-                .iter()
-                .find(|r| r.get().map_or(false, |r| r == region))
-            {
-                self.chip
+            if let Some(internal_region) = self.mpu_regions.iter().find(|r| match r.get() {
+                InUse(r) => r == region,
+                _ => false,
+            }) {
+                let result = self
+                    .chip
                     .mpu()
                     .remove_memory_region(region, &mut config)
                     .or(Err(ErrorCode::FAIL))?;
 
-                // Remove this region from the tracking cache of mpu_regions
-                internal_region.set(None);
-                Ok(())
+                match result {
+                    RemoveRegionResult::Sync => {
+                        // Remove this region from the tracking cache of mpu_regions
+                        internal_region.set(MPURegionState::Free);
+                    }
+                    RemoveRegionResult::Async(_) => {
+                        // Track as revocation in progress
+                        internal_region.set(MPURegionState::BeingRevoked(
+                            <OnlyInCfg!(async_mpu_config, mpu::Region)>::new_true(region),
+                        ))
+                    }
+                }
+
+                Ok(result)
             } else {
                 Err(ErrorCode::INVAL)
             }
         })
     }
 
-    fn sbrk(&self, increment: isize) -> Result<*const u8, Error> {
+    /// Actually revoke regions previously requested with remove_memory_region.
+    /// Safety: no LiveARef or LivePRef may exist to any memory that might be revoked,
+    /// nor may any grants be entered via the legacy mechanism if allowed memory might be revoked.
+    unsafe fn revoke_regions(&self) -> Result<(), ErrorCode> {
+        self.mpu_config.map_or(Err(ErrorCode::INVAL), |config| {
+            let result = unsafe { self.chip.mpu().revoke_regions(config, self) };
+
+            // On success, all being-revoked regions will now be free
+            if result.is_ok() {
+                for r in &self.mpu_regions {
+                    match r.get() {
+                        MPURegionState::BeingRevoked(_) => r.set(MPURegionState::Free),
+                        _ => {}
+                    }
+                }
+            }
+
+            result
+        })
+    }
+
+    fn sbrk(&self, increment: isize) -> Result<cptr, Error> {
         // Do not modify an inactive process.
         if !self.is_active() {
             return Err(Error::InactiveApp);
@@ -529,7 +599,7 @@ impl<C: Chip> Process for ProcessStandard<'_, C> {
         self.brk(new_break)
     }
 
-    fn brk(&self, new_break: *const u8) -> Result<*const u8, Error> {
+    fn brk(&self, new_break: *const u8) -> Result<cptr, Error> {
         // Do not modify an inactive process.
         if !self.is_active() {
             return Err(Error::InactiveApp);
@@ -544,15 +614,43 @@ impl<C: Chip> Process for ProcessStandard<'_, C> {
         } else if let Err(_) = self.chip.mpu().update_app_memory_region(
             new_break,
             self.kernel_memory_break.get(),
-            mpu::Permissions::ReadWriteOnly,
+            if CONFIG.contiguous_load_procs {
+                mpu::Permissions::ReadWriteExecute
+            } else {
+                mpu::Permissions::ReadWriteOnly
+            },
             &mut config,
         ) {
             Err(Error::OutOfMemory)
         } else {
             let old_break = self.app_break.get();
+
+            // On CHERI, we need to zero anything accessible by the app
+            if crate::config::CONFIG.is_cheri {
+                unsafe {
+                    // Safety: Given that we are about to include this in the application break,
+                    // this cannot also be used by the kernel. It also won't have been previously
+                    // allowed, as allow would not allow something past the break.
+                    core::ptr::write_bytes(
+                        old_break as *mut u8,
+                        0,
+                        (new_break as usize) - (old_break as usize),
+                    );
+                }
+            }
+
             self.app_break.set(new_break);
             self.chip.mpu().configure_mpu(&config, &self.processid());
-            Ok(old_break)
+
+            let mut break_result = cptr::default();
+            let base = self.mem_start() as usize;
+            break_result.set_addr_from_ddc_restricted(
+                old_break as usize,
+                base,
+                (new_break as usize) - base,
+            );
+
+            Ok(break_result)
         }
     })
 }
@@ -706,50 +804,31 @@ impl<C: Chip> Process for ProcessStandard<'_, C> {
         }
     }
 
-    fn grant_is_allocated(&self, grant_num: usize) -> Option<bool> {
-        // Do not modify an inactive process.
-        if !self.is_active() {
-            return None;
-        }
-
-        // Update the grant pointer to the address of the new allocation.
- self.grant_pointers.map_or(None, |grant_pointers| { - // Implement `grant_pointers[grant_num]` without a chance of a - // panic. - grant_pointers - .get(grant_num) - .map_or(None, |grant_entry| Some(!grant_entry.grant_ptr.is_null())) - }) - } - fn allocate_grant( &self, grant_num: usize, driver_num: usize, size: usize, align: usize, - ) -> bool { + ) -> Option> { // Do not modify an inactive process. if !self.is_active() { - return false; + return None; } // Verify the grant_num is valid. if grant_num >= self.kernel.get_grant_count_and_finalize() { - return false; - } - - // Verify that the grant is not already allocated. If the pointer is not - // null then the grant is already allocated. - if let Some(is_allocated) = self.grant_is_allocated(grant_num) { - if is_allocated { - return false; - } + return None; } // Verify that there is not already a grant allocated with the same // driver_num. let exists = self.grant_pointers.map_or(false, |grant_pointers| { + // Verify that the grant is not already allocated. If the pointer is not + // null then the grant is already allocated. + grant_pointers.get(grant_num).map_or(true, |grant_entry| + !grant_entry.grant_ptr.is_null()) || + // Check our list of grant pointers if the driver number is used. grant_pointers.iter().any(|grant_entry| { // Check if the grant is both allocated (its grant pointer is @@ -757,33 +836,34 @@ impl Process for ProcessStandard<'_, C> { (!grant_entry.grant_ptr.is_null()) && grant_entry.driver_num == driver_num }) }); + // If we find a match, then the driver_num must already be used and the // grant allocation fails. if exists { - return false; + return None; } // Use the shared grant allocator function to actually allocate memory. // Returns `None` if the allocation cannot be created. if let Some(grant_ptr) = self.allocate_in_grant_region_internal(size, align) { // Update the grant pointer to the address of the new allocation. - self.grant_pointers.map_or(false, |grant_pointers| { + self.grant_pointers.map_or(None, |grant_pointers| { // Implement `grant_pointers[grant_num] = grant_ptr` without a // chance of a panic. grant_pointers .get_mut(grant_num) - .map_or(false, |grant_entry| { + .map_or(None, |grant_entry| { // Actually set the driver num and grant pointer. grant_entry.driver_num = driver_num; grant_entry.grant_ptr = grant_ptr.as_ptr() as *mut u8; - // If all of this worked, return true. - true + // If all of this worked, return the allocated pointer. + Some(grant_ptr) }) }) } else { // Could not allocate the memory for the grant region. - false + None } } @@ -791,7 +871,7 @@ impl Process for ProcessStandard<'_, C> { &self, size: usize, align: usize, - ) -> Option<(ProcessCustomGrantIdentifer, NonNull)> { + ) -> Option<(ProcessCustomGrantIdentifier, NonNull)> { // Do not modify an inactive process. if !self.is_active() { return None; @@ -811,7 +891,7 @@ impl Process for ProcessStandard<'_, C> { } } - fn enter_grant(&self, grant_num: usize) -> Result, Error> { + fn get_grant_mem(&self, grant_num: usize) -> Result>, Error> { // Do not try to access the grant region of inactive process. if !self.is_active() { return Err(Error::InactiveApp); @@ -828,23 +908,7 @@ impl Process for ProcessStandard<'_, C> { Some(grant_entry) => { // Get a copy of the actual grant pointer. let grant_ptr = grant_entry.grant_ptr; - - // Check if the grant pointer is marked that the grant - // has already been entered. If so, return an error. 
- if (grant_ptr as usize) & 0x1 == 0x1 { - // Lowest bit is one, meaning this grant has been - // entered. - Err(Error::AlreadyInUse) - } else { - // Now, to mark that the grant has been entered, we - // set the lowest bit to one and save this as the - // grant pointer. - grant_entry.grant_ptr = (grant_ptr as usize | 0x1) as *mut u8; - - // And we return the grant pointer to the entered - // grant. - Ok(unsafe { NonNull::new_unchecked(grant_ptr) }) - } + Ok(NonNull::new(grant_ptr)) } None => Err(Error::AddressOutOfBounds), } @@ -853,7 +917,7 @@ impl Process for ProcessStandard<'_, C> { fn enter_custom_grant( &self, - identifier: ProcessCustomGrantIdentifer, + identifier: ProcessCustomGrantIdentifier, ) -> Result<*mut u8, Error> { // Do not try to access the grant region of inactive process. if !self.is_active() { @@ -868,30 +932,6 @@ impl Process for ProcessStandard<'_, C> { Ok(custom_grant_address as *mut u8) } - unsafe fn leave_grant(&self, grant_num: usize) { - // Do not modify an inactive process. - if !self.is_active() { - return; - } - - self.grant_pointers.map(|grant_pointers| { - // Implement `grant_pointers[grant_num]` without a chance of a - // panic. - match grant_pointers.get_mut(grant_num) { - Some(grant_entry) => { - // Get a copy of the actual grant pointer. - let grant_ptr = grant_entry.grant_ptr; - - // Now, to mark that the grant has been released, we set the - // lowest bit back to zero and save this as the grant - // pointer. - grant_entry.grant_ptr = (grant_ptr as usize & !0x1) as *mut u8; - } - None => {} - } - }); - } - fn grant_allocated_count(&self) -> Option { // Do not modify an inactive process. if !self.is_active() { @@ -925,15 +965,14 @@ impl Process for ProcessStandard<'_, C> { }) } - fn is_valid_upcall_function_pointer(&self, upcall_fn: NonNull<()>) -> bool { - let ptr = upcall_fn.as_ptr() as *const u8; + fn is_valid_upcall_function_pointer(&self, upcall_fn: *const u8) -> bool { let size = mem::size_of::<*const u8>(); // It is ok if this function is in memory or flash. - self.in_app_flash_memory(ptr, size) || self.in_app_owned_memory(ptr, size) + self.in_app_flash_memory(upcall_fn, size) || self.in_app_owned_memory(upcall_fn, size) } - fn get_process_name(&self) -> &'static str { + fn get_process_name(&self) -> &str { self.process_name } @@ -941,6 +980,20 @@ impl Process for ProcessStandard<'_, C> { self.completion_code.extract() } + fn get_extra_syscall_arg(&self, ndx: usize) -> Option { + self.stored_state + .map(|stored_state| unsafe { + // SAFETY: these are the correct bounds for the app + self.chip.userspace_kernel_boundary().get_extra_syscall_arg( + ndx, + self.mem_start(), + self.app_break.get(), + stored_state, + ) + }) + .flatten() + } + fn set_syscall_return_value(&self, return_value: SyscallReturn) { match self.stored_state.map(|stored_state| unsafe { // Actually set the return value for a particular process. @@ -1117,7 +1170,7 @@ impl Process for ProcessStandard<'_, C> { ProcessSizes { grant_pointers: mem::size_of::() * self.kernel.get_grant_count_and_finalize(), - upcall_list: Self::CALLBACKS_OFFSET, + upcall_list: 0, process_control_block: Self::PROCESS_STRUCT_OFFSET, } } @@ -1225,25 +1278,43 @@ impl Process for ProcessStandard<'_, C> { } } -impl ProcessStandard<'_, C> { - // Memory offset for upcall ring buffer (10 element length). 
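A sketch of the bounds check behind `is_valid_upcall_function_pointer` now that it takes a raw `*const u8`: the pointed-to span must sit entirely inside app flash or app-owned RAM. The region tuples here are hypothetical stand-ins for the process's real bounds.

```rust
/// Both ends of the span must fall inside the region; checked_add guards the
/// wrap-around case the kernel code handles with `buf_end_addr >= buf_start_addr`.
fn in_region(start: usize, size: usize, region: (usize, usize)) -> bool {
    start
        .checked_add(size)
        .map_or(false, |end| start >= region.0 && end <= region.1)
}

fn is_valid_upcall_fn(ptr: *const u8, flash: (usize, usize), ram: (usize, usize)) -> bool {
    let size = core::mem::size_of::<*const u8>();
    in_region(ptr as usize, size, flash) || in_region(ptr as usize, size, ram)
}
```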
- const CALLBACK_LEN: usize = 10; - const CALLBACKS_OFFSET: usize = mem::size_of::() * Self::CALLBACK_LEN; +// Power two sizes are preferable +const CALLBACK_LEN: usize = 8; +impl ProcessStandard<'_, C> { // Memory offset to make room for this process's metadata. const PROCESS_STRUCT_OFFSET: usize = mem::size_of::>(); - pub(crate) unsafe fn create<'a>( + pub(crate) unsafe fn create<'a, 'b>( kernel: &'static Kernel, chip: &'static C, - app_flash: &'static [u8], + app_flash: &'b [u8], header_length: usize, app_version: u16, - remaining_memory: &'a mut [u8], + remaining_memory_in: &mut Option<&'a mut [u8]>, fault_policy: &'static dyn ProcessFaultPolicy, require_kernel_version: bool, index: usize, - ) -> Result<(Option<&'static dyn Process>, &'a mut [u8]), ProcessLoadError> { + flash_is_static: bool, + ) -> Result, ProcessLoadError> { + // Keeping part of the app in flash makes sense if such a memory type actually exists. + // However, if being loaded from disk it makes little sense. + // Further, splitting the app is problematic. RISCV does not have the compiler mode + // to consider global accesses relative to some base. + // This means that splitting the application in half makes it non-relocatable even though + // the generic code is PIC. + // CHERI also adds complexity. + // Even though DDC/PCC are separate capabilities, hybrid will make all accesses PC- + // relative, but authorise via DDC, requiring read-only data to be covered by DDC. + // Purecap CHERI needs the captable to be PC-relative, so it cannot go in flash as + // flash cannot contain tags. + // With contiguous_load = true, the kernel copies the entire program from flash into + // RAM. + let contiguous_load = crate::config::CONFIG.contiguous_load_procs; + + // If flash is not static, processes must be loaded completely into RAM. + assert!(contiguous_load || flash_is_static); + // Get a slice for just the app header. let header_flash = app_flash .get(0..header_length as usize) @@ -1251,8 +1322,7 @@ impl ProcessStandard<'_, C> { // Parse the full TBF header to see if this is a valid app. If the // header can't parse, we will error right here. - let tbf_header = tock_tbf::parse::parse_tbf_header(header_flash, app_version)?; - + let tbf_header = tock_tbf::parse::parse_tbf_header_non_static(header_flash, app_version)?; let process_name = tbf_header.get_package_name(); // If this isn't an app (i.e. it is padding) or it is an app but it @@ -1277,7 +1347,7 @@ impl ProcessStandard<'_, C> { } } // Return no process and the full memory slice we were given. - return Ok((None, remaining_memory)); + return Ok(None); } if let Some((major, minor)) = tbf_header.get_kernel_version() { @@ -1318,10 +1388,25 @@ impl ProcessStandard<'_, C> { } } + // Save copies of these in case the app was compiled for fixed addresses + // for later debugging. + let fixed_address_flash = tbf_header.get_fixed_address_flash(); + let fixed_address_ram = if contiguous_load { + // TODO: Fix elf2tab to not use magic sentinel value of 0x80000000 vaddr to infer + // PIC. This is incompatible with using a sensible vaddr for text in SRAM. + None + } else { + tbf_header.get_fixed_address_ram() + }; + // A contiguously loaded process indicates where it would like to start in allocated + // memory using the fixed address ram field (as it's not using it for anything else). + // This allows it to put some stack/bss first if need be. 
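Why the new `CALLBACK_LEN` of 8 comes with the note that power-of-two sizes are preferable: the ring-buffer index wrap reduces to a bit-mask instead of a divide. A minimal illustration, assuming the ring wraps indices this way:

```rust
const CALLBACK_LEN: usize = 8; // must be a power of two for the mask trick

fn wrap(index: usize) -> usize {
    debug_assert!(CALLBACK_LEN.is_power_of_two());
    index & (CALLBACK_LEN - 1) // same result as index % CALLBACK_LEN
}
```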
+ let copied_ram_start = tbf_header.get_fixed_address_ram().unwrap_or(0) as usize; + // Check that the process is at the correct location in // flash if the TBF header specified a fixed address. If there is a // mismatch we catch that early. - if let Some(fixed_flash_start) = tbf_header.get_fixed_address_flash() { + if let Some(fixed_flash_start) = fixed_address_ram { // The flash address in the header is based on the app binary, // so we need to take into account the header length. let actual_address = app_flash.as_ptr() as u32 + tbf_header.get_protected_size(); @@ -1335,35 +1420,32 @@ impl ProcessStandard<'_, C> { } // Otherwise, actually load the app. - let process_ram_requested_size = tbf_header.get_minimum_app_ram_size() as usize; - let init_fn = app_flash - .as_ptr() - .offset(tbf_header.get_init_function_offset() as isize) as usize; - // Initialize MPU region configuration. let mut mpu_config: <::MPU as MPU>::MpuConfig = Default::default(); - // Allocate MPU region for flash. - if chip - .mpu() - .allocate_region( - app_flash.as_ptr(), - app_flash.len(), - app_flash.len(), - mpu::Permissions::ReadExecuteOnly, - &mut mpu_config, - ) - .is_none() - { - if config::CONFIG.debug_load_processes { - debug!( - "[!] flash={:#010X}-{:#010X} process={:?} - couldn't allocate MPU region for flash", - app_flash.as_ptr() as usize, - app_flash.as_ptr() as usize + app_flash.len() - 1, - process_name - ); + if !contiguous_load { + // Allocate MPU region for flash. + if chip + .mpu() + .allocate_region( + app_flash.as_ptr(), + app_flash.len(), + app_flash.len(), + mpu::Permissions::ReadExecuteOnly, + &mut mpu_config, + ) + .is_none() + { + if config::CONFIG.debug_load_processes { + debug!( + "[!] flash={:#010X}-{:#010X} process={:?} - couldn't allocate MPU region for flash", + app_flash.as_ptr() as usize, + app_flash.as_ptr() as usize + app_flash.len() - 1, + process_name + ); + } + return Err(ProcessLoadError::MpuInvalidFlashLength); } - return Err(ProcessLoadError::MpuInvalidFlashLength); } // Determine how much space we need in the application's memory space @@ -1375,11 +1457,20 @@ impl ProcessStandard<'_, C> { let grant_ptrs_num = kernel.get_grant_count_and_finalize(); let grant_ptrs_offset = grant_ptrs_num * grant_ptr_size; + let space_for_name = if flash_is_static { + 0 + } else { + match tbf_header.get_package_name() { + None => 0, + Some(name) => name.len(), + } + }; + // Initial size of the kernel-owned part of process memory can be // calculated directly based on the initial size of all kernel-owned // data structures. let initial_kernel_memory_size = - grant_ptrs_offset + Self::CALLBACKS_OFFSET + Self::PROCESS_STRUCT_OFFSET; + grant_ptrs_offset + Self::PROCESS_STRUCT_OFFSET + space_for_name; // By default we start with the initial size of process-accessible // memory set to 0. This maximizes the flexibility that processes have @@ -1391,10 +1482,22 @@ impl ProcessStandard<'_, C> { // the context switching implementation and allocate at least that much // memory so that we can successfully switch to the process. This is // architecture and implementation specific, so we query that now. 
- let min_process_memory_size = chip + let mut min_process_memory_size = chip .userspace_kernel_boundary() .initial_process_app_brk_size(); + let mut process_ram_requested_size = tbf_header.get_minimum_app_ram_size() as usize; + + let flash_protected_size = tbf_header.get_protected_size() as usize; + let non_header_flash = &app_flash[tbf_header.get_protected_size() as usize..]; + + if contiguous_load { + // appbrk should cover the moved flash and the process ram increases by that much + // The requested size also did not include the stack + let extra_size = non_header_flash.len() + copied_ram_start; + min_process_memory_size += extra_size; + process_ram_requested_size += extra_size; + } // We have to ensure that we at least ask the MPU for // `min_process_memory_size` so that we can be sure that `app_brk` is // not set inside the kernel-owned memory region. Now, in practice, @@ -1408,14 +1511,17 @@ impl ProcessStandard<'_, C> { // Minimum memory size for the process. let min_total_memory_size = min_process_ram_size + initial_kernel_memory_size; + let remaining_memory = remaining_memory_in + .take() + .ok_or(ProcessLoadError::InternalError)?; + // Check if this process requires a fixed memory start address. If so, // try to adjust the memory region to work for this process. // // Right now, we only support skipping some RAM and leaving a chunk // unused so that the memory region starts where the process needs it // to. - let remaining_memory = if let Some(fixed_memory_start) = tbf_header.get_fixed_address_ram() - { + let remaining_memory = if let Some(fixed_memory_start) = fixed_address_ram { // The process does have a fixed address. if fixed_memory_start == remaining_memory.as_ptr() as u32 { // Address already matches. @@ -1444,6 +1550,7 @@ impl ProcessStandard<'_, C> { // Address is earlier in memory, nothing we can do. let actual_address = remaining_memory.as_ptr() as u32; let expected_address = fixed_memory_start; + *remaining_memory_in = Some(remaining_memory); return Err(ProcessLoadError::MemoryAddressMismatch { actual_address, expected_address, @@ -1461,7 +1568,14 @@ impl ProcessStandard<'_, C> { min_total_memory_size, min_process_memory_size, initial_kernel_memory_size, - mpu::Permissions::ReadWriteOnly, + if contiguous_load { + // TODO: For CHERI, this will still result in W^X. For non-CHERI we may wish to use + // two regions still. However, I am uninterested in fixing this until we have a + // target without CHERI using this loading mode. + mpu::Permissions::ReadWriteExecute + } else { + mpu::Permissions::ReadWriteOnly + }, &mut mpu_config, ) { Some((memory_start, memory_size)) => (memory_start, memory_size), @@ -1476,10 +1590,34 @@ impl ProcessStandard<'_, C> { min_total_memory_size ); } + *remaining_memory_in = Some(remaining_memory); return Err(ProcessLoadError::NotEnoughMemory); } }; + // For split processes, text is in flash. Otherwise it is in app memory. 
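The contiguous-load sizing above, restated as plain arithmetic so the two adjustments are easy to check: both the minimum break and the requested RAM grow by the copied program text plus the RAM the process reserves in front of it.

```rust
fn contiguous_load_sizes(
    min_app_brk: usize,      // arch-specific initial_process_app_brk_size()
    requested_ram: usize,    // TBF get_minimum_app_ram_size()
    non_header_flash: usize, // program text copied out of flash
    copied_ram_start: usize, // stack/bss the process places before the text
) -> (usize, usize) {
    let extra = non_header_flash + copied_ram_start;
    (min_app_brk + extra, requested_ram + extra)
}
```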
+ let (fn_base, fn_len, init_addr) = { + if contiguous_load { + ( + app_memory_start as usize, + min_process_memory_size as usize + copied_ram_start, + // We have to subtract flash_protected_size as the entry includes the protected size + app_memory_start as usize + tbf_header.get_init_function_offset() as usize + - flash_protected_size + + copied_ram_start, + ) + } else { + ( + app_flash.as_ptr() as usize, + app_flash.len(), + app_flash.as_ptr() as usize + tbf_header.get_init_function_offset() as usize, + ) + } + }; + + let mut init_fn = cptr::default(); + init_fn.set_addr_from_pcc_restricted(init_addr, fn_base, fn_len); + // Get a slice for the memory dedicated to the process. This can fail if // the MPU returns a region of memory that is not inside of the // `remaining_memory` slice passed to `create()` to allocate the @@ -1489,18 +1627,38 @@ impl ProcessStandard<'_, C> { // process memory and a slice that will not be used by this process. let (app_memory_oversize, unused_memory) = remaining_memory.split_at_mut(memory_start_offset + app_memory_size); + + *remaining_memory_in = Some(unused_memory); + // Then since the process's memory need not start at the beginning of // the remaining slice given to create(), get a smaller slice as needed. let app_memory = app_memory_oversize .get_mut(memory_start_offset..) .ok_or(ProcessLoadError::InternalError)?; + // Copy flash into RAM for the process + if contiguous_load { + // On CHERI, we need to zero anything accessible by the app + if crate::config::CONFIG.is_cheri { + app_memory[0..copied_ram_start].fill(0); + } + let dst = &mut app_memory[copied_ram_start..copied_ram_start + non_header_flash.len()]; + + dst.copy_from_slice(non_header_flash); + + C::on_executable_memory_changed(NonNull::from(dst)); + + if crate::config::CONFIG.is_cheri { + app_memory[copied_ram_start + non_header_flash.len()..].fill(0); + } + } + // Check if the memory region is valid for the process. If a process // included a fixed address for the start of RAM in its TBF header (this // field is optional, processes that are position independent do not // need a fixed address) then we check that we used the same address // when we allocated it in RAM. - if let Some(fixed_memory_start) = tbf_header.get_fixed_address_ram() { + if let Some(fixed_memory_start) = fixed_address_ram { let actual_address = app_memory.as_ptr() as u32; let expected_address = fixed_memory_start; if actual_address != expected_address { @@ -1522,64 +1680,65 @@ impl ProcessStandard<'_, C> { // Set up initial grant region. let mut kernel_memory_break = app_memory.as_mut_ptr().add(app_memory.len()); + fn aligned_for(ptr: *mut u8) -> *mut T { + // Following pattern ensures correct alignment + #[allow(clippy::cast_ptr_alignment)] + { + ((ptr as usize) & !(mem::align_of::() - 1)) as *mut T + } + } + // Now that we know we have the space we can setup the grant // pointers. kernel_memory_break = kernel_memory_break.offset(-(grant_ptrs_offset as isize)); + let grant_ptr = aligned_for::(kernel_memory_break); + kernel_memory_break = grant_ptr as *mut u8; - // This is safe today, as MPU constraints ensure that `memory_start` - // will always be aligned on at least a word boundary, and that - // memory_size will be aligned on at least a word boundary, and - // `grant_ptrs_offset` is a multiple of the word size. Thus, - // `kernel_memory_break` must be word aligned. While this is unlikely to - // change, it should be more proactively enforced. 
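The entry-point arithmetic for the contiguous case, pulled out as a standalone sketch: the TBF init offset is relative to the start of the TBF, but the protected region is not copied into RAM, hence the subtraction.

```rust
fn init_addr_contiguous(
    app_memory_start: usize,
    init_function_offset: usize, // relative to the start of the TBF
    flash_protected_size: usize, // header + protected region, not copied
    copied_ram_start: usize,     // RAM the process reserved ahead of the text
) -> usize {
    app_memory_start + init_function_offset - flash_protected_size + copied_ram_start
}
```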
- // // TODO: https://github.com/tock/tock/issues/1739 - #[allow(clippy::cast_ptr_alignment)] // Set all grant pointers to null. - let grant_pointers = slice::from_raw_parts_mut( - kernel_memory_break as *mut GrantPointerEntry, - grant_ptrs_num, - ); + let grant_pointers = slice::from_raw_parts_mut(grant_ptr, grant_ptrs_num); for grant_entry in grant_pointers.iter_mut() { grant_entry.driver_num = 0; grant_entry.grant_ptr = ptr::null_mut(); } - // Now that we know we have the space we can setup the memory for the - // upcalls. - kernel_memory_break = kernel_memory_break.offset(-(Self::CALLBACKS_OFFSET as isize)); - - // This is safe today, as MPU constraints ensure that `memory_start` - // will always be aligned on at least a word boundary, and that - // memory_size will be aligned on at least a word boundary, and - // `grant_ptrs_offset` is a multiple of the word size. Thus, - // `kernel_memory_break` must be word aligned. While this is unlikely to - // change, it should be more proactively enforced. - // - // TODO: https://github.com/tock/tock/issues/1739 - #[allow(clippy::cast_ptr_alignment)] - // Set up ring buffer for upcalls to the process. - let upcall_buf = - slice::from_raw_parts_mut(kernel_memory_break as *mut Task, Self::CALLBACK_LEN); - let tasks = RingBuffer::new(upcall_buf); - // Last thing in the kernel region of process RAM is the process struct. kernel_memory_break = kernel_memory_break.offset(-(Self::PROCESS_STRUCT_OFFSET as isize)); - let process_struct_memory_location = kernel_memory_break; + let process_struct_memory_location = + aligned_for::>(kernel_memory_break); + kernel_memory_break = process_struct_memory_location as *mut u8; + + // Unless we need a bit of extra space to store the name of the process if flash is not + // static. + let process_name: Option<&'static str> = if flash_is_static { + // Safety: caller has declared flash is static + core::mem::transmute(tbf_header.get_package_name()) + } else { + match tbf_header.get_package_name() { + None => None, + Some(name) => { + kernel_memory_break = kernel_memory_break.offset(-(space_for_name as isize)); + let buf: &'static mut [u8] = + slice::from_raw_parts_mut(kernel_memory_break as *mut u8, space_for_name); + buf.copy_from_slice(name.as_bytes()); + Some(core::str::from_utf8_unchecked(buf)) + } + } + }; + + // Convert to a static version + let tbf_header = tbf_header.into_static(process_name); + + // TODO: https://github.com/tock/tock/issues/1739 // Create the Process struct in the app grant region. - let mut process: &mut ProcessStandard = - &mut *(process_struct_memory_location as *mut ProcessStandard<'static, C>); + // FIXME: Unsound. This should use maybe uninit. b/312546068 + let mut process: &mut ProcessStandard = &mut *(process_struct_memory_location); // Ask the kernel for a unique identifier for this process that is being // created. let unique_identifier = kernel.create_process_identifier(); - // Save copies of these in case the app was compiled for fixed addresses - // for later debugging. 
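The `aligned_for` helper above with its (stripped) generic parameter written out; `align_of` is always a power of two in Rust, so the mask rounds down to a valid `T` alignment:

```rust
fn aligned_for<T>(ptr: *mut u8) -> *mut T {
    // Rounds down, so the caller must account for the bytes skipped when
    // carving kernel-owned structures off the top of process memory.
    ((ptr as usize) & !(core::mem::align_of::<T>() - 1)) as *mut T
}
```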
- let fixed_address_flash = tbf_header.get_fixed_address_flash(); - let fixed_address_ram = tbf_header.get_fixed_address_ram(); - process .process_id .set(ProcessId::new(kernel, unique_identifier, index)); @@ -1604,14 +1763,14 @@ impl ProcessStandard<'_, C> { process.mpu_config = MapCell::new(mpu_config); process.mpu_regions = [ - Cell::new(None), - Cell::new(None), - Cell::new(None), - Cell::new(None), - Cell::new(None), - Cell::new(None), + Cell::new(MPURegionState::Free), + Cell::new(MPURegionState::Free), + Cell::new(MPURegionState::Free), + Cell::new(MPURegionState::Free), + Cell::new(MPURegionState::Free), + Cell::new(MPURegionState::Free), ]; - process.tasks = MapCell::new(tasks); + process.tasks = StaticSizedRingBuffer::new_uninit(); process.process_name = process_name.unwrap_or(""); process.debug = MapCell::new(ProcessStandardDebug { @@ -1626,19 +1785,20 @@ impl ProcessStandard<'_, C> { timeslice_expiration_count: 0, }); - let flash_protected_size = process.header.get_protected_size() as usize; - let flash_app_start_addr = app_flash.as_ptr() as usize + flash_protected_size; - - process.tasks.map(|tasks| { - tasks.enqueue(Task::FunctionCall(FunctionCall { - source: FunctionCallSource::Kernel, - pc: init_fn, - argument0: flash_app_start_addr, - argument1: process.memory_start as usize, - argument2: process.memory_len, - argument3: process.app_break.get() as usize, - })); - }); + let flash_app_start_addr = if contiguous_load { + app_memory_start as usize + copied_ram_start + } else { + app_flash.as_ptr() as usize + flash_protected_size + }; + + let _ = process.tasks.enqueue(Task::FunctionCall(FunctionCall { + source: FunctionCallSource::Kernel, + pc: init_fn, + argument0: flash_app_start_addr, + argument1: process.memory_start as usize, + argument2: process.memory_len, + argument3: (process.app_break.get() as usize).into(), + })); // Handle any architecture-specific requirements for a new process. // @@ -1672,7 +1832,7 @@ impl ProcessStandard<'_, C> { kernel.increment_work(); // Return the process object and a remaining memory for processes slice. - Ok((Some(process), unused_memory)) + Ok(Some(process)) } /// Restart the process, resetting all of its state and re-initializing it @@ -1690,6 +1850,9 @@ impl ProcessStandard<'_, C> { self.process_id .set(ProcessId::new(self.kernel, new_identifier, old_index)); + // TODO: b/266802576 + // TODO: none of this has been updated for contigous loading / CHERI / RefCell in grants + // Reset debug information that is per-execution and not per-process. self.debug.map(|debug| { debug.syscall_count = 0; @@ -1703,10 +1866,18 @@ impl ProcessStandard<'_, C> { // We are going to start this process over again, so need the init_fn // location. let app_flash_address = self.flash_start(); - let init_fn = unsafe { + let init_addr = unsafe { app_flash_address.offset(self.header.get_init_function_offset() as isize) as usize }; + + let mut init_fn = cptr::default(); + init_fn.set_addr_from_pcc_restricted( + init_addr, + self.flash.as_ptr() as usize, + self.flash.len(), + ); + // Reset MPU region configuration. 
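Conceptually, what `set_addr_from_pcc_restricted` is asked to do in both `create` and `restart`: derive an executable capability whose bounds cover the program text and whose address is the entry point. A hypothetical model, not the kernel's `cptr` API:

```rust
#[derive(Debug)]
struct Cap {
    addr: usize,
    base: usize,
    len: usize,
}

/// Bounds are [base, base + len); fail if the entry address falls outside.
fn pcc_restricted(init_addr: usize, base: usize, len: usize) -> Option<Cap> {
    (init_addr >= base && init_addr < base + len).then_some(Cap {
        addr: init_addr,
        base,
        len,
    })
}
```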
// // TODO: ideally, this would be moved into a helper function used by @@ -1746,7 +1917,7 @@ impl ProcessStandard<'_, C> { let grant_ptrs_offset = grant_ptrs_num * grant_ptr_size; let initial_kernel_memory_size = - grant_ptrs_offset + Self::CALLBACKS_OFFSET + Self::PROCESS_STRUCT_OFFSET; + grant_ptrs_offset + Self::PROCESS_STRUCT_OFFSET; let app_mpu_mem = self.chip.mpu().allocate_app_memory_region( self.mem_start(), @@ -1820,16 +1991,14 @@ impl ProcessStandard<'_, C> { self.restart_count.increment(); // Enqueue the initial function. - self.tasks.map(|tasks| { - tasks.enqueue(Task::FunctionCall(FunctionCall { - source: FunctionCallSource::Kernel, - pc: init_fn, - argument0: flash_app_start, - argument1: self.mem_start() as usize, - argument2: self.memory_len, - argument3: self.app_break.get() as usize, - })); - }); + let _ = self.tasks.enqueue(Task::FunctionCall(FunctionCall { + source: FunctionCallSource::Kernel, + pc: init_fn, + argument0: flash_app_start, + argument1: self.mem_start() as usize, + argument2: self.memory_len, + argument3: (self.app_break.get() as usize).into(), + })); // Mark that the process is ready to run. self.kernel.increment_work(); @@ -1843,6 +2012,10 @@ impl ProcessStandard<'_, C> { /// to be accessible to the process and to not overlap with the grant /// region. fn in_app_owned_memory(&self, buf_start_addr: *const u8, size: usize) -> bool { + // TODO: On CHERI platforms, it impossible to form syscalls with pointers + // that are not in app memory. However, buf_start_addr is not the right + // type to make this function always return true. If cptr makes it + // slightly further, we can skip this check. let buf_end_addr = buf_start_addr.wrapping_add(size); buf_end_addr >= buf_start_addr @@ -1855,6 +2028,10 @@ impl ProcessStandard<'_, C> { /// this method returns true, the buffer is guaranteed to be readable to the /// process. fn in_app_flash_memory(&self, buf_start_addr: *const u8, size: usize) -> bool { + // TODO: On CHERI platforms, it impossible to form syscalls with pointers + // that are not in app memory. However, buf_start_addr is not the right + // type to make this function always return true. If cptr makes it + // slightly further, we can skip this check. let buf_end_addr = buf_start_addr.wrapping_add(size); buf_end_addr >= buf_start_addr @@ -1910,7 +2087,11 @@ impl ProcessStandard<'_, C> { } else if let Err(_) = self.chip.mpu().update_app_memory_region( self.app_break.get(), new_break, - mpu::Permissions::ReadWriteOnly, + if CONFIG.contiguous_load_procs { + mpu::Permissions::ReadWriteExecute + } else { + mpu::Permissions::ReadWriteOnly + }, &mut config, ) { None @@ -1939,20 +2120,20 @@ impl ProcessStandard<'_, C> { /// /// We create this identifier by calculating the number of bytes between /// where the custom grant starts and the end of the process memory. - fn create_custom_grant_identifier(&self, ptr: NonNull) -> ProcessCustomGrantIdentifer { + fn create_custom_grant_identifier(&self, ptr: NonNull) -> ProcessCustomGrantIdentifier { let custom_grant_address = ptr.as_ptr() as usize; let process_memory_end = self.mem_end() as usize; - ProcessCustomGrantIdentifer { + ProcessCustomGrantIdentifier { offset: process_memory_end - custom_grant_address, } } - /// Use a ProcessCustomGrantIdentifer to find the address of the custom + /// Use a ProcessCustomGrantIdentifier to find the address of the custom /// grant. /// /// This reverses `create_custom_grant_identifier()`. 
-    fn get_custom_grant_address(&self, identifier: ProcessCustomGrantIdentifer) -> usize {
+    fn get_custom_grant_address(&self, identifier: ProcessCustomGrantIdentifier) -> usize {
         let process_memory_end = self.mem_end() as usize;

         // Subtract the offset in the identifier from the end of the process
diff --git a/kernel/src/process_utilities.rs b/kernel/src/process_utilities.rs
index 7945ec1f3..61f478f03 100644
--- a/kernel/src/process_utilities.rs
+++ b/kernel/src/process_utilities.rs
@@ -5,12 +5,12 @@ use core::fmt;

 use crate::capabilities::ProcessManagementCapability;
 use crate::config;
-use crate::debug;
 use crate::kernel::Kernel;
 use crate::platform::chip::Chip;
-use crate::process::Process;
+use crate::platform::mpu::MPU;
 use crate::process_policies::ProcessFaultPolicy;
 use crate::process_standard::ProcessStandard;
+use crate::{debug, ErrorCode};

 /// Errors that can occur when trying to load and create processes.
 pub enum ProcessLoadError {
@@ -64,6 +64,21 @@ pub enum ProcessLoadError {
     InternalError,
 }

+impl From<ProcessLoadError> for ErrorCode {
+    fn from(er: ProcessLoadError) -> Self {
+        match er {
+            ProcessLoadError::NotEnoughMemory => ErrorCode::NOMEM,
+            ProcessLoadError::IncorrectFlashAddress { .. }
+            | ProcessLoadError::MemoryAddressMismatch { .. }
+            | ProcessLoadError::MpuInvalidFlashLength
+            | ProcessLoadError::NotEnoughFlash
+            | ProcessLoadError::TbfHeaderParseFailure(_) => ErrorCode::INVAL,
+            ProcessLoadError::IncompatibleKernelVersion { .. } => ErrorCode::NOSUPPORT,
+            ProcessLoadError::InternalError => ErrorCode::FAIL,
+        }
+    }
+}
+
 impl From<tock_tbf::types::TbfParseError> for ProcessLoadError {
     /// Convert between a TBF Header parse error and a process load error.
     ///
@@ -148,7 +163,7 @@ impl fmt::Debug for ProcessLoadError {
 /// processes from slices of flash and memory is fundamentally unsafe. Therefore,
 /// we require the `ProcessManagementCapability` to call this function.
 ///
-/// Returns `Ok(())` if process discovery went as expected. Returns a
-/// `ProcessLoadError` if something goes wrong during TBF parsing or process
-/// creation.
+/// Returns `Ok` with any remaining app_memory if process discovery went as
+/// expected. Returns a `ProcessLoadError` if something goes wrong during TBF
+/// parsing or process creation.
 #[inline(always)]
-pub fn load_processes_advanced(
+pub fn load_processes_advanced<C: Chip>(
     kernel: &'static Kernel,
     chip: &'static C,
     app_flash: &'static [u8],
-    app_memory: &mut [u8], // not static, so that process.rs cannot hold on to slice w/o unsafe
-    procs: &'static mut [Option<&'static dyn Process>],
+    // must be static so caller cannot retain this
+    app_memory: &'static mut [u8],
     fault_policy: &'static dyn ProcessFaultPolicy,
     require_kernel_version: bool,
     _capability: &dyn ProcessManagementCapability,
-) -> Result<(), ProcessLoadError> {
+) -> Result<Option<&'static mut [u8]>, (ProcessLoadError, Option<&'static mut [u8]>)> {
     if config::CONFIG.debug_load_processes {
         debug!(
             "Loading processes from flash={:#010X}-{:#010X} into sram={:#010X}-{:#010X}",
@@ -173,88 +188,127 @@ pub fn load_processes_advanced(
     }

     let mut remaining_flash = app_flash;
-    let mut remaining_memory = app_memory;
-
-    // Try to discover up to `procs.len()` processes in flash.
-    let mut index = 0;
-    while index < procs.len() {
-        // Get the first eight bytes of flash to check if there is another
-        // app.
-        let test_header_slice = match remaining_flash.get(0..8) {
-            Some(s) => s,
-            None => {
-                // Not enough flash to test for another app. This just means
-                // we are at the end of flash, and there are no more apps to
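The identifier scheme above in isolation: a custom grant is named by its byte offset back from the end of process memory, so the identifier stays valid independently of where the grant pointer table sits. A minimal sketch:

```rust
struct ProcessCustomGrantIdentifier {
    offset: usize,
}

fn to_identifier(mem_end: usize, custom_grant_address: usize) -> ProcessCustomGrantIdentifier {
    ProcessCustomGrantIdentifier {
        offset: mem_end - custom_grant_address,
    }
}

fn to_address(mem_end: usize, identifier: ProcessCustomGrantIdentifier) -> usize {
    // Exactly reverses to_identifier for the same mem_end.
    mem_end - identifier.offset
}
```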
- return Ok(()); - } - }; + let mut remaining_memory = Some(app_memory); - // Pass the first eight bytes to tbfheader to parse out the length of - // the tbf header and app. We then use those values to see if we have - // enough flash remaining to parse the remainder of the header. - let (version, header_length, entry_length) = match tock_tbf::parse::parse_tbf_header_lengths( - test_header_slice - .try_into() - .or(Err(ProcessLoadError::InternalError))?, - ) { - Ok((v, hl, el)) => (v, hl, el), - Err(tock_tbf::types::InitialTbfParseError::InvalidHeader(entry_length)) => { - // If we could not parse the header, then we want to skip over - // this app and look for the next one. - (0, 0, entry_length) - } - Err(tock_tbf::types::InitialTbfParseError::UnableToParse) => { - // Since Tock apps use a linked list, it is very possible the - // header we started to parse is intentionally invalid to signal - // the end of apps. This is ok and just means we have finished - // loading apps. - return Ok(()); - } - }; + // Keep trying to load processes until there are no more. + // We intentionally keep loading even if the process array is full so that we don't silently + // forget to load processes. + loop { + let flash_before = remaining_flash; + let loaded_any = try_load_process( + kernel, + chip, + fault_policy, + require_kernel_version, + &mut remaining_flash, + &mut remaining_memory, + true, + _capability, + ) + .map_err(|er| (er, remaining_memory.take()))?; + + // Nothing more to load + if !loaded_any && flash_before.as_ptr() == remaining_flash.as_ptr() { + return Ok(remaining_memory.take()); + } + } +} - // Now we can get a slice which only encompasses the length of flash - // described by this tbf header. We will either parse this as an actual - // app, or skip over this region. - let entry_flash = remaining_flash - .get(0..entry_length as usize) - .ok_or(ProcessLoadError::NotEnoughFlash)?; +/// Tries to load a single tbf located at remaining_flash into memory at remaining_memory. +/// Returns OK(true) if a process was loaded +/// Returns OK(false) if a process was not loaded but no fatal error was encountered +fn try_load_process<'a, 'b, C: Chip>( + kernel: &'static Kernel, + chip: &'static C, + fault_policy: &'static dyn ProcessFaultPolicy, + require_kernel_version: bool, + remaining_flash_in: &mut &'b [u8], + // NOT static so that process.rs cannot keep a reference (if ever its interface changes) + // This cannot be public because remaining_memory needs to be static to the caller. + remaining_memory: &mut Option<&'a mut [u8]>, + flash_is_static: bool, + _capability: &dyn ProcessManagementCapability, +) -> Result { + let remaining_flash = *remaining_flash_in; + // Get the first eight bytes of flash to check if there is another + // app. + let test_header_slice = match remaining_flash.get(0..8) { + Some(s) => s, + None => { + // Not enough flash to test for another app. This just means + // we are at the end of flash, and there are no more apps to + // load. + return Ok(false); + } + }; - // Advance the flash slice for process discovery beyond this last entry. - // This will be the start of where we look for a new process since Tock - // processes are allocated back-to-back in flash. - remaining_flash = remaining_flash - .get(entry_flash.len()..) - .ok_or(ProcessLoadError::NotEnoughFlash)?; + // Pass the first eight bytes to tbfheader to parse out the length of + // the tbf header and app. We then use those values to see if we have + // enough flash remaining to parse the remainder of the header. 
+ let (version, header_length, entry_length) = match tock_tbf::parse::parse_tbf_header_lengths( + test_header_slice + .try_into() + .or(Err(ProcessLoadError::InternalError))?, + ) { + Ok((v, hl, el)) => (v, hl, el), + Err(tock_tbf::types::InitialTbfParseError::InvalidHeader(entry_length)) => { + // If we could not parse the header, then we want to skip over + // this app and look for the next one. + (0, 0, entry_length) + } + Err(tock_tbf::types::InitialTbfParseError::UnableToParse) => { + // Since Tock apps use a linked list, it is very possible the + // header we started to parse is intentionally invalid to signal + // the end of apps. This is ok and just means we have finished + // loading apps. + return Ok(false); + } + }; + + // Now we can get a slice which only encompasses the length of flash + // described by this tbf header. We will either parse this as an actual + // app, or skip over this region. + let entry_flash = remaining_flash + .get(0..entry_length as usize) + .ok_or(ProcessLoadError::NotEnoughFlash)?; + // Advance the flash slice for process discovery beyond this last entry. + // This will be the start of where we look for a new process since Tock + // processes are allocated back-to-back in flash. + *remaining_flash_in = remaining_flash + .get(entry_flash.len()..) + .ok_or(ProcessLoadError::NotEnoughFlash)?; + + if header_length > 0 { + let index = kernel.get_next_free_proc_entry()?; + // If we found an actual app header, try to create a `Process` + // object. We also need to shrink the amount of remaining memory + // based on whatever is assigned to the new process if one is + // created. + + // Try to create a process object from that app slice. If we don't + // get a process and we didn't get a loading error (aka we got to + // this point), then the app is a disabled process or just padding. + let process_option = unsafe { + ProcessStandard::create( + kernel, + chip, + entry_flash, + header_length as usize, + version, + remaining_memory, + fault_policy, + require_kernel_version, + index, + flash_is_static, + )? + }; // Need to reassign remaining_memory in every iteration so the compiler // knows it will not be re-borrowed. - remaining_memory = if header_length > 0 { - // If we found an actual app header, try to create a `Process` - // object. We also need to shrink the amount of remaining memory - // based on whatever is assigned to the new process if one is - // created. - - // Try to create a process object from that app slice. If we don't - // get a process and we didn't get a loading error (aka we got to - // this point), then the app is a disabled process or just padding. - let (process_option, unused_memory) = unsafe { - ProcessStandard::create( - kernel, - chip, - entry_flash, - header_length as usize, - version, - remaining_memory, - fault_policy, - require_kernel_version, - index, - )? - }; - process_option.map(|process| { - if config::CONFIG.debug_load_processes { - let addresses = process.get_addresses(); - debug!( + process_option.map(|process| { + if config::CONFIG.debug_load_processes { + let addresses = process.get_addresses(); + debug!( "Loaded process[{}] from flash={:#010X}-{:#010X} into sram={:#010X}-{:#010X} = {:?}", index, entry_flash.as_ptr() as usize, @@ -263,24 +317,40 @@ pub fn load_processes_advanced( addresses.sram_end - 1, process.get_process_name() ); - } - - // Save the reference to this process in the processes array. - procs[index] = Some(process); - // Can now increment index to use the next spot in the processes - // array. 
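The three outcomes of the header-length parse, and how each advances discovery, as a standalone sketch (enum names are illustrative, not the tock-tbf API):

```rust
enum HeaderCheck {
    App { entry_len: u32 },     // parsed fine: load it, then skip past it
    Padding { entry_len: u32 }, // invalid header with a length: skip it
    End,                        // unparseable: end of the back-to-back app list
}

fn advance(flash: &[u8], check: HeaderCheck) -> Option<&[u8]> {
    match check {
        HeaderCheck::App { entry_len } | HeaderCheck::Padding { entry_len } => {
            flash.get(entry_len as usize..) // next candidate starts right after
        }
        HeaderCheck::End => None,
    }
}
```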
Padding apps mean we might detect valid headers but - // not actually insert a new process in the array. - index += 1; - }); - unused_memory - } else { - // We are just skipping over this region of flash, so we have the - // same amount of process memory to allocate from. - remaining_memory - }; + } + + let _ = kernel.set_next_proc_entry_used(process); + + chip.mpu().new_process(process.processid()); + }); + return Ok(true); } - Ok(()) + // We are just skipping over this region of flash, so we have the + // same amount of process memory to allocate from. + Ok(false) +} + +/// Public version of try_load_process that ensures remaining_memory is static. +pub fn try_load_process_pub<'b, C: Chip>( + kernel: &'static Kernel, + chip: &'static C, + fault_policy: &'static dyn ProcessFaultPolicy, + require_kernel_version: bool, + flash: &mut &'b [u8], + remaining_memory: &mut Option<&'static mut [u8]>, + _capability: &dyn ProcessManagementCapability, +) -> Result { + try_load_process( + kernel, + chip, + fault_policy, + require_kernel_version, + flash, + remaining_memory, + false, + _capability, + ) } /// This is a wrapper function for `load_processes_advanced` that uses @@ -293,19 +363,57 @@ pub fn load_processes( kernel: &'static Kernel, chip: &'static C, app_flash: &'static [u8], - app_memory: &mut [u8], // not static, so that process.rs cannot hold on to slice w/o unsafe - procs: &'static mut [Option<&'static dyn Process>], + // this must be static because the caller should not be able to simply borrow the buffer and then + // use it again afterwards. App memory will eventually come back to the kernel via the allow + // mechanism. + app_memory: &'static mut [u8], fault_policy: &'static dyn ProcessFaultPolicy, capability: &dyn ProcessManagementCapability, ) -> Result<(), ProcessLoadError> { - load_processes_advanced( + match load_processes_advanced( kernel, chip, app_flash, app_memory, - procs, fault_policy, true, capability, - ) + ) { + Ok(_) => Ok(()), + Err((er, _)) => Err(er), + } +} + +/// Return (flash, ram) +/// Must call this only once as the ram is mut. +pub unsafe fn get_mems() -> (&'static [u8], &'static mut [u8]) { + #[cfg(target_os = "none")] + { + // These symbols are defined in the linker script. + extern "C" { + /// Beginning of the ROM region containing app images. + static _sapps: u8; + /// End of the ROM region containing app images. + static _eapps: u8; + /// Beginning of the RAM region for app memory. + static mut _sappmem: u8; + /// End of the RAM region for app memory. + static _eappmem: u8; + } + ( + core::slice::from_raw_parts( + &_sapps as *const u8, + &_eapps as *const u8 as usize - &_sapps as *const u8 as usize, + ), + core::slice::from_raw_parts_mut( + &mut _sappmem as *mut u8, + &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, + ), + ) + } + + #[cfg(not(target_os = "none"))] + { + (&[], &mut []) + } } diff --git a/kernel/src/processbuffer.rs b/kernel/src/processbuffer.rs index f3b5a79c0..ba6311b99 100644 --- a/kernel/src/processbuffer.rs +++ b/kernel/src/processbuffer.rs @@ -22,12 +22,38 @@ use core::cell::Cell; use core::marker::PhantomData; -use core::ops::{Deref, Index, Range, RangeFrom, RangeTo}; +use core::ops::{Deref, DerefMut, Index, IndexMut, Range, RangeFrom, RangeTo}; +use core::ptr::NonNull; use crate::capabilities; use crate::process::{self, ProcessId}; use crate::ErrorCode; +/// Get a valid non-null from a range in process memory. +/// # Safety requirements +/// If len is non-zero, ptr must be non zero. 
+/// +pub unsafe fn raw_processbuf_to_nonnul(ptr: *const u8, len: usize) -> NonNull<[u8]> { + // Rust has very strict requirements on pointer validity[1] + // which also in part apply to accesses of length 0. We allow + // an application to supply arbitrary pointers if the buffer + // length is 0, but this is not allowed for Rust slices. For + // instance, a null pointer is _never_ valid, not even for + // accesses of size zero. + // + // To get a pointer which does not point to valid (allocated) + // memory, but is safe to construct for accesses of size zero, + // we must call NonNull::dangling(). The resulting pointer is + // guaranteed to be well-aligned and uphold the guarantees + // required for accesses of size zero. + // + // [1]: https://doc.rust-lang.org/core/ptr/index.html#safety + match len { + 0 => NonNull::slice_from_raw_parts(core::ptr::NonNull::::dangling(), 0), + _ => NonNull::slice_from_raw_parts(NonNull::new_unchecked(ptr as *mut u8), len), + } +} + /// Convert a process buffer's internal representation to a /// ReadableProcessSlice. /// @@ -46,7 +72,7 @@ use crate::ErrorCode; /// /// It is sound for multiple overlapping [`ReadableProcessSlice`]s or /// [`WriteableProcessSlice`]s to be in scope at the same time. -unsafe fn raw_processbuf_to_roprocessslice<'a>( +pub unsafe fn raw_processbuf_to_roprocessslice<'a>( ptr: *const u8, len: usize, ) -> &'a ReadableProcessSlice { @@ -58,24 +84,7 @@ unsafe fn raw_processbuf_to_roprocessslice<'a>( // around an [UnsafeCell], which finally #[repr(transparent)] // wraps a [u8] core::mem::transmute::<&[u8], &ReadableProcessSlice>( - // Rust has very strict requirements on pointer validity[1] - // which also in part apply to accesses of length 0. We allow - // an application to supply arbitrary pointers if the buffer - // length is 0, but this is not allowed for Rust slices. For - // instance, a null pointer is _never_ valid, not even for - // accesses of size zero. - // - // To get a pointer which does not point to valid (allocated) - // memory, but is safe to construct for accesses of size zero, - // we must call NonNull::dangling(). The resulting pointer is - // guaranteed to be well-aligned and uphold the guarantees - // required for accesses of size zero. - // - // [1]: https://doc.rust-lang.org/core/ptr/index.html#safety - match len { - 0 => core::slice::from_raw_parts(core::ptr::NonNull::::dangling().as_ptr(), 0), - _ => core::slice::from_raw_parts(ptr, len), - }, + raw_processbuf_to_nonnul(ptr, len).as_ref(), ) } @@ -106,7 +115,7 @@ unsafe fn raw_processbuf_to_roprocessslice<'a>( /// However, it is sound for multiple overlapping /// [`ReadableProcessSlice`]s or [`WriteableProcessSlice`]s to be in /// scope at the same time. -unsafe fn raw_processbuf_to_rwprocessslice<'a>( +pub unsafe fn raw_processbuf_to_rwprocessslice<'a>( ptr: *mut u8, len: usize, ) -> &'a WriteableProcessSlice { @@ -172,6 +181,11 @@ pub trait ReadableProcessBuffer { /// to address `0x0`. fn ptr(&self) -> *const u8; + /// Get the pointer as a raw slice ref + fn slice(&self) -> *const [u8] { + core::ptr::slice_from_raw_parts(self.ptr(), self.len()) + } + /// Applies a function to the (read only) process slice reference /// pointed to by the process buffer. /// @@ -251,6 +265,20 @@ impl ReadOnlyProcessBuffer { } } + /// Same as new() but takes an option of a process ID + /// If none, then the buffer is always invalid. 
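The zero-length rule that comment describes, demonstrated in isolation: a Rust slice may never be built from a null pointer, even for length zero, so a well-aligned dangling pointer is substituted. A safe-wrapper sketch with the non-null requirement made explicit:

```rust
use core::ptr::NonNull;

fn to_nonnull_slice(ptr: *const u8, len: usize) -> NonNull<[u8]> {
    match len {
        // Arbitrary (even null) pointers are acceptable for zero-length
        // access only after substituting a well-aligned dangling pointer.
        0 => NonNull::slice_from_raw_parts(NonNull::<u8>::dangling(), 0),
        // Caller must guarantee ptr is non-null when len != 0.
        _ => NonNull::slice_from_raw_parts(
            NonNull::new(ptr as *mut u8).expect("non-null pointer for len > 0"),
            len,
        ),
    }
}
```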
+ pub(crate) unsafe fn new_option( + ptr: *const u8, + len: usize, + process_id: Option, + ) -> Self { + ReadOnlyProcessBuffer { + ptr, + len, + process_id, + } + } + /// Construct a new [`ReadOnlyProcessBuffer`] over a given pointer /// and length. /// @@ -282,6 +310,9 @@ impl ReadOnlyProcessBuffer { /// `core::mem::align_of::()` on the respective platform. It /// must point to memory mapped as _readable_ and optionally /// _writable_ and _executable_. + /// On a CHERI platform there are additional requirements that + /// that the process had a valid capability for the span + /// indicated by the combination of ptr and len. pub unsafe fn new_external( ptr: *const u8, len: usize, @@ -299,8 +330,8 @@ impl ReadOnlyProcessBuffer { /// `consume` can be used when the kernel needs to pass the /// underlying values across the kernel-to-user boundary (e.g., in /// return values to system calls). - pub(crate) fn consume(self) -> (*const u8, usize) { - (self.ptr, self.len) + pub(crate) fn consume(self) -> (*const u8, usize, Option) { + (self.ptr, self.len, self.process_id) } } @@ -368,7 +399,7 @@ pub struct ReadOnlyProcessBufferRef<'a> { _phantom: PhantomData<&'a ()>, } -impl ReadOnlyProcessBufferRef<'_> { +impl<'a> ReadOnlyProcessBufferRef<'a> { /// Construct a new [`ReadOnlyProcessBufferRef`] over a given pointer and /// length with a lifetime derived from the caller. /// @@ -415,6 +446,31 @@ pub struct ReadWriteProcessBuffer { process_id: Option, } +/// Implement PartialEq for ease of comparing buffers +impl PartialEq for ReadableProcessByte { + fn eq(&self, other: &ReadableProcessByte) -> bool { + self.cell.get() == other.cell.get() + } +} + +impl PartialEq for ReadableProcessByte { + fn eq(&self, other: &u8) -> bool { + self.cell.get() == *other + } +} + +impl PartialEq for u8 { + fn eq(&self, other: &ReadableProcessByte) -> bool { + *self == other.cell.get() + } +} + +impl PartialEq> for ReadableProcessByte { + fn eq(&self, other: &Cell) -> bool { + self.cell.get() == other.get() + } +} + impl ReadWriteProcessBuffer { /// Construct a new [`ReadWriteProcessBuffer`] over a given /// pointer and length. @@ -431,6 +487,20 @@ impl ReadWriteProcessBuffer { } } + /// Same as new() but takes an option of a process ID + /// If none, then the buffer is always invalid. + pub(crate) unsafe fn new_option( + ptr: *mut u8, + len: usize, + process_id: Option, + ) -> Self { + ReadWriteProcessBuffer { + ptr, + len, + process_id, + } + } + /// Construct a new [`ReadWriteProcessBuffer`] over a given /// pointer and length. /// @@ -479,8 +549,8 @@ impl ReadWriteProcessBuffer { /// `consume` can be used when the kernel needs to pass the /// underlying values across the kernel-to-user boundary (e.g., in /// return values to system calls). - pub(crate) fn consume(self) -> (*mut u8, usize) { - (self.ptr, self.len) + pub(crate) fn consume(self) -> (*mut u8, usize, Option) { + (self.ptr, self.len, self.process_id) } /// This is a `const` version of `Default::default` with the same @@ -599,7 +669,7 @@ pub struct ReadWriteProcessBufferRef<'a> { _phantom: PhantomData<&'a ()>, } -impl ReadWriteProcessBufferRef<'_> { +impl<'a> ReadWriteProcessBufferRef<'a> { /// Construct a new [`ReadWriteProcessBufferRef`] over a given pointer and /// length with a lifetime derived from the caller. /// @@ -649,14 +719,22 @@ pub type UserspaceReadableProcessBuffer = ReadWriteProcessBuffer; /// This read-only wrapper around a [`Cell`] only exposes methods /// which are safe to call on a process-shared read-only `allow` /// memory. 
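A hypothetical use of the new `PartialEq` impls: allow'd bytes can now be compared against plain `u8` without copying them out of the shared buffer first (module path assumed):

```rust
use kernel::processbuffer::ReadableProcessSlice;

fn starts_with_magic(buf: &ReadableProcessSlice) -> bool {
    // ReadableProcessByte == u8 comparisons work directly.
    buf.len() >= 2 && buf[0] == 0xC0u8 && buf[1] == 0xDEu8
}
```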
+/// +/// Because userspace might choose to arbitrarily mutate the contents there is another bound +/// required that every bit pattern be valid for the type. This is true for integers and +/// compositions of them. It is not true for enums. We might add a marker for this and a derive +/// macro, but for now we will just not give ways of constructing anything but integer versions #[repr(transparent)] -pub struct ReadableProcessByte { - cell: Cell, +pub struct ReadableProcessT { + cell: Cell, } -impl ReadableProcessByte { +pub type ReadableProcessByte = ReadableProcessT; +pub type ReadableProcessU32 = ReadableProcessT; + +impl ReadableProcessT { #[inline] - pub fn get(&self) -> u8 { + pub fn get(&self) -> T { self.cell.get() } } @@ -676,18 +754,52 @@ pub struct ReadableProcessSlice { slice: [ReadableProcessByte], } -fn cast_byte_slice_to_process_slice<'a>( +impl<'a> const Default for &'a ReadableProcessSlice { + fn default() -> Self { + let slice: &[ReadableProcessByte] = [].as_slice(); + slice.into() + } +} + +impl<'a> const From<&'a [ReadableProcessByte]> for &'a ReadableProcessSlice { + fn from(slice: &'a [ReadableProcessByte]) -> Self { + // As ReadableProcessSlice is a transparent wrapper around its inner type, + // [ReadableProcessByte], we can safely transmute a reference to the inner + // type as a reference to the outer type with the same lifetime. + unsafe { core::mem::transmute::<&[ReadableProcessByte], &ReadableProcessSlice>(slice) } + } +} + +impl<'a> const From<&'a mut [ReadableProcessByte]> for &'a mut ReadableProcessSlice { + fn from(slice: &'a mut [ReadableProcessByte]) -> Self { + // Same justification as non-mut version. + unsafe { + core::mem::transmute::<&mut [ReadableProcessByte], &mut ReadableProcessSlice>(slice) + } + } +} + +impl<'a> const From<&'a ReadableProcessSlice> for &'a [ReadableProcessByte] { + fn from(value: &'a ReadableProcessSlice) -> Self { + &value.slice + } +} + +impl PartialEq for ReadableProcessSlice { + fn eq(&self, other: &Self) -> bool { + self.slice == other.slice + } +} + +const fn cast_byte_slice_to_process_slice<'a>( byte_slice: &'a [ReadableProcessByte], ) -> &'a ReadableProcessSlice { - // As ReadableProcessSlice is a transparent wrapper around its inner type, - // [ReadableProcessByte], we can safely transmute a reference to the inner - // type as a reference to the outer type with the same lifetime. - unsafe { core::mem::transmute::<&[ReadableProcessByte], &ReadableProcessSlice>(byte_slice) } + byte_slice.into() } // Allow a u8 slice to be viewed as a ReadableProcessSlice to allow client code // to be authored once and accept either [u8] or ReadableProcessSlice. -impl<'a> From<&'a [u8]> for &'a ReadableProcessSlice { +impl<'a> const From<&'a [u8]> for &'a ReadableProcessSlice { fn from(val: &'a [u8]) -> Self { // # Safety // @@ -701,7 +813,7 @@ impl<'a> From<&'a [u8]> for &'a ReadableProcessSlice { // Allow a mutable u8 slice to be viewed as a ReadableProcessSlice to allow // client code to be authored once and accept either [u8] or // ReadableProcessSlice. 
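A hypothetical use of `align_to_u32`: summing an allow'd buffer a word at a time, with byte-wise fallback at the unaligned edges (module path assumed):

```rust
use kernel::processbuffer::ReadableProcessSlice;

fn checksum(buf: &ReadableProcessSlice) -> u32 {
    let (head, words, tail) = buf.align_to_u32();
    let mut sum = 0u32;
    // Unaligned edges are summed byte-wise...
    for b in head.iter().chain(tail.iter()) {
        sum = sum.wrapping_add(b.get() as u32);
    }
    // ...and the aligned middle a u32 at a time.
    for w in words {
        sum = sum.wrapping_add(w.get());
    }
    sum
}
```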
-impl<'a> From<&'a mut [u8]> for &'a ReadableProcessSlice { +impl<'a> const From<&'a mut [u8]> for &'a ReadableProcessSlice { fn from(val: &'a mut [u8]) -> Self { // # Safety // @@ -713,6 +825,22 @@ impl<'a> From<&'a mut [u8]> for &'a ReadableProcessSlice { } impl ReadableProcessSlice { + /// Safe wrapper that aligns a ReadableProcessSlice + #[inline] + pub fn align_to_u32( + &self, + ) -> ( + &ReadableProcessSlice, + &[ReadableProcessU32], + &ReadableProcessSlice, + ) { + unsafe { + // Safety: all (aligned) groups of 4 ReadableProcessBytes are a ReadableProcessU32 + let (l, m, r) = self.slice.align_to(); + (l.into(), m, r.into()) + } + } + /// Copy the contents of a [`ReadableProcessSlice`] into a mutable /// slice reference. /// @@ -740,6 +868,20 @@ impl ReadableProcessSlice { } } + /// DO. NOT. CALL. THIS. + /// TODO: remove this. b/290209039 + /// This is technically unsound. It is currently only used by the dynamic process loader. + /// One solution might be to refactor the process loading logic to take a ReadableProcessSlice + /// as the conversion the other way is safe. + /// For now, this suffices. All the kernel will do with the slice is copy it for program + /// initialisation. It is doing so in a single syscall so the contents of the buffer actually + /// ARE stable (DMA aside). + /// This is being marked as safe only so I don't have to introduce a capability just to remove + /// it again when I have refactored. + pub fn to_byte_slice(&self) -> &[u8] { + unsafe { core::mem::transmute(self) } + } + /// Copy the contents of a [`ReadableProcessSlice`] into a mutable /// slice reference. /// @@ -766,7 +908,7 @@ impl ReadableProcessSlice { } } - pub fn len(&self) -> usize { + pub const fn len(&self) -> usize { self.slice.len() } @@ -808,46 +950,18 @@ impl ReadableProcessSlice { } } -impl Index> for ReadableProcessSlice { - // Subslicing will still yield a ReadableProcessSlice reference - type Output = Self; +impl<'a> IntoIterator for &'a ReadableProcessSlice { + type IntoIter = core::slice::Iter<'a, ReadableProcessByte>; + type Item = ::Item; - fn index(&self, idx: Range) -> &Self::Output { - cast_byte_slice_to_process_slice(&self.slice[idx]) + fn into_iter(self) -> Self::IntoIter { + self.iter() } } -impl Index> for ReadableProcessSlice { - // Subslicing will still yield a ReadableProcessSlice reference - type Output = Self; - - fn index(&self, idx: RangeTo) -> &Self::Output { - &self[0..idx.end] - } -} - -impl Index> for ReadableProcessSlice { - // Subslicing will still yield a ReadableProcessSlice reference - type Output = Self; - - fn index(&self, idx: RangeFrom) -> &Self::Output { - &self[idx.start..self.len()] - } -} - -impl Index for ReadableProcessSlice { - // Indexing into a ReadableProcessSlice must yield a - // ReadableProcessByte, to limit the API surface of the wrapped - // Cell to read-only operations - type Output = ReadableProcessByte; - - fn index(&self, idx: usize) -> &Self::Output { - // As ReadableProcessSlice is a transparent wrapper around its - // inner type, [ReadableProcessByte], we can use the regular - // slicing operator here with its usual semantics. 
- &self.slice[idx] - } -} +pub type WriteableProcessT = Cell; +pub type WriteableProcessByte = WriteableProcessT; +pub type WriteableProcessU32 = WriteableProcessT; /// Read-writeable and accessible slice of memory of a process buffer /// @@ -863,19 +977,39 @@ pub struct WriteableProcessSlice { slice: [Cell], } +impl<'a> const From<&'a [Cell]> for &'a WriteableProcessSlice { + fn from(cell_slice: &'a [Cell]) -> Self { + // # Safety + // + // As WriteableProcessSlice is a transparent wrapper around its inner type, + // [Cell], we can safely transmute a reference to the inner type as the + // outer type with the same lifetime. + unsafe { core::mem::transmute(cell_slice) } + } +} + +impl<'a> const From<&'a mut [Cell]> for &'a mut WriteableProcessSlice { + fn from(cell_slice: &'a mut [Cell]) -> Self { + // # Safety + // Same as non-mut version + unsafe { core::mem::transmute(cell_slice) } + } +} + +impl<'a> const From<&'a mut WriteableProcessSlice> for &'a mut [Cell] { + fn from(value: &'a mut WriteableProcessSlice) -> Self { + &mut value.slice + } +} + fn cast_cell_slice_to_process_slice<'a>(cell_slice: &'a [Cell]) -> &'a WriteableProcessSlice { - // # Safety - // - // As WriteableProcessSlice is a transparent wrapper around its inner type, - // [Cell], we can safely transmute a reference to the inner type as the - // outer type with the same lifetime. - unsafe { core::mem::transmute(cell_slice) } + cell_slice.into() } // Allow a mutable u8 slice to be viewed as a WritableProcessSlice to allow // client code to be authored once and accept either [u8] or // WriteableProcessSlice. -impl<'a> From<&'a mut [u8]> for &'a WriteableProcessSlice { +impl<'a> const From<&'a mut [u8]> for &'a WriteableProcessSlice { fn from(val: &'a mut [u8]) -> Self { // # Safety // @@ -887,6 +1021,22 @@ impl<'a> From<&'a mut [u8]> for &'a WriteableProcessSlice { } impl WriteableProcessSlice { + /// Safe wrapper that aligns a WriteableProcessSlice + #[inline] + pub fn align_to_u32( + &self, + ) -> ( + &WriteableProcessSlice, + &[WriteableProcessU32], + &WriteableProcessSlice, + ) { + unsafe { + // Safety: all (aligned) groups of 4 WriteableProcessBytes are a WriteableProcessU32 + let (l, m, r) = self.slice.align_to(); + (l.into(), m, r.into()) + } + } + /// Copy the contents of a [`WriteableProcessSlice`] into a mutable /// slice reference. /// @@ -1036,42 +1186,152 @@ impl WriteableProcessSlice { } } -impl Index> for WriteableProcessSlice { - // Subslicing will still yield a WriteableProcessSlice reference. - type Output = Self; +impl<'a> IntoIterator for &'a WriteableProcessSlice { + type IntoIter = core::slice::Iter<'a, Cell>; + type Item = ::Item; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +/// ReadableProcessSlice and WriteableProcessSlice are just wrappers around a slice, and have the +/// same sub-slicing semantics as the type they wrap. +/// To avoid so much boilerplate in indexing the process slices this macro will generates an +/// implementation of a trait. +/// Deref also exposes slices, and combined with Into it is questionable why we need these at all. +macro_rules! DeriveSliceIndexForHelper { + ($s : ident, $field : ident, $trait : ident, $trait_type : ty, $f : ident, $self_ref : ty, $out_ref : ty, $($output : ty)?) => { + impl $trait<$trait_type> for $s { + $(type Output = $output;)? + fn $f(self : $self_ref, idx : $trait_type) -> $out_ref { + self.$field.$f(idx).into() + } + } + }; +} + +/// Implement the normal suite of index operations. +macro_rules! 
DeriveSliceIndexFor { + ($s : ident, $field : ident, $t : ty) => { + DeriveSliceIndexForHelper!($s, $field, Index, usize, index, &Self, &$t, $t); + DeriveSliceIndexForHelper!($s, $field, Index, Range, index, &Self, &Self, Self); + DeriveSliceIndexForHelper!($s, $field, Index, RangeTo, index, &Self, &Self, Self); + DeriveSliceIndexForHelper!( + $s, + $field, + Index, + RangeFrom, + index, + &Self, + &Self, + Self + ); + // Although we don't currently have a way to obtain exclusive references to these slices, + // this makes some other traits happy. + DeriveSliceIndexForHelper!($s, $field, IndexMut, usize, index_mut, &mut Self, &mut $t,); + DeriveSliceIndexForHelper!( + $s, + $field, + IndexMut, + Range, + index_mut, + &mut Self, + &mut Self, + ); + DeriveSliceIndexForHelper!( + $s, + $field, + IndexMut, + RangeTo, + index_mut, + &mut Self, + &mut Self, + ); + DeriveSliceIndexForHelper!( + $s, + $field, + IndexMut, + RangeFrom, + index_mut, + &mut Self, + &mut Self, + ); + }; +} + +// Indexing into a WriteableProcessSlice yields a Cell, as mutating the memory contents is allowed. +DeriveSliceIndexFor!(WriteableProcessSlice, slice, Cell); +// Indexing into a ReadableProcessSlice must yield a ReadableProcessByte, to limit the API surface +// of the wrapped Cell to read-only operations. +DeriveSliceIndexFor!(ReadableProcessSlice, slice, ReadableProcessByte); + +// Impl Deref for the slices to expose the slice methods directly +impl Deref for ReadableProcessSlice { + type Target = [ReadableProcessByte]; + + fn deref(&self) -> &Self::Target { + &self.slice + } +} + +impl DerefMut for ReadableProcessSlice { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.slice + } +} + +impl Deref for WriteableProcessSlice { + type Target = [Cell]; - fn index(&self, idx: Range) -> &Self::Output { - cast_cell_slice_to_process_slice(&self.slice[idx]) + fn deref(&self) -> &Self::Target { + &self.slice + } +} + +impl DerefMut for WriteableProcessSlice { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.slice } } -impl Index> for WriteableProcessSlice { - // Subslicing will still yield a WriteableProcessSlice reference. - type Output = Self; +// Allow casting lifelessRefs to inner types. - fn index(&self, idx: RangeTo) -> &Self::Output { - &self[0..idx.end] +// Even though not exactly correct, we can cast ReadableProcessSlice to [u8] as lifeless refs are +// for hardware, which doesn't have the same requirements as rust on having mutable references being +// exclusive with immutable ones. +impl misc::divorce::LifelessCastEither<[u8]> for ReadableProcessSlice { + fn cast_either(value: NonNull) -> NonNull<[u8]> { + NonNull::slice_from_raw_parts(value.cast(), (value.as_ptr() as *mut [u8]).len()) + } + + fn cast_back_either(value: NonNull<[u8]>) -> NonNull { + // Safety: Cast maintains NonNull-ness + unsafe { NonNull::new_unchecked(value.as_ptr() as *mut ReadableProcessSlice) } } } -impl Index> for WriteableProcessSlice { - // Subslicing will still yield a WriteableProcessSlice reference. 
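What one arm of `DeriveSliceIndexForHelper` expands to, written out by hand for the `usize` case so the delegation pattern is visible (simplified wrapper type; the real macro also covers ranges and `IndexMut`):

```rust
use core::ops::Index;

#[repr(transparent)]
struct Wrapped {
    slice: [u8],
}

impl Index<usize> for Wrapped {
    type Output = u8;
    fn index(&self, idx: usize) -> &u8 {
        // Delegate to the inner slice's indexing, converting the result back
        // (a no-op here; the macro uses .into() for wrapper output types).
        self.slice.index(idx)
    }
}
```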
- type Output = Self; +// WriteableProcessSlice can just be unwrapped +impl misc::divorce::LifelessCastEither<[Cell]> for WriteableProcessSlice { + fn cast_either(value: NonNull) -> NonNull<[Cell]> { + NonNull::slice_from_raw_parts(value.cast(), (value.as_ptr() as *mut [Cell]).len()) + } - fn index(&self, idx: RangeFrom) -> &Self::Output { - &self[idx.start..self.len()] + fn cast_back_either(value: NonNull<[Cell]>) -> NonNull { + // Safety: Cast maintains NonNull-ness + unsafe { NonNull::new_unchecked(value.as_ptr() as *mut WriteableProcessSlice) } } } -impl Index for WriteableProcessSlice { - // Indexing into a WriteableProcessSlice yields a Cell, as - // mutating the memory contents is allowed. - type Output = Cell; +// WriteableProcessSlice can be unwrapped and drop the cell part +// See comment on ReadableProcessSlice +impl misc::divorce::LifelessCastEither<[u8]> for WriteableProcessSlice { + fn cast_either(value: NonNull) -> NonNull<[u8]> { + NonNull::slice_from_raw_parts(value.cast(), (value.as_ptr() as *mut [u8]).len()) + } - fn index(&self, idx: usize) -> &Self::Output { - // As WriteableProcessSlice is a transparent wrapper around - // its inner type, [Cell], we can use the regular slicing - // operator here with its usual semantics. - &self.slice[idx] + fn cast_back_either(value: NonNull<[u8]>) -> NonNull { + // Safety: Cast maintains NonNull-ness + unsafe { NonNull::new_unchecked(value.as_ptr() as *mut WriteableProcessSlice) } } } diff --git a/kernel/src/scheduler/cooperative.rs b/kernel/src/scheduler/cooperative.rs index 873398143..9c02d2702 100644 --- a/kernel/src/scheduler/cooperative.rs +++ b/kernel/src/scheduler/cooperative.rs @@ -16,15 +16,16 @@ use crate::kernel::{Kernel, StoppedExecutingReason}; use crate::platform::chip::Chip; use crate::process::Process; use crate::scheduler::{Scheduler, SchedulingDecision}; +use core::cell::Cell; /// A node in the linked list the scheduler uses to track processes pub struct CoopProcessNode<'a> { - proc: &'static Option<&'static dyn Process>, + proc: &'static Cell>, next: ListLink<'a, CoopProcessNode<'a>>, } impl<'a> CoopProcessNode<'a> { - pub fn new(proc: &'static Option<&'static dyn Process>) -> CoopProcessNode<'a> { + pub fn new(proc: &'static Cell>) -> CoopProcessNode<'a> { CoopProcessNode { proc, next: ListLink::empty(), @@ -33,7 +34,7 @@ impl<'a> CoopProcessNode<'a> { } impl<'a> ListNode<'a, CoopProcessNode<'a>> for CoopProcessNode<'a> { - fn next(&'a self) -> &'a ListLink<'a, CoopProcessNode> { + fn next(&self) -> &ListLink<'a, CoopProcessNode<'a>> { &self.next } } @@ -63,7 +64,7 @@ impl<'a, C: Chip> Scheduler for CooperativeSched<'a> { // Find next ready process. Place any *empty* process slots, or not-ready // processes, at the back of the queue. 
for node in self.processes.iter() { - match node.proc { + match node.proc.get() { Some(proc) => { if proc.ready() { next = Some(proc.processid()); diff --git a/kernel/src/scheduler/mlfq.rs b/kernel/src/scheduler/mlfq.rs index 59f0f7f5a..f8cfe8b5e 100644 --- a/kernel/src/scheduler/mlfq.rs +++ b/kernel/src/scheduler/mlfq.rs @@ -35,13 +35,13 @@ struct MfProcState { /// Nodes store per-process state pub struct MLFQProcessNode<'a> { - proc: &'static Option<&'static dyn Process>, + proc: &'static Cell>, state: MfProcState, next: ListLink<'a, MLFQProcessNode<'a>>, } impl<'a> MLFQProcessNode<'a> { - pub fn new(proc: &'static Option<&'static dyn Process>) -> MLFQProcessNode<'a> { + pub fn new(proc: &'static Cell>) -> MLFQProcessNode<'a> { MLFQProcessNode { proc, state: MfProcState::default(), @@ -51,7 +51,7 @@ impl<'a> MLFQProcessNode<'a> { } impl<'a> ListNode<'a, MLFQProcessNode<'a>> for MLFQProcessNode<'a> { - fn next(&'a self) -> &'static ListLink<'a, MLFQProcessNode<'a>> { + fn next(&self) -> &ListLink<'a, MLFQProcessNode<'a>> { &self.next } } @@ -111,7 +111,7 @@ impl<'a, A: 'static + time::Alarm<'static>> MLFQSched<'a, A> { for (idx, queue) in self.processes.iter().enumerate() { let next = queue .iter() - .find(|node_ref| node_ref.proc.map_or(false, |proc| proc.ready())); + .find(|node_ref| node_ref.proc.get().map_or(false, |proc| proc.ready())); if next.is_some() { // pop procs to back until we get to match loop { @@ -159,7 +159,7 @@ impl<'a, A: 'static + time::Alarm<'static>, C: Chip> Scheduler for MLFQSched< let node_ref = node_ref_opt.unwrap(); // Panic if fail bc processes_blocked()! let timeslice = self.get_timeslice_us(queue_idx) - node_ref.state.us_used_this_queue.get(); - let next = node_ref.proc.unwrap().processid(); // Panic if fail bc processes_blocked()! + let next = node_ref.proc.get().unwrap().processid(); // Panic if fail bc processes_blocked()! 
self.last_queue_idx.set(queue_idx); self.last_timeslice.set(timeslice); diff --git a/kernel/src/scheduler/round_robin.rs b/kernel/src/scheduler/round_robin.rs index 6d17ee4bf..aadee217e 100644 --- a/kernel/src/scheduler/round_robin.rs +++ b/kernel/src/scheduler/round_robin.rs @@ -25,21 +25,29 @@ use crate::scheduler::{Scheduler, SchedulingDecision}; /// A node in the linked list the scheduler uses to track processes /// Each node holds a pointer to a slot in the processes array pub struct RoundRobinProcessNode<'a> { - proc: &'static Option<&'static dyn Process>, + proc: &'static Cell>, next: ListLink<'a, RoundRobinProcessNode<'a>>, } impl<'a> RoundRobinProcessNode<'a> { - pub fn new(proc: &'static Option<&'static dyn Process>) -> RoundRobinProcessNode<'a> { + pub const fn new( + proc: &'static Cell>, + ) -> RoundRobinProcessNode<'a> { RoundRobinProcessNode { proc, next: ListLink::empty(), } } + pub const fn new_with_next( + proc: &'static Cell>, + next: ListLink<'a, RoundRobinProcessNode<'a>>, + ) -> RoundRobinProcessNode<'a> { + RoundRobinProcessNode { proc, next } + } } impl<'a> ListNode<'a, RoundRobinProcessNode<'a>> for RoundRobinProcessNode<'a> { - fn next(&'a self) -> &'a ListLink<'a, RoundRobinProcessNode> { + fn next(&self) -> &ListLink<'a, RoundRobinProcessNode<'a>> { &self.next } } @@ -54,13 +62,18 @@ pub struct RoundRobinSched<'a> { impl<'a> RoundRobinSched<'a> { /// How long a process can run before being pre-empted const DEFAULT_TIMESLICE_US: u32 = 10000; - pub const fn new() -> RoundRobinSched<'a> { + pub const fn new_with_head( + head: ListLink<'a, RoundRobinProcessNode<'a>>, + ) -> RoundRobinSched<'a> { RoundRobinSched { time_remaining: Cell::new(Self::DEFAULT_TIMESLICE_US), - processes: List::new(), + processes: List::new_with_head(head), last_rescheduled: Cell::new(false), } } + pub const fn new() -> RoundRobinSched<'a> { + Self::new_with_head(ListLink::empty()) + } } impl<'a, C: Chip> Scheduler for RoundRobinSched<'a> { @@ -75,7 +88,7 @@ impl<'a, C: Chip> Scheduler for RoundRobinSched<'a> { // Find next ready process. Place any *empty* process slots, or not-ready // processes, at the back of the queue. for node in self.processes.iter() { - match node.proc { + match node.proc.get() { Some(proc) => { if proc.ready() { next = Some(proc.processid()); diff --git a/kernel/src/syscall.rs b/kernel/src/syscall.rs index ccd33ff7a..1fcfda844 100644 --- a/kernel/src/syscall.rs +++ b/kernel/src/syscall.rs @@ -3,10 +3,11 @@ use core::convert::TryFrom; use core::fmt::Write; +use crate::cheri::{cheri_perms, cptr, CPtrOps}; use crate::errorcode::ErrorCode; use crate::process; -pub use crate::syscall_driver::{CommandReturn, SyscallDriver}; +pub use crate::syscall_driver::{CommandReturn, CommandReturnResult, SyscallDriver}; /// Helper function to split a u64 into a higher and lower u32. /// @@ -84,8 +85,10 @@ pub enum Syscall { Subscribe { driver_number: usize, subdriver_number: usize, - upcall_ptr: *mut (), - appdata: usize, + // On a CHERI platform we need to maintain the full width as these are passed back + // to userspace + upcall_ptr: cptr, + appdata: cptr, }, /// Structure representing an invocation of the Command system call class. 
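All three scheduler diffs above make the same change: list nodes now point at the process slot through a `Cell`, so the kernel can swap a slot's contents (e.g. for the dynamic process loading this patch adds) without rebuilding the scheduler's list. A minimal sketch of the pattern, with stand-in types rather than Tock's real ones:

```rust
use core::cell::Cell;

// Stand-ins for the sketch; not Tock's real `Process` trait or list types.
trait Process {
    fn ready(&self) -> bool;
}

// A node points at the process *slot*, not at the process itself, so the
// kernel can replace the slot's contents without touching the node.
struct Node<'a> {
    proc: &'a Cell<Option<&'a dyn Process>>,
}

fn find_ready<'a>(nodes: &[Node<'a>]) -> Option<&'a dyn Process> {
    // `Cell::get` copies the `Option<&dyn Process>` out of the slot, which
    // is why each scheduler's `node.proc` read becomes `node.proc.get()`.
    nodes.iter().filter_map(|n| n.proc.get()).find(|p| p.ready())
}
```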
@@ -157,52 +160,52 @@ impl Syscall {
     pub fn from_register_arguments(
         syscall_number: u8,
         r0: usize,
-        r1: usize,
-        r2: usize,
-        r3: usize,
+        r1: cptr,
+        r2: cptr,
+        r3: cptr,
     ) -> Option<Syscall> {
         match SyscallClass::try_from(syscall_number) {
             Ok(SyscallClass::Yield) => Some(Syscall::Yield {
-                which: r0,
-                address: r1 as *mut u8,
+                which: r0.into(),
+                address: r1.as_ptr_checked(1, cheri_perms::STORE) as *mut u8,
             }),
             Ok(SyscallClass::Subscribe) => Some(Syscall::Subscribe {
                 driver_number: r0,
-                subdriver_number: r1,
-                upcall_ptr: r2 as *mut (),
+                subdriver_number: r1.into(),
+                upcall_ptr: r2,
                 appdata: r3,
             }),
             Ok(SyscallClass::Command) => Some(Syscall::Command {
                 driver_number: r0,
-                subdriver_number: r1,
-                arg0: r2,
-                arg1: r3,
+                subdriver_number: r1.into(),
+                arg0: r2.into(),
+                arg1: r3.into(),
             }),
             Ok(SyscallClass::ReadWriteAllow) => Some(Syscall::ReadWriteAllow {
                 driver_number: r0,
-                subdriver_number: r1,
-                allow_address: r2 as *mut u8,
-                allow_size: r3,
+                subdriver_number: r1.into(),
+                allow_address: r2.as_ptr_checked(r3.into(), cheri_perms::DEFAULT_RW) as *mut u8,
+                allow_size: r3.into(),
             }),
             Ok(SyscallClass::UserspaceReadableAllow) => Some(Syscall::UserspaceReadableAllow {
                 driver_number: r0,
-                subdriver_number: r1,
-                allow_address: r2 as *mut u8,
-                allow_size: r3,
+                subdriver_number: r1.into(),
+                allow_address: r2.as_ptr_checked(r3.into(), cheri_perms::DEFAULT_R) as *mut u8,
+                allow_size: r3.into(),
             }),
             Ok(SyscallClass::ReadOnlyAllow) => Some(Syscall::ReadOnlyAllow {
                 driver_number: r0,
-                subdriver_number: r1,
-                allow_address: r2 as *const u8,
-                allow_size: r3,
+                subdriver_number: r1.into(),
+                allow_address: r2.as_ptr_checked(r3.into(), cheri_perms::DEFAULT_R) as *mut u8,
+                allow_size: r3.into(),
             }),
             Ok(SyscallClass::Memop) => Some(Syscall::Memop {
                 operand: r0,
-                arg0: r1,
+                arg0: r1.into(),
             }),
             Ok(SyscallClass::Exit) => Some(Syscall::Exit {
                 which: r0,
-                completion_code: r1,
+                completion_code: r1.into(),
             }),
             Err(_) => None,
         }
@@ -271,6 +274,9 @@ pub enum SyscallReturn {
     /// data field
     SuccessU32U64(u32, u64),

+    /// Generic success case, with an additional (possibly capability-width) pointer.
+    /// On CHERI, this grants authority. Access to this return is therefore privileged.
+    SuccessPtr(cptr),
     // These following types are used by the scheduler so that it can
     // return values to userspace in an architecture (pointer-width)
     // independent way. The kernel passes these types (rather than
@@ -281,6 +287,14 @@ pub enum SyscallReturn {
     // (pointers out of valid memory), the kernel cannot construct an
     // ProcessBuffer or Upcall type but needs to be able to return a
     // failure. -pal 11/24/20
+    // FIXME: We need to think about what these look like on CHERI.
+    // Really, things that were capabilities should come back as capabilities.
+    // However, we discarded all capability information at the syscall boundary.
+    // We could always use our own DDC, with just the permissions and length implied by the
+    // specific syscall. This would certainly not give userspace _extra_ authority,
+    // but might rob them of some bounds / permissions. This is what is implemented currently.
+    // Preferable behavior is not to discard the capability so early (it should survive at
+    // least until it is stored in the grant allow slots).
     /// Read/Write allow success case, returns the previous allowed
     /// buffer and size to the process.
     AllowReadWriteSuccess(*mut u8, usize),
@@ -321,6 +335,18 @@ impl SyscallReturn {
         res.into_inner()
     }

+    /// Construct either SuccessU32 or SuccessU64 depending on platform.
+    #[allow(non_snake_case)]
+    pub(crate) fn SuccessUSize(val: usize) -> Self {
+        if core::mem::size_of::<usize>() == 8 {
+            SyscallReturn::SuccessU64(val as u64)
+        } else if core::mem::size_of::<usize>() == 4 {
+            SyscallReturn::SuccessU32(val as u32)
+        } else {
+            panic!();
+        }
+    }
+
     /// Returns true if the `SyscallReturn` is any success type.
     pub(crate) fn is_success(&self) -> bool {
         match self {
@@ -330,6 +356,7 @@ impl SyscallReturn {
             SyscallReturn::SuccessU32U32U32(_, _, _) => true,
             SyscallReturn::SuccessU64(_) => true,
             SyscallReturn::SuccessU32U64(_, _) => true,
+            SyscallReturn::SuccessPtr(_) => true,
             SyscallReturn::AllowReadWriteSuccess(_, _) => true,
             SyscallReturn::UserspaceReadableAllowSuccess(_, _) => true,
             SyscallReturn::AllowReadOnlySuccess(_, _) => true,
@@ -347,107 +374,171 @@ impl SyscallReturn {

     /// Encode the system call return value into 4 registers, following
     /// the encoding specified in TRD104. Architectures which do not follow
-    /// TRD104 are free to define their own encoding.
+    /// TRD104 are free to define their own encoding.
+    /// TODO: deprecate in favour of the more general one.
     pub fn encode_syscall_return(&self, a0: &mut u32, a1: &mut u32, a2: &mut u32, a3: &mut u32) {
+        if core::mem::size_of::<usize>() == core::mem::size_of::<u32>() {
+            // SAFETY: if the two unsigned integer types are the same size, references to
+            // them can be safely transmuted.
+            // Ugly coercion could be avoided by first copying to the stack, then assigning with
+            // "as" in order to satisfy the compiler. But I expect this function will disappear
+            // in favour of just using the usize one.
+            unsafe {
+                let (a0, a1, a2, a3) = core::mem::transmute((a0, a1, a2, a3));
+                self.encode_syscall_return_cptr(a0, a1, a2, a3);
+            }
+        } else {
+            panic!("encode_syscall_return used on a 64-bit platform or CHERI platform")
+        }
+    }
+
+    /// The obvious extension of TRD104 that works for 32-bit and 64-bit platforms.
+    /// This makes no changes to TRD104 on 32-bit platforms.
+    /// On 64-bit platforms, both 64-bit and usize values are passed as a single register,
+    /// shifting down the register numbering if that means fewer registers are needed.
+    /// For usize, this is the obvious choice.
+    /// For explicitly 64-bit arguments, this would require rewriting prototypes for userspace
+    /// functions between 32 and 64 bit platforms.
+    /// However, no driver currently USES any 64-bit values.
+    /// Any new ones on 64-bit platforms would likely prefer just passing the value.
+    /// If they would not, I suspect many really want usize anyway (and that is what is used for
+    /// the syscalls handled directly by the kernel). Maybe they should be written in terms of that,
+    /// and some helpful aliases created for FailureUSIZE etc.
+    /// I think packing two 32-bit values into 64-bits would gain nothing and pollute a whole lot
+    /// of user code.
+    /// I have not considered usize other than 4 and 8 bytes.
+    /// Also handles the CHERI extension as follows:
+    /// the high part of any cptr register is zero'd if any non capability-sized arguments are
+    /// passed.
+    /// SuccessPtr is passed as the full cptr register.
+    /// Pointers from allow'd buffers have minimal bounds attached that cover their length,
+    /// and the same permissions that were checked at the syscall boundary.
+    pub fn encode_syscall_return_cptr(
+        &self,
+        a0: &mut cptr,
+        a1: &mut cptr,
+        a2: &mut cptr,
+        a3: &mut cptr,
+    ) {
+        // On 32-bit CHERI, given that capabilities cannot be used as 64-bit integers, 64-bit
+        // integers will still be returned as two 32-bit values in different registers.
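The register rule above is easiest to see for a u64 payload. Here is an illustrative restatement in plain `usize` terms; the kernel's real packing is the `write_64` helper defined just below, operating on `cptr` registers:

```rust
/// Illustrative only: where a u64 return payload lands.
fn registers_for_u64(val: u64) -> (usize, Option<usize>) {
    if core::mem::size_of::<usize>() == 8 {
        // 64-bit target: one register; the next register number is freed up.
        (val as usize, None)
    } else {
        // 32-bit target (TRD104): split across two registers, LSB first.
        ((val & 0xffff_ffff) as usize, Some((val >> 32) as usize))
    }
}
```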
+ fn write_64(a: &mut cptr, b: &mut cptr, val: u64) { + let is_64_bit = core::mem::size_of::() == 8; + if !is_64_bit { + let (msb, lsb) = u64_to_be_u32s(val); + *a = (lsb as usize).into(); + *b = (msb as usize).into(); + } else { + *a = (val as usize).into(); + } + } + + // Given that the user initially provided a capability of a size that covered length, and + // with all the appropriate permissions, it is safe to give them that much back. + // Given that the original existed, it is okay not to use the exact set bounds + fn rederive(addr: usize, len: usize, perms: usize) -> cptr { + let mut result = cptr::default(); + result.set_addr_from_ddc(addr); + result.set_bounds(len); + result.and_perms(perms); + result + } + match self { &SyscallReturn::Failure(e) => { - *a0 = SyscallReturnVariant::Failure as u32; - *a1 = usize::from(e) as u32; + *a0 = (SyscallReturnVariant::Failure as usize).into(); + *a1 = (usize::from(e)).into(); } &SyscallReturn::FailureU32(e, data0) => { - *a0 = SyscallReturnVariant::FailureU32 as u32; - *a1 = usize::from(e) as u32; - *a2 = data0; + *a0 = (SyscallReturnVariant::FailureU32 as usize).into(); + *a1 = usize::from(e).into(); + *a2 = (data0 as usize).into(); } &SyscallReturn::FailureU32U32(e, data0, data1) => { - *a0 = SyscallReturnVariant::FailureU32U32 as u32; - *a1 = usize::from(e) as u32; - *a2 = data0; - *a3 = data1; + *a0 = (SyscallReturnVariant::FailureU32U32 as usize).into(); + *a1 = (usize::from(e)).into(); + *a2 = (data0 as usize).into(); + *a3 = (data1 as usize).into(); } &SyscallReturn::FailureU64(e, data0) => { - let (data0_msb, data0_lsb) = u64_to_be_u32s(data0); - *a0 = SyscallReturnVariant::FailureU64 as u32; - *a1 = usize::from(e) as u32; - *a2 = data0_lsb; - *a3 = data0_msb; + *a0 = (SyscallReturnVariant::FailureU64 as usize).into(); + *a1 = (usize::from(e) as usize).into(); + write_64(a2, a3, data0) } &SyscallReturn::Success => { - *a0 = SyscallReturnVariant::Success as u32; + *a0 = (SyscallReturnVariant::Success as usize).into(); } &SyscallReturn::SuccessU32(data0) => { - *a0 = SyscallReturnVariant::SuccessU32 as u32; - *a1 = data0; + *a0 = (SyscallReturnVariant::SuccessU32 as usize).into(); + *a1 = (data0 as usize).into(); } &SyscallReturn::SuccessU32U32(data0, data1) => { - *a0 = SyscallReturnVariant::SuccessU32U32 as u32; - *a1 = data0; - *a2 = data1; + *a0 = (SyscallReturnVariant::SuccessU32U32 as usize).into(); + *a1 = (data0 as usize).into(); + *a2 = (data1 as usize).into(); } &SyscallReturn::SuccessU32U32U32(data0, data1, data2) => { - *a0 = SyscallReturnVariant::SuccessU32U32U32 as u32; - *a1 = data0; - *a2 = data1; - *a3 = data2; + *a0 = (SyscallReturnVariant::SuccessU32U32U32 as usize).into(); + *a1 = (data0 as usize).into(); + *a2 = (data1 as usize).into(); + *a3 = (data2 as usize).into(); } &SyscallReturn::SuccessU64(data0) => { - let (data0_msb, data0_lsb) = u64_to_be_u32s(data0); - - *a0 = SyscallReturnVariant::SuccessU64 as u32; - *a1 = data0_lsb; - *a2 = data0_msb; + *a0 = (SyscallReturnVariant::SuccessU64 as usize).into(); + write_64(a1, a2, data0); } &SyscallReturn::SuccessU32U64(data0, data1) => { - let (data1_msb, data1_lsb) = u64_to_be_u32s(data1); - - *a0 = SyscallReturnVariant::SuccessU32U64 as u32; - *a1 = data0; - *a2 = data1_lsb; - *a3 = data1_msb; + *a0 = (SyscallReturnVariant::SuccessU32U64 as usize).into(); + *a1 = (data0 as usize).into(); + write_64(a2, a3, data1.into()); } &SyscallReturn::AllowReadWriteSuccess(ptr, len) => { - *a0 = SyscallReturnVariant::SuccessU32U32 as u32; - *a1 = ptr as u32; - *a2 = len as u32; 
+ *a0 = (SyscallReturnVariant::Success as usize).into(); + *a1 = rederive(ptr as usize, len, cheri_perms::DEFAULT_RW); + *a2 = (len as usize).into(); } &SyscallReturn::UserspaceReadableAllowSuccess(ptr, len) => { - *a0 = SyscallReturnVariant::SuccessU32U32 as u32; - *a1 = ptr as u32; - *a2 = len as u32; + *a0 = (SyscallReturnVariant::Success as usize).into(); + *a1 = rederive(ptr as usize, len, cheri_perms::DEFAULT_R); + *a2 = (len as usize).into(); } &SyscallReturn::AllowReadWriteFailure(err, ptr, len) => { - *a0 = SyscallReturnVariant::FailureU32U32 as u32; - *a1 = usize::from(err) as u32; - *a2 = ptr as u32; - *a3 = len as u32; + *a0 = (SyscallReturnVariant::Failure as usize).into(); + *a1 = (usize::from(err) as usize).into(); + *a2 = rederive(ptr as usize, len, cheri_perms::DEFAULT_RW); + *a3 = (len as usize).into(); } &SyscallReturn::UserspaceReadableAllowFailure(err, ptr, len) => { - *a0 = SyscallReturnVariant::FailureU32U32 as u32; - *a1 = usize::from(err) as u32; - *a2 = ptr as u32; - *a3 = len as u32; + *a0 = (SyscallReturnVariant::Failure as usize).into(); + *a1 = (usize::from(err)).into(); + *a2 = rederive(ptr as usize, len, cheri_perms::DEFAULT_R); + *a3 = (len as usize).into(); } &SyscallReturn::AllowReadOnlySuccess(ptr, len) => { - *a0 = SyscallReturnVariant::SuccessU32U32 as u32; - *a1 = ptr as u32; - *a2 = len as u32; + *a0 = (SyscallReturnVariant::Success as usize).into(); + *a1 = rederive(ptr as usize, len, cheri_perms::DEFAULT_R); + *a2 = (len as usize).into(); } &SyscallReturn::AllowReadOnlyFailure(err, ptr, len) => { - *a0 = SyscallReturnVariant::FailureU32U32 as u32; - *a1 = usize::from(err) as u32; - *a2 = ptr as u32; - *a3 = len as u32; + *a0 = (SyscallReturnVariant::Failure as usize).into(); + *a1 = (usize::from(err)).into(); + *a2 = rederive(ptr as usize, len, cheri_perms::DEFAULT_R); + *a3 = (len as usize).into(); } &SyscallReturn::SubscribeSuccess(ptr, data) => { - *a0 = SyscallReturnVariant::SuccessU32U32 as u32; - *a1 = ptr as u32; - *a2 = data as u32; + *a0 = (SyscallReturnVariant::Success as usize).into(); + *a1 = (ptr as usize).into(); + *a2 = (data as usize).into(); } &SyscallReturn::SubscribeFailure(err, ptr, data) => { - *a0 = SyscallReturnVariant::FailureU32U32 as u32; - *a1 = usize::from(err) as u32; - *a2 = ptr as u32; - *a3 = data as u32; + *a0 = (SyscallReturnVariant::Failure as usize).into(); + *a1 = (usize::from(err)).into(); + *a2 = (ptr as usize).into(); + *a3 = (data as usize).into(); + } + &SyscallReturn::SuccessPtr(cptr) => { + *a0 = (SyscallReturnVariant::Success as usize).into(); + *a1 = cptr; } } } @@ -543,6 +634,24 @@ pub trait UserspaceKernelBoundary { state: &mut Self::StoredState, ) -> Result<(), ()>; + /// Get extra arguments. This should only be called in the context of handling a syscall, + /// otherwise the values returned may not be meaningful. + /// ### Safety + /// + /// This function guarantees that it if needs to change process memory, it + /// will only change memory starting at `accessible_memory_start` and before + /// `app_brk`. The caller is responsible for guaranteeing that those + /// pointers are valid for the process. + unsafe fn get_extra_syscall_arg( + &self, + _ndx: usize, + _accessible_memory_start: *const u8, + _app_brk: *const u8, + _state: &Self::StoredState, + ) -> Option { + None + } + /// Set the return value the process should see when it begins executing /// again after the syscall. This will only be called after a process has /// called a syscall. 
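`get_extra_syscall_arg` defaults to `None` above; an architecture that does supply extra arguments would read them out of its saved register state. A hypothetical sketch, with an invented register layout that is not any real Tock arch:

```rust
// Hypothetical stored state with a saved register file; the field name and
// the r4-based layout are invented for the sketch.
struct StoredState {
    saved_regs: [usize; 8],
}

fn get_extra_syscall_arg(ndx: usize, state: &StoredState) -> Option<usize> {
    // r0..r3 already carry the four ordinary syscall arguments, so extra
    // argument `ndx` would live at r4 + ndx; out-of-range requests yield None.
    state.saved_regs.get(4 + ndx).copied()
}
```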
diff --git a/kernel/src/syscall_driver.rs b/kernel/src/syscall_driver.rs index 0e2943fe4..dcb49bb3b 100644 --- a/kernel/src/syscall_driver.rs +++ b/kernel/src/syscall_driver.rs @@ -153,6 +153,18 @@ impl CommandReturn { } } +/// A type to more conveniently propagate errors with the "?" operator +pub type CommandReturnResult = Result; + +impl From for CommandReturn { + fn from(res: CommandReturnResult) -> Self { + match res { + Ok(command) => command, + Err(code) => CommandReturn::failure(code), + } + } +} + impl From> for CommandReturn { fn from(rc: Result<(), ErrorCode>) -> Self { match rc { diff --git a/kernel/src/upcall.rs b/kernel/src/upcall.rs index f7795e204..a90b463e3 100644 --- a/kernel/src/upcall.rs +++ b/kernel/src/upcall.rs @@ -1,9 +1,10 @@ //! Data structure for storing an upcall from the kernel to a process. -use core::ptr::NonNull; +use crate::cheri::{cptr, CPtrOps}; use crate::config; use crate::debug; +use crate::grant::{DualTracker, Track}; use crate::process; use crate::process::ProcessId; use crate::syscall::SyscallReturn; @@ -59,6 +60,11 @@ pub enum UpcallError { KernelError, } +// FIXME: When we get CHERI compiler support, these can go back to the proper types +// b/274586199 +pub(crate) type AppdataType = cptr; +pub(crate) type FnPtrType = cptr; + /// Type for calling an upcall in a process. /// /// This is essentially a wrapper around a function pointer with @@ -72,7 +78,7 @@ pub(crate) struct Upcall { pub(crate) upcall_id: UpcallId, /// The application data passed by the app when `subscribe()` was called - pub(crate) appdata: usize, + pub(crate) appdata: AppdataType, /// A pointer to the first instruction of a function in the app /// associated with app_id. @@ -80,15 +86,71 @@ pub(crate) struct Upcall { /// If this value is `None`, this is a null upcall, which cannot actually be /// scheduled. An `Upcall` can be null when it is first created, or after an /// app unsubscribes from an upcall. - pub(crate) fn_ptr: Option>, + pub(crate) fn_ptr: FnPtrType, +} + +/// A type for calling an upcall in a process to be used by drivers that +/// wish to store upcalls across syscalls +#[derive(Copy, Clone)] +pub struct PUpcall { + /// Liveness tracker in case the process dies + liveness: DualTracker, + /// The application data passed by the app when `subscribe()` was called + pub(crate) appdata: AppdataType, + /// A pointer to the first instruction of a function in the app + /// associated with app_id. 
+    pub(crate) fn_ptr: FnPtrType,
+    /// TODO: only really needed for logging
+    pub(crate) upcall_id: UpcallId,
+}
+
+impl PUpcall {
+    pub(crate) fn new(
+        liveness: DualTracker,
+        appdata: AppdataType,
+        fn_ptr: FnPtrType,
+        upcall_id: UpcallId,
+    ) -> Self {
+        Self {
+            liveness,
+            appdata,
+            fn_ptr,
+            upcall_id,
+        }
+    }
+
+    pub fn schedule(&self, r0: usize, r1: usize, r2: usize) -> Result<(), UpcallError> {
+        match self.liveness.get_proc() {
+            None => Ok(()),
+            Some(proc) => {
+                let mut upcall =
+                    Upcall::new(proc.processid(), self.upcall_id, self.appdata, self.fn_ptr);
+                upcall.schedule(proc, r0, r1, r2)
+            }
+        }
+    }
+}
+
+impl Default for PUpcall {
+    fn default() -> Self {
+        Self {
+            liveness: DualTracker::global_dead(),
+            appdata: Default::default(),
+            fn_ptr: Default::default(),
+            upcall_id: UpcallId {
+                driver_num: 0,
+                subscribe_num: 0,
+            },
+        }
+    }
+}

 impl Upcall {
     pub(crate) fn new(
         process_id: ProcessId,
         upcall_id: UpcallId,
-        appdata: usize,
-        fn_ptr: Option<NonNull<()>>,
+        appdata: AppdataType,
+        fn_ptr: FnPtrType,
     ) -> Upcall {
         Upcall {
             process_id,
@@ -132,7 +194,7 @@ impl Upcall {
                 argument1: r1,
                 argument2: r2,
                 argument3: self.appdata,
-                pc: fp.as_ptr() as usize,
+                pc: *fp,
             }));

         match enqueue_res {
@@ -163,7 +225,8 @@ impl Upcall {
                 self.process_id,
                 self.upcall_id.driver_num,
                 self.upcall_id.subscribe_num,
-                self.fn_ptr.map_or(0x0 as *mut (), |fp| fp.as_ptr()) as usize,
+                self.fn_ptr
+                    .map_or(0x0 as *mut (), |fp| fp.as_ptr() as *mut ()) as usize,
                 r0,
                 r1,
                 r2,
@@ -183,10 +246,10 @@ impl Upcall {
     /// We provide this `.into` function because the return type needs to
     /// include the function pointer of the upcall.
     pub(crate) fn into_subscribe_success(self) -> SyscallReturn {
-        match self.fn_ptr {
-            Some(fp) => SyscallReturn::SubscribeSuccess(fp.as_ptr(), self.appdata),
-            None => SyscallReturn::SubscribeSuccess(0 as *const (), self.appdata),
-        }
+        self.fn_ptr.map_or(
+            SyscallReturn::SubscribeSuccess(0 as *const (), self.appdata.into()),
+            |fp| SyscallReturn::SubscribeSuccess(fp.as_ptr(), self.appdata.into()),
+        )
     }

     /// Create a failure case syscall return type suitable for returning to
@@ -199,9 +262,9 @@ impl Upcall {
     /// We provide this `.into` function because the return type needs to
     /// include the function pointer of the upcall.
     pub(crate) fn into_subscribe_failure(self, err: ErrorCode) -> SyscallReturn {
-        match self.fn_ptr {
-            Some(fp) => SyscallReturn::SubscribeFailure(err, fp.as_ptr(), self.appdata),
-            None => SyscallReturn::SubscribeFailure(err, 0 as *const (), self.appdata),
-        }
+        self.fn_ptr.map_or(
+            SyscallReturn::SubscribeFailure(err, 0 as *const (), self.appdata.into()),
+            |fp| SyscallReturn::SubscribeFailure(err, fp.as_ptr(), self.appdata.into()),
+        )
     }
 }
diff --git a/kernel/src/utilities/helpers.rs b/kernel/src/utilities/helpers.rs
index bd2959e71..d08609467 100644
--- a/kernel/src/utilities/helpers.rs
+++ b/kernel/src/utilities/helpers.rs
@@ -22,6 +22,23 @@ macro_rules! create_capability {
     };};
 }

+/// Can create a capability with static storage.
+/// Usage:
+/// ```ignore
+/// use kernel::capabilities::ProcessManagementCapability;
+/// create_static_capability!(static MY_CAP: MyCap = ProcessManagementCapability);
+/// ```
+/// MyCap can be any type name you don't use elsewhere.
+#[macro_export]
+macro_rules! create_static_capability {
+    (static $id : ident : $t : ident = $T:ty $(,)?) => {
+        struct $t;
+        #[allow(unsafe_code)]
+        unsafe impl $T for $t {}
+        static $id: $t = { $t };
+    };
+}
+
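For reference, one expansion of `create_static_capability!` written out by hand (the trait path is taken from the usage example; any capability trait works the same way):

```rust
// create_static_capability!(static MY_CAP: MyCap = ProcessManagementCapability);
// expands to roughly:
use kernel::capabilities::ProcessManagementCapability;

struct MyCap;
#[allow(unsafe_code)]
unsafe impl ProcessManagementCapability for MyCap {}
static MY_CAP: MyCap = { MyCap };
```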
 /// Count the number of passed expressions.
 /// Useful for constructing variable sized arrays in other macros.
 /// Taken from the Little Book of Rust Macros
@@ -37,3 +54,53 @@ macro_rules! count_expressions {
     ($head:expr $(,)?) => (1usize);
     ($head:expr, $($tail:expr),* $(,)?) => (1usize + count_expressions!($($tail),*));
 }
+
+/// A safe (const) array initialisation pattern with an accumulator.
+/// Usage:
+/// ```ignore
+/// let (acc, array) = new_const_array!([ArrayT; N], a_init, |acc, ndx| {...});
+///
+/// // The array will be filled as if the following were written:
+/// let acc = a_init;
+/// let mut ndx = 0;
+/// let (elem0, acc) = {...}; ndx += 1;
+/// let (elem1, acc) = {...}; ndx += 1;
+/// ...
+/// let (elemN, acc) = {...}; ndx += 1;
+/// return (acc, [elem0, elem1, ..., elemN])
+/// ```
+/// const fn pointer / trait bounds are still a little broken, so I have provided this as a macro
+/// instead of a function.
+#[macro_export]
+macro_rules! new_const_array {
+    ([$T : ty; $N : expr], $a_init : expr, |$acc : ident, $ndx : ident| {$($t : tt )*}) => {
+        {
+            const UNINIT_ELEM: MaybeUninit<$T> = MaybeUninit::uninit();
+            let mut _uninit_array = [UNINIT_ELEM; $N];
+            let mut $acc = $a_init;
+            let mut $ndx = 0;
+            while $ndx < $N {
+                let (elem, next_acc) = {
+                    // Shadowing the variables in this loop stops them from being modified by a
+                    // naughty bit of user code.
+                    let $ndx = $ndx;
+                    let _uninit_array = [UNINIT_ELEM; $N];
+                    {
+                        $($t)*
+                    }
+                };
+                _uninit_array[$ndx] = MaybeUninit::new(elem);
+                $acc = next_acc;
+                $ndx += 1;
+            }
+            unsafe {
+                // In case the user code broke out of the loop early
+                if ($ndx != $N) {
+                    panic!();
+                }
+                // The loop above sets every element
+                ($acc, MaybeUninit::array_assume_init(_uninit_array))
+            }
+        }
+    };
+}
diff --git a/kernel/src/utilities/leased_buffer.rs b/kernel/src/utilities/leased_buffer.rs
new file mode 100644
index 000000000..0f6fb38a2
--- /dev/null
+++ b/kernel/src/utilities/leased_buffer.rs
@@ -0,0 +1,170 @@
+//! Allows leasing part of a buffer and then restoring the original (see LeasableBuffer as well).
+
+use core::cell::Cell;
+use core::marker::PhantomData;
+use core::ops::IndexMut;
+use core::ops::Range;
+use core::ptr::NonNull;
+
+/// LeasableBuffer must pass a different type to the receiver, but cannot fail.
+/// LeasedBuffer passes a normal &mut reference and so does not complicate the receiver,
+/// but performs a dynamic check and can therefore fail.
+/// Either this needs to be handled, or the buffer will be lost in the event of error.
+pub struct LeasedBuffer<'a, T> {
+    original: NonNull<[T]>,
+    leased: NonNull<[T]>,
+    p: PhantomData<&'a mut T>,
+}
+
+/// A wrapper around LeasedBuffer providing an easy set/take interface
+pub struct LeasedBufferCell<'a, T> {
+    inner: Cell<LeasedBuffer<'a, T>>,
+}
+
+impl<'a, T> LeasedBufferCell<'a, T> {
+    pub const fn new() -> Self {
+        Self {
+            inner: Cell::new(LeasedBuffer::empty()),
+        }
+    }
+    /// Return v.get(range) and put the rest in the cell
+    pub fn set_lease(&self, v: &'a mut [T], range: Range<usize>) -> &'a mut [T] {
+        let (lease, result) = LeasedBuffer::lease(v, range);
+        self.inner.set(lease);
+        result
+    }
+    /// Give back the leased part and restore the original buffer from the cell
+    pub fn take_buf(&self, lease: &'a mut [T]) -> &'a mut [T] {
+        let leased = self.inner.replace(LeasedBuffer::empty());
+        leased.restore_min(lease)
+    }
+}
+
+impl<'a, T> LeasedBuffer<'a, T> {
+    /// Lease a sub-range of a slice.
+    /// Can be restored later with restore.
+ #[inline] + pub fn lease(v: &'a mut [T], range: Range) -> (Self, &'a mut [T]) { + let original = NonNull::from(&*v); + let sub_range = v.index_mut(range); + let leased = NonNull::from(&*sub_range); + ( + Self { + original, + leased, + p: PhantomData, + }, + sub_range, + ) + } + + #[inline] + /// A state that would never restore with anything, saves wrapping with an Option + pub const fn empty() -> Self { + Self { + original: NonNull::slice_from_raw_parts(NonNull::dangling(), 0), + leased: NonNull::slice_from_raw_parts(NonNull::dangling(), 0), + p: PhantomData, + } + } + + /// Restore leased part of buffer. + /// If it does not match exactly, Err(()) is returned. + #[inline] + pub fn restore(mut self, leased: &'a mut [T]) -> Result<&'a mut [T], ()> { + if NonNull::from(&*leased).eq(&self.leased) { + unsafe { + // Safety: we are giving back exactly the same reference with the same lifetime + // this means that the we can restore the original reference + Ok(self.original.as_mut()) + } + } else { + Err(()) + } + } + /// Restore leased part of buffer. + /// If it does not match exactly, the leased buffer is returned + #[inline] + pub fn restore_min(mut self, leased: &'a mut [T]) -> &'a mut [T] { + if NonNull::from(&*leased).eq(&self.leased) { + unsafe { + // Safety: same as restore + self.original.as_mut() + } + } else { + leased + } + } +} + +#[cfg(test)] +mod tests { + use crate::utilities::leased_buffer::LeasedBufferCell; + + #[test] + fn test_lease() { + let buf = &mut [1, 2, 3, 4]; + + let cell = LeasedBufferCell::new(); + + let part = cell.set_lease(buf, 1..2); + + // Buf no longer usable + // buf[1] = 2; // error + + assert_eq!(part[0], 2); + + // Get the buffer back + let whole_buf = cell.take_buf(part); + + assert_eq!(whole_buf[0], 1); + assert_eq!(whole_buf[3], 4); + } + + #[test] + fn test_wrong_buffer() { + let buf = &mut [1, 2, 3, 4]; + + let cell = LeasedBufferCell::new(); + + let part = cell.set_lease(buf, 1..3); + + assert_eq!(part[0], 2); + + let other = &mut [2, 3, 4, 5, 6, 7, 8]; + + // Get the buffer back + let whole_buf = cell.take_buf(other); + + assert_eq!(whole_buf.len(), 7); + } + + #[test] + fn test_wrong_buffer_length() { + let buf = &mut [1, 2, 3, 4]; + + let cell = LeasedBufferCell::new(); + + let part = cell.set_lease(buf, 1..3); + + assert_eq!(part[0], 2); + + let part = &mut part[0..1]; + + // Get the buffer back + let whole_buf = cell.take_buf(part); + + assert_eq!(whole_buf.len(), 1); + } + + #[test] + fn test_empty() { + let buf = &mut [1, 2, 3, 4]; + + let cell = LeasedBufferCell::new(); + + let x = cell.take_buf(buf); + + assert!(x.eq(&[1, 2, 3, 4])); + } +} diff --git a/kernel/src/utilities/mod.rs b/kernel/src/utilities/mod.rs index d5d428ee9..425890dfa 100644 --- a/kernel/src/utilities/mod.rs +++ b/kernel/src/utilities/mod.rs @@ -4,9 +4,11 @@ pub mod binary_write; pub mod copy_slice; pub mod helpers; pub mod leasable_buffer; +pub mod leased_buffer; pub mod math; pub mod mut_imut_buffer; pub mod peripheral_management; +pub mod singleton_checker; pub mod static_init; pub mod storage_volume; @@ -30,7 +32,7 @@ pub mod registers { /// /// use kernel::utilities::cells::TakeCell; pub mod cells { - pub use tock_cells::map_cell::MapCell; + pub use tock_cells::map_cell::*; pub use tock_cells::numeric_cell_ext::NumericCellExt; pub use tock_cells::optional_cell::OptionalCell; pub use tock_cells::take_cell::TakeCell; diff --git a/kernel/src/utilities/singleton_checker.rs b/kernel/src/utilities/singleton_checker.rs new file mode 100644 index 000000000..e0c975295 --- 
/dev/null
+++ b/kernel/src/utilities/singleton_checker.rs
@@ -0,0 +1,94 @@
+//! Compile time enforcement of the singleton pattern
+
+use core::any::TypeId;
+
+/// This type keeps track of constructors / initializers that are only intended
+/// to be called once during const initialisation.
+/// Its implementation is really inefficient; do not use it at runtime.
+/// The advantage of this checker is that it should operate at compile time,
+/// allowing runtime singletons without keeping track of which have already
+/// been constructed. Simply construct them with a const-expr and assign to
+/// a static.
+/// This type itself should also be a singleton. However, because we cannot
+/// mutate statics at compile time there is no way to check this.
+/// Therefore, the constructor for this is unsafe, but should allow all other
+/// constructors to be safe.
+pub struct SingletonCheckerBase<T: ?Sized> {
+    used: T,
+}
+
+pub type SingletonChecker = SingletonCheckerBase<[Option<TypeId>]>;
+pub type SingletonCheckerSized<const SLOTS: usize> = SingletonCheckerBase<[Option<TypeId>; SLOTS]>;
+
+impl<const SLOTS: usize> SingletonCheckerSized<SLOTS> {
+    pub const fn as_unsized(&mut self) -> &mut SingletonChecker {
+        self
+    }
+}
+
+impl SingletonChecker {
+    pub const fn id_eq(id1: TypeId, id2: TypeId) -> bool {
+        // Const comparison has not yet been stabilised.
+        // Instead we currently peek inside the type to see what integer is in
+        // use.
+        // This will need updating if the type changes / the official method
+        // is made const.
+        unsafe {
+            core::mem::transmute::<TypeId, u64>(id1) == core::mem::transmute::<TypeId, u64>(id2)
+        }
+    }
+
+    /// Check that an ID is not already used. You should probably be using assert_single.
+    pub const fn check_single(&mut self, id: TypeId) {
+        let len = self.used.len();
+        let mut ndx = 0;
+        while ndx < len {
+            match self.used[ndx] {
+                Some(other_id) => {
+                    if Self::id_eq(id, other_id) {
+                        panic!("Not a singleton");
+                    }
+                }
+                None => {
+                    self.used[ndx] = Some(id);
+                    return;
+                }
+            }
+            ndx += 1;
+        }
+        panic!("Too few slots. Increase SLOTS in new");
+    }
+
+    /// Construct a new checker. This should be called once, GLOBALLY. Not once
+    /// per const initializer. Collect as much global state as you can into a
+    /// single struct, and use this within the initializer for that.
+    pub const unsafe fn new_sized<const SLOTS: usize>() -> SingletonCheckerSized<SLOTS> {
+        const NONE: Option<TypeId> = None;
+        SingletonCheckerSized::<SLOTS> {
+            used: [NONE; SLOTS],
+        }
+    }
+}
+
+/// Helper to assert that this place in the code is reached only once.
+///
+/// Usage:
+/// ```
+/// use kernel::utilities::singleton_checker::SingletonChecker;
+/// let mut checker = unsafe {
+///     SingletonChecker::new_sized::<100>() // Size: any number larger than the uses of assert_single!
+/// };
+/// let checker = checker.as_unsized();
+/// // ...
+/// kernel::assert_single!(checker)
+/// ```
+///
+/// Note that, if you have multiple constructors for your type, they all need to call a single
+/// helper containing this macro; each expansion of the macro creates a fresh marker type, so
+/// separate expansions are checked independently of one another.
+#[macro_export]
+macro_rules! assert_single {
+    ($checker : expr) => {{
+        struct S();
+        $checker.check_single(core::any::TypeId::of::<S>());
+    }};
+}
diff --git a/kernel/src/utilities/static_init.rs b/kernel/src/utilities/static_init.rs
index c6e7c915e..7f75e2095 100644
--- a/kernel/src/utilities/static_init.rs
+++ b/kernel/src/utilities/static_init.rs
@@ -16,7 +16,7 @@
 #[macro_export]
 macro_rules! static_init {
     ($T:ty, $e:expr $(,)?) => {{
-        let mut buf = $crate::static_buf!($T);
+        let buf = $crate::static_buf!($T);
         buf.initialize($e)
     }};
 }
diff --git a/kernel/src/utilities/static_ref.rs b/kernel/src/utilities/static_ref.rs
index 710ca7a50..0de79e071 100644
--- a/kernel/src/utilities/static_ref.rs
+++ b/kernel/src/utilities/static_ref.rs
@@ -25,6 +25,63 @@ impl<T> StaticRef<T> {
     pub const unsafe fn new(ptr: *const T) -> StaticRef<T> {
         StaticRef { ptr: ptr }
     }
+
+    pub const fn unwrap(&self) -> *const T {
+        self.ptr
+    }
+}
+
+/// This macro, given a StaticRef to a struct, constructs a StaticRef to a member of said struct,
+/// i.e., `StaticRefGEP!(struct, field)` is meant to be the same as `&struct.field`.
+/// StaticRef often refers to addresses that do not really exist within a const-expr because they
+/// only represent an allocation at runtime (e.g., a memory-mapped device).
+/// Reading one is almost certainly an error. It is therefore sensible that Deref cannot be applied
+/// as a const-fn.
+/// Sadly, the syntax for accessing a field of a struct, `&(struct.field)`, implicitly performs a
+/// deref. The compiler can spot a deref of a location that does not exist, and will raise an
+/// error.
+/// This macro therefore operates in such a way as not to invoke any UB or implicit dereferences
+/// of locations that are not allocated.
+#[macro_export]
+macro_rules! StaticRefGEP {
+    ($from : expr, $field : ident) => {
+        unsafe {
+            // An explanation of what is happening here:
+            // We would like to simply write: &$from.unwrap().$field to get a ptr to the field.
+            // But there are two problems with that:
+
+            // 1) It (very briefly) constructs a reference that does not exist
+            // 2) It does arithmetic on a pointer range that is out of bounds.

+            // core::ptr::addr_of! normally solves (1).
+            // (2) is pretty tricky to get around. You cannot cast pointers to usize with a const
+            // expr because at compile time pointers do not have integer values.
+            // Also, our pointer is very much out of bounds at compile time because the device
+            // does not really exist at that point.
+            // Methods like offset() and add() must result in a within-bounds pointer.
+            // wrapping_offset is pretty useful in that it is allowed to construct an OOB
+            // pointer. We first construct a stand-in MaybeUninit object of the same type as the
+            // real referent. We can work out the offset of the field on the stand-in.
+            // We then use wrapping_offset on the "real" pointer.
+            // The functions here just exist to help infer the types of the object and element
+            // because type_of does not exist in rust.
+ const fn cast_helper(raw_ptr: *const u8, _ref: *const V) -> *const V { + raw_ptr as *const V + } + const fn make_stand_in(_raw_ptr: *const T) -> core::mem::MaybeUninit { + core::mem::MaybeUninit::uninit() + } + let raw_ptr = $from.unwrap(); + let stand_in_alloc = make_stand_in(raw_ptr); + let stand_in = stand_in_alloc.as_ptr(); + let stand_in_field = core::ptr::addr_of!((*stand_in).$field); + let offset = (stand_in_field as *const u8).offset_from(stand_in as *const u8); + StaticRef::new(cast_helper( + (raw_ptr as *const u8).wrapping_offset(offset), + stand_in_field, + )) + } + }; } impl Clone for StaticRef { @@ -37,7 +94,7 @@ impl Copy for StaticRef {} impl Deref for StaticRef { type Target = T; - fn deref(&self) -> &'static T { + fn deref(&self) -> &T { unsafe { &*self.ptr } } } diff --git a/libraries/misc/Cargo.toml b/libraries/misc/Cargo.toml new file mode 100644 index 000000000..4fab6efa0 --- /dev/null +++ b/libraries/misc/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "misc" +version = "0.1.0" +edition = "2021" + +[lib] +test = true + +[dependencies] + +[features] +default = [] +track_potatoes = [] +global_test = [] \ No newline at end of file diff --git a/libraries/misc/src/const_env.rs b/libraries/misc/src/const_env.rs new file mode 100644 index 000000000..fe561f95f --- /dev/null +++ b/libraries/misc/src/const_env.rs @@ -0,0 +1,185 @@ +//! Provides a helper to initialise constants (such as integers) provided via environment variables +//! at compile time. This allows passing configuration constants from the command line to program. + +use core::panic; + +/// Usage: +/// ```ignore +/// use misc::const_env_int; +/// const_env_int!(pub MY_U8:u8); +/// const_env_int!(MY_SIZE:usize); +/// const_env_int!(pub MY_VAR:u8 = default 77); +/// ``` +/// +/// Format for numbers passed is [0x|0b]? [0..9]* [K|M|G|Ki|Mi|Gi]? +/// +/// If the command line parameter is not set, the default will be chosen if one has been specified. +/// Otherwise, a compile time panic will occur. +/// +#[macro_export] +macro_rules! const_env_int { + ($vis:vis $name:ident : $ty:ty $(= default $e : expr)?) => ( + $vis const $name : $ty = { + $crate::parse_env_int!($name : $ty $(= default $e)?) + }; + ); +} + +/// Usage: +/// parse_env_int!(Name : Ty (= default Value)?) +#[macro_export] +macro_rules! parse_env_int { + ($name:ident : $ty : ty = default $e : expr) => ( + { + const RESULT : $ty = match option_env!(stringify!($name)) { + Some (s) => $crate::const_env::const_parse(s) as $ty, + None => $e + }; + RESULT + } + ); + ($name:ident : $ty : ty) => ( + $crate::parse_env_int!($name : $ty = default panic!("Environment variable not set")) + ); +} + +// Need to rewrite most of this logic that probably exists in core because it has to be const fn +pub const fn const_parse(s: &str) -> u64 { + let mut res = 0u64; + let mut i = 0usize; + + // Trim leading whitespace + while i < s.len() && s.as_bytes()[i] == b' ' { + i += 1; + } + + // Get base from prefix (either 0x, 0b, or else base 10. 
Base 8 is rubbish) + let base = { + if (s.len() - i) >= 2 && s.as_bytes()[i] == b'0' { + match s.as_bytes()[i + 1] { + b'x' | b'X' => { + i += 2; + 16 + } + b'b' | b'B' => { + i += 2; + 2 + } + _ => 10, + } + } else { + 10 + } + }; + + // Parse number + while i < s.len() { + let mut c = s.as_bytes()[i]; + let val = if c >= b'0' && c <= b'9' { + c - b'0' + } else { + if c >= b'a' { + c -= b'a' - b'A'; + } + if c >= b'A' && c <= b'F' { + (c - b'A') + 10 + } else { + break; + } + }; + res *= base; + res += val as u64; + i += 1; + } + + // Parse suffixes + if i != s.len() && s.as_bytes()[i] != b' ' { + let c = s.as_bytes()[i]; + i += 1; + let mult = if i == s.len() { + 1000 + } else { + let c2 = s.as_bytes()[i]; + i += 1; + if c2 == b'i' { + 1024 + } else if c2 == b' ' { + 1000 + } else { + panic!("Invalid character of suffix"); + } + }; + res *= match c { + b'k' | b'K' => mult, + b'm' | b'M' => mult * mult, + b'g' | b'G' => mult * mult * mult, + _ => panic!("Suffix should start with K, M, or G"), + }; + } + + // Trim trailing whitespace + while i != s.len() && s.as_bytes()[i] == b' ' { + i += 1; + } + + // Error check + if i != s.len() { + panic!("Invalid suffix"); + } + + res +} + +#[cfg(test)] +mod tests { + use crate::const_env::const_parse; + + #[test] + fn test_base_10() { + assert_eq!(const_parse("1"), 1); + assert_eq!(const_parse("123"), 123); + assert_eq!(const_parse("0"), 0); + assert_eq!(const_parse("01"), 1); + } + + #[test] + fn test_base_16() { + assert_eq!(const_parse("0x1"), 0x1); + assert_eq!(const_parse("0x01"), 0x1); + assert_eq!(const_parse("0X123"), 0x123); + assert_eq!(const_parse("0x123aAaFf"), 0x123aAaFf); + assert_eq!(const_parse("0x0"), 0x0); + } + + #[test] + fn test_base_2() { + assert_eq!(const_parse("0b1"), 0b1); + assert_eq!(const_parse("0b01"), 0b01); + assert_eq!(const_parse("0B101010"), 0b101010); + assert_eq!(const_parse("0b0"), 0b0); + } + + #[test] + fn test_suffix() { + assert_eq!(const_parse("0x123k"), 0x123 * 1000); + assert_eq!(const_parse("0x123K"), 0x123 * 1000); + assert_eq!(const_parse("0x123Ki"), 0x123 * 1024); + assert_eq!(const_parse("0x123Ki"), 0x123 * 1024); + assert_eq!(const_parse("0x123Mi"), 0x123 * 1024 * 1024); + assert_eq!(const_parse("0x123Gi"), 0x123 * 1024 * 1024 * 1024); + } + + #[test] + fn test_trim() { + assert_eq!(const_parse("0x123Ki "), 0x123 * 1024); + assert_eq!(const_parse(" 0x123Ki"), 0x123 * 1024); + assert_eq!(const_parse(" 0x123Ki "), 0x123 * 1024); + assert_eq!(const_parse(" 0x123 "), 0x123); + } + + #[test] + fn test_default() { + const_env_int!(THIS_WILL_NOT_BE_A_VAR : u32 = default 77); + assert_eq!(THIS_WILL_NOT_BE_A_VAR, 77); + } +} diff --git a/libraries/misc/src/default_array.rs b/libraries/misc/src/default_array.rs new file mode 100644 index 000000000..3c1312d0b --- /dev/null +++ b/libraries/misc/src/default_array.rs @@ -0,0 +1,92 @@ +//! Trait for constructing default arrays. Rust can only do this up to 32 elements. + +use core::mem::MaybeUninit; + +/// A version of default that works better inside arrays. +/// Arrays automatically implement this if elements do. +/// Non-array types must be marked with NotArrayMarker to support this interface. +pub trait DefaultArray: Sized { + /// Construct a default value, filling arrays with repeated calls to default. + fn default_array() -> Self; +} + +/// Implement this for your non-array types if they are to be used with DefaultArray. +pub trait NotArrayMarker {} + +// Blanket definitions. 
Without specialisation there is no way of implementing something only
+// for non-array types unless we use a marker.
+
+impl<T: DefaultArray, const M: usize> DefaultArray for [T; M] {
+    fn default_array() -> Self {
+        unsafe {
+            // Safety: iterating to M guarantees being in bounds. We only write each element
+            // once so are not forgetting to drop anything, and are definitely initializing
+            // everything.
+            let mut mem = MaybeUninit::<[T; M]>::uninit();
+            for i in 0..M {
+                (*mem.as_mut_ptr())[i] = T::default_array();
+            }
+            mem.assume_init()
+        }
+    }
+}
+
+impl<T: Default + NotArrayMarker> DefaultArray for T {
+    fn default_array() -> Self {
+        T::default()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::default_array::{DefaultArray, NotArrayMarker};
+
+    struct MyThingDefault {
+        v: i32,
+    }
+
+    impl Default for MyThingDefault {
+        fn default() -> Self {
+            Self { v: 6 }
+        }
+    }
+
+    // Implementing NotArrayMarker allows use of DefaultArray::default_array()
+    impl NotArrayMarker for MyThingDefault {}
+
+    #[test]
+    fn test_single() {
+        let single: MyThingDefault = DefaultArray::default_array();
+        assert_eq!(single.v, 6);
+    }
+
+    #[test]
+    fn test_1d() {
+        let array: [MyThingDefault; 100] = DefaultArray::default_array();
+        for el in array {
+            assert_eq!(el.v, 6);
+        }
+    }
+
+    #[test]
+    fn test_2d() {
+        let twod: [[MyThingDefault; 100]; 100] = DefaultArray::default_array();
+        for row in twod {
+            for el in row {
+                assert_eq!(el.v, 6);
+            }
+        }
+    }
+
+    #[test]
+    fn test_3d() {
+        let threed: [[[MyThingDefault; 40]; 40]; 40] = DefaultArray::default_array();
+        for twod in threed {
+            for row in twod {
+                for el in row {
+                    assert_eq!(el.v, 6);
+                }
+            }
+        }
+    }
+}
diff --git a/libraries/misc/src/divorce.rs b/libraries/misc/src/divorce.rs
new file mode 100644
index 000000000..fd915d297
--- /dev/null
+++ b/libraries/misc/src/divorce.rs
@@ -0,0 +1,628 @@
+//! Divorce library.
+//!
+//! Divorcing from a type is like borrowing it, but rather than the borrow checker statically
+//! verifying that the borrow does not outlive the value, you must manually "reunite" the borrow
+//! (called the subset) and what was left of the original value (called the "remainder").
+//!
+//! To stop you from forgetting to reunite the two, or dropping the remainder too early, dropping
+//! a remainder panics.
+//!
+//! It is intended for DMA, where the foreign interface is very constrained (i.e., it is
+//! hardware) and cannot understand the semantics of lifetimes or smart pointers, and async
+//! is not available / not preferred.
+//!
+//! LifelessRef<> is a transparent wrapper around a raw pointer and can safely be passed to
+//! hardware. It is also safer than a raw pointer, as LifelessRefs cannot be arbitrarily forged.
+//! The existence of a LifelessRef implies the existence of another value that does have the
+//! appropriate lifetime.
+//!
+//! The following example shows how a driver might pass a 256 byte buffer to asynchronous hardware:
+//!
+//! ```rust
+//! use std::cell::Ref;
+//! use misc::divorce::{Divorceable, Divorced, LifelessRef, Reunitable};
+//!
+//!
+//! struct State {
+//!     in_progress : Option<Divorced<Ref<'static, [u8;256]>, LifelessRef<[u8;256]>>>,
+//! }
+//!
+//! fn pass_to_hardware(ptr : LifelessRef<[u8;256]>) {
+//!     // ... program some registers for an async workload,
+//!     // calls on_work_finish at some point in the future. ...
+//! }
+//!
+//! fn work_start(state: &mut State, r: Ref<'static, [u8;256]>) {
+//!     // If we just borrowed here, one of two things would have happened:
+//!     // 1) The borrow would only last as long as it would take to extract a raw pointer. This
// would lead to (r) being dropped at the end of the function, dropping the reference count +//! // and leading to hardware (possibly) having a dangling pointer. +//! // 2) The borrow would last too long, and we would get an error trying to store it to +//! // in_progress later. +//! // Instead, divorce gives us two types. One that we have to look after ourselves (remainder), +//! // and one we can pass to hardware (sub). +//! let (remainder, subset) = r.divorce(); +//! pass_to_hardware(subset); +//! // Note, if state.in_progress were already something it would be dropped. If that were an +//! // in progress transaction, we would get the panic as expected. +//! state.in_progress = Some(remainder); +//! // Had we forgotten to handle the remainder, the function would panic here to remind us we +//! // need to store it somewhere. However, here we assigned it to state to handle later. +//! } +//! +//! // This would be called on an interrupt from the driver. The unsafe portion that bridges +//! // the gap between hardware and rust is re-constructing the "LifelessRef<[u8;256]>". +//! // The driver must "claim" (which is unsafe, as possibly it is wrong) that a particular +//! // reference has been finished with by the hardware. +//! fn on_work_finish(state: &mut State, subset : LifelessRef<[u8;256]>) { +//! let remainder = state.in_progress.take().unwrap(); +//! +//! // We get the original reference back, with any lifetime/semantics preserved. +//! // Reunite does its best to check that the length/address of the subset match, however +//! // there is no provenance tracking. +//! let original_r : Ref<'static, [u8;256]> = remainder.reunite(subset); +//! } +//! ``` + +use crate::divorce::private::DivorceableIntoFrom; +use crate::potatoes::HotPotato; +#[warn(unused_must_use)] +use core::cell::Ref; +use core::cell::{Cell, RefMut}; +use core::ptr::{slice_from_raw_parts, slice_from_raw_parts_mut, NonNull}; + +/// A Divorced is one that needs to be reunited with its subset S to +/// reconstruct a T before it can be dropped. +/// The intent for these are cases where some portion of a +/// type needs to be passed to an interface (such as low-level hardware) +/// which cannot understand the semantics of the larger type. +/// Once the subset has been passed back, the types can be reunited to form +/// the original. +/// This is much like a borrow, but borrows block the source from being moved, and need to be +/// verified statically. This does not work well with event based code. +/// The divorced type can be moved / stored anywhere the original T could. +/// Remainder should be a same size / aligned type as T, and can preferably be +/// T in some cases. +pub struct Divorced +where + T: Divorceable, +{ + // Divorced types should be reunited and never dropped + potato: HotPotato, + value: T::Remainder, + // A divorced type has all the lifetime requirements of T + marker: core::marker::PhantomData<(T, Subset)>, +} + +/// You cannot have a private trait in a public interface, this module is private and so protects +/// the DivorceableIntoFrom trait from use outside this module. +mod private { + pub trait DivorceableIntoFrom { + /// Convert between the T and Remainder for the Divorced type. 
+ fn into_divorced(self) -> Remainder; + fn from_divorced(t2: Remainder) -> Self; + } +} + +/// Subset: The type of the subset of Self that can be divorced from it +pub trait Divorceable: DivorceableIntoFrom { + /// What remains after removing the subset, although to make matches as precise as + /// possible it should fundamentally be transmutable with Self. + type Remainder; + /// A handy type that is what will remain after divorcing the subset + /// When default associated types are stable do the following: + // type DivorceT = Divorced + /// For now, every implementer has to do it themself. + type DivorceT; + + /// Get the subset of the type that can be divorced from it + fn divorceable_subset(&self) -> Subset; + + /// Divorce the subset from the type + #[must_use] + #[track_caller] + fn divorce(self) -> (Divorced, Subset) + where + Self: Sized, + { + let subset = self.divorceable_subset(); + ( + Divorced:: { + potato: HotPotato::new(), + value: self.into_divorced(), + marker: core::marker::PhantomData, + }, + subset, + ) + } + + /// Safety check that a divorced subset can be reunited + /// Because `Divorced` contains the entire `Self`, + /// this does not allow arbitrary construction of `Self`. + /// However, depending on whether `T` is a copyable type + /// match may give false positives. How bad that is + /// depends on the exact `T`. In the best case, it just + /// gives some slightly dodgy provenance issues. + fn matches(divorced: &Divorced, subset: &Subset) -> bool + where + Self: Sized; +} + +/// Undo divorce. Separate trait so a default implementation can be given. +pub trait Reunitable { + type Original; + type With; + /// Reunite the subset with what it was divorced from + fn reunite(self, subset: Self::With) -> Self::Original; +} + +// Default implementation for all Divorced types +impl> Reunitable for Divorced { + type Original = T; + type With = S; + fn reunite(self, subset: S) -> T { + assert!(Divorceable::::matches(&self, &subset)); + self.potato.consume(); + T::from_divorced(self.value) + } +} + +// Where Divorced can contain the entire type, the conversion is trivial. +// The only reason the types would not be the same is if T contains references +// that need converting to pointers for the sake of soundness. +impl DivorceableIntoFrom for T { + fn into_divorced(self) -> Self { + self + } + fn from_divorced(s: Self) -> Self { + s + } +} + +/// This is effectively meant to be a reference divorced from a lifetime. +/// Unlike a pointer (which has no lifetime), the lifetime of this reference is +/// just elsewhere. +/// This is a lot like a borrow of the original reference, but it allows the original value +/// can be moved around and the borrow can be of an indeterminate period. +/// These references can be passed across boundaries that do not understand lifetimes while +/// keeping some of the lifetime within rust so the compiler can spot errors. +/// The advantage over the raw pointer this wraps is that even "safe" code can +/// arbitrarily construct pointers. LifelessRefs can only be constructed by +/// divorcing them from a real reference. +/// Interfaces can therefore trust that a LifelessRef does in fact point to some valid object, +/// as somewhere else there is a type that has the correct lifetime and is not allowed to +/// go out of scope. Note, in some cases, std::mem::forget may still allow for unsafe behaviour. +/// Note, this type CANNOT be copy as it is paired with another object that would need copying too. 
+/// I might add a "pairwise clone" to Divorced<> +/// +#[repr(transparent)] +pub struct LifelessRef { + value: NonNull, +} +/// Same as LifelessRef, but the reference it was formed from was "mutable". +#[repr(transparent)] +pub struct LifelessRefMut { + value: NonNull, +} + +#[inline(always)] +fn split_nonnull_slice(slice: NonNull<[T]>, mid: usize) -> (NonNull<[T]>, NonNull<[T]>) { + let len = slice.len(); + assert!(mid <= len); + let ptr = slice.as_mut_ptr(); + // SAFETY: slice was already non-null and we did the assert on length + unsafe { + let first = NonNull::<[T]>::new_unchecked(slice_from_raw_parts_mut(ptr, mid)); + let second = + NonNull::<[T]>::new_unchecked(slice_from_raw_parts_mut(ptr.add(mid), len - mid)); + (first, second) + } +} + +impl LifelessRef<[T]> { + /// Number of elements in the slice + pub fn len(&self) -> usize { + self.value.len() + } + /// Base address of the slice + pub fn base(&self) -> usize { + self.value.as_ptr().as_mut_ptr() as usize + } + /// Split into two ranges, mid is the start of the second range + pub fn split_at(self, mid: usize) -> (Self, Self) { + let (first, second) = split_nonnull_slice(self.value, mid); + (LifelessRef { value: first }, LifelessRef { value: second }) + } + /// Get the wrapped pointer. This is not really unsafe, it is just a potential footshoot. + /// Safety: do not ever actually dereference the result of this. If you want to do that, + /// call the safe consume() method. + pub unsafe fn peek_value(&self) -> NonNull<[T]> { + self.value + } +} + +impl LifelessRefMut<[T]> { + /// Number of elements in the slice + pub fn len(&self) -> usize { + self.value.len() + } + /// Base address of the slice + pub fn base(&self) -> usize { + self.value.as_ptr().as_mut_ptr() as usize + } + /// Split into two ranges, mid is the start of the second range + pub fn split_at(self, mid: usize) -> (Self, Self) { + let (first, second) = split_nonnull_slice(self.value, mid); + ( + LifelessRefMut { value: first }, + LifelessRefMut { value: second }, + ) + } + /// Get the wrapped pointer. This is not really unsafe, it is just a potential footshoot. + /// Safety: do not ever actually dereference the result of this. If you want to do that, + /// call the safe consume() method. 
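+    /// A sketch of a legitimate use (`write_descriptor` is illustrative): read
+    /// the address and length out of the pointer without giving up ownership:
+    /// ```ignore
+    /// let ptr = unsafe { buf.peek_value() };
+    /// write_descriptor(ptr.as_mut_ptr() as usize, ptr.len());
+    /// ```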
+ pub unsafe fn peek_value(&self) -> NonNull<[T]> { + self.value + } +} + +impl LifelessRef<[T; N]> { + /// Convert array reference to slice reference + pub fn as_slice(self) -> LifelessRef<[T]> { + // Safety: self.value.as_ptr() will never be null as it is a NonNull + unsafe { + LifelessRef { + value: NonNull::<[T]>::new_unchecked(slice_from_raw_parts( + self.value.as_ptr() as *const T, + N, + ) as *mut [T]), + } + } + } +} + +impl LifelessRefMut<[T; N]> { + /// Convert array reference to slice reference + pub fn as_slice(self) -> LifelessRefMut<[T]> { + // Safety: self.value.as_ptr() will never be null as it is a NonNull + unsafe { + LifelessRefMut { + value: NonNull::<[T]>::new_unchecked(core::ptr::slice_from_raw_parts_mut( + self.value.as_ptr() as *mut T, + N, + )), + } + } + } +} + +/// Collection of traits that both LifelessRef and LifelessRefMut have +pub trait LifelessRefTraits { + // SAFETY: The caller promises they called consume on exactly the same value + // If they did not, then the damage is still limited by the fact the ref will not match + // what it was divorced from + unsafe fn remake(value: NonNull) -> Self; + fn consume(self) -> NonNull; +} + +impl LifelessRefTraits for LifelessRef { + unsafe fn remake(value: NonNull) -> Self { + LifelessRef { value } + } + fn consume(self) -> NonNull { + self.value + } +} + +impl LifelessRefTraits for LifelessRefMut { + unsafe fn remake(value: NonNull) -> Self { + LifelessRefMut { value } + } + fn consume(self) -> NonNull { + self.value + } +} + +/// Can cast into a more general lifeless reference. +/// The way back is possibly unsafe (e.g., slice to array), but is OK to do immediately before a +/// reunite. +pub trait LifelessCast: Sized { + /// (safely) cast to a T + fn cast(self) -> T; + /// (possibly unsafely) cast back to the original LifelessRef type + /// This should only be used immediately before trying to reunite with a divorced type + unsafe fn cast_back(lifeless: T) -> Self { + Self::try_cast_back(lifeless).unwrap() + } + /// Non-panicking version of cast_back. Returns None if cast would be illegal. + unsafe fn try_cast_back(lifeless: T) -> Option; +} + +// Allow downgrading a LifelessRefMut into a LifelessRef +// Possibly also apply another cast at the same time. +impl LifelessCast> for LifelessRefMut +where + LifelessRef: LifelessCast>, +{ + fn cast(self) -> LifelessRef { + self.downgrade().cast() + } + + unsafe fn try_cast_back(lifeless: LifelessRef) -> Option { + Some(LifelessRef::::try_cast_back(lifeless)?.upgrade()) + } +} + +// Allow upgrading a LifelessRef into a LifelessRefMut if the reference is to a cell slice +// Possibly make another cast first. +// This is not valid for normal rust, e.g.: &'a mut [u8] =/= &'a [Cell] because although +// they both represent a variable length span of mutable bytes, the LHS cannot alias with any +// other reference in the rust type system, and the RHS can. +// For hardware, they are the same because hardware has no rules on aliasing. 
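+// A sketch of what this enables (names illustrative): a buffer shared with a
+// capsule as `&[Cell<u8>]` can be divorced and then cast to plain bytes for a
+// DMA engine, keeping the remainder around for the eventual reunite:
+//   let (remainder, lifeless) = shared.divorce();   // shared: &[Cell<u8>]
+//   let for_hw: LifelessRefMut<[u8]> = lifeless.cast();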
+impl LifelessCast> for LifelessRef +where + LifelessRef: LifelessCast]>>, +{ + fn cast(self) -> LifelessRefMut<[T]> { + // Cast first to LifelessRef<[Cell]> + let cast: LifelessRef<[Cell]> = self.cast(); + // Then to LifelessRefMut<[T]> + LifelessRefMut { + value: NonNull::slice_from_raw_parts(cast.value.cast(), cast.value.len()), + } + } + + unsafe fn try_cast_back(lifeless: LifelessRefMut<[T]>) -> Option { + let lr = LifelessRef { + value: NonNull::slice_from_raw_parts(lifeless.value.cast(), lifeless.value.len()), + }; + LifelessRef::::try_cast_back(lr) + } +} + +// Implement cast for LifelessRef +impl + ?Sized> LifelessCast> + for LifelessRef +{ + fn cast(self) -> LifelessRef { + LifelessRef { + value: From::cast_either(self.value), + } + } + + unsafe fn try_cast_back(lifeless: LifelessRef) -> Option { + Some(LifelessRef { + value: From::try_cast_back_either(lifeless.value)?, + }) + } +} +// And again for mut +impl + ?Sized> LifelessCast> + for LifelessRefMut +{ + fn cast(self) -> LifelessRefMut { + LifelessRefMut { + value: From::cast_either(self.value), + } + } + + unsafe fn try_cast_back(lifeless: LifelessRefMut) -> Option { + Some(LifelessRefMut { + value: From::try_cast_back_either(lifeless.value)?, + }) + } +} + +/// Implement once for use by different wrappers LifelessRef and LifelessRefMut +pub trait LifelessCastEither { + fn cast_either(value: NonNull) -> NonNull; + fn cast_back_either(value: NonNull) -> NonNull; + fn try_cast_back_either(value: NonNull) -> Option> { + Some(Self::cast_back_either(value)) + } +} + +// Allow identity cast +impl LifelessCastEither for T { + fn cast_either(value: NonNull) -> NonNull { + value + } + + fn cast_back_either(value: NonNull) -> NonNull { + value + } +} + +// Allow casting array to slice +impl LifelessCastEither<[T]> for [T; N] { + fn cast_either(value: NonNull) -> NonNull<[T]> { + NonNull::slice_from_raw_parts(value.cast(), N) + } + + fn cast_back_either(value: NonNull<[T]>) -> NonNull { + assert_eq!(value.len(), N); + value.cast() + } + + fn try_cast_back_either(value: NonNull<[T]>) -> Option> { + if value.len() != N { + None + } else { + Some(value.cast()) + } + } +} + +/// Allow discarding the `Cell` in `[Cell]`. Hardware does not understand any semantics of interior +/// mutability. 
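+/// The cast preserves address and length, so `cast_back_either` can restore the
+/// `Cell` form exactly for the eventual reunite; only the interior-mutability
+/// marker is discarded.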
+impl LifelessCastEither<[T]> for [Cell] { + fn cast_either(value: NonNull) -> NonNull<[T]> { + NonNull::slice_from_raw_parts(value.cast(), value.len()) + } + + fn cast_back_either(value: NonNull<[T]>) -> NonNull { + NonNull::slice_from_raw_parts(value.cast(), value.len()) + } +} + +impl LifelessRefMut { + /// Downgrade a `LifelessRefMut` into `LifelessRef` + pub fn downgrade(self) -> LifelessRef { + LifelessRef { value: self.value } + } +} + +impl LifelessRef { + /// Upgrading is unsafe if not called on a value that was originally a LifelessRefMut + pub unsafe fn upgrade(self) -> LifelessRefMut { + LifelessRefMut { value: self.value } + } +} + +/// Implement divorcing lifeless refs from normal references + +impl DivorceableIntoFrom> for &T { + fn into_divorced(self) -> NonNull { + self.into() + } + fn from_divorced(d: NonNull) -> Self { + // Will not be NULL as it was constructed from as_ptr + // into_divorced also consumed the reference, so the borrow checker + // will not allow there to be any mutable references + unsafe { d.as_ref() } + } +} + +impl Divorceable> for &T { + type Remainder = NonNull; + type DivorceT = Divorced>; + + fn divorceable_subset(&self) -> LifelessRef { + LifelessRef { + value: (*self).into(), + } + } + fn matches(divorced: &Divorced>, subset: &LifelessRef) -> bool { + subset.value == divorced.value + } +} + +/// A helper trait for anything that is `Divorceable`, +/// where `T` is a `LifelessRef` or `LifelessRefMut` +pub trait DivorceLifeless: Divorceable { + type InnerT: ?Sized; + type Lifeless: LifelessRefTraits; +} + +impl DivorceLifeless for &T { + type InnerT = T; + type Lifeless = LifelessRef; +} + +impl DivorceableIntoFrom> for &mut T { + fn into_divorced(self) -> NonNull { + self.into() + } + fn from_divorced(mut d: NonNull) -> Self { + // Will not be NULL as it was constructed from as_ptr + // into_divorced also consumed the reference, so the borrow checker + // will not allow there to be any mutable references + unsafe { d.as_mut() } + } +} + +impl Divorceable> for &mut T { + type Remainder = NonNull; + type DivorceT = Divorced>; + + fn divorceable_subset(&self) -> LifelessRefMut { + LifelessRefMut { + value: (*self as &T).into(), + } + } + fn matches(divorced: &Divorced>, subset: &LifelessRefMut) -> bool { + subset.value == divorced.value + } +} + +impl DivorceLifeless for &mut T { + type InnerT = T; + type Lifeless = LifelessRefMut; +} + +/// Implement divorcing lifeless refs from Ref<> + +impl<'a, T: 'a + ?Sized> Divorceable> for Ref<'a, T> { + type Remainder = Ref<'a, T>; + type DivorceT = Divorced>; + + fn divorceable_subset(&self) -> LifelessRef { + LifelessRef { + value: (&**self).into(), + } + } + fn matches(divorced: &Divorced>, subset: &LifelessRef) -> bool { + subset.value == divorced.value.divorceable_subset().value + } +} + +impl<'a, T: 'a + ?Sized> DivorceLifeless for Ref<'a, T> { + type InnerT = T; + type Lifeless = LifelessRef; +} + +impl<'a, T: 'a + ?Sized> Divorceable> for RefMut<'a, T> { + type Remainder = RefMut<'a, T>; + type DivorceT = Divorced>; + + fn divorceable_subset(&self) -> LifelessRefMut { + LifelessRefMut { + value: (&**self).into(), + } + } + fn matches(divorced: &Divorced>, subset: &LifelessRefMut) -> bool { + subset.value == divorced.value.divorceable_subset().value + } +} + +impl<'a, T: 'a + ?Sized> DivorceLifeless for RefMut<'a, T> { + type InnerT = T; + type Lifeless = LifelessRefMut; +} + +#[cfg(test)] +mod tests { + use crate::divorce::{Divorceable, LifelessRef, Reunitable}; + use core::cell::RefCell; + + fn 
test_ptr(l: LifelessRef, compare: i32) -> LifelessRef { + let as_ref = unsafe { l.value.as_ref() }; + assert_eq!(*as_ref, compare); + l + } + + #[test] + fn test_normal_ref() { + let val: i32 = 77; + let r = &val; + let (remain, lifeless) = r.divorce(); + let lifeless = test_ptr(lifeless, 77); + remain.reunite(lifeless); + } + + #[test] + #[should_panic] + fn test_normal_forget_reunite() { + let val: i32 = 77; + let r = &val; + let (_remain, _lifeless) = r.divorce(); + } + + #[test] + fn test_refcell() { + let val_cell = RefCell::new(77); + let r = val_cell.borrow(); + let (remain, lifeless) = r.divorce(); + let lifeless = test_ptr(lifeless, 77); + remain.reunite(lifeless); + } +} diff --git a/libraries/misc/src/lib.rs b/libraries/misc/src/lib.rs new file mode 100644 index 000000000..899a472d5 --- /dev/null +++ b/libraries/misc/src/lib.rs @@ -0,0 +1,18 @@ +#![feature(nonnull_slice_from_raw_parts)] +#![feature(slice_ptr_len)] +#![feature(slice_ptr_get)] +#![feature(const_trait_impl)] +#![crate_type = "rlib"] +#![no_std] + +pub mod const_env; +pub mod default_array; +pub mod divorce; +pub mod misc_macros; +pub mod never; +pub mod overload_impl; +pub mod potatoes; +pub mod take_borrow; +pub mod tpanic; +pub mod trait_alias; +pub mod unsigned_allocators; diff --git a/libraries/misc/src/misc_macros.rs b/libraries/misc/src/misc_macros.rs new file mode 100644 index 000000000..e353edf08 --- /dev/null +++ b/libraries/misc/src/misc_macros.rs @@ -0,0 +1,12 @@ +///! Contains lots of small macros that don't really belong anywhere else + +/// This macro is horrendously unsafe and intended just for testing +/// When you call this, you are making the declaration that nothing derived from this +/// reference will ever be shared between threads. +/// For testing, if you have no global state apart from that within thread_local!, this is true. +#[macro_export] +macro_rules! leak_thread_local { + ($e : expr) => { + ($e).with(|re| core::ptr::NonNull::from(re)).as_ref() + }; +} diff --git a/libraries/misc/src/never.rs b/libraries/misc/src/never.rs new file mode 100644 index 000000000..81f6868d9 --- /dev/null +++ b/libraries/misc/src/never.rs @@ -0,0 +1,8 @@ +/// A custom never type because ! is not stable +#[derive(Copy, Clone)] +pub enum Never {} +impl Default for Never { + fn default() -> Self { + panic!() + } +} diff --git a/libraries/misc/src/overload_impl.rs b/libraries/misc/src/overload_impl.rs new file mode 100644 index 000000000..3b227b5c6 --- /dev/null +++ b/libraries/misc/src/overload_impl.rs @@ -0,0 +1,59 @@ +//! A helper type to allow statically overloading traits or implement foreign traits for foregin +//! types. + +/// Create an overload for base, called overloaded. +/// References to one can be treated as references to the other. +/// You cannot rely on the overload having any constraints other than those imposed by +/// the type it wraps. +/// Usage: +/// ``` +/// #![feature(const_mut_refs)] +/// trait ForeignTrait{}; +/// struct ForeignType; +/// // ... +/// use misc::overload_impl; +/// overload_impl!(MyWrapper); +/// impl ForeignTrait for MyWrapper { +/// } +/// ``` +/// The wrapped type is accessible via the public inner field. +/// Transmute to the wrapper using `MyWrapper::get(&unwrapped)` +/// If you need to constrain a type with phantom data, there is second +/// parameter to every type created for that purpose: +/// e.g.: `MyWrapper. +#[macro_export] +macro_rules! 
overload_impl { + ($overloaded : ident) => { + #[repr(transparent)] + pub struct $overloaded { + pub inner: T, + _p: core::marker::PhantomData
<P>
, + } + + // Safety: repr transparent around a struct with a single field will have exactly the same + // layout and alignment requirements as the wrapped type. + impl $overloaded { + #[inline] + pub const fn get(inner: &T) -> &Self { + unsafe { core::mem::transmute(inner) } + } + #[inline] + pub const fn get_mut(inner: &mut T) -> &mut Self { + unsafe { core::mem::transmute(inner) } + } + } + + impl core::ops::Deref for $overloaded { + type Target = T; + fn deref(&self) -> &Self::Target { + &self.inner + } + } + + impl core::ops::DerefMut for $overloaded { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } + } + }; +} diff --git a/libraries/misc/src/potatoes.rs b/libraries/misc/src/potatoes.rs new file mode 100644 index 000000000..4231eac62 --- /dev/null +++ b/libraries/misc/src/potatoes.rs @@ -0,0 +1,90 @@ +//! Type markers for linear-types +//! Types that contain these markers should not be copied or dropped (apart from a set of methods +//! that officially consume them +//! Note that because Drop is never guaranteed to be called you should only rely on this for +//! safety if leaking the containing type is also safe. + +use crate::tpanic; + +#[derive(Debug)] +#[must_use] +pub struct HotPotato { + #[cfg(feature = "track_potatoes")] + created_at: &'static core::panic::Location<'static>, +} + +/// Really just a marker to try make a type linear. +/// Sad that Rust does not have support for this statically even though this is +/// completely checked at compile time. +/// Note, currently, HotPotato can catch errors, NOT enforce safety. +/// We cannot stop users from manually dropping getting rid of the with std::mem::forget, +/// or do the same to containing types. +impl Drop for HotPotato { + #[track_caller] + #[inline(always)] + fn drop(&mut self) { + #[cfg(feature = "track_potatoes")] + { + tpanic!( + "\nCreated here: {}\nDropping this is likely an error.\n", + self.created_at + ); + } + #[cfg(not(feature = "track_potatoes"))] + { + tpanic!("Dropping this is likely an error"); + } + } +} + +impl HotPotato { + #[inline(always)] + pub fn consume(self) { + core::mem::forget(self); + } + + #[track_caller] + pub fn new() -> HotPotato { + HotPotato { + #[cfg(feature = "track_potatoes")] + created_at: core::panic::Location::caller(), + } + } +} + +/// Debug version of HotPotato. Does not panic in release. +#[derive(Debug)] +#[must_use] +pub struct DebugPotato { + #[cfg(feature = "track_potatoes")] + created_at: &'static core::panic::Location<'static>, +} + +impl Drop for DebugPotato { + #[track_caller] + #[inline(always)] + fn drop(&mut self) { + #[cfg(feature = "track_potatoes")] + { + tpanic!( + "\nCreated here: {}\nDropping this is likely an error.\n", + self.created_at + ); + } + } +} + +impl DebugPotato { + #[inline(always)] + pub fn consume(self) { + core::mem::forget(self); + } + + #[track_caller] + pub fn new() -> Self { + Self { + #[cfg(feature = "track_potatoes")] + created_at: core::panic::Location::caller(), + } + } +} diff --git a/libraries/misc/src/take_borrow.rs b/libraries/misc/src/take_borrow.rs new file mode 100644 index 000000000..f81e23104 --- /dev/null +++ b/libraries/misc/src/take_borrow.rs @@ -0,0 +1,128 @@ +//! Take borrow helper. +//! Allows borrowing from containers that have a take interface by taking the value, borrowing the +//! moved value, then moving it back after the borrow ends. + +use core::cell::Cell; +use core::mem::take; +use core::ops::{Deref, DerefMut}; + +/// It gets tedious to call take, mutate a value, then set on Cells. 
This allows a single method +/// call to quickly get a mutable reference to Cell contents that needs putting back after. +/// e.g.: +/// ``` +/// use core::cell::Cell; +/// use misc::take_borrow::TakeBorrow; +/// pub fn is_some(arg : &Cell>) -> bool { +/// arg.take_borrow().is_some() +/// } +/// // Instead of +/// pub fn is_some_but_annoying_to_write(arg : &Cell>) -> bool { +/// let tmp = arg.take(); +/// let result = tmp.is_some(); +/// arg.set(tmp); +/// result +/// } +/// ``` +/// Care should be taken not to access the cell again for the lifetime of a CellTakeBorrow. +/// Although doing so is not unsafe, the Cell will be found empty, and will be over-written +/// later. This is no different from accidentally accessing a Cell that has just has "take" called +/// on it. +pub struct CellTakeBorrow<'a, T: Default> { + cell_ref: &'a Cell, + val: T, +} + +/// The trait that offers the take_borrow method, which borrows by first taking a value and +/// borrowing that. The value is automatically put back afterwards. +pub trait TakeBorrow { + type Output<'a> + where + Self: 'a; + /// Take the value out of self, borrow that, and set it back again when that borrow ends + fn take_borrow(&self) -> Self::Output<'_>; +} + +impl<'a, T: Default> Drop for CellTakeBorrow<'a, T> { + #[inline(always)] + fn drop(&mut self) { + self.cell_ref.set(take(&mut self.val)); + } +} + +impl TakeBorrow for Cell { + type Output<'a> = CellTakeBorrow<'a, T> where Self: 'a,; + + #[inline(always)] + fn take_borrow(&self) -> Self::Output<'_> { + CellTakeBorrow { + cell_ref: self, + val: self.take(), + } + } +} + +impl<'a, T: Default> Deref for CellTakeBorrow<'a, T> { + type Target = T; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.val + } +} + +impl<'a, T: Default> DerefMut for CellTakeBorrow<'a, T> { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.val + } +} + +#[cfg(test)] +mod tests { + use crate::take_borrow::TakeBorrow; + use core::cell::Cell; + + #[derive(Copy, Clone, Default)] + struct SimpleThing { + val: u32, + } + + impl SimpleThing { + fn increment(&mut self) { + self.val += 1; + } + } + + // Test the expected use of take_borrow + #[test] + fn simple_test() { + let cell = Cell::::new(SimpleThing { val: 123 }); + let r1 = &cell; + let r2 = &cell; + r1.take_borrow().increment(); + assert_eq!(cell.get().val, 124); + r2.take_borrow().increment(); + assert_eq!(cell.get().val, 125); + r1.take_borrow().increment(); + assert_eq!(cell.take().val, 126); + } + + // Test clobbering works as expected + #[test] + fn clobber_test() { + let cell = Cell::::new(SimpleThing { val: 123 }); + + let mut borrow = cell.take_borrow(); + + // While borrowed, value should go back to its default value + assert_eq!(cell.get().val, 0); + cell.set(SimpleThing { val: 13 }); + assert_eq!(cell.get().val, 13); + + borrow.increment(); + assert_eq!(cell.get().val, 13); + + drop(borrow); + assert_eq!(cell.get().val, 124); + } +} diff --git a/libraries/misc/src/tpanic.rs b/libraries/misc/src/tpanic.rs new file mode 100644 index 000000000..2a2a78800 --- /dev/null +++ b/libraries/misc/src/tpanic.rs @@ -0,0 +1,24 @@ +/// A panic that interacts better with testing. +/// If we are running unit tests and already panicking, this prints the message but does not panic. +/// Otherwise, it will panic as normal. +/// This behaviour can be configured when testing other creates using the global_test feature. +#[macro_export] +macro_rules! 
tpanic { + ($($t : tt)*) => { + { + #[cfg(any(test, feature = "global_test"))] + { + extern crate std; + if std::thread::panicking() { + std::println!($($t)*); + } else { + panic!($($t)*); + } + } + #[cfg(not(any(test, feature = "global_test")))] + { + panic!($($t)*); + } + } + }; +} diff --git a/libraries/misc/src/trait_alias.rs b/libraries/misc/src/trait_alias.rs new file mode 100644 index 000000000..6a2b22931 --- /dev/null +++ b/libraries/misc/src/trait_alias.rs @@ -0,0 +1,144 @@ +/// Macro to create an alias for a number of traits, with automatically propagated bounds on their +/// associated types. Works with generics, const generics, does not (yet) work for GATS. +/// +/// Usage: +/// ```text +/// misc::trait_alias! { +/// [vis] trait TheTrait = SuperTrait1, SuperTrait2, SuperTrait3, ..., +/// as where (SuperTraitX:::AssociatedType as NewName : Bound1 | Bound2)* +/// where [other bounds] +/// ``` +/// Either of the "as where" or "where" clauses can be skipped. +/// Don't put bounds in the definition of TheTrait, put them in the [other bounds]. +/// +/// If you have a bound T : TheTrait, all the "as where" bounds are automatically propagated +/// +/// Also, put any ?Trait bounds on associated types AT THE END. Parsing is hard. +/// +/// Note the triple ":::" rather than "::" in as the _last_ path separator in the "as where" block. +/// +/// Note a slightly different syntax is required for parameters of TheTrait. Instead of: +/// TheTrait<'a, 'b, const X : u8, T, const Y : bool> +/// write +/// TheTrait<'a, 'b, T ; const X : u8, const Y : bool> +/// +/// The rules are: lifetimes first, followed by types, followed by a semicolon (not a comma), +/// followed by the consts. +/// +/// Example, a trait for anything that be converted to and from a u8: +/// ``` +/// misc::trait_alias!( +/// pub trait ConvertU8 = Into, From +/// ); +/// ``` +/// +/// Example, a trait for any iterator where the `Item : Into`: +/// ``` +/// misc::trait_alias!( +/// pub trait IntoXIterator = Iterator as where Iterator:::Item as IntoItem : Into +/// ); +/// ``` +/// +#[macro_export] +macro_rules! trait_alias { + // Do the work + ($(#[$attr:meta])* $vis:vis trait $TraitAlias : ident $(<$($t:tt),* $(; $(const $n : tt : $ty : tt),*)?>)? = $Super1 : path $(, $Super : path)* + $(as where $($SP : ident $(:: $SPS : ident)* $(<$($($SLife : lifetime)? $($SIdent : path)?),*>)? ::: $AT : ident as $NAT : ident : $ATB1 : path $(| $ATBs : path)* $(| ? $Sized: path)? ),*)? + $(where $($other : tt)*)? + ) => + ( + // First declare the trait with appropriate supertraits + $(#[$attr])* $vis trait $TraitAlias $(<$($t),* $(, $(const $n : $ty),*)?>)? : $Super1 $(+ $Super)* where + // Then add in bounds that bound the new traits associated types to be the same as the supertraits + $($(Self : $SP $(:: $SPS)* <$($($($SIdent)? $($SLife)?),* ,)? $AT = Self::$NAT>,)*)? $($($other)*)? { + // Then declare the new associated types that have the required bounds + $($(type $NAT : $ATB1 $(+ $ATBs)* $(+ ? $Sized)?;)*)? + } + // Then provide a blanket implementation + impl<$($($t),* $(, $(const $n : $ty),*)?,)? + TTT : ?Sized + $Super1 $(+ $Super)*> $TraitAlias $(<$($t),* $(, $($n),*)?>)? for TTT + where $($()?>::$AT : $ATB1 $(+ $ATBs)*,)* )? $($($other)*)? + { + // Set the new associated type to the supertrait's associated type + $($(type $NAT = )?>::$AT ;)*)? + } + ); +} + +#[cfg(test)] +mod tests { + + // This is how we might normally specify a bound. 
In this instance, we are saying that + // we are being passed some collection of type T that can be converted into an iterator + // where the items support trivial conversion to an i32. Such a bound is what would be + // required to sum a collection. + // + // This is already pretty complicated, but gets worse if we need dozens of traits. + // Any other item (e.g. helper function) that needs 'T also needs to copy and paste all of + // these bounds around. + fn sum_of, T: IntoIterator>(collection: T) -> i32 { + let mut sum: i32 = 0; + for val in collection { + sum += val.into(); + } + sum + } + + // Instead we can use the alias. This only requires stating the bounds once: + // "NumberIterator is an IntoIterator where the IntoIterator:::Item (call that NumItem) can + // be converted into an i32" + trait_alias!( + pub trait NumIterator = IntoIterator as where IntoIterator:::Item as NumItem : Into + ); + + // Now using that trait (NumIterator) results in much cleaner code + fn sum_of2(collection: T) -> i32 { + let mut sum: i32 = 0; + for val in collection { + sum += val.into(); + } + sum + } + + #[test] + fn test_normal() { + assert_eq!(sum_of([0u8, 1u8, 2u8, 3u8]), 6i32); + assert_eq!(sum_of([-1i32, 777i32, 0i32, -1000i32]), -224i32); + } + + // Note, we never said that [u8;4] or [i32;4] (the two types used) supported NumIterator. + // It was implemented automatically because they met the bounds. + + #[test] + fn test_with_alias() { + assert_eq!(sum_of2([0u8, 1u8, 2u8, 3u8]), 6i32); + assert_eq!(sum_of2([-1i32, 777i32, 0i32, -1000i32]), -224i32); + } + + /* + Note, the alias above will expand to: + + pub trait NumIterator: IntoIterator where Self : IntoIterator, { + type NumItem: Into; + } + impl NumIterator for TTT + where ::Item: Into, { + type NumItem = ::Item; + } + + The first item declares a new trait to be an alias. The second implements that trait for any + types that satisfy the bounds. Note the trick with the associated type NumItem. Only + super-trait bounds are implicitly propagated. + + One would naively try simply the NumIterator trait by writing + pub trait NumIterator: IntoIterator where + Self : IntoIterator, + ::Item : Into + + However, ::Item : Into does not count as a super-trait as it is + not a bound on Self. Self : IntoIterator is a bound on self, and then + the desired bound can be put (as a super-trait) on the associated type: + + type NumItem: Into; + */ +} diff --git a/libraries/misc/src/unsigned_allocators.rs b/libraries/misc/src/unsigned_allocators.rs new file mode 100644 index 000000000..afd305e48 --- /dev/null +++ b/libraries/misc/src/unsigned_allocators.rs @@ -0,0 +1,306 @@ +use core::cell::Cell; +use core::cmp::PartialOrd; +use core::mem; +use core::mem::MaybeUninit; +use core::ops::{AddAssign, Not}; + +/// An allocator for unsigned integers from [0,MAX_V) +/// Calling free on an integer not allocated is allowed to cause all future allocations to return +/// any value. 
+/// Usage: +/// +/// ``` +/// use crate::misc::unsigned_allocators::UnsignedAllocator; +/// let mut my_allocator = misc::unsigned_allocators::ArrayUnsignedAllocator::::default(); +/// let int1 : u8 = my_allocator.alloc().unwrap(); +/// let int2 : u8 = my_allocator.alloc().unwrap(); +/// let int3 : u8 = my_allocator.alloc().unwrap(); +/// // some_use(int1, int2, int3); +/// my_allocator.free(int1); +/// my_allocator.free(int2); +/// // some_use(int3); +/// my_allocator.free(int3); +/// ``` +/// +pub trait UnsignedAllocator { + fn alloc(&mut self) -> Option; + fn free(&mut self, val: T); +} + +/// An allocator over unsigned integers [0,MAX_V). T AND Ts must be able to hold MAX_V +/// For example. Valid: +/// ``` +/// use misc::unsigned_allocators::ArrayUnsignedAllocator; +/// type A = ArrayUnsignedAllocator; +/// type B = ArrayUnsignedAllocator; +/// type C = ArrayUnsignedAllocator; +/// ``` +/// Invalid: +/// ```should_fail +/// ArrayUnsignedAllocator; +/// ``` +pub struct ArrayUnsignedAllocator { + next_free: Ts, + vals: [MaybeUninit; MAX_V], +} + +/// This trait is meant to provide a generic interface for the 'as' keyword for integers. +/// +/// Equivalent: +/// ``` +/// use misc::unsigned_allocators::As; +/// type C = u16; +/// let b : u8 = 0; +/// +/// let a = b as C; +/// let a = C::from_as(b); +/// let a : C = b.into_as(); +/// ``` +/// +/// `Into`/`From` is not provided for integer types that do not fit into each other. +/// e.g., there is no `Into` for `i8`, etc. +/// Lossy conversion is supplied via the "as" keyword, which does not work for generics as it is not +/// also a trait for a reason beyond me. +/// I am sure some of this would be in the numbers crate, but we don't pull that in. +#[const_trait] +pub trait As { + fn into_as(self) -> Target; + fn from_as(from: Target) -> Self; +} + +// Auto generate all 100 combinations. +macro_rules! cast_to_from { + ($to : ty, $from : ty) => { + impl const As<$to> for $from { + fn into_as(self) -> $to { + self as $to + } + fn from_as(from: $to) -> Self { + from as Self + } + } + }; +} +macro_rules! cast_to_all { + ($from: ty) => { + cast_to_from!(u8, $from); + cast_to_from!(i8, $from); + cast_to_from!(u16, $from); + cast_to_from!(i16, $from); + cast_to_from!(u32, $from); + cast_to_from!(i32, $from); + cast_to_from!(u64, $from); + cast_to_from!(i64, $from); + cast_to_from!(usize, $from); + cast_to_from!(isize, $from); + }; +} +cast_to_all!(u8); +cast_to_all!(i8); +cast_to_all!(u16); +cast_to_all!(i16); +cast_to_all!(u32); +cast_to_all!(i32); +cast_to_all!(u64); +cast_to_all!(i64); +cast_to_all!(usize); +cast_to_all!(isize); + +/// Explanation of this implementation: +/// This allocator has two modes: +/// Mode one: a simple counter that counts from free numbers. The counter is stored as itself +/// in 'next_free' +/// Mode two: an intrusive free-chain through the array. Links are stored as _complement_ of the +/// index in order to not confuse with the counter. +impl< + Ts: PartialOrd + Not + AddAssign + As + Copy + From<::Output>, + T: As + As + Copy, + const MAX_V: usize, + > UnsignedAllocator for ArrayUnsignedAllocator +{ + fn alloc(&mut self) -> Option { + // The first branch path is for when the next free item is the head of a linked list + // through the array. 
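+        // Worked sketch: if the counter stood at 5 when free(3) ran, then
+        // vals[3] now holds the old next_free (5) and next_free == !3 (< 0).
+        // This branch therefore returns 3 and restores next_free to 5.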
+ if self.next_free < Ts::from_as(0usize) { + // Values < 0 are complements of the index of the next free item, so complement again + let res: T = T::from_as(Ts::from(!self.next_free)); + // Read next free item from list + // Safety: the only time we store an index is when it was freed, which would + // initialise this element in the array. + unsafe { + self.next_free = self.vals[>::into_as(res)].assume_init(); + } + // Return result + Some(res) + } else { + // Values >= 0 are a counter over free items + if self.next_free == Ts::from_as(MAX_V) { + // If the counter has hit the max value, we can allocate no more + None + } else { + // Otherwise increment and return + let res = T::from_as(self.next_free); + self.next_free += Ts::from_as(1); + Some(res) + } + } + } + + fn free(&mut self, val: T) { + let ndx: usize = >::into_as(val); + // Add the current next free into the free list + self.vals[ndx].write(self.next_free); + // Store the (negative) of the value we just freed as the head of the free list + self.next_free = >::from_as(!ndx); + } +} + +impl, const MAX_V: usize> const Default for ArrayUnsignedAllocator { + fn default() -> Self { + ArrayUnsignedAllocator { + next_free: Ts::from_as(0u8), + // Vals are MaybeUninit which do not require initialization + vals: unsafe { mem::MaybeUninit::uninit().assume_init() }, + } + } +} + +/// Stores state as a bitfield +pub struct BitfieldAllocator { + // 0 means used, 1 means free. + val: Cell, +} + +impl BitfieldAllocator { + #[inline] + pub const fn new() -> Self { + let bits = mem::size_of::() * 8; + assert!(N <= bits); + Self { + // N set ones in the low bits + val: Cell::new( + // Handle shift by bits of usize as special case + if N == bits { !0 } else { (1 << N) - 1 }, + ), + } + } + + pub fn alloc(&self) -> Option { + let v = self.val.get(); + + if v == 0 { + return None; + } + + let result = v.trailing_zeros(); + + // Clear the result bit to allocate it + self.val.set(v ^ (1usize << result as usize)); + + Some(result as u8) + } + + pub fn free(&self, val: u8) { + // Set the val'th bit to free it. 
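+        // E.g. free(3) ORs in 0b1000; alloc() scans free bits with
+        // trailing_zeros, so index 3 becomes allocatable again.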
+ self.val.set(self.val.get() | (1usize << val as usize)); + } +} + +#[cfg(test)] +mod tests { + use crate::unsigned_allocators::{ + ArrayUnsignedAllocator, BitfieldAllocator, UnsignedAllocator, + }; + + const N: usize = 7; + + fn alloc_all(alloc: &BitfieldAllocator, used: &mut [bool; N]) { + for _ in 0..N { + let result = alloc.alloc(); + assert!(result.is_some()); + let result = result.unwrap(); + assert!(!used[result as usize]); + used[result as usize] = true; + } + } + + #[test] + fn bitfield_max() { + let alloc = BitfieldAllocator::::new(); + + let mut used: [bool; N] = [false; N]; + + alloc_all(&alloc, &mut used); + + // Now we allocated max items we should get none + assert!(alloc.alloc().is_none()) + } + + #[test] + fn bitfield_reuse() { + const N: usize = 7; + let alloc = BitfieldAllocator::::new(); + + let mut used: [bool; N] = [false; N]; + + // Allocate and de-allocate a few times + for _ in 0..2 { + // Allocate all + alloc_all(&alloc, &mut used); + + // Free all + for i in 0..N { + alloc.free(i as u8); + used[i] = false; + } + } + } + + #[test] + fn small_test() { + let mut my_alloc: ArrayUnsignedAllocator = Default::default(); + + // Try allocating all the numbers + for i in 0u8..100u8 { + assert_eq!(my_alloc.alloc(), Some(i)) + } + + // No more should be possible + assert_eq!(UnsignedAllocator::::alloc(&mut my_alloc), None); + + // Free some up + my_alloc.free(1u8); + my_alloc.free(7u8); + my_alloc.free(16u8); + + // Allocate them again + assert_eq!(my_alloc.alloc(), Some(16u8)); + assert_eq!(my_alloc.alloc(), Some(7u8)); + assert_eq!(my_alloc.alloc(), Some(1u8)); + } + + #[test] + fn larger_test() { + let mut my_alloc: ArrayUnsignedAllocator = Default::default(); + + // Try allocating some the numbers + for i in 0u16..100u16 { + assert_eq!(my_alloc.alloc(), Some(i)) + } + + // Free the even ones + for i in 0u16..50u16 { + my_alloc.free(2 * i); + } + + // Allocate them again + for i in 0u16..50u16 { + assert_eq!(my_alloc.alloc(), Some(98 - (2 * i))); + } + + // Go back to allocating in order + for i in 100u16..1000u16 { + assert_eq!(my_alloc.alloc(), Some(i)); + } + } +} diff --git a/libraries/riscv-csr/src/csr.rs b/libraries/riscv-csr/src/csr.rs index 45cf69515..9968ca0d9 100644 --- a/libraries/riscv-csr/src/csr.rs +++ b/libraries/riscv-csr/src/csr.rs @@ -103,6 +103,10 @@ pub const PMPADDR61: usize = 0x3ED; pub const PMPADDR62: usize = 0x3EE; pub const PMPADDR63: usize = 0x3EF; +pub const HMPCOUNTER_BASE: usize = 0xC00; + +pub const SATP: usize = 0x180; + /// Read/Write registers. #[derive(Copy, Clone)] pub struct ReadWriteRiscvCsr { diff --git a/libraries/tock-cells/src/lib.rs b/libraries/tock-cells/src/lib.rs index 31e3b8f8c..5d91cd12c 100644 --- a/libraries/tock-cells/src/lib.rs +++ b/libraries/tock-cells/src/lib.rs @@ -1,6 +1,7 @@ //! Tock Cell types. #![no_std] +#![feature(const_mut_refs)] pub mod map_cell; pub mod numeric_cell_ext; diff --git a/libraries/tock-cells/src/map_cell.rs b/libraries/tock-cells/src/map_cell.rs index 85a65dd7e..b6105ebe9 100644 --- a/libraries/tock-cells/src/map_cell.rs +++ b/libraries/tock-cells/src/map_cell.rs @@ -1,8 +1,67 @@ //! Tock specific `MapCell` type for sharing references. 
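+//!
+//! Borrow state is tracked with a three-state flag (`Uninit`/`Init`/`InitBorrowed`,
+//! see `MapCellState` below) rather than a bool, which is what allows
+//! `try_borrow_mut` to hand out a `MapCellRef` smart pointer safely.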
+use self::MapCellErr::AlreadyBorrowed; +use self::MapCellState::{Init, InitBorrowed, Uninit}; use core::cell::{Cell, UnsafeCell}; use core::mem::MaybeUninit; +use core::ops::{Deref, DerefMut}; use core::ptr; +use core::ptr::drop_in_place; + +#[derive(Clone, Copy, PartialEq)] +enum MapCellState { + Uninit, + Init, + InitBorrowed, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum MapCellErr { + AlreadyBorrowed, + Uninit, +} + +/// Smart pointer to a T that will automatically set the MapCell back to the init state. +/// You probably want to Deref this immediately. +pub struct MapCellRef<'a, T> { + map_cell: &'a MapCell, +} + +impl<'a, T> Deref for MapCellRef<'a, T> { + type Target = T; + + #[inline] + fn deref(&self) -> &Self::Target { + unsafe { + // Safety: There will only ever be one MapCellRef to a MapCell as we only allow their + // construction when the cell is in the 'Init' state, and move the MapCell to the + // 'InitBorrowed' state for the duration of the existence of this type. + let valref = &*self.map_cell.val.get(); + // Safety: when this MapCellRef was constructed, we checked that the MapCell was in the + // Init state. + valref.assume_init_ref() + } + } +} + +impl<'a, T> DerefMut for MapCellRef<'a, T> { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { + // Safety: Same as deref, but because deref_mut requires borrowing self as mut, there + // will also be no immutable references to the data. + let valref = &mut *self.map_cell.val.get(); + valref.assume_init_mut() + } + } +} + +impl<'a, T> Drop for MapCellRef<'a, T> { + #[inline] + fn drop(&mut self) { + self.map_cell.occupied.set(Init) + } +} /// A mutable memory location that enforces borrow rules at runtime without /// possible panics. @@ -17,40 +76,60 @@ pub struct MapCell { // `.occupied` before calling `.val.get()` or `.val.assume_init()`. See // [mem::MaybeUninit](https://doc.rust-lang.org/core/mem/union.MaybeUninit.html). val: UnsafeCell>, - occupied: Cell, + occupied: Cell, +} + +impl Drop for MapCell { + #[inline(always)] + fn drop(&mut self) { + let state = self.occupied.get(); + debug_assert!(state != InitBorrowed); + if state == Init { + unsafe { + // Safety: state being Init means that the MaybeUninit data was initted. + // the pointer to the data can never be used again as this + drop_in_place(self.val.get_mut().as_mut_ptr()) + } + } + } } impl MapCell { /// Creates an empty `MapCell` + #[inline(always)] pub const fn empty() -> MapCell { MapCell { val: UnsafeCell::new(MaybeUninit::uninit()), - occupied: Cell::new(false), + occupied: Cell::new(Uninit), } } /// Creates a new `MapCell` containing `value` + #[inline(always)] pub const fn new(value: T) -> MapCell { MapCell { val: UnsafeCell::new(MaybeUninit::::new(value)), - occupied: Cell::new(true), + occupied: Cell::new(Init), } } /// Returns a boolean which indicates if the MapCell is unoccupied. + #[inline(always)] pub fn is_none(&self) -> bool { !self.is_some() } - /// Returns a boolean which indicates if the MapCell is occupied. + /// Returns a boolean which indicates if the MapCell is occupied (regardless of whether it is + /// borrowed or not). + #[inline(always)] pub fn is_some(&self) -> bool { - self.occupied.get() + self.occupied.get() != Uninit } /// Takes the value out of the `MapCell` leaving it empty. If /// the value has already been taken elsewhere (and not `replace`ed), the /// returned `Option` will be `None`. - /// + /// If the value is currently borrowed, also returns None. 
/// # Examples /// /// ``` @@ -65,13 +144,15 @@ impl MapCell { /// assert_eq!(y.take(), None); /// ``` pub fn take(&self) -> Option { - if self.is_none() { + if self.occupied.get() != Init { None } else { - self.occupied.set(false); + self.occupied.set(Uninit); unsafe { + // SAFETY: not in InitBorrowed state, so we are not leaving a dangling reference let result: MaybeUninit = ptr::replace(self.val.get(), MaybeUninit::::uninit()); + // SAFETY: The Init state means that the MaybeUninit is init // `result` is _initialized_ and now `self.val` is now a new uninitialized value Some(result.assume_init()) } @@ -80,24 +161,66 @@ impl MapCell { /// Puts a value into the `MapCell`. pub fn put(&self, val: T) { - self.occupied.set(true); - unsafe { - ptr::write(self.val.get(), MaybeUninit::::new(val)); - } + // This will ensure the value as dropped + self.replace(val); } /// Replaces the contents of the `MapCell` with `val`. If the cell was not /// empty, the previous value is returned, otherwise `None` is returned. - pub fn replace(&self, val: T) -> Option { - if self.is_none() { - self.put(val); - None - } else { - unsafe { + /// In the event the cell is currently borrowed, returns Err(AlreadyBorrowed) + pub fn try_replace(&self, val: T) -> Result, MapCellErr> { + match self.occupied.get() { + Uninit => { + unsafe { + // Safety: Because we are in the Uninit state, we are not writing over anything + // that needs to be dropped + ptr::write(self.val.get(), MaybeUninit::::new(val)); + self.occupied.set(Init) + } + Ok(None) + } + Init => unsafe { + // Safety: Because we are not in the InitBorrowed state, nothing is currently also + // referencing this value let result: MaybeUninit = ptr::replace(self.val.get(), MaybeUninit::new(val)); // `result` is _initialized_ and now `self.val` is now a new uninitialized value - Some(result.assume_init()) + Ok(Some(result.assume_init())) + }, + InitBorrowed => Err(AlreadyBorrowed), + } + } + + /// Same as try_replace but panics if the cell is already borrowed + pub fn replace(&self, val: T) -> Option { + self.try_replace(val).unwrap() + } + + /// Try borrow a mutable reference to the data contained in this cell + /// The type wrapped in the Option is a smart pointer to a T + /// Using this method rather than the callback based logic below will result in considerably + /// less code noise due to nested callbacks. + /// # Examples + /// + /// ``` + /// extern crate tock_cells; + /// use tock_cells::map_cell::MapCell; + /// let cell = MapCell::new(1234); + /// let x = &cell; + /// let y = &cell; + /// + /// *x.try_borrow_mut().unwrap() += 1; + /// + /// assert_eq!(y.take(), Some(1235)); + /// ``` + #[inline(always)] + pub fn try_borrow_mut(&self) -> Result, MapCellErr> { + match self.occupied.get() { + Init => { + self.occupied.set(InitBorrowed); + Ok(MapCellRef { map_cell: self }) } + Uninit => Err(MapCellErr::Uninit), + InitBorrowed => Err(MapCellErr::AlreadyBorrowed), } } @@ -124,22 +247,15 @@ impl MapCell { /// // but potentially changed. 
/// assert_eq!(y.take(), Some(1235)); /// ``` + #[inline(always)] pub fn map(&self, closure: F) -> Option where F: FnOnce(&mut T) -> R, { - if self.is_some() { - self.occupied.set(false); - let valref = unsafe { &mut *self.val.get() }; - // TODO: change to valref.get_mut() once stabilized [#53491](https://github.com/rust-lang/rust/issues/53491) - let res = closure(unsafe { &mut *valref.as_mut_ptr() }); - self.occupied.set(true); - Some(res) - } else { - None - } + Some(closure(&mut *self.try_borrow_mut().ok()?)) } + #[inline(always)] pub fn map_or(&self, default: R, closure: F) -> R where F: FnOnce(&mut T) -> R, @@ -149,22 +265,15 @@ impl MapCell { /// Behaves the same as `map`, except the closure is allowed to return /// an `Option`. + #[inline(always)] pub fn and_then(&self, closure: F) -> Option where F: FnOnce(&mut T) -> Option, { - if self.is_some() { - self.occupied.set(false); - let valref = unsafe { &mut *self.val.get() }; - // TODO: change to valref.get_mut() once stabilized [#53491](https://github.com/rust-lang/rust/issues/53491) - let res = closure(unsafe { &mut *valref.as_mut_ptr() }); - self.occupied.set(true); - res - } else { - None - } + closure(&mut *self.try_borrow_mut().ok()?) } + #[inline(always)] pub fn modify_or_replace(&self, modify: F, mkval: G) where F: FnOnce(&mut T), @@ -175,3 +284,77 @@ impl MapCell { } } } + +#[cfg(test)] +mod tests { + use map_cell::{MapCell, MapCellErr}; + + struct DropCheck<'a> { + flag: &'a mut bool, + } + impl<'a> Drop for DropCheck<'a> { + fn drop(&mut self) { + *self.flag = true; + } + } + + #[test] + fn test_drop() { + let mut dropped = false; + { + let _a_cell = MapCell::new(DropCheck { flag: &mut dropped }); + } + assert!(dropped) + } + + #[test] + fn test_replace() { + let a_cell = MapCell::new(1); + let old = a_cell.replace(2); + assert_eq!(old, Some(1)); + assert_eq!(a_cell.take(), Some(2)); + assert_eq!(a_cell.take(), None); + } + + #[test] + fn test_try_replace() { + let a_cell = MapCell::new(1); + let borrow = a_cell.try_borrow_mut().unwrap(); + assert_eq!(a_cell.try_replace(2), Err(MapCellErr::AlreadyBorrowed)); + drop(borrow); + assert_eq!(a_cell.try_replace(1), Ok(Some(1))); + } + + #[test] + fn test_borrow() { + let a_cell = MapCell::new(1); + *a_cell.try_borrow_mut().unwrap() = 2; + { + let mut borrowed2 = a_cell.try_borrow_mut().unwrap(); + assert_eq!(*borrowed2, 2); + *borrowed2 = 3; + } + assert_eq!(a_cell.take(), Some(3)) + } + + #[test] + #[should_panic] + fn test_double_borrow() { + let a_cell = MapCell::new(1); + let mut borrowed = a_cell.try_borrow_mut().unwrap(); + let mut borrowed2 = a_cell.try_borrow_mut().unwrap(); + *borrowed2 = 2; + *borrowed = 3; + } + + #[test] + #[should_panic] + fn test_replace_in_borrow() { + let my_cell = MapCell::new(55); + my_cell.map(|_ref1: &mut i32| { + // Should fail + my_cell.put(56); + my_cell.map(|_ref2: &mut i32| {}) + }); + } +} diff --git a/libraries/tock-cells/src/optional_cell.rs b/libraries/tock-cells/src/optional_cell.rs index 8bc786a21..cc785f2ba 100644 --- a/libraries/tock-cells/src/optional_cell.rs +++ b/libraries/tock-cells/src/optional_cell.rs @@ -17,6 +17,13 @@ impl OptionalCell { } } + /// Create a new OptionalCell from an option + pub const fn new_option(val: Option) -> OptionalCell { + OptionalCell { + value: Cell::new(val), + } + } + /// Create an empty `OptionalCell` (contains just `None`). 
pub const fn empty() -> OptionalCell { OptionalCell { diff --git a/libraries/tock-cells/src/take_cell.rs b/libraries/tock-cells/src/take_cell.rs index 1b0f6435e..fc8d212b3 100644 --- a/libraries/tock-cells/src/take_cell.rs +++ b/libraries/tock-cells/src/take_cell.rs @@ -16,14 +16,14 @@ pub struct TakeCell<'a, T: 'a + ?Sized> { } impl<'a, T: ?Sized> TakeCell<'a, T> { - pub fn empty() -> TakeCell<'a, T> { + pub const fn empty() -> TakeCell<'a, T> { TakeCell { val: Cell::new(None), } } /// Creates a new `TakeCell` containing `value` - pub fn new(value: &'a mut T) -> TakeCell<'a, T> { + pub const fn new(value: &'a mut T) -> TakeCell<'a, T> { TakeCell { val: Cell::new(Some(value)), } diff --git a/libraries/tock-register-interface/src/fields.rs b/libraries/tock-register-interface/src/fields.rs index e0bf66141..73172bced 100644 --- a/libraries/tock-register-interface/src/fields.rs +++ b/libraries/tock-register-interface/src/fields.rs @@ -270,7 +270,7 @@ impl FieldValue { } // Combine two fields with the addition operator -impl Add for FieldValue { +impl const Add for FieldValue { type Output = Self; #[inline] diff --git a/libraries/tock-register-interface/src/lib.rs b/libraries/tock-register-interface/src/lib.rs index ecc7a09c7..06e894d25 100644 --- a/libraries/tock-register-interface/src/lib.rs +++ b/libraries/tock-register-interface/src/lib.rs @@ -1,3 +1,4 @@ +#![feature(const_trait_impl)] //! Tock Register Interface //! //! Provides efficient mechanisms to express and use type-checked diff --git a/libraries/tock-register-interface/src/macros.rs b/libraries/tock-register-interface/src/macros.rs index 4bc297d3a..76f0beca3 100644 --- a/libraries/tock-register-interface/src/macros.rs +++ b/libraries/tock-register-interface/src/macros.rs @@ -1,14 +1,61 @@ //! Macros for cleanly defining peripheral registers. +use core::mem::ManuallyDrop; +use core::ops::Deref; + +/// Helper to pass through a generic parameter, whether it be a type, lifetime, or constant. +#[macro_export] +macro_rules! pass_generic { + (const $t : ident : $x : ty) => { + $t + }; + ($t : ident) => { + $t + }; + ($life : lifetime) => { + $life + }; +} + +/// A wrapper around a T that pads it to (at least, not exactly) N bytes +pub union PaddedTo { + inner: ManuallyDrop, + _padding: [u8; N], +} + +impl PaddedTo { + pub fn new(inner: T) -> Self { + Self { + inner: ManuallyDrop::new(inner), + } + } +} + +impl Deref for PaddedTo { + type Target = T; + + fn deref(&self) -> &Self::Target { + // Safety: this is the only variant we ever construct / allow access to + unsafe { self.inner.deref() } + } +} + +impl core::ops::DerefMut for PaddedTo { + fn deref_mut(&mut self) -> &mut Self::Target { + // Safety: this is the only variant we ever construct / allow access to + unsafe { self.inner.deref_mut() } + } +} + #[macro_export] macro_rules! register_fields { // Macro entry point. - (@root $(#[$attr_struct:meta])* $vis_struct:vis $name:ident $(<$life:lifetime>)? { $($input:tt)* } ) => { + (@root $(#[$attr_struct:meta])* $vis_struct:vis $name:ident $(<$($($idents : ident)* $($life : lifetime)? $(: $T: tt) ?),+>)? { $($input:tt)* } ) => { $crate::register_fields!( @munch ( $($input)* ) -> { - $vis_struct struct $(#[$attr_struct])* $name $(<$life>)? + $vis_struct struct $(#[$attr_struct])* $name $(<$($($idents)* $($life)? $(: $T) ?),+>)? } ); }; @@ -19,14 +66,15 @@ macro_rules! register_fields { $(#[$attr_end:meta])* ($offset:expr => @END), ) - -> {$vis_struct:vis struct $(#[$attr_struct:meta])* $name:ident $(<$life:lifetime>)? 
$( + -> { $vis_struct:vis struct $(#[$attr_struct:meta])* $name:ident $(<$($($idents : ident)* $($life : lifetime)? $(: $T: tt) ?),+>)? $( $(#[$attr:meta])* ($vis:vis $id:ident: $ty:ty) )*} ) => { $(#[$attr_struct])* #[repr(C)] - $vis_struct struct $name $(<$life>)? { + $vis_struct struct $name $(<$($($idents)* $($life)? $(: $T) ?),+>)? + { $( $(#[$attr])* $vis $id: $ty @@ -54,6 +102,30 @@ macro_rules! register_fields { ); }; + // Munch field with dynamically calculated size. + (@munch + ( + $(#[$attr:meta])* + ($offset_start:expr => pad $vis:vis $field:ident: $ty:ty), + $(#[$attr_next:meta])* + ($offset_end:expr => $($next:tt)*), + $($after:tt)* + ) + -> {$($output:tt)*} + ) => { + $crate::register_fields!( + @munch ( + $(#[$attr_next])* + ($offset_end => $($next)*), + $($after)* + ) -> { + $($output)* + $(#[$attr])* + ($vis $field: $crate::macros::PaddedTo<$ty, {$offset_end - $offset_start}>) + } + ); + }; + // Munch padding. (@munch ( @@ -79,6 +151,32 @@ macro_rules! register_fields { }; } +#[macro_export] +macro_rules! test_constants { + // Match cases where are no parameters / defaults + (,) => (); + ({$($anything : tt)*},) => (); + // Match a constant + ({{const $name : ident : $t : ty} $($rest1 : tt)* }, {{$default : tt} $($rest2 : tt)*}) => + ( + const $name : $t = $default; + $crate::test_constants!({$($rest1)*}, {$($rest2)*}); + ); + // Match a lifetime (outputs nothing) + ({{$l : lifetime} $($rest1 : tt)* }, {$($rest2 : tt)*}) => + ( + $crate::test_constants!({$($rest1)*}, {$($rest2)*}); + ); + // Match a type + ({{$name : ident : $t : tt} $($rest1 : tt)* }, {{$default : tt} $($rest2 : tt)*}) => + ( + type $name = $default; + $crate::test_constants!({$($rest1)*} {$($rest2)*}); + ); + // Finish + ({}, {}) => (); +} + // TODO: All of the rustdoc tests below use a `should_fail` attribute instead of // `should_panic` because a const panic will result in a failure to evaluate a // constant value, and thus a compiler error. However, this means that these @@ -177,14 +275,15 @@ macro_rules! test_fields { // const-evaluable. // Macro entry point. - (@root $struct:ident $(<$life:lifetime>)? { $($input:tt)* } ) => { + (@root $struct:ident $(<$($($idents : ident)* $($life : lifetime)? $(: $T: tt) ?),+>)? $(test_defaults<$($default : tt),*>)? { $($input:tt)* } ) => { // Start recursion at offset 0. - $crate::test_fields!(@munch $struct $(<$life>)? ($($input)*) : (0, 0)); + $crate::test_constants!($({$({$($idents)* $($life)? $(: $T) ?})+})?, $({$({$default})*})?); + $crate::test_fields!(@munch $struct $(<$($($idents)* $($life)? $(: $T) ?),+>)? ($($input)*) : (0, 0)); }; // Consume the ($size:expr => @END) field, which MUST be the last field in // the register struct. - (@munch $struct:ident $(<$life:lifetime>)? + (@munch $struct:ident $(<$($($idents : ident)* $($life : lifetime)? $(: $T: tt) ?),+>)? ( $(#[$attr_end:meta])* ($size:expr => @END), @@ -206,7 +305,7 @@ macro_rules! test_fields { // and the claimed end offset MUST be equal. assert!(SUM == $size); - const STRUCT_SIZE: usize = core::mem::size_of::<$struct $(<$life>)?>(); + const STRUCT_SIZE: usize = core::mem::size_of::<$struct $(<$($crate::pass_generic!($($idents)* $($life)? $(: $T) ?)),+>)?>(); const ALIGNMENT_CORRECTED_SIZE: usize = if $size % MAX_ALIGN != 0 { $size + (MAX_ALIGN - ($size % MAX_ALIGN)) } else { $size }; assert!( @@ -216,26 +315,51 @@ macro_rules! 
test_fields { "Invalid size for struct ", stringify!($struct), " (expected ", - $size, + stringify!($size), ", actual struct size differs)", ), ); + }; }; + // Consume a proper ($offset:expr => pad $field:ident: $ty:ty) field. + (@munch $struct:ident $(<$($($idents : ident)* $($life : lifetime)? $(: $T: tt) ?),+>)? + ( + $(#[$attr:meta])* + ($offset_start:expr => pad $vis:vis $field:ident: $ty:ty), + $(#[$attr_next:meta])* + ($offset_end:expr => $($next:tt)*), + $($after:tt)* + ) + : $output:expr + ) => { + // Just replace the type and then use the non-pad matcher + $crate::test_fields!(@munch $struct $(<$($($idents)* $($life)? $(: $T) ?),+>)? + ( + $(#[$attr])* + ($offset_start => $vis $field: $crate::macros::PaddedTo<$ty, {$offset_end - $offset_start}>), + $(#[$attr_next])* + ($offset_end => $($next)*), + $($after)* + ) + : $output + ); + }; + // Consume a proper ($offset:expr => $field:ident: $ty:ty) field. - (@munch $struct:ident $(<$life:lifetime>)? + (@munch $struct:ident $(<$($($idents : ident)* $($life : lifetime)? $(: $T: tt) ?),+>)? ( $(#[$attr:meta])* ($offset_start:expr => $vis:vis $field:ident: $ty:ty), $(#[$attr_next:meta])* - ($offset_end:expr => $($next:tt)*), + ($offset_end:expr => $($next : tt)*), $($after:tt)* ) : $output:expr ) => { $crate::test_fields!( - @munch $struct $(<$life>)? ( + @munch $struct $(<$($($idents)* $($life)? $(: $T) ?),+>)? ( $(#[$attr_next])* ($offset_end => $($next)*), $($after)* @@ -257,7 +381,7 @@ macro_rules! test_fields { "Invalid start offset for field ", stringify!($field), " (expected ", - $offset_start, + stringify!($offset_start), " but actual value differs)", ), ); @@ -281,9 +405,8 @@ macro_rules! test_fields { ); } - // Add the current field's length to the offset and validate the - // end offset of the field based on the next field's claimed - // start offset. + // Add the current field's length to the offset. This is validated by the next + // iteration (unless the wildcard _ is used). const NEW_SUM: usize = SUM + core::mem::size_of::<$ty>(); assert!( NEW_SUM == $offset_end, @@ -292,7 +415,7 @@ macro_rules! test_fields { "Invalid end offset for field ", stringify!($field), " (expected ", - $offset_end, + stringify!($offset_end), " but actual value differs)", ), ); @@ -309,7 +432,7 @@ macro_rules! test_fields { }; // Consume a padding ($offset:expr => $padding:ident) field. - (@munch $struct:ident $(<$life:lifetime>)? + (@munch $struct:ident $(<$($($idents : ident)* $($life : lifetime)? $(: $T: tt) ?),+>)? ( $(#[$attr:meta])* ($offset_start:expr => $padding:ident), @@ -320,7 +443,7 @@ macro_rules! test_fields { : $output:expr ) => { $crate::test_fields!( - @munch $struct $(<$life>)? ( + @munch $struct $(<$($($idents)* $($life)? $(: $T) ?),+>)? ( $(#[$attr_next])* ($offset_end => $($next)*), $($after)* @@ -341,7 +464,7 @@ macro_rules! test_fields { "Invalid start offset for padding ", stringify!($padding), " (expected ", - $offset_start, + stringify!($offset_start), " but actual value differs)", ), ); @@ -359,12 +482,12 @@ macro_rules! register_structs { { $( $(#[$attr:meta])* - $vis_struct:vis $name:ident $(<$life:lifetime>)? { + $vis_struct:vis $name:ident$(<$($($idents : ident)* $($life : lifetime)? $(: $T: tt) ? ),+>)? $(test_defaults<$($default : tt),*>)? { $( $fields:tt )* } ),* } => { - $( $crate::register_fields!(@root $(#[$attr])* $vis_struct $name $(<$life>)? { $($fields)* } ); )* + $( $crate::register_fields!(@root $(#[$attr])* $vis_struct $name $(<$($($idents)* $($life)? $(: $T) ?),+>)? 
@@ -359,12 +482,12 @@ macro_rules! register_structs {
     {
         $(
             $(#[$attr:meta])*
-            $vis_struct:vis $name:ident $(<$life:lifetime>)? {
+            $vis_struct:vis $name:ident$(<$($($idents : ident)* $($life : lifetime)? $(: $T: tt) ? ),+>)? $(test_defaults<$($default : tt),*>)? {
                 $( $fields:tt )*
             }
         ),*
     } => {
-        $( $crate::register_fields!(@root $(#[$attr])* $vis_struct $name $(<$life>)? { $($fields)* } ); )*
+        $( $crate::register_fields!(@root $(#[$attr])* $vis_struct $name $(<$($($idents)* $($life)? $(: $T) ?),+>)? { $($fields)* } ); )*
 
         mod static_validate_register_structs {
             $(
@@ -372,7 +495,7 @@ macro_rules! register_structs {
                 mod $name {
                     use super::super::*;
 
-                    $crate::test_fields!(@root $name $(<$life>)? { $($fields)* } );
+                    $crate::test_fields!(@root $name $(<$($($idents)* $($life)? $(: $T) ?),+>)? $(test_defaults<$($default),*>)? { $($fields)* } );
                 }
             )*
         }
diff --git a/libraries/tock-tbf/src/parse.rs b/libraries/tock-tbf/src/parse.rs
index ae856abb5..d907efb15 100644
--- a/libraries/tock-tbf/src/parse.rs
+++ b/libraries/tock-tbf/src/parse.rs
@@ -29,7 +29,7 @@ macro_rules! align4 {
 ///   we can skip over it and check for the next app.
 /// - Err(InitialTbfParseError::InvalidHeader(app_length))
 pub fn parse_tbf_header_lengths(
-    app: &'static [u8; 8],
+    app: &[u8; 8],
 ) -> Result<(u16, u16, u32), types::InitialTbfParseError> {
     // Version is the first 16 bits of the app TBF contents. We need this to
     // correctly parse the other lengths.
@@ -73,8 +73,8 @@ pub fn parse_tbf_header_lengths(
 /// The `header` must be a slice that only contains the TBF header. The caller
 /// should use the `parse_tbf_header_lengths()` function to determine this
 /// length to create the correct sized slice.
-pub fn parse_tbf_header(
-    header: &'static [u8],
+pub fn parse_tbf_header_non_static(
+    header: &[u8],
     version: u16,
 ) -> Result<types::TbfHeader, types::TbfParseError> {
     match version {
@@ -287,3 +287,10 @@ pub fn parse_tbf_header(
         _ => Err(types::TbfParseError::UnsupportedVersion(version)),
     }
 }
+
+pub fn parse_tbf_header(
+    header: &'static [u8],
+    version: u16,
+) -> Result<types::TbfHeader<'static>, types::TbfParseError> {
+    parse_tbf_header_non_static(header, version)
+}
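Relaxing `&'static [u8]` to `&[u8]` is what lets the dynamic process loading capsule parse headers out of a temporary staging buffer rather than executing in place, while the `'static` wrapper keeps existing callers source-compatible. A usage sketch (the helper and its buffer are hypothetical, not part of this patch):

    use core::convert::TryInto;
    use tock_tbf::parse;

    // Hypothetical: report the header length of a TBF staged in a
    // non-'static RAM buffer, or None if it does not parse as an app.
    fn staged_header_len(buf: &[u8]) -> Option<u16> {
        let first8: &[u8; 8] = buf.get(0..8)?.try_into().ok()?;
        let (version, header_len, _total_len) =
            parse::parse_tbf_header_lengths(first8).ok()?;
        let header = parse::parse_tbf_header_non_static(
            buf.get(..header_len as usize)?,
            version,
        )
        .ok()?;
        header.is_app().then(|| header_len)
    }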
diff --git a/libraries/tock-tbf/src/types.rs b/libraries/tock-tbf/src/types.rs
index aaaeabbb6..31109dcf0 100644
--- a/libraries/tock-tbf/src/types.rs
+++ b/libraries/tock-tbf/src/types.rs
@@ -1,5 +1,6 @@
 //! Types and Data Structures for TBFs.
 
+use crate::types::TbfHeader::Padding;
 use core::convert::TryInto;
 use core::fmt;
 use core::mem::size_of;
@@ -517,10 +518,10 @@ pub enum CommandPermissions {
 /// four since we need to statically know the length of the array to store in
 /// this type.
 #[derive(Clone, Copy, Debug)]
-pub struct TbfHeaderV2 {
+pub struct TbfHeaderV2<'a> {
     pub(crate) base: TbfHeaderV2Base,
     pub(crate) main: Option<TbfHeaderV2Main>,
-    pub(crate) package_name: Option<&'static str>,
+    pub(crate) package_name: Option<&'a str>,
     pub(crate) writeable_regions: Option<[Option<TbfHeaderV2WriteableFlashRegion>; 4]>,
     pub(crate) fixed_addresses: Option<TbfHeaderV2FixedAddresses>,
     pub(crate) permissions: Option<TbfHeaderV2Permissions<8>>,
@@ -535,12 +536,30 @@ pub struct TbfHeaderV2 {
 /// The kernel can also use this header to keep persistent state about
 /// the application.
 #[derive(Debug)]
-pub enum TbfHeader {
-    TbfHeaderV2(TbfHeaderV2),
+pub enum TbfHeader<'a> {
+    TbfHeaderV2(TbfHeaderV2<'a>),
     Padding(TbfHeaderV2Base),
 }
 
-impl TbfHeader {
+impl<'a> TbfHeader<'a> {
+    /// Return a static version of the header.
+    /// Must provide a string with static lifetime to replace the name.
+    pub fn into_static(&self, name: Option<&'static str>) -> TbfHeader<'static> {
+        match self {
+            TbfHeader::TbfHeaderV2(header) => crate::types::TbfHeader::TbfHeaderV2(TbfHeaderV2 {
+                base: header.base,
+                main: header.main,
+                package_name: name,
+                writeable_regions: header.writeable_regions,
+                fixed_addresses: header.fixed_addresses,
+                permissions: header.permissions,
+                persistent_acls: header.persistent_acls,
+                kernel_version: header.kernel_version,
+            }),
+            TbfHeader::Padding(p) => Padding(*p),
+        }
+    }
+
     /// Return whether this is an app or just padding between apps.
     pub fn is_app(&self) -> bool {
         match *self {
@@ -594,7 +613,7 @@ impl TbfHeader {
     }
 
     /// Get the name of the app.
-    pub fn get_package_name(&self) -> Option<&'static str> {
+    pub fn get_package_name(&self) -> Option<&'a str> {
         match *self {
             TbfHeader::TbfHeaderV2(hd) => hd.package_name,
             _ => None,
@@ -613,13 +632,13 @@ impl TbfHeader {
     }
 
     /// Get the offset and size of a given flash region.
-    pub fn get_writeable_flash_region(&self, index: usize) -> (u32, u32) {
+    pub fn get_writeable_flash_region(&self, index: usize) -> (usize, usize) {
         match *self {
             TbfHeader::TbfHeaderV2(hd) => hd.writeable_regions.map_or((0, 0), |wrs| {
                 wrs.get(index).unwrap_or(&None).map_or((0, 0), |wr| {
                     (
-                        wr.writeable_flash_region_offset,
-                        wr.writeable_flash_region_size,
+                        wr.writeable_flash_region_offset as usize,
+                        wr.writeable_flash_region_size as usize,
                     )
                 })
             }),
diff --git a/rust-toolchain b/rust-toolchain
index 61185ac66..a85e686ac 100644
--- a/rust-toolchain
+++ b/rust-toolchain
@@ -1 +1 @@
-nightly-2022-07-03
+cheri
\ No newline at end of file
diff --git a/tools/run_cargo_fmt.sh b/tools/run_cargo_fmt.sh
index 43e9078c5..34440648b 100755
--- a/tools/run_cargo_fmt.sh
+++ b/tools/run_cargo_fmt.sh
@@ -16,9 +16,11 @@ if [ ! -x tools/run_cargo_fmt.sh ]; then
 fi
 
 # Add the rustfmt component if needed.
-if ! rustup component list | grep 'rustfmt.*(installed)' -q; then
-    # Some versions of OS X want the -preview version, retry that on failure
-    rustup component add rustfmt || rustup component add rustfmt-preview
+if ! rustup which rustfmt; then
+    if ! rustup component list | grep 'rustfmt.*(installed)' -q; then
+        # Some versions of OS X want the -preview version, retry that on failure
+        rustup component add rustfmt || rustup component add rustfmt-preview
+    fi
 fi
 
 # Format overwrites changes, which is probably good, but it's nice to see
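Stepping back to the types.rs change above: because a parsed `TbfHeader<'a>` may now borrow its package name from a transient buffer, `into_static` is the bridge for keeping a header in long-lived kernel state. A sketch, assuming the caller can supply a replacement name with `'static` lifetime (the helper is hypothetical):

    use tock_tbf::types::TbfHeader;

    // Hypothetical: promote a header parsed from a staging buffer so it
    // can be stored in a long-lived process structure. The borrowed
    // package name cannot be carried across, so a 'static one (or None)
    // is substituted.
    fn promote(header: &TbfHeader<'_>, name: Option<&'static str>) -> TbfHeader<'static> {
        header.into_static(name)
    }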