diff --git a/app/lpc55xpresso/app.toml b/app/lpc55xpresso/app.toml
index 34bd7efec..b6abfe0ab 100644
--- a/app/lpc55xpresso/app.toml
+++ b/app/lpc55xpresso/app.toml
@@ -48,7 +48,7 @@ start = true
 [tasks.update_server]
 name = "lpc55-update-server"
 priority = 3
-max-sizes = {flash = 26720, ram = 16704}
+max-sizes = {flash = 27008, ram = 16704}
 stacksize = 8192
 start = true
 sections = {bootstate = "usbsram"}
diff --git a/app/oxide-rot-1/app-dev.toml b/app/oxide-rot-1/app-dev.toml
index c1b7fa643..c5b2d422d 100644
--- a/app/oxide-rot-1/app-dev.toml
+++ b/app/oxide-rot-1/app-dev.toml
@@ -53,7 +53,7 @@ start = true
 [tasks.update_server]
 name = "lpc55-update-server"
 priority = 3
-max-sizes = {flash = 26080, ram = 17000, usbsram = 4096}
+max-sizes = {flash = 27904, ram = 17344, usbsram = 4096}
 # TODO: Size this appropriately
 stacksize = 8192
 start = true
diff --git a/drv/lpc55-update-server/src/images.rs b/drv/lpc55-update-server/src/images.rs
index bd9f085fb..425a9ac2a 100644
--- a/drv/lpc55-update-server/src/images.rs
+++ b/drv/lpc55-update-server/src/images.rs
@@ -2,14 +2,32 @@
 // License, v. 2.0. If a copy of the MPL was not distributed with this
 // file, You can obtain one at https://mozilla.org/MPL/2.0/.
 
+/// Implements the LPC55 `update_server`'s knowledge about bits inside an image.
+///
+/// The update server needs to work with partial or otherwise invalid images.
+/// Signature checks are only performed at boot time, although the update server
+/// does match FWIDs against the boot-time info in some cases. Because flash
+/// areas are mutated during `update_server` operations and those operations can
+/// be interrupted (e.g., by a power failure), any data in the non-active Hubris
+/// image needs to be treated as untrusted (`update_server` does not alter its
+/// own image). Data structures, pointers, and offsets within an image are
+/// checked to ensure no mischief during `update_server` operations. The
+/// remaining reliability and security concerns rely on the boot-time policies
+/// of the LPC55 ROM and the stage0 bootloader.
+use crate::{
+    indirect_flash_read, round_up_to_flash_page, SIZEOF_U32, U32_SIZE,
+};
+use abi::{ImageHeader, CABOOSE_MAGIC, HEADER_MAGIC};
 use core::ops::Range;
-use drv_lpc55_update_api::BLOCK_SIZE_BYTES;
-use drv_lpc55_update_api::{RotComponent, SlotId};
+use core::ptr::addr_of;
+use drv_lpc55_update_api::{RawCabooseError, RotComponent, SlotId};
 use drv_update_api::UpdateError;
-use userlib::UnwrapLite;
+use zerocopy::{AsBytes, FromBytes};
 
-// We shouldn't actually dereference these. The types are not correct.
-// They are just here to allow a mechanism for getting the addresses.
+// Our layout of flash banks on the LPC55.
+// Addresses are from the linker.
+// The bootloader (`bootleby`) resides at __STAGE0_BASE and
+// only references IMAGE_A and IMAGE_B.
 extern "C" {
     static __IMAGE_A_BASE: [u32; 0];
     static __IMAGE_B_BASE: [u32; 0];
@@ -26,79 +44,514 @@ extern "C" {
 // Location of the NXP header
 pub const HEADER_BLOCK: usize = 0;
 
-// NXP LPC55's mixed header/vector table offsets
-const RESET_VECTOR_OFFSET: usize = 0x04;
-pub const LENGTH_OFFSET: usize = 0x20;
-pub const HEADER_OFFSET: u32 = 0x130;
-const MAGIC_OFFSET: usize = HEADER_OFFSET as usize;
+// An image may have an ImageHeader located after the
+// LPC55's mixed header/vector table.
+pub const IMAGE_HEADER_OFFSET: u32 = 0x130;
 
-// Perform some sanity checking on the header block.
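Note: `round_up_to_flash_page` and `indirect_flash_read` are imported from the crate root and are not shown in this diff. For readers following along, here is a minimal host-side sketch of what the rounding helper plausibly looks like, assuming the LPC55's 512-byte flash page and the convention (visible in the callers below) that an unrepresentable length maps to a bad-length error. The names and exact behavior are illustrative, not the in-tree implementation.

// Sketch only: stand-in for drv_lpc55_flash::BYTES_PER_FLASH_PAGE (512 on the LPC55).
const BYTES_PER_FLASH_PAGE: u32 = 512;

/// Round `len` up to the next flash-page boundary, returning `None` on
/// overflow so callers can map it to `UpdateError::BadLength`.
fn round_up_to_flash_page(len: u32) -> Option<u32> {
    let pages = len.checked_add(BYTES_PER_FLASH_PAGE - 1)? / BYTES_PER_FLASH_PAGE;
    pages.checked_mul(BYTES_PER_FLASH_PAGE)
}

fn main() {
    assert_eq!(round_up_to_flash_page(0), Some(0));
    assert_eq!(round_up_to_flash_page(1), Some(512));
    assert_eq!(round_up_to_flash_page(512), Some(512));
    assert_eq!(round_up_to_flash_page(513), Some(1024));
    // Values near u32::MAX overflow and are rejected.
    assert_eq!(round_up_to_flash_page(u32::MAX), None);
}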
-pub fn validate_header_block(
-    component: RotComponent,
-    slot: SlotId,
-    block: &[u8; BLOCK_SIZE_BYTES],
-) -> Result<(), UpdateError> {
-    let exec = image_range(component, slot).1;
+/// Address ranges that may contain an image during storage and active use.
+/// `stored` and `at_runtime` ranges are the same except for `stage0next`.
+// TODO: Make these RangeInclusive in case we ever need to model
+// some slot at the end of the address space.
+pub struct FlashRange {
+    pub stored: Range<u32>,
+    pub at_runtime: Range<u32>,
+}
+
+/// Get the flash storage address range and flash execution address range.
+pub fn flash_range(component: RotComponent, slot: SlotId) -> FlashRange {
+    // Safety: this block requires unsafe code to generate references to the
+    // extern "C" statics. Because we're only getting their addresses (all
+    // operations below are just as_ptr), we can't really trigger any UB here.
+    // The addresses themselves are assumed to be valid because they're
+    // produced by the linker, which we implicitly trust.
+    unsafe {
+        match (component, slot) {
+            (RotComponent::Hubris, SlotId::A) => FlashRange {
+                stored: __IMAGE_A_BASE.as_ptr() as u32
+                    ..__IMAGE_A_END.as_ptr() as u32,
+                at_runtime: __IMAGE_A_BASE.as_ptr() as u32
+                    ..__IMAGE_A_END.as_ptr() as u32,
+            },
+            (RotComponent::Hubris, SlotId::B) => FlashRange {
+                stored: __IMAGE_B_BASE.as_ptr() as u32
+                    ..__IMAGE_B_END.as_ptr() as u32,
+                at_runtime: __IMAGE_B_BASE.as_ptr() as u32
+                    ..__IMAGE_B_END.as_ptr() as u32,
+            },
+            (RotComponent::Stage0, SlotId::A) => FlashRange {
+                stored: __IMAGE_STAGE0_BASE.as_ptr() as u32
+                    ..__IMAGE_STAGE0_END.as_ptr() as u32,
+                at_runtime: __IMAGE_STAGE0_BASE.as_ptr() as u32
+                    ..__IMAGE_STAGE0_END.as_ptr() as u32,
+            },
+            (RotComponent::Stage0, SlotId::B) => FlashRange {
+                stored: __IMAGE_STAGE0NEXT_BASE.as_ptr() as u32
+                    ..__IMAGE_STAGE0NEXT_END.as_ptr() as u32,
+                at_runtime: __IMAGE_STAGE0_BASE.as_ptr() as u32
+                    ..__IMAGE_STAGE0_END.as_ptr() as u32,
+            },
+        }
+    }
+}
+
+/// Does (component, slot) refer to the currently running Hubris image?
+pub fn is_current_hubris_image(component: RotComponent, slot: SlotId) -> bool {
+    // Safety: extern statics aren't controlled by Rust so poking them can
+    // cause UB; in this case, it's zero length and we are only taking its
+    // numerical address, so we're not at risk.
+    flash_range(component, slot).stored.start == addr_of!(__this_image) as u32
+}
+
+// LPC55 defined image content
+
+/// Image header for the LPC55S6x device as documented in NXP UM11126
+#[repr(C)]
+#[derive(Default, AsBytes, FromBytes)]
+pub struct ImageVectorsLpc55 {
+    initial_sp: u32,                    // 0x00
+    initial_pc: u32,                    // 0x04
+    _vector_table_0: [u32; 6],          // 0x08, 0c, 10, 14, 18, 1c
+    nxp_image_length: u32,              // 0x20
+    nxp_image_type: u32,                // 0x24
+    nxp_offset_to_specific_header: u32, // 0x28
+    _vector_table_1: [u32; 2],          // 0x2c, 0x30
+    nxp_image_executation_address: u32, // 0x32
+    // Additional trailing vectors are not
+    // interesting here.
+    // _vector_table_2[u32; 2]
+}
+
+impl ImageVectorsLpc55 {
+    const IMAGE_TYPE_PLAIN_SIGNED_XIP_IMAGE: u32 = 4;
+
+    pub fn is_image_type_signed_xip(&self) -> bool {
+        self.nxp_image_type == Self::IMAGE_TYPE_PLAIN_SIGNED_XIP_IMAGE
+    }
     // This part aliases flash in two positions that differ in bit 28.
To allow // for either position to be used in new images, we clear bit 28 in all of // the numbers used for comparison below, by ANDing them with this mask: - const ADDRMASK: u32 = !(1 << 28); + pub fn normalized_initial_pc(&self) -> u32 { + const ADDRMASK: u32 = !(1 << 28); + self.initial_pc & ADDRMASK + } - let reset_vector = u32::from_le_bytes( - block[RESET_VECTOR_OFFSET..][..4].try_into().unwrap_lite(), - ) & ADDRMASK; + // Length of image from offset zero to end of the signature block (without padding) + // a.k.a. ImageVectorsLpc55.nxp_image_length + pub fn image_length(&self) -> Option { + if self.is_image_type_signed_xip() { + Some(self.nxp_image_length) + } else { + None + } + } + + /// Image length padded to nearest page size. + pub fn padded_image_len(&self) -> Option { + round_up_to_flash_page(self.image_length()?) + } - // Ensure the image is destined for the right target - if !exec.contains(&reset_vector) { + /// Determine the bounds of an image assuming the given flash bank + /// addresses. + pub fn padded_image_range( + &self, + at_runtime: &Range, + ) -> Option> { + let image_start = at_runtime.start; + let image_end = + at_runtime.start.checked_add(self.padded_image_len()?)?; + Some(image_start..image_end) + } +} + +impl TryFrom<&[u8]> for ImageVectorsLpc55 { + type Error = (); + + fn try_from(buffer: &[u8]) -> Result { + match ImageVectorsLpc55::read_from_prefix(buffer) { + Some(vectors) => Ok(vectors), + None => Err(()), + } + } +} + +/// Sanity check the image header block. +/// Return the offset to the end of the executable code which is also +/// the end of optional caboose and the beginning of the signature block. +pub fn validate_header_block( + header_access: &ImageAccess<'_>, +) -> Result { + let mut vectors = ImageVectorsLpc55::new_zeroed(); + let mut header = ImageHeader::new_zeroed(); + + // Read block 0 and the header contained within (if available). + if header_access.read_bytes(0, vectors.as_bytes_mut()).is_err() + || header_access + .read_bytes(IMAGE_HEADER_OFFSET, header.as_bytes_mut()) + .is_err() + { + return Err(UpdateError::InvalidHeaderBlock); + } + + // Check image type and presence of signature block. + if !vectors.is_image_type_signed_xip() + || vectors.nxp_offset_to_specific_header >= vectors.nxp_image_length + { + // Not a signed XIP image or no signature block. + // If we figure out a reasonable minimum size for the signature block + // we should test for that. return Err(UpdateError::InvalidHeaderBlock); } - // Ensure the MAGIC is correct. - // Bootloaders have been released without an ImageHeader. Allow those. - let magic = - u32::from_le_bytes(block[MAGIC_OFFSET..][..4].try_into().unwrap_lite()); - if component == RotComponent::Hubris && magic != abi::HEADER_MAGIC { + // We don't rely on the ImageHeader, but if it is there, it needs to be valid. + // Note that `ImageHeader.epoch` is used by rollback protection for early + // rejection of invalid images. + // TODO: Improve estimate of where the first executable instruction can be. + let code_offset = if header.magic == HEADER_MAGIC { + if header.total_image_len != vectors.nxp_offset_to_specific_header { + // ImageHeader disagrees with LPC55 vectors. + return Err(UpdateError::InvalidHeaderBlock); + } + // Adding constants should be resolved at compile time: no call to panic. + IMAGE_HEADER_OFFSET + (core::mem::size_of::() as u32) + } else { + IMAGE_HEADER_OFFSET + }; + + if vectors.nxp_image_length as usize > header_access.at_runtime().len() { + // Image extends outside of flash bank. 
return Err(UpdateError::InvalidHeaderBlock); } - Ok(()) + // Check that the initial PC is pointing to a reasonable location. + // We only have information from image block zero, so this is just + // a basic sanity check. + // A check at signing time can be more exact, but this helps reject + // ridiculous images before any flash is erased. + let caboose_end = header_access + .at_runtime() + .start + .checked_add(vectors.nxp_offset_to_specific_header) + .ok_or(UpdateError::InvalidHeaderBlock)?; + let text_start = header_access + .at_runtime() + .start + .checked_add(code_offset) + .ok_or(UpdateError::InvalidHeaderBlock)?; + if !(text_start..caboose_end).contains(&vectors.normalized_initial_pc()) { + return Err(UpdateError::InvalidHeaderBlock); + } + + Ok(vectors.nxp_offset_to_specific_header) } -pub fn same_image(component: RotComponent, slot: SlotId) -> bool { - // Safety: We are trusting the linker. - image_range(component, slot).0.start - == unsafe { &__this_image } as *const _ as u32 +/// Get the range of the caboose contained within an image if it exists. +/// +/// This implementation has similar logic to the one in `stm32h7-update-server`, +/// but uses ImageAccess for images that, during various operations, +/// may be in RAM, Flash, or split between both. +pub fn caboose_slice( + image: &ImageAccess<'_>, +) -> Result, RawCabooseError> { + // The ImageHeader is optional since the offset to the start of + // the signature block (end of image) is also found in an LPC55 + // Type 4 (Signed XIP) image. + // + // In this context, NoImageHeader actually means that the image + // is not well formed. + let image_end_offset = validate_header_block(image) + .map_err(|_| RawCabooseError::NoImageHeader)?; + + // By construction, the last word of the caboose is its size as a `u32` + let caboose_size_offset = image_end_offset + .checked_sub(U32_SIZE) + .ok_or(RawCabooseError::MissingCaboose)?; + let caboose_size = image + .read_word(caboose_size_offset) + .map_err(|_| RawCabooseError::ReadFailed)?; + + // Security considerations: + // A maliciously constructed image could be staged in flash + // with an apparently large caboose that would allow some access + // within its own flash slot. Presumably, we would never sign + // such an image so the bootloader would never execute it. + // However, reading out that image's caboose would be allowed. + // The range and size checks on caboose access are meant to keep + // accesses within the Hubris image which is constrained to its + // flash slot. + // There is no sensitive information to be found there. + let caboose_magic_offset = image_end_offset + .checked_sub(caboose_size) + .ok_or(RawCabooseError::MissingCaboose)?; + if ((caboose_magic_offset % U32_SIZE) != 0) + || !(IMAGE_HEADER_OFFSET..caboose_size_offset) + .contains(&caboose_magic_offset) + { + return Err(RawCabooseError::MissingCaboose); + } + + let caboose_magic = image + .read_word(caboose_magic_offset) + .map_err(|_| RawCabooseError::MissingCaboose)?; + + if caboose_magic == CABOOSE_MAGIC { + let caboose_start = caboose_magic_offset + .checked_add(U32_SIZE) + .ok_or(RawCabooseError::MissingCaboose)?; + Ok(caboose_start..caboose_size_offset) + } else { + Err(RawCabooseError::MissingCaboose) + } } -/// Return the flash storage address range and flash execution address range. -/// These are only different for the staged stage0 image. 
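To make the trailer math in `caboose_slice` above easier to follow, here is a minimal host-side sketch of the same layout: the last `u32` before the signature block holds the caboose size, and that size, measured back from the end of the image, lands on the magic word that opens the caboose. The `CABOOSE_MAGIC` value below is a stand-in for the constant exported by the `abi` crate, and the helper names are illustrative; the in-tree code additionally requires the magic word to sit above `IMAGE_HEADER_OFFSET` and within the flash slot.

// Host-side sketch of the caboose-trailer math. Offsets are relative to the
// start of the image; `image_end` is `nxp_offset_to_specific_header`, i.e.
// the start of the signature block.
const CABOOSE_MAGIC: u32 = 0xcab0_005e; // stand-in for abi::CABOOSE_MAGIC
const U32_SIZE: u32 = 4;

fn read_word(image: &[u8], offset: u32) -> Option<u32> {
    let start = offset as usize;
    let bytes = image.get(start..start.checked_add(4)?)?;
    Some(u32::from_le_bytes(bytes.try_into().ok()?))
}

/// Returns the caboose contents as a range of image offsets, if present.
fn caboose_range(image: &[u8], image_end: u32) -> Option<std::ops::Range<u32>> {
    // The last word before the signature block is the caboose size...
    let size_offset = image_end.checked_sub(U32_SIZE)?;
    let caboose_size = read_word(image, size_offset)?;
    // ...measured back from the end of the image to the magic word.
    let magic_offset = image_end.checked_sub(caboose_size)?;
    if magic_offset % U32_SIZE != 0 || read_word(image, magic_offset)? != CABOOSE_MAGIC {
        return None;
    }
    Some(magic_offset + U32_SIZE..size_offset)
}

fn main() {
    // 16-byte "image" whose last 12 bytes are a caboose holding one u32.
    let mut image = [0u8; 16];
    image[4..8].copy_from_slice(&CABOOSE_MAGIC.to_le_bytes()); // magic
    image[8..12].copy_from_slice(&0x1234u32.to_le_bytes()); // caboose payload
    image[12..16].copy_from_slice(&12u32.to_le_bytes()); // size: magic through size word
    assert_eq!(caboose_range(&image, 16), Some(8..12));
}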
-pub fn image_range( - component: RotComponent, - slot: SlotId, -) -> (Range, Range) { - unsafe { - match (component, slot) { - (RotComponent::Hubris, SlotId::A) => ( - __IMAGE_A_BASE.as_ptr() as u32..__IMAGE_A_END.as_ptr() as u32, - __IMAGE_A_BASE.as_ptr() as u32..__IMAGE_A_END.as_ptr() as u32, - ), - (RotComponent::Hubris, SlotId::B) => ( - __IMAGE_B_BASE.as_ptr() as u32..__IMAGE_B_END.as_ptr() as u32, - __IMAGE_B_BASE.as_ptr() as u32..__IMAGE_B_END.as_ptr() as u32, - ), - (RotComponent::Stage0, SlotId::A) => ( - __IMAGE_STAGE0_BASE.as_ptr() as u32 - ..__IMAGE_STAGE0_END.as_ptr() as u32, - __IMAGE_STAGE0_BASE.as_ptr() as u32 - ..__IMAGE_STAGE0_END.as_ptr() as u32, - ), - (RotComponent::Stage0, SlotId::B) => ( - __IMAGE_STAGE0NEXT_BASE.as_ptr() as u32 - ..__IMAGE_STAGE0NEXT_END.as_ptr() as u32, - __IMAGE_STAGE0_BASE.as_ptr() as u32 - ..__IMAGE_STAGE0_END.as_ptr() as u32, - ), +/// Accessor keeps the implementation details of ImageAccess private +enum Accessor<'a> { + // Flash driver, flash device range + Flash { + flash: &'a drv_lpc55_flash::Flash<'a>, + span: FlashRange, + }, + Ram { + buffer: &'a [u8], + span: FlashRange, + }, + // Hybrid is used for later implementation of rollback protection. + // The buffer is used in place of the beginning of the flash range. + _Hybrid { + buffer: &'a [u8], + flash: &'a drv_lpc55_flash::Flash<'a>, + span: FlashRange, + }, +} + +impl Accessor<'_> { + fn at_runtime(&self) -> &Range { + match self { + Accessor::Flash { span, .. } + | Accessor::Ram { span, .. } + | Accessor::_Hybrid { span, .. } => &span.at_runtime, + } + } +} + +/// In addition to images that are located in their respective +/// flash slots, the `update_server` needs to read data from +/// complete and partial images in RAM or split between RAM +/// and flash. +/// The specific cases are when the +/// - image is entirely in flash. +/// - header block is in RAM with the remainder unavailable. +/// - header block is in RAM with the remainder in flash. +/// - entire image is in RAM (in the case of a cached Stage0 image). +/// +/// Calls to methods use offsets into the image which is helpful +/// when dealing with the offsets and sizes found in image headers +/// and the caboose. +pub struct ImageAccess<'a> { + accessor: Accessor<'a>, +} + +impl ImageAccess<'_> { + pub fn new_flash<'a>( + flash: &'a drv_lpc55_flash::Flash<'a>, + component: RotComponent, + slot: SlotId, + ) -> ImageAccess<'a> { + let span = flash_range(component, slot); + ImageAccess { + accessor: Accessor::Flash { flash, span }, } } + + pub fn new_ram( + buffer: &[u8], + component: RotComponent, + slot: SlotId, + ) -> ImageAccess<'_> { + let span = flash_range(component, slot); + ImageAccess { + accessor: Accessor::Ram { buffer, span }, + } + } + + pub fn _new_hybrid<'a>( + flash: &'a drv_lpc55_flash::Flash<'a>, + buffer: &'a [u8], + component: RotComponent, + slot: SlotId, + ) -> ImageAccess<'a> { + let span = flash_range(component, slot); + ImageAccess { + accessor: Accessor::_Hybrid { + flash, + buffer, + span, + }, + } + } + + fn at_runtime(&self) -> &Range { + self.accessor.at_runtime() + } + + /// True if the u32 at offset is contained within the slot. + pub fn is_addressable(&self, offset: u32) -> bool { + let len = self.at_runtime().len() as u32; + if let Some(end) = offset.checked_add(U32_SIZE) { + end <= len + } else { + false + } + } + + /// Fetch a u32 from an image. 
+ pub fn read_word(&self, offset: u32) -> Result { + if !self.is_addressable(offset) { + return Err(UpdateError::OutOfBounds); + } + match &self.accessor { + Accessor::Flash { flash, span } => { + let addr = span + .stored + .start + .checked_add(offset) + .ok_or(UpdateError::OutOfBounds)?; + let mut word = 0u32; + indirect_flash_read(flash, addr, word.as_bytes_mut())?; + Ok(word) + } + Accessor::Ram { buffer, .. } => { + let word_end = (offset as usize) + .checked_add(SIZEOF_U32) + .ok_or(UpdateError::OutOfBounds)?; + Ok(buffer + .get(offset as usize..word_end) + .and_then(u32::read_from) + .ok_or(UpdateError::OutOfBounds)?) + } + Accessor::_Hybrid { + buffer, + flash, + span, + } => { + if (offset as usize) < buffer.len() { + // Word is in the RAM portion + let word_end = (offset as usize) + .checked_add(SIZEOF_U32) + .ok_or(UpdateError::OutOfBounds)?; + Ok(buffer + .get(offset as usize..word_end) + .and_then(u32::read_from) + .ok_or(UpdateError::OutOfBounds)?) + } else { + let addr = span + .stored + .start + .checked_add(offset) + .ok_or(UpdateError::OutOfBounds)?; + let mut word = 0u32; + indirect_flash_read(flash, addr, word.as_bytes_mut())?; + Ok(word) + } + } + } + } + + pub fn read_bytes( + &self, + offset: u32, + buffer: &mut [u8], + ) -> Result<(), UpdateError> { + let len = buffer.len() as u32; + match &self.accessor { + Accessor::Flash { flash, span } => { + let start = span + .stored + .start + .checked_add(offset) + .ok_or(UpdateError::OutOfBounds)?; + let end = + start.checked_add(len).ok_or(UpdateError::OutOfBounds)?; + if span.stored.contains(&start) + && (span.stored.start..=span.stored.end).contains(&end) + { + Ok(indirect_flash_read(flash, start, buffer)?) + } else { + Err(UpdateError::OutOfBounds) + } + } + Accessor::Ram { buffer: src, .. } => { + let end = + offset.checked_add(len).ok_or(UpdateError::OutOfBounds)?; + if let Some(data) = src.get((offset as usize)..(end as usize)) { + buffer.copy_from_slice(data); + Ok(()) + } else { + Err(UpdateError::OutOfBounds) + } + } + Accessor::_Hybrid { + buffer: ram, + flash, + span, + } => { + let mut start_offset = offset as usize; + let mut remainder = buffer.len(); + let end_offset = start_offset + .checked_add(remainder) + .ok_or(UpdateError::OutOfBounds)?; + // Transfer data from the RAM portion of the image + if start_offset < ram.len() { + let ram_end_offset = ram.len().min(end_offset); + // Transfer starts within the RAM part of this image. + let data = ram + .get((start_offset)..ram_end_offset) + .ok_or(UpdateError::OutOfBounds)?; + buffer.copy_from_slice(data); + remainder = remainder + .checked_sub(data.len()) + .ok_or(UpdateError::OutOfBounds)?; + start_offset = ram_end_offset; + } + // Transfer data from the flash-backed portion of the image. + if remainder > 0 { + let start = span + .stored + .start + .checked_add(start_offset as u32) + .ok_or(UpdateError::OutOfBounds)?; + let end = start + .checked_add(remainder as u32) + .ok_or(UpdateError::OutOfBounds)?; + if span.stored.contains(&start) + && (span.stored.start..=span.stored.end).contains(&end) + { + indirect_flash_read(flash, start, buffer)?; + } else { + return Err(UpdateError::OutOfBounds); + } + } + Ok(()) + } + } + } + + /// Get the rounded up length of an LPC55 image if present. + pub fn padded_image_len(&self) -> Result { + let vectors = match self.accessor { + Accessor::Flash { .. 
} => { + let buffer = + &mut [0u8; core::mem::size_of::()]; + self.read_bytes(0u32, buffer.as_bytes_mut())?; + ImageVectorsLpc55::read_from_prefix(&buffer[..]) + .ok_or(UpdateError::OutOfBounds) + } + Accessor::Ram { buffer, .. } | Accessor::_Hybrid { buffer, .. } => { + ImageVectorsLpc55::read_from_prefix(buffer) + .ok_or(UpdateError::OutOfBounds) + } + }?; + let len = vectors.image_length().ok_or(UpdateError::BadLength)?; + round_up_to_flash_page(len).ok_or(UpdateError::BadLength) + } } diff --git a/drv/lpc55-update-server/src/main.rs b/drv/lpc55-update-server/src/main.rs index 7e013d63f..5a0041c3f 100644 --- a/drv/lpc55-update-server/src/main.rs +++ b/drv/lpc55-update-server/src/main.rs @@ -9,8 +9,9 @@ #![no_std] #![no_main] +use crate::images::{validate_header_block, ImageVectorsLpc55}; use core::convert::Infallible; -use core::mem::MaybeUninit; +use core::mem::{size_of, MaybeUninit}; use core::ops::Range; use drv_lpc55_flash::{BYTES_PER_FLASH_PAGE, BYTES_PER_FLASH_WORD}; use drv_lpc55_update_api::{ @@ -28,14 +29,19 @@ use stage0_handoff::{ RotBootStateV2, }; use userlib::*; -use zerocopy::{AsBytes, FromBytes}; +use zerocopy::AsBytes; mod images; -use crate::images::*; +use crate::images::{ + caboose_slice, flash_range, is_current_hubris_image, ImageAccess, + HEADER_BLOCK, +}; -const U32_SIZE: u32 = core::mem::size_of::() as u32; const PAGE_SIZE: u32 = BYTES_PER_FLASH_PAGE as u32; +const SIZEOF_U32: usize = size_of::(); +const U32_SIZE: u32 = SIZEOF_U32 as u32; + #[used] #[link_section = ".bootstate"] static BOOTSTATE: MaybeUninit<[u8; 0x1000]> = MaybeUninit::uninit(); @@ -56,7 +62,7 @@ enum Trace { ringbuf!(Trace, 16, Trace::None); -/// FW_CACHE_MAX accomodates the largest production +/// FW_CACHE_MAX accommodates the largest production /// bootloader image while allowing some room for growth. /// /// NOTE: The erase/flash of stage0 can be interrupted by a power failure or @@ -88,7 +94,7 @@ struct ServerImpl<'a> { // Used to enforce sequential writes from the control plane. next_block: Option, // Keep the fw cache 32-bit aligned to make NXP header access easier. - fw_cache: &'a mut [u32; FW_CACHE_MAX / core::mem::size_of::()], + fw_cache: &'a mut [u32; FW_CACHE_MAX / SIZEOF_U32], } const BLOCK_SIZE_BYTES: usize = BYTES_PER_FLASH_PAGE; @@ -175,7 +181,7 @@ impl idl::InOrderUpdateImpl for ServerImpl<'_> { return Err(UpdateError::BadLength.into()); } - // Match the behvaior of the CMSIS flash driver where erased bytes are + // Match the behavior of the CMSIS flash driver where erased bytes are // read as 0xff so the image is padded with 0xff const ERASE_BYTE: u8 = 0xff; let mut flash_page = [ERASE_BYTE; BLOCK_SIZE_BYTES]; @@ -188,8 +194,9 @@ impl idl::InOrderUpdateImpl for ServerImpl<'_> { .read_range(0..len, &mut header_block[..]) .map_err(|_| RequestError::Fail(ClientError::WentAway))?; header_block[len..].fill(ERASE_BYTE); - if let Err(e) = validate_header_block(component, slot, header_block) - { + let next_image = + ImageAccess::new_ram(header_block, component, slot); + if let Err(e) = validate_header_block(&next_image) { self.header_block = None; return Err(e.into()); } @@ -253,7 +260,7 @@ impl idl::InOrderUpdateImpl for ServerImpl<'_> { // Now erase the unused portion of the flash slot so that // flash slot has predictable contents and the FWID for it // has some meaning. 
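A simplified host-side model of the `ImageAccess` idea introduced above: callers use image-relative offsets, and the accessor decides whether a read is satisfied from the RAM-staged header block, from flash, or from both (the `_Hybrid` case). Plain slices stand in for the flash driver here, and the type and method names are illustrative; this is a sketch of the split-read approach, not the in-tree implementation.

// Image-relative reads served first from a RAM header block, then from
// (simulated) flash, with checked offset arithmetic throughout.
struct HybridImage<'a> {
    ram: &'a [u8],   // header block staged in RAM
    flash: &'a [u8], // the whole image as stored in flash, same offsets
}

impl HybridImage<'_> {
    fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<(), ()> {
        let end = offset.checked_add(buf.len()).ok_or(())?;
        let mut filled = 0;
        if offset < self.ram.len() {
            // Transfer starts within the RAM part of the image.
            let ram_end = self.ram.len().min(end);
            let src = self.ram.get(offset..ram_end).ok_or(())?;
            buf[..src.len()].copy_from_slice(src);
            filled = src.len();
        }
        if filled < buf.len() {
            // Remainder comes from the flash-backed part of the image.
            let src = self.flash.get(offset + filled..end).ok_or(())?;
            buf[filled..].copy_from_slice(src);
        }
        Ok(())
    }
}

fn main() {
    let flash: Vec<u8> = (0u8..32).collect();
    let img = HybridImage { ram: &flash[..8], flash: &flash[..] };
    let mut out = [0u8; 12];
    img.read_bytes(4, &mut out).unwrap(); // spans the RAM/flash boundary
    assert_eq!(&out[..], &flash[4..16]);
}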
- let range = image_range(component, slot).0; + let range = &flash_range(component, slot).stored; let erase_start = range.start + (endblock as u32 * PAGE_SIZE); self.flash_erase_range(erase_start..range.end)?; self.state = UpdateState::Finished; @@ -270,7 +277,7 @@ impl idl::InOrderUpdateImpl for ServerImpl<'_> { } // TODO(AJS): Remove this in favor of `status`, once SP code is updated. - // This has ripple effects up thorugh control-plane-agent. + // This has ripple effects up through control-plane-agent. fn current_version( &mut self, _: &RecvMessage, @@ -368,30 +375,26 @@ impl idl::InOrderUpdateImpl for ServerImpl<'_> { fn read_raw_caboose( &mut self, - _msg: &RecvMessage, + msg: &RecvMessage, slot: SlotId, offset: u32, data: Leased, ) -> Result<(), RequestError> { - let caboose = caboose_slice(&self.flash, RotComponent::Hubris, slot)?; - if offset as usize + data.len() > caboose.len() { - return Err(RawCabooseError::InvalidRead.into()); - } - copy_from_caboose_chunk( - &self.flash, - caboose, - offset..offset + data.len() as u32, + self.component_read_raw_caboose( + msg, + RotComponent::Hubris, + slot, + offset, data, ) } fn caboose_size( &mut self, - _: &RecvMessage, + msg: &RecvMessage, slot: SlotId, ) -> Result> { - let caboose = caboose_slice(&self.flash, RotComponent::Hubris, slot)?; - Ok(caboose.end - caboose.start) + self.component_caboose_size(msg, RotComponent::Hubris, slot) } fn switch_default_image( @@ -452,8 +455,13 @@ impl idl::InOrderUpdateImpl for ServerImpl<'_> { component: RotComponent, slot: SlotId, ) -> Result> { - let caboose = caboose_slice(&self.flash, component, slot)?; - Ok(caboose.end - caboose.start) + let image = ImageAccess::new_flash(&self.flash, component, slot); + let caboose = caboose_slice(&image)?; + if let Some(caboose_len) = caboose.end.checked_sub(caboose.start) { + Ok(caboose_len) + } else { + Err(RawCabooseError::MissingCaboose.into()) + } } fn component_read_raw_caboose( @@ -464,12 +472,16 @@ impl idl::InOrderUpdateImpl for ServerImpl<'_> { offset: u32, data: Leased, ) -> Result<(), idol_runtime::RequestError> { - let caboose = caboose_slice(&self.flash, component, slot)?; - if offset as usize + data.len() > caboose.len() { + let image = ImageAccess::new_flash(&self.flash, component, slot); + let caboose = caboose_slice(&image)?; + let Some(caboose_len) = caboose.end.checked_sub(caboose.start) else { + return Err(RawCabooseError::MissingCaboose.into()); + }; + if offset as usize + data.len() > (caboose_len as usize) { return Err(RawCabooseError::InvalidRead.into()); } - copy_from_caboose_chunk( - &self.flash, + copy_from_caboose_chunk_to_lease( + &image, caboose, offset..offset + data.len() as u32, data, @@ -679,7 +691,7 @@ impl ServerImpl<'_> { // through 3) to ensure that RoT image signatures are valid before any // system continues to step 4. // - // TBD: While Failures up to step 3 do not adversly affect the RoT, + // TBD: While Failures up to step 3 do not adversely affect the RoT, // resetting the RoT to evaluate signatures may be service affecting // to the system depending on how the RoT and SP interact with respect // to their reset handling and the RoT measurement of the SP. @@ -690,14 +702,13 @@ impl ServerImpl<'_> { // updating stage0next to the original stage0 contents that were // validated reset and then copying those to stage0. 
// - // It is assumed that a hash collision is not computaionally feasible + // It is assumed that a hash collision is not computationally feasible // for either the image hash done by rot-startup or used by the ROM // signature routine. // Read stage0next contents into RAM. - let staged = image_range(RotComponent::Stage0, SlotId::B); - let len = self.read_flash_image_to_cache(staged.0)?; - let bootloader = &self.fw_cache[..len / core::mem::size_of::()]; + let len = self.read_stage0next_to_cache()?; + let bootloader = &self.fw_cache[..len / SIZEOF_U32]; let mut hash = Sha3_256::new(); for page in bootloader.as_bytes().chunks(512) { @@ -722,12 +733,9 @@ impl ServerImpl<'_> { }; // Don't risk an update if the cache already matches the bootloader. - let stage0 = image_range(RotComponent::Stage0, SlotId::A); - match self.compare_cache_to_flash(&stage0.0) { + match self.compare_cache_to_stage0() { Err(UpdateError::ImageMismatch) => { - if let Err(e) = - self.write_cache_to_flash(RotComponent::Stage0, SlotId::A) - { + if let Err(e) = self.write_cache_to_stage0() { // N.B. An error here is bad since it means we've likely // bricked the machine if we reset now. // We do not want the RoT reset. @@ -757,8 +765,10 @@ impl ServerImpl<'_> { // Finish by erasing the unused portion of flash bank. // An error here means that the stage0 slot may not be clean but at least // it has the intended bootloader written. - let erase_start = stage0.0.start.checked_add(len as u32).unwrap_lite(); - self.flash_erase_range(erase_start..stage0.0.end)?; + let stage0 = &flash_range(RotComponent::Stage0, SlotId::A).stored; + if let Some(erase_start) = stage0.start.checked_add(len as u32) { + self.flash_erase_range(erase_start..stage0.end)?; + } Ok(()) } @@ -781,105 +791,91 @@ impl ServerImpl<'_> { } } - fn compare_cache_to_flash( - &self, - span: &Range, - ) -> Result<(), UpdateError> { - // Is there a cached image? - // no, return error - + fn compare_cache_to_stage0(&self) -> Result<(), UpdateError> { // Lengths are rounded up to a flash page boundary. - let clen = self.cache_image_len()?; - let flen = self.flash_image_len(span)?; - if clen != flen { + let clen = self.cached_stage0_image_len()?; + + let stage0 = ImageAccess::new_flash( + &self.flash, + RotComponent::Stage0, + SlotId::A, + ); + if let Ok(stage0_len) = stage0.padded_image_len() { + if clen != stage0_len as usize { + // Different sizes cannot match. + // Ok to update if signature on stage0next is good. + return Err(UpdateError::ImageMismatch); + } + } else { + // We can't get a length for stage0. + // Perhaps it has been corrupted by an earlier update attempt + // and we're desperately trying to fix it by writing a good stage0 + // image. + // Ok to update if signature on stage0next is good. return Err(UpdateError::ImageMismatch); - } + }; // compare flash page to cache - let cached = - self.fw_cache[0..flen / core::mem::size_of::()].as_bytes(); + let cached = self.fw_cache[0..clen / SIZEOF_U32].as_bytes(); let mut flash_page = [0u8; BYTES_PER_FLASH_PAGE]; - for addr in (0..flen).step_by(BYTES_PER_FLASH_PAGE) { - let size = if addr + BYTES_PER_FLASH_PAGE > flen { - flen - addr - } else { - BYTES_PER_FLASH_PAGE + for addr in (0u32..(clen as u32)).step_by(BYTES_PER_FLASH_PAGE) { + // If we encounter an unreadable page in stage0 then + // ok to update if signature on stage0next later tests ok. 
+ stage0 + .read_bytes(addr, flash_page.as_bytes_mut()) + .map_err(|_| UpdateError::ImageMismatch)?; + let Some(end) = (addr as usize).checked_add(BYTES_PER_FLASH_PAGE) + else { + return Err(UpdateError::ImageMismatch); }; - - indirect_flash_read( - &self.flash, - addr as u32, - &mut flash_page[..size], - )?; - if flash_page[0..size] != cached[addr..addr + size] { + if flash_page != cached[(addr as usize)..end] { return Err(UpdateError::ImageMismatch); } } + // Images already match, don't wear out flash more than necessary. Ok(()) } - // Looking at a region of flash, determine if there is a possible NXP - // image programmed. Return the length in bytes of the flash pages - // comprising the image including padding to fill to a page boundary. - fn flash_image_len(&self, span: &Range) -> Result { - let buf = &mut [0u32; 1]; - indirect_flash_read( - &self.flash, - span.start + LENGTH_OFFSET as u32, - buf[..].as_bytes_mut(), - )?; - if let Some(len) = round_up_to_flash_page(buf[0]) { - // The minimum image size should be further constrained - // but this is enough bytes for an NXP header and not - // bigger than the flash slot. - if len as usize <= span.len() && len >= HEADER_OFFSET { - return Ok(len as usize); + fn cached_stage0_image_len(&self) -> Result { + if let Ok(vectors) = ImageVectorsLpc55::try_from( + self.fw_cache[0..BLOCK_SIZE_BYTES].as_bytes(), + ) { + // Get the length of the image iff it fits in its ultimate destination. + let exec_range = + &flash_range(RotComponent::Stage0, SlotId::A).at_runtime; + if let Some(image) = vectors.padded_image_range(exec_range) { + return Ok(image.len()); } } Err(UpdateError::BadLength) } - fn cache_image_len(&self) -> Result { - let len = round_up_to_flash_page( - self.fw_cache[LENGTH_OFFSET / core::mem::size_of::()], - ) - .ok_or(UpdateError::BadLength)?; - - if len as usize > self.fw_cache.as_bytes().len() || len < HEADER_OFFSET - { - return Err(UpdateError::BadLength); - } - Ok(len as usize) - } - - fn read_flash_image_to_cache( - &mut self, - span: Range, - ) -> Result { - // Returns error if flash page is erased. - let staged = image_range(RotComponent::Stage0, SlotId::B); - let len = self.flash_image_len(&staged.0)?; - if len as u32 > span.end || len > self.fw_cache.as_bytes().len() { + // Note: The only use case is to read stage0next into RAM. + fn read_stage0next_to_cache(&mut self) -> Result { + let stage0next = ImageAccess::new_flash( + &self.flash, + RotComponent::Stage0, + SlotId::B, + ); + let len = stage0next.padded_image_len()? as usize; + if len > self.fw_cache.as_bytes().len() { + // This stage0 image is too big for our buffer. 
return Err(UpdateError::BadLength); } - indirect_flash_read( - &self.flash, - span.start, - self.fw_cache[0..len / core::mem::size_of::()].as_bytes_mut(), - )?; + stage0next + .read_bytes(0, self.fw_cache.as_bytes_mut()[0..len].as_mut())?; Ok(len) } - fn write_cache_to_flash( - &mut self, - component: RotComponent, - slot: SlotId, - ) -> Result<(), UpdateError> { - let clen = self.cache_image_len()?; + fn write_cache_to_stage0(&mut self) -> Result<(), UpdateError> { + let clen = self.cached_stage0_image_len()?; if clen % BYTES_PER_FLASH_PAGE != 0 { return Err(UpdateError::BadLength); } - let span = image_range(component, slot).0; - if span.end < span.start + clen as u32 { + let span = &flash_range(RotComponent::Stage0, SlotId::A); + let Some(end) = span.stored.start.checked_add(clen as u32) else { + return Err(UpdateError::BadLength); + }; + if span.stored.end < end { return Err(UpdateError::BadLength); } // Sanity check could be repeated here. @@ -891,8 +887,8 @@ impl ServerImpl<'_> { let flash_page = block.try_into().unwrap_lite(); do_block_write( &mut self.flash, - component, - slot, + RotComponent::Stage0, + SlotId::A, block_num, flash_page, )?; @@ -1158,7 +1154,7 @@ fn do_block_write( let page_num = block_num as u32; // Can only update opposite image - if same_image(component, slot) { + if is_current_hubris_image(component, slot) { return Err(UpdateError::RunningImage); } @@ -1188,7 +1184,7 @@ fn target_addr( slot: SlotId, page_num: u32, ) -> Option { - let range = image_range(component, slot).0; + let range = &flash_range(component, slot).stored; // This is safely calculating addr = base + page_num * PAGE_SIZE let addr = page_num @@ -1203,81 +1199,8 @@ fn target_addr( Some(addr) } -/// Finds the memory range which contains the caboose for the given slot -/// -/// This implementation has similar logic to the one in `stm32h7-update-server`, -/// but uses indirect reads instead of mapping the alternate bank into flash. -fn caboose_slice( - flash: &drv_lpc55_flash::Flash<'_>, - component: RotComponent, - slot: SlotId, -) -> Result, RawCabooseError> { - let flash_range = image_range(component, slot).0; - - // If all is going according to plan, there will be a valid Hubris image - // flashed into the other slot, delimited by `__IMAGE_A/B_BASE` and - // `__IMAGE_A/B_END` (which are symbols injected by the linker). - // - // We'll first want to read the image header, which is at a fixed - // location at the end of the vector table. The length of the vector - // table is fixed in hardware, so this should never change. - const HEADER_OFFSET: u32 = 0x130; - let mut header = ImageHeader::new_zeroed(); - - indirect_flash_read( - flash, - flash_range.start + HEADER_OFFSET, - header.as_bytes_mut(), - ) - .map_err(|_| RawCabooseError::ReadFailed)?; - if header.magic != HEADER_MAGIC { - return Err(RawCabooseError::NoImageHeader); - } - - // Calculate where the image header implies that the image should end - // - // This is a one-past-the-end value. - let image_end = flash_range.start + header.total_image_len; - - // Then, check that value against the BANK2 bounds. 
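The stage0 path above boils down to: read stage0next into the RAM cache, hash it with SHA3-256 so the result can be checked against boot-time FWID information, then compare it page-by-page against the active stage0 and only rewrite flash when the two differ. A host-side sketch of those two comparisons follows, assuming the `sha3` crate and the 512-byte chunk size used for hashing above; the function names are illustrative and the FWID check itself is omitted.

use sha3::{Digest, Sha3_256};

const PAGE: usize = 512;

/// Hash an image in 512-byte pages, as the stage0 code above does.
fn fwid_of(image: &[u8]) -> Vec<u8> {
    let mut hash = Sha3_256::new();
    for page in image.chunks(PAGE) {
        hash.update(page);
    }
    hash.finalize().to_vec()
}

/// True when `cached` (stage0next) differs from `active` (stage0) and a
/// flash rewrite is therefore worth the wear.
fn needs_write(cached: &[u8], active: &[u8]) -> bool {
    if cached.len() != active.len() {
        return true; // different padded lengths can never match
    }
    cached
        .chunks(PAGE)
        .zip(active.chunks(PAGE))
        .any(|(c, a)| c != a)
}

fn main() {
    let stage0next = vec![0xA5u8; 2 * PAGE];
    let stage0 = vec![0xA5u8; 2 * PAGE];
    // The digest would be compared against boot-time info before any write.
    println!("fwid = {:x?}", fwid_of(&stage0next));
    assert!(!needs_write(&stage0next, &stage0));
}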
- // - // Safety: populated by the linker, so this should be valid - if image_end > flash_range.end { - return Err(RawCabooseError::MissingCaboose); - } - - // By construction, the last word of the caboose is its size as a `u32` - let mut caboose_size = 0u32; - indirect_flash_read( - flash, - image_end - U32_SIZE, - caboose_size.as_bytes_mut(), - ) - .map_err(|_| RawCabooseError::ReadFailed)?; - - let caboose_start = image_end.saturating_sub(caboose_size); - let caboose_range = if caboose_start < flash_range.start { - // This branch will be encountered if there's no caboose, because - // then the nominal caboose size will be 0xFFFFFFFF, which will send - // us out of the bank2 region. - return Err(RawCabooseError::MissingCaboose); - } else { - // Safety: we know this pointer is within the programmed flash region, - // since it's checked above. - let mut v = 0u32; - indirect_flash_read(flash, caboose_start, v.as_bytes_mut()) - .map_err(|_| RawCabooseError::ReadFailed)?; - if v == CABOOSE_MAGIC { - caboose_start + U32_SIZE..image_end - U32_SIZE - } else { - return Err(RawCabooseError::MissingCaboose); - } - }; - Ok(caboose_range) -} - -fn copy_from_caboose_chunk( - flash: &drv_lpc55_flash::Flash<'_>, +fn copy_from_caboose_chunk_to_lease( + image: &ImageAccess<'_>, caboose: core::ops::Range, pos: core::ops::Range, data: Leased, @@ -1289,17 +1212,22 @@ fn copy_from_caboose_chunk( } const BUF_SIZE: usize = 128; - let mut offset = 0; + let mut offset = 0u32; let mut buf = [0u8; BUF_SIZE]; while remaining > 0 { let count = remaining.min(buf.len() as u32); let buf = &mut buf[..count as usize]; - indirect_flash_read(flash, caboose.start + pos.start + offset, buf) + image + .read_bytes(caboose.start + pos.start + offset, buf) .map_err(|_| RequestError::from(RawCabooseError::ReadFailed))?; data.write_range(offset as usize..(offset + count) as usize, buf) .map_err(|_| RequestError::Fail(ClientError::WentAway))?; - offset += count; - remaining -= count; + offset = offset + .checked_add(count) + .ok_or(RawCabooseError::ReadFailed)?; + remaining = remaining + .checked_sub(count) + .ok_or(RawCabooseError::ReadFailed)?; } Ok(()) } @@ -1351,7 +1279,7 @@ fn main() -> ! { // Go ahead and put the HASHCRYPT unit into reset. syscon.enter_reset(drv_lpc55_syscon_api::Peripheral::HashAes); let fw_cache = mutable_statics::mutable_statics! { - static mut FW_CACHE: [u32; FW_CACHE_MAX / core::mem::size_of::()] = [|| 0; _]; + static mut FW_CACHE: [u32; FW_CACHE_MAX / SIZEOF_U32] = [|| 0; _]; }; let mut server = ServerImpl { header_block: None,
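For completeness, here is a host-side sketch of the chunked copy performed by `copy_from_caboose_chunk_to_lease` above: the requested caboose range is read through the image accessor into a small fixed-size buffer (128 bytes in the server) and written out piecewise, with offset and remaining-byte arithmetic checked the whole way. A slice stands in for the image and a `Vec` for the lease; the function name is illustrative.

const BUF_SIZE: usize = 128;

/// Copy `range` of `image` into `out` in BUF_SIZE chunks, mirroring the
/// fixed stack buffer used when writing out to a lease.
fn copy_range(image: &[u8], range: std::ops::Range<usize>, out: &mut Vec<u8>) -> Result<(), ()> {
    let mut remaining = range.len();
    let mut offset = range.start;
    let mut buf = [0u8; BUF_SIZE];
    while remaining > 0 {
        let count = remaining.min(BUF_SIZE);
        let chunk = &mut buf[..count];
        let end = offset.checked_add(count).ok_or(())?;
        chunk.copy_from_slice(image.get(offset..end).ok_or(())?);
        out.extend_from_slice(chunk);
        offset = end;
        remaining = remaining.checked_sub(count).ok_or(())?;
    }
    Ok(())
}

fn main() {
    let image: Vec<u8> = (0..1000u32).map(|i| i as u8).collect();
    let mut out = Vec::new();
    copy_range(&image, 100..612, &mut out).unwrap();
    assert_eq!(out.as_slice(), &image[100..612]);
}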