From 91a16b4c39a5a205d5d34f1f761720107e078f8b Mon Sep 17 00:00:00 2001
From: Alasdair
Date: Tue, 14 May 2024 22:39:33 +0100
Subject: [PATCH] Handle address translation for misaligned loads and stores
 better

Refactor the LOAD and STORE instructions so they split misaligned
accesses into multiple sub-accesses and perform address translation
separately. This means we should handle the case where a misaligned
access straddles a page boundary in a sensible way, even if we don't
yet cover the full range of possibilities allowed for any RISC-V
implementation.

There are options for the order in which the misaligned sub-accesses
happen, i.e. from high-to-low or from low-to-high, as well as the
granularity of the splitting, either all the way to bytes or to the
largest aligned size. The splitting can also be disabled if an
implementation supports misaligned accesses in hardware. The Zama16b
extension is supported with an --enable-zama16b flag on the simulator.

In addition, tidy up the implementation in a few ways:

- Very long lines in the LOAD encdec were fixed by adding a helper

- Add some line breaks in the code so it reads as less claustrophobic

- Ensure we use the same names for arguments in encdec/execute/assembly.
  Previously we used 'size' and 'width'. I opted for 'width'
  consistently.
---
 Makefile                         |   6 +-
 c_emulator/riscv_platform.c      |  20 +++
 c_emulator/riscv_platform.h      |   5 +
 c_emulator/riscv_platform_impl.c |   6 +
 c_emulator/riscv_platform_impl.h |   5 +
 c_emulator/riscv_sim.c           |  31 +++++
 model/prelude.sail               |   1 +
 model/riscv_insts_aext.sail      | 116 +++++++++-------
 model/riscv_insts_base.sail      |  94 +++++++------
 model/riscv_mem.sail             |   8 ++
 model/riscv_vmem_utils.sail      | 225 +++++++++++++++++++++++++++++++
 11 files changed, 420 insertions(+), 97 deletions(-)
 create mode 100644 model/riscv_vmem_utils.sail

diff --git a/Makefile b/Makefile
index 119d4e321..6e3150d1f 100644
--- a/Makefile
+++ b/Makefile
@@ -92,6 +92,7 @@ SAIL_VM_SRCS += riscv_vmem_pte.sail
 SAIL_VM_SRCS += riscv_vmem_ptw.sail
 SAIL_VM_SRCS += riscv_vmem_tlb.sail
 SAIL_VM_SRCS += riscv_vmem.sail
+SAIL_VM_SRCS += riscv_vmem_utils.sail

 # Non-instruction sources
 PRELUDE = prelude.sail $(SAIL_XLEN) $(SAIL_FLEN) $(SAIL_VLEN) prelude_mem_metadata.sail prelude_mem.sail
@@ -194,7 +195,7 @@ RISCV_EXTRAS_LEM = $(addprefix handwritten_support/,$(RISCV_EXTRAS_LEM_FILES))

 .PHONY:
 all: c_emulator/riscv_sim_$(ARCH)
-.PHONY: all
+.PHONY: all check_properties

 # the following ensures empty sail-generated .c files don't hang around and
 # break future builds if sail exits badly
@@ -203,6 +204,9 @@ all: c_emulator/riscv_sim_$(ARCH)
 check: $(SAIL_SRCS) model/main.sail Makefile
 	$(SAIL) $(SAIL_FLAGS) $(SAIL_SRCS) model/main.sail

+check_properties: $(SAIL_SRCS) Makefile
+	$(SAIL) --smt --smt-auto $(SAIL_FLAGS) $(SAIL_SRCS)
+
 interpret: $(SAIL_SRCS) model/main.sail
 	$(SAIL) -i $(SAIL_FLAGS) $(SAIL_SRCS) model/main.sail

diff --git a/c_emulator/riscv_platform.c b/c_emulator/riscv_platform.c
index ae67bac42..d5e11d834 100644
--- a/c_emulator/riscv_platform.c
+++ b/c_emulator/riscv_platform.c
@@ -67,6 +67,26 @@ bool sys_enable_zicboz(unit u)
   return rv_enable_zicboz;
 }

+bool sys_enable_zama16b(unit u)
+{
+  return rv_enable_zama16b;
+}
+
+bool sys_misaligned_order_decreasing(unit u)
+{
+  return rv_misaligned_order_decreasing;
+}
+
+bool sys_misaligned_to_byte(unit u)
+{
+  return rv_misaligned_to_byte;
+}
+
+uint64_t sys_misaligned_allowed_within(unit u)
+{
+  return rv_misaligned_allowed_within;
+}
+
 uint64_t sys_pmp_count(unit u)
 {
   return rv_pmp_count;
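As a rough illustration of the splitting behaviour described in the commit
message: an 8-byte store to vaddr = 0x1006 (2-byte aligned, so misaligned for
its width) is performed as four 2-byte sub-accesses, translated at 0x1006,
0x1008, 0x100A and 0x100C in increasing order by default, or from 0x100C down
to 0x1006 with --misaligned-order-decreasing; with --misaligned-to-byte the
same store becomes eight 1-byte sub-accesses. A sketch of the first case as a
property, assuming the split_misaligned helper added in
model/riscv_vmem_utils.sail below and the atomicity options left at their
defaults (illustrative only, not a line of the patch):

$[property]
function prop_split_example(vaddr : xlenbits) -> bool = {
  /* a 2-byte-aligned (but not 4-byte-aligned) address, with byte-splitting
   * and the atomic-within options disabled */
  if vaddr[1 .. 0] == 0b10 & not(sys_misaligned_to_byte())
     & sys_misaligned_allowed_within() == zeros() & not(sys_enable_zama16b())
  then {
    let (n, bytes) = split_misaligned(vaddr, 8);
    n == 4 & bytes == 2
  } else true
}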
diff --git a/c_emulator/riscv_platform.h b/c_emulator/riscv_platform.h
index fe81c901b..024e64fcb 100644
--- a/c_emulator/riscv_platform.h
+++ b/c_emulator/riscv_platform.h
@@ -12,6 +12,11 @@ bool sys_enable_vext(unit);
 bool sys_enable_bext(unit);
 bool sys_enable_zicbom(unit);
 bool sys_enable_zicboz(unit);
+bool sys_enable_zama16b(unit);
+
+bool sys_misaligned_order_decreasing(unit);
+bool sys_misaligned_to_byte(unit);
+uint64_t sys_misaligned_allowed_within(unit);

 uint64_t sys_pmp_count(unit);
 uint64_t sys_pmp_grain(unit);

diff --git a/c_emulator/riscv_platform_impl.c b/c_emulator/riscv_platform_impl.c
index 2713715cc..74d32d02a 100644
--- a/c_emulator/riscv_platform_impl.c
+++ b/c_emulator/riscv_platform_impl.c
@@ -9,6 +9,11 @@ uint64_t rv_pmp_grain = 0;
 uint64_t rv_vector_vlen_exp = 0x9;
 uint64_t rv_vector_elen_exp = 0x6;

+/* Defaults for misaligned access behavior in the generated simulator */
+bool rv_misaligned_order_decreasing = false;
+bool rv_misaligned_to_byte = false;
+uint64_t rv_misaligned_allowed_within = 0;
+
 bool rv_enable_svinval = false;
 bool rv_enable_zcb = false;
 bool rv_enable_zfinx = false;
@@ -19,6 +24,7 @@ bool rv_enable_vext = true;
 bool rv_enable_bext = false;
 bool rv_enable_zicbom = false;
 bool rv_enable_zicboz = false;
+bool rv_enable_zama16b = false;

 bool rv_enable_dirty_update = false;
 bool rv_enable_misaligned = false;

diff --git a/c_emulator/riscv_platform_impl.h b/c_emulator/riscv_platform_impl.h
index 8bda5e624..7a10ffd8d 100644
--- a/c_emulator/riscv_platform_impl.h
+++ b/c_emulator/riscv_platform_impl.h
@@ -14,6 +14,10 @@ extern uint64_t rv_pmp_grain;
 extern uint64_t rv_vector_vlen_exp;
 extern uint64_t rv_vector_elen_exp;

+extern bool rv_misaligned_order_decreasing;
+extern bool rv_misaligned_to_byte;
+extern uint64_t rv_misaligned_allowed_within;
+
 extern bool rv_enable_svinval;
 extern bool rv_enable_zcb;
 extern bool rv_enable_zfinx;
@@ -23,6 +27,7 @@ extern bool rv_enable_vext;
 extern bool rv_enable_bext;
 extern bool rv_enable_zicbom;
 extern bool rv_enable_zicboz;
+extern bool rv_enable_zama16b;
 extern bool rv_enable_writable_misa;
 extern bool rv_enable_dirty_update;
 extern bool rv_enable_misaligned;

diff --git a/c_emulator/riscv_sim.c b/c_emulator/riscv_sim.c
index 78838462a..cb7531c00 100644
--- a/c_emulator/riscv_sim.c
+++ b/c_emulator/riscv_sim.c
@@ -58,7 +58,11 @@ enum {
   OPT_ENABLE_ZCB,
   OPT_ENABLE_ZICBOM,
   OPT_ENABLE_ZICBOZ,
+  OPT_ENABLE_ZAMA16B,
   OPT_CACHE_BLOCK_SIZE,
+  OPT_MISALIGNED_ORDER_DEC,
+  OPT_MISALIGNED_TO_BYTE,
+  OPT_MISALIGNED_WITHIN,
 };

 static bool do_dump_dts = false;
@@ -133,6 +137,9 @@ static struct option options[] = {
     {"enable-misaligned",           no_argument,       0, 'm'                     },
     {"pmp-count",                   required_argument, 0, OPT_PMP_COUNT           },
     {"pmp-grain",                   required_argument, 0, OPT_PMP_GRAIN           },
+    {"misaligned-order-decreasing", no_argument,       0, OPT_MISALIGNED_ORDER_DEC},
+    {"misaligned-to-byte",          no_argument,       0, OPT_MISALIGNED_TO_BYTE  },
+    {"misaligned-allowed-within",   required_argument, 0, OPT_MISALIGNED_WITHIN   },
     {"ram-size",                    required_argument, 0, 'z'                     },
     {"disable-compressed",          no_argument,       0, 'C'                     },
     {"disable-writable-misa",       no_argument,       0, 'I'                     },
@@ -158,6 +165,7 @@ static struct option options[] = {
     {"enable-zcb",                  no_argument,       0, OPT_ENABLE_ZCB          },
     {"enable-zicbom",               no_argument,       0, OPT_ENABLE_ZICBOM       },
     {"enable-zicboz",               no_argument,       0, OPT_ENABLE_ZICBOZ       },
+    {"enable-zama16b",              no_argument,       0, OPT_ENABLE_ZAMA16B      },
     {"cache-block-size",            required_argument, 0, OPT_CACHE_BLOCK_SIZE    },
 #ifdef SAILCOV
     {"sailcov-file",                required_argument, 0, 'c'                     },
@@ -334,6 +342,25 @@ static int process_args(int argc, char **argv)
       }
       rv_pmp_grain = pmp_grain;
       break;
+    case OPT_MISALIGNED_ORDER_DEC:
+      fprintf(stderr,
+              "misaligned access virtual addresses will be translated in "
+              "decreasing order.\n");
+      rv_misaligned_order_decreasing = true;
+      break;
+    case OPT_MISALIGNED_TO_BYTE:
+      fprintf(stderr,
+              "misaligned accesses will be split into individual "
+              "byte operations.\n");
+      rv_misaligned_to_byte = true;
+      break;
+    case OPT_MISALIGNED_WITHIN:
+      rv_misaligned_allowed_within = atol(optarg);
+      fprintf(stderr,
+              "misaligned accesses will be atomic when within %" PRIu64
+              "-byte regions.\n",
+              rv_misaligned_allowed_within);
+      break;
     case 'C':
       fprintf(stderr, "disabling RVC compressed instructions.\n");
       rv_enable_rvc = false;
@@ -424,6 +451,10 @@ static int process_args(int argc, char **argv)
       fprintf(stderr, "enabling Zicbom extension.\n");
       rv_enable_zicbom = true;
       break;
+    case OPT_ENABLE_ZAMA16B:
+      fprintf(stderr, "enabling Zama16b extension.\n");
+      rv_enable_zama16b = true;
+      break;
     case OPT_ENABLE_ZICBOZ:
       fprintf(stderr, "enabling Zicboz extension.\n");
       rv_enable_zicboz = true;
       break;

diff --git a/model/prelude.sail b/model/prelude.sail
index 8c03c58da..efa7455ec 100644
--- a/model/prelude.sail
+++ b/model/prelude.sail
@@ -10,6 +10,7 @@ default Order dec

 $include
 $include
+$include
 $include
 $include
 $include

diff --git a/model/riscv_insts_aext.sail b/model/riscv_insts_aext.sail
index 17863b820..7b35297e4 100644
--- a/model/riscv_insts_aext.sail
+++ b/model/riscv_insts_aext.sail
@@ -81,20 +81,30 @@ function clause execute(LOADRES(aq, rl, rs1, width, rd)) = {
    * Extensions might perform additional checks on address validity.
    */
   match ext_data_get_addr(rs1, zeros(), Read(Data), width_bytes) {
-    Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL },
+    Ext_DataAddr_Error(e) => {
+      ext_handle_data_check_error(e);
+      RETIRE_FAIL
+    },
+
     Ext_DataAddr_OK(vaddr) => {
       /* "LR faults like a normal load, even though it's in the AMO major opcode space."
        * - Andrew Waterman, isa-dev, 10 Jul 2018.
        */
-      if not(is_aligned(vaddr, width))
-      then { handle_mem_exception(vaddr, E_Load_Addr_Align()); RETIRE_FAIL }
-      else match translateAddr(vaddr, Read(Data)) {
-        TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
-        TR_Address(addr, _) =>
-          match mem_read(Read(Data), addr, width_bytes, aq, aq & rl, true) {
-            MemValue(result) => { load_reservation(addr); X(rd) = sign_extend(result); RETIRE_SUCCESS },
-            MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }
-          },
+      if not(is_aligned(vaddr, width)) then {
+        handle_mem_exception(vaddr, E_Load_Addr_Align());
+        return RETIRE_FAIL
+      };
+
+      match vmem_read(vaddr, width_bytes, aq, aq & rl, true) {
+        Err(vaddr, e) => {
+          handle_mem_exception(vaddr, e);
+          RETIRE_FAIL
+        },
+
+        Ok(data) => {
+          X(rd) = sign_extend(data);
+          RETIRE_SUCCESS
+        },
       }
     }
   }
@@ -116,49 +126,51 @@ function clause execute (STORECON(aq, rl, rs2, rs1, width, rd)) = {
   // This is checked during decoding.
   assert(width_bytes <= xlen_bytes);

-  if speculate_conditional () == false then {
-    /* should only happen in rmem
-     * rmem: allow SC to fail very early
+  if speculate_conditional() == false then {
+    /* should only happen in RMEM
+     * RMEM: allow SC to fail very early
      */
-    X(rd) = zero_extend(0b1); RETIRE_SUCCESS
-  } else {
-    /* normal non-rmem case
-     * rmem: SC is allowed to succeed (but might fail later)
-     */
-    /* Get the address, X(rs1) (no offset).
-     * Extensions might perform additional checks on address validity.
-     */
-    match ext_data_get_addr(rs1, zeros(), Write(Data), width_bytes) {
-      Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL },
-      Ext_DataAddr_OK(vaddr) => {
-        if not(is_aligned(vaddr, width))
-        then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); RETIRE_FAIL }
-        else {
-          match translateAddr(vaddr, Write(Data)) { /* Write and ReadWrite are equivalent here:
-                                                     * both result in a SAMO exception */
-            TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
-            TR_Address(addr, _) => {
-              // Check reservation with physical address.
-              if not(match_reservation(addr)) then {
-                /* cannot happen in rmem */
-                X(rd) = zero_extend(0b1); cancel_reservation(); RETIRE_SUCCESS
-              } else {
-                let eares = mem_write_ea(addr, width_bytes, aq & rl, rl, true);
-                match eares {
-                  MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
-                  MemValue(_) => {
-                    let rs2_val = X(rs2);
-                    match mem_write_value(addr, width_bytes, rs2_val[width_bytes * 8 - 1 .. 0], aq & rl, rl, true) {
-                      MemValue(true) => { X(rd) = zero_extend(0b0); cancel_reservation(); RETIRE_SUCCESS },
-                      MemValue(false) => { X(rd) = zero_extend(0b1); cancel_reservation(); RETIRE_SUCCESS },
-                      MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }
-                    }
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
+    X(rd) = zero_extend(0b1);
+    return RETIRE_SUCCESS
+  };
+
+  /* normal non-RMEM case
+   * RMEM: SC is allowed to succeed (but might fail later)
+   */
+  /* Get the address, X(rs1) (no offset).
+   * Extensions might perform additional checks on address validity.
+   */
+  match ext_data_get_addr(rs1, zeros(), Write(Data), width_bytes) {
+    Ext_DataAddr_Error(e) => {
+      ext_handle_data_check_error(e);
+      RETIRE_FAIL
+    },
+
+    Ext_DataAddr_OK(vaddr) => {
+      if not(is_aligned(vaddr, width)) then {
+        handle_mem_exception(vaddr, E_SAMO_Addr_Align());
+        return RETIRE_FAIL
+      };
+
+      let data = X(rs2)[width_bytes * 8 - 1 .. 0];
+
+      match vmem_write(vaddr, width_bytes, data, aq & rl, rl, true) {
+        Err(vaddr, e) => {
+          handle_mem_exception(vaddr, e);
+          RETIRE_FAIL
+        },
+
+        Ok(true) => {
+          X(rd) = zero_extend(0b0);
+          cancel_reservation();
+          RETIRE_SUCCESS
+        },
+
+        Ok(false) => {
+          X(rd) = zero_extend(0b1);
+          cancel_reservation();
+          RETIRE_SUCCESS
+        },
+      }
     }
   }
 }

diff --git a/model/riscv_insts_base.sail b/model/riscv_insts_base.sail
index 1510dd425..d84401199 100644
--- a/model/riscv_insts_base.sail
+++ b/model/riscv_insts_base.sail
@@ -302,8 +302,8 @@ union clause ast = LOAD : (bits(12), regidx, regidx, bool, word_width, bool, bool)

 /* unsigned loads are only present for widths strictly less than xlen,
    signed loads also present for widths equal to xlen */
-mapping clause encdec = LOAD(imm, rs1, rd, is_unsigned, size, false, false) if (size_bytes(size) < xlen_bytes) | (not(is_unsigned) & size_bytes(size) <= xlen_bytes)
-  <-> imm @ rs1 @ bool_bits(is_unsigned) @ size_enc(size) @ rd @ 0b0000011 if (size_bytes(size) < xlen_bytes) | (not(is_unsigned) & size_bytes(size) <= xlen_bytes)
+function valid_load_encdec(width : word_width, is_unsigned : bool) -> bool =
+  (size_bytes(width) < xlen_bytes) | (not(is_unsigned) & size_bytes(width) <= xlen_bytes)

 val extend_value : forall 'n, 0 < 'n <= xlen. (bool, bits('n)) -> xlenbits
 function extend_value(is_unsigned, value) = if is_unsigned then zero_extend(value) else sign_extend(value)
@@ -316,9 +316,8 @@ function is_aligned(vaddr : xlenbits, width : word_width) -> bool =
     DOUBLE => vaddr[2..0] == zeros(),
   }

-// Return true if the address is misaligned and we don't support misaligned access.
-function check_misaligned(vaddr : xlenbits, width : word_width) -> bool =
-  not(plat_enable_misaligned_access()) & not(is_aligned(vaddr, width))
+mapping clause encdec = LOAD(imm, rs1, rd, is_unsigned, width, false, false) if valid_load_encdec(width, is_unsigned)
+  <-> imm @ rs1 @ bool_bits(is_unsigned) @ size_enc(width) @ rd @ 0b0000011 if valid_load_encdec(width, is_unsigned)

 function clause execute(LOAD(imm, rs1, rd, is_unsigned, width, aq, rl)) = {
   let offset : xlenbits = sign_extend(imm);
@@ -330,20 +329,29 @@ function clause execute(LOAD(imm, rs1, rd, is_unsigned, width, aq, rl)) = {
   /* Get the address, X(rs1) + offset.
      Some extensions perform additional checks on address validity. */
   match ext_data_get_addr(rs1, offset, Read(Data), width_bytes) {
-    Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL },
+    Ext_DataAddr_Error(e) => {
+      ext_handle_data_check_error(e);
+      RETIRE_FAIL
+    },
+
     Ext_DataAddr_OK(vaddr) => {
-      if check_misaligned(vaddr, width)
-      then { handle_mem_exception(vaddr, E_Load_Addr_Align()); RETIRE_FAIL }
-      else match translateAddr(vaddr, Read(Data)) {
-        TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
-        TR_Address(paddr, _) =>
-
-          match mem_read(Read(Data), paddr, width_bytes, aq, rl, false) {
-            MemValue(result) => { X(rd) = extend_value(is_unsigned, result); RETIRE_SUCCESS },
-            MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
-          },
+      if check_misaligned(vaddr, width) then {
+        handle_mem_exception(vaddr, E_Load_Addr_Align());
+        return RETIRE_FAIL
+      };
+
+      match vmem_read(vaddr, width_bytes, aq, rl, false) {
+        Err(vaddr, e) => {
+          handle_mem_exception(vaddr, e);
+          RETIRE_FAIL
+        },
+
+        Ok(data) => {
+          X(rd) = extend_value(is_unsigned, data);
+          RETIRE_SUCCESS
+        }
       }
-    },
+    }
   }
 }

@@ -365,17 +373,15 @@ mapping maybe_u = {
   false <-> ""
 }

-mapping clause assembly = LOAD(imm, rs1, rd, is_unsigned, size, aq, rl)
-  <-> "l" ^ size_mnemonic(size) ^ maybe_u(is_unsigned) ^ maybe_aq(aq) ^ maybe_rl(rl) ^ spc() ^ reg_name(rd) ^ sep() ^ hex_bits_signed_12(imm) ^ "(" ^ reg_name(rs1) ^ ")"
+mapping clause assembly = LOAD(imm, rs1, rd, is_unsigned, width, aq, rl)
+  <-> "l" ^ size_mnemonic(width) ^ maybe_u(is_unsigned) ^ maybe_aq(aq) ^ maybe_rl(rl) ^ spc() ^ reg_name(rd) ^ sep() ^ hex_bits_signed_12(imm) ^ "(" ^ reg_name(rs1) ^ ")"

 /* ****************************************************************** */
 union clause ast = STORE : (bits(12), regidx, regidx, word_width, bool, bool)

-mapping clause encdec = STORE(imm7 @ imm5, rs2, rs1, size, false, false) if size_bytes(size) <= xlen_bytes
-  <-> imm7 : bits(7) @ rs2 @ rs1 @ 0b0 @ size_enc(size) @ imm5 : bits(5) @ 0b0100011 if size_bytes(size) <= xlen_bytes
+mapping clause encdec = STORE(imm7 @ imm5, rs2, rs1, width, false, false) if size_bytes(width) <= xlen_bytes
+  <-> imm7 : bits(7) @ rs2 @ rs1 @ 0b0 @ size_enc(width) @ imm5 : bits(5) @ 0b0100011 if size_bytes(width) <= xlen_bytes

-/* NOTE: Currently, we only EA if address translation is successful.
-   This may need revisiting. */
 function clause execute (STORE(imm, rs2, rs1, width, aq, rl)) = {
   let offset : xlenbits = sign_extend(imm);
   let width_bytes = size_bytes(width);
@@ -386,32 +392,32 @@ function clause execute (STORE(imm, rs2, rs1, width, aq, rl)) = {
   /* Get the address, X(rs1) + offset.
      Some extensions perform additional checks on address validity.
   */
   match ext_data_get_addr(rs1, offset, Write(Data), width_bytes) {
-    Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL },
-    Ext_DataAddr_OK(vaddr) =>
-      if check_misaligned(vaddr, width)
-      then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); RETIRE_FAIL }
-      else match translateAddr(vaddr, Write(Data)) {
-        TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
-        TR_Address(paddr, _) => {
-          let eares = mem_write_ea(paddr, width_bytes, aq, rl, false);
-          match (eares) {
-            MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
-            MemValue(_) => {
-              let rs2_val = X(rs2);
-              match mem_write_value(paddr, width_bytes, rs2_val[width_bytes * 8 - 1 .. 0], aq, rl, false) {
-                MemValue(true) => RETIRE_SUCCESS,
-                MemValue(false) => internal_error(__FILE__, __LINE__, "store got false from mem_write_value"),
-                MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }
-              }
-            }
-          }
-        }
-      }
+    Ext_DataAddr_Error(e) => {
+      ext_handle_data_check_error(e);
+      RETIRE_FAIL
+    },
+
+    Ext_DataAddr_OK(vaddr) => {
+      if check_misaligned(vaddr, width) then {
+        handle_mem_exception(vaddr, E_SAMO_Addr_Align());
+        return RETIRE_FAIL
+      };
+
+      let data = X(rs2)[width_bytes * 8 - 1 .. 0];
+
+      match vmem_write(vaddr, width_bytes, data, aq, rl, false) {
+        Ok(_) => RETIRE_SUCCESS,
+        Err(vaddr, e) => {
+          handle_mem_exception(vaddr, e);
+          RETIRE_FAIL
+        }
+      }
+    }
   }
 }

-mapping clause assembly = STORE(imm, rs2, rs1, size, aq, rl)
-  <-> "s" ^ size_mnemonic(size) ^ maybe_aq(aq) ^ maybe_rl(rl) ^ spc() ^ reg_name(rs2) ^ sep() ^ hex_bits_signed_12(imm) ^ opt_spc() ^ "(" ^ opt_spc() ^ reg_name(rs1) ^ opt_spc() ^ ")"
+mapping clause assembly = STORE(imm, rs2, rs1, width, aq, rl)
+  <-> "s" ^ size_mnemonic(width) ^ maybe_aq(aq) ^ maybe_rl(rl) ^ spc() ^ reg_name(rs2) ^ sep() ^ hex_bits_signed_12(imm) ^ opt_spc() ^ "(" ^ opt_spc() ^ reg_name(rs1) ^ opt_spc() ^ ")"

 /* ****************************************************************** */
 union clause ast = ADDIW : (bits(12), regidx, regidx)

diff --git a/model/riscv_mem.sail b/model/riscv_mem.sail
index 82227ad7e..cbcbef948 100644
--- a/model/riscv_mem.sail
+++ b/model/riscv_mem.sail
@@ -38,6 +38,14 @@ function is_aligned_addr forall 'n. (addr : xlenbits, width : int('n)) -> bool =
   unsigned(addr) % width == 0

+function check_misaligned(vaddr : xlenbits, width : word_width) -> bool = {
+  if plat_enable_misaligned_access() then {
+    false
+  } else {
+    not(is_aligned_addr(vaddr, size_bytes(width)))
+  }
+}
+
 function read_kind_of_flags (aq : bool, rl : bool, res : bool) -> option(read_kind) =
   match (aq, rl, res) {
     (false, false, false) => Some(Read_plain),
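A sanity property in the style used by model/riscv_vmem_utils.sail below could
pin down the relocated check_misaligned helper under the new check_properties
target. The following is an illustrative sketch only, not a line of the patch;
it assumes the model's word_width constructors and size_bytes:

$[property]
function prop_check_misaligned_aligned(vaddr : xlenbits, w : bits(2)) -> bool = {
  let width : word_width = match w { 0b00 => BYTE, 0b01 => HALF, 0b10 => WORD, 0b11 => DOUBLE };
  /* an address aligned to the access width is never flagged, regardless of
   * whether the platform supports misaligned accesses in hardware */
  if is_aligned_addr(vaddr, size_bytes(width))
  then not(check_misaligned(vaddr, width))
  else true
}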
diff --git a/model/riscv_vmem_utils.sail b/model/riscv_vmem_utils.sail
new file mode 100644
index 000000000..a7162f0ce
--- /dev/null
+++ b/model/riscv_vmem_utils.sail
@@ -0,0 +1,225 @@
+/*=======================================================================================*/
+/*  This Sail RISC-V architecture model, comprising all files and                       */
+/*  directories except where otherwise noted is subject to the BSD                      */
+/*  two-clause license in the LICENSE file.                                             */
+/*                                                                                      */
+/*  SPDX-License-Identifier: BSD-2-Clause                                               */
+/*=======================================================================================*/
+
+/* This file implements utility functions for accessing memory that
+ * can be used by instruction definitions.
+ */
+
+/* This is an external option that controls the order in which the model
+ * performs misaligned accesses.
+ */
+val sys_misaligned_order_decreasing = pure "sys_misaligned_order_decreasing" : unit -> bool
+
+/* This is an external option that, when true, causes all misaligned accesses
+ * to be split into single byte operations.
+ */
+val sys_misaligned_to_byte = pure "sys_misaligned_to_byte" : unit -> bool
+
+/* This is an external option that returns an integer N, such that
+ * when N is greater than zero, misaligned accesses to physical memory
+ * (as atomic events) are allowed provided the access occurs within an
+ * N-byte region. N must be a power of two.
+ *
+ * This option cannot be unlimited (i.e. allowing all misaligned accesses
+ * to be atomic) as this would be incorrect above the maximum supported
+ * page size.
+ */
+val sys_misaligned_allowed_within = pure "sys_misaligned_allowed_within" : unit -> bits(64)
+
+/* The Zama16b extension implies that misaligned loads, stores, and
+ * AMOs to main memory regions that do not cross a naturally aligned
+ * 16-byte boundary are atomic.
+ */
+enum clause extension = Ext_Zama16b
+
+val sys_enable_zama16b = pure "sys_enable_zama16b" : unit -> bool
+
+function clause extensionEnabled(Ext_Zama16b) = sys_enable_zama16b()
+
+/* Check if an 'n byte access for an address is within an aligned 'r byte region */
+val access_within : forall 'width 'n 'r, 1 <= 'n <= 'r. (bits('width), int('n), int('r)) -> bool
+
+function access_within(addr, bytes, region_bytes) = {
+  // round the address down to the enclosing aligned region
+  let low = addr & ~(get_slice_int(length(addr), region_bytes - 1, 0));
+  let high = low + (region_bytes - 1);
+  let addr_high = addr + (bytes - 1);
+  low <=_u addr & addr <=_u addr_high & addr_high <=_u high
+}
+
+/* This property demonstrates that when bytes == region_bytes, the access_within check above is
+ * equivalent to a regular alignment check (for a constrained set of inputs to help the SMT solver).
+ */
+$[property]
+function prop_access_within_is_aligned(addr : bits(32), bytes : bits(4)) -> bool = {
+  let bytes = unsigned(zero_extend(32, 0b1) << unsigned(bytes));
+  if bytes > 0 then {
+    access_within(addr, bytes, bytes) == (fmod_int(unsigned(addr), bytes) == 0)
+  } else {
+    true
+  }
+}
+
+/* A 1-byte access is always within a 1-byte region. */
+$[property]
+function prop_access_within_single(addr : bits(32)) -> bool = {
+  access_within(addr, 1, 1)
+}
+
+val allowed_misaligned : forall 'width, 'width > 0. (xlenbits, int('width)) -> bool
+
+function allowed_misaligned(vaddr, width) = {
+  let region_width = min_int(2 ^ pagesize_bits, unsigned(sys_misaligned_allowed_within()));
+
+  // If the Zama16b extension is enabled, the region_width must be at least 16
+  let region_width = if extensionEnabled(Ext_Zama16b) then {
+    max_int(16, region_width)
+  } else {
+    region_width
+  };
+
+  if width > region_width then return false;
+
+  access_within(vaddr, width, region_width)
+}
+
+val count_trailing_zeros : forall 'n, 'n >= 0. (bits('n)) -> range(0, 'n)
+
+function count_trailing_zeros(xs) = {
+  foreach (i from 0 to (length(xs) - 1)) {
+    if xs[i] == bitone then return i;
+  };
+  length(xs)
+}
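Two further SMT-checkable sketches in the same style as the properties above
(illustrative only, not lines of the patch) help characterise the helpers just
defined: an access that straddles an aligned region boundary must be rejected,
and a vector with a set low bit has no trailing zeros.

$[property]
function prop_access_within_cross(addr : bits(32)) -> bool = {
  /* a 2-byte access starting on the last byte of an aligned 16-byte region
   * spills into the next region, so it must be rejected */
  if addr[3 .. 0] == 0xF then not(access_within(addr, 2, 16)) else true
}

$[property]
function prop_ctz_low_bit(x : bits(8)) -> bool = {
  /* appending a set low bit forces the trailing-zero count to zero */
  count_trailing_zeros(x @ 0b1) == 0
}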
+val split_misaligned : forall 'width, 'width > 0.
+  (xlenbits, int('width)) -> {'n 'bytes, 'width == 'n * 'bytes & 'bytes > 0. (int('n), int('bytes))}
+
+function split_misaligned(vaddr, width) = {
+  if is_aligned_addr(vaddr, width) | allowed_misaligned(vaddr, width) then (1, width)
+  else if sys_misaligned_to_byte() then (width, 1)
+  else {
+    // each sub-access is as large as the alignment of the address allows
+    let bytes_per_access = 2 ^ count_trailing_zeros(vaddr);
+    let num_accesses = width / bytes_per_access;
+    assert(width == num_accesses * bytes_per_access);
+    (num_accesses, bytes_per_access)
+  }
+}
+
+type valid_misaligned_order('n : Int, 'first : Int, 'last : Int, 'step : Int) -> Bool =
+  ('first == 0 & 'last == 'n - 1 & 'step == 1)
+  | ('first == 'n - 1 & 'last == 0 & 'step == -1)
+
+val misaligned_order : forall 'n.
+  int('n) -> {'first 'last 'step, valid_misaligned_order('n, 'first, 'last, 'step). (int('first), int('last), int('step))}
+
+function misaligned_order(n) = {
+  if sys_misaligned_order_decreasing() then {
+    (n - 1, 0, -1)
+  } else {
+    (0, n - 1, 1)
+  }
+}
+
+val vmem_read : forall 'width, 'width in {1, 2, 4, 8} & 'width <= xlen_bytes.
+  (xlenbits, int('width), bool, bool, bool) -> result(bits(8 * 'width), (xlenbits, ExceptionType))
+
+function vmem_read(vaddr, bytes, aq, rl, res) = {
+  /* If the load is misaligned, split into `n` (single-copy-atomic) memory operations,
+     each of `bytes` width. If the load is aligned, then `n` = 1 and bytes will remain
+     unchanged. */
+  let ('n, bytes) = split_misaligned(vaddr, bytes);
+  var data = zeros(8 * n * bytes);
+
+  let (first, last, step) = misaligned_order(n);
+  var i : range(0, 'n - 1) = first;
+  var finished : bool = false;
+
+  repeat {
+    let offset = i;
+    let vaddr = vaddr + (offset * bytes);
+    match translateAddr(vaddr, Read(Data)) {
+      TR_Failure(e, _) => return Err(vaddr, e),
+
+      TR_Address(paddr, _) => match mem_read(Read(Data), paddr, bytes, aq, rl, res) {
+        MemException(e) => return Err(vaddr, e),
+
+        MemValue(v) => {
+          if res then {
+            load_reservation(paddr)
+          };
+
+          data[(8 * (offset + 1) * bytes) - 1 .. 8 * offset * bytes] = v
+        },
+      }
+    };
+
+    if offset == last then {
+      finished = true
+    } else {
+      i = offset + step
+    }
+  } until finished;
+
+  Ok(data)
+}
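As a usage sketch (hypothetical helper, not part of the patch): an instruction
wanting a plain 32-bit load with no acquire/release or reservation semantics
would call vmem_read as below, getting back either the loaded bits or the
faulting virtual address paired with the exception to raise.

/* Hypothetical illustration of the vmem_read interface. */
function example_load_word(vaddr : xlenbits) -> result(bits(32), (xlenbits, ExceptionType)) =
  vmem_read(vaddr, 4, false, false, false)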
+/* For each sub-access, the effective address is announced (mem_write_ea)
+ * before the data for the write is used (mem_write_value). This ordering
+ * is for the RVWMO operational model.
+ */
+val vmem_write : forall 'width, 'width in {1, 2, 4, 8} & 'width <= xlen_bytes.
+  (xlenbits, int('width), bits(8 * 'width), bool, bool, bool) -> result(bool, (xlenbits, ExceptionType))
+
+function vmem_write(vaddr, bytes, data, aq, rl, res) = {
+  /* If the store is misaligned, split into `n` (single-copy-atomic) memory operations,
+     each of `bytes` width. If the store is aligned, then `n` = 1 and bytes will remain
+     unchanged. */
+  let ('n, bytes) = split_misaligned(vaddr, bytes);
+
+  let (first, last, step) = misaligned_order(n);
+  var i : range(0, 'n - 1) = first;
+  var finished : bool = false;
+
+  var write_success : bool = true;
+
+  repeat {
+    let offset = i;
+    let vaddr = vaddr + (offset * bytes);
+    match translateAddr(vaddr, Write(Data)) {
+      TR_Failure(e, _) => return Err(vaddr, e),
+
+      /* NOTE: Currently, we only announce the effective address if address translation
+         is successful. This may need revisiting, particularly in the misaligned case. */
+      TR_Address(paddr, _) => {
+        /* If res is true, the store should be aligned, and this loop should only execute once */
+        if res & not(match_reservation(paddr)) then {
+          write_success = false
+        } else match mem_write_ea(paddr, bytes, aq, rl, res) {
+          MemException(e) => return Err(vaddr, e),
+
+          MemValue(()) => {
+            let write_value = data[(8 * (offset + 1) * bytes) - 1 .. 8 * offset * bytes];
+            match mem_write_value(paddr, bytes, write_value, aq, rl, res) {
+              MemException(e) => return Err(vaddr, e),
+              MemValue(s) => write_success = write_success & s,
+            }
+          }
+        }
+      }
+    };
+
+    if offset == last then {
+      finished = true
+    } else {
+      i = offset + step
+    }
+  } until finished;
+
+  Ok(write_success)
+}
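Mirroring the read case, a sketch of how a store drives this interface
(hypothetical helper, not part of the patch). The bool in the result only
matters when res is true, i.e. for store-conditional, where a reservation
mismatch surfaces as Ok(false) rather than as an exception.

/* Hypothetical illustration of the vmem_write interface. */
function example_store_word(vaddr : xlenbits, data : bits(32)) -> result(bool, (xlenbits, ExceptionType)) =
  vmem_write(vaddr, 4, data, false, false, false)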