diff --git a/Makefile b/Makefile
index 119d4e321..22604e53f 100644
--- a/Makefile
+++ b/Makefile
@@ -92,6 +92,7 @@ SAIL_VM_SRCS += riscv_vmem_pte.sail
 SAIL_VM_SRCS += riscv_vmem_ptw.sail
 SAIL_VM_SRCS += riscv_vmem_tlb.sail
 SAIL_VM_SRCS += riscv_vmem.sail
+SAIL_VM_SRCS += riscv_vmem_utils.sail
 
 # Non-instruction sources
 PRELUDE = prelude.sail $(SAIL_XLEN) $(SAIL_FLEN) $(SAIL_VLEN) prelude_mem_metadata.sail prelude_mem.sail
diff --git a/c_emulator/riscv_platform.c b/c_emulator/riscv_platform.c
index ae67bac42..82235e001 100644
--- a/c_emulator/riscv_platform.c
+++ b/c_emulator/riscv_platform.c
@@ -67,6 +67,16 @@ bool sys_enable_zicboz(unit u)
   return rv_enable_zicboz;
 }
 
+bool sys_misaligned_order_decreasing(unit u)
+{
+  return rv_misaligned_order_decreasing;
+}
+
+bool sys_misaligned_to_byte(unit u)
+{
+  return rv_misaligned_to_byte;
+}
+
 uint64_t sys_pmp_count(unit u)
 {
   return rv_pmp_count;
diff --git a/c_emulator/riscv_platform.h b/c_emulator/riscv_platform.h
index fe81c901b..6ca993795 100644
--- a/c_emulator/riscv_platform.h
+++ b/c_emulator/riscv_platform.h
@@ -13,6 +13,9 @@ bool sys_enable_bext(unit);
 bool sys_enable_zicbom(unit);
 bool sys_enable_zicboz(unit);
 
+bool sys_misaligned_order_decreasing(unit);
+bool sys_misaligned_to_byte(unit);
+
 uint64_t sys_pmp_count(unit);
 uint64_t sys_pmp_grain(unit);
diff --git a/c_emulator/riscv_platform_impl.c b/c_emulator/riscv_platform_impl.c
index 2713715cc..891b68b44 100644
--- a/c_emulator/riscv_platform_impl.c
+++ b/c_emulator/riscv_platform_impl.c
@@ -9,6 +9,10 @@ uint64_t rv_pmp_grain = 0;
 uint64_t rv_vector_vlen_exp = 0x9;
 uint64_t rv_vector_elen_exp = 0x6;
 
+/* Defaults for misaligned access behavior in the generated simulator */
+bool rv_misaligned_order_decreasing = false;
+bool rv_misaligned_to_byte = false;
+
 bool rv_enable_svinval = false;
 bool rv_enable_zcb = false;
 bool rv_enable_zfinx = false;
diff --git a/c_emulator/riscv_platform_impl.h b/c_emulator/riscv_platform_impl.h
index 8bda5e624..c5dc5ef98 100644
--- a/c_emulator/riscv_platform_impl.h
+++ b/c_emulator/riscv_platform_impl.h
@@ -14,6 +14,9 @@ extern uint64_t rv_pmp_grain;
 extern uint64_t rv_vector_vlen_exp;
 extern uint64_t rv_vector_elen_exp;
 
+extern bool rv_misaligned_order_decreasing;
+extern bool rv_misaligned_to_byte;
+
 extern bool rv_enable_svinval;
 extern bool rv_enable_zcb;
 extern bool rv_enable_zfinx;
diff --git a/c_emulator/riscv_sim.c b/c_emulator/riscv_sim.c
index 78838462a..2605dbade 100644
--- a/c_emulator/riscv_sim.c
+++ b/c_emulator/riscv_sim.c
@@ -59,6 +59,8 @@ enum {
   OPT_ENABLE_ZICBOM,
   OPT_ENABLE_ZICBOZ,
   OPT_CACHE_BLOCK_SIZE,
+  OPT_MISALIGNED_ORDER_DEC,
+  OPT_MISALIGNED_TO_BYTE,
 };
 
 static bool do_dump_dts = false;
@@ -133,6 +135,8 @@ static struct option options[] = {
     {"enable-misaligned", no_argument, 0, 'm' },
     {"pmp-count", required_argument, 0, OPT_PMP_COUNT },
     {"pmp-grain", required_argument, 0, OPT_PMP_GRAIN },
+    {"misaligned-order-decreasing", no_argument, 0, OPT_MISALIGNED_ORDER_DEC},
+    {"misaligned-to-byte", no_argument, 0, OPT_MISALIGNED_TO_BYTE },
     {"ram-size", required_argument, 0, 'z' },
     {"disable-compressed", no_argument, 0, 'C' },
     {"disable-writable-misa", no_argument, 0, 'I' },
@@ -334,6 +338,18 @@ static int process_args(int argc, char **argv)
     }
     rv_pmp_grain = pmp_grain;
     break;
+  case OPT_MISALIGNED_ORDER_DEC:
+    fprintf(stderr,
+            "misaligned access virtual addresses will be translated in "
+            "decreasing order.\n");
+    rv_misaligned_order_decreasing = true;
+    break;
+  case OPT_MISALIGNED_TO_BYTE:
+    fprintf(stderr,
+            "misaligned accesses will be split into individual "
+            "byte operations.\n");
+    rv_misaligned_to_byte = true;
+    break;
   case 'C':
     fprintf(stderr, "disabling RVC compressed instructions.\n");
     rv_enable_rvc = false;
diff --git a/model/prelude.sail b/model/prelude.sail
index 8c03c58da..efa7455ec 100644
--- a/model/prelude.sail
+++ b/model/prelude.sail
@@ -10,6 +10,7 @@ default Order dec
 
 $include
 $include
+$include
 $include
 $include
 $include
diff --git a/model/riscv_insts_aext.sail b/model/riscv_insts_aext.sail
index 17863b820..7b35297e4 100644
--- a/model/riscv_insts_aext.sail
+++ b/model/riscv_insts_aext.sail
@@ -81,20 +81,30 @@ function clause execute(LOADRES(aq, rl, rs1, width, rd)) = {
    * Extensions might perform additional checks on address validity.
    */
   match ext_data_get_addr(rs1, zeros(), Read(Data), width_bytes) {
-    Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL },
+    Ext_DataAddr_Error(e) => {
+      ext_handle_data_check_error(e);
+      RETIRE_FAIL
+    },
+
     Ext_DataAddr_OK(vaddr) => {
       /* "LR faults like a normal load, even though it's in the AMO major opcode space."
        * - Andrew Waterman, isa-dev, 10 Jul 2018.
        */
-      if not(is_aligned(vaddr, width))
-      then { handle_mem_exception(vaddr, E_Load_Addr_Align()); RETIRE_FAIL }
-      else match translateAddr(vaddr, Read(Data)) {
-        TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
-        TR_Address(addr, _) =>
-          match mem_read(Read(Data), addr, width_bytes, aq, aq & rl, true) {
-            MemValue(result) => { load_reservation(addr); X(rd) = sign_extend(result); RETIRE_SUCCESS },
-            MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }
-          },
+      if not(is_aligned(vaddr, width)) then {
+        handle_mem_exception(vaddr, E_Load_Addr_Align());
+        return RETIRE_FAIL
+      };
+
+      match vmem_read(vaddr, width_bytes, aq, aq & rl, true) {
+        Err(vaddr, e) => {
+          handle_mem_exception(vaddr, e);
+          RETIRE_FAIL
+        },
+
+        Ok(data) => {
+          X(rd) = sign_extend(data);
+          RETIRE_SUCCESS
+        },
       }
     }
   }
@@ -116,49 +126,51 @@ function clause execute (STORECON(aq, rl, rs2, rs1, width, rd)) = {
   // This is checked during decoding.
   assert(width_bytes <= xlen_bytes);
 
-  if speculate_conditional () == false then {
-    /* should only happen in rmem
-     * rmem: allow SC to fail very early
+  if speculate_conditional() == false then {
+    /* should only happen in RMEM
+     * RMEM: allow SC to fail very early
      */
-    X(rd) = zero_extend(0b1); RETIRE_SUCCESS
-  } else {
-    /* normal non-rmem case
-     * rmem: SC is allowed to succeed (but might fail later)
-     */
-    /* Get the address, X(rs1) (no offset).
-     * Extensions might perform additional checks on address validity.
-     */
-    match ext_data_get_addr(rs1, zeros(), Write(Data), width_bytes) {
-      Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL },
-      Ext_DataAddr_OK(vaddr) => {
-        if not(is_aligned(vaddr, width))
-        then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); RETIRE_FAIL }
-        else {
-          match translateAddr(vaddr, Write(Data)) { /* Write and ReadWrite are equivalent here:
-                                                     * both result in a SAMO exception */
-            TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
-            TR_Address(addr, _) => {
-              // Check reservation with physical address.
-              if not(match_reservation(addr)) then {
-                /* cannot happen in rmem */
-                X(rd) = zero_extend(0b1); cancel_reservation(); RETIRE_SUCCESS
-              } else {
-                let eares = mem_write_ea(addr, width_bytes, aq & rl, rl, true);
-                match eares {
-                  MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
-                  MemValue(_) => {
-                    let rs2_val = X(rs2);
-                    match mem_write_value(addr, width_bytes, rs2_val[width_bytes * 8 - 1 .. 0], aq & rl, rl, true) {
-                      MemValue(true)  => { X(rd) = zero_extend(0b0); cancel_reservation(); RETIRE_SUCCESS },
-                      MemValue(false) => { X(rd) = zero_extend(0b1); cancel_reservation(); RETIRE_SUCCESS },
-                      MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }
-                    }
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
+    X(rd) = zero_extend(0b1);
+    return RETIRE_SUCCESS
+  };
+
+  /* normal non-rmem case
+   * RMEM: SC is allowed to succeed (but might fail later)
+   */
+  /* Get the address, X(rs1) (no offset).
+   * Extensions might perform additional checks on address validity.
+   */
+  match ext_data_get_addr(rs1, zeros(), Write(Data), width_bytes) {
+    Ext_DataAddr_Error(e) => {
+      ext_handle_data_check_error(e);
+      RETIRE_FAIL
+    },
+
+    Ext_DataAddr_OK(vaddr) => {
+      if not(is_aligned(vaddr, width)) then {
+        handle_mem_exception(vaddr, E_SAMO_Addr_Align());
+        return RETIRE_FAIL
+      };
+
+      let data = X(rs2)[width_bytes * 8 - 1 .. 0];
+
+      match vmem_write(vaddr, width_bytes, data, aq & rl, rl, true) {
+        Err(vaddr, e) => {
+          handle_mem_exception(vaddr, e);
+          RETIRE_FAIL
+        },
+
+        Ok(true) => {
+          X(rd) = zero_extend(0b0);
+          cancel_reservation();
+          RETIRE_SUCCESS
+        },
+
+        Ok(false) => {
+          X(rd) = zero_extend(0b1);
+          cancel_reservation();
+          RETIRE_SUCCESS
+        },
       }
     }
   }
diff --git a/model/riscv_insts_base.sail b/model/riscv_insts_base.sail
index 1510dd425..d84401199 100644
--- a/model/riscv_insts_base.sail
+++ b/model/riscv_insts_base.sail
@@ -302,8 +302,8 @@ union clause ast = LOAD : (bits(12), regidx, regidx, bool, word_width, bool, boo
 
 /* unsigned loads are only present for widths strictly less than xlen,
    signed loads also present for widths equal to xlen */
-mapping clause encdec = LOAD(imm, rs1, rd, is_unsigned, size, false, false) if (size_bytes(size) < xlen_bytes) | (not(is_unsigned) & size_bytes(size) <= xlen_bytes)
-  <-> imm @ rs1 @ bool_bits(is_unsigned) @ size_enc(size) @ rd @ 0b0000011 if (size_bytes(size) < xlen_bytes) | (not(is_unsigned) & size_bytes(size) <= xlen_bytes)
+function valid_load_encdec(width : word_width, is_unsigned : bool) -> bool =
+  (size_bytes(width) < xlen_bytes) | (not(is_unsigned) & size_bytes(width) <= xlen_bytes)
 
 val extend_value : forall 'n, 0 < 'n <= xlen. (bool, bits('n)) -> xlenbits
 function extend_value(is_unsigned, value) = if is_unsigned then zero_extend(value) else sign_extend(value)
@@ -316,9 +316,8 @@ function is_aligned(vaddr : xlenbits, width : word_width) -> bool =
   DOUBLE => vaddr[2..0] == zeros(),
 }
 
-// Return true if the address is misaligned and we don't support misaligned access.
-function check_misaligned(vaddr : xlenbits, width : word_width) -> bool =
-  not(plat_enable_misaligned_access()) & not(is_aligned(vaddr, width))
+mapping clause encdec = LOAD(imm, rs1, rd, is_unsigned, width, false, false) if valid_load_encdec(width, is_unsigned)
+  <-> imm @ rs1 @ bool_bits(is_unsigned) @ size_enc(width) @ rd @ 0b0000011 if valid_load_encdec(width, is_unsigned)
 
 function clause execute(LOAD(imm, rs1, rd, is_unsigned, width, aq, rl)) = {
   let offset : xlenbits = sign_extend(imm);
@@ -330,20 +329,29 @@ function clause execute(LOAD(imm, rs1, rd, is_unsigned, width, aq, rl)) = {
   /* Get the address, X(rs1) + offset.
      Some extensions perform additional checks on address validity. */
   match ext_data_get_addr(rs1, offset, Read(Data), width_bytes) {
-    Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL },
+    Ext_DataAddr_Error(e) => {
+      ext_handle_data_check_error(e);
+      RETIRE_FAIL
+    },
+
     Ext_DataAddr_OK(vaddr) => {
-      if check_misaligned(vaddr, width)
-      then { handle_mem_exception(vaddr, E_Load_Addr_Align()); RETIRE_FAIL }
-      else match translateAddr(vaddr, Read(Data)) {
-        TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
-        TR_Address(paddr, _) =>
-
-          match mem_read(Read(Data), paddr, width_bytes, aq, rl, false) {
-            MemValue(result) => { X(rd) = extend_value(is_unsigned, result); RETIRE_SUCCESS },
-            MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
-          },
+      if check_misaligned(vaddr, width) then {
+        handle_mem_exception(vaddr, E_Load_Addr_Align());
+        return RETIRE_FAIL
+      };
+
+      match vmem_read(vaddr, width_bytes, aq, rl, false) {
+        Err(vaddr, e) => {
+          handle_mem_exception(vaddr, e);
+          RETIRE_FAIL
+        },
+
+        Ok(data) => {
+          X(rd) = extend_value(is_unsigned, data);
+          RETIRE_SUCCESS
+        }
       }
-    },
+    }
   }
 }
@@ -365,17 +373,15 @@ mapping maybe_u = {
   false <-> ""
 }
 
-mapping clause assembly = LOAD(imm, rs1, rd, is_unsigned, size, aq, rl)
-  <-> "l" ^ size_mnemonic(size) ^ maybe_u(is_unsigned) ^ maybe_aq(aq) ^ maybe_rl(rl) ^ spc() ^ reg_name(rd) ^ sep() ^ hex_bits_signed_12(imm) ^ "(" ^ reg_name(rs1) ^ ")"
+mapping clause assembly = LOAD(imm, rs1, rd, is_unsigned, width, aq, rl)
+  <-> "l" ^ size_mnemonic(width) ^ maybe_u(is_unsigned) ^ maybe_aq(aq) ^ maybe_rl(rl) ^ spc() ^ reg_name(rd) ^ sep() ^ hex_bits_signed_12(imm) ^ "(" ^ reg_name(rs1) ^ ")"
 
 /* ****************************************************************** */
 union clause ast = STORE : (bits(12), regidx, regidx, word_width, bool, bool)
 
-mapping clause encdec = STORE(imm7 @ imm5, rs2, rs1, size, false, false) if size_bytes(size) <= xlen_bytes
-  <-> imm7 : bits(7) @ rs2 @ rs1 @ 0b0 @ size_enc(size) @ imm5 : bits(5) @ 0b0100011 if size_bytes(size) <= xlen_bytes
+mapping clause encdec = STORE(imm7 @ imm5, rs2, rs1, width, false, false) if size_bytes(width) <= xlen_bytes
+  <-> imm7 : bits(7) @ rs2 @ rs1 @ 0b0 @ size_enc(width) @ imm5 : bits(5) @ 0b0100011 if size_bytes(width) <= xlen_bytes
 
-/* NOTE: Currently, we only EA if address translation is successful.
-   This may need revisiting. */
 function clause execute (STORE(imm, rs2, rs1, width, aq, rl)) = {
   let offset : xlenbits = sign_extend(imm);
   let width_bytes = size_bytes(width);
@@ -386,32 +392,32 @@ function clause execute (STORE(imm, rs2, rs1, width, aq, rl)) = {
   /* Get the address, X(rs1) + offset.
      Some extensions perform additional checks on address validity. */
   match ext_data_get_addr(rs1, offset, Write(Data), width_bytes) {
-    Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL },
-    Ext_DataAddr_OK(vaddr) =>
-      if check_misaligned(vaddr, width)
-      then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); RETIRE_FAIL }
-      else match translateAddr(vaddr, Write(Data)) {
-        TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
-        TR_Address(paddr, _) => {
-          let eares = mem_write_ea(paddr, width_bytes, aq, rl, false);
-          match (eares) {
-            MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
-            MemValue(_) => {
-              let rs2_val = X(rs2);
-              match mem_write_value(paddr, width_bytes, rs2_val[width_bytes * 8 - 1 .. 0], aq, rl, false) {
-                MemValue(true)  => RETIRE_SUCCESS,
-                MemValue(false) => internal_error(__FILE__, __LINE__, "store got false from mem_write_value"),
-                MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }
-              }
-            }
-          }
+    Ext_DataAddr_Error(e) => {
+      ext_handle_data_check_error(e);
+      RETIRE_FAIL
+    },
+
+    Ext_DataAddr_OK(vaddr) => {
+      if check_misaligned(vaddr, width) then {
+        handle_mem_exception(vaddr, E_SAMO_Addr_Align());
+        return RETIRE_FAIL
+      };
+
+      let data = X(rs2)[width_bytes * 8 - 1 .. 0];
+
+      match vmem_write(vaddr, width_bytes, data, aq, rl, false) {
+        Ok(_) => RETIRE_SUCCESS,
+        Err(vaddr, e) => {
+          handle_mem_exception(vaddr, e);
+          return RETIRE_FAIL
+        }
       }
+    }
   }
 }
 
-mapping clause assembly = STORE(imm, rs2, rs1, size, aq, rl)
-  <-> "s" ^ size_mnemonic(size) ^ maybe_aq(aq) ^ maybe_rl(rl) ^ spc() ^ reg_name(rs2) ^ sep() ^ hex_bits_signed_12(imm) ^ opt_spc() ^ "(" ^ opt_spc() ^ reg_name(rs1) ^ opt_spc() ^ ")"
+mapping clause assembly = STORE(imm, rs2, rs1, width, aq, rl)
+  <-> "s" ^ size_mnemonic(width) ^ maybe_aq(aq) ^ maybe_rl(rl) ^ spc() ^ reg_name(rs2) ^ sep() ^ hex_bits_signed_12(imm) ^ opt_spc() ^ "(" ^ opt_spc() ^ reg_name(rs1) ^ opt_spc() ^ ")"
 
 /* ****************************************************************** */
 union clause ast = ADDIW : (bits(12), regidx, regidx)
diff --git a/model/riscv_mem.sail b/model/riscv_mem.sail
index 82227ad7e..cbcbef948 100644
--- a/model/riscv_mem.sail
+++ b/model/riscv_mem.sail
@@ -38,6 +38,14 @@ function is_aligned_addr forall 'n. (addr : xlenbits, width : int('n)) -> bool =
   unsigned(addr) % width == 0
 
+function check_misaligned(vaddr : xlenbits, width : word_width) -> bool = {
+  if plat_enable_misaligned_access() then {
+    false
+  } else {
+    not(is_aligned_addr(vaddr, size_bytes(width)))
+  }
+}
+
 function read_kind_of_flags (aq : bool, rl : bool, res : bool) -> option(read_kind) =
   match (aq, rl, res) {
     (false, false, false) => Some(Read_plain),
diff --git a/model/riscv_vmem_utils.sail b/model/riscv_vmem_utils.sail
new file mode 100644
index 000000000..8a5e62296
--- /dev/null
+++ b/model/riscv_vmem_utils.sail
@@ -0,0 +1,157 @@
+/*=======================================================================================*/
+/*  This Sail RISC-V architecture model, comprising all files and                        */
+/*  directories except where otherwise noted is subject to the BSD                       */
+/*  two-clause license in the LICENSE file.                                              */
+/*                                                                                       */
+/*  SPDX-License-Identifier: BSD-2-Clause                                                */
+/*=======================================================================================*/
+
+/* This file implements utility functions for accessing memory that
+ * can be used by instruction definitions.
+ */
+
+/* This is an external option that controls the order in which the model
+ * performs misaligned accesses.
+ */ +val sys_misaligned_order_decreasing = pure "sys_misaligned_order_decreasing" : unit -> bool + +/* This is an external option that, when true, causes all misaligned accesses + * to be split into single byte operations. + */ +val sys_misaligned_to_byte = pure "sys_misaligned_to_byte" : unit -> bool + +val split_misaligned : forall 'width, 'width in {1, 2, 4, 8}. + (xlenbits, int('width)) -> {'n 'bytes, 'width == 'n * 'bytes & 'bytes > 0. (int('n), int('bytes))} + +function split_misaligned(vaddr, width) = { + let misaligned_to_byte = sys_misaligned_to_byte(); + match width { + 1 => (1, 1), + + 2 if is_aligned_addr(vaddr, 2) => (1, 2), + 2 => (2, 1), + + 4 if is_aligned_addr(vaddr, 4) => (1, 4), + 4 if is_aligned_addr(vaddr, 2) & not(misaligned_to_byte) => (2, 2), + 4 => (4, 1), + + 8 if is_aligned_addr(vaddr, 8) => (1, 8), + 8 if is_aligned_addr(vaddr, 4) & not(misaligned_to_byte) => (2, 4), + 8 if is_aligned_addr(vaddr, 2) & not(misaligned_to_byte) => (4, 2), + 8 => (8, 1) + } +} + +type valid_misaligned_order('n, 'first, 'last, 'step) -> Bool = + ('first == 0 & 'last == 'n - 1 & 'step == 1) + | ('first == 'n - 1 & 'last == 0 & 'step == -1) + +val misaligned_order : forall 'n. + int('n) -> {'first 'last 'step, valid_misaligned_order('n, 'first, 'last, 'step). (int('first), int('last), int('step))} + +function misaligned_order(n) = { + if sys_misaligned_order_decreasing() then { + (n - 1, 0, -1) + } else { + (0, n - 1, 1) + } +} + +val vmem_read : forall 'width, 'width in {1, 2, 4, 8} & 'width <= xlen_bytes. + (xlenbits, int('width), bool, bool, bool) -> result(bits(8 * 'width), (xlenbits, ExceptionType)) + +function vmem_read(vaddr, bytes, aq, rl, res) = { + /* If the load is misaligned, split into `n` (single-copy-atomic) memory operations, + each of `bytes` width. If the load is aligned, then `n` = 1 and bytes will remain + unchanged. */ + let ('n, bytes) = split_misaligned(vaddr, bytes); + var data = zeros(8 * n * bytes); + + let (first, last, step) = misaligned_order(n); + var i : range(0, 'n - 1) = first; + var finished : bool = false; + + repeat { + let offset = i; + let vaddr = vaddr + (offset * bytes); + match translateAddr(vaddr, Read(Data)) { + TR_Failure(e, _) => return Err(vaddr, e), + + TR_Address(paddr, _) => match mem_read(Read(Data), paddr, bytes, aq, rl, res) { + MemException(e) => return Err(vaddr, e), + + MemValue(v) => { + if res then { + load_reservation(paddr) + }; + + data[(8 * (offset + 1) * bytes) - 1 .. 8 * offset * bytes] = v + }, + } + }; + + if offset == last then { + finished = true + } else { + i = offset + step + } + } until finished; + + Ok(data) +} + +/* Currently this function takes the X register index as an argument. + * It does this so the register access only occurs after the effective + * address for the access has been announced. This is for the RVWMO + * operational model. + */ +val vmem_write : forall 'width, 'width in {1, 2, 4, 8} & 'width <= xlen_bytes. + (xlenbits, int('width), bits(8 * 'width), bool, bool, bool) -> result(bool, (xlenbits, ExceptionType)) + +function vmem_write(vaddr, bytes, data, aq, rl, res) = { + /* If the store is misaligned, split into `n` (single-copy-atomic) memory operations, + each of `bytes` width. If the store is aligned, then `n` = 1 and bytes will remain + unchanged. 
+  let ('n, bytes) = split_misaligned(vaddr, bytes);
+
+  let (first, last, step) = misaligned_order(n);
+  var i : range(0, 'n - 1) = first;
+  var finished : bool = false;
+
+  var write_success : bool = true;
+
+  repeat {
+    let offset = i;
+    let vaddr = vaddr + (offset * bytes);
+    match translateAddr(vaddr, Write(Data)) {
+      TR_Failure(e, _) => return Err(vaddr, e),
+
+      /* NOTE: Currently, we only announce the effective address if address translation is successful.
+         This may need revisiting, particularly in the misaligned case. */
+      TR_Address(paddr, _) => {
+        /* If res is true, the store should be aligned, and this loop should only execute once */
+        if res & not(match_reservation(paddr)) then {
+          write_success = false
+        } else match mem_write_ea(paddr, bytes, aq, rl, res) {
+          MemException(e) => return Err(vaddr, e),
+
+          MemValue(()) => {
+            let write_value = data[(8 * (offset + 1) * bytes) - 1 .. 8 * offset * bytes];
+            match mem_write_value(paddr, bytes, write_value, aq, rl, res) {
+              MemException(e) => return Err(vaddr, e),
+              MemValue(s) => write_success = write_success & s,
+            }
+          }
+        }
+      }
+    };
+
+    if offset == last then {
+      finished = true
+    } else {
+      i = offset + step
+    }
+  } until finished;
+
+  Ok(write_success)
+}
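Usage sketch (not part of the patch): assuming the vmem_write signature, handle_mem_exception,
and the model's usual Retired return type shown or referenced above, an instruction definition
could drive the new helpers roughly as follows; example_store_word is a hypothetical name used
only for illustration.

/* Hypothetical illustration: a 4-byte store issued through vmem_write. For a
 * 2-byte-aligned but 4-byte-misaligned vaddr, split_misaligned yields two 2-byte
 * operations (or four 1-byte operations when sys_misaligned_to_byte() is set),
 * issued in the order chosen by misaligned_order(). */
function example_store_word(vaddr : xlenbits, value : bits(32)) -> Retired = {
  match vmem_write(vaddr, 4, value, false, false, false) {
    Ok(_)         => RETIRE_SUCCESS,
    Err(vaddr, e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
  }
}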