From 7aa91607f2f9458ef23b581f85d8fe802610eea2 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Wed, 16 Oct 2024 17:27:56 -0600 Subject: [PATCH 1/9] Validate MAP_ANONYMOUS in mmap syscall --- rvgo/fast/vm.go | 45 ++++++++++++++++++++++++++++----------------- rvgo/slow/vm.go | 45 ++++++++++++++++++++++++++++----------------- rvsol/src/RISCV.sol | 40 ++++++++++++++++++++++++++-------------- 3 files changed, 82 insertions(+), 48 deletions(-) diff --git a/rvgo/fast/vm.go b/rvgo/fast/vm.go index 64a009e..daf85d4 100644 --- a/rvgo/fast/vm.go +++ b/rvgo/fast/vm.go @@ -390,28 +390,39 @@ func (inst *InstrumentedState) riscvStep() (outErr error) { // A1 = n (length) length := getRegister(toU64(11)) // A2 = prot (memory protection type, can ignore) - // A3 = flags (shared with other process and or written back to file, can ignore) // TODO maybe assert the MAP_ANONYMOUS flag is set + // A3 = flags (shared with other process and or written back to file) + flags := getRegister(toU64(13)) // A4 = fd (file descriptor, can ignore because we support anon memory only) + fd := getRegister(toU64(14)) // A5 = offset (offset in file, we don't support any non-anon memory, so we can ignore this) - // ignore: prot, flags, fd, offset - switch addr { - case 0: - // No hint, allocate it ourselves, by as much as the requested length. - // Increase the length to align it with desired page size if necessary. - align := and64(length, shortToU64(4095)) - if align != 0 { - length = add64(length, sub64(shortToU64(4096), align)) + errCode := toU64(0) + + // ensure MAP_ANONYMOUS is set and fd == -1 + if (flags&0x20) == 0 || fd != u64Mask() { + addr = u64Mask() + errCode = toU64(0x4d) // EBADFD (0x4d): MAP_ANONYMOUS must be set and fd must be -1 + } else { + // ignore: prot, flags, fd, offset + switch addr { + case 0: + // No hint, allocate it ourselves, by as much as the requested length. + // Increase the length to align it with desired page size if necessary.
+ align := and64(length, shortToU64(4095)) + if align != 0 { + length = add64(length, sub64(shortToU64(4096), align)) + } + prevHeap := getHeap() + addr = prevHeap + setHeap(add64(prevHeap, length)) // increment heap with length + //fmt.Printf("mmap: 0x%016x (+ 0x%x increase)\n", s.Heap, length) + default: + // allow hinted memory address (leave it in A0 as return argument) + //fmt.Printf("mmap: 0x%016x (0x%x allowed)\n", addr, length) } - prevHeap := getHeap() - setRegister(toU64(10), prevHeap) - setHeap(add64(prevHeap, length)) // increment heap with length - //fmt.Printf("mmap: 0x%016x (+ 0x%x increase)\n", s.Heap, length) - default: - // allow hinted memory address (leave it in A0 as return argument) - //fmt.Printf("mmap: 0x%016x (0x%x allowed)\n", addr, length) } - setRegister(toU64(11), toU64(0)) // no error + setRegister(toU64(10), addr) + setRegister(toU64(11), errCode) case riscv.SysRead: // read fd := getRegister(toU64(10)) // A0 = fd addr := getRegister(toU64(11)) // A1 = *buf addr diff --git a/rvgo/slow/vm.go b/rvgo/slow/vm.go index 90ba0f1..b75d1ce 100644 --- a/rvgo/slow/vm.go +++ b/rvgo/slow/vm.go @@ -590,28 +590,39 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err // A1 = n (length) length := getRegister(toU64(11)) // A2 = prot (memory protection type, can ignore) - // A3 = flags (shared with other process and or written back to file, can ignore) // TODO maybe assert the MAP_ANONYMOUS flag is set + // A3 = flags (shared with other process and or written back to file) + flags := getRegister(toU64(13)) // A4 = fd (file descriptor, can ignore because we support anon memory only) + fd := getRegister(toU64(14)) // A5 = offset (offset in file, we don't support any non-anon memory, so we can ignore this) - // ignore: prot, flags, fd, offset - switch addr.val() { - case 0: - // No hint, allocate it ourselves, by as much as the requested length. - // Increase the length to align it with desired page size if necessary. 
- align := and64(length, shortToU64(4095)) - if align != (U64{}) { - length = add64(length, sub64(shortToU64(4096), align)) + errCode := toU64(0) + + // ensure MAP_ANONYMOUS is set and fd == -1 + if (flags.val()&0x20) == 0 || fd != u64Mask() { + addr = u64Mask() + errCode = toU64(0x4d) // EBADFD (0x4d): MAP_ANONYMOUS must be set and fd must be -1 + } else { + // ignore: prot, flags, fd, offset + switch addr.val() { + case 0: + // No hint, allocate it ourselves, by as much as the requested length. + // Increase the length to align it with desired page size if necessary. + align := and64(length, shortToU64(4095)) + if align != (U64{}) { + length = add64(length, sub64(shortToU64(4096), align)) + } + prevHeap := getHeap() + addr = prevHeap + setHeap(add64(prevHeap, length)) // increment heap with length + //fmt.Printf("mmap: 0x%016x (+ 0x%x increase)\n", s.Heap, length) + default: + // allow hinted memory address (leave it in A0 as return argument) + //fmt.Printf("mmap: 0x%016x (0x%x allowed)\n", addr, length) } - prevHeap := getHeap() - setRegister(toU64(10), prevHeap) - setHeap(add64(prevHeap, length)) // increment heap with length - //fmt.Printf("mmap: 0x%016x (+ 0x%x increase)\n", s.Heap, length) - default: - // allow hinted memory address (leave it in A0 as return argument) - //fmt.Printf("mmap: 0x%016x (0x%x allowed)\n", addr, length) } - setRegister(toU64(11), toU64(0)) // no error + setRegister(toU64(10), addr) + setRegister(toU64(11), errCode) case riscv.SysRead: // read fd := getRegister(toU64(10)) // A0 = fd addr := getRegister(toU64(11)) // A1 = *buf addr diff --git a/rvsol/src/RISCV.sol b/rvsol/src/RISCV.sol index 4467c0f..44b8b9d 100644 --- a/rvsol/src/RISCV.sol +++ b/rvsol/src/RISCV.sol @@ -899,26 +899,38 @@ contract RISCV { // A1 = n (length) let length := getRegister(toU64(11)) // A2 = prot (memory protection type, can ignore) - // A3 = flags (shared with other process and or written back to file, can ignore) // TODO maybe - // assert the MAP_ANONYMOUS flag is set + // A3 = flags (shared with other
process and or written back to file) + let flags := getRegister(toU64(13)) // A4 = fd (file descriptor, can ignore because we support anon memory only) + let fd := getRegister(toU64(14)) // A5 = offset (offset in file, we don't support any non-anon memory, so we can ignore this) - // ignore: prot, flags, fd, offset - switch addr - case 0 { - // No hint, allocate it ourselves, by as much as the requested length. - // Increase the length to align it with desired page size if necessary. - let align := and64(length, shortToU64(4095)) - if align { length := add64(length, sub64(shortToU64(4096), align)) } - let prevHeap := getHeap() - setRegister(toU64(10), prevHeap) - setHeap(add64(prevHeap, length)) // increment heap with length + + let errCode := 0 + // ensure MAP_ANONYMOUS is set and fd == -1 + switch or(iszero(and(flags, 0x20)), not(eq(fd, u64Mask()))) + case 1 { + addr := u64Mask() + errCode := toU64(0x4d) } default { - // allow hinted memory address (leave it in A0 as return argument) + switch addr + case 0 { + // No hint, allocate it ourselves, by as much as the requested length. + // Increase the length to align it with desired page size if necessary. 
+ let align := and64(length, shortToU64(4095)) + if align { length := add64(length, sub64(shortToU64(4096), align)) } + let prevHeap := getHeap() + addr := prevHeap + setHeap(add64(prevHeap, length)) // increment heap with length + } + default { + // allow hinted memory address (leave it in A0 as return argument) + } } - setRegister(toU64(11), toU64(0)) // no error + + setRegister(toU64(10), addr) + setRegister(toU64(11), errCode) } case 63 { // read From e5281748fddbc45ed281e50d6294aabb31a78c73 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Wed, 16 Oct 2024 17:31:03 -0600 Subject: [PATCH 2/9] Add address alignment check to RV32A and RV32A atomic operations extension --- rvgo/fast/vm.go | 4 ++- rvgo/slow/vm.go | 4 ++- rvsol/src/RISCV.sol | 5 ++- rvsol/test/RISCV.t.sol | 70 +++++++++++++++++++++++++++++++----------- 4 files changed, 62 insertions(+), 21 deletions(-) diff --git a/rvgo/fast/vm.go b/rvgo/fast/vm.go index daf85d4..49591a6 100644 --- a/rvgo/fast/vm.go +++ b/rvgo/fast/vm.go @@ -905,7 +905,9 @@ func (inst *InstrumentedState) riscvStep() (outErr error) { revertWithCode(riscv.ErrBadAMOSize, fmt.Errorf("bad AMO size: %d", size)) } addr := getRegister(rs1) - // TODO check if addr is aligned + if addr&3 != 0 { // quick addr alignment check + revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("addr %d not aligned with 4 bytes", addr)) + } op := shr64(toU64(2), funct7) switch op { diff --git a/rvgo/slow/vm.go b/rvgo/slow/vm.go index b75d1ce..98e33ed 100644 --- a/rvgo/slow/vm.go +++ b/rvgo/slow/vm.go @@ -1076,7 +1076,9 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err revertWithCode(riscv.ErrBadAMOSize, fmt.Errorf("bad AMO size: %d", size)) } addr := getRegister(rs1) - // TODO check if addr is aligned + if and64(addr, toU64(3)) != (U64{}) { // quick addr alignment check + revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("addr %d not aligned with 4 bytes", addr)) + } op := shr64(toU64(2), funct7) switch op.val() { diff --git 
a/rvsol/src/RISCV.sol b/rvsol/src/RISCV.sol index 44b8b9d..80126d7 100644 --- a/rvsol/src/RISCV.sol +++ b/rvsol/src/RISCV.sol @@ -1571,7 +1571,10 @@ contract RISCV { if or(lt64(size, toU64(4)), gt64(size, toU64(8))) { revertWithCode(0xbada70) } // bad AMO size let addr := getRegister(rs1) - // TODO check if addr is aligned + if and64(addr, toU64(3)) { + // quick addr alignment check + revertWithCode(0xbad10ad0) // addr not aligned with 4 bytes + } let op := shr64(toU64(2), funct7) switch op diff --git a/rvsol/test/RISCV.t.sol b/rvsol/test/RISCV.t.sol index 7f1cb3f..7e6d146 100644 --- a/rvsol/test/RISCV.t.sol +++ b/rvsol/test/RISCV.t.sol @@ -628,7 +628,7 @@ contract RISCV_Test is CommonTest { function test_lrw_succeeds() public { bytes32 value = hex"1e0acbdd44d41d85"; - uint64 addr = 0x233f3d38d3ce666b; + uint64 addr = 0x233f3d38d3ce6668; uint8 funct3 = 0x2; uint8 funct7 = encodeFunct7(0x2, 0x0, 0x0); uint8 size = uint8(1 << (funct3 & 0x3)); @@ -678,7 +678,7 @@ contract RISCV_Test is CommonTest { } function test_amoswapw_succeeds() public { - uint64 addr = 0x44c23256360226b2; + uint64 addr = 0x44c23256360226b0; uint32 insn; uint8 size; { @@ -709,7 +709,7 @@ contract RISCV_Test is CommonTest { } function test_amoaddw_succeeds() public { - uint64 addr = 0xbf1cd3785c3b5e3; + uint64 addr = 0xbf1cd3785c3b5e0; uint32 insn; uint8 size; { @@ -746,7 +746,7 @@ contract RISCV_Test is CommonTest { } function test_amoxorw_succeeds() public { - uint64 addr = 0xd9a8dd911b0547cd; + uint64 addr = 0xd9a8dd911b0547cc; uint32 insn; uint8 size; { @@ -783,7 +783,7 @@ contract RISCV_Test is CommonTest { } function test_amoandw_succeeds() public { - uint64 addr = 0x5519c1cd82d36829; + uint64 addr = 0x5519c1cd82d36828; uint32 insn; uint8 size; { @@ -819,7 +819,7 @@ contract RISCV_Test is CommonTest { } function test_amoorw_succeeds() public { - uint64 addr = 0x2dbd6638ebe8a251; + uint64 addr = 0x2dbd6638ebe8a250; uint32 insn; uint8 size; { @@ -856,7 +856,7 @@ contract RISCV_Test is 
CommonTest { } function test_amominw_succeeds() public { - uint64 addr = 0xbb0517653427ed99; + uint64 addr = 0xbb0517653427ed98; uint32 insn; uint8 size; { @@ -888,7 +888,7 @@ contract RISCV_Test is CommonTest { } function test_amomaxw_succeeds() public { - uint64 addr = 0xb320adad61ff64b9; + uint64 addr = 0xb320adad61ff64b8; uint32 insn; uint8 size; { @@ -920,7 +920,7 @@ contract RISCV_Test is CommonTest { } function test_amominuw_succeeds() public { - uint64 addr = 0xc00b31ae34210acb; + uint64 addr = 0xc00b31ae34210ac8; uint32 insn; uint8 size; { @@ -953,7 +953,7 @@ contract RISCV_Test is CommonTest { } function test_amomaxuw_succeeds() public { - uint64 addr = 0xca0b8f3993fbb896; + uint64 addr = 0xca0b8f3993fbb894; uint32 insn; uint8 size; { @@ -987,7 +987,7 @@ contract RISCV_Test is CommonTest { function test_lrd_succeeds() public { bytes32 value = hex"a0b1df92a49eec39"; - uint64 addr = 0xb86a394544c084ef; + uint64 addr = 0xb86a394544c084ec; uint8 funct3 = 0x3; uint8 funct7 = encodeFunct7(0x2, 0x0, 0x0); uint8 size = uint8(1 << (funct3 & 0x3)); @@ -1036,7 +1036,7 @@ contract RISCV_Test is CommonTest { } function test_amoswapd_succeeds() public { - uint64 addr = 0x15f4716cd3aa7306; + uint64 addr = 0x15f4716cd3aa7308; uint32 insn; uint8 size; { @@ -1067,7 +1067,7 @@ contract RISCV_Test is CommonTest { } function test_amoaddd_succeeds() public { - uint64 addr = 0xeae426a36ff2bb67; + uint64 addr = 0xeae426a36ff2bb64; uint32 insn; uint8 size; { @@ -1162,7 +1162,7 @@ contract RISCV_Test is CommonTest { } function test_amoord_succeeds() public { - uint64 addr = 0xa0d7a5ea65b35666; + uint64 addr = 0xa0d7a5ea65b35664; uint32 insn; uint8 size; { @@ -1194,7 +1194,7 @@ contract RISCV_Test is CommonTest { } function test_amomind_succeeds() public { - uint64 addr = 0x1f817b9eab194b3; + uint64 addr = 0x1f817b9eab194b0; uint32 insn; uint8 size; { @@ -1258,7 +1258,7 @@ contract RISCV_Test is CommonTest { } function test_amominud_succeeds() public { - uint64 addr = 
0xe094be571f4baca6; + uint64 addr = 0xe094be571f4baca4; uint32 insn; uint8 size; { @@ -1290,7 +1290,7 @@ contract RISCV_Test is CommonTest { } function test_amomaxud_succeeds() public { - uint64 addr = 0x2bcfe03b376a17e2; + uint64 addr = 0x2bcfe03b376a17e0; uint32 insn; uint8 size; { @@ -2403,8 +2403,42 @@ contract RISCV_Test is CommonTest { riscv.step(encodedState, proof, 0); } + function test_unaligned_address() public { + uint64 addr = 0xeae426a36ff2bb65; // unaligned address + + // Valid amoadd instr + uint32 insn; + uint8 size; + { + uint8 funct3 = 0x3; + uint8 funct7 = encodeFunct7(0x0, 0x0, 0x0); + size = uint8(1 << (funct3 & 0x3)); + insn = encodeRType(0x2f, 14, funct3, 8, 28, funct7); // amoaddd x14, x28, (x8) + } + (, uint64 rs2ValueU64) = truncate(hex"a0821b98f6c0d237", size); + (bytes32 memValueBytes32, uint64 memValueU64) = truncate(hex"f47daefa285404dc", size); + (State memory state, bytes memory proof) = constructRISCVState(0, insn, addr, memValueBytes32); + state.registers[8] = addr; + state.registers[28] = rs2ValueU64; + bytes memory encodedState = encodeState(state); + + State memory expect; + // check memory stores value of M[x[rs1]] + x[rs2] + bytes32 result = uint256ToBytes32(uint256(uint128(int128(int64(rs2ValueU64)) + int128(int64(memValueU64))))); + (expect.memRoot,) = ffi.getAsteriscMemoryProof(0, insn, addr, result); + expect.pc = state.pc + 4; + expect.step = state.step + 1; + // check rd value stores original mem value. 
+ expect.registers[14] = memValueU64; + expect.registers[8] = state.registers[8]; + expect.registers[28] = state.registers[28]; + + vm.expectRevert(hex"00000000000000000000000000000000000000000000000000000000bad10ad0"); + riscv.step(encodedState, proof, 0); + } + function test_unknown_atomic_operation() public { - uint64 addr = 0xeae426a36ff2bb67; + uint64 addr = 0xeae426a36ff2bb64; uint32 insn; uint8 size; { From 0a316942f981fb2e07a7ff6d461f9900349778a1 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Wed, 16 Oct 2024 17:35:04 -0600 Subject: [PATCH 3/9] Remove CSR related code --- rvgo/fast/parse.go | 4 ---- rvgo/fast/vm.go | 32 +------------------------------- rvgo/riscv/constants.go | 1 - rvgo/slow/parse.go | 4 ---- rvgo/slow/vm.go | 34 ++-------------------------------- rvsol/src/RISCV.sol | 39 ++------------------------------------- 6 files changed, 5 insertions(+), 109 deletions(-) diff --git a/rvgo/fast/parse.go b/rvgo/fast/parse.go index 629d38f..ae32aa6 100644 --- a/rvgo/fast/parse.go +++ b/rvgo/fast/parse.go @@ -75,7 +75,3 @@ func parseRs2(instr U64) U64 { func parseFunct7(instr U64) U64 { return shr64(toU64(25), instr) } - -func parseCSSR(instr U64) U64 { - return shr64(toU64(20), instr) -} diff --git a/rvgo/fast/vm.go b/rvgo/fast/vm.go index 49591a6..44d25a0 100644 --- a/rvgo/fast/vm.go +++ b/rvgo/fast/vm.go @@ -269,29 +269,6 @@ func (inst *InstrumentedState) riscvStep() (outErr error) { // // CSR (control and status registers) functions // - readCSR := func(num U64) U64 { - // TODO: do we need CSR? - return toU64(0) - } - - writeCSR := func(num U64, v U64) { - // TODO: do we need CSR? 
- } - - updateCSR := func(num U64, v U64, mode U64) (out U64) { - out = readCSR(num) - switch mode { - case 1: // ?01 = CSRRW(I) - case 2: // ?10 = CSRRS(I) - v = or64(out, v) - case 3: // ?11 = CSRRC(I) - v = and64(out, not64(v)) - default: - revertWithCode(riscv.ErrUnknownCSRMode, fmt.Errorf("unknown CSR mode: %d", mode)) - } - writeCSR(num, v) - return - } // // Preimage oracle interactions @@ -878,14 +855,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) { setPC(add64(pc, toU64(4))) // ignore breakpoint } default: // CSR instructions - imm := parseCSSR(instr) - value := rs1 - if iszero64(and64(funct3, toU64(4))) { - value = getRegister(rs1) - } - mode := and64(funct3, toU64(3)) - rdValue := updateCSR(imm, value, mode) - setRegister(rd, rdValue) + setRegister(rd, 0) // ignore CSR instructions setPC(add64(pc, toU64(4))) } case 0x2F: // 010_1111: RV32A and RV32A atomic operations extension diff --git a/rvgo/riscv/constants.go b/rvgo/riscv/constants.go index 9360e86..4ff9677 100644 --- a/rvgo/riscv/constants.go +++ b/rvgo/riscv/constants.go @@ -51,7 +51,6 @@ const ( ErrUnexpectedRProofLoad = uint64(0xbad22220) ErrUnexpectedRProofStoreUnaligned = uint64(0xbad22221) ErrUnexpectedRProofStore = uint64(0xbad2222f) - ErrUnknownCSRMode = uint64(0xbadc0de0) ErrBadAMOSize = uint64(0xbada70) ErrFailToReadPreimage = uint64(0xbadf00d0) ErrBadMemoryProof = uint64(0xbadf00d1) diff --git a/rvgo/slow/parse.go b/rvgo/slow/parse.go index 3b714df..7ca4f17 100644 --- a/rvgo/slow/parse.go +++ b/rvgo/slow/parse.go @@ -75,7 +75,3 @@ func parseRs2(instr U64) U64 { func parseFunct7(instr U64) U64 { return shr64(toU64(25), instr) } - -func parseCSSR(instr U64) U64 { - return shr64(toU64(20), instr) -} diff --git a/rvgo/slow/vm.go b/rvgo/slow/vm.go index 98e33ed..7893874 100644 --- a/rvgo/slow/vm.go +++ b/rvgo/slow/vm.go @@ -457,29 +457,6 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err // // CSR (control and status registers) functions // - 
readCSR := func(num U64) U64 { - // TODO: do we need CSR? - return toU64(0) - } - - writeCSR := func(num U64, v U64) { - // TODO: do we need CSR? - } - - updateCSR := func(num U64, v U64, mode U64) (out U64) { - out = readCSR(num) - switch mode.val() { - case 1: // ?01 = CSRRW(I) - case 2: // ?10 = CSRRS(I) - v = or64(out, v) - case 3: // ?11 = CSRRC(I) - v = and64(out, not64(v)) - default: - revertWithCode(riscv.ErrUnknownCSRMode, fmt.Errorf("unknown CSR mode: %d", mode.val())) - } - writeCSR(num, v) - return - } // // Preimage oracle interactions @@ -1048,15 +1025,8 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err default: // imm12 = 000000000001 EBREAK setPC(add64(pc, toU64(4))) // ignore breakpoint } - default: // CSR instructions - imm := parseCSSR(instr) - value := rs1 - if iszero64(and64(funct3, toU64(4))) { - value = getRegister(rs1) - } - mode := and64(funct3, toU64(3)) - rdValue := updateCSR(imm, value, mode) - setRegister(rd, rdValue) + default: // ignore CSR instructions + setRegister(rd, toU64(0)) // ignore CSR instructions setPC(add64(pc, toU64(4))) } case 0x2F: // 010_1111: RV32A and RV32A atomic operations extension diff --git a/rvsol/src/RISCV.sol b/rvsol/src/RISCV.sol index 80126d7..4f4ebee 100644 --- a/rvsol/src/RISCV.sol +++ b/rvsol/src/RISCV.sol @@ -548,10 +548,6 @@ contract RISCV { out := shr64(toU64(25), instr) } - function parseCSRR(instr) -> out { - out := shr64(toU64(20), instr) - } - // // Memory functions // @@ -737,31 +733,6 @@ contract RISCV { // // CSR (control and status registers) functions // - function readCSR(num) -> out { - out := 0 // just return zero, CSR is not supported, but may be in the future. 
- } - - function writeCSR(num, v) { - // no-op - } - - function updateCSR(num, v, mode) -> out { - out := readCSR(num) - switch mode - case 1 { // ?01 = CSRRW(I) - } - case 2 { - // ?10 = CSRRS(I) - v := or64(out, v) - } - case 3 { - // ?11 = CSRRC(I) - v := and64(out, not64(v)) - } - default { revertWithCode(0xbadc0de0) } // unknown CSR mode - - writeCSR(num, v) - } // // Preimage oracle interactions @@ -1543,14 +1514,8 @@ contract RISCV { setPC(add64(_pc, toU64(4))) // ignore breakpoint } } - default { - // CSR instructions - let imm := parseCSRR(instr) - let value := rs1 - if iszero64(and64(funct3, toU64(4))) { value := getRegister(rs1) } - let mode := and64(funct3, toU64(3)) - let rdValue := updateCSR(imm, value, mode) - setRegister(rd, rdValue) + default { // CSR instructions + setRegister(rd, toU64(0)) // ignore CSR instructions setPC(add64(_pc, toU64(4))) } } From 02c7297dadc7b0604503a9a4b4a67f9124ee9d5f Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Wed, 16 Oct 2024 17:38:54 -0600 Subject: [PATCH 4/9] Rename SetUnaligned to SetAligned as we only do aligned writes --- rvgo/fast/memory.go | 18 +------- rvgo/fast/memory_test.go | 44 +++++++++--------- rvgo/fast/vm.go | 14 +++--- rvgo/scripts/go-ffi/differential-testing.go | 4 +- rvgo/test/syscall_test.go | 50 ++++++++++----------- 5 files changed, 57 insertions(+), 73 deletions(-) diff --git a/rvgo/fast/memory.go b/rvgo/fast/memory.go index 50ac86a..8b45ee7 100644 --- a/rvgo/fast/memory.go +++ b/rvgo/fast/memory.go @@ -180,8 +180,7 @@ func (m *Memory) pageLookup(pageIndex uint64) (*CachedPage, bool) { return p, ok } -// TODO: we never do unaligned writes, this should be simplified -func (m *Memory) SetUnaligned(addr uint64, dat []byte) { +func (m *Memory) SetAligned(addr uint64, dat []byte) { if len(dat) > 32 { panic("cannot set more than 32 bytes") } @@ -200,21 +199,6 @@ func (m *Memory) SetUnaligned(addr uint64, dat []byte) { if d == len(dat) { return // if all the data fitted in the page, we're done } - - 
// continue to remaining part - addr += uint64(d) - pageIndex = addr >> PageAddrSize - pageAddr = addr & PageAddrMask - p, ok = m.pageLookup(pageIndex) - if !ok { - // allocate the page if we have not already. - // Go may mmap relatively large ranges, but we only allocate the pages just in time. - p = m.AllocPage(pageIndex) - } else { - m.Invalidate(addr) // invalidate this branch of memory, now that the value changed - } - - copy(p.Data[pageAddr:], dat) } func (m *Memory) GetUnaligned(addr uint64, dest []byte) { diff --git a/rvgo/fast/memory_test.go b/rvgo/fast/memory_test.go index 0c4a955..fcd78f4 100644 --- a/rvgo/fast/memory_test.go +++ b/rvgo/fast/memory_test.go @@ -15,7 +15,7 @@ import ( func TestMemoryMerkleProof(t *testing.T) { t.Run("nearly empty tree", func(t *testing.T) { m := NewMemory() - m.SetUnaligned(0x10000, []byte{0xaa, 0xbb, 0xcc, 0xdd}) + m.SetAligned(0x10000, []byte{0xaa, 0xbb, 0xcc, 0xdd}) proof := m.MerkleProof(0x10000) require.Equal(t, uint32(0xaabbccdd), binary.BigEndian.Uint32(proof[:4])) for i := 0; i < 32-5; i++ { @@ -24,9 +24,9 @@ func TestMemoryMerkleProof(t *testing.T) { }) t.Run("fuller tree", func(t *testing.T) { m := NewMemory() - m.SetUnaligned(0x10000, []byte{0xaa, 0xbb, 0xcc, 0xdd}) - m.SetUnaligned(0x80004, []byte{42}) - m.SetUnaligned(0x13370000, []byte{123}) + m.SetAligned(0x10000, []byte{0xaa, 0xbb, 0xcc, 0xdd}) + m.SetAligned(0x80004, []byte{42}) + m.SetAligned(0x13370000, []byte{123}) root := m.MerkleRoot() proof := m.MerkleProof(0x80004) require.Equal(t, uint32(42<<24), binary.BigEndian.Uint32(proof[4:8])) @@ -53,35 +53,35 @@ func TestMemoryMerkleRoot(t *testing.T) { }) t.Run("empty page", func(t *testing.T) { m := NewMemory() - m.SetUnaligned(0xF000, []byte{0}) + m.SetAligned(0xF000, []byte{0}) root := m.MerkleRoot() require.Equal(t, zeroHashes[64-5], root, "fully zeroed memory should have expected zero hash") }) t.Run("single page", func(t *testing.T) { m := NewMemory() - m.SetUnaligned(0xF000, []byte{1}) + 
m.SetAligned(0xF000, []byte{1}) root := m.MerkleRoot() require.NotEqual(t, zeroHashes[64-5], root, "non-zero memory") }) t.Run("repeat zero", func(t *testing.T) { m := NewMemory() - m.SetUnaligned(0xF000, []byte{0}) - m.SetUnaligned(0xF004, []byte{0}) + m.SetAligned(0xF000, []byte{0}) + m.SetAligned(0xF004, []byte{0}) root := m.MerkleRoot() require.Equal(t, zeroHashes[64-5], root, "zero still") }) t.Run("two empty pages", func(t *testing.T) { m := NewMemory() - m.SetUnaligned(PageSize*3, []byte{0}) - m.SetUnaligned(PageSize*10, []byte{0}) + m.SetAligned(PageSize*3, []byte{0}) + m.SetAligned(PageSize*10, []byte{0}) root := m.MerkleRoot() require.Equal(t, zeroHashes[64-5], root, "zero still") }) t.Run("random few pages", func(t *testing.T) { m := NewMemory() - m.SetUnaligned(PageSize*3, []byte{1}) - m.SetUnaligned(PageSize*5, []byte{42}) - m.SetUnaligned(PageSize*6, []byte{123}) + m.SetAligned(PageSize*3, []byte{1}) + m.SetAligned(PageSize*5, []byte{42}) + m.SetAligned(PageSize*6, []byte{123}) p3 := m.MerkleizeSubtree((1 << PageKeySize) | 3) p5 := m.MerkleizeSubtree((1 << PageKeySize) | 5) p6 := m.MerkleizeSubtree((1 << PageKeySize) | 6) @@ -101,11 +101,11 @@ func TestMemoryMerkleRoot(t *testing.T) { }) t.Run("invalidate page", func(t *testing.T) { m := NewMemory() - m.SetUnaligned(0xF000, []byte{0}) + m.SetAligned(0xF000, []byte{0}) require.Equal(t, zeroHashes[64-5], m.MerkleRoot(), "zero at first") - m.SetUnaligned(0xF004, []byte{1}) + m.SetAligned(0xF004, []byte{1}) require.NotEqual(t, zeroHashes[64-5], m.MerkleRoot(), "non-zero") - m.SetUnaligned(0xF004, []byte{0}) + m.SetAligned(0xF004, []byte{0}) require.Equal(t, zeroHashes[64-5], m.MerkleRoot(), "zero again") }) } @@ -141,22 +141,22 @@ func TestMemoryReadWrite(t *testing.T) { t.Run("read-write", func(t *testing.T) { m := NewMemory() - m.SetUnaligned(12, []byte{0xAA, 0xBB, 0xCC, 0xDD, 0xEE}) + m.SetAligned(12, []byte{0xAA, 0xBB, 0xCC, 0xDD, 0xEE}) var tmp [5]byte m.GetUnaligned(12, tmp[:]) require.Equal(t, 
[5]byte{0xAA, 0xBB, 0xCC, 0xDD, 0xEE}, tmp) - m.SetUnaligned(12, []byte{0xAA, 0xBB, 0x1C, 0xDD, 0xEE}) + m.SetAligned(12, []byte{0xAA, 0xBB, 0x1C, 0xDD, 0xEE}) m.GetUnaligned(12, tmp[:]) require.Equal(t, [5]byte{0xAA, 0xBB, 0x1C, 0xDD, 0xEE}, tmp) }) t.Run("read-write-unaligned", func(t *testing.T) { m := NewMemory() - m.SetUnaligned(13, []byte{0xAA, 0xBB, 0xCC, 0xDD, 0xEE}) + m.SetAligned(13, []byte{0xAA, 0xBB, 0xCC, 0xDD, 0xEE}) var tmp [5]byte m.GetUnaligned(13, tmp[:]) require.Equal(t, [5]byte{0xAA, 0xBB, 0xCC, 0xDD, 0xEE}, tmp) - m.SetUnaligned(13, []byte{0xAA, 0xBB, 0x1C, 0xDD, 0xEE}) + m.SetAligned(13, []byte{0xAA, 0xBB, 0x1C, 0xDD, 0xEE}) m.GetUnaligned(13, tmp[:]) require.Equal(t, [5]byte{0xAA, 0xBB, 0x1C, 0xDD, 0xEE}, tmp) }) @@ -164,7 +164,7 @@ func TestMemoryReadWrite(t *testing.T) { func TestMemoryJSON(t *testing.T) { m := NewMemory() - m.SetUnaligned(8, []byte{123}) + m.SetAligned(8, []byte{123}) dat, err := json.Marshal(m) require.NoError(t, err) var res Memory @@ -176,7 +176,7 @@ func TestMemoryJSON(t *testing.T) { func TestMemoryBinary(t *testing.T) { m := NewMemory() - m.SetUnaligned(8, []byte{123}) + m.SetAligned(8, []byte{123}) ser := new(bytes.Buffer) err := m.Serialize(ser) require.NoError(t, err, "must serialize state") diff --git a/rvgo/fast/vm.go b/rvgo/fast/vm.go index 44d25a0..f6d4feb 100644 --- a/rvgo/fast/vm.go +++ b/rvgo/fast/vm.go @@ -178,7 +178,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) { panic(fmt.Errorf("addr %d not aligned with 32 bytes", addr)) } inst.verifyMemChange(addr, proofIndex) - s.Memory.SetUnaligned(addr, v[:]) + s.Memory.SetAligned(addr, v[:]) } // load unaligned, optionally signed, little-endian, integer of 1 ... 
8 bytes from memory @@ -220,7 +220,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) { } inst.verifyMemChange(leftAddr, proofIndexL) if (addr+size-1)&^31 == addr&^31 { // if aligned - s.Memory.SetUnaligned(addr, bytez[:size]) + s.Memory.SetAligned(addr, bytez[:size]) return } if proofIndexR == 0xff { @@ -229,12 +229,12 @@ func (inst *InstrumentedState) riscvStep() (outErr error) { // if not aligned rightAddr := leftAddr + 32 leftSize := rightAddr - addr - s.Memory.SetUnaligned(addr, bytez[:leftSize]) + s.Memory.SetAligned(addr, bytez[:leftSize]) if verifyR { inst.trackMemAccess(rightAddr, proofIndexR) } inst.verifyMemChange(rightAddr, proofIndexR) - s.Memory.SetUnaligned(rightAddr, bytez[leftSize:size]) + s.Memory.SetAligned(rightAddr, bytez[leftSize:size]) } storeMem := func(addr U64, size U64, value U64, proofIndexL uint8, proofIndexR uint8, verifyL bool, verifyR bool) { @@ -249,7 +249,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) { } inst.verifyMemChange(leftAddr, proofIndexL) if (addr+size-1)&^31 == addr&^31 { // if aligned - s.Memory.SetUnaligned(addr, bytez[:size]) + s.Memory.SetAligned(addr, bytez[:size]) return } // if not aligned @@ -258,12 +258,12 @@ func (inst *InstrumentedState) riscvStep() (outErr error) { } rightAddr := leftAddr + 32 leftSize := rightAddr - addr - s.Memory.SetUnaligned(addr, bytez[:leftSize]) + s.Memory.SetAligned(addr, bytez[:leftSize]) if verifyR { inst.trackMemAccess(rightAddr, proofIndexR) } inst.verifyMemChange(rightAddr, proofIndexR) - s.Memory.SetUnaligned(rightAddr, bytez[leftSize:size]) + s.Memory.SetAligned(rightAddr, bytez[leftSize:size]) } // diff --git a/rvgo/scripts/go-ffi/differential-testing.go b/rvgo/scripts/go-ffi/differential-testing.go index eceefb2..bd02638 100644 --- a/rvgo/scripts/go-ffi/differential-testing.go +++ b/rvgo/scripts/go-ffi/differential-testing.go @@ -47,7 +47,7 @@ func DiffTestUtils() { checkErr(err, "Error decoding insn") instBytes := make([]byte, 4) 
binary.LittleEndian.PutUint32(instBytes, uint32(insn)) - mem.SetUnaligned(uint64(pc), instBytes) + mem.SetAligned(uint64(pc), instBytes) // proof size: 64-5+1=60 (a 64-bit mem-address branch to 32 byte leaf, incl leaf itself), all 32 bytes // 60 * 32 = 1920 @@ -57,7 +57,7 @@ func DiffTestUtils() { checkErr(err, "Error decoding memAddr") memValue, err := hex.DecodeString(strings.TrimPrefix(args[4], "0x")) checkErr(err, "Error decoding memValue") - mem.SetUnaligned(uint64(memAddr), memValue) + mem.SetAligned(uint64(memAddr), memValue) memProof = mem.MerkleProof(uint64(memAddr)) } insnProof = mem.MerkleProof(uint64(pc)) diff --git a/rvgo/test/syscall_test.go b/rvgo/test/syscall_test.go index 9336297..da3a570 100644 --- a/rvgo/test/syscall_test.go +++ b/rvgo/test/syscall_test.go @@ -93,7 +93,7 @@ func TestStateSyscallUnsupported(t *testing.T) { Registers: [32]uint64{17: uint64(syscall)}, Step: 0, } - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) fastState := fast.NewInstrumentedState(state, nil, os.Stdout, os.Stderr) stepWitness, err := fastState.Step(true) @@ -265,7 +265,7 @@ func TestEVMSysWriteHint(t *testing.T) { err := state.Memory.SetMemoryRange(uint64(tt.memOffset), bytes.NewReader(tt.hintData)) require.NoError(t, err) - state.Memory.SetUnaligned(0, syscallInsn) + state.Memory.SetAligned(0, syscallInsn) fastState := fast.NewInstrumentedState(state, &oracle, os.Stdout, os.Stderr) stepWitness, err := fastState.Step(true) @@ -296,7 +296,7 @@ func FuzzStateSyscallExit(f *testing.F) { Registers: [32]uint64{17: uint64(syscall), 10: uint64(exitCode)}, Step: step, } - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() preStateRegisters := state.Registers @@ -342,7 +342,7 @@ func FuzzStateSyscallBrk(f *testing.F) { Registers: [32]uint64{17: riscv.SysBrk}, Step: step, } - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) 
preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers expectedRegisters[10] = 1 << 30 @@ -387,7 +387,7 @@ func FuzzStateSyscallMmap(f *testing.F) { Registers: [32]uint64{17: riscv.SysMmap, 10: addr, 11: length}, Step: step, } - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers expectedRegisters[11] = 0 @@ -438,7 +438,7 @@ func FuzzStateSyscallFcntl(f *testing.F) { Registers: [32]uint64{17: riscv.SysFcntl, 10: fd, 11: cmd}, Step: step, } - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers expectedRegisters[10] = out @@ -510,7 +510,7 @@ func FuzzStateSyscallOpenat(f *testing.F) { Registers: [32]uint64{17: riscv.SysOpenat}, Step: step, } - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers expectedRegisters[10] = 0xFFFF_FFFF_FFFF_FFFF @@ -552,7 +552,7 @@ func FuzzStateSyscallClockGettime(f *testing.F) { Registers: [32]uint64{17: riscv.SysClockGettime, 11: addr}, Step: step, } - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) expectedRegisters := state.Registers expectedRegisters[11] = 0 @@ -562,11 +562,11 @@ func FuzzStateSyscallClockGettime(f *testing.F) { require.False(t, stepWitness.HasPreimage()) postMemory := fast.NewMemory() - postMemory.SetUnaligned(pc, syscallInsn) + postMemory.SetAligned(pc, syscallInsn) var bytes [8]byte binary.LittleEndian.PutUint64(bytes[:], 1337) - postMemory.SetUnaligned(addr, bytes[:]) - postMemory.SetUnaligned(addr+8, []byte{42, 0, 0, 0, 0, 0, 0, 0}) + postMemory.SetAligned(addr, bytes[:]) + postMemory.SetAligned(addr+8, []byte{42, 0, 0, 0, 0, 0, 0, 0}) require.Equal(t, pc+4, state.PC) // PC must advance require.Equal(t, uint64(0), 
state.Heap) @@ -599,7 +599,7 @@ func FuzzStateSyscallClone(f *testing.F) { Registers: [32]uint64{17: riscv.SysClone}, Step: step, } - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers expectedRegisters[10] = 1 @@ -641,7 +641,7 @@ func FuzzStateSyscallGetrlimit(f *testing.F) { Registers: [32]uint64{17: riscv.SysGetrlimit, 10: 7, 11: addr}, Step: step, } - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) expectedRegisters := state.Registers expectedRegisters[10] = 0 expectedRegisters[11] = 0 @@ -652,11 +652,11 @@ func FuzzStateSyscallGetrlimit(f *testing.F) { require.False(t, stepWitness.HasPreimage()) postMemory := fast.NewMemory() - postMemory.SetUnaligned(pc, syscallInsn) + postMemory.SetAligned(pc, syscallInsn) var bytes [8]byte binary.LittleEndian.PutUint64(bytes[:], 1024) - postMemory.SetUnaligned(addr, bytes[:]) - postMemory.SetUnaligned(addr+8, bytes[:]) + postMemory.SetAligned(addr, bytes[:]) + postMemory.SetAligned(addr+8, bytes[:]) require.Equal(t, pc+4, state.PC) // PC must advance require.Equal(t, uint64(0), state.Heap) @@ -685,7 +685,7 @@ func FuzzStateSyscallGetrlimit(f *testing.F) { Registers: [32]uint64{17: riscv.SysGetrlimit, 10: res, 11: addr}, Step: 0, } - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) fastState := fast.NewInstrumentedState(state, nil, os.Stdout, os.Stderr) stepWitness, err := fastState.Step(true) @@ -745,7 +745,7 @@ func FuzzStateSyscallNoop(f *testing.F) { Registers: [32]uint64{17: uint64(syscall), 10: arg}, Step: step, } - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers expectedRegisters[10] = 0 @@ -793,7 +793,7 @@ func FuzzStateSyscallRead(f *testing.F) { Registers: [32]uint64{17: riscv.SysRead, 10: fd, 11: addr, 12: count}, 
Step: step, } - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers expectedRegisters[10] = ret @@ -853,7 +853,7 @@ func FuzzStateHintRead(f *testing.F) { PreimageKey: preimage.Keccak256Key(crypto.Keccak256Hash(preimageData)).PreimageKey(), PreimageOffset: preimageOffset, } - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) preStatePreimageKey := state.PreimageKey preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers @@ -905,7 +905,7 @@ func FuzzStatePreimageRead(f *testing.F) { PreimageKey: preimage.Keccak256Key(crypto.Keccak256Hash(preimageData)).PreimageKey(), PreimageOffset: preimageOffset, } - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) preStatePreimageKey := state.PreimageKey preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers @@ -966,7 +966,7 @@ func FuzzStateSyscallWrite(f *testing.F) { Registers: [32]uint64{17: riscv.SysWrite, 10: fd, 11: addr, 12: count}, Step: step, } - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers expectedRegisters[10] = ret @@ -1038,7 +1038,7 @@ func FuzzStateHintWrite(f *testing.F) { require.NoError(t, err) // Set syscall instruction - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) preStatePreimageKey := state.PreimageKey preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers @@ -1095,11 +1095,11 @@ func FuzzStatePreimageWrite(f *testing.F) { Step: step, PreimageOffset: preimageOffset, } - state.Memory.SetUnaligned(pc, syscallInsn) + state.Memory.SetAligned(pc, syscallInsn) // Set preimage key to addr preimageKey := preimage.Keccak256Key(crypto.Keccak256Hash(preimageData)).PreimageKey() - 
state.Memory.SetUnaligned(addr, preimageKey[:]) + state.Memory.SetAligned(addr, preimageKey[:]) preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers From 2c5bd452bfbd827189f8f5372a5b1038552d86b8 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Thu, 17 Oct 2024 10:59:15 -0600 Subject: [PATCH 5/9] Clean up remaining todos --- rvgo/slow/vm.go | 6 +----- rvgo/test/vm_test.go | 6 +++--- rvsol/src/RISCV.sol | 5 ++--- 3 files changed, 6 insertions(+), 11 deletions(-) diff --git a/rvgo/slow/vm.go b/rvgo/slow/vm.go index 7893874..4210340 100644 --- a/rvgo/slow/vm.go +++ b/rvgo/slow/vm.go @@ -120,10 +120,6 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err copy(out[:], calldata[offset.val():]) return } - // TODO check length - // TODO check calldata stateData size - - // TODO: validate abi offset values? stateContentOffset := uint8(4 + 32 + 32 + 32 + 32) if iszero(eq(b32asBEWord(calldataload(toU64(4+32*3))), shortToU256(stateSize))) { @@ -132,11 +128,11 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err } proofContentOffset := shortToU64(uint16(stateContentOffset) + paddedStateSize + 32) + // TODO: validate abi offset values? // // State loading // - // TODO stateData := make([]byte, stateSize) copy(stateData, calldata[stateContentOffset:]) diff --git a/rvgo/test/vm_test.go b/rvgo/test/vm_test.go index 4295ae9..3dcb981 100644 --- a/rvgo/test/vm_test.go +++ b/rvgo/test/vm_test.go @@ -158,7 +158,7 @@ func TestFastStep(t *testing.T) { runTestCategory("rv64ui-p") runTestCategory("rv64um-p") runTestCategory("rv64ua-p") - //runTestCategory("benchmarks") TODO benchmarks (fix ELF bench data loading and wrap in Go benchmark?) + //runTestCategory("benchmarks") TODO benchmarks (fix ELF bench data loading and wrap in Go benchmark?) 
https://github.com/ethereum-optimism/asterisc/issues/89 } func TestSlowStep(t *testing.T) { @@ -171,7 +171,7 @@ func TestSlowStep(t *testing.T) { runTestCategory("rv64ui-p") runTestCategory("rv64um-p") runTestCategory("rv64ua-p") - //runTestCategory("benchmarks") TODO benchmarks (fix ELF bench data loading and wrap in Go benchmark?) + //runTestCategory("benchmarks") TODO benchmarks (fix ELF bench data loading and wrap in Go benchmark?) https://github.com/ethereum-optimism/asterisc/issues/89 } func TestEVMStep(t *testing.T) { @@ -184,5 +184,5 @@ func TestEVMStep(t *testing.T) { runTestCategory("rv64ui-p") runTestCategory("rv64um-p") runTestCategory("rv64ua-p") - //runTestCategory("benchmarks") TODO benchmarks (fix ELF bench data loading and wrap in Go benchmark?) + //runTestCategory("benchmarks") TODO benchmarks (fix ELF bench data loading and wrap in Go benchmark?) https://github.com/ethereum-optimism/asterisc/issues/89 } diff --git a/rvsol/src/RISCV.sol b/rvsol/src/RISCV.sol index 4f4ebee..c7f926d 100644 --- a/rvsol/src/RISCV.sol +++ b/rvsol/src/RISCV.sol @@ -349,12 +349,10 @@ contract RISCV { // State access // function readState(offset, length) -> out { - // TODO revert if more than 32 bytes out := mload(add(memStateOffset(), offset)) // note: the state variables are all big-endian encoded out := shr(shl(3, sub(32, length)), out) // shift-right to right-align data and reduce to desired length } function writeState(offset, length, data) { - // TODO revert if more than 32 bytes let memOffset := add(memStateOffset(), offset) // left-aligned mask of length bytes let mask := shl(shl(3, sub(32, length)), not(0)) @@ -782,8 +780,9 @@ contract RISCV { revertWithCode(0xbadf00d0) } + // Original implementation is at @optimism/src/cannon/PreimageKeyLib.sol + // but it cannot be used because this is inside assembly block function localize(preImageKey, localContext_) -> localizedKey { - // TODO: deduplicate definition of localize using lib // Grab the current free memory 
pointer to restore later. let ptr := mload(0x40) // Store the local data key and caller next to each other in memory for hashing. From 5d087c3308ffd88fcf871357b82571c58edc1be4 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Thu, 17 Oct 2024 15:06:18 -0600 Subject: [PATCH 6/9] Fix lint --- rvsol/src/RISCV.sol | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/rvsol/src/RISCV.sol b/rvsol/src/RISCV.sol index c7f926d..80b04d4 100644 --- a/rvsol/src/RISCV.sol +++ b/rvsol/src/RISCV.sol @@ -875,7 +875,6 @@ contract RISCV { let fd := getRegister(toU64(14)) // A5 = offset (offset in file, we don't support any non-anon memory, so we can ignore this) - let errCode := 0 // ensure MAP_ANONYMOUS is set and fd == -1 switch or(iszero(and(flags, 0x20)), not(eq(fd, u64Mask()))) @@ -1513,7 +1512,8 @@ contract RISCV { setPC(add64(_pc, toU64(4))) // ignore breakpoint } } - default { // CSR instructions + default { + // CSR instructions setRegister(rd, toU64(0)) // ignore CSR instructions setPC(add64(_pc, toU64(4))) } @@ -1536,7 +1536,7 @@ contract RISCV { let addr := getRegister(rs1) if and64(addr, toU64(3)) { - // quick addr alignment check + // quick addr alignment check revertWithCode(0xbad10ad0) // addr not aligned with 4 bytes } From 4f9d2ceb3566976ec9e9f1676b8832ee82193603 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Thu, 17 Oct 2024 22:12:50 -0600 Subject: [PATCH 7/9] Add proper flag and fd for mmap fuzz testing --- rvgo/test/syscall_test.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/rvgo/test/syscall_test.go b/rvgo/test/syscall_test.go index da3a570..87bcd91 100644 --- a/rvgo/test/syscall_test.go +++ b/rvgo/test/syscall_test.go @@ -384,8 +384,14 @@ func FuzzStateSyscallMmap(f *testing.F) { Exited: false, Memory: fast.NewMemory(), LoadReservation: 0, - Registers: [32]uint64{17: riscv.SysMmap, 10: addr, 11: length}, - Step: step, + Registers: [32]uint64{ + 17: riscv.SysMmap, + 10: addr, + 11: length, + 13: 32, // 
MAP_ANONYMOUS flag + 14: 0xFFFF_FFFF_FFFF_FFFF, // fd == -1 (u64 mask) + }, + Step: step, } state.Memory.SetAligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() From 22f0c71153ad025dd371fd923685a00e7c2acf46 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Tue, 22 Oct 2024 16:31:55 -0600 Subject: [PATCH 8/9] Add offset validation and remove unnecessary code per code review --- rvgo/fast/instrumented.go | 3 +++ rvgo/fast/memory.go | 5 +---- rvgo/fast/vm.go | 6 +----- rvgo/slow/vm.go | 12 +++++++----- rvsol/src/RISCV.sol | 5 ----- 5 files changed, 12 insertions(+), 19 deletions(-) diff --git a/rvgo/fast/instrumented.go b/rvgo/fast/instrumented.go index 8898c4d..80187cc 100644 --- a/rvgo/fast/instrumented.go +++ b/rvgo/fast/instrumented.go @@ -83,6 +83,9 @@ func (m *InstrumentedState) readPreimage(key [32]byte, offset uint64) (dat [32]b m.lastPreimage = preimage } m.lastPreimageOffset = offset + if offset >= uint64(len(preimage)) { + panic("Preimage offset out-of-bounds") + } datLen = uint64(copy(dat[:], preimage[offset:])) return } diff --git a/rvgo/fast/memory.go b/rvgo/fast/memory.go index 8b45ee7..7db1bf9 100644 --- a/rvgo/fast/memory.go +++ b/rvgo/fast/memory.go @@ -195,10 +195,7 @@ func (m *Memory) SetAligned(addr uint64, dat []byte) { m.Invalidate(addr) // invalidate this branch of memory, now that the value changed } - d := copy(p.Data[pageAddr:], dat) - if d == len(dat) { - return // if all the data fitted in the page, we're done - } + copy(p.Data[pageAddr:], dat) } func (m *Memory) GetUnaligned(addr uint64, dest []byte) { diff --git a/rvgo/fast/vm.go b/rvgo/fast/vm.go index f6d4feb..ba2f17d 100644 --- a/rvgo/fast/vm.go +++ b/rvgo/fast/vm.go @@ -266,10 +266,6 @@ func (inst *InstrumentedState) riscvStep() (outErr error) { s.Memory.SetAligned(rightAddr, bytez[leftSize:size]) } - // - // CSR (control and status registers) functions - // - // // Preimage oracle interactions // @@ -378,7 +374,7 @@ func (inst *InstrumentedState) riscvStep() (outErr 
error) { // ensure MAP_ANONYMOUS is set and fd == -1 if (flags&0x20) == 0 || fd != u64Mask() { addr = u64Mask() - errCode = toU64(0x4d) // no error + errCode = toU64(0x4d) // EBADF } else { // ignore: prot, flags, fd, offset switch addr { diff --git a/rvgo/slow/vm.go b/rvgo/slow/vm.go index 4210340..458f00c 100644 --- a/rvgo/slow/vm.go +++ b/rvgo/slow/vm.go @@ -128,7 +128,13 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err } proofContentOffset := shortToU64(uint16(stateContentOffset) + paddedStateSize + 32) - // TODO: validate abi offset values? + + if and(b32asBEWord(calldataload(shortToU64(uint16(stateContentOffset)+paddedStateSize))), shortToU256(60-1)) != (U256{}) { + // proof offset must be stateContentOffset+paddedStateSize+32 + // proof size: 64-5+1=60 * 32 byte leaf, + // but multiple memProof can be used, so the proofSize must be a multiple of 60 + panic("invalid proof offset input") + } // // State loading @@ -450,10 +456,6 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err storeMemUnaligned(addr, size, u64ToU256(value), proofIndexL, proofIndexR) } - // - // CSR (control and status registers) functions - // - // // Preimage oracle interactions // diff --git a/rvsol/src/RISCV.sol b/rvsol/src/RISCV.sol index 80b04d4..c53d6c7 100644 --- a/rvsol/src/RISCV.sol +++ b/rvsol/src/RISCV.sol @@ -333,7 +333,6 @@ contract RISCV { out := 548 } if iszero(eq(proof.offset, proofContentOffset())) { revert(0, 0) } - // TODO: validate abi offset values? 
// // State loading @@ -728,10 +727,6 @@ contract RISCV { storeMemUnaligned(addr, size, u64ToU256(value), proofIndexL, proofIndexR) } - // - // CSR (control and status registers) functions - // - // // Preimage oracle interactions // From c6c8c9f7763c2d5ad09e15071d7661a18463f8e2 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Thu, 31 Oct 2024 12:39:27 -0600 Subject: [PATCH 9/9] Revert memory.SetAligned to memory.SetUnaligned --- rvgo/fast/memory.go | 20 ++++++++- rvgo/fast/memory_test.go | 44 +++++++++--------- rvgo/fast/vm.go | 14 +++--- rvgo/scripts/go-ffi/differential-testing.go | 4 +- rvgo/test/syscall_test.go | 50 ++++++++++----------- 5 files changed, 75 insertions(+), 57 deletions(-) diff --git a/rvgo/fast/memory.go b/rvgo/fast/memory.go index 7db1bf9..7eac13c 100644 --- a/rvgo/fast/memory.go +++ b/rvgo/fast/memory.go @@ -180,7 +180,7 @@ func (m *Memory) pageLookup(pageIndex uint64) (*CachedPage, bool) { return p, ok } -func (m *Memory) SetAligned(addr uint64, dat []byte) { +func (m *Memory) SetUnaligned(addr uint64, dat []byte) { if len(dat) > 32 { panic("cannot set more than 32 bytes") } @@ -195,6 +195,24 @@ func (m *Memory) SetAligned(addr uint64, dat []byte) { m.Invalidate(addr) // invalidate this branch of memory, now that the value changed } + d := copy(p.Data[pageAddr:], dat) + if d == len(dat) { + return // if all the data fitted in the page, we're done + } + + // continue to remaining part + addr += uint64(d) + pageIndex = addr >> PageAddrSize + pageAddr = addr & PageAddrMask + p, ok = m.pageLookup(pageIndex) + if !ok { + // allocate the page if we have not already. + // Go may mmap relatively large ranges, but we only allocate the pages just in time. 
+ p = m.AllocPage(pageIndex) + } else { + m.Invalidate(addr) // invalidate this branch of memory, now that the value changed + } + copy(p.Data[pageAddr:], dat) } diff --git a/rvgo/fast/memory_test.go b/rvgo/fast/memory_test.go index fcd78f4..0c4a955 100644 --- a/rvgo/fast/memory_test.go +++ b/rvgo/fast/memory_test.go @@ -15,7 +15,7 @@ import ( func TestMemoryMerkleProof(t *testing.T) { t.Run("nearly empty tree", func(t *testing.T) { m := NewMemory() - m.SetAligned(0x10000, []byte{0xaa, 0xbb, 0xcc, 0xdd}) + m.SetUnaligned(0x10000, []byte{0xaa, 0xbb, 0xcc, 0xdd}) proof := m.MerkleProof(0x10000) require.Equal(t, uint32(0xaabbccdd), binary.BigEndian.Uint32(proof[:4])) for i := 0; i < 32-5; i++ { @@ -24,9 +24,9 @@ func TestMemoryMerkleProof(t *testing.T) { }) t.Run("fuller tree", func(t *testing.T) { m := NewMemory() - m.SetAligned(0x10000, []byte{0xaa, 0xbb, 0xcc, 0xdd}) - m.SetAligned(0x80004, []byte{42}) - m.SetAligned(0x13370000, []byte{123}) + m.SetUnaligned(0x10000, []byte{0xaa, 0xbb, 0xcc, 0xdd}) + m.SetUnaligned(0x80004, []byte{42}) + m.SetUnaligned(0x13370000, []byte{123}) root := m.MerkleRoot() proof := m.MerkleProof(0x80004) require.Equal(t, uint32(42<<24), binary.BigEndian.Uint32(proof[4:8])) @@ -53,35 +53,35 @@ func TestMemoryMerkleRoot(t *testing.T) { }) t.Run("empty page", func(t *testing.T) { m := NewMemory() - m.SetAligned(0xF000, []byte{0}) + m.SetUnaligned(0xF000, []byte{0}) root := m.MerkleRoot() require.Equal(t, zeroHashes[64-5], root, "fully zeroed memory should have expected zero hash") }) t.Run("single page", func(t *testing.T) { m := NewMemory() - m.SetAligned(0xF000, []byte{1}) + m.SetUnaligned(0xF000, []byte{1}) root := m.MerkleRoot() require.NotEqual(t, zeroHashes[64-5], root, "non-zero memory") }) t.Run("repeat zero", func(t *testing.T) { m := NewMemory() - m.SetAligned(0xF000, []byte{0}) - m.SetAligned(0xF004, []byte{0}) + m.SetUnaligned(0xF000, []byte{0}) + m.SetUnaligned(0xF004, []byte{0}) root := m.MerkleRoot() require.Equal(t, 
zeroHashes[64-5], root, "zero still") }) t.Run("two empty pages", func(t *testing.T) { m := NewMemory() - m.SetAligned(PageSize*3, []byte{0}) - m.SetAligned(PageSize*10, []byte{0}) + m.SetUnaligned(PageSize*3, []byte{0}) + m.SetUnaligned(PageSize*10, []byte{0}) root := m.MerkleRoot() require.Equal(t, zeroHashes[64-5], root, "zero still") }) t.Run("random few pages", func(t *testing.T) { m := NewMemory() - m.SetAligned(PageSize*3, []byte{1}) - m.SetAligned(PageSize*5, []byte{42}) - m.SetAligned(PageSize*6, []byte{123}) + m.SetUnaligned(PageSize*3, []byte{1}) + m.SetUnaligned(PageSize*5, []byte{42}) + m.SetUnaligned(PageSize*6, []byte{123}) p3 := m.MerkleizeSubtree((1 << PageKeySize) | 3) p5 := m.MerkleizeSubtree((1 << PageKeySize) | 5) p6 := m.MerkleizeSubtree((1 << PageKeySize) | 6) @@ -101,11 +101,11 @@ func TestMemoryMerkleRoot(t *testing.T) { }) t.Run("invalidate page", func(t *testing.T) { m := NewMemory() - m.SetAligned(0xF000, []byte{0}) + m.SetUnaligned(0xF000, []byte{0}) require.Equal(t, zeroHashes[64-5], m.MerkleRoot(), "zero at first") - m.SetAligned(0xF004, []byte{1}) + m.SetUnaligned(0xF004, []byte{1}) require.NotEqual(t, zeroHashes[64-5], m.MerkleRoot(), "non-zero") - m.SetAligned(0xF004, []byte{0}) + m.SetUnaligned(0xF004, []byte{0}) require.Equal(t, zeroHashes[64-5], m.MerkleRoot(), "zero again") }) } @@ -141,22 +141,22 @@ func TestMemoryReadWrite(t *testing.T) { t.Run("read-write", func(t *testing.T) { m := NewMemory() - m.SetAligned(12, []byte{0xAA, 0xBB, 0xCC, 0xDD, 0xEE}) + m.SetUnaligned(12, []byte{0xAA, 0xBB, 0xCC, 0xDD, 0xEE}) var tmp [5]byte m.GetUnaligned(12, tmp[:]) require.Equal(t, [5]byte{0xAA, 0xBB, 0xCC, 0xDD, 0xEE}, tmp) - m.SetAligned(12, []byte{0xAA, 0xBB, 0x1C, 0xDD, 0xEE}) + m.SetUnaligned(12, []byte{0xAA, 0xBB, 0x1C, 0xDD, 0xEE}) m.GetUnaligned(12, tmp[:]) require.Equal(t, [5]byte{0xAA, 0xBB, 0x1C, 0xDD, 0xEE}, tmp) }) t.Run("read-write-unaligned", func(t *testing.T) { m := NewMemory() - m.SetAligned(13, []byte{0xAA, 0xBB, 0xCC, 
0xDD, 0xEE}) + m.SetUnaligned(13, []byte{0xAA, 0xBB, 0xCC, 0xDD, 0xEE}) var tmp [5]byte m.GetUnaligned(13, tmp[:]) require.Equal(t, [5]byte{0xAA, 0xBB, 0xCC, 0xDD, 0xEE}, tmp) - m.SetAligned(13, []byte{0xAA, 0xBB, 0x1C, 0xDD, 0xEE}) + m.SetUnaligned(13, []byte{0xAA, 0xBB, 0x1C, 0xDD, 0xEE}) m.GetUnaligned(13, tmp[:]) require.Equal(t, [5]byte{0xAA, 0xBB, 0x1C, 0xDD, 0xEE}, tmp) }) @@ -164,7 +164,7 @@ func TestMemoryReadWrite(t *testing.T) { func TestMemoryJSON(t *testing.T) { m := NewMemory() - m.SetAligned(8, []byte{123}) + m.SetUnaligned(8, []byte{123}) dat, err := json.Marshal(m) require.NoError(t, err) var res Memory @@ -176,7 +176,7 @@ func TestMemoryJSON(t *testing.T) { func TestMemoryBinary(t *testing.T) { m := NewMemory() - m.SetAligned(8, []byte{123}) + m.SetUnaligned(8, []byte{123}) ser := new(bytes.Buffer) err := m.Serialize(ser) require.NoError(t, err, "must serialize state") diff --git a/rvgo/fast/vm.go b/rvgo/fast/vm.go index ba2f17d..3e1755d 100644 --- a/rvgo/fast/vm.go +++ b/rvgo/fast/vm.go @@ -178,7 +178,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) { panic(fmt.Errorf("addr %d not aligned with 32 bytes", addr)) } inst.verifyMemChange(addr, proofIndex) - s.Memory.SetAligned(addr, v[:]) + s.Memory.SetUnaligned(addr, v[:]) } // load unaligned, optionally signed, little-endian, integer of 1 ... 
8 bytes from memory @@ -220,7 +220,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) { } inst.verifyMemChange(leftAddr, proofIndexL) if (addr+size-1)&^31 == addr&^31 { // if aligned - s.Memory.SetAligned(addr, bytez[:size]) + s.Memory.SetUnaligned(addr, bytez[:size]) return } if proofIndexR == 0xff { @@ -229,12 +229,12 @@ func (inst *InstrumentedState) riscvStep() (outErr error) { // if not aligned rightAddr := leftAddr + 32 leftSize := rightAddr - addr - s.Memory.SetAligned(addr, bytez[:leftSize]) + s.Memory.SetUnaligned(addr, bytez[:leftSize]) if verifyR { inst.trackMemAccess(rightAddr, proofIndexR) } inst.verifyMemChange(rightAddr, proofIndexR) - s.Memory.SetAligned(rightAddr, bytez[leftSize:size]) + s.Memory.SetUnaligned(rightAddr, bytez[leftSize:size]) } storeMem := func(addr U64, size U64, value U64, proofIndexL uint8, proofIndexR uint8, verifyL bool, verifyR bool) { @@ -249,7 +249,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) { } inst.verifyMemChange(leftAddr, proofIndexL) if (addr+size-1)&^31 == addr&^31 { // if aligned - s.Memory.SetAligned(addr, bytez[:size]) + s.Memory.SetUnaligned(addr, bytez[:size]) return } // if not aligned @@ -258,12 +258,12 @@ func (inst *InstrumentedState) riscvStep() (outErr error) { } rightAddr := leftAddr + 32 leftSize := rightAddr - addr - s.Memory.SetAligned(addr, bytez[:leftSize]) + s.Memory.SetUnaligned(addr, bytez[:leftSize]) if verifyR { inst.trackMemAccess(rightAddr, proofIndexR) } inst.verifyMemChange(rightAddr, proofIndexR) - s.Memory.SetAligned(rightAddr, bytez[leftSize:size]) + s.Memory.SetUnaligned(rightAddr, bytez[leftSize:size]) } // diff --git a/rvgo/scripts/go-ffi/differential-testing.go b/rvgo/scripts/go-ffi/differential-testing.go index bd02638..eceefb2 100644 --- a/rvgo/scripts/go-ffi/differential-testing.go +++ b/rvgo/scripts/go-ffi/differential-testing.go @@ -47,7 +47,7 @@ func DiffTestUtils() { checkErr(err, "Error decoding insn") instBytes := make([]byte, 4) 
binary.LittleEndian.PutUint32(instBytes, uint32(insn)) - mem.SetAligned(uint64(pc), instBytes) + mem.SetUnaligned(uint64(pc), instBytes) // proof size: 64-5+1=60 (a 64-bit mem-address branch to 32 byte leaf, incl leaf itself), all 32 bytes // 60 * 32 = 1920 @@ -57,7 +57,7 @@ func DiffTestUtils() { checkErr(err, "Error decoding memAddr") memValue, err := hex.DecodeString(strings.TrimPrefix(args[4], "0x")) checkErr(err, "Error decoding memValue") - mem.SetAligned(uint64(memAddr), memValue) + mem.SetUnaligned(uint64(memAddr), memValue) memProof = mem.MerkleProof(uint64(memAddr)) } insnProof = mem.MerkleProof(uint64(pc)) diff --git a/rvgo/test/syscall_test.go b/rvgo/test/syscall_test.go index 87bcd91..7d8ef07 100644 --- a/rvgo/test/syscall_test.go +++ b/rvgo/test/syscall_test.go @@ -93,7 +93,7 @@ func TestStateSyscallUnsupported(t *testing.T) { Registers: [32]uint64{17: uint64(syscall)}, Step: 0, } - state.Memory.SetAligned(pc, syscallInsn) + state.Memory.SetUnaligned(pc, syscallInsn) fastState := fast.NewInstrumentedState(state, nil, os.Stdout, os.Stderr) stepWitness, err := fastState.Step(true) @@ -265,7 +265,7 @@ func TestEVMSysWriteHint(t *testing.T) { err := state.Memory.SetMemoryRange(uint64(tt.memOffset), bytes.NewReader(tt.hintData)) require.NoError(t, err) - state.Memory.SetAligned(0, syscallInsn) + state.Memory.SetUnaligned(0, syscallInsn) fastState := fast.NewInstrumentedState(state, &oracle, os.Stdout, os.Stderr) stepWitness, err := fastState.Step(true) @@ -296,7 +296,7 @@ func FuzzStateSyscallExit(f *testing.F) { Registers: [32]uint64{17: uint64(syscall), 10: uint64(exitCode)}, Step: step, } - state.Memory.SetAligned(pc, syscallInsn) + state.Memory.SetUnaligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() preStateRegisters := state.Registers @@ -342,7 +342,7 @@ func FuzzStateSyscallBrk(f *testing.F) { Registers: [32]uint64{17: riscv.SysBrk}, Step: step, } - state.Memory.SetAligned(pc, syscallInsn) + state.Memory.SetUnaligned(pc, syscallInsn) 
preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers expectedRegisters[10] = 1 << 30 @@ -393,7 +393,7 @@ func FuzzStateSyscallMmap(f *testing.F) { }, Step: step, } - state.Memory.SetAligned(pc, syscallInsn) + state.Memory.SetUnaligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers expectedRegisters[11] = 0 @@ -444,7 +444,7 @@ func FuzzStateSyscallFcntl(f *testing.F) { Registers: [32]uint64{17: riscv.SysFcntl, 10: fd, 11: cmd}, Step: step, } - state.Memory.SetAligned(pc, syscallInsn) + state.Memory.SetUnaligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers expectedRegisters[10] = out @@ -516,7 +516,7 @@ func FuzzStateSyscallOpenat(f *testing.F) { Registers: [32]uint64{17: riscv.SysOpenat}, Step: step, } - state.Memory.SetAligned(pc, syscallInsn) + state.Memory.SetUnaligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers expectedRegisters[10] = 0xFFFF_FFFF_FFFF_FFFF @@ -558,7 +558,7 @@ func FuzzStateSyscallClockGettime(f *testing.F) { Registers: [32]uint64{17: riscv.SysClockGettime, 11: addr}, Step: step, } - state.Memory.SetAligned(pc, syscallInsn) + state.Memory.SetUnaligned(pc, syscallInsn) expectedRegisters := state.Registers expectedRegisters[11] = 0 @@ -568,11 +568,11 @@ func FuzzStateSyscallClockGettime(f *testing.F) { require.False(t, stepWitness.HasPreimage()) postMemory := fast.NewMemory() - postMemory.SetAligned(pc, syscallInsn) + postMemory.SetUnaligned(pc, syscallInsn) var bytes [8]byte binary.LittleEndian.PutUint64(bytes[:], 1337) - postMemory.SetAligned(addr, bytes[:]) - postMemory.SetAligned(addr+8, []byte{42, 0, 0, 0, 0, 0, 0, 0}) + postMemory.SetUnaligned(addr, bytes[:]) + postMemory.SetUnaligned(addr+8, []byte{42, 0, 0, 0, 0, 0, 0, 0}) require.Equal(t, pc+4, state.PC) // PC must advance require.Equal(t, uint64(0), state.Heap) @@ -605,7 +605,7 @@ func FuzzStateSyscallClone(f 
*testing.F) { Registers: [32]uint64{17: riscv.SysClone}, Step: step, } - state.Memory.SetAligned(pc, syscallInsn) + state.Memory.SetUnaligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers expectedRegisters[10] = 1 @@ -647,7 +647,7 @@ func FuzzStateSyscallGetrlimit(f *testing.F) { Registers: [32]uint64{17: riscv.SysGetrlimit, 10: 7, 11: addr}, Step: step, } - state.Memory.SetAligned(pc, syscallInsn) + state.Memory.SetUnaligned(pc, syscallInsn) expectedRegisters := state.Registers expectedRegisters[10] = 0 expectedRegisters[11] = 0 @@ -658,11 +658,11 @@ func FuzzStateSyscallGetrlimit(f *testing.F) { require.False(t, stepWitness.HasPreimage()) postMemory := fast.NewMemory() - postMemory.SetAligned(pc, syscallInsn) + postMemory.SetUnaligned(pc, syscallInsn) var bytes [8]byte binary.LittleEndian.PutUint64(bytes[:], 1024) - postMemory.SetAligned(addr, bytes[:]) - postMemory.SetAligned(addr+8, bytes[:]) + postMemory.SetUnaligned(addr, bytes[:]) + postMemory.SetUnaligned(addr+8, bytes[:]) require.Equal(t, pc+4, state.PC) // PC must advance require.Equal(t, uint64(0), state.Heap) @@ -691,7 +691,7 @@ func FuzzStateSyscallGetrlimit(f *testing.F) { Registers: [32]uint64{17: riscv.SysGetrlimit, 10: res, 11: addr}, Step: 0, } - state.Memory.SetAligned(pc, syscallInsn) + state.Memory.SetUnaligned(pc, syscallInsn) fastState := fast.NewInstrumentedState(state, nil, os.Stdout, os.Stderr) stepWitness, err := fastState.Step(true) @@ -751,7 +751,7 @@ func FuzzStateSyscallNoop(f *testing.F) { Registers: [32]uint64{17: uint64(syscall), 10: arg}, Step: step, } - state.Memory.SetAligned(pc, syscallInsn) + state.Memory.SetUnaligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers expectedRegisters[10] = 0 @@ -799,7 +799,7 @@ func FuzzStateSyscallRead(f *testing.F) { Registers: [32]uint64{17: riscv.SysRead, 10: fd, 11: addr, 12: count}, Step: step, } - state.Memory.SetAligned(pc, syscallInsn) + 
state.Memory.SetUnaligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers expectedRegisters[10] = ret @@ -859,7 +859,7 @@ func FuzzStateHintRead(f *testing.F) { PreimageKey: preimage.Keccak256Key(crypto.Keccak256Hash(preimageData)).PreimageKey(), PreimageOffset: preimageOffset, } - state.Memory.SetAligned(pc, syscallInsn) + state.Memory.SetUnaligned(pc, syscallInsn) preStatePreimageKey := state.PreimageKey preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers @@ -911,7 +911,7 @@ func FuzzStatePreimageRead(f *testing.F) { PreimageKey: preimage.Keccak256Key(crypto.Keccak256Hash(preimageData)).PreimageKey(), PreimageOffset: preimageOffset, } - state.Memory.SetAligned(pc, syscallInsn) + state.Memory.SetUnaligned(pc, syscallInsn) preStatePreimageKey := state.PreimageKey preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers @@ -972,7 +972,7 @@ func FuzzStateSyscallWrite(f *testing.F) { Registers: [32]uint64{17: riscv.SysWrite, 10: fd, 11: addr, 12: count}, Step: step, } - state.Memory.SetAligned(pc, syscallInsn) + state.Memory.SetUnaligned(pc, syscallInsn) preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers expectedRegisters[10] = ret @@ -1044,7 +1044,7 @@ func FuzzStateHintWrite(f *testing.F) { require.NoError(t, err) // Set syscall instruction - state.Memory.SetAligned(pc, syscallInsn) + state.Memory.SetUnaligned(pc, syscallInsn) preStatePreimageKey := state.PreimageKey preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers @@ -1101,11 +1101,11 @@ func FuzzStatePreimageWrite(f *testing.F) { Step: step, PreimageOffset: preimageOffset, } - state.Memory.SetAligned(pc, syscallInsn) + state.Memory.SetUnaligned(pc, syscallInsn) // Set preimage key to addr preimageKey := preimage.Keccak256Key(crypto.Keccak256Hash(preimageData)).PreimageKey() - state.Memory.SetAligned(addr, preimageKey[:]) + state.Memory.SetUnaligned(addr, 
preimageKey[:]) preStateRoot := state.Memory.MerkleRoot() expectedRegisters := state.Registers