Skip to content

Commit

Permalink
Merge branch 'master' into feature/mininny/audit-21
Browse files Browse the repository at this point in the history
  • Loading branch information
mininny authored Jan 15, 2025
2 parents 4bb1569 + 60d6d8d commit dd3141e
Show file tree
Hide file tree
Showing 12 changed files with 235 additions and 62 deletions.
4 changes: 2 additions & 2 deletions docs/radix-memory.md
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ New benchmark suite is added, which measures the latency of the following operat

- Memory read / write to random addresses
- Memory read / write to contiguous address
- Memory write to sparse memory addresse
- Memory write to sparse memory addresses
- Memory write to dense memory addresses
- Merkle proof generation
- Merkle root calculation
Expand Down Expand Up @@ -112,4 +112,4 @@ Usually, sparse region would utilize smaller branching factor for memory optimiz
- use larger branching factors at the upper address level to reduce the trie traversal depth
- use smaller branching factors at the lower address level to reduce computation for each node.

In addition, we can apply pgo as mentioned above. To apply pgo to asterisc builds, we can run asterisc with cpu pprof enabled, and ship asterisc with `default.pgo` in the build path. This way, whenever the user builds Asterisc, pgo will be enabled by default, leading to addition 5+% improvement in speed.
In addition, we can apply pgo as mentioned above. To apply pgo to asterisc builds, we can run asterisc with cpu pprof enabled, and ship asterisc with `default.pgo` in the build path. This way, whenever the user builds Asterisc, pgo will be enabled by default, leading to addition 5+% improvement in speed.
2 changes: 1 addition & 1 deletion docs/riscv.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
## Helpful learning resources

- rv32 instruction set cheat sheet: http://blog.translusion.com/images/posts/RISC-V-cheatsheet-RV32I-4-3.pdf
- rv32: reference card: https://github.com/jameslzhu/riscv-card/blob/master/riscv-card.pdf
- rv32: reference card: https://github.com/jameslzhu/riscv-card/releases/download/latest/riscv-card.pdf
- online riscv32 interpreter: https://www.cs.cornell.edu/courses/cs3410/2019sp/riscv/interpreter/#
- specs: https://riscv.org/technical/specifications/
- Berkely riscv card: https://inst.eecs.berkeley.edu/~cs61c/fa18/img/riscvcard.pdf
Expand Down
2 changes: 1 addition & 1 deletion rvgo/fast/instrumented.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ func (m *InstrumentedState) Step(proof bool) (wit *StepWitness, err error) {

func (m *InstrumentedState) readPreimage(key [32]byte, offset uint64) (dat [32]byte, datLen uint64, err error) {
preimage := m.lastPreimage
if key != m.lastPreimageKey {
if preimage == nil || key != m.lastPreimageKey {
m.lastPreimageKey = key
data := m.preimageOracle.GetPreimage(key)
// add the length prefix
Expand Down
43 changes: 43 additions & 0 deletions rvgo/fast/instrumented_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
package fast

import (
"testing"

"github.com/stretchr/testify/require"
)

// MockPreimageOracle is a stub preimage oracle for tests: it ignores hints
// and serves an all-zero 32-byte preimage for every key.
type MockPreimageOracle struct {
}

// Hint discards the hint; the mock needs no preparation.
func (o *MockPreimageOracle) Hint(v []byte) {
}

// GetPreimage returns a zero-filled 32-byte preimage regardless of key.
func (o *MockPreimageOracle) GetPreimage(k [32]byte) []byte {
	return make([]byte, 32)
}

// ReadPreimagePart reports a full 32-byte, zero-filled part for any key/offset.
func (o *MockPreimageOracle) ReadPreimagePart(key [32]byte, offset uint64) ([32]byte, uint8, error) {
	return [32]byte{}, 32, nil
}

// TestReadPreimage runs a single instrumented step over an ecall instruction
// configured as a preimage read, backed by the zero-filled MockPreimageOracle,
// and expects the step to complete without error.
func TestReadPreimage(t *testing.T) {
	state := VMState{
		PC:        0,
		Memory:    NewMemory(),
		Registers: [32]uint64{},
		ExitCode:  0,
		Exited:    false,
		Heap:      0x7f_00_00_00_00_00,
	}

	// Place an ecall instruction (opcode 0x73) at PC 0.
	state.Memory.SetUnaligned(0, []byte{0x73})
	// Register x17 (a7) selects syscall 63 (read); x10 (a0) selects fd 5,
	// which is presumably the preimage-read descriptor — see readPreimage.
	state.Registers[17] = 63
	state.Registers[10] = 5

	inst := NewInstrumentedState(&state, &MockPreimageOracle{}, nil, nil)

	_, err := inst.Step(true)
	require.NoError(t, err)
}
6 changes: 3 additions & 3 deletions rvgo/fast/memory.go
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,7 @@ func (m *Memory) SetUnaligned(addr uint64, dat []byte) {
m.Invalidate(addr) // invalidate this branch of memory, now that the value changed
}

copy(p.Data[pageAddr:], dat)
copy(p.Data[pageAddr:], dat[d:])
}

func (m *Memory) GetUnaligned(addr uint64, dest []byte) {
Expand All @@ -140,7 +140,7 @@ func (m *Memory) GetUnaligned(addr uint64, dest []byte) {
p, ok := m.pageLookup(pageIndex)
var d int
if !ok {
l := pageSize - pageAddr
l := PageSize - pageAddr
if l > 32 {
l = 32
}
Expand All @@ -160,7 +160,7 @@ func (m *Memory) GetUnaligned(addr uint64, dest []byte) {
pageAddr = addr & PageAddrMask
p, ok = m.pageLookup(pageIndex)
if !ok {
l := pageSize - pageAddr
l := PageSize - pageAddr
if l > 32 {
l = 32
}
Expand Down
9 changes: 9 additions & 0 deletions rvgo/fast/memory_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -412,3 +412,12 @@ func TestMemoryBinary(t *testing.T) {
m.GetUnaligned(8, dest[:])
require.Equal(t, uint8(123), dest[0])
}

// TestMemoryInvalidSetUnaligned verifies that SetUnaligned splits a write that
// crosses a page boundary: the leading bytes land at the end of the first page
// and the remaining bytes at the start of the next page.
func TestMemoryInvalidSetUnaligned(t *testing.T) {
	t.Run("SetUnaligned incorrectly writes to next page", func(t *testing.T) {
		m := NewMemory()
		m.SetUnaligned(0x0FFE, []byte{0xaa, 0xbb, 0xcc, 0xdd})
		// require.Equal takes (t, expected, actual): the literal expectation
		// goes first so failure messages label the values correctly.
		require.Equal(t, []byte{0xaa, 0xbb}, m.pages[0].Data[4094:])
		require.Equal(t, []byte{0xcc, 0xdd}, m.pages[1].Data[0:2])
	})
}
22 changes: 7 additions & 15 deletions rvgo/fast/state.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,16 +13,6 @@ import (
"github.com/ethereum/go-ethereum/crypto"
)

// page size must be at least 32 bytes (one merkle node)
// memory merkleization will look the same regardless of page size past 32.
const (
pageAddrSize = 10
pageKeySize = 64 - pageAddrSize
pageSize = 1 << pageAddrSize
pageAddrMask = pageSize - 1
maxPageCount = 1 << pageKeySize
)

type VMState struct {
Memory *Memory `json:"memory"`

Expand Down Expand Up @@ -111,6 +101,9 @@ func (state *VMState) Instr() uint32 {

type StateWitness []byte

const STATE_WITNESS_SIZE = 362 // STATE_WITNESS_SIZE is the size of the state witness encoding in bytes.
const EXITCODE_WITNESS_OFFSET = 32 + 32 + 8 + 8 // mem-root, preimage-key, preimage-offset, PC

const (
VMStatusValid = 0
VMStatusInvalid = 1
Expand All @@ -119,14 +112,13 @@ const (
)

func (sw StateWitness) StateHash() (common.Hash, error) {
offset := 32 + 32 + 8 + 8 // mem-root, preimage-key, preimage-offset, PC
if len(sw) <= offset+1 {
return common.Hash{}, fmt.Errorf("state must at least be %d bytes, but got %d", offset, len(sw))
if len(sw) != STATE_WITNESS_SIZE {
return common.Hash{}, fmt.Errorf("invalid witness length. got %d, expected %d", len(sw), STATE_WITNESS_SIZE)
}

hash := crypto.Keccak256Hash(sw)
exitCode := sw[offset]
exited := sw[offset+1]
exitCode := sw[EXITCODE_WITNESS_OFFSET]
exited := sw[EXITCODE_WITNESS_OFFSET+1]
status := vmStatus(exited == 1, exitCode)
hash[0] = status
return hash, nil
Expand Down
32 changes: 27 additions & 5 deletions rvgo/fast/vm.go
Original file line number Diff line number Diff line change
Expand Up @@ -587,6 +587,12 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
switch opcode {
case 0x03: // 000_0011: memory loading
// LB, LH, LW, LD, LBU, LHU, LWU

// bits[14:12] set to 111 are reserved
if eq64(funct3, toU64(0x7)) != 0 {
revertWithCode(riscv.ErrInvalidSyscall, fmt.Errorf("illegal instruction %d: reserved instruction encoding", instr))
}

imm := parseImmTypeI(instr)
signed := iszero64(and64(funct3, toU64(4))) // 4 = 100 -> bitflag
size := shl64(and64(funct3, toU64(3)), toU64(1)) // 3 = 11 -> 1, 2, 4, 8 bytes size
Expand Down Expand Up @@ -631,6 +637,12 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
// So it's really 13 bits with a hardcoded 0 bit.
pc = add64(pc, imm)
}

// The PC must be aligned to 4 bytes.
if pc&3 != 0 {
revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("pc %d not aligned with 4 bytes", pc))
}

// not like the other opcodes: nothing to write to rd register, and PC has already changed
setPC(pc)
case 0x13: // 001_0011: immediate arithmetic and logic
Expand Down Expand Up @@ -760,7 +772,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
setPC(add64(pc, toU64(4)))
case 0x3B: // 011_1011: register arithmetic and logic in 32 bits
rs1Value := getRegister(rs1)
rs2Value := getRegister(rs2)
rs2Value := and64(getRegister(rs2), u32Mask())
var rdValue U64
switch funct7 {
case 1: // RV M extension
Expand Down Expand Up @@ -833,13 +845,23 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
imm := parseImmTypeJ(instr)
rdValue := add64(pc, toU64(4))
setRegister(rd, rdValue)
setPC(add64(pc, signExtend64(shl64(toU64(1), imm), toU64(20)))) // signed offset in multiples of 2 bytes (last bit is there, but ignored)

newPC := add64(pc, signExtend64(shl64(toU64(1), imm), toU64(20)))
if newPC&3 != 0 { // quick target alignment check
revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("pc %d not aligned with 4 bytes", newPC))
}
setPC(newPC) // signed offset in multiples of 2 bytes (last bit is there, but ignored)
case 0x67: // 110_0111: JALR = Jump and link register
rs1Value := getRegister(rs1)
imm := parseImmTypeI(instr)
rdValue := add64(pc, toU64(4))
setRegister(rd, rdValue)
setPC(and64(add64(rs1Value, signExtend64(imm, toU64(11))), xor64(u64Mask(), toU64(1)))) // least significant bit is set to 0

newPC := and64(add64(rs1Value, signExtend64(imm, toU64(11))), xor64(u64Mask(), toU64(1)))
if newPC&3 != 0 { // quick addr alignment check
revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("pc %d not aligned with 4 bytes", newPC))
}
setPC(newPC) // least significant bit is set to 0
case 0x73: // 111_0011: environment things
switch funct3 {
case 0: // 000 = ECALL/EBREAK
Expand Down Expand Up @@ -867,11 +889,11 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
// 0b010 == RV32A W variants
// 0b011 == RV64A D variants
size := shl64(funct3, toU64(1))
if lt64(size, toU64(4)) != 0 {
if lt64(size, toU64(4)) != 0 || gt64(size, toU64(8)) != 0 {
revertWithCode(riscv.ErrBadAMOSize, fmt.Errorf("bad AMO size: %d", size))
}
addr := getRegister(rs1)
if addr&3 != 0 { // quick addr alignment check
if mod64(addr, size) != 0 { // quick addr alignment check
revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("addr %d not aligned with 4 bytes", addr))
}

Expand Down
43 changes: 38 additions & 5 deletions rvgo/slow/vm.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package slow

import (
"bytes"
"encoding/binary"
"fmt"

Expand Down Expand Up @@ -121,6 +122,12 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
return
}

// First 4 bytes of keccak256("step(bytes,bytes,bytes32)")
expectedSelector := []byte{0xe1, 0x4c, 0xed, 0x32}
if len(calldata) < 4 || !bytes.Equal(calldata[:4], expectedSelector) {
panic("invalid function selector")
}

(CI annotation — GitHub Actions / rvgo-lint, rvgo/slow/vm.go line 130: "File is not properly formatted (goimports)")
stateContentOffset := uint16(4 + 32 + 32 + 32 + 32)
if iszero(eq(add(b32asBEWord(calldataload(toU64(4))), shortToU256(32+4)), shortToU256(stateContentOffset))) {
// _stateData.offset = _stateData.pointer + 32 + 4
Expand Down Expand Up @@ -464,6 +471,10 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
setMemoryB32(rightAddr, beWordAsB32(right), proofIndexR)
}
storeMem := func(addr U64, size U64, value U64, proofIndexL uint8, proofIndexR uint8) {
if size.val() > 8 {
revertWithCode(riscv.ErrStoreExceeds8Bytes, fmt.Errorf("cannot store more than 8 bytes: %d", size))
}

storeMemUnaligned(addr, size, u64ToU256(value), proofIndexL, proofIndexR)
}

Expand Down Expand Up @@ -771,6 +782,12 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
switch opcode.val() {
case 0x03: // 000_0011: memory loading
// LB, LH, LW, LD, LBU, LHU, LWU

// bits[14:12] set to 111 are reserved
if eq64(funct3, toU64(0x7)) != (U64{}) {
revertWithCode(riscv.ErrInvalidSyscall, fmt.Errorf("illegal instruction %d: reserved instruction encoding", instr))
}

imm := parseImmTypeI(instr)
signed := iszero64(and64(funct3, toU64(4))) // 4 = 100 -> bitflag
size := shl64(and64(funct3, toU64(3)), toU64(1)) // 3 = 11 -> 1, 2, 4, 8 bytes size
Expand Down Expand Up @@ -815,6 +832,12 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
// So it's really 13 bits with a hardcoded 0 bit.
pc = add64(pc, imm)
}

// The PC must be aligned to 4 bytes.
if and64(pc, toU64(3)) != (U64{}) {
revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("pc %d not aligned with 4 bytes", pc))
}

// not like the other opcodes: nothing to write to rd register, and PC has already changed
setPC(pc)
case 0x13: // 001_0011: immediate arithmetic and logic
Expand Down Expand Up @@ -944,7 +967,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
setPC(add64(pc, toU64(4)))
case 0x3B: // 011_1011: register arithmetic and logic in 32 bits
rs1Value := getRegister(rs1)
rs2Value := getRegister(rs2)
rs2Value := and64(getRegister(rs2), u32Mask())
var rdValue U64
switch funct7.val() {
case 1: // RV M extension
Expand Down Expand Up @@ -1017,13 +1040,23 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
imm := parseImmTypeJ(instr)
rdValue := add64(pc, toU64(4))
setRegister(rd, rdValue)
setPC(add64(pc, signExtend64(shl64(toU64(1), imm), toU64(20)))) // signed offset in multiples of 2 bytes (last bit is there, but ignored)

newPC := add64(pc, signExtend64(shl64(toU64(1), imm), toU64(20)))
if and64(newPC, toU64(3)) != (U64{}) { // quick target alignment check
revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("pc %d not aligned with 4 bytes", newPC))
}
setPC(newPC) // signed offset in multiples of 2 bytes (last bit is there, but ignored)
case 0x67: // 110_0111: JALR = Jump and link register
rs1Value := getRegister(rs1)
imm := parseImmTypeI(instr)
rdValue := add64(pc, toU64(4))
setRegister(rd, rdValue)
setPC(and64(add64(rs1Value, signExtend64(imm, toU64(11))), xor64(u64Mask(), toU64(1)))) // least significant bit is set to 0

newPC := and64(add64(rs1Value, signExtend64(imm, toU64(11))), xor64(u64Mask(), toU64(1)))
if and64(newPC, toU64(3)) != (U64{}) { // quick target alignment check
revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("pc %d not aligned with 4 bytes", newPC))
}
setPC(newPC) // least significant bit is set to 0
case 0x73: // 111_0011: environment things
switch funct3.val() {
case 0: // 000 = ECALL/EBREAK
Expand Down Expand Up @@ -1051,11 +1084,11 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
// 0b010 == RV32A W variants
// 0b011 == RV64A D variants
size := shl64(funct3, toU64(1))
if lt64(size, toU64(4)) != (U64{}) {
if or64(lt64(size, toU64(4)), gt64(size, toU64(8))) != (U64{}) {
revertWithCode(riscv.ErrBadAMOSize, fmt.Errorf("bad AMO size: %d", size))
}
addr := getRegister(rs1)
if and64(addr, toU64(3)) != (U64{}) { // quick addr alignment check
if mod64(addr, size) != (U64{}) { // quick addr alignment check
revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("addr %d not aligned with 4 bytes", addr))
}

Expand Down
4 changes: 2 additions & 2 deletions rvsol/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ forge test -vvv --ffi
- There are few issues with Foundry.
- Run script directly without manual build does not work with the current version of Foundry (2024-03-15 `3fa0270`).
You **must run** `make build` **before** running the deploy script. ([issue](https://github.com/foundry-rs/foundry/issues/6572))
- Some older version(2024-02-01 `2f4b5db`) of Foundry makes a dependency error reproted above issue.
- Some older version(2024-02-01 `2f4b5db`) of Foundry makes a dependency error reported above issue.
Use the **latest version** of Foundry!
- The deploy script can be run only once on the devnet because of the `create2` salt.
To rerun the script for dev purpose, you must restart the devnet with `make devnet-clean && make devnet-up` command on the monorepo.
To rerun the script for dev purpose, you must restart the devnet with `make devnet-clean && make devnet-up` command on the monorepo.
Loading

0 comments on commit dd3141e

Please sign in to comment.