Skip to content

Commit

Permalink
Incorporate new ABI format
Browse files — browse the repository at this point in the history
  • Loading branch information
axelKingsley committed Aug 7, 2024
1 parent 335dea1 commit 8b470d5
Show file tree
Hide file tree
Showing 4 changed files with 121 additions and 37 deletions.
89 changes: 74 additions & 15 deletions op-supervisor/supervisor/backend/source/contracts/l2inbox.go
Original file line number Diff line number Diff line change
@@ -1,18 +1,20 @@
package contracts

import (
"bytes"
"errors"
"fmt"
"io"
"math/big"

"github.com/ethereum-optimism/optimism/op-service/predeploys"
"github.com/ethereum-optimism/optimism/op-service/solabi"
"github.com/ethereum-optimism/optimism/op-service/sources/batching"
backendTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum-optimism/optimism/packages/contracts-bedrock/snapshots"
"github.com/ethereum/go-ethereum/common"
ethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
)

const (
Expand All @@ -24,11 +26,14 @@ var (
)

// contractIdentifier mirrors the Identifier tuple emitted by the CrossL2Inbox
// ExecutingMessage event; fields are populated by identifierFromBytes.
type contractIdentifier struct {
	// Origin represents the address that initiated the message
	// it is used in combination with the MsgHash to uniquely identify a message
	// and is hashed into the log hash, not stored directly.
	Origin common.Address
	// LogIndex is the index of the initiating log within its block.
	LogIndex *big.Int
	// BlockNumber is the block containing the initiating message.
	BlockNumber *big.Int
	// ChainId identifies the chain the initiating message was emitted on.
	ChainId *big.Int
	// Timestamp is the timestamp of the initiating block.
	Timestamp *big.Int
}

type CrossL2Inbox struct {
Expand All @@ -46,7 +51,9 @@ func (i *CrossL2Inbox) DecodeExecutingMessageLog(l *ethTypes.Log) (backendTypes.
if l.Address != i.contract.Addr() {
return backendTypes.ExecutingMessage{}, fmt.Errorf("%w: log not from CrossL2Inbox", ErrEventNotFound)
}
name, result, err := i.contract.DecodeEvent(l)
// use DecodeEvent to check the name of the event
// but the actual decoding is done manually to extract the contract identifier
name, _, err := i.contract.DecodeEvent(l)
if errors.Is(err, batching.ErrUnknownEvent) {
return backendTypes.ExecutingMessage{}, fmt.Errorf("%w: %v", ErrEventNotFound, err.Error())
} else if err != nil {
Expand All @@ -55,20 +62,72 @@ func (i *CrossL2Inbox) DecodeExecutingMessageLog(l *ethTypes.Log) (backendTypes.
if name != eventExecutingMessage {
return backendTypes.ExecutingMessage{}, fmt.Errorf("%w: event %v not an ExecutingMessage event", ErrEventNotFound, name)
}
var ident contractIdentifier
result.GetStruct(0, &ident)
payload := result.GetBytes(1)
payloadHash := crypto.Keccak256Hash(payload)

chainID, err := types.ChainIDFromBig(ident.ChainId).ToUInt32()
// the second topic is the hash of the payload (the first is the event ID)
msgHash := l.Topics[1]
// the first 32 bytes of the data are the msgHash, so we skip them
identifierBytes := bytes.NewReader(l.Data[32:])
identifier, err := identifierFromBytes(identifierBytes)
if err != nil {
return backendTypes.ExecutingMessage{}, fmt.Errorf("failed to convert chain ID %v to uint32: %w", ident.ChainId, err)
return backendTypes.ExecutingMessage{}, fmt.Errorf("failed to read contract identifier: %w", err)
}
chainID, err := types.ChainIDFromBig(identifier.ChainId).ToUInt32()
if err != nil {
return backendTypes.ExecutingMessage{}, fmt.Errorf("failed to convert chain ID %v to uint32: %w", identifier.ChainId, err)
}
hash := payloadHashToLogHash(msgHash, identifier.Origin)
return backendTypes.ExecutingMessage{
Chain: chainID,
BlockNum: ident.BlockNumber.Uint64(),
LogIdx: uint32(ident.LogIndex.Uint64()),
Timestamp: ident.Timestamp.Uint64(),
Hash: backendTypes.TruncateHash(payloadHash),
Hash: hash,
BlockNum: identifier.BlockNumber.Uint64(),
LogIdx: uint32(identifier.LogIndex.Uint64()),
Timestamp: identifier.Timestamp.Uint64(),
}, nil
}

// identifierFromBytes reads a contract identifier from a byte stream.
// it follows the spec and matches the CrossL2Inbox.json definition,
// rather than relying on reflection, as that can be error-prone regarding struct ordering
func identifierFromBytes(identifierBytes io.Reader) (contractIdentifier, error) {
	// Fields are decoded strictly in the ABI-defined order:
	// origin, block number, log index, timestamp, chain ID.
	var ident contractIdentifier
	origin, err := solabi.ReadAddress(identifierBytes)
	if err != nil {
		return contractIdentifier{}, fmt.Errorf("failed to read origin address: %w", err)
	}
	ident.Origin = common.BytesToAddress(origin[:])
	if ident.BlockNumber, err = solabi.ReadUint256(identifierBytes); err != nil {
		return contractIdentifier{}, fmt.Errorf("failed to read block number: %w", err)
	}
	if ident.LogIndex, err = solabi.ReadUint256(identifierBytes); err != nil {
		return contractIdentifier{}, fmt.Errorf("failed to read log index: %w", err)
	}
	if ident.Timestamp, err = solabi.ReadUint256(identifierBytes); err != nil {
		return contractIdentifier{}, fmt.Errorf("failed to read timestamp: %w", err)
	}
	if ident.ChainId, err = solabi.ReadUint256(identifierBytes); err != nil {
		return contractIdentifier{}, fmt.Errorf("failed to read chain ID: %w", err)
	}
	return ident, nil
}

// payloadHashToLogHash converts the payload hash to the log hash
// it is the concatenation of the log's address and the hash of the log's payload,
// which is then hashed again. This is the hash that is stored in the log storage.
// The logHash can then be used to traverse from the executing message
// to the log of the referenced initiating message.
// TODO: this function is duplicated between contracts and backend/source/log_processor.go
// to avoid a circular dependency. It should be reorganized to avoid this duplication.
func payloadHashToLogHash(payloadHash common.Hash, addr common.Address) backendTypes.TruncatedHash {
	msg := make([]byte, 0, 2*common.HashLength)
	msg = append(msg, addr.Bytes()...)
	msg = append(msg, payloadHash.Bytes()...)
	// The 52-byte addr‖payloadHash buffer must be Keccak-hashed, matching the
	// duplicate in log_processor.go; common.BytesToHash would silently keep
	// only the last 32 bytes instead of hashing.
	return backendTypes.TruncateHash(crypto.Keccak256Hash(msg))
}
Original file line number Diff line number Diff line change
Expand Up @@ -28,18 +28,19 @@ func TestDecodeExecutingMessageEvent(t *testing.T) {
}
contractIdent := contractIdentifier{
Origin: common.Address{0xbb, 0xcc},
ChainId: new(big.Int).SetUint64(uint64(expected.Chain)),
BlockNumber: new(big.Int).SetUint64(expected.BlockNum),
LogIndex: new(big.Int).SetUint64(uint64(expected.LogIdx)),
Timestamp: new(big.Int).SetUint64(expected.Timestamp),
ChainId: new(big.Int).SetUint64(uint64(expected.Chain)),
LogIndex: new(big.Int).SetUint64(uint64(expected.LogIdx)),
}
abi := snapshots.LoadCrossL2InboxABI()
validData, err := abi.Events[eventExecutingMessage].Inputs.Pack(contractIdent, payload)
validData, err := abi.Events[eventExecutingMessage].Inputs.Pack(payloadHash, contractIdent)
require.NoError(t, err)
createValidLog := func() *ethTypes.Log {
//protoHack := bytes.Repeat([]byte{0x00}, 32*5)
return &ethTypes.Log{
Address: predeploys.CrossL2InboxAddr,
Topics: []common.Hash{abi.Events[eventExecutingMessage].ID},
Topics: []common.Hash{abi.Events[eventExecutingMessage].ID, payloadHash},
Data: validData,
}
}
Expand Down
40 changes: 32 additions & 8 deletions op-supervisor/supervisor/backend/source/log_processor.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,17 +36,24 @@ func newLogProcessor(chain supTypes.ChainID, logStore LogStorage) *logProcessor
}
}

// ProcessLogs processes logs from a block and stores them in the log storage
// for any logs that are related to executing messages, they are decoded and stored
func (p *logProcessor) ProcessLogs(_ context.Context, block eth.L1BlockRef, rcpts ethTypes.Receipts) error {
for _, rcpt := range rcpts {
for _, l := range rcpt.Logs {
logHash := logToHash(l)
// log hash represents the hash of *this* log as a potentially initiating message
logHash := logToLogHash(l)
var execMsg *backendTypes.ExecutingMessage
msg, err := p.eventDecoder.DecodeExecutingMessageLog(l)
if err != nil && !errors.Is(err, contracts.ErrEventNotFound) {
return fmt.Errorf("failed to decode executing message log: %w", err)
} else if err == nil {
// if the log is an executing message, store the message
execMsg = &msg
}
// executing messages have multiple entries in the database
// they should start with the initiating message and then include the execution
fmt.Println("p.chain", p.chain)
err = p.logStore.AddLog(p.chain, logHash, block.ID(), block.Time, uint32(l.Index), execMsg)
if err != nil {
return fmt.Errorf("failed to add log %d from block %v: %w", l.Index, block.ID(), err)
Expand All @@ -56,19 +63,36 @@ func (p *logProcessor) ProcessLogs(_ context.Context, block eth.L1BlockRef, rcpt
return nil
}

func logToHash(l *ethTypes.Log) backendTypes.TruncatedHash {
payloadHash := crypto.Keccak256(logToPayload(l))
msg := make([]byte, 0, 2*common.HashLength)
msg = append(msg, l.Address.Bytes()...)
msg = append(msg, payloadHash...)
return backendTypes.TruncateHash(crypto.Keccak256Hash(msg))
// logToLogHash transforms a log into a hash that represents the log.
// it is the concatenation of the log's address and the hash of the log's payload,
// which is then hashed again. This is the hash that is stored in the log storage.
// The address is hashed into the payload hash to save space in the log storage,
// and because they represent paired data.
func logToLogHash(l *ethTypes.Log) backendTypes.TruncatedHash {
payloadHash := crypto.Keccak256(logToMessagePayload(l))
return payloadHashToLogHash(common.Hash(payloadHash), l.Address)
}

func logToPayload(l *ethTypes.Log) []byte {
// logToMessagePayload is the data that is hashed to get the logHash
// it is the concatenation of the log's topics and data
// the implementation is based on the interop messaging spec
func logToMessagePayload(l *ethTypes.Log) []byte {
msg := make([]byte, 0)
for _, topic := range l.Topics {
msg = append(msg, topic.Bytes()...)
}
msg = append(msg, l.Data...)
return msg
}

// payloadHashToLogHash converts the payload hash to the log hash
// it is the concatenation of the log's address and the hash of the log's payload,
// which is then hashed. This is the hash that is stored in the log storage.
// The logHash can then be used to traverse from the executing message
// to the log of the referenced initiating message.
func payloadHashToLogHash(payloadHash common.Hash, addr common.Address) backendTypes.TruncatedHash {
	// Keccak over the 52-byte concatenation addr ‖ payloadHash.
	msg := make([]byte, 0, common.AddressLength+common.HashLength)
	msg = append(msg, addr[:]...)
	msg = append(msg, payloadHash[:]...)
	return backendTypes.TruncateHash(crypto.Keccak256Hash(msg))
}
20 changes: 10 additions & 10 deletions op-supervisor/supervisor/backend/source/log_processor_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -64,21 +64,21 @@ func TestLogProcessor(t *testing.T) {
block: block1.ID(),
timestamp: block1.Time,
logIdx: 0,
logHash: logToHash(rcpts[0].Logs[0]),
logHash: logToLogHash(rcpts[0].Logs[0]),
execMsg: nil,
},
{
block: block1.ID(),
timestamp: block1.Time,
logIdx: 0,
logHash: logToHash(rcpts[0].Logs[1]),
logHash: logToLogHash(rcpts[0].Logs[1]),
execMsg: nil,
},
{
block: block1.ID(),
timestamp: block1.Time,
logIdx: 0,
logHash: logToHash(rcpts[1].Logs[0]),
logHash: logToLogHash(rcpts[1].Logs[0]),
execMsg: nil,
},
}
Expand All @@ -98,14 +98,14 @@ func TestLogProcessor(t *testing.T) {
},
}
execMsg := backendTypes.ExecutingMessage{
Chain: 2,
Chain: 4,
BlockNum: 6,
LogIdx: 8,
Timestamp: 10,
Hash: backendTypes.TruncatedHash{0xaa},
}
store := &stubLogStorage{}
processor := newLogProcessor(supTypes.ChainID{2}, store)
processor := newLogProcessor(supTypes.ChainID{4}, store)
processor.eventDecoder = EventDecoderFn(func(l *ethTypes.Log) (backendTypes.ExecutingMessage, error) {
require.Equal(t, rcpts[0].Logs[0], l)
return execMsg, nil
Expand All @@ -118,7 +118,7 @@ func TestLogProcessor(t *testing.T) {
block: block1.ID(),
timestamp: block1.Time,
logIdx: 0,
logHash: logToHash(rcpts[0].Logs[0]),
logHash: logToLogHash(rcpts[0].Logs[0]),
execMsg: &execMsg,
},
}
Expand Down Expand Up @@ -160,24 +160,24 @@ func TestToLogHash(t *testing.T) {
func(l *ethTypes.Log) { l.Index = 98 },
func(l *ethTypes.Log) { l.Removed = true },
}
refHash := logToHash(mkLog())
refHash := logToLogHash(mkLog())
// The log hash is stored in the database so test that it matches the actual value.
// If this changes compatibility with existing databases may be affected
// If this changes, compatibility with existing databases may be affected
expectedRefHash := backendTypes.TruncateHash(common.HexToHash("0x4e1dc08fddeb273275f787762cdfe945cf47bb4e80a1fabbc7a825801e81b73f"))
require.Equal(t, expectedRefHash, refHash, "reference hash changed, check that database compatibility is not broken")

// Check that the hash is changed when any data it should include changes
for i, mod := range relevantMods {
l := mkLog()
mod(l)
hash := logToHash(l)
hash := logToLogHash(l)
require.NotEqualf(t, refHash, hash, "expected relevant modification %v to affect the hash but it did not", i)
}
// Check that the hash is not changed when any data it should not include changes
for i, mod := range irrelevantMods {
l := mkLog()
mod(l)
hash := logToHash(l)
hash := logToLogHash(l)
require.Equal(t, refHash, hash, "expected irrelevant modification %v to not affect the hash but it did", i)
}
}
Expand Down

0 comments on commit 8b470d5

Please sign in to comment.