From 8b470d54ad559483da99845e870e21ede2dca1aa Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 7 Aug 2024 15:37:07 -0500 Subject: [PATCH] Incorporate new ABI format --- .../backend/source/contracts/l2inbox.go | 89 +++++++++++++++---- .../backend/source/contracts/l2inbox_test.go | 9 +- .../backend/source/log_processor.go | 40 +++++++-- .../backend/source/log_processor_test.go | 20 ++--- 4 files changed, 121 insertions(+), 37 deletions(-) diff --git a/op-supervisor/supervisor/backend/source/contracts/l2inbox.go b/op-supervisor/supervisor/backend/source/contracts/l2inbox.go index 9af45e0383f1c..21e746233e9d2 100644 --- a/op-supervisor/supervisor/backend/source/contracts/l2inbox.go +++ b/op-supervisor/supervisor/backend/source/contracts/l2inbox.go @@ -1,18 +1,20 @@ package contracts import ( + "bytes" "errors" "fmt" + "io" "math/big" "github.com/ethereum-optimism/optimism/op-service/predeploys" + "github.com/ethereum-optimism/optimism/op-service/solabi" "github.com/ethereum-optimism/optimism/op-service/sources/batching" backendTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/snapshots" "github.com/ethereum/go-ethereum/common" ethTypes "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" ) const ( @@ -24,11 +26,14 @@ var ( ) type contractIdentifier struct { + // Origin represents the address that initiated the message + // it is used in combination with the MsgHash to uniquely identify a message + // and is hashed into the log hash, not stored directly. Origin common.Address - BlockNumber *big.Int LogIndex *big.Int - Timestamp *big.Int + BlockNumber *big.Int ChainId *big.Int + Timestamp *big.Int } type CrossL2Inbox struct { @@ -46,7 +51,9 @@ func (i *CrossL2Inbox) DecodeExecutingMessageLog(l *ethTypes.Log) (backendTypes. 
if l.Address != i.contract.Addr() { return backendTypes.ExecutingMessage{}, fmt.Errorf("%w: log not from CrossL2Inbox", ErrEventNotFound) } - name, result, err := i.contract.DecodeEvent(l) + // use DecodeEvent to check the name of the event + // but the actual decoding is done manually to extract the contract identifier + name, _, err := i.contract.DecodeEvent(l) if errors.Is(err, batching.ErrUnknownEvent) { return backendTypes.ExecutingMessage{}, fmt.Errorf("%w: %v", ErrEventNotFound, err.Error()) } else if err != nil { @@ -55,20 +62,72 @@ func (i *CrossL2Inbox) DecodeExecutingMessageLog(l *ethTypes.Log) (backendTypes. if name != eventExecutingMessage { return backendTypes.ExecutingMessage{}, fmt.Errorf("%w: event %v not an ExecutingMessage event", ErrEventNotFound, name) } - var ident contractIdentifier - result.GetStruct(0, &ident) - payload := result.GetBytes(1) - payloadHash := crypto.Keccak256Hash(payload) - - chainID, err := types.ChainIDFromBig(ident.ChainId).ToUInt32() + // the second topic is the hash of the payload (the first is the event ID) + msgHash := l.Topics[1] + // the first 32 bytes of the data are the msgHash, so we skip them + identifierBytes := bytes.NewReader(l.Data[32:]) + identifier, err := identifierFromBytes(identifierBytes) if err != nil { - return backendTypes.ExecutingMessage{}, fmt.Errorf("failed to convert chain ID %v to uint32: %w", ident.ChainId, err) + return backendTypes.ExecutingMessage{}, fmt.Errorf("failed to read contract identifier: %w", err) } + chainID, err := types.ChainIDFromBig(identifier.ChainId).ToUInt32() + if err != nil { + return backendTypes.ExecutingMessage{}, fmt.Errorf("failed to convert chain ID %v to uint32: %w", identifier.ChainId, err) + } + hash := payloadHashToLogHash(msgHash, identifier.Origin) return backendTypes.ExecutingMessage{ Chain: chainID, - BlockNum: ident.BlockNumber.Uint64(), - LogIdx: uint32(ident.LogIndex.Uint64()), - Timestamp: ident.Timestamp.Uint64(), - Hash: 
backendTypes.TruncateHash(payloadHash), + Hash: hash, + BlockNum: identifier.BlockNumber.Uint64(), + LogIdx: uint32(identifier.LogIndex.Uint64()), + Timestamp: identifier.Timestamp.Uint64(), + }, nil +} + +// identifierFromBytes reads a contract identifier from a byte stream. +// it follows the spec and matches the CrossL2Inbox.json definition, +// rather than relying on reflection, as that can be error-prone regarding struct ordering +func identifierFromBytes(identifierBytes io.Reader) (contractIdentifier, error) { + origin, err := solabi.ReadAddress(identifierBytes) + if err != nil { + return contractIdentifier{}, fmt.Errorf("failed to read origin address: %w", err) + } + originAddr := common.BytesToAddress(origin[:]) + blockNumber, err := solabi.ReadUint256(identifierBytes) + if err != nil { + return contractIdentifier{}, fmt.Errorf("failed to read block number: %w", err) + } + logIndex, err := solabi.ReadUint256(identifierBytes) + if err != nil { + return contractIdentifier{}, fmt.Errorf("failed to read log index: %w", err) + } + timestamp, err := solabi.ReadUint256(identifierBytes) + if err != nil { + return contractIdentifier{}, fmt.Errorf("failed to read timestamp: %w", err) + } + chainID, err := solabi.ReadUint256(identifierBytes) + if err != nil { + return contractIdentifier{}, fmt.Errorf("failed to read chain ID: %w", err) + } + return contractIdentifier{ + Origin: originAddr, + BlockNumber: blockNumber, + LogIndex: logIndex, + Timestamp: timestamp, + ChainId: chainID, }, nil } + +// payloadHashToLogHash converts the payload hash to the log hash +// it is the concatenation of the log's address and the hash of the log's payload, +// which is then hashed again. This is the hash that is stored in the log storage. NOTE(review): the body below uses common.BytesToHash (which truncates, no keccak), while the log_processor.go copy uses crypto.Keccak256Hash — confirm which is intended; a mismatch would break executing-message-to-initiating-log lookups. +// The logHash can then be used to traverse from the executing message +// to the log of the referenced initiating message. +// TODO: this function is duplicated between contracts and backend/source/log_processor.go +// to avoid a circular dependency.
It should be reorganized to avoid this duplication. +func payloadHashToLogHash(payloadHash common.Hash, addr common.Address) backendTypes.TruncatedHash { + msg := make([]byte, 0, 2*common.HashLength) + msg = append(msg, addr.Bytes()...) + msg = append(msg, payloadHash.Bytes()...) + return backendTypes.TruncateHash(common.BytesToHash(msg)) +} diff --git a/op-supervisor/supervisor/backend/source/contracts/l2inbox_test.go b/op-supervisor/supervisor/backend/source/contracts/l2inbox_test.go index 3c951dd146eb5..12b37e4acf951 100644 --- a/op-supervisor/supervisor/backend/source/contracts/l2inbox_test.go +++ b/op-supervisor/supervisor/backend/source/contracts/l2inbox_test.go @@ -28,18 +28,19 @@ func TestDecodeExecutingMessageEvent(t *testing.T) { } contractIdent := contractIdentifier{ Origin: common.Address{0xbb, 0xcc}, + ChainId: new(big.Int).SetUint64(uint64(expected.Chain)), BlockNumber: new(big.Int).SetUint64(expected.BlockNum), - LogIndex: new(big.Int).SetUint64(uint64(expected.LogIdx)), Timestamp: new(big.Int).SetUint64(expected.Timestamp), - ChainId: new(big.Int).SetUint64(uint64(expected.Chain)), + LogIndex: new(big.Int).SetUint64(uint64(expected.LogIdx)), } abi := snapshots.LoadCrossL2InboxABI() - validData, err := abi.Events[eventExecutingMessage].Inputs.Pack(contractIdent, payload) + validData, err := abi.Events[eventExecutingMessage].Inputs.Pack(payloadHash, contractIdent) require.NoError(t, err) createValidLog := func() *ethTypes.Log { + //protoHack := bytes.Repeat([]byte{0x00}, 32*5) return ðTypes.Log{ Address: predeploys.CrossL2InboxAddr, - Topics: []common.Hash{abi.Events[eventExecutingMessage].ID}, + Topics: []common.Hash{abi.Events[eventExecutingMessage].ID, payloadHash}, Data: validData, } } diff --git a/op-supervisor/supervisor/backend/source/log_processor.go b/op-supervisor/supervisor/backend/source/log_processor.go index 58ceaa6a80622..3fd96476d41f4 100644 --- a/op-supervisor/supervisor/backend/source/log_processor.go +++ 
b/op-supervisor/supervisor/backend/source/log_processor.go @@ -36,17 +36,23 @@ func newLogProcessor(chain supTypes.ChainID, logStore LogStorage) *logProcessor } } +// ProcessLogs processes logs from a block and stores them in the log storage. +// Logs that relate to executing messages are decoded and stored alongside the message. func (p *logProcessor) ProcessLogs(_ context.Context, block eth.L1BlockRef, rcpts ethTypes.Receipts) error { for _, rcpt := range rcpts { for _, l := range rcpt.Logs { - logHash := logToHash(l) + // log hash represents the hash of *this* log as a potentially initiating message + logHash := logToLogHash(l) var execMsg *backendTypes.ExecutingMessage msg, err := p.eventDecoder.DecodeExecutingMessageLog(l) if err != nil && !errors.Is(err, contracts.ErrEventNotFound) { return fmt.Errorf("failed to decode executing message log: %w", err) } else if err == nil { + // if the log is an executing message, store the message execMsg = &msg } + // executing messages have multiple entries in the database + // they should start with the initiating message and then include the execution err = p.logStore.AddLog(p.chain, logHash, block.ID(), block.Time, uint32(l.Index), execMsg) if err != nil { return fmt.Errorf("failed to add log %d from block %v: %w", l.Index, block.ID(), err) @@ -56,15 +63,20 @@ func (p *logProcessor) ProcessLogs(_ context.Context, block eth.L1BlockRef, rcpt return nil } -func logToHash(l *ethTypes.Log) backendTypes.TruncatedHash { - payloadHash := crypto.Keccak256(logToPayload(l)) - msg := make([]byte, 0, 2*common.HashLength) - msg = append(msg, l.Address.Bytes()...) - msg = append(msg, payloadHash...) - return backendTypes.TruncateHash(crypto.Keccak256Hash(msg)) +// logToLogHash transforms a log into a hash that represents the log. +// it is the concatenation of the log's address and the hash of the log's payload, +// which is then hashed again. This is the hash that is stored in the log storage.
+// The address is hashed together with the payload hash to save space in the log storage, +// and because they represent paired data. +func logToLogHash(l *ethTypes.Log) backendTypes.TruncatedHash { + payloadHash := crypto.Keccak256(logToMessagePayload(l)) + return payloadHashToLogHash(common.Hash(payloadHash), l.Address) } -func logToPayload(l *ethTypes.Log) []byte { +// logToMessagePayload is the data that is hashed to get the logHash +// it is the concatenation of the log's topics and data +// the implementation is based on the interop messaging spec +func logToMessagePayload(l *ethTypes.Log) []byte { msg := make([]byte, 0) for _, topic := range l.Topics { msg = append(msg, topic.Bytes()...) } @@ -72,3 +84,15 @@ func logToPayload(l *ethTypes.Log) []byte { msg = append(msg, l.Data...) return msg } + +// payloadHashToLogHash converts the payload hash to the log hash +// it is the concatenation of the log's address and the hash of the log's payload, +// which is then hashed. This is the hash that is stored in the log storage. +// The logHash can then be used to traverse from the executing message +// to the log of the referenced initiating message. +func payloadHashToLogHash(payloadHash common.Hash, addr common.Address) backendTypes.TruncatedHash { + msg := make([]byte, 0, 2*common.HashLength) + msg = append(msg, addr.Bytes()...) + msg = append(msg, payloadHash.Bytes()...)
+ return backendTypes.TruncateHash(crypto.Keccak256Hash(msg)) +} diff --git a/op-supervisor/supervisor/backend/source/log_processor_test.go b/op-supervisor/supervisor/backend/source/log_processor_test.go index 9465066b16547..5c65973ab4e49 100644 --- a/op-supervisor/supervisor/backend/source/log_processor_test.go +++ b/op-supervisor/supervisor/backend/source/log_processor_test.go @@ -64,21 +64,21 @@ func TestLogProcessor(t *testing.T) { block: block1.ID(), timestamp: block1.Time, logIdx: 0, - logHash: logToHash(rcpts[0].Logs[0]), + logHash: logToLogHash(rcpts[0].Logs[0]), execMsg: nil, }, { block: block1.ID(), timestamp: block1.Time, logIdx: 0, - logHash: logToHash(rcpts[0].Logs[1]), + logHash: logToLogHash(rcpts[0].Logs[1]), execMsg: nil, }, { block: block1.ID(), timestamp: block1.Time, logIdx: 0, - logHash: logToHash(rcpts[1].Logs[0]), + logHash: logToLogHash(rcpts[1].Logs[0]), execMsg: nil, }, } @@ -98,14 +98,14 @@ func TestLogProcessor(t *testing.T) { }, } execMsg := backendTypes.ExecutingMessage{ - Chain: 2, + Chain: 4, BlockNum: 6, LogIdx: 8, Timestamp: 10, Hash: backendTypes.TruncatedHash{0xaa}, } store := &stubLogStorage{} - processor := newLogProcessor(supTypes.ChainID{2}, store) + processor := newLogProcessor(supTypes.ChainID{4}, store) processor.eventDecoder = EventDecoderFn(func(l *ethTypes.Log) (backendTypes.ExecutingMessage, error) { require.Equal(t, rcpts[0].Logs[0], l) return execMsg, nil @@ -118,7 +118,7 @@ func TestLogProcessor(t *testing.T) { block: block1.ID(), timestamp: block1.Time, logIdx: 0, - logHash: logToHash(rcpts[0].Logs[0]), + logHash: logToLogHash(rcpts[0].Logs[0]), execMsg: &execMsg, }, } @@ -160,9 +160,9 @@ func TestToLogHash(t *testing.T) { func(l *ethTypes.Log) { l.Index = 98 }, func(l *ethTypes.Log) { l.Removed = true }, } - refHash := logToHash(mkLog()) + refHash := logToLogHash(mkLog()) // The log hash is stored in the database so test that it matches the actual value. 
- // If this changes compatibility with existing databases may be affected + // If this changes, compatibility with existing databases may be affected expectedRefHash := backendTypes.TruncateHash(common.HexToHash("0x4e1dc08fddeb273275f787762cdfe945cf47bb4e80a1fabbc7a825801e81b73f")) require.Equal(t, expectedRefHash, refHash, "reference hash changed, check that database compatibility is not broken") @@ -170,14 +170,14 @@ func TestToLogHash(t *testing.T) { for i, mod := range relevantMods { l := mkLog() mod(l) - hash := logToHash(l) + hash := logToLogHash(l) require.NotEqualf(t, refHash, hash, "expected relevant modification %v to affect the hash but it did not", i) } // Check that the hash is not changed when any data it should not include changes for i, mod := range irrelevantMods { l := mkLog() mod(l) - hash := logToHash(l) + hash := logToLogHash(l) require.Equal(t, refHash, hash, "expected irrelevant modification %v to not affect the hash but it did", i) } }