diff --git a/l1-contracts/src/core/Rollup.sol b/l1-contracts/src/core/Rollup.sol index 341fd34b3da..7699dc42c24 100644 --- a/l1-contracts/src/core/Rollup.sol +++ b/l1-contracts/src/core/Rollup.sol @@ -9,11 +9,14 @@ import {IOutbox} from "./interfaces/messagebridge/IOutbox.sol"; import {IRegistry} from "./interfaces/messagebridge/IRegistry.sol"; // Libraries -import {Decoder} from "./libraries/Decoder.sol"; +import {HeaderDecoder} from "./libraries/decoders/HeaderDecoder.sol"; +import {MessagesDecoder} from "./libraries/decoders/MessagesDecoder.sol"; +import {Hash} from "./libraries/Hash.sol"; import {Errors} from "./libraries/Errors.sol"; // Contracts import {MockVerifier} from "../mock/MockVerifier.sol"; +import {AvailabilityOracle} from "./availability_oracle/AvailabilityOracle.sol"; /** * @title Rollup @@ -25,6 +28,7 @@ contract Rollup is IRollup { MockVerifier public immutable VERIFIER; IRegistry public immutable REGISTRY; uint256 public immutable VERSION; + AvailabilityOracle public immutable AVAILABILITY_ORACLE; bytes32 public rollupStateHash; uint256 public lastBlockTs; @@ -34,6 +38,7 @@ contract Rollup is IRollup { constructor(IRegistry _registry) { VERIFIER = new MockVerifier(); + AVAILABILITY_ORACLE = new AvailabilityOracle(); REGISTRY = _registry; VERSION = 1; } @@ -45,14 +50,30 @@ contract Rollup is IRollup { */ function process(bytes memory _proof, bytes calldata _l2Block) external override(IRollup) { _constrainGlobals(_l2Block); - ( - uint256 l2BlockNumber, - bytes32 oldStateHash, - bytes32 newStateHash, - bytes32 publicInputHash, - bytes32[] memory l2ToL1Msgs, - bytes32[] memory l1ToL2Msgs - ) = Decoder.decode(_l2Block); + + // Decode the header + (uint256 l2BlockNumber, bytes32 oldStateHash, bytes32 newStateHash) = + HeaderDecoder.decode(_l2Block[:HeaderDecoder.BLOCK_HEADER_SIZE]); + + // Check if the data is available using availability oracle (change availability oracle if you want a different DA layer) + bytes32 txsHash; + { + // @todo @LHerskind Hack such that the node is unchanged for now. + // should be removed when we have a proper block publication. + txsHash = AVAILABILITY_ORACLE.publish(_l2Block[HeaderDecoder.BLOCK_HEADER_SIZE:]); + } + + if (!AVAILABILITY_ORACLE.isAvailable(txsHash)) { + // @todo @LHerskind Impossible to hit with above hack. + revert Errors.Rollup__UnavailableTxs(txsHash); + } + + // Decode the cross-chain messages + (bytes32 inHash,, bytes32[] memory l2ToL1Msgs, bytes32[] memory l1ToL2Msgs) = + MessagesDecoder.decode(_l2Block[HeaderDecoder.BLOCK_HEADER_SIZE:]); + + bytes32 publicInputHash = + _computePublicInputHash(_l2Block[:HeaderDecoder.BLOCK_HEADER_SIZE], txsHash, inHash); // @todo @LHerskind Proper genesis state. If the state is empty, we allow anything for now. 
if (rollupStateHash != bytes32(0) && rollupStateHash != oldStateHash) { @@ -79,10 +100,10 @@ contract Rollup is IRollup { emit L2BlockProcessed(l2BlockNumber); } - function _constrainGlobals(bytes calldata _l2Block) internal view { - uint256 chainId = uint256(bytes32(_l2Block[:0x20])); - uint256 version = uint256(bytes32(_l2Block[0x20:0x40])); - uint256 ts = uint256(bytes32(_l2Block[0x60:0x80])); + function _constrainGlobals(bytes calldata _header) internal view { + uint256 chainId = uint256(bytes32(_header[:0x20])); + uint256 version = uint256(bytes32(_header[0x20:0x40])); + uint256 ts = uint256(bytes32(_header[0x60:0x80])); // block number already constrained by start state hash if (block.chainid != chainId) { @@ -105,4 +126,12 @@ contract Rollup is IRollup { revert Errors.Rollup__TimestampTooOld(); } } + + function _computePublicInputHash(bytes calldata _header, bytes32 _txsHash, bytes32 _inHash) + internal + pure + returns (bytes32) + { + return Hash.sha256ToField(bytes.concat(_header, _txsHash, _inHash)); + } } diff --git a/l1-contracts/src/core/availability_oracle/AvailabilityOracle.sol b/l1-contracts/src/core/availability_oracle/AvailabilityOracle.sol new file mode 100644 index 00000000000..0f4041d16d2 --- /dev/null +++ b/l1-contracts/src/core/availability_oracle/AvailabilityOracle.sol @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2024 Aztec Labs. +pragma solidity >=0.8.18; + +// Interfaces +import {IAvailabilityOracle} from "./../interfaces/IAvailabilityOracle.sol"; + +// Libraries +import {TxsDecoder} from "./../libraries/decoders/TxsDecoder.sol"; + +/** + * @title AvailabilityOracle + * @author Aztec Labs + * @notice An availability oracle that uses L1 calldata for publication + */ +contract AvailabilityOracle is IAvailabilityOracle { + mapping(bytes32 txsHash => bool available) public override(IAvailabilityOracle) isAvailable; + + /** + * @notice Publishes transactions and marks its commitment, the TxsHash, as available + * @param _body - The L1 calldata + * @return txsHash - The TxsHash + */ + function publish(bytes calldata _body) external override(IAvailabilityOracle) returns (bytes32) { + bytes32 _txsHash = TxsDecoder.decode(_body); + isAvailable[_txsHash] = true; + return _txsHash; + } +} diff --git a/l1-contracts/src/core/interfaces/IAvailabilityOracle.sol b/l1-contracts/src/core/interfaces/IAvailabilityOracle.sol new file mode 100644 index 00000000000..302f6dbc0e5 --- /dev/null +++ b/l1-contracts/src/core/interfaces/IAvailabilityOracle.sol @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2024 Aztec Labs. 
+pragma solidity >=0.8.18; + +interface IAvailabilityOracle { + function publish(bytes calldata _body) external returns (bytes32); + + function isAvailable(bytes32 _txsHash) external view returns (bool); +} diff --git a/l1-contracts/src/core/libraries/Errors.sol b/l1-contracts/src/core/libraries/Errors.sol index d9a356b1fa9..e8d6785f183 100644 --- a/l1-contracts/src/core/libraries/Errors.sol +++ b/l1-contracts/src/core/libraries/Errors.sol @@ -54,6 +54,7 @@ library Errors { error Rollup__InvalidVersion(uint256 expected, uint256 actual); // 0x9ef30794 error Rollup__TimestampInFuture(); // 0xbc1ce916 error Rollup__TimestampTooOld(); // 0x72ed9c81 + error Rollup__UnavailableTxs(bytes32 txsHash); // 0x414906c3 // Registry error Registry__RollupNotRegistered(address rollup); // 0xa1fee4cf diff --git a/l1-contracts/src/core/libraries/Decoder.sol b/l1-contracts/src/core/libraries/decoders/Decoder.sol similarity index 99% rename from l1-contracts/src/core/libraries/Decoder.sol rename to l1-contracts/src/core/libraries/decoders/Decoder.sol index 379904f28ff..9a9863f5b1a 100644 --- a/l1-contracts/src/core/libraries/Decoder.sol +++ b/l1-contracts/src/core/libraries/decoders/Decoder.sol @@ -3,8 +3,8 @@ pragma solidity >=0.8.18; // Libraries -import {Constants} from "./ConstantsGen.sol"; -import {Hash} from "./Hash.sol"; +import {Constants} from "../ConstantsGen.sol"; +import {Hash} from "../Hash.sol"; /** * @title Decoder Library diff --git a/l1-contracts/src/core/libraries/decoders/HeaderDecoder.sol b/l1-contracts/src/core/libraries/decoders/HeaderDecoder.sol new file mode 100644 index 00000000000..f2e7778e1f6 --- /dev/null +++ b/l1-contracts/src/core/libraries/decoders/HeaderDecoder.sol @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2023 Aztec Labs. +pragma solidity >=0.8.18; + +// Libraries +import {Constants} from "../ConstantsGen.sol"; +import {Hash} from "../Hash.sol"; + +/** + * @title Header Decoder Library + * @author Aztec Labs + * @notice Decoding a L2 header + * Concerned with readability and velocity of development not giving a damn about gas costs. + * + * ------------------- + * You can use https://gist.github.com/LHerskind/724a7e362c97e8ac2902c6b961d36830 to generate the below outline. 
+ * -------------------
+ * L2 Block Header specification
+ * -------------------
+ *
+ * | byte start | num bytes | name
+ * | --- | --- | ---
+ * | 0x0000 | 0x20 | chain-id
+ * | 0x0020 | 0x20 | version
+ * | 0x0040 | 0x20 | L2 block number
+ * | 0x0060 | 0x20 | L2 timestamp
+ * | 0x0080 | 0x20 | startNoteHashTreeSnapshot.root
+ * | 0x00a0 | 0x04 | startNoteHashTreeSnapshot.nextAvailableLeafIndex
+ * | 0x00a4 | 0x20 | startNullifierTreeSnapshot.root
+ * | 0x00c4 | 0x04 | startNullifierTreeSnapshot.nextAvailableLeafIndex
+ * | 0x00c8 | 0x20 | startContractTreeSnapshot.root
+ * | 0x00e8 | 0x04 | startContractTreeSnapshot.nextAvailableLeafIndex
+ * | 0x00ec | 0x20 | startPublicDataTreeSnapshot.root
+ * | 0x010c | 0x04 | startPublicDataTreeSnapshot.nextAvailableLeafIndex
+ * | 0x0110 | 0x20 | startL1ToL2MessageTreeSnapshot.root
+ * | 0x0130 | 0x04 | startL1ToL2MessageTreeSnapshot.nextAvailableLeafIndex
+ * | 0x0134 | 0x20 | startArchiveSnapshot.root
+ * | 0x0154 | 0x04 | startArchiveSnapshot.nextAvailableLeafIndex
+ * | 0x0158 | 0x20 | endNoteHashTreeSnapshot.root
+ * | 0x0178 | 0x04 | endNoteHashTreeSnapshot.nextAvailableLeafIndex
+ * | 0x017c | 0x20 | endNullifierTreeSnapshot.root
+ * | 0x019c | 0x04 | endNullifierTreeSnapshot.nextAvailableLeafIndex
+ * | 0x01a0 | 0x20 | endContractTreeSnapshot.root
+ * | 0x01c0 | 0x04 | endContractTreeSnapshot.nextAvailableLeafIndex
+ * | 0x01c4 | 0x20 | endPublicDataTreeSnapshot.root
+ * | 0x01e4 | 0x04 | endPublicDataTreeSnapshot.nextAvailableLeafIndex
+ * | 0x01e8 | 0x20 | endL1ToL2MessageTreeSnapshot.root
+ * | 0x0208 | 0x04 | endL1ToL2MessageTreeSnapshot.nextAvailableLeafIndex
+ * | 0x020c | 0x20 | endArchiveSnapshot.root
+ * | 0x022c | 0x04 | endArchiveSnapshot.nextAvailableLeafIndex
+ * | --- | --- | ---
+ */
+library HeaderDecoder {
+  // DECODING OFFSET CONSTANTS
+  // Where the start-state tree snapshots begin in the header
+  uint256 private constant START_TREES_BLOCK_HEADER_OFFSET = 0x80;
+
+  // The size of one group of tree snapshots in the header
+  uint256 private constant TREES_BLOCK_HEADER_SIZE = 0xd8;
+
+  // Where the end-state tree snapshots begin in the header
+  uint256 private constant END_TREES_BLOCK_HEADER_OFFSET =
+    START_TREES_BLOCK_HEADER_OFFSET + TREES_BLOCK_HEADER_SIZE;
+
+  // Where the header ends and the block body begins.
+  uint256 internal constant BLOCK_HEADER_SIZE =
+    START_TREES_BLOCK_HEADER_OFFSET + 2 * TREES_BLOCK_HEADER_SIZE;
+
+  /**
+   * @notice Decodes the header
+   * @param _header - The L2 block header calldata.
+   * @return l2BlockNumber - The L2 block number
+   * @return startStateHash - The start state hash
+   * @return endStateHash - The end state hash
+   */
+  function decode(bytes calldata _header)
+    internal
+    pure
+    returns (uint256 l2BlockNumber, bytes32 startStateHash, bytes32 endStateHash)
+  {
+    l2BlockNumber = uint256(bytes32(_header[0x40:0x60]));
+    // Note: for startStateHash to match storage, the new L2 block number must be the previous one + 1,
+    // i.e. we only advance one block at a time.
+    startStateHash = computeStateHash(l2BlockNumber - 1, START_TREES_BLOCK_HEADER_OFFSET, _header);
+    endStateHash = computeStateHash(l2BlockNumber, END_TREES_BLOCK_HEADER_OFFSET, _header);
+  }
+
+  /**
+   * @notice Computes a state hash
+   * @param _l2BlockNumber - The L2 block number
+   * @param _offset - The offset into the data, 0x80 for start, 0x158 for end
+   * @param _header - The L2 block header calldata.
+   * @return The state hash
+   * @dev The state hash is the sha256 hash of the block's header elements. For each block, the header
+   * elements are the block number and the snapshots of all the trees. This function copies all of
+   * these to memory and then hashes them.
+   */
+  function computeStateHash(uint256 _l2BlockNumber, uint256 _offset, bytes calldata _header)
+    internal
+    pure
+    returns (bytes32)
+  {
+    return sha256(
+      bytes.concat(bytes32(_l2BlockNumber), _header[_offset:_offset + TREES_BLOCK_HEADER_SIZE])
+    );
+  }
+}
diff --git a/l1-contracts/src/core/libraries/decoders/MessagesDecoder.sol b/l1-contracts/src/core/libraries/decoders/MessagesDecoder.sol
new file mode 100644
index 00000000000..29dbf87bfb0
--- /dev/null
+++ b/l1-contracts/src/core/libraries/decoders/MessagesDecoder.sol
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright 2024 Aztec Labs.
+pragma solidity >=0.8.18;
+
+// Libraries
+import {Constants} from "../ConstantsGen.sol";
+import {Hash} from "../Hash.sol";
+
+/**
+ * @title Messages Decoder Library
+ * @author Aztec Labs
+ * @notice Decodes an L2 block body and returns the cross-chain messages and the in/out hashes.
+ * Concerned with readability and velocity of development not giving a damn about gas costs.
+ * @dev Assumes the input trees to be padded.
+ *
+ * -------------------
+ * You can use https://gist.github.com/LHerskind/724a7e362c97e8ac2902c6b961d36830 to generate the below outline.
+ * -------------------
+ * L2 Body Data specification
+ * -------------------
+ *
+ * | byte start | num bytes | name
+ * | --- | --- | ---
+ * | 0x00 | 0x04 | len(newCommitments) (denoted a)
+ * | 0x04 | a * 0x20 | newCommitments
+ * | 0x04 + a * 0x20 | 0x04 | len(newNullifiers) (denoted b)
+ * | 0x08 + a * 0x20 | b * 0x20 | newNullifiers
+ * | 0x08 + a * 0x20 + b * 0x20 | 0x04 | len(newPublicDataWrites) (denoted c)
+ * | 0x0c + a * 0x20 + b * 0x20 | c * 0x40 | newPublicDataWrites
+ * | 0x0c + a * 0x20 + b * 0x20 + c * 0x40 | 0x04 | len(newL2ToL1Msgs) (denoted d)
+ * | 0x10 + a * 0x20 + b * 0x20 + c * 0x40 | d * 0x20 | newL2ToL1Msgs
+ * | 0x10 + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 | 0x04 | len(contracts) (denoted e)
+ * | 0x14 + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 | e * 0x20 | newContracts
+ * | 0x14 + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 + e * 0x20 | e * 0x34 | newContractsData
+ * | 0x14 + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 + e * 0x54 | 0x04 | len(newL1ToL2Msgs) (denoted f)
+ * | 0x18 + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 + e * 0x54 | f * 0x20 | newL1ToL2Msgs
+ * | 0x18 + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 + e * 0x54 + f * 0x20 | 0x04 | byteLen(newEncryptedLogs) (denoted g)
+ * | 0x1c + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 + e * 0x54 + f * 0x20 | g | newEncryptedLogs
+ * | 0x1c + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 + e * 0x54 + f * 0x20 + g | 0x04 | byteLen(newUnencryptedLogs) (denoted h)
+ * | 0x20 + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 + e * 0x54 + f * 0x20 + g | h | newUnencryptedLogs
+ * | --- | --- | ---
+ */
+library MessagesDecoder {
+  /**
+   * @notice Computes the cross-chain messages and message hashes for the block
+   * @param _body - The L2 block body calldata.
+   * @return l1ToL2MsgsHash - The hash of the L1 to L2 messages
+   * @return l2ToL1MsgsHash - The hash of the L2 to L1 messages
+   * @return l2ToL1Msgs - The L2 to L1 messages of the block
+   * @return l1ToL2Msgs - The L1 to L2 messages of the block
+   */
+  function decode(bytes calldata _body)
+    internal
+    pure
+    returns (bytes32, bytes32, bytes32[] memory, bytes32[] memory)
+  {
+    bytes32[] memory l1ToL2Msgs = new bytes32[](Constants.NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP);
+    bytes32[] memory l2ToL1Msgs;
+
+    uint256 offset = 0;
+
+    // Commitments
+    uint256 count = read4(_body, offset);
+    offset += 0x4 + count * 0x20;
+
+    // Nullifiers
+    count = read4(_body, offset);
+    offset += 0x4 + count * 0x20;
+
+    // Public data writes
+    count = read4(_body, offset);
+    offset += 0x4 + count * 0x40;
+
+    // L2 to L1 messages
+    count = read4(_body, offset);
+    l2ToL1Msgs = new bytes32[](count);
+    assembly {
+      calldatacopy(add(l2ToL1Msgs, 0x20), add(_body.offset, add(offset, 0x4)), mul(count, 0x20))
+    }
+    offset += 0x4 + count * 0x20;
+
+    // Contracts
+    count = read4(_body, offset);
+    offset += 0x4 + count * 0x54;
+
+    // L1 to L2 messages
+    count = read4(_body, offset);
+    // `l1ToL2Msgs` is a fixed-size array, so if `count` < `Constants.NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP`
+    // the array will contain some zero values.
+    assembly {
+      calldatacopy(add(l1ToL2Msgs, 0x20), add(_body.offset, add(offset, 0x04)), mul(count, 0x20))
+    }
+
+    bytes32 l1ToL2MsgsHash = sha256(abi.encodePacked(l1ToL2Msgs));
+    bytes32 l2ToL1MsgsHash = sha256(abi.encodePacked(l2ToL1Msgs));
+
+    return (l1ToL2MsgsHash, l2ToL1MsgsHash, l2ToL1Msgs, l1ToL2Msgs);
+  }
+
+  /**
+   * @notice Reads 4 bytes from the data
+   * @param _data - The data to read from
+   * @param _offset - The offset to read from
+   * @return The 4 bytes read as a uint256
+   */
+  function read4(bytes calldata _data, uint256 _offset) internal pure returns (uint256) {
+    return uint256(uint32(bytes4(_data[_offset:_offset + 4])));
+  }
+}
diff --git a/l1-contracts/src/core/libraries/decoders/TxsDecoder.sol b/l1-contracts/src/core/libraries/decoders/TxsDecoder.sol
new file mode 100644
index 00000000000..c58b10b377e
--- /dev/null
+++ b/l1-contracts/src/core/libraries/decoders/TxsDecoder.sol
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright 2023 Aztec Labs.
+pragma solidity >=0.8.18;
+
+// Libraries
+import {Constants} from "../ConstantsGen.sol";
+import {Hash} from "../Hash.sol";
+
+/**
+ * @title Txs Decoder Library
+ * @author Aztec Labs
+ * @notice Decodes an L2 block body and computes the TxsHash.
+ * Concerned with readability and velocity of development not giving a damn about gas costs.
+ * @dev Assumes the input trees to be padded.
+ *
+ * -------------------
+ * You can use https://gist.github.com/LHerskind/724a7e362c97e8ac2902c6b961d36830 to generate the below outline.
+ * ------------------- + * L2 Body Data specification + * ------------------- + * + * | byte start | num bytes | name + * | --- | --- | --- + * | 0x00 | 0x04 | len(newCommitments) (denoted a) + * | 0x04 | a * 0x20 | newCommitments + * | 0x04 + a * 0x20 | 0x04 | len(newNullifiers) (denoted b) + * | 0x08 + a * 0x20 | b * 0x20 | newNullifiers + * | 0x08 + a * 0x20 + b * 0x20 | 0x04 | len(newPublicDataWrites) (denoted c) + * | 0x0c + a * 0x20 + b * 0x20 | c * 0x40 | newPublicDataWrites + * | 0x0c + a * 0x20 + b * 0x20 + c * 0x40 | 0x04 | len(newL2ToL1Msgs) (denoted d) + * | 0x10 + a * 0x20 + b * 0x20 + c * 0x40 | d * 0x20 | newL2ToL1Msgs + * | 0x10 + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 | 0x04 | len(contracts) (denoted e) + * | 0x14 + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 | e * 0x20 | newContracts + * | 0x14 + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 + e * 0x20 | e * 0x34 | newContractsData + * | 0x14 + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 + e * 0x54 | 0x04 | len(newL1ToL2Msgs) (denoted f) + * | 0x18 + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 + e * 0x54 | f * 0x20 | newL1ToL2Msgs + * | 0x18 + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 + e * 0x54 + f * 0x20 | 0x04 | byteLen(newEncryptedLogs) (denoted g) + * | 0x1c + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 + e * 0x54 + f * 0x20 | g | newEncryptedLogs + * | 0x1c + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 + e * 0x54 + f * 0x20 + g | 0x04 | byteLen(newUnencryptedLogs) (denoted h) + * | 0x20 + a * 0x20 + b * 0x20 + c * 0x40 + d * 0x20 + e * 0x54 + f * 0x20 + g | h | newUnencryptedLogs + * | --- | --- | --- + */ +library TxsDecoder { + struct ArrayOffsets { + uint256 commitment; + uint256 nullifier; + uint256 publicData; + uint256 l2ToL1Msgs; + uint256 contracts; + uint256 contractData; + uint256 l1ToL2Msgs; + uint256 encryptedLogs; + uint256 unencryptedLogs; + } + + // Note: Used in `computeConsumables` to get around stack too deep errors. + struct ConsumablesVars { + bytes32[] baseLeaves; + bytes32[] l2ToL1Msgs; + bytes baseLeaf; + bytes32 encryptedLogsHash; + bytes32 unencryptedLogsHash; + uint256 l1Tol2MsgsCount; + } + + /** + * @notice Computes consumables for the block + * @param _body - The L2 block calldata. 
+   * @return txsHash - The TxsHash, a commitment to the txs' effects (new commitments, nullifiers etc)
+   */
+  function decode(bytes calldata _body) internal pure returns (bytes32) {
+    ArrayOffsets memory offsets;
+    ConsumablesVars memory vars;
+
+    {
+      uint256 offset = 0;
+
+      // Commitments
+      uint256 count = read4(_body, offset);
+      vars.baseLeaves = new bytes32[](count / Constants.MAX_NEW_COMMITMENTS_PER_TX);
+      offsets.commitment = 0x4;
+      offset += 0x4 + count * 0x20;
+      offsets.nullifier = offset + 0x4; // + 0x4 to offset by next read4
+
+      // Nullifiers
+      count = read4(_body, offset);
+      offset += 0x4 + count * 0x20;
+      offsets.publicData = offset + 0x4; // + 0x4 to offset by next read4
+
+      // Public data writes
+      count = read4(_body, offset);
+      offset += 0x4 + count * 0x40;
+      offsets.l2ToL1Msgs = offset + 0x4; // + 0x4 to offset by next read4
+
+      // L2 to L1 messages
+      count = read4(_body, offset);
+      vars.l2ToL1Msgs = new bytes32[](count);
+      assembly {
+        // load the l2 to l1 msgs (done here as offset will be altered in loop)
+        let l2ToL1Msgs := mload(add(vars, 0x20))
+        calldatacopy(
+          add(l2ToL1Msgs, 0x20), add(_body.offset, mload(add(offsets, 0x60))), mul(count, 0x20)
+        )
+      }
+      offset += 0x4 + count * 0x20;
+      offsets.contracts = offset + 0x4; // + 0x4 to offset by next read4
+
+      // Contracts
+      count = read4(_body, offset);
+      offsets.contractData = offsets.contracts + count * 0x20;
+      offset += 0x4 + count * 0x54;
+      offsets.l1ToL2Msgs = offset + 0x4; // + 0x4 to offset by next read4
+
+      // L1 to L2 messages
+      count = read4(_body, offset);
+      vars.l1Tol2MsgsCount = count;
+      offset += 0x4 + count * 0x20;
+      offsets.encryptedLogs = offset + 0x4; // + 0x4 to offset by next read4
+
+      // Used as length in bytes down here
+      uint256 length = read4(_body, offset);
+      offsets.unencryptedLogs = offsets.encryptedLogs + 0x4 + length;
+    }
+
+    // Data starts after the header. See the L2 Body Data specification at the top of this file.
+    {
+      for (uint256 i = 0; i < vars.baseLeaves.length; i++) {
+        /*
+         * Compute the leaf to insert.
+         * Leaf_i = (
+         *   newCommitmentsKernel,
+         *   newNullifiersKernel,
+         *   newPublicDataWritesKernel,
+         *   newL2ToL1MsgsKernel,
+         *   newContractLeafKernel,
+         *   newContractDataKernel.aztecAddress,
+         *   newContractDataKernel.ethAddress (padded to 32 bytes),
+         *   encryptedLogsHash,      |
+         *   unencryptedLogsHash, ____|=> Computed below from logs' preimages.
+         * );
+         * Note that we always read data; the l2Block (atm) must therefore include dummy or zero notes for
+         * zero values.
+         */
+
+        /**
+         * Compute encrypted and unencrypted logs hashes corresponding to the current leaf.
+         * Note: will advance offsets by the number of bytes processed.
+         */
+        (vars.encryptedLogsHash, offsets.encryptedLogs) =
+          computeKernelLogsHash(offsets.encryptedLogs, _body);
+
+        (vars.unencryptedLogsHash, offsets.unencryptedLogs) =
+          computeKernelLogsHash(offsets.unencryptedLogs, _body);
+
+        // Insertions are split into multiple `bytes.concat` to work around stack too deep.
+        vars.baseLeaf = bytes.concat(
+          bytes.concat(
+            slice(_body, offsets.commitment, Constants.COMMITMENTS_NUM_BYTES_PER_BASE_ROLLUP),
+            slice(_body, offsets.nullifier, Constants.NULLIFIERS_NUM_BYTES_PER_BASE_ROLLUP),
+            slice(_body, offsets.publicData, Constants.PUBLIC_DATA_WRITES_NUM_BYTES_PER_BASE_ROLLUP),
+            slice(_body, offsets.l2ToL1Msgs, Constants.L2_TO_L1_MSGS_NUM_BYTES_PER_BASE_ROLLUP),
+            slice(_body, offsets.contracts, Constants.CONTRACTS_NUM_BYTES_PER_BASE_ROLLUP)
+          ),
+          bytes.concat(
+            slice(_body, offsets.contractData, 0x20), // newContractDataKernel.aztecAddress
+            bytes12(0),
+            slice(_body, offsets.contractData + 0x20, 0x14) // newContractDataKernel.ethAddress
+          ),
+          bytes.concat(vars.encryptedLogsHash, vars.unencryptedLogsHash)
+        );
+
+        offsets.commitment += Constants.COMMITMENTS_NUM_BYTES_PER_BASE_ROLLUP;
+        offsets.nullifier += Constants.NULLIFIERS_NUM_BYTES_PER_BASE_ROLLUP;
+        offsets.publicData += Constants.PUBLIC_DATA_WRITES_NUM_BYTES_PER_BASE_ROLLUP;
+        offsets.l2ToL1Msgs += Constants.L2_TO_L1_MSGS_NUM_BYTES_PER_BASE_ROLLUP;
+        offsets.contracts += Constants.CONTRACTS_NUM_BYTES_PER_BASE_ROLLUP;
+        offsets.contractData += Constants.CONTRACT_DATA_NUM_BYTES_PER_BASE_ROLLUP_UNPADDED;
+
+        vars.baseLeaves[i] = sha256(vars.baseLeaf);
+      }
+    }
+
+    return computeRoot(vars.baseLeaves);
+  }
+
+  /**
+   * @notice Computes logs hash as is done in the kernel and app circuits.
+   * @param _offsetInBlock - The offset of the kernel's logs in the block.
+   * @param _body - The L2 block calldata.
+   * @return The hash of the logs and the offset in the block after processing the logs.
+   * @dev We have the logs preimages on the input and we need to perform the same hashing process as is done in
+   * the app circuit (hashing the logs) and in the kernel circuit (accumulating the logs hashes). In each
+   * iteration of the kernel, the kernel computes a hash of the previous iteration's logs hash (the hash in the
+   * previous kernel's public inputs) and the current iteration's private circuit public inputs logs hash.
+   *
+   * E.g. the resulting logs hash of a kernel with 3 iterations would be computed as:
+   *
+   *    kernelPublicInputsLogsHash = sha256(sha256(sha256(I1_LOGS), sha256(I2_LOGS)), sha256(I3_LOGS))
+   *
+   * where I1_LOGS, I2_LOGS and I3_LOGS are the logs emitted in the first, second and third function call.
+   *
+   * Note that `sha256(I1_LOGS)`, `sha256(I2_LOGS)` and `sha256(I3_LOGS)` are computed in the app circuit and not
+   * in the kernel circuit. The kernel circuit only accumulates the hashes.
+   *
+   * @dev For the example above, the logs are encoded in the following way:
+   *
+   *    || K_LOGS_LEN | I1_LOGS_LEN | I1_LOGS | I2_LOGS_LEN | I2_LOGS | I3_LOGS_LEN | I3_LOGS ||
+   *       4 bytes      4 bytes       i bytes   4 bytes       j bytes   4 bytes       k bytes
+   *
+   *    K_LOGS_LEN is the total length of the logs in the kernel.
+   *    I1_LOGS_LEN (i) is the length of the logs in the first iteration.
+   *    I1_LOGS are all the logs emitted in the first iteration.
+   *    I2_LOGS_LEN (j) ...
+ * + * @dev Link to a relevant discussion: + * https://discourse.aztec.network/t/proposal-forcing-the-sequencer-to-actually-submit-data-to-l1/426/9 + */ + function computeKernelLogsHash(uint256 _offsetInBlock, bytes calldata _body) + internal + pure + returns (bytes32, uint256) + { + uint256 offset = _offsetInBlock; + uint256 remainingLogsLength = read4(_body, offset); + offset += 0x4; + + bytes32 kernelPublicInputsLogsHash; // The hash on the output of kernel iteration + + // Iterate until all the logs were processed + while (remainingLogsLength > 0) { + // The length of the logs emitted by Aztec.nr from the function call corresponding to this kernel iteration + uint256 privateCircuitPublicInputLogsLength = read4(_body, offset); + offset += 0x4; + + // Hash the logs of this iteration's function call + bytes32 privateCircuitPublicInputsLogsHash = + sha256(slice(_body, offset, privateCircuitPublicInputLogsLength)); + offset += privateCircuitPublicInputLogsLength; + + // Decrease remaining logs length by this privateCircuitPublicInputsLogs's length (len(I?_LOGS)) and 4 bytes for I?_LOGS_LEN + remainingLogsLength -= (privateCircuitPublicInputLogsLength + 0x4); + + kernelPublicInputsLogsHash = + sha256(bytes.concat(kernelPublicInputsLogsHash, privateCircuitPublicInputsLogsHash)); + } + + return (kernelPublicInputsLogsHash, offset); + } + + /** + * @notice Computes the root for a binary Merkle-tree given the leafs. + * @dev Uses sha256. + * @param _leafs - The 32 bytes leafs to build the tree of. + * @return The root of the Merkle tree. + */ + function computeRoot(bytes32[] memory _leafs) internal pure returns (bytes32) { + // @todo Must pad the tree + uint256 treeDepth = 0; + while (2 ** treeDepth < _leafs.length) { + treeDepth++; + } + uint256 treeSize = 2 ** treeDepth; + assembly { + mstore(_leafs, treeSize) + } + + for (uint256 i = 0; i < treeDepth; i++) { + for (uint256 j = 0; j < treeSize; j += 2) { + _leafs[j / 2] = sha256(bytes.concat(_leafs[j], _leafs[j + 1])); + } + } + + return _leafs[0]; + } + + /** + * @notice Wrapper around the slicing to avoid some stack too deep + * @param _data - The data to slice + * @param _start - The start of the slice + * @param _length - The length of the slice + * @return The slice + */ + function slice(bytes calldata _data, uint256 _start, uint256 _length) + internal + pure + returns (bytes memory) + { + return _data[_start:_start + _length]; + } + + /** + * @notice Reads 4 bytes from the data + * @param _data - The data to read from + * @param _offset - The offset to read from + * @return The 4 bytes read as a uint256 + */ + function read4(bytes calldata _data, uint256 _offset) internal pure returns (uint256) { + return uint256(uint32(bytes4(slice(_data, _offset, 4)))); + } +} diff --git a/l1-contracts/test/DecoderHelper.sol b/l1-contracts/test/DecoderHelper.sol index fc911d5c7ad..aff9242912b 100644 --- a/l1-contracts/test/DecoderHelper.sol +++ b/l1-contracts/test/DecoderHelper.sol @@ -2,7 +2,7 @@ // Copyright 2023 Aztec Labs. pragma solidity >=0.8.18; -import {Decoder} from "../src/core/libraries/Decoder.sol"; +import {Decoder} from "../src/core/libraries/decoders/Decoder.sol"; import {Rollup} from "../src/core/Rollup.sol"; contract DecoderHelper {
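For reference, the header layout in HeaderDecoder gives BLOCK_HEADER_SIZE = 0x80 + 2 * 0xd8 = 0x230 (560 bytes): four 32-byte global fields followed by two groups of six tree snapshots, each snapshot being a 32-byte root plus a 4-byte next-available-leaf index. A minimal Foundry-style sketch of that arithmetic, assuming forge-std is available and reusing the import path style of DecoderHelper.sol; the contract, file and test names are hypothetical:

// SPDX-License-Identifier: Apache-2.0
pragma solidity >=0.8.18;

import {Test} from "forge-std/Test.sol";
import {HeaderDecoder} from "../src/core/libraries/decoders/HeaderDecoder.sol";

// Hypothetical test illustrating where HeaderDecoder's size constants come from.
contract HeaderSizeTest is Test {
  function testBlockHeaderSizeDerivation() public {
    uint256 snapshotSize = 0x20 + 0x04; // root + nextAvailableLeafIndex
    uint256 globalsSize = 4 * 0x20; // chain-id, version, L2 block number, L2 timestamp
    // note hash, nullifier, contract, public data, L1-to-L2 message and archive snapshots
    uint256 treesSize = 6 * snapshotSize; // 0xd8

    assertEq(globalsSize, 0x80);
    assertEq(globalsSize + 2 * treesSize, 0x230);
    assertEq(HeaderDecoder.BLOCK_HEADER_SIZE, globalsSize + 2 * treesSize);
  }
}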
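The kernel logs hash accumulation documented on TxsDecoder.computeKernelLogsHash can be exercised the same way. The sketch below (again hypothetical names, forge-std assumed) encodes two iterations' logs as || K_LOGS_LEN | I1_LOGS_LEN | I1_LOGS | I2_LOGS_LEN | I2_LOGS || and checks that the library reproduces the hand-computed accumulation, which starts from an all-zero hash as in the library code:

// SPDX-License-Identifier: Apache-2.0
pragma solidity >=0.8.18;

import {Test} from "forge-std/Test.sol";
import {TxsDecoder} from "../src/core/libraries/decoders/TxsDecoder.sol";

// Hypothetical wrapper so the internal, calldata-taking library function can be called from a test.
contract TxsDecoderHarness {
  function kernelLogsHash(bytes calldata _body) external pure returns (bytes32, uint256) {
    return TxsDecoder.computeKernelLogsHash(0, _body);
  }
}

contract KernelLogsHashTest is Test {
  TxsDecoderHarness internal harness;

  function setUp() public {
    harness = new TxsDecoderHarness();
  }

  function testTwoIterationAccumulation() public {
    bytes memory i1Logs = hex"0102030405"; // 5 bytes emitted by the first call
    bytes memory i2Logs = hex"aabbccddeeff11"; // 7 bytes emitted by the second call

    // K_LOGS_LEN counts each iteration's 4-byte length prefix plus its logs.
    bytes memory encoded = bytes.concat(
      bytes4(uint32(4 + i1Logs.length + 4 + i2Logs.length)),
      bytes4(uint32(i1Logs.length)),
      i1Logs,
      bytes4(uint32(i2Logs.length)),
      i2Logs
    );

    // The kernel accumulator starts from bytes32(0) and folds in each app circuit's logs hash.
    bytes32 expected = sha256(bytes.concat(bytes32(0), sha256(i1Logs)));
    expected = sha256(bytes.concat(expected, sha256(i2Logs)));

    (bytes32 actual, uint256 end) = harness.kernelLogsHash(encoded);
    assertEq(actual, expected);
    assertEq(end, encoded.length);
  }
}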