diff --git a/.circleci/config.yml b/.circleci/config.yml index c2a9a139a908..dd34bf0d79a7 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1076,7 +1076,7 @@ jobs: key: cannon-prestate-{{ checksum "./cannon/bin/cannon" }}-{{ checksum "op-program/bin/op-program-client.elf" }} name: Save Cannon prestate to cache paths: - - "op-program/bin/prestate.json" + - "op-program/bin/prestate.bin.gz" - "op-program/bin/meta.json" - "op-program/bin/prestate-proof.json" - run: @@ -1097,7 +1097,7 @@ jobs: - persist_to_workspace: root: . paths: - - "op-program/bin/prestate.json" + - "op-program/bin/prestate.bin.gz" - "op-program/bin/meta.json" - "op-program/bin/prestate-proof.json" @@ -1164,184 +1164,33 @@ jobs: - notify-failures-on-develop: mentions: "@proofs-squad" - devnet: - machine: - image: <> - parameters: - variant: - type: string - environment: - DOCKER_BUILDKIT: 1 - DEVNET_NO_BUILD: 'true' - # Default value; Can be overridden. - DEVNET_L2OO: 'false' - DEVNET_ALTDA: 'false' + cannon-stf-verify: + docker: + - image: <> steps: - checkout - - attach_workspace: { at: "." } - - check-changed: - patterns: op-(.+),packages,ops-bedrock,bedrock-devnet - - when: - condition: - equal: ['altda', <>] - steps: - - run: - name: Set DEVNET_ALTDA = true - command: echo 'export DEVNET_ALTDA=true' >> $BASH_ENV - - when: - condition: - equal: ['altda-generic', <>] - steps: - - run: - name: Set DEVNET_ALTDA = true - command: echo 'export DEVNET_ALTDA=true' >> $BASH_ENV - - run: - name: Set GENERIC_ALTDA = true - command: echo 'export GENERIC_ALTDA=true' >> $BASH_ENV + - setup_remote_docker - restore_cache: name: Restore Go modules cache key: gomod-{{ checksum "go.sum" }} - restore_cache: name: Restore Go build cache keys: - - golang-build-cache-devnet-{{ checksum "go.sum" }} - - golang-build-cache-devnet- - - run: - name: Install latest golang - command: | - VER=$(jq -r .go < versions.json) - sudo rm -rf /usr/local/go - wget "https://go.dev/dl/go${VER}.linux-amd64.tar.gz" -O - | sudo tar -C /usr/local -xz - export PATH=$PATH:/usr/local/go/bin - go version - - run: - name: Install Geth - command: | - VER=$(jq -r .geth_release < versions.json) - wget "https://gethstore.blob.core.windows.net/builds/geth-alltools-linux-amd64-${VER}.tar.gz" -O - | tar xz - sudo cp "geth-alltools-linux-amd64-${VER}"/* /usr/local/bin - - run: - name: Install eth2-testnet-genesis - command: | - go install -v github.com/protolambda/eth2-testnet-genesis@$(jq -r .eth2_testnet_genesis < versions.json) - - run: - name: foundryup - command: | - curl -L https://foundry.paradigm.xyz | bash - source $HOME/.bashrc - foundryup - echo 'export PATH=$HOME/.foundry/bin:$PATH' >> $BASH_ENV - source $HOME/.bashrc - forge --version - - run: - name: Install Just - command: | - VER=$(jq -r .just < versions.json) - curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- --to $HOME/bin --tag "${VER}" - echo 'export PATH="${PATH}:$HOME/bin"' >> $BASH_ENV - - install-contracts-dependencies - - when: - condition: - not: - equal: ['default', <>] - steps: - - run: - name: Use non-default devnet allocs - command: rm -r .devnet && mv .devnet-<> .devnet - - run: - name: Load and tag docker images - command: | - IMAGE_BASE_PREFIX="us-docker.pkg.dev/oplabs-tools-artifacts/images" - # Load from previous docker-build job - docker load < "./op-node.tar" - docker load < "./op-proposer.tar" - docker load < "./op-batcher.tar" - docker load < "./op-challenger.tar" - docker load < "./da-server.tar" - # rename to the tags that the 
docker-compose of the devnet expects - docker tag "$IMAGE_BASE_PREFIX/op-node:<>" "$IMAGE_BASE_PREFIX/op-node:devnet" - docker tag "$IMAGE_BASE_PREFIX/op-proposer:<>" "$IMAGE_BASE_PREFIX/op-proposer:devnet" - docker tag "$IMAGE_BASE_PREFIX/op-batcher:<>" "$IMAGE_BASE_PREFIX/op-batcher:devnet" - docker tag "$IMAGE_BASE_PREFIX/op-challenger:<>" "$IMAGE_BASE_PREFIX/op-challenger:devnet" - docker tag "$IMAGE_BASE_PREFIX/da-server:<>" "$IMAGE_BASE_PREFIX/da-server:devnet" - - run: - name: Bring up the stack - command: | - # Specify like this to avoid a forced rebuild of the contracts + devnet L1 - PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=. - echo "Waiting for 10 seconds to give the devnet time to settle in..." - sleep 10 - - run: - name: Test the stack - command: make devnet-test - - run: - name: Dump op-node logs - command: | - docker logs ops-bedrock-op-node-1 || echo "No logs." - when: on_fail - - run: - name: Dump op-geth logs - command: | - docker logs ops-bedrock-l2-1 || echo "No logs." - when: on_fail + - golang-build-cache-cannon-stf-verify-{{ checksum "go.sum" }} + - golang-build-cache-cannon-stf-verify- - run: - name: Dump l1 logs - command: | - docker logs ops-bedrock-l1-1 || echo "No logs." - when: on_fail - - run: - name: Dump l1-bn logs - command: | - docker logs ops-bedrock-l1-bn-1 || echo "No logs." - when: on_fail - - run: - name: Dump l1-vc logs - command: | - docker logs ops-bedrock-l1-vc-1 || echo "No logs." - when: on_fail - - run: - name: Dump op-batcher logs - command: | - docker logs ops-bedrock-op-batcher-1 || echo "No logs." - when: on_fail - - run: - name: Dump op-proposer logs - command: | - docker logs ops-bedrock-op-proposer-1 || echo "No logs." - when: on_fail - - run: - name: Dump op-challenger logs - command: | - docker logs ops-bedrock-op-challenger-1 || echo "No logs." - when: on_fail - - run: - name: Dump da-server logs - command: | - docker logs ops-bedrock-da-server-1 || echo "No logs." - when: on_fail - - run: - name: Log deployment artifact - command: | - cat broadcast/Deploy.s.sol/900/run-latest.json || echo "No deployment file found" - when: on_fail - working_directory: packages/contracts-bedrock - - run: - name: Log devnet config - command: | - cat deploy-config/devnetL1.json || echo "No devnet config found" - when: on_fail - working_directory: packages/contracts-bedrock + name: Build cannon + command: make cannon - run: - name: Log artifacts directory - command: | - ls -R forge-artifacts || echo "No forge artifacts found" - when: on_fail - working_directory: packages/contracts-bedrock + name: Verify the Cannon STF + command: make -C ./cannon cannon-stf-verify - save_cache: name: Save Go build cache - key: golang-build-cache-devnet-{{ checksum "go.sum" }} + key: golang-build-cache-cannon-stf-verify-{{ checksum "go.sum" }} paths: - - /home/circleci/.cache/go-build + - "/root/.cache/go-build" + - notify-failures-on-develop: + mentions: "@proofs-squad" semgrep-scan: parameters: @@ -1379,7 +1228,7 @@ jobs: # --timeout (in seconds) limits the time per rule and file. 
# SEMGREP_TIMEOUT is the same, but docs have conflicting defaults (5s in CLI flag, 1800 in some places) # https://semgrep.dev/docs/troubleshooting/semgrep-app#if-the-job-is-aborted-due-to-taking-too-long - command: semgrep ci --timeout=100 + command: semgrep ci --timeout=100 --no-suppress-errors # If semgrep hangs, stop the scan after 20m, to prevent a useless 5h job no_output_timeout: 20m - notify-failures-on-develop @@ -1629,6 +1478,9 @@ workflows: - op-program - op-service - op-supervisor + - go-test: + name: semver-natspec-tests + module: packages/contracts-bedrock/scripts/checks/semver-natspec - go-test-kurtosis: name: op-chain-ops-integration module: op-chain-ops @@ -1721,18 +1573,6 @@ workflows: - cannon-prestate: requires: - go-mod-download - - devnet: - matrix: - parameters: - variant: ["default", "altda", "altda-generic"] - requires: - - contracts-bedrock-build - - op-batcher-docker-build - - op-proposer-docker-build - - op-node-docker-build - - op-challenger-docker-build - - da-server-docker-build - - cannon-prestate - check-generated-mocks-op-node - check-generated-mocks-op-service - cannon-go-lint-and-test: @@ -1857,7 +1697,7 @@ workflows: scheduled-fpp: when: - equal: [ build_four_hours, <> ] + equal: [ build_hourly, <> ] jobs: - fpp-verify: context: @@ -1885,6 +1725,11 @@ workflows: - cannon-prestate: requires: - go-mod-download + - cannon-stf-verify: + requires: + - go-mod-download + context: + - slack - contracts-bedrock-build: skip_pattern: test context: diff --git a/.semgrepignore b/.semgrepignore index 3208e2604b89..5d358263fb65 100644 --- a/.semgrepignore +++ b/.semgrepignore @@ -41,3 +41,5 @@ packages/contracts-bedrock/src/L2/SuperchainWETH.sol packages/contracts-bedrock/src/L2/interfaces/ISuperchainWETH.sol packages/contracts-bedrock/src/governance/GovernanceToken.sol packages/contracts-bedrock/src/governance/interfaces/IGovernanceToken.sol +packages/contracts-bedrock/src/dispute/interfaces/IFaultDisputeGame.sol +packages/contracts-bedrock/src/dispute/interfaces/IPermissionedDisputeGame.sol diff --git a/Makefile b/Makefile index 4f329a4241e2..072375e728bf 100644 --- a/Makefile +++ b/Makefile @@ -134,7 +134,7 @@ reproducible-prestate: ## Builds reproducible-prestate binary .PHONY: reproducible-prestate # Include any files required for the devnet to build and run. 
-DEVNET_CANNON_PRESTATE_FILES := op-program/bin/prestate-proof.json op-program/bin/prestate.json op-program/bin/prestate-proof-mt.json op-program/bin/prestate-mt.bin.gz +DEVNET_CANNON_PRESTATE_FILES := op-program/bin/prestate-proof.json op-program/bin/prestate.bin.gz op-program/bin/prestate-proof-mt.json op-program/bin/prestate-mt.bin.gz $(DEVNET_CANNON_PRESTATE_FILES): @@ -142,8 +142,8 @@ $(DEVNET_CANNON_PRESTATE_FILES): make cannon-prestate-mt cannon-prestate: op-program cannon ## Generates prestate using cannon and op-program - ./cannon/bin/cannon load-elf --type singlethreaded --path op-program/bin/op-program-client.elf --out op-program/bin/prestate.json --meta op-program/bin/meta.json - ./cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input op-program/bin/prestate.json --meta op-program/bin/meta.json --proof-fmt 'op-program/bin/%d.json' --output "" + ./cannon/bin/cannon load-elf --type singlethreaded-2 --path op-program/bin/op-program-client.elf --out op-program/bin/prestate.bin.gz --meta op-program/bin/meta.json + ./cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input op-program/bin/prestate.bin.gz --meta op-program/bin/meta.json --proof-fmt 'op-program/bin/%d.json' --output "" mv op-program/bin/0.json op-program/bin/prestate-proof.json .PHONY: cannon-prestate diff --git a/cannon/.gitignore b/cannon/.gitignore index 68424370890f..c9a7f170c14d 100644 --- a/cannon/.gitignore +++ b/cannon/.gitignore @@ -7,9 +7,6 @@ venv *.log testdata/example/bin contracts/out -state.json -*.json -*.json.gz *.pprof *.out bin diff --git a/cannon/Dockerfile.diff b/cannon/Dockerfile.diff new file mode 100644 index 000000000000..168b664a2baa --- /dev/null +++ b/cannon/Dockerfile.diff @@ -0,0 +1,34 @@ +FROM golang:1.22.7-alpine3.20 as builder + +RUN apk add --no-cache make bash + +COPY ./go.mod /app/go.mod +COPY ./go.sum /app/go.sum + +WORKDIR /app + +RUN echo "go mod cache: $(go env GOMODCACHE)" +RUN echo "go build cache: $(go env GOCACHE)" + +RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build go mod download + +COPY . /app + +# We avoid copying the full .git dir into the build for just some metadata. +# Instead, specify: +# --build-arg GIT_COMMIT=$(git rev-parse HEAD) +# --build-arg GIT_DATE=$(git show -s --format='%ct') +ARG GIT_COMMIT +ARG GIT_DATE + +ARG TARGETOS TARGETARCH + +FROM --platform=$BUILDPLATFORM us-docker.pkg.dev/oplabs-tools-artifacts/images/cannon:v1.1.0-alpha.1 AS cannon-v2 + +FROM --platform=$BUILDPLATFORM builder as cannon-verify +COPY --from=cannon-v2 /usr/local/bin/cannon /usr/local/bin/cannon-v2 +# verify the latest singlethreaded VM behavior against cannon-v2 +RUN cd cannon && make diff-singlethreaded-2-cannon -e OTHER_CANNON=/usr/local/bin/cannon-v2 +RUN --mount=type=cache,target=/root/.cache/go-build cd cannon && \ + make diff-singlethreaded-2-cannon -e OTHER_CANNON=/usr/local/bin/cannon-v2 \ + GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE diff --git a/cannon/Makefile b/cannon/Makefile index 7a660dbb7f62..5376b1b62086 100644 --- a/cannon/Makefile +++ b/cannon/Makefile @@ -22,8 +22,11 @@ cannon64-impl: env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build --tags=cannon64 -v $(LDFLAGS) -o ./bin/cannon64-impl . 
cannon-embeds: cannon32-impl cannon64-impl - @cp bin/cannon32-impl ./multicannon/embeds/cannon-0 + # singlethreaded-v2 + @cp bin/cannon32-impl ./multicannon/embeds/cannon-2 + # multithreaded @cp bin/cannon32-impl ./multicannon/embeds/cannon-1 + # 64-bit multithreaded @cp bin/cannon64-impl ./multicannon/embeds/cannon-3 cannon: cannon-embeds @@ -69,6 +72,9 @@ diff-%-cannon: cannon elf exit 1; \ fi +cannon-stf-verify: + @docker build --progress plain -f Dockerfile.diff ../ + fuzz: # Common vm tests go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallBrk ./mipsevm/tests @@ -93,4 +99,5 @@ fuzz: test \ lint \ fuzz \ - diff-%-cannon + diff-%-cannon \ + cannon-stf-verify diff --git a/cannon/cmd/load_elf.go b/cannon/cmd/load_elf.go index f73e2b75de0a..7609a3b7091d 100644 --- a/cannon/cmd/load_elf.go +++ b/cannon/cmd/load_elf.go @@ -69,7 +69,7 @@ func LoadELF(ctx *cli.Context) error { return err } switch ver { - case versions.VersionSingleThreaded: + case versions.VersionSingleThreaded2: createInitialState = func(f *elf.File) (mipsevm.FPVMState, error) { return program.LoadELF(f, singlethreaded.CreateInitialState) } diff --git a/cannon/cmd/run.go b/cannon/cmd/run.go index 732f3c16343d..b9854082c5a9 100644 --- a/cannon/cmd/run.go +++ b/cannon/cmd/run.go @@ -373,6 +373,7 @@ func Run(ctx *cli.Context) error { if err != nil { return fmt.Errorf("failed to load state: %w", err) } + l.Info("Loaded input state", "version", state.Version) vm := state.CreateVM(l, po, outLog, errLog, meta) debugProgram := ctx.Bool(RunDebugFlag.Name) if debugProgram { diff --git a/cannon/mipsevm/exec/mips_syscalls.go b/cannon/mipsevm/exec/mips_syscalls.go index 42b66a9b5a13..e2b29d36ff76 100644 --- a/cannon/mipsevm/exec/mips_syscalls.go +++ b/cannon/mipsevm/exec/mips_syscalls.go @@ -309,7 +309,15 @@ func HandleSysFcntl(a0, a1 Word) (v0, v1 Word) { // args: a0 = fd, a1 = cmd v1 = Word(0) - if a1 == 3 { // F_GETFL: get file descriptor flags + if a1 == 1 { // F_GETFD: get file descriptor flags + switch a0 { + case FdStdin, FdStdout, FdStderr, FdPreimageRead, FdHintRead, FdPreimageWrite, FdHintWrite: + v0 = 0 // No flags set + default: + v0 = 0xFFffFFff + v1 = MipsEBADF + } + } else if a1 == 3 { // F_GETFL: get file status flags switch a0 { case FdStdin, FdPreimageRead, FdHintRead: v0 = 0 // O_RDONLY diff --git a/cannon/mipsevm/multithreaded/instrumented_test.go b/cannon/mipsevm/multithreaded/instrumented_test.go index 1242f6375d91..f0b005257f7c 100644 --- a/cannon/mipsevm/multithreaded/instrumented_test.go +++ b/cannon/mipsevm/multithreaded/instrumented_test.go @@ -40,7 +40,7 @@ func TestInstrumentedState_MultithreadedProgram(t *testing.T) { var stdOutBuf, stdErrBuf bytes.Buffer us := NewInstrumentedState(state, oracle, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger(), nil) - for i := 0; i < 1_000_000; i++ { + for i := 0; i < 2_000_000; i++ { if us.GetState().GetExited() { break } diff --git a/cannon/mipsevm/singlethreaded/state.go b/cannon/mipsevm/singlethreaded/state.go index a086e8b99016..741f7f66bb09 100644 --- a/cannon/mipsevm/singlethreaded/state.go +++ b/cannon/mipsevm/singlethreaded/state.go @@ -71,7 +71,6 @@ func CreateInitialState(pc, heapStart Word) *State { } func (s *State) CreateVM(logger log.Logger, po mipsevm.PreimageOracle, stdOut, stdErr io.Writer, meta mipsevm.Metadata) mipsevm.FPVM { - logger.Info("Using cannon VM", "is32", arch.IsMips32) return NewInstrumentedState(s, po, stdOut, stdErr, meta) } diff --git 
a/cannon/mipsevm/tests/fuzz_evm_common_test.go b/cannon/mipsevm/tests/fuzz_evm_common_test.go index e23d3e9cbf23..712b7d4875d3 100644 --- a/cannon/mipsevm/tests/fuzz_evm_common_test.go +++ b/cannon/mipsevm/tests/fuzz_evm_common_test.go @@ -151,7 +151,17 @@ func FuzzStateSyscallFcntl(f *testing.F) { expected.Step += 1 expected.PC = state.GetCpu().NextPC expected.NextPC = state.GetCpu().NextPC + 4 - if cmd == 3 { + if cmd == 1 { + switch fd { + case exec.FdStdin, exec.FdStdout, exec.FdStderr, + exec.FdPreimageRead, exec.FdHintRead, exec.FdPreimageWrite, exec.FdHintWrite: + expected.Registers[2] = 0 + expected.Registers[7] = 0 + default: + expected.Registers[2] = 0xFF_FF_FF_FF + expected.Registers[7] = exec.MipsEBADF + } + } else if cmd == 3 { switch fd { case exec.FdStdin, exec.FdPreimageRead, exec.FdHintRead: expected.Registers[2] = 0 diff --git a/cannon/mipsevm/versions/detect.go b/cannon/mipsevm/versions/detect.go index 52840bb3293e..1f1f4147d695 100644 --- a/cannon/mipsevm/versions/detect.go +++ b/cannon/mipsevm/versions/detect.go @@ -27,7 +27,7 @@ func DetectVersion(path string) (StateVersion, error) { } switch ver { - case VersionSingleThreaded, VersionMultiThreaded, VersionMultiThreaded64: + case VersionSingleThreaded, VersionMultiThreaded, VersionSingleThreaded2, VersionMultiThreaded64: return ver, nil default: return 0, fmt.Errorf("%w: %d", ErrUnknownVersion, ver) diff --git a/cannon/mipsevm/versions/detect_test.go b/cannon/mipsevm/versions/detect_test.go index ba8b894c8401..be849269fff9 100644 --- a/cannon/mipsevm/versions/detect_test.go +++ b/cannon/mipsevm/versions/detect_test.go @@ -1,8 +1,10 @@ package versions import ( + "embed" "os" "path/filepath" + "strconv" "testing" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" @@ -11,23 +13,49 @@ import ( "github.com/stretchr/testify/require" ) +const statesPath = "testdata/states" + +//go:embed testdata/states +var historicStates embed.FS + func TestDetectVersion(t *testing.T) { - t.Run("SingleThreadedJSON", func(t *testing.T) { - state, err := NewFromState(singlethreaded.CreateEmptyState()) + testDetection := func(t *testing.T, version StateVersion, ext string) { + filename := strconv.Itoa(int(version)) + ext + dir := t.TempDir() + path := filepath.Join(dir, filename) + in, err := historicStates.ReadFile(filepath.Join(statesPath, filename)) require.NoError(t, err) - path := writeToFile(t, "state.json", state) - version, err := DetectVersion(path) + require.NoError(t, os.WriteFile(path, in, 0o644)) + + detectedVersion, err := DetectVersion(path) require.NoError(t, err) - require.Equal(t, VersionSingleThreaded, version) - }) + require.Equal(t, version, detectedVersion) + } + // Iterate all known versions to ensure we have a test case to detect every state version + for _, version := range StateVersionTypes { + version := version + if version == VersionMultiThreaded64 { + t.Skip("TODO(#12205)") + } + t.Run(version.String(), func(t *testing.T) { + testDetection(t, version, ".bin.gz") + }) + + if version == VersionSingleThreaded { + t.Run(version.String()+".json", func(t *testing.T) { + testDetection(t, version, ".json") + }) + } + } + // Additionally, check that the latest supported versions write new states in a way that is detected correctly t.Run("SingleThreadedBinary", func(t *testing.T) { state, err := NewFromState(singlethreaded.CreateEmptyState()) require.NoError(t, err) path := writeToFile(t, "state.bin.gz", state) version, err := DetectVersion(path) require.NoError(t, err) - require.Equal(t, 
VersionSingleThreaded, version) + require.Equal(t, VersionSingleThreaded2, version) }) t.Run("MultiThreadedBinary", func(t *testing.T) { @@ -38,15 +66,6 @@ func TestDetectVersion(t *testing.T) { require.NoError(t, err) require.Equal(t, VersionMultiThreaded, version) }) - - t.Run("MultiThreaded64Binary", func(t *testing.T) { - state, err := NewFromState(multithreaded.CreateEmptyState()) - require.NoError(t, err) - path := writeToFile(t, "state.bin.gz", state) - version, err := DetectVersion(path) - require.NoError(t, err) - require.Equal(t, VersionMultiThreaded, version) - }) } func TestDetectVersionInvalid(t *testing.T) { diff --git a/cannon/mipsevm/versions/state.go b/cannon/mipsevm/versions/state.go index baa4a85a4267..c33c5d4d756c 100644 --- a/cannon/mipsevm/versions/state.go +++ b/cannon/mipsevm/versions/state.go @@ -20,6 +20,8 @@ const ( // VersionSingleThreaded is the version of the Cannon STF found in op-contracts/v1.6.0 - https://github.com/ethereum-optimism/optimism/blob/op-contracts/v1.6.0/packages/contracts-bedrock/src/cannon/MIPS.sol VersionSingleThreaded StateVersion = iota VersionMultiThreaded + // VersionSingleThreaded2 is based on VersionSingleThreaded with the addition of support for fcntl(F_GETFD) syscall + VersionSingleThreaded2 VersionMultiThreaded64 ) @@ -29,7 +31,7 @@ var ( ErrUnsupportedMipsArch = errors.New("mips architecture is not supported") ) -var StateVersionTypes = []StateVersion{VersionSingleThreaded, VersionMultiThreaded, VersionMultiThreaded64} +var StateVersionTypes = []StateVersion{VersionSingleThreaded, VersionMultiThreaded, VersionSingleThreaded2, VersionMultiThreaded64} func LoadStateFromFile(path string) (*VersionedState, error) { if !serialize.IsBinaryFile(path) { @@ -50,7 +52,7 @@ func NewFromState(state mipsevm.FPVMState) (*VersionedState, error) { return nil, ErrUnsupportedMipsArch } return &VersionedState{ - Version: VersionSingleThreaded, + Version: VersionSingleThreaded2, FPVMState: state, }, nil case *multithreaded.State: @@ -92,7 +94,7 @@ func (s *VersionedState) Deserialize(in io.Reader) error { } switch s.Version { - case VersionSingleThreaded: + case VersionSingleThreaded2: if !arch.IsMips32 { return ErrUnsupportedMipsArch } @@ -145,6 +147,8 @@ func (s StateVersion) String() string { return "singlethreaded" case VersionMultiThreaded: return "multithreaded" + case VersionSingleThreaded2: + return "singlethreaded-2" case VersionMultiThreaded64: return "multithreaded64" default: @@ -158,6 +162,8 @@ func ParseStateVersion(ver string) (StateVersion, error) { return VersionSingleThreaded, nil case "multithreaded": return VersionMultiThreaded, nil + case "singlethreaded-2": + return VersionSingleThreaded2, nil case "multithreaded64": return VersionMultiThreaded64, nil default: diff --git a/cannon/mipsevm/versions/state_test.go b/cannon/mipsevm/versions/state_test.go index 7fb36cd5734c..8740d51d2929 100644 --- a/cannon/mipsevm/versions/state_test.go +++ b/cannon/mipsevm/versions/state_test.go @@ -4,6 +4,7 @@ import ( "path/filepath" "testing" + "github.com/ethereum-optimism/optimism/cannon/mipsevm" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" "github.com/ethereum-optimism/optimism/cannon/mipsevm/singlethreaded" "github.com/ethereum-optimism/optimism/cannon/serialize" @@ -11,11 +12,11 @@ import ( ) func TestNewFromState(t *testing.T) { - t.Run("singlethreaded", func(t *testing.T) { + t.Run("singlethreaded-2", func(t *testing.T) { actual, err := NewFromState(singlethreaded.CreateEmptyState()) require.NoError(t, err) 
require.IsType(t, &singlethreaded.State{}, actual.FPVMState) - require.Equal(t, VersionSingleThreaded, actual.Version) + require.Equal(t, VersionSingleThreaded2, actual.Version) }) t.Run("multithreaded", func(t *testing.T) { @@ -27,16 +28,6 @@ func TestNewFromState(t *testing.T) { } func TestLoadStateFromFile(t *testing.T) { - t.Run("SinglethreadedFromJSON", func(t *testing.T) { - expected, err := NewFromState(singlethreaded.CreateEmptyState()) - require.NoError(t, err) - - path := writeToFile(t, "state.json", expected) - actual, err := LoadStateFromFile(path) - require.NoError(t, err) - require.Equal(t, expected, actual) - }) - t.Run("SinglethreadedFromBinary", func(t *testing.T) { expected, err := NewFromState(singlethreaded.CreateEmptyState()) require.NoError(t, err) @@ -58,14 +49,26 @@ func TestLoadStateFromFile(t *testing.T) { }) } -func TestMultithreadedDoesNotSupportJSON(t *testing.T) { - state, err := NewFromState(multithreaded.CreateEmptyState()) - require.NoError(t, err) +func TestVersionsOtherThanZeroDoNotSupportJSON(t *testing.T) { + tests := []struct { + version StateVersion + createState func() mipsevm.FPVMState + }{ + {VersionSingleThreaded2, func() mipsevm.FPVMState { return singlethreaded.CreateEmptyState() }}, + {VersionMultiThreaded, func() mipsevm.FPVMState { return multithreaded.CreateEmptyState() }}, + } + for _, test := range tests { + test := test + t.Run(test.version.String(), func(t *testing.T) { + state, err := NewFromState(test.createState()) + require.NoError(t, err) - dir := t.TempDir() - path := filepath.Join(dir, "test.json") - err = serialize.Write(path, state, 0o644) - require.ErrorIs(t, err, ErrJsonNotSupported) + dir := t.TempDir() + path := filepath.Join(dir, "test.json") + err = serialize.Write(path, state, 0o644) + require.ErrorIs(t, err, ErrJsonNotSupported) + }) + } } func writeToFile(t *testing.T, filename string, data serialize.Serializable) string { diff --git a/cannon/mipsevm/versions/testdata/states/0.bin.gz b/cannon/mipsevm/versions/testdata/states/0.bin.gz new file mode 100644 index 000000000000..2a862e6e0c11 Binary files /dev/null and b/cannon/mipsevm/versions/testdata/states/0.bin.gz differ diff --git a/cannon/mipsevm/versions/testdata/states/0.json b/cannon/mipsevm/versions/testdata/states/0.json new file mode 100644 index 000000000000..b45e978ea614 --- /dev/null +++ b/cannon/mipsevm/versions/testdata/states/0.json @@ -0,0 +1,48 @@ +{ + "memory": [], + "preimageKey": "0x0000000000000000000000000000000000000000000000000000000000000000", + "preimageOffset": 0, + "pc": 0, + "nextPC": 4, + "lo": 0, + "hi": 0, + "heap": 0, + "exit": 0, + "exited": false, + "step": 0, + "registers": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] +} + diff --git a/cannon/mipsevm/versions/testdata/states/1.bin.gz b/cannon/mipsevm/versions/testdata/states/1.bin.gz new file mode 100644 index 000000000000..fa6309bd0969 Binary files /dev/null and b/cannon/mipsevm/versions/testdata/states/1.bin.gz differ diff --git a/cannon/mipsevm/versions/testdata/states/2.bin.gz b/cannon/mipsevm/versions/testdata/states/2.bin.gz new file mode 100644 index 000000000000..901472568c61 Binary files /dev/null and b/cannon/mipsevm/versions/testdata/states/2.bin.gz differ diff --git a/cannon/testdata/example/alloc/go.mod b/cannon/testdata/example/alloc/go.mod index d4d3c23faf2d..f0525fb68d5a 100644 --- a/cannon/testdata/example/alloc/go.mod +++ 
b/cannon/testdata/example/alloc/go.mod @@ -1,8 +1,8 @@ module alloc -go 1.21 +go 1.22 -toolchain go1.21.1 +toolchain go1.22.0 require github.com/ethereum-optimism/optimism v0.0.0 diff --git a/cannon/testdata/example/claim/go.mod b/cannon/testdata/example/claim/go.mod index c70d9906f06c..be3ddc7c0040 100644 --- a/cannon/testdata/example/claim/go.mod +++ b/cannon/testdata/example/claim/go.mod @@ -1,8 +1,8 @@ module claim -go 1.21 +go 1.22 -toolchain go1.21.1 +toolchain go1.22.0 require github.com/ethereum-optimism/optimism v0.0.0 diff --git a/cannon/testdata/example/entry/go.mod b/cannon/testdata/example/entry/go.mod index 2e4d29124f54..296b95426437 100644 --- a/cannon/testdata/example/entry/go.mod +++ b/cannon/testdata/example/entry/go.mod @@ -1,3 +1,5 @@ module entry -go 1.21 +go 1.22 + +toolchain go1.22.0 diff --git a/cannon/testdata/example/hello/go.mod b/cannon/testdata/example/hello/go.mod index da6c43db676b..b54bb78c6aee 100644 --- a/cannon/testdata/example/hello/go.mod +++ b/cannon/testdata/example/hello/go.mod @@ -1,3 +1,5 @@ module hello -go 1.20 +go 1.22 + +toolchain go1.22.0 diff --git a/cannon/testdata/example/multithreaded/go.mod b/cannon/testdata/example/multithreaded/go.mod index a075941f46c3..e1bdb77a9aff 100644 --- a/cannon/testdata/example/multithreaded/go.mod +++ b/cannon/testdata/example/multithreaded/go.mod @@ -1,3 +1,5 @@ module multithreaded -go 1.21 +go 1.22 + +toolchain go1.22.0 diff --git a/docker-bake.hcl b/docker-bake.hcl index 074bf5cb78e3..5740590a95f2 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -61,10 +61,6 @@ variable "OP_SUPERVISOR_VERSION" { default = "${GIT_VERSION}" } -variable "PREV_CANNON_VERSION" { - default = "${GIT_VERSION}" -} - variable "CANNON_VERSION" { default = "${GIT_VERSION}" } @@ -199,7 +195,6 @@ target "cannon" { args = { GIT_COMMIT = "${GIT_COMMIT}" GIT_DATE = "${GIT_DATE}" - PREV_CANNON_VERSION = "${PREV_CANNON_VERSION}" CANNON_VERSION = "${CANNON_VERSION}" } target = "cannon-target" diff --git a/op-alt-da/daclient.go b/op-alt-da/daclient.go index 269b71f3c104..9f0bdab11fbd 100644 --- a/op-alt-da/daclient.go +++ b/op-alt-da/daclient.go @@ -119,7 +119,7 @@ func (c *DAClient) setInput(ctx context.Context, img []byte) (CommitmentData, er } body := bytes.NewReader(img) - url := fmt.Sprintf("%s/put/", c.url) + url := fmt.Sprintf("%s/put", c.url) req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, body) if err != nil { return nil, fmt.Errorf("failed to create HTTP request: %w", err) diff --git a/op-alt-da/damock.go b/op-alt-da/damock.go index 0db129171a82..ad388d0b2653 100644 --- a/op-alt-da/damock.go +++ b/op-alt-da/damock.go @@ -141,7 +141,7 @@ func (s *FakeDAServer) Start() error { // Override the HandleGet/Put method registrations mux := http.NewServeMux() mux.HandleFunc("/get/", s.HandleGet) - mux.HandleFunc("/put/", s.HandlePut) + mux.HandleFunc("/put", s.HandlePut) s.httpServer.Handler = mux return nil } diff --git a/op-alt-da/daserver.go b/op-alt-da/daserver.go index 94446944b543..ccdc2a0cb4d3 100644 --- a/op-alt-da/daserver.go +++ b/op-alt-da/daserver.go @@ -54,6 +54,7 @@ func (d *DAServer) Start() error { mux.HandleFunc("/get/", d.HandleGet) mux.HandleFunc("/put/", d.HandlePut) + mux.HandleFunc("/put", d.HandlePut) d.httpServer.Handler = mux @@ -128,7 +129,7 @@ func (d *DAServer) HandlePut(w http.ResponseWriter, r *http.Request) { d.log.Info("PUT", "url", r.URL) route := path.Dir(r.URL.Path) - if route != "/put" { + if route != "/put" && r.URL.Path != "/put" { w.WriteHeader(http.StatusBadRequest) return } diff 
--git a/op-chain-ops/cmd/op-deployer/main.go b/op-chain-ops/cmd/op-deployer/main.go index 023d8adca39d..d6daf959c103 100644 --- a/op-chain-ops/cmd/op-deployer/main.go +++ b/op-chain-ops/cmd/op-deployer/main.go @@ -4,6 +4,8 @@ import ( "fmt" "os" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/bootstrap" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/version" opservice "github.com/ethereum-optimism/optimism/op-service" @@ -41,6 +43,11 @@ func main() { Flags: cliapp.ProtectFlags(deployer.ApplyFlags), Action: deployer.ApplyCLI(), }, + { + Name: "bootstrap", + Usage: "bootstraps global contract instances", + Subcommands: bootstrap.Commands, + }, { Name: "inspect", Usage: "inspects the state of a deployment", diff --git a/op-chain-ops/deployer/bootstrap/bootstrap.go b/op-chain-ops/deployer/bootstrap/bootstrap.go new file mode 100644 index 000000000000..5f1fc7db254e --- /dev/null +++ b/op-chain-ops/deployer/bootstrap/bootstrap.go @@ -0,0 +1,206 @@ +package bootstrap + +import ( + "context" + "crypto/ecdsa" + "crypto/rand" + "fmt" + "math/big" + "strings" + + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opcm" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/pipeline" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/state" + "github.com/ethereum-optimism/optimism/op-chain-ops/script" + opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-service/jsonutil" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + "github.com/urfave/cli/v2" +) + +type OPCMConfig struct { + L1RPCUrl string + PrivateKey string + Logger log.Logger + ArtifactsURL *state.ArtifactsURL + ContractsRelease string + + privateKeyECDSA *ecdsa.PrivateKey +} + +func (c *OPCMConfig) Check() error { + if c.L1RPCUrl == "" { + return fmt.Errorf("l1RPCUrl must be specified") + } + + if c.PrivateKey == "" { + return fmt.Errorf("private key must be specified") + } + + privECDSA, err := crypto.HexToECDSA(strings.TrimPrefix(c.PrivateKey, "0x")) + if err != nil { + return fmt.Errorf("failed to parse private key: %w", err) + } + c.privateKeyECDSA = privECDSA + + if c.Logger == nil { + return fmt.Errorf("logger must be specified") + } + + if c.ArtifactsURL == nil { + return fmt.Errorf("artifacts URL must be specified") + } + + if c.ContractsRelease == "" { + return fmt.Errorf("contracts release must be specified") + } + + return nil +} + +func OPCMCLI(cliCtx *cli.Context) error { + logCfg := oplog.ReadCLIConfig(cliCtx) + l := oplog.NewLogger(oplog.AppOut(cliCtx), logCfg) + oplog.SetGlobalLogHandler(l.Handler()) + + l1RPCUrl := cliCtx.String(deployer.L1RPCURLFlagName) + privateKey := cliCtx.String(deployer.PrivateKeyFlagName) + artifactsURLStr := cliCtx.String(ArtifactsURLFlagName) + artifactsURL := new(state.ArtifactsURL) + if err := artifactsURL.UnmarshalText([]byte(artifactsURLStr)); err != nil { + return fmt.Errorf("failed to parse artifacts URL: %w", err) + } + contractsRelease := cliCtx.String(ContractsReleaseFlagName) + + ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) + + return 
OPCM(ctx, OPCMConfig{ + L1RPCUrl: l1RPCUrl, + PrivateKey: privateKey, + Logger: l, + ArtifactsURL: artifactsURL, + ContractsRelease: contractsRelease, + }) +} + +func OPCM(ctx context.Context, cfg OPCMConfig) error { + if err := cfg.Check(); err != nil { + return fmt.Errorf("invalid config for OPCM: %w", err) + } + + lgr := cfg.Logger + progressor := func(curr, total int64) { + lgr.Info("artifacts download progress", "current", curr, "total", total) + } + + artifactsFS, cleanup, err := pipeline.DownloadArtifacts(ctx, cfg.ArtifactsURL, progressor) + if err != nil { + return fmt.Errorf("failed to download artifacts: %w", err) + } + defer func() { + if err := cleanup(); err != nil { + lgr.Warn("failed to clean up artifacts", "err", err) + } + }() + + l1Client, err := ethclient.Dial(cfg.L1RPCUrl) + if err != nil { + return fmt.Errorf("failed to connect to L1 RPC: %w", err) + } + + chainID, err := l1Client.ChainID(ctx) + if err != nil { + return fmt.Errorf("failed to get chain ID: %w", err) + } + chainIDU64 := chainID.Uint64() + + superCfg, err := opcm.SuperchainFor(chainIDU64) + if err != nil { + return fmt.Errorf("error getting superchain config: %w", err) + } + standardVersionsTOML, err := opcm.StandardVersionsFor(chainIDU64) + if err != nil { + return fmt.Errorf("error getting standard versions TOML: %w", err) + } + opcmProxyOwnerAddr, err := opcm.ManagerOwnerAddrFor(chainIDU64) + if err != nil { + return fmt.Errorf("error getting superchain proxy admin: %w", err) + } + + signer := opcrypto.SignerFnFromBind(opcrypto.PrivateKeySignerFn(cfg.privateKeyECDSA, chainID)) + chainDeployer := crypto.PubkeyToAddress(cfg.privateKeyECDSA.PublicKey) + + lgr.Info("deploying OPCM", "release", cfg.ContractsRelease) + + var dio opcm.DeployImplementationsOutput + err = pipeline.CallScriptBroadcast( + ctx, + pipeline.CallScriptBroadcastOpts{ + L1ChainID: chainID, + Logger: lgr, + ArtifactsFS: artifactsFS, + Deployer: chainDeployer, + Signer: signer, + Client: l1Client, + Broadcaster: pipeline.KeyedBroadcaster, + Handler: func(host *script.Host) error { + // We need to etch the Superchain addresses so that they have nonzero code + // and the checks in the OPCM constructor pass. 
+ superchainConfigAddr := common.Address(*superCfg.Config.SuperchainConfigAddr) + protocolVersionsAddr := common.Address(*superCfg.Config.ProtocolVersionsAddr) + addresses := []common.Address{ + superchainConfigAddr, + protocolVersionsAddr, + } + for _, addr := range addresses { + host.ImportAccount(addr, types.Account{ + Code: []byte{0x00}, + }) + } + + var salt common.Hash + _, err = rand.Read(salt[:]) + if err != nil { + return fmt.Errorf("failed to generate CREATE2 salt: %w", err) + } + + dio, err = opcm.DeployImplementations( + host, + opcm.DeployImplementationsInput{ + Salt: salt, + WithdrawalDelaySeconds: big.NewInt(604800), + MinProposalSizeBytes: big.NewInt(126000), + ChallengePeriodSeconds: big.NewInt(86400), + ProofMaturityDelaySeconds: big.NewInt(604800), + DisputeGameFinalityDelaySeconds: big.NewInt(302400), + Release: cfg.ContractsRelease, + SuperchainConfigProxy: superchainConfigAddr, + ProtocolVersionsProxy: protocolVersionsAddr, + OpcmProxyOwner: opcmProxyOwnerAddr, + StandardVersionsToml: standardVersionsTOML, + UseInterop: false, + }, + ) + return err + }, + }, + ) + if err != nil { + return fmt.Errorf("error deploying implementations: %w", err) + } + + lgr.Info("deployed implementations") + + if err := jsonutil.WriteJSON(dio, ioutil.ToStdOut()); err != nil { + return fmt.Errorf("failed to write output: %w", err) + } + return nil +} diff --git a/op-chain-ops/deployer/bootstrap/flags.go b/op-chain-ops/deployer/bootstrap/flags.go new file mode 100644 index 000000000000..edb784da9fce --- /dev/null +++ b/op-chain-ops/deployer/bootstrap/flags.go @@ -0,0 +1,41 @@ +package bootstrap + +import ( + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer" + "github.com/ethereum-optimism/optimism/op-service/cliapp" + "github.com/urfave/cli/v2" +) + +const ( + ArtifactsURLFlagName = "artifacts-url" + ContractsReleaseFlagName = "contracts-release" +) + +var ( + ArtifactsURLFlag = &cli.StringFlag{ + Name: ArtifactsURLFlagName, + Usage: "URL to the artifacts directory.", + EnvVars: deployer.PrefixEnvVar("ARTIFACTS_URL"), + } + ContractsReleaseFlag = &cli.StringFlag{ + Name: ContractsReleaseFlagName, + Usage: "Release of the contracts to deploy.", + EnvVars: deployer.PrefixEnvVar("CONTRACTS_RELEASE"), + } +) + +var OPCMFlags = []cli.Flag{ + deployer.L1RPCURLFlag, + deployer.PrivateKeyFlag, + ArtifactsURLFlag, + ContractsReleaseFlag, +} + +var Commands = []*cli.Command{ + { + Name: "opcm", + Usage: "Bootstrap an instance of OPCM.", + Flags: cliapp.ProtectFlags(OPCMFlags), + Action: OPCMCLI, + }, +} diff --git a/op-chain-ops/deployer/broadcaster/keyed.go b/op-chain-ops/deployer/broadcaster/keyed.go index 63b72010042b..879d38b329b9 100644 --- a/op-chain-ops/deployer/broadcaster/keyed.go +++ b/op-chain-ops/deployer/broadcaster/keyed.go @@ -66,7 +66,7 @@ func NewKeyedBroadcaster(cfg KeyedBroadcasterOpts) (*KeyedBroadcaster, error) { mgrCfg.FeeLimitMultiplier.Store(5) mgrCfg.FeeLimitThreshold.Store(big.NewInt(100)) mgrCfg.MinTipCap.Store(minTipCap) - mgrCfg.MinTipCap.Store(minBaseFee) + mgrCfg.MinBaseFee.Store(minBaseFee) txmLogger := log.NewLogger(log.DiscardHandler()) if cfg.TXManagerLogger != nil { diff --git a/op-chain-ops/deployer/flags.go b/op-chain-ops/deployer/flags.go index e0ab864bdada..c0f2ba92f14b 100644 --- a/op-chain-ops/deployer/flags.go +++ b/op-chain-ops/deployer/flags.go @@ -30,28 +30,27 @@ var ( L1ChainIDFlag = &cli.Uint64Flag{ Name: L1ChainIDFlagName, Usage: "Chain ID of the L1 chain.", - EnvVars: prefixEnvVar("L1_CHAIN_ID"), + EnvVars: PrefixEnvVar("L1_CHAIN_ID"), 
Value: 900, } L2ChainIDsFlag = &cli.StringFlag{ Name: L2ChainIDsFlagName, Usage: "Comma-separated list of L2 chain IDs to deploy.", - EnvVars: prefixEnvVar("L2_CHAIN_IDS"), + EnvVars: PrefixEnvVar("L2_CHAIN_IDS"), } WorkdirFlag = &cli.StringFlag{ Name: WorkdirFlagName, Usage: "Directory storing intent and stage. Defaults to the current directory.", - EnvVars: prefixEnvVar("WORKDIR"), + EnvVars: PrefixEnvVar("WORKDIR"), Value: cwd(), Aliases: []string{ OutdirFlagName, }, } - PrivateKeyFlag = &cli.StringFlag{ Name: PrivateKeyFlagName, Usage: "Private key of the deployer account.", - EnvVars: prefixEnvVar("PRIVATE_KEY"), + EnvVars: PrefixEnvVar("PRIVATE_KEY"), } ) @@ -69,7 +68,7 @@ var ApplyFlags = []cli.Flag{ PrivateKeyFlag, } -func prefixEnvVar(name string) []string { +func PrefixEnvVar(name string) []string { return op_service.PrefixEnvVar(EnvVarPrefix, name) } diff --git a/op-chain-ops/deployer/init.go b/op-chain-ops/deployer/init.go index bd79f980cdff..a74f7ffa69bd 100644 --- a/op-chain-ops/deployer/init.go +++ b/op-chain-ops/deployer/init.go @@ -41,8 +41,8 @@ func InitCLI() func(ctx *cli.Context) error { outdir := ctx.String(OutdirFlagName) l2ChainIDsRaw := ctx.String(L2ChainIDsFlagName) - l2ChainIDsStr := strings.Split(l2ChainIDsRaw, ",") - l2ChainIDs := make([]common.Hash, 0, len(l2ChainIDsStr)) + l2ChainIDsStr := strings.Split(strings.TrimSpace(l2ChainIDsRaw), ",") + l2ChainIDs := make([]common.Hash, len(l2ChainIDsStr)) for _, idStr := range l2ChainIDsStr { id, err := op_service.Parse256BitChainID(idStr) if err != nil { @@ -66,7 +66,6 @@ func Init(cfg InitConfig) error { intent := &state.Intent{ L1ChainID: cfg.L1ChainID, - UseFaultProofs: true, FundDevAccounts: true, ContractsRelease: "dev", } diff --git a/op-chain-ops/deployer/integration_test/apply_test.go b/op-chain-ops/deployer/integration_test/apply_test.go index ad22651fa36e..be4ef80e6374 100644 --- a/op-chain-ops/deployer/integration_test/apply_test.go +++ b/op-chain-ops/deployer/integration_test/apply_test.go @@ -198,7 +198,6 @@ func makeIntent( ProtocolVersionsOwner: addrFor(devkeys.SuperchainDeployerKey.Key(l1ChainID)), Guardian: addrFor(devkeys.SuperchainConfigGuardianKey.Key(l1ChainID)), }, - UseFaultProofs: true, FundDevAccounts: true, ContractArtifactsURL: (*state.ArtifactsURL)(artifactsURL), ContractsRelease: "dev", @@ -239,11 +238,10 @@ func validateOPChainDeployment(t *testing.T, ctx context.Context, l1Client *ethc {"OptimismPortalProxyAddress", chainState.OptimismPortalProxyAddress}, {"DisputeGameFactoryProxyAddress", chainState.DisputeGameFactoryProxyAddress}, {"AnchorStateRegistryProxyAddress", chainState.AnchorStateRegistryProxyAddress}, - {"AnchorStateRegistryImplAddress", chainState.AnchorStateRegistryImplAddress}, {"FaultDisputeGameAddress", chainState.FaultDisputeGameAddress}, {"PermissionedDisputeGameAddress", chainState.PermissionedDisputeGameAddress}, {"DelayedWETHPermissionedGameProxyAddress", chainState.DelayedWETHPermissionedGameProxyAddress}, - {"DelayedWETHPermissionlessGameProxyAddress", chainState.DelayedWETHPermissionlessGameProxyAddress}, + // {"DelayedWETHPermissionlessGameProxyAddress", chainState.DelayedWETHPermissionlessGameProxyAddress}, } for _, addr := range chainAddrs { // TODO Delete this `if`` block once FaultDisputeGameAddress is deployed. 
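
Note on the op-deployer changes above: the new "bootstrap opcm" subcommand is wired to bootstrap.OPCMCLI, which ultimately calls bootstrap.OPCM(ctx, OPCMConfig{...}). As a rough, non-authoritative sketch of how that entrypoint could be driven directly, the snippet below uses the OPCMConfig field names and the state.ArtifactsURL UnmarshalText helper exactly as they appear in this diff; the RPC URL, private key, artifacts URL, and release tag are placeholders, and using log.Root() for the logger is an assumption (the CLI path builds its logger from oplog flags instead).

package main

import (
	"context"

	"github.com/ethereum-optimism/optimism/op-chain-ops/deployer/bootstrap"
	"github.com/ethereum-optimism/optimism/op-chain-ops/deployer/state"
	"github.com/ethereum/go-ethereum/log"
)

func main() {
	// Placeholder artifacts location; the CLI parses this from the
	// --artifacts-url flag defined in bootstrap/flags.go.
	artifactsURL := new(state.ArtifactsURL)
	if err := artifactsURL.UnmarshalText([]byte("https://example.com/artifacts.tar.gz")); err != nil {
		panic(err)
	}

	// All values below are illustrative placeholders, not real endpoints or keys.
	err := bootstrap.OPCM(context.Background(), bootstrap.OPCMConfig{
		L1RPCUrl:         "http://localhost:8545",
		PrivateKey:       "0xabc123", // placeholder hex-encoded deployer key
		Logger:           log.Root(), // assumption: any go-ethereum log.Logger works here
		ArtifactsURL:     artifactsURL,
		ContractsRelease: "op-contracts/v1.6.0",
	})
	if err != nil {
		panic(err)
	}
}
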
diff --git a/op-chain-ops/deployer/opcm/implementations.go b/op-chain-ops/deployer/opcm/implementations.go index 1d88c9b74398..0c61658dc429 100644 --- a/op-chain-ops/deployer/opcm/implementations.go +++ b/op-chain-ops/deployer/opcm/implementations.go @@ -22,7 +22,7 @@ type DeployImplementationsInput struct { ProtocolVersionsProxy common.Address UseInterop bool // if true, deploy Interop implementations - SuperchainProxyAdmin common.Address + OpcmProxyOwner common.Address StandardVersionsToml string // contents of 'standard-versions-mainnet.toml' or 'standard-versions-sepolia.toml' file } diff --git a/op-chain-ops/deployer/opcm/opchain.go b/op-chain-ops/deployer/opcm/opchain.go index 512b133c5876..ac118302e5a9 100644 --- a/op-chain-ops/deployer/opcm/opchain.go +++ b/op-chain-ops/deployer/opcm/opchain.go @@ -123,6 +123,7 @@ type opcmDeployInput struct { BlobBasefeeScalar uint32 L2ChainId *big.Int StartingAnchorRoots []byte + SaltMixer string } // decodeOutputABIJSON defines an ABI for a fake method called "decodeOutput" that returns the @@ -241,6 +242,7 @@ func DeployOPChainRaw( BlobBasefeeScalar: input.BlobBaseFeeScalar, L2ChainId: input.L2ChainId, StartingAnchorRoots: input.StartingAnchorRoots(), + SaltMixer: input.SaltMixer, }) if err != nil { return out, fmt.Errorf("failed to pack deploy input: %w", err) diff --git a/op-chain-ops/deployer/opcm/standard.go b/op-chain-ops/deployer/opcm/standard.go index c82e5de12a32..51de8a483fa7 100644 --- a/op-chain-ops/deployer/opcm/standard.go +++ b/op-chain-ops/deployer/opcm/standard.go @@ -1,6 +1,12 @@ package opcm -import "embed" +import ( + "embed" + "fmt" + + "github.com/ethereum-optimism/superchain-registry/superchain" + "github.com/ethereum/go-ethereum/common" +) //go:embed standard-versions-mainnet.toml var StandardVersionsMainnetData string @@ -9,3 +15,48 @@ var StandardVersionsMainnetData string var StandardVersionsSepoliaData string var _ embed.FS + +func StandardVersionsFor(chainID uint64) (string, error) { + switch chainID { + case 1: + return StandardVersionsMainnetData, nil + case 11155111: + return StandardVersionsSepoliaData, nil + default: + return "", fmt.Errorf("unsupported chain ID: %d", chainID) + } +} + +func SuperchainFor(chainID uint64) (*superchain.Superchain, error) { + switch chainID { + case 1: + return superchain.Superchains["mainnet"], nil + case 11155111: + return superchain.Superchains["sepolia"], nil + default: + return nil, fmt.Errorf("unsupported chain ID: %d", chainID) + } +} + +func ManagerImplementationAddrFor(chainID uint64) (common.Address, error) { + switch chainID { + case 11155111: + // Generated using the bootstrap command on 09/26/2024. 
+ return common.HexToAddress("0x0dc727671d5c08e4e41e8909983ebfa6f57aa0bf"), nil + default: + return common.Address{}, fmt.Errorf("unsupported chain ID: %d", chainID) + } +} + +func ManagerOwnerAddrFor(chainID uint64) (common.Address, error) { + switch chainID { + case 1: + // Set to superchain proxy admin + return common.HexToAddress("0x543bA4AADBAb8f9025686Bd03993043599c6fB04"), nil + case 11155111: + // Set to development multisig + return common.HexToAddress("0xDEe57160aAfCF04c34C887B5962D0a69676d3C8B"), nil + default: + return common.Address{}, fmt.Errorf("unsupported chain ID: %d", chainID) + } +} diff --git a/op-chain-ops/deployer/pipeline/implementations.go b/op-chain-ops/deployer/pipeline/implementations.go index d54d64abc564..12000be720ec 100644 --- a/op-chain-ops/deployer/pipeline/implementations.go +++ b/op-chain-ops/deployer/pipeline/implementations.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math/big" + "strings" "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opcm" "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/state" @@ -21,9 +22,17 @@ func DeployImplementations(ctx context.Context, env *Env, artifactsFS foundry.St lgr.Info("deploying implementations") + var standardVersionsTOML string + var err error + if strings.HasPrefix(intent.ContractsRelease, "op-contracts") { + standardVersionsTOML, err = opcm.StandardVersionsFor(intent.L1ChainID) + if err != nil { + return fmt.Errorf("error getting standard versions TOML: %w", err) + } + } + var dump *foundry.ForgeAllocs var dio opcm.DeployImplementationsOutput - var err error err = CallScriptBroadcast( ctx, CallScriptBroadcastOpts{ @@ -35,8 +44,8 @@ func DeployImplementations(ctx context.Context, env *Env, artifactsFS foundry.St Client: env.L1Client, Broadcaster: KeyedBroadcaster, Handler: func(host *script.Host) error { - host.SetEnvVar("IMPL_SALT", st.Create2Salt.Hex()[2:]) host.ImportState(st.SuperchainDeployment.StateDump) + dio, err = opcm.DeployImplementations( host, opcm.DeployImplementationsInput{ @@ -49,8 +58,8 @@ func DeployImplementations(ctx context.Context, env *Env, artifactsFS foundry.St Release: intent.ContractsRelease, SuperchainConfigProxy: st.SuperchainDeployment.SuperchainConfigProxyAddress, ProtocolVersionsProxy: st.SuperchainDeployment.ProtocolVersionsProxyAddress, - SuperchainProxyAdmin: st.SuperchainDeployment.ProxyAdminAddress, - StandardVersionsToml: opcm.StandardVersionsMainnetData, + OpcmProxyOwner: st.SuperchainDeployment.ProxyAdminAddress, + StandardVersionsToml: standardVersionsTOML, UseInterop: false, }, ) diff --git a/op-chain-ops/deployer/pipeline/init.go b/op-chain-ops/deployer/pipeline/init.go index a680c7fdb48f..d7009e117269 100644 --- a/op-chain-ops/deployer/pipeline/init.go +++ b/op-chain-ops/deployer/pipeline/init.go @@ -4,6 +4,7 @@ import ( "context" "crypto/rand" "fmt" + "strings" "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opcm" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" @@ -19,7 +20,7 @@ func IsSupportedStateVersion(version int) bool { return version == 1 } -func Init(ctx context.Context, env *Env, artifactsFS foundry.StatDirFs, intent *state.Intent, st *state.State) error { +func Init(ctx context.Context, env *Env, _ foundry.StatDirFs, intent *state.Intent, st *state.State) error { lgr := env.Logger.New("stage", "init") lgr.Info("initializing pipeline") @@ -35,37 +36,31 @@ func Init(ctx context.Context, env *Env, artifactsFS foundry.StatDirFs, intent * } } - if intent.OPCMAddress != (common.Address{}) { - 
env.Logger.Info("using provided OPCM address, populating state", "address", intent.OPCMAddress.Hex()) - - if intent.ContractsRelease == "dev" { - env.Logger.Warn("using dev release with existing OPCM, this field will be ignored") - } - - opcmContract := opcm.NewContract(intent.OPCMAddress, env.L1Client) - protocolVersions, err := opcmContract.ProtocolVersions(ctx) + if strings.HasPrefix(intent.ContractsRelease, "op-contracts") { + superCfg, err := opcm.SuperchainFor(intent.L1ChainID) if err != nil { - return fmt.Errorf("error getting protocol versions address: %w", err) + return fmt.Errorf("error getting superchain config: %w", err) } - superchainConfig, err := opcmContract.SuperchainConfig(ctx) + + proxyAdmin, err := opcm.ManagerOwnerAddrFor(intent.L1ChainID) if err != nil { - return fmt.Errorf("error getting superchain config address: %w", err) + return fmt.Errorf("error getting superchain proxy admin address: %w", err) } - env.Logger.Debug( - "populating protocol versions and superchain config addresses", - "protocolVersions", protocolVersions.Hex(), - "superchainConfig", superchainConfig.Hex(), - ) - - // The below fields are the only ones required to perform an OP Chain - // deployment via an existing OPCM contract. All the others are used - // for deploying the OPCM itself, which isn't necessary in this case. + + // Have to do this weird pointer thing below because the Superchain Registry defines its + // own Address type. st.SuperchainDeployment = &state.SuperchainDeployment{ - ProtocolVersionsProxyAddress: protocolVersions, - SuperchainConfigProxyAddress: superchainConfig, + ProxyAdminAddress: proxyAdmin, + ProtocolVersionsProxyAddress: common.Address(*superCfg.Config.ProtocolVersionsAddr), + SuperchainConfigProxyAddress: common.Address(*superCfg.Config.SuperchainConfigAddr), + } + + opcmProxy, err := opcm.ManagerImplementationAddrFor(intent.L1ChainID) + if err != nil { + return fmt.Errorf("error getting OPCM proxy address: %w", err) } st.ImplementationsDeployment = &state.ImplementationsDeployment{ - OpcmProxyAddress: intent.OPCMAddress, + OpcmProxyAddress: opcmProxy, } } @@ -81,14 +76,6 @@ func Init(ctx context.Context, env *Env, artifactsFS foundry.StatDirFs, intent * return immutableErr("L1ChainID", st.AppliedIntent.L1ChainID, intent.L1ChainID) } - if st.AppliedIntent.UseFaultProofs != intent.UseFaultProofs { - return immutableErr("useFaultProofs", st.AppliedIntent.UseFaultProofs, intent.UseFaultProofs) - } - - if st.AppliedIntent.UseAltDA != intent.UseAltDA { - return immutableErr("useAltDA", st.AppliedIntent.UseAltDA, intent.UseAltDA) - } - if st.AppliedIntent.FundDevAccounts != intent.FundDevAccounts { return immutableErr("fundDevAccounts", st.AppliedIntent.FundDevAccounts, intent.FundDevAccounts) } diff --git a/op-chain-ops/deployer/pipeline/opchain.go b/op-chain-ops/deployer/pipeline/opchain.go index a7bb0d6a96b8..cc375382b2f7 100644 --- a/op-chain-ops/deployer/pipeline/opchain.go +++ b/op-chain-ops/deployer/pipeline/opchain.go @@ -10,7 +10,6 @@ import ( "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opcm" "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/state" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" - "github.com/ethereum-optimism/optimism/op-chain-ops/script" "github.com/ethereum/go-ethereum/common" ) @@ -44,55 +43,27 @@ func DeployOPChain(ctx context.Context, env *Env, artifactsFS foundry.StatDirFs, } var dco opcm.DeployOPChainOutput - if intent.OPCMAddress == (common.Address{}) { - err = CallScriptBroadcast( - ctx, - 
CallScriptBroadcastOpts{ - L1ChainID: big.NewInt(int64(intent.L1ChainID)), - Logger: lgr, - ArtifactsFS: artifactsFS, - Deployer: env.Deployer, - Signer: env.Signer, - Client: env.L1Client, - Broadcaster: KeyedBroadcaster, - Handler: func(host *script.Host) error { - host.ImportState(st.ImplementationsDeployment.StateDump) - - dco, err = opcm.DeployOPChain( - host, - input, - ) - return err - }, - }, - ) - if err != nil { - return fmt.Errorf("error deploying OP chain: %w", err) - } - } else { - lgr.Info("deploying using existing OPCM", "address", intent.OPCMAddress.Hex()) - - bcaster, err := broadcaster.NewKeyedBroadcaster(broadcaster.KeyedBroadcasterOpts{ - Logger: lgr, - ChainID: big.NewInt(int64(intent.L1ChainID)), - Client: env.L1Client, - Signer: env.Signer, - From: env.Deployer, - }) - if err != nil { - return fmt.Errorf("failed to create broadcaster: %w", err) - } - dco, err = opcm.DeployOPChainRaw( - ctx, - env.L1Client, - bcaster, - env.Deployer, - artifactsFS, - input, - ) - if err != nil { - return fmt.Errorf("error deploying OP chain: %w", err) - } + lgr.Info("deploying using existing OPCM", "address", st.ImplementationsDeployment.OpcmProxyAddress.Hex()) + bcaster, err := broadcaster.NewKeyedBroadcaster(broadcaster.KeyedBroadcasterOpts{ + Logger: lgr, + ChainID: big.NewInt(int64(intent.L1ChainID)), + Client: env.L1Client, + Signer: env.Signer, + From: env.Deployer, + }) + if err != nil { + return fmt.Errorf("failed to create broadcaster: %w", err) + } + dco, err = opcm.DeployOPChainRaw( + ctx, + env.L1Client, + bcaster, + env.Deployer, + artifactsFS, + input, + ) + if err != nil { + return fmt.Errorf("error deploying OP chain: %w", err) } st.Chains = append(st.Chains, &state.ChainState{ diff --git a/op-chain-ops/deployer/state/deploy_config.go b/op-chain-ops/deployer/state/deploy_config.go index 81801e5865cb..5ea8590f537f 100644 --- a/op-chain-ops/deployer/state/deploy_config.go +++ b/op-chain-ops/deployer/state/deploy_config.go @@ -65,6 +65,13 @@ func DefaultDeployConfig() genesis.DeployConfig { SystemConfigStartBlock: 0, }, }, + FaultProofDeployConfig: genesis.FaultProofDeployConfig{ + FaultGameWithdrawalDelay: 604800, + PreimageOracleMinProposalSize: 126000, + PreimageOracleChallengePeriod: 86400, + ProofMaturityDelaySeconds: 604800, + DisputeGameFinalityDelaySeconds: 302400, + }, } } diff --git a/op-chain-ops/deployer/state/intent.go b/op-chain-ops/deployer/state/intent.go index 755ad6bbba54..b07a6c2acff4 100644 --- a/op-chain-ops/deployer/state/intent.go +++ b/op-chain-ops/deployer/state/intent.go @@ -17,17 +17,11 @@ type Intent struct { SuperchainRoles SuperchainRoles `json:"superchainRoles" toml:"superchainRoles"` - UseFaultProofs bool `json:"useFaultProofs" toml:"useFaultProofs"` - - UseAltDA bool `json:"useAltDA" toml:"useAltDA"` - FundDevAccounts bool `json:"fundDevAccounts" toml:"fundDevAccounts"` ContractArtifactsURL *ArtifactsURL `json:"contractArtifactsURL" toml:"contractArtifactsURL"` - ContractsRelease string `json:"contractsVersion" toml:"contractsVersion"` - - OPCMAddress common.Address `json:"opcmAddress" toml:"opcmAddress"` + ContractsRelease string `json:"contractsRelease" toml:"contractsRelease"` Chains []*ChainIntent `json:"chains" toml:"chains"` @@ -43,10 +37,28 @@ func (c *Intent) Check() error { return fmt.Errorf("l1ChainID must be set") } - if c.UseFaultProofs && c.UseAltDA { - return fmt.Errorf("cannot use both fault proofs and alt-DA") + if c.ContractsRelease == "dev" { + return c.checkDev() } + return c.checkProd() +} + +func (c *Intent) Chain(id 
common.Hash) (*ChainIntent, error) { + for i := range c.Chains { + if c.Chains[i].ID == id { + return c.Chains[i], nil + } + } + + return nil, fmt.Errorf("chain %d not found", id) +} + +func (c *Intent) WriteToFile(path string) error { + return jsonutil.WriteTOML(c, ioutil.ToAtomicFile(path, 0o755)) +} + +func (c *Intent) checkDev() error { if c.SuperchainRoles.ProxyAdminOwner == emptyAddress { return fmt.Errorf("proxyAdminOwner must be set") } @@ -60,28 +72,18 @@ func (c *Intent) Check() error { } if c.ContractArtifactsURL == nil { - return fmt.Errorf("contractArtifactsURL must be set") - } - - if c.ContractsRelease != "dev" && !strings.HasPrefix(c.ContractsRelease, "op-contracts/") { - return fmt.Errorf("contractsVersion must be either the literal \"dev\" or start with \"op-contracts/\"") + return fmt.Errorf("contractArtifactsURL must be set in dev mode") } return nil } -func (c *Intent) Chain(id common.Hash) (*ChainIntent, error) { - for i := range c.Chains { - if c.Chains[i].ID == id { - return c.Chains[i], nil - } +func (c *Intent) checkProd() error { + if !strings.HasPrefix(c.ContractsRelease, "op-contracts/") { + return fmt.Errorf("contractsVersion must be either the literal \"dev\" or start with \"op-contracts/\"") } - return nil, fmt.Errorf("chain %d not found", id) -} - -func (c *Intent) WriteToFile(path string) error { - return jsonutil.WriteTOML(c, ioutil.ToAtomicFile(path, 0o755)) + return nil } type SuperchainRoles struct { diff --git a/op-chain-ops/deployer/state/state.go b/op-chain-ops/deployer/state/state.go index 674e06d743a0..bc4d4c6f50e4 100644 --- a/op-chain-ops/deployer/state/state.go +++ b/op-chain-ops/deployer/state/state.go @@ -61,7 +61,7 @@ type SuperchainDeployment struct { SuperchainConfigImplAddress common.Address `json:"superchainConfigImplAddress"` ProtocolVersionsProxyAddress common.Address `json:"protocolVersionsProxyAddress"` ProtocolVersionsImplAddress common.Address `json:"protocolVersionsImplAddress"` - StateDump *foundry.ForgeAllocs `json:"stateDump"` + StateDump *foundry.ForgeAllocs `json:"-"` } type ImplementationsDeployment struct { @@ -76,7 +76,7 @@ type ImplementationsDeployment struct { L1StandardBridgeImplAddress common.Address `json:"l1StandardBridgeImplAddress"` OptimismMintableERC20FactoryImplAddress common.Address `json:"optimismMintableERC20FactoryImplAddress"` DisputeGameFactoryImplAddress common.Address `json:"disputeGameFactoryImplAddress"` - StateDump *foundry.ForgeAllocs `json:"stateDump"` + StateDump *foundry.ForgeAllocs `json:"-"` } type ChainState struct { diff --git a/op-chain-ops/interopgen/deploy.go b/op-chain-ops/interopgen/deploy.go index 6701e9c940c2..7550b599aa94 100644 --- a/op-chain-ops/interopgen/deploy.go +++ b/op-chain-ops/interopgen/deploy.go @@ -169,7 +169,7 @@ func DeploySuperchainToL1(l1Host *script.Host, superCfg *SuperchainConfig) (*Sup Release: superCfg.Implementations.Release, SuperchainConfigProxy: superDeployment.SuperchainConfigProxy, ProtocolVersionsProxy: superDeployment.ProtocolVersionsProxy, - SuperchainProxyAdmin: superDeployment.SuperchainProxyAdmin, + OpcmProxyOwner: superDeployment.SuperchainProxyAdmin, UseInterop: superCfg.Implementations.UseInterop, StandardVersionsToml: opcm.StandardVersionsMainnetData, }) diff --git a/op-chain-ops/script/script.go b/op-chain-ops/script/script.go index d156adb67187..8402418ba788 100644 --- a/op-chain-ops/script/script.go +++ b/op-chain-ops/script/script.go @@ -391,12 +391,22 @@ func (h *Host) GetNonce(addr common.Address) uint64 { // when importing. 
func (h *Host) ImportState(allocs *foundry.ForgeAllocs) { for addr, alloc := range allocs.Accounts { - h.state.SetBalance(addr, uint256.MustFromBig(alloc.Balance), tracing.BalanceChangeUnspecified) - h.state.SetNonce(addr, alloc.Nonce) - h.state.SetCode(addr, alloc.Code) - for key, value := range alloc.Storage { - h.state.SetState(addr, key, value) - } + h.ImportAccount(addr, alloc) + } +} + +func (h *Host) ImportAccount(addr common.Address, account types.Account) { + var balance *uint256.Int + if account.Balance == nil { + balance = uint256.NewInt(0) + } else { + balance = uint256.MustFromBig(account.Balance) + } + h.state.SetBalance(addr, balance, tracing.BalanceChangeUnspecified) + h.state.SetNonce(addr, account.Nonce) + h.state.SetCode(addr, account.Code) + for key, value := range account.Storage { + h.state.SetState(addr, key, value) } } diff --git a/op-challenger/README.md b/op-challenger/README.md index 1c652008ea11..69420e419cfa 100644 --- a/op-challenger/README.md +++ b/op-challenger/README.md @@ -43,7 +43,7 @@ DISPUTE_GAME_FACTORY=$(jq -r .DisputeGameFactoryProxy .devnet/addresses.json) --cannon-l2-genesis .devnet/genesis-l2.json \ --cannon-bin ./cannon/bin/cannon \ --cannon-server ./op-program/bin/op-program \ - --cannon-prestate ./op-program/bin/prestate.json \ + --cannon-prestate ./op-program/bin/prestate.bin.gz \ --l2-eth-rpc http://localhost:9545 \ --mnemonic "test test test test test test test test test test test junk" \ --hd-path "m/44'/60'/0'/0/8" \ diff --git a/op-conductor/client/mocks/SequencerControl.go b/op-conductor/client/mocks/SequencerControl.go index 7e48f6dbf0df..cd6e5ecbca0c 100644 --- a/op-conductor/client/mocks/SequencerControl.go +++ b/op-conductor/client/mocks/SequencerControl.go @@ -25,6 +25,62 @@ func (_m *SequencerControl) EXPECT() *SequencerControl_Expecter { return &SequencerControl_Expecter{mock: &_m.Mock} } +// ConductorEnabled provides a mock function with given fields: ctx +func (_m *SequencerControl) ConductorEnabled(ctx context.Context) (bool, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ConductorEnabled") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) bool); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SequencerControl_ConductorEnabled_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ConductorEnabled' +type SequencerControl_ConductorEnabled_Call struct { + *mock.Call +} + +// ConductorEnabled is a helper method to define mock.On call +// - ctx context.Context +func (_e *SequencerControl_Expecter) ConductorEnabled(ctx interface{}) *SequencerControl_ConductorEnabled_Call { + return &SequencerControl_ConductorEnabled_Call{Call: _e.mock.On("ConductorEnabled", ctx)} +} + +func (_c *SequencerControl_ConductorEnabled_Call) Run(run func(ctx context.Context)) *SequencerControl_ConductorEnabled_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *SequencerControl_ConductorEnabled_Call) Return(_a0 bool, _a1 error) *SequencerControl_ConductorEnabled_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *SequencerControl_ConductorEnabled_Call) RunAndReturn(run func(context.Context) (bool, error)) 
*SequencerControl_ConductorEnabled_Call { + _c.Call.Return(run) + return _c +} + // LatestUnsafeBlock provides a mock function with given fields: ctx func (_m *SequencerControl) LatestUnsafeBlock(ctx context.Context) (eth.BlockInfo, error) { ret := _m.Called(ctx) diff --git a/op-conductor/client/sequencer.go b/op-conductor/client/sequencer.go index 1099c84dbea0..0c2ae4c93ab0 100644 --- a/op-conductor/client/sequencer.go +++ b/op-conductor/client/sequencer.go @@ -18,6 +18,7 @@ type SequencerControl interface { SequencerActive(ctx context.Context) (bool, error) LatestUnsafeBlock(ctx context.Context) (eth.BlockInfo, error) PostUnsafePayload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error + ConductorEnabled(ctx context.Context) (bool, error) } // NewSequencerControl creates a new SequencerControl instance. @@ -59,3 +60,8 @@ func (s *sequencerController) SequencerActive(ctx context.Context) (bool, error) func (s *sequencerController) PostUnsafePayload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error { return s.node.PostUnsafePayload(ctx, payload) } + +// ConductorEnabled implements SequencerControl. +func (s *sequencerController) ConductorEnabled(ctx context.Context) (bool, error) { + return s.node.ConductorEnabled(ctx) +} diff --git a/op-conductor/conductor/service.go b/op-conductor/conductor/service.go index d2eb4fe89d9d..f93314f5f70b 100644 --- a/op-conductor/conductor/service.go +++ b/op-conductor/conductor/service.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/httputil" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" + "github.com/ethereum-optimism/optimism/op-service/retry" oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" "github.com/ethereum-optimism/optimism/op-service/sources" ) @@ -140,6 +141,25 @@ func (c *OpConductor) initSequencerControl(ctx context.Context) error { node := sources.NewRollupClient(nc) c.ctrl = client.NewSequencerControl(exec, node) + enabled, err := retry.Do(ctx, 60, retry.Fixed(5*time.Second), func() (bool, error) { + enabled, err := c.ctrl.ConductorEnabled(ctx) + if rpcErr, ok := err.(rpc.Error); ok { + errCode := rpcErr.ErrorCode() + errText := strings.ToLower(err.Error()) + if errCode == -32601 || strings.Contains(errText, "method not found") { // method not found error + c.log.Warn("Warning: conductorEnabled method not found, please upgrade your op-node to the latest version, continuing...") + return true, nil + } + } + return enabled, err + }) + if err != nil { + return errors.Wrap(err, "failed to connect to sequencer") + } + if !enabled { + return errors.New("conductor is not enabled on sequencer, exiting...") + } + return c.updateSequencerActiveStatus() } diff --git a/op-e2e/actions/helpers/l2_verifier.go b/op-e2e/actions/helpers/l2_verifier.go index 1594e1eb368c..6f9d80169875 100644 --- a/op-e2e/actions/helpers/l2_verifier.go +++ b/op-e2e/actions/helpers/l2_verifier.go @@ -241,6 +241,10 @@ func (s *l2VerifierBackend) OnUnsafeL2Payload(ctx context.Context, envelope *eth return nil } +func (s *l2VerifierBackend) ConductorEnabled(ctx context.Context) (bool, error) { + return false, nil +} + func (s *L2Verifier) DerivationMetricsTracer() *testutils.TestDerivationMetrics { return s.derivationMetrics } diff --git a/op-e2e/actions/interop/interop_test.go b/op-e2e/actions/interop/interop_test.go index 4015ffa29ce9..8badc474e944 100644 --- a/op-e2e/actions/interop/interop_test.go +++ 
b/op-e2e/actions/interop/interop_test.go @@ -42,7 +42,7 @@ func TestInteropVerifier(gt *testing.T) { ver.ActL2PipelineFull(t) l2ChainID := types.ChainIDFromBig(sd.RollupCfg.L2ChainID) - seqMockBackend.ExpectCheckBlock(l2ChainID, 1, types.Unsafe, nil) + seqMockBackend.ExpectCheckBlock(l2ChainID, 1, types.LocalUnsafe, nil) // create an unsafe L2 block seq.ActL2StartBlock(t) seq.ActL2EndBlock(t) @@ -99,8 +99,8 @@ func TestInteropVerifier(gt *testing.T) { require.Equal(t, uint64(0), status.FinalizedL2.Number) // The verifier might not see the L2 block that was just derived from L1 as cross-verified yet. - verMockBackend.ExpectCheckBlock(l2ChainID, 1, types.Unsafe, nil) // for the local unsafe check - verMockBackend.ExpectCheckBlock(l2ChainID, 1, types.Unsafe, nil) // for the local safe check + verMockBackend.ExpectCheckBlock(l2ChainID, 1, types.LocalUnsafe, nil) // for the local unsafe check + verMockBackend.ExpectCheckBlock(l2ChainID, 1, types.LocalUnsafe, nil) // for the local safe check ver.ActL1HeadSignal(t) ver.ActL2PipelineFull(t) verMockBackend.AssertExpectations(t) diff --git a/op-e2e/e2eutils/challenger/helper.go b/op-e2e/e2eutils/challenger/helper.go index 177fd90a9bf0..d8fb223507ac 100644 --- a/op-e2e/e2eutils/challenger/helper.go +++ b/op-e2e/e2eutils/challenger/helper.go @@ -124,7 +124,7 @@ func applyCannonConfig(c *config.Config, t *testing.T, rollupCfg *rollup.Config, t.Log("Using MT-Cannon absolute prestate") c.CannonAbsolutePreState = root + "op-program/bin/prestate-mt.bin.gz" } else { - c.CannonAbsolutePreState = root + "op-program/bin/prestate.json" + c.CannonAbsolutePreState = root + "op-program/bin/prestate.bin.gz" } c.Cannon.SnapshotFreq = 10_000_000 diff --git a/op-e2e/interop/interop_test.go b/op-e2e/interop/interop_test.go index 65265c22e7c2..0d593673ecce 100644 --- a/op-e2e/interop/interop_test.go +++ b/op-e2e/interop/interop_test.go @@ -95,6 +95,6 @@ func TestInteropTrivial(t *testing.T) { fmt.Println("Result of emitting event:", rec) - time.Sleep(10 * time.Second) + time.Sleep(60 * time.Second) } diff --git a/op-e2e/interop/supersystem.go b/op-e2e/interop/supersystem.go index ffa91bef97f3..3630b87dc896 100644 --- a/op-e2e/interop/supersystem.go +++ b/op-e2e/interop/supersystem.go @@ -471,7 +471,7 @@ func (s *interopE2ESystem) SupervisorClient() *sources.SupervisorClient { // their creation can't be safely skipped or reordered at this time func (s *interopE2ESystem) prepare(t *testing.T, w worldResourcePaths) { s.t = t - s.logger = testlog.Logger(s.t, log.LevelInfo) + s.logger = testlog.Logger(s.t, log.LevelDebug) s.hdWallet = s.prepareHDWallet() s.worldDeployment, s.worldOutput = s.prepareWorld(w) diff --git a/op-e2e/system/gastoken/gastoken_test.go b/op-e2e/system/gastoken/gastoken_test.go index 445f672743cf..839f33634046 100644 --- a/op-e2e/system/gastoken/gastoken_test.go +++ b/op-e2e/system/gastoken/gastoken_test.go @@ -2,7 +2,6 @@ package gastoken import ( "context" - "fmt" "math/big" "testing" "time" @@ -440,34 +439,6 @@ func TestCustomGasToken(t *testing.T) { checkFeeWithdrawal(t, enabled) } -// callViaSafe will use the Safe smart account at safeAddress to send a transaction to target using the provided data. The transaction signature is constructed from -// the supplied opts. 
-func callViaSafe(opts *bind.TransactOpts, client *ethclient.Client, safeAddress common.Address, target common.Address, data []byte) (*types.Transaction, error) { - signature := [65]byte{} - copy(signature[12:], opts.From[:]) - signature[64] = uint8(1) - - safe, err := bindings.NewSafe(safeAddress, client) - if err != nil { - return nil, err - } - - owners, err := safe.GetOwners(&bind.CallOpts{}) - if err != nil { - return nil, err - } - - isOwner, err := safe.IsOwner(&bind.CallOpts{}, opts.From) - if err != nil { - return nil, err - } - if !isOwner { - return nil, fmt.Errorf("address %s is not in owners list %s", opts.From, owners) - } - - return safe.ExecTransaction(opts, target, big.NewInt(0), data, 0, big.NewInt(0), big.NewInt(0), big.NewInt(0), common.Address{}, common.Address{}, signature[:]) -} - // setCustomGasToeken enables the Custom Gas Token feature on a chain where it wasn't enabled at genesis. // It reads existing parameters from the SystemConfig contract, inserts the supplied cgtAddress and reinitializes that contract. // To do this it uses the ProxyAdmin and StorageSetter from the supplied cfg. @@ -518,27 +489,18 @@ func setCustomGasToken(t *testing.T, cfg e2esys.SystemConfig, sys *e2esys.System proxyAdmin, err := bindings.NewProxyAdmin(cfg.L1Deployments.ProxyAdmin, l1Client) require.NoError(t, err) - // Compute Proxy Admin Owner (this is a SAFE with 1 owner) - proxyAdminOwner, err := proxyAdmin.Owner(&bind.CallOpts{}) - require.NoError(t, err) - // Deploy a new StorageSetter contract storageSetterAddr, tx, _, err := bindings.DeployStorageSetter(deployerOpts, l1Client) waitForTx(t, tx, err, l1Client) - // Set up a signer which controls the Proxy Admin Owner SAFE - safeOwnerOpts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.Deployer, cfg.L1ChainIDBig()) - require.NoError(t, err) - - // Encode calldata for upgrading SystemConfigProxy to the StorageSetter implementation - proxyAdminABI, err := bindings.ProxyAdminMetaData.GetAbi() - require.NoError(t, err) - encodedUpgradeCall, err := proxyAdminABI.Pack("upgrade", - cfg.L1Deployments.SystemConfigProxy, storageSetterAddr) + // Set up a signer which controls the Proxy Admin. + // The deploy config's finalSystemOwner is the owner of the ProxyAdmin as well as the SystemConfig, + // so we can use that address for the proxy admin owner. 
+ proxyAdminOwnerOpts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.SysCfgOwner, cfg.L1ChainIDBig()) require.NoError(t, err) - // Execute the upgrade SystemConfigProxy -> StorageSetter - tx, err = callViaSafe(safeOwnerOpts, l1Client, proxyAdminOwner, cfg.L1Deployments.ProxyAdmin, encodedUpgradeCall) + // Execute the upgrade SystemConfigProxy -> StorageSetter via ProxyAdmin + tx, err = proxyAdmin.Upgrade(proxyAdminOwnerOpts, cfg.L1Deployments.SystemConfigProxy, storageSetterAddr) waitForTx(t, tx, err, l1Client) // Bind a StorageSetter to the SystemConfigProxy address @@ -554,13 +516,8 @@ func setCustomGasToken(t *testing.T, cfg e2esys.SystemConfig, sys *e2esys.System require.NoError(t, err) require.Equal(t, currentSlotValue, [32]byte{0}) - // Prepare calldata for SystemConfigProxy -> SystemConfig upgrade - encodedUpgradeCall, err = proxyAdminABI.Pack("upgrade", - cfg.L1Deployments.SystemConfigProxy, cfg.L1Deployments.SystemConfig) - require.NoError(t, err) - // Execute SystemConfigProxy -> SystemConfig upgrade - tx, err = callViaSafe(safeOwnerOpts, l1Client, proxyAdminOwner, cfg.L1Deployments.ProxyAdmin, encodedUpgradeCall) + tx, err = proxyAdmin.Upgrade(proxyAdminOwnerOpts, cfg.L1Deployments.SystemConfigProxy, cfg.L1Deployments.SystemConfig) waitForTx(t, tx, err, l1Client) // Reinitialise with existing initializer values but with custom gas token set diff --git a/op-node/node/api.go b/op-node/node/api.go index a94e2477fe16..ccd4a3b81bb3 100644 --- a/op-node/node/api.go +++ b/op-node/node/api.go @@ -34,6 +34,7 @@ type driverClient interface { SequencerActive(context.Context) (bool, error) OnUnsafeL2Payload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error OverrideLeader(ctx context.Context) error + ConductorEnabled(ctx context.Context) (bool, error) } type SafeDBReader interface { @@ -98,6 +99,13 @@ func (n *adminAPI) OverrideLeader(ctx context.Context) error { return n.dr.OverrideLeader(ctx) } +// ConductorEnabled returns true if the sequencer conductor is enabled. +func (n *adminAPI) ConductorEnabled(ctx context.Context) (bool, error) { + recordDur := n.M.RecordRPCServerRequest("admin_conductorEnabled") + defer recordDur() + return n.dr.ConductorEnabled(ctx) +} + type nodeAPI struct { config *rollup.Config client l2EthClient diff --git a/op-node/node/conductor.go b/op-node/node/conductor.go index 20e0638dc686..ff5723889b95 100644 --- a/op-node/node/conductor.go +++ b/op-node/node/conductor.go @@ -32,7 +32,7 @@ type ConductorClient struct { var _ conductor.SequencerConductor = &ConductorClient{} // NewConductorClient returns a new conductor client for the op-conductor RPC service. -func NewConductorClient(cfg *Config, log log.Logger, metrics *metrics.Metrics) *ConductorClient { +func NewConductorClient(cfg *Config, log log.Logger, metrics *metrics.Metrics) conductor.SequencerConductor { return &ConductorClient{ cfg: cfg, metrics: metrics, @@ -53,6 +53,11 @@ func (c *ConductorClient) initialize() error { return nil } +// Enabled returns true if the conductor is enabled, and since the conductor client is initialized, the conductor is always enabled. +func (c *ConductorClient) Enabled(ctx context.Context) bool { + return true +} + // Leader returns true if this node is the leader sequencer. func (c *ConductorClient) Leader(ctx context.Context) (bool, error) { if c.overrideLeader.Load() { @@ -86,12 +91,11 @@ func (c *ConductorClient) CommitUnsafePayload(ctx context.Context, payload *eth. 
ctx, cancel := context.WithTimeout(ctx, c.cfg.ConductorRpcTimeout) defer cancel() - // extra bool return value is required for the generic, can be ignored. - _, err := retry.Do(ctx, 2, retry.Fixed(50*time.Millisecond), func() (bool, error) { + err := retry.Do0(ctx, 2, retry.Fixed(50*time.Millisecond), func() error { record := c.metrics.RecordRPCClientRequest("conductor_commitUnsafePayload") err := c.apiClient.CommitUnsafePayload(ctx, payload) record(err) - return true, err + return err }) return err } diff --git a/op-node/node/node.go b/op-node/node/node.go index 9d9f6a4343ac..298c98aa2b18 100644 --- a/op-node/node/node.go +++ b/op-node/node/node.go @@ -262,12 +262,12 @@ func (n *OpNode) initRuntimeConfig(ctx context.Context, cfg *Config) error { } // initialize the runtime config before unblocking - if _, err := retry.Do(ctx, 5, retry.Fixed(time.Second*10), func() (eth.L1BlockRef, error) { - ref, err := reload(ctx) + if err := retry.Do0(ctx, 5, retry.Fixed(time.Second*10), func() error { + _, err := reload(ctx) if errors.Is(err, errNodeHalt) { // don't retry on halt error err = nil } - return ref, err + return err }); err != nil { return fmt.Errorf("failed to load runtime configuration repeatedly, last error: %w", err) } diff --git a/op-node/node/server_test.go b/op-node/node/server_test.go index 7063b3ed2807..f8722e272318 100644 --- a/op-node/node/server_test.go +++ b/op-node/node/server_test.go @@ -287,6 +287,10 @@ func (c *mockDriverClient) OverrideLeader(ctx context.Context) error { return c.Mock.MethodCalled("OverrideLeader").Get(0).(error) } +func (c *mockDriverClient) ConductorEnabled(ctx context.Context) (bool, error) { + return c.Mock.MethodCalled("ConductorEnabled").Get(0).(bool), nil +} + type mockSafeDBReader struct { mock.Mock } diff --git a/op-node/rollup/conductor/conductor.go b/op-node/rollup/conductor/conductor.go index 927d88035ccb..b668d5fb055f 100644 --- a/op-node/rollup/conductor/conductor.go +++ b/op-node/rollup/conductor/conductor.go @@ -9,6 +9,8 @@ import ( // SequencerConductor is an interface for the driver to communicate with the sequencer conductor. // It is used to determine if the current node is the active sequencer, and to commit unsafe payloads to the conductor log. type SequencerConductor interface { + // Enabled returns true if the conductor is enabled. + Enabled(ctx context.Context) bool // Leader returns true if this node is the leader sequencer. Leader(ctx context.Context) (bool, error) // CommitUnsafePayload commits an unsafe payload to the conductor FSM. @@ -24,6 +26,11 @@ type NoOpConductor struct{} var _ SequencerConductor = &NoOpConductor{} +// Enabled implements SequencerConductor. +func (c *NoOpConductor) Enabled(ctx context.Context) bool { + return false +} + // Leader returns true if this node is the leader sequencer. NoOpConductor always returns true. 
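The new retry.Do0 helper (introduced in op-service/retry/operation.go later in this diff) is what removes the discarded-boolean pattern shown above. A minimal, standalone sketch of a caller, with a hypothetical flaky operation standing in for the RPC call:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/ethereum-optimism/optimism/op-service/retry"
)

func main() {
	ctx := context.Background()
	attempts := 0
	// Do0 retries an error-only operation, so no throwaway return value is
	// needed, unlike the generic retry.Do.
	err := retry.Do0(ctx, 3, retry.Fixed(50*time.Millisecond), func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure") // hypothetical flaky operation
		}
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}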
func (c *NoOpConductor) Leader(ctx context.Context) (bool, error) { return true, nil diff --git a/op-node/rollup/derive/frame_queue.go b/op-node/rollup/derive/frame_queue.go index d57495a80558..77a2703290ce 100644 --- a/op-node/rollup/derive/frame_queue.go +++ b/op-node/rollup/derive/frame_queue.go @@ -6,11 +6,13 @@ import ( "github.com/ethereum/go-ethereum/log" + "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" ) var _ NextFrameProvider = &FrameQueue{} +//go:generate mockery --name NextDataProvider --case snake type NextDataProvider interface { NextData(context.Context) ([]byte, error) Origin() eth.L1BlockRef @@ -20,12 +22,14 @@ type FrameQueue struct { log log.Logger frames []Frame prev NextDataProvider + cfg *rollup.Config } -func NewFrameQueue(log log.Logger, prev NextDataProvider) *FrameQueue { +func NewFrameQueue(log log.Logger, cfg *rollup.Config, prev NextDataProvider) *FrameQueue { return &FrameQueue{ log: log, prev: prev, + cfg: cfg, } } @@ -34,18 +38,15 @@ func (fq *FrameQueue) Origin() eth.L1BlockRef { } func (fq *FrameQueue) NextFrame(ctx context.Context) (Frame, error) { - // Find more frames if we need to + // TODO(12157): reset frame queue once at Holocene L1 origin block + + // Only load more frames if necessary if len(fq.frames) == 0 { - if data, err := fq.prev.NextData(ctx); err != nil { + if err := fq.loadNextFrames(ctx); err != nil { return Frame{}, err - } else { - if new, err := ParseFrames(data); err == nil { - fq.frames = append(fq.frames, new...) - } else { - fq.log.Warn("Failed to parse frames", "origin", fq.prev.Origin(), "err", err) - } } } + // If we did not add more frames but still have more data, retry this function. if len(fq.frames) == 0 { return Frame{}, NotEnoughData @@ -56,6 +57,78 @@ func (fq *FrameQueue) NextFrame(ctx context.Context) (Frame, error) { return ret, nil } +func (fq *FrameQueue) loadNextFrames(ctx context.Context) error { + data, err := fq.prev.NextData(ctx) + if err != nil { + return err + } + + if frames, err := ParseFrames(data); err == nil { + fq.frames = append(fq.frames, frames...) + } else { + fq.log.Warn("Failed to parse frames", "origin", fq.prev.Origin(), "err", err) + return nil + } + + // Note: this implementation first parses all frames from the next L1 transaction and only then + // prunes all frames that were parsed. An even more memory-efficient implementation could prune + // the frame queue each time after pulling out only a single frame. + + if fq.cfg.IsHolocene(fq.Origin().Time) { + // We only need to prune the queue after adding more frames to it. + // Moving frames out of the queue to the next stage cannot invalidate any frames in + // the queue. + fq.prune() + } + + return nil +} + +func (fq *FrameQueue) prune() { + fq.frames = pruneFrameQueue(fq.frames) +} + +// pruneFrameQueue prunes the frame queue to only hold contiguous and ordered +// frames, conforming to Holocene frame queue rules. +func pruneFrameQueue(frames []Frame) []Frame { + for i := 0; i < len(frames)-1; { + current, next := frames[i], frames[i+1] + discard := func(d int) { + frames = append(frames[0:i+d], frames[i+1+d:]...) 
+ } + // frames for the same channel ID must arrive in order + if current.ID == next.ID { + if current.IsLast { + discard(1) // discard next + continue + } + if next.FrameNumber != current.FrameNumber+1 { + discard(1) // discard next + continue + } + } else { + // first frames discard previously unclosed channels + if next.FrameNumber == 0 && !current.IsLast { + discard(0) // discard current + // make sure we backwards invalidate more frames of unclosed channel + if i > 0 { + i-- + } + continue + } + // non-first frames of new channels are dropped + if next.FrameNumber != 0 { + discard(1) // discard next + continue + } + } + // We only update the cursor if we didn't remove any frame, so if any frame got removed, the + // checks are applied to the new pair in the queue at the same position. + i++ + } + return frames +} + func (fq *FrameQueue) Reset(_ context.Context, _ eth.L1BlockRef, _ eth.SystemConfig) error { fq.frames = fq.frames[:0] return io.EOF diff --git a/op-node/rollup/derive/frame_queue_test.go b/op-node/rollup/derive/frame_queue_test.go new file mode 100644 index 000000000000..a0a57f4f387d --- /dev/null +++ b/op-node/rollup/derive/frame_queue_test.go @@ -0,0 +1,159 @@ +package derive + +import ( + "bytes" + "context" + "io" + "log/slog" + "testing" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/mocks" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestPruneFrameQueue(t *testing.T) { + for _, tt := range []struct { + desc string + frames []testFrame + expected []testFrame + }{ + { + desc: "empty", + frames: []testFrame{}, + expected: []testFrame{}, + }, + { + desc: "one", + frames: []testFrame{"a:2:"}, + expected: []testFrame{"a:2:"}, + }, + { + desc: "one-last", + frames: []testFrame{"a:2:!"}, + expected: []testFrame{"a:2:!"}, + }, + { + desc: "last-new", + frames: []testFrame{"a:2:!", "b:0:"}, + expected: []testFrame{"a:2:!", "b:0:"}, + }, + { + desc: "last-ooo", + frames: []testFrame{"a:2:!", "b:1:"}, + expected: []testFrame{"a:2:!"}, + }, + { + desc: "middle-lastooo", + frames: []testFrame{"b:1:", "a:2:!"}, + expected: []testFrame{"b:1:"}, + }, + { + desc: "middle-first", + frames: []testFrame{"b:1:", "a:0:"}, + expected: []testFrame{"a:0:"}, + }, + { + desc: "last-first", + frames: []testFrame{"b:1:!", "a:0:"}, + expected: []testFrame{"b:1:!", "a:0:"}, + }, + { + desc: "last-ooo", + frames: []testFrame{"b:1:!", "b:2:"}, + expected: []testFrame{"b:1:!"}, + }, + { + desc: "ooo", + frames: []testFrame{"b:1:", "b:3:"}, + expected: []testFrame{"b:1:"}, + }, + { + desc: "other-ooo", + frames: []testFrame{"b:1:", "c:3:"}, + expected: []testFrame{"b:1:"}, + }, + { + desc: "other-ooo-last", + frames: []testFrame{"b:1:", "c:3:", "b:2:!"}, + expected: []testFrame{"b:1:", "b:2:!"}, + }, + { + desc: "ooo-resubmit", + frames: []testFrame{"b:1:", "b:3:!", "b:2:", "b:3:!"}, + expected: []testFrame{"b:1:", "b:2:", "b:3:!"}, + }, + { + desc: "first-discards-multiple", + frames: []testFrame{"c:0:", "c:1:", "c:2:", "d:0:", "c:3:!"}, + expected: []testFrame{"d:0:"}, + }, + { + desc: "complex", + frames: []testFrame{"b:1:", "b:2:!", "a:0:", "c:1:!", "a:1:", "a:2:!", "c:0:", "c:1:", "d:0:", "c:2:!", "e:0:"}, + expected: []testFrame{"b:1:", "b:2:!", "a:0:", "a:1:", "a:2:!", "e:0:"}, + }, + } { + t.Run(tt.desc, func(t *testing.T) { + pfs := 
pruneFrameQueue(testFramesToFrames(tt.frames...)) + require.Equal(t, testFramesToFrames(tt.expected...), pfs) + }) + } +} + +func TestFrameQueue_NextFrame(t *testing.T) { + t.Run("pre-holocene", func(t *testing.T) { testFrameQueue_NextFrame(t, false) }) + t.Run("holocene", func(t *testing.T) { testFrameQueue_NextFrame(t, true) }) +} + +func testFrameQueue_NextFrame(t *testing.T, holocene bool) { + lgr := testlog.Logger(t, slog.LevelWarn) + cfg := &rollup.Config{} + dp := mocks.NewNextDataProvider(t) + fq := NewFrameQueue(lgr, cfg, dp) + + inFrames := testFramesToFrames("b:1:", "b:2:!", "a:0:", "c:1:!", "a:1:", "a:2:!", "c:0:", "c:1:", "d:0:", "c:2:!", "e:0:") + var expFrames []Frame + if holocene { + cfg.HoloceneTime = ptr(uint64(0)) + // expect pruned frames with Holocene + expFrames = testFramesToFrames("b:1:", "b:2:!", "a:0:", "a:1:", "a:2:!", "e:0:") + } else { + expFrames = inFrames + } + + var inBuf bytes.Buffer + inBuf.WriteByte(DerivationVersion0) + for _, f := range inFrames { + require.NoError(t, f.MarshalBinary(&inBuf)) + } + + dp.On("Origin").Return(eth.L1BlockRef{}) + dp.On("NextData", mock.Anything).Return(inBuf.Bytes(), nil).Once() + dp.On("NextData", mock.Anything).Return(nil, io.EOF) + + gotFrames := make([]Frame, 0, len(expFrames)) + for i := 0; i <= len(inFrames); i++ { // make sure we hit EOF case + frame, err := fq.NextFrame(context.Background()) + if err != nil { + require.ErrorIs(t, err, io.EOF) + break + } + require.NoError(t, err) + gotFrames = append(gotFrames, frame) + } + require.Equal(t, expFrames, gotFrames) +} + +func ptr[T any](t T) *T { return &t } + +func testFramesToFrames(tfs ...testFrame) []Frame { + fs := make([]Frame, 0, len(tfs)) + for _, f := range tfs { + fs = append(fs, f.ToFrame()) + } + return fs +} diff --git a/op-node/rollup/derive/frame_test.go b/op-node/rollup/derive/frame_test.go index 46006398c707..240cc0a58d8d 100644 --- a/op-node/rollup/derive/frame_test.go +++ b/op-node/rollup/derive/frame_test.go @@ -163,6 +163,12 @@ func TestParseFramesInvalidVer(t *testing.T) { require.Error(t, err) } +func TestParseFramesOnlyVersion(t *testing.T) { + frames, err := ParseFrames([]byte{DerivationVersion0}) + require.Empty(t, frames) + require.Error(t, err) +} + func TestParseFrames(t *testing.T) { rng := rand.New(rand.NewSource(time.Now().UnixNano())) numFrames := rng.Intn(16) + 1 diff --git a/op-node/rollup/derive/mocks/next_data_provider.go b/op-node/rollup/derive/mocks/next_data_provider.go new file mode 100644 index 000000000000..e7a14d92eff7 --- /dev/null +++ b/op-node/rollup/derive/mocks/next_data_provider.go @@ -0,0 +1,78 @@ +// Code generated by mockery v2.46.0. DO NOT EDIT. 
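For orientation, the Holocene pruning rules above can be exercised directly with the in-package test helpers; a minimal sketch of one more case, using the channel:frameNumber notation from the table above (a trailing ! marks a closing frame):

func TestPruneFrameQueue_DanglingChannel(t *testing.T) {
	// "b" is opened but never closed before "a:0:" starts a new channel, so the
	// dangling "b" frame is dropped. "a:2:" is dropped as well because frame 1
	// of channel "a" had not arrived yet when it was seen.
	in := testFramesToFrames("b:0:", "a:0:", "a:2:", "a:1:")
	require.Equal(t, testFramesToFrames("a:0:", "a:1:"), pruneFrameQueue(in))
}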
+ +package mocks + +import ( + context "context" + + eth "github.com/ethereum-optimism/optimism/op-service/eth" + + mock "github.com/stretchr/testify/mock" +) + +// NextDataProvider is an autogenerated mock type for the NextDataProvider type +type NextDataProvider struct { + mock.Mock +} + +// NextData provides a mock function with given fields: _a0 +func (_m *NextDataProvider) NextData(_a0 context.Context) ([]byte, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for NextData") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]byte, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) []byte); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Origin provides a mock function with given fields: +func (_m *NextDataProvider) Origin() eth.L1BlockRef { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Origin") + } + + var r0 eth.L1BlockRef + if rf, ok := ret.Get(0).(func() eth.L1BlockRef); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(eth.L1BlockRef) + } + + return r0 +} + +// NewNextDataProvider creates a new instance of NextDataProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNextDataProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *NextDataProvider { + mock := &NextDataProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/op-node/rollup/derive/pipeline.go b/op-node/rollup/derive/pipeline.go index a06640086fde..f114e2a4b0d3 100644 --- a/op-node/rollup/derive/pipeline.go +++ b/op-node/rollup/derive/pipeline.go @@ -77,13 +77,13 @@ type DerivationPipeline struct { // NewDerivationPipeline creates a DerivationPipeline, to turn L1 data into L2 block-inputs. func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, l1Fetcher L1Fetcher, l1Blobs L1BlobsFetcher, - altDA AltDAInputFetcher, l2Source L2Source, metrics Metrics) *DerivationPipeline { - + altDA AltDAInputFetcher, l2Source L2Source, metrics Metrics, +) *DerivationPipeline { // Pull stages l1Traversal := NewL1Traversal(log, rollupCfg, l1Fetcher) dataSrc := NewDataSourceFactory(log, rollupCfg, l1Fetcher, l1Blobs, altDA) // auxiliary stage for L1Retrieval l1Src := NewL1Retrieval(log, dataSrc, l1Traversal) - frameQueue := NewFrameQueue(log, l1Src) + frameQueue := NewFrameQueue(log, rollupCfg, l1Src) bank := NewChannelBank(log, rollupCfg, frameQueue, metrics) chInReader := NewChannelInReader(rollupCfg, log, bank, metrics) batchQueue := NewBatchQueue(log, rollupCfg, chInReader, l2Source) diff --git a/op-node/rollup/driver/state.go b/op-node/rollup/driver/state.go index 2840cedcf423..09f05f67e3b3 100644 --- a/op-node/rollup/driver/state.go +++ b/op-node/rollup/driver/state.go @@ -483,6 +483,10 @@ func (s *Driver) OverrideLeader(ctx context.Context) error { return s.sequencer.OverrideLeader(ctx) } +func (s *Driver) ConductorEnabled(ctx context.Context) (bool, error) { + return s.sequencer.ConductorEnabled(ctx), nil +} + // SyncStatus blocks the driver event loop and captures the syncing status. 
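The admin_conductorEnabled method added above is exposed end to end in this change (op-node admin API, RollupClient, and op-conductor). A minimal sketch of querying it from a standalone tool; the endpoint URL is a placeholder:

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"

	"github.com/ethereum-optimism/optimism/op-service/client"
	"github.com/ethereum-optimism/optimism/op-service/sources"
)

func main() {
	ctx := context.Background()
	// Placeholder endpoint for the sequencer's op-node RPC.
	rpcCl, err := rpc.DialContext(ctx, "http://localhost:9545")
	if err != nil {
		panic(err)
	}
	rollupCl := sources.NewRollupClient(client.NewBaseRPCClient(rpcCl))
	enabled, err := rollupCl.ConductorEnabled(ctx)
	fmt.Println("conductor enabled:", enabled, "err:", err)
}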
func (s *Driver) SyncStatus(ctx context.Context) (*eth.SyncStatus, error) { return s.statusTracker.SyncStatus(), nil diff --git a/op-node/rollup/interop/interop.go b/op-node/rollup/interop/interop.go index c6c170478f21..152020f09c70 100644 --- a/op-node/rollup/interop/interop.go +++ b/op-node/rollup/interop/interop.go @@ -101,12 +101,13 @@ func (d *InteropDeriver) OnEvent(ev event.Event) bool { break } switch blockSafety { - case types.CrossUnsafe, types.CrossSafe, types.CrossFinalized: + case types.CrossUnsafe, types.CrossSafe, types.Finalized: // Hold off on promoting higher than cross-unsafe, // this will happen once we verify it to be local-safe first. d.emitter.Emit(engine.PromoteCrossUnsafeEvent{Ref: candidate}) } case engine.LocalSafeUpdateEvent: + d.log.Debug("Local safe update event", "block", x.Ref.Hash, "derivedFrom", x.DerivedFrom) d.derivedFrom[x.Ref.Hash] = x.DerivedFrom d.emitter.Emit(engine.RequestCrossSafeEvent{}) case engine.CrossSafeUpdateEvent: @@ -132,10 +133,12 @@ func (d *InteropDeriver) OnEvent(ev event.Event) bool { } derivedFrom, ok := d.derivedFrom[candidate.Hash] if !ok { + d.log.Warn("Unknown block candidate source, cannot promote block safety", "block", candidate, "safety", blockSafety) break } switch blockSafety { case types.CrossSafe: + d.log.Info("Verified cross-safe block", "block", candidate, "derivedFrom", derivedFrom) // TODO(#11673): once we have interop reorg support, we need to clean stale blocks also. delete(d.derivedFrom, candidate.Hash) d.emitter.Emit(engine.PromoteSafeEvent{ diff --git a/op-node/rollup/interop/interop_test.go b/op-node/rollup/interop/interop_test.go index 62b71140770e..a7aaedcae7a1 100644 --- a/op-node/rollup/interop/interop_test.go +++ b/op-node/rollup/interop/interop_test.go @@ -61,7 +61,7 @@ func TestInteropDeriver(t *testing.T) { firstLocalUnsafe := testutils.NextRandomL2Ref(rng, 2, crossUnsafe, crossUnsafe.L1Origin) lastLocalUnsafe := testutils.NextRandomL2Ref(rng, 2, firstLocalUnsafe, firstLocalUnsafe.L1Origin) interopBackend.ExpectCheckBlock( - chainID, firstLocalUnsafe.Number, supervisortypes.Unsafe, nil) + chainID, firstLocalUnsafe.Number, supervisortypes.LocalUnsafe, nil) l2Source.ExpectL2BlockRefByNumber(firstLocalUnsafe.Number, firstLocalUnsafe, nil) interopDeriver.OnEvent(engine.CrossUnsafeUpdateEvent{ CrossUnsafe: crossUnsafe, @@ -122,7 +122,7 @@ func TestInteropDeriver(t *testing.T) { DerivedFrom: derivedFrom, }) interopBackend.ExpectCheckBlock( - chainID, firstLocalSafe.Number, supervisortypes.Safe, nil) + chainID, firstLocalSafe.Number, supervisortypes.LocalSafe, nil) l2Source.ExpectL2BlockRefByNumber(firstLocalSafe.Number, firstLocalSafe, nil) interopDeriver.OnEvent(engine.CrossSafeUpdateEvent{ CrossSafe: crossSafe, diff --git a/op-node/rollup/sequencing/disabled.go b/op-node/rollup/sequencing/disabled.go index 3634284ccd2f..64d452828104 100644 --- a/op-node/rollup/sequencing/disabled.go +++ b/op-node/rollup/sequencing/disabled.go @@ -48,4 +48,8 @@ func (ds DisabledSequencer) OverrideLeader(ctx context.Context) error { return ErrSequencerNotEnabled } +func (ds DisabledSequencer) ConductorEnabled(ctx context.Context) bool { + return false +} + func (ds DisabledSequencer) Close() {} diff --git a/op-node/rollup/sequencing/iface.go b/op-node/rollup/sequencing/iface.go index 54e0c70719e0..c2e6fa7ab200 100644 --- a/op-node/rollup/sequencing/iface.go +++ b/op-node/rollup/sequencing/iface.go @@ -19,5 +19,6 @@ type SequencerIface interface { Stop(ctx context.Context) (hash common.Hash, err error) SetMaxSafeLag(ctx 
context.Context, v uint64) error OverrideLeader(ctx context.Context) error + ConductorEnabled(ctx context.Context) bool Close() } diff --git a/op-node/rollup/sequencing/sequencer.go b/op-node/rollup/sequencing/sequencer.go index e488300b49f8..538caafe4144 100644 --- a/op-node/rollup/sequencing/sequencer.go +++ b/op-node/rollup/sequencing/sequencer.go @@ -617,8 +617,6 @@ func (d *Sequencer) Init(ctx context.Context, active bool) error { d.emitter.Emit(engine.ForkchoiceRequestEvent{}) if active { - // TODO(#11121): should the conductor be checked on startup? - // The conductor was previously not being checked in this case, but that may be a bug. return d.forceStart() } else { if err := d.listener.SequencerStopped(); err != nil { @@ -712,6 +710,10 @@ func (d *Sequencer) OverrideLeader(ctx context.Context) error { return d.conductor.OverrideLeader(ctx) } +func (d *Sequencer) ConductorEnabled(ctx context.Context) bool { + return d.conductor.Enabled(ctx) +} + func (d *Sequencer) Close() { d.conductor.Close() d.asyncGossip.Stop() diff --git a/op-node/rollup/sequencing/sequencer_test.go b/op-node/rollup/sequencing/sequencer_test.go index 7b410e644ad2..3265711a0c46 100644 --- a/op-node/rollup/sequencing/sequencer_test.go +++ b/op-node/rollup/sequencing/sequencer_test.go @@ -105,6 +105,10 @@ type FakeConductor struct { var _ conductor.SequencerConductor = &FakeConductor{} +func (c *FakeConductor) Enabled(ctx context.Context) bool { + return true +} + func (c *FakeConductor) Leader(ctx context.Context) (bool, error) { return c.leader, nil } diff --git a/op-node/rollup/status/status.go b/op-node/rollup/status/status.go index 65121b1294aa..26e9ddbc2197 100644 --- a/op-node/rollup/status/status.go +++ b/op-node/rollup/status/status.go @@ -63,6 +63,7 @@ func (st *StatusTracker) OnEvent(ev event.Event) bool { switch x := ev.(type) { case engine.ForkchoiceUpdateEvent: + st.log.Debug("Forkchoice update", "unsafe", x.UnsafeL2Head, "safe", x.SafeL2Head, "finalized", x.FinalizedL2Head) st.data.UnsafeL2 = x.UnsafeL2Head st.data.SafeL2 = x.SafeL2Head st.data.FinalizedL2 = x.FinalizedL2Head @@ -70,11 +71,14 @@ func (st *StatusTracker) OnEvent(ev event.Event) bool { st.data.UnsafeL2 = x.Unsafe st.data.PendingSafeL2 = x.PendingSafe case engine.CrossUnsafeUpdateEvent: + st.log.Debug("Cross unsafe head updated", "cross_unsafe", x.CrossUnsafe, "local_unsafe", x.LocalUnsafe) st.data.CrossUnsafeL2 = x.CrossUnsafe st.data.UnsafeL2 = x.LocalUnsafe case engine.LocalSafeUpdateEvent: + st.log.Debug("Local safe head updated", "local_safe", x.Ref) st.data.LocalSafeL2 = x.Ref case engine.CrossSafeUpdateEvent: + st.log.Debug("Cross safe head updated", "cross_safe", x.CrossSafe, "local_safe", x.LocalSafe) st.data.SafeL2 = x.CrossSafe st.data.LocalSafeL2 = x.LocalSafe case derive.DeriverL1StatusEvent: diff --git a/op-program/Dockerfile.repro b/op-program/Dockerfile.repro index 57f65bb72b81..80ec7c92c6d9 100644 --- a/op-program/Dockerfile.repro +++ b/op-program/Dockerfile.repro @@ -35,11 +35,11 @@ RUN --mount=type=cache,target=/root/.cache/go-build cd op-program && make op-pro GOOS=linux GOARCH=mips GOMIPS=softfloat GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_PROGRAM_VERSION" # Run the op-program-client.elf binary directly through cannon's load-elf subcommand. 
-RUN /app/cannon/bin/cannon load-elf --type singlethreaded --path /app/op-program/bin/op-program-client.elf --out /app/op-program/bin/prestate.json --meta "" +RUN /app/cannon/bin/cannon load-elf --type singlethreaded-2 --path /app/op-program/bin/op-program-client.elf --out /app/op-program/bin/prestate.bin.gz --meta "" RUN /app/cannon/bin/cannon load-elf --type multithreaded --path /app/op-program/bin/op-program-client.elf --out /app/op-program/bin/prestate-mt.bin.gz --meta "" # Generate the prestate proof containing the absolute pre-state hash. -RUN /app/cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input /app/op-program/bin/prestate.json --meta "" --proof-fmt '/app/op-program/bin/%d.json' --output "" +RUN /app/cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input /app/op-program/bin/prestate.bin.gz --meta "" --proof-fmt '/app/op-program/bin/%d.json' --output "" RUN mv /app/op-program/bin/0.json /app/op-program/bin/prestate-proof.json RUN /app/cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input /app/op-program/bin/prestate-mt.bin.gz --meta "" --proof-fmt '/app/op-program/bin/%d-mt.json' --output "" @@ -51,7 +51,7 @@ RUN mv /app/op-program/bin/0-mt.json /app/op-program/bin/prestate-proof-mt.json FROM scratch AS export-stage COPY --from=builder /app/op-program/bin/op-program . COPY --from=builder /app/op-program/bin/op-program-client.elf . -COPY --from=builder /app/op-program/bin/prestate.json . +COPY --from=builder /app/op-program/bin/prestate.bin.gz . COPY --from=builder /app/op-program/bin/prestate-proof.json . COPY --from=builder /app/op-program/bin/prestate-mt.bin.gz . COPY --from=builder /app/op-program/bin/prestate-proof-mt.json . diff --git a/op-program/README.md b/op-program/README.md index 78c121457974..ce1a67b3eeb5 100644 --- a/op-program/README.md +++ b/op-program/README.md @@ -45,7 +45,7 @@ After running `make reproducible-prestate`, the following files can be found in [./bin/](./bin/): - [`op-program`](./bin/op-program) - [`op-program-client.elf`](./bin/op-program-client.elf) -- [`prestate.json`](./bin/prestate.json) +- [`prestate.bin.gz`](./bin/prestate.bin.gz) - [`prestate-proof.json`](./bin/prestate-proof.json) The `prestate-proof.json` file is what contains the absolute pre-state hash under diff --git a/op-program/scripts/build-prestates.sh b/op-program/scripts/build-prestates.sh index 5394c7a135e0..0c0da57dbdd9 100755 --- a/op-program/scripts/build-prestates.sh +++ b/op-program/scripts/build-prestates.sh @@ -30,9 +30,15 @@ do LOG_FILE="${LOGS_DIR}/build-$(echo "${VERSION}" | cut -c 12-).txt" echo "Building Version: ${VERSION} Logs: ${LOG_FILE}" git checkout "${VERSION}" > "${LOG_FILE}" 2>&1 + rm -rf "${BIN_DIR}" make reproducible-prestate >> "${LOG_FILE}" 2>&1 HASH=$(cat "${BIN_DIR}/prestate-proof.json" | jq -r .pre) - cp "${BIN_DIR}/prestate.json" "${STATES_DIR}/${HASH}.json" + if [ -f "${BIN_DIR}/prestate.bin.gz" ] + then + cp "${BIN_DIR}/prestate.bin.gz" "${STATES_DIR}/${HASH}.bin.gz" + else + cp "${BIN_DIR}/prestate.json" "${STATES_DIR}/${HASH}.json" + fi echo "Built ${VERSION}: ${HASH}" done diff --git a/op-program/verify/verify.go b/op-program/verify/verify.go index 43150cfbc2b4..a04a725abe66 100644 --- a/op-program/verify/verify.go +++ b/op-program/verify/verify.go @@ -5,6 +5,7 @@ import ( "fmt" "math/big" "os" + "os/exec" "path/filepath" "strconv" "strings" @@ -26,6 +27,8 @@ import ( "github.com/ethereum/go-ethereum/rpc" ) +const runInProcess = false + type Runner struct { l1RpcUrl string l1RpcKind string @@ -99,7 +102,7 @@ func (r *Runner) 
RunBetweenBlocks(ctx context.Context, l1Head common.Hash, start return fmt.Errorf("failed to find ending block info: %w", err) } - return r.run(l1Head, agreedBlockInfo, agreedOutputRoot, claimedOutputRoot, claimedBlockInfo) + return r.run(ctx, l1Head, agreedBlockInfo, agreedOutputRoot, claimedOutputRoot, claimedBlockInfo) } func (r *Runner) createL2Client(ctx context.Context) (*sources.L2Client, error) { @@ -157,10 +160,10 @@ func (r *Runner) RunToFinalized(ctx context.Context) error { return fmt.Errorf("failed to find ending block info: %w", err) } - return r.run(l1Head.Hash(), agreedBlockInfo, agreedOutputRoot, claimedOutputRoot, claimedBlockInfo) + return r.run(ctx, l1Head.Hash(), agreedBlockInfo, agreedOutputRoot, claimedOutputRoot, claimedBlockInfo) } -func (r *Runner) run(l1Head common.Hash, agreedBlockInfo eth.BlockInfo, agreedOutputRoot common.Hash, claimedOutputRoot common.Hash, claimedBlockInfo eth.BlockInfo) error { +func (r *Runner) run(ctx context.Context, l1Head common.Hash, agreedBlockInfo eth.BlockInfo, agreedOutputRoot common.Hash, claimedOutputRoot common.Hash, claimedBlockInfo eth.BlockInfo) error { var err error if r.dataDir == "" { r.dataDir, err = os.MkdirTemp("", "oracledata") @@ -199,31 +202,64 @@ func (r *Runner) run(l1Head common.Hash, agreedBlockInfo eth.BlockInfo, agreedOu } fmt.Printf("Configuration: %s\n", argsStr) - offlineCfg := config.NewConfig( - r.rollupCfg, r.chainCfg, l1Head, agreedBlockInfo.Hash(), agreedOutputRoot, claimedOutputRoot, claimedBlockInfo.NumberU64()) - offlineCfg.DataDir = r.dataDir - onlineCfg := *offlineCfg - onlineCfg.L1URL = r.l1RpcUrl - onlineCfg.L1BeaconURL = r.l1BeaconUrl - onlineCfg.L2URL = r.l2RpcUrl - if r.l1RpcKind != "" { - onlineCfg.L1RPCKind = sources.RPCProviderKind(r.l1RpcKind) - } + if runInProcess { + offlineCfg := config.NewConfig( + r.rollupCfg, r.chainCfg, l1Head, agreedBlockInfo.Hash(), agreedOutputRoot, claimedOutputRoot, claimedBlockInfo.NumberU64()) + offlineCfg.DataDir = r.dataDir - fmt.Println("Running in online mode") - err = host.Main(oplog.NewLogger(os.Stderr, r.logCfg), &onlineCfg) - if err != nil { - return fmt.Errorf("online mode failed: %w", err) - } + onlineCfg := *offlineCfg + onlineCfg.L1URL = r.l1RpcUrl + onlineCfg.L1BeaconURL = r.l1BeaconUrl + onlineCfg.L2URL = r.l2RpcUrl + if r.l1RpcKind != "" { + onlineCfg.L1RPCKind = sources.RPCProviderKind(r.l1RpcKind) + } - fmt.Println("Running in offline mode") - err = host.Main(oplog.NewLogger(os.Stderr, r.logCfg), offlineCfg) - if err != nil { - return fmt.Errorf("offline mode failed: %w", err) + fmt.Println("Running in online mode") + err = host.Main(oplog.NewLogger(os.Stderr, r.logCfg), &onlineCfg) + if err != nil { + return fmt.Errorf("online mode failed: %w", err) + } + + fmt.Println("Running in offline mode") + err = host.Main(oplog.NewLogger(os.Stderr, r.logCfg), offlineCfg) + if err != nil { + return fmt.Errorf("offline mode failed: %w", err) + } + } else { + fmt.Println("Running in online mode") + onlineArgs := make([]string, len(args)) + copy(onlineArgs, args) + onlineArgs = append(onlineArgs, + "--l1", r.l1RpcUrl, + "--l1.beacon", r.l1BeaconUrl, + "--l2", r.l2RpcUrl) + if r.l1RpcKind != "" { + onlineArgs = append(onlineArgs, "--l1.rpckind", r.l1RpcKind) + } + err = runFaultProofProgram(ctx, onlineArgs) + if err != nil { + return fmt.Errorf("online mode failed: %w", err) + } + + fmt.Println("Running in offline mode") + err = runFaultProofProgram(ctx, args) + if err != nil { + return fmt.Errorf("offline mode failed: %w", err) + } } return nil } +func 
runFaultProofProgram(ctx context.Context, args []string) error { + ctx, cancel := context.WithTimeout(ctx, 60*time.Minute) + defer cancel() + cmd := exec.CommandContext(ctx, "./bin/op-program", args...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} + func outputAtBlockNum(ctx context.Context, l2Client *sources.L2Client, blockNum uint64) (eth.BlockInfo, common.Hash, error) { startBlockInfo, err := l2Client.InfoByNumber(ctx, blockNum) if err != nil { diff --git a/op-service/retry/operation.go b/op-service/retry/operation.go index 4f0142cde946..95925296811d 100644 --- a/op-service/retry/operation.go +++ b/op-service/retry/operation.go @@ -40,25 +40,38 @@ func Do2[T, U any](ctx context.Context, maxAttempts int, strategy Strategy, op f // Strategy. func Do[T any](ctx context.Context, maxAttempts int, strategy Strategy, op func() (T, error)) (T, error) { var empty, ret T + f := func() (err error) { + ret, err = op() + return + } + err := Do0(ctx, maxAttempts, strategy, f) + if err != nil { + return empty, err + } + return ret, err +} + +// Do0 is similar to Do and Do2, except that `op` only returns an error +func Do0(ctx context.Context, maxAttempts int, strategy Strategy, op func() error) error { var err error if maxAttempts < 1 { - return empty, fmt.Errorf("need at least 1 attempt to run op, but have %d max attempts", maxAttempts) + return fmt.Errorf("need at least 1 attempt to run op, but have %d max attempts", maxAttempts) } for i := 0; i < maxAttempts; i++ { if ctx.Err() != nil { - return empty, ctx.Err() + return ctx.Err() } - ret, err = op() + err = op() if err == nil { - return ret, nil + return nil } // Don't sleep when we are about to exit the loop & return ErrFailedPermanently if i != maxAttempts-1 { time.Sleep(strategy.Duration(i)) } } - return empty, &ErrFailedPermanently{ + return &ErrFailedPermanently{ attempts: maxAttempts, LastErr: err, } diff --git a/op-service/sources/rollupclient.go b/op-service/sources/rollupclient.go index acd0f84b3917..8ff6c54e23ef 100644 --- a/op-service/sources/rollupclient.go +++ b/op-service/sources/rollupclient.go @@ -74,6 +74,12 @@ func (r *RollupClient) OverrideLeader(ctx context.Context) error { return r.rpc.CallContext(ctx, nil, "admin_overrideLeader") } +func (r *RollupClient) ConductorEnabled(ctx context.Context) (bool, error) { + var result bool + err := r.rpc.CallContext(ctx, &result, "admin_conductorEnabled") + return result, err +} + func (r *RollupClient) SetLogLevel(ctx context.Context, lvl slog.Level) error { return r.rpc.CallContext(ctx, nil, "admin_setLogLevel", lvl.String()) } diff --git a/op-service/sources/supervisor_client.go b/op-service/sources/supervisor_client.go index db40e55ef472..ff702010daff 100644 --- a/op-service/sources/supervisor_client.go +++ b/op-service/sources/supervisor_client.go @@ -74,7 +74,7 @@ func (cl *SupervisorClient) CheckBlock(ctx context.Context, "supervisor_checkBlock", (*hexutil.U256)(&chainID), blockHash, hexutil.Uint64(blockNumber)) if err != nil { - return types.Unsafe, fmt.Errorf("failed to check Block %s:%d (chain %s): %w", blockHash, blockNumber, chainID, err) + return types.LocalUnsafe, fmt.Errorf("failed to check Block %s:%d (chain %s): %w", blockHash, blockNumber, chainID, err) } return result, nil } diff --git a/op-service/txmgr/txmgr.go b/op-service/txmgr/txmgr.go index e8e8ae1d1883..4e4c3e633f87 100644 --- a/op-service/txmgr/txmgr.go +++ b/op-service/txmgr/txmgr.go @@ -819,6 +819,12 @@ func (m *SimpleTxManager) increaseGasPrice(ctx context.Context, tx *types.Transa
"gasFeeCap", bumpedFee, "gasTipCap", bumpedTip) } + if tx.Gas() > gas { + // Don't bump the gas limit down if the passed-in gas limit is higher than + // what was originally specified. + gas = tx.Gas() + } + var newTx *types.Transaction if tx.Type() == types.BlobTxType { // Blob transactions have an additional blob gas price we must specify, so we must make sure it is diff --git a/op-supervisor/supervisor/backend/backend.go b/op-supervisor/supervisor/backend/backend.go index f21217e82c42..8216eaa9c0b5 100644 --- a/op-supervisor/supervisor/backend/backend.go +++ b/op-supervisor/supervisor/backend/backend.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "path/filepath" "sync/atomic" "time" @@ -18,7 +17,6 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/config" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/source" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/frontend" @@ -33,8 +31,6 @@ type SupervisorBackend struct { chainMonitors map[types.ChainID]*source.ChainMonitor db *db.ChainsDB - - maintenanceCancel context.CancelFunc } var _ frontend.Backend = (*SupervisorBackend)(nil) @@ -47,14 +43,8 @@ func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg return nil, err } - // create the head tracker - headTracker, err := heads.NewHeadTracker(logger, filepath.Join(cfg.Datadir, "heads.json")) - if err != nil { - return nil, fmt.Errorf("failed to load existing heads: %w", err) - } - // create the chains db - db := db.NewChainsDB(map[types.ChainID]db.LogStorage{}, headTracker, logger) + db := db.NewChainsDB(map[types.ChainID]db.LogStorage{}, logger) // create an empty map of chain monitors chainMonitors := make(map[types.ChainID]*source.ChainMonitor, len(cfg.L2RPCs)) @@ -145,10 +135,6 @@ func (su *SupervisorBackend) Start(ctx context.Context) error { return fmt.Errorf("failed to start chain monitor: %w", err) } } - // start db maintenance loop - maintenanceCtx, cancel := context.WithCancel(context.Background()) - su.db.StartCrossHeadMaintenance(maintenanceCtx) - su.maintenanceCancel = cancel return nil } @@ -158,8 +144,6 @@ func (su *SupervisorBackend) Stop(ctx context.Context) error { if !su.started.CompareAndSwap(true, false) { return errAlreadyStopped } - // signal the maintenance loop to stop - su.maintenanceCancel() // collect errors from stopping chain monitors var errs error for _, monitor := range su.chainMonitors { @@ -192,7 +176,7 @@ func (su *SupervisorBackend) CheckMessage(identifier types.Identifier, payloadHa logIdx := identifier.LogIndex _, err := su.db.Check(chainID, blockNum, uint32(logIdx), payloadHash) if errors.Is(err, logs.ErrFuture) { - return types.Unsafe, nil + return types.LocalUnsafe, nil } if errors.Is(err, logs.ErrConflict) { return types.Invalid, nil @@ -200,24 +184,7 @@ func (su *SupervisorBackend) CheckMessage(identifier types.Identifier, payloadHa if err != nil { return types.Invalid, fmt.Errorf("failed to check log: %w", err) } - safest := types.CrossUnsafe - // at this point we have the log entry, and we can check if it is safe by various criteria - for _, checker := range []db.SafetyChecker{ - db.NewSafetyChecker(types.Unsafe, su.db), - db.NewSafetyChecker(types.Safe, su.db), - 
db.NewSafetyChecker(types.Finalized, su.db), - } { - // check local safety limit first as it's more permissive - localPtr := checker.LocalHead(chainID) - if localPtr.WithinRange(blockNum, uint32(logIdx)) { - safest = checker.LocalSafetyLevel() - } - // check cross safety level - crossPtr := checker.CrossHead(chainID) - if crossPtr.WithinRange(blockNum, uint32(logIdx)) { - safest = checker.CrossSafetyLevel() - } - } + safest := su.db.Safest(chainID, blockNum, uint32(logIdx)) return safest, nil } @@ -243,12 +210,11 @@ func (su *SupervisorBackend) CheckMessages( // The block is considered safe if all logs in the block are safe // this is decided by finding the last log in the block and func (su *SupervisorBackend) CheckBlock(chainID *hexutil.U256, blockHash common.Hash, blockNumber hexutil.Uint64) (types.SafetyLevel, error) { - safest := types.CrossUnsafe // find the last log index in the block id := eth.BlockID{Hash: blockHash, Number: uint64(blockNumber)} _, err := su.db.FindSealedBlock(types.ChainID(*chainID), id) if errors.Is(err, logs.ErrFuture) { - return types.Unsafe, nil + return types.LocalUnsafe, nil } if errors.Is(err, logs.ErrConflict) { return types.Invalid, nil @@ -257,22 +223,6 @@ func (su *SupervisorBackend) CheckBlock(chainID *hexutil.U256, blockHash common. su.logger.Error("failed to scan block", "err", err) return "", err } - // at this point we have the extent of the block, and we can check if it is safe by various criteria - for _, checker := range []db.SafetyChecker{ - db.NewSafetyChecker(types.Unsafe, su.db), - db.NewSafetyChecker(types.Safe, su.db), - db.NewSafetyChecker(types.Finalized, su.db), - } { - // check local safety limit first as it's more permissive - localPtr := checker.LocalHead(types.ChainID(*chainID)) - if localPtr.IsSealed(uint64(blockNumber)) { - safest = checker.LocalSafetyLevel() - } - // check cross safety level - crossPtr := checker.CrossHead(types.ChainID(*chainID)) - if crossPtr.IsSealed(uint64(blockNumber)) { - safest = checker.CrossSafetyLevel() - } - } + safest := su.db.Safest(types.ChainID(*chainID), uint64(blockNumber), 0) return safest, nil } diff --git a/op-supervisor/supervisor/backend/db/db.go b/op-supervisor/supervisor/backend/db/db.go index 6c5e354dd0ab..8459266c0704 100644 --- a/op-supervisor/supervisor/backend/db/db.go +++ b/op-supervisor/supervisor/backend/db/db.go @@ -1,19 +1,17 @@ package db import ( - "context" "errors" "fmt" "io" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/safety" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -49,39 +47,21 @@ type LogStorage interface { var _ LogStorage = (*logs.DB)(nil) -type HeadsStorage interface { - CrossUnsafe(id types.ChainID) heads.HeadPointer - CrossSafe(id types.ChainID) heads.HeadPointer - CrossFinalized(id types.ChainID) heads.HeadPointer - LocalUnsafe(id types.ChainID) heads.HeadPointer - LocalSafe(id types.ChainID) heads.HeadPointer - LocalFinalized(id types.ChainID) heads.HeadPointer - - UpdateCrossUnsafe(id types.ChainID, pointer heads.HeadPointer) error - UpdateCrossSafe(id types.ChainID, pointer heads.HeadPointer) error - 
UpdateCrossFinalized(id types.ChainID, pointer heads.HeadPointer) error - - UpdateLocalUnsafe(id types.ChainID, pointer heads.HeadPointer) error - UpdateLocalSafe(id types.ChainID, pointer heads.HeadPointer) error - UpdateLocalFinalized(id types.ChainID, pointer heads.HeadPointer) error -} - // ChainsDB is a database that stores logs and heads for multiple chains. // it implements the ChainsStorage interface. type ChainsDB struct { - logDBs map[types.ChainID]LogStorage - heads HeadsStorage - maintenanceReady chan struct{} - logger log.Logger + logDBs map[types.ChainID]LogStorage + safetyIndex safety.SafetyIndex + logger log.Logger } -func NewChainsDB(logDBs map[types.ChainID]LogStorage, heads HeadsStorage, l log.Logger) *ChainsDB { - return &ChainsDB{ - logDBs: logDBs, - heads: heads, - logger: l, - maintenanceReady: make(chan struct{}, 1), +func NewChainsDB(logDBs map[types.ChainID]LogStorage, l log.Logger) *ChainsDB { + ret := &ChainsDB{ + logDBs: logDBs, + logger: l, } + ret.safetyIndex = safety.NewSafetyIndex(l, ret) + return ret } func (db *ChainsDB) AddLogDB(chain types.ChainID, logDB LogStorage) { @@ -91,6 +71,14 @@ func (db *ChainsDB) AddLogDB(chain types.ChainID, logDB LogStorage) { db.logDBs[chain] = logDB } +func (db *ChainsDB) IteratorStartingAt(chain types.ChainID, sealedNum uint64, logIndex uint32) (logs.Iterator, error) { + logDB, ok := db.logDBs[chain] + if !ok { + return nil, fmt.Errorf("%w: %v", ErrUnknownChain, chain) + } + return logDB.IteratorStartingAt(sealedNum, logIndex) +} + // ResumeFromLastSealedBlock prepares the chains db to resume recording events after a restart. // It rewinds the database to the last block that is guaranteed to have been fully recorded to the database, // to ensure it can resume recording from the first log of the next block. @@ -110,187 +98,39 @@ func (db *ChainsDB) ResumeFromLastSealedBlock() error { return nil } -// StartCrossHeadMaintenance starts a background process that maintains the cross-heads of the chains -// for now it does not prevent multiple instances of this process from running -func (db *ChainsDB) StartCrossHeadMaintenance(ctx context.Context) { - go func() { - db.logger.Info("cross-head maintenance loop started") - // run the maintenance loop every 1 seconds for now - ticker := time.NewTicker(time.Second * 1) - for { - select { - case <-ctx.Done(): - db.logger.Warn("context cancelled, stopping maintenance loop") - return - case <-ticker.C: - db.logger.Debug("regular maintenance requested") - db.RequestMaintenance() - case <-db.maintenanceReady: - db.logger.Debug("running maintenance") - if err := db.updateAllHeads(); err != nil { - db.logger.Error("failed to update cross-heads", "err", err) - } - } - } - }() -} - // Check calls the underlying logDB to determine if the given log entry is safe with respect to the checker's criteria. 
-func (db *ChainsDB) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (entrydb.EntryIdx, error) { +func (db *ChainsDB) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (common.Hash, error) { logDB, ok := db.logDBs[chain] if !ok { - return 0, fmt.Errorf("%w: %v", ErrUnknownChain, chain) - } - return logDB.Contains(blockNum, logIdx, logHash) -} - -// RequestMaintenance requests that the maintenance loop update the cross-heads -// it does not block if maintenance is already scheduled -func (db *ChainsDB) RequestMaintenance() { - select { - case db.maintenanceReady <- struct{}{}: - return - default: - return + return common.Hash{}, fmt.Errorf("%w: %v", ErrUnknownChain, chain) } -} - -// updateAllHeads updates the cross-heads of all safety levels -// it is called by the maintenance loop -func (db *ChainsDB) updateAllHeads() error { - // create three safety checkers, one for each safety level - unsafeChecker := NewSafetyChecker(Unsafe, db) - safeChecker := NewSafetyChecker(Safe, db) - finalizedChecker := NewSafetyChecker(Finalized, db) - for _, checker := range []SafetyChecker{ - unsafeChecker, - safeChecker, - finalizedChecker} { - if err := db.UpdateCrossHeads(checker); err != nil { - return fmt.Errorf("failed to update cross-heads for safety level %s: %w", checker, err) - } + _, err := logDB.Contains(blockNum, logIdx, logHash) + if err != nil { + return common.Hash{}, err } - return nil + // TODO(#11693): need to get the actual block hash for this log entry for reorg detection + return common.Hash{}, nil } -// UpdateCrossHeadsForChain updates the cross-head for a single chain. -// the provided checker controls which heads are considered. -func (db *ChainsDB) UpdateCrossHeadsForChain(chainID types.ChainID, checker SafetyChecker) error { - // start with the xsafe head of the chain - xHead := checker.CrossHead(chainID) - // advance as far as the local head - localHead := checker.LocalHead(chainID) - // get an iterator for the next item - iter, err := db.logDBs[chainID].IteratorStartingAt(xHead.LastSealedBlockNum, xHead.LogsSince) - if err != nil { - return fmt.Errorf("failed to open iterator at sealed block %d logsSince %d for chain %v: %w", - xHead.LastSealedBlockNum, xHead.LogsSince, chainID, err) +// Safest returns the strongest safety level that can be guaranteed for the given log entry. +// It assumes the log entry has already been checked and is valid; this function only checks safety levels. +func (db *ChainsDB) Safest(chainID types.ChainID, blockNum uint64, index uint32) (safest types.SafetyLevel) { + safest = types.LocalUnsafe + if crossUnsafe, err := db.safetyIndex.CrossUnsafeL2(chainID); err == nil && crossUnsafe.WithinRange(blockNum, index) { + safest = types.CrossUnsafe } - // track if we updated the cross-head - updated := false - // advance the logDB through all executing messages we can - // this loop will break: - // - when we reach the local head - // - when we reach a message that is not safe - // - if an error occurs - for { - if err := iter.NextInitMsg(); errors.Is(err, logs.ErrFuture) { - // We ran out of events, but there can still be empty blocks. - // Take the last block we've processed, and try to update the x-head with it. - sealedBlockHash, sealedBlockNum, ok := iter.SealedBlock() - if !ok { - break - } - // We can only drop the logsSince value to 0 if the block is not seen. 
- if sealedBlockNum > xHead.LastSealedBlockNum { - // if we would exceed the local head, then abort - if !localHead.WithinRange(sealedBlockNum, 0) { - break - } - xHead = heads.HeadPointer{ - LastSealedBlockHash: sealedBlockHash, - LastSealedBlockNum: sealedBlockNum, - LogsSince: 0, - } - updated = true - } - break - } else if err != nil { - return fmt.Errorf("failed to read next executing message for chain %v: %w", chainID, err) - } - - sealedBlockHash, sealedBlockNum, ok := iter.SealedBlock() - if !ok { - break - } - _, logIdx, ok := iter.InitMessage() - if !ok { - break - } - // if we would exceed the local head, then abort - if !localHead.WithinRange(sealedBlockNum, logIdx) { - break - } - - // Check the executing message, if any - exec := iter.ExecMessage() - if exec != nil { - // Use the checker to determine if this message exists in the canonical chain, - // within the view of the checker's safety level - if err := checker.CheckCross( - types.ChainIDFromUInt64(uint64(exec.Chain)), - exec.BlockNum, - exec.LogIdx, - exec.Hash); err != nil { - if errors.Is(err, logs.ErrConflict) { - db.logger.Error("Bad executing message!", "err", err) - } else if errors.Is(err, logs.ErrFuture) { - db.logger.Warn("Executing message references future message", "err", err) - } else { - db.logger.Error("Failed to check executing message") - } - break - } - } - // if all is well, prepare the x-head update to this point - xHead = heads.HeadPointer{ - LastSealedBlockHash: sealedBlockHash, - LastSealedBlockNum: sealedBlockNum, - LogsSince: logIdx + 1, - } - updated = true + if localSafe, err := db.safetyIndex.LocalSafeL2(chainID); err == nil && localSafe.WithinRange(blockNum, index) { + safest = types.LocalSafe } - // if any chain was updated, we can trigger a maintenance request - // this allows for the maintenance loop to handle cascading updates - // instead of waiting for the next scheduled update - if updated { - db.logger.Info("Promoting cross-head", "chain", chainID, "head", xHead, "safety-level", checker.CrossSafetyLevel()) - err = checker.UpdateCross(chainID, xHead) - if err != nil { - return fmt.Errorf("failed to update cross-head for chain %v: %w", chainID, err) - } - db.RequestMaintenance() - } else { - db.logger.Debug("No cross-head update", "chain", chainID, "head", xHead, "safety-level", checker.CrossSafetyLevel()) + if crossSafe, err := db.safetyIndex.CrossSafeL2(chainID); err == nil && crossSafe.WithinRange(blockNum, index) { + safest = types.CrossSafe } - return nil -} - -func (db *ChainsDB) Heads() HeadsStorage { - return db.heads -} - -// UpdateCrossHeads updates the cross-heads of all chains -// based on the provided SafetyChecker. The SafetyChecker is used to determine -// the safety of each log entry in the database, and the cross-head associated with it. 
-func (db *ChainsDB) UpdateCrossHeads(checker SafetyChecker) error { - for chainID := range db.logDBs { - err := db.UpdateCrossHeadsForChain(chainID, checker) - if err != nil { - return err + if finalized, err := db.safetyIndex.FinalizedL2(chainID); err == nil { + if finalized.Number >= blockNum { + safest = types.Finalized } } - return nil + return } func (db *ChainsDB) FindSealedBlock(chain types.ChainID, block eth.BlockID) (nextEntry entrydb.EntryIdx, err error) { @@ -312,20 +152,35 @@ func (db *ChainsDB) LatestBlockNum(chain types.ChainID) (num uint64, ok bool) { return logDB.LatestSealedBlockNum() } -func (db *ChainsDB) SealBlock(chain types.ChainID, parentHash common.Hash, block eth.BlockID, timestamp uint64) error { +func (db *ChainsDB) AddLog( + chain types.ChainID, + logHash common.Hash, + parentBlock eth.BlockID, + logIdx uint32, + execMsg *types.ExecutingMessage) error { logDB, ok := db.logDBs[chain] if !ok { return fmt.Errorf("%w: %v", ErrUnknownChain, chain) } - return logDB.SealBlock(parentHash, block, timestamp) + return logDB.AddLog(logHash, parentBlock, logIdx, execMsg) } -func (db *ChainsDB) AddLog(chain types.ChainID, logHash common.Hash, parentBlock eth.BlockID, logIdx uint32, execMsg *types.ExecutingMessage) error { +func (db *ChainsDB) SealBlock( + chain types.ChainID, + block eth.L2BlockRef) error { logDB, ok := db.logDBs[chain] if !ok { return fmt.Errorf("%w: %v", ErrUnknownChain, chain) } - return logDB.AddLog(logHash, parentBlock, logIdx, execMsg) + err := logDB.SealBlock(block.ParentHash, block.ID(), block.Time) + if err != nil { + return fmt.Errorf("failed to seal block %v: %w", block, err) + } + err = db.safetyIndex.UpdateLocalUnsafe(chain, block) + if err != nil { + return fmt.Errorf("failed to update local-unsafe: %w", err) + } + return nil } func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error { diff --git a/op-supervisor/supervisor/backend/db/heads/types.go b/op-supervisor/supervisor/backend/db/heads/types.go index 3e54593e33c7..7db0bff2d106 100644 --- a/op-supervisor/supervisor/backend/db/heads/types.go +++ b/op-supervisor/supervisor/backend/db/heads/types.go @@ -13,6 +13,7 @@ type HeadPointer struct { // LastSealedBlockHash is the last fully-processed block LastSealedBlockHash common.Hash LastSealedBlockNum uint64 + LastSealedTimestamp uint64 // Number of logs that have been verified since the LastSealedBlock. // These logs are contained in the block that builds on top of the LastSealedBlock. 
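As context for the new ChainsDB.Safest cascade above, the stand-alone Go sketch below models how the strongest safety level is chosen from the per-chain views. The headPointer type, its withinRange reading, and the example view values are simplified assumptions for illustration only; they are not part of this change set and do not reproduce the exact heads.HeadPointer semantics.

// Editor's sketch, not part of the diff: a minimal model of the level-upgrade cascade.
package main

import "fmt"

type headPointer struct {
	lastSealedBlockNum uint64
	logsSince          uint32
}

// withinRange is an assumed reading of heads.HeadPointer.WithinRange: the entry at
// (blockNum, logIdx) is covered if it sits in an already-sealed block, or among the
// first logsSince logs of the block built on top of the last sealed one.
func (h headPointer) withinRange(blockNum uint64, logIdx uint32) bool {
	return blockNum <= h.lastSealedBlockNum ||
		(blockNum == h.lastSealedBlockNum+1 && logIdx < h.logsSince)
}

// safest mirrors the cascade: start at local-unsafe and upgrade through every view
// that still covers the entry, finishing with the finalized block height.
func safest(crossUnsafe, localSafe, crossSafe headPointer, finalizedNum, blockNum uint64, logIdx uint32) string {
	level := "unsafe"
	if crossUnsafe.withinRange(blockNum, logIdx) {
		level = "cross-unsafe"
	}
	if localSafe.withinRange(blockNum, logIdx) {
		level = "local-safe"
	}
	if crossSafe.withinRange(blockNum, logIdx) {
		level = "safe"
	}
	if finalizedNum >= blockNum {
		level = "finalized"
	}
	return level
}

func main() {
	// Hypothetical views: cross-unsafe up to block 12, local-safe up to 10, cross-safe up to 8, finalized up to 5.
	crossUnsafe := headPointer{lastSealedBlockNum: 12}
	localSafe := headPointer{lastSealedBlockNum: 10}
	crossSafe := headPointer{lastSealedBlockNum: 8}
	fmt.Println(safest(crossUnsafe, localSafe, crossSafe, 5, 9, 0)) // local-safe
	fmt.Println(safest(crossUnsafe, localSafe, crossSafe, 5, 3, 0)) // finalized
}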
diff --git a/op-supervisor/supervisor/backend/db/logs/iterator.go b/op-supervisor/supervisor/backend/db/logs/iterator.go index 4b3bd1b65908..f9e65c41e890 100644 --- a/op-supervisor/supervisor/backend/db/logs/iterator.go +++ b/op-supervisor/supervisor/backend/db/logs/iterator.go @@ -8,11 +8,13 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) type IteratorState interface { NextIndex() entrydb.EntryIdx + HeadPointer() (heads.HeadPointer, error) SealedBlock() (hash common.Hash, num uint64, ok bool) InitMessage() (hash common.Hash, logIndex uint32, ok bool) ExecMessage() *types.ExecutingMessage @@ -23,6 +25,7 @@ type Iterator interface { NextInitMsg() error NextExecMsg() error NextBlock() error + TraverseConditional(traverseConditionalFn) error IteratorState } @@ -32,6 +35,8 @@ type iterator struct { entriesRead int64 } +type traverseConditionalFn func(state IteratorState) error + // End traverses the iterator to the end of the DB. // It does not return io.EOF or ErrFuture. func (i *iterator) End() error { @@ -105,6 +110,25 @@ func (i *iterator) NextBlock() error { } } +func (i *iterator) TraverseConditional(fn traverseConditionalFn) error { + var snapshot logContext + for { + snapshot = i.current // copy the iterator state + _, err := i.next() + if err != nil { + i.current = snapshot + return err + } + if i.current.need != 0 { // skip intermediate states + continue + } + if err := fn(&i.current); err != nil { + i.current = snapshot + return err + } + } +} + // Read and apply the next entry. func (i *iterator) next() (entrydb.EntryType, error) { index := i.current.nextEntryIndex @@ -142,3 +166,7 @@ func (i *iterator) InitMessage() (hash common.Hash, logIndex uint32, ok bool) { func (i *iterator) ExecMessage() *types.ExecutingMessage { return i.current.ExecMessage() } + +func (i *iterator) HeadPointer() (heads.HeadPointer, error) { + return i.current.HeadPointer() +} diff --git a/op-supervisor/supervisor/backend/db/logs/state.go b/op-supervisor/supervisor/backend/db/logs/state.go index bb00762acc2e..df63f96e3599 100644 --- a/op-supervisor/supervisor/backend/db/logs/state.go +++ b/op-supervisor/supervisor/backend/db/logs/state.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -126,6 +127,18 @@ func (l *logContext) ExecMessage() *types.ExecutingMessage { return nil } +func (l *logContext) HeadPointer() (heads.HeadPointer, error) { + if l.need != 0 { + return heads.HeadPointer{}, errors.New("cannot provide head pointer while state is incomplete") + } + return heads.HeadPointer{ + LastSealedBlockHash: l.blockHash, + LastSealedBlockNum: l.blockNum, + LastSealedTimestamp: l.timestamp, + LogsSince: l.logsSince, + }, nil +} + // ApplyEntry applies an entry on top of the current state. 
func (l *logContext) ApplyEntry(entry entrydb.Entry) error { // Wrap processEntry to add common useful error message info diff --git a/op-supervisor/supervisor/backend/db/safety_checkers.go b/op-supervisor/supervisor/backend/db/safety_checkers.go deleted file mode 100644 index 745f74134662..000000000000 --- a/op-supervisor/supervisor/backend/db/safety_checkers.go +++ /dev/null @@ -1,153 +0,0 @@ -package db - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/common" - - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" -) - -const ( - Unsafe = "unsafe" - Safe = "safe" - Finalized = "finalized" -) - -// SafetyChecker is an interface for checking the safety of a log entry -// it maintains a consistent view between local and cross chain for a given safety level -type SafetyChecker interface { - LocalHead(chainID types.ChainID) heads.HeadPointer - CrossHead(chainID types.ChainID) heads.HeadPointer - CheckLocal(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error - CheckCross(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error - UpdateLocal(chain types.ChainID, pointer heads.HeadPointer) error - UpdateCross(chain types.ChainID, pointer heads.HeadPointer) error - String() string - LocalSafetyLevel() types.SafetyLevel - CrossSafetyLevel() types.SafetyLevel -} - -// NewSafetyChecker creates a new SafetyChecker of the given type -func NewSafetyChecker(t types.SafetyLevel, chainsDB *ChainsDB) SafetyChecker { - return NewChecker(t, chainsDB) -} - -// check checks if the log entry is safe, provided a local head for the chain -// it is used by the individual SafetyCheckers to determine if a log entry is safe -func check( - chainsDB *ChainsDB, - head heads.HeadPointer, - chain types.ChainID, - blockNum uint64, - logIdx uint32, - logHash common.Hash) error { - - // for the Check to be valid, the log must: - // 1. have the expected logHash at the indicated blockNum and logIdx - _, err := chainsDB.logDBs[chain].Contains(blockNum, logIdx, logHash) - if err != nil { - return err - } - // 2. be within the range of the given head - if !head.WithinRange(blockNum, logIdx) { - return logs.ErrFuture - } - return nil -} - -// checker is a composition of accessor and update functions for a given safety level. -// they implement the SafetyChecker interface. -// checkers can be made with NewChecker. 
-type checker struct { - chains *ChainsDB - localSafety types.SafetyLevel - crossSafety types.SafetyLevel - updateCross func(chain types.ChainID, pointer heads.HeadPointer) error - updateLocal func(chain types.ChainID, pointer heads.HeadPointer) error - localHead func(chain types.ChainID) heads.HeadPointer - crossHead func(chain types.ChainID) heads.HeadPointer - checkCross func(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error - checkLocal func(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error -} - -func (c *checker) String() string { - return fmt.Sprintf("%s+%s", c.localSafety.String(), c.crossSafety.String()) -} - -func (c *checker) LocalSafetyLevel() types.SafetyLevel { - return c.localSafety -} - -func (c *checker) CrossSafetyLevel() types.SafetyLevel { - return c.crossSafety -} - -func (c *checker) UpdateCross(chain types.ChainID, pointer heads.HeadPointer) error { - return c.updateCross(chain, pointer) -} -func (c *checker) UpdateLocal(chain types.ChainID, pointer heads.HeadPointer) error { - return c.updateLocal(chain, pointer) -} -func (c *checker) LocalHead(chain types.ChainID) heads.HeadPointer { - return c.localHead(chain) -} -func (c *checker) CrossHead(chain types.ChainID) heads.HeadPointer { - return c.crossHead(chain) -} -func (c *checker) CheckCross(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error { - return c.checkCross(chain, blockNum, logIdx, logHash) -} -func (c *checker) CheckLocal(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error { - return c.checkLocal(chain, blockNum, logIdx, logHash) -} - -func NewChecker(t types.SafetyLevel, c *ChainsDB) SafetyChecker { - // checkWith creates a function which takes a chain-getter and returns a function that returns the head for the chain - checkWith := func(getHead func(chain types.ChainID) heads.HeadPointer) func(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error { - return func(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error { - return check(c, getHead(chain), chain, blockNum, logIdx, logHash) - } - } - switch t { - case Unsafe: - return &checker{ - chains: c, - localSafety: types.Unsafe, - crossSafety: types.CrossUnsafe, - updateCross: c.heads.UpdateCrossUnsafe, - updateLocal: c.heads.UpdateLocalUnsafe, - crossHead: c.heads.CrossUnsafe, - localHead: c.heads.LocalUnsafe, - checkCross: checkWith(c.heads.CrossUnsafe), - checkLocal: checkWith(c.heads.LocalUnsafe), - } - case Safe: - return &checker{ - chains: c, - localSafety: types.Safe, - crossSafety: types.CrossSafe, - updateCross: c.heads.UpdateCrossSafe, - updateLocal: c.heads.UpdateLocalSafe, - crossHead: c.heads.CrossSafe, - localHead: c.heads.LocalSafe, - checkCross: checkWith(c.heads.CrossSafe), - checkLocal: checkWith(c.heads.LocalSafe), - } - case Finalized: - return &checker{ - chains: c, - localSafety: types.Finalized, - crossSafety: types.CrossFinalized, - updateCross: c.heads.UpdateCrossFinalized, - updateLocal: c.heads.UpdateLocalFinalized, - crossHead: c.heads.CrossFinalized, - localHead: c.heads.LocalFinalized, - checkCross: checkWith(c.heads.CrossFinalized), - checkLocal: checkWith(c.heads.LocalFinalized), - } - } - return &checker{} -} diff --git a/op-supervisor/supervisor/backend/db/safety_checkers_test.go b/op-supervisor/supervisor/backend/db/safety_checkers_test.go deleted file mode 100644 index fa0954bc6b65..000000000000 --- 
a/op-supervisor/supervisor/backend/db/safety_checkers_test.go +++ /dev/null @@ -1,215 +0,0 @@ -package db - -/* -import ( - "errors" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-service/testlog" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" -) - -// TestHeadsForChain tests the heads for a chain, -// confirming the Unsafe, Safe and Finalized all return the correct head for the chain. -// and confirming that the chainID matters when finding the value -func TestHeadsForChain(t *testing.T) { - h := heads.NewHeads() - chainHeads := heads.ChainHeads{ - Unsafe: entrydb.EntryIdx(1), - CrossUnsafe: entrydb.EntryIdx(2), - LocalSafe: entrydb.EntryIdx(3), - CrossSafe: entrydb.EntryIdx(4), - LocalFinalized: entrydb.EntryIdx(5), - CrossFinalized: entrydb.EntryIdx(6), - } - h.Put(types.ChainIDFromUInt64(1), chainHeads) - chainsDB := NewChainsDB(nil, &stubHeadStorage{h}, testlog.Logger(t, log.LevelDebug)) - tcases := []struct { - name string - chainID types.ChainID - checkerType types.SafetyLevel - expectedLocal entrydb.EntryIdx - expectedCross entrydb.EntryIdx - }{ - { - "Unsafe Head", - types.ChainIDFromUInt64(1), - Unsafe, - entrydb.EntryIdx(1), - entrydb.EntryIdx(2), - }, - { - "Safe Head", - types.ChainIDFromUInt64(1), - Safe, - entrydb.EntryIdx(3), - entrydb.EntryIdx(4), - }, - { - "Finalized Head", - types.ChainIDFromUInt64(1), - Finalized, - entrydb.EntryIdx(5), - entrydb.EntryIdx(6), - }, - { - "Incorrect Chain", - types.ChainIDFromUInt64(100), - Safe, - entrydb.EntryIdx(0), - entrydb.EntryIdx(0), - }, - } - - for _, c := range tcases { - t.Run(c.name, func(t *testing.T) { - checker := NewSafetyChecker(c.checkerType, chainsDB) - localHead := checker.LocalHeadForChain(c.chainID) - crossHead := checker.CrossHeadForChain(c.chainID) - require.Equal(t, c.expectedLocal, localHead) - require.Equal(t, c.expectedCross, crossHead) - }) - } -} - -func TestCheck(t *testing.T) { - h := heads.NewHeads() - chainHeads := heads.ChainHeads{ - Unsafe: entrydb.EntryIdx(6), - CrossUnsafe: entrydb.EntryIdx(5), - LocalSafe: entrydb.EntryIdx(4), - CrossSafe: entrydb.EntryIdx(3), - LocalFinalized: entrydb.EntryIdx(2), - CrossFinalized: entrydb.EntryIdx(1), - } - h.Put(types.ChainIDFromUInt64(1), chainHeads) - - // the logStore contains just a single stubbed log DB - logDB := &stubLogDB{} - logsStore := map[types.ChainID]LogStorage{ - types.ChainIDFromUInt64(1): logDB, - } - - chainsDB := NewChainsDB(logsStore, &stubHeadStorage{h}, testlog.Logger(t, log.LevelDebug)) - - tcases := []struct { - name string - checkerType types.SafetyLevel - chainID types.ChainID - blockNum uint64 - logIdx uint32 - loghash common.Hash - containsResponse containsResponse - expected bool - }{ - { - // confirm that checking Unsafe uses the unsafe head, - // and that we can find logs even *at* the unsafe head index - "Unsafe Log at Head", - Unsafe, - types.ChainIDFromUInt64(1), - 1, - 1, - common.Hash{1, 2, 3}, - containsResponse{entrydb.EntryIdx(6), nil}, - true, - }, - { - // confirm that checking the Safe head works - "Safe Log", - Safe, - types.ChainIDFromUInt64(1), - 1, - 1, - common.Hash{1, 2, 3}, - 
containsResponse{entrydb.EntryIdx(3), nil}, - true, - }, - { - // confirm that checking the Finalized head works - "Finalized Log", - Finalized, - types.ChainIDFromUInt64(1), - 1, - 1, - common.Hash{1, 2, 3}, - containsResponse{entrydb.EntryIdx(1), nil}, - true, - }, - { - // confirm that when exists is false, we return false - "Does not Exist", - Safe, - types.ChainIDFromUInt64(1), - 1, - 1, - common.Hash{1, 2, 3}, - containsResponse{entrydb.EntryIdx(1), logs.ErrConflict}, - false, - }, - { - // confirm that when a head is out of range, we return false - "Unsafe Out of Range", - Unsafe, - types.ChainIDFromUInt64(1), - 1, - 1, - common.Hash{1, 2, 3}, - containsResponse{entrydb.EntryIdx(100), nil}, - false, - }, - { - // confirm that when a head is out of range, we return false - "Safe Out of Range", - Safe, - types.ChainIDFromUInt64(1), - 1, - 1, - common.Hash{1, 2, 3}, - containsResponse{entrydb.EntryIdx(5), nil}, - false, - }, - { - // confirm that when a head is out of range, we return false - "Finalized Out of Range", - Finalized, - types.ChainIDFromUInt64(1), - 1, - 1, - common.Hash{1, 2, 3}, - containsResponse{entrydb.EntryIdx(3), nil}, - false, - }, - { - // confirm that when Contains returns an error, we return false - "Error", - Safe, - types.ChainIDFromUInt64(1), - 1, - 1, - common.Hash{1, 2, 3}, - containsResponse{entrydb.EntryIdx(0), errors.New("error")}, - false, - }, - } - - for _, c := range tcases { - t.Run(c.name, func(t *testing.T) { - // rig the logStore to return the expected response - logDB.containsResponse = c.containsResponse - checker := NewSafetyChecker(c.checkerType, chainsDB) - r := checker.Check(c.chainID, c.blockNum, c.logIdx, c.loghash) - // confirm that the expected outcome is correct - require.Equal(t, c.expected, r) - }) - } -} -*/ diff --git a/op-supervisor/supervisor/backend/safety/safety.go b/op-supervisor/supervisor/backend/safety/safety.go new file mode 100644 index 000000000000..c7828336ba57 --- /dev/null +++ b/op-supervisor/supervisor/backend/safety/safety.go @@ -0,0 +1,270 @@ +package safety + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +type SafetyIndex interface { + // Updaters for the latest local safety status of each chain + UpdateLocalUnsafe(chainID types.ChainID, ref eth.L2BlockRef) error + UpdateLocalSafe(chainID types.ChainID, at eth.L1BlockRef, ref eth.L2BlockRef) error + UpdateFinalizeL1(ref eth.L1BlockRef) error + + // Getters for the latest safety status of each chain + UnsafeL2(chainID types.ChainID) (heads.HeadPointer, error) + CrossUnsafeL2(chainID types.ChainID) (heads.HeadPointer, error) + LocalSafeL2(chainID types.ChainID) (heads.HeadPointer, error) + CrossSafeL2(chainID types.ChainID) (heads.HeadPointer, error) + // We only finalize on full L2 block boundaries, hence not a heads.HeadPointer return. 
+ FinalizedL2(chainId types.ChainID) (eth.BlockID, error) +} + +type ChainsDBClient interface { + IteratorStartingAt(chainID types.ChainID, sealedNum uint64, logIndex uint32) (logs.Iterator, error) + Check(chainID types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (h common.Hash, err error) +} + +type safetyIndex struct { + log log.Logger + + chains ChainsDBClient + + unsafe map[types.ChainID]*View + safe map[types.ChainID]*View + finalized map[types.ChainID]eth.BlockID + + // remember what each non-finalized L2 block is derived from + derivedFrom map[types.ChainID]map[common.Hash]eth.L1BlockRef + + // the last received L1 finality signal. + finalizedL1 eth.L1BlockRef +} + +func NewSafetyIndex(log log.Logger, chains ChainsDBClient) *safetyIndex { + return &safetyIndex{ + log: log, + chains: chains, + unsafe: make(map[types.ChainID]*View), + safe: make(map[types.ChainID]*View), + finalized: make(map[types.ChainID]eth.BlockID), + derivedFrom: make(map[types.ChainID]map[common.Hash]eth.L1BlockRef), + } +} + +// UpdateLocalUnsafe updates the local-unsafe view for the given chain, and advances the cross-unsafe status. +func (r *safetyIndex) UpdateLocalUnsafe(chainID types.ChainID, ref eth.L2BlockRef) error { + view, ok := r.unsafe[chainID] + if !ok { + iter, err := r.chains.IteratorStartingAt(chainID, ref.Number, 0) + if err != nil { + return fmt.Errorf("failed to open iterator for chain %s block %d: %w", chainID, ref.Number, err) + } + view = &View{ + chainID: chainID, + iter: iter, + localView: heads.HeadPointer{ + LastSealedBlockHash: ref.Hash, + LastSealedBlockNum: ref.Number, + LastSealedTimestamp: ref.Time, + LogsSince: 0, + }, + localDerivedFrom: eth.L1BlockRef{}, + validWithinView: r.ValidWithinUnsafeView, + } + r.unsafe[chainID] = view + } else if err := view.UpdateLocal(eth.L1BlockRef{}, ref); err != nil { + return fmt.Errorf("failed to update local-unsafe: %w", err) + } + local, _ := r.unsafe[chainID].Local() + r.log.Debug("Updated local unsafe head", "chainID", chainID, "local", local) + r.advanceCrossUnsafe() + return nil +} + +// advanceCrossUnsafe calls Process on all cross-unsafe views. +func (r *safetyIndex) advanceCrossUnsafe() { + for chainID, view := range r.unsafe { + if err := view.Process(); err != nil { + r.log.Error("Failed to update cross-unsafe view", "chain", chainID, "err", err) + } + cross, _ := r.unsafe[chainID].Cross() + r.log.Debug("Updated cross unsafe head", "chainID", chainID, "cross", cross) + } +} + +// UpdateLocalSafe updates the local-safe view for the given chain, and advances the cross-safe status. 
+func (r *safetyIndex) UpdateLocalSafe( + chainID types.ChainID, at eth.L1BlockRef, ref eth.L2BlockRef) error { + view, ok := r.safe[chainID] + if !ok { + iter, err := r.chains.IteratorStartingAt(chainID, ref.Number, 0) + if err != nil { + return fmt.Errorf("failed to open iterator for chain %s block %d: %w", chainID, ref.Number, err) + } + view = &View{ + chainID: chainID, + iter: iter, + localView: heads.HeadPointer{ + LastSealedBlockHash: ref.Hash, + LastSealedBlockNum: ref.Number, + LastSealedTimestamp: ref.Time, + LogsSince: 0, + }, + localDerivedFrom: at, + validWithinView: r.ValidWithinSafeView, + } + r.safe[chainID] = view + } else if err := view.UpdateLocal(at, ref); err != nil { + return fmt.Errorf("failed to update local-safe: %w", err) + } + + // register what this L2 block is derived from + m, ok := r.derivedFrom[chainID] + if !ok { + m = make(map[common.Hash]eth.L1BlockRef) + r.derivedFrom[chainID] = m + } + m[ref.Hash] = at + local, _ := r.safe[chainID].Local() + r.log.Debug("Updated local safe head", "chainID", chainID, "local", local) + r.advanceCrossSafe() + return nil +} + +// advanceCrossSafe calls Process on all cross-safe views, and advances the finalized safety status. +func (r *safetyIndex) advanceCrossSafe() { + for chainID, view := range r.safe { + if err := view.Process(); err != nil { + r.log.Error("Failed to update cross-safe view", "chain", chainID, "err", err) + } + cross, _ := r.safe[chainID].Cross() + r.log.Debug("Updated cross safe head", "chainID", chainID, "cross", cross) + } + r.advanceFinalized() +} + +// UpdateFinalizeL1 updates the finalized L1 block, and advances the finalized safety status. +func (r *safetyIndex) UpdateFinalizeL1(ref eth.L1BlockRef) error { + if ref.Number <= r.finalizedL1.Number { + return fmt.Errorf("ignoring old L1 finality signal of %s, already have %s", ref, r.finalizedL1) + } + r.finalizedL1 = ref + r.log.Debug("Updated L1 finalized head", "L1finalized", ref) + r.advanceFinalized() + return nil +} + +// advanceFinalized should be called whenever the finalized L1 block, or the cross-safe history, changes. +// This then promotes the irreversible cross-safe L2 blocks to a finalized safety status. +func (r *safetyIndex) advanceFinalized() { + // Whatever was considered cross-safe at the finalized block-height can + // now be considered finalized, since the inputs have become irreversible. + for chainID, view := range r.safe { + crossSafe, err := view.Cross() + if err != nil { + r.log.Info("Failed to get cross-safe data, cannot finalize", "chain", chainID, "err", err) + continue + } + // TODO(#12184): we need to consider older cross-safe data, + // if we want to finalize something at all on longer lagging finality signal. + // Could consider just iterating over all derivedFrom contents? + l1Dep := r.derivedFrom[chainID][crossSafe.LastSealedBlockHash] + if l1Dep.Number < r.finalizedL1.Number { + r.finalized[chainID] = eth.BlockID{Hash: crossSafe.LastSealedBlockHash, Number: crossSafe.LastSealedBlockNum} + finalized := r.finalized[chainID] + r.log.Debug("Updated finalized head", "chainID", chainID, "finalized", finalized) + } + } +} + +// UnsafeL2 returns the latest unsafe L2 block of the given chain. +func (r *safetyIndex) UnsafeL2(chainID types.ChainID) (heads.HeadPointer, error) { + view, ok := r.unsafe[chainID] + if !ok { + return heads.HeadPointer{}, fmt.Errorf("no unsafe data for chain %s", chainID) + } + return view.Local() +} + +// CrossUnsafeL2 returns the latest cross-unsafe L2 block of the given chain. 
+func (r *safetyIndex) CrossUnsafeL2(chainID types.ChainID) (heads.HeadPointer, error) { + view, ok := r.unsafe[chainID] + if !ok { + return heads.HeadPointer{}, fmt.Errorf("no cross-unsafe data for chain %s", chainID) + } + return view.Cross() +} + +// LocalSafeL2 returns the latest local-safe L2 block of the given chain. +func (r *safetyIndex) LocalSafeL2(chainID types.ChainID) (heads.HeadPointer, error) { + view, ok := r.safe[chainID] + if !ok { + return heads.HeadPointer{}, fmt.Errorf("no local-safe data for chain %s", chainID) + } + return view.Local() +} + +// CrossSafeL2 returns the latest cross-safe L2 block of the given chain. +func (r *safetyIndex) CrossSafeL2(chainID types.ChainID) (heads.HeadPointer, error) { + view, ok := r.safe[chainID] + if !ok { + return heads.HeadPointer{}, fmt.Errorf("no cross-safe data for chain %s", chainID) + } + return view.Cross() +} + +// FinalizedL2 returns the latest finalized L2 block of the given chain. +func (r *safetyIndex) FinalizedL2(chainId types.ChainID) (eth.BlockID, error) { + finalized, ok := r.finalized[chainId] + if !ok { + return eth.BlockID{}, fmt.Errorf("not seen finalized data of chain %s at finalized L1 block %s", chainId, r.finalizedL1) + } + return finalized, nil +} + +// ValidWithinUnsafeView checks if the given executing message is in the database. +// unsafe view is meant to represent all of the database, and so no boundary checks are needed. +func (r *safetyIndex) ValidWithinUnsafeView(_ uint64, execMsg *types.ExecutingMessage) error { + execChainID := types.ChainIDFromUInt64(uint64(execMsg.Chain)) + _, err := r.chains.Check(execChainID, execMsg.BlockNum, execMsg.LogIdx, execMsg.Hash) + return err +} + +// ValidWithinSafeView checks if the given executing message is within the database, +// and within the L1 view of the caller. +func (r *safetyIndex) ValidWithinSafeView(l1View uint64, execMsg *types.ExecutingMessage) error { + execChainID := types.ChainIDFromUInt64(uint64(execMsg.Chain)) + + // Check that the initiating message, which was pulled in by the executing message, + // does indeed exist. And in which L2 block it exists (if any). 
+ l2BlockHash, err := r.chains.Check(execChainID, execMsg.BlockNum, execMsg.LogIdx, execMsg.Hash) + if err != nil { + return err + } + // if the executing message falls within the execFinalized range, then nothing to check + execFinalized, ok := r.finalized[execChainID] + if ok && execFinalized.Number > execMsg.BlockNum { + return nil + } + // check if the L1 block of the executing message is known + execL1Block, ok := r.derivedFrom[execChainID][l2BlockHash] + if !ok { + return logs.ErrFuture // TODO(#12185) need to distinguish between same-data future, and new-data future + } + // check if the L1 block is within the view + if execL1Block.Number > l1View { + return fmt.Errorf("exec message depends on L2 block %s:%d, derived from L1 block %s, not within view yet: %w", + l2BlockHash, execMsg.BlockNum, execL1Block, logs.ErrFuture) + } + return nil +} + +var _ SafetyIndex = (*safetyIndex)(nil) diff --git a/op-supervisor/supervisor/backend/safety/views.go b/op-supervisor/supervisor/backend/safety/views.go new file mode 100644 index 000000000000..98941dd7e6e9 --- /dev/null +++ b/op-supervisor/supervisor/backend/safety/views.go @@ -0,0 +1,91 @@ +package safety + +import ( + "errors" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +type View struct { + chainID types.ChainID + + iter logs.Iterator + + localView heads.HeadPointer + localDerivedFrom eth.L1BlockRef + + validWithinView func(l1View uint64, execMsg *types.ExecutingMessage) error +} + +func (vi *View) Cross() (heads.HeadPointer, error) { + return vi.iter.HeadPointer() +} + +func (vi *View) Local() (heads.HeadPointer, error) { + if vi.localView == (heads.HeadPointer{}) { + return heads.HeadPointer{}, logs.ErrFuture + } + return vi.localView, nil +} + +func (vi *View) UpdateLocal(at eth.L1BlockRef, ref eth.L2BlockRef) error { + vi.localView = heads.HeadPointer{ + LastSealedBlockHash: ref.Hash, + LastSealedBlockNum: ref.Number, + //LastSealedTimestamp: ref.Time, + LogsSince: 0, + } + vi.localDerivedFrom = at + + // TODO(#11693): reorg check against existing DB + // TODO(#12186): localView may be larger than what DB contents we have + return nil +} + +func (vi *View) Process() error { + err := vi.iter.TraverseConditional(func(state logs.IteratorState) error { + hash, num, ok := state.SealedBlock() + if !ok { + return logs.ErrFuture // maybe a more specific error for no-genesis case? + } + // TODO(#11693): reorg check in the future. To make sure that what we traverse is still canonical. + _ = hash + // check if L2 block is within view + if !vi.localView.WithinRange(num, 0) { + return logs.ErrFuture + } + _, initLogIndex, ok := state.InitMessage() + if !ok { + return nil // no readable message, just an empty block + } + // check if the message is within view + if !vi.localView.WithinRange(num, initLogIndex) { + return logs.ErrFuture + } + // check if it is an executing message. If so, check the dependency + if execMsg := state.ExecMessage(); execMsg != nil { + // Check if executing message is within cross L2 view, + // relative to the L1 view of current message. + // And check if the message is valid to execute at all + // (i.e. if it exists on the initiating side). 
+ // TODO(#12187): it's inaccurate to check with the view of the local-unsafe + // it should be limited to the L1 view at the time of the inclusion of execution of the message. + err := vi.validWithinView(vi.localDerivedFrom.Number, execMsg) + if err != nil { + return err + } + } + return nil + }) + if err == nil { + panic("expected reader to complete with an exit-error") + } + if errors.Is(err, logs.ErrFuture) { + // register the new cross-safe block as cross-safe up to the current L1 view + return nil + } + return err +} diff --git a/op-supervisor/supervisor/backend/source/chain.go b/op-supervisor/supervisor/backend/source/chain.go index 03286b1a4160..383a5fb74de8 100644 --- a/op-supervisor/supervisor/backend/source/chain.go +++ b/op-supervisor/supervisor/backend/source/chain.go @@ -10,7 +10,6 @@ import ( "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum-optimism/optimism/op-service/sources/caching" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -25,8 +24,7 @@ type Metrics interface { } type Storage interface { - LogStorage - Heads() db.HeadsStorage + ChainsDBClientForLogProcessor DatabaseRewinder LatestBlockNum(chainID types.ChainID) (num uint64, ok bool) } @@ -50,16 +48,9 @@ func NewChainMonitor(ctx context.Context, logger log.Logger, m Metrics, chainID processLogs := newLogProcessor(chainID, store) unsafeBlockProcessor := NewChainProcessor(logger, cl, chainID, processLogs, store) - // create head processors which only update the head - unsafeHeadProcessor := OnNewHead(chainID, store.Heads().UpdateLocalUnsafe) - safeHeadProcessor := OnNewHead(chainID, store.Heads().UpdateLocalSafe) - finalizedHeadProcessor := OnNewHead(chainID, store.Heads().UpdateLocalFinalized) + unsafeProcessors := []HeadProcessor{unsafeBlockProcessor} - unsafeProcessors := []HeadProcessor{unsafeBlockProcessor, unsafeHeadProcessor} - safeProcessors := []HeadProcessor{safeHeadProcessor} - finalizedProcessors := []HeadProcessor{finalizedHeadProcessor} - - callback := newHeadUpdateProcessor(logger, unsafeProcessors, safeProcessors, finalizedProcessors) + callback := newHeadUpdateProcessor(logger, unsafeProcessors, nil, nil) headMonitor := NewHeadMonitor(logger, epochPollInterval, cl, callback) return &ChainMonitor{ diff --git a/op-supervisor/supervisor/backend/source/chain_processor.go b/op-supervisor/supervisor/backend/source/chain_processor.go index 4c7895b0cdf3..60568fe296fb 100644 --- a/op-supervisor/supervisor/backend/source/chain_processor.go +++ b/op-supervisor/supervisor/backend/source/chain_processor.go @@ -21,7 +21,7 @@ type Source interface { } type LogProcessor interface { - ProcessLogs(ctx context.Context, block eth.L1BlockRef, receipts gethtypes.Receipts) error + ProcessLogs(ctx context.Context, block eth.L2BlockRef, receipts gethtypes.Receipts) error } type DatabaseRewinder interface { @@ -130,7 +130,13 @@ func (s *ChainProcessor) worker() { func (s *ChainProcessor) update(nextNum uint64) error { ctx, cancel := context.WithTimeout(s.ctx, time.Second*10) - next, err := s.client.L1BlockRefByNumber(ctx, nextNum) + nextL1, err := s.client.L1BlockRefByNumber(ctx, nextNum) + next := eth.L2BlockRef{ + Hash: nextL1.Hash, + ParentHash: nextL1.ParentHash, + Number: nextL1.Number, + Time: nextL1.Time, + } cancel() if err != nil { return fmt.Errorf("failed to fetch next block: %w", err) diff --git 
a/op-supervisor/supervisor/backend/source/log_processor.go b/op-supervisor/supervisor/backend/source/log_processor.go index 1c20f8c4530a..8a815f7ca9e9 100644 --- a/op-supervisor/supervisor/backend/source/log_processor.go +++ b/op-supervisor/supervisor/backend/source/log_processor.go @@ -15,7 +15,12 @@ import ( ) type LogStorage interface { - SealBlock(chain types.ChainID, parentHash common.Hash, block eth.BlockID, timestamp uint64) error + SealBlock(chain types.ChainID, block eth.L2BlockRef) error + AddLog(chain types.ChainID, logHash common.Hash, parentBlock eth.BlockID, logIdx uint32, execMsg *types.ExecutingMessage) error +} + +type ChainsDBClientForLogProcessor interface { + SealBlock(chain types.ChainID, block eth.L2BlockRef) error AddLog(chain types.ChainID, logHash common.Hash, parentBlock eth.BlockID, logIdx uint32, execMsg *types.ExecutingMessage) error } @@ -39,7 +44,7 @@ func newLogProcessor(chain types.ChainID, logStore LogStorage) *logProcessor { // ProcessLogs processes logs from a block and stores them in the log storage // for any logs that are related to executing messages, they are decoded and stored -func (p *logProcessor) ProcessLogs(_ context.Context, block eth.L1BlockRef, rcpts ethTypes.Receipts) error { +func (p *logProcessor) ProcessLogs(_ context.Context, block eth.L2BlockRef, rcpts ethTypes.Receipts) error { for _, rcpt := range rcpts { for _, l := range rcpt.Logs { // log hash represents the hash of *this* log as a potentially initiating message @@ -60,7 +65,7 @@ func (p *logProcessor) ProcessLogs(_ context.Context, block eth.L1BlockRef, rcpt } } } - if err := p.logStore.SealBlock(p.chain, block.ParentHash, block.ID(), block.Time); err != nil { + if err := p.logStore.SealBlock(p.chain, block); err != nil { return fmt.Errorf("failed to seal block %s: %w", block.ID(), err) } return nil diff --git a/op-supervisor/supervisor/backend/source/log_processor_test.go b/op-supervisor/supervisor/backend/source/log_processor_test.go index bd7aa7abc3d1..6e96d731fcff 100644 --- a/op-supervisor/supervisor/backend/source/log_processor_test.go +++ b/op-supervisor/supervisor/backend/source/log_processor_test.go @@ -17,7 +17,7 @@ var logProcessorChainID = types.ChainIDFromUInt64(4) func TestLogProcessor(t *testing.T) { ctx := context.Background() - block1 := eth.L1BlockRef{ + block1 := eth.L2BlockRef{ ParentHash: common.Hash{0x42}, Number: 100, Hash: common.Hash{0x11}, @@ -205,14 +205,14 @@ type stubLogStorage struct { seals []storedSeal } -func (s *stubLogStorage) SealBlock(chainID types.ChainID, parentHash common.Hash, block eth.BlockID, timestamp uint64) error { +func (s *stubLogStorage) SealBlock(chainID types.ChainID, block eth.L2BlockRef) error { if logProcessorChainID != chainID { return fmt.Errorf("chain id mismatch, expected %v but got %v", logProcessorChainID, chainID) } s.seals = append(s.seals, storedSeal{ - parent: parentHash, - block: block, - timestamp: timestamp, + parent: block.ParentHash, + block: block.ID(), + timestamp: block.Time, }) return nil } diff --git a/op-supervisor/supervisor/types/types.go b/op-supervisor/supervisor/types/types.go index e89e8e9515bb..ea480afa8b3c 100644 --- a/op-supervisor/supervisor/types/types.go +++ b/op-supervisor/supervisor/types/types.go @@ -73,7 +73,7 @@ func (lvl SafetyLevel) String() string { func (lvl SafetyLevel) Valid() bool { switch lvl { - case CrossFinalized, Finalized, Safe, CrossUnsafe, Unsafe: + case Finalized, CrossSafe, LocalSafe, CrossUnsafe, LocalUnsafe: return true default: return false @@ -101,10 +101,10 @@ func 
(lvl *SafetyLevel) AtLeastAsSafe(min SafetyLevel) bool { switch min { case Invalid: return true - case Unsafe: + case CrossUnsafe: return *lvl != Invalid - case Safe: - return *lvl == Safe || *lvl == Finalized + case CrossSafe: + return *lvl == CrossSafe || *lvl == Finalized case Finalized: return *lvl == Finalized default: @@ -113,13 +113,26 @@ func (lvl *SafetyLevel) AtLeastAsSafe(min SafetyLevel) bool { } const ( - CrossFinalized SafetyLevel = "cross-finalized" - Finalized SafetyLevel = "finalized" - CrossSafe SafetyLevel = "cross-safe" - Safe SafetyLevel = "safe" - CrossUnsafe SafetyLevel = "cross-unsafe" - Unsafe SafetyLevel = "unsafe" - Invalid SafetyLevel = "invalid" + // Finalized is CrossSafe, with the additional constraint that every + // dependency is derived only from finalized L1 input data. + // This matches RPC label "finalized". + Finalized SafetyLevel = "finalized" + // CrossSafe is as safe as LocalSafe, with all its dependencies + // also fully verified to be reproducible from L1. + // This matches RPC label "safe". + CrossSafe SafetyLevel = "safe" + // LocalSafe is verified to be reproducible from L1, + // without any verified cross-L2 dependencies. + // This does not have an RPC label. + LocalSafe SafetyLevel = "local-safe" + // CrossUnsafe is as safe as LocalUnsafe, + // but with verified cross-L2 dependencies that are at least CrossUnsafe. + // This does not have an RPC label. + CrossUnsafe SafetyLevel = "cross-unsafe" + // LocalUnsafe is the safety of the tip of the chain. This matches RPC label "unsafe". + LocalUnsafe SafetyLevel = "unsafe" + // Invalid is the safety of when the message or block is not matching the expected data. + Invalid SafetyLevel = "invalid" ) type ChainID uint256.Int diff --git a/ops-bedrock/docker-compose.yml b/ops-bedrock/docker-compose.yml index 1cc5626876bd..adcaea8f4d1b 100644 --- a/ops-bedrock/docker-compose.yml +++ b/ops-bedrock/docker-compose.yml @@ -233,7 +233,7 @@ services: OP_CHALLENGER_CANNON_L2_GENESIS: ./.devnet/genesis-l2.json OP_CHALLENGER_CANNON_BIN: ./cannon/bin/cannon OP_CHALLENGER_CANNON_SERVER: /op-program/op-program - OP_CHALLENGER_CANNON_PRESTATE: /op-program/prestate.json + OP_CHALLENGER_CANNON_PRESTATE: /op-program/prestate.bin.gz OP_CHALLENGER_L2_ETH_RPC: http://l2:8545 OP_CHALLENGER_MNEMONIC: test test test test test test test test test test test junk OP_CHALLENGER_HD_PATH: "m/44'/60'/0'/0/4" diff --git a/ops/docker/op-stack-go/Dockerfile b/ops/docker/op-stack-go/Dockerfile index 35f14d19a439..edb7597ec34f 100644 --- a/ops/docker/op-stack-go/Dockerfile +++ b/ops/docker/op-stack-go/Dockerfile @@ -47,14 +47,11 @@ ARG TARGETARCH # The "id" defaults to the value of "target", the cache will thus be reused during this build. # "sharing" defaults to "shared", the cache will thus be available to other concurrent docker builds. 
-# For now fetch the v1 cannon binary from the op-challenger image -#FROM --platform=$BUILDPLATFORM us-docker.pkg.dev/oplabs-tools-artifacts/images/op-challenger:v1.1.0 AS cannon-builder-0 +FROM --platform=$BUILDPLATFORM us-docker.pkg.dev/oplabs-tools-artifacts/images/cannon:v1.0.0-alpha.3 AS cannon-builder-0 FROM --platform=$BUILDPLATFORM builder AS cannon-builder ARG CANNON_VERSION=v0.0.0 -# uncomment these lines once there's a new Cannon version available -#COPY --from=cannon-builder-0 /usr/local/bin/cannon ./cannon/multicannon/embeds/cannon-0 -#COPY --from=cannon-builder-0 /usr/local/bin/cannon ./cannon/multicannon/embeds/cannon-1 +COPY --from=cannon-builder-0 /usr/local/bin/cannon ./cannon/multicannon/embeds/cannon-0 RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build cd cannon && make cannon \ GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$CANNON_VERSION" diff --git a/ops/tag-service/tag-tool.py b/ops/tag-service/tag-tool.py index dedd3601fc8e..545f9b41571f 100644 --- a/ops/tag-service/tag-tool.py +++ b/ops/tag-service/tag-tool.py @@ -20,7 +20,7 @@ GIT_TAG_COMMAND = 'git tag -a {tag} -m "{message}"' GIT_PUSH_COMMAND = 'git push origin {tag}' -def new_tag(service, version, bump): +def new_tag(service, version, bump, pre_release): if bump == 'major': bumped = version.bump_major() elif bump == 'minor': @@ -28,11 +28,18 @@ def new_tag(service, version, bump): elif bump == 'patch': bumped = version.bump_patch() elif bump == 'prerelease': + if pre_release: + raise Exception('Cannot use --bump=prerelease with --pre-release') bumped = version.bump_prerelease() elif bump == 'finalize-prerelease': + if pre_release: + raise Exception('Cannot use --bump=finalize-prerelease with --pre-release') bumped = version.finalize_version() else: raise Exception('Invalid bump type: {}'.format(bump)) + + if pre_release: + bumped = bumped.bump_prerelease() return f'{service}/v{bumped}' def latest_version(service): @@ -57,6 +64,7 @@ def main(): parser = argparse.ArgumentParser(description='Create a new git tag for a service') parser.add_argument('--service', type=str, help='The name of the Service') parser.add_argument('--bump', type=str, help='The type of bump to apply to the version number') + parser.add_argument('--pre-release', help='Treat this tag as a pre-release', action='store_true') parser.add_argument('--message', type=str, help='Message to include in git tag', default='[tag-tool-release]') args = parser.parse_args() @@ -67,7 +75,7 @@ def main(): else: latest = latest_version(service) - bumped = new_tag(service, semver.VersionInfo.parse(latest), args.bump) + bumped = new_tag(service, semver.VersionInfo.parse(latest), args.bump, args.pre_release) print(f'latest tag: {latest}') print(f'new tag: {bumped}') diff --git a/packages/contracts-bedrock/.gas-snapshot b/packages/contracts-bedrock/.gas-snapshot index 3564748212d9..700053bd8ab9 100644 --- a/packages/contracts-bedrock/.gas-snapshot +++ b/packages/contracts-bedrock/.gas-snapshot @@ -1,17 +1,17 @@ -GasBenchMark_L1BlockInterop_DepositsComplete:test_depositsComplete_benchmark() (gas: 7567) -GasBenchMark_L1BlockInterop_DepositsComplete_Warm:test_depositsComplete_benchmark() (gas: 5567) -GasBenchMark_L1BlockInterop_SetValuesInterop:test_setL1BlockValuesInterop_benchmark() (gas: 175677) +GasBenchMark_L1BlockInterop_DepositsComplete:test_depositsComplete_benchmark() (gas: 7589) +GasBenchMark_L1BlockInterop_DepositsComplete_Warm:test_depositsComplete_benchmark() (gas: 5589) 
+GasBenchMark_L1BlockInterop_SetValuesInterop:test_setL1BlockValuesInterop_benchmark() (gas: 175655) GasBenchMark_L1BlockInterop_SetValuesInterop_Warm:test_setL1BlockValuesInterop_benchmark() (gas: 5099) GasBenchMark_L1Block_SetValuesEcotone:test_setL1BlockValuesEcotone_benchmark() (gas: 158531) GasBenchMark_L1Block_SetValuesEcotone_Warm:test_setL1BlockValuesEcotone_benchmark() (gas: 7597) GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_0() (gas: 369242) GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_1() (gas: 2967382) -GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_0() (gas: 564362) -GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_1() (gas: 4076577) +GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_0() (gas: 564356) +GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_1() (gas: 4076571) GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_0() (gas: 467019) -GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_1() (gas: 3512701) +GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_1() (gas: 3512723) GasBenchMark_L1StandardBridge_Finalize:test_finalizeETHWithdrawal_benchmark() (gas: 72618) -GasBenchMark_L2OutputOracle:test_proposeL2Output_benchmark() (gas: 92970) -GasBenchMark_OptimismPortal:test_depositTransaction_benchmark() (gas: 68312) -GasBenchMark_OptimismPortal:test_depositTransaction_benchmark_1() (gas: 68943) -GasBenchMark_OptimismPortal:test_proveWithdrawalTransaction_benchmark() (gas: 155607) \ No newline at end of file +GasBenchMark_L2OutputOracle:test_proposeL2Output_benchmark() (gas: 92973) +GasBenchMark_OptimismPortal:test_depositTransaction_benchmark() (gas: 68357) +GasBenchMark_OptimismPortal:test_depositTransaction_benchmark_1() (gas: 68921) +GasBenchMark_OptimismPortal:test_proveWithdrawalTransaction_benchmark() (gas: 155610) \ No newline at end of file diff --git a/packages/contracts-bedrock/CONTRIBUTING.md b/packages/contracts-bedrock/CONTRIBUTING.md index a249ae5bedca..43f6a710747b 100644 --- a/packages/contracts-bedrock/CONTRIBUTING.md +++ b/packages/contracts-bedrock/CONTRIBUTING.md @@ -39,7 +39,7 @@ If you have any questions about the smart contracts, please feel free to ask the #### How Do I Submit a Good Enhancement Suggestion? -Enhancement suggestions are tracked as [GitHub issues](/issues). +Enhancement suggestions are tracked as [GitHub issues](https://github.com/ethereum-optimism/optimism/issues). - Use a **clear and descriptive title** for the issue to identify the suggestion. - Provide a **step-by-step** description of the suggested enhancement in as many details as possible. diff --git a/packages/contracts-bedrock/foundry.toml b/packages/contracts-bedrock/foundry.toml index cef9f85bbaeb..273cbb40ff50 100644 --- a/packages/contracts-bedrock/foundry.toml +++ b/packages/contracts-bedrock/foundry.toml @@ -87,7 +87,7 @@ optimizer = false # See test/kontrol/README.md for an explanation of how the profiles are configured [profile.kdeploy] -src = 'src/L1' +src = 'src' out = 'kout-deployment' test = 'test/kontrol' script = 'scripts-kontrol' diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index 901ce17daa68..a9c621cf240c 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -163,7 +163,7 @@ semver-diff-check: build semver-diff-check-no-build # Checks that semver natspec is equal to the actual semver version. # Does not build contracts. 
semver-natspec-check-no-build: - ./scripts/checks/check-semver-natspec-match.sh + go run ./scripts/checks/semver-natspec # Checks that semver natspec is equal to the actual semver version. semver-natspec-check: build semver-natspec-check-no-build @@ -197,7 +197,11 @@ check: gas-snapshot-check-no-build kontrol-deployment-check snapshots-check-no-b ######################################################## # Cleans, builds, lints, and runs all checks. -pre-pr: clean build-go-ffi build lint gas-snapshot-no-build snapshots-no-build semver-lock check +pre-pr: clean pre-pr-no-build + +# Builds, lints, and runs all checks. Sometimes a bad cache causes issues, in which case the above +# `pre-pr` is preferred. But in most cases this will be sufficient and much faster than a full build. +pre-pr-no-build: build-go-ffi build lint gas-snapshot-no-build snapshots-no-build semver-lock check # Fixes linting errors. lint-fix: diff --git a/packages/contracts-bedrock/lib/forge-std b/packages/contracts-bedrock/lib/forge-std index 2d8b7b876a5b..8f24d6b04c92 160000 --- a/packages/contracts-bedrock/lib/forge-std +++ b/packages/contracts-bedrock/lib/forge-std @@ -1 +1 @@ -Subproject commit 2d8b7b876a5b328d6a73e13c4740ed7a0d72d5f4 +Subproject commit 8f24d6b04c92975e0795b5868aa0d783251cdeaa diff --git a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol index 3dffee5d32b0..8e7a38ca2eb5 100644 --- a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol @@ -7,6 +7,10 @@ import { LibString } from "@solady/utils/LibString.sol"; import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; +import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; +import { IL1CrossDomainMessengerV160 } from "src/L1/interfaces/IL1CrossDomainMessengerV160.sol"; +import { IL1StandardBridgeV160 } from "src/L1/interfaces/IL1StandardBridgeV160.sol"; import { Constants } from "src/libraries/Constants.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; @@ -26,8 +30,6 @@ import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol"; import { AnchorStateRegistry } from "src/dispute/AnchorStateRegistry.sol"; import { PermissionedDisputeGame } from "src/dispute/PermissionedDisputeGame.sol"; -import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; -import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; import { OptimismPortal2 } from "src/L1/OptimismPortal2.sol"; import { SystemConfig } from "src/L1/SystemConfig.sol"; @@ -59,11 +61,13 @@ contract DeployImplementationsInput is BaseDeployIO { string internal _release; // Outputs from DeploySuperchain.s.sol. 
- SuperchainConfig internal _superchainConfigProxy; - ProtocolVersions internal _protocolVersionsProxy; + ISuperchainConfig internal _superchainConfigProxy; + IProtocolVersions internal _protocolVersionsProxy; string internal _standardVersionsToml; + address internal _opcmProxyOwner; + function set(bytes4 _sel, uint256 _value) public { require(_value != 0, "DeployImplementationsInput: cannot set zero value"); @@ -92,8 +96,9 @@ contract DeployImplementationsInput is BaseDeployIO { function set(bytes4 _sel, address _addr) public { require(_addr != address(0), "DeployImplementationsInput: cannot set zero address"); - if (_sel == this.superchainConfigProxy.selector) _superchainConfigProxy = SuperchainConfig(_addr); - else if (_sel == this.protocolVersionsProxy.selector) _protocolVersionsProxy = ProtocolVersions(_addr); + if (_sel == this.superchainConfigProxy.selector) _superchainConfigProxy = ISuperchainConfig(_addr); + else if (_sel == this.protocolVersionsProxy.selector) _protocolVersionsProxy = IProtocolVersions(_addr); + else if (_sel == this.opcmProxyOwner.selector) _opcmProxyOwner = _addr; else revert("DeployImplementationsInput: unknown selector"); } @@ -145,23 +150,19 @@ contract DeployImplementationsInput is BaseDeployIO { return _standardVersionsToml; } - function superchainConfigProxy() public view returns (SuperchainConfig) { + function superchainConfigProxy() public view returns (ISuperchainConfig) { require(address(_superchainConfigProxy) != address(0), "DeployImplementationsInput: not set"); return _superchainConfigProxy; } - function protocolVersionsProxy() public view returns (ProtocolVersions) { + function protocolVersionsProxy() public view returns (IProtocolVersions) { require(address(_protocolVersionsProxy) != address(0), "DeployImplementationsInput: not set"); return _protocolVersionsProxy; } - function superchainProxyAdmin() public returns (ProxyAdmin) { - SuperchainConfig proxy = this.superchainConfigProxy(); - // Can infer the superchainProxyAdmin from the superchainConfigProxy. - vm.prank(address(0)); - ProxyAdmin proxyAdmin = ProxyAdmin(Proxy(payable(address(proxy))).admin()); - require(address(proxyAdmin) != address(0), "DeployImplementationsInput: not set"); - return proxyAdmin; + function opcmProxyOwner() public view returns (address) { + require(address(_opcmProxyOwner) != address(0), "DeployImplementationsInput: not set"); + return _opcmProxyOwner; } } @@ -307,7 +308,7 @@ contract DeployImplementationsOutput is BaseDeployIO { Proxy proxy = Proxy(payable(address(opcmProxy()))); vm.prank(address(0)); address admin = proxy.admin(); - require(admin == address(_dii.superchainProxyAdmin()), "OPCMP-10"); + require(admin == address(_dii.opcmProxyOwner()), "OPCMP-10"); // Then we check the proxy as OPCM. DeployUtils.assertInitialized({ _contractAddress: address(opcmProxy()), _slot: 0, _offset: 0 }); @@ -479,7 +480,7 @@ contract DeployImplementations is Script { // --- OP Contracts Manager --- function opcmSystemConfigSetter( - DeployImplementationsInput, + DeployImplementationsInput _dii, DeployImplementationsOutput _dio ) internal @@ -487,9 +488,55 @@ contract DeployImplementations is Script { virtual returns (OPContractsManager.ImplementationSetter memory) { + // When configuring OPCM during Solidity tests, we are using the latest SystemConfig.sol + // version in this repo, which contains Custom Gas Token (CGT) features. 
This CGT version + // has a different `initialize` signature than the SystemConfig version that was released + // as part of `op-contracts/v1.6.0`, which is no longer in the repo. When running this + // script's bytecode for a production deploy of OPCM at `op-contracts/v1.6.0`, we need to + // use the ISystemConfigV160 interface instead of ISystemConfig. Therefore the selector used + // is a function of the `release` passed in by the caller. + bytes4 selector = LibString.eq(_dii.release(), "op-contracts/v1.6.0") + ? ISystemConfigV160.initialize.selector + : SystemConfig.initialize.selector; return OPContractsManager.ImplementationSetter({ name: "SystemConfig", - info: OPContractsManager.Implementation(address(_dio.systemConfigImpl()), SystemConfig.initialize.selector) + info: OPContractsManager.Implementation(address(_dio.systemConfigImpl()), selector) + }); + } + + function l1CrossDomainMessengerConfigSetter( + DeployImplementationsInput _dii, + DeployImplementationsOutput _dio + ) + internal + view + virtual + returns (OPContractsManager.ImplementationSetter memory) + { + bytes4 selector = LibString.eq(_dii.release(), "op-contracts/v1.6.0") + ? IL1CrossDomainMessengerV160.initialize.selector + : L1CrossDomainMessenger.initialize.selector; + return OPContractsManager.ImplementationSetter({ + name: "L1CrossDomainMessenger", + info: OPContractsManager.Implementation(address(_dio.l1CrossDomainMessengerImpl()), selector) + }); + } + + function l1StandardBridgeConfigSetter( + DeployImplementationsInput _dii, + DeployImplementationsOutput _dio + ) + internal + view + virtual + returns (OPContractsManager.ImplementationSetter memory) + { + bytes4 selector = LibString.eq(_dii.release(), "op-contracts/v1.6.0") + ? IL1StandardBridgeV160.initialize.selector + : L1StandardBridge.initialize.selector; + return OPContractsManager.ImplementationSetter({ + name: "L1StandardBridge", + info: OPContractsManager.Implementation(address(_dio.l1StandardBridgeImpl()), selector) }); } @@ -505,7 +552,7 @@ contract DeployImplementations is Script { virtual returns (OPContractsManager opcmProxy_) { - ProxyAdmin proxyAdmin = _dii.superchainProxyAdmin(); + address opcmProxyOwner = _dii.opcmProxyOwner(); vm.broadcast(msg.sender); Proxy proxy = new Proxy(address(msg.sender)); @@ -521,7 +568,7 @@ contract DeployImplementations is Script { address(opcmImpl), abi.encodeWithSelector(opcmImpl.initialize.selector, initializerInputs) ); - proxy.changeAdmin(address(proxyAdmin)); // transfer ownership of Proxy contract to the ProxyAdmin contract + proxy.changeAdmin(address(opcmProxyOwner)); // transfer ownership of Proxy contract to the ProxyAdmin contract vm.stopBroadcast(); opcmProxy_ = OPContractsManager(address(proxy)); @@ -568,18 +615,8 @@ contract DeployImplementations is Script { address(_dio.optimismMintableERC20FactoryImpl()), OptimismMintableERC20Factory.initialize.selector ) }); - setters[4] = OPContractsManager.ImplementationSetter({ - name: "L1CrossDomainMessenger", - info: OPContractsManager.Implementation( - address(_dio.l1CrossDomainMessengerImpl()), L1CrossDomainMessenger.initialize.selector - ) - }); - setters[5] = OPContractsManager.ImplementationSetter({ - name: "L1StandardBridge", - info: OPContractsManager.Implementation( - address(_dio.l1StandardBridgeImpl()), L1StandardBridge.initialize.selector - ) - }); + setters[4] = l1CrossDomainMessengerConfigSetter(_dii, _dio); + setters[5] = l1StandardBridgeConfigSetter(_dii, _dio); setters[6] = OPContractsManager.ImplementationSetter({ name: "DisputeGameFactory", 
info: OPContractsManager.Implementation( @@ -739,8 +776,8 @@ contract DeployImplementations is Script { public virtual { - SuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); - ProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); + ISuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); + IProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); vm.broadcast(msg.sender); // TODO: Eventually we will want to select the correct implementation based on the release. @@ -874,7 +911,7 @@ contract DeployImplementations is Script { if (existingImplementation != address(0)) { singleton = MIPS(payable(existingImplementation)); } else if (isDevelopRelease(release)) { - IPreimageOracle preimageOracle = IPreimageOracle(_dio.preimageOracleSingleton()); + IPreimageOracle preimageOracle = IPreimageOracle(address(_dio.preimageOracleSingleton())); vm.broadcast(msg.sender); singleton = new MIPS(preimageOracle); } else { @@ -1025,7 +1062,7 @@ contract DeployImplementationsInterop is DeployImplementations { override returns (OPContractsManager opcmProxy_) { - ProxyAdmin proxyAdmin = _dii.superchainProxyAdmin(); + address opcmProxyOwner = _dii.opcmProxyOwner(); vm.broadcast(msg.sender); Proxy proxy = new Proxy(address(msg.sender)); @@ -1041,7 +1078,7 @@ contract DeployImplementationsInterop is DeployImplementations { address(opcmImpl), abi.encodeWithSelector(opcmImpl.initialize.selector, initializerInputs) ); - proxy.changeAdmin(address(proxyAdmin)); // transfer ownership of Proxy contract to the ProxyAdmin contract + proxy.changeAdmin(opcmProxyOwner); // transfer ownership of Proxy contract to the ProxyAdmin contract vm.stopBroadcast(); opcmProxy_ = OPContractsManagerInterop(address(proxy)); @@ -1109,8 +1146,8 @@ contract DeployImplementationsInterop is DeployImplementations { public override { - SuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); - ProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); + ISuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); + IProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); vm.broadcast(msg.sender); // TODO: Eventually we will want to select the correct implementation based on the release. diff --git a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol index 25b3447ad723..152885170cb0 100644 --- a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol @@ -216,9 +216,11 @@ contract DeployOPChainOutput is BaseDeployIO { address(_anchorStateRegistryImpl), // address(_faultDisputeGame), address(_permissionedDisputeGame), - address(_delayedWETHPermissionedGameProxy), - address(_delayedWETHPermissionlessGameProxy) + address(_delayedWETHPermissionedGameProxy) ); + // TODO: Eventually switch from Permissioned to Permissionless. Add this address back in. + // address(_delayedWETHPermissionlessGameProxy) + DeployUtils.assertValidContractAddresses(Solarray.extend(addrs1, addrs2)); assertValidDeploy(_doi); @@ -295,7 +297,8 @@ contract DeployOPChainOutput is BaseDeployIO { } function delayedWETHPermissionlessGameProxy() public view returns (DelayedWETH) { - DeployUtils.assertValidContractAddress(address(_delayedWETHPermissionlessGameProxy)); + // TODO: Eventually switch from Permissioned to Permissionless. Add this check back in. 
+ // DeployUtils.assertValidContractAddress(address(_delayedWETHPermissionlessGameProxy)); return _delayedWETHPermissionlessGameProxy; } @@ -304,7 +307,7 @@ contract DeployOPChainOutput is BaseDeployIO { function assertValidDeploy(DeployOPChainInput _doi) internal { assertValidAnchorStateRegistryImpl(_doi); assertValidAnchorStateRegistryProxy(_doi); - assertValidDelayedWETHs(_doi); + assertValidDelayedWETH(_doi); assertValidDisputeGameFactory(_doi); assertValidL1CrossDomainMessenger(_doi); assertValidL1ERC721Bridge(_doi); @@ -319,7 +322,13 @@ contract DeployOPChainOutput is BaseDeployIO { PermissionedDisputeGame game = permissionedDisputeGame(); require(GameType.unwrap(game.gameType()) == GameType.unwrap(GameTypes.PERMISSIONED_CANNON), "DPG-10"); - require(Claim.unwrap(game.absolutePrestate()) == bytes32(hex"dead"), "DPG-20"); + // This hex string is the absolutePrestate of the latest op-program release, see where the + // `EXPECTED_PRESTATE_HASH` is defined in `config.yml`. + require( + Claim.unwrap(game.absolutePrestate()) + == bytes32(hex"038512e02c4c3f7bdaec27d00edf55b7155e0905301e1a88083e4e0a6764d54c"), + "DPG-20" + ); OPContractsManager opcm = _doi.opcmProxy(); (address mips,) = opcm.implementations(opcm.latestRelease(), "MIPS"); @@ -462,7 +471,7 @@ contract DeployOPChainOutput is BaseDeployIO { require(vm.load(address(portal), bytes32(uint256(61))) == bytes32(0)); } - function assertValidDisputeGameFactory(DeployOPChainInput) internal view { + function assertValidDisputeGameFactory(DeployOPChainInput _doi) internal view { DisputeGameFactory factory = disputeGameFactoryProxy(); DeployUtils.assertInitialized({ _contractAddress: address(factory), _slot: 0, _offset: 0 }); @@ -470,11 +479,18 @@ contract DeployOPChainOutput is BaseDeployIO { require( address(factory.gameImpls(GameTypes.PERMISSIONED_CANNON)) == address(permissionedDisputeGame()), "DF-10" ); - require(factory.owner() == address(opChainProxyAdmin()), "DF-20"); + require(factory.owner() == address(_doi.opChainProxyAdminOwner()), "DF-20"); } - function assertValidDelayedWETHs(DeployOPChainInput) internal view { - // TODO add in once FP support is added. + function assertValidDelayedWETH(DeployOPChainInput _doi) internal { + DelayedWETH permissioned = delayedWETHPermissionedGameProxy(); + + require(permissioned.owner() == address(_doi.opChainProxyAdminOwner()), "DWETH-10"); + + Proxy proxy = Proxy(payable(address(permissioned))); + vm.prank(address(0)); + address admin = proxy.admin(); + require(admin == address(opChainProxyAdmin()), "DWETH-20"); } } @@ -518,7 +534,8 @@ contract DeployOPChain is Script { // vm.label(address(deployOutput.faultDisputeGame), "faultDisputeGame"); vm.label(address(deployOutput.permissionedDisputeGame), "permissionedDisputeGame"); vm.label(address(deployOutput.delayedWETHPermissionedGameProxy), "delayedWETHPermissionedGameProxy"); - vm.label(address(deployOutput.delayedWETHPermissionlessGameProxy), "delayedWETHPermissionlessGameProxy"); + // TODO: Eventually switch from Permissioned to Permissionless. 
+ // vm.label(address(deployOutput.delayedWETHPermissionlessGameProxy), "delayedWETHPermissionlessGameProxy"); _doo.set(_doo.opChainProxyAdmin.selector, address(deployOutput.opChainProxyAdmin)); _doo.set(_doo.addressManager.selector, address(deployOutput.addressManager)); @@ -536,9 +553,11 @@ contract DeployOPChain is Script { // _doo.set(_doo.faultDisputeGame.selector, address(deployOutput.faultDisputeGame)); _doo.set(_doo.permissionedDisputeGame.selector, address(deployOutput.permissionedDisputeGame)); _doo.set(_doo.delayedWETHPermissionedGameProxy.selector, address(deployOutput.delayedWETHPermissionedGameProxy)); - _doo.set( - _doo.delayedWETHPermissionlessGameProxy.selector, address(deployOutput.delayedWETHPermissionlessGameProxy) - ); + // TODO: Eventually switch from Permissioned to Permissionless. + // _doo.set( + // _doo.delayedWETHPermissionlessGameProxy.selector, + // address(deployOutput.delayedWETHPermissionlessGameProxy) + // ); _doo.checkOutput(_doi); } diff --git a/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol b/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol index b26755c755ec..c9e1b23bf230 100644 --- a/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol +++ b/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol @@ -195,7 +195,6 @@ contract DeploySuperchainOutput is BaseDeployIO { require(actualSuperchainConfigImpl == address(_superchainConfigImpl), "100"); require(actualProtocolVersionsImpl == address(_protocolVersionsImpl), "200"); - // TODO Also add the assertions for the implementation contracts from ChainAssertions.sol assertValidDeploy(_dsi); } diff --git a/packages/contracts-bedrock/scripts/checks/check-foundry-install.sh b/packages/contracts-bedrock/scripts/checks/check-foundry-install.sh index ccd337e958e7..a2093e936f3f 100755 --- a/packages/contracts-bedrock/scripts/checks/check-foundry-install.sh +++ b/packages/contracts-bedrock/scripts/checks/check-foundry-install.sh @@ -5,6 +5,13 @@ CONTRACTS_BASE=$(dirname "$(dirname "$SCRIPT_DIR")") MONOREPO_BASE=$(dirname "$(dirname "$CONTRACTS_BASE")") VERSIONS_FILE="${MONOREPO_BASE}/versions.json" +if ! command -v jq &> /dev/null +then + # shellcheck disable=SC2006 + echo "Please install jq" >&2 + exit 1 +fi + if ! command -v forge &> /dev/null then # shellcheck disable=SC2006 @@ -34,5 +41,5 @@ if [ "$INSTALLED_VERSION" = "$EXPECTED_VERSION" ]; then else echo "Mismatch between installed Foundry version ($INSTALLED_VERSION) and expected version ($EXPECTED_VERSION)." echo "Your version of Foundry may either not be up to date, or it could be a later version." - echo "Running just update-foundry will install the expected version." + echo "Running 'just update-foundry' from the repository root will install the expected version." fi diff --git a/packages/contracts-bedrock/scripts/checks/check-interfaces.sh b/packages/contracts-bedrock/scripts/checks/check-interfaces.sh index 2df1045ef101..2a4a566f34e9 100755 --- a/packages/contracts-bedrock/scripts/checks/check-interfaces.sh +++ b/packages/contracts-bedrock/scripts/checks/check-interfaces.sh @@ -45,29 +45,31 @@ EXCLUDE_CONTRACTS=( "ISchemaResolver" "ISchemaRegistry" - # Kontrol - "KontrolCheatsBase" + # TODO: Interfaces that need to be fixed are below this line + # ---------------------------------------------------------- - # TODO: Interfaces that need to be fixed - "IOptimismSuperchainERC20" - "IOptimismMintableERC721" - "IOptimismMintableERC20" - "ILegacyMintableERC20" + # Inlined interface, needs to be replaced. 
"IInitializable" + + # Missing various functions. "IPreimageOracle" - "ICrossL2Inbox" - "IL2ToL2CrossDomainMessenger" + "ILegacyMintableERC20" + "IOptimismMintableERC20" + "IOptimismMintableERC721" + "IOptimismSuperchainERC20" + + # Doesn't start with "I" "MintableAndBurnable" + "KontrolCheatsBase" + + # Currently inherit from interface, needs to be fixed. "IWETH" "IDelayedWETH" - "IResolvedDelegateProxy" + "IL2ToL2CrossDomainMessenger" + "ICrossL2Inbox" - # TODO: Kontrol interfaces that need to be removed - "IL1ERC721Bridge" - "IL1StandardBridge" - "IL1CrossDomainMessenger" - "ISuperchainConfig" - "IOptimismPortal" + # Solidity complains about receive but contract doens't have it. + "IResolvedDelegateProxy" ) # Find all JSON files in the forge-artifacts folder diff --git a/packages/contracts-bedrock/scripts/checks/check-semver-natspec-match.sh b/packages/contracts-bedrock/scripts/checks/check-semver-natspec-match.sh deleted file mode 100755 index de4de3f8497a..000000000000 --- a/packages/contracts-bedrock/scripts/checks/check-semver-natspec-match.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# Grab the directory of the contracts-bedrock package -SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -CONTRACTS_BASE=$(dirname "$(dirname "$SCRIPT_DIR")") -ARTIFACTS_DIR="$CONTRACTS_BASE/forge-artifacts" -CONTRACTS_DIR="$CONTRACTS_BASE/src" - -# Load semver-utils -# shellcheck source=/dev/null -source "$SCRIPT_DIR/utils/semver-utils.sh" - -# Flag to track if any errors are detected -has_errors=false - -# Iterate through each artifact file -for artifact_file in "$ARTIFACTS_DIR"/**/*.json; do - # Get the contract name and find the corresponding source file - contract_name=$(basename "$artifact_file" .json) - contract_file=$(find "$CONTRACTS_DIR" -name "$contract_name.sol") - - # Try to extract version as a constant - raw_metadata=$(jq -r '.rawMetadata' "$artifact_file") - artifact_version=$(echo "$raw_metadata" | jq -r '.output.devdoc.stateVariables.version."custom:semver"') - - is_constant=true - if [ "$artifact_version" = "null" ]; then - # If not found as a constant, try to extract as a function - artifact_version=$(echo "$raw_metadata" | jq -r '.output.devdoc.methods."version()"."custom:semver"') - is_constant=false - fi - - # If @custom:semver is not found in either location, skip this file - if [ "$artifact_version" = "null" ]; then - continue - fi - - # If source file is not found, report an error - if [ -z "$contract_file" ]; then - echo "❌ $contract_name: Source file not found" - continue - fi - - # Extract version from source based on whether it's a constant or function - if [ "$is_constant" = true ]; then - source_version=$(extract_constant_version "$contract_file") - else - source_version=$(extract_function_version "$contract_file") - fi - - # If source version is not found, report an error - if [ "$source_version" = "" ]; then - echo "❌ Error: failed to find version string for $contract_name" - echo " this is probably a bug in check-contract-semver.sh" - echo " please report or fix the issue if possible" - has_errors=true - fi - - # Compare versions - if [ "$source_version" != "$artifact_version" ]; then - echo "❌ Error: $contract_name has different semver in code and devdoc" - echo " Code: $source_version" - echo " Devdoc: $artifact_version" - has_errors=true - else - echo "✅ $contract_name: code: $source_version, devdoc: $artifact_version" - fi -done - -# If any errors were detected, exit with a non-zero status -if [ 
"$has_errors" = true ]; then - exit 1 -fi diff --git a/packages/contracts-bedrock/scripts/checks/semver-natspec/main.go b/packages/contracts-bedrock/scripts/checks/semver-natspec/main.go new file mode 100644 index 000000000000..d1e2153c02ef --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/semver-natspec/main.go @@ -0,0 +1,215 @@ +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "runtime" + "strings" + "sync" + "sync/atomic" +) + +type ArtifactsWrapper struct { + RawMetadata string `json:"rawMetadata"` +} + +type Artifacts struct { + Output struct { + Devdoc struct { + StateVariables struct { + Version struct { + Semver string `json:"custom:semver"` + } `json:"version"` + } `json:"stateVariables,omitempty"` + Methods struct { + Version struct { + Semver string `json:"custom:semver"` + } `json:"version()"` + } `json:"methods,omitempty"` + } `json:"devdoc"` + } `json:"output"` +} + +var ConstantVersionPattern = regexp.MustCompile(`string.*constant.*version\s+=\s+"([^"]+)";`) + +var FunctionVersionPattern = regexp.MustCompile(`^\s+return\s+"((?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:-(?P(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?)";$`) + +var InteropVersionPattern = regexp.MustCompile(`^\s+return\s+string\.concat\(super\.version\(\), "((.*)\+interop(.*)?)"\);`) + +func main() { + if err := run(); err != nil { + writeStderr("an error occurred: %v", err) + os.Exit(1) + } +} + +func writeStderr(msg string, args ...any) { + _, _ = fmt.Fprintf(os.Stderr, msg+"\n", args...) +} + +func run() error { + cwd, err := os.Getwd() + if err != nil { + return fmt.Errorf("failed to get current working directory: %w", err) + } + + writeStderr("working directory: %s", cwd) + + artifactsDir := filepath.Join(cwd, "forge-artifacts") + srcDir := filepath.Join(cwd, "src") + + artifactFiles, err := glob(artifactsDir, ".json") + if err != nil { + return fmt.Errorf("failed to get artifact files: %w", err) + } + contractFiles, err := glob(srcDir, ".sol") + if err != nil { + return fmt.Errorf("failed to get contract files: %w", err) + } + + var hasErr int32 + var outMtx sync.Mutex + fail := func(msg string, args ...any) { + outMtx.Lock() + writeStderr("❌ "+msg, args...) 
+ outMtx.Unlock() + atomic.StoreInt32(&hasErr, 1) + } + + sem := make(chan struct{}, runtime.NumCPU()) + for contractName, artifactPath := range artifactFiles { + contractName := contractName + artifactPath := artifactPath + + sem <- struct{}{} + + go func() { + defer func() { + <-sem + }() + + af, err := os.Open(artifactPath) + if err != nil { + fail("%s: failed to open contract artifact: %v", contractName, err) + return + } + defer af.Close() + + var wrapper ArtifactsWrapper + if err := json.NewDecoder(af).Decode(&wrapper); err != nil { + fail("%s: failed to parse artifact file: %v", contractName, err) + return + } + + if wrapper.RawMetadata == "" { + return + } + + var artifactData Artifacts + if err := json.Unmarshal([]byte(wrapper.RawMetadata), &artifactData); err != nil { + fail("%s: failed to unwrap artifact metadata: %v", contractName, err) + return + } + + artifactVersion := artifactData.Output.Devdoc.StateVariables.Version.Semver + + isConstant := true + if artifactData.Output.Devdoc.StateVariables.Version.Semver == "" { + artifactVersion = artifactData.Output.Devdoc.Methods.Version.Semver + isConstant = false + } + + if artifactVersion == "" { + return + } + + contractPath := contractFiles[contractName] + if contractPath == "" { + fail("%s: Source file not found", contractName) + return + } + + cf, err := os.Open(contractPath) + if err != nil { + fail("%s: failed to open contract source: %v", contractName, err) + return + } + defer cf.Close() + + sourceData, err := io.ReadAll(cf) + if err != nil { + fail("%s: failed to read contract source: %v", contractName, err) + return + } + + var sourceVersion string + + if isConstant { + sourceVersion = findLine(sourceData, ConstantVersionPattern) + } else { + sourceVersion = findLine(sourceData, FunctionVersionPattern) + } + + // Need to define a special case for interop contracts since they technically + // use an invalid semver format. Checking for sourceVersion == "" allows the + // team to update the format to a valid semver format in the future without + // needing to change this program. 
+ if sourceVersion == "" && strings.HasSuffix(contractName, "Interop") { + sourceVersion = findLine(sourceData, InteropVersionPattern) + } + + if sourceVersion == "" { + fail("%s: version not found in source", contractName) + return + } + + if sourceVersion != artifactVersion { + fail("%s: version mismatch: source=%s, artifact=%s", contractName, sourceVersion, artifactVersion) + return + } + + _, _ = fmt.Fprintf(os.Stderr, "✅ %s: code: %s, artifact: %s\n", contractName, sourceVersion, artifactVersion) + }() + } + + for i := 0; i < cap(sem); i++ { + sem <- struct{}{} + } + + if atomic.LoadInt32(&hasErr) == 1 { + return fmt.Errorf("semver check failed, see logs above") + } + + return nil +} + +func glob(dir string, ext string) (map[string]string, error) { + out := make(map[string]string) + err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if !info.IsDir() && filepath.Ext(path) == ext { + out[strings.TrimSuffix(filepath.Base(path), ext)] = path + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to walk directory: %w", err) + } + return out, nil +} + +func findLine(in []byte, pattern *regexp.Regexp) string { + scanner := bufio.NewScanner(bytes.NewReader(in)) + for scanner.Scan() { + match := pattern.FindStringSubmatch(scanner.Text()) + if len(match) > 0 { + return match[1] + } + } + return "" +} diff --git a/packages/contracts-bedrock/scripts/checks/semver-natspec/main_test.go b/packages/contracts-bedrock/scripts/checks/semver-natspec/main_test.go new file mode 100644 index 000000000000..7a8872d76d78 --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/semver-natspec/main_test.go @@ -0,0 +1,124 @@ +package main + +import ( + "regexp" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRegexes(t *testing.T) { + t.Run("ConstantVersionPattern", func(t *testing.T) { + testRegex(t, ConstantVersionPattern, []regexTest{ + { + name: "constant version", + input: `string constant version = "1.2.3";`, + capture: "1.2.3", + }, + { + name: "constant version with weird spaces", + input: ` string constant version = "1.2.3";`, + capture: "1.2.3", + }, + { + name: "constant version with visibility", + input: `string public constant version = "1.2.3";`, + capture: "1.2.3", + }, + { + name: "different variable name", + input: `string constant VERSION = "1.2.3";`, + capture: "", + }, + { + name: "different type", + input: `uint constant version = 1;`, + capture: "", + }, + { + name: "not constant", + input: `string version = "1.2.3";`, + capture: "", + }, + { + name: "unterminated", + input: `string constant version = "1.2.3"`, + capture: "", + }, + }) + }) + + t.Run("FunctionVersionPattern", func(t *testing.T) { + testRegex(t, FunctionVersionPattern, []regexTest{ + { + name: "function version", + input: ` return "1.2.3";`, + capture: "1.2.3", + }, + { + name: "function version with weird spaces", + input: ` return "1.2.3";`, + capture: "1.2.3", + }, + { + name: "function version with prerelease", + input: ` return "1.2.3-alpha.1";`, + capture: "1.2.3-alpha.1", + }, + { + name: "invalid semver", + input: ` return "1.2.cabdab";`, + capture: "", + }, + { + name: "not a return statement", + input: `function foo()`, + capture: "", + }, + }) + }) + + t.Run("InteropVersionPattern", func(t *testing.T) { + testRegex(t, InteropVersionPattern, []regexTest{ + { + name: "interop version", + input: ` return string.concat(super.version(), "+interop");`, + capture: "+interop", + }, + { + name: "interop version but as a valid semver", + input: ` 
return string.concat(super.version(), "0.0.0+interop");`, + capture: "0.0.0+interop", + }, + { + name: "not an interop version", + input: ` return string.concat(super.version(), "hello!");`, + capture: "", + }, + { + name: "invalid syntax", + input: ` return string.concat(super.version(), "0.0.0+interop`, + capture: "", + }, + { + name: "something else is concatted", + input: ` return string.concat("superduper", "mart");`, + capture: "", + }, + }) + }) +} + +type regexTest struct { + name string + input string + capture string +} + +func testRegex(t *testing.T, re *regexp.Regexp, tests []regexTest) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require.Equal(t, test.capture, findLine([]byte(test.input), re)) + }) + } +} diff --git a/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol b/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol index 7e5a9164f466..af1b939014b0 100644 --- a/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol +++ b/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol @@ -10,9 +10,6 @@ import { DeployConfig } from "scripts/deploy/DeployConfig.s.sol"; import { Deployer } from "scripts/deploy/Deployer.sol"; import { ISystemConfigV0 } from "scripts/interfaces/ISystemConfigV0.sol"; -// Contracts -import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; - // Libraries import { Constants } from "src/libraries/Constants.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index 0787c965e199..02a77678abfe 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -9,12 +9,6 @@ import { stdJson } from "forge-std/StdJson.sol"; import { AlphabetVM } from "test/mocks/AlphabetVM.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; -// Safe -import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol"; -import { OwnerManager } from "safe-contracts/base/OwnerManager.sol"; -import { GnosisSafeProxyFactory as SafeProxyFactory } from "safe-contracts/proxies/GnosisSafeProxyFactory.sol"; -import { Enum as SafeOps } from "safe-contracts/common/Enum.sol"; - // Scripts import { Deployer } from "scripts/deploy/Deployer.sol"; import { Chains } from "scripts/libraries/Chains.sol"; @@ -26,25 +20,19 @@ import { ChainAssertions } from "scripts/deploy/ChainAssertions.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; // Contracts -import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; import { AddressManager } from "src/legacy/AddressManager.sol"; -import { Proxy } from "src/universal/Proxy.sol"; -import { StandardBridge } from "src/universal/StandardBridge.sol"; -import { L1ChugSplashProxy } from "src/legacy/L1ChugSplashProxy.sol"; -import { ResolvedDelegateProxy } from "src/legacy/ResolvedDelegateProxy.sol"; -import { PreimageOracle } from "src/cannon/PreimageOracle.sol"; -import { MIPS } from "src/cannon/MIPS.sol"; -import { MIPS2 } from "src/cannon/MIPS2.sol"; import { StorageSetter } from "src/universal/StorageSetter.sol"; // Libraries import { Constants } from "src/libraries/Constants.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { Types } from "scripts/libraries/Types.sol"; -import "src/dispute/lib/Types.sol"; import { LibClaim, Duration } from "src/dispute/lib/LibUDT.sol"; +import "src/dispute/lib/Types.sol"; // Interfaces +import { IProxy } 
from "src/universal/interfaces/IProxy.sol"; +import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; import { IOptimismPortalInterop } from "src/L1/interfaces/IOptimismPortalInterop.sol"; @@ -65,8 +53,13 @@ import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol" import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; +import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; +import { IMIPS2 } from "src/cannon/interfaces/IMIPS2.sol"; import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; +import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; +import { IL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; +import { IResolvedDelegateProxy } from "src/legacy/interfaces/IResolvedDelegateProxy.sol"; /// @title Deploy /// @notice Script used to deploy a bedrock system. The entire system is deployed within the `run` function. @@ -197,80 +190,17 @@ contract Deploy is Deployer { // State Changing Helper Functions // //////////////////////////////////////////////////////////////// - /// @notice Gets the address of the SafeProxyFactory and Safe singleton for use in deploying a new GnosisSafe. - function _getSafeFactory() internal returns (SafeProxyFactory safeProxyFactory_, Safe safeSingleton_) { - if (getAddress("SafeProxyFactory") != address(0)) { - // The SafeProxyFactory is already saved, we can just use it. - safeProxyFactory_ = SafeProxyFactory(getAddress("SafeProxyFactory")); - safeSingleton_ = Safe(getAddress("SafeSingleton")); - return (safeProxyFactory_, safeSingleton_); - } - - // These are the standard create2 deployed contracts. First we'll check if they are deployed, - // if not we'll deploy new ones, though not at these addresses. - address safeProxyFactory = 0xa6B71E26C5e0845f74c812102Ca7114b6a896AB2; - address safeSingleton = 0xd9Db270c1B5E3Bd161E8c8503c55cEABeE709552; - - safeProxyFactory.code.length == 0 - ? safeProxyFactory_ = new SafeProxyFactory() - : safeProxyFactory_ = SafeProxyFactory(safeProxyFactory); - - safeSingleton.code.length == 0 ? safeSingleton_ = new Safe() : safeSingleton_ = Safe(payable(safeSingleton)); - - save("SafeProxyFactory", address(safeProxyFactory_)); - save("SafeSingleton", address(safeSingleton_)); - } - - /// @notice Make a call from the Safe contract to an arbitrary address with arbitrary data - function _callViaSafe(Safe _safe, address _target, bytes memory _data) internal { - // This is the signature format used when the caller is also the signer. 
- bytes memory signature = abi.encodePacked(uint256(uint160(msg.sender)), bytes32(0), uint8(1)); - - _safe.execTransaction({ - to: _target, - value: 0, - data: _data, - operation: SafeOps.Operation.Call, - safeTxGas: 0, - baseGas: 0, - gasPrice: 0, - gasToken: address(0), - refundReceiver: payable(address(0)), - signatures: signature - }); - } - - /// @notice Call from the Safe contract to the Proxy Admin's upgrade and call method - function _upgradeAndCallViaSafe(address _proxy, address _implementation, bytes memory _innerCallData) internal { - address proxyAdmin = mustGetAddress("ProxyAdmin"); - - bytes memory data = - abi.encodeCall(ProxyAdmin.upgradeAndCall, (payable(_proxy), _implementation, _innerCallData)); - - Safe safe = Safe(mustGetAddress("SystemOwnerSafe")); - _callViaSafe({ _safe: safe, _target: proxyAdmin, _data: data }); - } - /// @notice Transfer ownership of the ProxyAdmin contract to the final system owner - function transferProxyAdminOwnership() public broadcast { - ProxyAdmin proxyAdmin = ProxyAdmin(mustGetAddress("ProxyAdmin")); + function transferProxyAdminOwnership(bool _isSuperchain) public broadcast { + string memory proxyAdminName = _isSuperchain ? "SuperchainProxyAdmin" : "ProxyAdmin"; + IProxyAdmin proxyAdmin = IProxyAdmin(mustGetAddress(proxyAdminName)); address owner = proxyAdmin.owner(); - address safe = mustGetAddress("SystemOwnerSafe"); - if (owner != safe) { - proxyAdmin.transferOwnership(safe); - console.log("ProxyAdmin ownership transferred to Safe at: %s", safe); - } - } - /// @notice Transfer ownership of a Proxy to the ProxyAdmin contract - /// This is expected to be used in conjusting with deployERC1967ProxyWithOwner after setup actions - /// have been performed on the proxy. - /// @param _name The name of the proxy to transfer ownership of. - function transferProxyToProxyAdmin(string memory _name) public broadcast { - Proxy proxy = Proxy(mustGetAddress(_name)); - address proxyAdmin = mustGetAddress("ProxyAdmin"); - proxy.changeAdmin(proxyAdmin); - console.log("Proxy %s ownership transferred to ProxyAdmin at: %s", _name, proxyAdmin); + address finalSystemOwner = cfg.finalSystemOwner(); + if (owner != finalSystemOwner) { + proxyAdmin.transferOwnership(finalSystemOwner); + console.log("ProxyAdmin ownership transferred to final system owner at: %s", finalSystemOwner); + } } //////////////////////////////////////////////////////////////// @@ -301,11 +231,11 @@ contract Deploy is Deployer { console.log("Deploying a fresh OP Stack with existing SuperchainConfig and ProtocolVersions"); - Proxy scProxy = Proxy(_superchainConfigProxy); + IProxy scProxy = IProxy(_superchainConfigProxy); save("SuperchainConfig", scProxy.implementation()); save("SuperchainConfigProxy", _superchainConfigProxy); - Proxy pvProxy = Proxy(_protocolVersionsProxy); + IProxy pvProxy = IProxy(_protocolVersionsProxy); save("ProtocolVersions", pvProxy.implementation()); save("ProtocolVersionsProxy", _protocolVersionsProxy); @@ -335,19 +265,14 @@ contract Deploy is Deployer { /// @notice Internal function containing the deploy logic. function _run(bool _needsSuperchain) internal { console.log("start of L1 Deploy!"); - deploySafe("SystemOwnerSafe"); - console.log("deployed Safe!"); - - // Deploy a new ProxyAdmin and AddressManager - // This proxy will be used on the SuperchainConfig and ProtocolVersions contracts, as well as the contracts - // in the OP Chain system. 
- setupAdmin(); if (_needsSuperchain) { + deployProxyAdmin({ _isSuperchain: true }); setupSuperchain(); console.log("set up superchain!"); } + setupOpChainAdmin(); if (cfg.useAltDA()) { bytes32 typeHash = keccak256(bytes(cfg.daCommitmentType())); bytes32 keccakHash = keccak256(bytes("KeccakCommitment")); @@ -355,6 +280,7 @@ contract Deploy is Deployer { setupOpAltDA(); } } + setupOpChain(); console.log("set up op chain!"); } @@ -364,10 +290,9 @@ contract Deploy is Deployer { //////////////////////////////////////////////////////////////// /// @notice Deploy the address manager and proxy admin contracts. - function setupAdmin() public { + function setupOpChainAdmin() public { deployAddressManager(); - deployProxyAdmin(); - transferProxyAdminOwnership(); + deployProxyAdmin({ _isSuperchain: false }); } /// @notice Deploy a full system with a new SuperchainConfig @@ -378,12 +303,12 @@ contract Deploy is Deployer { console.log("Setting up Superchain"); // Deploy the SuperchainConfigProxy - deployERC1967Proxy("SuperchainConfigProxy"); + deployERC1967ProxyWithOwner("SuperchainConfigProxy", mustGetAddress("SuperchainProxyAdmin")); deploySuperchainConfig(); initializeSuperchainConfig(); // Deploy the ProtocolVersionsProxy - deployERC1967Proxy("ProtocolVersionsProxy"); + deployERC1967ProxyWithOwner("ProtocolVersionsProxy", mustGetAddress("SuperchainProxyAdmin")); deployProtocolVersions(); initializeProtocolVersions(); } @@ -394,7 +319,6 @@ contract Deploy is Deployer { // Ensure that the requisite contracts are deployed mustGetAddress("SuperchainConfigProxy"); - mustGetAddress("SystemOwnerSafe"); mustGetAddress("AddressManager"); mustGetAddress("ProxyAdmin"); @@ -410,6 +334,7 @@ contract Deploy is Deployer { transferDisputeGameFactoryOwnership(); transferDelayedWETHOwnership(); + transferProxyAdminOwnership({ _isSuperchain: false }); } /// @notice Deploy all of the OP Chain specific contracts @@ -492,70 +417,6 @@ contract Deploy is Deployer { // Non-Proxied Deployment Functions // //////////////////////////////////////////////////////////////// - /// @notice Deploy the Safe - function deploySafe(string memory _name) public broadcast returns (address addr_) { - address[] memory owners = new address[](0); - addr_ = deploySafe(_name, owners, 1, true); - } - - /// @notice Deploy a new Safe contract. If the keepDeployer option is used to enable further setup actions, then - /// the removeDeployerFromSafe() function should be called on that safe after setup is complete. - /// Note this function does not have the broadcast modifier. - /// @param _name The name of the Safe to deploy. - /// @param _owners The owners of the Safe. - /// @param _threshold The threshold of the Safe. - /// @param _keepDeployer Wether or not the deployer address will be added as an owner of the Safe. - function deploySafe( - string memory _name, - address[] memory _owners, - uint256 _threshold, - bool _keepDeployer - ) - public - returns (address addr_) - { - bytes32 salt = keccak256(abi.encode(_name, _implSalt())); - console.log("Deploying safe: %s with salt %s", _name, vm.toString(salt)); - (SafeProxyFactory safeProxyFactory, Safe safeSingleton) = _getSafeFactory(); - - if (_keepDeployer) { - address[] memory expandedOwners = new address[](_owners.length + 1); - // By always adding msg.sender first we know that the previousOwner will be SENTINEL_OWNERS, which makes it - // easier to call removeOwner later. 
- expandedOwners[0] = msg.sender; - for (uint256 i = 0; i < _owners.length; i++) { - expandedOwners[i + 1] = _owners[i]; - } - _owners = expandedOwners; - } - - bytes memory initData = abi.encodeCall( - Safe.setup, (_owners, _threshold, address(0), hex"", address(0), address(0), 0, payable(address(0))) - ); - addr_ = address(safeProxyFactory.createProxyWithNonce(address(safeSingleton), initData, uint256(salt))); - - save(_name, addr_); - console.log("New safe: %s deployed at %s\n Note that this safe is owned by the deployer key", _name, addr_); - } - - /// @notice If the keepDeployer option was used with deploySafe(), this function can be used to remove the deployer. - /// Note this function does not have the broadcast modifier. - function removeDeployerFromSafe(string memory _name, uint256 _newThreshold) public { - Safe safe = Safe(mustGetAddress(_name)); - - // The sentinel address is used to mark the start and end of the linked list of owners in the Safe. - address sentinelOwners = address(0x1); - - // Because deploySafe() always adds msg.sender first (if keepDeployer is true), we know that the previousOwner - // will be sentinelOwners. - _callViaSafe({ - _safe: safe, - _target: address(safe), - _data: abi.encodeCall(OwnerManager.removeOwner, (sentinelOwners, msg.sender, _newThreshold)) - }); - console.log("Removed deployer owner from ", _name); - } - /// @notice Deploy the AddressManager function deployAddressManager() public broadcast returns (address addr_) { console.log("Deploying AddressManager"); @@ -568,20 +429,33 @@ contract Deploy is Deployer { } /// @notice Deploy the ProxyAdmin - function deployProxyAdmin() public broadcast returns (address addr_) { - console.log("Deploying ProxyAdmin"); - ProxyAdmin admin = new ProxyAdmin({ _owner: msg.sender }); - require(admin.owner() == msg.sender); + function deployProxyAdmin(bool _isSuperchain) public broadcast returns (address addr_) { + string memory proxyAdminName = _isSuperchain ? "SuperchainProxyAdmin" : "ProxyAdmin"; - AddressManager addressManager = AddressManager(mustGetAddress("AddressManager")); - if (admin.addressManager() != addressManager) { - admin.setAddressManager(addressManager); - } + console.log("Deploying %s", proxyAdminName); - require(admin.addressManager() == addressManager); + // Include the proxyAdminName in the salt to prevent a create2 collision when both the Superchain and an OP + // Chain are being setup. 
+ IProxyAdmin admin = IProxyAdmin( + DeployUtils.create2AndSave({ + _save: this, + _salt: keccak256(abi.encode(_implSalt(), proxyAdminName)), + _name: "ProxyAdmin", + _nick: proxyAdminName, + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxyAdmin.__constructor__, (msg.sender))) + }) + ); + require(admin.owner() == msg.sender); - save("ProxyAdmin", address(admin)); - console.log("ProxyAdmin deployed at %s", address(admin)); + // The AddressManager is only required for OP Chains + if (!_isSuperchain) { + IAddressManager addressManager = IAddressManager(mustGetAddress("AddressManager")); + if (admin.addressManager() != addressManager) { + admin.setAddressManager(addressManager); + } + require(admin.addressManager() == addressManager); + } + console.log("%s deployed at %s", proxyAdminName, address(admin)); addr_ = address(admin); } @@ -601,26 +475,36 @@ contract Deploy is Deployer { /// @notice Deploy the L1StandardBridgeProxy using a ChugSplashProxy function deployL1StandardBridgeProxy() public broadcast returns (address addr_) { - console.log("Deploying proxy for L1StandardBridge"); address proxyAdmin = mustGetAddress("ProxyAdmin"); - L1ChugSplashProxy proxy = new L1ChugSplashProxy(proxyAdmin); - + IL1ChugSplashProxy proxy = IL1ChugSplashProxy( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "L1ChugSplashProxy", + _nick: "L1StandardBridgeProxy", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1ChugSplashProxy.__constructor__, (proxyAdmin))) + }) + ); require(EIP1967Helper.getAdmin(address(proxy)) == proxyAdmin); - - save("L1StandardBridgeProxy", address(proxy)); - console.log("L1StandardBridgeProxy deployed at %s", address(proxy)); addr_ = address(proxy); } /// @notice Deploy the L1CrossDomainMessengerProxy using a ResolvedDelegateProxy function deployL1CrossDomainMessengerProxy() public broadcast returns (address addr_) { - console.log("Deploying proxy for L1CrossDomainMessenger"); - AddressManager addressManager = AddressManager(mustGetAddress("AddressManager")); - ResolvedDelegateProxy proxy = new ResolvedDelegateProxy(addressManager, "OVM_L1CrossDomainMessenger"); - - save("L1CrossDomainMessengerProxy", address(proxy)); - console.log("L1CrossDomainMessengerProxy deployed at %s", address(proxy)); - + IResolvedDelegateProxy proxy = IResolvedDelegateProxy( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "ResolvedDelegateProxy", + _nick: "L1CrossDomainMessengerProxy", + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IResolvedDelegateProxy.__constructor__, + (IAddressManager(mustGetAddress("AddressManager")), "OVM_L1CrossDomainMessenger") + ) + ) + }) + ); addr_ = address(proxy); } @@ -643,27 +527,32 @@ contract Deploy is Deployer { broadcast returns (address addr_) { - console.log(string.concat("Deploying ERC1967 proxy for ", _name)); - Proxy proxy = new Proxy({ _admin: _proxyOwner }); - + IProxy proxy = IProxy( + DeployUtils.create2AndSave({ + _save: this, + _salt: keccak256(abi.encode(_implSalt(), _name)), + _name: "Proxy", + _nick: _name, + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxy.__constructor__, (_proxyOwner))) + }) + ); require(EIP1967Helper.getAdmin(address(proxy)) == _proxyOwner); - - save(_name, address(proxy)); - console.log(" at %s", address(proxy)); addr_ = address(proxy); } /// @notice Deploy the DataAvailabilityChallengeProxy function deployDataAvailabilityChallengeProxy() public broadcast returns (address addr_) { - console.log("Deploying proxy for 
DataAvailabilityChallenge"); address proxyAdmin = mustGetAddress("ProxyAdmin"); - Proxy proxy = new Proxy({ _admin: proxyAdmin }); - + IProxy proxy = IProxy( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "Proxy", + _nick: "DataAvailabilityChallengeProxy", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxy.__constructor__, (proxyAdmin))) + }) + ); require(EIP1967Helper.getAdmin(address(proxy)) == proxyAdmin); - - save("DataAvailabilityChallengeProxy", address(proxy)); - console.log("DataAvailabilityChallengeProxy deployed at %s", address(proxy)); - addr_ = address(proxy); } @@ -678,7 +567,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "SuperchainConfig", - _args: abi.encodeCall(ISuperchainConfig.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(ISuperchainConfig.__constructor__, ())) }) ); @@ -694,7 +583,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "L1CrossDomainMessenger", - _args: abi.encodeCall(IL1CrossDomainMessenger.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1CrossDomainMessenger.__constructor__, ())) }) ); @@ -718,7 +607,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "OptimismPortal", - _args: abi.encodeCall(IOptimismPortal.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IOptimismPortal.__constructor__, ())) }); // Override the `OptimismPortal` contract to the deployed implementation. This is necessary @@ -778,7 +667,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "L2OutputOracle", - _args: abi.encodeCall(IL2OutputOracle.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IL2OutputOracle.__constructor__, ())) }) ); @@ -804,7 +693,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "OptimismMintableERC20Factory", - _args: abi.encodeCall(IOptimismMintableERC20Factory.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IOptimismMintableERC20Factory.__constructor__, ())) }) ); @@ -825,7 +714,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "DisputeGameFactory", - _args: abi.encodeCall(IDisputeGameFactory.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IDisputeGameFactory.__constructor__, ())) }) ); @@ -872,7 +761,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "ProtocolVersions", - _args: abi.encodeCall(IProtocolVersions.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProtocolVersions.__constructor__, ())) }) ); @@ -888,43 +777,35 @@ contract Deploy is Deployer { /// @notice Deploy the PreimageOracle function deployPreimageOracle() public broadcast returns (address addr_) { - console.log("Deploying PreimageOracle implementation"); - PreimageOracle preimageOracle = new PreimageOracle{ salt: _implSalt() }({ - _minProposalSize: cfg.preimageOracleMinProposalSize(), - _challengePeriod: cfg.preimageOracleChallengePeriod() - }); - save("PreimageOracle", address(preimageOracle)); - console.log("PreimageOracle deployed at %s", address(preimageOracle)); - + IPreimageOracle preimageOracle = IPreimageOracle( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "PreimageOracle", + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IPreimageOracle.__constructor__, + (cfg.preimageOracleMinProposalSize(), cfg.preimageOracleChallengePeriod()) + ) + ) + }) + ); addr_ 
= address(preimageOracle); } /// @notice Deploy Mips VM. Deploys either MIPS or MIPS2 depending on the environment function deployMips() public broadcast returns (address addr_) { - if (Config.useMultithreadedCannon()) { - addr_ = _deployMips2(); - } else { - addr_ = _deployMips(); - } + addr_ = DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: Config.useMultithreadedCannon() ? "MIPS2" : "MIPS", + _args: DeployUtils.encodeConstructor( + abi.encodeCall(IMIPS2.__constructor__, (IPreimageOracle(mustGetAddress("PreimageOracle")))) + ) + }); save("Mips", address(addr_)); } - /// @notice Deploy MIPS - function _deployMips() internal returns (address addr_) { - console.log("Deploying Mips implementation"); - MIPS mips = new MIPS{ salt: _implSalt() }(IPreimageOracle(mustGetAddress("PreimageOracle"))); - console.log("MIPS deployed at %s", address(mips)); - addr_ = address(mips); - } - - /// @notice Deploy MIPS2 - function _deployMips2() internal returns (address addr_) { - console.log("Deploying Mips2 implementation"); - MIPS2 mips2 = new MIPS2{ salt: _implSalt() }(IPreimageOracle(mustGetAddress("PreimageOracle"))); - console.log("MIPS2 deployed at %s", address(mips2)); - addr_ = address(mips2); - } - /// @notice Deploy the AnchorStateRegistry function deployAnchorStateRegistry() public broadcast returns (address addr_) { IAnchorStateRegistry anchorStateRegistry = IAnchorStateRegistry( @@ -951,7 +832,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "SystemConfigInterop", - _args: abi.encodeCall(ISystemConfigInterop.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(ISystemConfigInterop.__constructor__, ())) }); save("SystemConfig", addr_); } else { @@ -959,7 +840,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "SystemConfig", - _args: abi.encodeCall(ISystemConfig.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(ISystemConfig.__constructor__, ())) }); } @@ -978,7 +859,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "L1StandardBridge", - _args: abi.encodeCall(IL1StandardBridge.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1StandardBridge.__constructor__, ())) }) ); @@ -999,7 +880,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "L1ERC721Bridge", - _args: abi.encodeCall(IL1ERC721Bridge.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1ERC721Bridge.__constructor__, ())) }) ); @@ -1016,8 +897,8 @@ contract Deploy is Deployer { /// @notice Transfer ownership of the address manager to the ProxyAdmin function transferAddressManagerOwnership() public broadcast { - console.log("Transferring AddressManager ownership to ProxyAdmin"); - AddressManager addressManager = AddressManager(mustGetAddress("AddressManager")); + console.log("Transferring AddressManager ownership to IProxyAdmin"); + IAddressManager addressManager = IAddressManager(mustGetAddress("AddressManager")); address owner = addressManager.owner(); address proxyAdmin = mustGetAddress("ProxyAdmin"); if (owner != proxyAdmin) { @@ -1035,7 +916,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "DataAvailabilityChallenge", - _args: abi.encodeCall(IDataAvailabilityChallenge.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IDataAvailabilityChallenge.__constructor__, ())) }) ); addr_ = address(dac); @@ -1049,10 +930,12 @@ contract Deploy is Deployer { function 
initializeSuperchainConfig() public broadcast { address payable superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); address payable superchainConfig = mustGetAddress("SuperchainConfig"); - _upgradeAndCallViaSafe({ + + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("SuperchainProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: superchainConfigProxy, _implementation: superchainConfig, - _innerCallData: abi.encodeCall(ISuperchainConfig.initialize, (cfg.superchainConfigGuardian(), false)) + _data: abi.encodeCall(ISuperchainConfig.initialize, (cfg.superchainConfigGuardian(), false)) }); ChainAssertions.checkSuperchainConfig({ _contracts: _proxiesUnstrict(), _cfg: cfg, _isPaused: false }); @@ -1064,10 +947,11 @@ contract Deploy is Deployer { address disputeGameFactoryProxy = mustGetAddress("DisputeGameFactoryProxy"); address disputeGameFactory = mustGetAddress("DisputeGameFactory"); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(disputeGameFactoryProxy), _implementation: disputeGameFactory, - _innerCallData: abi.encodeCall(IDisputeGameFactory.initialize, (msg.sender)) + _data: abi.encodeCall(IDisputeGameFactory.initialize, (msg.sender)) }); string memory version = IDisputeGameFactory(disputeGameFactoryProxy).version(); @@ -1082,10 +966,11 @@ contract Deploy is Deployer { address delayedWETH = mustGetAddress("DelayedWETH"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(delayedWETHProxy), _implementation: delayedWETH, - _innerCallData: abi.encodeCall(IDelayedWETH.initialize, (msg.sender, ISuperchainConfig(superchainConfigProxy))) + _data: abi.encodeCall(IDelayedWETH.initialize, (msg.sender, ISuperchainConfig(superchainConfigProxy))) }); string memory version = IDelayedWETH(payable(delayedWETHProxy)).version(); @@ -1105,10 +990,11 @@ contract Deploy is Deployer { address delayedWETH = mustGetAddress("DelayedWETH"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(delayedWETHProxy), _implementation: delayedWETH, - _innerCallData: abi.encodeCall(IDelayedWETH.initialize, (msg.sender, ISuperchainConfig(superchainConfigProxy))) + _data: abi.encodeCall(IDelayedWETH.initialize, (msg.sender, ISuperchainConfig(superchainConfigProxy))) }); string memory version = IDelayedWETH(payable(delayedWETHProxy)).version(); @@ -1165,10 +1051,11 @@ contract Deploy is Deployer { }) }); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(anchorStateRegistryProxy), _implementation: anchorStateRegistry, - _innerCallData: abi.encodeCall(IAnchorStateRegistry.initialize, (roots, superchainConfig)) + _data: abi.encodeCall(IAnchorStateRegistry.initialize, (roots, superchainConfig)) }); string memory version = IAnchorStateRegistry(payable(anchorStateRegistryProxy)).version(); @@ -1188,10 +1075,11 @@ contract Deploy is Deployer { customGasTokenAddress = cfg.customGasTokenAddress(); } - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(systemConfigProxy), 
_implementation: systemConfig, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( ISystemConfig.initialize, ( cfg.finalSystemOwner(), @@ -1225,7 +1113,7 @@ contract Deploy is Deployer { /// @notice Initialize the L1StandardBridge function initializeL1StandardBridge() public broadcast { console.log("Upgrading and initializing L1StandardBridge proxy"); - ProxyAdmin proxyAdmin = ProxyAdmin(mustGetAddress("ProxyAdmin")); + IProxyAdmin proxyAdmin = IProxyAdmin(mustGetAddress("ProxyAdmin")); address l1StandardBridgeProxy = mustGetAddress("L1StandardBridgeProxy"); address l1StandardBridge = mustGetAddress("L1StandardBridge"); address l1CrossDomainMessengerProxy = mustGetAddress("L1CrossDomainMessengerProxy"); @@ -1233,20 +1121,15 @@ contract Deploy is Deployer { address systemConfigProxy = mustGetAddress("SystemConfigProxy"); uint256 proxyType = uint256(proxyAdmin.proxyType(l1StandardBridgeProxy)); - Safe safe = Safe(mustGetAddress("SystemOwnerSafe")); - if (proxyType != uint256(ProxyAdmin.ProxyType.CHUGSPLASH)) { - _callViaSafe({ - _safe: safe, - _target: address(proxyAdmin), - _data: abi.encodeCall(ProxyAdmin.setProxyType, (l1StandardBridgeProxy, ProxyAdmin.ProxyType.CHUGSPLASH)) - }); + if (proxyType != uint256(IProxyAdmin.ProxyType.CHUGSPLASH)) { + proxyAdmin.setProxyType(l1StandardBridgeProxy, IProxyAdmin.ProxyType.CHUGSPLASH); } - require(uint256(proxyAdmin.proxyType(l1StandardBridgeProxy)) == uint256(ProxyAdmin.ProxyType.CHUGSPLASH)); + require(uint256(proxyAdmin.proxyType(l1StandardBridgeProxy)) == uint256(IProxyAdmin.ProxyType.CHUGSPLASH)); - _upgradeAndCallViaSafe({ + proxyAdmin.upgradeAndCall({ _proxy: payable(l1StandardBridgeProxy), _implementation: l1StandardBridge, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( IL1StandardBridge.initialize, ( ICrossDomainMessenger(l1CrossDomainMessengerProxy), @@ -1270,10 +1153,11 @@ contract Deploy is Deployer { address l1CrossDomainMessengerProxy = mustGetAddress("L1CrossDomainMessengerProxy"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(l1ERC721BridgeProxy), _implementation: l1ERC721Bridge, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( IL1ERC721Bridge.initialize, (ICrossDomainMessenger(payable(l1CrossDomainMessengerProxy)), ISuperchainConfig(superchainConfigProxy)) ) @@ -1293,10 +1177,11 @@ contract Deploy is Deployer { address optimismMintableERC20Factory = mustGetAddress("OptimismMintableERC20Factory"); address l1StandardBridgeProxy = mustGetAddress("L1StandardBridgeProxy"); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(optimismMintableERC20FactoryProxy), _implementation: optimismMintableERC20Factory, - _innerCallData: abi.encodeCall(IOptimismMintableERC20Factory.initialize, (l1StandardBridgeProxy)) + _data: abi.encodeCall(IOptimismMintableERC20Factory.initialize, (l1StandardBridgeProxy)) }); IOptimismMintableERC20Factory factory = IOptimismMintableERC20Factory(optimismMintableERC20FactoryProxy); @@ -1309,7 +1194,7 @@ contract Deploy is Deployer { /// @notice initializeL1CrossDomainMessenger function initializeL1CrossDomainMessenger() public broadcast { console.log("Upgrading and initializing L1CrossDomainMessenger proxy"); - ProxyAdmin proxyAdmin = ProxyAdmin(mustGetAddress("ProxyAdmin")); + IProxyAdmin proxyAdmin = 
IProxyAdmin(mustGetAddress("ProxyAdmin")); address l1CrossDomainMessengerProxy = mustGetAddress("L1CrossDomainMessengerProxy"); address l1CrossDomainMessenger = mustGetAddress("L1CrossDomainMessenger"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); @@ -1317,34 +1202,25 @@ contract Deploy is Deployer { address systemConfigProxy = mustGetAddress("SystemConfigProxy"); uint256 proxyType = uint256(proxyAdmin.proxyType(l1CrossDomainMessengerProxy)); - Safe safe = Safe(mustGetAddress("SystemOwnerSafe")); - if (proxyType != uint256(ProxyAdmin.ProxyType.RESOLVED)) { - _callViaSafe({ - _safe: safe, - _target: address(proxyAdmin), - _data: abi.encodeCall(ProxyAdmin.setProxyType, (l1CrossDomainMessengerProxy, ProxyAdmin.ProxyType.RESOLVED)) - }); + if (proxyType != uint256(IProxyAdmin.ProxyType.RESOLVED)) { + proxyAdmin.setProxyType(l1CrossDomainMessengerProxy, IProxyAdmin.ProxyType.RESOLVED); } - require(uint256(proxyAdmin.proxyType(l1CrossDomainMessengerProxy)) == uint256(ProxyAdmin.ProxyType.RESOLVED)); + require(uint256(proxyAdmin.proxyType(l1CrossDomainMessengerProxy)) == uint256(IProxyAdmin.ProxyType.RESOLVED)); string memory contractName = "OVM_L1CrossDomainMessenger"; string memory implName = proxyAdmin.implementationName(l1CrossDomainMessenger); if (keccak256(bytes(contractName)) != keccak256(bytes(implName))) { - _callViaSafe({ - _safe: safe, - _target: address(proxyAdmin), - _data: abi.encodeCall(ProxyAdmin.setImplementationName, (l1CrossDomainMessengerProxy, contractName)) - }); + proxyAdmin.setImplementationName(l1CrossDomainMessengerProxy, contractName); } require( keccak256(bytes(proxyAdmin.implementationName(l1CrossDomainMessengerProxy))) == keccak256(bytes(contractName)) ); - _upgradeAndCallViaSafe({ + proxyAdmin.upgradeAndCall({ _proxy: payable(l1CrossDomainMessengerProxy), _implementation: l1CrossDomainMessenger, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( IL1CrossDomainMessenger.initialize, ( ISuperchainConfig(superchainConfigProxy), @@ -1367,10 +1243,11 @@ contract Deploy is Deployer { address l2OutputOracleProxy = mustGetAddress("L2OutputOracleProxy"); address l2OutputOracle = mustGetAddress("L2OutputOracle"); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(l2OutputOracleProxy), _implementation: l2OutputOracle, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( IL2OutputOracle.initialize, ( cfg.l2OutputOracleSubmissionInterval(), @@ -1405,10 +1282,11 @@ contract Deploy is Deployer { address systemConfigProxy = mustGetAddress("SystemConfigProxy"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(optimismPortalProxy), _implementation: optimismPortal, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( IOptimismPortal.initialize, ( IL2OutputOracle(l2OutputOracleProxy), @@ -1434,10 +1312,11 @@ contract Deploy is Deployer { address systemConfigProxy = mustGetAddress("SystemConfigProxy"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(optimismPortalProxy), _implementation: optimismPortal2, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( IOptimismPortal2.initialize, ( 
IDisputeGameFactory(disputeGameFactoryProxy), @@ -1464,10 +1343,11 @@ contract Deploy is Deployer { uint256 requiredProtocolVersion = cfg.requiredProtocolVersion(); uint256 recommendedProtocolVersion = cfg.recommendedProtocolVersion(); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("SuperchainProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(protocolVersionsProxy), _implementation: protocolVersions, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( IProtocolVersions.initialize, ( finalSystemOwner, @@ -1489,13 +1369,13 @@ contract Deploy is Deployer { console.log("Transferring DisputeGameFactory ownership to Safe"); IDisputeGameFactory disputeGameFactory = IDisputeGameFactory(mustGetAddress("DisputeGameFactoryProxy")); address owner = disputeGameFactory.owner(); + address finalSystemOwner = cfg.finalSystemOwner(); - address safe = mustGetAddress("SystemOwnerSafe"); - if (owner != safe) { - disputeGameFactory.transferOwnership(safe); - console.log("DisputeGameFactory ownership transferred to Safe at: %s", safe); + if (owner != finalSystemOwner) { + disputeGameFactory.transferOwnership(finalSystemOwner); + console.log("DisputeGameFactory ownership transferred to final system owner at: %s", finalSystemOwner); } - ChainAssertions.checkDisputeGameFactory({ _contracts: _proxies(), _expectedOwner: safe }); + ChainAssertions.checkDisputeGameFactory({ _contracts: _proxies(), _expectedOwner: finalSystemOwner }); } /// @notice Transfer ownership of the DelayedWETH contract to the final system owner @@ -1504,12 +1384,17 @@ contract Deploy is Deployer { IDelayedWETH weth = IDelayedWETH(mustGetAddress("DelayedWETHProxy")); address owner = weth.owner(); - address safe = mustGetAddress("SystemOwnerSafe"); - if (owner != safe) { - weth.transferOwnership(safe); - console.log("DelayedWETH ownership transferred to Safe at: %s", safe); + address finalSystemOwner = cfg.finalSystemOwner(); + if (owner != finalSystemOwner) { + weth.transferOwnership(finalSystemOwner); + console.log("DelayedWETH ownership transferred to final system owner at: %s", finalSystemOwner); } - ChainAssertions.checkDelayedWETH({ _contracts: _proxies(), _cfg: cfg, _isProxy: true, _expectedOwner: safe }); + ChainAssertions.checkDelayedWETH({ + _contracts: _proxies(), + _cfg: cfg, + _isProxy: true, + _expectedOwner: finalSystemOwner + }); } /// @notice Transfer ownership of the permissioned DelayedWETH contract to the final system owner @@ -1518,16 +1403,16 @@ contract Deploy is Deployer { IDelayedWETH weth = IDelayedWETH(mustGetAddress("PermissionedDelayedWETHProxy")); address owner = weth.owner(); - address safe = mustGetAddress("SystemOwnerSafe"); - if (owner != safe) { - weth.transferOwnership(safe); - console.log("DelayedWETH ownership transferred to Safe at: %s", safe); + address finalSystemOwner = cfg.finalSystemOwner(); + if (owner != finalSystemOwner) { + weth.transferOwnership(finalSystemOwner); + console.log("DelayedWETH ownership transferred to final system owner at: %s", finalSystemOwner); } ChainAssertions.checkPermissionedDelayedWETH({ _contracts: _proxies(), _cfg: cfg, _isProxy: true, - _expectedOwner: safe + _expectedOwner: finalSystemOwner }); } @@ -1648,7 +1533,7 @@ contract Deploy is Deployer { weth: weth, gameType: GameTypes.ALPHABET, absolutePrestate: outputAbsolutePrestate, - faultVm: IBigStepper(new AlphabetVM(outputAbsolutePrestate, PreimageOracle(mustGetAddress("PreimageOracle")))), + faultVm: IBigStepper(new AlphabetVM(outputAbsolutePrestate, 
IPreimageOracle(mustGetAddress("PreimageOracle")))), // The max depth for the alphabet trace is always 3. Add 1 because split depth is fully inclusive. maxGameDepth: cfg.faultGameSplitDepth() + 3 + 1, maxClockDuration: Duration.wrap(uint64(cfg.faultGameMaxClockDuration())) @@ -1663,7 +1548,17 @@ contract Deploy is Deployer { IDelayedWETH weth = IDelayedWETH(mustGetAddress("DelayedWETHProxy")); Claim outputAbsolutePrestate = Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate())); - PreimageOracle fastOracle = new PreimageOracle(cfg.preimageOracleMinProposalSize(), 0); + IPreimageOracle fastOracle = IPreimageOracle( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "PreimageOracle", + _nick: "FastPreimageOracle", + _args: DeployUtils.encodeConstructor( + abi.encodeCall(IPreimageOracle.__constructor__, (cfg.preimageOracleMinProposalSize(), 0)) + ) + }) + ); _setFaultGameImplementation({ _factory: factory, _allowUpgrade: _allowUpgrade, @@ -1791,10 +1686,11 @@ contract Deploy is Deployer { uint256 daBondSize = cfg.daBondSize(); uint256 daResolverRefundPercentage = cfg.daResolverRefundPercentage(); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(dataAvailabilityChallengeProxy), _implementation: dataAvailabilityChallenge, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( IDataAvailabilityChallenge.initialize, (finalSystemOwner, daChallengeWindow, daResolveWindow, daBondSize, daResolverRefundPercentage) ) diff --git a/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol index 252b4703b203..5171a2066628 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol @@ -5,9 +5,11 @@ import { console2 as console } from "forge-std/console2.sol"; import { stdJson } from "forge-std/StdJson.sol"; import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol"; +import { GnosisSafeProxyFactory as SafeProxyFactory } from "safe-contracts/proxies/GnosisSafeProxyFactory.sol"; import { OwnerManager } from "safe-contracts/base/OwnerManager.sol"; import { ModuleManager } from "safe-contracts/base/ModuleManager.sol"; import { GuardManager } from "safe-contracts/base/GuardManager.sol"; +import { Enum as SafeOps } from "safe-contracts/common/Enum.sol"; import { Deployer } from "scripts/deploy/Deployer.sol"; @@ -50,7 +52,7 @@ struct GuardianConfig { DeputyGuardianModuleConfig deputyGuardianModuleConfig; } -/// @title Deploy +/// @title DeployOwnership /// @notice Script used to deploy and configure the Safe contracts which are used to manage the Superchain, /// as the ProxyAdminOwner and other roles in the system. Note that this script is not executable in a /// production environment as some steps depend on having a quorum of signers available. This script is meant to @@ -112,6 +114,113 @@ contract DeployOwnership is Deploy { }); } + /// @notice Make a call from the Safe contract to an arbitrary address with arbitrary data + function _callViaSafe(Safe _safe, address _target, bytes memory _data) internal { + // This is the signature format used when the caller is also the signer. 
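+ // The packed value below encodes r = the caller's address, s = 0 and v = 1; the Safe treats v == 1 as an
+ // approved-hash signature from the owner named in r, so no ECDSA recovery is needed when that owner is msg.sender.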
+ bytes memory signature = abi.encodePacked(uint256(uint160(msg.sender)), bytes32(0), uint8(1)); + + _safe.execTransaction({ + to: _target, + value: 0, + data: _data, + operation: SafeOps.Operation.Call, + safeTxGas: 0, + baseGas: 0, + gasPrice: 0, + gasToken: address(0), + refundReceiver: payable(address(0)), + signatures: signature + }); + } + + /// @notice Deploy the Safe + function deploySafe(string memory _name) public broadcast returns (address addr_) { + address[] memory owners = new address[](0); + addr_ = deploySafe(_name, owners, 1, true); + } + + /// @notice Deploy a new Safe contract. If the keepDeployer option is used to enable further setup actions, then + /// the removeDeployerFromSafe() function should be called on that safe after setup is complete. + /// Note this function does not have the broadcast modifier. + /// @param _name The name of the Safe to deploy. + /// @param _owners The owners of the Safe. + /// @param _threshold The threshold of the Safe. + /// @param _keepDeployer Whether or not the deployer address will be added as an owner of the Safe. + function deploySafe( + string memory _name, + address[] memory _owners, + uint256 _threshold, + bool _keepDeployer + ) + public + returns (address addr_) + { + bytes32 salt = keccak256(abi.encode(_name, _implSalt())); + console.log("Deploying safe: %s with salt %s", _name, vm.toString(salt)); + (SafeProxyFactory safeProxyFactory, Safe safeSingleton) = _getSafeFactory(); + + if (_keepDeployer) { + address[] memory expandedOwners = new address[](_owners.length + 1); + // By always adding msg.sender first we know that the previousOwner will be SENTINEL_OWNERS, which makes it + // easier to call removeOwner later. + expandedOwners[0] = msg.sender; + for (uint256 i = 0; i < _owners.length; i++) { + expandedOwners[i + 1] = _owners[i]; + } + _owners = expandedOwners; + } + + bytes memory initData = abi.encodeCall( + Safe.setup, (_owners, _threshold, address(0), hex"", address(0), address(0), 0, payable(address(0))) + ); + addr_ = address(safeProxyFactory.createProxyWithNonce(address(safeSingleton), initData, uint256(salt))); + + save(_name, addr_); + console.log("New safe: %s deployed at %s\n Note that this safe is owned by the deployer key", _name, addr_); + } + + /// @notice If the keepDeployer option was used with deploySafe(), this function can be used to remove the deployer. + /// Note this function does not have the broadcast modifier. + function removeDeployerFromSafe(string memory _name, uint256 _newThreshold) public { + Safe safe = Safe(mustGetAddress(_name)); + + // The sentinel address is used to mark the start and end of the linked list of owners in the Safe. + address sentinelOwners = address(0x1); + + // Because deploySafe() always adds msg.sender first (if keepDeployer is true), we know that the previousOwner + // will be sentinelOwners. + _callViaSafe({ + _safe: safe, + _target: address(safe), + _data: abi.encodeCall(OwnerManager.removeOwner, (sentinelOwners, msg.sender, _newThreshold)) + }); + console.log("Removed deployer owner from ", _name); + } + + /// @notice Gets the address of the SafeProxyFactory and Safe singleton for use in deploying a new GnosisSafe. + function _getSafeFactory() internal returns (SafeProxyFactory safeProxyFactory_, Safe safeSingleton_) { + if (getAddress("SafeProxyFactory") != address(0)) { + // The SafeProxyFactory is already saved, we can just use it.
+ safeProxyFactory_ = SafeProxyFactory(getAddress("SafeProxyFactory")); + safeSingleton_ = Safe(getAddress("SafeSingleton")); + return (safeProxyFactory_, safeSingleton_); + } + + // These are the standard create2 deployed contracts. First we'll check if they are deployed, + // if not we'll deploy new ones, though not at these addresses. + address safeProxyFactory = 0xa6B71E26C5e0845f74c812102Ca7114b6a896AB2; + address safeSingleton = 0xd9Db270c1B5E3Bd161E8c8503c55cEABeE709552; + + safeProxyFactory.code.length == 0 + ? safeProxyFactory_ = new SafeProxyFactory() + : safeProxyFactory_ = SafeProxyFactory(safeProxyFactory); + + safeSingleton.code.length == 0 ? safeSingleton_ = new Safe() : safeSingleton_ = Safe(payable(safeSingleton)); + + save("SafeProxyFactory", address(safeProxyFactory_)); + save("SafeSingleton", address(safeSingleton_)); + } + /// @notice Deploys a Safe with a configuration similar to that of the Foundation Safe on Mainnet. function deployFoundationOperationsSafe() public broadcast returns (address addr_) { SafeConfig memory exampleFoundationConfig = _getExampleFoundationConfig(); diff --git a/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol b/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol index 2dc07b525bd0..c038c27a5683 100644 --- a/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol +++ b/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol @@ -5,13 +5,11 @@ pragma solidity 0.8.15; import { StdAssertions } from "forge-std/StdAssertions.sol"; import "scripts/deploy/Deploy.s.sol"; -// Contracts -import { Proxy } from "src/universal/Proxy.sol"; - // Libraries import "src/dispute/lib/Types.sol"; // Interfaces +import { IProxy } from "src/universal/interfaces/IProxy.sol"; import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; @@ -24,11 +22,11 @@ contract FPACOPS is Deploy, StdAssertions { // ENTRYPOINTS // //////////////////////////////////////////////////////////////// - function deployFPAC(address _proxyAdmin, address _systemOwnerSafe, address _superchainConfigProxy) public { + function deployFPAC(address _proxyAdmin, address _finalSystemOwner, address _superchainConfigProxy) public { console.log("Deploying a fresh FPAC system and OptimismPortal2 implementation."); prankDeployment("ProxyAdmin", msg.sender); - prankDeployment("SystemOwnerSafe", msg.sender); + prankDeployment("FinalSystemOwner", msg.sender); prankDeployment("SuperchainConfigProxy", _superchainConfigProxy); // Deploy the proxies. @@ -56,14 +54,14 @@ contract FPACOPS is Deploy, StdAssertions { // Deploy the Permissioned Cannon Fault game implementation and set it as game ID = 1. setPermissionedCannonFaultGameImplementation({ _allowUpgrade: false }); - // Transfer ownership of the DisputeGameFactory to the SystemOwnerSafe, and transfer the administrative rights + // Transfer ownership of the DisputeGameFactory to the FinalSystemOwner, and transfer the administrative rights // of the DisputeGameFactoryProxy to the ProxyAdmin. 
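// (Ownership governs the factory's onlyOwner configuration functions, while the proxy admin role alone
// can upgrade the DisputeGameFactoryProxy itself.)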
- transferDGFOwnershipFinal({ _proxyAdmin: _proxyAdmin, _systemOwnerSafe: _systemOwnerSafe }); - transferWethOwnershipFinal({ _proxyAdmin: _proxyAdmin, _systemOwnerSafe: _systemOwnerSafe }); + transferDGFOwnershipFinal({ _proxyAdmin: _proxyAdmin, _finalSystemOwner: _finalSystemOwner }); + transferWethOwnershipFinal({ _proxyAdmin: _proxyAdmin, _finalSystemOwner: _finalSystemOwner }); transferAnchorStateOwnershipFinal({ _proxyAdmin: _proxyAdmin }); // Run post-deployment assertions. - postDeployAssertions({ _proxyAdmin: _proxyAdmin, _systemOwnerSafe: _systemOwnerSafe }); + postDeployAssertions({ _proxyAdmin: _proxyAdmin, _finalSystemOwner: _finalSystemOwner }); // Print overview printConfigReview(); @@ -78,7 +76,7 @@ contract FPACOPS is Deploy, StdAssertions { console.log("Initializing DisputeGameFactoryProxy with DisputeGameFactory."); address dgfProxy = mustGetAddress("DisputeGameFactoryProxy"); - Proxy(payable(dgfProxy)).upgradeToAndCall( + IProxy(payable(dgfProxy)).upgradeToAndCall( mustGetAddress("DisputeGameFactory"), abi.encodeCall(IDisputeGameFactory.initialize, msg.sender) ); @@ -93,7 +91,7 @@ contract FPACOPS is Deploy, StdAssertions { address wethProxy = mustGetAddress("DelayedWETHProxy"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - Proxy(payable(wethProxy)).upgradeToAndCall( + IProxy(payable(wethProxy)).upgradeToAndCall( mustGetAddress("DelayedWETH"), abi.encodeCall(IDelayedWETH.initialize, (msg.sender, ISuperchainConfig(superchainConfigProxy))) ); @@ -121,35 +119,35 @@ contract FPACOPS is Deploy, StdAssertions { }); address asrProxy = mustGetAddress("AnchorStateRegistryProxy"); - Proxy(payable(asrProxy)).upgradeToAndCall( + IProxy(payable(asrProxy)).upgradeToAndCall( mustGetAddress("AnchorStateRegistry"), abi.encodeCall(IAnchorStateRegistry.initialize, (roots, superchainConfig)) ); } /// @notice Transfers admin rights of the `DisputeGameFactoryProxy` to the `ProxyAdmin` and sets the - /// `DisputeGameFactory` owner to the `SystemOwnerSafe`. - function transferDGFOwnershipFinal(address _proxyAdmin, address _systemOwnerSafe) internal broadcast { + /// `DisputeGameFactory` owner to the `FinalSystemOwner`. + function transferDGFOwnershipFinal(address _proxyAdmin, address _finalSystemOwner) internal broadcast { IDisputeGameFactory dgf = IDisputeGameFactory(mustGetAddress("DisputeGameFactoryProxy")); - // Transfer the ownership of the DisputeGameFactory to the SystemOwnerSafe. - dgf.transferOwnership(_systemOwnerSafe); + // Transfer the ownership of the DisputeGameFactory to the FinalSystemOwner. + dgf.transferOwnership(_finalSystemOwner); // Transfer the admin rights of the DisputeGameFactoryProxy to the ProxyAdmin. - Proxy prox = Proxy(payable(address(dgf))); + IProxy prox = IProxy(payable(address(dgf))); prox.changeAdmin(_proxyAdmin); } /// @notice Transfers admin rights of the `DelayedWETHProxy` to the `ProxyAdmin` and sets the - /// `DelayedWETH` owner to the `SystemOwnerSafe`. - function transferWethOwnershipFinal(address _proxyAdmin, address _systemOwnerSafe) internal broadcast { + /// `DelayedWETH` owner to the `FinalSystemOwner`. + function transferWethOwnershipFinal(address _proxyAdmin, address _finalSystemOwner) internal broadcast { IDelayedWETH weth = IDelayedWETH(mustGetAddress("DelayedWETHProxy")); - // Transfer the ownership of the DelayedWETH to the SystemOwnerSafe. - weth.transferOwnership(_systemOwnerSafe); + // Transfer the ownership of the DelayedWETH to the FinalSystemOwner. 
+ weth.transferOwnership(_finalSystemOwner); // Transfer the admin rights of the DelayedWETHProxy to the ProxyAdmin. - Proxy prox = Proxy(payable(address(weth))); + IProxy prox = IProxy(payable(address(weth))); prox.changeAdmin(_proxyAdmin); } @@ -158,12 +156,12 @@ contract FPACOPS is Deploy, StdAssertions { IAnchorStateRegistry asr = IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")); // Transfer the admin rights of the AnchorStateRegistryProxy to the ProxyAdmin. - Proxy prox = Proxy(payable(address(asr))); + IProxy prox = IProxy(payable(address(asr))); prox.changeAdmin(_proxyAdmin); } /// @notice Checks that the deployed system is configured correctly. - function postDeployAssertions(address _proxyAdmin, address _systemOwnerSafe) internal view { + function postDeployAssertions(address _proxyAdmin, address _finalSystemOwner) internal view { Types.ContractSet memory contracts = _proxiesUnstrict(); contracts.OptimismPortal2 = mustGetAddress("OptimismPortal2"); @@ -174,19 +172,19 @@ contract FPACOPS is Deploy, StdAssertions { address dgfProxyAddr = mustGetAddress("DisputeGameFactoryProxy"); IDisputeGameFactory dgfProxy = IDisputeGameFactory(dgfProxyAddr); assertEq(address(uint160(uint256(vm.load(dgfProxyAddr, Constants.PROXY_OWNER_ADDRESS)))), _proxyAdmin); - ChainAssertions.checkDisputeGameFactory(contracts, _systemOwnerSafe); + ChainAssertions.checkDisputeGameFactory(contracts, _finalSystemOwner); address wethProxyAddr = mustGetAddress("DelayedWETHProxy"); assertEq(address(uint160(uint256(vm.load(wethProxyAddr, Constants.PROXY_OWNER_ADDRESS)))), _proxyAdmin); - ChainAssertions.checkDelayedWETH(contracts, cfg, true, _systemOwnerSafe); + ChainAssertions.checkDelayedWETH(contracts, cfg, true, _finalSystemOwner); // Check the config elements in the deployed contracts. ChainAssertions.checkOptimismPortal2(contracts, cfg, false); - PreimageOracle oracle = PreimageOracle(mustGetAddress("PreimageOracle")); + IPreimageOracle oracle = IPreimageOracle(mustGetAddress("PreimageOracle")); assertEq(oracle.minProposalSize(), cfg.preimageOracleMinProposalSize()); assertEq(oracle.challengePeriod(), cfg.preimageOracleChallengePeriod()); - MIPS mips = MIPS(mustGetAddress("Mips")); + IMIPS mips = IMIPS(mustGetAddress("Mips")); assertEq(address(mips.oracle()), address(oracle)); // Check the AnchorStateRegistry configuration. diff --git a/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol b/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol index 5408d9acb151..7db0de4c3fce 100644 --- a/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol +++ b/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol @@ -5,13 +5,11 @@ pragma solidity 0.8.15; import { StdAssertions } from "forge-std/StdAssertions.sol"; import "scripts/deploy/Deploy.s.sol"; -// Contracts -import { Proxy } from "src/universal/Proxy.sol"; - // Libraries import "src/dispute/lib/Types.sol"; // Interfaces +import { IProxy } from "src/universal/interfaces/IProxy.sol"; import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; @@ -32,13 +30,13 @@ contract FPACOPS2 is Deploy, StdAssertions { /// AnchorStateRegistry. Does not deploy a new DisputeGameFactory. System /// Owner is responsible for updating implementations later. /// @param _proxyAdmin Address of the ProxyAdmin contract to transfer ownership to. - /// @param _systemOwnerSafe Address of the SystemOwner. 
+ /// @param _finalSystemOwner Address of the SystemOwner. /// @param _superchainConfigProxy Address of the SuperchainConfig proxy contract. /// @param _disputeGameFactoryProxy Address of the DisputeGameFactory proxy contract. /// @param _anchorStateRegistryProxy Address of the AnchorStateRegistry proxy contract. function deployFPAC2( address _proxyAdmin, - address _systemOwnerSafe, + address _finalSystemOwner, address _superchainConfigProxy, address _disputeGameFactoryProxy, address _anchorStateRegistryProxy @@ -49,7 +47,7 @@ contract FPACOPS2 is Deploy, StdAssertions { // Prank required deployments. prankDeployment("ProxyAdmin", msg.sender); - prankDeployment("SystemOwnerSafe", msg.sender); + prankDeployment("FinalSystemOwner", msg.sender); prankDeployment("SuperchainConfigProxy", _superchainConfigProxy); prankDeployment("DisputeGameFactoryProxy", _disputeGameFactoryProxy); prankDeployment("AnchorStateRegistryProxy", _anchorStateRegistryProxy); @@ -73,11 +71,11 @@ contract FPACOPS2 is Deploy, StdAssertions { deployPermissionedDisputeGame(); // Transfer ownership of DelayedWETH to ProxyAdmin. - transferWethOwnershipFinal({ _proxyAdmin: _proxyAdmin, _systemOwnerSafe: _systemOwnerSafe }); - transferPermissionedWETHOwnershipFinal({ _proxyAdmin: _proxyAdmin, _systemOwnerSafe: _systemOwnerSafe }); + transferWethOwnershipFinal({ _proxyAdmin: _proxyAdmin, _finalSystemOwner: _finalSystemOwner }); + transferPermissionedWETHOwnershipFinal({ _proxyAdmin: _proxyAdmin, _finalSystemOwner: _finalSystemOwner }); // Run post-deployment assertions. - postDeployAssertions({ _proxyAdmin: _proxyAdmin, _systemOwnerSafe: _systemOwnerSafe }); + postDeployAssertions({ _proxyAdmin: _proxyAdmin, _finalSystemOwner: _finalSystemOwner }); // Print overview. printConfigReview(); @@ -152,7 +150,7 @@ contract FPACOPS2 is Deploy, StdAssertions { address wethProxy = mustGetAddress("DelayedWETHProxy"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - Proxy(payable(wethProxy)).upgradeToAndCall( + IProxy(payable(wethProxy)).upgradeToAndCall( mustGetAddress("DelayedWETH"), abi.encodeCall(IDelayedWETH.initialize, (msg.sender, ISuperchainConfig(superchainConfigProxy))) ); @@ -164,44 +162,50 @@ contract FPACOPS2 is Deploy, StdAssertions { address wethProxy = mustGetAddress("PermissionedDelayedWETHProxy"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - Proxy(payable(wethProxy)).upgradeToAndCall( + IProxy(payable(wethProxy)).upgradeToAndCall( mustGetAddress("DelayedWETH"), abi.encodeCall(IDelayedWETH.initialize, (msg.sender, ISuperchainConfig(superchainConfigProxy))) ); } /// @notice Transfers admin rights of the `DelayedWETHProxy` to the `ProxyAdmin` and sets the - /// `DelayedWETH` owner to the `SystemOwnerSafe`. - function transferWethOwnershipFinal(address _proxyAdmin, address _systemOwnerSafe) internal broadcast { + /// `DelayedWETH` owner to the `FinalSystemOwner`. + function transferWethOwnershipFinal(address _proxyAdmin, address _finalSystemOwner) internal broadcast { console.log("Transferring ownership of DelayedWETHProxy"); IDelayedWETH weth = IDelayedWETH(mustGetAddress("DelayedWETHProxy")); - // Transfer the ownership of the DelayedWETH to the SystemOwnerSafe. - weth.transferOwnership(_systemOwnerSafe); + // Transfer the ownership of the DelayedWETH to the FinalSystemOwner. + weth.transferOwnership(_finalSystemOwner); // Transfer the admin rights of the DelayedWETHProxy to the ProxyAdmin. 
- Proxy prox = Proxy(payable(address(weth))); + IProxy prox = IProxy(payable(address(weth))); prox.changeAdmin(_proxyAdmin); } /// @notice Transfers admin rights of the permissioned `DelayedWETHProxy` to the `ProxyAdmin` - /// and sets the `DelayedWETH` owner to the `SystemOwnerSafe`. - function transferPermissionedWETHOwnershipFinal(address _proxyAdmin, address _systemOwnerSafe) internal broadcast { + /// and sets the `DelayedWETH` owner to the `FinalSystemOwner`. + function transferPermissionedWETHOwnershipFinal( + address _proxyAdmin, + address _finalSystemOwner + ) + internal + broadcast + { console.log("Transferring ownership of permissioned DelayedWETHProxy"); IDelayedWETH weth = IDelayedWETH(mustGetAddress("PermissionedDelayedWETHProxy")); - // Transfer the ownership of the DelayedWETH to the SystemOwnerSafe. - weth.transferOwnership(_systemOwnerSafe); + // Transfer the ownership of the DelayedWETH to the FinalSystemOwner. + weth.transferOwnership(_finalSystemOwner); // Transfer the admin rights of the DelayedWETHProxy to the ProxyAdmin. - Proxy prox = Proxy(payable(address(weth))); + IProxy prox = IProxy(payable(address(weth))); prox.changeAdmin(_proxyAdmin); } /// @notice Checks that the deployed system is configured correctly. - function postDeployAssertions(address _proxyAdmin, address _systemOwnerSafe) internal view { + function postDeployAssertions(address _proxyAdmin, address _finalSystemOwner) internal view { Types.ContractSet memory contracts = _proxiesUnstrict(); // Ensure that `useFaultProofs` is set to `true`. @@ -220,17 +224,17 @@ contract FPACOPS2 is Deploy, StdAssertions { assertEq(address(uint160(uint256(vm.load(soyWethProxyAddr, Constants.PROXY_OWNER_ADDRESS)))), _proxyAdmin); // Run standard assertions for DGF and DelayedWETH. - ChainAssertions.checkDisputeGameFactory(contracts, _systemOwnerSafe); - ChainAssertions.checkDelayedWETH(contracts, cfg, true, _systemOwnerSafe); - ChainAssertions.checkPermissionedDelayedWETH(contracts, cfg, true, _systemOwnerSafe); + ChainAssertions.checkDisputeGameFactory(contracts, _finalSystemOwner); + ChainAssertions.checkDelayedWETH(contracts, cfg, true, _finalSystemOwner); + ChainAssertions.checkPermissionedDelayedWETH(contracts, cfg, true, _finalSystemOwner); // Verify PreimageOracle configuration. - PreimageOracle oracle = PreimageOracle(mustGetAddress("PreimageOracle")); + IPreimageOracle oracle = IPreimageOracle(mustGetAddress("PreimageOracle")); assertEq(oracle.minProposalSize(), cfg.preimageOracleMinProposalSize()); assertEq(oracle.challengePeriod(), cfg.preimageOracleChallengePeriod()); // Verify MIPS configuration. - MIPS mips = MIPS(mustGetAddress("Mips")); + IMIPS mips = IMIPS(mustGetAddress("Mips")); assertEq(address(mips.oracle()), address(oracle)); // Grab ASR diff --git a/packages/contracts-bedrock/scripts/fpac/Makefile b/packages/contracts-bedrock/scripts/fpac/Makefile index dbdea4a62cb2..0399666e4e27 100644 --- a/packages/contracts-bedrock/scripts/fpac/Makefile +++ b/packages/contracts-bedrock/scripts/fpac/Makefile @@ -23,9 +23,9 @@ cannon-prestate: # Generate the cannon prestate, and tar the `op-program` + `can .PHONY: deploy-fresh deploy-fresh: cannon-prestate # Deploy a fresh version of the FPAC contracts. Pass `--broadcast` to send to the network. 
- forge script FPACOPS.s.sol --sig "deployFPAC(address,address,address)" $(proxy-admin) $(system-owner-safe) $(superchain-config-proxy) --chain $(chain) -vvv $(args) + forge script FPACOPS.s.sol --sig "deployFPAC(address,address,address)" $(proxy-admin) $(final-system-owner) $(superchain-config-proxy) --chain $(chain) -vvv $(args) # TODO: Convert this whole file to a justfile .PHONY: deploy-upgrade deploy-upgrade: cannon-prestate # Deploy upgraded FP contracts. Pass `--broadcast` to send to the network. - forge script FPACOPS2.s.sol --sig "deployFPAC2(address,address,address,address,address)" $(proxy-admin) $(system-owner-safe) $(superchain-config-proxy) $(dispute-game-factory-proxy) $(anchor-state-registry-proxy) --chain $(chain) -vvv $(args) + forge script FPACOPS2.s.sol --sig "deployFPAC2(address,address,address,address,address)" $(proxy-admin) $(final-system-owner) $(superchain-config-proxy) $(dispute-game-factory-proxy) $(anchor-state-registry-proxy) --chain $(chain) -vvv $(args) diff --git a/packages/contracts-bedrock/scripts/fpac/README.md b/packages/contracts-bedrock/scripts/fpac/README.md index a5d981172b2a..a3d309a4871c 100644 --- a/packages/contracts-bedrock/scripts/fpac/README.md +++ b/packages/contracts-bedrock/scripts/fpac/README.md @@ -17,5 +17,5 @@ make cannon-prestate chain= _Description_: Deploys a fully fresh FPAC system to the passed chain. All args after the `args=` are forwarded to `forge script`. ```sh -make deploy-fresh chain= proxy-admin= system-owner-safe= [args=] +make deploy-fresh chain= proxy-admin= final-system-owner= [args=] ``` diff --git a/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol b/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol index 7b078b45e2d4..3654424696b7 100644 --- a/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol +++ b/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol @@ -10,8 +10,8 @@ import { Artifacts } from "scripts/Artifacts.s.sol"; import { LibString } from "@solady/utils/LibString.sol"; import { Bytes } from "src/libraries/Bytes.sol"; -// Contracts -import { Proxy } from "src/universal/Proxy.sol"; +// Interfaces +import { IProxy } from "src/universal/interfaces/IProxy.sol"; library DeployUtils { Vm internal constant vm = Vm(address(uint160(uint256(keccak256("hevm cheat code"))))); @@ -221,7 +221,7 @@ library DeployUtils { // We prank as the zero address due to the Proxy's `proxyCallIfNotAdmin` modifier. // Pranking inside this function also means it can no longer be considered `view`. vm.prank(address(0)); - address implementation = Proxy(payable(_proxy)).implementation(); + address implementation = IProxy(payable(_proxy)).implementation(); assertValidContractAddress(implementation); } diff --git a/packages/contracts-bedrock/scripts/ops/FeeVaultWithdrawal.s.sol b/packages/contracts-bedrock/scripts/ops/FeeVaultWithdrawal.s.sol index e19cd7e994bd..5a7b48847614 100644 --- a/packages/contracts-bedrock/scripts/ops/FeeVaultWithdrawal.s.sol +++ b/packages/contracts-bedrock/scripts/ops/FeeVaultWithdrawal.s.sol @@ -65,7 +65,7 @@ contract FeeVaultWithdrawal is Script { } /// @notice Logs the information relevant to the user. 
- function log(uint256 _balance, address _recipient, address _vault) internal view { + function log(uint256 _balance, address _recipient, address _vault) internal pure { string memory logline = string.concat( "Withdrawing ", vm.toString(_balance), " to ", vm.toString(_recipient), " from ", vm.toString(_vault) ); diff --git a/packages/contracts-bedrock/scripts/periphery/deploy/DeployPeriphery.s.sol b/packages/contracts-bedrock/scripts/periphery/deploy/DeployPeriphery.s.sol index 8f4b86b95d62..ea7cf2f4f5c3 100644 --- a/packages/contracts-bedrock/scripts/periphery/deploy/DeployPeriphery.s.sol +++ b/packages/contracts-bedrock/scripts/periphery/deploy/DeployPeriphery.s.sol @@ -1,5 +1,6 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +// TODO: Migrate this script to use DeployUtils import { console2 as console } from "forge-std/console2.sol"; import { Script } from "forge-std/Script.sol"; diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 8670b4ff9d3a..3ded81206c65 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -32,8 +32,8 @@ "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0x8f00d4415fe9bef59c1aec5b6729105c686e0238ce947432b2b5a035589cff19", - "sourceCodeHash": "0x4b1cb591b22821ae7246fe47260e1ece74f2cb0463fb949de66fe2b6a986a32c" + "initCodeHash": "0xa0c1139a01cef2445266c71175eff2d36e4b3a7584b198835ed8cba4f7143704", + "sourceCodeHash": "0x67f9846a215d0817a75b4beee50925861d14da2cab1b699bb4e8ae89fa12d01b" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", @@ -80,12 +80,12 @@ "sourceCodeHash": "0x4f21025d4b5c9c74cf7040db6f8e9ce605b82931e3012fee51d3f5d9fbd7b73f" }, "src/L2/L1Block.sol": { - "initCodeHash": "0xd12353c5bf71c6765cc9292eecf262f216e67f117f4ba6287796a5207dbca00f", - "sourceCodeHash": "0xfe3a9585d9bfca8428e12759cab68a3114374e5c37371cfe08bb1976a9a5a041" + "initCodeHash": "0x48d118de2a69fb0fbf6a8da4603025e12da1360da8fb70a5e56342ba64b3ff5f", + "sourceCodeHash": "0x04d25cbf0c4ea5025b0dd3f79f0a32f6623ddb869cff35649072ab3ad964b310" }, "src/L2/L1BlockInterop.sol": { - "initCodeHash": "0x77b3b2151fe14ea36a640469115a5e4de27f7654a9606a9d0701522c6a4ad887", - "sourceCodeHash": "0x7417677643e1df1ae1782513b94c7821097b9529d3f8626c3bcb8b3a9ae0d180" + "initCodeHash": "0x7f87e0b8be9801cb242c469ec7999eb80221f65063aedd4ca4923a5e0fb0e5a7", + "sourceCodeHash": "0x722071a9d08dcbeda9cdaadeb2dd679a8bc192563e4a0439f4cd74439fa75581" }, "src/L2/L1FeeVault.sol": { "initCodeHash": "0x3bfcd57e25ad54b66c374f63e24e33a6cf107044aa8f5f69ef21202c380b5c5b", @@ -112,8 +112,8 @@ "sourceCodeHash": "0xd08a2e6514dbd44e16aa312a1b27b2841a9eab5622cbd05a39c30f543fad673c" }, "src/L2/L2ToL2CrossDomainMessenger.sol": { - "initCodeHash": "0x652e07372d45f0f861aa65b4a73db55871291b875ced02df893a405419de723a", - "sourceCodeHash": "0xc3e73c2d9abf3c7853d2505a83e475d58e96ab5fc5ad7770d04dea5feb9e5717" + "initCodeHash": "0x6f19eb8ff0950156b65cd92872240c0153ac5f3b6f0861d57bf561fdbcacbeac", + "sourceCodeHash": "0xfea53344596d735eff3be945ed1300dc75a6f8b7b2c02c0043af5b0036f5f239" }, "src/L2/OptimismSuperchainERC20.sol": { "initCodeHash": "0xe3dbb0851669708901a4c6bb7ad7d55f9896deeec02cbe53ac58d689ff95b88b", @@ -140,16 +140,16 @@ "sourceCodeHash": "0x2ab6be69795109a1ee04c5693a34d6ce0ff90b62e404cdeb18178bab18d06784" }, "src/cannon/MIPS.sol": { - "initCodeHash": 
"0x4043f262804931bbbbecff64f87f2d0bdc4554b4d0a8b22df8fff940e8d239bf", - "sourceCodeHash": "0xba4674e1846afbbc708877332a38dfabd4b8d1e48ce07d8ebf0a45c9f27f16b0" + "initCodeHash": "0x3992081512da36af76b707aee7d8ef9e084c54fb1dc9f8ce9989ed16d1216f01", + "sourceCodeHash": "0x7630362c20fbca071452031b88c9384d3215c4f2cbee24c7989901de63b0c178" }, "src/cannon/MIPS2.sol": { - "initCodeHash": "0xbb8c2370460e66274210d16ae527a29cb432bb646ebdccc0db0b21e53a4e428c", - "sourceCodeHash": "0x50ed780b621521047ed36ffb260032f2e5ec287f3e1ab3d742c7de45febb280d" + "initCodeHash": "0x590be819d8f02a7f9eb04ddc447f93ccbfd8bc9339f7c2e65336f9805b6c9a66", + "sourceCodeHash": "0x5bc0ab24cf926953b2ea9eb40b929821e280a7181c6cb18e7954bc3f7dc59be1" }, "src/cannon/PreimageOracle.sol": { - "initCodeHash": "0x801e52f9c8439fcf7089575fa93272dfb874641dbfc7d82f36d979c987271c0b", - "sourceCodeHash": "0xdb9421a552e6d7581b3db9e4c2a02d8210ad6ca66ba0f8703d77f7cd4b8e132b" + "initCodeHash": "0xa0b19e18561da9990c95ebea9750dd901f73147b32b8b234eca0f35073c5a970", + "sourceCodeHash": "0x6235d602f84c4173e7a58666791e3db4c9e9651eaccb20db5aed2f898b76e896" }, "src/dispute/AnchorStateRegistry.sol": { "initCodeHash": "0x13d00eef8c3f769863fc766180acc8586f5da309ca0a098e67d4d90bd3243341", diff --git a/packages/contracts-bedrock/snapshots/abi/L1Block.json b/packages/contracts-bedrock/snapshots/abi/L1Block.json index 020c9e942c75..6efa216b5bd6 100644 --- a/packages/contracts-bedrock/snapshots/abi/L1Block.json +++ b/packages/contracts-bedrock/snapshots/abi/L1Block.json @@ -77,6 +77,32 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [], + "name": "eip1559Denominator", + "outputs": [ + { + "internalType": "uint64", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "eip1559Elasticity", + "outputs": [ + { + "internalType": "uint64", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "gasPayingToken", @@ -282,6 +308,13 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [], + "name": "setL1BlockValuesHolocene", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [], "name": "timestamp", diff --git a/packages/contracts-bedrock/snapshots/abi/L1BlockInterop.json b/packages/contracts-bedrock/snapshots/abi/L1BlockInterop.json index ab089f0cec55..ba871eb2086a 100644 --- a/packages/contracts-bedrock/snapshots/abi/L1BlockInterop.json +++ b/packages/contracts-bedrock/snapshots/abi/L1BlockInterop.json @@ -97,6 +97,32 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [], + "name": "eip1559Denominator", + "outputs": [ + { + "internalType": "uint64", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "eip1559Elasticity", + "outputs": [ + { + "internalType": "uint64", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "gasPayingToken", @@ -352,6 +378,13 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [], + "name": "setL1BlockValuesHolocene", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [], "name": "setL1BlockValuesInterop", diff --git a/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json b/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json index a5cda3493911..2676f90b0491 
100644 --- a/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json +++ b/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json @@ -54,33 +54,40 @@ { "inputs": [ { - "internalType": "uint256", - "name": "_destination", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "_source", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "_nonce", - "type": "uint256" - }, - { - "internalType": "address", - "name": "_sender", - "type": "address" - }, - { - "internalType": "address", - "name": "_target", - "type": "address" + "components": [ + { + "internalType": "address", + "name": "origin", + "type": "address" + }, + { + "internalType": "uint256", + "name": "blockNumber", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "logIndex", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "timestamp", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "chainId", + "type": "uint256" + } + ], + "internalType": "struct ICrossL2Inbox.Identifier", + "name": "_id", + "type": "tuple" }, { "internalType": "bytes", - "name": "_message", + "name": "_sentMessage", "type": "bytes" } ], @@ -111,7 +118,7 @@ "outputs": [ { "internalType": "bytes32", - "name": "msgHash_", + "name": "", "type": "bytes32" } ], @@ -153,6 +160,18 @@ { "anonymous": false, "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "source", + "type": "uint256" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "messageNonce", + "type": "uint256" + }, { "indexed": true, "internalType": "bytes32", @@ -166,6 +185,18 @@ { "anonymous": false, "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "source", + "type": "uint256" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "messageNonce", + "type": "uint256" + }, { "indexed": true, "internalType": "bytes32", @@ -176,9 +207,51 @@ "name": "RelayedMessage", "type": "event" }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "destination", + "type": "uint256" + }, + { + "indexed": true, + "internalType": "address", + "name": "target", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "messageNonce", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "message", + "type": "bytes" + } + ], + "name": "SentMessage", + "type": "event" + }, + { + "inputs": [], + "name": "EventPayloadNotSentMessage", + "type": "error" + }, { "inputs": [], - "name": "CrossL2InboxOriginNotL2ToL2CrossDomainMessenger", + "name": "IdOriginNotL2ToL2CrossDomainMessenger", "type": "error" }, { @@ -215,10 +288,5 @@ "inputs": [], "name": "ReentrantCall", "type": "error" - }, - { - "inputs": [], - "name": "RelayMessageCallerNotCrossL2Inbox", - "type": "error" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json index 2ff2826881f5..dc4ef6142c57 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json @@ -2,12 +2,12 @@ { "inputs": [ { - "internalType": "contract SuperchainConfig", + "internalType": "contract ISuperchainConfig", "name": "_superchainConfig", "type": "address" }, { - "internalType": "contract 
ProtocolVersions", + "internalType": "contract IProtocolVersions", "name": "_protocolVersions", "type": "address" } @@ -184,17 +184,17 @@ "type": "address" }, { - "internalType": "contract AddressManager", + "internalType": "contract IAddressManager", "name": "addressManager", "type": "address" }, { - "internalType": "contract L1ERC721Bridge", + "internalType": "contract IL1ERC721Bridge", "name": "l1ERC721BridgeProxy", "type": "address" }, { - "internalType": "contract SystemConfig", + "internalType": "contract ISystemConfig", "name": "systemConfigProxy", "type": "address" }, @@ -204,52 +204,52 @@ "type": "address" }, { - "internalType": "contract L1StandardBridge", + "internalType": "contract IL1StandardBridge", "name": "l1StandardBridgeProxy", "type": "address" }, { - "internalType": "contract L1CrossDomainMessenger", + "internalType": "contract IL1CrossDomainMessenger", "name": "l1CrossDomainMessengerProxy", "type": "address" }, { - "internalType": "contract OptimismPortal2", + "internalType": "contract IOptimismPortal2", "name": "optimismPortalProxy", "type": "address" }, { - "internalType": "contract DisputeGameFactory", + "internalType": "contract IDisputeGameFactory", "name": "disputeGameFactoryProxy", "type": "address" }, { - "internalType": "contract AnchorStateRegistry", + "internalType": "contract IAnchorStateRegistry", "name": "anchorStateRegistryProxy", "type": "address" }, { - "internalType": "contract AnchorStateRegistry", + "internalType": "contract IAnchorStateRegistry", "name": "anchorStateRegistryImpl", "type": "address" }, { - "internalType": "contract FaultDisputeGame", + "internalType": "contract IFaultDisputeGame", "name": "faultDisputeGame", "type": "address" }, { - "internalType": "contract PermissionedDisputeGame", + "internalType": "contract IPermissionedDisputeGame", "name": "permissionedDisputeGame", "type": "address" }, { - "internalType": "contract DelayedWETH", + "internalType": "contract IDelayedWETH", "name": "delayedWETHPermissionedGameProxy", "type": "address" }, { - "internalType": "contract DelayedWETH", + "internalType": "contract IDelayedWETH", "name": "delayedWETHPermissionlessGameProxy", "type": "address" } @@ -410,7 +410,7 @@ "name": "protocolVersions", "outputs": [ { - "internalType": "contract ProtocolVersions", + "internalType": "contract IProtocolVersions", "name": "", "type": "address" } @@ -423,7 +423,7 @@ "name": "superchainConfig", "outputs": [ { - "internalType": "contract SuperchainConfig", + "internalType": "contract ISuperchainConfig", "name": "", "type": "address" } @@ -442,7 +442,7 @@ "name": "systemConfigs", "outputs": [ { - "internalType": "contract SystemConfig", + "internalType": "contract ISystemConfig", "name": "", "type": "address" } diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json index 2ff2826881f5..dc4ef6142c57 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json @@ -2,12 +2,12 @@ { "inputs": [ { - "internalType": "contract SuperchainConfig", + "internalType": "contract ISuperchainConfig", "name": "_superchainConfig", "type": "address" }, { - "internalType": "contract ProtocolVersions", + "internalType": "contract IProtocolVersions", "name": "_protocolVersions", "type": "address" } @@ -184,17 +184,17 @@ "type": "address" }, { - "internalType": "contract AddressManager", + "internalType": "contract 
IAddressManager", "name": "addressManager", "type": "address" }, { - "internalType": "contract L1ERC721Bridge", + "internalType": "contract IL1ERC721Bridge", "name": "l1ERC721BridgeProxy", "type": "address" }, { - "internalType": "contract SystemConfig", + "internalType": "contract ISystemConfig", "name": "systemConfigProxy", "type": "address" }, @@ -204,52 +204,52 @@ "type": "address" }, { - "internalType": "contract L1StandardBridge", + "internalType": "contract IL1StandardBridge", "name": "l1StandardBridgeProxy", "type": "address" }, { - "internalType": "contract L1CrossDomainMessenger", + "internalType": "contract IL1CrossDomainMessenger", "name": "l1CrossDomainMessengerProxy", "type": "address" }, { - "internalType": "contract OptimismPortal2", + "internalType": "contract IOptimismPortal2", "name": "optimismPortalProxy", "type": "address" }, { - "internalType": "contract DisputeGameFactory", + "internalType": "contract IDisputeGameFactory", "name": "disputeGameFactoryProxy", "type": "address" }, { - "internalType": "contract AnchorStateRegistry", + "internalType": "contract IAnchorStateRegistry", "name": "anchorStateRegistryProxy", "type": "address" }, { - "internalType": "contract AnchorStateRegistry", + "internalType": "contract IAnchorStateRegistry", "name": "anchorStateRegistryImpl", "type": "address" }, { - "internalType": "contract FaultDisputeGame", + "internalType": "contract IFaultDisputeGame", "name": "faultDisputeGame", "type": "address" }, { - "internalType": "contract PermissionedDisputeGame", + "internalType": "contract IPermissionedDisputeGame", "name": "permissionedDisputeGame", "type": "address" }, { - "internalType": "contract DelayedWETH", + "internalType": "contract IDelayedWETH", "name": "delayedWETHPermissionedGameProxy", "type": "address" }, { - "internalType": "contract DelayedWETH", + "internalType": "contract IDelayedWETH", "name": "delayedWETHPermissionlessGameProxy", "type": "address" } @@ -410,7 +410,7 @@ "name": "protocolVersions", "outputs": [ { - "internalType": "contract ProtocolVersions", + "internalType": "contract IProtocolVersions", "name": "", "type": "address" } @@ -423,7 +423,7 @@ "name": "superchainConfig", "outputs": [ { - "internalType": "contract SuperchainConfig", + "internalType": "contract ISuperchainConfig", "name": "", "type": "address" } @@ -442,7 +442,7 @@ "name": "systemConfigs", "outputs": [ { - "internalType": "contract SystemConfig", + "internalType": "contract ISystemConfig", "name": "", "type": "address" } diff --git a/packages/contracts-bedrock/snapshots/abi/ProxyAdmin.json b/packages/contracts-bedrock/snapshots/abi/ProxyAdmin.json index fbf37d51b40d..0cd7aff17952 100644 --- a/packages/contracts-bedrock/snapshots/abi/ProxyAdmin.json +++ b/packages/contracts-bedrock/snapshots/abi/ProxyAdmin.json @@ -15,7 +15,7 @@ "name": "addressManager", "outputs": [ { - "internalType": "contract AddressManager", + "internalType": "contract IAddressManager", "name": "", "type": "address" } @@ -171,7 +171,7 @@ { "inputs": [ { - "internalType": "contract AddressManager", + "internalType": "contract IAddressManager", "name": "_address", "type": "address" } diff --git a/packages/contracts-bedrock/snapshots/storageLayout/L1Block.json b/packages/contracts-bedrock/snapshots/storageLayout/L1Block.json index 2928d2147b5c..5ee7d1e31942 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/L1Block.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/L1Block.json @@ -75,5 +75,19 @@ "offset": 0, "slot": "7", "type": "uint256" + }, + { + 
"bytes": "8", + "label": "eip1559Denominator", + "offset": 0, + "slot": "8", + "type": "uint64" + }, + { + "bytes": "8", + "label": "eip1559Elasticity", + "offset": 8, + "slot": "8", + "type": "uint64" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/L1BlockInterop.json b/packages/contracts-bedrock/snapshots/storageLayout/L1BlockInterop.json index 14ee2ff9609a..4f0eeb0e52d7 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/L1BlockInterop.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/L1BlockInterop.json @@ -76,11 +76,25 @@ "slot": "7", "type": "uint256" }, + { + "bytes": "8", + "label": "eip1559Denominator", + "offset": 0, + "slot": "8", + "type": "uint64" + }, + { + "bytes": "8", + "label": "eip1559Elasticity", + "offset": 8, + "slot": "8", + "type": "uint64" + }, { "bytes": "64", "label": "dependencySet", "offset": 0, - "slot": "8", + "slot": "9", "type": "struct EnumerableSet.UintSet" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json index cbb977f214b4..aeef539c5c20 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json @@ -32,7 +32,7 @@ "label": "systemConfigs", "offset": 0, "slot": "3", - "type": "mapping(uint256 => contract SystemConfig)" + "type": "mapping(uint256 => contract ISystemConfig)" }, { "bytes": "256", diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json index cbb977f214b4..aeef539c5c20 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json @@ -32,7 +32,7 @@ "label": "systemConfigs", "offset": 0, "slot": "3", - "type": "mapping(uint256 => contract SystemConfig)" + "type": "mapping(uint256 => contract ISystemConfig)" }, { "bytes": "256", diff --git a/packages/contracts-bedrock/snapshots/storageLayout/ProxyAdmin.json b/packages/contracts-bedrock/snapshots/storageLayout/ProxyAdmin.json index 70f8300e6bed..a0b6f46bf85e 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/ProxyAdmin.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/ProxyAdmin.json @@ -25,7 +25,7 @@ "label": "addressManager", "offset": 0, "slot": "3", - "type": "contract AddressManager" + "type": "contract IAddressManager" }, { "bytes": "1", diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index 67e48e95702a..19c5283cc332 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -12,31 +12,29 @@ import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; +import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; +import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; -import { Proxy } from "src/universal/Proxy.sol"; import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; 
-import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; -import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; import { L1ChugSplashProxy } from "src/legacy/L1ChugSplashProxy.sol"; import { ResolvedDelegateProxy } from "src/legacy/ResolvedDelegateProxy.sol"; -import { AddressManager } from "src/legacy/AddressManager.sol"; -import { DelayedWETH } from "src/dispute/DelayedWETH.sol"; -import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol"; -import { AnchorStateRegistry } from "src/dispute/AnchorStateRegistry.sol"; -import { FaultDisputeGame } from "src/dispute/FaultDisputeGame.sol"; -import { PermissionedDisputeGame } from "src/dispute/PermissionedDisputeGame.sol"; +import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; +import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; +import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; +import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; +import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; import { Claim, Duration, GameType, GameTypes } from "src/dispute/lib/Types.sol"; -import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; -import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; -import { OptimismPortal2 } from "src/L1/OptimismPortal2.sol"; -import { SystemConfig } from "src/L1/SystemConfig.sol"; -import { ResourceMetering } from "src/L1/ResourceMetering.sol"; -import { L1CrossDomainMessenger } from "src/L1/L1CrossDomainMessenger.sol"; -import { L1ERC721Bridge } from "src/L1/L1ERC721Bridge.sol"; -import { L1StandardBridge } from "src/L1/L1StandardBridge.sol"; +import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; +import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; +import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; +import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; +import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; +import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; +import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; import { OptimismMintableERC20Factory } from "src/universal/OptimismMintableERC20Factory.sol"; /// @custom:proxied true @@ -69,21 +67,21 @@ contract OPContractsManager is ISemver, Initializable { /// @notice The full set of outputs from deploying a new OP Stack chain. struct DeployOutput { ProxyAdmin opChainProxyAdmin; - AddressManager addressManager; - L1ERC721Bridge l1ERC721BridgeProxy; - SystemConfig systemConfigProxy; + IAddressManager addressManager; + IL1ERC721Bridge l1ERC721BridgeProxy; + ISystemConfig systemConfigProxy; OptimismMintableERC20Factory optimismMintableERC20FactoryProxy; - L1StandardBridge l1StandardBridgeProxy; - L1CrossDomainMessenger l1CrossDomainMessengerProxy; + IL1StandardBridge l1StandardBridgeProxy; + IL1CrossDomainMessenger l1CrossDomainMessengerProxy; // Fault proof contracts below. 
- OptimismPortal2 optimismPortalProxy; - DisputeGameFactory disputeGameFactoryProxy; - AnchorStateRegistry anchorStateRegistryProxy; - AnchorStateRegistry anchorStateRegistryImpl; - FaultDisputeGame faultDisputeGame; - PermissionedDisputeGame permissionedDisputeGame; - DelayedWETH delayedWETHPermissionedGameProxy; - DelayedWETH delayedWETHPermissionlessGameProxy; + IOptimismPortal2 optimismPortalProxy; + IDisputeGameFactory disputeGameFactoryProxy; + IAnchorStateRegistry anchorStateRegistryProxy; + IAnchorStateRegistry anchorStateRegistryImpl; + IFaultDisputeGame faultDisputeGame; + IPermissionedDisputeGame permissionedDisputeGame; + IDelayedWETH delayedWETHPermissionedGameProxy; + IDelayedWETH delayedWETHPermissionlessGameProxy; } /// @notice The logic address and initializer selector for an implementation contract. @@ -126,18 +124,18 @@ contract OPContractsManager is ISemver, Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.10 - string public constant version = "1.0.0-beta.10"; + /// @custom:semver 1.0.0-beta.17 + string public constant version = "1.0.0-beta.17"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. uint256 public constant OUTPUT_VERSION = 0; /// @notice Address of the SuperchainConfig contract shared by all chains. - SuperchainConfig public immutable superchainConfig; + ISuperchainConfig public immutable superchainConfig; /// @notice Address of the ProtocolVersions contract shared by all chains. - ProtocolVersions public immutable protocolVersions; + IProtocolVersions public immutable protocolVersions; /// @notice The latest release of the OP Contracts Manager, as a string of the format `op-contracts/vX.Y.Z`. string public latestRelease; @@ -146,7 +144,7 @@ contract OPContractsManager is ISemver, Initializable { mapping(string => mapping(string => Implementation)) public implementations; /// @notice Maps an L2 Chain ID to the SystemConfig for that chain. - mapping(uint256 => SystemConfig) public systemConfigs; + mapping(uint256 => ISystemConfig) public systemConfigs; /// @notice Addresses of the Blueprint contracts. /// This is internal because if public the autogenerated getter method would return a tuple of @@ -196,7 +194,7 @@ contract OPContractsManager is ISemver, Initializable { /// @notice OPCM is proxied. Therefore the `initialize` function replaces most constructor logic for this contract. - constructor(SuperchainConfig _superchainConfig, ProtocolVersions _protocolVersions) { + constructor(ISuperchainConfig _superchainConfig, IProtocolVersions _protocolVersions) { assertValidContractAddress(address(_superchainConfig)); assertValidContractAddress(address(_protocolVersions)); superchainConfig = _superchainConfig; @@ -236,7 +234,7 @@ contract OPContractsManager is ISemver, Initializable { // this contract, and then transfer ownership to the specified owner at the end of deployment. // The AddressManager is used to store the implementation for the L1CrossDomainMessenger // due to it's usage of the legacy ResolvedDelegateProxy. 
- output.addressManager = AddressManager(Blueprint.deployFrom(blueprint.addressManager, salt)); + output.addressManager = IAddressManager(Blueprint.deployFrom(blueprint.addressManager, salt)); output.opChainProxyAdmin = ProxyAdmin(Blueprint.deployFrom(blueprint.proxyAdmin, salt, abi.encode(address(this)))); output.opChainProxyAdmin.setAddressManager(output.addressManager); @@ -245,27 +243,27 @@ contract OPContractsManager is ISemver, Initializable { // Deploy ERC-1967 proxied contracts. output.l1ERC721BridgeProxy = - L1ERC721Bridge(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "L1ERC721Bridge")); + IL1ERC721Bridge(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "L1ERC721Bridge")); output.optimismPortalProxy = - OptimismPortal2(payable(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "OptimismPortal"))); + IOptimismPortal2(payable(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "OptimismPortal"))); output.systemConfigProxy = - SystemConfig(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "SystemConfig")); + ISystemConfig(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "SystemConfig")); output.optimismMintableERC20FactoryProxy = OptimismMintableERC20Factory( deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "OptimismMintableERC20Factory") ); output.disputeGameFactoryProxy = - DisputeGameFactory(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "DisputeGameFactory")); + IDisputeGameFactory(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "DisputeGameFactory")); output.anchorStateRegistryProxy = - AnchorStateRegistry(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "AnchorStateRegistry")); + IAnchorStateRegistry(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "AnchorStateRegistry")); // Deploy legacy proxied contracts. - output.l1StandardBridgeProxy = L1StandardBridge( + output.l1StandardBridgeProxy = IL1StandardBridge( payable(Blueprint.deployFrom(blueprint.l1ChugSplashProxy, salt, abi.encode(output.opChainProxyAdmin))) ); output.opChainProxyAdmin.setProxyType(address(output.l1StandardBridgeProxy), ProxyAdmin.ProxyType.CHUGSPLASH); string memory contractName = "OVM_L1CrossDomainMessenger"; - output.l1CrossDomainMessengerProxy = L1CrossDomainMessenger( + output.l1CrossDomainMessengerProxy = IL1CrossDomainMessenger( Blueprint.deployFrom(blueprint.resolvedDelegateProxy, salt, abi.encode(output.addressManager, contractName)) ); output.opChainProxyAdmin.setProxyType( @@ -278,20 +276,17 @@ contract OPContractsManager is ISemver, Initializable { // The AnchorStateRegistry Implementation is not MCP Ready, and therefore requires an implementation per chain. // It must be deployed after the DisputeGameFactoryProxy so that it can be provided as a constructor argument. - output.anchorStateRegistryImpl = AnchorStateRegistry( + output.anchorStateRegistryImpl = IAnchorStateRegistry( Blueprint.deployFrom(blueprint.anchorStateRegistry, salt, abi.encode(output.disputeGameFactoryProxy)) ); - // We have two delayed WETH contracts per chain, one for each of the permissioned and permissionless games. - output.delayedWETHPermissionlessGameProxy = DelayedWETH( - payable(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "DelayedWETHPermissionlessGame")) - ); - output.delayedWETHPermissionedGameProxy = DelayedWETH( + // Eventually we will switch from DelayedWETHPermissionedGameProxy to DelayedWETHPermissionlessGameProxy. 
+ output.delayedWETHPermissionedGameProxy = IDelayedWETH( payable(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "DelayedWETHPermissionedGame")) ); // While not a proxy, we deploy the PermissionedDisputeGame here as well because it's bespoke per chain. - output.permissionedDisputeGame = PermissionedDisputeGame( + output.permissionedDisputeGame = IPermissionedDisputeGame( Blueprint.deployFrom( blueprint.permissionedDisputeGame1, blueprint.permissionedDisputeGame2, @@ -312,7 +307,10 @@ contract OPContractsManager is ISemver, Initializable { data = encodeOptimismPortalInitializer(impl.initializer, output); upgradeAndCall(output.opChainProxyAdmin, address(output.optimismPortalProxy), impl.logic, data); + // First we upgrade the implementation so its version can be retrieved, then we initialize + // it afterwards. See the comments in encodeSystemConfigInitializer to learn more. impl = getLatestImplementation("SystemConfig"); + output.opChainProxyAdmin.upgrade(payable(address(output.systemConfigProxy)), impl.logic); data = encodeSystemConfigInitializer(impl.initializer, _input, output); upgradeAndCall(output.opChainProxyAdmin, address(output.systemConfigProxy), impl.logic, data); @@ -330,8 +328,8 @@ contract OPContractsManager is ISemver, Initializable { impl = getLatestImplementation("DelayedWETH"); data = encodeDelayedWETHInitializer(impl.initializer, _input); + // Eventually we will switch from DelayedWETHPermissionedGameProxy to DelayedWETHPermissionlessGameProxy. upgradeAndCall(output.opChainProxyAdmin, address(output.delayedWETHPermissionedGameProxy), impl.logic, data); - upgradeAndCall(output.opChainProxyAdmin, address(output.delayedWETHPermissionlessGameProxy), impl.logic, data); // We set the initial owner to this contract, set game implementations, then transfer ownership. impl = getLatestImplementation("DisputeGameFactory"); @@ -340,10 +338,10 @@ contract OPContractsManager is ISemver, Initializable { output.disputeGameFactoryProxy.setImplementation( GameTypes.PERMISSIONED_CANNON, IDisputeGame(address(output.permissionedDisputeGame)) ); - output.disputeGameFactoryProxy.transferOwnership(address(output.opChainProxyAdmin)); + output.disputeGameFactoryProxy.transferOwnership(address(_input.roles.opChainProxyAdminOwner)); impl.logic = address(output.anchorStateRegistryImpl); - impl.initializer = AnchorStateRegistry.initialize.selector; + impl.initializer = IAnchorStateRegistry.initialize.selector; data = encodeAnchorStateRegistryInitializer(impl.initializer, _input); upgradeAndCall(output.opChainProxyAdmin, address(output.anchorStateRegistryProxy), impl.logic, data); @@ -431,8 +429,6 @@ contract OPContractsManager is ISemver, Initializable { virtual returns (bytes memory) { - _output; - // TODO make GameTypes.CANNON an input once FPs are supported return abi.encodeWithSelector( _selector, _output.disputeGameFactoryProxy, @@ -453,21 +449,50 @@ contract OPContractsManager is ISemver, Initializable { virtual returns (bytes memory) { - (ResourceMetering.ResourceConfig memory referenceResourceConfig, SystemConfig.Addresses memory opChainAddrs) = - defaultSystemConfigParams(_selector, _input, _output); - - return abi.encodeWithSelector( - _selector, - _input.roles.systemConfigOwner, - _input.basefeeScalar, - _input.blobBasefeeScalar, - bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash - 30_000_000, // gasLimit, TODO should this be an input?
- _input.roles.unsafeBlockSigner, - referenceResourceConfig, - chainIdToBatchInboxAddress(_input.l2ChainId), - opChainAddrs - ); + // We inspect the SystemConfig contract and determine its signature here. This is required + // because this OPCM contract is being developed in a repository that no longer contains the + // SystemConfig contract that was released as part of `op-contracts/v1.6.0`, but in production + // it needs to support that version, in addition to the version currently on develop. + string memory semver = _output.systemConfigProxy.version(); + if (keccak256(abi.encode(semver)) == keccak256(abi.encode(string("2.2.0")))) { + // We are using the op-contracts/v1.6.0 SystemConfig contract. + ( + IResourceMetering.ResourceConfig memory referenceResourceConfig, + ISystemConfigV160.Addresses memory opChainAddrs + ) = defaultSystemConfigV160Params(_selector, _input, _output); + + return abi.encodeWithSelector( + _selector, + _input.roles.systemConfigOwner, + _input.basefeeScalar, + _input.blobBasefeeScalar, + bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash + 30_000_000, + _input.roles.unsafeBlockSigner, + referenceResourceConfig, + chainIdToBatchInboxAddress(_input.l2ChainId), + opChainAddrs + ); + } else { + // We are using the latest SystemConfig contract from the repo. + ( + IResourceMetering.ResourceConfig memory referenceResourceConfig, + ISystemConfig.Addresses memory opChainAddrs + ) = defaultSystemConfigParams(_selector, _input, _output); + + return abi.encodeWithSelector( + _selector, + _input.roles.systemConfigOwner, + _input.basefeeScalar, + _input.blobBasefeeScalar, + bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash + 30_000_000, + _input.roles.unsafeBlockSigner, + referenceResourceConfig, + chainIdToBatchInboxAddress(_input.l2ChainId), + opChainAddrs + ); + } } /// @notice Helper method for encoding the OptimismMintableERC20Factory initializer data. @@ -536,8 +561,8 @@ contract OPContractsManager is ISemver, Initializable { returns (bytes memory) { // this line fails in the op-deployer tests because it is not passing in any data - AnchorStateRegistry.StartingAnchorRoot[] memory startingAnchorRoots = - abi.decode(_input.startingAnchorRoots, (AnchorStateRegistry.StartingAnchorRoot[])); + IAnchorStateRegistry.StartingAnchorRoot[] memory startingAnchorRoots = + abi.decode(_input.startingAnchorRoots, (IAnchorStateRegistry.StartingAnchorRoot[])); return abi.encodeWithSelector(_selector, startingAnchorRoots, superchainConfig); } @@ -564,7 +589,7 @@ contract OPContractsManager is ISemver, Initializable { { return abi.encode( GameType.wrap(1), // Permissioned Cannon - Claim.wrap(bytes32(hex"dead")), // absolutePrestate + Claim.wrap(bytes32(hex"038512e02c4c3f7bdaec27d00edf55b7155e0905301e1a88083e4e0a6764d54c")), // absolutePrestate 73, // maxGameDepth 30, // splitDepth Duration.wrap(3 hours), // clockExtension @@ -588,7 +613,7 @@ contract OPContractsManager is ISemver, Initializable { internal view virtual - returns (ResourceMetering.ResourceConfig memory resourceConfig_, SystemConfig.Addresses memory opChainAddrs_) + returns (IResourceMetering.ResourceConfig memory resourceConfig_, ISystemConfig.Addresses memory opChainAddrs_) { // We use assembly to easily convert from IResourceMetering.ResourceConfig to ResourceMetering.ResourceConfig. // This is required because we have not yet fully migrated the codebase to be interface-based.
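A note on the semver dispatch in encodeSystemConfigInitializer above: Solidity has no native string comparison, so the version reported through the freshly upgraded SystemConfig proxy is compared by hashing. "2.2.0" identifies the op-contracts/v1.6.0 release and selects the ISystemConfigV160 initializer signature, while anything else falls through to the current ISystemConfig signature. A minimal sketch of that check follows; the helper name `isSystemConfigV160` is hypothetical and not part of this patch.

```solidity
// Sketch only: mirrors the hashed string comparison used in encodeSystemConfigInitializer above.
function isSystemConfigV160(ISystemConfig _systemConfig) internal view returns (bool) {
    // op-contracts/v1.6.0 shipped SystemConfig 2.2.0; every other version takes the latest code path.
    return keccak256(abi.encode(_systemConfig.version())) == keccak256(abi.encode(string("2.2.0")));
}
```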
@@ -597,7 +622,7 @@ contract OPContractsManager is ISemver, Initializable { resourceConfig_ := resourceConfig } - opChainAddrs_ = SystemConfig.Addresses({ + opChainAddrs_ = ISystemConfig.Addresses({ l1CrossDomainMessenger: address(_output.l1CrossDomainMessengerProxy), l1ERC721Bridge: address(_output.l1ERC721BridgeProxy), l1StandardBridge: address(_output.l1StandardBridgeProxy), @@ -615,6 +640,45 @@ contract OPContractsManager is ISemver, Initializable { assertValidContractAddress(opChainAddrs_.optimismMintableERC20Factory); } + /// @notice Returns default, standard config arguments for the SystemConfig initializer. + /// This is used by subclasses to reduce code duplication. + function defaultSystemConfigV160Params( + bytes4, /* selector */ + DeployInput memory, /* _input */ + DeployOutput memory _output + ) + internal + view + virtual + returns ( + IResourceMetering.ResourceConfig memory resourceConfig_, + ISystemConfigV160.Addresses memory opChainAddrs_ + ) + { + // We use assembly to easily convert from IResourceMetering.ResourceConfig to ResourceMetering.ResourceConfig. + // This is required because we have not yet fully migrated the codebase to be interface-based. + IResourceMetering.ResourceConfig memory resourceConfig = Constants.DEFAULT_RESOURCE_CONFIG(); + assembly ("memory-safe") { + resourceConfig_ := resourceConfig + } + + opChainAddrs_ = ISystemConfigV160.Addresses({ + l1CrossDomainMessenger: address(_output.l1CrossDomainMessengerProxy), + l1ERC721Bridge: address(_output.l1ERC721BridgeProxy), + l1StandardBridge: address(_output.l1StandardBridgeProxy), + disputeGameFactory: address(_output.disputeGameFactoryProxy), + optimismPortal: address(_output.optimismPortalProxy), + optimismMintableERC20Factory: address(_output.optimismMintableERC20FactoryProxy) + }); + + assertValidContractAddress(opChainAddrs_.l1CrossDomainMessenger); + assertValidContractAddress(opChainAddrs_.l1ERC721Bridge); + assertValidContractAddress(opChainAddrs_.l1StandardBridge); + assertValidContractAddress(opChainAddrs_.disputeGameFactory); + assertValidContractAddress(opChainAddrs_.optimismPortal); + assertValidContractAddress(opChainAddrs_.optimismMintableERC20Factory); + } + /// @notice Makes an external call to the target to initialize the proxy with the specified data. /// First performs safety checks to ensure the target, implementation, and proxy admin are valid. 
function upgradeAndCall( diff --git a/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol b/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol index ae7ac71c2ae9..133f2d629a5e 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol @@ -2,17 +2,17 @@ pragma solidity 0.8.15; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; -import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; -import { ResourceMetering } from "src/L1/ResourceMetering.sol"; -import { SystemConfig } from "src/L1/SystemConfig.sol"; -import { SystemConfigInterop } from "src/L1/SystemConfigInterop.sol"; +import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; +import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; +import { ISystemConfigInterop } from "src/L1/interfaces/ISystemConfigInterop.sol"; /// @custom:proxied true contract OPContractsManagerInterop is OPContractsManager { constructor( - SuperchainConfig _superchainConfig, - ProtocolVersions _protocolVersions + ISuperchainConfig _superchainConfig, + IProtocolVersions _protocolVersions ) OPContractsManager(_superchainConfig, _protocolVersions) { } @@ -30,7 +30,7 @@ contract OPContractsManagerInterop is OPContractsManager { override returns (bytes memory) { - (ResourceMetering.ResourceConfig memory referenceResourceConfig, SystemConfig.Addresses memory opChainAddrs) = + (IResourceMetering.ResourceConfig memory referenceResourceConfig, ISystemConfig.Addresses memory opChainAddrs) = defaultSystemConfigParams(_selector, _input, _output); // TODO For now we assume that the dependency manager is the same as the proxy admin owner. 
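For readers following the SystemConfig deploy path above: the proxy has to point at an implementation before `version()` can be read through it, which is why the code upgrades first and only afterwards encodes and applies the initializer. The sketch below restates that ordering in one place; it assumes the `ProxyAdmin.upgrade`, `getLatestImplementation`, `encodeSystemConfigInitializer`, and internal `upgradeAndCall` helpers shown elsewhere in this diff, and is illustrative rather than an addition to the patch.

```solidity
// Illustrative ordering only; mirrors the flow in the deploy function above.
function upgradeThenInitializeSystemConfig(DeployInput memory _input, DeployOutput memory _output) internal {
    Implementation memory impl = getLatestImplementation("SystemConfig");
    // 1. Point the proxy at the new implementation so version() becomes readable through the proxy.
    _output.opChainProxyAdmin.upgrade(payable(address(_output.systemConfigProxy)), impl.logic);
    // 2. Encode the initializer for whichever SystemConfig version the proxy now reports (v1.6.0 or latest).
    bytes memory data = encodeSystemConfigInitializer(impl.initializer, _input, _output);
    // 3. Upgrade and initialize in one call via the internal helper defined above.
    upgradeAndCall(_output.opChainProxyAdmin, address(_output.systemConfigProxy), impl.logic, data);
}
```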
diff --git a/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessenger.sol b/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessenger.sol index b8b7e3403d29..8a6de84e2c9d 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessenger.sol @@ -7,16 +7,16 @@ import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; interface IL1CrossDomainMessenger is ICrossDomainMessenger { - function PORTAL() external view returns (address); + function PORTAL() external view returns (IOptimismPortal); function initialize( ISuperchainConfig _superchainConfig, IOptimismPortal _portal, ISystemConfig _systemConfig ) external; - function portal() external view returns (address); - function superchainConfig() external view returns (address); - function systemConfig() external view returns (address); + function portal() external view returns (IOptimismPortal); + function superchainConfig() external view returns (ISuperchainConfig); + function systemConfig() external view returns (ISystemConfig); function version() external view returns (string memory); function __constructor__() external; diff --git a/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessengerV160.sol b/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessengerV160.sol new file mode 100644 index 000000000000..a1023100d92d --- /dev/null +++ b/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessengerV160.sol @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; +import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; +import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; + +/// @notice This interface corresponds to the op-contracts/v1.6.0 release of the L1CrossDomainMessenger +/// contract, which has a semver of 2.3.0 as specified in +/// https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.6.0 +interface IL1CrossDomainMessengerV160 is ICrossDomainMessenger { + function PORTAL() external view returns (address); + function initialize(ISuperchainConfig _superchainConfig, IOptimismPortal _portal) external; + function portal() external view returns (address); + function superchainConfig() external view returns (address); + function systemConfig() external view returns (address); + function version() external view returns (string memory); + + function __constructor__() external; +} diff --git a/packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridgeV160.sol b/packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridgeV160.sol new file mode 100644 index 000000000000..b382c4f1ad6d --- /dev/null +++ b/packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridgeV160.sol @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { IStandardBridge } from "src/universal/interfaces/IStandardBridge.sol"; +import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; +import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; + +/// @notice This interface corresponds to the op-contracts/v1.6.0 release of the L1StandardBridge 
+/// contract, which has a semver of 2.1.0 as specified in +/// https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.6.0 +interface IL1StandardBridgeV160 is IStandardBridge { + event ERC20DepositInitiated( + address indexed l1Token, + address indexed l2Token, + address indexed from, + address to, + uint256 amount, + bytes extraData + ); + event ERC20WithdrawalFinalized( + address indexed l1Token, + address indexed l2Token, + address indexed from, + address to, + uint256 amount, + bytes extraData + ); + event ETHDepositInitiated(address indexed from, address indexed to, uint256 amount, bytes extraData); + event ETHWithdrawalFinalized(address indexed from, address indexed to, uint256 amount, bytes extraData); + + function depositERC20( + address _l1Token, + address _l2Token, + uint256 _amount, + uint32 _minGasLimit, + bytes memory _extraData + ) + external; + function depositERC20To( + address _l1Token, + address _l2Token, + address _to, + uint256 _amount, + uint32 _minGasLimit, + bytes memory _extraData + ) + external; + function depositETH(uint32 _minGasLimit, bytes memory _extraData) external payable; + function depositETHTo(address _to, uint32 _minGasLimit, bytes memory _extraData) external payable; + function finalizeERC20Withdrawal( + address _l1Token, + address _l2Token, + address _from, + address _to, + uint256 _amount, + bytes memory _extraData + ) + external; + function finalizeETHWithdrawal( + address _from, + address _to, + uint256 _amount, + bytes memory _extraData + ) + external + payable; + function initialize(ICrossDomainMessenger _messenger, ISuperchainConfig _superchainConfig) external; + function l2TokenBridge() external view returns (address); + function superchainConfig() external view returns (ISuperchainConfig); + function systemConfig() external view returns (ISystemConfig); + function version() external view returns (string memory); + + function __constructor__() external; +} diff --git a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfig.sol b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfig.sol index 37ab1512a031..a7c5434d048b 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfig.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfig.sol @@ -3,6 +3,7 @@ pragma solidity ^0.8.0; import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +/// @notice This interface corresponds to the Custom Gas Token version of the SystemConfig contract. 
interface ISystemConfig { enum UpdateType { BATCHER, diff --git a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol new file mode 100644 index 000000000000..deb0dd2c52ad --- /dev/null +++ b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; + +/// @notice This interface corresponds to the op-contracts/v1.6.0 release of the SystemConfig +/// contract, which has a semver of 2.2.0 as specified in +/// https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.6.0 +interface ISystemConfigV160 { + enum UpdateType { + BATCHER, + GAS_CONFIG, + GAS_LIMIT, + UNSAFE_BLOCK_SIGNER + } + + struct Addresses { + address l1CrossDomainMessenger; + address l1ERC721Bridge; + address l1StandardBridge; + address disputeGameFactory; + address optimismPortal; + address optimismMintableERC20Factory; + } + + event ConfigUpdate(uint256 indexed version, UpdateType indexed updateType, bytes data); + event Initialized(uint8 version); + event OwnershipTransferred(address indexed previousOwner, address indexed newOwner); + + function BATCH_INBOX_SLOT() external view returns (bytes32); + function DISPUTE_GAME_FACTORY_SLOT() external view returns (bytes32); + function L1_CROSS_DOMAIN_MESSENGER_SLOT() external view returns (bytes32); + function L1_ERC_721_BRIDGE_SLOT() external view returns (bytes32); + function L1_STANDARD_BRIDGE_SLOT() external view returns (bytes32); + function OPTIMISM_MINTABLE_ERC20_FACTORY_SLOT() external view returns (bytes32); + function OPTIMISM_PORTAL_SLOT() external view returns (bytes32); + function START_BLOCK_SLOT() external view returns (bytes32); + function UNSAFE_BLOCK_SIGNER_SLOT() external view returns (bytes32); + function VERSION() external view returns (uint256); + function basefeeScalar() external view returns (uint32); + function batchInbox() external view returns (address addr_); + function batcherHash() external view returns (bytes32); + function blobbasefeeScalar() external view returns (uint32); + function disputeGameFactory() external view returns (address addr_); + function gasLimit() external view returns (uint64); + function gasPayingToken() external view returns (address addr_, uint8 decimals_); + function gasPayingTokenName() external view returns (string memory name_); + function gasPayingTokenSymbol() external view returns (string memory symbol_); + function initialize( + address _owner, + uint256 _basefeeScalar, + uint256 _blobbasefeeScalar, + bytes32 _batcherHash, + uint64 _gasLimit, + address _unsafeBlockSigner, + IResourceMetering.ResourceConfig memory _config, + address _batchInbox, + Addresses memory _addresses + ) + external; + function isCustomGasToken() external view returns (bool); + function l1CrossDomainMessenger() external view returns (address addr_); + function l1ERC721Bridge() external view returns (address addr_); + function l1StandardBridge() external view returns (address addr_); + function maximumGasLimit() external pure returns (uint64); + function minimumGasLimit() external view returns (uint64); + function optimismMintableERC20Factory() external view returns (address addr_); + function optimismPortal() external view returns (address addr_); + function overhead() external view returns (uint256); + function owner() external view returns (address); + function 
renounceOwnership() external; + function resourceConfig() external view returns (IResourceMetering.ResourceConfig memory); + function scalar() external view returns (uint256); + function setBatcherHash(bytes32 _batcherHash) external; + function setGasConfig(uint256 _overhead, uint256 _scalar) external; + function setGasConfigEcotone(uint32 _basefeeScalar, uint32 _blobbasefeeScalar) external; + function setGasLimit(uint64 _gasLimit) external; + function setUnsafeBlockSigner(address _unsafeBlockSigner) external; + function startBlock() external view returns (uint256 startBlock_); + function transferOwnership(address newOwner) external; // nosemgrep + function unsafeBlockSigner() external view returns (address addr_); + function version() external pure returns (string memory); + + function __constructor__() external; +} diff --git a/packages/contracts-bedrock/src/L2/L1Block.sol b/packages/contracts-bedrock/src/L2/L1Block.sol index c61f45b83629..feb9f18d1b89 100644 --- a/packages/contracts-bedrock/src/L2/L1Block.sol +++ b/packages/contracts-bedrock/src/L2/L1Block.sol @@ -57,9 +57,15 @@ contract L1Block is ISemver, IGasToken { /// @notice The latest L1 blob base fee. uint256 public blobBaseFee; - /// @custom:semver 1.5.1-beta.2 + /// @notice The eip-1559 base fee change denominator value. + uint64 public eip1559Denominator; + + /// @notice The eip-1559 base fee change elasticity value. + uint64 public eip1559Elasticity; + + /// @custom:semver 1.5.1-beta.3 function version() public pure virtual returns (string memory) { - return "1.5.1-beta.2"; + return "1.5.1-beta.3"; } /// @notice Returns the gas paying token, its decimals, name and symbol. @@ -168,6 +174,59 @@ contract L1Block is ISemver, IGasToken { } } + /// @notice Updates the L1 block values for a Holocene upgraded chain. + /// Params are packed and passed in as raw msg.data instead of ABI to reduce calldata size. + /// Params are expected to be in the following order: + /// 1. _baseFeeScalar L1 base fee scalar + /// 2. _blobBaseFeeScalar L1 blob base fee scalar + /// 3. _sequenceNumber Number of L2 blocks since epoch start. + /// 4. _timestamp L1 timestamp. + /// 5. _number L1 blocknumber. + /// 6. _basefee L1 base fee. + /// 7. _blobBaseFee L1 blob base fee. + /// 8. _hash L1 blockhash. + /// 9. _batcherHash Versioned hash to authenticate batcher by. + /// 10. _eip1559Elasticity EIP-1559 elasticity multiplier value. + /// 11. _eip1559Denominator EIP-1559 base fee change denominator value. + function setL1BlockValuesHolocene() public { + _setL1BlockValuesHolocene(); + } + + /// @notice Updates the L1 block values for a Holocene upgraded chain. + /// Params are packed and passed in as raw msg.data instead of ABI to reduce calldata size. + /// Params are expected to be in the following order: + /// 1. _baseFeeScalar L1 base fee scalar + /// 2. _blobBaseFeeScalar L1 blob base fee scalar + /// 3. _sequenceNumber Number of L2 blocks since epoch start. + /// 4. _timestamp L1 timestamp. + /// 5. _number L1 blocknumber. + /// 6. _basefee L1 base fee. + /// 7. _blobBaseFee L1 blob base fee. + /// 8. _hash L1 blockhash. + /// 9. _batcherHash Versioned hash to authenticate batcher by. + /// 10. _eip1559Elasticity EIP-1559 elasticity multiplier value. + /// 11. _eip1559Denominator EIP-1559 base fee change denominator value. + function _setL1BlockValuesHolocene() internal { + address depositor = DEPOSITOR_ACCOUNT(); + assembly { + // Revert if the caller is not the depositor account.
+ if xor(caller(), depositor) { + mstore(0x00, 0x3cc50b45) // 0x3cc50b45 is the 4-byte selector of "NotDepositor()" + revert(0x1C, 0x04) // returns the stored 4-byte selector from above + } + // sequencenum (uint64), blobBaseFeeScalar (uint32), baseFeeScalar (uint32) + sstore(sequenceNumber.slot, shr(128, calldataload(4))) + // number (uint64) and timestamp (uint64) + sstore(number.slot, shr(128, calldataload(20))) + sstore(basefee.slot, calldataload(36)) // uint256 + sstore(blobBaseFee.slot, calldataload(68)) // uint256 + sstore(hash.slot, calldataload(100)) // bytes32 + sstore(batcherHash.slot, calldataload(132)) // bytes32 + // eip1559Denominator (uint64) and eip1559Elasticity (uint64) + sstore(eip1559Denominator.slot, shr(128, calldataload(164))) // uint64 + } + } + /// @notice Sets the gas paying token for the L2 system. Can only be called by the special /// depositor account. This function is not called on every L2 block but instead /// only called by specially crafted L1 deposit transactions. diff --git a/packages/contracts-bedrock/src/L2/L1BlockInterop.sol b/packages/contracts-bedrock/src/L2/L1BlockInterop.sol index 15ea67f5e6b3..189e0fe7d7d0 100644 --- a/packages/contracts-bedrock/src/L2/L1BlockInterop.sol +++ b/packages/contracts-bedrock/src/L2/L1BlockInterop.sol @@ -42,9 +42,9 @@ contract L1BlockInterop is L1Block { /// keccak256(abi.encode(uint256(keccak256("l1Block.identifier.isDeposit")) - 1)) & ~bytes32(uint256(0xff)) uint256 internal constant IS_DEPOSIT_SLOT = 0x921bd3a089295c6e5540e8fba8195448d253efd6f2e3e495b499b627dc36a300; - /// @custom:semver +interop + /// @custom:semver +interop-beta.1 function version() public pure override returns (string memory) { - return string.concat(super.version(), "+interop"); + return string.concat(super.version(), "+interop-beta.1"); } /// @notice Returns whether the call was triggered from a a deposit or not. diff --git a/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol b/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol index 3eb72210a109..4c1ffc38760f 100644 --- a/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol @@ -5,6 +5,7 @@ import { Encoding } from "src/libraries/Encoding.sol"; import { Hashing } from "src/libraries/Hashing.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { CrossL2Inbox } from "src/L2/CrossL2Inbox.sol"; +import { ICrossL2Inbox } from "src/L2/interfaces/ICrossL2Inbox.sol"; import { IL2ToL2CrossDomainMessenger } from "src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol"; import { ISemver } from "src/universal/interfaces/ISemver.sol"; import { SafeCall } from "src/libraries/SafeCall.sol"; @@ -13,14 +14,14 @@ import { TransientReentrancyAware } from "src/libraries/TransientContext.sol"; /// @notice Thrown when a non-written slot in transient storage is attempted to be read from. error NotEntered(); -/// @notice Thrown when attempting to send a message to the chain that the message is being sent from. -error MessageDestinationSameChain(); +/// @notice Thrown when attempting to relay a message where payload origin is not L2ToL2CrossDomainMessenger. +error IdOriginNotL2ToL2CrossDomainMessenger(); -/// @notice Thrown when attempting to relay a message and the function caller (msg.sender) is not CrossL2Inbox. -error RelayMessageCallerNotCrossL2Inbox(); +/// @notice Thrown when the payload provided to the relay is not a SentMessage event. 
+error EventPayloadNotSentMessage(); -/// @notice Thrown when attempting to relay a message where CrossL2Inbox's origin is not L2ToL2CrossDomainMessenger. -error CrossL2InboxOriginNotL2ToL2CrossDomainMessenger(); +/// @notice Thrown when attempting to send a message to the chain that the message is being sent from. +error MessageDestinationSameChain(); /// @notice Thrown when attempting to relay a message whose destination chain is not the chain relaying it. error MessageDestinationNotRelayChain(); @@ -54,12 +55,17 @@ contract L2ToL2CrossDomainMessenger is IL2ToL2CrossDomainMessenger, ISemver, Tra bytes32 internal constant CROSS_DOMAIN_MESSAGE_SOURCE_SLOT = 0x711dfa3259c842fffc17d6e1f1e0fc5927756133a2345ca56b4cb8178589fee7; + /// @notice Event selector for the SentMessage event. Will be removed in favor of reading + // the `selector` property directly once crytic/slither#2566 is fixed. + bytes32 internal constant SENT_MESSAGE_EVENT_SELECTOR = + 0x382409ac69001e11931a28435afef442cbfd20d9891907e8fa373ba7d351f320; + /// @notice Current message version identifier. uint16 public constant messageVersion = uint16(0); /// @notice Semantic version. - /// @custom:semver 1.0.0-beta.6 - string public constant version = "1.0.0-beta.6"; + /// @custom:semver 1.0.0-beta.7 + string public constant version = "1.0.0-beta.7"; /// @notice Mapping of message hashes to boolean receipt values. Note that a message will only be present in this /// mapping if it has successfully been relayed on this chain, and can therefore not be relayed again. @@ -70,13 +76,27 @@ contract L2ToL2CrossDomainMessenger is IL2ToL2CrossDomainMessenger, ISemver, Tra /// message. uint240 internal msgNonce; + /// @notice Emitted whenever a message is sent to a destination + /// @param destination Chain ID of the destination chain. + /// @param target Target contract or wallet address. + /// @param messageNonce Nonce associated with the message sent + /// @param sender Address initiating this message call + /// @param message Message payload to call target with. + event SentMessage( + uint256 indexed destination, address indexed target, uint256 indexed messageNonce, address sender, bytes message + ); + /// @notice Emitted whenever a message is successfully relayed on this chain. - /// @param messageHash Hash of the message that was relayed. - event RelayedMessage(bytes32 indexed messageHash); + /// @param source Chain ID of the source chain. + /// @param messageNonce Nonce associated with the message sent + /// @param messageHash Hash of the message that was relayed. + event RelayedMessage(uint256 indexed source, uint256 indexed messageNonce, bytes32 indexed messageHash); /// @notice Emitted whenever a message fails to be relayed on this chain. - /// @param messageHash Hash of the message that failed to be relayed. - event FailedRelayedMessage(bytes32 indexed messageHash); + /// @param source Chain ID of the source chain. + /// @param messageNonce Nonce associated with the message sent + /// @param messageHash Hash of the message that failed to be relayed. + event FailedRelayedMessage(uint256 indexed source, uint256 indexed messageNonce, bytes32 indexed messageHash); /// @notice Retrieves the sender of the current cross domain message. If not entered, reverts. /// @return sender_ Address of the sender of the current cross domain message. @@ -100,90 +120,81 @@ contract L2ToL2CrossDomainMessenger is IL2ToL2CrossDomainMessenger, ISemver, Tra /// @param _destination Chain ID of the destination chain.
/// @param _target Target contract or wallet address. /// @param _message Message payload to call target with. - /// @return msgHash_ The hash of the message being sent, which can be used for tracking whether - /// the message has successfully been relayed. - function sendMessage( - uint256 _destination, - address _target, - bytes calldata _message - ) - external - returns (bytes32 msgHash_) - { + /// @return The hash of the message being sent, used to track whether the message has successfully been relayed. + function sendMessage(uint256 _destination, address _target, bytes calldata _message) external returns (bytes32) { if (_destination == block.chainid) revert MessageDestinationSameChain(); if (_target == Predeploys.CROSS_L2_INBOX) revert MessageTargetCrossL2Inbox(); if (_target == Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) revert MessageTargetL2ToL2CrossDomainMessenger(); - (uint256 source, uint256 nonce, address sender) = (block.chainid, messageNonce(), msg.sender); - bytes memory data = abi.encodeCall( - L2ToL2CrossDomainMessenger.relayMessage, (_destination, source, nonce, sender, _target, _message) - ); - msgHash_ = Hashing.hashL2toL2CrossDomainMessengerRelayMessage({ + uint256 nonce = messageNonce(); + emit SentMessage(_destination, _target, nonce, msg.sender, _message); + + msgNonce++; + + return Hashing.hashL2toL2CrossDomainMessage({ _destination: _destination, - _source: source, + _source: block.chainid, _nonce: nonce, - _sender: sender, + _sender: msg.sender, _target: _target, _message: _message }); - assembly { - log0(add(data, 0x20), mload(data)) - } - msgNonce++; } - /// @notice Relays a message that was sent by the other CrossDomainMessenger contract. Can only be executed via - /// cross-chain call from the other messenger OR if the message was already received once and is currently - /// being replayed. - /// @param _destination Chain ID of the destination chain. - /// @param _source Chain ID of the source chain. - /// @param _nonce Nonce of the message being relayed. - /// @param _sender Address of the user who sent the message. - /// @param _target Address that the message is targeted at. - /// @param _message Message payload to call target with. + /// @notice Relays a message that was sent by the other L2ToL2CrossDomainMessenger contract. Can only be executed + /// via cross chain call from the other messenger OR if the message was already received once and is + /// currently being replayed. + /// @param _id Identifier of the SentMessage event to be relayed + /// @param _sentMessage Message payload of the `SentMessage` event function relayMessage( - uint256 _destination, - uint256 _source, - uint256 _nonce, - address _sender, - address _target, - bytes memory _message + ICrossL2Inbox.Identifier calldata _id, + bytes calldata _sentMessage ) external payable nonReentrant { - if (msg.sender != Predeploys.CROSS_L2_INBOX) revert RelayMessageCallerNotCrossL2Inbox(); - if (CrossL2Inbox(Predeploys.CROSS_L2_INBOX).origin() != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) { - revert CrossL2InboxOriginNotL2ToL2CrossDomainMessenger(); - } - if (_destination != block.chainid) revert MessageDestinationNotRelayChain(); - if (_target == Predeploys.CROSS_L2_INBOX) revert MessageTargetCrossL2Inbox(); - if (_target == Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) { - revert MessageTargetL2ToL2CrossDomainMessenger(); + // Ensure the log came from the messenger. 
Since the log origin is the CDM, there isn't a scenario where + // this can be invoked from the CrossL2Inbox as the SentMessage log is not calldata for this function + if (_id.origin != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) { + revert IdOriginNotL2ToL2CrossDomainMessenger(); } - bytes32 messageHash = Hashing.hashL2toL2CrossDomainMessengerRelayMessage({ - _destination: _destination, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: _target, - _message: _message + // Signal that this is a cross chain call that needs to have the identifier validated + CrossL2Inbox(Predeploys.CROSS_L2_INBOX).validateMessage(_id, keccak256(_sentMessage)); + + // Decode the payload + (uint256 destination, address target, uint256 nonce, address sender, bytes memory message) = + _decodeSentMessagePayload(_sentMessage); + + // Assert invariants on the message + if (destination != block.chainid) revert MessageDestinationNotRelayChain(); + if (target == Predeploys.CROSS_L2_INBOX) revert MessageTargetCrossL2Inbox(); + if (target == Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) revert MessageTargetL2ToL2CrossDomainMessenger(); + + uint256 source = _id.chainId; + bytes32 messageHash = Hashing.hashL2toL2CrossDomainMessage({ + _destination: destination, + _source: source, + _nonce: nonce, + _sender: sender, + _target: target, + _message: message }); + if (successfulMessages[messageHash]) { revert MessageAlreadyRelayed(); } - _storeMessageMetadata(_source, _sender); + _storeMessageMetadata(source, sender); - bool success = SafeCall.call(_target, msg.value, _message); + bool success = SafeCall.call(target, msg.value, message); if (success) { successfulMessages[messageHash] = true; - emit RelayedMessage(messageHash); + emit RelayedMessage(source, nonce, messageHash); } else { - emit FailedRelayedMessage(messageHash); + emit FailedRelayedMessage(source, nonce, messageHash); } _storeMessageMetadata(0, address(0)); @@ -205,4 +216,20 @@ contract L2ToL2CrossDomainMessenger is IL2ToL2CrossDomainMessenger, ISemver, Tra tstore(CROSS_DOMAIN_MESSAGE_SOURCE_SLOT, _source) } } + + function _decodeSentMessagePayload(bytes calldata _payload) + internal + pure + returns (uint256 destination_, address target_, uint256 nonce_, address sender_, bytes memory message_) + { + // Validate Selector (also reverts if LOG0 with no topics) + bytes32 selector = abi.decode(_payload[:32], (bytes32)); + if (selector != SENT_MESSAGE_EVENT_SELECTOR) revert EventPayloadNotSentMessage(); + + // Topics + (destination_, target_, nonce_) = abi.decode(_payload[32:128], (uint256, address, uint256)); + + // Data + (sender_, message_) = abi.decode(_payload[128:], (address, bytes)); + } } diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol b/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol index a43b3c7c3963..0eba9a9973f3 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol @@ -34,8 +34,11 @@ interface IL1Block { ) external; function setL1BlockValuesEcotone() external; + function setL1BlockValuesHolocene() external; function timestamp() external view returns (uint64); function version() external pure returns (string memory); + function eip1559Denominator() external view returns (uint64); + function eip1559Elasticity() external view returns (uint64); function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol b/packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol index 
dd72e3fa6f89..31943804b961 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol @@ -52,9 +52,12 @@ interface IL1BlockInterop { ) external; function setL1BlockValuesEcotone() external; + function setL1BlockValuesHolocene() external; function setL1BlockValuesInterop() external; function timestamp() external view returns (uint64); function version() external pure returns (string memory); + function eip1559Denominator() external view returns (uint64); + function eip1559Elasticity() external view returns (uint64); function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol b/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol index e043bb43420a..2b5b945dec73 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol @@ -1,6 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +import { ICrossL2Inbox } from "src/L2/interfaces/ICrossL2Inbox.sol"; + /// @title IL2ToL2CrossDomainMessenger /// @notice Interface for the L2ToL2CrossDomainMessenger contract. interface IL2ToL2CrossDomainMessenger { @@ -45,20 +47,7 @@ interface IL2ToL2CrossDomainMessenger { /// @notice Relays a message that was sent by the other CrossDomainMessenger contract. Can only /// be executed via cross-chain call from the other messenger OR if the message was /// already received once and is currently being replayed. - /// @param _destination Chain ID of the destination chain. - /// @param _nonce Nonce of the message being relayed. - /// @param _sender Address of the user who sent the message. - /// @param _source Chain ID of the source chain. - /// @param _target Address that the message is targeted at. - /// @param _message Message to send to the target. - function relayMessage( - uint256 _destination, - uint256 _source, - uint256 _nonce, - address _sender, - address _target, - bytes calldata _message - ) - external - payable; + /// @param _id Identifier of the SentMessage event to be relayed + /// @param _sentMessage Message payload of the `SentMessage` event + function relayMessage(ICrossL2Inbox.Identifier calldata _id, bytes calldata _sentMessage) external payable; } diff --git a/packages/contracts-bedrock/src/cannon/MIPS.sol b/packages/contracts-bedrock/src/cannon/MIPS.sol index f1d216c8e6de..603ead867284 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS.sol @@ -45,8 +45,8 @@ contract MIPS is ISemver { } /// @notice The semantic version of the MIPS contract. - /// @custom:semver 1.1.1-beta.4 - string public constant version = "1.1.1-beta.4"; + /// @custom:semver 1.2.1-beta.1 + string public constant version = "1.2.1-beta.1"; /// @notice The preimage oracle contract. IPreimageOracle internal immutable ORACLE; diff --git a/packages/contracts-bedrock/src/cannon/MIPS2.sol b/packages/contracts-bedrock/src/cannon/MIPS2.sol index fb8409f6b41c..ebbf9302c1de 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS2.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS2.sol @@ -57,8 +57,8 @@ contract MIPS2 is ISemver { } /// @notice The semantic version of the MIPS2 contract. 
- /// @custom:semver 1.0.0-beta.12 - string public constant version = "1.0.0-beta.12"; + /// @custom:semver 1.0.0-beta.13 + string public constant version = "1.0.0-beta.13"; /// @notice The preimage oracle contract. IPreimageOracle internal immutable ORACLE; diff --git a/packages/contracts-bedrock/src/cannon/PreimageOracle.sol b/packages/contracts-bedrock/src/cannon/PreimageOracle.sol index c3fbc2b498ad..ac8e70b8da76 100644 --- a/packages/contracts-bedrock/src/cannon/PreimageOracle.sol +++ b/packages/contracts-bedrock/src/cannon/PreimageOracle.sol @@ -1,18 +1,20 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { IPreimageOracle } from "./interfaces/IPreimageOracle.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { PreimageKeyLib } from "./PreimageKeyLib.sol"; +// Libraries import { LibKeccak } from "@lib-keccak/LibKeccak.sol"; +import { PreimageKeyLib } from "src/cannon/PreimageKeyLib.sol"; import "src/cannon/libraries/CannonErrors.sol"; import "src/cannon/libraries/CannonTypes.sol"; +// Interfaces +import { ISemver } from "src/universal/interfaces/ISemver.sol"; + /// @title PreimageOracle /// @notice A contract for storing permissioned pre-images. /// @custom:attribution Solady /// @custom:attribution Beacon Deposit Contract <0x00000000219ab540356cbb839cbe05303d7705fa> -contract PreimageOracle is IPreimageOracle, ISemver { +contract PreimageOracle is ISemver { //////////////////////////////////////////////////////////////// // Constants & Immutables // //////////////////////////////////////////////////////////////// @@ -31,8 +33,8 @@ contract PreimageOracle is IPreimageOracle, ISemver { uint256 public constant PRECOMPILE_CALL_RESERVED_GAS = 100_000; /// @notice The semantic version of the Preimage Oracle contract. - /// @custom:semver 1.1.3-beta.2 - string public constant version = "1.1.3-beta.2"; + /// @custom:semver 1.1.3-beta.4 + string public constant version = "1.1.3-beta.4"; //////////////////////////////////////////////////////////////// // Authorized Preimage Parts // @@ -107,7 +109,11 @@ contract PreimageOracle is IPreimageOracle, ISemver { // Standard Preimage Route (External) // //////////////////////////////////////////////////////////////// - /// @inheritdoc IPreimageOracle + /// @notice Reads a preimage from the oracle. + /// @param _key The key of the preimage to read. + /// @param _offset The offset of the preimage to read. + /// @return dat_ The preimage data. + /// @return datLen_ The length of the preimage data. function readPreimage(bytes32 _key, uint256 _offset) external view returns (bytes32 dat_, uint256 datLen_) { require(preimagePartOk[_key][_offset], "pre-image must exist"); @@ -123,7 +129,27 @@ contract PreimageOracle is IPreimageOracle, ISemver { dat_ = preimageParts[_key][_offset]; } - /// @inheritdoc IPreimageOracle + /// @notice Loads local data parts into the preimage oracle. + /// @param _ident The identifier of the local data. + /// @param _localContext The local key context for the preimage oracle. Optionally, can be set as a constant + /// if the caller only requires one set of local keys. + /// @param _word The local data word. + /// @param _size The number of bytes in `_word` to load. + /// @param _partOffset The offset of the local data part to write to the oracle. + /// @dev The local data parts are loaded into the preimage oracle under the context + /// of the caller - no other account can write to the caller's context + /// specific data. 
+ /// + /// There are 5 local data identifiers: + /// ┌────────────┬────────────────────────┐ + /// │ Identifier │ Data │ + /// ├────────────┼────────────────────────┤ + /// │ 1 │ L1 Head Hash (bytes32) │ + /// │ 2 │ Output Root (bytes32) │ + /// │ 3 │ Root Claim (bytes32) │ + /// │ 4 │ L2 Block Number (u64) │ + /// │ 5 │ Chain ID (u64) │ + /// └────────────┴────────────────────────┘ function loadLocalData( uint256 _ident, bytes32 _localContext, @@ -163,7 +189,10 @@ contract PreimageOracle is IPreimageOracle, ISemver { preimageLengths[key_] = _size; } - /// @inheritdoc IPreimageOracle + /// @notice Prepares a preimage to be read by keccak256 key, starting at the given offset and up to 32 bytes + /// (clipped at preimage length, if out of data). + /// @param _partOffset The offset of the preimage to read. + /// @param _preimage The preimage data. function loadKeccak256PreimagePart(uint256 _partOffset, bytes calldata _preimage) external { uint256 size; bytes32 key; @@ -198,7 +227,10 @@ contract PreimageOracle is IPreimageOracle, ISemver { preimageLengths[key] = size; } - /// @inheritdoc IPreimageOracle + /// @notice Prepares a preimage to be read by sha256 key, starting at the given offset and up to 32 bytes + /// (clipped at preimage length, if out of data). + /// @param _partOffset The offset of the preimage to read. + /// @param _preimage The preimage data. function loadSha256PreimagePart(uint256 _partOffset, bytes calldata _preimage) external { uint256 size; bytes32 key; @@ -247,7 +279,13 @@ contract PreimageOracle is IPreimageOracle, ISemver { preimageLengths[key] = size; } - /// @inheritdoc IPreimageOracle + /// @notice Verifies that `p(_z) = _y` given `_commitment` that corresponds to the polynomial `p(x)` and a KZG + // proof. The value `y` is the pre-image, and the preimage key is `5 ++ keccak256(_commitment ++ z)[1:]`. + /// @param _z Big endian point value. Part of the preimage key. + /// @param _y Big endian point value. The preimage for the key. + /// @param _commitment The commitment to the polynomial. 48 bytes, part of the preimage key. + /// @param _proof The KZG proof, part of the preimage key. + /// @param _partOffset The offset of the preimage to store. function loadBlobPreimagePart( uint256 _z, uint256 _y, @@ -338,7 +376,13 @@ contract PreimageOracle is IPreimageOracle, ISemver { preimageLengths[key] = 32; } - /// @inheritdoc IPreimageOracle + /// @notice Prepares a precompile result to be read by a precompile key for the specified offset. + /// The precompile result data is a concatenation of the precompile call status byte and its return data. + /// The preimage key is `6 ++ keccak256(precompile ++ input)[1:]`. + /// @param _partOffset The offset of the precompile result being loaded. + /// @param _precompile The precompile address + /// @param _requiredGas The gas required to fully execute an L1 precompile. + /// @param _input The input to the precompile call. function loadPrecompilePreimagePart( uint256 _partOffset, address _precompile, diff --git a/packages/contracts-bedrock/src/cannon/interfaces/IPreimageOracle.sol b/packages/contracts-bedrock/src/cannon/interfaces/IPreimageOracle.sol index 79ee56f821c9..4a885d3dd03b 100644 --- a/packages/contracts-bedrock/src/cannon/interfaces/IPreimageOracle.sol +++ b/packages/contracts-bedrock/src/cannon/interfaces/IPreimageOracle.sol @@ -88,4 +88,9 @@ interface IPreimageOracle { bytes calldata _input ) external; + + /// @notice Returns the minimum size (in bytes) of a large preimage proposal. 
+ function minProposalSize() external view returns (uint256); + + function __constructor__(uint256 _minProposalSize, uint256 _challengePeriod) external; } diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol index a835b6feef58..1b5fddaba7fd 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol @@ -347,7 +347,7 @@ library MIPSSyscalls { /// retrieve the file-descriptor R/W flags. /// @param _a0 The file descriptor. /// @param _a1 The control command. - /// @param v0_ The file status flag (only supported command is F_GETFL), or -1 on error. + /// @param v0_ The file status flag (only supported commands are F_GETFD and F_GETFL), or -1 on error. /// @param v1_ An error number, or 0 if there is no error. function handleSysFcntl(uint32 _a0, uint32 _a1) internal pure returns (uint32 v0_, uint32 v1_) { unchecked { @@ -355,8 +355,19 @@ library MIPSSyscalls { v1_ = uint32(0); // args: _a0 = fd, _a1 = cmd - if (_a1 == 3) { - // F_GETFL: get file descriptor flags + if (_a1 == 1) { + // F_GETFD: get file descriptor flags + if ( + _a0 == FD_STDIN || _a0 == FD_STDOUT || _a0 == FD_STDERR || _a0 == FD_PREIMAGE_READ + || _a0 == FD_HINT_READ || _a0 == FD_PREIMAGE_WRITE || _a0 == FD_HINT_WRITE + ) { + v0_ = 0; // No flags set + } else { + v0_ = 0xFFffFFff; + v1_ = EBADF; + } + } else if (_a1 == 3) { + // F_GETFL: get file status flags if (_a0 == FD_STDIN || _a0 == FD_PREIMAGE_READ || _a0 == FD_HINT_READ) { v0_ = 0; // O_RDONLY } else if (_a0 == FD_STDOUT || _a0 == FD_STDERR || _a0 == FD_PREIMAGE_WRITE || _a0 == FD_HINT_WRITE) { diff --git a/packages/contracts-bedrock/src/legacy/interfaces/IResolvedDelegateProxy.sol b/packages/contracts-bedrock/src/legacy/interfaces/IResolvedDelegateProxy.sol index abeb3817d9be..b3201ff0b1c7 100644 --- a/packages/contracts-bedrock/src/legacy/interfaces/IResolvedDelegateProxy.sol +++ b/packages/contracts-bedrock/src/legacy/interfaces/IResolvedDelegateProxy.sol @@ -1,8 +1,14 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; + /// @title IResolvedDelegateProxy /// @notice Interface for the ResolvedDelegateProxy contract. interface IResolvedDelegateProxy { fallback() external payable; + + receive() external payable; + + function __constructor__(IAddressManager _addressManager, string memory _implementationName) external; } diff --git a/packages/contracts-bedrock/src/libraries/Encoding.sol b/packages/contracts-bedrock/src/libraries/Encoding.sol index edcdd4ed75e2..ea33f3ca50bf 100644 --- a/packages/contracts-bedrock/src/libraries/Encoding.sol +++ b/packages/contracts-bedrock/src/libraries/Encoding.sol @@ -213,4 +213,50 @@ library Encoding { _batcherHash ); } + + /// @notice Returns an appropriately encoded call to L1Block.setL1BlockValuesHolocene + /// @param _baseFeeScalar L1 base fee Scalar + /// @param _blobBaseFeeScalar L1 blob base fee Scalar + /// @param _sequenceNumber Number of L2 blocks since epoch start. + /// @param _timestamp L1 timestamp. + /// @param _number L1 blocknumber. + /// @param _baseFee L1 base fee. + /// @param _blobBaseFee L1 blob base fee. + /// @param _hash L1 blockhash. + /// @param _batcherHash Versioned hash to authenticate batcher by. 
+ /// @param _eip1559Elasticity EIP-1559 elasticity parameter + /// @param _eip1559Denominator EIP-1559 denominator parameter + function encodeSetL1BlockValuesHolocene( + uint32 _baseFeeScalar, + uint32 _blobBaseFeeScalar, + uint64 _sequenceNumber, + uint64 _timestamp, + uint64 _number, + uint256 _baseFee, + uint256 _blobBaseFee, + bytes32 _hash, + bytes32 _batcherHash, + uint64 _eip1559Elasticity, + uint64 _eip1559Denominator + ) + internal + pure + returns (bytes memory) + { + bytes4 functionSignature = bytes4(keccak256("setL1BlockValuesHolocene()")); + return abi.encodePacked( + functionSignature, + _baseFeeScalar, + _blobBaseFeeScalar, + _sequenceNumber, + _timestamp, + _number, + _baseFee, + _blobBaseFee, + _hash, + _batcherHash, + _eip1559Elasticity, + _eip1559Denominator + ); + } } diff --git a/packages/contracts-bedrock/src/libraries/Hashing.sol b/packages/contracts-bedrock/src/libraries/Hashing.sol index 07a31eb76006..0f0f15678f97 100644 --- a/packages/contracts-bedrock/src/libraries/Hashing.sol +++ b/packages/contracts-bedrock/src/libraries/Hashing.sol @@ -122,8 +122,8 @@ library Hashing { ); } - /// @notice Generates a unique hash for a message to be relayed across chains. This hash is - /// used to identify the message and ensure it is not relayed more than once. + /// @notice Generates a unique hash for cross l2 messages. This hash is used to identify + /// the message and ensure it is not relayed more than once. /// @param _destination Chain ID of the destination chain. /// @param _source Chain ID of the source chain. /// @param _nonce Unique nonce associated with the message to prevent replay attacks. @@ -131,7 +131,7 @@ library Hashing { /// @param _target Address of the contract or wallet that the message is targeting on the destination chain. /// @param _message The message payload to be relayed to the target on the destination chain. /// @return Hash of the encoded message parameters, used to uniquely identify the message. 
- function hashL2toL2CrossDomainMessengerRelayMessage( + function hashL2toL2CrossDomainMessage( uint256 _destination, uint256 _source, uint256 _nonce, diff --git a/packages/contracts-bedrock/src/universal/ProxyAdmin.sol b/packages/contracts-bedrock/src/universal/ProxyAdmin.sol index e554345d4264..dec119398c0f 100644 --- a/packages/contracts-bedrock/src/universal/ProxyAdmin.sol +++ b/packages/contracts-bedrock/src/universal/ProxyAdmin.sol @@ -1,13 +1,18 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Contracts import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; -import { Proxy } from "src/universal/Proxy.sol"; -import { AddressManager } from "src/legacy/AddressManager.sol"; -import { L1ChugSplashProxy } from "src/legacy/L1ChugSplashProxy.sol"; + +// Libraries import { Constants } from "src/libraries/Constants.sol"; -import { IStaticERC1967Proxy } from "src/universal/interfaces/IStaticERC1967Proxy.sol"; + +// Interfaces +import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; +import { IL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; import { IStaticL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; +import { IStaticERC1967Proxy } from "src/universal/interfaces/IStaticERC1967Proxy.sol"; +import { IProxy } from "src/universal/interfaces/IProxy.sol"; /// @title ProxyAdmin /// @notice This is an auxiliary contract meant to be assigned as the admin of an ERC1967 Proxy, @@ -34,7 +39,7 @@ contract ProxyAdmin is Ownable { /// @notice The address of the address manager, this is required to manage the /// ResolvedDelegateProxy type. - AddressManager public addressManager; + IAddressManager public addressManager; /// @notice A legacy upgrading indicator used by the old Chugsplash Proxy. bool internal upgrading; @@ -63,7 +68,7 @@ contract ProxyAdmin is Ownable { /// @notice Set the address of the AddressManager. This is required to manage legacy /// ResolvedDelegateProxy type proxy contracts. /// @param _address Address of the AddressManager. 
- function setAddressManager(AddressManager _address) external onlyOwner { + function setAddressManager(IAddressManager _address) external onlyOwner { addressManager = _address; } @@ -131,9 +136,9 @@ contract ProxyAdmin is Ownable { function changeProxyAdmin(address payable _proxy, address _newAdmin) external onlyOwner { ProxyType ptype = proxyType[_proxy]; if (ptype == ProxyType.ERC1967) { - Proxy(_proxy).changeAdmin(_newAdmin); + IProxy(_proxy).changeAdmin(_newAdmin); } else if (ptype == ProxyType.CHUGSPLASH) { - L1ChugSplashProxy(_proxy).setOwner(_newAdmin); + IL1ChugSplashProxy(_proxy).setOwner(_newAdmin); } else if (ptype == ProxyType.RESOLVED) { addressManager.transferOwnership(_newAdmin); } else { @@ -147,9 +152,9 @@ contract ProxyAdmin is Ownable { function upgrade(address payable _proxy, address _implementation) public onlyOwner { ProxyType ptype = proxyType[_proxy]; if (ptype == ProxyType.ERC1967) { - Proxy(_proxy).upgradeTo(_implementation); + IProxy(_proxy).upgradeTo(_implementation); } else if (ptype == ProxyType.CHUGSPLASH) { - L1ChugSplashProxy(_proxy).setStorage( + IL1ChugSplashProxy(_proxy).setStorage( Constants.PROXY_IMPLEMENTATION_ADDRESS, bytes32(uint256(uint160(_implementation))) ); } else if (ptype == ProxyType.RESOLVED) { @@ -178,7 +183,7 @@ contract ProxyAdmin is Ownable { { ProxyType ptype = proxyType[_proxy]; if (ptype == ProxyType.ERC1967) { - Proxy(_proxy).upgradeToAndCall{ value: msg.value }(_implementation, _data); + IProxy(_proxy).upgradeToAndCall{ value: msg.value }(_implementation, _data); } else { // reverts if proxy type is unknown upgrade(_proxy, _implementation); diff --git a/packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC721Factory.sol b/packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC721Factory.sol new file mode 100644 index 000000000000..2b09da39e515 --- /dev/null +++ b/packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC721Factory.sol @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface IOptimismMintableERC721Factory { + event OptimismMintableERC721Created(address indexed localToken, address indexed remoteToken, address deployer); + + function BRIDGE() external view returns (address); + function REMOTE_CHAIN_ID() external view returns (uint256); + function createOptimismMintableERC721( + address _remoteToken, + string memory _name, + string memory _symbol + ) + external + returns (address); + function isOptimismMintableERC721(address) external view returns (bool); + function version() external view returns (string memory); + + function __constructor__(address _bridge, uint256 _remoteChainId) external; +} diff --git a/packages/contracts-bedrock/src/universal/interfaces/IProxy.sol b/packages/contracts-bedrock/src/universal/interfaces/IProxy.sol new file mode 100644 index 000000000000..a2c90f80828c --- /dev/null +++ b/packages/contracts-bedrock/src/universal/interfaces/IProxy.sol @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface IProxy { + event AdminChanged(address previousAdmin, address newAdmin); + event Upgraded(address indexed implementation); + + fallback() external payable; + + receive() external payable; + + function admin() external returns (address); + function changeAdmin(address _admin) external; + function implementation() external returns (address); + function upgradeTo(address _implementation) external; + function upgradeToAndCall(address _implementation, bytes memory _data) external payable 
returns (bytes memory); + + function __constructor__(address _admin) external; +} diff --git a/packages/contracts-bedrock/src/universal/interfaces/IProxyAdmin.sol b/packages/contracts-bedrock/src/universal/interfaces/IProxyAdmin.sol new file mode 100644 index 000000000000..b35947e6cd78 --- /dev/null +++ b/packages/contracts-bedrock/src/universal/interfaces/IProxyAdmin.sol @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; + +interface IProxyAdmin { + enum ProxyType { + ERC1967, + CHUGSPLASH, + RESOLVED + } + + event OwnershipTransferred(address indexed previousOwner, address indexed newOwner); + + function addressManager() external view returns (IAddressManager); + function changeProxyAdmin(address payable _proxy, address _newAdmin) external; + function getProxyAdmin(address payable _proxy) external view returns (address); + function getProxyImplementation(address _proxy) external view returns (address); + function implementationName(address) external view returns (string memory); + function isUpgrading() external view returns (bool); + function owner() external view returns (address); + function proxyType(address) external view returns (ProxyType); + function renounceOwnership() external; + function setAddress(string memory _name, address _address) external; + function setAddressManager(IAddressManager _address) external; + function setImplementationName(address _address, string memory _name) external; + function setProxyType(address _address, ProxyType _type) external; + function setUpgrading(bool _upgrading) external; + function transferOwnership(address newOwner) external; // nosemgrep + function upgrade(address payable _proxy, address _implementation) external; + function upgradeAndCall(address payable _proxy, address _implementation, bytes memory _data) external payable; + + function __constructor__(address _owner) external; +} diff --git a/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol b/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol index 5304cf797449..dd14b349c68a 100644 --- a/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol +++ b/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol @@ -8,7 +8,6 @@ import { CommitmentType } from "src/L1/interfaces/IDataAvailabilityChallenge.sol"; import { computeCommitmentKeccak256 } from "src/L1/DataAvailabilityChallenge.sol"; -import { Proxy } from "src/universal/Proxy.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; import { Preinstalls } from "src/libraries/Preinstalls.sol"; diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index 8edc33f47772..52c49e4dccf1 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -9,13 +9,15 @@ import { DeployOPChain_TestBase } from "test/opcm/DeployOPChain.t.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; +import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; import { SystemConfig } from "src/L1/SystemConfig.sol"; // Exposes internal functions for testing. 
contract OPContractsManager_Harness is OPContractsManager { constructor( - SuperchainConfig _superchainConfig, - ProtocolVersions _protocolVersions + ISuperchainConfig _superchainConfig, + IProtocolVersions _protocolVersions ) OPContractsManager(_superchainConfig, _protocolVersions) { } @@ -100,8 +102,8 @@ contract OPContractsManager_InternalMethods_Test is Test { OPContractsManager_Harness opcmHarness; function setUp() public { - SuperchainConfig superchainConfigProxy = SuperchainConfig(makeAddr("superchainConfig")); - ProtocolVersions protocolVersionsProxy = ProtocolVersions(makeAddr("protocolVersions")); + ISuperchainConfig superchainConfigProxy = ISuperchainConfig(makeAddr("superchainConfig")); + IProtocolVersions protocolVersionsProxy = IProtocolVersions(makeAddr("protocolVersions")); vm.etch(address(superchainConfigProxy), hex"01"); vm.etch(address(protocolVersionsProxy), hex"01"); diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol index 0472c0781ce8..6861a569c20b 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol @@ -10,7 +10,6 @@ import { NextImpl } from "test/mocks/NextImpl.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Contracts -import { Proxy } from "src/universal/Proxy.sol"; import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; // Libraries @@ -27,6 +26,7 @@ import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; import { IL2OutputOracle } from "src/L1/interfaces/IL2OutputOracle.sol"; import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; +import { IProxy } from "src/universal/interfaces/IProxy.sol"; contract OptimismPortal_Test is CommonTest { address depositor; @@ -1173,10 +1173,10 @@ contract OptimismPortalUpgradeable_Test is CommonTest { vm.startPrank(EIP1967Helper.getAdmin(address(optimismPortal))); // The value passed to the initialize must be larger than the last value // that initialize was called with. 
- Proxy(payable(address(optimismPortal))).upgradeToAndCall( + IProxy(payable(address(optimismPortal))).upgradeToAndCall( address(nextImpl), abi.encodeWithSelector(NextImpl.initialize.selector, 2) ); - assertEq(Proxy(payable(address(optimismPortal))).implementation(), address(nextImpl)); + assertEq(IProxy(payable(address(optimismPortal))).implementation(), address(nextImpl)); // Verify that the NextImpl contract initialized its values according as expected bytes32 slot21After = vm.load(address(optimismPortal), bytes32(uint256(21))); diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol index c131995c1a6b..3faa7e3d2261 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol @@ -10,7 +10,6 @@ import { NextImpl } from "test/mocks/NextImpl.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Contracts -import { Proxy } from "src/universal/Proxy.sol"; import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; // Libraries @@ -29,6 +28,7 @@ import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; +import { IProxy } from "src/universal/interfaces/IProxy.sol"; contract OptimismPortal2_Test is CommonTest { address depositor; @@ -1422,10 +1422,10 @@ contract OptimismPortal2_Upgradeable_Test is CommonTest { vm.startPrank(EIP1967Helper.getAdmin(address(optimismPortal2))); // The value passed to the initialize must be larger than the last value // that initialize was called with. 
- Proxy(payable(address(optimismPortal2))).upgradeToAndCall( + IProxy(payable(address(optimismPortal2))).upgradeToAndCall( address(nextImpl), abi.encodeWithSelector(NextImpl.initialize.selector, 2) ); - assertEq(Proxy(payable(address(optimismPortal2))).implementation(), address(nextImpl)); + assertEq(IProxy(payable(address(optimismPortal2))).implementation(), address(nextImpl)); // Verify that the NextImpl contract initialized its values according as expected bytes32 slot21After = vm.load(address(optimismPortal2), bytes32(uint256(21))); diff --git a/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol b/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol index 957d2b914f38..41eed4a930e6 100644 --- a/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol +++ b/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol @@ -1,17 +1,15 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities +// Testing import { CommonTest } from "test/setup/CommonTest.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Libraries import { Constants } from "src/libraries/Constants.sol"; -// Target contract dependencies -import { Proxy } from "src/universal/Proxy.sol"; - -// Target contract +// Interfaces +import { IProxy } from "src/universal/interfaces/IProxy.sol"; import { IProtocolVersions, ProtocolVersion } from "src/L1/interfaces/IProtocolVersions.sol"; contract ProtocolVersions_Init is CommonTest { @@ -57,7 +55,7 @@ contract ProtocolVersions_Initialize_Test is ProtocolVersions_Init { emit ConfigUpdate(0, IProtocolVersions.UpdateType.RECOMMENDED_PROTOCOL_VERSION, abi.encode(recommended)); vm.prank(EIP1967Helper.getAdmin(address(protocolVersions))); - Proxy(payable(address(protocolVersions))).upgradeToAndCall( + IProxy(payable(address(protocolVersions))).upgradeToAndCall( address(protocolVersionsImpl), abi.encodeCall( IProtocolVersions.initialize, diff --git a/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol b/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol index f315b5212fd6..6d01cdb30867 100644 --- a/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol +++ b/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol @@ -1,17 +1,16 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities +// Testing import { Test } from "forge-std/Test.sol"; +// Contracts +import { ResourceMetering } from "src/L1/ResourceMetering.sol"; + // Libraries import { Constants } from "src/libraries/Constants.sol"; -// Target contract dependencies -import { Proxy } from "src/universal/Proxy.sol"; - -// Target contract -import { ResourceMetering } from "src/L1/ResourceMetering.sol"; +// Interfaces import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; contract MeterUser is ResourceMetering { diff --git a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol index ddfafc0edb2f..91819d8ef70a 100644 --- a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol +++ b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol @@ -7,7 +7,6 @@ import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Contracts import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; -import { Proxy } from "src/universal/Proxy.sol"; // Libraries import { Constants } from "src/libraries/Constants.sol"; diff --git a/packages/contracts-bedrock/test/L2/L1Block.t.sol b/packages/contracts-bedrock/test/L2/L1Block.t.sol index 762553a2ff2f..06de35f51c1d 100644 --- 
a/packages/contracts-bedrock/test/L2/L1Block.t.sol
+++ b/packages/contracts-bedrock/test/L2/L1Block.t.sol
@@ -165,6 +165,116 @@ contract L1BlockEcotone_Test is L1BlockTest {
 }
 }
+contract L1BlockHolocene_Test is L1BlockTest {
+ /// @dev Tests that setL1BlockValuesHolocene updates the values appropriately.
+ function testFuzz_setL1BlockValuesHolocene_succeeds(
+ uint32 baseFeeScalar,
+ uint32 blobBaseFeeScalar,
+ uint64 sequenceNumber,
+ uint64 timestamp,
+ uint64 number,
+ uint256 baseFee,
+ uint256 blobBaseFee,
+ bytes32 hash,
+ bytes32 batcherHash,
+ uint64 eip1559Elasticity,
+ uint64 eip1559Denominator
+ )
+ external
+ {
+ bytes memory functionCallDataPacked = Encoding.encodeSetL1BlockValuesHolocene(
+ baseFeeScalar,
+ blobBaseFeeScalar,
+ sequenceNumber,
+ timestamp,
+ number,
+ baseFee,
+ blobBaseFee,
+ hash,
+ batcherHash,
+ eip1559Elasticity,
+ eip1559Denominator
+ );
+
+ vm.prank(depositor);
+ (bool success,) = address(l1Block).call(functionCallDataPacked);
+ assertTrue(success, "Function call failed");
+
+ assertEq(l1Block.baseFeeScalar(), baseFeeScalar);
+ assertEq(l1Block.blobBaseFeeScalar(), blobBaseFeeScalar);
+ assertEq(l1Block.sequenceNumber(), sequenceNumber);
+ assertEq(l1Block.timestamp(), timestamp);
+ assertEq(l1Block.number(), number);
+ assertEq(l1Block.basefee(), baseFee);
+ assertEq(l1Block.blobBaseFee(), blobBaseFee);
+ assertEq(l1Block.hash(), hash);
+ assertEq(l1Block.batcherHash(), batcherHash);
+ assertEq(l1Block.eip1559Denominator(), eip1559Denominator);
+ assertEq(l1Block.eip1559Elasticity(), eip1559Elasticity);
+
+ // ensure we didn't accidentally pollute the 128 bits of the sequencenum+scalars slot that
+ // should be empty
+ bytes32 scalarsSlot = vm.load(address(l1Block), bytes32(uint256(3)));
+ bytes32 mask128 = hex"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF00000000000000000000000000000000";
+
+ assertEq(0, scalarsSlot & mask128);
+
+ // ensure we didn't accidentally pollute the 128 bits of the number & timestamp slot that
+ // should be empty
+ bytes32 numberTimestampSlot = vm.load(address(l1Block), bytes32(uint256(0)));
+ assertEq(0, numberTimestampSlot & mask128);
+
+ // ensure we didn't accidentally pollute the 128 bits of the eip-1559 parameters slot that
+ // should be empty
+ bytes32 eip1559ParamsSlot = vm.load(address(l1Block), bytes32(uint256(9)));
+ assertEq(0, eip1559ParamsSlot & mask128);
+ }
+
+ /// @dev Tests that `setL1BlockValuesHolocene` succeeds if sender address is the depositor
+ function test_setL1BlockValuesHolocene_isDepositor_succeeds() external {
+ bytes memory functionCallDataPacked = Encoding.encodeSetL1BlockValuesHolocene(
+ type(uint32).max,
+ type(uint32).max,
+ type(uint64).max,
+ type(uint64).max,
+ type(uint64).max,
+ type(uint256).max,
+ type(uint256).max,
+ bytes32(type(uint256).max),
+ bytes32(type(uint256).max),
+ type(uint64).max,
+ type(uint64).max
+ );
+
+ vm.prank(depositor);
+ (bool success,) = address(l1Block).call(functionCallDataPacked);
+ assertTrue(success, "function call failed");
+ }
+
+ /// @dev Tests that `setL1BlockValuesHolocene` reverts if sender address is not the depositor
+ function test_setL1BlockValuesHolocene_notDepositor_reverts() external {
+ bytes memory functionCallDataPacked = Encoding.encodeSetL1BlockValuesHolocene(
+ type(uint32).max,
+ type(uint32).max,
+ type(uint64).max,
+ type(uint64).max,
+ type(uint64).max,
+ type(uint256).max,
+ type(uint256).max,
+ bytes32(type(uint256).max),
+ bytes32(type(uint256).max),
+ type(uint64).max,
+ type(uint64).max
+ );
+
+ (bool success, bytes memory data) =
address(l1Block).call(functionCallDataPacked); + assertTrue(!success, "function call should have failed"); + // make sure return value is the expected function selector for "NotDepositor()" + bytes memory expReturn = hex"3cc50b45"; + assertEq(data, expReturn); + } +} + contract L1BlockCustomGasToken_Test is L1BlockTest { function testFuzz_setGasPayingToken_succeeds( address _token, diff --git a/packages/contracts-bedrock/test/L2Genesis.t.sol b/packages/contracts-bedrock/test/L2/L2Genesis.t.sol similarity index 100% rename from packages/contracts-bedrock/test/L2Genesis.t.sol rename to packages/contracts-bedrock/test/L2/L2Genesis.t.sol diff --git a/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol index ffde996c21c7..f5ff43c832ca 100644 --- a/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol @@ -10,19 +10,20 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import { Hashing } from "src/libraries/Hashing.sol"; // Target contract +import { CrossL2Inbox } from "src/L2/CrossL2Inbox.sol"; +import { ICrossL2Inbox } from "src/L2/interfaces/ICrossL2Inbox.sol"; import { L2ToL2CrossDomainMessenger, NotEntered, MessageDestinationSameChain, - RelayMessageCallerNotCrossL2Inbox, - CrossL2InboxOriginNotL2ToL2CrossDomainMessenger, + IdOriginNotL2ToL2CrossDomainMessenger, + EventPayloadNotSentMessage, MessageDestinationNotRelayChain, MessageTargetCrossL2Inbox, MessageTargetL2ToL2CrossDomainMessenger, MessageAlreadyRelayed, ReentrantCall } from "src/L2/L2ToL2CrossDomainMessenger.sol"; -import { CrossL2Inbox } from "src/L2/CrossL2Inbox.sol"; /// @title L2ToL2CrossDomainMessengerWithModifiableTransientStorage /// @dev L2ToL2CrossDomainMessenger contract with methods to modify the transient storage. 
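The test changes in the hunks below all follow one pattern: rather than mocking `CrossL2Inbox.origin()` and calling `relayMessage` with discrete arguments, they hand it an `ICrossL2Inbox.Identifier` plus a re-encoded `SentMessage` event payload and mock `CrossL2Inbox.validateMessage` for exactly that pair. As a reading aid, here is a minimal sketch of that construction; it mirrors the encoding used in the tests, but the library and function names are illustrative and not part of this diff.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;

import { Predeploys } from "src/libraries/Predeploys.sol";
import { ICrossL2Inbox } from "src/L2/interfaces/ICrossL2Inbox.sol";
import { L2ToL2CrossDomainMessenger } from "src/L2/L2ToL2CrossDomainMessenger.sol";

/// Illustrative helper (hypothetical, not part of the diff) that assembles the same
/// (Identifier, SentMessage payload) pair the updated tests pass to `relayMessage`.
library SentMessageFixture {
    function build(
        uint256 _destination,
        address _target,
        uint256 _nonce,
        address _sender,
        bytes memory _message,
        uint256 _blockNumber,
        uint256 _logIndex,
        uint256 _timestamp,
        uint256 _sourceChainId
    )
        internal
        pure
        returns (ICrossL2Inbox.Identifier memory id_, bytes memory payload_)
    {
        // The identifier must point at a log emitted by the messenger predeploy on the source chain.
        id_ = ICrossL2Inbox.Identifier(
            Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNumber, _logIndex, _timestamp, _sourceChainId
        );

        // Payload = ABI-encoded topics (event selector, destination, target, nonce)
        // concatenated with the ABI-encoded data portion (sender, message).
        payload_ = abi.encodePacked(
            abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, _destination, _target, _nonce),
            abi.encode(_sender, _message)
        );
    }
}
```

The payload is deliberately the raw event: the first `abi.encode` reproduces the topics and the second reproduces the non-indexed data, matching the `logs[0].topics` and `logs[0].data` assertions in the updated `sendMessage` test.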
@@ -91,11 +92,10 @@ contract L2ToL2CrossDomainMessengerTest is Test { vm.recordLogs(); // Call the sendMessage function - bytes32 msgHash = - l2ToL2CrossDomainMessenger.sendMessage({ _destination: _destination, _target: _target, _message: _message }); + bytes32 msgHash = l2ToL2CrossDomainMessenger.sendMessage(_destination, _target, _message); assertEq( msgHash, - Hashing.hashL2toL2CrossDomainMessengerRelayMessage( + Hashing.hashL2toL2CrossDomainMessage( _destination, block.chainid, messageNonce, address(this), _target, _message ) ); @@ -103,13 +103,15 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Check that the event was emitted with the correct parameters Vm.Log[] memory logs = vm.getRecordedLogs(); assertEq(logs.length, 1); - assertEq( - logs[0].data, - abi.encodeCall( - L2ToL2CrossDomainMessenger.relayMessage, - (_destination, block.chainid, messageNonce, address(this), _target, _message) - ) - ); + + // topics + assertEq(logs[0].topics[0], L2ToL2CrossDomainMessenger.SentMessage.selector); + assertEq(logs[0].topics[1], bytes32(_destination)); + assertEq(logs[0].topics[2], bytes32(uint256(uint160(_target)))); + assertEq(logs[0].topics[3], bytes32(messageNonce)); + + // data + assertEq(logs[0].data, abi.encode(address(this), _message)); // Check that the message nonce has been incremented assertEq(l2ToL2CrossDomainMessenger.messageNonce(), messageNonce + 1); @@ -198,16 +200,15 @@ contract L2ToL2CrossDomainMessengerTest is Test { address _sender, address _target, bytes calldata _message, - uint256 _value + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { - // Ensure that the target contract is not a Forge contract. - assumeNotForgeAddress(_target); - // Ensure that the target contract is not CrossL2Inbox or L2ToL2CrossDomainMessenger - vm.assume(_target != Predeploys.CROSS_L2_INBOX); - vm.assume(_target != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); + vm.assume(_target != Predeploys.CROSS_L2_INBOX && _target != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); // Ensure that the target call is payable if value is sent if (_value > 0) assumePayable(_target); @@ -215,51 +216,68 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Ensure that the target contract does not revert vm.mockCall({ callee: _target, msgValue: _value, data: _message, returnData: abi.encode(true) }); - // Mock the CrossL2Inbox origin to return the L2ToL2CrossDomainMessenger contract + // Construct the SentMessage payload & identifier + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, block.chainid, _target, _nonce), // topics + abi.encode(_sender, _message) // data + ); + + // Ensure the CrossL2Inbox validates this message vm.mockCall({ callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) + data: abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" }); // Look for correct emitted event vm.expectEmit(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); emit L2ToL2CrossDomainMessenger.RelayedMessage( - keccak256(abi.encode(block.chainid, _source, _nonce, _sender, _target, _message)) + _source, _nonce, keccak256(abi.encode(block.chainid, _source, _nonce, _sender, _target, _message)) ); - // Ensure the target contract is called 
with the correct parameters - vm.expectCall({ callee: _target, msgValue: _value, data: _message }); - - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - - // Call the relayMessage function - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: block.chainid, // ensure the destination is the chain of L2ToL2CrossDomainMessenger - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: _target, - _message: _message - }); - - // Check that successfulMessages mapping updates the message hash correctly + // relay the message + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); assertEq( l2ToL2CrossDomainMessenger.successfulMessages( keccak256(abi.encode(block.chainid, _source, _nonce, _sender, _target, _message)) ), true ); + } - // Check that entered slot is cleared after the function call - assertEq(l2ToL2CrossDomainMessenger.entered(), false); + function testFuzz_relayMessage_eventPayloadNotSentMessage_reverts( + uint256 _source, + uint256 _nonce, + bytes32 _msgHash, + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time + ) + external + { + // Expect a revert with the EventPayloadNotSentMessage selector + vm.expectRevert(EventPayloadNotSentMessage.selector); - // Check that metadata is cleared after the function call. We need to set the `entered` slot to non-zero value - // to prevent NotEntered revert when calling the crossDomainMessageSender and crossDomainMessageSource functions - l2ToL2CrossDomainMessenger.setEntered(1); - assertEq(l2ToL2CrossDomainMessenger.crossDomainMessageSource(), 0); - assertEq(l2ToL2CrossDomainMessenger.crossDomainMessageSender(), address(0)); + // Point to a different remote log that the inbox validates + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = + abi.encode(L2ToL2CrossDomainMessenger.RelayedMessage.selector, _source, _nonce, _msgHash); + + // Ensure the CrossL2Inbox validates this message + vm.mockCall({ + callee: Predeploys.CROSS_L2_INBOX, + data: abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" + }); + + // Call + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); } /// @dev Mock target function that checks the source and sender of the message in transient storage. @@ -281,7 +299,10 @@ contract L2ToL2CrossDomainMessengerTest is Test { uint256 _source, uint256 _nonce, address _sender, - uint256 _value + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { @@ -289,46 +310,39 @@ contract L2ToL2CrossDomainMessengerTest is Test { // contract has a non-zero balance. Thus, we set this contract's balance to zero and we hoax afterwards. 
vm.deal(address(this), 0); - // Mock the CrossL2Inbox origin to return the L2ToL2CrossDomainMessenger contract - vm.mockCall({ - callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) - }); - // Set the target and message for the reentrant call address target = address(this); bytes memory message = abi.encodeWithSelector(this.mockTarget.selector, _source, _sender); + bytes32 msgHash = keccak256(abi.encode(block.chainid, _source, _nonce, _sender, target, message)); + // Look for correct emitted event vm.expectEmit(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); - emit L2ToL2CrossDomainMessenger.RelayedMessage( - keccak256(abi.encode(block.chainid, _source, _nonce, _sender, target, message)) - ); + emit L2ToL2CrossDomainMessenger.RelayedMessage(_source, _nonce, msgHash); // Ensure the target contract is called with the correct parameters vm.expectCall({ callee: target, msgValue: _value, data: message }); - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - - // Call the relayMessage function - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: block.chainid, // ensure the destination is the chain of L2ToL2CrossDomainMessenger - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: target, - _message: message + // Construct and relay the message + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, block.chainid, target, _nonce), // topics + abi.encode(_sender, message) // data + ); + + // Ensure the CrossL2Inbox validates this message + vm.mockCall({ + callee: Predeploys.CROSS_L2_INBOX, + data: abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" }); + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); + // Check that successfulMessages mapping updates the message hash correctly - assertEq( - l2ToL2CrossDomainMessenger.successfulMessages( - keccak256(abi.encode(block.chainid, _source, _nonce, _sender, target, message)) - ), - true - ); + assertEq(l2ToL2CrossDomainMessenger.successfulMessages(msgHash), true); // Check that entered slot is cleared after the function call assertEq(l2ToL2CrossDomainMessenger.entered(), false); @@ -353,14 +367,14 @@ contract L2ToL2CrossDomainMessengerTest is Test { vm.expectRevert(ReentrantCall.selector); - l2ToL2CrossDomainMessenger.relayMessage({ - _destination: block.chainid, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: address(0), - _message: "" - }); + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, 1, 1, 1, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, block.chainid, address(0), _nonce), // topics + abi.encode(_sender, "") // data + ); + + l2ToL2CrossDomainMessenger.relayMessage(id, sentMessage); // Ensure the function still reverts if `expectRevert` succeeds revert(); @@ -373,7 +387,10 @@ contract L2ToL2CrossDomainMessengerTest is Test { uint256 _source2, // sender passed to `relayMessage` by the reentrant call. 
address _sender2, // sender passed to `relayMessage` by the reentrant call. uint256 _nonce, - uint256 _value + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { @@ -381,13 +398,6 @@ contract L2ToL2CrossDomainMessengerTest is Test { // contract has a non-zero balance. Thus, we set this contract's balance to zero and we hoax afterwards. vm.deal(address(this), 0); - // Mock the CrossL2Inbox origin to return the L2ToL2CrossDomainMessenger contract - vm.mockCall({ - callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) - }); - // Set the target and message for the reentrant call address target = address(this); bytes memory message = abi.encodeWithSelector(this.mockTargetReentrant.selector, _source2, _nonce, _sender2); @@ -395,25 +405,30 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Look for correct emitted event vm.expectEmit(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); emit L2ToL2CrossDomainMessenger.FailedRelayedMessage( - keccak256(abi.encode(block.chainid, _source1, _nonce, _sender1, target, message)) + _source1, _nonce, keccak256(abi.encode(block.chainid, _source1, _nonce, _sender1, target, message)) ); // Ensure the target contract is called with the correct parameters vm.expectCall({ callee: target, msgValue: _value, data: message }); - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - - // Call the relayMessage function - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: block.chainid, // ensure the destination is the chain of L2ToL2CrossDomainMessenger - _source: _source1, - _nonce: _nonce, - _sender: _sender1, - _target: target, - _message: message + // Construct and relay the message + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source1); + bytes memory sentMessage = abi.encodePacked( + abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, block.chainid, target, _nonce), // topics + abi.encode(_sender1, message) // data + ); + + // Ensure the CrossL2Inbox validates this message + vm.mockCall({ + callee: Predeploys.CROSS_L2_INBOX, + data: abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" }); + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); + // Check that entered slot is cleared after the function call assertEq(l2ToL2CrossDomainMessenger.entered(), false); @@ -424,70 +439,36 @@ contract L2ToL2CrossDomainMessengerTest is Test { assertEq(l2ToL2CrossDomainMessenger.crossDomainMessageSender(), address(0)); } - /// @dev Tests that the `relayMessage` function reverts when the caller is not the CrossL2Inbox contract. 
- function testFuzz_relayMessage_callerNotCrossL2Inbox_reverts( - uint256 _destination, + /// @dev Tests that the `relayMessage` function reverts when log identifier is not the cdm + function testFuzz_relayMessage_idOriginNotL2ToL2CrossDomainMessenger_reverts( uint256 _source, uint256 _nonce, address _sender, address _target, bytes calldata _message, - uint256 _value + uint256 _value, + address _origin, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { - // Add sufficient value to the contract to relay the message with - vm.deal(address(this), _value); + // Incorrect identifier origin + vm.assume(_origin != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); - // Expect a revert with the RelayMessageCallerNotCrossL2Inbox selector - vm.expectRevert(RelayMessageCallerNotCrossL2Inbox.selector); - - // Call `relayMessage` with the current contract as the caller to provoke revert - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: _destination, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: _target, - _message: _message - }); - } + // Expect a revert with the IdOriginNotL2ToL2CrossDomainMessenger + vm.expectRevert(IdOriginNotL2ToL2CrossDomainMessenger.selector); - /// @dev Tests that the `relayMessage` function reverts when CrossL2Inbox's origin is not - /// L2ToL2CrossDomainMessenger. - function testFuzz_relayMessage_crossL2InboxOriginNotL2ToL2CrossDomainMessenger_reverts( - uint256 _destination, - uint256 _source, - uint256 _nonce, - address _sender, - address _target, - bytes calldata _message, - uint256 _value - ) - external - { - // Set address(0) as the origin of the CrossL2Inbox contract, which is not the L2ToL2CrossDomainMessenger - vm.mockCall({ - callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(address(0)) - }); - - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - - // Expect a revert with the CrossL2InboxOriginNotL2ToL2CrossDomainMessenger selector - vm.expectRevert(CrossL2InboxOriginNotL2ToL2CrossDomainMessenger.selector); + ICrossL2Inbox.Identifier memory id = ICrossL2Inbox.Identifier(_origin, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, block.chainid, _target, _nonce), // topics + abi.encode(_sender, _message) // data + ); - // Call `relayMessage` with invalid CrossL2Inbox origin to provoke revert - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: _destination, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: _target, - _message: _message - }); + // Call + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); } /// @dev Tests that the `relayMessage` function reverts when the destination is not the relay chain. 
@@ -498,35 +479,36 @@ contract L2ToL2CrossDomainMessengerTest is Test { address _sender, address _target, bytes calldata _message, - uint256 _value + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { // Ensure the destination is not this chain vm.assume(_destination != block.chainid); - // Mock the CrossL2Inbox origin to return the L2ToL2CrossDomainMessenger contract + // Expect a revert with the MessageDestinationNotRelayChain selector + vm.expectRevert(MessageDestinationNotRelayChain.selector); + + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, _destination, _target, _nonce), // topics + abi.encode(_sender, _message) // data + ); + + // Ensure the CrossL2Inbox validates this message vm.mockCall({ callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) + data: abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" }); - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - - // Expect a revert with the MessageDestinationNotRelayChain selector - vm.expectRevert(MessageDestinationNotRelayChain.selector); - // Call `relayMessage` - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: _destination, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: _target, - _message: _message - }); + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); } /// @dev Tests that the `relayMessage` function reverts when the message target is CrossL2Inbox. @@ -535,33 +517,37 @@ contract L2ToL2CrossDomainMessengerTest is Test { uint256 _nonce, address _sender, bytes calldata _message, - uint256 _value + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { - // Mock the CrossL2Inbox origin to return the L2ToL2CrossDomainMessenger contract - vm.mockCall({ - callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) - }); - - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - // Expect a revert with the MessageTargetCrossL2Inbox selector vm.expectRevert(MessageTargetCrossL2Inbox.selector); // Call `relayMessage` with CrossL2Inbox as the target to provoke revert. 
The current chain is the destination // to prevent revert due to invalid destination - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: block.chainid, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: Predeploys.CROSS_L2_INBOX, - _message: _message + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode( + L2ToL2CrossDomainMessenger.SentMessage.selector, block.chainid, Predeploys.CROSS_L2_INBOX, _nonce + ), // topics + abi.encode(_sender, _message) // data + ); + + // Ensure the CrossL2Inbox validates this message + vm.mockCall({ + callee: Predeploys.CROSS_L2_INBOX, + data: abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" }); + + // Call + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); } /// @dev Tests that the `relayMessage` function reverts when the message target is L2ToL2CrossDomainMessenger. @@ -570,33 +556,39 @@ contract L2ToL2CrossDomainMessengerTest is Test { uint256 _nonce, address _sender, bytes calldata _message, - uint256 _value + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { - // Mock the CrossL2Inbox origin to return the L2ToL2CrossDomainMessenger contract - vm.mockCall({ - callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) - }); - - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - // Expect a revert with the MessageTargetL2ToL2CrossDomainMessenger selector vm.expectRevert(MessageTargetL2ToL2CrossDomainMessenger.selector); // Call `relayMessage` with L2ToL2CrossDomainMessenger as the target to provoke revert. The current chain is the // destination to prevent revert due to invalid destination - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: block.chainid, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, - _message: _message + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode( + L2ToL2CrossDomainMessenger.SentMessage.selector, + block.chainid, + Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, + _nonce + ), // topics + abi.encode(_sender, _message) // data + ); + + // Ensure the CrossL2Inbox validates this message + vm.mockCall({ + callee: Predeploys.CROSS_L2_INBOX, + data: abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" }); + + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); } /// @dev Tests that the `relayMessage` function reverts when the message has already been relayed. 
@@ -606,7 +598,10 @@ contract L2ToL2CrossDomainMessengerTest is Test { address _sender, address _target, bytes calldata _message, - uint256 _value + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { @@ -622,48 +617,37 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Ensure that the target contract does not revert vm.mockCall({ callee: _target, msgValue: _value, data: _message, returnData: abi.encode(true) }); - // Mock the CrossL2Inbox origin to return the L2ToL2CrossDomainMessenger contract - vm.mockCall({ - callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) - }); - - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - // Look for correct emitted event for first call. vm.expectEmit(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); emit L2ToL2CrossDomainMessenger.RelayedMessage( - keccak256(abi.encode(block.chainid, _source, _nonce, _sender, _target, _message)) + _source, _nonce, keccak256(abi.encode(block.chainid, _source, _nonce, _sender, _target, _message)) ); - // First call to `relayMessage` should succeed. The current chain is the destination to prevent revert due to - // invalid destination - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: block.chainid, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: _target, - _message: _message + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, block.chainid, _target, _nonce), // topics + abi.encode(_sender, _message) // data + ); + + // Ensure the CrossL2Inbox validates this message + vm.mockCall({ + callee: Predeploys.CROSS_L2_INBOX, + data: abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" }); - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); + // First call to `relayMessage` should succeed. The current chain is the destination to prevent revert due to + // invalid destination + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); // Second call should fail with MessageAlreadyRelayed selector vm.expectRevert(MessageAlreadyRelayed.selector); // Call `relayMessage` again. The current chain is the destination to prevent revert due to invalid destination - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: block.chainid, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: _target, - _message: _message - }); + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); } /// @dev Tests that the `relayMessage` function reverts when the target call fails. 
@@ -673,7 +657,10 @@ contract L2ToL2CrossDomainMessengerTest is Test { address _sender, address _target, bytes calldata _message, - uint256 _value + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { @@ -686,30 +673,28 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Ensure that the target contract reverts vm.mockCallRevert({ callee: _target, msgValue: _value, data: _message, revertData: abi.encode(false) }); - // Mock the CrossL2Inbox origin to return the L2ToL2CrossDomainMessenger contract - vm.mockCall({ - callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) - }); - - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - // Look for correct emitted event vm.expectEmit(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); emit L2ToL2CrossDomainMessenger.FailedRelayedMessage( - keccak256(abi.encode(block.chainid, _source, _nonce, _sender, _target, _message)) + _source, _nonce, keccak256(abi.encode(block.chainid, _source, _nonce, _sender, _target, _message)) ); - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: block.chainid, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: _target, - _message: _message + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, block.chainid, _target, _nonce), // topics + abi.encode(_sender, _message) // data + ); + + // Ensure the CrossL2Inbox validates this message + vm.mockCall({ + callee: Predeploys.CROSS_L2_INBOX, + data: abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" }); + + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); } /// @dev Tests that the `crossDomainMessageSender` function returns the correct value. 
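For orientation, the message hash these tests store in `successfulMessages` keeps the same shape before and after the refactor. The sketch below simply reproduces the `keccak256(abi.encode(...))` expression used in the assertions; the canonical implementation is the renamed `Hashing.hashL2toL2CrossDomainMessage`, whose body is not part of this diff, and the function name here is illustrative.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;

/// Illustrative free function: the hash the tests above assert against is keccak256 over
/// the ABI-encoded (destination, source, nonce, sender, target, message) tuple.
function exampleL2ToL2MessageHash(
    uint256 _destination,
    uint256 _source,
    uint256 _nonce,
    address _sender,
    address _target,
    bytes memory _message
)
    pure
    returns (bytes32)
{
    return keccak256(abi.encode(_destination, _source, _nonce, _sender, _target, _message));
}
```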
diff --git a/packages/contracts-bedrock/test/Predeploys.t.sol b/packages/contracts-bedrock/test/L2/Predeploys.t.sol similarity index 100% rename from packages/contracts-bedrock/test/Predeploys.t.sol rename to packages/contracts-bedrock/test/L2/Predeploys.t.sol diff --git a/packages/contracts-bedrock/test/Preinstalls.t.sol b/packages/contracts-bedrock/test/L2/Preinstalls.t.sol similarity index 100% rename from packages/contracts-bedrock/test/Preinstalls.t.sol rename to packages/contracts-bedrock/test/L2/Preinstalls.t.sol diff --git a/packages/contracts-bedrock/test/cannon/MIPS.t.sol b/packages/contracts-bedrock/test/cannon/MIPS.t.sol index 9aafbdb5421d..998bc4d4aa79 100644 --- a/packages/contracts-bedrock/test/cannon/MIPS.t.sol +++ b/packages/contracts-bedrock/test/cannon/MIPS.t.sol @@ -1,14 +1,22 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Testing import { CommonTest } from "test/setup/CommonTest.sol"; + +// Contracts import { MIPS } from "src/cannon/MIPS.sol"; import { PreimageOracle } from "src/cannon/PreimageOracle.sol"; + +// Libraries import { MIPSInstructions } from "src/cannon/libraries/MIPSInstructions.sol"; import { MIPSSyscalls as sys } from "src/cannon/libraries/MIPSSyscalls.sol"; import { InvalidExitedValue, InvalidMemoryProof } from "src/cannon/libraries/CannonErrors.sol"; import "src/dispute/lib/Types.sol"; +// Interfaces +import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; + contract MIPS_Test is CommonTest { MIPS internal mips; PreimageOracle internal oracle; @@ -16,7 +24,7 @@ contract MIPS_Test is CommonTest { function setUp() public virtual override { super.setUp(); oracle = new PreimageOracle(0, 0); - mips = new MIPS(oracle); + mips = new MIPS(IPreimageOracle(address(oracle))); vm.store(address(mips), 0x0, bytes32(abi.encode(address(oracle)))); vm.label(address(oracle), "PreimageOracle"); vm.label(address(mips), "MIPS"); @@ -1605,7 +1613,7 @@ contract MIPS_Test is CommonTest { assertEq(postState, outputState(expect), "unexpected post state"); } - function test_fcntl_succeeds() external { + function test_fcntl_getfl_succeeds() external { uint32 insn = 0x0000000c; // syscall (MIPS.State memory state, bytes memory proof) = constructMIPSState(0, insn, 0x4, 0); state.registers[2] = 4055; // fcntl syscall @@ -1631,6 +1639,25 @@ contract MIPS_Test is CommonTest { assertEq(postState, outputState(expect), "unexpected post state"); } + function test_fcntl_getfd_succeeds() external { + uint32 insn = 0x0000000c; // syscall + (MIPS.State memory state, bytes memory proof) = constructMIPSState(0, insn, 0x4, 0); + state.registers[2] = 4055; // fcntl syscall + state.registers[4] = 0x0; // a0 + state.registers[5] = 0x1; // a1 + + MIPS.State memory expect; + expect.memRoot = state.memRoot; + expect.pc = state.nextPC; + expect.nextPC = state.nextPC + 4; + expect.step = state.step + 1; + expect.registers[2] = 0; + expect.registers[5] = state.registers[5]; + + bytes32 postState = mips.step(encodeState(state), proof, 0); + assertEq(postState, outputState(expect), "unexpected post state"); + } + function test_prestate_exited_succeeds() external { uint32 insn = 0x0000000c; // syscall (MIPS.State memory state, bytes memory proof) = constructMIPSState(0, insn, 0x4, 0); diff --git a/packages/contracts-bedrock/test/cannon/MIPS2.t.sol b/packages/contracts-bedrock/test/cannon/MIPS2.t.sol index 4c02d7a0bdd1..59b3e9e17eb4 100644 --- a/packages/contracts-bedrock/test/cannon/MIPS2.t.sol +++ b/packages/contracts-bedrock/test/cannon/MIPS2.t.sol @@ -1,13 +1,21 @@ 
// SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Testing import { CommonTest } from "test/setup/CommonTest.sol"; + +// Contracts import { MIPS2 } from "src/cannon/MIPS2.sol"; import { PreimageOracle } from "src/cannon/PreimageOracle.sol"; + +// Libraries import { MIPSSyscalls as sys } from "src/cannon/libraries/MIPSSyscalls.sol"; import { MIPSInstructions as ins } from "src/cannon/libraries/MIPSInstructions.sol"; -import "src/dispute/lib/Types.sol"; import { InvalidExitedValue, InvalidMemoryProof, InvalidSecondMemoryProof } from "src/cannon/libraries/CannonErrors.sol"; +import "src/dispute/lib/Types.sol"; + +// Interfaces +import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; contract ThreadStack { bytes32 internal constant EMPTY_THREAD_ROOT = hex"ad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"; @@ -127,7 +135,7 @@ contract MIPS2_Test is CommonTest { function setUp() public virtual override { super.setUp(); oracle = new PreimageOracle(0, 0); - mips = new MIPS2(oracle); + mips = new MIPS2(IPreimageOracle(address(oracle))); threading = new Threading(); vm.store(address(mips), 0x0, bytes32(abi.encode(address(oracle)))); vm.label(address(oracle), "PreimageOracle"); diff --git a/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol b/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol index bde1e1b9f893..8982eae96bf6 100644 --- a/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol +++ b/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol @@ -22,7 +22,7 @@ contract DelayedWETH_Init is CommonTest { super.setUp(); // Transfer ownership of delayed WETH to the test contract. - vm.prank(deploy.mustGetAddress("SystemOwnerSafe")); + vm.prank(delayedWeth.owner()); delayedWeth.transferOwnership(address(this)); } } diff --git a/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol b/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol index 6c3ed2a18944..9619832135e5 100644 --- a/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol +++ b/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol @@ -29,7 +29,7 @@ contract DisputeGameFactory_Init is CommonTest { fakeClone = new FakeClone(); // Transfer ownership of the factory to the test contract. - vm.prank(deploy.mustGetAddress("SystemOwnerSafe")); + vm.prank(disputeGameFactory.owner()); disputeGameFactory.transferOwnership(address(this)); } } diff --git a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol index bc6537e460e4..8cfb602e3d31 100644 --- a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol @@ -54,7 +54,7 @@ contract FaultDisputeGame_Init is DisputeGameFactory_Init { // Set preimage oracle challenge period to something arbitrary (4 seconds) just so we can // actually test the clock extensions later on. This is not a realistic value. PreimageOracle oracle = new PreimageOracle(0, 4); - AlphabetVM _vm = new AlphabetVM(absolutePrestate, oracle); + AlphabetVM _vm = new AlphabetVM(absolutePrestate, IPreimageOracle(address(oracle))); // Deploy an implementation of the fault game gameImpl = IFaultDisputeGame( @@ -123,7 +123,7 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { /// @dev Tests that the constructor of the `FaultDisputeGame` reverts when the `MAX_GAME_DEPTH` parameter is /// greater than `LibPosition.MAX_POSITION_BITLEN - 1`. 
function testFuzz_constructor_maxDepthTooLarge_reverts(uint256 _maxGameDepth) public { - AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, new PreimageOracle(0, 0)); + AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, IPreimageOracle(address(new PreimageOracle(0, 0)))); _maxGameDepth = bound(_maxGameDepth, LibPosition.MAX_POSITION_BITLEN, type(uint256).max - 1); vm.expectRevert(MaxDepthTooLarge.selector); @@ -148,7 +148,7 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { _challengePeriod = bound(_challengePeriod, uint256(type(uint64).max) + 1, type(uint256).max); PreimageOracle oracle = new PreimageOracle(0, 0); - AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, oracle); + AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, IPreimageOracle(address(oracle))); // PreimageOracle constructor will revert if the challenge period is too large, so we need // to mock the call to pretend this is a bugged implementation where the challenge period @@ -175,7 +175,7 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { /// @dev Tests that the constructor of the `FaultDisputeGame` reverts when the `_splitDepth` /// parameter is greater than or equal to the `MAX_GAME_DEPTH` function testFuzz_constructor_invalidSplitDepth_reverts(uint256 _splitDepth) public { - AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, new PreimageOracle(0, 0)); + AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, IPreimageOracle(address(new PreimageOracle(0, 0)))); uint256 maxGameDepth = 2 ** 3; _splitDepth = bound(_splitDepth, maxGameDepth - 1, type(uint256).max); @@ -197,7 +197,7 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { /// @dev Tests that the constructor of the `FaultDisputeGame` reverts when the `_splitDepth` /// parameter is less than the minimum split depth (currently 2). function testFuzz_constructor_lowSplitDepth_reverts(uint256 _splitDepth) public { - AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, new PreimageOracle(0, 0)); + AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, IPreimageOracle(address(new PreimageOracle(0, 0)))); uint256 minSplitDepth = 2; _splitDepth = bound(_splitDepth, 0, minSplitDepth - 1); @@ -224,7 +224,7 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { ) public { - AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, new PreimageOracle(0, 0)); + AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, IPreimageOracle(address(new PreimageOracle(0, 0)))); // Force the clock extension * 2 to be greater than the max clock duration, but keep things within // bounds of the uint64 type. diff --git a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol index 8a3639b71cdf..36577a836df1 100644 --- a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol @@ -47,7 +47,7 @@ contract PermissionedDisputeGame_Init is DisputeGameFactory_Init { // Set the extra data for the game creation extraData = abi.encode(l2BlockNumber); - AlphabetVM _vm = new AlphabetVM(absolutePrestate, new PreimageOracle(0, 0)); + AlphabetVM _vm = new AlphabetVM(absolutePrestate, IPreimageOracle(address(new PreimageOracle(0, 0)))); // Use a 7 day delayed WETH to simulate withdrawals. 
IDelayedWETH _weth = IDelayedWETH(payable(new DelayedWETH(7 days))); diff --git a/packages/contracts-bedrock/test/kontrol/README.md b/packages/contracts-bedrock/test/kontrol/README.md index 0a6dcec7c79b..25660756963c 100644 --- a/packages/contracts-bedrock/test/kontrol/README.md +++ b/packages/contracts-bedrock/test/kontrol/README.md @@ -39,7 +39,6 @@ The directory is structured as follows │ ├── KontrolDeployment.sol: Deployment sequence for Kontrol proofs ├── proofs: Where the proofs (tests) themselves live │ ├── *.k.sol: Symbolic property tests for contracts -│ ├── interfaces: Interface files for src contracts, to avoid unnecessary compilation of contracts │ └── utils: Proof dependencies, including the autogenerated deployment summary contracts └── scripts: Where the scripts of the projects live ├── json: Data cleaning scripts for the output of KontrolDeployment.sol @@ -122,23 +121,19 @@ The next step is to include tests for the newly included state updates in [`Depl It might be necessary to set some of the existing tests from [`test`](../L1) as virtual because they can't be executed as is. See [`DeploymentSummary.t.sol`](deployment/DeploymentSummary.t.sol) for more concrete examples. -#### Add function signatures to [`KontrolInterfaces`](./proofs/interfaces/KontrolInterfaces.sol) - -So far we've got all the state updates ready to be added to the initial configuration of each proof, but we cannot yet write any proof about the function. We still need to add the relevant signatures into `KontrolInterfaces`. The reason for having `KontrolInterfaces` instead of using directly the contracts is to reduce the amount of compiled contracts by Kontrol. -In the future there might interfaces for all contracts under `contracts-bedrock`, which would imply the removal of `KontrolInterfaces`. - #### Write the proof Write your proof in a `.k.sol` file in the [`proofs`](./proofs/) folder, which is the `test` directory used by the `kprove` profile to run the proofs (see [Deployment Summary Process](#deployment-summary-process)). The name of the new proofs should start with `prove` (or `check`) instead of `test` to avoid `forge test` running them. The reason for this is that if Kontrol cheatcodes (see [Kontrol's own cheatcodes](https://github.com/runtimeverification/kontrol-cheatcodes/blob/master/src/KontrolCheats.sol)) are used in a test, it will not be runnable by `forge`. Currently, none of the tests are using custom Kontrol cheatcodes, but this is something to bear in mind. To reference the correct addresses for writing the tests, first import the signatures as in this example: + ```solidity -import { - IOptimismPortal as OptimismPortal, - ISuperchainConfig as SuperchainConfig -} from "./interfaces/KontrolInterfaces.sol"; +import { IOptimismPortal as OptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; +import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; ``` + Declare the correspondent variables and cast the correct signatures to the correct addresses: + ```solidity OptimismPortal optimismPortal; SuperchainConfig superchainConfig; @@ -148,6 +143,7 @@ function setUp() public { superchainConfig = SuperchainConfig(superchainConfigProxyAddress); } ``` + Note that the names of the addresses come from [`DeploymentSummary.t.sol`](deployment/DeploymentSummary.t.sol) and are automatically generated by the [`make-summary-deployment.sh`](./scripts/make-summary-deployment.sh) script. 
#### Add your test to [`run-kontrol.sh`](./scripts/run-kontrol.sh) diff --git a/packages/contracts-bedrock/test/kontrol/proofs/L1CrossDomainMessenger.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/L1CrossDomainMessenger.k.sol index d748cd24b4a7..60edd1dc4655 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/L1CrossDomainMessenger.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/L1CrossDomainMessenger.k.sol @@ -3,10 +3,8 @@ pragma solidity ^0.8.13; import { DeploymentSummary } from "./utils/DeploymentSummary.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; -import { - IL1CrossDomainMessenger as L1CrossDomainMessenger, - ISuperchainConfig as SuperchainConfig -} from "./interfaces/KontrolInterfaces.sol"; +import { IL1CrossDomainMessenger as L1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; +import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; contract L1CrossDomainMessengerKontrol is DeploymentSummary, KontrolUtils { L1CrossDomainMessenger l1CrossDomainMessenger; diff --git a/packages/contracts-bedrock/test/kontrol/proofs/L1ERC721Bridge.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/L1ERC721Bridge.k.sol index 43803f31a3e8..f7887f0f1a71 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/L1ERC721Bridge.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/L1ERC721Bridge.k.sol @@ -4,11 +4,9 @@ pragma solidity ^0.8.13; import { DeploymentSummary } from "./utils/DeploymentSummary.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; import { Types } from "src/libraries/Types.sol"; -import { - IL1ERC721Bridge as L1ERC721Bridge, - IL1CrossDomainMessenger as CrossDomainMessenger, - ISuperchainConfig as SuperchainConfig -} from "./interfaces/KontrolInterfaces.sol"; +import { IL1ERC721Bridge as L1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; +import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { ICrossDomainMessenger as CrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; contract L1ERC721BridgeKontrol is DeploymentSummary, KontrolUtils { L1ERC721Bridge l1ERC721Bridge; diff --git a/packages/contracts-bedrock/test/kontrol/proofs/L1StandardBridge.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/L1StandardBridge.k.sol index 0b710fc01e51..8cefd5546e9e 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/L1StandardBridge.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/L1StandardBridge.k.sol @@ -4,11 +4,9 @@ pragma solidity ^0.8.13; import { DeploymentSummary } from "./utils/DeploymentSummary.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; import { Types } from "src/libraries/Types.sol"; -import { - IL1StandardBridge as L1StandardBridge, - IL1CrossDomainMessenger as CrossDomainMessenger, - ISuperchainConfig as SuperchainConfig -} from "./interfaces/KontrolInterfaces.sol"; +import { IL1StandardBridge as L1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; +import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { ICrossDomainMessenger as CrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; contract L1StandardBridgeKontrol is DeploymentSummary, KontrolUtils { L1StandardBridge l1standardBridge; diff --git a/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal.k.sol 
index 969c69349ae4..f0cf6cac7734 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal.k.sol @@ -4,10 +4,8 @@ pragma solidity ^0.8.13; import { DeploymentSummary } from "./utils/DeploymentSummary.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; import { Types } from "src/libraries/Types.sol"; -import { - IOptimismPortal as OptimismPortal, - ISuperchainConfig as SuperchainConfig -} from "./interfaces/KontrolInterfaces.sol"; +import { IOptimismPortal as OptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; +import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; import "src/libraries/PortalErrors.sol"; contract OptimismPortalKontrol is DeploymentSummary, KontrolUtils { diff --git a/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal2.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal2.k.sol index 18a1b579417a..d561b8b85092 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal2.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal2.k.sol @@ -4,10 +4,8 @@ pragma solidity ^0.8.13; import { DeploymentSummaryFaultProofs } from "./utils/DeploymentSummaryFaultProofs.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; import { Types } from "src/libraries/Types.sol"; -import { - IOptimismPortal as OptimismPortal, - ISuperchainConfig as SuperchainConfig -} from "./interfaces/KontrolInterfaces.sol"; +import { IOptimismPortal as OptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; +import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; import "src/libraries/PortalErrors.sol"; contract OptimismPortal2Kontrol is DeploymentSummaryFaultProofs, KontrolUtils { diff --git a/packages/contracts-bedrock/test/kontrol/proofs/interfaces/KontrolInterfaces.sol b/packages/contracts-bedrock/test/kontrol/proofs/interfaces/KontrolInterfaces.sol deleted file mode 100644 index 831d208b9ac6..000000000000 --- a/packages/contracts-bedrock/test/kontrol/proofs/interfaces/KontrolInterfaces.sol +++ /dev/null @@ -1,83 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -import { Types } from "src/libraries/Types.sol"; - -interface IOptimismPortal { - function guardian() external view returns (address); - - function paused() external view returns (bool paused_); - - function proveWithdrawalTransaction( - Types.WithdrawalTransaction memory _tx, - uint256 _l2OutputIndex, - Types.OutputRootProof calldata _outputRootProof, - bytes[] calldata _withdrawalProof - ) - external; - - function finalizeWithdrawalTransaction(Types.WithdrawalTransaction memory _tx) external; -} - -interface ISuperchainConfig { - function guardian() external view returns (address); - - function paused() external view returns (bool paused_); - - function pause(string memory _identifier) external; - - function unpause() external; -} - -interface IL1StandardBridge { - function paused() external view returns (bool); - - function messenger() external view returns (IL1CrossDomainMessenger); - - function otherBridge() external view returns (IL1StandardBridge); - - function finalizeBridgeERC20( - address _localToken, - address _remoteToken, - address _from, - address _to, - uint256 _amount, - bytes calldata _extraData - ) - external; - - function finalizeBridgeETH(address _from, address _to, uint256 _amount, bytes calldata _extraData) external; -} - -interface 
IL1ERC721Bridge { - function paused() external view returns (bool); - - function messenger() external view returns (IL1CrossDomainMessenger); - - function otherBridge() external view returns (IL1StandardBridge); - - function finalizeBridgeERC721( - address _localToken, - address _remoteToken, - address _from, - address _to, - uint256 _amount, - bytes calldata _extraData - ) - external; -} - -interface IL1CrossDomainMessenger { - function relayMessage( - uint256 _nonce, - address _sender, - address _target, - uint256 _value, - uint256 _minGasLimit, - bytes calldata _message - ) - external - payable; - - function xDomainMessageSender() external view returns (address); -} diff --git a/packages/contracts-bedrock/test/kontrol/scripts/make-summary-deployment.sh b/packages/contracts-bedrock/test/kontrol/scripts/make-summary-deployment.sh index ea7abbf4cc3c..7d7b8da150f3 100755 --- a/packages/contracts-bedrock/test/kontrol/scripts/make-summary-deployment.sh +++ b/packages/contracts-bedrock/test/kontrol/scripts/make-summary-deployment.sh @@ -56,9 +56,12 @@ if [ "$KONTROL_FP_DEPLOYMENT" = true ]; then SCRIPT_SIG="runKontrolDeploymentFaultProofs()" fi +# The sender just needs to be anything other than the default sender (0x1804c8AB1F12E6bbf3894d4083f33e07309d1f38). +# Otherwise, state changes made inside Deploy.s.sol get stored in the state diff under the default script address (0x7FA9385bE102ac3EAc297483Dd6233D62b3e1496), +# which conflicts with other operations inside Kontrol and leads to errors that are hard to debug. DEPLOY_CONFIG_PATH=deploy-config/hardhat.json \ DEPLOYMENT_OUTFILE="$CONTRACT_NAMES" \ - forge script -vvv test/kontrol/deployment/KontrolDeployment.sol:KontrolDeployment --sig $SCRIPT_SIG + forge script --sender 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 -vvv test/kontrol/deployment/KontrolDeployment.sol:KontrolDeployment --sig $SCRIPT_SIG echo "Created state diff json" # Clean and store the state diff json in snapshots/state-diff/Kontrol-Deploy.json diff --git a/packages/contracts-bedrock/test/mocks/AlphabetVM.sol b/packages/contracts-bedrock/test/mocks/AlphabetVM.sol index b5d940c1cf6e..6ecf74e22868 100644 --- a/packages/contracts-bedrock/test/mocks/AlphabetVM.sol +++ b/packages/contracts-bedrock/test/mocks/AlphabetVM.sol @@ -1,10 +1,13 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.15; -import { IBigStepper, IPreimageOracle } from "src/dispute/interfaces/IBigStepper.sol"; -import { PreimageOracle, PreimageKeyLib } from "src/cannon/PreimageOracle.sol"; +// Libraries +import { PreimageKeyLib } from "src/cannon/PreimageKeyLib.sol"; import "src/dispute/lib/Types.sol"; +// Interfaces +import { IBigStepper, IPreimageOracle } from "src/dispute/interfaces/IBigStepper.sol"; + /// @title AlphabetVM /// @dev A mock VM for the purpose of testing the dispute game infrastructure. Note that this only works /// for games with an execution trace subgame max depth of 3 (8 instructions per subgame). 
@@ -12,7 +15,7 @@ contract AlphabetVM is IBigStepper { Claim internal immutable ABSOLUTE_PRESTATE; IPreimageOracle public oracle; - constructor(Claim _absolutePrestate, PreimageOracle _oracle) { + constructor(Claim _absolutePrestate, IPreimageOracle _oracle) { ABSOLUTE_PRESTATE = _absolutePrestate; oracle = _oracle; } diff --git a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol index 1dca71b4ec70..7dd603a55c9d 100644 --- a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol @@ -69,31 +69,21 @@ contract DeployImplementationsInput_Test is Test { dii.protocolVersionsProxy(); vm.expectRevert("DeployImplementationsInput: not set"); - dii.superchainProxyAdmin(); + dii.opcmProxyOwner(); vm.expectRevert("DeployImplementationsInput: not set"); dii.standardVersionsToml(); } - function test_superchainProxyAdmin_whenNotSet_reverts() public { + function test_opcmProxyOwner_whenNotSet_reverts() public { vm.expectRevert("DeployImplementationsInput: not set"); - dii.superchainProxyAdmin(); - - dii.set(dii.superchainConfigProxy.selector, address(superchainConfigProxy)); - vm.expectRevert(); - dii.superchainProxyAdmin(); - - Proxy noAdminProxy = new Proxy(address(0)); - dii.set(dii.superchainConfigProxy.selector, address(noAdminProxy)); - vm.expectRevert("DeployImplementationsInput: not set"); - dii.superchainProxyAdmin(); + dii.opcmProxyOwner(); } - function test_superchainProxyAdmin_succeeds() public { - Proxy proxyWithAdminSet = new Proxy(msg.sender); - dii.set(dii.superchainConfigProxy.selector, address(proxyWithAdminSet)); - ProxyAdmin proxyAdmin = dii.superchainProxyAdmin(); - assertEq(address(msg.sender), address(proxyAdmin), "100"); + function test_opcmProxyOwner_succeeds() public { + dii.set(dii.opcmProxyOwner.selector, address(msg.sender)); + address opcmProxyOwner = dii.opcmProxyOwner(); + assertEq(address(msg.sender), address(opcmProxyOwner), "100"); } } @@ -433,6 +423,7 @@ contract DeployImplementations_Test is Test { dii.set(dii.release.selector, release); dii.set(dii.superchainConfigProxy.selector, address(superchainConfigProxy)); dii.set(dii.protocolVersionsProxy.selector, address(protocolVersionsProxy)); + dii.set(dii.opcmProxyOwner.selector, msg.sender); deployImplementations.run(dii, dio); @@ -445,7 +436,7 @@ contract DeployImplementations_Test is Test { assertEq(release, dii.release(), "525"); assertEq(address(superchainConfigProxy), address(dii.superchainConfigProxy()), "550"); assertEq(address(protocolVersionsProxy), address(dii.protocolVersionsProxy()), "575"); - assertEq(address(superchainProxyAdmin), address(dii.superchainProxyAdmin()), "580"); + assertEq(msg.sender, dii.opcmProxyOwner(), "580"); // Architecture assertions. 
assertEq(address(dio.mipsSingleton().oracle()), address(dio.preimageOracleSingleton()), "600"); diff --git a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol index 7390b1bd110c..bd17e43bd26b 100644 --- a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol @@ -142,8 +142,9 @@ contract DeployOPChainOutput_Test is Test { FaultDisputeGame faultDisputeGame = FaultDisputeGame(makeAddr("faultDisputeGame")); PermissionedDisputeGame permissionedDisputeGame = PermissionedDisputeGame(makeAddr("permissionedDisputeGame")); DelayedWETH delayedWETHPermissionedGameProxy = DelayedWETH(payable(makeAddr("delayedWETHPermissionedGameProxy"))); - DelayedWETH delayedWETHPermissionlessGameProxy = - DelayedWETH(payable(makeAddr("delayedWETHPermissionlessGameProxy"))); + // TODO: Eventually switch from Permissioned to Permissionless. + // DelayedWETH delayedWETHPermissionlessGameProxy = + // DelayedWETH(payable(makeAddr("delayedWETHPermissionlessGameProxy"))); function setUp() public { doo = new DeployOPChainOutput(); @@ -164,7 +165,8 @@ contract DeployOPChainOutput_Test is Test { vm.etch(address(faultDisputeGame), hex"01"); vm.etch(address(permissionedDisputeGame), hex"01"); vm.etch(address(delayedWETHPermissionedGameProxy), hex"01"); - vm.etch(address(delayedWETHPermissionlessGameProxy), hex"01"); + // TODO: Eventually switch from Permissioned to Permissionless. + // vm.etch(address(delayedWETHPermissionlessGameProxy), hex"01"); doo.set(doo.opChainProxyAdmin.selector, address(opChainProxyAdmin)); doo.set(doo.addressManager.selector, address(addressManager)); @@ -180,7 +182,8 @@ contract DeployOPChainOutput_Test is Test { doo.set(doo.faultDisputeGame.selector, address(faultDisputeGame)); doo.set(doo.permissionedDisputeGame.selector, address(permissionedDisputeGame)); doo.set(doo.delayedWETHPermissionedGameProxy.selector, address(delayedWETHPermissionedGameProxy)); - doo.set(doo.delayedWETHPermissionlessGameProxy.selector, address(delayedWETHPermissionlessGameProxy)); + // TODO: Eventually switch from Permissioned to Permissionless. + // doo.set(doo.delayedWETHPermissionlessGameProxy.selector, address(delayedWETHPermissionlessGameProxy)); assertEq(address(opChainProxyAdmin), address(doo.opChainProxyAdmin()), "100"); assertEq(address(addressManager), address(doo.addressManager()), "200"); @@ -196,7 +199,9 @@ contract DeployOPChainOutput_Test is Test { assertEq(address(faultDisputeGame), address(doo.faultDisputeGame()), "1300"); assertEq(address(permissionedDisputeGame), address(doo.permissionedDisputeGame()), "1400"); assertEq(address(delayedWETHPermissionedGameProxy), address(doo.delayedWETHPermissionedGameProxy()), "1500"); - assertEq(address(delayedWETHPermissionlessGameProxy), address(doo.delayedWETHPermissionlessGameProxy()), "1600"); + // TODO: Eventually switch from Permissioned to Permissionless. + // assertEq(address(delayedWETHPermissionlessGameProxy), address(doo.delayedWETHPermissionlessGameProxy()), + // "1600"); } function test_getters_whenNotSet_revert() public { @@ -244,8 +249,9 @@ contract DeployOPChainOutput_Test is Test { vm.expectRevert(expectedErr); doo.delayedWETHPermissionedGameProxy(); - vm.expectRevert(expectedErr); - doo.delayedWETHPermissionlessGameProxy(); + // TODO: Eventually switch from Permissioned to Permissionless. 
+ // vm.expectRevert(expectedErr); + // doo.delayedWETHPermissionlessGameProxy(); } function test_getters_whenAddrHasNoCode_reverts() public { @@ -308,9 +314,10 @@ contract DeployOPChainOutput_Test is Test { vm.expectRevert(expectedErr); doo.delayedWETHPermissionedGameProxy(); - doo.set(doo.delayedWETHPermissionlessGameProxy.selector, emptyAddr); - vm.expectRevert(expectedErr); - doo.delayedWETHPermissionlessGameProxy(); + // TODO: Eventually switch from Permissioned to Permissionless. + // doo.set(doo.delayedWETHPermissionlessGameProxy.selector, emptyAddr); + // vm.expectRevert(expectedErr); + // doo.delayedWETHPermissionlessGameProxy(); } } @@ -410,6 +417,7 @@ contract DeployOPChain_TestBase is Test { string.concat(vm.projectRoot(), "/test/fixtures/standard-versions.toml"); string memory standardVersionsToml = vm.readFile(standardVersionsTomlPath); dii.set(dii.standardVersionsToml.selector, standardVersionsToml); + dii.set(dii.opcmProxyOwner.selector, address(1)); deployImplementations.run(dii, dio); // Deploy DeployOpChain, but defer populating the input values to the test suites inheriting this contract. diff --git a/packages/contracts-bedrock/test/BenchmarkTest.t.sol b/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol similarity index 100% rename from packages/contracts-bedrock/test/BenchmarkTest.t.sol rename to packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol diff --git a/packages/contracts-bedrock/test/ExtendedPause.t.sol b/packages/contracts-bedrock/test/universal/ExtendedPause.t.sol similarity index 100% rename from packages/contracts-bedrock/test/ExtendedPause.t.sol rename to packages/contracts-bedrock/test/universal/ExtendedPause.t.sol diff --git a/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol b/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol index 07aa2c61958d..cba5fc829086 100644 --- a/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol +++ b/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol @@ -9,9 +9,9 @@ import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Contracts import { OptimismMintableERC20 } from "src/universal/OptimismMintableERC20.sol"; import { OptimismMintableERC20Factory } from "src/universal/OptimismMintableERC20Factory.sol"; -import { Proxy } from "src/universal/Proxy.sol"; // Interfaces +import { IProxy } from "src/universal/interfaces/IProxy.sol"; import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; contract OptimismMintableTokenFactory_Test is Bridge_Initializer { @@ -33,7 +33,7 @@ contract OptimismMintableTokenFactory_Test is Bridge_Initializer { /// @notice Tests that the upgrade is successful. function test_upgrading_succeeds() external { - Proxy proxy = Proxy(deploy.mustGetAddress("OptimismMintableERC20FactoryProxy")); + IProxy proxy = IProxy(deploy.mustGetAddress("OptimismMintableERC20FactoryProxy")); // Check an unused slot before upgrading. 
bytes32 slot21Before = vm.load(address(l1OptimismMintableERC20Factory), bytes32(uint256(21))); assertEq(bytes32(0), slot21Before); diff --git a/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol b/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol index 4de72c872572..e212644c9d50 100644 --- a/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol +++ b/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol @@ -1,13 +1,19 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Testing import { Test } from "forge-std/Test.sol"; +import { SimpleStorage } from "test/universal/Proxy.t.sol"; + +// Contracts import { Proxy } from "src/universal/Proxy.sol"; import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; -import { SimpleStorage } from "test/universal/Proxy.t.sol"; +import { AddressManager } from "src/legacy/AddressManager.sol"; import { L1ChugSplashProxy } from "src/legacy/L1ChugSplashProxy.sol"; import { ResolvedDelegateProxy } from "src/legacy/ResolvedDelegateProxy.sol"; -import { AddressManager } from "src/legacy/AddressManager.sol"; + +// Interfaces +import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; contract ProxyAdmin_Test is Test { address alice = address(64); @@ -45,7 +51,7 @@ contract ProxyAdmin_Test is Test { // Set the address of the address manager in the admin so that it // can resolve the implementation address of legacy // ResolvedDelegateProxy based proxies. - admin.setAddressManager(addressManager); + admin.setAddressManager(IAddressManager(address(addressManager))); // Set the reverse lookup of the ResolvedDelegateProxy // proxy admin.setImplementationName(address(resolved), "a"); @@ -67,7 +73,7 @@ contract ProxyAdmin_Test is Test { function test_setAddressManager_notOwner_reverts() external { vm.expectRevert("Ownable: caller is not the owner"); - admin.setAddressManager(AddressManager((address(0)))); + admin.setAddressManager(IAddressManager((address(0)))); } function test_setImplementationName_notOwner_reverts() external { diff --git a/packages/contracts-bedrock/test/Specs.t.sol b/packages/contracts-bedrock/test/universal/Specs.t.sol similarity index 100% rename from packages/contracts-bedrock/test/Specs.t.sol rename to packages/contracts-bedrock/test/universal/Specs.t.sol