diff --git a/common.gypi b/common.gypi index b8c5cf1da767aa..7dea2696d2bc40 100644 --- a/common.gypi +++ b/common.gypi @@ -36,7 +36,7 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. - 'v8_embedder_string': '-node.19', + 'v8_embedder_string': '-node.12', ##### V8 defaults for Node.js ##### diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore index 820bac62f897a5..f9f655b8b1b3d6 100644 --- a/deps/v8/.gitignore +++ b/deps/v8/.gitignore @@ -51,7 +51,7 @@ /src/inspector/build/closure-compiler /src/inspector/build/closure-compiler.tar.gz /test/benchmarks/data -/test/fuzzer/wasm_corpus +/test/fuzzer/wasm_corpus/ /test/fuzzer/wasm_corpus.tar.gz !/test/mjsunit/tools/*.log /test/mozilla/data diff --git a/deps/v8/.gn b/deps/v8/.gn index 1ca4a2e4684b45..3a73ff4e2a1a08 100644 --- a/deps/v8/.gn +++ b/deps/v8/.gn @@ -25,10 +25,6 @@ no_check_targets = [ ] default_args = { - # Overwrite default args declared in the Fuchsia sdk - # Please maintain this in sync with Chromium version in src/.gn - fuchsia_target_api_level = 9 - # Disable rust dependencies. enable_rust = false } diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index cfc1360fa68ce9..80aaead7f55fd6 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -177,6 +177,7 @@ Kris Selden Kyounga Ra Loo Rong Jie Lu Yahan +Ludovic Mermod Luis Reis Luke Albao Luke Zarko diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel index 3308433ce78dfe..b0cf80ede4226c 100644 --- a/deps/v8/BUILD.bazel +++ b/deps/v8/BUILD.bazel @@ -829,6 +829,7 @@ filegroup( "src/builtins/array-findlastindex.tq", "src/builtins/array-foreach.tq", "src/builtins/array-from.tq", + "src/builtins/array-from-async.tq", "src/builtins/array-isarray.tq", "src/builtins/array-join.tq", "src/builtins/array-lastindexof.tq", @@ -1010,6 +1011,7 @@ filegroup( "src/objects/name.tq", "src/objects/oddball.tq", "src/objects/hole.tq", + "src/objects/trusted-object.tq", "src/objects/ordered-hash-table.tq", "src/objects/primitive-heap-object.tq", "src/objects/promise.tq", @@ -1516,6 +1518,7 @@ filegroup( "src/heap/cppgc-js/wrappable-info-inl.h", "src/heap/ephemeron-remembered-set.h", "src/heap/ephemeron-remembered-set.cc", + "src/heap/evacuation-allocator.cc", "src/heap/evacuation-allocator.h", "src/heap/evacuation-allocator-inl.h", "src/heap/evacuation-verifier.cc", @@ -1572,6 +1575,9 @@ filegroup( "src/heap/local-heap.cc", "src/heap/local-heap.h", "src/heap/local-heap-inl.h", + "src/heap/main-allocator.cc", + "src/heap/main-allocator.h", + "src/heap/main-allocator-inl.h", "src/heap/mark-compact.cc", "src/heap/mark-compact.h", "src/heap/mark-compact-inl.h", @@ -1945,6 +1951,8 @@ filegroup( "src/objects/oddball.h", "src/objects/oddball-inl.h", "src/objects/hole.h", + "src/objects/trusted-object.h", + "src/objects/trusted-object-inl.h", "src/objects/hole-inl.h", "src/objects/option-utils.cc", "src/objects/option-utils.h", @@ -2197,9 +2205,13 @@ filegroup( "src/sandbox/code-pointer-table.cc", "src/sandbox/code-pointer-table.h", "src/sandbox/code-pointer-table-inl.h", + "src/sandbox/indirect-pointer-table.cc", + "src/sandbox/indirect-pointer-table.h", + "src/sandbox/indirect-pointer-table-inl.h", "src/sandbox/code-pointer.h", "src/sandbox/code-pointer-inl.h", "src/sandbox/indirect-pointer.h", + "src/sandbox/indirect-pointer-tag.h", "src/sandbox/indirect-pointer-inl.h", "src/sandbox/external-entity-table.h", "src/sandbox/external-entity-table-inl.h", @@ -2578,6 +2590,8 @@ filegroup( "src/asmjs/asm-scanner.h", "src/asmjs/asm-types.cc", 
"src/asmjs/asm-types.h", + "src/compiler/turboshaft/wasm-dead-code-elimination-phase.cc", + "src/compiler/turboshaft/wasm-dead-code-elimination-phase.h", "src/debug/debug-wasm-objects.cc", "src/debug/debug-wasm-objects.h", "src/debug/debug-wasm-objects-inl.h", @@ -2650,6 +2664,7 @@ filegroup( "src/wasm/value-type.cc", "src/wasm/value-type.h", "src/wasm/wasm-arguments.h", + "src/wasm/wasm-builtin-list.h", "src/wasm/wasm-code-manager.cc", "src/wasm/wasm-code-manager.h", "src/wasm/wasm-debug.cc", @@ -3020,6 +3035,10 @@ filegroup( "src/compiler/turboshaft/late-load-elimination-reducer.cc", "src/compiler/turboshaft/late-load-elimination-reducer.h", "src/compiler/turboshaft/layered-hash-map.h", + "src/compiler/turboshaft/loop-unrolling-phase.cc", + "src/compiler/turboshaft/loop-unrolling-phase.h", + "src/compiler/turboshaft/loop-unrolling-reducer.cc", + "src/compiler/turboshaft/loop-unrolling-reducer.h", "src/compiler/turboshaft/machine-lowering-phase.cc", "src/compiler/turboshaft/machine-lowering-phase.h", "src/compiler/turboshaft/machine-lowering-reducer.h", @@ -3052,6 +3071,8 @@ filegroup( "src/compiler/turboshaft/simplify-tf-loops.cc", "src/compiler/turboshaft/simplify-tf-loops.h", "src/compiler/turboshaft/snapshot-table.h", + "src/compiler/turboshaft/snapshot-table-opindex.h", + "src/compiler/turboshaft/stack-check-reducer.h", "src/compiler/turboshaft/store-store-elimination-phase.cc", "src/compiler/turboshaft/store-store-elimination-phase.h", "src/compiler/turboshaft/store-store-elimination-reducer.h", @@ -3158,6 +3179,10 @@ filegroup( "src/compiler/turboshaft/int64-lowering-phase.h", "src/compiler/turboshaft/int64-lowering-reducer.h", "src/compiler/turboshaft/wasm-lowering-reducer.h", + "src/compiler/turboshaft/wasm-gc-optimize-phase.cc", + "src/compiler/turboshaft/wasm-gc-optimize-phase.h", + "src/compiler/turboshaft/wasm-gc-type-reducer.cc", + "src/compiler/turboshaft/wasm-gc-type-reducer.h", "src/compiler/turboshaft/wasm-optimize-phase.cc", "src/compiler/turboshaft/wasm-optimize-phase.h", "src/compiler/turboshaft/wasm-turboshaft-compiler.cc", @@ -3401,6 +3426,8 @@ filegroup( "src/heap/base/bytes.h", "src/heap/base/incremental-marking-schedule.cc", "src/heap/base/incremental-marking-schedule.h", + "src/heap/base/memory-tagging.h", + "src/heap/base/memory-tagging.cc", "src/heap/base/stack.cc", "src/heap/base/stack.h", "src/heap/base/worklist.cc", diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 7e1e4b833d9c3c..025203d66c73a8 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -337,10 +337,6 @@ declare_args() { # Sets -DV8_ENABLE_SANDBOX. v8_enable_sandbox = "" - # Enable experimental code pointer sandboxing for the V8 sandbox. - # Sets -DV8_CODE_POINTER_SANDBOXING - v8_code_pointer_sandboxing = "" - # Expose the memory corruption API to JavaScript. Useful for testing the sandbox. # WARNING This will expose builtins that (by design) cause memory corruption. # Sets -DV8_EXPOSE_MEMORY_CORRUPTION_API @@ -568,11 +564,6 @@ if (v8_enable_sandbox == "") { v8_enable_external_code_space && target_os != "fuchsia" } -if (v8_code_pointer_sandboxing == "") { - # By default, enable code pointer sandboxing if the sandbox is enabled. - v8_code_pointer_sandboxing = v8_enable_sandbox -} - if (v8_enable_static_roots == "") { # Static roots are only valid for builds with pointer compression and a # shared read-only heap. 
@@ -674,8 +665,8 @@ assert(!v8_enable_sandbox || v8_enable_pointer_compression_shared_cage, assert(!v8_enable_sandbox || v8_enable_external_code_space, "The sandbox requires the external code space") -assert(!v8_code_pointer_sandboxing || v8_enable_sandbox, - "Code pointer sandboxing requires the sandbox") +assert(!v8_enable_sandbox || !v8_enable_third_party_heap, + "The sandbox is incompatible with the third-party heap") assert(!v8_expose_memory_corruption_api || v8_enable_sandbox, "The Memory Corruption API requires the sandbox") @@ -769,7 +760,7 @@ config("internal_config") { config("v8_tracing_config") { if (v8_use_perfetto) { include_dirs = [ - "third_party/perfetto/include", + "//third_party/perfetto/include", "$root_gen_dir/third_party/perfetto", "$root_gen_dir/third_party/perfetto/build_config", ] @@ -1210,9 +1201,6 @@ config("features") { if (v8_enable_wasm_simd256_revec) { defines += [ "V8_ENABLE_WASM_SIMD256_REVEC" ] } - if (v8_code_pointer_sandboxing) { - defines += [ "V8_CODE_POINTER_SANDBOXING" ] - } if (v8_enable_maglev_graph_printer) { defines += [ "V8_ENABLE_MAGLEV_GRAPH_PRINTER" ] } @@ -1842,6 +1830,7 @@ torque_files = [ "src/builtins/array-findlast.tq", "src/builtins/array-findlastindex.tq", "src/builtins/array-foreach.tq", + "src/builtins/array-from-async.tq", "src/builtins/array-from.tq", "src/builtins/array-isarray.tq", "src/builtins/array-join.tq", @@ -2025,6 +2014,7 @@ torque_files = [ "src/objects/name.tq", "src/objects/oddball.tq", "src/objects/hole.tq", + "src/objects/trusted-object.tq", "src/objects/ordered-hash-table.tq", "src/objects/primitive-heap-object.tq", "src/objects/promise.tq", @@ -3332,6 +3322,8 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/late-escape-analysis-reducer.h", "src/compiler/turboshaft/late-load-elimination-reducer.h", "src/compiler/turboshaft/layered-hash-map.h", + "src/compiler/turboshaft/loop-unrolling-phase.h", + "src/compiler/turboshaft/loop-unrolling-reducer.h", "src/compiler/turboshaft/machine-lowering-phase.h", "src/compiler/turboshaft/machine-lowering-reducer.h", "src/compiler/turboshaft/machine-optimization-reducer.h", @@ -3352,7 +3344,9 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/select-lowering-reducer.h", "src/compiler/turboshaft/sidetable.h", "src/compiler/turboshaft/simplify-tf-loops.h", + "src/compiler/turboshaft/snapshot-table-opindex.h", "src/compiler/turboshaft/snapshot-table.h", + "src/compiler/turboshaft/stack-check-reducer.h", "src/compiler/turboshaft/store-store-elimination-phase.h", "src/compiler/turboshaft/store-store-elimination-reducer.h", "src/compiler/turboshaft/structural-optimization-reducer.h", @@ -3371,6 +3365,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/utils.h", "src/compiler/turboshaft/value-numbering-reducer.h", "src/compiler/turboshaft/variable-reducer.h", + "src/compiler/turboshaft/wasm-dead-code-elimination-phase.h", "src/compiler/type-cache.h", "src/compiler/type-narrowing-reducer.h", "src/compiler/typed-optimization.h", @@ -3523,6 +3518,8 @@ v8_header_set("v8_internal_headers") { "src/heap/local-factory.h", "src/heap/local-heap-inl.h", "src/heap/local-heap.h", + "src/heap/main-allocator-inl.h", + "src/heap/main-allocator.h", "src/heap/mark-compact-inl.h", "src/heap/mark-compact.h", "src/heap/mark-sweep-utilities.h", @@ -3844,6 +3841,8 @@ v8_header_set("v8_internal_headers") { "src/objects/torque-defined-classes.h", "src/objects/transitions-inl.h", "src/objects/transitions.h", + "src/objects/trusted-object-inl.h", + 
"src/objects/trusted-object.h", "src/objects/turbofan-types-inl.h", "src/objects/turbofan-types.h", "src/objects/turboshaft-types-inl.h", @@ -3931,6 +3930,9 @@ v8_header_set("v8_internal_headers") { "src/sandbox/external-pointer-table.h", "src/sandbox/external-pointer.h", "src/sandbox/indirect-pointer-inl.h", + "src/sandbox/indirect-pointer-table-inl.h", + "src/sandbox/indirect-pointer-table.h", + "src/sandbox/indirect-pointer-tag.h", "src/sandbox/indirect-pointer.h", "src/sandbox/sandbox.h", "src/sandbox/sandboxed-pointer-inl.h", @@ -4071,6 +4073,8 @@ v8_header_set("v8_internal_headers") { "src/compiler/int64-lowering.h", "src/compiler/turboshaft/int64-lowering-phase.h", "src/compiler/turboshaft/int64-lowering-reducer.h", + "src/compiler/turboshaft/wasm-gc-optimize-phase.h", + "src/compiler/turboshaft/wasm-gc-type-reducer.h", "src/compiler/turboshaft/wasm-js-lowering-reducer.h", "src/compiler/turboshaft/wasm-lowering-reducer.h", "src/compiler/turboshaft/wasm-optimize-phase.h", @@ -4130,6 +4134,7 @@ v8_header_set("v8_internal_headers") { "src/wasm/turboshaft-graph-interface.h", "src/wasm/value-type.h", "src/wasm/wasm-arguments.h", + "src/wasm/wasm-builtin-list.h", "src/wasm/wasm-code-manager.h", "src/wasm/wasm-debug.h", "src/wasm/wasm-disassembler-impl.h", @@ -4379,6 +4384,17 @@ v8_header_set("v8_internal_headers") { "src/regexp/loong64/regexp-macro-assembler-loong64.h", "src/wasm/baseline/loong64/liftoff-assembler-loong64.h", ] + if (v8_enable_webassembly) { + # Trap handling is enabled on loong64 Linux and in simulators on + # x64 on Linux. + if ((current_cpu == "loong64" && is_linux) || + (current_cpu == "x64" && is_linux)) { + sources += [ "src/trap-handler/handler-inside-posix.h" ] + } + if (current_cpu == "x64" && is_linux) { + sources += [ "src/trap-handler/trap-handler-simulator.h" ] + } + } } else if (v8_current_cpu == "ppc") { sources += [ ### gcmole(ppc) ### @@ -4738,6 +4754,8 @@ if (v8_enable_webassembly) { v8_compiler_sources += [ "src/compiler/int64-lowering.cc", "src/compiler/turboshaft/int64-lowering-phase.cc", + "src/compiler/turboshaft/wasm-gc-optimize-phase.cc", + "src/compiler/turboshaft/wasm-gc-type-reducer.cc", "src/compiler/turboshaft/wasm-optimize-phase.cc", "src/compiler/turboshaft/wasm-turboshaft-compiler.cc", "src/compiler/wasm-address-reassociation.cc", @@ -4847,6 +4865,8 @@ v8_source_set("v8_turboshaft") { "src/compiler/turboshaft/instruction-selection-phase.cc", "src/compiler/turboshaft/late-escape-analysis-reducer.cc", "src/compiler/turboshaft/late-load-elimination-reducer.cc", + "src/compiler/turboshaft/loop-unrolling-phase.cc", + "src/compiler/turboshaft/loop-unrolling-reducer.cc", "src/compiler/turboshaft/machine-lowering-phase.cc", "src/compiler/turboshaft/memory-optimization-reducer.cc", "src/compiler/turboshaft/operations.cc", @@ -5082,6 +5102,7 @@ v8_source_set("v8_base_without_compiler") { "src/heap/cppgc-js/unified-heap-marking-verifier.cc", "src/heap/cppgc-js/unified-heap-marking-visitor.cc", "src/heap/ephemeron-remembered-set.cc", + "src/heap/evacuation-allocator.cc", "src/heap/evacuation-verifier.cc", "src/heap/factory-base.cc", "src/heap/factory.cc", @@ -5102,6 +5123,7 @@ v8_source_set("v8_base_without_compiler") { "src/heap/large-spaces.cc", "src/heap/local-factory.cc", "src/heap/local-heap.cc", + "src/heap/main-allocator.cc", "src/heap/mark-compact.cc", "src/heap/mark-sweep-utilities.cc", "src/heap/marking-barrier.cc", @@ -5327,6 +5349,7 @@ v8_source_set("v8_base_without_compiler") { "src/runtime/runtime.cc", 
"src/sandbox/code-pointer-table.cc", "src/sandbox/external-pointer-table.cc", + "src/sandbox/indirect-pointer-table.cc", "src/sandbox/sandbox.cc", "src/sandbox/testing.cc", "src/snapshot/code-serializer.cc", @@ -5427,6 +5450,7 @@ v8_source_set("v8_base_without_compiler") { "src/asmjs/asm-parser.cc", "src/asmjs/asm-scanner.cc", "src/asmjs/asm-types.cc", + "src/compiler/turboshaft/wasm-dead-code-elimination-phase.cc", "src/debug/debug-wasm-objects.cc", "src/runtime/runtime-test-wasm.cc", "src/runtime/runtime-wasm.cc", @@ -5640,6 +5664,20 @@ v8_source_set("v8_base_without_compiler") { "src/execution/loong64/simulator-loong64.cc", "src/regexp/loong64/regexp-macro-assembler-loong64.cc", ] + if (v8_enable_webassembly) { + # Trap handling is enabled on loong64 Linux and in simulators on + # x64 on Linux. + if ((current_cpu == "loong64" && is_linux) || + (current_cpu == "x64" && is_linux)) { + sources += [ + "src/trap-handler/handler-inside-posix.cc", + "src/trap-handler/handler-outside-posix.cc", + ] + } + if (current_cpu == "x64" && is_linux) { + sources += [ "src/trap-handler/handler-outside-simulator.cc" ] + } + } } else if (v8_current_cpu == "ppc") { sources += [ ### gcmole(ppc) ### @@ -6391,6 +6429,7 @@ v8_header_set("v8_heap_base_headers") { "src/heap/base/basic-slot-set.h", "src/heap/base/bytes.h", "src/heap/base/incremental-marking-schedule.h", + "src/heap/base/memory-tagging.h", "src/heap/base/stack.h", "src/heap/base/worklist.h", ] @@ -6404,6 +6443,7 @@ v8_source_set("v8_heap_base") { sources = [ "src/heap/base/active-system-pages.cc", "src/heap/base/incremental-marking-schedule.cc", + "src/heap/base/memory-tagging.cc", "src/heap/base/stack.cc", "src/heap/base/worklist.cc", ] @@ -6440,6 +6480,8 @@ v8_source_set("v8_heap_base") { configs = [ ":internal_config" ] + deps = [ ":v8_config_headers" ] + public_deps = [ ":v8_heap_base_headers", ":v8_libbase", diff --git a/deps/v8/DEPS b/deps/v8/DEPS index 5cffcad442b366..810434bdf0ee96 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -24,7 +24,7 @@ vars = { # Since the images are hundreds of MB, default to only downloading the image # most commonly useful for developers. Bots and developers that need to use # other images (e.g., qemu.arm64) can override this with additional images. - 'checkout_fuchsia_boot_images': "terminal.qemu-x64", + 'checkout_fuchsia_boot_images': "terminal.qemu-x64,terminal.x64", 'checkout_fuchsia_product_bundles': '"{checkout_fuchsia_boot_images}" != ""', 'checkout_instrumented_libraries': False, @@ -57,23 +57,33 @@ vars = { # reclient CIPD package version 'reclient_version': 're_client_version:0.113.0.8b45b89-gomaip', + # Fetch configuration files required for the 'use_remoteexec' gn arg + 'download_remoteexec_cfg': False, + + # RBE instance to use for running remote builds + 'rbe_instance': Str('projects/rbe-chrome-untrusted/instances/default_instance'), + + # RBE project to download rewrapper config files for. Only needed if + # different from the project used in 'rbe_instance' + 'rewrapper_cfg_project': Str(''), + # This variable is overrided in Chromium's DEPS file. 'build_with_chromium': False, # GN CIPD package version. - 'gn_version': 'git_revision:cc56a0f98bb34accd5323316e0292575ff17a5d4', + 'gn_version': 'git_revision:991530ce394efb58fcd848195469022fa17ae126', # ninja CIPD package version # https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja 'ninja_version': 'version:2@1.11.1.chromium.6', # luci-go CIPD package version. 
- 'luci_go': 'git_revision:fe3cfd422b1012c2c8cf00d65cdb11aa2c26cd66', + 'luci_go': 'git_revision:589d8654cfa7808816a6ecb4284ed2fd72c2f6d5', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. - 'fuchsia_version': 'version:14.20230902.2.1', + 'fuchsia_version': 'version:15.20230930.1.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -111,11 +121,11 @@ vars = { deps = { 'base/trace_event/common': - Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '147f65333c38ddd1ebf554e89965c243c8ce50b3', + Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '29ac73db520575590c3aceb0a6f1f58dda8934f6', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + 'e14e0cc3b60c6ba8901741da3f9c18b7fa983880', + Var('chromium_url') + '/chromium/src/build.git' + '@' + 'b3ac98b5aa5333fa8b1059b5bf19885923dfe050', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'b2043d4f435131d0a1bdd5342c17753ef9236572', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '50c348906cbd450e031bc3123b657f833f8455b7', 'buildtools/linux64': { 'packages': [ { @@ -161,9 +171,9 @@ deps = { 'test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '9efb4f8e531efbc297680145a7fa67d7415d0a4a', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '6789b50cce139af4ca819feb8ce3a9c77ba4098a', 'third_party/android_platform': { - 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '4b4eee2d24ec8002602e1b82d63a586d46507501', + 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'eeb2d566f963bb66212fdc0d9bbe1dde550b4969', 'condition': 'checkout_android', }, 'third_party/android_sdk/public': { @@ -208,14 +218,14 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_toolchain/android_toolchain', - 'version': 'R_8suM8m0oHbZ1awdxGXvKEFpAOETscbfZxkkMthyk8C', + 'version': 'NSOM616pOQCfRfDAhC72ltgjyUQp9lAWCMzlmgB18dAC', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + 'f16ca3c78e46cefa982100444844da3fcb25390e', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '4f81c1e295978227d83f1b42ceff40b4f9b5b08c', 'condition': 'checkout_android', }, 'third_party/clang-format/script': @@ -229,7 +239,7 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '693e0b312171685d34de77b39bc90b8271ad6541', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '90a30a5b5357636fa05bb315c393275be7ca705c', 'third_party/fuchsia-gn-sdk': { 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-gn-sdk.git' + '@' + 'd1e0ff4350f77c7f6b246ff62c232318a73c8176', 'condition': 'checkout_fuchsia', @@ -266,11 +276,11 @@ deps = { 'third_party/jsoncpp/source': Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '42e892d96e47b1f6e29844cc705e148ec4856448', 'third_party/libc++/src': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '84fb809dd6dae36d556dc0bb702c6cc2ce9d4b80', + 
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '7cf98622abaf832e2d4784889ebc69d5b6fde4d8', 'third_party/libc++abi/src': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '331847d7a5e6f8706689cf5d468e6e58c868fa10', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'e8e4eb8f1c413ea4365256b2b83a6093c95d2d86', 'third_party/libunwind/src': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'a321409e66c212098e755cfae1a978bbcff1ccbb', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '43e5a34c5b7066a7ee15c74f09dc37b4b9b5630e', 'third_party/logdog/logdog': Var('chromium_url') + '/infra/luci/luci-py/client/libs/logdog' + '@' + '0b2078a90f7a638d576b3a7c407d136f2fb62399', 'third_party/markupsafe': @@ -294,9 +304,9 @@ deps = { 'condition': 'checkout_android', }, 'third_party/zlib': - Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'f5fd0ad2663e239a31184ad4c9919991dda16f46', + Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '3f0af7f1d5ca6bb9d247f40b861346627c3032a1', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '9fc887ccded86c9355f1abbe80c651271c59632f', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'dc3593cbb8b6e77c06e17697ea5b34b38d54a7ba', 'tools/luci-go': { 'packages': [ { @@ -312,7 +322,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/abseil-cpp': { - 'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + '7affa303ea4ebf4d4de65b3f20f230c7bb16a2ed', + 'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + '7207ed23d56aa19796ffd08b8203f7af7f3b5f29', 'condition': 'not build_with_chromium', } } @@ -680,4 +690,20 @@ hooks = [ 'condition': 'host_os == "win"', 'action': ['python3', 'build/del_ninja_deps_cache.py'], }, + # Configure remote exec cfg files + { + 'name': 'configure_reclient_cfgs', + 'pattern': '.', + 'condition': 'download_remoteexec_cfg and not build_with_chromium', + 'action': ['python3', + 'buildtools/reclient_cfgs/configure_reclient_cfgs.py', + '--rbe_instance', + Var('rbe_instance'), + '--reproxy_cfg_template', + 'reproxy.cfg.template', + '--rewrapper_cfg_project', + Var('rewrapper_cfg_project'), + '--quiet', + ], + }, ] diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h index f2ee4d36b317db..c1bb2e2ef192d0 100644 --- a/deps/v8/base/trace_event/common/trace_event_common.h +++ b/deps/v8/base/trace_event/common/trace_event_common.h @@ -195,7 +195,7 @@ // use_perfetto_client_library GN arg. If that flag is disabled, we fall back to // the legacy implementation in the latter half of this file (and // trace_event.h). -// TODO(skyostil): Remove the legacy macro implementation. +// TODO(skyostil, crbug.com/1006541): Remove the legacy macro implementation. // Normally we'd use BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY) for this, but // because v8 includes trace_event_common.h directly (in non-Perfetto mode), we diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl index ed6226308c266b..4754f17c5b02d7 100644 --- a/deps/v8/include/js_protocol.pdl +++ b/deps/v8/include/js_protocol.pdl @@ -1014,8 +1014,7 @@ domain Runtime # Unique script identifier. type ScriptId extends string - # Represents options for serialization. 
Overrides `generatePreview`, `returnByValue` and - # `generateWebDriverValue`. + # Represents options for serialization. Overrides `generatePreview` and `returnByValue`. type SerializationOptions extends object properties enum serialization @@ -1027,8 +1026,7 @@ domain Runtime # `returnByValue: true`. Overrides `returnByValue`. json # Only remote object id is put in the result. Same bahaviour as if no - # `serializationOptions`, `generatePreview`, `returnByValue` nor `generateWebDriverValue` - # are provided. + # `serializationOptions`, `generatePreview` nor `returnByValue` are provided. idOnly # Deep serialization depth. Default is full depth. Respected only in `deep` serialization mode. @@ -1066,6 +1064,7 @@ domain Runtime arraybuffer node window + generator optional any value optional string objectId # Set if value reference met more then once during serialization. In such @@ -1125,8 +1124,6 @@ domain Runtime optional UnserializableValue unserializableValue # String representation of the object. optional string description - # Deprecated. Use `deepSerializedValue` instead. WebDriver BiDi representation of the value. - deprecated optional DeepSerializedValue webDriverValue # Deep serialized value. experimental optional DeepSerializedValue deepSerializedValue # Unique object identifier (for non-primitive values). @@ -1442,13 +1439,8 @@ domain Runtime # boundaries). # This is mutually exclusive with `executionContextId`. experimental optional string uniqueContextId - # Deprecated. Use `serializationOptions: {serialization:"deep"}` instead. - # Whether the result should contain `webDriverValue`, serialized according to - # https://w3c.github.io/webdriver-bidi. This is mutually exclusive with `returnByValue`, but - # resulting `objectId` is still provided. - deprecated optional boolean generateWebDriverValue # Specifies the result serialization. If provided, overrides - # `generatePreview`, `returnByValue` and `generateWebDriverValue`. + # `generatePreview` and `returnByValue`. experimental optional SerializationOptions serializationOptions returns @@ -1536,14 +1528,8 @@ domain Runtime # boundaries). # This is mutually exclusive with `contextId`. experimental optional string uniqueContextId - # Deprecated. Use `serializationOptions: {serialization:"deep"}` instead. - # Whether the result should contain `webDriverValue`, serialized - # according to - # https://w3c.github.io/webdriver-bidi. This is mutually exclusive with `returnByValue`, but - # resulting `objectId` is still provided. - deprecated optional boolean generateWebDriverValue # Specifies the result serialization. If provided, overrides - # `generatePreview`, `returnByValue` and `generateWebDriverValue`. + # `generatePreview` and `returnByValue`. experimental optional SerializationOptions serializationOptions returns # Evaluation result. diff --git a/deps/v8/include/v8-container.h b/deps/v8/include/v8-container.h index ce068603649461..1d9e72c117df6f 100644 --- a/deps/v8/include/v8-container.h +++ b/deps/v8/include/v8-container.h @@ -43,6 +43,42 @@ class V8_EXPORT Array : public Object { return static_cast(value); } + enum class CallbackResult { + kException, + kBreak, + kContinue, + }; + using IterationCallback = CallbackResult (*)(uint32_t index, + Local element, + void* data); + + /** + * Calls {callback} for every element of this array, passing {callback_data} + * as its {data} parameter. + * This function will typically be faster than calling {Get()} repeatedly. 
+ * As a consequence of being optimized for low overhead, the provided + * callback must adhere to the following restrictions: + * - It must not allocate any V8 objects and continue iterating; it may + * allocate (e.g. an error message/object) and then immediately terminate + * the iteration. + * - It must not modify the array being iterated. + * - It must not call back into V8 (unless it can guarantee that such a + * call does not violate the above restrictions, which is difficult). + * - The {Local element} must not "escape", i.e. must not be assigned + * to any other {Local}. Creating a {Global} from it, or updating a + * v8::TypecheckWitness with it, is safe. + * These restrictions may be lifted in the future if use cases arise that + * justify a slower but more robust implementation. + * + * Returns {Nothing} on exception; use a {TryCatch} to catch and handle this + * exception. + * When the {callback} returns {kException}, iteration is terminated + * immediately, returning {Nothing}. By returning {kBreak}, the callback + * can request non-exceptional early termination of the iteration. + */ + Maybe Iterate(Local context, IterationCallback callback, + void* callback_data); + private: Array(); static void CheckCast(Value* obj); diff --git a/deps/v8/include/v8-exception.h b/deps/v8/include/v8-exception.h index bc058e3fc7b874..3b76636c392ff6 100644 --- a/deps/v8/include/v8-exception.h +++ b/deps/v8/include/v8-exception.h @@ -30,14 +30,21 @@ class ThreadLocalTop; */ class V8_EXPORT Exception { public: - static Local RangeError(Local message); - static Local ReferenceError(Local message); - static Local SyntaxError(Local message); - static Local TypeError(Local message); - static Local WasmCompileError(Local message); - static Local WasmLinkError(Local message); - static Local WasmRuntimeError(Local message); - static Local Error(Local message); + static Local RangeError(Local message, + Local options = {}); + static Local ReferenceError(Local message, + Local options = {}); + static Local SyntaxError(Local message, + Local options = {}); + static Local TypeError(Local message, + Local options = {}); + static Local WasmCompileError(Local message, + Local options = {}); + static Local WasmLinkError(Local message, + Local options = {}); + static Local WasmRuntimeError(Local message, + Local options = {}); + static Local Error(Local message, Local options = {}); /** * Creates an error message for the given exception. diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h index 68b19720a4f3e7..cfdd5dbe6df01a 100644 --- a/deps/v8/include/v8-inspector.h +++ b/deps/v8/include/v8-inspector.h @@ -217,17 +217,6 @@ class V8_EXPORT V8InspectorSession { virtual void stop() = 0; }; -// Deprecated. -// TODO(crbug.com/1420968): remove. -class V8_EXPORT WebDriverValue { - public: - explicit WebDriverValue(std::unique_ptr type, - v8::MaybeLocal value = {}) - : type(std::move(type)), value(value) {} - std::unique_ptr type; - v8::MaybeLocal value; -}; - struct V8_EXPORT DeepSerializedValue { explicit DeepSerializedValue(std::unique_ptr type, v8::MaybeLocal value = {}) @@ -266,12 +255,6 @@ class V8_EXPORT V8InspectorClient { virtual void beginUserGesture() {} virtual void endUserGesture() {} - // Deprecated. Use `deepSerialize` instead. - // TODO(crbug.com/1420968): remove. 
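The v8-container.h hunk above introduces a fast Array iteration API. Below is a minimal embedder-side sketch of how it might be used; the flattened diff text drops template arguments, so Local<Value>, Local<Context> and Maybe<void> are assumed from the surrounding declarations, and the summing helper is illustrative.

// Sketch: summing the Int32 elements of a v8::Array with the new Iterate()
// fast path. The callback obeys the documented restrictions: it does not
// allocate V8 objects, does not modify the array, and does not let the
// element Local escape.
#include <cstdint>
#include <v8.h>

namespace {

struct SumState {
  int64_t total = 0;
};

v8::Array::CallbackResult SumElement(uint32_t /*index*/,
                                     v8::Local<v8::Value> element,
                                     void* data) {
  auto* state = static_cast<SumState*>(data);
  if (!element->IsInt32()) {
    // Non-integer element: request non-exceptional early termination.
    return v8::Array::CallbackResult::kBreak;
  }
  state->total += element.As<v8::Int32>()->Value();
  return v8::Array::CallbackResult::kContinue;
}

int64_t SumInt32Elements(v8::Local<v8::Context> context,
                         v8::Local<v8::Array> array) {
  SumState state;
  // Iterate() returns Nothing on exception (or when the callback reported
  // kException); -1 stands in for the embedder's own error handling here.
  if (array->Iterate(context, &SumElement, &state).IsNothing()) return -1;
  return state.total;
}

}  // namespace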
- virtual std::unique_ptr serializeToWebDriverValue( - v8::Local v8Value, int maxDepth) { - return nullptr; - } virtual std::unique_ptr deepSerialize( v8::Local v8Value, int maxDepth, v8::Local additionalParameters) { diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h index d29a303536186c..d04af4461b7fc7 100644 --- a/deps/v8/include/v8-internal.h +++ b/deps/v8/include/v8-internal.h @@ -484,65 +484,74 @@ PER_ISOLATE_EXTERNAL_POINTER_TAGS(CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS) // Indirect Pointers. // // When the sandbox is enabled, indirect pointers are used to reference -// HeapObjects that live outside of the sandbox (but are still managed through -// the GC). When object A references an object B through an indirect pointer, -// object A will contain a IndirectPointerHandle, i.e. a shifted 32-bit index, -// which identifies an entry in a pointer table (such as the CodePointerTable). +// HeapObjects that live outside of the sandbox (but are still managed by V8's +// garbage collector). When object A references an object B through an indirect +// pointer, object A will contain a IndirectPointerHandle, i.e. a shifted +// 32-bit index, which identifies an entry in a pointer table (generally an +// indirect pointer table, or the code pointer table if it is a Code object). // This table entry then contains the actual pointer to object B. Further, // object B owns this pointer table entry, and it is responsible for updating // the "self-pointer" in the entry when it is relocated in memory. This way, in // contrast to "normal" pointers, indirect pointers never need to be tracked by // the GC (i.e. there is no remembered set for them). -// Currently there is only one type of object referenced through indirect -// pointers (Code objects), but once there are different types of such objects, -// the pointer table entry would probably also contain the type of the target -// object (e.g. by XORing the instance type into the top bits of the pointer). // An IndirectPointerHandle represents a 32-bit index into a pointer table. using IndirectPointerHandle = uint32_t; +// The size of the virtual memory reservation for the indirect pointer table. +// As with the external pointer table, a maximum table size in combination with +// shifted indices allows omitting bounds checks. +constexpr size_t kIndirectPointerTableReservationSize = 8 * MB; + // The indirect pointer handles are stores shifted to the left by this amount // to guarantee that they are smaller than the maximum table size. -constexpr uint32_t kIndirectPointerHandleShift = 6; +constexpr uint32_t kIndirectPointerHandleShift = 12; // A null handle always references an entry that contains nullptr. constexpr IndirectPointerHandle kNullIndirectPointerHandle = 0; -// Currently only Code objects can be referenced through indirect pointers and -// various places rely on that assumption. They will all static_assert against -// this constant to make them easy to find and fix once we reference other types -// of objects indirectly. -constexpr bool kAllIndirectPointerObjectsAreCode = true; +// The maximum number of entries in an indirect pointer table. 
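The v8-exception.h hunk above adds an optional options argument to the Exception factory functions. A hedged sketch follows, assuming the argument is the ES2022-style options bag (for example carrying a cause property); the helper name and property layout are illustrative, not taken from the patch.

// Sketch: wrapping an existing error value as the `cause` of a new error.
#include <v8.h>

v8::Local<v8::Value> MakeWrappedError(v8::Local<v8::Context> context,
                                      v8::Local<v8::Value> cause) {
  v8::Isolate* isolate = context->GetIsolate();
  v8::Local<v8::String> message =
      v8::String::NewFromUtf8Literal(isolate, "wrapper failed");

  // Build { cause: <original error> } and pass it as the new second argument.
  v8::Local<v8::Object> options = v8::Object::New(isolate);
  options
      ->Set(context, v8::String::NewFromUtf8Literal(isolate, "cause"), cause)
      .Check();

  // The options argument defaults to an empty handle, so existing callers of
  // Exception::Error(message) keep compiling unchanged.
  return v8::Exception::Error(message, options);
}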
+constexpr int kIndirectPointerTableEntrySize = 8; +constexpr int kIndirectPointerTableEntrySizeLog2 = 3; +constexpr size_t kMaxIndirectPointers = + kIndirectPointerTableReservationSize / kIndirectPointerTableEntrySize; +static_assert((1 << (32 - kIndirectPointerHandleShift)) == kMaxIndirectPointers, + "kIndirectPointerTableReservationSize and " + "kIndirectPointerHandleShift don't match"); // // Code Pointers. // // When the sandbox is enabled, Code objects are referenced from inside the // sandbox through indirect pointers that reference entries in the code pointer -// table (CPT). Each entry in the CPT contains both a pointer to a Code object -// as well as a pointer to the Code's entrypoint. This allows calling/jumping -// into Code with one fewer memory access (compared to the case where the -// entrypoint pointer needs to be loaded from the Code object). -// As such, a CodePointerHandle can be used both to obtain the referenced Code -// object and to directly load its entrypoint pointer. +// table (CPT) instead of the indirect pointer table (IPT). Each entry in the +// CPT contains both a pointer to a Code object as well as a pointer to the +// Code's entrypoint. This allows calling/jumping into Code with one fewer +// memory access (compared to the case where the entrypoint pointer needs to be +// loaded from the Code object). As such, a CodePointerHandle can be used both +// to obtain the referenced Code object and to directly load its entrypoint +// pointer. using CodePointerHandle = IndirectPointerHandle; -constexpr uint32_t kCodePointerHandleShift = kIndirectPointerHandleShift; -constexpr CodePointerHandle kNullCodePointerHandle = 0; -// The size of the virtual memory reservation for code pointer table. -// This determines the maximum number of entries in a table. Using a maximum -// size allows omitting bounds checks on table accesses if the indices are -// guaranteed (e.g. through shifting) to be below the maximum index. This -// value must be a power of two. +// The size of the virtual memory reservation for the code pointer table. +// As with the other tables, a maximum table size in combination with shifted +// indices allows omitting bounds checks. constexpr size_t kCodePointerTableReservationSize = 1 * GB; -// The maximum number of entries in an external pointer table. +// Code pointer handles are shifted by a different amount than indirect pointer +// handles as the tables have a different maximum size. +constexpr uint32_t kCodePointerHandleShift = 6; + +// A null handle always references an entry that contains nullptr. +constexpr CodePointerHandle kNullCodePointerHandle = 0; + +// The maximum number of entries in a code pointer table. constexpr int kCodePointerTableEntrySize = 16; constexpr int kCodePointerTableEntrySizeLog2 = 4; constexpr size_t kMaxCodePointers = kCodePointerTableReservationSize / kCodePointerTableEntrySize; static_assert( - (1 << (32 - kIndirectPointerHandleShift)) == kMaxCodePointers, + (1 << (32 - kCodePointerHandleShift)) == kMaxCodePointers, "kCodePointerTableReservationSize and kCodePointerHandleShift don't match"); constexpr int kCodePointerTableEntryEntrypointOffset = 0; @@ -602,9 +611,11 @@ class Internals { static const int kHandleScopeDataSize = 2 * kApiSystemPointerSize + 2 * kApiInt32Size; - // ExternalPointerTable layout guarantees. + // ExternalPointerTable and IndirectPointerTable layout guarantees. 
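The shifted-handle scheme above only works if the table reservation size, entry size and handle shift agree. The following standalone check restates the constants from the hunk and makes the arithmetic explicit (8 MB / 8 B = 2^20 entries = 2^(32-12); 1 GB / 16 B = 2^26 entries = 2^(32-6)).

// Sketch: consistency check of the pointer-table sizing constants, with the
// values copied locally so the snippet compiles on its own.
#include <cstddef>
#include <cstdint>

namespace sketch {

constexpr size_t MB = 1024 * 1024;
constexpr size_t GB = 1024 * MB;

constexpr size_t kIndirectPointerTableReservationSize = 8 * MB;
constexpr uint32_t kIndirectPointerHandleShift = 12;
constexpr size_t kIndirectPointerTableEntrySize = 8;
constexpr size_t kMaxIndirectPointers =
    kIndirectPointerTableReservationSize / kIndirectPointerTableEntrySize;
static_assert(kMaxIndirectPointers == size_t{1} << 20);
static_assert(size_t{1} << (32 - kIndirectPointerHandleShift) ==
              kMaxIndirectPointers);

constexpr size_t kCodePointerTableReservationSize = 1 * GB;
constexpr uint32_t kCodePointerHandleShift = 6;
constexpr size_t kCodePointerTableEntrySize = 16;
constexpr size_t kMaxCodePointers =
    kCodePointerTableReservationSize / kCodePointerTableEntrySize;
static_assert(kMaxCodePointers == size_t{1} << 26);
static_assert(size_t{1} << (32 - kCodePointerHandleShift) == kMaxCodePointers);

}  // namespace sketch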
static const int kExternalPointerTableBasePointerOffset = 0; static const int kExternalPointerTableSize = 2 * kApiSystemPointerSize; + static const int kIndirectPointerTableSize = 2 * kApiSystemPointerSize; + static const int kIndirectPointerTableBasePointerOffset = 0; // IsolateData layout guarantees. static const int kIsolateCageBaseOffset = 0; @@ -639,8 +650,10 @@ class Internals { kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize; static const int kIsolateSharedExternalPointerTableAddressOffset = kIsolateExternalPointerTableOffset + kExternalPointerTableSize; - static const int kIsolateApiCallbackThunkArgumentOffset = + static const int kIsolateIndirectPointerTableOffset = kIsolateSharedExternalPointerTableAddressOffset + kApiSystemPointerSize; + static const int kIsolateApiCallbackThunkArgumentOffset = + kIsolateIndirectPointerTableOffset + kIndirectPointerTableSize; #else static const int kIsolateApiCallbackThunkArgumentOffset = kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize; @@ -763,6 +776,15 @@ class Internals { return ReadRawField(map, kMapInstanceTypeOffset); } + V8_INLINE static Address LoadMap(Address obj) { + if (!HasHeapObjectTag(obj)) return kNullAddress; + Address map = ReadTaggedPointerField(obj, kHeapObjectMapOffset); +#ifdef V8_MAP_PACKING + map = UnpackMapWord(map); +#endif + return map; + } + V8_INLINE static int GetOddballKind(Address obj) { return SmiValue(ReadTaggedSignedField(obj, kOddballKindOffset)); } diff --git a/deps/v8/include/v8-isolate.h b/deps/v8/include/v8-isolate.h index f6b64db360ba3a..86bde11cdeafc0 100644 --- a/deps/v8/include/v8-isolate.h +++ b/deps/v8/include/v8-isolate.h @@ -421,36 +421,36 @@ class V8_EXPORT Isolate { enum UseCounterFeature { kUseAsm = 0, kBreakIterator = 1, - kLegacyConst V8_DEPRECATE_SOON("unused") = 2, - kMarkDequeOverflow V8_DEPRECATE_SOON("unused") = 3, - kStoreBufferOverflow V8_DEPRECATE_SOON("unused") = 4, - kSlotsBufferOverflow V8_DEPRECATE_SOON("unused") = 5, - kObjectObserve V8_DEPRECATE_SOON("unused") = 6, + kLegacyConst V8_DEPRECATED("unused") = 2, + kMarkDequeOverflow V8_DEPRECATED("unused") = 3, + kStoreBufferOverflow V8_DEPRECATED("unused") = 4, + kSlotsBufferOverflow V8_DEPRECATED("unused") = 5, + kObjectObserve V8_DEPRECATED("unused") = 6, kForcedGC = 7, kSloppyMode = 8, kStrictMode = 9, - kStrongMode V8_DEPRECATE_SOON("unused") = 10, + kStrongMode V8_DEPRECATED("unused") = 10, kRegExpPrototypeStickyGetter = 11, kRegExpPrototypeToString = 12, kRegExpPrototypeUnicodeGetter = 13, - kIntlV8Parse V8_DEPRECATE_SOON("unused") = 14, - kIntlPattern V8_DEPRECATE_SOON("unused") = 15, - kIntlResolved V8_DEPRECATE_SOON("unused") = 16, - kPromiseChain V8_DEPRECATE_SOON("unused") = 17, - kPromiseAccept V8_DEPRECATE_SOON("unused") = 18, - kPromiseDefer V8_DEPRECATE_SOON("unused") = 19, + kIntlV8Parse V8_DEPRECATED("unused") = 14, + kIntlPattern V8_DEPRECATED("unused") = 15, + kIntlResolved V8_DEPRECATED("unused") = 16, + kPromiseChain V8_DEPRECATED("unused") = 17, + kPromiseAccept V8_DEPRECATED("unused") = 18, + kPromiseDefer V8_DEPRECATED("unused") = 19, kHtmlCommentInExternalScript = 20, kHtmlComment = 21, kSloppyModeBlockScopedFunctionRedefinition = 22, kForInInitializer = 23, - kArrayProtectorDirtied V8_DEPRECATE_SOON("unused") = 24, + kArrayProtectorDirtied V8_DEPRECATED("unused") = 24, kArraySpeciesModified = 25, kArrayPrototypeConstructorModified = 26, - kArrayInstanceProtoModified V8_DEPRECATE_SOON("unused") = 27, + kArrayInstanceProtoModified V8_DEPRECATED("unused") = 
27, kArrayInstanceConstructorModified = 28, - kLegacyFunctionDeclaration V8_DEPRECATE_SOON("unused") = 29, - kRegExpPrototypeSourceGetter V8_DEPRECATE_SOON("unused") = 30, - kRegExpPrototypeOldFlagGetter V8_DEPRECATE_SOON("unused") = 31, + kLegacyFunctionDeclaration V8_DEPRECATED("unused") = 29, + kRegExpPrototypeSourceGetter V8_DEPRECATED("unused") = 30, + kRegExpPrototypeOldFlagGetter V8_DEPRECATED("unused") = 31, kDecimalWithLeadingZeroInStrictMode = 32, kLegacyDateParser = 33, kDefineGetterOrSetterWouldThrow = 34, @@ -458,22 +458,21 @@ class V8_EXPORT Isolate { kAssigmentExpressionLHSIsCallInSloppy = 36, kAssigmentExpressionLHSIsCallInStrict = 37, kPromiseConstructorReturnedUndefined = 38, - kConstructorNonUndefinedPrimitiveReturn V8_DEPRECATE_SOON("unused") = 39, - kLabeledExpressionStatement V8_DEPRECATE_SOON("unused") = 40, - kLineOrParagraphSeparatorAsLineTerminator V8_DEPRECATE_SOON("unused") = 41, + kConstructorNonUndefinedPrimitiveReturn V8_DEPRECATED("unused") = 39, + kLabeledExpressionStatement V8_DEPRECATED("unused") = 40, + kLineOrParagraphSeparatorAsLineTerminator V8_DEPRECATED("unused") = 41, kIndexAccessor = 42, kErrorCaptureStackTrace = 43, kErrorPrepareStackTrace = 44, kErrorStackTraceLimit = 45, kWebAssemblyInstantiation = 46, kDeoptimizerDisableSpeculation = 47, - kArrayPrototypeSortJSArrayModifiedPrototype V8_DEPRECATE_SOON("unused") = - 48, + kArrayPrototypeSortJSArrayModifiedPrototype V8_DEPRECATED("unused") = 48, kFunctionTokenOffsetTooLongForToString = 49, kWasmSharedMemory = 50, kWasmThreadOpcodes = 51, - kAtomicsNotify V8_DEPRECATE_SOON("unused") = 52, - kAtomicsWake V8_DEPRECATE_SOON("unused") = 53, + kAtomicsNotify V8_DEPRECATED("unused") = 52, + kAtomicsWake V8_DEPRECATED("unused") = 53, kCollator = 54, kNumberFormat = 55, kDateTimeFormat = 56, @@ -483,7 +482,7 @@ class V8_EXPORT Isolate { kListFormat = 60, kSegmenter = 61, kStringLocaleCompare = 62, - kStringToLocaleUpperCase V8_DEPRECATE_SOON("unused") = 63, + kStringToLocaleUpperCase V8_DEPRECATED("unused") = 63, kStringToLocaleLowerCase = 64, kNumberToLocaleString = 65, kDateToLocaleString = 66, @@ -491,14 +490,14 @@ class V8_EXPORT Isolate { kDateToLocaleTimeString = 68, kAttemptOverrideReadOnlyOnPrototypeSloppy = 69, kAttemptOverrideReadOnlyOnPrototypeStrict = 70, - kOptimizedFunctionWithOneShotBytecode V8_DEPRECATE_SOON("unused") = 71, + kOptimizedFunctionWithOneShotBytecode V8_DEPRECATED("unused") = 71, kRegExpMatchIsTrueishOnNonJSRegExp = 72, kRegExpMatchIsFalseishOnJSRegExp = 73, - kDateGetTimezoneOffset V8_DEPRECATE_SOON("unused") = 74, + kDateGetTimezoneOffset V8_DEPRECATED("unused") = 74, kStringNormalize = 75, kCallSiteAPIGetFunctionSloppyCall = 76, kCallSiteAPIGetThisSloppyCall = 77, - kRegExpMatchAllWithNonGlobalRegExp V8_DEPRECATE_SOON("unused") = 78, + kRegExpMatchAllWithNonGlobalRegExp V8_DEPRECATED("unused") = 78, kRegExpExecCalledOnSlowRegExp = 79, kRegExpReplaceCalledOnSlowRegExp = 80, kDisplayNames = 81, @@ -529,9 +528,9 @@ class V8_EXPORT Isolate { kWasmSimdOpcodes = 106, kVarRedeclaredCatchBinding = 107, kWasmRefTypes = 108, - kWasmBulkMemory V8_DEPRECATE_SOON( + kWasmBulkMemory V8_DEPRECATED( "Unused since 2021 (https://crrev.com/c/2622913)") = 109, - kWasmMultiValue V8_DEPRECATE_SOON( + kWasmMultiValue V8_DEPRECATED( "Unused since 2021 (https://crrev.com/c/2817790)") = 110, kWasmExceptionHandling = 111, kInvalidatedMegaDOMProtector = 112, @@ -541,8 +540,8 @@ class V8_EXPORT Isolate { kAsyncStackTaggingCreateTaskCall = 116, kDurationFormat = 117, 
kInvalidatedNumberStringNotRegexpLikeProtector = 118, - kRegExpUnicodeSetIncompatibilitiesWithUnicodeMode V8_DEPRECATE_SOON( - "unused") = 119, + kRegExpUnicodeSetIncompatibilitiesWithUnicodeMode V8_DEPRECATED("unused") = + 119, kImportAssertionDeprecatedSyntax = 120, kLocaleInfoObsoletedGetters = 121, kLocaleInfoFunctions = 122, @@ -551,6 +550,7 @@ class V8_EXPORT Isolate { kWasmMemory64 = 125, kWasmMultiMemory = 126, kWasmGC = 127, + kWasmImportedStrings = 128, // If you add new values here, you'll also need to update Chromium's: // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to diff --git a/deps/v8/include/v8-memory-span.h b/deps/v8/include/v8-memory-span.h index b26af4f705b7cf..c18edb1e2766b9 100644 --- a/deps/v8/include/v8-memory-span.h +++ b/deps/v8/include/v8-memory-span.h @@ -7,12 +7,16 @@ #include +#include +#include +#include + #include "v8config.h" // NOLINT(build/include_directory) namespace v8 { /** - * Points to an unowned continous buffer holding a known number of elements. + * Points to an unowned contiguous buffer holding a known number of elements. * * This is similar to std::span (under consideration for C++20), but does not * require advanced C++ support. In the (far) future, this may be replaced with @@ -23,21 +27,167 @@ namespace v8 { */ template class V8_EXPORT MemorySpan { + private: + /** Some C++ machinery, brought from the future. */ + template + using is_array_convertible = std::is_convertible; + template + static constexpr bool is_array_convertible_v = + is_array_convertible::value; + + template + using iter_reference_t = decltype(*std::declval()); + + template + struct is_compatible_iterator : std::false_type {}; + template + struct is_compatible_iterator< + It, + std::void_t< + std::is_base_of::iterator_category>, + is_array_convertible>, + T>>> : std::true_type {}; + template + static constexpr bool is_compatible_iterator_v = + is_compatible_iterator::value; + + template + static constexpr U* to_address(U* p) noexcept { + return p; + } + + template ().operator->())>> + static constexpr auto to_address(It it) noexcept { + return it.operator->(); + } + public: /** The default constructor creates an empty span. */ constexpr MemorySpan() = default; - constexpr MemorySpan(T* data, size_t size) : data_(data), size_(size) {} + /** Constructor from nullptr and count, for backwards compatibility. + * This is not compatible with C++20 std::span. + */ + constexpr MemorySpan(std::nullptr_t, size_t) {} + + /** Constructor from "iterator" and count. */ + template , bool> = true> + constexpr MemorySpan(Iterator first, + size_t count) // NOLINT(runtime/explicit) + : data_(to_address(first)), size_(count) {} + + /** Constructor from two "iterators". */ + template && + !std::is_convertible_v, + bool> = true> + constexpr MemorySpan(Iterator first, + Iterator last) // NOLINT(runtime/explicit) + : data_(to_address(first)), size_(last - first) {} + + /** Implicit conversion from C-style array. */ + template + constexpr MemorySpan(T (&a)[N]) noexcept // NOLINT(runtime/explicit) + : data_(a), size_(N) {} + + /** Implicit conversion from std::array. */ + template , bool> = true> + constexpr MemorySpan( + std::array& a) noexcept // NOLINT(runtime/explicit) + : data_(a.data()), size_{N} {} + + /** Implicit conversion from const std::array. */ + template , bool> = true> + constexpr MemorySpan( + const std::array& a) noexcept // NOLINT(runtime/explicit) + : data_(a.data()), size_{N} {} /** Returns a pointer to the beginning of the buffer. 
*/ constexpr T* data() const { return data_; } /** Returns the number of elements that the buffer holds. */ constexpr size_t size() const { return size_; } + constexpr T& operator[](size_t i) const { return data_[i]; } + + class Iterator { + public: + using iterator_category = std::forward_iterator_tag; + using value_type = T; + using difference_type = std::ptrdiff_t; + using pointer = value_type*; + using reference = value_type&; + + T& operator*() const { return *ptr_; } + T* operator->() const { return ptr_; } + + bool operator==(Iterator other) const { return ptr_ == other.ptr_; } + bool operator!=(Iterator other) const { return !(*this == other); } + + Iterator& operator++() { + ++ptr_; + return *this; + } + + Iterator operator++(int) { + Iterator temp(*this); + ++(*this); + return temp; + } + + private: + explicit Iterator(T* ptr) : ptr_(ptr) {} + + T* ptr_ = nullptr; + }; + + Iterator begin() const { return Iterator(data_); } + Iterator end() const { return Iterator(data_ + size_); } + private: T* data_ = nullptr; size_t size_ = 0; }; +/** + * Helper function template to create an array of fixed length, initialized by + * the provided initializer list, without explicitly specifying the array size, + * e.g. + * + * auto arr = v8::to_array>({v8_str("one"), v8_str("two")}); + * + * In the future, this may be replaced with or aliased to std::to_array (under + * consideration for C++20). + */ + +namespace detail { +template +constexpr std::array, N> to_array_lvalue_impl( + T (&a)[N], std::index_sequence) { + return {{a[I]...}}; +} + +template +constexpr std::array, N> to_array_rvalue_impl( + T (&&a)[N], std::index_sequence) { + return {{std::move(a[I])...}}; +} +} // namespace detail + +template +constexpr std::array, N> to_array(T (&a)[N]) { + return detail::to_array_lvalue_impl(a, std::make_index_sequence{}); +} + +template +constexpr std::array, N> to_array(T (&&a)[N]) { + return detail::to_array_rvalue_impl(std::move(a), + std::make_index_sequence{}); +} + } // namespace v8 #endif // INCLUDE_V8_MEMORY_SPAN_H_ diff --git a/deps/v8/include/v8-metrics.h b/deps/v8/include/v8-metrics.h index 5c6c1c4705e38b..46bc4679c5c83f 100644 --- a/deps/v8/include/v8-metrics.h +++ b/deps/v8/include/v8-metrics.h @@ -55,6 +55,7 @@ struct GarbageCollectionFullCycle { double efficiency_cpp_in_bytes_per_us = -1.0; double main_thread_efficiency_in_bytes_per_us = -1.0; double main_thread_efficiency_cpp_in_bytes_per_us = -1.0; + int64_t incremental_marking_start_stop_wall_clock_duration_in_us = -1; }; struct GarbageCollectionFullMainThreadIncrementalMark { diff --git a/deps/v8/include/v8-object.h b/deps/v8/include/v8-object.h index 45660924dd055a..f78d40438cb01f 100644 --- a/deps/v8/include/v8-object.h +++ b/deps/v8/include/v8-object.h @@ -174,7 +174,7 @@ enum AccessControl { DEFAULT = 0, ALL_CAN_READ = 1, ALL_CAN_WRITE = 1 << 1, - PROHIBITS_OVERWRITING V8_ENUM_DEPRECATE_SOON("unused") = 1 << 2 + PROHIBITS_OVERWRITING V8_ENUM_DEPRECATED("unused") = 1 << 2 }; /** diff --git a/deps/v8/include/v8-script.h b/deps/v8/include/v8-script.h index aa1447c6bb8cc5..759bd754d797be 100644 --- a/deps/v8/include/v8-script.h +++ b/deps/v8/include/v8-script.h @@ -16,6 +16,7 @@ #include "v8-data.h" // NOLINT(build/include_directory) #include "v8-local-handle.h" // NOLINT(build/include_directory) #include "v8-maybe.h" // NOLINT(build/include_directory) +#include "v8-memory-span.h" // NOLINT(build/include_directory) #include "v8-message.h" // NOLINT(build/include_directory) #include "v8config.h" // NOLINT(build/include_directory) 
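The v8-memory-span.h hunk above adds a C++20-style set of MemorySpan constructors plus a to_array helper, and the v8-script.h hunk below adds a CreateSyntheticModule overload that accepts a MemorySpan of export names. A small sketch of combining the two follows; the template arguments elided by the flattened diff (Local<String>, MemorySpan<const Local<String>>) are assumed, and the module name, exports and evaluation callback are illustrative.

// Sketch: building export names with v8::to_array and passing them via the
// implicit std::array -> MemorySpan conversion.
#include <v8.h>

v8::MaybeLocal<v8::Value> EvaluateStubModule(v8::Local<v8::Context> context,
                                             v8::Local<v8::Module> /*module*/) {
  // A real embedder would call Module::SetSyntheticModuleExport() here.
  return v8::Undefined(context->GetIsolate());
}

v8::Local<v8::Module> MakeStubModule(v8::Isolate* isolate) {
  // to_array deduces the length, so no std::vector allocation is needed.
  auto export_names = v8::to_array<v8::Local<v8::String>>(
      {v8::String::NewFromUtf8Literal(isolate, "default"),
       v8::String::NewFromUtf8Literal(isolate, "version")});

  // std::array converts implicitly to the MemorySpan parameter of the new
  // CreateSyntheticModule overload.
  return v8::Module::CreateSyntheticModule(
      isolate, v8::String::NewFromUtf8Literal(isolate, "stub-module"),
      export_names, EvaluateStubModule);
}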
@@ -285,10 +286,15 @@ class V8_EXPORT Module : public Data { * module_name is used solely for logging/debugging and doesn't affect module * behavior. */ + V8_DEPRECATE_SOON("Please use the version that takes a MemorySpan") static Local CreateSyntheticModule( Isolate* isolate, Local module_name, const std::vector>& export_names, SyntheticModuleEvaluationSteps evaluation_steps); + static Local CreateSyntheticModule( + Isolate* isolate, Local module_name, + const MemorySpan>& export_names, + SyntheticModuleEvaluationSteps evaluation_steps); /** * Set this module's exported value for the name export_name to the specified diff --git a/deps/v8/include/v8-typed-array.h b/deps/v8/include/v8-typed-array.h index 483b4f772ffa81..ba031cdd707b2b 100644 --- a/deps/v8/include/v8-typed-array.h +++ b/deps/v8/include/v8-typed-array.h @@ -5,14 +5,14 @@ #ifndef INCLUDE_V8_TYPED_ARRAY_H_ #define INCLUDE_V8_TYPED_ARRAY_H_ +#include + #include "v8-array-buffer.h" // NOLINT(build/include_directory) #include "v8-local-handle.h" // NOLINT(build/include_directory) #include "v8config.h" // NOLINT(build/include_directory) namespace v8 { -class SharedArrayBuffer; - /** * A base class for an instance of TypedArray series of constructors * (ES6 draft 15.13.6). @@ -20,12 +20,25 @@ class SharedArrayBuffer; class V8_EXPORT TypedArray : public ArrayBufferView { public: /* - * The largest typed array size that can be constructed using New. + * The largest supported typed array byte size. Each subclass defines a + * type-specific kMaxLength for the maximum length that can be passed to New. */ - static constexpr size_t kMaxLength = - internal::kApiSystemPointerSize == 4 - ? internal::kSmiMaxValue - : static_cast(uint64_t{1} << 32); +#if V8_ENABLE_SANDBOX + static constexpr size_t kMaxByteLength = + internal::kMaxSafeBufferSizeForSandbox; +#elif V8_HOST_ARCH_32_BIT + static constexpr size_t kMaxByteLength = std::numeric_limits::max(); +#else + // The maximum safe integer (2^53 - 1). + static constexpr size_t kMaxByteLength = + static_cast((uint64_t{1} << 53) - 1); +#endif + + /* + * Deprecated: Use |kMaxByteLength| or the type-specific |kMaxLength| fields. + */ + V8_DEPRECATE_SOON("Use kMaxByteLength") + static constexpr size_t kMaxLength = kMaxByteLength; /** * Number of elements in this typed array @@ -50,6 +63,13 @@ class V8_EXPORT TypedArray : public ArrayBufferView { */ class V8_EXPORT Uint8Array : public TypedArray { public: + /* + * The largest Uint8Array size that can be constructed using New. + */ + static constexpr size_t kMaxLength = + TypedArray::kMaxByteLength / sizeof(uint8_t); + static_assert(sizeof(uint8_t) == 1); + static Local New(Local array_buffer, size_t byte_offset, size_t length); static Local New(Local shared_array_buffer, @@ -71,6 +91,13 @@ class V8_EXPORT Uint8Array : public TypedArray { */ class V8_EXPORT Uint8ClampedArray : public TypedArray { public: + /* + * The largest Uint8ClampedArray size that can be constructed using New. + */ + static constexpr size_t kMaxLength = + TypedArray::kMaxByteLength / sizeof(uint8_t); + static_assert(sizeof(uint8_t) == 1); + static Local New(Local array_buffer, size_t byte_offset, size_t length); static Local New( @@ -93,6 +120,13 @@ class V8_EXPORT Uint8ClampedArray : public TypedArray { */ class V8_EXPORT Int8Array : public TypedArray { public: + /* + * The largest Int8Array size that can be constructed using New. 
+ */ + static constexpr size_t kMaxLength = + TypedArray::kMaxByteLength / sizeof(int8_t); + static_assert(sizeof(int8_t) == 1); + static Local New(Local array_buffer, size_t byte_offset, size_t length); static Local New(Local shared_array_buffer, @@ -114,6 +148,13 @@ class V8_EXPORT Int8Array : public TypedArray { */ class V8_EXPORT Uint16Array : public TypedArray { public: + /* + * The largest Uint16Array size that can be constructed using New. + */ + static constexpr size_t kMaxLength = + TypedArray::kMaxByteLength / sizeof(uint16_t); + static_assert(sizeof(uint16_t) == 2); + static Local New(Local array_buffer, size_t byte_offset, size_t length); static Local New(Local shared_array_buffer, @@ -135,6 +176,13 @@ class V8_EXPORT Uint16Array : public TypedArray { */ class V8_EXPORT Int16Array : public TypedArray { public: + /* + * The largest Int16Array size that can be constructed using New. + */ + static constexpr size_t kMaxLength = + TypedArray::kMaxByteLength / sizeof(int16_t); + static_assert(sizeof(int16_t) == 2); + static Local New(Local array_buffer, size_t byte_offset, size_t length); static Local New(Local shared_array_buffer, @@ -156,6 +204,13 @@ class V8_EXPORT Int16Array : public TypedArray { */ class V8_EXPORT Uint32Array : public TypedArray { public: + /* + * The largest Uint32Array size that can be constructed using New. + */ + static constexpr size_t kMaxLength = + TypedArray::kMaxByteLength / sizeof(uint32_t); + static_assert(sizeof(uint32_t) == 4); + static Local New(Local array_buffer, size_t byte_offset, size_t length); static Local New(Local shared_array_buffer, @@ -177,6 +232,13 @@ class V8_EXPORT Uint32Array : public TypedArray { */ class V8_EXPORT Int32Array : public TypedArray { public: + /* + * The largest Int32Array size that can be constructed using New. + */ + static constexpr size_t kMaxLength = + TypedArray::kMaxByteLength / sizeof(int32_t); + static_assert(sizeof(int32_t) == 4); + static Local New(Local array_buffer, size_t byte_offset, size_t length); static Local New(Local shared_array_buffer, @@ -198,6 +260,13 @@ class V8_EXPORT Int32Array : public TypedArray { */ class V8_EXPORT Float32Array : public TypedArray { public: + /* + * The largest Float32Array size that can be constructed using New. + */ + static constexpr size_t kMaxLength = + TypedArray::kMaxByteLength / sizeof(float); + static_assert(sizeof(float) == 4); + static Local New(Local array_buffer, size_t byte_offset, size_t length); static Local New(Local shared_array_buffer, @@ -219,6 +288,13 @@ class V8_EXPORT Float32Array : public TypedArray { */ class V8_EXPORT Float64Array : public TypedArray { public: + /* + * The largest Float64Array size that can be constructed using New. + */ + static constexpr size_t kMaxLength = + TypedArray::kMaxByteLength / sizeof(double); + static_assert(sizeof(double) == 8); + static Local New(Local array_buffer, size_t byte_offset, size_t length); static Local New(Local shared_array_buffer, @@ -240,6 +316,13 @@ class V8_EXPORT Float64Array : public TypedArray { */ class V8_EXPORT BigInt64Array : public TypedArray { public: + /* + * The largest BigInt64Array size that can be constructed using New. 
+ */ + static constexpr size_t kMaxLength = + TypedArray::kMaxByteLength / sizeof(int64_t); + static_assert(sizeof(int64_t) == 8); + static Local New(Local array_buffer, size_t byte_offset, size_t length); static Local New(Local shared_array_buffer, @@ -261,6 +344,13 @@ class V8_EXPORT BigInt64Array : public TypedArray { */ class V8_EXPORT BigUint64Array : public TypedArray { public: + /* + * The largest BigUint64Array size that can be constructed using New. + */ + static constexpr size_t kMaxLength = + TypedArray::kMaxByteLength / sizeof(uint64_t); + static_assert(sizeof(uint64_t) == 8); + static Local New(Local array_buffer, size_t byte_offset, size_t length); static Local New(Local shared_array_buffer, diff --git a/deps/v8/include/v8-value.h b/deps/v8/include/v8-value.h index 19ecf71d16a9d8..110b6953540ab2 100644 --- a/deps/v8/include/v8-value.h +++ b/deps/v8/include/v8-value.h @@ -391,7 +391,7 @@ class V8_EXPORT Value : public Data { V8_WARN_UNUSED_RESULT MaybeLocal ToDetailString( Local context) const; /** - * Perform the equivalent of `Object(value)` in JS. + * Perform the equivalent of `Tagged(value)` in JS. */ V8_WARN_UNUSED_RESULT MaybeLocal ToObject( Local context) const; @@ -469,6 +469,41 @@ class V8_EXPORT Value : public Data { static void CheckCast(Data* that); }; +/** + * Can be used to avoid repeated expensive type checks for groups of objects + * that are expected to be similar (e.g. when Blink converts a bunch of + * JavaScript objects to "ScriptWrappable" after a "HasInstance" check) by + * making use of V8-internal "hidden classes". An object that has passed the + * full check can be remembered via {Update}; further objects can be queried + * using {Matches}. + * Note that the answer will be conservative/"best-effort": when {Matches} + * returns true, then the {candidate} can be relied upon to have the same + * shape/constructor/prototype/etc. as the {baseline}. Otherwise, no reliable + * statement can be made (the objects might still have indistinguishable shapes + * for all intents and purposes, but this mechanism, being optimized for speed, + * couldn't determine that quickly). + */ +class V8_EXPORT TypecheckWitness { + public: + explicit TypecheckWitness(Isolate* isolate); + + /** + * Checks whether {candidate} can cheaply be identified as being "similar" + * to the {baseline} that was passed to {Update} earlier. + * It's safe to call this on an uninitialized {TypecheckWitness} instance: + * it will then return {false} for any input. + */ + V8_INLINE bool Matches(Local candidate) const; + + /** + * Remembers a new baseline for future {Matches} queries. 
+ */ + void Update(Local baseline); + + private: + Local cached_map_; +}; + template <> V8_INLINE Value* Value::Cast(Data* value) { #ifdef V8_ENABLE_CHECKS @@ -562,6 +597,14 @@ bool Value::QuickIsString() const { #endif // V8_STATIC_ROOTS_BOOL } +bool TypecheckWitness::Matches(Local candidate) const { + internal::Address obj = internal::ValueHelper::ValueAsAddress(*candidate); + internal::Address obj_map = internal::Internals::LoadMap(obj); + internal::Address cached = + internal::ValueHelper::ValueAsAddress(*cached_map_); + return obj_map == cached; +} + } // namespace v8 #endif // INCLUDE_V8_VALUE_H_ diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index 074d0e9ce39017..364ca72fbe1e0f 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 11 -#define V8_MINOR_VERSION 8 -#define V8_BUILD_NUMBER 172 -#define V8_PATCH_LEVEL 17 +#define V8_MINOR_VERSION 9 +#define V8_BUILD_NUMBER 169 +#define V8_PATCH_LEVEL 7 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl index 77e6b109cf6696..f96445e28def87 100644 --- a/deps/v8/infra/mb/mb_config.pyl +++ b/deps/v8/infra/mb/mb_config.pyl @@ -288,6 +288,7 @@ 'v8_linux64_arm64_no_wasm_compile_dbg': 'debug_arm64_webassembly_disabled', 'v8_linux64_verify_csa_compile_rel': 'release_x64_verify_csa', 'v8_linux64_asan_compile_rel': 'release_x64_asan_minimal_symbols', + 'v8_linux64_asan_sandbox_compile_rel': 'release_x64_asan_symbolized_expose_memory_corruption', 'v8_linux64_cfi_compile_rel': 'release_x64_cfi', 'v8_linux64_fuzzilli_compile_rel': 'release_x64_fuzzilli', 'v8_linux64_loong64_compile_rel': 'release_simulate_loong64', @@ -750,7 +751,7 @@ 'mixins': { 'android': { - 'gn_args': 'target_os="android" v8_android_log_stdout=true default_min_sdk_version=19', + 'gn_args': 'target_os="android" v8_android_log_stdout=true default_min_sdk_version=21', }, 'android_strip_outputs': { diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index 2e3513a7416951..1ec9b2cc6180e6 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -489,6 +489,7 @@ {'name': 'webkit', 'variant': 'stress_sampling'}, # Stress snapshot. {'name': 'mjsunit', 'variant': 'stress_snapshot'}, + {'name': 'mjsunit', 'variant': 'rehash_snapshot'}, # Experimental regexp engine. {'name': 'mjsunit', 'variant': 'experimental_regexp'}, # Variants for maglev. 
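The v8::TypecheckWitness API introduced above is aimed at embedders that type-check many similar objects in a row (the "Blink converts a bunch of JavaScript objects" case from its comment). A minimal usage sketch, not part of the patch: it assumes an active HandleScope and entered Context, plus a hypothetical embedder helper SlowTypeCheck() that performs the full check.

// Sketch: batch conversion with a cached hidden-class check.
// SlowTypeCheck() is hypothetical; TypecheckWitness is the class declared
// in include/v8-value.h above.
#include <v8.h>
#include <vector>

bool SlowTypeCheck(v8::Local<v8::Value> value);  // full check, defined elsewhere

bool ConvertAll(v8::Isolate* isolate,
                const std::vector<v8::Local<v8::Value>>& values) {
  v8::TypecheckWitness witness(isolate);
  for (v8::Local<v8::Value> value : values) {
    if (!witness.Matches(value)) {
      // Matches() is conservative: a false answer only means the fast path
      // could not prove similarity, so fall back to the full check.
      if (!SlowTypeCheck(value)) return false;
      witness.Update(value);  // Remember this shape for the following values.
    }
    // ... convert `value` here ...
  }
  return true;
}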
@@ -554,11 +555,6 @@ {'name': 'mozilla', 'variant': 'minor_ms'}, {'name': 'test262', 'variant': 'minor_ms', 'shards': 2}, {'name': 'mjsunit', 'variant': 'minor_ms'}, - {'name': 'v8testing', 'variant': 'concurrent_minor_ms'}, - {'name': 'benchmarks', 'variant': 'concurrent_minor_ms'}, - {'name': 'mozilla', 'variant': 'concurrent_minor_ms'}, - {'name': 'test262', 'variant': 'concurrent_minor_ms', 'shards': 2}, - {'name': 'mjsunit', 'variant': 'concurrent_minor_ms'}, ], }, 'v8_linux64_msan_rel': { @@ -1534,11 +1530,6 @@ {'name': 'mozilla', 'variant': 'minor_ms'}, {'name': 'test262', 'variant': 'minor_ms', 'shards': 2}, {'name': 'mjsunit', 'variant': 'minor_ms'}, - {'name': 'v8testing', 'variant': 'concurrent_minor_ms'}, - {'name': 'benchmarks', 'variant': 'concurrent_minor_ms'}, - {'name': 'mozilla', 'variant': 'concurrent_minor_ms'}, - {'name': 'test262', 'variant': 'concurrent_minor_ms', 'shards': 2}, - {'name': 'mjsunit', 'variant': 'concurrent_minor_ms'}, ], }, 'V8 Linux64 - disable runtime call stats': { @@ -1561,6 +1552,7 @@ {'name': 'webkit', 'variant': 'stress_sampling'}, # Stress snapshot. {'name': 'mjsunit', 'variant': 'stress_snapshot'}, + {'name': 'mjsunit', 'variant': 'rehash_snapshot'}, # Experimental regexp engine. {'name': 'mjsunit', 'variant': 'experimental_regexp'}, # Variants for maglev. @@ -1624,6 +1616,7 @@ {'name': 'webkit', 'variant': 'stress_sampling'}, # Stress snapshot. {'name': 'mjsunit', 'variant': 'stress_snapshot'}, + {'name': 'mjsunit', 'variant': 'rehash_snapshot'}, # Experimental regexp engine. {'name': 'mjsunit', 'variant': 'experimental_regexp'}, # Variants for maglev. diff --git a/deps/v8/src/api/api-arguments-inl.h b/deps/v8/src/api/api-arguments-inl.h index b5aba465de731a..563884b90ef738 100644 --- a/deps/v8/src/api/api-arguments-inl.h +++ b/deps/v8/src/api/api-arguments-inl.h @@ -32,7 +32,7 @@ CustomArgumentsBase::CustomArgumentsBase(Isolate* isolate) template CustomArguments::~CustomArguments() { - slot_at(kReturnValueIndex).store(Object(kHandleZapValue)); + slot_at(kReturnValueIndex).store(Tagged(kHandleZapValue)); } template diff --git a/deps/v8/src/api/api-arguments.cc b/deps/v8/src/api/api-arguments.cc index 08511a0ac4a1d7..289cd9f28b36f8 100644 --- a/deps/v8/src/api/api-arguments.cc +++ b/deps/v8/src/api/api-arguments.cc @@ -21,7 +21,8 @@ PropertyCallbackArguments::PropertyCallbackArguments( slot_at(T::kThisIndex).store(self); slot_at(T::kHolderIndex).store(holder); slot_at(T::kDataIndex).store(data); - slot_at(T::kIsolateIndex).store(Object(reinterpret_cast
<Address>(isolate))); + slot_at(T::kIsolateIndex) + .store(Tagged<Object>(reinterpret_cast<Address>
(isolate))); int value = Internals::kInferShouldThrowMode; if (should_throw.IsJust()) { value = should_throw.FromJust(); @@ -45,7 +46,8 @@ FunctionCallbackArguments::FunctionCallbackArguments( slot_at(T::kDataIndex).store(data); slot_at(T::kHolderIndex).store(holder); slot_at(T::kNewTargetIndex).store(new_target); - slot_at(T::kIsolateIndex).store(Object(reinterpret_cast
<Address>(isolate))); + slot_at(T::kIsolateIndex) + .store(Tagged<Object>(reinterpret_cast<Address>
(isolate))); // Here the hole is set as default value. It's converted to and not // directly exposed to js. // TODO(cbruni): Remove and/or use custom sentinel value. diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h index 154ac355a1aae3..6efec2a42a6c16 100644 --- a/deps/v8/src/api/api-inl.h +++ b/deps/v8/src/api/api-inl.h @@ -117,7 +117,7 @@ TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY) const v8::From* that, bool allow_empty_handle) { \ DCHECK(allow_empty_handle || !v8::internal::ValueHelper::IsEmpty(that)); \ DCHECK(v8::internal::ValueHelper::IsEmpty(that) || \ - Is##To(v8::internal::Object( \ + Is##To(v8::internal::Tagged( \ v8::internal::ValueHelper::ValueAsAddress(that)))); \ if (v8::internal::ValueHelper::IsEmpty(that)) { \ return v8::internal::Handle::null(); \ @@ -131,7 +131,7 @@ TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY) const v8::From* that, bool allow_empty_handle) { \ DCHECK(allow_empty_handle || !v8::internal::ValueHelper::IsEmpty(that)); \ DCHECK(v8::internal::ValueHelper::IsEmpty(that) || \ - Is##To(v8::internal::Object( \ + Is##To(v8::internal::Tagged( \ v8::internal::ValueHelper::ValueAsAddress(that)))); \ return v8::internal::DirectHandle( \ v8::internal::ValueHelper::ValueAsAddress(that)); \ @@ -149,7 +149,7 @@ TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY) const v8::From* that, bool allow_empty_handle) { \ DCHECK(allow_empty_handle || !v8::internal::ValueHelper::IsEmpty(that)); \ DCHECK(v8::internal::ValueHelper::IsEmpty(that) || \ - Is##To(v8::internal::Object( \ + Is##To(v8::internal::Tagged( \ v8::internal::ValueHelper::ValueAsAddress(that)))); \ return v8::internal::Handle( \ reinterpret_cast( \ @@ -312,7 +312,7 @@ bool CopyAndConvertArrayToCppBuffer(Local src, T* dst, } i::DisallowGarbageCollection no_gc; - i::Tagged obj = *reinterpret_cast(*src); + i::Tagged obj = *Utils::OpenHandle(*src); if (i::Object::IterationHasObservableEffects(obj)) { // The array has a custom iterator. return false; diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc index 3015c2f80fbc0e..db57ca09b791d3 100644 --- a/deps/v8/src/api/api-natives.cc +++ b/deps/v8/src/api/api-natives.cc @@ -182,7 +182,7 @@ Tagged GetIntrinsic(Isolate* isolate, v8::Intrinsic intrinsic) { V8_INTRINSICS_LIST(GET_INTRINSIC_VALUE) #undef GET_INTRINSIC_VALUE } - return Object(); + return Tagged(); } template @@ -195,13 +195,13 @@ MaybeHandle ConfigureInstance(Isolate* isolate, Handle obj, // Walk the inheritance chain and copy all accessors to current object. int max_number_of_properties = 0; - TemplateInfoT info = *data; + Tagged info = *data; while (!info.is_null()) { - Tagged props = info.property_accessors(); + Tagged props = info->property_accessors(); if (!IsUndefined(props, isolate)) { - max_number_of_properties += TemplateList::cast(props)->length(); + max_number_of_properties += ArrayList::cast(props)->Length(); } - info = info.GetParent(isolate); + info = info->GetParent(isolate); } if (max_number_of_properties > 0) { @@ -210,7 +210,9 @@ MaybeHandle ConfigureInstance(Isolate* isolate, Handle obj, Handle array = isolate->factory()->NewFixedArray(max_number_of_properties); - for (Handle temp(*data, isolate); !temp->is_null(); + // TODO(leszeks): Avoid creating unnecessary handles for cases where we + // don't need to append anything. + for (Handle temp(*data, isolate); !(*temp).is_null(); temp = handle(temp->GetParent(isolate), isolate)) { // Accumulate accessors. 
Tagged maybe_properties = temp->property_accessors(); @@ -233,28 +235,27 @@ MaybeHandle ConfigureInstance(Isolate* isolate, Handle obj, Tagged maybe_property_list = data->property_list(); if (IsUndefined(maybe_property_list, isolate)) return obj; - Handle properties(TemplateList::cast(maybe_property_list), - isolate); - if (properties->length() == 0) return obj; + Handle properties(ArrayList::cast(maybe_property_list), isolate); + if (properties->Length() == 0) return obj; int i = 0; for (int c = 0; c < data->number_of_properties(); c++) { - auto name = handle(Name::cast(properties->get(i++)), isolate); - Tagged bit = properties->get(i++); + auto name = handle(Name::cast(properties->Get(i++)), isolate); + Tagged bit = properties->Get(i++); if (IsSmi(bit)) { PropertyDetails details(Smi::cast(bit)); PropertyAttributes attributes = details.attributes(); PropertyKind kind = details.kind(); if (kind == PropertyKind::kData) { - auto prop_data = handle(properties->get(i++), isolate); + auto prop_data = handle(properties->Get(i++), isolate); RETURN_ON_EXCEPTION( isolate, DefineDataProperty(isolate, obj, name, prop_data, attributes), JSObject); } else { - auto getter = handle(properties->get(i++), isolate); - auto setter = handle(properties->get(i++), isolate); + auto getter = handle(properties->Get(i++), isolate); + auto setter = handle(properties->Get(i++), isolate); RETURN_ON_EXCEPTION(isolate, DefineAccessorProperty(isolate, obj, name, getter, setter, attributes), @@ -263,12 +264,12 @@ MaybeHandle ConfigureInstance(Isolate* isolate, Handle obj, } else { // Intrinsic data property --- Get appropriate value from the current // context. - PropertyDetails details(Smi::cast(properties->get(i++))); + PropertyDetails details(Smi::cast(properties->Get(i++))); PropertyAttributes attributes = details.attributes(); DCHECK_EQ(PropertyKind::kData, details.kind()); v8::Intrinsic intrinsic = - static_cast(Smi::ToInt(properties->get(i++))); + static_cast(Smi::ToInt(properties->Get(i++))); auto prop_data = handle(GetIntrinsic(isolate, intrinsic), isolate); RETURN_ON_EXCEPTION( @@ -560,11 +561,11 @@ MaybeHandle InstantiateFunction( void AddPropertyToPropertyList(Isolate* isolate, Handle templ, int length, Handle* data) { Tagged maybe_list = templ->property_list(); - Handle list; + Handle list; if (IsUndefined(maybe_list, isolate)) { - list = TemplateList::New(isolate, length); + list = ArrayList::New(isolate, length, AllocationType::kOld); } else { - list = handle(TemplateList::cast(maybe_list), isolate); + list = handle(ArrayList::cast(maybe_list), isolate); } templ->set_number_of_properties(templ->number_of_properties() + 1); for (int i = 0; i < length; i++) { @@ -572,7 +573,7 @@ void AddPropertyToPropertyList(Isolate* isolate, Handle templ, data[i].is_null() ? 
Handle::cast(isolate->factory()->undefined_value()) : data[i]; - list = TemplateList::Add(isolate, list, value); + list = ArrayList::Add(isolate, list, value); } templ->set_property_list(*list); } @@ -678,13 +679,13 @@ void ApiNatives::AddNativeDataProperty(Isolate* isolate, Handle info, Handle property) { Tagged maybe_list = info->property_accessors(); - Handle list; + Handle list; if (IsUndefined(maybe_list, isolate)) { - list = TemplateList::New(isolate, 1); + list = ArrayList::New(isolate, 1, AllocationType::kOld); } else { - list = handle(TemplateList::cast(maybe_list), isolate); + list = handle(ArrayList::cast(maybe_list), isolate); } - list = TemplateList::Add(isolate, list, property); + list = ArrayList::Add(isolate, list, property); info->set_property_accessors(*list); } diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index 7807545da57f23..4f583896edea86 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -731,7 +731,7 @@ i::Address* GlobalizeTracedReference(i::Isolate* i_isolate, i::Address value, auto result = i_isolate->traced_handles()->Create(value, slot, store_mode); #ifdef VERIFY_HEAP if (i::v8_flags.verify_heap) { - Object::ObjectVerify(i::Object(value), i_isolate); + Object::ObjectVerify(i::Tagged(value), i_isolate); } #endif // VERIFY_HEAP return result.location(); @@ -800,7 +800,7 @@ i::Address* GlobalizeReference(i::Isolate* i_isolate, i::Address value) { i::Handle result = i_isolate->global_handles()->Create(value); #ifdef VERIFY_HEAP if (i::v8_flags.verify_heap) { - i::Object::ObjectVerify(i::Object(value), i_isolate); + i::Object::ObjectVerify(i::Tagged(value), i_isolate); } #endif // VERIFY_HEAP return result.location(); @@ -920,8 +920,9 @@ EscapableHandleScope::EscapableHandleScope(Isolate* v8_isolate) { i::Address* EscapableHandleScope::Escape(i::Address* escape_value) { i::Heap* heap = reinterpret_cast(GetIsolate())->heap(); - Utils::ApiCheck(i::IsTheHole(i::Object(*escape_slot_), heap->isolate()), - "EscapableHandleScope::Escape", "Escape value set twice"); + Utils::ApiCheck( + i::IsTheHole(i::Tagged(*escape_slot_), heap->isolate()), + "EscapableHandleScope::Escape", "Escape value set twice"); if (escape_value == nullptr) { *escape_slot_ = i::ReadOnlyRoots(heap).undefined_value().ptr(); return nullptr; @@ -1805,8 +1806,8 @@ void ObjectTemplate::SetAccessCheckCallback(AccessCheckCallback callback, i::Handle::cast(struct_info); SET_FIELD_WRAPPED(i_isolate, info, set_callback, callback); - info->set_named_interceptor(i::Object()); - info->set_indexed_interceptor(i::Object()); + info->set_named_interceptor(i::Tagged()); + info->set_indexed_interceptor(i::Tagged()); if (data.IsEmpty()) { data = v8::Undefined(reinterpret_cast(i_isolate)); @@ -2443,7 +2444,17 @@ MaybeLocal Module::Evaluate(Local context) { Local Module::CreateSyntheticModule( Isolate* v8_isolate, Local module_name, - const std::vector>& export_names, + const std::vector>& export_names, + v8::Module::SyntheticModuleEvaluationSteps evaluation_steps) { + return CreateSyntheticModule( + v8_isolate, module_name, + MemorySpan>(export_names.begin(), export_names.end()), + evaluation_steps); +} + +Local Module::CreateSyntheticModule( + Isolate* v8_isolate, Local module_name, + const MemorySpan>& export_names, v8::Module::SyntheticModuleEvaluationSteps evaluation_steps) { auto i_isolate = reinterpret_cast(v8_isolate); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); @@ -2981,8 +2992,9 @@ void v8::TryCatch::operator delete(void*, size_t) { base::OS::Abort(); } void v8::TryCatch::operator 
delete[](void*, size_t) { base::OS::Abort(); } bool v8::TryCatch::HasCaught() const { - return !IsTheHole(i::Object(reinterpret_cast(exception_)), - i_isolate_); + return !IsTheHole( + i::Tagged(reinterpret_cast(exception_)), + i_isolate_); } bool v8::TryCatch::CanContinue() const { return can_continue_; } @@ -3970,7 +3982,8 @@ MaybeLocal Value::ToUint32(Local context) const { } i::Isolate* i::IsolateFromNeverReadOnlySpaceObject(i::Address obj) { - return i::GetIsolateFromWritableObject(i::HeapObject::cast(i::Object(obj))); + return i::GetIsolateFromWritableObject( + i::HeapObject::cast(i::Tagged(obj))); } bool i::ShouldThrowOnError(i::Isolate* i_isolate) { @@ -4225,8 +4238,6 @@ void v8::ArrayBufferView::CheckCast(Value* that) { "Value is not an ArrayBufferView"); } -constexpr size_t v8::TypedArray::kMaxLength; - void v8::TypedArray::CheckCast(Value* that) { i::Handle obj = Utils::OpenHandle(that); Utils::ApiCheck(i::IsJSTypedArray(*obj), "v8::TypedArray::Cast()", @@ -6716,9 +6727,10 @@ MaybeLocal v8::Context::NewRemoteContext( i::Handle access_check_info = i::handle( i::AccessCheckInfo::cast(global_constructor->GetAccessCheckInfo()), i_isolate); - Utils::ApiCheck(access_check_info->named_interceptor() != i::Object(), - "v8::Context::NewRemoteContext", - "Global template needs to have access check handlers"); + Utils::ApiCheck( + access_check_info->named_interceptor() != i::Tagged(), + "v8::Context::NewRemoteContext", + "Global template needs to have access check handlers"); i::Handle global_proxy = CreateEnvironment( i_isolate, nullptr, global_template, global_object, 0, DeserializeInternalFieldsCallback(), nullptr); @@ -7024,7 +7036,7 @@ class ObjectVisitorDeepFreezer : i::ObjectVisitor { i::Isolate* isolate_; Context::DeepFreezeDelegate* delegate_; - std::unordered_set done_list_; + std::unordered_set, i::Object::Hasher> done_list_; std::vector> objects_to_freeze_; std::vector> lazy_accessor_pairs_to_freeze_; base::Optional error_; @@ -7320,9 +7332,10 @@ MaybeLocal FunctionTemplate::NewRemoteInstance() { "InstanceTemplate needs to have access checks enabled"); i::Handle access_check_info = i::handle( i::AccessCheckInfo::cast(constructor->GetAccessCheckInfo()), i_isolate); - Utils::ApiCheck(access_check_info->named_interceptor() != i::Object(), - "v8::FunctionTemplate::NewRemoteInstance", - "InstanceTemplate needs to have access check handlers"); + Utils::ApiCheck( + access_check_info->named_interceptor() != i::Tagged(), + "v8::FunctionTemplate::NewRemoteInstance", + "InstanceTemplate needs to have access check handlers"); i::Handle object; if (!i::ApiNatives::InstantiateRemoteObject( Utils::OpenHandle(*InstanceTemplate())) @@ -7996,16 +8009,213 @@ Local v8::Array::New(Isolate* v8_isolate, Local* elements, factory->NewJSArrayWithElements(result, i::PACKED_ELEMENTS, len)); } +namespace internal { + +uint32_t GetLength(Tagged array) { + Tagged length = array->length(); + if (IsSmi(length)) return Smi::ToInt(length); + return static_cast(Object::Number(length)); +} + +} // namespace internal + uint32_t v8::Array::Length() const { i::Handle obj = Utils::OpenHandle(this); - i::Tagged length = obj->length(); - if (i::IsSmi(length)) { - return i::Smi::ToInt(length); - } else { - return static_cast(i::Object::Number(length)); + return i::GetLength(*obj); +} + +namespace internal { + +bool CanUseFastIteration(Isolate* isolate, Handle array) { + if (IsCustomElementsReceiverMap(array->map())) return false; + if (array->GetElementsAccessor()->HasAccessors(*array)) return false; + if 
(!JSObject::PrototypeHasNoElements(isolate, *array)) return false; + return true; +} + +enum class FastIterateResult { + kException = static_cast(v8::Array::CallbackResult::kException), + kBreak = static_cast(v8::Array::CallbackResult::kBreak), + kSlowPath, + kFinished, +}; + +FastIterateResult FastIterateArray(Handle array, Isolate* isolate, + v8::Array::IterationCallback callback, + void* callback_data) { + // Instead of relying on callers to check condition, this function returns + // {kSlowPath} for situations it can't handle. + // Most code paths below don't allocate, and rely on {callback} not allocating + // either, but this isn't enforced with {DisallowHeapAllocation} to allow + // embedders to allocate error objects before terminating the iteration. + // Since {callback} must not allocate anyway, we can get away with fake + // handles, reducing per-element overhead. + if (!CanUseFastIteration(isolate, array)) return FastIterateResult::kSlowPath; + using Result = v8::Array::CallbackResult; + DisallowJavascriptExecution no_js(isolate); + uint32_t length = GetLength(*array); + if (length == 0) return FastIterateResult::kFinished; + switch (array->GetElementsKind()) { + case PACKED_SMI_ELEMENTS: + case PACKED_ELEMENTS: + case PACKED_FROZEN_ELEMENTS: + case PACKED_SEALED_ELEMENTS: + case PACKED_NONEXTENSIBLE_ELEMENTS: { + Tagged elements = FixedArray::cast(array->elements()); + for (uint32_t i = 0; i < length; i++) { + Tagged element = elements->get(static_cast(i)); + // TODO(13270): When we switch to CSS, we can pass {element} to + // the callback directly, without {fake_handle}. + Handle fake_handle(reinterpret_cast(&element)); + Result result = callback(i, Utils::ToLocal(fake_handle), callback_data); + if (result != Result::kContinue) { + return static_cast(result); + } + DCHECK(CanUseFastIteration(isolate, array)); + } + return FastIterateResult::kFinished; + } + case HOLEY_SMI_ELEMENTS: + case HOLEY_FROZEN_ELEMENTS: + case HOLEY_SEALED_ELEMENTS: + case HOLEY_NONEXTENSIBLE_ELEMENTS: + case HOLEY_ELEMENTS: { + Tagged elements = FixedArray::cast(array->elements()); + for (uint32_t i = 0; i < length; i++) { + Tagged element = elements->get(static_cast(i)); + if (IsTheHole(element)) continue; + // TODO(13270): When we switch to CSS, we can pass {element} to + // the callback directly, without {fake_handle}. + Handle fake_handle(reinterpret_cast(&element)); + Result result = callback(i, Utils::ToLocal(fake_handle), callback_data); + if (result != Result::kContinue) { + return static_cast(result); + } + DCHECK(CanUseFastIteration(isolate, array)); + } + return FastIterateResult::kFinished; + } + case HOLEY_DOUBLE_ELEMENTS: + case PACKED_DOUBLE_ELEMENTS: { + DCHECK_NE(length, 0); // Cast to FixedDoubleArray would be invalid. 
+ Handle elements( + FixedDoubleArray::cast(array->elements()), isolate); + FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, i = 0, i, i < length, i++, { + if (elements->is_the_hole(i)) continue; + double element = elements->get_scalar(i); + Handle value = isolate->factory()->NewNumber(element); + Result result = callback(i, Utils::ToLocal(value), callback_data); + if (result != Result::kContinue) { + return static_cast(result); + } + DCHECK(CanUseFastIteration(isolate, array)); + }); + return FastIterateResult::kFinished; + } + case DICTIONARY_ELEMENTS: { + DisallowGarbageCollection no_gc; + Tagged dict = array->element_dictionary(); + struct Entry { + uint32_t index; + InternalIndex entry; + }; + std::vector sorted; + sorted.reserve(dict->NumberOfElements()); + ReadOnlyRoots roots(isolate); + for (InternalIndex i : dict->IterateEntries()) { + Tagged key = dict->KeyAt(isolate, i); + if (!dict->IsKey(roots, key)) continue; + uint32_t index = static_cast(Object::Number(key)); + sorted.push_back({index, i}); + } + std::sort( + sorted.begin(), sorted.end(), + [](const Entry& a, const Entry& b) { return a.index < b.index; }); + for (const Entry& entry : sorted) { + Tagged value = dict->ValueAt(entry.entry); + // TODO(13270): When we switch to CSS, we can pass {element} to + // the callback directly, without {fake_handle}. + Handle fake_handle(reinterpret_cast(&value)); + Result result = + callback(entry.index, Utils::ToLocal(fake_handle), callback_data); + if (result != Result::kContinue) { + return static_cast(result); + } + SLOW_DCHECK(CanUseFastIteration(isolate, array)); + } + return FastIterateResult::kFinished; + } + case NO_ELEMENTS: + return FastIterateResult::kFinished; + case FAST_SLOPPY_ARGUMENTS_ELEMENTS: + case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: + // Probably not worth implementing. Take the slow path. + return FastIterateResult::kSlowPath; + case WASM_ARRAY_ELEMENTS: + case FAST_STRING_WRAPPER_ELEMENTS: + case SLOW_STRING_WRAPPER_ELEMENTS: + case SHARED_ARRAY_ELEMENTS: +#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS: + TYPED_ARRAYS(TYPED_ARRAY_CASE) + RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE) +#undef TYPED_ARRAY_CASE + // These are never used by v8::Array instances. + UNREACHABLE(); } } +} // namespace internal + +Maybe v8::Array::Iterate(Local context, + v8::Array::IterationCallback callback, + void* callback_data) { + i::Handle array = Utils::OpenHandle(this); + i::Isolate* isolate = array->GetIsolate(); + i::FastIterateResult fast_result = + i::FastIterateArray(array, isolate, callback, callback_data); + if (fast_result == i::FastIterateResult::kException) return Nothing(); + // Early breaks and completed iteration both return successfully. + if (fast_result != i::FastIterateResult::kSlowPath) return JustVoid(); + + // Slow path: retrieving elements could have side effects. + ENTER_V8(isolate, context, Array, Iterate, Nothing(), i::HandleScope); + for (uint32_t i = 0; i < i::GetLength(*array); ++i) { + i::Handle element; + has_pending_exception = + !i::JSReceiver::GetElement(isolate, array, i).ToHandle(&element); + RETURN_ON_FAILED_EXECUTION_PRIMITIVE(void); + using Result = v8::Array::CallbackResult; + Result result = callback(i, Utils::ToLocal(element), callback_data); + if (result == Result::kException) return Nothing(); + if (result == Result::kBreak) return JustVoid(); + } + return JustVoid(); +} + +v8::TypecheckWitness::TypecheckWitness(Isolate* isolate) + // We need to reserve a handle that we can patch later. 
+ // TODO(13270): When we switch to CSS, we can use a direct pointer + // instead of a handle. + : cached_map_(v8::Number::New(isolate, 1)) {} + +void v8::TypecheckWitness::Update(Local baseline) { + i::Tagged obj = *Utils::OpenHandle(*baseline); + i::Tagged map = i::Smi::zero(); + if (!IsSmi(obj)) map = i::HeapObject::cast(obj)->map(); + // Design overview: in the {TypecheckWitness} constructor, we create + // a single handle for the witness value. Whenever {Update} is called, we + // make this handle point at the fresh baseline/witness; the intention is + // to allow having short-lived HandleScopes (e.g. in {FastIterateArray} + // above) while a {TypecheckWitness} is alive: it therefore cannot hold + // on to one of the short-lived handles. + // Calling {OpenHandle} on the {cached_map_} only serves to "reinterpret_cast" + // it to an {i::Handle} on which we can call {PatchValue}. + // TODO(13270): When we switch to CSS, this can become simpler: we can + // then simply overwrite the direct pointer. + i::Handle cache = Utils::OpenHandle(*cached_map_); + cache.PatchValue(map); +} + Local v8::Map::New(Isolate* v8_isolate) { i::Isolate* i_isolate = reinterpret_cast(v8_isolate); API_RCS_SCOPE(i_isolate, Map, New); @@ -8113,11 +8323,12 @@ i::Handle MapAsArray(i::Isolate* i_isolate, int result_index = 0; { i::DisallowGarbageCollection no_gc; - i::Tagged the_hole = i::ReadOnlyRoots(i_isolate).the_hole_value(); + i::Tagged hash_table_hole = + i::ReadOnlyRoots(i_isolate).hash_table_hole_value(); for (int i = offset; i < capacity; ++i) { i::InternalIndex entry(i); i::Tagged key = table->KeyAt(entry); - if (key == the_hole) continue; + if (key == hash_table_hole) continue; if (collect_keys) result->set(result_index++, key); if (collect_values) result->set(result_index++, table->ValueAt(entry)); } @@ -8218,11 +8429,12 @@ i::Handle SetAsArray(i::Isolate* i_isolate, int result_index = 0; { i::DisallowGarbageCollection no_gc; - i::Tagged the_hole = i::ReadOnlyRoots(i_isolate).the_hole_value(); + i::Tagged hash_table_hole = + i::ReadOnlyRoots(i_isolate).hash_table_hole_value(); for (int i = offset; i < capacity; ++i) { i::InternalIndex entry(i); i::Tagged key = table->KeyAt(entry); - if (key == the_hole) continue; + if (key == hash_table_hole) continue; result->set(result_index++, key); if (collect_key_values) result->set(result_index++, key); } @@ -8776,9 +8988,9 @@ size_t v8::TypedArray::Length() { return obj->WasDetached() ? 
0 : obj->GetLength(); } -static_assert( - v8::TypedArray::kMaxLength == i::JSTypedArray::kMaxLength, - "v8::TypedArray::kMaxLength must match i::JSTypedArray::kMaxLength"); +static_assert(v8::TypedArray::kMaxByteLength == i::JSTypedArray::kMaxByteLength, + "v8::TypedArray::kMaxByteLength must match " + "i::JSTypedArray::kMaxByteLength"); #define TYPED_ARRAY_NEW(Type, type, TYPE, ctype) \ Local Type##Array::New(Local array_buffer, \ @@ -10234,13 +10446,13 @@ void v8::Isolate::LocaleConfigurationChangeNotification() { #endif // V8_INTL_SUPPORT } -#if defined(V8_OS_WIN) +#if defined(V8_OS_WIN) && defined(V8_ENABLE_ETW_STACK_WALKING) void Isolate::SetFilterETWSessionByURLCallback( FilterETWSessionByURLCallback callback) { i::Isolate* i_isolate = reinterpret_cast(this); i_isolate->SetFilterETWSessionByURLCallback(callback); } -#endif // V8_OS_WIN +#endif // V8_OS_WIN && V8_ENABLE_ETW_STACK_WALKING bool v8::Object::IsCodeLike(v8::Isolate* v8_isolate) const { i::Isolate* i_isolate = reinterpret_cast(v8_isolate); @@ -10354,20 +10566,25 @@ String::Value::Value(v8::Isolate* v8_isolate, v8::Local obj) String::Value::~Value() { i::DeleteArray(str_); } -#define DEFINE_ERROR(NAME, name) \ - Local Exception::NAME(v8::Local raw_message) { \ - i::Isolate* i_isolate = i::Isolate::Current(); \ - API_RCS_SCOPE(i_isolate, NAME, New); \ - ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); \ - i::Object error; \ - { \ - i::HandleScope scope(i_isolate); \ - i::Handle message = Utils::OpenHandle(*raw_message); \ - i::Handle constructor = i_isolate->name##_function(); \ - error = *i_isolate->factory()->NewError(constructor, message); \ - } \ - i::Handle result(error, i_isolate); \ - return Utils::ToLocal(result); \ +#define DEFINE_ERROR(NAME, name) \ + Local Exception::NAME(v8::Local raw_message, \ + v8::Local raw_options) { \ + i::Isolate* i_isolate = i::Isolate::Current(); \ + API_RCS_SCOPE(i_isolate, NAME, New); \ + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); \ + i::Tagged error; \ + { \ + i::HandleScope scope(i_isolate); \ + i::Handle options; \ + if (!raw_options.IsEmpty()) { \ + options = Utils::OpenHandle(*raw_options); \ + } \ + i::Handle message = Utils::OpenHandle(*raw_message); \ + i::Handle constructor = i_isolate->name##_function(); \ + error = *i_isolate->factory()->NewError(constructor, message, options); \ + } \ + i::Handle result(error, i_isolate); \ + return Utils::ToLocal(result); \ } DEFINE_ERROR(RangeError, range_error) @@ -11285,8 +11502,9 @@ void InvokeAccessorGetterCallback( { Address arg = i_isolate->isolate_data()->api_callback_thunk_argument(); // Currently we don't call InterceptorInfo callbacks via CallApiGetter. - DCHECK(IsAccessorInfo(Object(arg))); - Tagged accessor_info = AccessorInfo::cast(Object(arg)); + DCHECK(IsAccessorInfo(Tagged(arg))); + Tagged accessor_info = + AccessorInfo::cast(Tagged(arg)); getter = reinterpret_cast( accessor_info->getter(i_isolate)); diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h index 8b67c3e045daec..002c8293efce15 100644 --- a/deps/v8/src/api/api.h +++ b/deps/v8/src/api/api.h @@ -408,11 +408,11 @@ class HandleScopeImplementer { // `is_microtask_context_[i]` is 1. // TODO(tzik): Remove |is_microtask_context_| after the deprecated // v8::Isolate::GetEnteredContext() is removed. - DetachableVector entered_contexts_; + DetachableVector> entered_contexts_; DetachableVector is_microtask_context_; // Used as a stack to keep track of saved contexts. 
- DetachableVector saved_contexts_; + DetachableVector> saved_contexts_; Address* spare_; Address* last_handle_before_deferred_block_; // This is only used for threading support. diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc index c02e218a8ade9b..edd8888ce8d0e5 100644 --- a/deps/v8/src/asmjs/asm-js.cc +++ b/deps/v8/src/asmjs/asm-js.cc @@ -72,7 +72,8 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle stdlib, base::StaticCharVector(#fname))); \ Handle value = StdlibMathMember(isolate, stdlib, name); \ if (!IsJSFunction(*value)) return false; \ - SharedFunctionInfo shared = Handle::cast(value)->shared(); \ + Tagged shared = \ + Handle::cast(value)->shared(); \ if (!shared->HasBuiltinId() || \ shared->builtin_id() != Builtin::kMath##FName) { \ return false; \ diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc index 003bd0f27368a2..672440a2b4b9ad 100644 --- a/deps/v8/src/ast/scopes.cc +++ b/deps/v8/src/ast/scopes.cc @@ -204,7 +204,7 @@ ClassScope::ClassScope(IsolateT* isolate, Zone* zone, // If the class variable is context-allocated and its index is // saved for deserialization, deserialize it. if (scope_info->HasSavedClassVariable()) { - String name; + Tagged name; int index; std::tie(name, index) = scope_info->SavedClassVariable(); DCHECK_EQ(scope_info->ContextLocalMode(index), VariableMode::kConst); @@ -475,7 +475,7 @@ Scope* Scope::DeserializeScopeChain(IsolateT* isolate, Zone* zone, DCHECK_EQ(scope_info->ContextLocalMode(0), VariableMode::kVar); DCHECK_EQ(scope_info->ContextLocalInitFlag(0), kCreatedInitialized); DCHECK(scope_info->HasInlinedLocalNames()); - String name = scope_info->ContextInlinedLocalName(0); + Tagged name = scope_info->ContextInlinedLocalName(0); MaybeAssignedFlag maybe_assigned = scope_info->ContextLocalMaybeAssignedFlag(0); outer_scope = @@ -976,8 +976,8 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) { DCHECK_NULL(cache->variables_.Lookup(name)); DisallowGarbageCollection no_gc; - String name_handle = *name->string(); - ScopeInfo scope_info = *scope_info_; + Tagged name_handle = *name->string(); + Tagged scope_info = *scope_info_; // The Scope is backed up by ScopeInfo. This means it cannot operate in a // heap-independent mode, and all strings must be internalized immediately. So // it's ok to get the Handle here. diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc index e63896bca53338..9a21c10cb14914 100644 --- a/deps/v8/src/base/cpu.cc +++ b/deps/v8/src/base/cpu.cc @@ -14,7 +14,7 @@ #if V8_OS_LINUX #include // AT_HWCAP #endif -#if V8_GLIBC_PREREQ(2, 16) +#if V8_GLIBC_PREREQ(2, 16) || V8_OS_ANDROID #include // getauxval() #endif #if V8_OS_QNX @@ -163,17 +163,27 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) { #define HWCAP_SB (1 << 29) #define HWCAP_PACA (1 << 30) #define HWCAP_PACG (1UL << 31) - +// See kernel header. +/* + * HWCAP2 flags - for elf_hwcap2 (in kernel) and AT_HWCAP2 + */ +#define HWCAP2_MTE (1 << 18) #endif // V8_HOST_ARCH_ARM64 #if V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 -static uint32_t ReadELFHWCaps() { - uint32_t result = 0; -#if V8_GLIBC_PREREQ(2, 16) - result = static_cast(getauxval(AT_HWCAP)); +static std::tuple ReadELFHWCaps() { + uint32_t hwcap = 0; + uint32_t hwcap2 = 0; +#if defined(AT_HWCAP) + hwcap = static_cast(getauxval(AT_HWCAP)); +#if defined(AT_HWCAP2) + hwcap2 = static_cast(getauxval(AT_HWCAP2)); +#endif // AT_HWCAP2 #else // Read the ELF HWCAP flags by parsing /proc/self/auxv. 
+ // If getauxval is not available, the kernel/libc is also not new enough to + // expose hwcap2. FILE* fp = base::Fopen("/proc/self/auxv", "r"); if (fp != nullptr) { struct { @@ -193,7 +203,7 @@ static uint32_t ReadELFHWCaps() { base::Fclose(fp); } #endif - return result; + return std::make_tuple(hwcap, hwcap2); } #endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 @@ -406,6 +416,7 @@ CPU::CPU() has_jscvt_(false), has_dot_prod_(false), has_lse_(false), + has_mte_(false), is_fp64_mode_(false), has_non_stop_time_stamp_counter_(false), is_running_in_vm_(false), @@ -628,7 +639,8 @@ CPU::CPU() } // Try to extract the list of CPU features from ELF hwcaps. - uint32_t hwcaps = ReadELFHWCaps(); + uint32_t hwcaps, hwcaps2; + std::tie(hwcaps, hwcaps2) = ReadELFHWCaps(); if (hwcaps != 0) { has_idiva_ = (hwcaps & HWCAP_IDIVA) != 0; has_neon_ = (hwcaps & HWCAP_NEON) != 0; @@ -740,7 +752,9 @@ CPU::CPU() #elif V8_OS_LINUX // Try to extract the list of CPU features from ELF hwcaps. - uint32_t hwcaps = ReadELFHWCaps(); + uint32_t hwcaps, hwcaps2; + std::tie(hwcaps, hwcaps2) = ReadELFHWCaps(); + has_mte_ = (hwcaps2 & HWCAP2_MTE) != 0; if (hwcaps != 0) { has_jscvt_ = (hwcaps & HWCAP_JSCVT) != 0; has_dot_prod_ = (hwcaps & HWCAP_ASIMDDP) != 0; diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h index a1537faa756a32..94c21d0abd2d7b 100644 --- a/deps/v8/src/base/cpu.h +++ b/deps/v8/src/base/cpu.h @@ -125,6 +125,7 @@ class V8_BASE_EXPORT CPU final { bool has_jscvt() const { return has_jscvt_; } bool has_dot_prod() const { return has_dot_prod_; } bool has_lse() const { return has_lse_; } + bool has_mte() const { return has_mte_; } // mips features bool is_fp64_mode() const { return is_fp64_mode_; } @@ -186,6 +187,7 @@ class V8_BASE_EXPORT CPU final { bool has_jscvt_; bool has_dot_prod_; bool has_lse_; + bool has_mte_; bool is_fp64_mode_; bool has_non_stop_time_stamp_counter_; bool is_running_in_vm_; diff --git a/deps/v8/src/base/iterator.h b/deps/v8/src/base/iterator.h index b10240649b79c3..88647241b15d78 100644 --- a/deps/v8/src/base/iterator.h +++ b/deps/v8/src/base/iterator.h @@ -6,6 +6,8 @@ #define V8_BASE_ITERATOR_H_ #include +#include +#include namespace v8 { namespace base { @@ -68,7 +70,7 @@ struct DerefPtrIterator : base::iterator { explicit DerefPtrIterator(T* const* ptr) : ptr(ptr) {} - T& operator*() { return **ptr; } + T& operator*() const { return **ptr; } DerefPtrIterator& operator++() { ++ptr; return *this; @@ -77,7 +79,12 @@ struct DerefPtrIterator : base::iterator { --ptr; return *this; } - bool operator!=(DerefPtrIterator other) { return ptr != other.ptr; } + bool operator!=(const DerefPtrIterator& other) const { + return ptr != other.ptr; + } + bool operator==(const DerefPtrIterator& other) const { + return ptr == other.ptr; + } }; // {Reversed} returns a container adapter usable in a range-based "for" @@ -130,6 +137,63 @@ auto IterateWithoutLast(const iterator_range& t) { return IterateWithoutLast(range_copy); } +// TupleIterator is an iterator wrapping around multiple iterators. It is use by +// the `zip` function below to iterate over multiple containers at once. +template +class TupleIterator + : public base::iterator< + std::bidirectional_iterator_tag, + std::tuple::reference...>> { + public: + using value_type = + std::tuple::reference...>; + + explicit TupleIterator(Iterators... its) : its_(its...) {} + + TupleIterator& operator++() { + std::apply([](auto&... 
iterators) { (++iterators, ...); }, its_); + return *this; + } + + template + bool operator!=(const Other& other) const { + return not_equal_impl(other, std::index_sequence_for{}); + } + + value_type operator*() const { + return std::apply( + [](auto&... this_iterators) { return value_type{*this_iterators...}; }, + its_); + } + + private: + template + bool not_equal_impl(const Other& other, + std::index_sequence) const { + return (... || (std::get(its_) != std::get(other.its_))); + } + + std::tuple its_; +}; + +// `zip` creates an iterator_range from multiple containers. It can be used to +// iterate over multiple containers at once. For instance: +// +// std::vector arr = { 2, 4, 6 }; +// std::set set = { 3.5, 4.5, 5.5 }; +// for (auto [i, d] : base::zip(arr, set)) { +// std::cout << i << " and " << d << std::endl; +// } +// +// Prints "2 and 3.5", "4 and 4.5" and "6 and 5.5". +template +auto zip(Containers&... containers) { + using TupleIt = + TupleIterator().begin())...>; + return base::make_iterator_range(TupleIt(containers.begin()...), + TupleIt(containers.end()...)); +} + } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/platform/memory-protection-key.h b/deps/v8/src/base/platform/memory-protection-key.h index 24c81ffa7ed553..2e3cfa29489a2a 100644 --- a/deps/v8/src/base/platform/memory-protection-key.h +++ b/deps/v8/src/base/platform/memory-protection-key.h @@ -32,6 +32,9 @@ class V8_BASE_EXPORT MemoryProtectionKey { // mprotect(). static constexpr int kNoMemoryProtectionKey = -1; + // The default ProtectionKey can be used to remove pkey assignments. + static constexpr int kDefaultProtectionKey = 0; + // Permissions for memory protection keys on top of the page's permissions. // NOTE: Since there is no executable bit, the executable permission cannot be // withdrawn by memory protection keys. diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc index 73cdbdb19df2ae..421070d5d08115 100644 --- a/deps/v8/src/base/platform/platform-posix.cc +++ b/deps/v8/src/base/platform/platform-posix.cc @@ -370,9 +370,9 @@ void* OS::GetRandomMmapAddr() { // this address for RISC-V. https://github.com/v8-riscv/v8/issues/375 raw_addr &= 0x3FFFF000; #elif V8_TARGET_ARCH_LOONG64 - // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance - // to fulfill request. - raw_addr &= uint64_t{0xFFFFFF0000}; + // 40 or 47 bits of virtual addressing. Truncate to 38 bits to allow kernel + // chance to fulfill request. 
+ raw_addr &= uint64_t{0x3FFFFF0000}; #else raw_addr &= 0x3FFFF000; @@ -849,6 +849,25 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) { } #endif +int OS::GetPeakMemoryUsageKb() { +#if defined(V8_OS_FUCHSIA) + // Fuchsia does not implement getrusage() + return -1; +#else + struct rusage usage; + if (getrusage(RUSAGE_SELF, &usage) < 0) return -1; + +#if defined(V8_OS_MACOS) || defined(V8_OS_IOS) + constexpr int KB = 1024; + // MacOS and iOS ru_maxrss count bytes + return static_cast(usage.ru_maxrss / KB); +#else + // Most other cases (at least Linux, IOS, return kilobytes) + return static_cast(usage.ru_maxrss); +#endif // defined(V8_OS_MACOS) || defined(V8_OS_IOS) +#endif // defined(V8_OS_FUCHSIA) +} + double OS::TimeCurrentMillis() { return Time::Now().ToJsTime(); } diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc index 152fbbf3c26eab..c65953b0cf698e 100644 --- a/deps/v8/src/base/platform/platform-win32.cc +++ b/deps/v8/src/base/platform/platform-win32.cc @@ -22,6 +22,7 @@ #include // For SymLoadModule64 and al. #include // For _msize() #include // For timeGetTime(). +#include // For GetProcessmMemoryInfo(). #include // For Module32First and al. #include @@ -487,6 +488,18 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) { return 0; } +int OS::GetPeakMemoryUsageKb() { + constexpr int KB = 1024; + + PROCESS_MEMORY_COUNTERS mem_counters; + int ret; + + ret = GetProcessMemoryInfo(GetCurrentProcess(), &mem_counters, + sizeof(mem_counters)); + if (ret == 0) return -1; + + return static_cast(mem_counters.PeakWorkingSetSize / KB); +} // Returns current time as the number of milliseconds since // 00:00:00 UTC, January 1, 1970. diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h index 4ced4fdeddc164..0ea081b5ff5c85 100644 --- a/deps/v8/src/base/platform/platform.h +++ b/deps/v8/src/base/platform/platform.h @@ -162,6 +162,9 @@ class V8_BASE_EXPORT OS { // micro-second resolution. static int GetUserTime(uint32_t* secs, uint32_t* usecs); + // Obtain the peak memory usage in kilobytes + static int GetPeakMemoryUsageKb(); + // Returns current time as the number of milliseconds since // 00:00:00 UTC, January 1, 1970. static double TimeCurrentMillis(); diff --git a/deps/v8/src/base/small-vector.h b/deps/v8/src/base/small-vector.h index 4470b875e20ad9..ff56e0b581493f 100644 --- a/deps/v8/src/base/small-vector.h +++ b/deps/v8/src/base/small-vector.h @@ -27,6 +27,7 @@ class SmallVector { public: static constexpr size_t kInlineSize = kSize; + using value_type = T; SmallVector() = default; explicit SmallVector(const Allocator& allocator) : allocator_(allocator) {} @@ -197,9 +198,17 @@ class SmallVector { end_ = begin_ + new_size; } - void reserve_no_init(size_t new_capacity) { - // Resizing without initialization is safe if T is trivially copyable. 
- ASSERT_TRIVIALLY_COPYABLE(T); + void resize_and_init(size_t new_size) { + static_assert(std::is_trivially_destructible_v); + if (new_size > capacity()) Grow(new_size); + T* new_end = begin_ + new_size; + if (new_end > end_) { + std::uninitialized_fill(end_, new_end, T{}); + } + end_ = new_end; + } + + void reserve(size_t new_capacity) { if (new_capacity > capacity()) Grow(new_capacity); } diff --git a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h index e5fb71dd20ca91..68a0e2741db40e 100644 --- a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h +++ b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h @@ -63,6 +63,9 @@ void BaselineAssembler::RegisterFrameAddress( MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); } +MemOperand BaselineAssembler::FeedbackCellOperand() { + return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp); +} void BaselineAssembler::Bind(Label* label) { __ bind(label); } @@ -400,9 +403,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadFeedbackCell(feedback_cell); Register interrupt_budget = scratch_scope.AcquireScratch(); __ ldr(interrupt_budget, @@ -423,9 +424,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadFeedbackCell(feedback_cell); Register interrupt_budget = scratch_scope.AcquireScratch(); __ ldr(interrupt_budget, diff --git a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h index 77d29bb8981451..1268374650b6b5 100644 --- a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h +++ b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h @@ -62,6 +62,9 @@ void BaselineAssembler::RegisterFrameAddress( MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); } +MemOperand BaselineAssembler::FeedbackCellOperand() { + return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp); +} void BaselineAssembler::Bind(Label* label) { __ Bind(label); } @@ -458,9 +461,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadFeedbackCell(feedback_cell); Register interrupt_budget = scratch_scope.AcquireScratch().W(); __ Ldr(interrupt_budget, @@ -481,9 +482,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadFeedbackCell(feedback_cell); Register interrupt_budget = scratch_scope.AcquireScratch().W(); __ Ldr(interrupt_budget, diff --git 
a/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h index 0bdb3ad47f3681..18800e41112df8 100644 --- a/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h +++ b/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h @@ -49,32 +49,25 @@ void BaselineCompiler::PrologueFillFrame() { const int kLoopUnrollSize = 8; const int new_target_index = new_target_or_generator_register.index(); const bool has_new_target = new_target_index != kMaxInt; - // BaselineOutOfLinePrologue already pushed one undefined. - register_count -= 1; if (has_new_target) { - if (new_target_index == 0) { - // Oops, need to fix up that undefined that BaselineOutOfLinePrologue - // pushed. - __ masm()->Poke(kJavaScriptCallNewTargetRegister, Operand(0)); - } else { DCHECK_LE(new_target_index, register_count); - int index = 1; - for (; index + 2 <= new_target_index; index += 2) { + int before_new_target_count = 0; + for (; before_new_target_count + 2 <= new_target_index; + before_new_target_count += 2) { __ masm()->Push(kInterpreterAccumulatorRegister, kInterpreterAccumulatorRegister); } - if (index == new_target_index) { + if (before_new_target_count == new_target_index) { __ masm()->Push(kJavaScriptCallNewTargetRegister, kInterpreterAccumulatorRegister); } else { - DCHECK_EQ(index, new_target_index - 1); + DCHECK_EQ(before_new_target_count + 1, new_target_index); __ masm()->Push(kInterpreterAccumulatorRegister, kJavaScriptCallNewTargetRegister); } - // We pushed "index" registers, minus the one the prologue pushed, plus - // the two registers that included new_target. - register_count -= (index - 1 + 2); - } + // We pushed before_new_target_count registers, plus the two registers + // that included new_target. + register_count -= (before_new_target_count + 2); } if (register_count < 2 * kLoopUnrollSize) { // If the frame is small enough, just unroll the frame fill completely. 
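The reworked arm64 PrologueFillFrame above pushes interpreter registers in pairs, which makes the new_target bookkeeping easy to misread. Below is a small standalone sketch, illustrative only and not part of the patch, that mirrors the counting logic with one worked case.

// Mirrors the slot counting in PrologueFillFrame: registers are filled two at
// a time, and one of the pushed pairs contains the new_target register.
#include <cassert>

int RegistersLeftToFill(int register_count, int new_target_index) {
  int before_new_target_count = 0;
  while (before_new_target_count + 2 <= new_target_index) {
    before_new_target_count += 2;  // a push of {undefined, undefined}
  }
  // The next push is the pair that contains new_target (paired with one
  // undefined), so in total before_new_target_count + 2 slots are consumed.
  return register_count - (before_new_target_count + 2);
}

int main() {
  // register_count = 7, new_target at index 3: one pair of undefined values,
  // then the pair containing new_target, leaving 3 registers for the
  // unrolled/looped fill that follows in the real prologue.
  assert(RegistersLeftToFill(7, 3) == 3);
  return 0;
}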
diff --git a/deps/v8/src/baseline/baseline-assembler-inl.h b/deps/v8/src/baseline/baseline-assembler-inl.h index 9fba6a927bd18a..34250e0e67e27f 100644 --- a/deps/v8/src/baseline/baseline-assembler-inl.h +++ b/deps/v8/src/baseline/baseline-assembler-inl.h @@ -139,6 +139,13 @@ void BaselineAssembler::StoreRegister(interpreter::Register output, Move(output, value); } +void BaselineAssembler::LoadFeedbackCell(Register output) { + Move(output, FeedbackCellOperand()); + ScratchRegisterScope scratch_scope(this); + Register scratch = scratch_scope.AcquireScratch(); + __ AssertFeedbackCell(output, scratch); +} + template void BaselineAssembler::DecodeField(Register reg) { __ DecodeField(reg); diff --git a/deps/v8/src/baseline/baseline-assembler.h b/deps/v8/src/baseline/baseline-assembler.h index 41f3c22df398fa..1437e68ef826ff 100644 --- a/deps/v8/src/baseline/baseline-assembler.h +++ b/deps/v8/src/baseline/baseline-assembler.h @@ -30,6 +30,7 @@ class BaselineAssembler { inline MemOperand ContextOperand(); inline MemOperand FunctionOperand(); inline MemOperand FeedbackVectorOperand(); + inline MemOperand FeedbackCellOperand(); inline void GetCode(LocalIsolate* isolate, CodeDesc* desc); inline int pc_offset() const; @@ -232,6 +233,9 @@ class BaselineAssembler { inline void LoadContext(Register output); inline void StoreContext(Register context); + inline void LoadFeedbackCell(Register output); + inline void AssertFeedbackCell(Register object); + inline static void EmitReturn(MacroAssembler* masm); MacroAssembler* masm() { return masm_; } diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc index 1252af976e0160..978c092826eb03 100644 --- a/deps/v8/src/baseline/baseline-compiler.cc +++ b/deps/v8/src/baseline/baseline-compiler.cc @@ -78,7 +78,7 @@ namespace detail { #ifdef DEBUG bool Clobbers(Register target, Register reg) { return target == reg; } bool Clobbers(Register target, Handle handle) { return false; } -bool Clobbers(Register target, Smi smi) { return false; } +bool Clobbers(Register target, Tagged smi) { return false; } bool Clobbers(Register target, Tagged index) { return false; } bool Clobbers(Register target, int32_t imm) { return false; } bool Clobbers(Register target, RootIndex index) { return false; } @@ -92,7 +92,7 @@ bool MachineTypeMatches(MachineType type, MemOperand reg) { return true; } bool MachineTypeMatches(MachineType type, Handle handle) { return type.IsTagged() && !type.IsTaggedSigned(); } -bool MachineTypeMatches(MachineType type, Smi handle) { +bool MachineTypeMatches(MachineType type, Tagged handle) { return type.IsTagged() && !type.IsTaggedPointer(); } bool MachineTypeMatches(MachineType type, Tagged handle) { @@ -712,7 +712,7 @@ void BaselineCompiler::VisitLdaZero() { } void BaselineCompiler::VisitLdaSmi() { - Smi constant = Smi::FromInt(iterator().GetImmediateOperand(0)); + Tagged constant = Smi::FromInt(iterator().GetImmediateOperand(0)); __ Move(kInterpreterAccumulatorRegister, constant); } diff --git a/deps/v8/src/baseline/bytecode-offset-iterator.h b/deps/v8/src/baseline/bytecode-offset-iterator.h index 6f30cd9a72d80f..06d02207ebc4b7 100644 --- a/deps/v8/src/baseline/bytecode-offset-iterator.h +++ b/deps/v8/src/baseline/bytecode-offset-iterator.h @@ -84,7 +84,7 @@ class V8_EXPORT_PRIVATE BytecodeOffsetIterator { Address current_pc_start_offset_; Address current_pc_end_offset_; int current_bytecode_offset_; - BytecodeArray bytecode_handle_storage_; + Tagged bytecode_handle_storage_; interpreter::BytecodeArrayIterator 
bytecode_iterator_; LocalHeap* local_heap_; base::Optional no_gc_; diff --git a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h index d474366ae5ac57..a5606388f8d400 100644 --- a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h +++ b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h @@ -68,6 +68,9 @@ void BaselineAssembler::RegisterFrameAddress( MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(ebp, BaselineFrameConstants::kFeedbackVectorFromFp); } +MemOperand BaselineAssembler::FeedbackCellOperand() { + return MemOperand(ebp, BaselineFrameConstants::kFeedbackCellFromFp); +} void BaselineAssembler::Bind(Label* label) { __ bind(label); } @@ -384,9 +387,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadFeedbackCell(feedback_cell); __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset), Immediate(weight)); if (skip_interrupt_label) { @@ -401,9 +402,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); DCHECK(!AreAliased(feedback_cell, weight)); - LoadFunction(feedback_cell); - LoadTaggedField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadFeedbackCell(feedback_cell); __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset), weight); if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label); diff --git a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h index 1dd4a7ef0df106..2d1bb082dae463 100644 --- a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h +++ b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h @@ -61,6 +61,9 @@ void BaselineAssembler::RegisterFrameAddress( MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); } +MemOperand BaselineAssembler::FeedbackCellOperand() { + return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp); +} void BaselineAssembler::Bind(Label* label) { __ bind(label); } @@ -389,9 +392,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadFeedbackCell(feedback_cell); Register interrupt_budget = scratch_scope.AcquireScratch(); __ Ld_w(interrupt_budget, @@ -409,9 +410,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadFeedbackCell(feedback_cell); Register interrupt_budget = scratch_scope.AcquireScratch(); __ Ld_w(interrupt_budget, diff --git a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h index 47b8fabaa1dbea..a6e091ee7c0cd4 100644 --- 
a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h +++ b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h @@ -61,6 +61,9 @@ void BaselineAssembler::RegisterFrameAddress( MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); } +MemOperand BaselineAssembler::FeedbackCellOperand() { + return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp); +} void BaselineAssembler::Bind(Label* label) { __ bind(label); } @@ -387,9 +390,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadFeedbackCell(feedback_cell); Register interrupt_budget = scratch_scope.AcquireScratch(); __ Lw(interrupt_budget, @@ -407,9 +408,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadFeedbackCell(feedback_cell); Register interrupt_budget = scratch_scope.AcquireScratch(); __ Lw(interrupt_budget, diff --git a/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h b/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h index 639ff8ae1ff0d5..4985a10149cb08 100644 --- a/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h +++ b/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h @@ -88,6 +88,9 @@ void BaselineAssembler::RegisterFrameAddress( MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); } +MemOperand BaselineAssembler::FeedbackCellOperand() { + return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp); +} void BaselineAssembler::Bind(Label* label) { __ bind(label); } @@ -471,9 +474,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadFeedbackCell(feedback_cell); Register interrupt_budget = scratch_scope.AcquireScratch(); __ LoadU32( @@ -496,9 +497,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadFeedbackCell(feedback_cell); Register interrupt_budget = scratch_scope.AcquireScratch(); __ LoadU32( diff --git a/deps/v8/src/baseline/riscv/baseline-assembler-riscv-inl.h b/deps/v8/src/baseline/riscv/baseline-assembler-riscv-inl.h index 7f65c7858052de..6477de7ce3b0f9 100644 --- a/deps/v8/src/baseline/riscv/baseline-assembler-riscv-inl.h +++ b/deps/v8/src/baseline/riscv/baseline-assembler-riscv-inl.h @@ -60,6 +60,9 @@ void BaselineAssembler::RegisterFrameAddress( MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); } +MemOperand BaselineAssembler::FeedbackCellOperand() { + return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp); 
+} void BaselineAssembler::Bind(Label* label) { __ bind(label); } @@ -395,9 +398,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadFeedbackCell(feedback_cell); Register interrupt_budget = scratch_scope.AcquireScratch(); __ Lw(interrupt_budget, @@ -417,9 +418,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadFeedbackCell(feedback_cell); Register interrupt_budget = scratch_scope.AcquireScratch(); __ Lw(interrupt_budget, diff --git a/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h b/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h index bd157932db5486..b05d6fb594dd40 100644 --- a/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h +++ b/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h @@ -87,6 +87,9 @@ void BaselineAssembler::RegisterFrameAddress( MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); } +MemOperand BaselineAssembler::FeedbackCellOperand() { + return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp); +} void BaselineAssembler::Bind(Label* label) { __ bind(label); } @@ -488,9 +491,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadFeedbackCell(feedback_cell); Register interrupt_budget = scratch_scope.AcquireScratch(); __ LoadU32( @@ -513,9 +514,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadFeedbackCell(feedback_cell); Register interrupt_budget = scratch_scope.AcquireScratch(); __ LoadU32( diff --git a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h index c80c077b83b754..554861e1d16440 100644 --- a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h +++ b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h @@ -70,6 +70,9 @@ void BaselineAssembler::RegisterFrameAddress( MemOperand BaselineAssembler::FeedbackVectorOperand() { return MemOperand(rbp, BaselineFrameConstants::kFeedbackVectorFromFp); } +MemOperand BaselineAssembler::FeedbackCellOperand() { + return MemOperand(rbp, BaselineFrameConstants::kFeedbackCellFromFp); +} void BaselineAssembler::Bind(Label* label) { __ bind(label); } @@ -398,11 +401,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - // Decompresses pointer by complex addressing mode when necessary. 
- TaggedRegister tagged(feedback_cell); - LoadTaggedField(tagged, feedback_cell, JSFunction::kFeedbackCellOffset); - __ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset), + LoadFeedbackCell(feedback_cell); + __ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset), Immediate(weight)); if (skip_interrupt_label) { DCHECK_LT(weight, 0); @@ -415,11 +415,9 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ASM_CODE_COMMENT(masm_); ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - // Decompresses pointer by complex addressing mode when necessary. - TaggedRegister tagged(feedback_cell); - LoadTaggedField(tagged, feedback_cell, JSFunction::kFeedbackCellOffset); - __ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset), weight); + LoadFeedbackCell(feedback_cell); + __ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset), + weight); if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label); } diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index 9a465695ca8ed7..7209b889cd26b1 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -933,18 +933,19 @@ void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm, void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { UseScratchRegisterScope temps(masm); // Need a few extra registers - temps.Include(r8, r9); + temps.Include({r4, r8, r9}); auto descriptor = Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue); Register closure = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); - // Load the feedback vector from the closure. + // Load the feedback cell and vector from the closure. + Register feedback_cell = temps.Acquire(); Register feedback_vector = temps.Acquire(); - __ ldr(feedback_vector, + __ ldr(feedback_cell, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ ldr(feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); + FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset)); __ AssertFeedbackVector(feedback_vector); // Check the tiering state. @@ -1004,9 +1005,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { Register bytecodeArray = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); __ Push(argc, bytecodeArray); - - // Baseline code frames store the feedback vector where interpreter would - // store the bytecode offset. if (v8_flags.debug_code) { UseScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); @@ -1014,6 +1012,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { FEEDBACK_VECTOR_TYPE); __ Assert(eq, AbortReason::kExpectedFeedbackVector); } + __ Push(feedback_cell); __ Push(feedback_vector); } @@ -1075,9 +1074,9 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) { // We're here because we got deopted during BaselineOutOfLinePrologue's stack // check. Undo all its frame creation and call into the interpreter instead. - // Drop bytecode offset (was the feedback vector but got replaced during - // deopt) and bytecode array. - __ Drop(2); + // Drop the feedback vector, the bytecode offset (was the feedback vector but + // got replaced during deopt) and bytecode array. + __ Drop(3); // Context, closure, argc. 
__ Pop(kContextRegister, kJavaScriptCallTargetRegister, @@ -1127,35 +1126,20 @@ void Builtins::Generate_InterpreterEntryTrampoline( BYTECODE_ARRAY_TYPE); __ b(ne, &compile_lazy); -#ifndef V8_JITLESS - // Load the feedback vector from the closure. - Register feedback_vector = r2; - __ ldr(feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); - __ ldr(feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); - Label push_stack_frame; - // Check if feedback vector is valid. If valid, check for optimized code - // and update invocation count. Otherwise, setup the stack frame. - __ ldr(r4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); - __ ldrh(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); - __ cmp(r4, Operand(FEEDBACK_VECTOR_TYPE)); - __ b(ne, &push_stack_frame); + Register feedback_vector = r2; + __ LoadFeedbackVector(feedback_vector, closure, r4, &push_stack_frame); +#ifndef V8_JITLESS + // If feedback vector is valid, check for optimized code and update invocation + // count. Register flags = r4; Label flags_need_processing; __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION, &flags_need_processing); - { - UseScratchRegisterScope temps(masm); - ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire()); - } - - Label not_optimized; - __ bind(¬_optimized); + ResetFeedbackVectorOsrUrgency(masm, feedback_vector, r4); // Increment invocation count for the function. __ ldr(r9, FieldMemOperand(feedback_vector, @@ -1167,13 +1151,14 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Open a frame scope to indicate that there is a frame on the stack. The // MANUAL indicates that the scope shouldn't actually generate code to set up // the frame (that is done below). - __ bind(&push_stack_frame); #else // Note: By omitting the above code in jitless mode we also disable: // - kFlagsLogNextExecution: only used for logging/profiling; and // - kInvocationCountOffset: only used for tiering heuristics and code // coverage. #endif // !V8_JITLESS + + __ bind(&push_stack_frame); FrameScope frame_scope(masm, StackFrame::MANUAL); __ PushStandardFrame(closure); @@ -1183,7 +1168,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Push bytecode array and Smi tagged bytecode array offset. __ SmiTag(r4, kInterpreterBytecodeOffsetRegister); - __ Push(kInterpreterBytecodeArrayRegister, r4); + __ Push(kInterpreterBytecodeArrayRegister, r4, feedback_vector); // Allocate the local and temporary register file on the stack. Label stack_overflow; @@ -3517,9 +3502,9 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, // from the API function here. 
MemOperand stack_space_operand = ExitFrameStackSlotOperand(FCA::kLengthOffset + kSlotsToDropOnStackSize); - __ mov(scratch, Operand((FCA::kArgsLength + 1 /* receiver */ + - exit_frame_params_count) * - kPointerSize)); + __ mov(scratch, + Operand((FCA::kArgsLengthWithReceiver + exit_frame_params_count) * + kPointerSize)); __ add(scratch, scratch, Operand(argc, LSL, kPointerSizeLog2)); __ str(scratch, stack_space_operand); @@ -3540,9 +3525,10 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, const bool with_profiling = mode != CallApiCallbackMode::kOptimizedNoProfiling; + Label* no_done = nullptr; CallApiFunctionAndReturn(masm, with_profiling, api_function_address, thunk_ref, thunk_arg, kUseStackSpaceOperand, - &stack_space_operand, return_value_operand); + &stack_space_operand, return_value_operand, no_done); } void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { @@ -3638,9 +3624,10 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { MemOperand* const kUseStackSpaceConstant = nullptr; const bool with_profiling = true; - CallApiFunctionAndReturn(masm, with_profiling, api_function_address, - thunk_ref, thunk_arg, kStackUnwindSpace, - kUseStackSpaceConstant, return_value_operand); + Label* no_done = nullptr; + CallApiFunctionAndReturn( + masm, with_profiling, api_function_address, thunk_ref, thunk_arg, + kStackUnwindSpace, kUseStackSpaceConstant, return_value_operand, no_done); } void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { @@ -3977,12 +3964,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, AssertCodeIsBaseline(masm, code_obj, r3); } - // Load the feedback vector. - Register feedback_vector = r2; - __ ldr(feedback_vector, + // Load the feedback cell and vector. + Register feedback_cell = r2; + Register feedback_vector = r9; + __ ldr(feedback_cell, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ ldr(feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); + FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset)); Label install_baseline_code; // Check if feedback vector is valid. If not, call prepare for baseline to @@ -3994,9 +3982,17 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ ldr(kInterpreterBytecodeOffsetRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); __ SmiUntag(kInterpreterBytecodeOffsetRegister); - // Replace BytecodeOffset with the feedback vector. + // Replace bytecode offset with feedback cell. + static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp == + BaselineFrameConstants::kFeedbackCellFromFp); + __ str(feedback_cell, + MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp)); + feedback_cell = no_reg; + // Update feedback vector cache. + static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp == + BaselineFrameConstants::kFeedbackVectorFromFp); __ str(feedback_vector, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + MemOperand(fp, InterpreterFrameConstants::kFeedbackVectorFromFp)); feedback_vector = no_reg; // Compute baseline pc for bytecode offset. 
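For orientation, the baseline-frame changes in this port (and repeated in the other architecture ports in this patch) follow one pattern: the baseline prologue now pushes the FeedbackCell into the slot that an interpreter frame uses for the bytecode offset and keeps the FeedbackVector in its own slot, so helpers such as AddToInterruptBudgetAndJumpIfNotExceeded can reload the cell from the frame via LoadFeedbackCell() instead of re-deriving it from the closure. The slot relationship can be sketched with the minimal C++ below; this is an illustrative sketch, not part of the patch — the concrete offsets are hypothetical stand-ins, and only the two equalities are taken from the static_asserts added above.

    // Illustrative sketch only; offset values are invented for the example.
    namespace sketch {
    struct InterpreterFrameConstants {
      static constexpr int kBytecodeOffsetFromFp = -4 * 8;  // hypothetical
      static constexpr int kFeedbackVectorFromFp = -5 * 8;  // hypothetical
    };
    struct BaselineFrameConstants {
      // Baseline frames reuse the interpreter's bytecode-offset slot for the
      // FeedbackCell and share the FeedbackVector slot with interpreter frames.
      static constexpr int kFeedbackCellFromFp =
          InterpreterFrameConstants::kBytecodeOffsetFromFp;
      static constexpr int kFeedbackVectorFromFp =
          InterpreterFrameConstants::kFeedbackVectorFromFp;
    };
    // The equalities the Generate_BaselineOrInterpreterEntry path relies on
    // when it rewrites an interpreter frame in place into a baseline frame.
    static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp ==
                  BaselineFrameConstants::kFeedbackCellFromFp);
    static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp ==
                  BaselineFrameConstants::kFeedbackVectorFromFp);
    }  // namespace sketch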
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index 3d7f7b284cfb8f..5b6609f488cc36 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -1101,19 +1101,20 @@ void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm, void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { UseScratchRegisterScope temps(masm); // Need a few extra registers - temps.Include(x14, x15); + temps.Include(CPURegList(kXRegSizeInBits, {x14, x15, x22})); auto descriptor = Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue); Register closure = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); - // Load the feedback vector from the closure. + // Load the feedback cell and vector from the closure. + Register feedback_cell = temps.AcquireX(); Register feedback_vector = temps.AcquireX(); - __ LoadTaggedField(feedback_vector, + __ LoadTaggedField(feedback_cell, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ LoadTaggedField( feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); + FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset)); __ AssertFeedbackVector(feedback_vector, x4); // Check the tiering state. @@ -1165,16 +1166,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // the frame, so load it into a register. Register bytecode_array = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); - __ Push(argc, bytecode_array); - - // Baseline code frames store the feedback vector where interpreter would - // store the bytecode offset. + __ Push(argc, bytecode_array, feedback_cell, feedback_vector); __ AssertFeedbackVector(feedback_vector, x4); - // Our stack is currently aligned. We have have to push something along with - // the feedback vector to keep it that way -- we may as well start - // initialising the register frame. - __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); - __ Push(feedback_vector, kInterpreterAccumulatorRegister); } Label call_stack_guard; @@ -1198,11 +1191,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { } // Do "fast" return to the caller pc in lr. - if (v8_flags.debug_code) { - // The accumulator should already be "undefined", we don't have to load it. - __ CompareRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); - __ Assert(eq, AbortReason::kUnexpectedValue); - } + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); __ Ret(); __ bind(&flags_need_processing); @@ -1237,9 +1226,8 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) { // We're here because we got deopted during BaselineOutOfLinePrologue's stack // check. Undo all its frame creation and call into the interpreter instead. - // Drop the accumulator register (we already started building the register - // frame) and bytecode offset (was the feedback vector but got replaced - // during deopt). + // Drop the feedback vector and the bytecode offset (was the feedback vector + // but got replaced during deopt). __ Drop(2); // Bytecode array, argc, Closure, Context. @@ -1291,38 +1279,20 @@ void Builtins::Generate_InterpreterEntryTrampoline( BYTECODE_ARRAY_TYPE); __ B(ne, &compile_lazy); -#ifndef V8_JITLESS - // Load the feedback vector from the closure. 
- Register feedback_vector = x2; - __ LoadTaggedField(feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedField( - feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); - Label push_stack_frame; - // Check if feedback vector is valid. If valid, check for optimized code - // and update invocation count. Otherwise, setup the stack frame. - __ LoadTaggedField(x7, - FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); - __ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset)); - __ Cmp(x7, FEEDBACK_VECTOR_TYPE); - __ B(ne, &push_stack_frame); + Register feedback_vector = x2; + __ LoadFeedbackVector(feedback_vector, closure, x7, &push_stack_frame); - // Check the tiering state. +#ifndef V8_JITLESS + // If feedback vector is valid, check for optimized code and update invocation + // count. Label flags_need_processing; Register flags = w7; __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION, &flags_need_processing); - { - UseScratchRegisterScope temps(masm); - ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.AcquireW()); - } - - Label not_optimized; - __ bind(¬_optimized); + ResetFeedbackVectorOsrUrgency(masm, feedback_vector, w7); // Increment invocation count for the function. __ Ldr(w10, FieldMemOperand(feedback_vector, @@ -1334,13 +1304,14 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Open a frame scope to indicate that there is a frame on the stack. The // MANUAL indicates that the scope shouldn't actually generate code to set up // the frame (that is done below). - __ Bind(&push_stack_frame); #else // Note: By omitting the above code in jitless mode we also disable: // - kFlagsLogNextExecution: only used for logging/profiling; and // - kInvocationCountOffset: only used for tiering heuristics and code // coverage. #endif // !V8_JITLESS + + __ Bind(&push_stack_frame); FrameScope frame_scope(masm, StackFrame::MANUAL); __ Push(lr, fp); __ mov(fp, sp); @@ -1351,12 +1322,10 @@ void Builtins::Generate_InterpreterEntryTrampoline( Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); // Push actual argument count, bytecode array, Smi tagged bytecode array - // offset and an undefined (to properly align the stack pointer). - static_assert(MacroAssembler::kExtraSlotClaimedByPrologue == 1); + // offset and the feedback vector. __ SmiTag(x6, kInterpreterBytecodeOffsetRegister); __ Push(kJavaScriptCallArgCountRegister, kInterpreterBytecodeArrayRegister); - __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); - __ Push(x6, kInterpreterAccumulatorRegister); + __ Push(x6, feedback_vector); // Allocate the local and temporary register file on the stack. Label stack_overflow; @@ -1380,9 +1349,11 @@ void Builtins::Generate_InterpreterEntryTrampoline( // register in the register file. Label loop_header; __ Lsr(x11, x11, kSystemPointerSizeLog2); - // Round down (since we already have an undefined in the stack) the number - // of registers to a multiple of 2, to align the stack to 16 bytes. + // Round up the number of registers to a multiple of 2, to align the stack + // to 16 bytes. 
+ __ Add(x11, x11, 1); __ Bic(x11, x11, 1); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); __ PushMultipleTimes(kInterpreterAccumulatorRegister, x11); __ Bind(&loop_header); } @@ -2528,6 +2499,8 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc, __ Bind(&even); __ Cbz(slots_to_claim, &exit); __ Claim(slots_to_claim); + // An alignment slot may have been allocated above. If the number of stack + // parameters is 0, the we have to initialize the alignment slot. __ Cbz(slots_to_copy, &init); // Move the arguments already in the stack including the receiver. @@ -2548,21 +2521,11 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc, // call. { __ Bind(&init); - // Unconditionally initialize the last parameter slot. If `len` is odd, then - // it is an alignment slot that we have to initialize to avoid issues in the - // GC. If `len` is even, then the write is unnecessary, but faster than a - // check + jump. + // This code here is only reached when the number of stack parameters is 0. + // In that case we have to initialize the alignment slot if there is one. + __ Tbz(len, 0, &exit); __ Str(xzr, MemOperand(sp, len, LSL, kSystemPointerSizeLog2)); } - // Fill a possible alignment slot with a meaningful value. - { - Register total_num_args = x10; - __ Add(total_num_args, argc, len); - // If the sum is even, then there are no alignment slots that need - // initialization. - __ Tbz(total_num_args, 0, &exit); - __ Str(xzr, MemOperand(sp, total_num_args, LSL, kSystemPointerSizeLog2)); - } __ Bind(&exit); } @@ -4849,9 +4812,10 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, const bool with_profiling = mode != CallApiCallbackMode::kOptimizedNoProfiling; + Label* no_done = nullptr; CallApiFunctionAndReturn(masm, with_profiling, api_function_address, thunk_ref, thunk_arg, kUseStackSpaceOperand, - &stack_space_operand, return_value_operand); + &stack_space_operand, return_value_operand, no_done); } void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { @@ -4966,9 +4930,10 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { MemOperand* const kUseStackSpaceConstant = nullptr; const bool with_profiling = true; - CallApiFunctionAndReturn(masm, with_profiling, api_function_address, - thunk_ref, thunk_arg, kStackUnwindSpace, - kUseStackSpaceConstant, return_value_operand); + Label* no_done = nullptr; + CallApiFunctionAndReturn( + masm, with_profiling, api_function_address, thunk_ref, thunk_arg, + kStackUnwindSpace, kUseStackSpaceConstant, return_value_operand, no_done); } void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { @@ -5328,13 +5293,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, AssertCodeIsBaseline(masm, code_obj, x3); } - // Load the feedback vector. - Register feedback_vector = x2; - __ LoadTaggedField(feedback_vector, + // Load the feedback cell and vector. + Register feedback_cell = x2; + Register feedback_vector = x15; + __ LoadTaggedField(feedback_cell, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ LoadTaggedField( feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); + FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset)); Label install_baseline_code; // Check if feedback vector is valid. If not, call prepare for baseline to @@ -5345,9 +5311,17 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, // Save BytecodeOffset from the stack frame. 
__ SmiUntag(kInterpreterBytecodeOffsetRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); - // Replace BytecodeOffset with the feedback vector. + // Replace bytecode offset with feedback cell. + static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp == + BaselineFrameConstants::kFeedbackCellFromFp); + __ Str(feedback_cell, + MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp)); + feedback_cell = no_reg; + // Update feedback vector cache. + static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp == + BaselineFrameConstants::kFeedbackVectorFromFp); __ Str(feedback_vector, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + MemOperand(fp, InterpreterFrameConstants::kFeedbackVectorFromFp)); feedback_vector = no_reg; // Compute baseline pc for bytecode offset. diff --git a/deps/v8/src/builtins/array-from-async.tq b/deps/v8/src/builtins/array-from-async.tq new file mode 100644 index 00000000000000..142e39eeea446f --- /dev/null +++ b/deps/v8/src/builtins/array-from-async.tq @@ -0,0 +1,646 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +namespace array { + +extern enum ArrayFromAsyncLabels extends uint31 + constexpr 'ArrayBuiltins::ArrayFromAsyncLabels' { + kGetIteratorStep, + kCheckIteratorValueAndMapping, + kIteratorMapping, + kGetIteratorValueWithMapping, + kAddIteratorValueToTheArray, + kGetArrayLikeValue, + kCheckArrayLikeValueAndMapping, + kGetArrayLikeValueWithMapping, + kAddArrayLikeValueToTheArray, + kDoneAndResolvePromise, + kCloseAsyncIterator, + kRejectPromise +} + +struct ArrayFromAsyncResumeState { + step: ArrayFromAsyncLabels; + awaitedValue: JSAny; + len: Smi; + index: Smi; +} + +type ArrayFromAsyncResolveContext extends FunctionContext; +extern enum ArrayFromAsyncResolveContextSlots extends intptr + constexpr 'ArrayBuiltins::ArrayFromAsyncResolveContextSlots' { + kArrayFromAsyncResolveResumeStateStepSlot: + Slot, + kArrayFromAsyncResolveResumeStateAwaitedValueSlot: + Slot, + kArrayFromAsyncResolveResumeStateLenSlot: + Slot, + kArrayFromAsyncResolveResumeStateIndexSlot: + Slot, + kArrayFromAsyncResolvePromiseSlot: + Slot, + kArrayFromAsyncResolvePromiseFunctionSlot: + Slot, + kArrayFromAsyncResolveOnFulfilledFunctionSlot: + Slot, + kArrayFromAsyncResolveOnRejectedFunctionSlot: + Slot, + kArrayFromAsyncResolveResultArraySlot: + Slot, + kArrayFromAsyncResolveIteratorSlot: + Slot, + kArrayFromAsyncResolveNextMethodSlot: + Slot, + kArrayFromAsyncResolveErrorSlot: Slot, + kArrayFromAsyncResolveMapfnSlot: Slot, + kArrayFromAsyncResolveThisArgSlot: Slot, + kArrayFromAsyncResolveLength +} + +macro CreateArrayFromAsyncResolveContext( + implicit context: Context)(resumeState: ArrayFromAsyncResumeState, + promise: JSPromise, promiseFun: JSReceiver, map: Map, iterator: JSReceiver, + next: JSAny, arr: JSReceiver, error: JSAny, mapfn: JSAny, thisArg: JSAny, + nativeContext: NativeContext): ArrayFromAsyncResolveContext { + const resolveContext = %RawDownCast( + AllocateSyntheticFunctionContext( + nativeContext, + ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveLength)); + InitContextSlot( + resolveContext, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveResumeStateStepSlot, + SmiTag(resumeState.step)); + InitContextSlot( + resolveContext, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveResumeStateAwaitedValueSlot, + resumeState.awaitedValue); + InitContextSlot( + 
resolveContext, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveResumeStateLenSlot, + resumeState.len); + InitContextSlot( + resolveContext, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveResumeStateIndexSlot, + resumeState.index); + InitContextSlot( + resolveContext, + ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolvePromiseSlot, + promise); + InitContextSlot( + resolveContext, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolvePromiseFunctionSlot, + promiseFun); + InitContextSlot( + resolveContext, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveOnFulfilledFunctionSlot, + promise::AllocateFunctionWithMapAndContext( + map, ArrayFromAsyncOnFulfilledSharedFunConstant(), resolveContext)); + InitContextSlot( + resolveContext, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveOnRejectedFunctionSlot, + promise::AllocateFunctionWithMapAndContext( + map, ArrayFromAsyncOnRejectedSharedFunConstant(), resolveContext)); + InitContextSlot( + resolveContext, + ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveResultArraySlot, + arr); + InitContextSlot( + resolveContext, + ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveIteratorSlot, + iterator); + InitContextSlot( + resolveContext, + ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveNextMethodSlot, + next); + InitContextSlot( + resolveContext, + ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveErrorSlot, + error); + InitContextSlot( + resolveContext, + ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveMapfnSlot, + mapfn); + InitContextSlot( + resolveContext, + ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveThisArgSlot, + thisArg); + return resolveContext; +} + +macro GetIteratorRecordFromArrayFromAsyncResolveContext( + context: ArrayFromAsyncResolveContext): iterator::IteratorRecord { + const iterator = *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveIteratorSlot); + + const nextMethod = *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveNextMethodSlot); + + return iterator::IteratorRecord{object: iterator, next: nextMethod}; +} + +transitioning macro CreateArrayFromIterableAsynchronously( + context: ArrayFromAsyncResolveContext): JSAny { + try { + const fastIteratorResultMap = GetIteratorResultMap(); + + const mapfn = *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveMapfnSlot); + + const thisArg = *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveThisArgSlot); + + const arr = *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveResultArraySlot); + + let resumeState = ArrayFromAsyncResumeState{ + step: SmiUntag( + %RawDownCast>(*ContextSlot( + context, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveResumeStateStepSlot))), + awaitedValue: *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveResumeStateAwaitedValueSlot), + len: *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveResumeStateLenSlot), + index: *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveResumeStateIndexSlot) + }; + + let mappedValue: JSAny = Undefined; + let nextValue: JSAny = Undefined; + + // TODO(v8:14290): Replace `if/else` with `switch/case` when the support + // for `switch` is added. 
+ + while (true) { + if (resumeState.step == ArrayFromAsyncLabels::kGetIteratorStep) { + const iteratorRecord = + GetIteratorRecordFromArrayFromAsyncResolveContext(context); + let next: JSAny; + // https://github.com/tc39/proposal-array-from-async/issues/33#issuecomment-1279296963 + // 3. Let nextResult be ? Call(iteratorRecord.[[NextMethod]], + // iteratorRecord.[[Iterator]]). + // 4. Set nextResult to ? Await(nextResult). + next = Call(context, iteratorRecord.next, iteratorRecord.object); + + return ArrayFromAsyncAwaitPoint( + ArrayFromAsyncLabels::kCheckIteratorValueAndMapping, next); + } else if ( + resumeState.step == + ArrayFromAsyncLabels::kCheckIteratorValueAndMapping) { + // 5. If nextResult is not an Object, throw a TypeError exception. + const nextJSReceiver = Cast(resumeState.awaitedValue) + otherwise ThrowTypeError( + MessageTemplate::kIteratorResultNotAnObject, 'Array.fromAsync'); + + try { + // 6. Let done be ? IteratorComplete(nextResult). + iterator::IteratorComplete(nextJSReceiver, fastIteratorResultMap) + otherwise Done; + + // 8. Let nextValue be ? IteratorValue(nextResult). + nextValue = + iterator::IteratorValue(nextJSReceiver, fastIteratorResultMap); + + // When mapfn is not undefined, it is guaranteed to be callable as + // checked upon entry. + const mapping: bool = (mapfn != Undefined); + + // 9. If mapping is true, then + if (mapping) { + resumeState.step = ArrayFromAsyncLabels::kIteratorMapping; + } else { + // 10. Else, let mappedValue be nextValue. + mappedValue = nextValue; + resumeState.step = + ArrayFromAsyncLabels::kAddIteratorValueToTheArray; + } + } label Done { + // 7. If done is true, + // a. Perform ? Set(A, "length", 𝔽(k), true). + array::SetPropertyLength(arr, resumeState.index); + // b. Return Completion Record { [[Type]]: return, [[Value]]: A, + // [[Target]]: empty }. + resumeState.step = ArrayFromAsyncLabels::kDoneAndResolvePromise; + } + } else if (resumeState.step == ArrayFromAsyncLabels::kIteratorMapping) { + // a. Let mappedValue be Call(mapfn, thisArg, « nextValue, 𝔽(k) + // »). + // b. IfAbruptCloseAsyncIterator(mappedValue, + // iteratorRecord). + const mapResult = Call( + context, UnsafeCast(mapfn), thisArg, nextValue, + resumeState.index); + + // c. Set mappedValue to Await(mappedValue). + // d. IfAbruptCloseAsyncIterator(mappedValue, iteratorRecord). + return ArrayFromAsyncAwaitPoint( + ArrayFromAsyncLabels::kGetIteratorValueWithMapping, mapResult); + } else if ( + resumeState.step == + ArrayFromAsyncLabels::kGetIteratorValueWithMapping) { + mappedValue = resumeState.awaitedValue; + resumeState.step = ArrayFromAsyncLabels::kAddIteratorValueToTheArray; + } else if ( + resumeState.step == + ArrayFromAsyncLabels::kAddIteratorValueToTheArray) { + // 11. Let defineStatus be CreateDataPropertyOrThrow(A, Pk, + // mappedValue). + // 12. If defineStatus is an abrupt completion, return ? + // AsyncIteratorClose(iteratorRecord, defineStatus). + FastCreateDataProperty(arr, resumeState.index, mappedValue); + + // 13. Set k to k + 1. + resumeState.index++; + + *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveResumeStateIndexSlot) = resumeState.index; + + resumeState.step = ArrayFromAsyncLabels::kGetIteratorStep; + } else if (resumeState.step == ArrayFromAsyncLabels::kGetArrayLikeValue) { + // vii. Repeat, while k < len, + // 1. Let Pk be ! ToString(𝔽(k)). + // 2. Let kValue be ? Get(arrayLike, Pk). 
+ + resumeState.step = ArrayFromAsyncLabels::kCheckArrayLikeValueAndMapping; + + *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveResumeStateStepSlot) = + SmiTag(resumeState.step); + + resumeState.index++; + *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveResumeStateIndexSlot) = resumeState.index; + + // item.then((result) => asyncFunction(result)); + return Undefined; + } else if ( + resumeState.step == + ArrayFromAsyncLabels::kCheckArrayLikeValueAndMapping) { + if (resumeState.index == resumeState.len) { + resumeState.step = ArrayFromAsyncLabels::kDoneAndResolvePromise; + } + + let mapping: bool; + // a. If mapfn is undefined, let mapping be false. + if (mapfn == Undefined) { + mapping = false; + } else { + // b. Else, + // i. If IsCallable(mapfn) is false, throw a TypeError exception. + if (!Is(mapfn)) deferred { + ThrowTypeError(MessageTemplate::kCalledNonCallable, mapfn); + } + // ii. Let mapping be true. + mapping = true; + } + + // 4. If mapping is true, then + if (mapping) { + resumeState.step = + ArrayFromAsyncLabels::kGetArrayLikeValueWithMapping; + } else { + resumeState.step = ArrayFromAsyncLabels::kAddArrayLikeValueToTheArray; + } + } else if ( + resumeState.step == + ArrayFromAsyncLabels::kGetArrayLikeValueWithMapping) { + // a. Let mappedValue be ? Call(mapfn, thisArg, « kValue, 𝔽(k) + // »). b. Set mappedValue to ? Await(mappedValue). + const mapResult = Call( + context, UnsafeCast(mapfn), thisArg, + resumeState.awaitedValue, resumeState.index); + return ArrayFromAsyncAwaitPoint( + ArrayFromAsyncLabels::kAddArrayLikeValueToTheArray, mapResult); + } else if ( + resumeState.step == + ArrayFromAsyncLabels::kAddArrayLikeValueToTheArray) { + // 5. Else, let mappedValue be kValue. + // 6. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue). + mappedValue = resumeState.awaitedValue; + FastCreateDataProperty(arr, resumeState.index, mappedValue); + resumeState.step = ArrayFromAsyncLabels::kGetArrayLikeValue; + } else if ( + resumeState.step == ArrayFromAsyncLabels::kDoneAndResolvePromise) { + const promise = *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolvePromiseSlot); + + promise::ResolvePromise(promise, arr); + return Undefined; + } else if ( + resumeState.step == ArrayFromAsyncLabels::kCloseAsyncIterator) { + resumeState.step = ArrayFromAsyncLabels::kRejectPromise; + + const iteratorRecord = + GetIteratorRecordFromArrayFromAsyncResolveContext(context); + try { + ArrayFromAsyncAsyncIteratorCloseOnException(iteratorRecord) + otherwise RejectPromise; + return Undefined; + } label RejectPromise { + // Do nothing so the codeflow continues to the kRejectPromise label. 
+ } + } else if (resumeState.step == ArrayFromAsyncLabels::kRejectPromise) { + return RejectArrayFromAsyncPromise(); + } + } + } catch (e, _message) { + *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveErrorSlot) = e; + + const iteratorRecord = + GetIteratorRecordFromArrayFromAsyncResolveContext(context); + try { + ArrayFromAsyncAsyncIteratorCloseOnException(iteratorRecord) + otherwise RejectPromise; + } label RejectPromise { + return RejectArrayFromAsyncPromise(); + } + } + return Undefined; +} + +transitioning macro ArrayFromAsyncAwaitPoint( + implicit context: Context)(step: ArrayFromAsyncLabels, + value: JSAny): JSAny { + const context = %RawDownCast(context); + *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveResumeStateStepSlot) = + SmiTag(step); + + const promiseFun = *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolvePromiseFunctionSlot); + const resolve = *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveOnFulfilledFunctionSlot); + const reject = *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveOnRejectedFunctionSlot); + + // TODO(v8:13321): Add a fast path for values that are already + // built-in promises. + const resultPromise = promise::PromiseResolve(promiseFun, value); + + promise::PerformPromiseThenImpl( + UnsafeCast(resultPromise), resolve, reject, Undefined); + + return Undefined; +} + +// `ArrayFromAsyncFulfilled` is the callback function for the fulfilled case of +// the promise in `then` handler. +transitioning javascript builtin ArrayFromAsyncOnFulfilled( + js-implicit context: Context, receiver: JSAny, target: JSFunction)( + result: JSAny): JSAny { + const context = %RawDownCast(context); + *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveResumeStateAwaitedValueSlot) = result; + + return CreateArrayFromIterableAsynchronously(context); +} + +// `ArrayFromAsyncRejected` is the callback function for the rejected case of +// the promise in `then` handler. +transitioning javascript builtin ArrayFromAsyncOnRejected( + js-implicit context: Context, receiver: JSAny, target: JSFunction)( + result: JSAny): JSAny { + const context = %RawDownCast(context); + + *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots:: + kArrayFromAsyncResolveResumeStateStepSlot) = + SmiTag(ArrayFromAsyncLabels::kCloseAsyncIterator); + *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveErrorSlot) = + result; + + return CreateArrayFromIterableAsynchronously(context); +} + +// This macro reject the promise if any exception occurs in the execution of +// the asynchronous code. 
+transitioning macro RejectArrayFromAsyncPromise( + implicit context: Context)(): JSAny { + const context = %RawDownCast(context); + const error = *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveErrorSlot); + const promise = *ContextSlot( + context, + ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolvePromiseSlot); + + return promise::RejectPromise(promise, error, False); +} + +// This is the specialized implementation of `IfAbruptCloseAsyncIterator` for +// Array.fromAsync +// https://tc39.es/proposal-array-from-async/#sec-ifabruptcloseasynciterator +transitioning macro ArrayFromAsyncAsyncIteratorCloseOnException( + implicit context: Context)( + iterator: iterator::IteratorRecord): void labels RejectPromise { + try { + const context = %RawDownCast(context); + // 3. Let innerResult be GetMethod(iterator, "return"). + const method = GetProperty(iterator.object, kReturnString); + + // 4. If innerResult.[[Type]] is normal, then + // a. Let return be innerResult.[[Value]]. + // b. If return is undefined, return Completion(completion). + if (method == Undefined || method == Null) { + goto RejectPromise; + } + + // c. Set innerResult to Call(return, iterator). + // If an exception occurs, the original exception remains bound + const innerResult = Call(context, method, iterator.object); + + // d. If innerResult.[[Type]] is normal, set innerResult to + // Completion(Await(innerResult.[[Value]])). + const step = ArrayFromAsyncLabels::kRejectPromise; + + ArrayFromAsyncAwaitPoint(step, innerResult); + } catch (_e, _message) { + // Swallow the exception. + } + + // (5. If completion.[[Type]] is throw) return Completion(completion). +} + +// https://tc39.es/proposal-array-from-async/#sec-array.fromAsync +// Array.fromAsync ( asyncItems [ , mapfn [ , thisArg ] ] ) +// Since we do not have support for `await` in torque, we handled +// asynchronous execution flow manually in torque. More information +// is available in go/array-from-async-implementation. +transitioning javascript builtin ArrayFromAsync( + js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { + // 1. Let C be the this value. + const c = HasBuiltinSubclassingFlag() ? receiver : GetArrayFunction(); + + const items = arguments[0]; + const mapfn = arguments[1]; + const thisArg = arguments[2]; + + // 2. Let promiseCapability be ! NewPromiseCapability(%Promise%). + const promise = promise::NewJSPromise(); + + // 3. Let fromAsyncClosure be a new Abstract Closure with no parameters that + // captures C, mapfn, and thisArg and performs the following steps when + // called: + + let usingAsyncIterator: JSAny = Undefined; + let usingSyncIterator: JSAny = Undefined; + let iteratorRecordObject: JSReceiver; + let iteratorRecordNext: JSAny; + let arr: JSReceiver; + let firstStep: ArrayFromAsyncLabels; + + try { + if (mapfn != Undefined) { + // i. If IsCallable(mapfn) is false, throw a TypeError exception. + if (!Is(mapfn)) deferred { + ThrowTypeError(MessageTemplate::kCalledNonCallable, mapfn); + } + } + + try { + // c. Let usingAsyncIterator be ? + // GetMethod(asyncItems, @@asyncIterator). + usingAsyncIterator = GetMethod(items, AsyncIteratorSymbolConstant()) + otherwise AsyncIteratorIsUndefined, AsyncIteratorNotCallable; + } label AsyncIteratorIsUndefined { + // d. If usingAsyncIterator is undefined, then + // i. Let usingSyncIterator be ? + // GetMethod(asyncItems, @@iterator). 
+ + usingSyncIterator = GetMethod(items, IteratorSymbolConstant()) + otherwise SyncIteratorIsUndefined, SyncIteratorNotCallable; + } label SyncIteratorIsUndefined deferred { + // i. Else, (iteratorRecord is undefined) + // i. NOTE: asyncItems is neither an AsyncIterable nor an + // Iterable so assume it is an array-like object. + // ii. Let arrayLike be ! ToObject(asyncItems). + // iii. Let len be ? LengthOfArrayLike(arrayLike). + // iv. If IsConstructor(C) is + // true, then + // 1. Let A be ? Construct(C, « 𝔽(len) »). + // v. Else, + // 1. Let A be ? ArrayCreate(len). + // vi. Let k be 0. + + // TODO(v8:13321): Array-like path will be implemented later. + // That means code inside the following labels are incomplete: + // kGetArrayLikeValue, kCheckArrayLikeValueAndMapping, + // kGetArrayLikeValueWithMapping, kAddArrayLikeValueToTheArray. + + // firstStep = ArrayFromAsyncLabels::kGetArrayLikeValue; + } label SyncIteratorNotCallable(_value: JSAny) + deferred { + ThrowTypeError( + MessageTemplate::kFirstArgumentIteratorSymbolNonCallable, + 'Array.fromAsync'); + } label AsyncIteratorNotCallable(_value: JSAny) + deferred { + ThrowTypeError( + MessageTemplate::kFirstArgumentAsyncIteratorSymbolNonCallable, + 'Array.fromAsync'); + } + + // e. Let iteratorRecord be undefined. + // f. If usingAsyncIterator is not undefined, then + // i. Set iteratorRecord to ? GetIterator(asyncItems, async, + // usingAsyncIterator). + // g. Else if usingSyncIterator is not undefined, then + // i. Set iteratorRecord to ? + // CreateAsyncFromSyncIterator(GetIterator(asyncItems, sync, + // usingSyncIterator)). + + const iteratorRecord = (usingAsyncIterator != Undefined) ? + iterator::GetIterator(items, usingAsyncIterator) : + iterator::GetIteratorRecordAfterCreateAsyncFromSyncIterator( + iterator::GetIterator(items, usingSyncIterator)); + + iteratorRecordObject = iteratorRecord.object; + iteratorRecordNext = iteratorRecord.next; + + // h. If iteratorRecord is not undefined, then + typeswitch (c) { + case (c: Constructor): { + // i. If IsConstructor(C) is true, then + // 1. Let A be ? Construct(C). + arr = Construct(c); + } + case (JSAny): { + // ii. Else, + // 1. Let A be ! ArrayCreate(0). + arr = ArrayCreate(0); + } + } + + firstStep = ArrayFromAsyncLabels::kGetIteratorStep; + } catch (e, _message) { + promise::RejectPromise(promise, e, False); + return promise; + } + + let resumeState = ArrayFromAsyncResumeState{ + step: firstStep, + awaitedValue: Undefined, + len: 0, + index: 0 + }; + + const promiseFun = *NativeContextSlot( + context, ContextSlot::PROMISE_FUNCTION_INDEX); + const map = *NativeContextSlot( + context, ContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); + + const resolveContext = CreateArrayFromAsyncResolveContext( + resumeState, promise, promiseFun, map, iteratorRecordObject, + iteratorRecordNext, arr, Undefined, mapfn, thisArg, context); + + CreateArrayFromIterableAsynchronously(resolveContext); + return promise; +} + +extern macro ArrayFromAsyncOnFulfilledSharedFunConstant(): SharedFunctionInfo; +extern macro ArrayFromAsyncOnRejectedSharedFunConstant(): SharedFunctionInfo; +} diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq index 4685c4b007bfe0..2158a2fbf822b9 100644 --- a/deps/v8/src/builtins/base.tq +++ b/deps/v8/src/builtins/base.tq @@ -470,6 +470,8 @@ extern enum MessageTemplate { kArgumentIsNonObject, kKeysMethodInvalid, kGeneratorRunning, + kFirstArgumentAsyncIteratorSymbolNonCallable, + kIteratorResultNotAnObject, ... 
} @@ -485,8 +487,6 @@ extern enum PropertyAttributes extends int31 { const kArrayBufferMaxByteLength: constexpr uintptr generates 'JSArrayBuffer::kMaxByteLength'; -const kTypedArrayMaxLength: - constexpr uintptr generates 'JSTypedArray::kMaxLength'; const kMaxTypedArrayInHeap: constexpr int31 generates 'JSTypedArray::kMaxSizeInHeap'; // CSA does not support 64-bit types on 32-bit platforms so as a workaround the @@ -551,6 +551,7 @@ extern macro Int32FalseConstant(): bool; extern macro Int32TrueConstant(): bool; extern macro IteratorSymbolConstant(): PublicSymbol; extern macro KeysStringConstant(): String; +extern macro AsyncIteratorSymbolConstant(): PublicSymbol; extern macro LengthStringConstant(): String; extern macro MatchSymbolConstant(): Symbol; extern macro MessageStringConstant(): String; diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc index b1eae115ee760a..7319c9867ba03a 100644 --- a/deps/v8/src/builtins/builtins-array-gen.cc +++ b/deps/v8/src/builtins/builtins-array-gen.cc @@ -1675,7 +1675,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler { // 2. Perform ? CreateDataPropertyOrThrow(target, // ! ToString(targetIndex), // element). - CallRuntime(Runtime::kCreateDataProperty, context, target, + CallBuiltin(Builtin::kFastCreateDataProperty, context, target, target_index, element); // 3. Increase targetIndex by 1. diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h index 979269faa58a74..de9513af444679 100644 --- a/deps/v8/src/builtins/builtins-array-gen.h +++ b/deps/v8/src/builtins/builtins-array-gen.h @@ -124,6 +124,42 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler { ElementsKind source_elements_kind_ = ElementsKind::NO_ELEMENTS; }; +class ArrayBuiltins { + public: + enum ArrayFromAsyncResolveContextSlots { + kArrayFromAsyncResolveResumeStateStepSlot = Context::MIN_CONTEXT_SLOTS, + kArrayFromAsyncResolveResumeStateAwaitedValueSlot, + kArrayFromAsyncResolveResumeStateLenSlot, + kArrayFromAsyncResolveResumeStateIndexSlot, + kArrayFromAsyncResolvePromiseSlot, + kArrayFromAsyncResolvePromiseFunctionSlot, + kArrayFromAsyncResolveOnFulfilledFunctionSlot, + kArrayFromAsyncResolveOnRejectedFunctionSlot, + kArrayFromAsyncResolveResultArraySlot, + kArrayFromAsyncResolveIteratorSlot, + kArrayFromAsyncResolveNextMethodSlot, + kArrayFromAsyncResolveErrorSlot, + kArrayFromAsyncResolveMapfnSlot, + kArrayFromAsyncResolveThisArgSlot, + kArrayFromAsyncResolveLength + }; + + enum ArrayFromAsyncLabels { + kGetIteratorStep, + kCheckIteratorValueAndMapping, + kIteratorMapping, + kGetIteratorValueWithMapping, + kAddIteratorValueToTheArray, + kGetArrayLikeValue, + kCheckArrayLikeValueAndMapping, + kGetArrayLikeValueWithMapping, + kAddArrayLikeValueToTheArray, + kDoneAndResolvePromise, + kCloseAsyncIterator, + kRejectPromise + }; +}; + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc index 408c77bc7acb5c..32c2d1cc8c0473 100644 --- a/deps/v8/src/builtins/builtins-array.cc +++ b/deps/v8/src/builtins/builtins-array.cc @@ -1589,12 +1589,5 @@ BUILTIN(ArrayConcat) { return Slow_ArrayConcat(&args, species, isolate); } -BUILTIN(ArrayFromAsync) { - HandleScope scope(isolate); - DCHECK(v8_flags.harmony_array_from_async); - - return ReadOnlyRoots(isolate).undefined_value(); -} - } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc 
b/deps/v8/src/builtins/builtins-arraybuffer.cc index 08810fd4afd7f5..fbaf98b4c75bd8 100644 --- a/deps/v8/src/builtins/builtins-arraybuffer.cc +++ b/deps/v8/src/builtins/builtins-arraybuffer.cc @@ -73,15 +73,10 @@ Tagged ConstructBuffer(Isolate* isolate, Handle target, BackingStore::Allocate(isolate, byte_length, shared, initialized); max_byte_length = byte_length; } else { - // We need to check the max length against both - // JSArrayBuffer::kMaxByteLength and JSTypedArray::kMaxLength, since it's - // possible to create length-tracking TypedArrays and resize the underlying - // buffer. If the max byte length was larger than JSTypedArray::kMaxLength, - // that'd result in having a TypedArray with length larger than - // JSTypedArray::kMaxLength. + static_assert(JSArrayBuffer::kMaxByteLength == + JSTypedArray::kMaxByteLength); if (!TryNumberToSize(*max_length, &max_byte_length) || - max_byte_length > JSArrayBuffer::kMaxByteLength || - max_byte_length > JSTypedArray::kMaxLength) { + max_byte_length > JSArrayBuffer::kMaxByteLength) { THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength)); diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc index d87004050bad2e..30c8a57c76f559 100644 --- a/deps/v8/src/builtins/builtins-async-gen.cc +++ b/deps/v8/src/builtins/builtins-async-gen.cc @@ -181,8 +181,8 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure( TNode builtin_id = LoadObjectField( shared_info, SharedFunctionInfo::kFunctionDataOffset); TNode code = LoadBuiltin(builtin_id); - StoreMaybeIndirectPointerFieldNoWriteBarrier(function, - JSFunction::kCodeOffset, code); + StoreMaybeIndirectPointerFieldNoWriteBarrier( + function, JSFunction::kCodeOffset, kCodeIndirectPointerTag, code); } TNode AsyncBuiltinsAssembler::CreateUnwrapClosure( diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc index b229c29aea5324..2cd4d731aaf9b4 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.cc +++ b/deps/v8/src/builtins/builtins-collections-gen.cc @@ -28,7 +28,7 @@ void BaseCollectionsAssembler::AddConstructorEntry( Label* if_may_have_side_effects, Label* if_exception, TVariable* var_exception) { compiler::ScopedExceptionHandler handler(this, if_exception, var_exception); - CSA_DCHECK(this, Word32BinaryNot(IsTheHole(key_value))); + CSA_DCHECK(this, Word32BinaryNot(IsHashTableHole(key_value))); if (variant == kMap || variant == kWeakMap) { TorqueStructKeyValuePair pair = if_may_have_side_effects != nullptr @@ -874,7 +874,7 @@ TNode CollectionsBuiltinsAssembler::MapIteratorToList( TNode entry_start_position; TNode cur_index; std::tie(entry_key, entry_start_position, cur_index) = - NextSkipHoles(table, var_index.value(), &done); + NextSkipHashTableHoles(table, var_index.value(), &done); // Decide to write key or value. 
Branch( @@ -981,7 +981,8 @@ TNode CollectionsBuiltinsAssembler::SetOrSetIteratorToList( TNode entry_start_position; TNode cur_index; std::tie(entry_key, entry_start_position, cur_index) = - NextSkipHoles(table, var_index.value(), &finalize); + NextSkipHashTableHoles(table, var_index.value(), + &finalize); Store(elements, var_offset.value(), entry_key); @@ -1284,9 +1285,9 @@ CollectionsBuiltinsAssembler::TransitionOrderedHashSetNoUpdate( template std::tuple, TNode, TNode> -CollectionsBuiltinsAssembler::NextSkipHoles(TNode table, - TNode index, - Label* if_end) { +CollectionsBuiltinsAssembler::NextSkipHashTableHoles(TNode table, + TNode index, + Label* if_end) { // Compute the used capacity for the {table}. TNode number_of_buckets = LoadAndUntagToWord32ObjectField( table, TableType::NumberOfBucketsOffset()); @@ -1297,16 +1298,15 @@ CollectionsBuiltinsAssembler::NextSkipHoles(TNode table, TNode used_capacity = Int32Add(number_of_elements, number_of_deleted_elements); - return NextSkipHoles(table, number_of_buckets, used_capacity, index, if_end); + return NextSkipHashTableHoles(table, number_of_buckets, used_capacity, index, + if_end); } template std::tuple, TNode, TNode> -CollectionsBuiltinsAssembler::NextSkipHoles(TNode table, - TNode number_of_buckets, - TNode used_capacity, - TNode index, - Label* if_end) { +CollectionsBuiltinsAssembler::NextSkipHashTableHoles( + TNode table, TNode number_of_buckets, + TNode used_capacity, TNode index, Label* if_end) { CSA_DCHECK(this, Word32Equal(number_of_buckets, LoadAndUntagToWord32ObjectField( table, TableType::NumberOfBucketsOffset()))); @@ -1333,7 +1333,7 @@ CollectionsBuiltinsAssembler::NextSkipHoles(TNode table, entry_key = UnsafeLoadKeyFromOrderedHashTableEntry( table, ChangePositiveInt32ToIntPtr(entry_start_position)); var_index = Int32Add(var_index.value(), Int32Constant(1)); - Branch(IsTheHole(entry_key), &loop, &done_loop); + Branch(IsHashTableHole(entry_key), &loop, &done_loop); } BIND(&done_loop); @@ -1356,8 +1356,8 @@ CollectionsBuiltinsAssembler::NextKeyIndexPairUnmodifiedTable( TNode entry_start_position; TNode next_index; - std::tie(key, entry_start_position, next_index) = - NextSkipHoles(table, number_of_buckets, used_capacity, index, if_end); + std::tie(key, entry_start_position, next_index) = NextSkipHashTableHoles( + table, number_of_buckets, used_capacity, index, if_end); return TorqueStructKeyIndexPair{key, next_index}; } @@ -1382,7 +1382,7 @@ TorqueStructKeyIndexPair CollectionsBuiltinsAssembler::NextKeyIndexPair( TNode next_index; std::tie(key, entry_start_position, next_index) = - NextSkipHoles(table, index, if_end); + NextSkipHashTableHoles(table, index, if_end); return TorqueStructKeyIndexPair{key, next_index}; } @@ -1405,8 +1405,8 @@ CollectionsBuiltinsAssembler::NextKeyValueIndexTupleUnmodifiedTable( TNode entry_start_position; TNode next_index; - std::tie(key, entry_start_position, next_index) = - NextSkipHoles(table, number_of_buckets, used_capacity, index, if_end); + std::tie(key, entry_start_position, next_index) = NextSkipHashTableHoles( + table, number_of_buckets, used_capacity, index, if_end); TNode value = UnsafeLoadValueFromOrderedHashMapEntry(table, entry_start_position); @@ -1423,7 +1423,7 @@ CollectionsBuiltinsAssembler::NextKeyValueIndexTuple( TNode next_index; std::tie(key, entry_start_position, next_index) = - NextSkipHoles(table, index, if_end); + NextSkipHashTableHoles(table, index, if_end); TNode value = UnsafeLoadValueFromOrderedHashMapEntry(table, entry_start_position); @@ -1677,9 +1677,6 @@ 
TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) { ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.delete"); - // This check breaks a known exploitation technique. See crbug.com/1263462 - CSA_CHECK(this, TaggedNotEqual(key, TheHoleConstant())); - const TNode table = LoadObjectField(CAST(receiver), JSMap::kTableOffset); @@ -1694,8 +1691,8 @@ TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) { BIND(&entry_found); // If we found the entry, mark the entry as deleted. - StoreKeyValueInOrderedHashMapEntry(table, TheHoleConstant(), - TheHoleConstant(), + StoreKeyValueInOrderedHashMapEntry(table, HashTableHoleConstant(), + HashTableHoleConstant(), entry_start_position_or_hash.value()); // Decrement the number of elements, increment the number of deleted elements. @@ -1823,7 +1820,7 @@ TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) { "Set.prototype.delete"); // This check breaks a known exploitation technique. See crbug.com/1263462 - CSA_CHECK(this, TaggedNotEqual(key, TheHoleConstant())); + CSA_CHECK(this, TaggedNotEqual(key, HashTableHoleConstant())); const TNode table = LoadObjectField(CAST(receiver), JSMap::kTableOffset); @@ -1853,9 +1850,6 @@ TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) { TNode CollectionsBuiltinsAssembler::DeleteFromSetTable( const TNode context, TNode table, TNode key, Label* not_found) { - // This check breaks a known exploitation technique. See crbug.com/1263462 - CSA_CHECK(this, TaggedNotEqual(key, TheHoleConstant())); - TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0)); Label entry_found(this); @@ -1864,7 +1858,7 @@ TNode CollectionsBuiltinsAssembler::DeleteFromSetTable( BIND(&entry_found); // If we found the entry, mark the entry as deleted. - StoreKeyInOrderedHashSetEntry(table, TheHoleConstant(), + StoreKeyInOrderedHashSetEntry(table, HashTableHoleConstant(), entry_start_position_or_hash.value()); // Decrement the number of elements, increment the number of deleted elements. @@ -1937,7 +1931,7 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) { TNode entry_key; TNode entry_start_position; std::tie(entry_key, entry_start_position, index) = - NextSkipHoles(table, index, &done_loop); + NextSkipHashTableHoles(table, index, &done_loop); // Load the entry value as well. TNode entry_value = @@ -2019,7 +2013,7 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) { TNode entry_key; TNode entry_start_position; std::tie(entry_key, entry_start_position, index) = - NextSkipHoles(table, index, &return_end); + NextSkipHashTableHoles(table, index, &return_end); StoreObjectFieldNoWriteBarrier(receiver, JSMapIterator::kIndexOffset, SmiTag(index)); var_value = entry_key; @@ -2136,7 +2130,7 @@ TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) { TNode entry_key; TNode entry_start_position; std::tie(entry_key, entry_start_position, index) = - NextSkipHoles(table, index, &done_loop); + NextSkipHashTableHoles(table, index, &done_loop); // Invoke the {callback} passing the {entry_key} (twice) and the {receiver}. 
Call(context, callback, this_arg, entry_key, entry_key, receiver); @@ -2204,7 +2198,7 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) { TNode entry_key; TNode entry_start_position; std::tie(entry_key, entry_start_position, index) = - NextSkipHoles(table, index, &return_end); + NextSkipHashTableHoles(table, index, &return_end); StoreObjectFieldNoWriteBarrier(receiver, JSSetIterator::kIndexOffset, SmiTag(index)); var_value = entry_key; diff --git a/deps/v8/src/builtins/builtins-collections-gen.h b/deps/v8/src/builtins/builtins-collections-gen.h index d69534dfa6708d..76d36a9822e5ca 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.h +++ b/deps/v8/src/builtins/builtins-collections-gen.h @@ -245,12 +245,15 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { const TNode iterator); template - std::tuple, TNode, TNode> NextSkipHoles( - TNode table, TNode index, Label* if_end); + std::tuple, TNode, TNode> + NextSkipHashTableHoles(TNode table, TNode index, + Label* if_end); template - std::tuple, TNode, TNode> NextSkipHoles( - TNode table, TNode number_of_buckets, - TNode used_capacity, TNode index, Label* if_end); + std::tuple, TNode, TNode> + NextSkipHashTableHoles(TNode table, + TNode number_of_buckets, + TNode used_capacity, TNode index, + Label* if_end); // Specialization for Smi. // The {result} variable will contain the entry index if the key was found, diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc index c6423124c141a5..b3bd3f0f8a02fb 100644 --- a/deps/v8/src/builtins/builtins-constructor-gen.cc +++ b/deps/v8/src/builtins/builtins-constructor-gen.cc @@ -253,7 +253,8 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) { shared_function_info); StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context); TNode lazy_builtin = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy)); - StoreMaybeIndirectPointerField(result, JSFunction::kCodeOffset, lazy_builtin); + StoreMaybeIndirectPointerField(result, JSFunction::kCodeOffset, + kCodeIndirectPointerTag, lazy_builtin); Return(result); } diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc index f539f1f6071c61..be4694d958c646 100644 --- a/deps/v8/src/builtins/builtins-date.cc +++ b/deps/v8/src/builtins/builtins-date.cc @@ -58,8 +58,8 @@ double ParseDateTimeString(Isolate* isolate, Handle str) { return DateCache::TimeClip(date); } -Object SetLocalDateValue(Isolate* isolate, Handle date, - double time_val) { +Tagged SetLocalDateValue(Isolate* isolate, Handle date, + double time_val) { if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs && time_val <= DateCache::kMaxTimeBeforeUTCInMs) { time_val = isolate->date_cache()->ToUTC(static_cast(time_val)); diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h index 18da8a651ba30f..55ca4622e9c5b5 100644 --- a/deps/v8/src/builtins/builtins-definitions.h +++ b/deps/v8/src/builtins/builtins-definitions.h @@ -55,8 +55,8 @@ namespace internal { #define BUILTIN_LIST_BASE_TIER1(CPP, TFJ, TFC, TFS, TFH, ASM) \ /* GC write barriers */ \ - TFC(IndirectPointerBarrierSaveFP, WriteBarrier) \ - TFC(IndirectPointerBarrierIgnoreFP, WriteBarrier) \ + TFC(IndirectPointerBarrierSaveFP, IndirectPointerWriteBarrier) \ + TFC(IndirectPointerBarrierIgnoreFP, IndirectPointerWriteBarrier) \ \ /* TSAN support for stores in generated code. 
*/ \ IF_TSAN(TFC, TSANRelaxedStore8IgnoreFP, TSANStore) \ @@ -414,7 +414,6 @@ namespace internal { CPP(ArrayShift) \ /* ES6 #sec-array.prototype.unshift */ \ CPP(ArrayUnshift) \ - CPP(ArrayFromAsync) \ /* Support for Array.from and other array-copying idioms */ \ TFS(CloneFastJSArray, NeedsContext::kYes, kSource) \ TFS(CloneFastJSArrayFillingHoles, NeedsContext::kYes, kSource) \ @@ -656,11 +655,9 @@ namespace internal { TFH(LoadSuperICBaseline, LoadWithReceiverBaseline) \ TFH(KeyedLoadIC, KeyedLoadWithVector) \ TFH(KeyedLoadIC_Megamorphic, KeyedLoadWithVector) \ - TFH(KeyedLoadIC_MegamorphicStringKey, KeyedLoadWithVector) \ TFH(KeyedLoadICTrampoline, KeyedLoad) \ TFH(KeyedLoadICBaseline, KeyedLoadBaseline) \ TFH(KeyedLoadICTrampoline_Megamorphic, KeyedLoad) \ - TFH(KeyedLoadICTrampoline_MegamorphicStringKey, KeyedLoad) \ TFH(StoreGlobalIC, StoreGlobalWithVector) \ TFH(StoreGlobalICTrampoline, StoreGlobal) \ TFH(StoreGlobalICBaseline, StoreGlobalBaseline) \ @@ -709,6 +706,7 @@ namespace internal { TFS(IterableToFixedArrayWithSymbolLookupSlow, NeedsContext::kYes, kIterable) \ TFS(IterableToListMayPreserveHoles, NeedsContext::kYes, kIterable, \ kIteratorFn) \ + TFS(IterableToListConvertHoles, NeedsContext::kYes, kIterable, kIteratorFn) \ IF_WASM(TFS, IterableToFixedArrayForWasm, NeedsContext::kYes, kIterable, \ kExpectedLength) \ \ diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc index b9af2d84af026e..8c3a7fb1ad621b 100644 --- a/deps/v8/src/builtins/builtins-ic-gen.cc +++ b/deps/v8/src/builtins/builtins-ic-gen.cc @@ -58,11 +58,6 @@ void Builtins::Generate_KeyedLoadIC_Megamorphic( AccessorAssembler assembler(state); assembler.GenerateKeyedLoadIC_Megamorphic(); } -void Builtins::Generate_KeyedLoadIC_MegamorphicStringKey( - compiler::CodeAssemblerState* state) { - AccessorAssembler assembler(state); - assembler.GenerateKeyedLoadIC_MegamorphicStringKey(); -} void Builtins::Generate_KeyedLoadIC_PolymorphicName( compiler::CodeAssemblerState* state) { AccessorAssembler assembler(state); @@ -83,11 +78,6 @@ void Builtins::Generate_KeyedLoadICTrampoline_Megamorphic( AccessorAssembler assembler(state); assembler.GenerateKeyedLoadICTrampoline_Megamorphic(); } -void Builtins::Generate_KeyedLoadICTrampoline_MegamorphicStringKey( - compiler::CodeAssemblerState* state) { - AccessorAssembler assembler(state); - assembler.GenerateKeyedLoadICTrampoline_MegamorphicStringKey(); -} void Builtins::Generate_LoadGlobalIC_NoFeedback( compiler::CodeAssemblerState* state) { AccessorAssembler assembler(state); diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc index 9a35975fa9279d..4cbff5d060da34 100644 --- a/deps/v8/src/builtins/builtins-internal-gen.cc +++ b/deps/v8/src/builtins/builtins-internal-gen.cc @@ -270,7 +270,7 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { BIND(&next); } - void PointerTableWriteBarrier(SaveFPRegsMode fp_mode) { + void IndirectPointerWriteBarrier(SaveFPRegsMode fp_mode) { // Currently, only objects living in (local) old space are referenced // through a pointer table indirection and we have DCHECKs in the CPP write // barrier code to check that. This simplifies the write barrier code for @@ -281,11 +281,15 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { BIND(&marking_is_on); // For this barrier, the slot contains an index into a pointer table and not - // directly a pointer to a HeapObject. 
- TNode slot = - UncheckedParameter(WriteBarrierDescriptor::kSlotAddress); - TNode object = BitcastTaggedToWord( - UncheckedParameter(WriteBarrierDescriptor::kObject)); + // directly a pointer to a HeapObject. Further, the slot address is tagged + // with the indirect pointer tag of the slot, so it cannot directly be + // dereferenced but needs to be decoded first. + TNode slot = UncheckedParameter( + IndirectPointerWriteBarrierDescriptor::kSlotAddress); + TNode object = BitcastTaggedToWord(UncheckedParameter( + IndirectPointerWriteBarrierDescriptor::kObject)); + TNode tag = UncheckedParameter( + IndirectPointerWriteBarrierDescriptor::kIndirectPointerTag); TNode function = ExternalConstant( ExternalReference:: @@ -293,7 +297,8 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { CallCFunctionWithCallerSavedRegisters( function, MachineTypeOf::value, fp_mode, std::make_pair(MachineTypeOf::value, object), - std::make_pair(MachineTypeOf::value, slot)); + std::make_pair(MachineTypeOf::value, slot), + std::make_pair(MachineTypeOf::value, tag)); Goto(&next); BIND(&next); @@ -561,7 +566,12 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { return; } - PointerTableWriteBarrier(fp_mode); + if (!V8_ENABLE_SANDBOX_BOOL) { + Unreachable(); + return; + } + + IndirectPointerWriteBarrier(fp_mode); IncrementCounter(isolate()->counters()->write_barriers(), 1); Return(TrueConstant()); } diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc index 3baadec1cd5172..99b52505797ab9 100644 --- a/deps/v8/src/builtins/builtins-iterator-gen.cc +++ b/deps/v8/src/builtins/builtins-iterator-gen.cc @@ -71,6 +71,30 @@ TNode IteratorBuiltinsAssembler::IteratorStep( GotoIf(TaggedIsSmi(result), &if_notobject); TNode heap_object_result = CAST(result); TNode result_map = LoadMap(heap_object_result); + GotoIfNot(JSAnyIsNotPrimitiveMap(result_map), &if_notobject); + + // IteratorComplete + // 2. Return ToBoolean(? Get(iterResult, "done")). + IteratorComplete(context, heap_object_result, if_done, + fast_iterator_result_map); + Goto(&return_result); + + BIND(&if_notobject); + CallRuntime(Runtime::kThrowIteratorResultNotAnObject, context, result); + Unreachable(); + + BIND(&return_result); + return CAST(heap_object_result); +} + +void IteratorBuiltinsAssembler::IteratorComplete( + TNode context, const TNode iterator, Label* if_done, + base::Optional> fast_iterator_result_map) { + DCHECK_NOT_NULL(if_done); + + Label return_result(this); + + TNode result_map = LoadMap(iterator); if (fast_iterator_result_map) { // Fast iterator result case: @@ -79,10 +103,9 @@ TNode IteratorBuiltinsAssembler::IteratorStep( // 4. Return result. GotoIfNot(TaggedEqual(result_map, *fast_iterator_result_map), &if_generic); - // IteratorComplete // 2. Return ToBoolean(? Get(iterResult, "done")). TNode done = - LoadObjectField(heap_object_result, JSIteratorResult::kDoneOffset); + LoadObjectField(iterator, JSIteratorResult::kDoneOffset); BranchIfToBooleanIsTrue(done, if_done, &return_result); BIND(&if_generic); @@ -90,22 +113,14 @@ TNode IteratorBuiltinsAssembler::IteratorStep( // Generic iterator result case: { - // 3. If Type(result) is not Object, throw a TypeError exception. - GotoIfNot(JSAnyIsNotPrimitiveMap(result_map), &if_notobject); - - // IteratorComplete // 2. Return ToBoolean(? Get(iterResult, "done")). 
TNode done = - GetProperty(context, heap_object_result, factory()->done_string()); + GetProperty(context, iterator, factory()->done_string()); BranchIfToBooleanIsTrue(done, if_done, &return_result); } - BIND(&if_notobject); - CallRuntime(Runtime::kThrowIteratorResultNotAnObject, context, result); - Unreachable(); - BIND(&return_result); - return CAST(heap_object_result); + return; } TNode IteratorBuiltinsAssembler::IteratorValue( @@ -340,7 +355,8 @@ TF_BUILTIN(StringFixedArrayFromIterable, IteratorBuiltinsAssembler) { // will be copied to the new array, which is inconsistent with the behavior of // an actual iteration, where holes should be replaced with undefined (if the // prototype has no elements). To maintain the correct behavior for holey -// arrays, use the builtins IterableToList or IterableToListWithSymbolLookup. +// arrays, use the builtins IterableToList or IterableToListWithSymbolLookup or +// IterableToListConvertHoles. TF_BUILTIN(IterableToListMayPreserveHoles, IteratorBuiltinsAssembler) { auto context = Parameter(Descriptor::kContext); auto iterable = Parameter(Descriptor::kIterable); @@ -357,6 +373,29 @@ TF_BUILTIN(IterableToListMayPreserveHoles, IteratorBuiltinsAssembler) { TailCallBuiltin(Builtin::kIterableToList, context, iterable, iterator_fn); } +// This builtin always returns a new JSArray and is thus safe to use even in the +// presence of code that may call back into user-JS. This builtin will take the +// fast path if the iterable is a fast array and the Array prototype and the +// Symbol.iterator is untouched. The fast path skips the iterator and copies the +// backing store to the new array. Note that if the array has holes, the holes +// will be converted to undefined values in the new array (unlike +// IterableToListMayPreserveHoles builtin). +TF_BUILTIN(IterableToListConvertHoles, IteratorBuiltinsAssembler) { + auto context = Parameter(Descriptor::kContext); + auto iterable = Parameter(Descriptor::kIterable); + auto iterator_fn = Parameter(Descriptor::kIteratorFn); + + Label slow_path(this); + + GotoIfNot(IsFastJSArrayWithNoCustomIteration(context, iterable), &slow_path); + + // The fast path will convert holes to undefined values in the new array. + TailCallBuiltin(Builtin::kCloneFastJSArrayFillingHoles, context, iterable); + + BIND(&slow_path); + TailCallBuiltin(Builtin::kIterableToList, context, iterable, iterator_fn); +} + void IteratorBuiltinsAssembler::FastIterableToList( TNode context, TNode iterable, TVariable* var_result, Label* slow) { diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h index f7ba54b1b8c03b..39775a6b9d02fa 100644 --- a/deps/v8/src/builtins/builtins-iterator-gen.h +++ b/deps/v8/src/builtins/builtins-iterator-gen.h @@ -42,6 +42,18 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler { return IteratorStep(context, iterator, if_done, fast_iterator_result_map); } + // https://tc39.es/ecma262/#sec-iteratorcomplete + void IteratorComplete( + TNode context, const TNode iterator, Label* if_done, + base::Optional> fast_iterator_result_map = base::nullopt); + void IteratorComplete(TNode context, + const TNode iterator, + base::Optional> fast_iterator_result_map, + Label* if_done) { + return IteratorComplete(context, iterator, if_done, + fast_iterator_result_map); + } + // https://tc39.github.io/ecma262/#sec-iteratorvalue // Return the `value` field from an iterator. 
// `fast_iterator_result_map` refers to the map for the JSIteratorResult diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc index 2a377bf10e1d1b..2c03c0516ac027 100644 --- a/deps/v8/src/builtins/builtins-lazy-gen.cc +++ b/deps/v8/src/builtins/builtins-lazy-gen.cc @@ -75,7 +75,7 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot( // Optimized code is good, get it into the closure and link the closure into // the optimized functions list, then tail call the optimized code. StoreMaybeIndirectPointerField(function, JSFunction::kCodeOffset, - optimized_code); + kCodeIndirectPointerTag, optimized_code); Comment("MaybeTailCallOptimizedCodeSlot:: GenerateTailCallToJSCode"); GenerateTailCallToJSCode(optimized_code, function); @@ -111,7 +111,8 @@ void LazyBuiltinsAssembler::CompileLazy(TNode function) { CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODE( isolate(), CompileLazy)))); - StoreMaybeIndirectPointerField(function, JSFunction::kCodeOffset, sfi_code); + StoreMaybeIndirectPointerField(function, JSFunction::kCodeOffset, + kCodeIndirectPointerTag, sfi_code); Label maybe_use_sfi_code(this); // If there is no feedback, don't check for optimized code. @@ -168,7 +169,8 @@ TF_BUILTIN(CompileLazyDeoptimizedCode, LazyBuiltinsAssembler) { TNode code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy)); // Set the code slot inside the JSFunction to CompileLazy. - StoreMaybeIndirectPointerField(function, JSFunction::kCodeOffset, code); + StoreMaybeIndirectPointerField(function, JSFunction::kCodeOffset, + kCodeIndirectPointerTag, code); GenerateTailCallToJSCode(code, function); } diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc index f58636fee555d7..96fdaaba202684 100644 --- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc +++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc @@ -356,7 +356,7 @@ void MicrotaskQueueBuiltinsAssembler::IncrementFinishedMicrotaskCount( TNode MicrotaskQueueBuiltinsAssembler::GetCurrentContext() { auto ref = ExternalReference::Create(kContextAddress, isolate()); // TODO(delphick): Add a checked cast. For now this is not possible as context - // can actually be Smi(0). + // can actually be Tagged(0). return TNode::UncheckedCast(LoadFullTagged(ExternalConstant(ref))); } diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc index 9779bec18ac6ab..a0421dbd42d9b8 100644 --- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc +++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc @@ -125,6 +125,9 @@ inline size_t GetAddress32(size_t index, size_t byte_offset) { // ES #sec-atomics.notify // Atomics.notify( typedArray, index, count ) BUILTIN(AtomicsNotify) { + // TODO(clemensb): This builtin only allocates (an exception) in the case of + // an error; we could try to avoid allocating the HandleScope in the non-error + // case. HandleScope scope(isolate); Handle array = args.atOrUndefined(isolate, 1); Handle index = args.atOrUndefined(isolate, 2); @@ -163,20 +166,21 @@ BUILTIN(AtomicsNotify) { // 10. If IsSharedArrayBuffer(buffer) is false, return 0. Handle array_buffer = sta->GetBuffer(); - size_t wake_addr; - if (V8_UNLIKELY(!sta->GetBuffer()->is_shared())) { - return Smi::FromInt(0); + if (V8_UNLIKELY(!array_buffer->is_shared())) { + return Smi::zero(); } // Steps 11-17 performed in FutexEmulation::Wake. 
+ size_t wake_addr; if (sta->type() == kExternalBigInt64Array) { wake_addr = GetAddress64(i, sta->byte_offset()); } else { DCHECK(sta->type() == kExternalInt32Array); wake_addr = GetAddress32(i, sta->byte_offset()); } - return FutexEmulation::Wake(array_buffer, wake_addr, c); + int num_waiters_woken = FutexEmulation::Wake(*array_buffer, wake_addr, c); + return Smi::FromInt(num_waiters_woken); } Tagged DoWait(Isolate* isolate, FutexEmulation::WaitMode mode, diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc index db193bdf8bd7a9..18a00bbd7a5f51 100644 --- a/deps/v8/src/builtins/builtins-string.cc +++ b/deps/v8/src/builtins/builtins-string.cc @@ -244,9 +244,9 @@ inline bool ToUpperOverflows(base::uc32 character) { } template -V8_WARN_UNUSED_RESULT static Object ConvertCaseHelper( - Isolate* isolate, String string, SeqString result, int result_length, - unibrow::Mapping* mapping) { +V8_WARN_UNUSED_RESULT static Tagged ConvertCaseHelper( + Isolate* isolate, Tagged string, Tagged result, + int result_length, unibrow::Mapping* mapping) { DisallowGarbageCollection no_gc; // We try this twice, once with the assumption that the result is no longer // than the input and, if that assumption breaks, again with the exact @@ -272,16 +272,16 @@ V8_WARN_UNUSED_RESULT static Object ConvertCaseHelper( int char_length = mapping->get(current, next, chars); if (char_length == 0) { // The case conversion of this character is the character itself. - result.Set(i, current); + result->Set(i, current); i++; } else if (char_length == 1 && (ignore_overflow || !ToUpperOverflows(current))) { // Common case: converting the letter resulted in one character. DCHECK(static_cast(chars[0]) != current); - result.Set(i, chars[0]); + result->Set(i, chars[0]); has_changed_character = true; i++; - } else if (result_length == string.length()) { + } else if (result_length == string->length()) { bool overflows = ToUpperOverflows(current); // We've assumed that the result would be as long as the // input but here is a character that converts to several @@ -322,7 +322,7 @@ V8_WARN_UNUSED_RESULT static Object ConvertCaseHelper( : Smi::FromInt(current_length); } else { for (int j = 0; j < char_length; j++) { - result.Set(i, chars[j]); + result->Set(i, chars[j]); i++; } has_changed_character = true; @@ -341,7 +341,7 @@ V8_WARN_UNUSED_RESULT static Object ConvertCaseHelper( } template -V8_WARN_UNUSED_RESULT static Object ConvertCase( +V8_WARN_UNUSED_RESULT static Tagged ConvertCase( Handle s, Isolate* isolate, unibrow::Mapping* mapping) { s = String::Flatten(isolate, s); @@ -379,7 +379,8 @@ V8_WARN_UNUSED_RESULT static Object ConvertCase( result = isolate->factory()->NewRawTwoByteString(length).ToHandleChecked(); } - Object answer = ConvertCaseHelper(isolate, *s, *result, length, mapping); + Tagged answer = + ConvertCaseHelper(isolate, *s, *result, length, mapping); if (IsException(answer, isolate) || IsString(answer)) return answer; DCHECK(IsSmi(answer)); diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc index 452012c096527e..42e594a0ce9252 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.cc +++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc @@ -51,9 +51,9 @@ TNode TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer( // - Set BitField to 0. // - Set IsExternal and IsDetachable bits of BitFieldSlot. // - Set the byte_length field to zero. - // - Set backing_store to null/Smi(0). 
+ // - Set backing_store to null/Tagged(0). // - Set extension to null. - // - Set all embedder fields to Smi(0). + // - Set all embedder fields to Tagged(0). if (FIELD_SIZE(JSArrayBuffer::kOptionalPaddingOffset) != 0) { DCHECK_EQ(4, FIELD_SIZE(JSArrayBuffer::kOptionalPaddingOffset)); StoreObjectFieldNoWriteBarrier( diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h index 9d9a51f33bae94..877ac18499752d 100644 --- a/deps/v8/src/builtins/builtins-utils.h +++ b/deps/v8/src/builtins/builtins-utils.h @@ -23,12 +23,12 @@ class BuiltinArguments : public JavaScriptArguments { : Arguments(length, arguments) { // Check we have at least the receiver. DCHECK_LE(1, this->length()); - DCHECK(Object((*at(0)).ptr()).IsObject()); + DCHECK(Tagged((*at(0)).ptr()).IsObject()); } Tagged operator[](int index) const { DCHECK_LT(index, length()); - return Object(*address_of_arg_at(index + kArgsOffset)); + return Tagged(*address_of_arg_at(index + kArgsOffset)); } template @@ -104,7 +104,7 @@ static_assert(BuiltinArguments::kNumExtraArgsWithReceiver == // TODO(cbruni): add global flag to check whether any tracing events have been // enabled. #define BUILTIN_RCS(name) \ - V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \ + V8_WARN_UNUSED_RESULT static Tagged Builtin_Impl_##name( \ BuiltinArguments args, Isolate* isolate); \ \ V8_NOINLINE static Address Builtin_Impl_Stats_##name( \ @@ -126,11 +126,11 @@ static_assert(BuiltinArguments::kNumExtraArgsWithReceiver == return BUILTIN_CONVERT_RESULT(Builtin_Impl_##name(args, isolate)); \ } \ \ - V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \ + V8_WARN_UNUSED_RESULT static Tagged Builtin_Impl_##name( \ BuiltinArguments args, Isolate* isolate) #define BUILTIN_NO_RCS(name) \ - V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \ + V8_WARN_UNUSED_RESULT static Tagged Builtin_Impl_##name( \ BuiltinArguments args, Isolate* isolate); \ \ V8_WARN_UNUSED_RESULT Address Builtin_##name( \ @@ -140,7 +140,7 @@ static_assert(BuiltinArguments::kNumExtraArgsWithReceiver == return BUILTIN_CONVERT_RESULT(Builtin_Impl_##name(args, isolate)); \ } \ \ - V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \ + V8_WARN_UNUSED_RESULT static Tagged Builtin_Impl_##name( \ BuiltinArguments args, Isolate* isolate) #ifdef V8_RUNTIME_CALL_STATS diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc index 1085f16075f7b4..9459355e85bf6c 100644 --- a/deps/v8/src/builtins/builtins.cc +++ b/deps/v8/src/builtins/builtins.cc @@ -188,7 +188,7 @@ void Builtins::set_code(Builtin builtin, Tagged code) { Tagged Builtins::code(Builtin builtin) { Address ptr = isolate_->builtin_table()[Builtins::ToInt(builtin)]; - return Code::cast(Object(ptr)); + return Code::cast(Tagged(ptr)); } Handle Builtins::code_handle(Builtin builtin) { @@ -262,6 +262,11 @@ const char* Builtins::NameForStackTrace(Builtin builtin) { case Builtin::kStringPrototypeIndexOf: case Builtin::kThrowIndexOfCalledOnNull: return "String.indexOf"; + case Builtin::kDataViewPrototypeGetInt32: + case Builtin::kThrowDataViewGetInt32DetachedError: + case Builtin::kThrowDataViewGetInt32OutOfBounds: + case Builtin::kThrowDataViewGetInt32TypeError: + return "DataView.getInt32"; #if V8_INTL_SUPPORT case Builtin::kStringPrototypeToLowerCaseIntl: #endif diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h index 5797cc15cf3c1d..f4d6cf4fce7391 100644 --- a/deps/v8/src/builtins/builtins.h +++ b/deps/v8/src/builtins/builtins.h @@ -126,23 +126,22 @@ 
class Builtins { static BytecodeOffset GetContinuationBytecodeOffset(Builtin builtin); static Builtin GetBuiltinFromBytecodeOffset(BytecodeOffset); - static constexpr Builtin GetRecordWriteStub( - SaveFPRegsMode fp_mode, PointerType type = PointerType::kDirect) { - switch (type) { - case PointerType::kDirect: - switch (fp_mode) { - case SaveFPRegsMode::kIgnore: - return Builtin::kRecordWriteIgnoreFP; - case SaveFPRegsMode::kSave: - return Builtin::kRecordWriteSaveFP; - } - case PointerType::kIndirect: - switch (fp_mode) { - case SaveFPRegsMode::kIgnore: - return Builtin::kIndirectPointerBarrierIgnoreFP; - case SaveFPRegsMode::kSave: - return Builtin::kIndirectPointerBarrierSaveFP; - } + static constexpr Builtin GetRecordWriteStub(SaveFPRegsMode fp_mode) { + switch (fp_mode) { + case SaveFPRegsMode::kIgnore: + return Builtin::kRecordWriteIgnoreFP; + case SaveFPRegsMode::kSave: + return Builtin::kRecordWriteSaveFP; + } + } + + static constexpr Builtin GetIndirectPointerBarrierStub( + SaveFPRegsMode fp_mode) { + switch (fp_mode) { + case SaveFPRegsMode::kIgnore: + return Builtin::kIndirectPointerBarrierIgnoreFP; + case SaveFPRegsMode::kSave: + return Builtin::kIndirectPointerBarrierSaveFP; } } diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq index 36eda98f16fe8b..a79e6816e6f5c3 100644 --- a/deps/v8/src/builtins/convert.tq +++ b/deps/v8/src/builtins/convert.tq @@ -117,12 +117,6 @@ FromConstexpr(i: constexpr int31): int8 { FromConstexpr(i: constexpr int31): char8 { return %RawDownCast(FromConstexpr(i)); } -FromConstexpr(s: constexpr Smi): Number { - return SmiConstant(s); -} -FromConstexpr(s: constexpr Smi): Smi { - return SmiConstant(s); -} FromConstexpr(i: constexpr int31): uint32 { return Unsigned(Int32Constant(i)); } diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq index b418d04e709783..bba1100db717e8 100644 --- a/deps/v8/src/builtins/data-view.tq +++ b/deps/v8/src/builtins/data-view.tq @@ -77,7 +77,7 @@ macro ValidateDataView(context: Context, o: JSAny, method: String): return UnsafeCast(o); } case (_x: JSAny): { - ThrowTypeError(MessageTemplate::kIncompatibleMethodReceiver, method); + ThrowTypeError(MessageTemplate::kIncompatibleMethodReceiver, method, o); } } } diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc index f603a1e611cc28..bf29764b89fe8d 100644 --- a/deps/v8/src/builtins/ia32/builtins-ia32.cc +++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc @@ -902,39 +902,28 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, eax); __ j(not_equal, &compile_lazy); -#ifndef V8_JITLESS - Register closure = edi; - Register feedback_vector = ecx; Label push_stack_frame; - // Load feedback vector and check if it is valid. If valid, check for - // optimized code and update invocation count. Otherwise, setup the stack - // frame. 
- __ mov(feedback_vector, - FieldOperand(closure, JSFunction::kFeedbackCellOffset)); - __ mov(feedback_vector, - FieldOperand(feedback_vector, FeedbackCell::kValueOffset)); - __ mov(eax, FieldOperand(feedback_vector, HeapObject::kMapOffset)); - __ CmpInstanceType(eax, FEEDBACK_VECTOR_TYPE); - __ j(not_equal, &push_stack_frame); + Register feedback_vector = ecx; + Register closure = edi; + Register scratch = eax; + __ LoadFeedbackVector(feedback_vector, closure, scratch, &push_stack_frame, + Label::kNear); - // Load the optimization state from the feedback vector and re-use the +#ifndef V8_JITLESS + // If feedback vector is valid, check for optimized code and update invocation + // count. Load the optimization state from the feedback vector and re-use the // register. Label flags_need_processing; Register flags = ecx; + XMMRegister saved_feedback_vector = xmm1; __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( - flags, xmm1, CodeKind::INTERPRETED_FUNCTION, &flags_need_processing); + flags, saved_feedback_vector, CodeKind::INTERPRETED_FUNCTION, + &flags_need_processing); // Reload the feedback vector. - // TODO(jgruber): Don't clobber it above. - __ mov(feedback_vector, - FieldOperand(closure, JSFunction::kFeedbackCellOffset)); - __ mov(feedback_vector, - FieldOperand(feedback_vector, FeedbackCell::kValueOffset)); + __ movd(feedback_vector, saved_feedback_vector); - { - static constexpr Register scratch = eax; - ResetFeedbackVectorOsrUrgency(masm, feedback_vector, scratch); - } + ResetFeedbackVectorOsrUrgency(masm, feedback_vector, scratch); // Increment the invocation count. __ inc(FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset)); @@ -942,13 +931,14 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Open a frame scope to indicate that there is a frame on the stack. The // MANUAL indicates that the scope shouldn't actually generate code to set // up the frame (that is done below). - __ bind(&push_stack_frame); #else // Note: By omitting the above code in jitless mode we also disable: // - kFlagsLogNextExecution: only used for logging/profiling; and // - kInvocationCountOffset: only used for tiering heuristics and code // coverage. #endif // !V8_JITLESS + + __ bind(&push_stack_frame); FrameScope frame_scope(masm, StackFrame::MANUAL); __ push(ebp); // Caller's frame pointer. __ mov(ebp, esp); @@ -979,6 +969,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ push(kInterpreterBytecodeArrayRegister); // Push Smi tagged initial bytecode array offset. __ push(Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag))); + __ push(feedback_vector); // Allocate the local and temporary register file on the stack. Label stack_overflow; @@ -1603,7 +1594,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { // Set the return address to the correct point in the interpreter entry // trampoline. 
Label builtin_trampoline, trampoline_loaded; - Smi interpreter_entry_return_pc_offset( + Tagged interpreter_entry_return_pc_offset( masm->isolate()->heap()->interpreter_entry_return_pc_offset()); DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero()); @@ -1743,22 +1734,25 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { XMMRegister saved_arg_count = xmm0; XMMRegister saved_bytecode_array = xmm1; XMMRegister saved_frame_size = xmm2; - XMMRegister saved_feedback_vector = xmm3; + XMMRegister saved_feedback_cell = xmm3; + XMMRegister saved_feedback_vector = xmm4; __ movd(saved_arg_count, arg_count); __ movd(saved_frame_size, frame_size); // Use the arg count (eax) as the scratch register. Register scratch = arg_count; - // Load the feedback vector from the closure. - Register feedback_vector = ecx; + // Load the feedback cell and vector from the closure. Register closure = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); + Register feedback_cell = ecx; + __ mov(feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset)); + __ movd(saved_feedback_cell, feedback_cell); + Register feedback_vector = ecx; __ mov(feedback_vector, - FieldOperand(closure, JSFunction::kFeedbackCellOffset)); - __ mov(feedback_vector, - FieldOperand(feedback_vector, FeedbackCell::kValueOffset)); + FieldOperand(feedback_cell, FeedbackCell::kValueOffset)); __ AssertFeedbackVector(feedback_vector, scratch); + feedback_cell = no_reg; // Load the optimization state from the feedback vector and re-use the // register. @@ -1779,7 +1773,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // Increment the invocation count. __ inc(FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset)); - XMMRegister return_address = xmm4; + XMMRegister return_address = xmm5; // Save the return address, so that we can push it to the end of the newly // set-up frame once we're done setting it up. __ PopReturnAddressTo(return_address, scratch); @@ -1803,12 +1797,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // We'll use the bytecode for both code age/OSR resetting, and pushing onto // the frame, so load it into a register. - Register bytecode_array = scratch; - __ movd(bytecode_array, saved_bytecode_array); - __ Push(bytecode_array); - - // Baseline code frames store the feedback vector where interpreter would - // store the bytecode offset. + __ Push(saved_bytecode_array, scratch); + __ Push(saved_feedback_cell, scratch); __ Push(saved_feedback_vector, scratch); } @@ -1877,6 +1867,8 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) { // We're here because we got deopted during BaselineOutOfLinePrologue's stack // check. Undo all its frame creation and call into the interpreter instead. + // Drop the feedback vector. + __ Pop(ecx); // Drop bytecode offset (was the feedback vector but got replaced during // deopt). __ Pop(ecx); @@ -3788,7 +3780,7 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, // FunctionCallbackInfo::values_ (points at the first varargs argument // passed on the stack). 
__ lea(holder, - Operand(holder, (FCA::kArgsLength + 1) * kSystemPointerSize)); + Operand(holder, FCA::kArgsLengthWithReceiver * kSystemPointerSize)); __ mov(ExitFrameStackSlotOperand(kApiArgsSize + FCA::kValuesOffset), holder); @@ -3802,10 +3794,10 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, constexpr int kBytesToDropOffset = FCA::kLengthOffset + kSystemPointerSize; static_assert(kBytesToDropOffset == (kApiStackSpace - 1) * kSystemPointerSize); - __ lea(scratch, Operand(argc, times_system_pointer_size, - (FCA::kArgsLength + 1 /* receiver */ + - exit_frame_params_count) * - kSystemPointerSize)); + __ lea(scratch, + Operand(argc, times_system_pointer_size, + (FCA::kArgsLengthWithReceiver + exit_frame_params_count) * + kSystemPointerSize)); __ mov(ExitFrameStackSlotOperand(kApiArgsSize + kBytesToDropOffset), scratch); __ RecordComment("v8::FunctionCallback's argument."); @@ -4581,12 +4573,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, AssertCodeIsBaseline(masm, code_obj, ecx); } - // Load the feedback vector. + // Load the feedback cell and vector. + Register feedback_cell = eax; Register feedback_vector = ecx; + __ mov(feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset)); + closure = no_reg; __ mov(feedback_vector, - FieldOperand(closure, JSFunction::kFeedbackCellOffset)); - __ mov(feedback_vector, - FieldOperand(feedback_vector, FeedbackCell::kValueOffset)); + FieldOperand(feedback_cell, FeedbackCell::kValueOffset)); Label install_baseline_code; // Check if feedback vector is valid. If not, call prepare for baseline to @@ -4599,8 +4592,16 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ mov(kInterpreterBytecodeOffsetRegister, MemOperand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); __ SmiUntag(kInterpreterBytecodeOffsetRegister); - // Replace BytecodeOffset with the feedback vector. - __ mov(MemOperand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), + // Replace bytecode offset with feedback cell. + static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp == + BaselineFrameConstants::kFeedbackCellFromFp); + __ mov(MemOperand(ebp, BaselineFrameConstants::kFeedbackCellFromFp), + feedback_cell); + feedback_cell = no_reg; + // Update feedback vector cache. + static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp == + BaselineFrameConstants::kFeedbackVectorFromFp); + __ mov(MemOperand(ebp, InterpreterFrameConstants::kFeedbackVectorFromFp), feedback_vector); feedback_vector = no_reg; @@ -4651,6 +4652,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ pop(kInterpreterAccumulatorRegister); if (is_osr) { + DCHECK_EQ(feedback_cell, no_reg); + closure = ecx; __ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset)); ResetJSFunctionAge(masm, closure, closure); Generate_OSREntry(masm, code_obj); @@ -4673,21 +4676,19 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ bind(&install_baseline_code); // Pop/re-push the accumulator so that it's spilled within the below frame - // scope, to keep the stack valid. Use ecx for this -- we can't save it in - // kInterpreterAccumulatorRegister because that aliases with closure. - DCHECK(!AreAliased(ecx, kContextRegister, closure)); - __ pop(ecx); + // scope, to keep the stack valid. + __ pop(kInterpreterAccumulatorRegister); // Restore the clobbered context register. 
__ mov(kContextRegister, Operand(ebp, StandardFrameConstants::kContextOffset)); { FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(ecx); + __ Push(kInterpreterAccumulatorRegister); + // Reload closure. + closure = eax; + __ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset)); __ Push(closure); __ CallRuntime(Runtime::kInstallBaselineCode, 1); - // Now that we're restarting, we don't have to worry about closure and - // accumulator aliasing, so pop the spilled accumulator directly back into - // the right register. __ Pop(kInterpreterAccumulatorRegister); } // Retry from the start after installing baseline code. diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq index bbcdf066cfed02..dffa70dc26c12c 100644 --- a/deps/v8/src/builtins/iterator.tq +++ b/deps/v8/src/builtins/iterator.tq @@ -31,6 +31,10 @@ extern macro IteratorBuiltinsAssembler::IteratorStep( extern macro IteratorBuiltinsAssembler::IteratorStep( implicit context: Context)(IteratorRecord, Map): JSReceiver labels Done; +extern macro IteratorBuiltinsAssembler::IteratorComplete( + implicit context: Context)(JSReceiver): void labels Done; +extern macro IteratorBuiltinsAssembler::IteratorComplete( + implicit context: Context)(JSReceiver, Map): void labels Done; extern macro IteratorBuiltinsAssembler::IteratorValue( implicit context: Context)(JSReceiver): JSAny; @@ -97,6 +101,20 @@ transitioning builtin CreateAsyncFromSyncIteratorBaseline(syncIterator: JSAny): return CreateAsyncFromSyncIterator(context, syncIterator); } +@export +transitioning macro GetIteratorRecordAfterCreateAsyncFromSyncIterator( + asyncIterator: IteratorRecord): IteratorRecord { + const context: Context = LoadContextFromBaseline(); + + const iterator = CreateAsyncFromSyncIterator(context, asyncIterator.object); + + const nextMethod = GetProperty(iterator, kNextString); + return IteratorRecord{ + object: UnsafeCast(iterator), + next: nextMethod + }; +} + macro GetLazyReceiver(receiver: JSAny): JSAny { return receiver; } diff --git a/deps/v8/src/builtins/loong64/builtins-loong64.cc b/deps/v8/src/builtins/loong64/builtins-loong64.cc index abd863546b6d01..9ba3106d511f25 100644 --- a/deps/v8/src/builtins/loong64/builtins-loong64.cc +++ b/deps/v8/src/builtins/loong64/builtins-loong64.cc @@ -921,19 +921,20 @@ void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm, // static void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { UseScratchRegisterScope temps(masm); - temps.Include({s1, s2}); + temps.Include({s1, s2, s3}); temps.Exclude({t7}); auto descriptor = Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue); Register closure = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); - // Load the feedback vector from the closure. + // Load the feedback cell and vector from the closure. + Register feedback_cell = temps.Acquire(); Register feedback_vector = temps.Acquire(); - __ LoadTaggedField(feedback_vector, + __ LoadTaggedField(feedback_cell, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ LoadTaggedField( feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); + FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset)); { UseScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); @@ -991,22 +992,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // the frame, so load it into a register. 
Register bytecode_array = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); - __ Push(argc, bytecode_array); + __ Push(argc, bytecode_array, feedback_cell, feedback_vector); - // Baseline code frames store the feedback vector where interpreter would - // store the bytecode offset. { UseScratchRegisterScope temps(masm); Register invocation_count = temps.Acquire(); __ AssertFeedbackVector(feedback_vector, invocation_count); } - // Our stack is currently aligned. We have have to push something along with - // the feedback vector to keep it that way -- we may as well start - // initialising the register frame. - // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves - // `undefined` in the accumulator register, to skip the load in the baseline - // code. - __ Push(feedback_vector); } Label call_stack_guard; @@ -1057,7 +1049,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { __ Pop(kJavaScriptCallNewTargetRegister); } __ Ret(); - temps.Exclude({s1, s2}); + temps.Exclude({s1, s2, s3}); } // static @@ -1065,9 +1057,9 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) { // We're here because we got deopted during BaselineOutOfLinePrologue's stack // check. Undo all its frame creation and call into the interpreter instead. - // Drop bytecode offset (was the feedback vector but got replaced during - // deopt) and bytecode array. - __ Drop(2); + // Drop the feedback vector, the bytecode offset (was the feedback vector + // but got replaced during deopt) and bytecode array. + __ Drop(3); // Context, closure, argc. __ Pop(kContextRegister, kJavaScriptCallTargetRegister, @@ -1119,22 +1111,13 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ JumpIfObjectType(&compile_lazy, ne, kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE, kScratchReg); -#ifndef V8_JITLESS - // Load the feedback vector from the closure. + Label push_stack_frame; Register feedback_vector = a2; - __ LoadTaggedField(feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedField( - feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); + __ LoadFeedbackVector(feedback_vector, closure, a4, &push_stack_frame); - Label push_stack_frame; - // Check if feedback vector is valid. If valid, check for optimized code - // and update invocation count. Otherwise, setup the stack frame. - __ LoadTaggedField(a4, - FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); - __ Ld_hu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset)); - __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE)); +#ifndef V8_JITLESS + // If feedback vector is valid, check for optimized code and update invocation + // count. // Check the tiering state. Label flags_need_processing; @@ -1143,13 +1126,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION, &flags_need_processing); - { - UseScratchRegisterScope temps(masm); - ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire()); - } - - Label not_optimized; - __ bind(¬_optimized); + ResetFeedbackVectorOsrUrgency(masm, feedback_vector, a4); // Increment invocation count for the function. __ Ld_w(a4, FieldMemOperand(feedback_vector, @@ -1161,13 +1138,14 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Open a frame scope to indicate that there is a frame on the stack. 
The // MANUAL indicates that the scope shouldn't actually generate code to set up // the frame (that is done below). - __ bind(&push_stack_frame); #else // Note: By omitting the above code in jitless mode we also disable: // - kFlagsLogNextExecution: only used for logging/profiling; and // - kInvocationCountOffset: only used for tiering heuristics and code // coverage. #endif // !V8_JITLESS + + __ bind(&push_stack_frame); FrameScope frame_scope(masm, StackFrame::MANUAL); __ PushStandardFrame(closure); @@ -1175,9 +1153,10 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ li(kInterpreterBytecodeOffsetRegister, Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); - // Push bytecode array and Smi tagged bytecode array offset. + // Push bytecode array, Smi tagged bytecode array offset and the feedback + // vector. __ SmiTag(a4, kInterpreterBytecodeOffsetRegister); - __ Push(kInterpreterBytecodeArrayRegister, a4); + __ Push(kInterpreterBytecodeArrayRegister, a4, feedback_vector); // Allocate the local and temporary register file on the stack. Label stack_overflow; @@ -3795,13 +3774,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, AssertCodeIsBaseline(masm, code_obj, t2); } - // Replace BytecodeOffset with the feedback vector. - Register feedback_vector = a2; - __ LoadTaggedField(feedback_vector, + // Load the feedback cell and vector. + Register feedback_cell = a2; + Register feedback_vector = t8; + __ LoadTaggedField(feedback_cell, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ LoadTaggedField( feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); + FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset)); Label install_baseline_code; // Check if feedback vector is valid. If not, call prepare for baseline to @@ -3812,9 +3792,17 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, // Save BytecodeOffset from the stack frame. __ SmiUntag(kInterpreterBytecodeOffsetRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); - // Replace BytecodeOffset with the feedback vector. + // Replace bytecode offset with feedback cell. + static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp == + BaselineFrameConstants::kFeedbackCellFromFp); + __ St_d(feedback_cell, + MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp)); + feedback_cell = no_reg; + // Update feedback vector cache. + static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp == + BaselineFrameConstants::kFeedbackVectorFromFp); __ St_d(feedback_vector, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + MemOperand(fp, InterpreterFrameConstants::kFeedbackVectorFromFp)); feedback_vector = no_reg; // Compute baseline pc for bytecode offset. diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc index 81e01e09e9047d..2078a7b778242e 100644 --- a/deps/v8/src/builtins/mips64/builtins-mips64.cc +++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc @@ -902,17 +902,18 @@ void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm, // static void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { UseScratchRegisterScope temps(masm); - temps.Include({s1, s2}); + temps.Include({s1, s2, s3}); auto descriptor = Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue); Register closure = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); // Load the feedback vector from the closure. 
+ Register feedback_cell = temps.Acquire(); Register feedback_vector = temps.Acquire(); - __ Ld(feedback_vector, + __ Ld(feedback_cell, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ Ld(feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); + FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset)); { UseScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); @@ -970,22 +971,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // the frame, so load it into a register. Register bytecode_array = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); - __ Push(argc, bytecode_array); + __ Push(argc, bytecode_array, feedback_cell, feedback_vector); - // Baseline code frames store the feedback vector where interpreter would - // store the bytecode offset. { UseScratchRegisterScope temps(masm); Register invocation_count = temps.Acquire(); __ AssertFeedbackVector(feedback_vector, invocation_count); } - // Our stack is currently aligned. We have have to push something along with - // the feedback vector to keep it that way -- we may as well start - // initialising the register frame. - // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves - // `undefined` in the accumulator register, to skip the load in the baseline - // code. - __ Push(feedback_vector); } Label call_stack_guard; @@ -1036,7 +1028,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { __ Pop(kJavaScriptCallNewTargetRegister); } __ Ret(); - temps.Exclude({kScratchReg, kScratchReg2}); + temps.Exclude({s1, s2, s3}); } // static @@ -1044,9 +1036,9 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) { // We're here because we got deopted during BaselineOutOfLinePrologue's stack // check. Undo all its frame creation and call into the interpreter instead. - // Drop bytecode offset (was the feedback vector but got replaced during - // deopt) and bytecode array. - __ Drop(2); + // Drop the feedback vector, the bytecode offset (was the feedback vector + // but got replaced during deopt) and bytecode array. + __ Drop(3); // Context, closure, argc. __ Pop(kContextRegister, kJavaScriptCallTargetRegister, @@ -1095,20 +1087,13 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg); __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE)); -#ifndef V8_JITLESS - // Load the feedback vector from the closure. + Label push_stack_frame; Register feedback_vector = a2; - __ Ld(feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); - __ Ld(feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); + __ LoadFeedbackVector(feedback_vector, closure, a4, &push_stack_frame); - Label push_stack_frame; - // Check if feedback vector is valid. If valid, check for optimized code - // and update invocation count. Otherwise, setup the stack frame. - __ Ld(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); - __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset)); - __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE)); +#ifndef V8_JITLESS + // If feedback vector is valid, check for optimized code and update invocation + // count. // Check the tiering state. 
Label flags_need_processing; @@ -1117,13 +1102,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION, &flags_need_processing); - { - UseScratchRegisterScope temps(masm); - ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire()); - } - - Label not_optimized; - __ bind(¬_optimized); + ResetFeedbackVectorOsrUrgency(masm, feedback_vector, a4); // Increment invocation count for the function. __ Lw(a4, FieldMemOperand(feedback_vector, @@ -1135,13 +1114,14 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Open a frame scope to indicate that there is a frame on the stack. The // MANUAL indicates that the scope shouldn't actually generate code to set up // the frame (that is done below). - __ bind(&push_stack_frame); #else // Note: By omitting the above code in jitless mode we also disable: // - kFlagsLogNextExecution: only used for logging/profiling; and // - kInvocationCountOffset: only used for tiering heuristics and code // coverage. #endif // !V8_JITLESS + + __ bind(&push_stack_frame); FrameScope frame_scope(masm, StackFrame::MANUAL); __ PushStandardFrame(closure); @@ -1149,9 +1129,10 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ li(kInterpreterBytecodeOffsetRegister, Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); - // Push bytecode array and Smi tagged bytecode array offset. + // Push bytecode array, Smi tagged bytecode array offset, and the feedback + // vector. __ SmiTag(a4, kInterpreterBytecodeOffsetRegister); - __ Push(kInterpreterBytecodeArrayRegister, a4); + __ Push(kInterpreterBytecodeArrayRegister, a4, feedback_vector); // Allocate the local and temporary register file on the stack. Label stack_overflow; @@ -3822,12 +3803,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, AssertCodeIsBaseline(masm, code_obj, t2); } - // Replace BytecodeOffset with the feedback vector. - Register feedback_vector = a2; - __ Ld(feedback_vector, + // Load the feedback cell and vector. + Register feedback_cell = a2; + Register feedback_vector = t8; + __ Ld(feedback_cell, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ Ld(feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); + FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset)); Label install_baseline_code; // Check if feedback vector is valid. If not, call prepare for baseline to @@ -3838,9 +3820,17 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, // Save BytecodeOffset from the stack frame. __ SmiUntag(kInterpreterBytecodeOffsetRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); - // Replace BytecodeOffset with the feedback vector. + // Replace BytecodeOffset with feedback cell. + static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp == + BaselineFrameConstants::kFeedbackCellFromFp); + __ Sd(feedback_cell, + MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp)); + feedback_cell = no_reg; + // Update feedback vector cache. + static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp == + BaselineFrameConstants::kFeedbackVectorFromFp); __ Sd(feedback_vector, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + MemOperand(fp, InterpreterFrameConstants::kFeedbackVectorFromFp)); feedback_vector = no_reg; // Compute baseline pc for bytecode offset. 
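
Editor's note on the hunks above (loong64/mips64/ia32): the baseline entry paths now store the feedback cell in the stack slot that the interpreter used for the bytecode offset, and additionally cache the feedback vector in its own frame slot, guarding the in-place overwrite with static_asserts that the two frame layouts agree. The following is a minimal standalone sketch of that technique only; it is not V8 code, and every name and slot index in it is invented for illustration.

```cpp
// Hypothetical model of "reuse one frame slot across two layouts and cache a
// derived value in a second slot". Slot names/indices are made up.
#include <cstdint>
#include <iostream>

namespace InterpreterFrame {
constexpr int kBytecodeArraySlot = 0;
constexpr int kBytecodeOffsetSlot = 1;  // interpreter keeps the offset here
constexpr int kFeedbackVectorSlot = 2;
}  // namespace InterpreterFrame

namespace BaselineFrame {
constexpr int kBytecodeArraySlot = 0;
constexpr int kFeedbackCellSlot = 1;    // baseline reuses the offset slot
constexpr int kFeedbackVectorSlot = 2;  // cached so it need not be reloaded
}  // namespace BaselineFrame

// Mirror of the diff's guard: the overwrite below is only valid if the
// corresponding slots coincide in both layouts.
static_assert(InterpreterFrame::kBytecodeOffsetSlot ==
              BaselineFrame::kFeedbackCellSlot);
static_assert(InterpreterFrame::kFeedbackVectorSlot ==
              BaselineFrame::kFeedbackVectorSlot);

struct Frame {
  uintptr_t slots[3];
};

// Switch an interpreter-shaped frame to the baseline shape: the bytecode
// offset is replaced by the feedback cell, and the feedback vector is cached.
void SwitchToBaselineLayout(Frame& frame, uintptr_t feedback_cell,
                            uintptr_t feedback_vector) {
  frame.slots[BaselineFrame::kFeedbackCellSlot] = feedback_cell;
  frame.slots[BaselineFrame::kFeedbackVectorSlot] = feedback_vector;
}

int main() {
  Frame frame{{/*bytecode array*/ 0x1000, /*Smi offset*/ 42, /*vector*/ 0}};
  SwitchToBaselineLayout(frame, /*feedback_cell=*/0x2000,
                         /*feedback_vector=*/0x3000);
  std::cout << std::hex << frame.slots[BaselineFrame::kFeedbackCellSlot]
            << "\n"
            << frame.slots[BaselineFrame::kFeedbackVectorSlot] << "\n";
  return 0;
}
```
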
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc index 466d799975b59a..cd0a68e3f5275b 100644 --- a/deps/v8/src/builtins/ppc/builtins-ppc.cc +++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc @@ -173,14 +173,15 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, AssertCodeIsBaseline(masm, code_obj, r6); } - // Load the feedback vector. - Register feedback_vector = r5; - __ LoadTaggedField(feedback_vector, + // Load the feedback cell and vector. + Register feedback_cell = r5; + Register feedback_vector = ip; + __ LoadTaggedField(feedback_cell, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0); - __ LoadTaggedField( - feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset), r0); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset), + r0); Label install_baseline_code; // Check if feedback vector is valid. If not, call prepare for baseline to @@ -192,9 +193,17 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ LoadU64(kInterpreterBytecodeOffsetRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); __ SmiUntag(kInterpreterBytecodeOffsetRegister); - // Replace BytecodeOffset with the feedback vector. + // Replace bytecode offset with feedback cell. + static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp == + BaselineFrameConstants::kFeedbackCellFromFp); + __ StoreU64(feedback_cell, + MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp)); + feedback_cell = no_reg; + // Update feedback vector cache. + static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp == + BaselineFrameConstants::kFeedbackVectorFromFp); __ StoreU64(feedback_vector, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + MemOperand(fp, InterpreterFrameConstants::kFeedbackVectorFromFp)); feedback_vector = no_reg; // Compute baseline pc for bytecode offset. @@ -1201,14 +1210,15 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue); Register closure = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); - // Load the feedback vector from the closure. + // Load the feedback cell and vector from the closure. + Register feedback_cell = r7; Register feedback_vector = ip; - __ LoadTaggedField(feedback_vector, + __ LoadTaggedField(feedback_cell, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0); - __ LoadTaggedField( - feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset), r0); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset), + r0); __ AssertFeedbackVector(feedback_vector, r11); // Check for an tiering state. @@ -1260,14 +1270,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { __ Push(argc, bytecodeArray); - // Baseline code frames store the feedback vector where interpreter would - // store the bytecode offset. if (v8_flags.debug_code) { Register scratch = r11; __ CompareObjectType(feedback_vector, scratch, scratch, FEEDBACK_VECTOR_TYPE); __ Assert(eq, AbortReason::kExpectedFeedbackVector); } + __ Push(feedback_cell); __ Push(feedback_vector); } @@ -1331,9 +1340,9 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) { // We're here because we got deopted during BaselineOutOfLinePrologue's stack // check. 
Undo all its frame creation and call into the interpreter instead. - // Drop bytecode offset (was the feedback vector but got replaced during - // deopt) and bytecode array. - __ Drop(2); + // Drop the feedback vector, the bytecode offset (was the feedback vector but + // got replaced during deopt) and bytecode array. + __ Drop(3); // Context, closure, argc. __ Pop(kContextRegister, kJavaScriptCallTargetRegister, @@ -1387,24 +1396,13 @@ void Builtins::Generate_InterpreterEntryTrampoline( BYTECODE_ARRAY_TYPE); __ bne(&compile_lazy); -#ifndef V8_JITLESS - // Load the feedback vector from the closure. + Label push_stack_frame; Register feedback_vector = r5; - __ LoadTaggedField(feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), - r0); - __ LoadTaggedField( - feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset), r0); + __ LoadFeedbackVector(feedback_vector, closure, r7, &push_stack_frame); - Label push_stack_frame; - // Check if feedback vector is valid. If valid, check for optimized code - // and update invocation count. Otherwise, setup the stack frame. - __ LoadTaggedField( - r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0); - __ LoadU16(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset)); - __ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE)); - __ bne(&push_stack_frame); +#ifndef V8_JITLESS + // If feedback vector is valid, check for optimized code and update invocation + // count. Register flags = r7; Label flags_need_processing; @@ -1412,13 +1410,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION, &flags_need_processing); - { - UseScratchRegisterScope temps(masm); ResetFeedbackVectorOsrUrgency(masm, feedback_vector, ip, r0); - } - - Label not_optimized; - __ bind(¬_optimized); // Increment invocation count for the function. __ LoadU32( @@ -1435,7 +1427,6 @@ void Builtins::Generate_InterpreterEntryTrampoline( // MANUAL indicates that the scope shouldn't actually generate code to set up // the frame (that is done below). - __ bind(&push_stack_frame); #else // Note: By omitting the above code in jitless mode we also disable: // - kFlagsLogNextExecution: only used for logging/profiling; and @@ -1443,6 +1434,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( // coverage. #endif // !V8_JITLESS + __ bind(&push_stack_frame); FrameScope frame_scope(masm, StackFrame::MANUAL); __ PushStandardFrame(closure); @@ -1452,7 +1444,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Push bytecode array and Smi tagged bytecode array offset. __ SmiTag(r7, kInterpreterBytecodeOffsetRegister); - __ Push(kInterpreterBytecodeArrayRegister, r7); + __ Push(kInterpreterBytecodeArrayRegister, r7, feedback_vector); // Allocate the local and temporary register file on the stack. Label stack_overflow; @@ -3616,9 +3608,9 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, // from the API function here. 
MemOperand stack_space_operand = ExitFrameStackSlotOperand(FCA::kLengthOffset + kSlotsToDropOnStackSize); - __ mov(scratch, Operand((FCA::kArgsLength + 1 /* receiver */ + - exit_frame_params_count) * - kSystemPointerSize)); + __ mov(scratch, + Operand((FCA::kArgsLengthWithReceiver + exit_frame_params_count) * + kSystemPointerSize)); __ ShiftLeftU64(ip, argc, Operand(kSystemPointerSizeLog2)); __ add(scratch, scratch, ip); __ StoreU64(scratch, stack_space_operand); diff --git a/deps/v8/src/builtins/regexp-replace.tq b/deps/v8/src/builtins/regexp-replace.tq index 046ae285321bef..a160ee042d35ab 100644 --- a/deps/v8/src/builtins/regexp-replace.tq +++ b/deps/v8/src/builtins/regexp-replace.tq @@ -253,7 +253,7 @@ transitioning javascript builtin RegExpPrototypeReplace( // If Type(rx) is not Object, throw a TypeError exception. const rx = Cast(receiver) otherwise ThrowTypeError( - MessageTemplate::kIncompatibleMethodReceiver, methodName); + MessageTemplate::kIncompatibleMethodReceiver, methodName, receiver); // Let S be ? ToString(string). const s = ToString_Inline(string); diff --git a/deps/v8/src/builtins/regexp-test.tq b/deps/v8/src/builtins/regexp-test.tq index c8d08f7c772c98..4e3bcb906c66c2 100644 --- a/deps/v8/src/builtins/regexp-test.tq +++ b/deps/v8/src/builtins/regexp-test.tq @@ -14,7 +14,7 @@ transitioning javascript builtin RegExpPrototypeTest( const methodName: constexpr string = 'RegExp.prototype.test'; const receiver = Cast(receiver) otherwise ThrowTypeError( - MessageTemplate::kIncompatibleMethodReceiver, methodName); + MessageTemplate::kIncompatibleMethodReceiver, methodName, receiver); const str: String = ToString_Inline(string); if (IsFastRegExpPermissive(receiver)) { RegExpPrototypeExecBodyWithoutResultFast( diff --git a/deps/v8/src/builtins/riscv/builtins-riscv.cc b/deps/v8/src/builtins/riscv/builtins-riscv.cc index 638001f90549cc..db2ccb81419657 100644 --- a/deps/v8/src/builtins/riscv/builtins-riscv.cc +++ b/deps/v8/src/builtins/riscv/builtins-riscv.cc @@ -959,7 +959,7 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) { // Drop bytecode offset (was the feedback vector but got replaced during // deopt) and bytecode array. - __ AddWord(sp, sp, Operand(2 * kSystemPointerSize)); + __ AddWord(sp, sp, Operand(3 * kSystemPointerSize)); // Context, closure, argc. __ Pop(kContextRegister, kJavaScriptCallTargetRegister, @@ -974,18 +974,19 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) { void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { UseScratchRegisterScope temps(masm); - temps.Include({kScratchReg, kScratchReg2}); + temps.Include({kScratchReg, kScratchReg2, s1}); auto descriptor = Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue); Register closure = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); - // Load the feedback vector from the closure. + // Load the feedback cell and vector from the closure. 
+ Register feedback_cell = temps.Acquire(); Register feedback_vector = temps.Acquire(); - __ LoadTaggedField(feedback_vector, + __ LoadTaggedField(feedback_cell, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ LoadTaggedField( feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); + FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset)); { UseScratchRegisterScope temp(masm); Register type = temps.Acquire(); @@ -1039,7 +1040,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // the frame, so load it into a register. Register bytecode_array = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); - __ Push(argc, bytecode_array); + __ Push(argc, bytecode_array, feedback_cell, feedback_vector); // Baseline code frames store the feedback vector where interpreter would // store the bytecode offset. { @@ -1047,13 +1048,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { Register type = temps.Acquire(); __ AssertFeedbackVector(feedback_vector, type); } - // Our stack is currently aligned. We have have to push something along with - // the feedback vector to keep it that way -- we may as well start - // initialising the register frame. - // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves - // `undefined` in the accumulator register, to skip the load in the baseline - // code. - __ Push(feedback_vector); } Label call_stack_guard; @@ -1101,7 +1095,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { __ Pop(kJavaScriptCallNewTargetRegister); } __ Ret(); - temps.Exclude({kScratchReg, kScratchReg2}); + temps.Exclude({kScratchReg, kScratchReg2, s1}); } // Generate code for entering a JS function with the interpreter. @@ -1141,23 +1135,13 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg); __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE)); -#ifndef V8_JITLESS + Label push_stack_frame; Register feedback_vector = a2; - // Load the feedback vector from the closure. - __ LoadTaggedField(feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedField( - feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); + __ LoadFeedbackVector(feedback_vector, closure, a4, &push_stack_frame); - Label push_stack_frame; - // Check if feedback vector is valid. If valid, check for optimized code - // and update invocation count. Otherwise, setup the stack frame. - __ LoadTaggedField(a4, - FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); - __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset)); - __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE), - Label::Distance::kNear); +#ifndef V8_JITLESS + // If feedback vector is valid, check for optimized code and update invocation + // count. // Check the tiering state. Label flags_need_processing; @@ -1165,12 +1149,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION, &flags_need_processing); - { - UseScratchRegisterScope temps(masm); - ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire()); - } - Label not_optimized; - __ bind(¬_optimized); + ResetFeedbackVectorOsrUrgency(masm, feedback_vector, a4); // Increment invocation count for the function. 
__ Lw(a4, FieldMemOperand(feedback_vector, @@ -1182,13 +1161,14 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Open a frame scope to indicate that there is a frame on the stack. The // MANUAL indicates that the scope shouldn't actually generate code to set up // the frame (that is done below). - __ bind(&push_stack_frame); #else // Note: By omitting the above code in jitless mode we also disable: // - kFlagsLogNextExecution: only used for logging/profiling; and // - kInvocationCountOffset: only used for tiering heuristics and code // coverage. #endif // !V8_JITLESS + + __ bind(&push_stack_frame); FrameScope frame_scope(masm, StackFrame::MANUAL); __ PushStandardFrame(closure); @@ -1196,9 +1176,10 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ li(kInterpreterBytecodeOffsetRegister, Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); - // Push bytecode array and Smi tagged bytecode array offset. + // Push bytecode array, Smi tagged bytecode array offset, and the feedback + // vector. __ SmiTag(a4, kInterpreterBytecodeOffsetRegister); - __ Push(kInterpreterBytecodeArrayRegister, a4); + __ Push(kInterpreterBytecodeArrayRegister, a4, feedback_vector); // Allocate the local and temporary register file on the stack. Label stack_overflow; @@ -3913,13 +3894,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, AssertCodeIsBaseline(masm, code_obj, scratch); } - // Replace BytecodeOffset with the feedback vector. - Register feedback_vector = a2; - __ LoadTaggedField(feedback_vector, + // Load the feedback cell and vector. + Register feedback_cell = a2; + Register feedback_vector = t4; + __ LoadTaggedField(feedback_cell, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ LoadTaggedField( feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); + FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset)); Label install_baseline_code; // Check if feedback vector is valid. If not, call prepare for baseline to // allocate it. @@ -3930,10 +3912,18 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, // Save BytecodeOffset from the stack frame. __ SmiUntag(kInterpreterBytecodeOffsetRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); - // Replace BytecodeOffset with the feedback vector. + // Replace bytecode offset with feedback cell. + static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp == + BaselineFrameConstants::kFeedbackCellFromFp); + __ StoreWord(feedback_cell, + MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp)); + feedback_cell = no_reg; + // Update feedback vector cache. + static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp == + BaselineFrameConstants::kFeedbackVectorFromFp); __ StoreWord( feedback_vector, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + MemOperand(fp, InterpreterFrameConstants::kFeedbackVectorFromFp)); feedback_vector = no_reg; // Compute baseline pc for bytecode offset. 
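// NOTE: a hedged sketch (not itself a patch hunk) of the contract of the new
// MacroAssembler::LoadFeedbackVector helper that the interpreter entry
// trampolines above now call; the arm/arm64 implementations appear further
// down in this patch, and register names here are illustrative only:
//
//   // dst <- closure->feedback_cell()->value()
//   // if dst is not a FEEDBACK_VECTOR_TYPE object:
//   //   dst <- undefined; jump to fbv_undef
//   void MacroAssembler::LoadFeedbackVector(Register dst, Register closure,
//                                           Register scratch,
//                                           Label* fbv_undef);
//
// Callers pass &push_stack_frame as fbv_undef, so the "vector not yet
// allocated" case branches straight to frame setup and the tiering checks in
// the #ifndef V8_JITLESS block only run with a valid feedback vector.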
@@ -3975,6 +3965,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister); __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister); FrameScope scope(masm, StackFrame::INTERNAL); + __ PrepareCallCFunction(3, 0, a4); __ CallCFunction(get_baseline_pc, 3, 0); } __ LoadCodeInstructionStart(code_obj, code_obj); diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc index ab958664f4431d..38dbb900124ad3 100644 --- a/deps/v8/src/builtins/s390/builtins-s390.cc +++ b/deps/v8/src/builtins/s390/builtins-s390.cc @@ -171,13 +171,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, AssertCodeIsBaseline(masm, code_obj, r5); } - // Load the feedback vector. - Register feedback_vector = r4; - __ LoadTaggedField(feedback_vector, + // Load the feedback cell and vector. + Register feedback_cell = r4; + Register feedback_vector = r1; + __ LoadTaggedField(feedback_cell, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ LoadTaggedField( feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); + FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset)); Label install_baseline_code; // Check if feedback vector is valid. If not, call prepare for baseline to @@ -189,9 +190,17 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ LoadU64(kInterpreterBytecodeOffsetRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); __ SmiUntag(kInterpreterBytecodeOffsetRegister); - // Replace BytecodeOffset with the feedback vector. + // Replace bytecode offset with feedback cell. + static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp == + BaselineFrameConstants::kFeedbackCellFromFp); + __ StoreU64(feedback_cell, + MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp)); + feedback_cell = no_reg; + // Update feedback vector cache. + static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp == + BaselineFrameConstants::kFeedbackVectorFromFp); __ StoreU64(feedback_vector, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + MemOperand(fp, InterpreterFrameConstants::kFeedbackVectorFromFp)); feedback_vector = no_reg; // Compute baseline pc for bytecode offset. @@ -1239,13 +1248,14 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue); Register closure = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); - // Load the feedback vector from the closure. + // Load the feedback cell and vector from the closure. + Register feedback_cell = r6; Register feedback_vector = ip; - __ LoadTaggedField(feedback_vector, + __ LoadTaggedField(feedback_cell, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ LoadTaggedField( feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); + FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset)); __ AssertFeedbackVector(feedback_vector, r1); // Check for an tiering state. @@ -1298,14 +1308,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { __ Push(argc, bytecodeArray); - // Baseline code frames store the feedback vector where interpreter would - // store the bytecode offset. 
if (v8_flags.debug_code) { Register scratch = r1; __ CompareObjectType(feedback_vector, scratch, scratch, FEEDBACK_VECTOR_TYPE); __ Assert(eq, AbortReason::kExpectedFeedbackVector); } + __ Push(feedback_cell); __ Push(feedback_vector); } @@ -1364,9 +1373,9 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) { // We're here because we got deopted during BaselineOutOfLinePrologue's stack // check. Undo all its frame creation and call into the interpreter instead. - // Drop bytecode offset (was the feedback vector but got replaced during - // deopt) and bytecode array. - __ Drop(2); + // Drop the feedback vector, the bytecode offset (was the feedback vector but + // got replaced during deopt) and bytecode array. + __ Drop(3); // Context, closure, argc. __ Pop(kContextRegister, kJavaScriptCallTargetRegister, @@ -1420,23 +1429,13 @@ void Builtins::Generate_InterpreterEntryTrampoline( BYTECODE_ARRAY_TYPE); __ bne(&compile_lazy); -#ifndef V8_JITLESS - // Load the feedback vector from the closure. + Label push_stack_frame; Register feedback_vector = r4; - __ LoadTaggedField(feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedField( - feedback_vector, - FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset)); + __ LoadFeedbackVector(feedback_vector, closure, r6, &push_stack_frame); - Label push_stack_frame; - // Check if feedback vector is valid. If valid, check for optimized code - // and update invocation count. Otherwise, setup the stack frame. - __ LoadTaggedField(r6, - FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); - __ LoadU16(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset)); - __ CmpS64(r6, Operand(FEEDBACK_VECTOR_TYPE)); - __ bne(&push_stack_frame); +#ifndef V8_JITLESS + // If feedback vector is valid, check for optimized code and update invocation + // count. Register flags = r6; Label flags_need_processing; @@ -1444,13 +1443,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION, &flags_need_processing); - { - UseScratchRegisterScope temps(masm); ResetFeedbackVectorOsrUrgency(masm, feedback_vector, r1); - } - - Label not_optimized; - __ bind(¬_optimized); // Increment invocation count for the function. __ LoadS32(r1, FieldMemOperand(feedback_vector, @@ -1462,13 +1455,15 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Open a frame scope to indicate that there is a frame on the stack. The // MANUAL indicates that the scope shouldn't actually generate code to set up // the frame (that is done below). - __ bind(&push_stack_frame); + #else // Note: By omitting the above code in jitless mode we also disable: // - kFlagsLogNextExecution: only used for logging/profiling; and // - kInvocationCountOffset: only used for tiering heuristics and code // coverage. #endif // !V8_JITLESS + + __ bind(&push_stack_frame); FrameScope frame_scope(masm, StackFrame::MANUAL); __ PushStandardFrame(closure); @@ -1477,8 +1472,8 @@ void Builtins::Generate_InterpreterEntryTrampoline( Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); // Push bytecode array and Smi tagged bytecode array offset. - __ SmiTag(r4, kInterpreterBytecodeOffsetRegister); - __ Push(kInterpreterBytecodeArrayRegister, r4); + __ SmiTag(r0, kInterpreterBytecodeOffsetRegister); + __ Push(kInterpreterBytecodeArrayRegister, r0, feedback_vector); // Allocate the local and temporary register file on the stack. 
Label stack_overflow; @@ -3590,9 +3585,9 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, // from the API function here. MemOperand stack_space_operand = ExitFrameStackSlotOperand(FCA::kLengthOffset + kSlotsToDropOnStackSize); - __ mov(scratch, Operand((FCA::kArgsLength + 1 /* receiver */ + - exit_frame_params_count) * - kSystemPointerSize)); + __ mov(scratch, + Operand((FCA::kArgsLengthWithReceiver + exit_frame_params_count) * + kSystemPointerSize)); __ ShiftLeftU64(r1, argc, Operand(kSystemPointerSizeLog2)); __ AddS64(scratch, r1); __ StoreU64(scratch, stack_space_operand); diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc index 3010364d5017d2..de1ac56f79754d 100644 --- a/deps/v8/src/builtins/setup-builtins-internal.cc +++ b/deps/v8/src/builtins/setup-builtins-internal.cc @@ -242,10 +242,14 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) { ++builtin) { Tagged code = builtins->code(builtin); Tagged istream = code->instruction_stream(); - CodePageMemoryModificationScope code_modification_scope(istream); + WritableJitAllocation jit_allocation = ThreadIsolation::LookupJitAllocation( + istream.address(), istream->Size(), + ThreadIsolation::JitAllocationType::kInstructionStream); bool flush_icache = false; - for (RelocIterator it(code, kRelocMask); !it.done(); it.next()) { - RelocInfo* rinfo = it.rinfo(); + for (WritableRelocIterator it(jit_allocation, istream, + code->constant_pool(), kRelocMask); + !it.done(); it.next()) { + WritableRelocInfo* rinfo = it.rinfo(); if (RelocInfo::IsCodeTargetMode(rinfo->rmode())) { Tagged target_code = Code::FromTargetAddress(rinfo->target_address()); diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq index a5260fc52a6525..93987c57f9c6d7 100644 --- a/deps/v8/src/builtins/typed-array-createtypedarray.tq +++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq @@ -5,8 +5,7 @@ #include 'src/builtins/builtins-constructor-gen.h' namespace typed_array { -extern builtin IterableToListMayPreserveHoles(Context, Object, Callable): - JSArray; +extern builtin IterableToListConvertHoles(Context, Object, Callable): JSArray; extern macro TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer( implicit context: Context)(): JSArrayBuffer; @@ -201,7 +200,7 @@ transitioning macro ConstructByIterable( iteratorFn: Callable): never labels IfConstructByArrayLike(JSArray, uintptr) { const array: JSArray = - IterableToListMayPreserveHoles(context, iterable, iteratorFn); + IterableToListConvertHoles(context, iterable, iteratorFn); // Max JSArray length is a valid JSTypedArray length so we just use it. goto IfConstructByArrayLike(array, array.length_uintptr); } @@ -298,8 +297,7 @@ transitioning macro ConstructByArrayBuffer( if (bufferByteLength < offset) goto IfInvalidOffset; newByteLength = bufferByteLength - offset; - newLength = elementsInfo.CalculateLength(newByteLength) - otherwise IfInvalidLength; + newLength = elementsInfo.CalculateLength(newByteLength); } else { // b. Else, // i. Let newByteLength be newLength × elementSize. diff --git a/deps/v8/src/builtins/typed-array-subarray.tq b/deps/v8/src/builtins/typed-array-subarray.tq index 5cc481c98059d3..ac42bb13ab5186 100644 --- a/deps/v8/src/builtins/typed-array-subarray.tq +++ b/deps/v8/src/builtins/typed-array-subarray.tq @@ -13,7 +13,7 @@ transitioning javascript builtin TypedArrayPrototypeSubArray( // 2. Perform ? RequireInternalSlot(O, [[TypedArrayName]]). 
const source = Cast(receiver) otherwise ThrowTypeError( - MessageTemplate::kIncompatibleMethodReceiver, methodName); + MessageTemplate::kIncompatibleMethodReceiver, methodName, receiver); // 3. Assert: O has a [[ViewedArrayBuffer]] internal slot. // 4. Let buffer be O.[[ViewedArrayBuffer]]. diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq index 404217a9fd2784..bcb293eb542437 100644 --- a/deps/v8/src/builtins/typed-array.tq +++ b/deps/v8/src/builtins/typed-array.tq @@ -25,7 +25,6 @@ type RabGsabUint8Elements extends ElementsKind; struct TypedArrayElementsInfo { // Calculates the number of bytes required for specified number of elements. macro CalculateByteLength(length: uintptr): uintptr labels IfInvalid { - if (length > kTypedArrayMaxLength) goto IfInvalid; const maxArrayLength = kArrayBufferMaxByteLength >>> this.sizeLog2; if (length > maxArrayLength) goto IfInvalid; const byteLength = length << this.sizeLog2; @@ -34,10 +33,8 @@ struct TypedArrayElementsInfo { // Calculates the maximum number of elements supported by a specified number // of bytes. - macro CalculateLength(byteLength: uintptr): uintptr labels IfInvalid { - const length = byteLength >>> this.sizeLog2; - if (length > kTypedArrayMaxLength) goto IfInvalid; - return length; + macro CalculateLength(byteLength: uintptr): uintptr { + return byteLength >>> this.sizeLog2; } // Determines if `bytes` (byte offset or length) cannot be evenly divided by diff --git a/deps/v8/src/builtins/wasm-to-js.tq b/deps/v8/src/builtins/wasm-to-js.tq index 82751b72478f42..f0b379fe67358b 100644 --- a/deps/v8/src/builtins/wasm-to-js.tq +++ b/deps/v8/src/builtins/wasm-to-js.tq @@ -62,7 +62,10 @@ transitioning macro WasmToJSWrapper(ref: WasmApiFunctionRef): WasmToJSResult { const paramTypes = Subslice(serializedSig, returnCount + 1, paramCount) otherwise unreachable; - const outParams = WasmAllocateFixedArray(paramCount + 1); + // The number of parameters that get pushed on the stack is (at least) the + // number of incoming parameters plus the receiver. + const numStackParams = paramCount + 1; + const outParams = WasmAllocateZeroedFixedArray(numStackParams); let nextIndex: intptr = 0; // Set the receiver to `Undefined` as the default. If the receiver would be // different, e.g. the global proxy for sloppy functions, then the CallVarargs @@ -134,18 +137,17 @@ transitioning macro WasmToJSWrapper(ref: WasmApiFunctionRef): WasmToJSResult { const context = ref.native_context; // Reset the signature on the stack, so that incoming parameters don't get - // scanned anymore. This performance optimization is possible because the - // incoming parameters are not accessed anymore. + // scanned anymore. *GetRefAt(sigSlot, 0) = 0; const result = CallVarargs( - context, target, 0, Convert(paramCount) + 1, outParams); + context, target, 0, Convert(numStackParams), outParams); // Put a marker on the stack to indicate to the frame iterator that the call // to JavaScript is finished. For asm.js source positions it is important to // know if an exception happened in the call to JS, or in the ToNumber // conversion afterwards. 
- *GetRefAt(sigSlot, 0) = BitcastTaggedToWord(SmiConstant(2)); + *GetRefAt(sigSlot, 0) = BitcastTaggedToWord(SmiConstant(-1)); let resultFixedArray: FixedArray; if (returnCount > 1) { resultFixedArray = diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq index 622d0bea4127cb..d38d35f7cd2fd6 100644 --- a/deps/v8/src/builtins/wasm.tq +++ b/deps/v8/src/builtins/wasm.tq @@ -23,7 +23,9 @@ extern runtime WasmFunctionTableSet( Context, WasmInstanceObject, Smi, Smi, Object): JSAny; extern runtime ThrowRangeError(Context, Smi): never; extern runtime ThrowWasmError(Context, Smi): never; +extern runtime WasmThrowRangeError(Context, Smi): never; extern runtime WasmThrowTypeError(Context, Smi, JSAny): never; +extern runtime WasmThrowTypeErrorTwoArgs(Context, Smi, JSAny, JSAny): never; extern runtime WasmThrow(Context, Object, FixedArray): JSAny; extern runtime WasmReThrow(Context, Object): JSAny; extern runtime WasmTriggerTierUp(Context, WasmInstanceObject): JSAny; @@ -331,6 +333,14 @@ builtin WasmInternalFunctionCreateExternal( return runtime::WasmInternalFunctionCreateExternal(context, func); } +builtin WasmAllocateZeroedFixedArray(size: intptr): FixedArray { + if (size == 0) return kEmptyFixedArray; + const result = UnsafeCast(AllocateFixedArray( + ElementsKind::PACKED_ELEMENTS, size, AllocationFlag::kNone)); + FillEntireFixedArrayWithSmiZero(ElementsKind::PACKED_ELEMENTS, result, size); + return result; +} + builtin WasmAllocateFixedArray(size: intptr): FixedArray { if (size == 0) return kEmptyFixedArray; return UnsafeCast(AllocateFixedArray( @@ -1074,6 +1084,26 @@ builtin ThrowIndexOfCalledOnNull(): JSAny { runtime::WasmThrowTypeError(context, SmiConstant(error), name); } +builtin ThrowDataViewGetInt32DetachedError(): JSAny { + const context = LoadContextFromFrame(); + const error = MessageTemplate::kDetachedOperation; + const name = StringConstant('DataView.prototype.getInt32'); + runtime::WasmThrowTypeError(context, SmiConstant(error), name); +} + +builtin ThrowDataViewGetInt32OutOfBounds(): JSAny { + const context = LoadContextFromFrame(); + const error = MessageTemplate::kInvalidDataViewAccessorOffset; + runtime::WasmThrowRangeError(context, SmiConstant(error)); +} + +builtin ThrowDataViewGetInt32TypeError(value: JSAny): JSAny { + const context = LoadContextFromFrame(); + const error = MessageTemplate::kIncompatibleMethodReceiver; + const name = StringConstant('DataView.prototype.getInt32'); + runtime::WasmThrowTypeErrorTwoArgs(context, SmiConstant(error), name, value); +} + builtin WasmStringConcat(a: String, b: String): String { const context = LoadContextFromFrame(); tail StringAdd_CheckNone(a, b); diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index 55fa656cb01791..01466781f8dcf8 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -1029,22 +1029,14 @@ void Builtins::Generate_InterpreterEntryTrampoline( kScratchRegister); __ j(not_equal, &compile_lazy); -#ifndef V8_JITLESS - // Load the feedback vector from the closure. - Register feedback_vector = rbx; - TaggedRegister feedback_cell(feedback_vector); - __ LoadTaggedField(feedback_cell, - FieldOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedField(feedback_vector, - FieldOperand(feedback_cell, FeedbackCell::kValueOffset)); - Label push_stack_frame; - // Check if feedback vector is valid. If valid, check for optimized code - // and update invocation count. Otherwise, setup the stack frame. 
- __ IsObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, rcx); - __ j(not_equal, &push_stack_frame); + Register feedback_vector = rbx; + __ LoadFeedbackVector(feedback_vector, closure, &push_stack_frame, + Label::kNear); - // Check the tiering state. +#ifndef V8_JITLESS + // If feedback vector is valid, check for optimized code and update invocation + // count. Label flags_need_processing; __ CheckFeedbackVectorFlagsAndJumpIfNeedsProcessing( feedback_vector, CodeKind::INTERPRETED_FUNCTION, &flags_need_processing); @@ -1058,13 +1050,14 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Open a frame scope to indicate that there is a frame on the stack. The // MANUAL indicates that the scope shouldn't actually generate code to set up // the frame (that is done below). - __ bind(&push_stack_frame); #else // Note: By omitting the above code in jitless mode we also disable: // - kFlagsLogNextExecution: only used for logging/profiling; and // - kInvocationCountOffset: only used for tiering heuristics and code // coverage. #endif // !V8_JITLESS + + __ bind(&push_stack_frame); FrameScope frame_scope(masm, StackFrame::MANUAL); __ pushq(rbp); // Caller's frame pointer. __ movq(rbp, rsp); @@ -1081,6 +1074,9 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ SmiTag(rcx, kInterpreterBytecodeOffsetRegister); __ Push(rcx); + // Push feedback vector. + __ Push(feedback_vector); + // Allocate the local and temporary register file on the stack. Label stack_overflow; { @@ -1710,7 +1706,8 @@ void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) { // static void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { - Register feedback_vector = r8; + Register feedback_cell = r8; + Register feedback_vector = r11; Register return_address = r15; #ifdef DEBUG @@ -1723,8 +1720,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue); Register closure = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); - // Load the feedback vector from the closure. - TaggedRegister feedback_cell(feedback_vector); + // Load the feedback cell and vector from the closure. __ LoadTaggedField(feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset)); __ LoadTaggedField(feedback_vector, @@ -1769,9 +1765,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { Register bytecode_array = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); __ Push(bytecode_array); - - // Baseline code frames store the feedback vector where interpreter would - // store the bytecode offset. + __ Push(feedback_cell); __ Push(feedback_vector); } @@ -1845,6 +1839,8 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) { // We're here because we got deopted during BaselineOutOfLinePrologue's stack // check. Undo all its frame creation and call into the interpreter instead. + // Drop feedback vector. + __ Pop(kScratchRegister); // Drop bytecode offset (was the feedback vector but got replaced during // deopt). __ Pop(kScratchRegister); @@ -2416,7 +2412,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, Label convert_to_object, convert_receiver; __ movq(rcx, args.GetReceiverOperand()); __ JumpIfSmi(rcx, &convert_to_object, Label::kNear); - __ JumpIfJSAnyIsNotPrimitive(rcx, rbx, &done_convert, Label::kNear); + __ JumpIfJSAnyIsNotPrimitive(rcx, rbx, &done_convert, + DEBUG_BOOL ? 
Label::kFar : Label::kNear); if (mode != ConvertReceiverMode::kNotNullOrUndefined) { Label convert_global_proxy; __ JumpIfRoot(rcx, RootIndex::kUndefinedValue, &convert_global_proxy, @@ -4336,7 +4333,7 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, // FunctionCallbackInfo::values_ (points at the first varargs argument // passed on the stack). __ leaq(holder, - Operand(holder, (FCA::kArgsLength + 1) * kSystemPointerSize)); + Operand(holder, FCA::kArgsLengthWithReceiver * kSystemPointerSize)); __ movq(ExitFrameStackSlotOperand(FCA::kValuesOffset), holder); // FunctionCallbackInfo::length_. @@ -4350,9 +4347,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, (kApiStackSpace - 1) * kSystemPointerSize); __ leaq(kScratchRegister, Operand(argc, times_system_pointer_size, - (FCA::kArgsLength + exit_frame_params_count) * - kSystemPointerSize + - kReceiverOnStackSize)); + (FCA::kArgsLengthWithReceiver + exit_frame_params_count) * + kSystemPointerSize)); __ movq(ExitFrameStackSlotOperand(kBytesToDropOffset), kScratchRegister); __ RecordComment("v8::FunctionCallback's argument."); @@ -4374,9 +4370,10 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, const bool with_profiling = mode != CallApiCallbackMode::kOptimizedNoProfiling; + Label* no_done = nullptr; CallApiFunctionAndReturn(masm, with_profiling, api_function_address, thunk_ref, thunk_arg, kUseExitFrameStackSlotOperand, - &stack_space_operand, return_value_operand); + &stack_space_operand, return_value_operand, no_done); } void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { @@ -4491,9 +4488,10 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { Operand* const kUseStackSpaceConstant = nullptr; const bool with_profiling = true; - CallApiFunctionAndReturn(masm, with_profiling, api_function_address, - thunk_ref, thunk_arg, kStackUnwindSpace, - kUseStackSpaceConstant, return_value_operand); + Label* no_done = nullptr; + CallApiFunctionAndReturn( + masm, with_profiling, api_function_address, thunk_ref, thunk_arg, + kStackUnwindSpace, kUseStackSpaceConstant, return_value_operand, no_done); } void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { @@ -4759,10 +4757,9 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, AssertCodeIsBaseline(masm, code_obj, r11); } - // Load the feedback vector. + // Load the feedback cell and feedback vector. + Register feedback_cell = r8; Register feedback_vector = r11; - - TaggedRegister feedback_cell(feedback_vector); __ LoadTaggedField(feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset)); __ LoadTaggedField(feedback_vector, @@ -4774,12 +4771,20 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ IsObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister); __ j(not_equal, &install_baseline_code); - // Save BytecodeOffset from the stack frame. + // Save bytecode offset from the stack frame. __ SmiUntagUnsigned( kInterpreterBytecodeOffsetRegister, MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); - // Replace BytecodeOffset with the feedback vector. - __ movq(MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), + // Replace bytecode offset with feedback cell. + static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp == + BaselineFrameConstants::kFeedbackCellFromFp); + __ movq(MemOperand(rbp, BaselineFrameConstants::kFeedbackCellFromFp), + feedback_cell); + feedback_cell = no_reg; + // Update feedback vector cache. 
+ static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp == + BaselineFrameConstants::kFeedbackVectorFromFp); + __ movq(MemOperand(rbp, InterpreterFrameConstants::kFeedbackVectorFromFp), feedback_vector); feedback_vector = no_reg; diff --git a/deps/v8/src/codegen/arm/assembler-arm-inl.h b/deps/v8/src/codegen/arm/assembler-arm-inl.h index 2021142b91047c..a29a9b422f723c 100644 --- a/deps/v8/src/codegen/arm/assembler-arm-inl.h +++ b/deps/v8/src/codegen/arm/assembler-arm-inl.h @@ -53,7 +53,7 @@ int DoubleRegister::SupportedRegisterCount() { return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16; } -void RelocInfo::apply(intptr_t delta) { +void WritableRelocInfo::apply(intptr_t delta) { if (RelocInfo::IsInternalReference(rmode_)) { // absolute code pointer inside code object moves with the code object. int32_t* p = reinterpret_cast(pc_); @@ -94,7 +94,7 @@ int RelocInfo::target_address_size() { return kPointerSize; } Tagged RelocInfo::target_object(PtrComprCageBase cage_base) { DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)); return HeapObject::cast( - Object(Assembler::target_address_at(pc_, constant_pool_))); + Tagged(Assembler::target_address_at(pc_, constant_pool_))); } Handle RelocInfo::target_object_handle(Assembler* origin) { @@ -106,8 +106,8 @@ Handle RelocInfo::target_object_handle(Assembler* origin) { return origin->relative_code_target_object_handle_at(pc_); } -void RelocInfo::set_target_object(Tagged target, - ICacheFlushMode icache_flush_mode) { +void WritableRelocInfo::set_target_object(Tagged target, + ICacheFlushMode icache_flush_mode) { DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)); Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), icache_flush_mode); @@ -118,7 +118,7 @@ Address RelocInfo::target_external_reference() { return Assembler::target_address_at(pc_, constant_pool_); } -void RelocInfo::set_target_external_reference( +void WritableRelocInfo::set_target_external_reference( Address target, ICacheFlushMode icache_flush_mode) { DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE); Assembler::set_target_address_at(pc_, constant_pool_, target, @@ -142,17 +142,6 @@ Address RelocInfo::target_off_heap_target() { return Assembler::target_address_at(pc_, constant_pool_); } -void RelocInfo::WipeOut() { - DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) || - IsExternalReference(rmode_) || IsInternalReference(rmode_) || - IsOffHeapTarget(rmode_)); - if (IsInternalReference(rmode_)) { - Memory
(pc_) = kNullAddress; - } else { - Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress); - } -} - Handle Assembler::relative_code_target_object_handle_at( Address pc) const { Instruction* branch = Instruction::At(pc); diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc index 6ab47b8f629025..7404c06bd4f7d9 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc @@ -28,10 +28,6 @@ #include "src/runtime/runtime.h" #include "src/snapshot/snapshot.h" -#if V8_ENABLE_WEBASSEMBLY -#include "src/wasm/wasm-code-manager.h" -#endif // V8_ENABLE_WEBASSEMBLY - // Satisfy cpplint check, but don't include platform-specific header. It is // included recursively via macro-assembler.h. #if 0 @@ -742,7 +738,8 @@ void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address); #if V8_ENABLE_WEBASSEMBLY if (mode == StubCallMode::kCallWasmRuntimeStub) { - auto wasm_target = wasm::WasmCode::GetRecordWriteStub(fp_mode); + auto wasm_target = + static_cast
(wasm::WasmCode::GetRecordWriteBuiltin(fp_mode)); Call(wasm_target, RelocInfo::WASM_STUB_CALL); #else if (false) { @@ -1894,7 +1891,7 @@ void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, #if V8_ENABLE_WEBASSEMBLY if (stub_mode == StubCallMode::kCallWasmRuntimeStub) { - Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL); + Call(static_cast
(Builtin::kDoubleToI), RelocInfo::WASM_STUB_CALL); #else // For balance. if (false) { @@ -1955,6 +1952,12 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm, } // namespace #ifdef V8_ENABLE_DEBUG_CODE +void MacroAssembler::AssertFeedbackCell(Register object, Register scratch) { + if (v8_flags.debug_code) { + CompareObjectType(object, scratch, scratch, FEEDBACK_CELL_TYPE); + Assert(eq, AbortReason::kExpectedFeedbackCell); + } +} void MacroAssembler::AssertFeedbackVector(Register object) { if (v8_flags.debug_code) { UseScratchRegisterScope temps(this); @@ -2361,6 +2364,27 @@ void MacroAssembler::LoadMap(Register destination, Register object) { ldr(destination, FieldMemOperand(object, HeapObject::kMapOffset)); } +void MacroAssembler::LoadFeedbackVector(Register dst, Register closure, + Register scratch, Label* fbv_undef) { + Label done; + + // Load the feedback vector from the closure. + ldr(dst, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + ldr(dst, FieldMemOperand(dst, FeedbackCell::kValueOffset)); + + // Check if feedback vector is valid. + ldr(scratch, FieldMemOperand(dst, HeapObject::kMapOffset)); + ldrh(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + cmp(scratch, Operand(FEEDBACK_VECTOR_TYPE)); + b(eq, &done); + + // Not valid, load undefined. + LoadRoot(dst, RootIndex::kUndefinedValue); + b(fbv_undef); + + bind(&done); +} + void MacroAssembler::LoadGlobalProxy(Register dst) { ASM_CODE_COMMENT(this); LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX); @@ -3053,7 +3077,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling, Register function_address, ExternalReference thunk_ref, Register thunk_arg, int stack_space, MemOperand* stack_space_operand, - MemOperand return_value_operand) { + MemOperand return_value_operand, Label* done) { ASM_CODE_COMMENT(masm); using ER = ExternalReference; @@ -3179,7 +3203,11 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling, __ AssertJSAny(return_value, scratch, scratch2, AbortReason::kAPICallReturnedInvalidObject); - __ mov(pc, lr); + if (done) { + __ b(done); + } else { + __ mov(pc, lr); + } if (with_profiling) { ASM_CODE_COMMENT_STRING(masm, "Call the api function via thunk wrapper."); diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h index 2bf55c94b8fd7d..e8a77168b6eaaf 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.h +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h @@ -564,6 +564,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void LoadMap(Register destination, Register object); + void LoadFeedbackVector(Register dst, Register closure, Register scratch, + Label* fbv_undef); + void PushAll(RegList registers) { if (registers.is_empty()) return; ASM_CODE_COMMENT(this); @@ -859,6 +862,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { MemOperand ReceiverOperand() { return MemOperand(sp, 0); } // Tiering support. 
+ void AssertFeedbackCell(Register object, + Register scratch) NOOP_UNLESS_DEBUG_CODE; void AssertFeedbackVector(Register object) NOOP_UNLESS_DEBUG_CODE; void ReplaceClosureCodeWithOptimizedCode(Register optimized_code, Register closure); @@ -1063,7 +1068,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling, Register function_address, ExternalReference thunk_ref, Register thunk_arg, int stack_space, MemOperand* stack_space_operand, - MemOperand return_value_operand); + MemOperand return_value_operand, Label* done); #define ACCESS_MASM(masm) masm-> diff --git a/deps/v8/src/codegen/arm/register-arm.h b/deps/v8/src/codegen/arm/register-arm.h index 15bc1e88f05717..8e72d19b6e4176 100644 --- a/deps/v8/src/codegen/arm/register-arm.h +++ b/deps/v8/src/codegen/arm/register-arm.h @@ -299,6 +299,7 @@ DEFINE_REGISTER_NAMES(QwNeonRegister, SIMD128_REGISTERS) DEFINE_REGISTER_NAMES(CRegister, C_REGISTERS) // Give alias names to registers for calling conventions. +constexpr Register kStackPointerRegister = sp; constexpr Register kReturnRegister0 = r0; constexpr Register kReturnRegister1 = r1; constexpr Register kReturnRegister2 = r2; diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h index 4fcf4782570342..aefda6cfcc811e 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h @@ -21,7 +21,7 @@ namespace internal { bool CpuFeatures::SupportsOptimizer() { return true; } -void RelocInfo::apply(intptr_t delta) { +void WritableRelocInfo::apply(intptr_t delta) { // On arm64 only internal references and immediate branches need extra work. if (RelocInfo::IsInternalReference(rmode_)) { // Absolute code pointer inside code object moves with the code object. @@ -675,7 +675,7 @@ Tagged RelocInfo::target_object(PtrComprCageBase cage_base) { return HeapObject::cast(obj); } else { return HeapObject::cast( - Object(Assembler::target_address_at(pc_, constant_pool_))); + Tagged(Assembler::target_address_at(pc_, constant_pool_))); } } @@ -688,8 +688,8 @@ Handle RelocInfo::target_object_handle(Assembler* origin) { } } -void RelocInfo::set_target_object(Tagged target, - ICacheFlushMode icache_flush_mode) { +void WritableRelocInfo::set_target_object(Tagged target, + ICacheFlushMode icache_flush_mode) { DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_)); if (IsCompressedEmbeddedObject(rmode_)) { Assembler::set_target_compressed_address_at( @@ -708,7 +708,7 @@ Address RelocInfo::target_external_reference() { return Assembler::target_address_at(pc_, constant_pool_); } -void RelocInfo::set_target_external_reference( +void WritableRelocInfo::set_target_external_reference( Address target, ICacheFlushMode icache_flush_mode) { DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE); Assembler::set_target_address_at(pc_, constant_pool_, target, @@ -735,20 +735,6 @@ Address RelocInfo::target_off_heap_target() { return Assembler::target_address_at(pc_, constant_pool_); } -void RelocInfo::WipeOut() { - DCHECK(IsEmbeddedObjectMode(rmode_) || IsCodeTarget(rmode_) || - IsExternalReference(rmode_) || IsInternalReference(rmode_) || - IsOffHeapTarget(rmode_)); - if (IsInternalReference(rmode_)) { - WriteUnalignedValue
(pc_, kNullAddress); - } else if (IsCompressedEmbeddedObject(rmode_)) { - Assembler::set_target_compressed_address_at(pc_, constant_pool_, - kNullAddress); - } else { - Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress); - } -} - LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) { DCHECK(rt.is_valid()); if (rt.IsRegister()) { diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index 2adc936f63c08e..6903a94de7a109 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -24,10 +24,6 @@ #include "src/runtime/runtime.h" #include "src/snapshot/snapshot.h" -#if V8_ENABLE_WEBASSEMBLY -#include "src/wasm/wasm-code-manager.h" -#endif // V8_ENABLE_WEBASSEMBLY - // Satisfy cpplint check, but don't include platform-specific header. It is // included recursively via macro-assembler.h. #if 0 @@ -1067,81 +1063,81 @@ void MacroAssembler::B(Label* label, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK((cond != al) && (cond != nv)); - Label done; bool need_extra_instructions = NeedExtraInstructionsOrRegisterBranch(label); - if (need_extra_instructions) { + if (V8_UNLIKELY(need_extra_instructions)) { + Label done; b(&done, NegateCondition(cond)); B(label); + bind(&done); } else { b(label, cond); } - bind(&done); } void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { DCHECK(allow_macro_instructions()); - Label done; bool need_extra_instructions = NeedExtraInstructionsOrRegisterBranch(label); - if (need_extra_instructions) { + if (V8_UNLIKELY(need_extra_instructions)) { + Label done; tbz(rt, bit_pos, &done); B(label); + bind(&done); } else { tbnz(rt, bit_pos, label); } - bind(&done); } void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { DCHECK(allow_macro_instructions()); - Label done; bool need_extra_instructions = NeedExtraInstructionsOrRegisterBranch(label); - if (need_extra_instructions) { + if (V8_UNLIKELY(need_extra_instructions)) { + Label done; tbnz(rt, bit_pos, &done); B(label); + bind(&done); } else { tbz(rt, bit_pos, label); } - bind(&done); } void MacroAssembler::Cbnz(const Register& rt, Label* label) { DCHECK(allow_macro_instructions()); - Label done; bool need_extra_instructions = NeedExtraInstructionsOrRegisterBranch(label); - if (need_extra_instructions) { + if (V8_UNLIKELY(need_extra_instructions)) { + Label done; cbz(rt, &done); B(label); + bind(&done); } else { cbnz(rt, label); } - bind(&done); } void MacroAssembler::Cbz(const Register& rt, Label* label) { DCHECK(allow_macro_instructions()); - Label done; bool need_extra_instructions = NeedExtraInstructionsOrRegisterBranch(label); - if (need_extra_instructions) { + if (V8_UNLIKELY(need_extra_instructions)) { + Label done; cbnz(rt, &done); B(label); + bind(&done); } else { cbz(rt, label); } - bind(&done); } // Pseudo-instructions. 
@@ -1447,6 +1443,12 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm, } // namespace #ifdef V8_ENABLE_DEBUG_CODE +void MacroAssembler::AssertFeedbackCell(Register object, Register scratch) { + if (v8_flags.debug_code) { + IsObjectType(object, scratch, scratch, FEEDBACK_CELL_TYPE); + Assert(eq, AbortReason::kExpectedFeedbackCell); + } +} void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) { if (v8_flags.debug_code) { IsObjectType(object, scratch, scratch, FEEDBACK_VECTOR_TYPE); @@ -1463,9 +1465,10 @@ void MacroAssembler::ReplaceClosureCodeWithOptimizedCode( AssertCode(optimized_code); StoreMaybeIndirectPointerField( optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset)); - RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code, - kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore, SmiCheck::kOmit, - kCodePointerType); + RecordWriteField( + closure, JSFunction::kCodeOffset, optimized_code, kLRHasNotBeenSaved, + SaveFPRegsMode::kIgnore, SmiCheck::kOmit, + SlotDescriptor::ForMaybeIndirectPointerSlot(kCodeIndirectPointerTag)); } void MacroAssembler::GenerateTailCallToReturnedCode( @@ -2447,8 +2450,13 @@ void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { void MacroAssembler::LoadCodeInstructionStart(Register destination, Register code_object) { ASM_CODE_COMMENT(this); - LoadCodeEntrypointField( - destination, FieldMemOperand(code_object, Code::kInstructionStartOffset)); +#ifdef V8_ENABLE_SANDBOX + LoadCodeEntrypointViaIndirectPointer( + destination, + FieldMemOperand(code_object, Code::kSelfIndirectPointerOffset)); +#else + Ldr(destination, FieldMemOperand(code_object, Code::kInstructionStartOffset)); +#endif } void MacroAssembler::CallCodeObject(Register code_object) { @@ -2472,11 +2480,11 @@ void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { void MacroAssembler::CallJSFunction(Register function_object) { Register code = kJavaScriptCallCodeStartRegister; -#ifdef V8_CODE_POINTER_SANDBOXING +#ifdef V8_ENABLE_SANDBOX // When the sandbox is enabled, we can directly fetch the entrypoint pointer // from the code pointer table instead of going through the Code object. In // this way, we avoid one memory load on this code path. - LoadCodeEntrypointField( + LoadCodeEntrypointViaIndirectPointer( code, FieldMemOperand(function_object, JSFunction::kCodeOffset)); Call(code); #else @@ -2489,11 +2497,11 @@ void MacroAssembler::CallJSFunction(Register function_object) { void MacroAssembler::JumpJSFunction(Register function_object, JumpMode jump_mode) { Register code = kJavaScriptCallCodeStartRegister; -#ifdef V8_CODE_POINTER_SANDBOXING +#ifdef V8_ENABLE_SANDBOX // When the sandbox is enabled, we can directly fetch the entrypoint pointer // from the code pointer table instead of going through the Code object. In // this way, we avoid one memory load on this code path. - LoadCodeEntrypointField( + LoadCodeEntrypointViaIndirectPointer( code, FieldMemOperand(function_object, JSFunction::kCodeOffset)); DCHECK_EQ(jump_mode, JumpMode::kJump); // We jump through x17 here because for Branch Identification (BTI) we use @@ -2919,7 +2927,7 @@ void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, // DoubleToI preserves any registers it needs to clobber. #if V8_ENABLE_WEBASSEMBLY if (stub_mode == StubCallMode::kCallWasmRuntimeStub) { - Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL); + Call(static_cast
(Builtin::kDoubleToI), RelocInfo::WASM_STUB_CALL); #else // For balance. if (false) { @@ -3197,6 +3205,28 @@ void MacroAssembler::LoadMap(Register dst, Register object) { LoadTaggedField(dst, FieldMemOperand(object, HeapObject::kMapOffset)); } +void MacroAssembler::LoadFeedbackVector(Register dst, Register closure, + Register scratch, Label* fbv_undef) { + Label done; + + // Load the feedback vector from the closure. + LoadTaggedField(dst, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + LoadTaggedField(dst, FieldMemOperand(dst, FeedbackCell::kValueOffset)); + + // Check if feedback vector is valid. + LoadTaggedField(scratch, FieldMemOperand(dst, HeapObject::kMapOffset)); + Ldrh(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + Cmp(scratch, FEEDBACK_VECTOR_TYPE); + B(eq, &done); + + // Not valid, load undefined. + LoadRoot(dst, RootIndex::kUndefinedValue); + B(fbv_undef); + + Bind(&done); +} + // Sets condition flags based on comparison, and returns type in type_reg. void MacroAssembler::CompareInstanceType(Register map, Register type_reg, InstanceType type) { @@ -3412,7 +3442,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, - SmiCheck smi_check, PointerType type) { + SmiCheck smi_check, SlotDescriptor slot) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(object, value)); // First, check if a write barrier is even needed. The tests below @@ -3442,7 +3472,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, } RecordWrite(object, Operand(offset - kHeapObjectTag), value, lr_status, - save_fp, SmiCheck::kOmit, type); + save_fp, SmiCheck::kOmit, slot); Bind(&done); } @@ -3517,49 +3547,61 @@ void MacroAssembler::LoadExternalPointerField(Register destination, } void MacroAssembler::LoadIndirectPointerField(Register destination, - MemOperand field_operand) { + MemOperand field_operand, + IndirectPointerTag tag) { +#ifdef V8_ENABLE_SANDBOX ASM_CODE_COMMENT(this); -#ifdef V8_CODE_POINTER_SANDBOXING UseScratchRegisterScope temps(this); Register table = temps.AcquireX(); - static_assert(kAllIndirectPointerObjectsAreCode); - Mov(table, ExternalReference::code_pointer_table_address()); Ldr(destination.W(), field_operand); - Mov(destination, Operand(destination, LSR, kCodePointerHandleShift)); - Add(destination, table, - Operand(destination, LSL, kCodePointerTableEntrySizeLog2)); - Ldr(destination, - MemOperand(destination, - Immediate(kCodePointerTableEntryCodeObjectOffset))); + if (tag == kCodeIndirectPointerTag) { + Mov(table, ExternalReference::code_pointer_table_address()); + Mov(destination, Operand(destination, LSR, kCodePointerHandleShift)); + Add(destination, table, + Operand(destination, LSL, kCodePointerTableEntrySizeLog2)); + Ldr(destination, + MemOperand(destination, + Immediate(kCodePointerTableEntryCodeObjectOffset))); + } else { + CHECK(root_array_available_); + Mov(table, + ExternalReference::indirect_pointer_table_base_address(isolate())); + Mov(destination, Operand(destination, LSR, kIndirectPointerHandleShift)); + Ldr(destination, MemOperand(table, destination, LSL, + kIndirectPointerTableEntrySizeLog2)); + } Orr(destination, destination, Immediate(kHeapObjectTag)); #else - Ldr(destination, field_operand); -#endif // V8_CODE_POINTER_SANDBOXING + UNREACHABLE(); +#endif // V8_ENABLE_SANDBOX } void MacroAssembler::StoreIndirectPointerField(Register value, MemOperand dst_field_operand) { - DCHECK(V8_CODE_POINTER_SANDBOXING_BOOL); +#ifdef 
V8_ENABLE_SANDBOX UseScratchRegisterScope temps(this); Register scratch = temps.AcquireX(); - static_assert(kAllIndirectPointerObjectsAreCode); - Ldr(scratch.W(), FieldMemOperand(value, Code::kCodePointerTableEntryOffset)); + Ldr(scratch.W(), + FieldMemOperand(value, ExposedTrustedObject::kSelfIndirectPointerOffset)); Str(scratch.W(), dst_field_operand); +#else + UNREACHABLE(); +#endif } void MacroAssembler::StoreMaybeIndirectPointerField( Register value, MemOperand dst_field_operand) { -#ifdef V8_CODE_POINTER_SANDBOXING +#ifdef V8_ENABLE_SANDBOX StoreIndirectPointerField(value, dst_field_operand); #else StoreTaggedField(value, dst_field_operand); #endif } -void MacroAssembler::LoadCodeEntrypointField(Register destination, - MemOperand field_operand) { +void MacroAssembler::LoadCodeEntrypointViaIndirectPointer( + Register destination, MemOperand field_operand) { ASM_CODE_COMMENT(this); -#ifdef V8_CODE_POINTER_SANDBOXING +#ifdef V8_ENABLE_SANDBOX UseScratchRegisterScope temps(this); Register table = temps.AcquireX(); Mov(table, ExternalReference::code_pointer_table_address()); @@ -3569,81 +3611,99 @@ void MacroAssembler::LoadCodeEntrypointField(Register destination, Mov(destination, Operand(destination, LSL, kCodePointerTableEntrySizeLog2)); Ldr(destination, MemOperand(table, destination)); #else - Ldr(destination, field_operand); -#endif // V8_CODE_POINTER_SANDBOXING + UNREACHABLE(); +#endif // V8_ENABLE_SANDBOX } void MacroAssembler::MaybeSaveRegisters(RegList registers) { - if (registers.is_empty()) return; - ASM_CODE_COMMENT(this); - CPURegList regs(kXRegSizeInBits, registers); - // If we were saving LR, we might need to sign it. - DCHECK(!regs.IncludesAliasOf(lr)); - regs.Align(); - PushCPURegList(regs); + if (registers.is_empty()) return; + ASM_CODE_COMMENT(this); + CPURegList regs(kXRegSizeInBits, registers); + // If we were saving LR, we might need to sign it. + DCHECK(!regs.IncludesAliasOf(lr)); + regs.Align(); + PushCPURegList(regs); } void MacroAssembler::MaybeRestoreRegisters(RegList registers) { - if (registers.is_empty()) return; - ASM_CODE_COMMENT(this); - CPURegList regs(kXRegSizeInBits, registers); - // If we were saving LR, we might need to sign it. - DCHECK(!regs.IncludesAliasOf(lr)); - regs.Align(); - PopCPURegList(regs); + if (registers.is_empty()) return; + ASM_CODE_COMMENT(this); + CPURegList regs(kXRegSizeInBits, registers); + // If we were saving LR, we might need to sign it. 
+ DCHECK(!regs.IncludesAliasOf(lr)); + regs.Align(); + PopCPURegList(regs); } void MacroAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode) { - ASM_CODE_COMMENT(this); - RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object); - MaybeSaveRegisters(registers); + ASM_CODE_COMMENT(this); + RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object); + MaybeSaveRegisters(registers); - MoveObjectAndSlot(WriteBarrierDescriptor::ObjectRegister(), - WriteBarrierDescriptor::SlotAddressRegister(), object, - offset); + MoveObjectAndSlot(WriteBarrierDescriptor::ObjectRegister(), + WriteBarrierDescriptor::SlotAddressRegister(), object, + offset); - Call(isolate()->builtins()->code_handle( - Builtins::GetEphemeronKeyBarrierStub(fp_mode)), - RelocInfo::CODE_TARGET); - MaybeRestoreRegisters(registers); + Call(isolate()->builtins()->code_handle( + Builtins::GetEphemeronKeyBarrierStub(fp_mode)), + RelocInfo::CODE_TARGET); + MaybeRestoreRegisters(registers); +} + +void MacroAssembler::CallIndirectPointerBarrier(Register object, Operand offset, + SaveFPRegsMode fp_mode, + IndirectPointerTag tag) { + ASM_CODE_COMMENT(this); + RegList registers = + IndirectPointerWriteBarrierDescriptor::ComputeSavedRegisters(object); + MaybeSaveRegisters(registers); + + MoveObjectAndSlot( + IndirectPointerWriteBarrierDescriptor::ObjectRegister(), + IndirectPointerWriteBarrierDescriptor::SlotAddressRegister(), object, + offset); + Mov(IndirectPointerWriteBarrierDescriptor::IndirectPointerTagRegister(), + Operand(tag)); + + CallBuiltin(Builtins::GetIndirectPointerBarrierStub(fp_mode)); + MaybeRestoreRegisters(registers); } void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, - StubCallMode mode, - PointerType type) { - ASM_CODE_COMMENT(this); - RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object); - MaybeSaveRegisters(registers); + StubCallMode mode) { + ASM_CODE_COMMENT(this); + RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object); + MaybeSaveRegisters(registers); - Register object_parameter = WriteBarrierDescriptor::ObjectRegister(); - Register slot_address_parameter = - WriteBarrierDescriptor::SlotAddressRegister(); - MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset); + Register object_parameter = WriteBarrierDescriptor::ObjectRegister(); + Register slot_address_parameter = + WriteBarrierDescriptor::SlotAddressRegister(); + MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset); - CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode, mode, - type); + CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode, mode); - MaybeRestoreRegisters(registers); + MaybeRestoreRegisters(registers); } void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, - StubCallMode mode, PointerType type) { - ASM_CODE_COMMENT(this); - DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object); - DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address); + StubCallMode mode) { + ASM_CODE_COMMENT(this); + DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object); + DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address); #if V8_ENABLE_WEBASSEMBLY if (mode == StubCallMode::kCallWasmRuntimeStub) { - auto wasm_target = wasm::WasmCode::GetRecordWriteStub(fp_mode); + auto wasm_target = + static_cast
(wasm::WasmCode::GetRecordWriteBuiltin(fp_mode)); Call(wasm_target, RelocInfo::WASM_STUB_CALL); #else if (false) { #endif } else { - Builtin builtin = Builtins::GetRecordWriteStub(fp_mode, type); + Builtin builtin = Builtins::GetRecordWriteStub(fp_mode); CallBuiltin(builtin); } } @@ -3689,7 +3749,7 @@ void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot, void MacroAssembler::RecordWrite(Register object, Operand offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode, SmiCheck smi_check, - PointerType type) { + SlotDescriptor slot) { ASM_CODE_COMMENT(this); ASM_LOCATION_IN_ASSEMBLER("MacroAssembler::RecordWrite"); DCHECK(!AreAliased(object, value)); @@ -3700,9 +3760,11 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, Register temp = temps.AcquireX(); DCHECK(!AreAliased(object, value, temp)); Add(temp, object, offset); - if (type == PointerType::kIndirect) { - LoadIndirectPointerField(temp, MemOperand(temp)); + if (slot.contains_indirect_pointer()) { + LoadIndirectPointerField(temp, MemOperand(temp), + slot.indirect_pointer_tag()); } else { + DCHECK(slot.contains_direct_pointer()); LoadTaggedField(temp, MemOperand(temp)); } Cmp(temp, value); @@ -3733,11 +3795,17 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, } Register slot_address = WriteBarrierDescriptor::SlotAddressRegister(); DCHECK(!AreAliased(object, slot_address, value)); - // TODO(cbruni): Turn offset into int. - DCHECK(offset.IsImmediate()); - Add(slot_address, object, offset); - CallRecordWriteStub(object, slot_address, fp_mode, - StubCallMode::kCallBuiltinPointer, type); + if (slot.contains_direct_pointer()) { + // TODO(cbruni): Turn offset into int. + DCHECK(offset.IsImmediate()); + Add(slot_address, object, offset); + CallRecordWriteStub(object, slot_address, fp_mode, + StubCallMode::kCallBuiltinPointer); + } else { + DCHECK(slot.contains_indirect_pointer()); + CallIndirectPointerBarrier(object, offset, fp_mode, + slot.indirect_pointer_tag()); + } if (lr_status == kLRHasNotBeenSaved) { Pop(lr, padreg); } @@ -4231,7 +4299,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling, Register function_address, ExternalReference thunk_ref, Register thunk_arg, int stack_space, MemOperand* stack_space_operand, - MemOperand return_value_operand) { + MemOperand return_value_operand, Label* done) { ASM_CODE_COMMENT(masm); ASM_LOCATION("CallApiFunctionAndReturn"); @@ -4268,6 +4336,11 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling, scratch, scratch2, prev_next_address_reg, prev_limit_reg)); DCHECK(!AreAliased(thunk_arg, // incoming parameters scratch, scratch2, prev_next_address_reg, prev_limit_reg)); + + // Explicitly include x16/x17 to let StoreReturnAddressAndCall() use them. + UseScratchRegisterScope fix_temps(masm); + fix_temps.Include(x16, x17); + { ASM_CODE_COMMENT_STRING(masm, "Allocate HandleScope in callee-save registers."); @@ -4363,8 +4436,11 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling, // {stack_space_operand} was loaded into {stack_space_reg} above. 
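// --- Editorial note (not part of the patch) ---------------------------------
// The RecordWrite change above dispatches on the new SlotDescriptor: slots
// holding a direct/tagged pointer still go through CallRecordWriteStub, while
// slots holding an indirect pointer go through CallIndirectPointerBarrier with
// their IndirectPointerTag. Below is a minimal, self-contained sketch of that
// dispatch; Tag, Slot and EmitWriteBarrier are illustrative stand-ins, not V8 API.
#include <cassert>
#include <cstdio>

enum class Tag { kNull, kCode };  // stands in for IndirectPointerTag

struct Slot {  // stands in for SlotDescriptor
  Tag tag;
  static Slot ForDirectPointerSlot() { return {Tag::kNull}; }
  static Slot ForIndirectPointerSlot(Tag t) {
    assert(t != Tag::kNull);
    return {t};
  }
  bool contains_indirect_pointer() const { return tag != Tag::kNull; }
};

// Mirrors the shape of the barrier selection at the end of RecordWrite.
void EmitWriteBarrier(const Slot& slot) {
  if (!slot.contains_indirect_pointer()) {
    std::puts("CallRecordWriteStub(object, slot_address, fp_mode, ...)");
  } else {
    std::puts("CallIndirectPointerBarrier(object, offset, fp_mode, tag)");
  }
}

int main() {
  EmitWriteBarrier(Slot::ForDirectPointerSlot());              // ordinary tagged field
  EmitWriteBarrier(Slot::ForIndirectPointerSlot(Tag::kCode));  // e.g. JSFunction code slot
  return 0;
}
// ----------------------------------------------------------------------------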
__ DropArguments(stack_space_reg); } - - __ Ret(); + if (done) { + __ B(done); + } else { + __ Ret(); + } if (with_profiling) { ASM_CODE_COMMENT_STRING(masm, "Call the api function via thunk wrapper."); diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h index 96d6738768f472..1181366af2cd81 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -914,14 +914,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode); + void CallIndirectPointerBarrier(Register object, Operand offset, + SaveFPRegsMode fp_mode, + IndirectPointerTag tag); + void CallRecordWriteStubSaveRegisters( Register object, Operand offset, SaveFPRegsMode fp_mode, - StubCallMode mode = StubCallMode::kCallBuiltinPointer, - PointerType type = PointerType::kDirect); + StubCallMode mode = StubCallMode::kCallBuiltinPointer); void CallRecordWriteStub( Register object, Register slot_address, SaveFPRegsMode fp_mode, - StubCallMode mode = StubCallMode::kCallBuiltinPointer, - PointerType type = PointerType::kDirect); + StubCallMode mode = StubCallMode::kCallBuiltinPointer); // For a given |object| and |offset|: // - Move |object| to |dst_object|. @@ -1002,6 +1004,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void LoadMap(Register dst, Register object); void LoadCompressedMap(Register dst, Register object); + void LoadFeedbackVector(Register dst, Register closure, Register scratch, + Label* fbv_undef); + inline void Fmov(VRegister fd, VRegister fn); inline void Fmov(VRegister fd, Register rn); // Provide explicit double and float interfaces for FP immediate moves, rather @@ -1579,7 +1584,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { Register isolate_root = Register::no_reg()); // Loads an indirect pointer from the heap. - void LoadIndirectPointerField(Register destination, MemOperand field_operand); + void LoadIndirectPointerField(Register destination, MemOperand field_operand, + IndirectPointerTag tag); // Store an indirect pointer to the given object in the destination field. void StoreIndirectPointerField(Register value, MemOperand dst_field_operand); @@ -1589,10 +1595,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void StoreMaybeIndirectPointerField(Register value, MemOperand dst_field_operand); - // Laod a pointer to a code entrypoint from the heap. - // When the sandbox is enabled the pointer is loaded indirectly via the code - // pointer table, otherwise it is loaded direclty as a raw pointer. - void LoadCodeEntrypointField(Register destination, MemOperand field_operand); + // Load the pointer to a Code's entrypoint via an indirect pointer to the + // Code object. + // Only available when the sandbox is enabled. + void LoadCodeEntrypointViaIndirectPointer(Register destination, + MemOperand field_operand); // Instruction set functions ------------------------------------------------ // Logical macros. @@ -1897,6 +1904,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void PopCalleeSavedRegisters(); // Tiering support. 
+ void AssertFeedbackCell(Register object, + Register scratch) NOOP_UNLESS_DEBUG_CODE; inline void AssertFeedbackVector(Register object); void AssertFeedbackVector(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE; @@ -2167,17 +2176,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // stored. // The offset is the offset from the start of the object, not the offset from // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off). - void RecordWriteField(Register object, int offset, Register value, - LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, - SmiCheck smi_check = SmiCheck::kInline, - PointerType type = PointerType::kDirect); + void RecordWriteField( + Register object, int offset, Register value, LinkRegisterStatus lr_status, + SaveFPRegsMode save_fp, SmiCheck smi_check = SmiCheck::kInline, + SlotDescriptor slot = SlotDescriptor::ForDirectPointerSlot()); // For a given |object| notify the garbage collector that the slot at |offset| // has been written. |value| is the object being stored. - void RecordWrite(Register object, Operand offset, Register value, - LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, - SmiCheck smi_check = SmiCheck::kInline, - PointerType type = PointerType::kDirect); + void RecordWrite( + Register object, Operand offset, Register value, + LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, + SmiCheck smi_check = SmiCheck::kInline, + SlotDescriptor slot = SlotDescriptor::ForDirectPointerSlot()); // --------------------------------------------------------------------------- // Debugging. @@ -2475,7 +2485,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling, Register function_address, ExternalReference thunk_ref, Register thunk_arg, int stack_space, MemOperand* stack_space_operand, - MemOperand return_value_operand); + MemOperand return_value_operand, Label* done); } // namespace internal } // namespace v8 diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h index 12caf0b70fa4a9..27051355a9bbaf 100644 --- a/deps/v8/src/codegen/arm64/register-arm64.h +++ b/deps/v8/src/codegen/arm64/register-arm64.h @@ -45,9 +45,6 @@ namespace internal { ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V) \ MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V) -#define MAGLEV_SCRATCH_GENERAL_REGISTERS(R) \ - R(x16) R(x17) - #define FLOAT_REGISTERS(V) \ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \ V(s8) V(s9) V(s10) V(s11) V(s12) V(s13) V(s14) V(s15) \ @@ -494,6 +491,7 @@ GENERAL_REGISTER_CODE_LIST(DEFINE_VREGISTERS) #undef DEFINE_REGISTER // Registers aliases. +ALIAS_REGISTER(Register, kStackPointerRegister, sp); ALIAS_REGISTER(VRegister, v8_, v8); // Avoid conflicts with namespace v8. ALIAS_REGISTER(Register, ip0, x16); ALIAS_REGISTER(Register, ip1, x17); diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h index ff7c1d426954e2..e06ff1ac6cafa2 100644 --- a/deps/v8/src/codegen/assembler.h +++ b/deps/v8/src/codegen/assembler.h @@ -55,6 +55,7 @@ #include "src/flags/flags.h" #include "src/handles/handles.h" #include "src/objects/objects.h" +#include "src/sandbox/indirect-pointer-tag.h" #include "src/utils/ostreams.h" namespace v8 { @@ -264,6 +265,48 @@ class AssemblerBuffer { V8_WARN_UNUSED_RESULT = 0; }; +// Describes a HeapObject slot containing a pointer to another HeapObject. Such +// a slot can either contain a direct/tagged pointer, or an indirect pointer +// (i.e. 
an index into an indirect pointer table, which then contains the +// actual pointer to the object) together with a specific IndirectPointerTag. +class SlotDescriptor { + public: + bool contains_direct_pointer() const { + return indirect_pointer_tag_ == kIndirectPointerNullTag; + } + + bool contains_indirect_pointer() const { + return indirect_pointer_tag_ != kIndirectPointerNullTag; + } + + IndirectPointerTag indirect_pointer_tag() const { + DCHECK(contains_indirect_pointer()); + return indirect_pointer_tag_; + } + + static SlotDescriptor ForDirectPointerSlot() { + return SlotDescriptor(kIndirectPointerNullTag); + } + + static SlotDescriptor ForIndirectPointerSlot(IndirectPointerTag tag) { + return SlotDescriptor(tag); + } + + static SlotDescriptor ForMaybeIndirectPointerSlot(IndirectPointerTag tag) { +#ifdef V8_ENABLE_SANDBOX + return ForIndirectPointerSlot(tag); +#else + return ForDirectPointerSlot(); +#endif + } + + private: + SlotDescriptor(IndirectPointerTag tag) : indirect_pointer_tag_(tag) {} + + // If the tag is null, this object describes a direct pointer slot. + IndirectPointerTag indirect_pointer_tag_; +}; + // Allocate an AssemblerBuffer which uses an existing buffer. This buffer cannot // grow, so it must be large enough for all code emitted by the Assembler. V8_EXPORT_PRIVATE diff --git a/deps/v8/src/codegen/bailout-reason.h b/deps/v8/src/codegen/bailout-reason.h index 769595076114d4..434ad5072cd3f5 100644 --- a/deps/v8/src/codegen/bailout-reason.h +++ b/deps/v8/src/codegen/bailout-reason.h @@ -23,6 +23,7 @@ namespace internal { V(kExpectedOptimizationSentinel, \ "Expected optimized code cell or optimization sentinel") \ V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \ + V(kExpectedFeedbackCell, "Expected feedback cell") \ V(kExpectedFeedbackVector, "Expected feedback vector") \ V(kExpectedBaselineData, "Expected baseline data") \ V(kFloat64IsNotAInt32, \ @@ -100,7 +101,8 @@ namespace internal { V(kWrongFunctionContext, "Wrong context passed to function") \ V(kUnexpectedThreadInWasmSet, "thread_in_wasm flag was already set") \ V(kUnexpectedThreadInWasmUnset, "thread_in_wasm flag was not set") \ - V(kInvalidReceiver, "Expected JS object or primitive object") + V(kInvalidReceiver, "Expected JS object or primitive object") \ + V(kUnexpectedInstanceType, "Unexpected instance type encountered") #define BAILOUT_MESSAGES_LIST(V) \ V(kNoReason, "no reason") \ diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc index 5edbe168532fcc..cb143909a29154 100644 --- a/deps/v8/src/codegen/code-stub-assembler.cc +++ b/deps/v8/src/codegen/code-stub-assembler.cc @@ -1720,7 +1720,7 @@ void CodeStubAssembler::StoreBoundedSizeToObject(TNode object, TNode offset, TNode value) { #ifdef V8_ENABLE_SANDBOX - CSA_DCHECK(this, UintPtrLessThan( + CSA_DCHECK(this, UintPtrLessThanOrEqual( value, IntPtrConstant(kMaxSafeBufferSizeForSandbox))); TNode raw_value = ReinterpretCast(value); TNode shift_amount = Uint64Constant(kBoundedSizeShift); @@ -1806,7 +1806,7 @@ void CodeStubAssembler::StoreExternalPointerToObject(TNode object, #endif // V8_ENABLE_SANDBOX } -#ifdef V8_CODE_POINTER_SANDBOXING +#ifdef V8_ENABLE_SANDBOX TNode CodeStubAssembler::ComputeCodePointerTableEntryOffset( TNode object, TNode field_offset) { TNode handle = @@ -1823,24 +1823,24 @@ TNode CodeStubAssembler::ComputeCodePointerTableEntryOffset( Word32Shl(index, UniqueUint32Constant(kCodePointerTableEntrySizeLog2))); return offset; } -#endif // V8_CODE_POINTER_SANDBOXING 
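// --- Editorial note (not part of the patch) ---------------------------------
// Both the arm64 LoadCodeEntrypointViaIndirectPointer earlier in this patch and
// ComputeCodePointerTableEntryOffset above turn a 32-bit handle into a byte
// offset the same way: drop the low handle bits, then scale the resulting index
// by the table entry size. A self-contained sketch of that arithmetic follows;
// the two shift constants are illustrative placeholders, not V8's real values.
#include <cstdint>
#include <cstdio>

constexpr uint32_t kHandleShift = 9;    // stands in for kCodePointerHandleShift
constexpr uint32_t kEntrySizeLog2 = 4;  // stands in for kCodePointerTableEntrySizeLog2

// handle -> index -> byte offset into the (code) pointer table.
uint64_t EntryOffsetFromHandle(uint32_t handle) {
  uint64_t index = handle >> kHandleShift;
  return index << kEntrySizeLog2;
}

int main() {
  uint32_t handle = 3u << kHandleShift;  // pretend the field held handle index 3
  std::printf("table byte offset: %llu\n",
              static_cast<unsigned long long>(EntryOffsetFromHandle(handle)));
  return 0;
}
// ----------------------------------------------------------------------------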
+#endif // V8_ENABLE_SANDBOX -TNode CodeStubAssembler::LoadCodeEntrypointFromObject( +TNode CodeStubAssembler::LoadCodeEntrypointViaIndirectPointerField( TNode object, TNode field_offset) { -#ifdef V8_CODE_POINTER_SANDBOXING +#ifdef V8_ENABLE_SANDBOX TNode table = ExternalConstant(ExternalReference::code_pointer_table_address()); TNode offset = ComputeCodePointerTableEntryOffset(object, field_offset); return Load(table, offset); #else - return LoadObjectField(object, field_offset); -#endif // V8_CODE_POINTER_SANDBOXING + UNREACHABLE(); +#endif // V8_ENABLE_SANDBOX } TNode CodeStubAssembler::LoadIndirectPointerFromObject( TNode object, int field_offset) { -#ifdef V8_CODE_POINTER_SANDBOXING +#ifdef V8_ENABLE_SANDBOX TNode table = ExternalConstant(ExternalReference::code_pointer_table_address()); TNode offset = @@ -1855,16 +1855,16 @@ TNode CodeStubAssembler::LoadIndirectPointerFromObject( return UncheckedCast(BitcastWordToTagged(value)); #else UNREACHABLE(); -#endif // V8_CODE_POINTER_SANDBOXING +#endif // V8_ENABLE_SANDBOX } TNode CodeStubAssembler::LoadMaybeIndirectPointerFromObject( TNode object, int field_offset) { -#ifdef V8_CODE_POINTER_SANDBOXING +#ifdef V8_ENABLE_SANDBOX return LoadIndirectPointerFromObject(object, field_offset); #else return LoadObjectField(object, field_offset); -#endif // V8_CODE_POINTER_SANDBOXING +#endif // V8_ENABLE_SANDBOX } TNode CodeStubAssembler::LoadFromParentFrame(int offset) { @@ -3372,36 +3372,38 @@ void CodeStubAssembler::StoreObjectField(TNode object, } } -void CodeStubAssembler::StoreIndirectPointerField(TNode object, - int offset, - TNode value) { - DCHECK(V8_CODE_POINTER_SANDBOXING_BOOL); - OptimizedStoreIndirectPointerField(object, offset, value); +void CodeStubAssembler::StoreIndirectPointerField( + TNode object, int offset, IndirectPointerTag tag, + TNode value) { + DCHECK(V8_ENABLE_SANDBOX_BOOL); + OptimizedStoreIndirectPointerField(object, offset, tag, value); } void CodeStubAssembler::StoreIndirectPointerFieldNoWriteBarrier( - TNode object, int offset, TNode value) { - DCHECK(V8_CODE_POINTER_SANDBOXING_BOOL); - OptimizedStoreIndirectPointerFieldNoWriteBarrier(object, offset, value); + TNode object, int offset, IndirectPointerTag tag, + TNode value) { + DCHECK(V8_ENABLE_SANDBOX_BOOL); + OptimizedStoreIndirectPointerFieldNoWriteBarrier(object, offset, tag, value); } -void CodeStubAssembler::StoreMaybeIndirectPointerField(TNode object, - int offset, - TNode value) { -#ifdef V8_CODE_POINTER_SANDBOXING - StoreIndirectPointerField(object, offset, value); +void CodeStubAssembler::StoreMaybeIndirectPointerField( + TNode object, int offset, IndirectPointerTag tag, + TNode value) { +#ifdef V8_ENABLE_SANDBOX + StoreIndirectPointerField(object, offset, tag, value); #else StoreObjectField(object, offset, value); -#endif // V8_CODE_POINTER_SANDBOXING +#endif // V8_ENABLE_SANDBOX } void CodeStubAssembler::StoreMaybeIndirectPointerFieldNoWriteBarrier( - TNode object, int offset, TNode value) { -#ifdef V8_CODE_POINTER_SANDBOXING - StoreIndirectPointerFieldNoWriteBarrier(object, offset, value); + TNode object, int offset, IndirectPointerTag tag, + TNode value) { +#ifdef V8_ENABLE_SANDBOX + StoreIndirectPointerFieldNoWriteBarrier(object, offset, tag, value); #else StoreObjectFieldNoWriteBarrier(object, offset, value); -#endif // V8_CODE_POINTER_SANDBOXING +#endif // V8_ENABLE_SANDBOX } void CodeStubAssembler::UnsafeStoreObjectFieldNoWriteBarrier( @@ -11423,7 +11425,7 @@ TNode CodeStubAssembler::LoadFeedbackVectorForStub() { TNode 
CodeStubAssembler::LoadFeedbackVectorFromBaseline() { return CAST( - LoadFromParentFrame(InterpreterFrameConstants::kBytecodeOffsetFromFp)); + LoadFromParentFrame(BaselineFrameConstants::kFeedbackVectorFromFp)); } TNode CodeStubAssembler::LoadContextFromBaseline() { @@ -14719,7 +14721,8 @@ TNode CodeStubAssembler::Typeof(TNode value) { Label return_number(this, Label::kDeferred), if_oddball(this), return_function(this), return_undefined(this), return_object(this), - return_string(this), return_bigint(this), return_result(this); + return_string(this), return_bigint(this), return_symbol(this), + return_result(this); GotoIf(TaggedIsSmi(value), &return_number); @@ -14750,9 +14753,9 @@ TNode CodeStubAssembler::Typeof(TNode value) { GotoIf(IsBigIntInstanceType(instance_type), &return_bigint); - CSA_DCHECK(this, InstanceTypeEqual(instance_type, SYMBOL_TYPE)); - result_var = HeapConstant(isolate()->factory()->symbol_string()); - Goto(&return_result); + GotoIf(IsSymbolInstanceType(instance_type), &return_symbol); + + Abort(AbortReason::kUnexpectedInstanceType); BIND(&return_number); { @@ -14798,6 +14801,12 @@ TNode CodeStubAssembler::Typeof(TNode value) { Goto(&return_result); } + BIND(&return_symbol); + { + result_var = HeapConstant(isolate()->factory()->symbol_string()); + Goto(&return_result); + } + BIND(&return_result); return result_var.value(); } @@ -16056,7 +16065,14 @@ TNode CodeStubAssembler::GetSharedFunctionInfoCode( } TNode CodeStubAssembler::LoadCodeInstructionStart(TNode code) { - return LoadCodeEntrypointFromObject(code, Code::kInstructionStartOffset); +#ifdef V8_ENABLE_SANDBOX + // In this case, the entrypoint is stored in the code pointer table entry + // referenced via the Code object's 'self' indirect pointer. + return LoadCodeEntrypointViaIndirectPointerField( + code, Code::kSelfIndirectPointerOffset); +#else + return LoadObjectField(code, Code::kInstructionStartOffset); +#endif } TNode CodeStubAssembler::IsMarkedForDeoptimization(TNode code) { @@ -16088,7 +16104,7 @@ TNode CodeStubAssembler::AllocateFunctionWithMapAndContext( shared_info); StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context); StoreMaybeIndirectPointerFieldNoWriteBarrier(fun, JSFunction::kCodeOffset, - code); + kCodeIndirectPointerTag, code); return CAST(fun); } diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h index 24def3b60b9529..4b5b31434f4222 100644 --- a/deps/v8/src/codegen/code-stub-assembler.h +++ b/deps/v8/src/codegen/code-stub-assembler.h @@ -32,6 +32,7 @@ #include "src/objects/string.h" #include "src/objects/swiss-name-dictionary.h" #include "src/objects/tagged-index.h" +#include "src/objects/tagged.h" #include "src/roots/roots.h" #include "torque-generated/exported-macros-assembler.h" @@ -128,7 +129,13 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; V(ProxyRevokeSharedFun, proxy_revoke_shared_fun, ProxyRevokeSharedFun) \ V(ShadowRealmImportValueFulfilledSFI, \ shadow_realm_import_value_fulfilled_sfi, \ - ShadowRealmImportValueFulfilledSFI) + ShadowRealmImportValueFulfilledSFI) \ + V(ArrayFromAsyncOnFulfilledSharedFun, \ + array_from_async_on_fulfilled_shared_fun, \ + ArrayFromAsyncOnFulfilledSharedFun) \ + V(ArrayFromAsyncOnRejectedSharedFun, \ + array_from_async_on_rejected_shared_fun, \ + ArrayFromAsyncOnRejectedSharedFun) #define UNIQUE_INSTANCE_TYPE_IMMUTABLE_IMMOVABLE_MAP_ADAPTER( \ V, rootIndexName, rootAccessorName, class_name) \ @@ -178,6 +185,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, 
kSymbol }; IsConcatSpreadableSymbol) \ V(iterator_symbol, iterator_symbol, IteratorSymbol) \ V(keys_string, keys_string, KeysString) \ + V(async_iterator_symbol, async_iterator_symbol, AsyncIteratorSymbol) \ V(length_string, length_string, LengthString) \ V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \ V(match_symbol, match_symbol, MatchSymbol) \ @@ -230,6 +238,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; V(SeqTwoByteStringMap, seq_two_byte_string_map, SeqTwoByteStringMap) \ V(TheHoleValue, the_hole_value, TheHole) \ V(PropertyCellHoleValue, property_cell_hole_value, PropertyCellHole) \ + V(HashTableHoleValue, hash_table_hole_value, HashTableHole) \ V(then_string, then_string, ThenString) \ V(toJSON_string, toJSON_string, ToJSONString) \ V(toString_string, toString_string, ToStringString) \ @@ -1177,19 +1186,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode LoadMaybeIndirectPointerFromObject(TNode object, int offset); - // When the sandbox is enabled, this will load the Code object pointer from - // the code pointer table entry. - // Load a code entrypoint pointer from an object. - // When the sandbox is enabled, this will load the entrypoint pointer from the - // code pointer table entry. - TNode LoadCodeEntrypointFromObject(TNode object, - int offset) { - return LoadCodeEntrypointFromObject(object, IntPtrConstant(offset)); + // Load the pointer to a Code's entrypoint via an indirect pointer to the + // Code object. + // Only available when the sandbox is enabled. + TNode LoadCodeEntrypointViaIndirectPointerField( + TNode object, int offset) { + return LoadCodeEntrypointViaIndirectPointerField(object, + IntPtrConstant(offset)); } - TNode LoadCodeEntrypointFromObject(TNode object, - TNode offset); + TNode LoadCodeEntrypointViaIndirectPointerField( + TNode object, TNode offset); -#ifdef V8_CODE_POINTER_SANDBOXING +#ifdef V8_ENABLE_SANDBOX // Helper function to load a CodePointerHandle from an object and compute the // offset into the code pointer table from it. TNode ComputeCodePointerTableEntryOffset( @@ -1792,21 +1800,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Store to an indirect pointer field. This involves loading the index for // the pointer table entry owned by the pointed-to object (which points back // to it) and storing that into the specified field. - // TODO(saelo) Currently, only Code objects can be referenced through - // indirect pointers, and so we only support these here, but we should - // generalize this. + // Stores that may require a write barrier also need to know the indirect + // pointer tag for the field. Otherwise, it is not needed void StoreIndirectPointerField(TNode object, int offset, - TNode value); - void StoreIndirectPointerFieldNoWriteBarrier(TNode object, - int offset, TNode value); + IndirectPointerTag tag, + TNode value); + void StoreIndirectPointerFieldNoWriteBarrier( + TNode object, int offset, IndirectPointerTag tag, + TNode value); // Store to a field that either contains an indirect pointer (when the // sandbox is enabled) or a regular (tagged) pointer otherwise. 
void StoreMaybeIndirectPointerField(TNode object, int offset, - TNode value); - void StoreMaybeIndirectPointerFieldNoWriteBarrier(TNode object, - int offset, - TNode value); + IndirectPointerTag tag, + TNode value); + void StoreMaybeIndirectPointerFieldNoWriteBarrier( + TNode object, int offset, IndirectPointerTag tag, + TNode value); template void StoreObjectFieldNoWriteBarrier(TNode object, diff --git a/deps/v8/src/codegen/compilation-cache.h b/deps/v8/src/codegen/compilation-cache.h index 6c93a7871ced3e..0b25c5b404cd45 100644 --- a/deps/v8/src/codegen/compilation-cache.h +++ b/deps/v8/src/codegen/compilation-cache.h @@ -40,7 +40,7 @@ class CompilationCacheEvalOrScript { Isolate* isolate() const { return isolate_; } Isolate* const isolate_; - Object table_; + Tagged table_; DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEvalOrScript); }; @@ -127,7 +127,7 @@ class CompilationCacheRegExp { Isolate* isolate() const { return isolate_; } Isolate* const isolate_; - Object tables_[kGenerations]; // One for each generation. + Tagged tables_[kGenerations]; // One for each generation. DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp); }; diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc index 3204e37c88ee14..547ece66021a63 100644 --- a/deps/v8/src/codegen/compiler.cc +++ b/deps/v8/src/codegen/compiler.cc @@ -36,6 +36,7 @@ #include "src/execution/local-isolate.h" #include "src/execution/vm-state-inl.h" #include "src/flags/flags.h" +#include "src/handles/global-handles-inl.h" #include "src/handles/handles.h" #include "src/handles/maybe-handles.h" #include "src/handles/persistent-handles.h" @@ -76,7 +77,7 @@ namespace { constexpr bool IsOSR(BytecodeOffset osr_offset) { return !osr_offset.IsNone(); } -void SetTieringState(JSFunction function, BytecodeOffset osr_offset, +void SetTieringState(Tagged function, BytecodeOffset osr_offset, TieringState value) { if (IsOSR(osr_offset)) { function->set_osr_tiering_state(value); @@ -85,7 +86,7 @@ void SetTieringState(JSFunction function, BytecodeOffset osr_offset, } } -void ResetTieringState(JSFunction function, BytecodeOffset osr_offset) { +void ResetTieringState(Tagged function, BytecodeOffset osr_offset) { if (function->has_feedback_vector()) { SetTieringState(function, osr_offset, TieringState::kNone); } @@ -375,7 +376,7 @@ void Compiler::LogFunctionCompilation(Isolate* isolate, namespace { ScriptOriginOptions OriginOptionsForEval( - Object script, ParsingWhileDebugging parsing_while_debugging) { + Tagged script, ParsingWhileDebugging parsing_while_debugging) { bool is_shared_cross_origin = parsing_while_debugging == ParsingWhileDebugging::kYes; bool is_opaque = false; @@ -495,7 +496,7 @@ void OptimizedCompilationJob::RegisterWeakObjectsInOptimizedCode( Isolate* isolate, Handle context, Handle code) { // TODO(choongwoo.han): Split this method into collecting maps on the // background thread, and retaining them on the foreground thread. 
-  std::vector<Handle<Map>> maps;
+  GlobalHandleVector<Map> maps(isolate->heap());
   DCHECK(code->is_optimized_code());
   {
     DisallowGarbageCollection no_gc;
@@ -506,16 +507,12 @@ void OptimizedCompilationJob::RegisterWeakObjectsInOptimizedCode(
       Tagged<HeapObject> target_object = it.rinfo()->target_object(cage_base);
       if (code->IsWeakObjectInOptimizedCode(target_object)) {
         if (IsMap(target_object, cage_base)) {
-          maps.push_back(handle(Map::cast(target_object), isolate));
+          maps.Push(Map::cast(target_object));
         }
       }
     }
   }
-  for (Handle<Map> map : maps) {
-    // TODO(choongwoo.han): Batch this to avoid calling WeakArrayList::AddToEnd
-    // for each call.
-    isolate->heap()->AddRetainedMap(context, map);
-  }
+  isolate->heap()->AddRetainedMaps(context, std::move(maps));
   code->set_can_have_weak_objects(true);
 }
@@ -761,8 +758,8 @@ void EnsureSharedFunctionInfosArrayOnScript(Handle
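// --- Editorial note (not part of the patch) ---------------------------------
// The RegisterWeakObjectsInOptimizedCode change above replaces one
// AddRetainedMap call per map with a single AddRetainedMaps call over a
// collected batch. A self-contained sketch of that collect-then-flush pattern
// follows; RetainedMapSet and its members are illustrative stand-ins, not V8 API.
#include <cstdio>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

struct RetainedMapSet {
  std::vector<std::string> maps;
  // One bulk call replaces N per-element calls, which is the point of moving
  // from the per-map AddRetainedMap loop to AddRetainedMaps.
  void AddRetainedBatch(std::vector<std::string> batch) {
    maps.insert(maps.end(), std::make_move_iterator(batch.begin()),
                std::make_move_iterator(batch.end()));
  }
};

int main() {
  std::vector<std::string> collected;  // filled while walking the reloc info
  collected.push_back("Map#1");
  collected.push_back("Map#2");

  RetainedMapSet heap_side;
  heap_side.AddRetainedBatch(std::move(collected));  // single hand-off
  std::printf("retained maps: %zu\n", heap_side.maps.size());
  return 0;
}
// ----------------------------------------------------------------------------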