deps: update V8 to 13.0.245.25

PR-URL: https://github.com/nodejs/node/pull/55014
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Yagiz Nizipli <yagiz@nizipli.com>

Author: Michaël Zasso, 2025-01-30 16:35:04 +01:00
Parent: 1faf8c6d3e
Commit: 5edec0e39a
GPG Key ID: 770F7A9A5AE15600 (no known key found for this signature in database)

1086 changed files with 28959 additions and 14717 deletions

deps/v8/.clang-tidy (vendored, 12 changed lines)

@@ -2,6 +2,10 @@
 ---
 Checks: '-*,
   bugprone-unique-ptr-array-mismatch,
+  # google-build-explicit-make-pair,
+  google-default-arguments,
+  google-explicit-constructor,
+  google-readability-casting,
   modernize-redundant-void-arg,
   modernize-replace-random-shuffle,
   modernize-shrink-to-fit,
@@ -11,12 +15,10 @@
   # modernize-use-equals-delete,
   modernize-use-nullptr,
   modernize-use-override,
-  # google-build-explicit-make-pair,
-  google-default-arguments,
-  google-explicit-constructor,
-  google-readability-casting'
+  performance-inefficient-vector-operation,
+  performance-trivially-destructible,
+  performance-unnecessary-copy-initialization'
 WarningsAsErrors: ''
 HeaderFilterRegex: ''
 AnalyzeTemporaryDtors: false
 ...
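
For orientation, a hypothetical C++ fragment (not from the V8 tree) showing the kind of code two of the newly enabled checks act on; `explicit` and `reserve()` are the compliant forms:

#include <vector>

struct NodeId {
  // Without `explicit`, google-explicit-constructor flags this
  // single-argument constructor: an int could silently convert to NodeId.
  explicit NodeId(int value) : value_(value) {}
  int value_;
};

std::vector<int> Squares(int n) {
  std::vector<int> out;
  // Without this reserve(), performance-inefficient-vector-operation flags
  // the push_back loop below for causing repeated reallocations.
  out.reserve(n);
  for (int i = 0; i < n; ++i) out.push_back(i * i);
  return out;
}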

deps/v8/.gitignore (vendored, 3 changed lines)

@@ -68,6 +68,9 @@
 /third_party/colorama/src
 !/third_party/cpu_features
 /third_party/cpu_features/src
+!/third_party/fast_float
+/third_party/fast_float/src/*
+!/third_party/fast_float/src/include
 !/third_party/glibc
 !/third_party/googletest
 /third_party/googletest/src/*

deps/v8/AUTHORS (vendored, 3 changed lines)

@@ -71,7 +71,9 @@ Anton Bershanskiy <8knots@protonmail.com>
 Anton Bikineev <ant.bikineev@gmail.com>
 Ao Wang <wangao.james@bytedance.com>
 Archil Sharashenidze <achosharashenidze@gmail.com>
+Artem Kobzar <artem.kobzar@jetbrains.com>
 Arthur Islamov <arthur@islamov.ai>
+Asuka Shikina <shikina.asuka@gmail.com>
 Aurèle Barrière <aurele.barriere@gmail.com>
 Bala Avulapati <bavulapati@gmail.com>
 Bangfu Tao <bangfu.tao@samsung.com>
@@ -309,6 +311,7 @@ Youfeng Hao <ajihyf@gmail.com>
 Yu Yin <xwafish@gmail.com>
 Yujie Wang <hex6770@gmail.com>
 Yuri Iozzelli <yuri@leaningtech.com>
+Yuri Gaevsky <yuri.gaevsky@syntacore.com>
 Yusif Khudhur <yusif.khudhur@gmail.com>
 Yuxiang Cao <caoyxsh@outlook.com>
 Zac Hansen <xaxxon@gmail.com>

deps/v8/BUILD.bazel (vendored, 46 changed lines)

@@ -59,8 +59,6 @@ load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression
 # v8_enable_gdbjit
 # v8_check_header_includes
 # v8_enable_lazy_source_positions
-# v8_enable_third_party_heap
-# v8_third_party_heap_files
 # v8_disable_write_barriers
 # v8_enable_unconditional_write_barriers
 # v8_enable_single_generation
@@ -492,8 +490,7 @@ v8_config(
         ],
         "@v8//bazel/config:v8_target_ppc64le": [
             # NOTE: Bazel rules for ppc64le weren't tested on a real system.
-            "V8_TARGET_ARCH_PPC64",
-            "V8_TARGET_ARCH_PPC_LE",
+            "V8_TARGET_ARCH_PPC64"
         ],
     },
     no_match_error = "Please specify a target cpu supported by v8",
@@ -1793,8 +1790,6 @@ filegroup(
        "src/heap/stress-scavenge-observer.h",
        "src/heap/sweeper.cc",
        "src/heap/sweeper.h",
-       "src/heap/third-party/heap-api.h",
-       "src/heap/third-party/heap-api-stub.cc",
        "src/heap/traced-handles-marking-visitor.cc",
        "src/heap/traced-handles-marking-visitor.h",
        "src/heap/weak-object-worklists.cc",
@@ -2336,6 +2331,7 @@
        "src/runtime/runtime-test.cc",
        "src/runtime/runtime-trace.cc",
        "src/runtime/runtime-typedarray.cc",
+       "src/runtime/runtime-utils.cc",
        "src/runtime/runtime-utils.h",
        "src/runtime/runtime-weak-refs.cc",
        "src/sandbox/bounded-size.h",
@@ -2557,6 +2553,8 @@
        "src/codegen/x64/assembler-x64.cc",
        "src/codegen/x64/assembler-x64.h",
        "src/codegen/x64/assembler-x64-inl.h",
+       "src/codegen/x64/builtin-jump-table-info-x64.cc",
+       "src/codegen/x64/builtin-jump-table-info-x64.h",
        "src/codegen/x64/constants-x64.h",
        "src/codegen/x64/cpu-x64.cc",
        "src/codegen/x64/fma-instr.h",
@@ -2889,7 +2887,6 @@
        "src/wasm/leb-helper.h",
        "src/wasm/local-decl-encoder.cc",
        "src/wasm/local-decl-encoder.h",
-       "src/wasm/memory-tracing.cc",
        "src/wasm/memory-tracing.h",
        "src/wasm/module-compiler.cc",
        "src/wasm/module-compiler.h",
@@ -2924,6 +2921,9 @@
        "src/wasm/wasm-builtin-list.h",
        "src/wasm/wasm-code-manager.cc",
        "src/wasm/wasm-code-manager.h",
+       "src/wasm/wasm-code-pointer-table.cc",
+       "src/wasm/wasm-code-pointer-table.h",
+       "src/wasm/wasm-code-pointer-table-inl.h",
        "src/wasm/wasm-debug.cc",
        "src/wasm/wasm-debug.h",
        "src/wasm/wasm-deopt-data.cc",
@@ -3282,6 +3282,8 @@
        "src/compiler/turboshaft/build-graph-phase.cc",
        "src/compiler/turboshaft/build-graph-phase.h",
        "src/compiler/turboshaft/builtin-call-descriptors.h",
+       "src/compiler/turboshaft/builtin-compiler.cc",
+       "src/compiler/turboshaft/builtin-compiler.h",
        "src/compiler/turboshaft/csa-optimize-phase.cc",
        "src/compiler/turboshaft/csa-optimize-phase.h",
        "src/compiler/turboshaft/dataview-lowering-reducer.h",
@@ -3303,6 +3305,7 @@
        "src/compiler/turboshaft/explicit-truncation-reducer.h",
        "src/compiler/turboshaft/fast-api-call-lowering-reducer.h",
        "src/compiler/turboshaft/fast-hash.h",
+       "src/compiler/turboshaft/field-macro.inc",
        "src/compiler/turboshaft/graph.cc",
        "src/compiler/turboshaft/graph.h",
        "src/compiler/turboshaft/graph-builder.cc",
@@ -3484,6 +3487,9 @@
        "src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.cc",
        "src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.h",
        "src/compiler/turboshaft/wasm-load-elimination-reducer.h",
+       "src/compiler/turboshaft/wasm-in-js-inlining-phase.cc",
+       "src/compiler/turboshaft/wasm-in-js-inlining-phase.h",
+       "src/compiler/turboshaft/wasm-in-js-inlining-reducer-inl.h",
        "src/compiler/turboshaft/wasm-lowering-phase.cc",
        "src/compiler/turboshaft/wasm-lowering-phase.h",
        "src/compiler/turboshaft/wasm-lowering-reducer.h",
@@ -3588,6 +3594,7 @@
        "src/builtins/builtins-lazy-gen.h",
        "src/builtins/builtins-microtask-queue-gen.cc",
        "src/builtins/builtins-number-gen.cc",
+       "src/builtins/builtins-number-tsa.cc",
        "src/builtins/builtins-object-gen.cc",
        "src/builtins/builtins-object-gen.h",
        "src/builtins/builtins-promise-gen.cc",
@@ -3607,6 +3614,7 @@
        "src/builtins/builtins-utils-gen.h",
        "src/builtins/growable-fixed-array-gen.cc",
        "src/builtins/growable-fixed-array-gen.h",
+       "src/builtins/number-builtins-reducer-inl.h",
        "src/builtins/profile-data-reader.cc",
        "src/builtins/profile-data-reader.h",
        "src/builtins/setup-builtins-internal.cc",
@@ -3616,6 +3624,8 @@
        "third_party/v8/codegen/fp16-inl.h",
        "src/codegen/code-stub-assembler-inl.h",
        "src/codegen/code-stub-assembler.h",
+       "src/codegen/define-code-stub-assembler-macros.inc",
+       "src/codegen/undef-code-stub-assembler-macros.inc",
        "src/heap/setup-heap-internal.cc",
        "src/ic/accessor-assembler.cc",
        "src/ic/accessor-assembler.h",
@@ -3629,6 +3639,8 @@
        "src/interpreter/interpreter-assembler.h",
        "src/interpreter/interpreter-generator.cc",
        "src/interpreter/interpreter-generator.h",
+       "src/interpreter/interpreter-generator-tsa.cc",
+       "src/interpreter/interpreter-generator-tsa.h",
        "src/interpreter/interpreter-intrinsics-generator.cc",
        "src/interpreter/interpreter-intrinsics-generator.h",
        "src/numbers/integer-literal.h",
@@ -3796,6 +3808,25 @@
     }),
 )
 
+v8_library(
+    name = "lib_fast_float",
+    srcs = [
+        "third_party/fast_float/src/include/fast_float/ascii_number.h",
+        "third_party/fast_float/src/include/fast_float/bigint.h",
+        "third_party/fast_float/src/include/fast_float/constexpr_feature_detect.h",
+        "third_party/fast_float/src/include/fast_float/decimal_to_binary.h",
+        "third_party/fast_float/src/include/fast_float/digit_comparison.h",
+        "third_party/fast_float/src/include/fast_float/fast_float.h",
+        "third_party/fast_float/src/include/fast_float/fast_table.h",
+        "third_party/fast_float/src/include/fast_float/float_common.h",
+        "third_party/fast_float/src/include/fast_float/parse_number.h",
+    ],
+    hdrs = [ "third_party/fast_float/src/include/fast_float/fast_float.h" ],
+    includes = [
+        "third_party/fast_float/src/include",
+    ],
+)
+
 v8_library(
     name = "lib_fp16",
     srcs = ["third_party/fp16/src/include/fp16.h"],
@@ -4299,6 +4330,7 @@ v8_library(
         ":noicu/generated_torque_definitions",
     ],
     deps = [
+        ":lib_fast_float",
         ":lib_fp16",
         ":v8_libbase",
         "//external:absl_btree",

deps/v8/BUILD.gn (vendored, 208 changed lines)

@@ -266,15 +266,6 @@ declare_args() {
   # Enable lazy source positions by default.
   v8_enable_lazy_source_positions = true
 
-  # Enable third party HEAP library
-  v8_enable_third_party_heap = false
-
-  # Libaries used by third party heap
-  v8_third_party_heap_libs = []
-
-  # Source code used by third party heap
-  v8_third_party_heap_files = []
-
   # Disable write barriers when GCs are non-incremental and
   # heap has single generation.
   v8_disable_write_barriers = false
@@ -451,6 +442,12 @@ declare_args() {
   v8_enable_experimental_tsa_builtins = false
 
   v8_dcheck_always_on = dcheck_always_on
+
+  # Remote builds require an explicit dependency on icudat, but
+  # this breaks locally building V8 with ICU support when the file
+  # isn't present, which some embedders rely on. This option controls
+  # the explicit dependency and allows the build to complete.
+  v8_depend_on_icu_data_file = icu_use_data_file
 }
 
 # Derived defaults.
@@ -556,14 +553,6 @@
 if (v8_enable_atomic_object_field_writes == "") {
   v8_enable_atomic_object_field_writes = v8_enable_concurrent_marking
 }
-if (v8_enable_third_party_heap) {
-  v8_disable_write_barriers = true
-  v8_enable_single_generation = true
-  v8_enable_shared_ro_heap = false
-  v8_enable_pointer_compression = false
-  v8_enable_pointer_compression_shared_cage = false
-  v8_enable_allocation_folding = false
-}
 if (v8_enable_single_generation) {
   v8_allocation_site_tracking = false
 }
@@ -710,9 +699,6 @@ assert(!v8_enable_sandbox || v8_enable_pointer_compression_shared_cage,
 assert(!v8_enable_sandbox || v8_enable_external_code_space,
        "The sandbox requires the external code space")
 
-assert(!v8_enable_sandbox || !v8_enable_third_party_heap,
-       "The sandbox is incompatible with the third-party heap")
-
 assert(!v8_enable_memory_corruption_api || v8_enable_sandbox,
        "The Memory Corruption API requires the sandbox")
@@ -750,6 +736,10 @@ if (v8_enable_single_generation == true) {
 assert(!v8_enable_snapshot_compression || v8_use_zlib,
        "Snapshot compression requires zlib")
 
+assert(!v8_enable_cet_shadow_stack ||
+           (v8_target_cpu == "x64" && target_os == "win"),
+       "CET shadow stack is supported only on x64 Windows")
+
 if (v8_expose_public_symbols == "") {
   v8_expose_public_symbols = v8_expose_symbols
 }
@@ -1174,9 +1164,6 @@ config("features") {
   if (v8_disable_write_barriers) {
     defines += [ "V8_DISABLE_WRITE_BARRIERS" ]
   }
-  if (v8_enable_third_party_heap) {
-    defines += [ "V8_ENABLE_THIRD_PARTY_HEAP" ]
-  }
   if (v8_use_external_startup_data) {
     defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ]
   }
@@ -1213,6 +1200,9 @@ config("features") {
   if (v8_enable_cet_ibt) {
     defines += [ "V8_ENABLE_CET_IBT" ]
   }
+  if (v8_enable_memory_sealing) {
+    defines += [ "V8_ENABLE_MEMORY_SEALING" ]
+  }
   if (v8_enable_wasm_gdb_remote_debugging) {
     defines += [ "V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING" ]
   }
@@ -1438,27 +1428,18 @@ config("toolchain") {
       cflags += [ "-march=z196" ]
     }
   }
-  if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
-    if (v8_current_cpu == "ppc") {
-      defines += [ "V8_TARGET_ARCH_PPC" ]
-    } else if (v8_current_cpu == "ppc64") {
-      defines += [ "V8_TARGET_ARCH_PPC64" ]
-      cflags += [ "-ffp-contract=off" ]
-    }
-    if (host_byteorder == "little") {
-      defines += [ "V8_TARGET_ARCH_PPC_LE" ]
-    } else if (host_byteorder == "big") {
-      defines += [ "V8_TARGET_ARCH_PPC_BE" ]
-      if (current_os == "aix") {
-        cflags += [
-          # Work around AIX ceil, trunc and round oddities.
-          "-mcpu=power5+",
-          "-mfprnd",
-          # Work around AIX assembler popcntb bug.
-          "-mno-popcntb",
-        ]
-      }
+  if (v8_current_cpu == "ppc64") {
+    defines += [ "V8_TARGET_ARCH_PPC64" ]
+    cflags += [ "-ffp-contract=off" ]
+    if (current_os == "aix") {
+      cflags += [
+        # Work around AIX ceil, trunc and round oddities.
+        "-mcpu=power5+",
+        "-mfprnd",
+        # Work around AIX assembler popcntb bug.
+        "-mno-popcntb",
+      ]
     }
   }
@@ -2448,12 +2429,17 @@ template("run_mksnapshot") {
   }
   action("run_mksnapshot_" + name) {
     deps = [ ":mksnapshot($v8_snapshot_toolchain)" ]
+    if (v8_verify_deterministic_mksnapshot) {
+      # We archive the snapshot executable when verifying snapshot
+      # determinism to ease debugging.
+      data_deps = [ ":mksnapshot($v8_snapshot_toolchain)" ]
+    }
 
     script = "tools/run.py"
 
     sources = []
 
-    if (icu_use_data_file) {
+    if (v8_depend_on_icu_data_file) {
       deps += [ "//third_party/icu:copy_icudata" ]
       if (host_byteorder == "big") {
         sources += [ "$root_out_dir/icudtb.dat" ]
@@ -2472,7 +2458,20 @@
       ext = "s"
     }
 
-    args = [
+    args = []
+
+    if (v8_verify_deterministic_mksnapshot) {
+      # Output redirection must be the first argument to run.py. We capture
+      # output when verifying snapshot determinism for debugging.
+      args += [
+        "--redirect-stdout",
+        rebase_path("$root_out_dir/mksnapshot_output${suffix}.log",
+                    root_build_dir),
+      ]
+      data += [ "$root_out_dir/mksnapshot_output${suffix}.log" ]
+    }
+
+    args += [
       "./" + rebase_path(get_label_info(":mksnapshot($v8_snapshot_toolchain)",
                                         "root_out_dir") + "/mksnapshot",
                          root_build_dir),
@@ -2488,8 +2487,25 @@
       "--embedded_src",
       rebase_path("$target_gen_dir/embedded${suffix}.${ext}", root_build_dir),
+
+      # mksnapshot runs in predictable mode to create deterministic snapshots.
+      # Note this flag is also set implicitly by mksnapshot itself (see
+      # mksnapshot.cc). We set it here as well for clarity.
+      "--predictable",
+
+      # Disable ICs globally in mksnapshot to avoid problems with Code handlers.
+      # See https://crbug.com/345280736.
+      # TODO(jgruber): Re-enable once a better fix is available.
+      # Note this flag is also set implicitly by mksnapshot itself (see
+      # mksnapshot.cc). We set it here as well for clarity.
+      "--no-use-ic",
     ]
+
+    if (v8_verify_deterministic_mksnapshot) {
+      # Flags that help debugging snapshot determinism.
+      args += [ "--trace-read-only-promotion" ]
+    }
+
     if (v8_log_builtins_block_count_input != "") {
       args += [
         "--trace-turbo",
@@ -2635,11 +2651,15 @@ if (v8_verify_deterministic_mksnapshot) {
     }
   }
 
-  action("verify_deterministic_mksnapshot") {
-    deps = []
+  group("snapshot_set") {
+    data_deps = []
     foreach(i, runs) {
-      deps += [ ":run_mksnapshot_$i" ]
+      data_deps += [ ":run_mksnapshot_$i" ]
     }
+  }
+
+  action("verify_deterministic_mksnapshot") {
+    deps = [ ":snapshot_set" ]
     report_file = "$target_gen_dir/mksnapshot_comparison.txt"
     script = "tools/snapshot/compare_mksnapshot_output.py"
     args = [
@@ -2736,9 +2756,13 @@ action("v8_dump_build_config") {
   simulator_run = target_cpu != v8_target_cpu
   use_sanitizer = is_asan || is_cfi || is_msan || is_tsan || is_ubsan
+  use_leaptiering = v8_enable_sandbox && !v8_disable_leaptiering
 
   # This lists all build-time switches consumed by the test framework. All
   # switches can be used automatically in the status files as is - no
   # further files need to be modified.
+  # However, the switch also has to be entered in `build_config_content` in
+  # `bazel/defs.bzl` so that the switch also works for tests triggered by bazel.
   #
   # Naming conventions: Keep switch names short and remove unnecessary
   # qualifiers. Drop v8_enable_, v8_, is_ where possible.
@@ -2794,7 +2818,6 @@
     "single_generation=$v8_enable_single_generation",
     "slow_dchecks=$v8_enable_slow_dchecks",
     "target_cpu=\"$target_cpu\"",
-    "third_party_heap=$v8_enable_third_party_heap",
     "tsan=$is_tsan",
     "ubsan=$is_ubsan",
     "use_sanitizer=$use_sanitizer",
@@ -2805,6 +2828,10 @@
     "verify_heap=$v8_enable_verify_heap",
     "verify_predictable=$v8_enable_verify_predictable",
     "memory_corruption_api=$v8_enable_memory_corruption_api",
+    "leaptiering=$use_leaptiering",
+
+    # Please add new switches also in `build_config_content` in `bazel/defs.bzl`
+    # so that the switches also work for tests triggered by bazel.
   ]
 }
@@ -2952,6 +2979,7 @@ v8_source_set("v8_initializers") {
     "src/builtins/builtins-lazy-gen.h",
    "src/builtins/builtins-microtask-queue-gen.cc",
     "src/builtins/builtins-number-gen.cc",
+    "src/builtins/builtins-number-tsa.cc",
     "src/builtins/builtins-object-gen.cc",
     "src/builtins/builtins-object-gen.h",
     "src/builtins/builtins-promise-gen.cc",
@@ -2971,6 +2999,7 @@
     "src/builtins/builtins-utils-gen.h",
     "src/builtins/growable-fixed-array-gen.cc",
     "src/builtins/growable-fixed-array-gen.h",
+    "src/builtins/number-builtins-reducer-inl.h",
     "src/builtins/profile-data-reader.cc",
     "src/builtins/profile-data-reader.h",
     "src/builtins/setup-builtins-internal.cc",
@@ -2978,8 +3007,12 @@
     "src/codegen/code-stub-assembler-inl.h",
     "src/codegen/code-stub-assembler.cc",
     "src/codegen/code-stub-assembler.h",
+    "src/codegen/define-code-stub-assembler-macros.inc",
     "src/codegen/heap-object-list.h",
     "src/codegen/turboshaft-builtins-assembler-inl.h",
+    "src/codegen/undef-code-stub-assembler-macros.inc",
+    "src/compiler/turboshaft/builtin-compiler.cc",
+    "src/compiler/turboshaft/builtin-compiler.h",
     "src/heap/setup-heap-internal.cc",
     "src/ic/accessor-assembler.cc",
     "src/ic/accessor-assembler.h",
@@ -2991,6 +3024,8 @@
     "src/ic/unary-op-assembler.h",
     "src/interpreter/interpreter-assembler.cc",
     "src/interpreter/interpreter-assembler.h",
+    "src/interpreter/interpreter-generator-tsa.cc",
+    "src/interpreter/interpreter-generator-tsa.h",
     "src/interpreter/interpreter-generator.cc",
     "src/interpreter/interpreter-generator.h",
     "src/interpreter/interpreter-intrinsics-generator.cc",
@@ -3046,11 +3081,6 @@
       ### gcmole(loong64) ###
       "src/builtins/loong64/builtins-loong64.cc",
     ]
-  } else if (v8_current_cpu == "ppc") {
-    sources += [
-      ### gcmole(ppc) ###
-      "src/builtins/ppc/builtins-ppc.cc",
-    ]
   } else if (v8_current_cpu == "ppc64") {
     sources += [
       ### gcmole(ppc64) ###
@@ -3531,6 +3561,7 @@ v8_header_set("v8_internal_headers") {
     "src/compiler/turboshaft/explicit-truncation-reducer.h",
     "src/compiler/turboshaft/fast-api-call-lowering-reducer.h",
     "src/compiler/turboshaft/fast-hash.h",
+    "src/compiler/turboshaft/field-macro.inc",
     "src/compiler/turboshaft/graph-builder.h",
     "src/compiler/turboshaft/graph-visualizer.h",
     "src/compiler/turboshaft/graph.h",
@@ -4354,6 +4385,8 @@
     "src/compiler/turboshaft/wasm-assembler-helpers.h",
     "src/compiler/turboshaft/wasm-gc-optimize-phase.h",
     "src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.h",
+    "src/compiler/turboshaft/wasm-in-js-inlining-phase.h",
+    "src/compiler/turboshaft/wasm-in-js-inlining-reducer-inl.h",
     "src/compiler/turboshaft/wasm-js-lowering-reducer.h",
     "src/compiler/turboshaft/wasm-load-elimination-reducer.h",
     "src/compiler/turboshaft/wasm-lowering-phase.h",
@@ -4426,6 +4459,8 @@
     "src/wasm/wasm-arguments.h",
     "src/wasm/wasm-builtin-list.h",
     "src/wasm/wasm-code-manager.h",
+    "src/wasm/wasm-code-pointer-table-inl.h",
+    "src/wasm/wasm-code-pointer-table.h",
     "src/wasm/wasm-debug.h",
     "src/wasm/wasm-deopt-data.h",
     "src/wasm/wasm-disassembler-impl.h",
@@ -4475,12 +4510,6 @@
     ]
   }
 
-  if (!v8_enable_third_party_heap) {
-    sources += filter_include(v8_third_party_heap_files, [ "*.h" ])
-  } else {
-    sources += [ "src/heap/third-party/heap-api.h" ]
-  }
-
   if (v8_enable_i18n_support) {
     sources += [
       "src/objects/intl-objects.h",
@@ -4567,6 +4596,7 @@
     "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h",
     "src/codegen/x64/assembler-x64-inl.h",
     "src/codegen/x64/assembler-x64.h",
+    "src/codegen/x64/builtin-jump-table-info-x64.h",
     "src/codegen/x64/constants-x64.h",
     "src/codegen/x64/fma-instr.h",
     "src/codegen/x64/interface-descriptors-x64-inl.h",
@@ -4728,23 +4758,6 @@
       sources += [ "src/trap-handler/trap-handler-simulator.h" ]
     }
   }
-} else if (v8_current_cpu == "ppc") {
-  sources += [
-    ### gcmole(ppc) ###
-    "src/codegen/ppc/assembler-ppc-inl.h",
-    "src/codegen/ppc/assembler-ppc.h",
-    "src/codegen/ppc/constants-ppc.h",
-    "src/codegen/ppc/interface-descriptors-ppc-inl.h",
-    "src/codegen/ppc/macro-assembler-ppc.h",
-    "src/codegen/ppc/register-ppc.h",
-    "src/codegen/ppc/reglist-ppc.h",
-    "src/compiler/backend/ppc/instruction-codes-ppc.h",
-    "src/compiler/backend/ppc/unwinding-info-writer-ppc.h",
-    "src/execution/ppc/frame-constants-ppc.h",
-    "src/execution/ppc/simulator-ppc.h",
-    "src/regexp/ppc/regexp-macro-assembler-ppc.h",
-    "src/wasm/baseline/ppc/liftoff-assembler-ppc-inl.h",
-  ]
 } else if (v8_current_cpu == "ppc64") {
   sources += [
     ### gcmole(ppc64) ###
@@ -5082,14 +5095,6 @@ if (v8_current_cpu == "x86") {
     "src/compiler/backend/loong64/instruction-scheduler-loong64.cc",
     "src/compiler/backend/loong64/instruction-selector-loong64.cc",
   ]
-} else if (v8_current_cpu == "ppc") {
-  v8_compiler_sources += [
-    ### gcmole(ppc) ###
-    "src/compiler/backend/ppc/code-generator-ppc.cc",
-    "src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
-    "src/compiler/backend/ppc/instruction-selector-ppc.cc",
-    "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
-  ]
 } else if (v8_current_cpu == "ppc64") {
   v8_compiler_sources += [
     ### gcmole(ppc64) ###
@@ -5128,6 +5133,7 @@ if (v8_enable_webassembly) {
     "src/compiler/turboshaft/int64-lowering-phase.cc",
     "src/compiler/turboshaft/wasm-gc-optimize-phase.cc",
     "src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.cc",
+    "src/compiler/turboshaft/wasm-in-js-inlining-phase.cc",
    "src/compiler/turboshaft/wasm-lowering-phase.cc",
     "src/compiler/turboshaft/wasm-optimize-phase.cc",
     "src/compiler/turboshaft/wasm-turboshaft-compiler.cc",
@@ -5747,6 +5753,7 @@ v8_source_set("v8_base_without_compiler") {
     "src/runtime/runtime-test.cc",
     "src/runtime/runtime-trace.cc",
     "src/runtime/runtime-typedarray.cc",
+    "src/runtime/runtime-utils.cc",
     "src/runtime/runtime-weak-refs.cc",
     "src/runtime/runtime.cc",
     "src/sandbox/code-pointer-table.cc",
@@ -5895,7 +5902,6 @@
     "src/wasm/graph-builder-interface.cc",
     "src/wasm/jump-table-assembler.cc",
     "src/wasm/local-decl-encoder.cc",
-    "src/wasm/memory-tracing.cc",
     "src/wasm/module-compiler.cc",
     "src/wasm/module-decoder.cc",
     "src/wasm/module-instantiate.cc",
@@ -5908,6 +5914,7 @@
     "src/wasm/turboshaft-graph-interface.cc",
     "src/wasm/value-type.cc",
     "src/wasm/wasm-code-manager.cc",
+    "src/wasm/wasm-code-pointer-table.cc",
     "src/wasm/wasm-debug.cc",
     "src/wasm/wasm-deopt-data.cc",
     "src/wasm/wasm-disassembler.cc",
@@ -5944,12 +5951,6 @@
     }
   }
 
-  if (v8_enable_third_party_heap) {
-    sources += filter_exclude(v8_third_party_heap_files, [ "*.h" ])
-  } else {
-    sources += [ "src/heap/third-party/heap-api-stub.cc" ]
-  }
-
   if (v8_enable_conservative_stack_scanning) {
     sources += [ "src/heap/conservative-stack-visitor.cc" ]
   }
@@ -5989,6 +5990,7 @@
     ### gcmole(x64) ###
     "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc",
     "src/codegen/x64/assembler-x64.cc",
+    "src/codegen/x64/builtin-jump-table-info-x64.cc",
     "src/codegen/x64/cpu-x64.cc",
     "src/codegen/x64/macro-assembler-x64.cc",
     "src/deoptimizer/x64/deoptimizer-x64.cc",
@@ -6122,21 +6124,6 @@
       sources += [ "src/trap-handler/handler-outside-simulator.cc" ]
     }
   }
-} else if (v8_current_cpu == "ppc") {
-  sources += [
-    ### gcmole(ppc) ###
-    "src/codegen/ppc/assembler-ppc.cc",
-    "src/codegen/ppc/constants-ppc.cc",
-    "src/codegen/ppc/cpu-ppc.cc",
-    "src/codegen/ppc/macro-assembler-ppc.cc",
-    "src/deoptimizer/ppc/deoptimizer-ppc.cc",
-    "src/diagnostics/ppc/disasm-ppc.cc",
-    "src/diagnostics/ppc/eh-frame-ppc.cc",
-    "src/diagnostics/ppc/unwinder-ppc.cc",
-    "src/execution/ppc/frame-constants-ppc.cc",
-    "src/execution/ppc/simulator-ppc.cc",
-    "src/regexp/ppc/regexp-macro-assembler-ppc.cc",
-  ]
 } else if (v8_current_cpu == "ppc64") {
   sources += [
     ### gcmole(ppc64) ###
@@ -6260,6 +6247,7 @@
     ":v8_tracing",
     ":v8_version",
     "src/inspector:inspector",
+    "//third_party/fast_float",
   ]
 
   public_deps = [
@@ -6323,14 +6311,10 @@
   libs = []
 
-  if (v8_enable_third_party_heap) {
-    libs += v8_third_party_heap_libs
-  }
-
   # Platforms that don't have CAS support need to link atomic library
   # to implement atomic memory access
   if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" ||
-      v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" ||
+      v8_current_cpu == "ppc64" ||
       (current_os != "zos" &&
        (v8_current_cpu == "s390" || v8_current_cpu == "s390x")) ||
       v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") {
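
Among the BUILD.gn changes above, verify_deterministic_mksnapshot now depends on a snapshot_set group and compares the outputs of repeated mksnapshot runs; the actual comparison lives in tools/snapshot/compare_mksnapshot_output.py. A rough, illustrative C++ sketch of the underlying idea, byte-comparing artifacts whose paths are assumed to arrive on the command line:

#include <fstream>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>

// Reads a file fully into memory as raw bytes.
static std::vector<char> ReadAll(const std::string& path) {
  std::ifstream f(path, std::ios::binary);
  return std::vector<char>(std::istreambuf_iterator<char>(f),
                           std::istreambuf_iterator<char>());
}

int main(int argc, char** argv) {
  if (argc < 3) {
    std::cerr << "usage: compare <snapshot1> <snapshot2> [...]\n";
    return 2;
  }
  const std::vector<char> first = ReadAll(argv[1]);
  for (int i = 2; i < argc; ++i) {
    if (ReadAll(argv[i]) != first) {
      std::cerr << argv[i] << " differs from " << argv[1] << "\n";
      return 1;  // the snapshot build is not deterministic
    }
  }
  return 0;  // all runs produced identical bytes
}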

(file path not captured in this view)

@@ -11,8 +11,8 @@ dlehmann@chromium.org
 dmercadier@chromium.org
 ecmziegler@chromium.org
 evih@chromium.org
+fgm@chromium.org
 gdeepti@chromium.org
-hablich@chromium.org
 hpayer@chromium.org
 ishell@chromium.org
 jgruber@chromium.org
@@ -22,7 +22,6 @@ liviurau@chromium.org
 machenbach@chromium.org
 manoskouk@chromium.org
 marja@chromium.org
-mathias@chromium.org
 mliedtke@chromium.org
 mlippautz@chromium.org
 nicohartmann@chromium.org

deps/v8/DEPS (vendored, 207 changed lines)

@@ -60,7 +60,7 @@ vars = {
   'checkout_fuchsia_no_hooks': False,
 
   # reclient CIPD package version
-  'reclient_version': 're_client_version:0.157.0.d2566cec-gomaip',
+  'reclient_version': 're_client_version:0.163.0.d27158ab-gomaip',
 
   # Fetch configuration files required for the 'use_remoteexec' gn arg
@@ -76,22 +76,22 @@
   'build_with_chromium': False,
 
   # GN CIPD package version.
-  'gn_version': 'git_revision:54f5b539df8c4e460b18c62a11132d77b5601136',
+  'gn_version': 'git_revision:20806f79c6b4ba295274e3a589d85db41a02fdaa',
 
   # ninja CIPD package version
   # https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
   'ninja_version': 'version:2@1.12.1.chromium.4',
 
   # siso CIPD package version
-  'siso_version': 'git_revision:87262779ecc3482c8c60b070404b225107212d0d',
+  'siso_version': 'git_revision:eaee19cf51478b64614e2e8daad77378238a3c6c',
 
   # luci-go CIPD package version.
-  'luci_go': 'git_revision:ad7b787aa0ee53a81bc88fb4f7fee7a3ff1e8c29',
+  'luci_go': 'git_revision:1aca70b6bf116c1bd8fbf0526c9a89e9be308718',
 
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling Fuchsia sdk
   # and whatever else without interference from each other.
-  'fuchsia_version': 'version:23.20240815.4.1',
+  'fuchsia_version': 'version:24.20240913.4.1',
 
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -124,14 +124,14 @@
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling android_sdk_tools-lint_version
   # and whatever else without interference from each other.
-  'android_sdk_cmdline-tools_version': 'fv6JzkTqfxfIbmsRC8u1b2y0EQO7yQDMDzY3-g0NVu4C',
+  'android_sdk_cmdline-tools_version': 'B4p95sDPpm34K8Cf4JcfTM-iYSglWko9qjWgbT9dxWQC',
 }
 
 deps = {
   'build':
-    Var('chromium_url') + '/chromium/src/build.git' + '@' + '7a468ed1849454cffabf4a64110c24e6f1da2c51',
+    Var('chromium_url') + '/chromium/src/build.git' + '@' + '4bd877395d215d47c694a8383147eb158fafbbd6',
   'buildtools':
-    Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '60a590902cf146c282f15242401bd8543256e2a2',
+    Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'a7a84ac61eae5a8946807265a2fd8bd812daf384',
   'buildtools/linux64': {
     'packages': [
       {
@@ -177,7 +177,7 @@ deps = {
   'test/mozilla/data':
     Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
   'test/test262/data':
-    Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'bcb42e339dbac06f2f9902046b1fbf62562e0cd3',
+    Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'd62fa93c8f9ce5e687c0bbaa5d2b59670ab2ff60',
   'third_party/android_platform': {
     'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '6337c445f9963ec3914e7e0c5787941d07b46509',
     'condition': 'checkout_android',
@@ -231,15 +231,15 @@
     'dep_type': 'cipd',
   },
   'third_party/boringssl': {
-    'url': Var('chromium_url') + '/chromium/src/third_party/boringssl.git' + '@' + '4d98a91cde88f349b96f4018c00053b6699ffd88',
+    'url': Var('chromium_url') + '/chromium/src/third_party/boringssl.git' + '@' + 'c79987a83ceaf2cf911f7d21bec621ddc90c45cc',
     'condition': "checkout_centipede_deps",
   },
   'third_party/boringssl/src': {
-    'url': Var('boringssl_url') + '/boringssl.git' + '@' + '11f334121fd0d13830fefdf08041183da2d30ef3',
+    'url': Var('boringssl_url') + '/boringssl.git' + '@' + '58f3bc83230d2958bb9710bc910972c4f5d382dc',
     'condition': "checkout_centipede_deps",
   },
   'third_party/catapult': {
-    'url': Var('chromium_url') + '/catapult.git' + '@' + '799e21b232f23f6c1391abfd44fe8ab1dd95bd9b',
+    'url': Var('chromium_url') + '/catapult.git' + '@' + '296226a4a0067c8cffeb8831fb87526a8035f3cc',
     'condition': 'checkout_android',
   },
   'third_party/clang-format/script':
@@ -253,11 +253,13 @@
     'condition': 'checkout_android',
   },
   'third_party/depot_tools':
-    Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '0bc7c4832e4f2d453e4826c9a2e1197e11bd6ec7',
+    Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '22df6f8e622dc3e8df8dc8b5d3e3503b169af78e',
   'third_party/fp16/src':
     Var('chromium_url') + '/external/github.com/Maratyszcza/FP16.git' + '@' + '0a92994d729ff76a58f692d3028ca1b64b145d91',
+  'third_party/fast_float/src':
+    Var('chromium_url') + '/external/github.com/fastfloat/fast_float.git' + '@' + '3e57d8dcfb0a04b5a8a26b486b54490a2e9b310f',
   'third_party/fuchsia-gn-sdk': {
-    'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-gn-sdk.git' + '@' + '30fee7b68b3675e351fa47303c3b6ef322941ccd',
+    'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-gn-sdk.git' + '@' + '5086f6c9e4c6d3295a76fdb5d27209f2d6449c6a',
     'condition': 'checkout_fuchsia',
   },
   # Exists for rolling the Fuchsia SDK. Check out of the SDK should always
@@ -283,7 +285,7 @@
   'third_party/fuzztest/src':
     Var('chromium_url') + '/external/github.com/google/fuzztest.git' + '@' + '32eb84a95951fa3a0148fb3e6a1a02f830ded136',
   'third_party/googletest/src':
-    Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'ff233bdd4cac0a0bf6e5cd45bda3406814cb2796',
+    Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '0953a17a4281fc26831da647ad3fcd5e21e6473b',
   'third_party/highway/src':
     Var('chromium_url') + '/external/github.com/google/highway.git' + '@' + '8295336dd70f1201d42c22ab5b0861de38cf8fbf',
   'third_party/icu':
@@ -303,153 +305,153 @@
   'third_party/jsoncpp/source':
     Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '42e892d96e47b1f6e29844cc705e148ec4856448',
   'third_party/libc++/src':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + 'f801c947082a3e0a4b48780303526b73905f6ecd',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '50ab693ecb611942ce4440d8c9ed707ee65ed5e8',
   'third_party/libc++abi/src':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'eb6567388e89d9730c76dee71d68ac82e4a1abf6',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '29b2e9a0f48688da116692cb04758393053d269c',
   'third_party/libunwind/src':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '116c20dae60d84a77005697cf29f72783f81b0f9',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'dc70138c3e68e2f946585f134e20815851e26263',
   'third_party/llvm-build/Release+Asserts': {
     'dep_type': 'gcs',
     'bucket': 'chromium-browser-clang',
     'objects': [
       {
-        'object_name': 'Linux_x64/clang-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': '32ac9d9864a6bd99242f1a97778b3a074ac1151ce3eca369903f2ef5337c787a',
-        'size_bytes': 52250752,
-        'generation': 1723267014378582,
+        'object_name': 'Linux_x64/clang-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': '6a30f7bc7c5f0eac02a40a4ec9a1ab906ddff2adacf2c9ff065916047c79f0fb',
+        'size_bytes': 52892060,
+        'generation': 1726118358890940,
         'condition': 'host_os == "linux"',
       },
       {
-        'object_name': 'Linux_x64/clang-tidy-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': '5fd3fb73ceef73593fa09f8228283aec9f7798b648bf450e87f071a097be213b',
-        'size_bytes': 13217676,
-        'generation': 1723267014627839,
+        'object_name': 'Linux_x64/clang-tidy-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': '35e00fc8f58cf7cd30f0ad27c2fdef56b677e287030072c46c0f024d23363ae4',
+        'size_bytes': 13283180,
+        'generation': 1726118359291453,
         'condition': 'host_os == "linux" and checkout_clang_tidy',
       },
       {
-        'object_name': 'Linux_x64/clangd-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': '6e1fe97f8b7131591233d8a2df1ff289ffb878c3fc6834c978a86273f7c00b6b',
-        'size_bytes': 26125984,
-        'generation': 1723267014767012,
+        'object_name': 'Linux_x64/clangd-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': '33e2276976dfeaf387f5ea16651ea591eebe3570a12469f3884c74f8079e88bf',
+        'size_bytes': 26305668,
+        'generation': 1726118359489734,
        'condition': 'host_os == "linux" and checkout_clangd',
       },
      {
-        'object_name': 'Linux_x64/llvm-code-coverage-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': 'ef317481472926d3e2a82e2d2a02cde78685002b4c9923df476108906022d792',
-        'size_bytes': 2374748,
-        'generation': 1723267015213805,
+        'object_name': 'Linux_x64/llvm-code-coverage-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': '426c6bd378848de0817a7695fee821bece9efb51e3ed1d7b750a75bc17bf00eb',
+        'size_bytes': 2370472,
+        'generation': 1726118360237343,
         'condition': 'host_os == "linux" and checkout_clang_coverage_tools',
       },
       {
-        'object_name': 'Linux_x64/llvmobjdump-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': '02be68f7c7c7bf679e1abff2745306b8385275017c89b2b13f638a941785f8c5',
-        'size_bytes': 5386480,
-        'generation': 1723267014930087,
+        'object_name': 'Linux_x64/llvmobjdump-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': 'e11c3043e76c7c79fe7905861a11c78433c6d796d049f837eda0a2ce118f0793',
+        'size_bytes': 5410724,
+        'generation': 1726118359908897,
         'condition': '(checkout_linux or checkout_mac or checkout_android and host_os != "mac")',
       },
       {
-        'object_name': 'Mac/clang-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': '5df8a609a7d2511343fbc20af1de3ed1682c3703fc074f21af1bf8bc2f58e491',
-        'size_bytes': 47200408,
-        'generation': 1723267016534642,
+        'object_name': 'Mac/clang-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': 'cabfc7ca792ef13d3e665c3a7811f9a76cc39094059c11606cea1724f0394bbc',
+        'size_bytes': 47551968,
+        'generation': 1726118361528729,
        'condition': 'host_os == "mac" and host_cpu == "x64"',
       },
       {
-        'object_name': 'Mac/clang-mac-runtime-library-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': 'ffc72ff3fca85f31340c164aab480bd2babfaa6219ff12e93b81f0056309da55',
-        'size_bytes': 869616,
-        'generation': 1723267034708598,
+        'object_name': 'Mac/clang-mac-runtime-library-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': '50a618246d7fd23645640fc50ccb0d4684c1895def378b90963a289f920ea88a',
+        'size_bytes': 879508,
+        'generation': 1726118377526206,
         'condition': 'checkout_mac and not host_os == "mac"',
       },
       {
-        'object_name': 'Mac/clang-tidy-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': 'd02b9a39491d2ff3a291778de87b2a72f5885d01a8093518cb5612b97353ac2a',
-        'size_bytes': 12805336,
-        'generation': 1723267016822831,
+        'object_name': 'Mac/clang-tidy-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': '2c325505ea43a8d8a14770890d62aba9af37b397e3063e3fb622cfd51d4706f6',
+        'size_bytes': 12884412,
+        'generation': 1726118361811669,
         'condition': 'host_os == "mac" and host_cpu == "x64" and checkout_clang_tidy',
       },
       {
-        'object_name': 'Mac/clangd-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': 'ce09141be75350f0f932fcda14d8b906d2869674d79ef5a10a3e60a9a8d3ccee',
-        'size_bytes': 26372428,
-        'generation': 1723267016957114,
+        'object_name': 'Mac/clangd-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': '1c1a0965cc95053dec8c649a7b3bb627ad2300ad230eed97b52ee70a8a8edd85',
+        'size_bytes': 26553148,
+        'generation': 1726118361978146,
         'condition': 'host_os == "mac" and host_cpu == "x64" and checkout_clangd',
       },
       {
-        'object_name': 'Mac/llvm-code-coverage-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': 'c0ac62ff01f1ce6e5d30134cb0f83fd8eabf858dfb33d07209a6b40d8f1ae789',
-        'size_bytes': 2248664,
-        'generation': 1723267017743181,
+        'object_name': 'Mac/llvm-code-coverage-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': '9259bd27c19ca9662c70ffc2b42c10afb584e7c584470d6e656e164643614b50',
+        'size_bytes': 2247028,
+        'generation': 1726118362377026,
        'condition': 'host_os == "mac" and host_cpu == "x64" and checkout_clang_coverage_tools',
       },
       {
-        'object_name': 'Mac_arm64/clang-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': 'f4a384ecdaa051ba4786c9e6c46f9101a751b1a5c5ad4bf0d217c4ba71e0ff30',
-        'size_bytes': 42737720,
-        'generation': 1723267036349494,
+        'object_name': 'Mac_arm64/clang-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': 'e87eb4caa95c98ef36c40aec5b8cd07a6c4fb8959d3c7e7d452f6ed860c8c2bf',
+        'size_bytes': 41352592,
+        'generation': 1726118378868177,
        'condition': 'host_os == "mac" and host_cpu == "arm64"',
       },
       {
-        'object_name': 'Mac_arm64/clang-tidy-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': '2769378fd2891af945f8d29b5eaf05b4ab0450b2d086539df1c78c684e64be14',
-        'size_bytes': 11740656,
-        'generation': 1723267036269250,
+        'object_name': 'Mac_arm64/clang-tidy-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': 'fcf8f25a8461db90686d0455bd8f195750a2cdc425cb03c48debe4d3e8bb9299',
+        'size_bytes': 11476316,
+        'generation': 1726118379144738,
        'condition': 'host_os == "mac" and host_cpu == "arm64" and checkout_clang_tidy',
       },
       {
-        'object_name': 'Mac_arm64/clangd-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': 'a35dfb99903a487e06d685712e461125978c76ba8eaa99e9f5742e63d3e67444',
-        'size_bytes': 23470088,
-        'generation': 1723267036383208,
+        'object_name': 'Mac_arm64/clangd-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': 'cca4049d3362528511ebc603db05189c9bef406a80ae4fead22b1db0a4de83e6',
+        'size_bytes': 22679568,
+        'generation': 1726118379283835,
        'condition': 'host_os == "mac" and host_cpu == "arm64" and checkout_clangd',
       },
       {
-        'object_name': 'Mac_arm64/llvm-code-coverage-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': '574a0d7c560aae964d8bdcd85f0145077b1324e79eee4a3dd1636ab7aefc59e5',
-        'size_bytes': 2010540,
-        'generation': 1723267036758678,
+        'object_name': 'Mac_arm64/llvm-code-coverage-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': '001e8582de4bc7c434f321b5bacd2b0b45e553f3134cb7d78e1a4f62e2b97ac6',
+        'size_bytes': 1969844,
+        'generation': 1726118379757221,
        'condition': 'host_os == "mac" and host_cpu == "arm64" and checkout_clang_coverage_tools',
       },
       {
-        'object_name': 'Win/clang-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': 'e255af29c29a741cf39c3000b612466ff805a99766d26ac86ec2afcb4ca0c922',
-        'size_bytes': 44019080,
-        'generation': 1723267056892790,
+        'object_name': 'Win/clang-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': 'cb416511e6379b7fd3f362f637ebb8a28957d0d2ff2dc6e2d9f4484a381f2885',
+        'size_bytes': 44655000,
+        'generation': 1726118399720986,
        'condition': 'host_os == "win"',
       },
       {
-        'object_name': 'Win/clang-tidy-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': 'a86aa579fc90a053b94874a8c79daeb7f3bbd77107fb460c6d0a7959cefb7e61',
-        'size_bytes': 13055812,
-        'generation': 1723267057185720,
+        'object_name': 'Win/clang-tidy-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': '15af2ae61dabdfe0ddbdd48f467b996855ba51d0ef633c5c0ac3c74cdc0d8f2c',
+        'size_bytes': 13114928,
+        'generation': 1726118400057660,
        'condition': 'host_os == "win" and checkout_clang_tidy',
       },
       {
-        'object_name': 'Win/clang-win-runtime-library-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': '6d89f358769ef50d008194e0ab9e8d4d80b8d6ffc0095ed44aef925d900aa743',
-        'size_bytes': 2873772,
-        'generation': 1723267074433695,
+        'object_name': 'Win/clang-win-runtime-library-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': '81d66840357d83ca1a2c85ebca5259a7a86d9e99c77b37727fbaee87ccacf675',
+        'size_bytes': 2897452,
+        'generation': 1726118416326356,
        'condition': 'checkout_win and not host_os == "win"',
       },
       {
-        'object_name': 'Win/clangd-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': 'e2b69a726f794005a333ae66a0ef5c0258872a19bc4506eff23f23fdee75ba5c',
-        'size_bytes': 25053884,
-        'generation': 1723267057351794,
+        'object_name': 'Win/clangd-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': '1304718c221543b16465a4b6108572fa1ba9f2b75c4e4398bdb01fb983428c10',
+        'size_bytes': 25169688,
+        'generation': 1726118400193324,
        'condition': 'host_os == "win" and checkout_clangd',
       },
       {
-        'object_name': 'Win/llvm-code-coverage-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': 'e68e7c7ecbc7b2fc4f7ec3e97565a7f12bab1d195d22bc76959f3a88b1462ac1',
-        'size_bytes': 2376020,
-        'generation': 1723267057803475,
+        'object_name': 'Win/llvm-code-coverage-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': 'e01b8fbca72fc1cca6988e359d9a0eea8fa5ccbaff8d41deffd62970c7f4fed5',
+        'size_bytes': 2382756,
+        'generation': 1726118400642803,
        'condition': 'host_os == "win" and checkout_clang_coverage_tools',
       },
       {
-        'object_name': 'Win/llvmobjdump-llvmorg-20-init-1009-g7088a5ed-10.tar.xz',
-        'sha256sum': '801714415847b8efea7252b1072b8647f92ba0e946480b3db9b156900e42ab55',
-        'size_bytes': 5392812,
-        'generation': 1723267057506056,
+        'object_name': 'Win/llvmobjdump-llvmorg-20-init-3847-g69c43468-28.tar.xz',
+        'sha256sum': '2f837a21d910ad748666282d0c1da15a438d9aae4fc1bc85dab7313da6dfeb7b',
+        'size_bytes': 5439736,
+        'generation': 1726118400404099,
        'condition': 'checkout_linux or checkout_mac or checkout_android and host_os == "win"',
       },
     ],
@@ -471,7 +473,7 @@
   'third_party/perfetto':
     Var('android_url') + '/platform/external/perfetto.git' + '@' + '6fc824d618d2f06b5d9cd8655ba0419b6b3b366e',
   'third_party/protobuf':
-    Var('chromium_url') + '/chromium/src/third_party/protobuf.git' + '@' + 'da2fe725b80ac0ba646fbf77d0ce5b4ac236f823',
+    Var('chromium_url') + '/chromium/src/third_party/protobuf.git' + '@' + '37bbf271c62d6c01c58c66505b17c7dcf086371a',
   'third_party/re2/src':
     Var('chromium_url') + '/external/github.com/google/re2.git' + '@' + '6dcd83d60f7944926bfd308cc13979fc53dd69ca',
   'third_party/requests': {
@@ -489,9 +491,9 @@
     'condition': 'not build_with_chromium and host_cpu != "s390" and host_os != "zos" and host_cpu != "ppc"',
   },
   'third_party/zlib':
-    Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'd3aea2341cdeaf7e717bc257a59aa7a9407d318a',
+    Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'fa9f14143c7938e6a1d18443900efee7a1e5e669',
   'tools/clang':
-    Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '63b7be17f8981d716ea9a0d65bb04654d79548a8',
+    Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'e47c184ec52d50c7aa2a99cd3bd26ebcafaa94b9',
   'tools/luci-go': {
     'packages': [
       {
@@ -509,7 +511,7 @@
   'tools/protoc_wrapper':
     Var('chromium_url') + '/chromium/src/tools/protoc_wrapper.git' + '@' + 'dbcbea90c20ae1ece442d8ef64e61c7b10e2b013',
   'third_party/abseil-cpp': {
-    'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + 'ed3733b91e472a1e7a641c1f0c1e6c0ea698e958',
+    'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + '1f7e21e34c3807a8841c9562cfc8b3213eb50bfc',
     'condition': 'not build_with_chromium',
   },
   'third_party/zoslib': {
@@ -524,6 +526,7 @@ include_rules = [
   '+unicode',
   '+third_party/fdlibm',
   '+third_party/ittapi/include',
+  '+third_party/fast_float/src/include',
   '+third_party/fp16/src/include',
   '+third_party/v8/codegen',
   '+third_party/fuzztest',

deps/v8/WATCHLISTS vendored

@ -129,7 +129,6 @@
'verwaest+watch@chromium.org', 'verwaest+watch@chromium.org',
], ],
'feature_shipping_status': [ 'feature_shipping_status': [
'hablich@chromium.org',
'saelo+watch@chromium.org', 'saelo+watch@chromium.org',
], ],
'heap_changes': [ 'heap_changes': [


@ -561,6 +561,7 @@ def build_config_content(cpu, icu):
("is_android", "false"), ("is_android", "false"),
("is_ios", "false"), ("is_ios", "false"),
("js_shared_memory", "false"), ("js_shared_memory", "false"),
("leaptiering", "true"),
("lite_mode", "false"), ("lite_mode", "false"),
("local_off_stack_check", "false"), ("local_off_stack_check", "false"),
("memory_corruption_api", "false"), ("memory_corruption_api", "false"),
@ -578,7 +579,6 @@ def build_config_content(cpu, icu):
("single_generation", "false"), ("single_generation", "false"),
("slow_dchecks", "false"), ("slow_dchecks", "false"),
("target_cpu", cpu), ("target_cpu", cpu),
("third_party_heap", "false"),
("tsan", "false"), ("tsan", "false"),
("ubsan", "false"), ("ubsan", "false"),
("use_sanitizer", "false"), ("use_sanitizer", "false"),

deps/v8/docs/OWNERS vendored

@ -1,2 +1 @@
hablich@chromium.org hpayer@chromium.org
mathias@chromium.org

deps/v8/gni/v8.gni vendored

@ -174,6 +174,11 @@ declare_args() {
# Emit CET IBT landing pad instructions in JIT generated code (experimental). # Emit CET IBT landing pad instructions in JIT generated code (experimental).
v8_enable_cet_ibt = false v8_enable_cet_ibt = false
# Use memory sealing to protect various global memory mappings for CFI
# (experimental).
# TODO(sroettger): enable by default once we have bot support for testing.
v8_enable_memory_sealing = false
} }
if (v8_use_external_startup_data == "") { if (v8_use_external_startup_data == "") {

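Note: the new v8_enable_memory_sealing arg gates the PageAllocator::SealPages hook added later in this diff. A minimal sketch of an embedder-side implementation on Linux via mseal(2); the syscall name and its availability are assumptions here, not part of this patch:

#include <cstddef>
#include <sys/syscall.h>
#include <unistd.h>

// Hypothetical override body for PageAllocator::SealPages. mseal(2) landed
// in Linux 6.10, so SYS_mseal may be missing from older headers.
bool SealPagesImpl(void* address, size_t length) {
#ifdef SYS_mseal
  return syscall(SYS_mseal, address, length, 0UL) == 0;
#else
  return false;  // matches the hook's default: sealing unsupported
#endif
}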

@ -19,5 +19,4 @@ per-file v8-version.h=v8-ci-autoroll-builder@chops-service-accounts.iam.gservice
# For branch updates: # For branch updates:
per-file v8-version.h=file:../INFRA_OWNERS per-file v8-version.h=file:../INFRA_OWNERS
per-file v8-version.h=hablich@chromium.org
per-file v8-version.h=vahl@chromium.org per-file v8-version.h=vahl@chromium.org


@ -37,11 +37,12 @@ class V8_EXPORT DefaultPlatform : public Platform {
return v8_platform_->MonotonicallyIncreasingTime(); return v8_platform_->MonotonicallyIncreasingTime();
} }
std::shared_ptr<cppgc::TaskRunner> GetForegroundTaskRunner() override { std::shared_ptr<cppgc::TaskRunner> GetForegroundTaskRunner(
TaskPriority priority) override {
// V8's default platform creates a new task runner when passed the // V8's default platform creates a new task runner when passed the
// `v8::Isolate` pointer the first time. For non-default platforms this will // `v8::Isolate` pointer the first time. For non-default platforms this will
// require getting the appropriate task runner. // require getting the appropriate task runner.
return v8_platform_->GetForegroundTaskRunner(kNoIsolate); return v8_platform_->GetForegroundTaskRunner(kNoIsolate, priority);
} }
std::unique_ptr<cppgc::JobHandle> PostJob( std::unique_ptr<cppgc::JobHandle> PostJob(


@ -114,7 +114,7 @@ class HeapConsistency final {
* has not yet been processed. * has not yet been processed.
* *
* \param params The parameters retrieved from `GetWriteBarrierType()`. * \param params The parameters retrieved from `GetWriteBarrierType()`.
* \param object The pointer to the object. May be an interior pointer to a * \param object The pointer to the object. May be an interior pointer to
* an interface of the actual object. * an interface of the actual object.
*/ */
static V8_INLINE void DijkstraWriteBarrier(const WriteBarrierParams& params, static V8_INLINE void DijkstraWriteBarrier(const WriteBarrierParams& params,


@ -52,6 +52,15 @@ class V8_EXPORT Platform {
* Foreground task runner that should be used by a Heap. * Foreground task runner that should be used by a Heap.
*/ */
virtual std::shared_ptr<TaskRunner> GetForegroundTaskRunner() { virtual std::shared_ptr<TaskRunner> GetForegroundTaskRunner() {
return GetForegroundTaskRunner(TaskPriority::kUserBlocking);
}
/**
* Returns a TaskRunner with a specific |priority| which can be used to post a
* task on the foreground thread.
*/
virtual std::shared_ptr<TaskRunner> GetForegroundTaskRunner(
TaskPriority priority) {
return nullptr; return nullptr;
} }
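Note: this mirrors the v8::Platform change later in the diff; the parameterless GetForegroundTaskRunner() now forwards kUserBlocking to a priority-taking overload. A hedged sketch of a standalone-cppgc platform overriding only the new overload (the runner selection is illustrative, not patch code):

#include <memory>
#include "cppgc/platform.h"

class ExamplePlatform : public cppgc::Platform {
 public:
  cppgc::PageAllocator* GetPageAllocator() override { return nullptr; }
  double MonotonicallyIncreasingTime() override { return 0.0; }
  std::shared_ptr<cppgc::TaskRunner> GetForegroundTaskRunner(
      cppgc::TaskPriority priority) override {
    // A real embedder would pick a runner matching |priority|; one shared
    // runner keeps the sketch minimal.
    return runner_;
  }

 private:
  std::shared_ptr<cppgc::TaskRunner> runner_;
};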


@ -254,15 +254,7 @@ using AddCrashKeyCallback = void (*)(CrashKeyId id, const std::string& value);
using BeforeCallEnteredCallback = void (*)(Isolate*); using BeforeCallEnteredCallback = void (*)(Isolate*);
using CallCompletedCallback = void (*)(Isolate*); using CallCompletedCallback = void (*)(Isolate*);
// --- AllowCodeGenerationFromStrings callbacks --- // --- Modify Code Generation From Strings Callback ---
/**
* Callback to check if code generation from strings is allowed. See
* Context::AllowCodeGenerationFromStrings.
*/
using AllowCodeGenerationFromStringsCallback = bool (*)(Local<Context> context,
Local<String> source);
struct ModifyCodeGenerationFromStringsResult { struct ModifyCodeGenerationFromStringsResult {
// If true, proceed with the codegen algorithm. Otherwise, block it. // If true, proceed with the codegen algorithm. Otherwise, block it.
bool codegen_allowed = false; bool codegen_allowed = false;
@ -272,22 +264,6 @@ struct ModifyCodeGenerationFromStringsResult {
MaybeLocal<String> modified_source; MaybeLocal<String> modified_source;
}; };
/**
* Access type specification.
*/
enum AccessType {
ACCESS_GET,
ACCESS_SET,
ACCESS_HAS,
ACCESS_DELETE,
ACCESS_KEYS
};
// --- Failed Access Check Callback ---
using FailedAccessCheckCallback = void (*)(Local<Object> target,
AccessType type, Local<Value> data);
/** /**
* Callback to check if codegen is allowed from a source object, and convert * Callback to check if codegen is allowed from a source object, and convert
* the source to string if necessary. See: ModifyCodeGenerationFromStrings. * the source to string if necessary. See: ModifyCodeGenerationFromStrings.
@ -300,6 +276,22 @@ using ModifyCodeGenerationFromStringsCallback2 =
Local<Value> source, Local<Value> source,
bool is_code_like); bool is_code_like);
// --- Failed Access Check Callback ---
/**
* Access type specification.
*/
enum AccessType {
ACCESS_GET,
ACCESS_SET,
ACCESS_HAS,
ACCESS_DELETE,
ACCESS_KEYS
};
using FailedAccessCheckCallback = void (*)(Local<Object> target,
AccessType type, Local<Value> data);
// --- WebAssembly compilation callbacks --- // --- WebAssembly compilation callbacks ---
using ExtensionCallback = bool (*)(const FunctionCallbackInfo<Value>&); using ExtensionCallback = bool (*)(const FunctionCallbackInfo<Value>&);
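Note: with AllowCodeGenerationFromStringsCallback gone, ModifyCodeGenerationFromStringsCallback2 is the remaining hook for gating eval and the Function constructor. A sketch built from the struct and signature shown above (the Isolate setter named in the trailing comment is assumed, not shown in this hunk):

#include "v8.h"

// Allow dynamic code only for "code like" objects, converting them to a
// string for compilation; plain strings stay blocked.
v8::ModifyCodeGenerationFromStringsResult AllowOnlyCodeLike(
    v8::Local<v8::Context> context, v8::Local<v8::Value> source,
    bool is_code_like) {
  v8::ModifyCodeGenerationFromStringsResult result;
  result.codegen_allowed = is_code_like;
  v8::Local<v8::String> str;
  if (is_code_like && source->ToString(context).ToLocal(&str)) {
    result.modified_source = str;
  }
  return result;
}
// Install via isolate->SetModifyCodeGenerationFromStringsCallback(AllowOnlyCodeLike);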


@ -320,7 +320,7 @@ class V8_EXPORT Context : public Data {
* 'Function' constructor are used an exception will be thrown. * 'Function' constructor are used an exception will be thrown.
* *
* If code generation from strings is not allowed the * If code generation from strings is not allowed the
* V8::AllowCodeGenerationFromStrings callback will be invoked if * V8::ModifyCodeGenerationFromStringsCallback callback will be invoked if
* set before blocking the call to 'eval' or the 'Function' * set before blocking the call to 'eval' or the 'Function'
* constructor. If that callback returns true, the call will be * constructor. If that callback returns true, the call will be
* allowed, otherwise an exception will be thrown. If no callback is * allowed, otherwise an exception will be thrown. If no callback is


@ -18,6 +18,22 @@
#include "v8config.h" // NOLINT(build/include_directory) #include "v8config.h" // NOLINT(build/include_directory)
// TODO(pkasting): Use <compare>/spaceship unconditionally after dropping
// support for old libstdc++ versions.
#if __has_include(<version>)
#include <version>
#endif
#if defined(__cpp_lib_three_way_comparison) && \
__cpp_lib_three_way_comparison >= 201711L && \
defined(__cpp_lib_concepts) && __cpp_lib_concepts >= 202002L
#include <compare>
#include <concepts>
#define V8_HAVE_SPACESHIP_OPERATOR 1
#else
#define V8_HAVE_SPACESHIP_OPERATOR 0
#endif
namespace v8 { namespace v8 {
class Array; class Array;
@ -295,7 +311,8 @@ constexpr size_t kExternalPointerTableReservationSize = 256 * MB;
// The external pointer table indices stored in HeapObjects as external // The external pointer table indices stored in HeapObjects as external
// pointers are shifted to the left by this amount to guarantee that they are // pointers are shifted to the left by this amount to guarantee that they are
// smaller than the maximum table size. // smaller than the maximum table size even after the C++ compiler multiplies
// them by 8 to be used as indexes into a table of 64 bit pointers.
constexpr uint32_t kExternalPointerIndexShift = 7; constexpr uint32_t kExternalPointerIndexShift = 7;
#else #else
constexpr size_t kExternalPointerTableReservationSize = 512 * MB; constexpr size_t kExternalPointerTableReservationSize = 512 * MB;
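The expanded comment checks out numerically: a 256 MB reservation of 8-byte entries has 2^25 slots, and a 32-bit handle shifted right by 7 can name at most 2^25 of them. As a worked check (a sketch using the shift-7 branch above):

#include <cstdint>

constexpr uint64_t kReservation = 256ull * 1024 * 1024;  // 256 MB table
constexpr uint64_t kEntrySize = 8;                       // 64-bit pointers
constexpr uint32_t kIndexShift = 7;
constexpr uint64_t kMaxIndex = uint64_t{0xFFFFFFFF} >> kIndexShift;
static_assert((kMaxIndex + 1) * kEntrySize == kReservation,
              "every decodable 32-bit handle stays inside the reservation");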
@ -1351,11 +1368,11 @@ class V8_EXPORT StrongRootAllocatorBase {
public: public:
Heap* heap() const { return heap_; } Heap* heap() const { return heap_; }
bool operator==(const StrongRootAllocatorBase& other) const { friend bool operator==(const StrongRootAllocatorBase& a,
return heap_ == other.heap_; const StrongRootAllocatorBase& b) {
} // TODO(pkasting): Replace this body with `= default` after dropping support
bool operator!=(const StrongRootAllocatorBase& other) const { // for old gcc versions.
return heap_ != other.heap_; return a.heap_ == b.heap_;
} }
protected: protected:
@ -1390,22 +1407,60 @@ class StrongRootAllocator : private std::allocator<T> {
using std::allocator<T>::deallocate; using std::allocator<T>::deallocate;
}; };
// TODO(pkasting): Replace with `requires` clauses after dropping support for
// old gcc versions.
template <typename Iterator, typename = void>
inline constexpr bool kHaveIteratorConcept = false;
template <typename Iterator>
inline constexpr bool kHaveIteratorConcept<
Iterator, std::void_t<typename Iterator::iterator_concept>> = true;
template <typename Iterator, typename = void>
inline constexpr bool kHaveIteratorCategory = false;
template <typename Iterator>
inline constexpr bool kHaveIteratorCategory<
Iterator, std::void_t<typename Iterator::iterator_category>> = true;
// Helper struct that contains an `iterator_concept` type alias only when either
// `Iterator` or `std::iterator_traits<Iterator>` do.
// Default: no alias.
template <typename Iterator, typename = void>
struct MaybeDefineIteratorConcept {};
// Use `Iterator::iterator_concept` if available.
template <typename Iterator>
struct MaybeDefineIteratorConcept<
Iterator, std::enable_if_t<kHaveIteratorConcept<Iterator>>> {
using iterator_concept = Iterator::iterator_concept;
};
// Otherwise fall back to `std::iterator_traits<Iterator>` if possible.
template <typename Iterator>
struct MaybeDefineIteratorConcept<
Iterator, std::enable_if_t<kHaveIteratorCategory<Iterator> &&
!kHaveIteratorConcept<Iterator>>> {
// There seems to be no feature-test macro covering this, so use the
// presence of `<ranges>` as a crude proxy, since it was added to the
// standard as part of the Ranges papers.
// TODO(pkasting): Add this unconditionally after dropping support for old
// libstdc++ versions.
#if __has_include(<ranges>)
using iterator_concept = std::iterator_traits<Iterator>::iterator_concept;
#endif
};
// A class of iterators that wrap some different iterator type. // A class of iterators that wrap some different iterator type.
// If specified, ElementType is the type of element accessed by the wrapper // If specified, ElementType is the type of element accessed by the wrapper
// iterator; in this case, the actual reference and pointer types of Iterator // iterator; in this case, the actual reference and pointer types of Iterator
// must be convertible to ElementType& and ElementType*, respectively. // must be convertible to ElementType& and ElementType*, respectively.
template <typename Iterator, typename ElementType = void> template <typename Iterator, typename ElementType = void>
class WrappedIterator { class WrappedIterator : public MaybeDefineIteratorConcept<Iterator> {
public: public:
static_assert( static_assert(
!std::is_void_v<ElementType> || std::is_void_v<ElementType> ||
(std::is_convertible_v<typename std::iterator_traits<Iterator>::pointer, (std::is_convertible_v<typename std::iterator_traits<Iterator>::pointer,
ElementType*> && std::add_pointer_t<ElementType>> &&
std::is_convertible_v<typename std::iterator_traits<Iterator>::reference, std::is_convertible_v<typename std::iterator_traits<Iterator>::reference,
ElementType&>)); std::add_lvalue_reference_t<ElementType>>));
using iterator_category =
typename std::iterator_traits<Iterator>::iterator_category;
using difference_type = using difference_type =
typename std::iterator_traits<Iterator>::difference_type; typename std::iterator_traits<Iterator>::difference_type;
using value_type = using value_type =
@ -1415,24 +1470,96 @@ class WrappedIterator {
using pointer = using pointer =
std::conditional_t<std::is_void_v<ElementType>, std::conditional_t<std::is_void_v<ElementType>,
typename std::iterator_traits<Iterator>::pointer, typename std::iterator_traits<Iterator>::pointer,
ElementType*>; std::add_pointer_t<ElementType>>;
using reference = using reference =
std::conditional_t<std::is_void_v<ElementType>, std::conditional_t<std::is_void_v<ElementType>,
typename std::iterator_traits<Iterator>::reference, typename std::iterator_traits<Iterator>::reference,
ElementType&>; std::add_lvalue_reference_t<ElementType>>;
using iterator_category =
typename std::iterator_traits<Iterator>::iterator_category;
constexpr WrappedIterator() noexcept : it_() {} constexpr WrappedIterator() noexcept = default;
constexpr explicit WrappedIterator(Iterator it) noexcept : it_(it) {} constexpr explicit WrappedIterator(Iterator it) noexcept : it_(it) {}
// TODO(pkasting): Switch to `requires` and concepts after dropping support
// for old gcc and libstdc++ versions.
template <typename OtherIterator, typename OtherElementType, template <typename OtherIterator, typename OtherElementType,
std::enable_if_t<std::is_convertible_v<OtherIterator, Iterator>, typename = std::enable_if_t<
bool> = true> std::is_convertible_v<OtherIterator, Iterator>>>
constexpr WrappedIterator( constexpr WrappedIterator(
const WrappedIterator<OtherIterator, OtherElementType>& it) noexcept const WrappedIterator<OtherIterator, OtherElementType>& other) noexcept
: it_(it.base()) {} : it_(other.base()) {}
constexpr reference operator*() const noexcept { return *it_; } [[nodiscard]] constexpr reference operator*() const noexcept { return *it_; }
constexpr pointer operator->() const noexcept { return it_.operator->(); } [[nodiscard]] constexpr pointer operator->() const noexcept {
return it_.operator->();
}
template <typename OtherIterator, typename OtherElementType>
[[nodiscard]] constexpr bool operator==(
const WrappedIterator<OtherIterator, OtherElementType>& other)
const noexcept {
return it_ == other.base();
}
#if V8_HAVE_SPACESHIP_OPERATOR
template <typename OtherIterator, typename OtherElementType>
[[nodiscard]] constexpr auto operator<=>(
const WrappedIterator<OtherIterator, OtherElementType>& other)
const noexcept {
if constexpr (std::three_way_comparable_with<Iterator, OtherIterator>) {
return it_ <=> other.base();
} else if constexpr (std::totally_ordered_with<Iterator, OtherIterator>) {
if (it_ < other.base()) {
return std::strong_ordering::less;
}
return (it_ > other.base()) ? std::strong_ordering::greater
: std::strong_ordering::equal;
} else {
if (it_ < other.base()) {
return std::partial_ordering::less;
}
if (other.base() < it_) {
return std::partial_ordering::greater;
}
return (it_ == other.base()) ? std::partial_ordering::equivalent
: std::partial_ordering::unordered;
}
}
#else
// Assume that if spaceship isn't present, operator rewriting might not be
// either.
template <typename OtherIterator, typename OtherElementType>
[[nodiscard]] constexpr bool operator!=(
const WrappedIterator<OtherIterator, OtherElementType>& other)
const noexcept {
return it_ != other.base();
}
template <typename OtherIterator, typename OtherElementType>
[[nodiscard]] constexpr bool operator<(
const WrappedIterator<OtherIterator, OtherElementType>& other)
const noexcept {
return it_ < other.base();
}
template <typename OtherIterator, typename OtherElementType>
[[nodiscard]] constexpr bool operator<=(
const WrappedIterator<OtherIterator, OtherElementType>& other)
const noexcept {
return it_ <= other.base();
}
template <typename OtherIterator, typename OtherElementType>
[[nodiscard]] constexpr bool operator>(
const WrappedIterator<OtherIterator, OtherElementType>& other)
const noexcept {
return it_ > other.base();
}
template <typename OtherIterator, typename OtherElementType>
[[nodiscard]] constexpr bool operator>=(
const WrappedIterator<OtherIterator, OtherElementType>& other)
const noexcept {
return it_ >= other.base();
}
#endif
constexpr WrappedIterator& operator++() noexcept { constexpr WrappedIterator& operator++() noexcept {
++it_; ++it_;
@ -1453,101 +1580,44 @@ class WrappedIterator {
--(*this); --(*this);
return result; return result;
} }
constexpr WrappedIterator operator+(difference_type n) const noexcept { [[nodiscard]] constexpr WrappedIterator operator+(
difference_type n) const noexcept {
WrappedIterator result(*this); WrappedIterator result(*this);
result += n; result += n;
return result; return result;
} }
[[nodiscard]] friend constexpr WrappedIterator operator+(
difference_type n, const WrappedIterator& x) noexcept {
return x + n;
}
constexpr WrappedIterator& operator+=(difference_type n) noexcept { constexpr WrappedIterator& operator+=(difference_type n) noexcept {
it_ += n; it_ += n;
return *this; return *this;
} }
constexpr WrappedIterator operator-(difference_type n) const noexcept { [[nodiscard]] constexpr WrappedIterator operator-(
return *this + (-n); difference_type n) const noexcept {
return *this + -n;
} }
constexpr WrappedIterator& operator-=(difference_type n) noexcept { constexpr WrappedIterator& operator-=(difference_type n) noexcept {
*this += -n; return *this += -n;
return *this;
} }
constexpr reference operator[](difference_type n) const noexcept { template <typename OtherIterator, typename OtherElementType>
[[nodiscard]] constexpr auto operator-(
const WrappedIterator<OtherIterator, OtherElementType>& other)
const noexcept {
return it_ - other.base();
}
[[nodiscard]] constexpr reference operator[](
difference_type n) const noexcept {
return it_[n]; return it_[n];
} }
constexpr Iterator base() const noexcept { return it_; } [[nodiscard]] constexpr const Iterator& base() const noexcept { return it_; }
private:
template <typename OtherIterator, typename OtherElementType>
friend class WrappedIterator;
private: private:
Iterator it_; Iterator it_;
}; };
template <typename Iterator, typename ElementType, typename OtherIterator,
typename OtherElementType>
constexpr bool operator==(
const WrappedIterator<Iterator, ElementType>& x,
const WrappedIterator<OtherIterator, OtherElementType>& y) noexcept {
return x.base() == y.base();
}
template <typename Iterator, typename ElementType, typename OtherIterator,
typename OtherElementType>
constexpr bool operator<(
const WrappedIterator<Iterator, ElementType>& x,
const WrappedIterator<OtherIterator, OtherElementType>& y) noexcept {
return x.base() < y.base();
}
template <typename Iterator, typename ElementType, typename OtherIterator,
typename OtherElementType>
constexpr bool operator!=(
const WrappedIterator<Iterator, ElementType>& x,
const WrappedIterator<OtherIterator, OtherElementType>& y) noexcept {
return !(x == y);
}
template <typename Iterator, typename ElementType, typename OtherIterator,
typename OtherElementType>
constexpr bool operator>(
const WrappedIterator<Iterator, ElementType>& x,
const WrappedIterator<OtherIterator, OtherElementType>& y) noexcept {
return y < x;
}
template <typename Iterator, typename ElementType, typename OtherIterator,
typename OtherElementType>
constexpr bool operator>=(
const WrappedIterator<Iterator, ElementType>& x,
const WrappedIterator<OtherIterator, OtherElementType>& y) noexcept {
return !(x < y);
}
template <typename Iterator, typename ElementType, typename OtherIterator,
typename OtherElementType>
constexpr bool operator<=(
const WrappedIterator<Iterator, ElementType>& x,
const WrappedIterator<OtherIterator, OtherElementType>& y) noexcept {
return !(y < x);
}
template <typename Iterator, typename ElementType, typename OtherIterator,
typename OtherElementType>
constexpr auto operator-(
const WrappedIterator<Iterator, ElementType>& x,
const WrappedIterator<OtherIterator, OtherElementType>& y) noexcept
-> decltype(x.base() - y.base()) {
return x.base() - y.base();
}
template <typename Iterator, typename ElementType>
constexpr WrappedIterator<Iterator> operator+(
typename WrappedIterator<Iterator, ElementType>::difference_type n,
const WrappedIterator<Iterator, ElementType>& x) noexcept {
x += n;
return x;
}
// Helper functions about values contained in handles. // Helper functions about values contained in handles.
// A value is either an indirect pointer or a direct pointer, depending on // A value is either an indirect pointer or a direct pointer, depending on
// whether direct local support is enabled. // whether direct local support is enabled.
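The rewritten comparisons above prefer <=> when both iterator types support it and otherwise synthesize the strongest ordering available. The same dispatch distilled out of WrappedIterator (a C++20 sketch, not patch code):

#include <compare>
#include <concepts>

template <typename A, typename B>
constexpr auto Compare(const A& a, const B& b) {
  if constexpr (std::three_way_comparable_with<A, B>) {
    return a <=> b;  // native three-way comparison
  } else if constexpr (std::totally_ordered_with<A, B>) {
    if (a < b) return std::strong_ordering::less;
    return (a > b) ? std::strong_ordering::greater
                   : std::strong_ordering::equal;
  } else {
    if (a < b) return std::partial_ordering::less;
    if (b < a) return std::partial_ordering::greater;
    return (a == b) ? std::partial_ordering::equivalent
                    : std::partial_ordering::unordered;
  }
}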


@ -548,6 +548,7 @@ class V8_EXPORT Isolate {
kDocumentAllLegacyCall = 141, kDocumentAllLegacyCall = 141,
kDocumentAllLegacyConstruct = 142, kDocumentAllLegacyConstruct = 142,
kConsoleContext = 143, kConsoleContext = 143,
kWasmImportedStringsUtf8 = 144,
// If you add new values here, you'll also need to update Chromium's: // If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
@ -1746,7 +1747,7 @@ class V8_EXPORT Isolate {
friend class PersistentValueMapBase; friend class PersistentValueMapBase;
internal::Address* GetDataFromSnapshotOnce(size_t index); internal::Address* GetDataFromSnapshotOnce(size_t index);
void ReportExternalAllocationLimitReached(); void HandleExternalMemoryInterrupt();
}; };
void Isolate::SetData(uint32_t slot, void* data) { void Isolate::SetData(uint32_t slot, void* data) {


@ -13,6 +13,40 @@
#include "v8config.h" // NOLINT(build/include_directory) #include "v8config.h" // NOLINT(build/include_directory)
// TODO(pkasting): Use <compare>/spaceship unconditionally after dropping
// support for old libstdc++ versions.
#if __has_include(<version>)
#include <version>
#endif
#if defined(__cpp_lib_three_way_comparison) && \
__cpp_lib_three_way_comparison >= 201711L
#define V8_HAVE_SPACESHIP_OPERATOR 1
#else
#define V8_HAVE_SPACESHIP_OPERATOR 0
#endif
// TODO(pkasting): Make this block unconditional after dropping support for old
// libstdc++ versions.
#if __has_include(<ranges>)
#include <ranges>
namespace v8 {
template <typename T>
class V8_EXPORT MemorySpan;
} // namespace v8
// Mark `MemorySpan` as satisfying the `view` and `borrowed_range` concepts.
// This should be done before the definition of `MemorySpan`, so that any
// inlined calls to range functionality use the correct specializations.
template <typename T>
inline constexpr bool std::ranges::enable_view<v8::MemorySpan<T>> = true;
template <typename T>
inline constexpr bool std::ranges::enable_borrowed_range<v8::MemorySpan<T>> =
true;
#endif
namespace v8 { namespace v8 {
/** /**
@ -53,13 +87,13 @@ class V8_EXPORT MemorySpan {
is_compatible_iterator<It>::value; is_compatible_iterator<It>::value;
template <typename U> template <typename U>
static constexpr U* to_address(U* p) noexcept { [[nodiscard]] static constexpr U* to_address(U* p) noexcept {
return p; return p;
} }
template <typename It, template <typename It,
typename = std::void_t<decltype(std::declval<It&>().operator->())>> typename = std::void_t<decltype(std::declval<It&>().operator->())>>
static constexpr auto to_address(It it) noexcept { [[nodiscard]] static constexpr auto to_address(It it) noexcept {
return it.operator->(); return it.operator->();
} }
@ -108,50 +142,139 @@ class V8_EXPORT MemorySpan {
: data_(a.data()), size_{N} {} : data_(a.data()), size_{N} {}
/** Returns a pointer to the beginning of the buffer. */ /** Returns a pointer to the beginning of the buffer. */
constexpr T* data() const { return data_; } [[nodiscard]] constexpr T* data() const { return data_; }
/** Returns the number of elements that the buffer holds. */ /** Returns the number of elements that the buffer holds. */
constexpr size_t size() const { return size_; } [[nodiscard]] constexpr size_t size() const { return size_; }
constexpr T& operator[](size_t i) const { return data_[i]; } [[nodiscard]] constexpr T& operator[](size_t i) const { return data_[i]; }
/** Returns true if the buffer is empty. */ /** Returns true if the buffer is empty. */
constexpr bool empty() const { return size() == 0; } [[nodiscard]] constexpr bool empty() const { return size() == 0; }
class Iterator { class Iterator {
public: public:
using iterator_category = std::forward_iterator_tag;
using value_type = T;
using difference_type = std::ptrdiff_t; using difference_type = std::ptrdiff_t;
using value_type = T;
using pointer = value_type*; using pointer = value_type*;
using reference = value_type&; using reference = value_type&;
using iterator_category = std::random_access_iterator_tag;
// There seems to be no feature-test macro covering this, so use the
// presence of `<ranges>` as a crude proxy, since it was added to the
// standard as part of the Ranges papers.
// TODO(pkasting): Add this unconditionally after dropping support for old
// libstdc++ versions.
#if __has_include(<ranges>)
using iterator_concept = std::contiguous_iterator_tag;
#endif
T& operator*() const { return *ptr_; } // Required to satisfy `std::semiregular<>`.
T* operator->() const { return ptr_; } constexpr Iterator() = default;
bool operator==(Iterator other) const { return ptr_ == other.ptr_; } [[nodiscard]] friend constexpr bool operator==(const Iterator& a,
bool operator!=(Iterator other) const { return !(*this == other); } const Iterator& b) {
// TODO(pkasting): Replace this body with `= default` after dropping
// support for old gcc versions.
return a.ptr_ == b.ptr_;
}
#if V8_HAVE_SPACESHIP_OPERATOR
[[nodiscard]] friend constexpr auto operator<=>(const Iterator&,
const Iterator&) = default;
#else
// Assume that if spaceship isn't present, operator rewriting might not be
// either.
[[nodiscard]] friend constexpr bool operator!=(const Iterator& a,
const Iterator& b) {
return a.ptr_ != b.ptr_;
}
Iterator& operator++() { [[nodiscard]] friend constexpr bool operator<(const Iterator& a,
const Iterator& b) {
return a.ptr_ < b.ptr_;
}
[[nodiscard]] friend constexpr bool operator<=(const Iterator& a,
const Iterator& b) {
return a.ptr_ <= b.ptr_;
}
[[nodiscard]] friend constexpr bool operator>(const Iterator& a,
const Iterator& b) {
return a.ptr_ > b.ptr_;
}
[[nodiscard]] friend constexpr bool operator>=(const Iterator& a,
const Iterator& b) {
return a.ptr_ >= b.ptr_;
}
#endif
constexpr Iterator& operator++() {
++ptr_; ++ptr_;
return *this; return *this;
} }
Iterator operator++(int) { constexpr Iterator operator++(int) {
Iterator temp(*this); Iterator temp = *this;
++(*this); ++*this;
return temp; return temp;
} }
constexpr Iterator& operator--() {
--ptr_;
return *this;
}
constexpr Iterator operator--(int) {
Iterator temp = *this;
--*this;
return temp;
}
constexpr Iterator& operator+=(difference_type rhs) {
ptr_ += rhs;
return *this;
}
[[nodiscard]] friend constexpr Iterator operator+(Iterator lhs,
difference_type rhs) {
lhs += rhs;
return lhs;
}
[[nodiscard]] friend constexpr Iterator operator+(difference_type lhs,
const Iterator& rhs) {
return rhs + lhs;
}
constexpr Iterator& operator-=(difference_type rhs) {
ptr_ -= rhs;
return *this;
}
[[nodiscard]] friend constexpr Iterator operator-(Iterator lhs,
difference_type rhs) {
lhs -= rhs;
return lhs;
}
[[nodiscard]] friend constexpr difference_type operator-(
const Iterator& lhs, const Iterator& rhs) {
return lhs.ptr_ - rhs.ptr_;
}
[[nodiscard]] constexpr reference operator*() const { return *ptr_; }
[[nodiscard]] constexpr pointer operator->() const { return ptr_; }
[[nodiscard]] constexpr reference operator[](size_t offset) const {
return ptr_[offset];
}
private: private:
friend class MemorySpan<T>; friend class MemorySpan<T>;
explicit Iterator(T* ptr) : ptr_(ptr) {} constexpr explicit Iterator(T* ptr) : ptr_(ptr) {}
T* ptr_ = nullptr; T* ptr_ = nullptr;
}; };
Iterator begin() const { return Iterator(data_); } [[nodiscard]] Iterator begin() const { return Iterator(data_); }
Iterator end() const { return Iterator(data_ + size_); } [[nodiscard]] Iterator end() const { return Iterator(data_ + size_); }
private: private:
T* data_ = nullptr; T* data_ = nullptr;
@ -171,25 +294,26 @@ class V8_EXPORT MemorySpan {
namespace detail { namespace detail {
template <class T, std::size_t N, std::size_t... I> template <class T, std::size_t N, std::size_t... I>
constexpr std::array<std::remove_cv_t<T>, N> to_array_lvalue_impl( [[nodiscard]] constexpr std::array<std::remove_cv_t<T>, N> to_array_lvalue_impl(
T (&a)[N], std::index_sequence<I...>) { T (&a)[N], std::index_sequence<I...>) {
return {{a[I]...}}; return {{a[I]...}};
} }
template <class T, std::size_t N, std::size_t... I> template <class T, std::size_t N, std::size_t... I>
constexpr std::array<std::remove_cv_t<T>, N> to_array_rvalue_impl( [[nodiscard]] constexpr std::array<std::remove_cv_t<T>, N> to_array_rvalue_impl(
T (&&a)[N], std::index_sequence<I...>) { T (&&a)[N], std::index_sequence<I...>) {
return {{std::move(a[I])...}}; return {{std::move(a[I])...}};
} }
} // namespace detail } // namespace detail
template <class T, std::size_t N> template <class T, std::size_t N>
constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N]) { [[nodiscard]] constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N]) {
return detail::to_array_lvalue_impl(a, std::make_index_sequence<N>{}); return detail::to_array_lvalue_impl(a, std::make_index_sequence<N>{});
} }
template <class T, std::size_t N> template <class T, std::size_t N>
constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&&a)[N]) { [[nodiscard]] constexpr std::array<std::remove_cv_t<T>, N> to_array(
T (&&a)[N]) {
return detail::to_array_rvalue_impl(std::move(a), return detail::to_array_rvalue_impl(std::move(a),
std::make_index_sequence<N>{}); std::make_index_sequence<N>{});
} }


@ -14,7 +14,7 @@
#include <string> #include <string>
#include "v8-source-location.h" // NOLINT(build/include_directory) #include "v8-source-location.h" // NOLINT(build/include_directory)
#include "v8config.h" // NOLINT(build/include_directory) #include "v8config.h" // NOLINT(build/include_directory)
namespace v8 { namespace v8 {
@ -79,9 +79,8 @@ class TaskRunner {
* *
* Embedders should override PostTaskImpl instead of this. * Embedders should override PostTaskImpl instead of this.
*/ */
void PostTask( void PostTask(std::unique_ptr<Task> task,
std::unique_ptr<Task> task, const SourceLocation& location = SourceLocation::Current()) {
const SourceLocation& location = SourceLocation::Current()) {
PostTaskImpl(std::move(task), location); PostTaskImpl(std::move(task), location);
} }
@ -553,6 +552,19 @@ class PageAllocator {
*/ */
virtual bool DecommitPages(void* address, size_t size) = 0; virtual bool DecommitPages(void* address, size_t size) = 0;
/**
* Block any modifications to the given mapping such as changing permissions
* or unmapping the pages on supported platforms.
* The address space reservation will exist until the process ends, but it's
* possible to release the memory using DiscardSystemPages. Note that this
* might require write permissions to the page as e.g. on Linux, mseal will
* block discarding sealed anonymous memory.
*/
virtual bool SealPages(void* address, size_t length) {
// TODO(360048056): make it pure once it's implemented on Chromium side.
return false;
}
/** /**
* INTERNAL ONLY: This interface has not been stabilised and may change * INTERNAL ONLY: This interface has not been stabilised and may change
* without notice from one release to another without being deprecated first. * without notice from one release to another without being deprecated first.
@ -1086,11 +1098,8 @@ class Platform {
* Returns a TaskRunner which can be used to post a task on the foreground. * Returns a TaskRunner which can be used to post a task on the foreground.
* The TaskRunner's NonNestableTasksEnabled() must be true. This function * The TaskRunner's NonNestableTasksEnabled() must be true. This function
* should only be called from a foreground thread. * should only be called from a foreground thread.
* TODO(chromium:1448758): Deprecate once |GetForegroundTaskRunner(Isolate*,
* TaskPriority)| is ready.
*/ */
virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner( std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(Isolate* isolate) {
Isolate* isolate) {
return GetForegroundTaskRunner(isolate, TaskPriority::kUserBlocking); return GetForegroundTaskRunner(isolate, TaskPriority::kUserBlocking);
} }
@ -1098,12 +1107,9 @@ class Platform {
* Returns a TaskRunner with a specific |priority| which can be used to post a * Returns a TaskRunner with a specific |priority| which can be used to post a
* task on the foreground thread. The TaskRunner's NonNestableTasksEnabled() * task on the foreground thread. The TaskRunner's NonNestableTasksEnabled()
* must be true. This function should only be called from a foreground thread. * must be true. This function should only be called from a foreground thread.
* TODO(chromium:1448758): Make pure virtual once embedders implement it.
*/ */
virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner( virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
Isolate* isolate, TaskPriority priority) { Isolate* isolate, TaskPriority priority) = 0;
return nullptr;
}
/** /**
* Schedules a task to be invoked on a worker thread. * Schedules a task to be invoked on a worker thread.

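With GetForegroundTaskRunner(Isolate*, TaskPriority) now pure virtual and the single-argument form reduced to a non-virtual wrapper, call sites can route work by priority. A call-site sketch using only the APIs shown above:

#include <memory>
#include <utility>
#include "v8-platform.h"

void PostAtTwoPriorities(v8::Platform* platform, v8::Isolate* isolate,
                         std::unique_ptr<v8::Task> urgent,
                         std::unique_ptr<v8::Task> lazy) {
  // The one-argument overload forwards TaskPriority::kUserBlocking.
  platform->GetForegroundTaskRunner(isolate)->PostTask(std::move(urgent));
  platform->GetForegroundTaskRunner(isolate, v8::TaskPriority::kBestEffort)
      ->PostTask(std::move(lazy));
}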

@ -129,6 +129,11 @@ class V8_EXPORT ModuleRequest : public Data {
*/ */
Local<String> GetSpecifier() const; Local<String> GetSpecifier() const;
/**
* Returns the module import phase for this ModuleRequest.
*/
ModuleImportPhase GetPhase() const;
/** /**
* Returns the source code offset of this module request. * Returns the source code offset of this module request.
* Use Module::SourceOffsetToLocation to convert this to line/column numbers. * Use Module::SourceOffsetToLocation to convert this to line/column numbers.
@ -211,6 +216,9 @@ class V8_EXPORT Module : public Data {
using ResolveModuleCallback = MaybeLocal<Module> (*)( using ResolveModuleCallback = MaybeLocal<Module> (*)(
Local<Context> context, Local<String> specifier, Local<Context> context, Local<String> specifier,
Local<FixedArray> import_attributes, Local<Module> referrer); Local<FixedArray> import_attributes, Local<Module> referrer);
using ResolveSourceCallback = MaybeLocal<Object> (*)(
Local<Context> context, Local<String> specifier,
Local<FixedArray> import_attributes, Local<Module> referrer);
/** /**
* Instantiates the module and its dependencies. * Instantiates the module and its dependencies.
@ -220,7 +228,8 @@ class V8_EXPORT Module : public Data {
* exception is propagated.) * exception is propagated.)
*/ */
V8_WARN_UNUSED_RESULT Maybe<bool> InstantiateModule( V8_WARN_UNUSED_RESULT Maybe<bool> InstantiateModule(
Local<Context> context, ResolveModuleCallback callback); Local<Context> context, ResolveModuleCallback module_callback,
ResolveSourceCallback source_callback = nullptr);
/** /**
* Evaluates the module and its dependencies. * Evaluates the module and its dependencies.

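A wiring sketch for the two-callback InstantiateModule; the resolver bodies are placeholders, and the ResolveSourceCallback serves the new import phase (e.g. returning a compiled-module object):

#include "v8.h"

v8::MaybeLocal<v8::Module> ResolveModule(
    v8::Local<v8::Context> context, v8::Local<v8::String> specifier,
    v8::Local<v8::FixedArray> import_attributes,
    v8::Local<v8::Module> referrer) {
  return v8::MaybeLocal<v8::Module>();  // placeholder: look up |specifier|
}

v8::MaybeLocal<v8::Object> ResolveSource(
    v8::Local<v8::Context> context, v8::Local<v8::String> specifier,
    v8::Local<v8::FixedArray> import_attributes,
    v8::Local<v8::Module> referrer) {
  return v8::MaybeLocal<v8::Object>();  // placeholder: source phase result
}

bool Instantiate(v8::Local<v8::Context> context, v8::Local<v8::Module> mod) {
  // |source_callback| defaults to nullptr, so existing single-callback
  // callers keep compiling.
  return mod->InstantiateModule(context, ResolveModule, ResolveSource)
      .FromMaybe(false);
}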

@ -72,8 +72,6 @@ class V8_EXPORT Template : public Data {
* \param name The name of the property for which an accessor is added. * \param name The name of the property for which an accessor is added.
* \param getter The callback to invoke when getting the property. * \param getter The callback to invoke when getting the property.
* \param setter The callback to invoke when setting the property. * \param setter The callback to invoke when setting the property.
* \param data A piece of data that will be passed to the getter and setter
* callbacks whenever they are invoked.
* \param attribute The attributes of the property for which an accessor * \param attribute The attributes of the property for which an accessor
* is added. * is added.
*/ */


@ -37,15 +37,13 @@ enum class TracedReferenceHandling {
kDroppable kDroppable
}; };
V8_EXPORT internal::Address* GlobalizeTracedReference( V8_EXPORT Address* GlobalizeTracedReference(
internal::Isolate* isolate, internal::Address value, Isolate* isolate, Address value, Address* slot,
internal::Address* slot, TracedReferenceStoreMode store_mode, TracedReferenceStoreMode store_mode,
internal::TracedReferenceHandling reference_handling); TracedReferenceHandling reference_handling);
V8_EXPORT void MoveTracedReference(internal::Address** from, V8_EXPORT void MoveTracedReference(Address** from, Address** to);
internal::Address** to); V8_EXPORT void CopyTracedReference(const Address* const* from, Address** to);
V8_EXPORT void CopyTracedReference(const internal::Address* const* from, V8_EXPORT void DisposeTracedReference(Address* global_handle);
internal::Address** to);
V8_EXPORT void DisposeTracedReference(internal::Address* global_handle);
} // namespace internal } // namespace internal
@ -55,6 +53,9 @@ V8_EXPORT void DisposeTracedReference(internal::Address* global_handle);
*/ */
class TracedReferenceBase : public api_internal::IndirectHandleBase { class TracedReferenceBase : public api_internal::IndirectHandleBase {
public: public:
static_assert(sizeof(std::atomic<internal::Address*>) ==
sizeof(internal::Address*));
/** /**
* If non-empty, destroy the underlying storage cell. |IsEmpty| will return * If non-empty, destroy the underlying storage cell. |IsEmpty| will return
* true after this call. * true after this call.
@ -73,9 +74,7 @@ class TracedReferenceBase : public api_internal::IndirectHandleBase {
* Returns true if this TracedReference is empty, i.e., has not been * Returns true if this TracedReference is empty, i.e., has not been
* assigned an object. This version of IsEmpty is thread-safe. * assigned an object. This version of IsEmpty is thread-safe.
*/ */
bool IsEmptyThreadSafe() const { bool IsEmptyThreadSafe() const { return GetSlotThreadSafe() == nullptr; }
return this->GetSlotThreadSafe() == nullptr;
}
protected: protected:
V8_INLINE TracedReferenceBase() = default; V8_INLINE TracedReferenceBase() = default;
@ -83,17 +82,17 @@ class TracedReferenceBase : public api_internal::IndirectHandleBase {
/** /**
* Update this reference in a thread-safe way. * Update this reference in a thread-safe way.
*/ */
void SetSlotThreadSafe(void* new_val) { void SetSlotThreadSafe(internal::Address* new_val) {
reinterpret_cast<std::atomic<void*>*>(&slot())->store( reinterpret_cast<std::atomic<internal::Address*>*>(&slot())->store(
new_val, std::memory_order_relaxed); new_val, std::memory_order_relaxed);
} }
/** /**
* Get this reference in a thread-safe way * Get this reference in a thread-safe way
*/ */
const void* GetSlotThreadSafe() const { const internal::Address* GetSlotThreadSafe() const {
return reinterpret_cast<std::atomic<const void*> const*>(&slot())->load( return reinterpret_cast<const std::atomic<internal::Address*>*>(&slot())
std::memory_order_relaxed); ->load(std::memory_order_relaxed);
} }
V8_EXPORT void CheckValue() const; V8_EXPORT void CheckValue() const;


@ -18,9 +18,8 @@ struct CalleeSavedRegisters {
void* arm_r10; void* arm_r10;
}; };
#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \ #elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \ V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || \
V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_LOONG64 || \ V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32
V8_TARGET_ARCH_RISCV32
struct CalleeSavedRegisters {}; struct CalleeSavedRegisters {};
#else #else
#error Target architecture was not detected as supported by v8 #error Target architecture was not detected as supported by v8


@ -8,10 +8,10 @@
// These macros define the version number for the current version. // These macros define the version number for the current version.
// NOTE these macros are used by some of the tool scripts and the build // NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts. // system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 12 #define V8_MAJOR_VERSION 13
#define V8_MINOR_VERSION 9 #define V8_MINOR_VERSION 0
#define V8_BUILD_NUMBER 202 #define V8_BUILD_NUMBER 245
#define V8_PATCH_LEVEL 28 #define V8_PATCH_LEVEL 25
// Use 1 for candidates and 0 otherwise. // Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.) // (Boolean macro values are not supported by all preprocessors.)


@ -581,15 +581,11 @@ path. Add it with -I<path> to the command line
// functions. // functions.
// Use like: // Use like:
// V8_NOINLINE V8_PRESERVE_MOST void UnlikelyMethod(); // V8_NOINLINE V8_PRESERVE_MOST void UnlikelyMethod();
#if V8_OS_WIN
# define V8_PRESERVE_MOST
#else
#if V8_HAS_ATTRIBUTE_PRESERVE_MOST #if V8_HAS_ATTRIBUTE_PRESERVE_MOST
# define V8_PRESERVE_MOST __attribute__((preserve_most)) # define V8_PRESERVE_MOST __attribute__((preserve_most))
#else #else
# define V8_PRESERVE_MOST /* NOT SUPPORTED */ # define V8_PRESERVE_MOST /* NOT SUPPORTED */
#endif #endif
#endif
// A macro (V8_DEPRECATED) to mark classes or functions as deprecated. // A macro (V8_DEPRECATED) to mark classes or functions as deprecated.
@ -681,7 +677,7 @@ path. Add it with -I<path> to the command line
// V8_NODISCARD Foo() { ... }; // V8_NODISCARD Foo() { ... };
// [[nodiscard]] comes in C++17 but supported in clang with -std >= c++11. // [[nodiscard]] comes in C++17 but supported in clang with -std >= c++11.
#if V8_HAS_CPP_ATTRIBUTE_NODISCARD #if V8_HAS_CPP_ATTRIBUTE_NODISCARD
#define V8_NODISCARD #define V8_NODISCARD [[nodiscard]]
#else #else
#define V8_NODISCARD /* NOT SUPPORTED */ #define V8_NODISCARD /* NOT SUPPORTED */
#endif #endif
@ -833,9 +829,6 @@ V8 shared library set USING_V8_SHARED.
#elif defined(__PPC64__) || defined(_ARCH_PPC64) #elif defined(__PPC64__) || defined(_ARCH_PPC64)
#define V8_HOST_ARCH_PPC64 1 #define V8_HOST_ARCH_PPC64 1
#define V8_HOST_ARCH_64_BIT 1 #define V8_HOST_ARCH_64_BIT 1
#elif defined(__PPC__) || defined(_ARCH_PPC)
#define V8_HOST_ARCH_PPC 1
#define V8_HOST_ARCH_32_BIT 1
#elif defined(__s390__) || defined(__s390x__) #elif defined(__s390__) || defined(__s390x__)
#define V8_HOST_ARCH_S390 1 #define V8_HOST_ARCH_S390 1
#if defined(__s390x__) #if defined(__s390x__)
@ -862,10 +855,10 @@ V8 shared library set USING_V8_SHARED.
// The macros may be set externally. If not, detect in the same way as the host // The macros may be set externally. If not, detect in the same way as the host
// architecture, that is, target the native environment as presented by the // architecture, that is, target the native environment as presented by the
// compiler. // compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \ #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \
!V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_PPC && \ !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
!V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \ !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \
!V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64 && \ !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64 && \
!V8_TARGET_ARCH_RISCV32 !V8_TARGET_ARCH_RISCV32
#if defined(_M_X64) || defined(__x86_64__) #if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1 #define V8_TARGET_ARCH_X64 1
@ -881,8 +874,6 @@ V8 shared library set USING_V8_SHARED.
#define V8_TARGET_ARCH_LOONG64 1 #define V8_TARGET_ARCH_LOONG64 1
#elif defined(_ARCH_PPC64) #elif defined(_ARCH_PPC64)
#define V8_TARGET_ARCH_PPC64 1 #define V8_TARGET_ARCH_PPC64 1
#elif defined(_ARCH_PPC)
#define V8_TARGET_ARCH_PPC 1
#elif defined(__s390__) #elif defined(__s390__)
#define V8_TARGET_ARCH_S390 1 #define V8_TARGET_ARCH_S390 1
#if defined(__s390x__) #if defined(__s390x__)
@ -920,8 +911,6 @@ V8 shared library set USING_V8_SHARED.
#define V8_TARGET_ARCH_64_BIT 1 #define V8_TARGET_ARCH_64_BIT 1
#elif V8_TARGET_ARCH_LOONG64 #elif V8_TARGET_ARCH_LOONG64
#define V8_TARGET_ARCH_64_BIT 1 #define V8_TARGET_ARCH_64_BIT 1
#elif V8_TARGET_ARCH_PPC
#define V8_TARGET_ARCH_32_BIT 1
#elif V8_TARGET_ARCH_PPC64 #elif V8_TARGET_ARCH_PPC64
#define V8_TARGET_ARCH_64_BIT 1 #define V8_TARGET_ARCH_64_BIT 1
#elif V8_TARGET_ARCH_S390 #elif V8_TARGET_ARCH_S390
@ -986,12 +975,12 @@ V8 shared library set USING_V8_SHARED.
#else #else
#define V8_TARGET_LITTLE_ENDIAN 1 #define V8_TARGET_LITTLE_ENDIAN 1
#endif #endif
#elif defined(__BIG_ENDIAN__) // FOR PPCGR on AIX #elif V8_TARGET_ARCH_PPC64
#if V8_OS_AIX
#define V8_TARGET_BIG_ENDIAN 1 #define V8_TARGET_BIG_ENDIAN 1
#elif V8_TARGET_ARCH_PPC_LE #else
#define V8_TARGET_LITTLE_ENDIAN 1 #define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_PPC_BE #endif
#define V8_TARGET_BIG_ENDIAN 1
#elif V8_TARGET_ARCH_S390 #elif V8_TARGET_ARCH_S390
#if V8_TARGET_ARCH_S390_LE_SIM #if V8_TARGET_ARCH_S390_LE_SIM
#define V8_TARGET_LITTLE_ENDIAN 1 #define V8_TARGET_LITTLE_ENDIAN 1
@ -1017,5 +1006,10 @@ V8 shared library set USING_V8_SHARED.
#else #else
#define V8_STATIC_ROOTS_BOOL true #define V8_STATIC_ROOTS_BOOL true
#endif #endif
#ifdef V8_TARGET_BIG_ENDIAN
#define V8_TARGET_BIG_ENDIAN_BOOL true
#else
#define V8_TARGET_BIG_ENDIAN_BOOL false
#endif
#endif // V8CONFIG_H_ #endif // V8CONFIG_H_
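The new _BOOL spelling lets endianness checks move from preprocessor blocks into ordinary if constexpr. A sketch (the byte-swap builtin is a GCC/Clang assumption, not part of the patch):

#include <cstdint>
#include "v8config.h"

uint32_t ToTargetLittleEndian(uint32_t value) {
  if constexpr (V8_TARGET_BIG_ENDIAN_BOOL) {
    return __builtin_bswap32(value);  // swap on big-endian targets
  } else {
    return value;
  }
}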


@ -71,6 +71,10 @@
"label": "//test:v8_run_num_fuzzer", "label": "//test:v8_run_num_fuzzer",
"type": "script", "type": "script",
}, },
"snapshot_set": {
"label": "//:snapshot_set",
"type": "script",
},
"test262": { "test262": {
"label": "//test/test262:v8_test262", "label": "//test/test262:v8_test262",
"type": "script", "type": "script",


@ -1,2 +1,2 @@
mathias@chromium.org ahaas@chromium.org
cbruni@chromium.org cbruni@chromium.org


@ -16,7 +16,7 @@
* ENTER_V8, ENTER_V8_NO_SCRIPT, ENTER_V8_NO_SCRIPT_NO_EXCEPTION. * ENTER_V8, ENTER_V8_NO_SCRIPT, ENTER_V8_NO_SCRIPT_NO_EXCEPTION.
* *
* The latter two assume that no script is executed, and no exceptions are * The latter two assume that no script is executed, and no exceptions are
* scheduled in addition (respectively). Creating a exception and * scheduled in addition (respectively). Creating an exception and
* removing it before returning is ok. * removing it before returning is ok.
* *
* Exceptions should be handled either by invoking one of the * Exceptions should be handled either by invoking one of the

View File

@ -85,7 +85,7 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
isolate, getter, isolate, getter,
InstantiateFunction(isolate, Cast<FunctionTemplateInfo>(getter))); InstantiateFunction(isolate, Cast<FunctionTemplateInfo>(getter)));
DirectHandle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline); DirectHandle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
Cast<JSFunction>(getter)->set_code(*trampoline); Cast<JSFunction>(getter)->UpdateCode(*trampoline);
} }
if (IsFunctionTemplateInfo(*setter) && if (IsFunctionTemplateInfo(*setter) &&
Cast<FunctionTemplateInfo>(*setter)->BreakAtEntry(isolate)) { Cast<FunctionTemplateInfo>(*setter)->BreakAtEntry(isolate)) {
@ -93,7 +93,7 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
isolate, setter, isolate, setter,
InstantiateFunction(isolate, Cast<FunctionTemplateInfo>(setter))); InstantiateFunction(isolate, Cast<FunctionTemplateInfo>(setter)));
DirectHandle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline); DirectHandle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
Cast<JSFunction>(setter)->set_code(*trampoline); Cast<JSFunction>(setter)->UpdateCode(*trampoline);
} }
RETURN_ON_EXCEPTION(isolate, JSObject::DefineOwnAccessorIgnoreAttributes( RETURN_ON_EXCEPTION(isolate, JSObject::DefineOwnAccessorIgnoreAttributes(
object, name, getter, setter, attributes)); object, name, getter, setter, attributes));


@ -161,11 +161,6 @@
#endif // V8_OS_LINUX || V8_OS_DARWIN || V8_OS_FREEBSD #endif // V8_OS_LINUX || V8_OS_DARWIN || V8_OS_FREEBSD
#if V8_OS_WIN #if V8_OS_WIN
#include <windows.h>
// This has to come after windows.h.
#include <versionhelpers.h>
#include "include/v8-wasm-trap-handler-win.h" #include "include/v8-wasm-trap-handler-win.h"
#include "src/trap-handler/handler-inside-win.h" #include "src/trap-handler/handler-inside-win.h"
#if defined(V8_OS_WIN64) #if defined(V8_OS_WIN64)
@ -414,7 +409,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
backing_memory_base, backing_memory_size, kAllocationGranularity); backing_memory_base, backing_memory_size, kAllocationGranularity);
end_of_accessible_region_ = region_alloc_->begin(); end_of_accessible_region_ = region_alloc_->begin();
// Install a on-merge callback to discard or decommit unused pages. // Install an on-merge callback to discard or decommit unused pages.
region_alloc_->set_on_merge_callback([this](i::Address start, region_alloc_->set_on_merge_callback([this](i::Address start,
size_t size) { size_t size) {
mutex_.AssertHeld(); mutex_.AssertHeld();
@ -2297,6 +2292,11 @@ Local<String> ModuleRequest::GetSpecifier() const {
return ToApiHandle<String>(i::direct_handle(self->specifier(), i_isolate)); return ToApiHandle<String>(i::direct_handle(self->specifier(), i_isolate));
} }
ModuleImportPhase ModuleRequest::GetPhase() const {
auto self = Utils::OpenDirectHandle(this);
return self->phase();
}
int ModuleRequest::GetSourceOffset() const { int ModuleRequest::GetSourceOffset() const {
return Utils::OpenDirectHandle(this)->position(); return Utils::OpenDirectHandle(this)->position();
} }
@ -2430,11 +2430,13 @@ int Module::GetIdentityHash() const {
} }
Maybe<bool> Module::InstantiateModule(Local<Context> context, Maybe<bool> Module::InstantiateModule(Local<Context> context,
Module::ResolveModuleCallback callback) { ResolveModuleCallback module_callback,
ResolveSourceCallback source_callback) {
auto i_isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate()); auto i_isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8(i_isolate, context, Module, InstantiateModule, i::HandleScope); ENTER_V8(i_isolate, context, Module, InstantiateModule, i::HandleScope);
has_exception = !i::Module::Instantiate(i_isolate, Utils::OpenHandle(this), has_exception =
context, callback); !i::Module::Instantiate(i_isolate, Utils::OpenHandle(this), context,
module_callback, source_callback);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true); return Just(true);
} }
@ -3717,11 +3719,11 @@ TYPED_ARRAYS_BASE(VALUE_IS_TYPED_ARRAY)
#undef VALUE_IS_TYPED_ARRAY #undef VALUE_IS_TYPED_ARRAY
bool Value::IsFloat16Array() const { bool Value::IsFloat16Array() const {
Utils::ApiCheck(i::v8_flags.js_float16array, "Value::IsFloat16Array",
"Float16Array is not supported");
auto obj = *Utils::OpenDirectHandle(this); auto obj = *Utils::OpenDirectHandle(this);
return i::IsJSTypedArray(obj) && return i::IsJSTypedArray(obj) &&
i::Cast<i::JSTypedArray>(obj)->type() == i::kExternalFloat16Array; i::Cast<i::JSTypedArray>(obj)->type() == i::kExternalFloat16Array &&
Utils::ApiCheck(i::v8_flags.js_float16array, "Value::IsFloat16Array",
"Float16Array is not supported");
} }
bool Value::IsDataView() const { bool Value::IsDataView() const {
@ -4184,7 +4186,7 @@ std::unique_ptr<v8::BackingStore> v8::BackingStore::Reallocate(
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate); i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
API_RCS_SCOPE(i_isolate, ArrayBuffer, BackingStore_Reallocate); API_RCS_SCOPE(i_isolate, ArrayBuffer, BackingStore_Reallocate);
Utils::ApiCheck(byte_length <= i::JSArrayBuffer::kMaxByteLength, Utils::ApiCheck(byte_length <= i::JSArrayBuffer::kMaxByteLength,
"v8::BackingStore::Reallocate", "byte_lenght is too large"); "v8::BackingStore::Reallocate", "byte_length is too large");
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::BackingStore* i_backing_store = i::BackingStore* i_backing_store =
reinterpret_cast<i::BackingStore*>(backing_store.get()); reinterpret_cast<i::BackingStore*>(backing_store.get());
@@ -8197,7 +8199,7 @@ MaybeLocal<v8::Array> v8::Array::New(
     Local<Context> context, size_t length,
     std::function<MaybeLocal<v8::Value>()> next_value_callback) {
   PREPARE_FOR_EXECUTION(context, Array, New);
-  // We should never see a exception here as V8 will not create an
+  // We should never see an exception here as V8 will not create an
   // exception and the callback is invoked by the embedder where the exception
   // is already scheduled.
   USE(has_exception);
@@ -9560,10 +9562,10 @@ void BigInt::ToWordsArray(int* sign_bit, int* word_count,
                           words);
 }
 
-void Isolate::ReportExternalAllocationLimitReached() {
+void Isolate::HandleExternalMemoryInterrupt() {
   i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
   if (heap->gc_state() != i::Heap::NOT_IN_GC) return;
-  heap->ReportExternalMemoryPressure();
+  heap->HandleExternalMemoryInterrupt();
 }
 
 HeapProfiler* Isolate::GetHeapProfiler() {
@@ -10271,20 +10273,19 @@ void Isolate::GetStackSample(const RegisterState& state, void** frames,
 int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
     int64_t change_in_bytes) {
   // Try to check for unreasonably large or small values from the embedder.
-  const int64_t kMaxReasonableBytes = int64_t(1) << 60;
-  const int64_t kMinReasonableBytes = -kMaxReasonableBytes;
+  static constexpr int64_t kMaxReasonableBytes = int64_t(1) << 60;
+  static constexpr int64_t kMinReasonableBytes = -kMaxReasonableBytes;
   static_assert(kMaxReasonableBytes >= i::JSArrayBuffer::kMaxByteLength);
   CHECK(kMinReasonableBytes <= change_in_bytes &&
         change_in_bytes < kMaxReasonableBytes);
-
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
-  int64_t amount = i_isolate->heap()->update_external_memory(change_in_bytes);
+  int64_t amount = i_isolate->heap()->UpdateExternalMemory(change_in_bytes);
 
   if (change_in_bytes <= 0) return amount;
 
-  if (amount > i_isolate->heap()->external_memory_limit()) {
-    ReportExternalAllocationLimitReached();
+  if (amount > i_isolate->heap()->external_memory_limit_for_interrupt()) {
+    HandleExternalMemoryInterrupt();
   }
   return amount;
 }
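
Note: a sketch of typical embedder usage of this API — reporting externally held memory so the interrupt check above can kick in. The buffer class is illustrative, not V8 code.

    #include <cstdlib>
    #include <v8.h>

    class ExternalBuffer {
     public:
      ExternalBuffer(v8::Isolate* isolate, size_t size)
          : isolate_(isolate), size_(size), data_(std::malloc(size)) {
        // Tell V8 this isolate now keeps `size` bytes alive off-heap.
        isolate_->AdjustAmountOfExternalAllocatedMemory(
            static_cast<int64_t>(size_));
      }
      ~ExternalBuffer() {
        std::free(data_);
        // Negative adjustment on release; never triggers the interrupt path.
        isolate_->AdjustAmountOfExternalAllocatedMemory(
            -static_cast<int64_t>(size_));
      }

     private:
      v8::Isolate* isolate_;
      size_t size_;
      void* data_;
    };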
View File
@@ -24,23 +24,23 @@ namespace internal {
 namespace wasm {
 
 #ifdef DEBUG
-#define FAIL_AND_RETURN(ret, msg)                                        \
-  failed_ = true;                                                        \
-  failure_message_ = msg;                                                \
-  failure_location_ = static_cast<int>(scanner_.Position());             \
-  if (v8_flags.trace_asm_parser) {                                       \
-    PrintF("[asm.js failure: %s, token: '%s', see: %s:%d]\n", msg,       \
-           scanner_.Name(scanner_.Token()).c_str(), __FILE__, __LINE__); \
-  }                                                                      \
-  return ret;
+#define TRACE_ASM_PARSER(...)      \
+  if (v8_flags.trace_asm_parser) { \
+    PrintF(__VA_ARGS__);           \
+  }
 #else
-#define FAIL_AND_RETURN(ret, msg)                            \
-  failed_ = true;                                            \
-  failure_message_ = msg;                                    \
-  failure_location_ = static_cast<int>(scanner_.Position()); \
-  return ret;
+#define TRACE_ASM_PARSER(...)
 #endif
 
+#define FAIL_AND_RETURN(ret, msg)                                          \
+  failed_ = true;                                                          \
+  failure_message_ = msg;                                                  \
+  failure_location_ = static_cast<int>(scanner_.Position());               \
+  TRACE_ASM_PARSER("[asm.js failure: %s, token: '%s', see: %s:%d]\n", msg, \
+                   scanner_.Name(scanner_.Token()).c_str(), __FILE__,      \
+                   __LINE__);                                              \
+  return ret;
+
 #define FAIL(msg) FAIL_AND_RETURN(, msg)
 #define FAILn(msg) FAIL_AND_RETURN(nullptr, msg)
@@ -2572,8 +2572,18 @@ void AsmJsParser::GatherCases(ZoneVector<int32_t>* cases) {
   scanner_.Seek(start);
 }
 
+#undef TOK
+#undef RECURSEn
+#undef RECURSE
+#undef RECURSE_OR_RETURN
+#undef EXPECT_TOKENn
+#undef EXPECT_TOKEN
+#undef EXPECT_TOKEN_OR_RETURN
+#undef FAILn
+#undef FAIL
+#undef FAIL_AND_RETURN
+#undef TRACE_ASM_PARSER
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
-
-#undef RECURSE
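
Note: the shape of this refactor, reduced to a generic sketch (names are illustrative, not V8's): a variadic trace macro that compiles away in release builds, so the failure macro needs only one definition.

    #include <cstdio>

    extern bool tracing_enabled;  // illustrative runtime flag

    #ifdef DEBUG
    #define TRACE(...)                           \
      do {                                       \
        if (tracing_enabled) {                   \
          std::fprintf(stderr, __VA_ARGS__);     \
        }                                        \
      } while (false)
    #else
    #define TRACE(...)
    #endif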
View File
@@ -915,7 +915,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
     // so we don't care that it calls sloppy eval.
     if (is_script_scope()) return;
 
-    // Sloppy eval in a eval scope can only introduce variables into the outer
+    // Sloppy eval in an eval scope can only introduce variables into the outer
     // (non-eval) declaration scope, not into this eval scope.
     if (is_eval_scope()) {
 #ifdef DEBUG
View File
@@ -41,21 +41,14 @@ class BitField final {
   static constexpr U kMask = ((U{1} << kShift) << kSize) - (U{1} << kShift);
   static constexpr int kLastUsedBit = kShift + kSize - 1;
   static constexpr U kNumValues = U{1} << kSize;
-
-  // Value for the field with all bits set.
-  // If clang complains
-  // "constexpr variable 'kMax' must be initialized by a constant expression"
-  // on this line, then you're creating a BitField for an enum with more bits
-  // than needed for the enum values. Either reduce the BitField size,
-  // or give the enum an explicit underlying type.
-  static constexpr T kMax = static_cast<T>(kNumValues - 1);
+  static constexpr U kMax = kNumValues - 1;
 
   template <class T2, int size2>
   using Next = BitField<T2, kShift + kSize, size2, U>;
 
   // Tells whether the provided value fits into the bit field.
   static constexpr bool is_valid(T value) {
-    return (static_cast<U>(value) & ~static_cast<U>(kMax)) == 0;
+    return (static_cast<U>(value) & ~kMax) == 0;
   }
 
   // Returns a type U with the bit field value encoded.
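
Note: an illustrative sketch of how these BitField templates are typically used (the field layout and names are made up for the example):

    #include <cstdint>
    #include "src/base/bit-field.h"

    using HasErrorBit = v8::base::BitField<bool, 0, 1>;  // bit 0
    using KindBits = HasErrorBit::Next<int, 3>;          // bits 1..3

    uint32_t Encode(bool has_error, int kind) {
      return HasErrorBit::encode(has_error) | KindBits::encode(kind);
    }

    int DecodeKind(uint32_t packed) { return KindBits::decode(packed); }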
View File
@@ -244,6 +244,10 @@ bool BoundedPageAllocator::DecommitPages(void* address, size_t size) {
   return page_allocator_->DecommitPages(address, size);
 }
 
+bool BoundedPageAllocator::SealPages(void* address, size_t size) {
+  return page_allocator_->SealPages(address, size);
+}
+
 const char* BoundedPageAllocator::AllocationStatusToString(
     AllocationStatus allocation_status) {
   switch (allocation_status) {
View File
@@ -119,6 +119,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
   bool DecommitPages(void* address, size_t size) override;
 
+  bool SealPages(void* address, size_t size) override;
+
   AllocationStatus get_last_allocation_status() const {
     return allocation_status_;
   }
View File
@@ -58,7 +58,7 @@ constexpr int kReturnAddressStackSlotCount =
     V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
 
 // Number of bits to represent the page size for paged spaces.
-#if (defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_PPC64)) && !defined(_AIX)
+#if defined(V8_HOST_ARCH_PPC64) && !defined(V8_OS_AIX)
 // Native PPC linux has large (64KB) physical pages.
 // Simulator (and Aix) need to use the same value as x64.
 constexpr int kPageSizeBits = 19;
View File
@@ -98,10 +98,9 @@
 // do not support adding noexcept to default members.
 // Disabled on MSVC because constructors of standard containers are not noexcept
 // there.
 #if ((!defined(V8_CC_GNU) && !defined(V8_CC_MSVC) &&                           \
-      !defined(V8_TARGET_ARCH_MIPS64) && !defined(V8_TARGET_ARCH_PPC) &&       \
-      !defined(V8_TARGET_ARCH_PPC64) && !defined(V8_TARGET_ARCH_RISCV64) &&    \
-      !defined(V8_TARGET_ARCH_RISCV32)) ||                                     \
+      !defined(V8_TARGET_ARCH_MIPS64) && !defined(V8_TARGET_ARCH_PPC64) &&     \
+      !defined(V8_TARGET_ARCH_RISCV64) && !defined(V8_TARGET_ARCH_RISCV32)) || \
      defined(__clang__))
 #define V8_NOEXCEPT noexcept
 #else
View File
@@ -20,7 +20,7 @@
 #if V8_OS_QNX
 #include <sys/syspage.h>  // cpuinfo
 #endif
-#if V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64)
+#if V8_OS_LINUX && V8_HOST_ARCH_PPC64
 #include <elf.h>
 #endif
 #if V8_OS_AIX
@@ -54,8 +54,6 @@
 #include "src/base/platform/wrappers.h"
 #if V8_OS_WIN
 #include <windows.h>
-
-#include "src/base/win32-headers.h"
 #endif
 
 namespace v8 {
@@ -430,6 +428,7 @@ CPU::CPU()
       has_avx_(false),
       has_avx2_(false),
       has_avx_vnni_(false),
+      has_avx_vnni_int8_(false),
       has_fma3_(false),
       has_f16c_(false),
       has_bmi1_(false),
@@ -512,6 +511,7 @@ CPU::CPU()
     has_avx_ = (cpu_info[2] & 0x10000000) != 0;
     has_avx2_ = (cpu_info70[1] & 0x00000020) != 0;
     has_avx_vnni_ = (cpu_info71[0] & 0x00000010) != 0;
+    has_avx_vnni_int8_ = (cpu_info71[3] & 0x00000020) != 0;
    has_fma3_ = (cpu_info[2] & 0x00001000) != 0;
    has_f16c_ = (cpu_info[2] & 0x20000000) != 0;
    // CET shadow stack feature flag. See
@@ -900,7 +900,7 @@ CPU::CPU()
 #endif  // V8_OS_IOS
 #endif  // V8_OS_WIN
-#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
+#elif V8_HOST_ARCH_PPC64
 
 #ifndef USE_SIMULATOR
 #if V8_OS_LINUX
@@ -908,11 +908,7 @@ CPU::CPU()
   char* auxv_cpu_type = nullptr;
   FILE* fp = base::Fopen("/proc/self/auxv", "r");
   if (fp != nullptr) {
-#if V8_TARGET_ARCH_PPC64
     Elf64_auxv_t entry;
-#else
-    Elf32_auxv_t entry;
-#endif
     for (;;) {
      size_t n = fread(&entry, sizeof(entry), 1, fp);
      if (n == 0 || entry.a_type == AT_NULL) {
@@ -941,18 +937,6 @@ CPU::CPU()
       part_ = kPPCPower9;
     } else if (strcmp(auxv_cpu_type, "power8") == 0) {
       part_ = kPPCPower8;
-    } else if (strcmp(auxv_cpu_type, "power7") == 0) {
-      part_ = kPPCPower7;
-    } else if (strcmp(auxv_cpu_type, "power6") == 0) {
-      part_ = kPPCPower6;
-    } else if (strcmp(auxv_cpu_type, "power5") == 0) {
-      part_ = kPPCPower5;
-    } else if (strcmp(auxv_cpu_type, "ppc970") == 0) {
-      part_ = kPPCG5;
-    } else if (strcmp(auxv_cpu_type, "ppc7450") == 0) {
-      part_ = kPPCG4;
-    } else if (strcmp(auxv_cpu_type, "pa6t") == 0) {
-      part_ = kPPCPA6T;
     }
   }
@@ -967,15 +951,6 @@ CPU::CPU()
     case POWER_8:
       part_ = kPPCPower8;
       break;
-    case POWER_7:
-      part_ = kPPCPower7;
-      break;
-    case POWER_6:
-      part_ = kPPCPower6;
-      break;
-    case POWER_5:
-      part_ = kPPCPower5;
-      break;
   }
 #endif  // V8_OS_AIX
 #endif  // !USE_SIMULATOR
View File
@@ -64,17 +64,7 @@ class V8_BASE_EXPORT CPU final {
   static const int kNvidiaDenverV10 = 0x002;
 
   // PPC-specific part codes
-  enum {
-    kPPCPower5,
-    kPPCPower6,
-    kPPCPower7,
-    kPPCPower8,
-    kPPCPower9,
-    kPPCPower10,
-    kPPCG4,
-    kPPCG5,
-    kPPCPA6T
-  };
+  enum { kPPCPower8, kPPCPower9, kPPCPower10 };
 
   // General features
   bool has_fpu() const { return has_fpu_; }
@@ -96,6 +86,7 @@ class V8_BASE_EXPORT CPU final {
   bool has_avx() const { return has_avx_; }
   bool has_avx2() const { return has_avx2_; }
   bool has_avx_vnni() const { return has_avx_vnni_; }
+  bool has_avx_vnni_int8() const { return has_avx_vnni_int8_; }
   bool has_fma3() const { return has_fma3_; }
   bool has_f16c() const { return has_f16c_; }
   bool has_bmi1() const { return has_bmi1_; }
@@ -180,6 +171,7 @@ class V8_BASE_EXPORT CPU final {
   bool has_avx_;
   bool has_avx2_;
   bool has_avx_vnni_;
+  bool has_avx_vnni_int8_;
   bool has_fma3_;
   bool has_f16c_;
   bool has_bmi1_;
View File
@@ -36,7 +36,7 @@ V8_BASE_EXPORT bool EnableInProcessStackDumping();
 V8_BASE_EXPORT void DisableSignalStackDump();
 
 // A stacktrace can be helpful in debugging. For example, you can include a
-// stacktrace member in a object (probably around #ifndef NDEBUG) so that you
+// stacktrace member in an object (probably around #ifndef NDEBUG) so that you
 // can later see where the given object was created from.
 class V8_BASE_EXPORT StackTrace {
  public:
View File
@@ -707,7 +707,7 @@ V8_INLINE double __kernel_sin(double x, double y, int iy) {
  * Algorithm
  *    1. Since tan(-x) = -tan(x), we need only to consider positive x.
  *    2. if x < 2^-28 (hx<0x3E300000 0), return x with inexact if x!=0.
- *    3. tan(x) is approximated by a odd polynomial of degree 27 on
+ *    3. tan(x) is approximated by an odd polynomial of degree 27 on
  *       [0,0.67434]
  *                         3             27
  *       tan(x) ~ x + T1*x + ... + T13*x
View File
@@ -139,6 +139,22 @@ auto IterateWithoutLast(const iterator_range<T>& t) {
   return IterateWithoutLast(range_copy);
 }
 
+// {IterateWithoutFirst} returns a container adapter usable in a range-based
+// "for" statement for iterating all elements without the first in a forward
+// order. It performs a check whether the container is empty.
+template <typename T>
+auto IterateWithoutFirst(T& t) {
+  DCHECK_NE(std::begin(t), std::end(t));
+  auto new_begin = std::begin(t);
+  return make_iterator_range(++new_begin, std::end(t));
+}
+
+template <typename T>
+auto IterateWithoutFirst(const iterator_range<T>& t) {
+  iterator_range<T> range_copy = {t.begin(), t.end()};
+  return IterateWithoutFirst(range_copy);
+}
+
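
Note: a small usage sketch for the adapters above (assuming src/base/iterator.h is included):

    #include <vector>
    #include "src/base/iterator.h"

    void Demo() {
      std::vector<int> xs = {1, 2, 3, 4};
      for (int x : v8::base::IterateWithoutFirst(xs)) {
        (void)x;  // visits 2, 3, 4
      }
      for (int x : v8::base::IterateWithoutLast(xs)) {
        (void)x;  // visits 1, 2, 3
      }
    }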
 // TupleIterator is an iterator wrapping around multiple iterators. It is use by
 // the `zip` function below to iterate over multiple containers at once.
 template <class... Iterators>
View File
@@ -423,9 +423,9 @@ bool is_inbounds(float_t v) {
 // Setup for Windows shared library export.
 #define V8_EXPORT_ENUM
 #ifdef BUILDING_V8_SHARED_PRIVATE
-#define V8_EXPORT_PRIVATE
+#define V8_EXPORT_PRIVATE __declspec(dllexport)
 #elif USING_V8_SHARED_PRIVATE
-#define V8_EXPORT_PRIVATE
+#define V8_EXPORT_PRIVATE __declspec(dllimport)
 #else
 #define V8_EXPORT_PRIVATE
 #endif  // BUILDING_V8_SHARED
@@ -435,8 +435,8 @@ bool is_inbounds(float_t v) {
 // Setup for Linux shared library export.
 #if V8_HAS_ATTRIBUTE_VISIBILITY
 #ifdef BUILDING_V8_SHARED_PRIVATE
-#define V8_EXPORT_PRIVATE
-#define V8_EXPORT_ENUM
+#define V8_EXPORT_PRIVATE __attribute__((visibility("default")))
+#define V8_EXPORT_ENUM V8_EXPORT_PRIVATE
 #else
 #define V8_EXPORT_PRIVATE
 #define V8_EXPORT_ENUM
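
Note: the same export-macro pattern, as a generic sketch for one's own shared library (illustrative names, not V8's):

    #if defined(_WIN32)
    #ifdef BUILDING_MYLIB
    #define MYLIB_EXPORT __declspec(dllexport)
    #else
    #define MYLIB_EXPORT __declspec(dllimport)
    #endif
    #elif defined(__GNUC__)
    #define MYLIB_EXPORT __attribute__((visibility("default")))
    #else
    #define MYLIB_EXPORT
    #endif

    class MYLIB_EXPORT Widget {};  // visible across the DSO boundary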
View File
@@ -162,5 +162,9 @@ bool PageAllocator::DecommitPages(void* address, size_t size) {
   return base::OS::DecommitPages(address, size);
 }
 
+bool PageAllocator::SealPages(void* address, size_t size) {
+  return base::OS::SealPages(address, size);
+}
+
 }  // namespace base
 }  // namespace v8
View File
@@ -52,6 +52,8 @@ class V8_BASE_EXPORT PageAllocator
   bool DecommitPages(void* address, size_t size) override;
 
+  bool SealPages(void* address, size_t size) override;
+
  private:
   friend class v8::base::SharedMemory;
View File
@@ -19,13 +19,15 @@
 #if V8_OS_DARWIN
 #include <malloc/malloc.h>
+#elif V8_OS_OPENBSD
+#include <sys/malloc.h>
 #elif V8_OS_ZOS
 #include <stdlib.h>
 #else
 #include <malloc.h>
 #endif
 
-#if (V8_OS_POSIX && !V8_OS_AIX && !V8_OS_SOLARIS && !V8_OS_ZOS) || V8_OS_WIN
+#if (V8_OS_POSIX && !V8_OS_AIX && !V8_OS_SOLARIS && !V8_OS_ZOS && !V8_OS_OPENBSD) || V8_OS_WIN
 #define V8_HAS_MALLOC_USABLE_SIZE 1
 #endif
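
Note: a sketch of how such a feature macro is typically consumed (illustrative helper, not from the diff):

    size_t UsableSize(void* ptr) {
    #if V8_HAS_MALLOC_USABLE_SIZE
      return malloc_usable_size(ptr);  // real usable size of the block
    #else
      return 0;  // not queryable on this platform
    #endif
    }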
View File
@@ -9,10 +9,10 @@
 #include <pthread.h>
 #include <semaphore.h>
 #include <stdarg.h>
 #include <strings.h>   // index
 #include <sys/mman.h>  // mmap & munmap
 #include <sys/time.h>
 #include <unistd.h>    // sysconf
 
 #include <cmath>
@@ -76,7 +76,7 @@ class CygwinTimezoneCache : public PosixTimezoneCache {
 const char* CygwinTimezoneCache::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
-  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  time_t tv = static_cast<time_t>(std::floor(time / msPerSecond));
   struct tm tm;
   struct tm* t = localtime_r(&tv, &tm);
   if (nullptr == t) return "";
@@ -204,6 +204,9 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
   return ptr;
 }
 
+// static
+bool OS::SealPages(void* address, size_t size) { return false; }
+
 // static
 bool OS::HasLazyCommits() {
   // TODO(alph): implement for the platform.
@@ -252,8 +255,8 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
       lib_name[strlen(lib_name) - 1] = '\0';
     } else {
       // No library name found, just record the raw address range.
-      snprintf(lib_name, kLibNameLen,
-               "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+      snprintf(lib_name, kLibNameLen, "%08" V8PRIxPTR "-%08" V8PRIxPTR, start,
+               end);
     }
     result.push_back(SharedLibraryAddress(lib_name, start, end));
   } else {
View File
@@ -320,6 +320,9 @@ bool OS::DecommitPages(void* address, size_t size) {
   DiscardSystemPages(address, size);
 }
 
+// static
+bool OS::SealPages(void* address, size_t size) { return false; }
+
 // static
 bool OS::CanReserveAddressSpace() { return true; }
View File
@@ -55,6 +55,8 @@
 #if V8_OS_DARWIN
 #include <mach/mach.h>
 #include <malloc/malloc.h>
+#elif V8_OS_OPENBSD
+#include <sys/malloc.h>
 #elif !V8_OS_ZOS
 #include <malloc.h>
 #endif
@@ -336,21 +338,27 @@ void* OS::GetRandomMmapAddr() {
   raw_addr &= 0x007fffff0000ULL;
   raw_addr += 0x7e8000000000ULL;
 #else
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_X64
   // Currently available CPUs have 48 bits of virtual addressing. Truncate
   // the hint address to 46 bits to give the kernel a fighting chance of
   // fulfilling our placement request.
   raw_addr &= uint64_t{0x3FFFFFFFF000};
+#elif V8_TARGET_ARCH_ARM64
+#if defined(V8_TARGET_OS_LINUX) || defined(V8_TARGET_OS_ANDROID)
+  // On Linux, the default virtual address space is limited to 39 bits when
+  // using 4KB pages, see arch/arm64/Kconfig. We truncate to 38 bits.
+  raw_addr &= uint64_t{0x3FFFFFF000};
+#else
+  // On macOS and elsewhere, we use 46 bits, same as on x64.
+  raw_addr &= uint64_t{0x3FFFFFFFF000};
+#endif
 #elif V8_TARGET_ARCH_PPC64
 #if V8_OS_AIX
-  // AIX: 64 bits of virtual addressing, but we limit address range to:
-  //   a) minimize Segment Lookaside Buffer (SLB) misses and
+  // AIX: 64 bits of virtual addressing, but we limit address range to minimize
+  // Segment Lookaside Buffer (SLB) misses.
   raw_addr &= uint64_t{0x3FFFF000};
   // Use extra address space to isolate the mmap regions.
   raw_addr += uint64_t{0x400000000000};
-#elif V8_TARGET_BIG_ENDIAN
-  // Big-endian Linux: 42 bits of virtual addressing.
-  raw_addr &= uint64_t{0x03FFFFFFF000};
 #else
   // Little-endian Linux: 46 bits of virtual addressing.
   raw_addr &= uint64_t{0x3FFFFFFF0000};
@@ -613,6 +621,20 @@ bool OS::DecommitPages(void* address, size_t size) {
 #endif  // !defined(_AIX)
 #endif  // !V8_OS_ZOS
 
+// static
+bool OS::SealPages(void* address, size_t size) {
+#ifdef V8_ENABLE_MEMORY_SEALING
+#if V8_OS_LINUX && defined(__NR_mseal)
+  long ret = syscall(__NR_mseal, address, size, 0);
+  return ret == 0;
+#else
+  return false;
+#endif
+#else  // V8_ENABLE_MEMORY_SEALING
+  return false;
+#endif
+}
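
Note: mseal(2) is Linux-specific (kernel 6.10+), and the hook above is additionally gated behind the V8_ENABLE_MEMORY_SEALING build flag — otherwise it returns false. A sketch of sealing a fresh read-only mapping through the new internal hook, assuming such a build:

    #include <sys/mman.h>
    #include "src/base/platform/platform.h"

    bool MapAndSeal(size_t size) {
      void* p = mmap(nullptr, size, PROT_READ,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) return false;
      // After sealing, later mprotect/munmap/mremap on the region fail.
      return v8::base::OS::SealPages(p, size);
    }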
 // static
 bool OS::CanReserveAddressSpace() { return true; }
@@ -729,7 +751,7 @@ void OS::DebugBreak() {
   asm("break");
 #elif V8_HOST_ARCH_LOONG64
   asm("break 0");
-#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
+#elif V8_HOST_ARCH_PPC64
   asm("twge 2,2");
 #elif V8_HOST_ARCH_IA32
   asm("int $3");
@@ -1349,6 +1371,15 @@ bool MainThreadIsCurrentThread() {
 Stack::StackSlot Stack::ObtainCurrentThreadStackStart() {
 #if V8_OS_ZOS
   return __get_stack_start();
+#elif V8_OS_OPENBSD
+  stack_t stack;
+  int error = pthread_stackseg_np(pthread_self(), &stack);
+  if (error) {
+    DCHECK(MainThreadIsCurrentThread());
+    return nullptr;
+  }
+  void* stack_start = reinterpret_cast<uint8_t*>(stack.ss_sp) + stack.ss_size;
+  return stack_start;
 #else
   pthread_attr_t attr;
   int error = pthread_getattr_np(pthread_self(), &attr);
View File
@@ -4,6 +4,8 @@
 // Platform-specific code for Win32.
 
+#include "src/base/platform/platform-win32.h"
+
 // Secure API functions are not available using MinGW with msvcrt.dll
 // on Windows XP. Make sure MINGW_HAS_SECURE_API is not defined to
 // disable definition of secure API functions in standard headers that
@@ -32,12 +34,10 @@
 #include "src/base/bits.h"
 #include "src/base/lazy-instance.h"
 #include "src/base/macros.h"
-#include "src/base/platform/platform-win32.h"
 #include "src/base/platform/platform.h"
 #include "src/base/platform/time.h"
 #include "src/base/timezone-cache.h"
 #include "src/base/utils/random-number-generator.h"
-#include "src/base/win32-headers.h"
 
 #if defined(_MSC_VER)
 #include <crtdbg.h>
@@ -980,7 +980,7 @@ void* AllocateInternal(void* hint, size_t size, size_t alignment,
 void CheckIsOOMError(int error) {
   // We expect one of ERROR_NOT_ENOUGH_MEMORY or ERROR_COMMITMENT_LIMIT. We'd
-  // still like to get the actual error code when its not one of the expected
+  // still like to get the actual error code when it's not one of the expected
   // errors, so use the construct below to achieve that.
   if (error != ERROR_NOT_ENOUGH_MEMORY) CHECK_EQ(ERROR_COMMITMENT_LIMIT, error);
 }
@@ -1121,6 +1121,9 @@ bool OS::DecommitPages(void* address, size_t size) {
   return VirtualFree(address, size, MEM_DECOMMIT) != 0;
 }
 
+// static
+bool OS::SealPages(void* address, size_t size) { return false; }
+
 // static
 bool OS::CanReserveAddressSpace() {
   return VirtualAlloc2 != nullptr && MapViewOfFile3 != nullptr &&
View File
@@ -64,6 +64,12 @@ extern "C" unsigned long __readfsdword(unsigned long);  // NOLINT(runtime/int)
 #endif  // V8_CC_MSVC && V8_HOST_ARCH_IA32
 #endif  // V8_NO_FAST_TLS
 
+#if V8_OS_OPENBSD
+#define PERMISSION_MUTABLE_SECTION __attribute__((section(".openbsd.mutable")))
+#else
+#define PERMISSION_MUTABLE_SECTION
+#endif
+
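
Note: no user of the macro appears in this hunk, so the following is a guess at intended usage — on OpenBSD, data whose protection is later changed must sit in the special ".openbsd.mutable" section; elsewhere the macro expands to nothing:

    // Illustrative only: a buffer whose permissions will be flipped at runtime.
    PERMISSION_MUTABLE_SECTION static char g_rwx_scratch[4096];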
 namespace heap::base {
 class Stack;
 }
@@ -402,6 +408,8 @@ class V8_BASE_EXPORT OS {
   V8_WARN_UNUSED_RESULT static bool DecommitPages(void* address, size_t size);
 
+  V8_WARN_UNUSED_RESULT static bool SealPages(void* address, size_t size);
+
   V8_WARN_UNUSED_RESULT static bool CanReserveAddressSpace();
 
   V8_WARN_UNUSED_RESULT static std::optional<AddressSpaceReservation>
View File
@@ -38,7 +38,6 @@
 #include <atomic>
 
 #include "src/base/lazy-instance.h"
-#include "src/base/win32-headers.h"
 #endif
 #include "src/base/cpu.h"
 #include "src/base/logging.h"
@@ -888,8 +887,8 @@ double ThreadTicks::TSCTicksPerSecond() {
   static const uint64_t tsc_initial = __rdtsc();
   static const uint64_t perf_counter_initial = QPCNowRaw();
 
-  // Make a another reading of the TSC and the performance counter every time
-  // that this function is called.
+  // Make another reading of the TSC and the performance counter every time
+  // this function is called.
   uint64_t tsc_now = __rdtsc();
   uint64_t perf_counter_now = QPCNowRaw();
View File
@@ -24,8 +24,6 @@
 #include "src/base/macros.h"
 #if V8_OS_WIN
 #include <windows.h>
-
-#include "src/base/win32-headers.h"
 #endif
 
 #if V8_OS_STARBOARD
View File
@@ -73,5 +73,9 @@ bool VirtualAddressSpacePageAllocator::DecommitPages(void* address,
   return vas_->DecommitPages(reinterpret_cast<Address>(address), size);
 }
 
+bool VirtualAddressSpacePageAllocator::SealPages(void* address, size_t size) {
+  return false;
+}
+
 }  // namespace base
 }  // namespace v8
View File
@@ -55,6 +55,8 @@ class V8_BASE_EXPORT VirtualAddressSpacePageAllocator
   bool DecommitPages(void* address, size_t size) override;
 
+  bool SealPages(void* address, size_t size) override;
+
  private:
   // Client of this class must keep the VirtualAddressSpace alive during the
   // lifetime of this instance.
View File
@@ -60,6 +60,14 @@ namespace v8 {
 namespace internal {
 namespace baseline {
 
+#define __ basm_.
+
+#define RCS_BASELINE_SCOPE(rcs)                               \
+  RCS_SCOPE(stats_,                                           \
+            local_isolate_->is_main_thread()                  \
+                ? RuntimeCallCounterId::kCompileBaseline##rcs \
+                : RuntimeCallCounterId::kCompileBackgroundBaseline##rcs)
+
 template <typename IsolateT>
 Handle<TrustedByteArray> BytecodeOffsetTableBuilder::ToBytecodeOffsetTable(
     IsolateT* isolate) {
@@ -294,14 +302,6 @@ BaselineCompiler::BaselineCompiler(
           base::bits::RoundUpToPowerOfTwo(16 + bytecode_->Size() / 4));
 }
 
-#define __ basm_.
-
-#define RCS_BASELINE_SCOPE(rcs)                               \
-  RCS_SCOPE(stats_,                                           \
-            local_isolate_->is_main_thread()                  \
-                ? RuntimeCallCounterId::kCompileBaseline##rcs \
-                : RuntimeCallCounterId::kCompileBackgroundBaseline##rcs)
-
 void BaselineCompiler::GenerateCode() {
   {
     RCS_BASELINE_SCOPE(PreVisit);
@@ -521,7 +521,7 @@ void BaselineCompiler::VisitSingleBytecode() {
   case interpreter::Bytecode::k##name: \
     Visit##name();                     \
     break;
-    BYTECODE_LIST(BYTECODE_CASE)
+    BYTECODE_LIST(BYTECODE_CASE, BYTECODE_CASE)
 #undef BYTECODE_CASE
   }
 }
@@ -570,7 +570,7 @@ void BaselineCompiler::TraceBytecode(Runtime::FunctionId function_id) {
 #endif
 
 #define DECLARE_VISITOR(name, ...) void Visit##name();
-  BYTECODE_LIST(DECLARE_VISITOR)
+  BYTECODE_LIST(DECLARE_VISITOR, DECLARE_VISITOR)
 #undef DECLARE_VISITOR
 
 #define DECLARE_VISITOR(name, ...) \
@@ -2413,6 +2413,9 @@ SaveAccumulatorScope::~SaveAccumulatorScope() {
   assembler_->Pop(kInterpreterAccumulatorRegister);
 }
 
+#undef RCS_BASELINE_SCOPE
+#undef __
+
 }  // namespace baseline
 }  // namespace internal
 }  // namespace v8
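
Note: BYTECODE_LIST now takes two macros (apparently one for regular bytecodes and one for a designated subset). A minimal X-macro sketch of that shape, with made-up list contents:

    // The list applies V to most entries and V_TSA to a designated subset.
    #define DEMO_BYTECODE_LIST(V, V_TSA) \
      V(LdaZero)                         \
      V(Star)                            \
      V_TSA(BitwiseNot)

    #define DECLARE_VISITOR(name, ...) void Visit##name();
    DEMO_BYTECODE_LIST(DECLARE_VISITOR, DECLARE_VISITOR)
    #undef DECLARE_VISITOR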
View File
@@ -147,7 +147,7 @@ class BaselineCompiler {
   // Single bytecode visitors.
 #define DECLARE_VISITOR(name, ...) void Visit##name();
-  BYTECODE_LIST(DECLARE_VISITOR)
+  BYTECODE_LIST(DECLARE_VISITOR, DECLARE_VISITOR)
 #undef DECLARE_VISITOR
 
   // Intrinsic call visitors.
View File
@@ -5,8 +5,12 @@
 specific_include_rules = {
   "setup-builtins-internal.cc": [
     "+src/compiler/pipeline.h",
+    "+src/compiler/turboshaft/builtin-compiler.h",
     "+src/compiler/turboshaft/phase.h",
   ],
+  "number-builtins-reducer-inl.h": [
+    "+src/compiler",
+  ],
   ".*-tsa.cc": [
     "+src/compiler",
   ],
View File
@@ -1935,8 +1935,9 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   Label jump_to_optimized_code;
   {
     // If maybe_target_code is not null, no need to call into runtime. A
-    // precondition here is: if maybe_target_code is a InstructionStream object,
-    // it must NOT be marked_for_deoptimization (callers must ensure this).
+    // precondition here is: if maybe_target_code is an InstructionStream
+    // object, it must NOT be marked_for_deoptimization (callers must ensure
+    // this).
     __ cmp(maybe_target_code, Operand(Smi::zero()));
     __ b(ne, &jump_to_optimized_code);
   }
@@ -2802,10 +2803,10 @@ struct SaveWasmParamsScope {
 // This builtin creates the following stack frame:
 //
 // [ feedback vector ]   <-- sp  // Added by this builtin.
-// [ Wasm instance ]             // Added by this builtin.
+// [ Wasm instance data ]        // Added by this builtin.
 // [ WASM frame marker ]         // Already there on entry.
 // [ saved fp ]          <-- fp  // Already there on entry.
 void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   Register func_index = wasm::kLiftoffFrameSetupFunctionReg;
   Register vector = r5;
@@ -2813,13 +2814,13 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   Label allocate_vector, done;
   __ ldr(vector,
-         FieldMemOperand(kWasmInstanceRegister,
+         FieldMemOperand(kWasmImplicitArgRegister,
                          WasmTrustedInstanceData::kFeedbackVectorsOffset));
   __ add(vector, vector, Operand(func_index, LSL, kTaggedSizeLog2));
   __ ldr(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
   __ JumpIfSmi(vector, &allocate_vector);
   __ bind(&done);
-  __ push(kWasmInstanceRegister);
+  __ push(kWasmImplicitArgRegister);
   __ push(vector);
   __ Ret();
@@ -2833,8 +2834,8 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   __ str(scratch, MemOperand(sp));
   {
     SaveWasmParamsScope save_params(masm);
-    // Arguments to the runtime function: instance, func_index.
-    __ push(kWasmInstanceRegister);
+    // Arguments to the runtime function: instance data, func_index.
+    __ push(kWasmImplicitArgRegister);
     __ SmiTag(func_index);
     __ push(func_index);
     // Allocate a stack slot where the runtime function can spill a pointer
@@ -2861,8 +2862,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   {
     SaveWasmParamsScope save_params(masm);
 
-    // Push the Wasm instance as an explicit argument to the runtime function.
-    __ push(kWasmInstanceRegister);
+    // Push the instance data as an explicit argument to the runtime function.
+    __ push(kWasmImplicitArgRegister);
     // Push the function index as second argument.
     __ push(kWasmCompileLazyFuncIndexRegister);
     // Initialize the JavaScript context with 0. CEntry will use it to
@@ -2876,9 +2877,9 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     // Saved parameters are restored at the end of this block.
   }
 
-  // After the instance register has been restored, we can add the jump table
-  // start to the jump table offset already stored in r8.
-  __ ldr(r9, FieldMemOperand(kWasmInstanceRegister,
+  // After the instance data register has been restored, we can add the jump
+  // table start to the jump table offset already stored in r8.
+  __ ldr(r9, FieldMemOperand(kWasmImplicitArgRegister,
                              WasmTrustedInstanceData::kJumpTableStartOffset));
   __ add(r8, r8, r9);
 }
@@ -3107,7 +3108,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
 void ResetStackSwitchFrameStackSlots(MacroAssembler* masm) {
   __ Zero(MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset),
-          MemOperand(fp, StackSwitchFrameConstants::kRefOffset));
+          MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
 }
 
 // TODO(irezvov): Consolidate with arm64 RegisterAllocator.
@@ -3241,19 +3242,21 @@ class RegisterAllocator {
 #define FREE_REG(Name) regs.Free(&Name);
 
 // Loads the context field of the WasmTrustedInstanceData or WasmImportData
-// depending on the ref's type, and places the result in the input register.
-void GetContextFromRef(MacroAssembler* masm, Register ref, Register scratch) {
-  __ LoadTaggedField(scratch, FieldMemOperand(ref, HeapObject::kMapOffset));
+// depending on the data's type, and places the result in the input register.
+void GetContextFromImplicitArg(MacroAssembler* masm, Register data,
+                               Register scratch) {
+  __ LoadTaggedField(scratch, FieldMemOperand(data, HeapObject::kMapOffset));
   __ CompareInstanceType(scratch, scratch, WASM_TRUSTED_INSTANCE_DATA_TYPE);
   Label instance;
   Label end;
   __ b(eq, &instance);
   __ LoadTaggedField(
-      ref, FieldMemOperand(ref, WasmImportData::kNativeContextOffset));
+      data, FieldMemOperand(data, WasmImportData::kNativeContextOffset));
   __ jmp(&end);
   __ bind(&instance);
   __ LoadTaggedField(
-      ref, FieldMemOperand(ref, WasmTrustedInstanceData::kNativeContextOffset));
+      data,
+      FieldMemOperand(data, WasmTrustedInstanceData::kNativeContextOffset));
   __ bind(&end);
 }
@@ -3274,14 +3277,7 @@ void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) {
   // arbitrarily.
   __ Push(r6, wasm::kGpParamRegisters[3], wasm::kGpParamRegisters[2],
           wasm::kGpParamRegisters[1]);
-  // Reserve fixed slots for the CSA wrapper.
-  // Two slots for stack-switching (central stack pointer and secondary stack
-  // limit):
-  Register scratch = r1;
-  __ mov(scratch, Operand::Zero());
-  __ Push(scratch);
-  __ Push(scratch);
-  // One slot for the signature:
+  // Reserve a slot for the signature.
   __ Push(r0);
   __ TailCallBuiltin(Builtin::kWasmToJsWrapperCSA);
 }
@@ -3649,8 +3645,8 @@ void SwitchBackAndReturnPromise(MacroAssembler* masm, RegisterAllocator& regs,
       promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset));
 
   __ ldr(kContextRegister,
-         MemOperand(fp, StackSwitchFrameConstants::kRefOffset));
-  GetContextFromRef(masm, kContextRegister, tmp);
+         MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
+  GetContextFromImplicitArg(masm, kContextRegister, tmp);
 
   ReloadParentContinuation(masm, promise, return_value, kContextRegister, tmp,
                            tmp2, tmp3);
@@ -3698,8 +3694,8 @@ void GenerateExceptionHandlingLandingPad(MacroAssembler* masm,
   DEFINE_SCOPED(tmp2);
   DEFINE_SCOPED(tmp3);
   __ ldr(kContextRegister,
-         MemOperand(fp, StackSwitchFrameConstants::kRefOffset));
-  GetContextFromRef(masm, kContextRegister, tmp);
+         MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
+  GetContextFromImplicitArg(masm, kContextRegister, tmp);
   ReloadParentContinuation(masm, promise, reason, kContextRegister, tmp, tmp2,
                            tmp3);
   RestoreParentSuspender(masm, tmp, tmp2);
@@ -3727,8 +3723,10 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
     __ AllocateStackSpace(StackSwitchFrameConstants::kNumSpillSlots *
                           kSystemPointerSize);
 
-  DEFINE_PINNED(ref, kWasmInstanceRegister);
-  __ ldr(ref, MemOperand(fp, JSToWasmWrapperFrameConstants::kRefParamOffset));
+  // Load the implicit argument (instance data or import data) from the frame.
+  DEFINE_PINNED(implicit_arg, kWasmImplicitArgRegister);
+  __ ldr(implicit_arg,
+         MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset));
 
   DEFINE_PINNED(wrapper_buffer,
                 WasmJSToWasmWrapperDescriptor::WrapperBufferRegister());
@@ -3737,20 +3735,22 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
   Register original_fp = no_reg;
   Register new_wrapper_buffer = no_reg;
   if (stack_switch) {
-    SwitchToAllocatedStack(masm, regs, ref, wrapper_buffer, original_fp,
-                           new_wrapper_buffer, &suspend);
+    SwitchToAllocatedStack(masm, regs, implicit_arg, wrapper_buffer,
+                           original_fp, new_wrapper_buffer, &suspend);
   } else {
     original_fp = fp;
     new_wrapper_buffer = wrapper_buffer;
   }
 
-  regs.ResetExcept(original_fp, wrapper_buffer, ref, new_wrapper_buffer);
+  regs.ResetExcept(original_fp, wrapper_buffer, implicit_arg,
+                   new_wrapper_buffer);
 
   {
     __ str(new_wrapper_buffer,
           MemOperand(fp, JSToWasmWrapperFrameConstants::kWrapperBufferOffset));
     if (stack_switch) {
-      __ str(ref, MemOperand(fp, StackSwitchFrameConstants::kRefOffset));
+      __ str(implicit_arg,
+             MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
       DEFINE_SCOPED(scratch)
       __ ldr(
           scratch,
@@ -3777,12 +3777,13 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
   if (stack_switch) {
     FREE_REG(new_wrapper_buffer)
   }
-  FREE_REG(ref)
+  FREE_REG(implicit_arg)
   for (auto reg : wasm::kGpParamRegisters) {
     regs.Reserve(reg);
   }
 
-  // The first GP parameter is the instance, which we handle specially.
+  // The first GP parameter holds the trusted instance data or the import data.
+  // This is handled specially.
   int stack_params_offset =
       (arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize +
       arraysize(wasm::kFpParamRegisters) * kDoubleSize;
@@ -3896,14 +3897,15 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
   // r2: pointer to the byte buffer which contains all parameters.
   if (stack_switch) {
     __ ldr(r1, MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset));
-    __ ldr(r0, MemOperand(fp, StackSwitchFrameConstants::kRefOffset));
+    __ ldr(r0, MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
   } else {
     __ ldr(r1, MemOperand(
                    fp, JSToWasmWrapperFrameConstants::kResultArrayParamOffset));
-    __ ldr(r0, MemOperand(fp, JSToWasmWrapperFrameConstants::kRefParamOffset));
+    __ ldr(r0,
+           MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset));
   }
   Register scratch = r3;
-  GetContextFromRef(masm, r0, scratch);
+  GetContextFromImplicitArg(masm, r0, scratch);
 
   __ CallBuiltin(Builtin::kJSToWasmHandleReturns);
@@ -4202,6 +4204,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
   __ Jump(scratch);
 }
 
+#if V8_ENABLE_WEBASSEMBLY
+void Builtins::Generate_WasmHandleStackOverflow(MacroAssembler* masm) {
+  __ Trap();
+}
+#endif  // V8_ENABLE_WEBASSEMBLY
+
 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
   Label negate, done;
View File
@@ -570,6 +570,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Resume (Ignition/TurboFan) generator object.
   {
+    // TODO(40931165): use parameter count from JSDispatchTable and validate
+    // that it matches the number of values in the JSGeneratorObject.
     __ LoadTaggedField(
         x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
     __ Ldrh(w0, FieldMemOperand(
@@ -1519,12 +1521,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
       flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing);
 
+#ifndef V8_ENABLE_LEAPTIERING
+    // TODO(olivf, 42204201): This fastcase is difficult to support with the
+    // sandbox as it requires getting write access to the dispatch table. See
+    // `JSFunction::UpdateCode`. We might want to remove it for all
+    // configurations as it does not seem to be performance sensitive.
+
     // Load the baseline code into the closure.
     __ Move(x2, kInterpreterBytecodeArrayRegister);
     static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
     __ ReplaceClosureCodeWithOptimizedCode(x2, closure);
     __ JumpCodeObject(x2, kJSEntrypointTag);
+#endif  // V8_ENABLE_LEAPTIERING
+
     __ bind(&install_baseline_code);
     __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
   }
@@ -2184,8 +2194,9 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   Label jump_to_optimized_code;
   {
     // If maybe_target_code is not null, no need to call into runtime. A
-    // precondition here is: if maybe_target_code is a InstructionStream object,
-    // it must NOT be marked_for_deoptimization (callers must ensure this).
+    // precondition here is: if maybe_target_code is an InstructionStream
+    // object, it must NOT be marked_for_deoptimization (callers must ensure
+    // this).
     __ CompareTaggedAndBranch(x0, Smi::zero(), ne, &jump_to_optimized_code);
   }
@@ -2837,9 +2848,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   //  -- cp : the function context.
   // -----------------------------------
 
+#ifdef V8_ENABLE_LEAPTIERING
+  __ InvokeFunctionCode(x1, no_reg, x0, InvokeType::kJump);
+#else
   __ Ldrh(x2,
           FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
   __ InvokeFunctionCode(x1, no_reg, x2, x0, InvokeType::kJump);
+#endif  // V8_ENABLE_LEAPTIERING
 }
 
 namespace {
@@ -3194,8 +3209,8 @@ constexpr RegList kSavedGpRegs = ([]() constexpr {
   for (Register gp_param_reg : wasm::kGpParamRegisters) {
     saved_gp_regs.set(gp_param_reg);
   }
-  // The instance has already been stored in the fixed part of the frame.
-  saved_gp_regs.clear(kWasmInstanceRegister);
+  // The instance data has already been stored in the fixed part of the frame.
+  saved_gp_regs.clear(kWasmImplicitArgRegister);
   // All set registers were unique. The instance is skipped.
   CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) - 1);
   // We push a multiple of 16 bytes.
@@ -3219,19 +3234,19 @@ constexpr DoubleRegList kSavedFpRegs = ([]() constexpr {
 // When entering this builtin, we have just created a Wasm stack frame:
 //
-// [ Wasm instance ]       <-- sp
+// [ Wasm instance data ]  <-- sp
 // [ WASM frame marker ]
 // [ saved fp ]            <-- fp
 //
 // Due to stack alignment restrictions, this builtin adds the feedback vector
 // plus a filler to the stack. The stack pointer will be
 // moved an appropriate distance by {PatchPrepareStackFrame}.
 //
 // [ (unused) ]            <-- sp
 // [ feedback vector ]
-// [ Wasm instance ]
+// [ Wasm instance data ]
 // [ WASM frame marker ]
 // [ saved fp ]            <-- fp
 void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   Register func_index = wasm::kLiftoffFrameSetupFunctionReg;
   Register vector = x9;
@@ -3239,7 +3254,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   Label allocate_vector, done;
   __ LoadTaggedField(
-      vector, FieldMemOperand(kWasmInstanceRegister,
+      vector, FieldMemOperand(kWasmImplicitArgRegister,
                               WasmTrustedInstanceData::kFeedbackVectorsOffset));
   __ Add(vector, vector, Operand(func_index, LSL, kTaggedSizeLog2));
   __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
@@ -3259,11 +3274,11 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   __ PushQRegList(kSavedFpRegs);
   __ Push<MacroAssembler::kSignLR>(lr, xzr);  // xzr is for alignment.
 
-  // Arguments to the runtime function: instance, func_index, and an
+  // Arguments to the runtime function: instance data, func_index, and an
   // additional stack slot for the NativeModule. The first pushed register
   // is for alignment. {x0} and {x1} are picked arbitrarily.
   __ SmiTag(func_index);
-  __ Push(x0, kWasmInstanceRegister, func_index, x1);
+  __ Push(x0, kWasmImplicitArgRegister, func_index, x1);
   __ Mov(cp, Smi::zero());
   __ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3);
   __ Mov(vector, kReturnRegister0);
@@ -3272,9 +3287,9 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   __ Pop<MacroAssembler::kAuthLR>(xzr, lr);
   __ PopQRegList(kSavedFpRegs);
   __ PopXRegList(kSavedGpRegs);
-  // Restore the instance from the frame.
-  __ Ldr(kWasmInstanceRegister,
-         MemOperand(fp, WasmFrameConstants::kWasmInstanceOffset));
+  // Restore the instance data from the frame.
+  __ Ldr(kWasmImplicitArgRegister,
+         MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset));
   __ Mov(scratch, StackFrame::TypeToMarker(StackFrame::WASM));
   __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
   __ B(&done);
@@ -3292,19 +3307,19 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   {
     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
     FrameScope scope(masm, StackFrame::INTERNAL);
-    // Manually save the instance (which kSavedGpRegs skips because its
+    // Manually save the instance data (which kSavedGpRegs skips because its
     // other use puts it into the fixed frame anyway). The stack slot is valid
     // because the {FrameScope} (via {EnterFrame}) always reserves it (for stack
     // alignment reasons). The instance is needed because once this builtin is
     // done, we'll call a regular Wasm function.
-    __ Str(kWasmInstanceRegister,
-           MemOperand(fp, WasmFrameConstants::kWasmInstanceOffset));
+    __ Str(kWasmImplicitArgRegister,
+           MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset));
 
     // Save registers that we need to keep alive across the runtime call.
     __ PushXRegList(kSavedGpRegs);
     __ PushQRegList(kSavedFpRegs);
 
-    __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
+    __ Push(kWasmImplicitArgRegister, kWasmCompileLazyFuncIndexRegister);
     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
     __ Mov(cp, Smi::zero());
@@ -3317,9 +3332,9 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     // Restore registers.
     __ PopQRegList(kSavedFpRegs);
     __ PopXRegList(kSavedGpRegs);
-    // Restore the instance from the frame.
-    __ Ldr(kWasmInstanceRegister,
-           MemOperand(fp, WasmFrameConstants::kWasmInstanceOffset));
+    // Restore the instance data from the frame.
+    __ Ldr(kWasmImplicitArgRegister,
+           MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset));
   }
 
   // The runtime function returned the jump table slot offset as a Smi (now in
@@ -3327,7 +3342,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   // target, to be compliant with CFI.
   constexpr Register temp = x8;
   static_assert(!kSavedGpRegs.has(temp));
-  __ ldr(temp, FieldMemOperand(kWasmInstanceRegister,
+  __ ldr(temp, FieldMemOperand(kWasmImplicitArgRegister,
                                WasmTrustedInstanceData::kJumpTableStartOffset));
   __ add(x17, temp, Operand(x17));
   // Finally, jump to the jump table slot for the function.
@@ -3552,7 +3567,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
 void ResetStackSwitchFrameStackSlots(MacroAssembler* masm) {
   __ Str(xzr, MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset));
-  __ Str(xzr, MemOperand(fp, StackSwitchFrameConstants::kRefOffset));
+  __ Str(xzr, MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
 }
 
 // TODO(irezvov): Consolidate with arm RegisterAllocator.
@@ -3685,19 +3700,21 @@ class RegisterAllocator {
 #define FREE_REG(Name) regs.Free(&Name);
 
 // Loads the context field of the WasmTrustedInstanceData or WasmImportData
-// depending on the ref's type, and places the result in the input register.
-void GetContextFromRef(MacroAssembler* masm, Register ref, Register scratch) {
-  __ LoadTaggedField(scratch, FieldMemOperand(ref, HeapObject::kMapOffset));
+// depending on the data's type, and places the result in the input register.
+void GetContextFromImplicitArg(MacroAssembler* masm, Register data,
+                               Register scratch) {
+  __ LoadTaggedField(scratch, FieldMemOperand(data, HeapObject::kMapOffset));
   __ CompareInstanceType(scratch, scratch, WASM_TRUSTED_INSTANCE_DATA_TYPE);
   Label instance;
   Label end;
   __ B(eq, &instance);
   __ LoadTaggedField(
-      ref, FieldMemOperand(ref, WasmImportData::kNativeContextOffset));
+      data, FieldMemOperand(data, WasmImportData::kNativeContextOffset));
   __ jmp(&end);
   __ bind(&instance);
   __ LoadTaggedField(
-      ref, FieldMemOperand(ref, WasmTrustedInstanceData::kNativeContextOffset));
+      data,
+      FieldMemOperand(data, WasmTrustedInstanceData::kNativeContextOffset));
   __ bind(&end);
 }
@@ -3714,10 +3731,8 @@ void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) {
   __ Push(wasm::kGpParamRegisters[6], wasm::kGpParamRegisters[5],
           wasm::kGpParamRegisters[4], wasm::kGpParamRegisters[3]);
   __ Push(wasm::kGpParamRegisters[2], wasm::kGpParamRegisters[1]);
-  // Push four more slots that will be used as fixed spill slots in the torque
-  // wrapper. Two slots for stack-switching (central stack pointer and secondary
-  // stack limit), one for the signature, and one for stack alignment.
-  __ Push(xzr, xzr, xzr, xzr);
+  // Reserve a slot for the signature, and one for stack alignment.
+  __ Push(xzr, xzr);
   __ TailCallBuiltin(Builtin::kWasmToJsWrapperCSA);
 }
@@ -4068,8 +4083,8 @@ void SwitchBackAndReturnPromise(MacroAssembler* masm, RegisterAllocator& regs,
       promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset));
 
   __ Ldr(kContextRegister,
MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
GetContextFromRef(masm, kContextRegister, tmp); GetContextFromImplicitArg(masm, kContextRegister, tmp);
ReloadParentContinuation(masm, promise, return_value, kContextRegister, tmp, ReloadParentContinuation(masm, promise, return_value, kContextRegister, tmp,
tmp2, tmp3); tmp2, tmp3);
@ -4114,12 +4129,12 @@ void GenerateExceptionHandlingLandingPad(MacroAssembler* masm,
promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset)); promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset));
__ Ldr(kContextRegister, __ Ldr(kContextRegister,
MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
DEFINE_SCOPED(tmp); DEFINE_SCOPED(tmp);
DEFINE_SCOPED(tmp2); DEFINE_SCOPED(tmp2);
DEFINE_SCOPED(tmp3); DEFINE_SCOPED(tmp3);
GetContextFromRef(masm, kContextRegister, tmp); GetContextFromImplicitArg(masm, kContextRegister, tmp);
ReloadParentContinuation(masm, promise, reason, kContextRegister, tmp, tmp2, ReloadParentContinuation(masm, promise, reason, kContextRegister, tmp, tmp2,
tmp3); tmp3);
RestoreParentSuspender(masm, tmp, tmp2); RestoreParentSuspender(masm, tmp, tmp2);
@ -4148,8 +4163,10 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
Immediate(StackSwitchFrameConstants::kNumSpillSlots * Immediate(StackSwitchFrameConstants::kNumSpillSlots *
kSystemPointerSize)); kSystemPointerSize));
DEFINE_PINNED(ref, kWasmInstanceRegister); // Load the implicit argument (instance data or import data) from the frame.
__ Ldr(ref, MemOperand(fp, JSToWasmWrapperFrameConstants::kRefParamOffset)); DEFINE_PINNED(implicit_arg, kWasmImplicitArgRegister);
__ Ldr(implicit_arg,
MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset));
DEFINE_PINNED(wrapper_buffer, DEFINE_PINNED(wrapper_buffer,
WasmJSToWasmWrapperDescriptor::WrapperBufferRegister()); WasmJSToWasmWrapperDescriptor::WrapperBufferRegister());
@ -4158,20 +4175,22 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
Register original_fp = no_reg; Register original_fp = no_reg;
Register new_wrapper_buffer = no_reg; Register new_wrapper_buffer = no_reg;
if (stack_switch) { if (stack_switch) {
SwitchToAllocatedStack(masm, regs, ref, wrapper_buffer, original_fp, SwitchToAllocatedStack(masm, regs, implicit_arg, wrapper_buffer,
new_wrapper_buffer, &suspend); original_fp, new_wrapper_buffer, &suspend);
} else { } else {
original_fp = fp; original_fp = fp;
new_wrapper_buffer = wrapper_buffer; new_wrapper_buffer = wrapper_buffer;
} }
regs.ResetExcept(original_fp, wrapper_buffer, ref, new_wrapper_buffer); regs.ResetExcept(original_fp, wrapper_buffer, implicit_arg,
new_wrapper_buffer);
{ {
__ Str(new_wrapper_buffer, __ Str(new_wrapper_buffer,
MemOperand(fp, JSToWasmWrapperFrameConstants::kWrapperBufferOffset)); MemOperand(fp, JSToWasmWrapperFrameConstants::kWrapperBufferOffset));
if (stack_switch) { if (stack_switch) {
__ Str(ref, MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); __ Str(implicit_arg,
MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
DEFINE_SCOPED(scratch) DEFINE_SCOPED(scratch)
__ Ldr( __ Ldr(
scratch, scratch,
@ -4203,12 +4222,13 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
if (stack_switch) { if (stack_switch) {
FREE_REG(new_wrapper_buffer) FREE_REG(new_wrapper_buffer)
} }
FREE_REG(ref) FREE_REG(implicit_arg)
for (auto reg : wasm::kGpParamRegisters) { for (auto reg : wasm::kGpParamRegisters) {
regs.Reserve(reg); regs.Reserve(reg);
} }
// The first GP parameter is the instance, which we handle specially. // The first GP parameter holds the trusted instance data or the import data.
// This is handled specially.
int stack_params_offset = int stack_params_offset =
(arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize + (arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize +
arraysize(wasm::kFpParamRegisters) * kDoubleSize; arraysize(wasm::kFpParamRegisters) * kDoubleSize;
@ -4333,14 +4353,15 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
// x2: pointer to the byte buffer which contains all parameters. // x2: pointer to the byte buffer which contains all parameters.
if (stack_switch) { if (stack_switch) {
__ Ldr(x1, MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset)); __ Ldr(x1, MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset));
__ Ldr(x0, MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); __ Ldr(x0, MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
} else { } else {
__ Ldr(x1, MemOperand( __ Ldr(x1, MemOperand(
fp, JSToWasmWrapperFrameConstants::kResultArrayParamOffset)); fp, JSToWasmWrapperFrameConstants::kResultArrayParamOffset));
__ Ldr(x0, MemOperand(fp, JSToWasmWrapperFrameConstants::kRefParamOffset)); __ Ldr(x0,
MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset));
} }
Register scratch = x3; Register scratch = x3;
GetContextFromRef(masm, x0, scratch); GetContextFromImplicitArg(masm, x0, scratch);
__ CallBuiltin(Builtin::kJSToWasmHandleReturns); __ CallBuiltin(Builtin::kJSToWasmHandleReturns);
Label return_promise; Label return_promise;
@ -4668,6 +4689,67 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Br(x17); __ Br(x17);
} }
#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmHandleStackOverflow(MacroAssembler* masm) {
using ER = ExternalReference;
Register frame_base = WasmHandleStackOverflowDescriptor::FrameBaseRegister();
Register gap = WasmHandleStackOverflowDescriptor::GapRegister();
{
DCHECK_NE(kCArgRegs[1], frame_base);
DCHECK_NE(kCArgRegs[3], frame_base);
__ Mov(kCArgRegs[3], gap);
__ Mov(kCArgRegs[1], sp);
__ Sub(kCArgRegs[2], frame_base, kCArgRegs[1]);
__ Mov(kCArgRegs[4], fp);
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(kCArgRegs[3], padreg);
__ Mov(kCArgRegs[0], ER::isolate_address());
__ CallCFunction(ER::wasm_grow_stack(), 5);
__ Pop(padreg, gap);
DCHECK_NE(kReturnRegister0, gap);
}
Label call_runtime;
// wasm_grow_stack returns zero if it cannot grow a stack.
__ Cbz(kReturnRegister0, &call_runtime);
{
UseScratchRegisterScope temps(masm);
Register new_fp = temps.AcquireX();
// Calculate old FP - SP offset to adjust FP accordingly to new SP.
__ Mov(new_fp, sp);
__ Sub(new_fp, fp, new_fp);
__ Add(new_fp, kReturnRegister0, new_fp);
__ Mov(fp, new_fp);
}
SwitchSimulatorStackLimit(masm);
__ Mov(sp, kReturnRegister0);
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
__ Mov(scratch, StackFrame::TypeToMarker(StackFrame::WASM_SEGMENT_START));
__ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
}
__ Ret();
__ bind(&call_runtime);
// If wasm_grow_stack returns zero interruption or stack overflow
// should be handled by runtime call.
{
__ Ldr(kWasmImplicitArgRegister,
MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset));
__ LoadTaggedField(
cp, FieldMemOperand(kWasmImplicitArgRegister,
WasmTrustedInstanceData::kNativeContextOffset));
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
__ SmiTag(gap);
__ PushArgument(gap);
__ CallRuntime(Runtime::kWasmStackGuard);
__ LeaveFrame(StackFrame::INTERNAL);
__ Ret();
}
}
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_DoubleToI(MacroAssembler* masm) { void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
Label done; Label done;
Register result = x7; Register result = x7;
@ -4853,7 +4935,24 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm,
__ EnterExitFrame(scratch, FC::getExtraSlotsCountFrom<ExitFrameConstants>(), __ EnterExitFrame(scratch, FC::getExtraSlotsCountFrom<ExitFrameConstants>(),
StackFrame::API_CALLBACK_EXIT); StackFrame::API_CALLBACK_EXIT);
MemOperand argc_operand = MemOperand(fp, FC::kFCIArgcOffset); // This is a workaround for performance regression observed on Apple Silicon
// (https://crbug.com/347741609): reading argc value after the call via
// MemOperand argc_operand = MemOperand(fp, FC::kFCIArgcOffset);
// is noticeably slower than using sp-based access:
MemOperand argc_operand = ExitFrameStackSlotOperand(FCA::kLengthOffset);
if (v8_flags.debug_code) {
// Ensure sp-based calculation of FC::length_'s address matches the
// fp-based one.
Label ok;
// +kSystemPointerSize is for the slot at [sp] which is reserved in all
// ExitFrames for storing the return PC.
__ Add(scratch, sp,
FCA::kLengthOffset + kSystemPointerSize - FC::kFCIArgcOffset);
__ cmp(scratch, fp);
__ B(eq, &ok);
__ DebugBreak();
__ Bind(&ok);
}
{ {
ASM_CODE_COMMENT_STRING(masm, "Initialize v8::FunctionCallbackInfo"); ASM_CODE_COMMENT_STRING(masm, "Initialize v8::FunctionCallbackInfo");
// FunctionCallbackInfo::length_. // FunctionCallbackInfo::length_.
@ -5205,7 +5304,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ Mov(x5, unwind_limit); __ Mov(x5, unwind_limit);
__ CopyDoubleWords(x3, x1, x5); __ CopyDoubleWords(x3, x1, x5);
// Since {unwind_limit} is the frame size up to the parameter count, we might // Since {unwind_limit} is the frame size up to the parameter count, we might
// end up with a unaligned stack pointer. This is later recovered when // end up with an unaligned stack pointer. This is later recovered when
// setting the stack pointer to {caller_frame_top_offset}. // setting the stack pointer to {caller_frame_top_offset}.
__ Bic(unwind_limit, unwind_limit, 1); __ Bic(unwind_limit, unwind_limit, 1);
__ Drop(unwind_limit); __ Drop(unwind_limit);
@ -5497,8 +5596,13 @@ void Builtins::Generate_RestartFrameTrampoline(MacroAssembler* masm) {
// The arguments are already in the stack (including any necessary padding), // The arguments are already in the stack (including any necessary padding),
// we should not try to massage the arguments again. // we should not try to massage the arguments again.
#ifdef V8_ENABLE_LEAPTIERING
__ InvokeFunction(x1, x0, InvokeType::kJump,
ArgumentAdaptionMode::kDontAdapt);
#else
__ Mov(x2, kDontAdaptArgumentsSentinel); __ Mov(x2, kDontAdaptArgumentsSentinel);
__ InvokeFunction(x1, x2, x0, InvokeType::kJump); __ InvokeFunction(x1, x2, x0, InvokeType::kJump);
#endif
} }
#undef __ #undef __
@@ -92,7 +92,7 @@ extern enum ArrayFromAsyncIterableResolveContextSlots extends intptr
 }

 extern macro AllocateRootFunctionWithContext(
-    constexpr intptr, FunctionContext): JSFunction;
+    constexpr intptr, FunctionContext, NativeContext): JSFunction;

 const kArrayFromAsyncIterableOnFulfilledSharedFun: constexpr intptr
     generates 'RootIndex::kArrayFromAsyncIterableOnFulfilledSharedFun';
@@ -139,13 +139,15 @@ macro CreateArrayFromAsyncIterableResolveContext(
       ArrayFromAsyncIterableResolveContextSlots::
           kArrayFromAsyncIterableResolveOnFulfilledFunctionSlot,
       AllocateRootFunctionWithContext(
-          kArrayFromAsyncIterableOnFulfilledSharedFun, resolveContext));
+          kArrayFromAsyncIterableOnFulfilledSharedFun, resolveContext,
+          nativeContext));
   InitContextSlot(
       resolveContext,
       ArrayFromAsyncIterableResolveContextSlots::
           kArrayFromAsyncIterableResolveOnRejectedFunctionSlot,
       AllocateRootFunctionWithContext(
-          kArrayFromAsyncIterableOnRejectedSharedFun, resolveContext));
+          kArrayFromAsyncIterableOnRejectedSharedFun, resolveContext,
+          nativeContext));
   InitContextSlot(
       resolveContext,
       ArrayFromAsyncIterableResolveContextSlots::
@@ -559,13 +561,15 @@ macro CreateArrayFromAsyncArrayLikeResolveContext(
       ArrayFromAsyncArrayLikeResolveContextSlots::
           kArrayFromAsyncArrayLikeResolveOnFulfilledFunctionSlot,
       AllocateRootFunctionWithContext(
-          kArrayFromAsyncArrayLikeOnFulfilledSharedFun, resolveContext));
+          kArrayFromAsyncArrayLikeOnFulfilledSharedFun, resolveContext,
+          nativeContext));
   InitContextSlot(
       resolveContext,
       ArrayFromAsyncArrayLikeResolveContextSlots::
           kArrayFromAsyncArrayLikeResolveOnRejectedFunctionSlot,
       AllocateRootFunctionWithContext(
-          kArrayFromAsyncArrayLikeOnRejectedSharedFun, resolveContext));
+          kArrayFromAsyncArrayLikeOnRejectedSharedFun, resolveContext,
+          nativeContext));
   InitContextSlot(
       resolveContext,
       ArrayFromAsyncArrayLikeResolveContextSlots::
@@ -26,6 +26,8 @@
 namespace v8 {
 namespace internal {

+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
     compiler::CodeAssemblerState* state)
     : CodeStubAssembler(state),
@@ -2244,5 +2246,7 @@ TF_BUILTIN(CreateObjectFromSlowBoilerplateHelper,
   }
 }

+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -699,7 +699,7 @@ class ArrayConcatVisitor {
       set_exceeds_array_limit(true);
       // Exception hasn't been thrown at this point. Return true to
       // break out, and caller will throw. !visit would imply that
-      // there is already a exception.
+      // there is already an exception.
       return true;
     }
@@ -2,17 +2,67 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

+#include "src/api/api.h"
 #include "src/base/logging.h"
+#include "src/base/macros.h"
 #include "src/builtins/builtins-utils-inl.h"
+#include "src/builtins/builtins.h"
+#include "src/execution/isolate.h"
 #include "src/handles/maybe-handles.h"
+#include "src/objects/heap-object.h"
 #include "src/objects/js-disposable-stack-inl.h"
 #include "src/objects/js-disposable-stack.h"
+#include "src/objects/js-objects.h"
+#include "src/objects/js-promise-inl.h"
 #include "src/objects/js-promise.h"
+#include "src/objects/objects.h"
 #include "src/roots/roots.h"

 namespace v8 {
 namespace internal {

+BUILTIN(AsyncDisposableStackOnFulfilled) {
+  HandleScope scope(isolate);
+  DirectHandle<JSDisposableStackBase> stack(
+      Cast<JSDisposableStackBase>(isolate->context()->get(static_cast<int>(
+          JSDisposableStackBase::AsyncDisposableStackContextSlots::kStack))),
+      isolate);
+  Handle<JSPromise> promise(
+      Cast<JSPromise>(isolate->context()->get(static_cast<int>(
+          JSDisposableStackBase::AsyncDisposableStackContextSlots::
+              kOuterPromise))),
+      isolate);
+
+  MAYBE_RETURN(JSAsyncDisposableStack::NextDisposeAsyncIteration(isolate, stack,
+                                                                 promise),
+               ReadOnlyRoots(isolate).exception());
+  return ReadOnlyRoots(isolate).undefined_value();
+}
+
+BUILTIN(AsyncDisposableStackOnRejected) {
+  HandleScope scope(isolate);
+  Handle<JSDisposableStackBase> stack(
+      Cast<JSDisposableStackBase>(isolate->context()->get(static_cast<int>(
+          JSDisposableStackBase::AsyncDisposableStackContextSlots::kStack))),
+      isolate);
+  Handle<JSPromise> promise(
+      Cast<JSPromise>(isolate->context()->get(static_cast<int>(
+          JSDisposableStackBase::AsyncDisposableStackContextSlots::
+              kOuterPromise))),
+      isolate);
+
+  Handle<Object> rejection_error = args.at(1);
+  DCHECK(isolate->is_catchable_by_javascript(*rejection_error));
+  JSDisposableStackBase::HandleErrorInDisposal(isolate, stack, rejection_error);
+  MAYBE_RETURN(JSAsyncDisposableStack::NextDisposeAsyncIteration(isolate, stack,
+                                                                 promise),
+               ReadOnlyRoots(isolate).exception());
+  return ReadOnlyRoots(isolate).undefined_value();
+}
+
 // Part of
 // https://tc39.es/proposal-explicit-resource-management/#sec-getdisposemethod
 BUILTIN(AsyncDisposeFromSyncDispose) {
@@ -32,6 +82,11 @@ BUILTIN(AsyncDisposeFromSyncDispose) {
               JSDisposableStackBase::AsyncDisposeFromSyncDisposeContextSlots::
                   kMethod))),
       isolate);
+
+  v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+  try_catch.SetVerbose(false);
+  try_catch.SetCaptureMessage(false);
+
   MaybeHandle<Object> result = Execution::Call(
       isolate, sync_method, ReadOnlyRoots(isolate).undefined_value_handle(), 0,
       nullptr);
@@ -43,13 +98,309 @@
     //    undefined »).
     JSPromise::Resolve(promise, result_handle).ToHandleChecked();
   } else {
+    Tagged<Object> exception = isolate->exception();
+    if (!isolate->is_catchable_by_javascript(exception)) {
+      return {};
+    }
     // d. IfAbruptRejectPromise(result, promiseCapability).
-    UNIMPLEMENTED();
+    DCHECK(try_catch.HasCaught());
+    JSPromise::Reject(promise, handle(exception, isolate));
   }

   // f. Return promiseCapability.[[Promise]].
   return *promise;
 }

+// https://tc39.es/proposal-explicit-resource-management/#sec-asyncdisposablestack
+BUILTIN(AsyncDisposableStackConstructor) {
+  const char kMethodName[] = "AsyncDisposableStack";
+  HandleScope scope(isolate);
+
+  // 1. If NewTarget is undefined, throw a TypeError exception.
+  if (!IsJSReceiver(*args.new_target(), isolate)) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  kMethodName)));
+  }
+
+  // 2. Let asyncDisposableStack be ? OrdinaryCreateFromConstructor(NewTarget,
+  //    "%AsyncDisposableStack.prototype%", « [[AsyncDisposableState]],
+  //    [[DisposeCapability]] »).
+  DirectHandle<Map> map;
+  Handle<JSFunction> target = args.target();
+  Handle<JSReceiver> new_target = Cast<JSReceiver>(args.new_target());
+
+  DCHECK_EQ(*target,
+            target->native_context()->js_async_disposable_stack_function());
+
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target));
+
+  DirectHandle<JSAsyncDisposableStack> async_disposable_stack =
+      isolate->factory()->NewJSAsyncDisposableStack(map);
+  // 3. Set asyncDisposableStack.[[AsyncDisposableState]] to pending.
+  // 4. Set asyncDisposableStack.[[DisposeCapability]] to
+  //    NewDisposeCapability().
+  JSDisposableStackBase::InitializeJSDisposableStackBase(
+      isolate, async_disposable_stack);
+  // 5. Return asyncDisposableStack.
+  return *async_disposable_stack;
+}
+
+// https://tc39.es/proposal-explicit-resource-management/#sec-asyncdisposablestack.prototype.use
+BUILTIN(AsyncDisposableStackPrototypeUse) {
+  const char kMethodName[] = "AsyncDisposableStack.prototype.use";
+  HandleScope scope(isolate);
+
+  // 1. Let asyncDisposableStack be the this value.
+  // 2. Perform ? RequireInternalSlot(asyncDisposableStack,
+  //    [[AsyncDisposableState]]).
+  CHECK_RECEIVER(JSAsyncDisposableStack, async_disposable_stack, kMethodName);
+  Handle<Object> value = args.at(1);
+
+  // 3. If asyncDisposableStack.[[AsyncDisposableState]] is disposed, throw a
+  //    ReferenceError exception.
+  if (async_disposable_stack->state() == DisposableStackState::kDisposed) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate,
+        NewReferenceError(
+            MessageTemplate::kDisposableStackIsDisposed,
+            isolate->factory()->NewStringFromAsciiChecked(kMethodName)));
+  }
+
+  // 4. Perform ?
+  //    AddDisposableResource(asyncDisposableStack.[[DisposeCapability]],
+  //    value, async-dispose).
+  Handle<Object> method;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, method,
+      JSDisposableStackBase::CheckValueAndGetDisposeMethod(
+          isolate, value, DisposeMethodHint::kAsyncDispose));
+
+  JSDisposableStackBase::Add(
+      isolate, async_disposable_stack,
+      (IsNullOrUndefined(*value)
+           ? ReadOnlyRoots(isolate).undefined_value_handle()
+           : value),
+      method, DisposeMethodCallType::kValueIsReceiver,
+      DisposeMethodHint::kAsyncDispose);
+
+  // 5. Return value.
+  return *value;
+}
+
+// https://tc39.es/proposal-explicit-resource-management/#sec-asyncdisposablestack.prototype.disposeAsync
+BUILTIN(AsyncDisposableStackPrototypeDisposeAsync) {
+  HandleScope scope(isolate);
+
+  // 1. Let asyncDisposableStack be the this value.
+  Handle<Object> receiver = args.receiver();
+
+  // 2. Let promiseCapability be ! NewPromiseCapability(%Promise%).
+  Handle<JSPromise> promise = isolate->factory()->NewJSPromise();
+
+  // 3. If asyncDisposableStack does not have an [[AsyncDisposableState]]
+  //    internal slot, then
+  if (!IsJSAsyncDisposableStack(*receiver)) {
+    // a. Perform ! Call(promiseCapability.[[Reject]], undefined, « a newly
+    //    created TypeError object »).
+    JSPromise::Reject(promise,
+                      isolate->factory()->NewTypeError(
+                          MessageTemplate::kNotAnAsyncDisposableStack));
+    // b. Return promiseCapability.[[Promise]].
+    return *promise;
+  }
+
+  Handle<JSAsyncDisposableStack> async_disposable_stack =
+      Cast<JSAsyncDisposableStack>(receiver);
+
+  // 4. If asyncDisposableStack.[[AsyncDisposableState]] is disposed, then
+  if (async_disposable_stack->state() == DisposableStackState::kDisposed) {
+    // a. Perform ! Call(promiseCapability.[[Resolve]], undefined, «
+    //    undefined »).
+    JSPromise::Resolve(
+        promise, handle(ReadOnlyRoots(isolate).undefined_value(), isolate))
+        .ToHandleChecked();
+    // b. Return promiseCapability.[[Promise]].
+    return *promise;
+  }
+
+  // 5. Set asyncDisposableStack.[[AsyncDisposableState]] to disposed.
+  async_disposable_stack->set_state(DisposableStackState::kDisposed);
+
+  // 6. Let result be
+  //    DisposeResources(asyncDisposableStack.[[DisposeCapability]],
+  //    NormalCompletion(undefined)).
+  // 7. IfAbruptRejectPromise(result, promiseCapability).
+  // 8. Perform ! Call(promiseCapability.[[Resolve]], undefined, « result
+  //    »).
+  // 9. Return promiseCapability.[[Promise]].
+  MAYBE_RETURN(JSAsyncDisposableStack::NextDisposeAsyncIteration(
+                   isolate, async_disposable_stack, promise),
+               ReadOnlyRoots(isolate).exception());
+  return *promise;
+}
+
+// https://tc39.es/proposal-explicit-resource-management/#sec-get-asyncdisposablestack.prototype.disposed
+BUILTIN(AsyncDisposableStackPrototypeGetDisposed) {
+  const char kMethodName[] = "get AsyncDisposableStack.prototype.disposed";
+  HandleScope scope(isolate);
+
+  // 1. Let AsyncdisposableStack be the this value.
+  // 2. Perform ? RequireInternalSlot(asyncDisposableStack,
+  //    [[AsyncDisposableState]]).
+  CHECK_RECEIVER(JSAsyncDisposableStack, async_disposable_stack, kMethodName);
+
+  // 3. If AsyncdisposableStack.[[AsyncDisposableState]] is disposed, return
+  //    true.
+  // 4. Otherwise, return false.
+  return *(isolate->factory()->ToBoolean(async_disposable_stack->state() ==
+                                         DisposableStackState::kDisposed));
+}
+
+// https://tc39.es/proposal-explicit-resource-management/#sec-asyncdisposablestack.prototype.adopt
+BUILTIN(AsyncDisposableStackPrototypeAdopt) {
+  const char kMethodName[] = "AsyncDisposableStack.prototype.adopt";
+  HandleScope scope(isolate);
+  Handle<Object> value = args.at(1);
+  Handle<Object> on_dispose_async = args.at(2);
+
+  // 1. Let asyncDisposableStack be the this value.
+  // 2. Perform ? RequireInternalSlot(asyncDisposableStack,
+  //    [[AsyncDisposableState]]).
+  CHECK_RECEIVER(JSAsyncDisposableStack, async_disposable_stack, kMethodName);
+
+  // 3. If asyncDisposableStack.[[AsyncDisposableState]] is disposed, throw a
+  //    ReferenceError exception.
+  if (async_disposable_stack->state() == DisposableStackState::kDisposed) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate,
+        NewReferenceError(
+            MessageTemplate::kDisposableStackIsDisposed,
+            isolate->factory()->NewStringFromAsciiChecked(kMethodName)));
+  }
+
+  // 4. If IsCallable(onDisposeAsync) is false, throw a TypeError exception.
+  if (!IsCallable(*on_dispose_async)) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kNotCallable, on_dispose_async));
+  }
+
+  // 5. Let closure be a new Abstract Closure with no parameters that captures
+  //    value and onDisposeAsync and performs the following steps when called:
+  //      a. Return ? Call(onDisposeAsync, undefined, « value »).
+  // 6. Let F be CreateBuiltinFunction(closure, 0, "", « »).
+  // 7. Perform ?
+  //    AddDisposableResource(asyncDisposableStack.[[DisposeCapability]],
+  //    undefined, async-dispose, F).
+  // Instead of creating an abstract closure and a function, we pass
+  // DisposeMethodCallType::kArgument so at the time of disposal, the value will
+  // be passed as the argument to the method.
+  JSDisposableStackBase::Add(isolate, async_disposable_stack, value,
+                             on_dispose_async,
+                             DisposeMethodCallType::kValueIsArgument,
+                             DisposeMethodHint::kAsyncDispose);
+
+  // 8. Return value.
+  return *value;
+}
+
+// https://tc39.es/proposal-explicit-resource-management/#sec-asyncdisposablestack.prototype.defer
+BUILTIN(AsyncDisposableStackPrototypeDefer) {
+  const char kMethodName[] = "AsyncDisposableStack.prototype.defer";
+  HandleScope scope(isolate);
+  Handle<Object> on_dispose_async = args.at(1);
+
+  // 1. Let asyncDisposableStack be the this value.
+  // 2. Perform ? RequireInternalSlot(asyncDisposableStack,
+  //    [[AsyncDisposableState]]).
+  CHECK_RECEIVER(JSAsyncDisposableStack, async_disposable_stack, kMethodName);
+
+  // 3. If asyncDisposableStack.[[AsyncDisposableState]] is disposed, throw a
+  //    ReferenceError exception.
+  if (async_disposable_stack->state() == DisposableStackState::kDisposed) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate,
+        NewReferenceError(
+            MessageTemplate::kDisposableStackIsDisposed,
+            isolate->factory()->NewStringFromAsciiChecked(kMethodName)));
+  }
+
+  // 4. If IsCallable(onDisposeAsync) is false, throw a TypeError exception.
+  if (!IsCallable(*on_dispose_async)) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kNotCallable, on_dispose_async));
+  }
+
+  // 5. Perform ?
+  //    AddDisposableResource(asyncDisposableStack.[[DisposeCapability]],
+  //    undefined, async-dispose, onDisposeAsync).
+  JSDisposableStackBase::Add(isolate, async_disposable_stack,
+                             ReadOnlyRoots(isolate).undefined_value_handle(),
+                             on_dispose_async,
+                             DisposeMethodCallType::kValueIsReceiver,
+                             DisposeMethodHint::kAsyncDispose);
+
+  // 6. Return undefined.
+  return ReadOnlyRoots(isolate).undefined_value();
+}
+
+// https://tc39.es/proposal-explicit-resource-management/#sec-asyncdisposablestack.prototype.move
+BUILTIN(AsyncDisposableStackPrototypeMove) {
+  const char kMethodName[] = "AsyncDisposableStack.prototype.move";
+  HandleScope scope(isolate);
+
+  // 1. Let asyncDisposableStack be the this value.
+  // 2. Perform ? RequireInternalSlot(asyncDisposableStack,
+  //    [[AsyncDisposableState]]).
+  CHECK_RECEIVER(JSAsyncDisposableStack, async_disposable_stack, kMethodName);
+
+  // 3. If asyncDisposableStack.[[AsyncDisposableState]] is disposed, throw a
+  //    ReferenceError exception.
+  if (async_disposable_stack->state() == DisposableStackState::kDisposed) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate,
+        NewReferenceError(
+            MessageTemplate::kDisposableStackIsDisposed,
+            isolate->factory()->NewStringFromAsciiChecked(kMethodName)));
+  }
+
+  // 4. Let newAsyncDisposableStack be ?
+  //    OrdinaryCreateFromConstructor(%AsyncDisposableStack%,
+  //    "%AsyncDisposableStack.prototype%", « [[AsyncDisposableState]],
+  //    [[DisposeCapability]] »).
+  // 5. Set newAsyncDisposableStack.[[AsyncDisposableState]] to pending.
+  Tagged<JSFunction> constructor_function =
+      Cast<JSFunction>(isolate->native_context()->get(
+          Context::JS_ASYNC_DISPOSABLE_STACK_FUNCTION_INDEX));
+  DirectHandle<Map> map(constructor_function->initial_map(), isolate);
+
+  DirectHandle<JSAsyncDisposableStack> new_async_disposable_stack =
+      isolate->factory()->NewJSAsyncDisposableStack(map);
+
+  // 6. Set newAsyncDisposableStack.[[DisposeCapability]] to
+  //    asyncDisposableStack.[[DisposeCapability]].
+  new_async_disposable_stack->set_stack(async_disposable_stack->stack());
+  new_async_disposable_stack->set_length(async_disposable_stack->length());
+  new_async_disposable_stack->set_state(DisposableStackState::kPending);
+  new_async_disposable_stack->set_error(
+      *(isolate->factory()->uninitialized_value()));
+
+  // 7. Set asyncDisposableStack.[[DisposeCapability]] to
+  //    NewDisposeCapability().
+  async_disposable_stack->set_stack(ReadOnlyRoots(isolate).empty_fixed_array());
+  async_disposable_stack->set_length(0);
+  async_disposable_stack->set_error(
+      *(isolate->factory()->uninitialized_value()));
+
+  // 8. Set disposableStack.[[DisposableState]] to disposed.
+  async_disposable_stack->set_state(DisposableStackState::kDisposed);
+
+  // 9. Return newDisposableStack.
+  return *new_async_disposable_stack;
+}
+
 }  // namespace internal
 }  // namespace v8
@@ -13,6 +13,8 @@
 namespace v8 {
 namespace internal {

+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 class AsyncFunctionBuiltinsAssembler : public AsyncBuiltinsAssembler {
  public:
   explicit AsyncFunctionBuiltinsAssembler(compiler::CodeAssemblerState* state)
@@ -200,14 +202,11 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait() {
   auto value = Parameter<Object>(Descriptor::kValue);
   auto context = Parameter<Context>(Descriptor::kContext);

-  TNode<SharedFunctionInfo> on_resolve_sfi =
-      AsyncFunctionAwaitResolveSharedFunConstant();
-  TNode<SharedFunctionInfo> on_reject_sfi =
-      AsyncFunctionAwaitRejectSharedFunConstant();
   TNode<JSPromise> outer_promise = LoadObjectField<JSPromise>(
       async_function_object, JSAsyncFunctionObject::kPromiseOffset);
-  Await(context, async_function_object, value, outer_promise, on_resolve_sfi,
-        on_reject_sfi);
+  Await(context, async_function_object, value, outer_promise,
+        RootIndex::kAsyncFunctionAwaitResolveClosureSharedFun,
+        RootIndex::kAsyncFunctionAwaitRejectClosureSharedFun);

   // Return outer promise to avoid adding an load of the outer promise before
   // suspending in BytecodeGenerator.
@@ -219,5 +218,7 @@ TF_BUILTIN(AsyncFunctionAwait, AsyncFunctionBuiltinsAssembler) {
   AsyncFunctionAwait<Descriptor>();
 }

+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -13,6 +13,8 @@
 namespace v8 {
 namespace internal {

+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 namespace {
 // Describe fields of Context associated with the AsyncIterator unwrap closure.
 class ValueUnwrapContext {
@@ -22,11 +24,27 @@
 }  // namespace

+TNode<Object> AsyncBuiltinsAssembler::Await(TNode<Context> context,
+                                            TNode<JSGeneratorObject> generator,
+                                            TNode<Object> value,
+                                            TNode<JSPromise> outer_promise,
+                                            RootIndex on_resolve_sfi,
+                                            RootIndex on_reject_sfi) {
+  return Await(
+      context, generator, value, outer_promise,
+      [&](TNode<Context> context, TNode<NativeContext> native_context) {
+        auto on_resolve = AllocateRootFunctionWithContext(
+            on_resolve_sfi, context, native_context);
+        auto on_reject = AllocateRootFunctionWithContext(on_reject_sfi, context,
+                                                         native_context);
+        return std::make_pair(on_resolve, on_reject);
+      });
+}
+
 TNode<Object> AsyncBuiltinsAssembler::Await(
     TNode<Context> context, TNode<JSGeneratorObject> generator,
     TNode<Object> value, TNode<JSPromise> outer_promise,
-    TNode<SharedFunctionInfo> on_resolve_sfi,
-    TNode<SharedFunctionInfo> on_reject_sfi) {
+    const CreateClosures& CreateClosures) {
   const TNode<NativeContext> native_context = LoadNativeContext(context);

   // We do the `PromiseResolve(%Promise%,value)` avoiding to unnecessarily
@@ -103,17 +121,9 @@
                    generator);
   }

-  // Allocate and initialize resolve handler
-  TNode<HeapObject> on_resolve =
-      AllocateInNewSpace(JSFunction::kSizeWithoutPrototype);
-  InitializeNativeClosure(closure_context, native_context, on_resolve,
-                          on_resolve_sfi);
-
-  // Allocate and initialize reject handler
-  TNode<HeapObject> on_reject =
-      AllocateInNewSpace(JSFunction::kSizeWithoutPrototype);
-  InitializeNativeClosure(closure_context, native_context, on_reject,
-                          on_reject_sfi);
+  // Allocate and initialize resolve and reject handlers
+  auto [on_resolve, on_reject] =
+      CreateClosures(closure_context, native_context);

   // Deal with PromiseHooks and debug support in the runtime. This
   // also allocates the throwaway promise, which is only needed in
@@ -148,52 +158,13 @@
                      on_resolve, on_reject, var_throwaway.value());
 }

-void AsyncBuiltinsAssembler::InitializeNativeClosure(
-    TNode<Context> context, TNode<NativeContext> native_context,
-    TNode<HeapObject> function, TNode<SharedFunctionInfo> shared_info) {
-  TNode<Map> function_map = CAST(LoadContextElement(
-      native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
-  // Ensure that we don't have to initialize prototype_or_initial_map field of
-  // JSFunction.
-  CSA_DCHECK(this,
-             IntPtrEqual(LoadMapInstanceSizeInWords(function_map),
-                         IntPtrConstant(JSFunction::kSizeWithoutPrototype /
-                                        kTaggedSize)));
-  static_assert(JSFunction::kSizeWithoutPrototype ==
-                (7 + V8_ENABLE_LEAPTIERING_BOOL) * kTaggedSize);
-  StoreMapNoWriteBarrier(function, function_map);
-  StoreObjectFieldRoot(function, JSObject::kPropertiesOrHashOffset,
-                       RootIndex::kEmptyFixedArray);
-  StoreObjectFieldRoot(function, JSObject::kElementsOffset,
-                       RootIndex::kEmptyFixedArray);
-  StoreObjectFieldRoot(function, JSFunction::kFeedbackCellOffset,
-                       RootIndex::kManyClosuresCell);
-#ifdef V8_ENABLE_LEAPTIERING
-  // TODO(saelo): obtain an appropriate dispatch handle here.
-  StoreObjectFieldNoWriteBarrier(function, JSFunction::kDispatchHandleOffset,
-                                 Int32Constant(kNullJSDispatchHandle));
-#endif  // V8_ENABLE_LEAPTIERING
-  StoreObjectFieldNoWriteBarrier(
-      function, JSFunction::kSharedFunctionInfoOffset, shared_info);
-  StoreObjectFieldNoWriteBarrier(function, JSFunction::kContextOffset, context);
-
-  // For the native closures that are initialized here (for `await`)
-  // we know that their SharedFunctionInfo::function_data(kAcquireLoad) slot
-  // contains a builtin index (as Smi), so there's no need to use
-  // CodeStubAssembler::GetSharedFunctionInfoCode() helper here,
-  // which almost doubles the size of `await` builtins (unnecessarily).
-  TNode<Smi> builtin_id = LoadSharedFunctionInfoBuiltinId(shared_info);
-  TNode<Code> code = LoadBuiltin(builtin_id);
-  StoreCodePointerFieldNoWriteBarrier(function, JSFunction::kCodeOffset, code);
-}
-
 TNode<JSFunction> AsyncBuiltinsAssembler::CreateUnwrapClosure(
     TNode<NativeContext> native_context, TNode<Boolean> done) {
   const TNode<Context> closure_context =
       AllocateAsyncIteratorValueUnwrapContext(native_context, done);
   return AllocateRootFunctionWithContext(
-      RootIndex::kAsyncIteratorValueUnwrapSharedFun, closure_context);
+      RootIndex::kAsyncIteratorValueUnwrapSharedFun, closure_context,
+      native_context);
 }

 TNode<Context> AsyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext(
@@ -221,5 +192,7 @@ TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncBuiltinsAssembler) {
   Return(unwrapped_value);
 }

+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -21,11 +21,17 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
   // `on_reject` is the SharedFunctioninfo instance used to create the reject
   // closure. `on_resolve` is the SharedFunctioninfo instance used to create the
   // resolve closure. Returns the Promise-wrapped `value`.
+  using CreateClosures =
+      std::function<std::pair<TNode<JSFunction>, TNode<JSFunction>>(
+          TNode<Context>, TNode<NativeContext>)>;
   TNode<Object> Await(TNode<Context> context,
                       TNode<JSGeneratorObject> generator, TNode<Object> value,
                       TNode<JSPromise> outer_promise,
-                      TNode<SharedFunctionInfo> on_resolve_sfi,
-                      TNode<SharedFunctionInfo> on_reject_sfi);
+                      const CreateClosures& CreateClosures);
+  TNode<Object> Await(TNode<Context> context,
+                      TNode<JSGeneratorObject> generator, TNode<Object> value,
+                      TNode<JSPromise> outer_promise, RootIndex on_resolve_sfi,
+                      RootIndex on_reject_sfi);

   // Return a new built-in function object as defined in
   // Async Iterator Value Unwrap Functions
@@ -33,10 +39,6 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
                                       TNode<Boolean> done);

  private:
-  void InitializeNativeClosure(TNode<Context> context,
-                               TNode<NativeContext> native_context,
-                               TNode<HeapObject> function,
-                               TNode<SharedFunctionInfo> shared_info);
   TNode<Context> AllocateAsyncIteratorValueUnwrapContext(
       TNode<NativeContext> native_context, TNode<Boolean> done);
 };
@@ -13,6 +13,8 @@
 namespace v8 {
 namespace internal {

+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 namespace {

 class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
@@ -258,8 +260,8 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait() {
       request, AsyncGeneratorRequest::kPromiseOffset);

   Await(context, async_generator_object, value, outer_promise,
-        AsyncGeneratorAwaitResolveSharedFunConstant(),
-        AsyncGeneratorAwaitRejectSharedFunConstant());
+        RootIndex::kAsyncGeneratorAwaitResolveClosureSharedFun,
+        RootIndex::kAsyncGeneratorAwaitRejectClosureSharedFun);
   SetGeneratorAwaiting(async_generator_object);
   Return(UndefinedConstant());
 }
@@ -591,8 +593,8 @@ TF_BUILTIN(AsyncGeneratorYieldWithAwait, AsyncGeneratorBuiltinsAssembler) {
       LoadPromiseFromAsyncGeneratorRequest(request);

   Await(context, generator, value, outer_promise,
-        AsyncGeneratorYieldWithAwaitResolveSharedFunConstant(),
-        AsyncGeneratorAwaitRejectSharedFunConstant());
+        RootIndex::kAsyncGeneratorYieldWithAwaitResolveClosureSharedFun,
+        RootIndex::kAsyncGeneratorAwaitRejectClosureSharedFun);
   SetGeneratorAwaiting(generator);
   Return(UndefinedConstant());
 }
@@ -637,21 +639,35 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
   const TNode<AsyncGeneratorRequest> req =
       CAST(LoadFirstAsyncGeneratorRequestFromQueue(generator));

-  Label perform_await(this);
-  TVARIABLE(SharedFunctionInfo, var_on_resolve,
-            AsyncGeneratorReturnClosedResolveSharedFunConstant());
-  TVARIABLE(SharedFunctionInfo, var_on_reject,
-            AsyncGeneratorReturnClosedRejectSharedFunConstant());
-
   const TNode<Smi> state = LoadGeneratorState(generator);
-  GotoIf(IsGeneratorStateClosed(state), &perform_await);
-  var_on_resolve = AsyncGeneratorReturnResolveSharedFunConstant();
-  var_on_reject = AsyncGeneratorAwaitRejectSharedFunConstant();
-
-  Goto(&perform_await);
-
-  BIND(&perform_await);
+  auto MakeClosures = [&](TNode<Context> context,
+                          TNode<NativeContext> native_context) {
+    TVARIABLE(JSFunction, var_on_resolve);
+    TVARIABLE(JSFunction, var_on_reject);
+    Label closed(this), not_closed(this), done(this);
+    Branch(IsGeneratorStateClosed(state), &closed, &not_closed);
+
+    BIND(&closed);
+    var_on_resolve = AllocateRootFunctionWithContext(
+        RootIndex::kAsyncGeneratorReturnClosedResolveClosureSharedFun, context,
+        native_context);
+    var_on_reject = AllocateRootFunctionWithContext(
+        RootIndex::kAsyncGeneratorReturnClosedRejectClosureSharedFun, context,
+        native_context);
+    Goto(&done);
+
+    BIND(&not_closed);
+    var_on_resolve = AllocateRootFunctionWithContext(
+        RootIndex::kAsyncGeneratorReturnResolveClosureSharedFun, context,
+        native_context);
+    var_on_reject = AllocateRootFunctionWithContext(
+        RootIndex::kAsyncGeneratorAwaitRejectClosureSharedFun, context,
+        native_context);
+    Goto(&done);
+
+    BIND(&done);
+    return std::make_pair(var_on_resolve.value(), var_on_reject.value());
+  };

   SetGeneratorAwaiting(generator);
   auto context = Parameter<Context>(Descriptor::kContext);
@@ -664,9 +680,7 @@
   {
     compiler::ScopedExceptionHandler handler(this, &await_exception,
                                              &var_exception);
-    Await(context, generator, value, outer_promise, var_on_resolve.value(),
-          var_on_reject.value());
+    Await(context, generator, value, outer_promise, MakeClosures);
   }
   Goto(&done);
@@ -730,5 +744,7 @@ TF_BUILTIN(AsyncGeneratorReturnClosedRejectClosure,
   AsyncGeneratorReturnClosedReject(context, generator, value);
 }

+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -13,6 +13,8 @@
 namespace v8 {
 namespace internal {

+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 namespace {
 class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
  public:
@@ -302,7 +304,7 @@ TNode<JSFunction> AsyncFromSyncBuiltinsAssembler::
                                   sync_iterator);
   return AllocateRootFunctionWithContext(
       RootIndex::kAsyncFromSyncIteratorCloseSyncAndRethrowSharedFun,
-      closure_context);
+      closure_context, native_context);
 }

 TNode<Context> AsyncFromSyncBuiltinsAssembler::
@@ -448,5 +450,7 @@ TF_BUILTIN(AsyncFromSyncIteratorCloseSyncAndRethrow,
   Return(CallRuntime(Runtime::kReThrow, context, error));
 }

+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -12,6 +12,8 @@
 namespace v8 {
 namespace internal {

+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 // https://tc39.github.io/proposal-bigint/#sec-to-big-int64
 TF_BUILTIN(BigIntToI64, CodeStubAssembler) {
   if (!Is64()) {
@@ -73,5 +75,7 @@ TF_BUILTIN(I32PairToBigInt, CodeStubAssembler) {
   Return(BigIntFromInt32Pair(low, high));
 }

+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -20,6 +20,8 @@
 namespace v8 {
 namespace internal {

+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 void Builtins::Generate_CallFunction_ReceiverIsNullOrUndefined(
     MacroAssembler* masm) {
   Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined);
@@ -907,5 +909,7 @@ TF_BUILTIN(HandleApiCallOrConstruct, CallOrConstructBuiltinsAssembler) {
   }
 }

+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -19,6 +19,8 @@
 namespace v8 {
 namespace internal {

+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 template <class T>
 using TVariable = compiler::TypedCodeAssemblerVariable<T>;
@@ -3047,5 +3049,7 @@ TF_BUILTIN(WeakSetPrototypeHas, WeakCollectionsBuiltinsAssembler) {
   Return(FalseConstant());
 }

+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -21,6 +21,8 @@
 namespace v8 {
 namespace internal {

+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 void Builtins::Generate_ConstructVarargs(MacroAssembler* masm) {
   Generate_CallOrConstructVarargs(masm, Builtin::kConstruct);
 }
@@ -276,8 +278,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
     BIND(&done);
   }

-  static_assert(JSFunction::kSizeWithoutPrototype ==
-                (7 + V8_ENABLE_LEAPTIERING_BOOL) * kTaggedSize);
+  static_assert(JSFunction::kSizeWithoutPrototype == 7 * kTaggedSize);
   StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackCellOffset,
                                  feedback_cell);
   StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
@@ -290,10 +291,11 @@
                       Int32Constant(kNullJSDispatchHandle)));
   StoreObjectFieldNoWriteBarrier(result, JSFunction::kDispatchHandleOffset,
                                  dispatch_handle);
-#endif  // V8_ENABLE_LEAPTIERING
+#else
   TNode<Code> lazy_builtin =
       HeapConstantNoHole(BUILTIN_CODE(isolate(), CompileLazy));
   StoreCodePointerField(result, JSFunction::kCodeOffset, lazy_builtin);
+#endif  // V8_ENABLE_LEAPTIERING
   Return(result);
 }
@@ -761,5 +763,7 @@ void ConstructorBuiltinsAssembler::CopyMutableHeapNumbersInObject(
       kTaggedSize, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost);
 }

+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -12,6 +12,8 @@
 namespace v8 {
 namespace internal {

+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 // ES6 section 7.1.3 ToNumber ( argument )
 TF_BUILTIN(ToNumber, CodeStubAssembler) {
   auto context = Parameter<Context>(Descriptor::kContext);
@@ -127,5 +129,7 @@ TF_BUILTIN(Typeof_Baseline, CodeStubAssembler) {
   Return(Typeof(object, slot, feedback_vector));
 }

+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -10,6 +10,8 @@
 namespace v8 {
 namespace internal {

+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 // -----------------------------------------------------------------------------
 // ES6 section 20.3 Date Objects
@@ -256,5 +258,7 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
   }
 }

+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -618,7 +618,16 @@ namespace internal {
   CPP(DisposableStackPrototypeMove) \
   \
   /* Async DisposableStack */ \
+  CPP(AsyncDisposableStackOnFulfilled) \
+  CPP(AsyncDisposableStackOnRejected) \
   CPP(AsyncDisposeFromSyncDispose) \
+  CPP(AsyncDisposableStackConstructor) \
+  CPP(AsyncDisposableStackPrototypeUse) \
+  CPP(AsyncDisposableStackPrototypeDisposeAsync) \
+  CPP(AsyncDisposableStackPrototypeGetDisposed) \
+  CPP(AsyncDisposableStackPrototypeAdopt) \
+  CPP(AsyncDisposableStackPrototypeDefer) \
+  CPP(AsyncDisposableStackPrototypeMove) \
   \
   /* Error */ \
   CPP(ErrorConstructor) \
@@ -836,7 +845,7 @@ namespace internal {
   TFC(Decrement_Baseline, UnaryOp_Baseline) \
   TFC(Increment_Baseline, UnaryOp_Baseline) \
   TFC(Negate_Baseline, UnaryOp_Baseline) \
-  TFC(BitwiseNot_WithFeedback, UnaryOp_WithFeedback) \
+  IF_TSA(TSC, TFC)(BitwiseNot_WithFeedback, UnaryOp_WithFeedback) \
   TFC(Decrement_WithFeedback, UnaryOp_WithFeedback) \
   TFC(Increment_WithFeedback, UnaryOp_WithFeedback) \
   TFC(Negate_WithFeedback, UnaryOp_WithFeedback) \
@@ -1155,6 +1164,7 @@ namespace internal {
   IF_WASM(ASM, WasmLiftoffFrameSetup, WasmDummy) \
   IF_WASM(ASM, WasmDebugBreak, WasmDummy) \
   IF_WASM(ASM, WasmOnStackReplace, WasmDummy) \
+  IF_WASM(ASM, WasmHandleStackOverflow, WasmHandleStackOverflow) \
   IF_WASM(TFC, WasmFloat32ToNumber, WasmFloat32ToNumber) \
   IF_WASM(TFC, WasmFloat64ToNumber, WasmFloat64ToTagged) \
   IF_WASM(TFC, WasmFloat64ToString, WasmFloat64ToTagged) \
@@ -1876,7 +1886,8 @@ namespace internal {
   /* Temporal #sec-temporal.calendar.prototype.inleapyear */ \
   CPP(TemporalCalendarPrototypeInLeapYear) \
   /* Temporal #sec-temporal.calendar.prototype.fields */ \
-  TFJ(TemporalCalendarPrototypeFields, kJSArgcReceiverSlots, kIterable) \
+  TFJ(TemporalCalendarPrototypeFields, kJSArgcReceiverSlots + 1, kReceiver, \
+      kIterable) \
   /* Temporal #sec-temporal.calendar.prototype.mergefields */ \
   CPP(TemporalCalendarPrototypeMergeFields) \
   /* Temporal #sec-temporal.calendar.prototype.tostring */ \
@@ -1887,8 +1898,10 @@ namespace internal {
   CPP(DatePrototypeToTemporalInstant) \
   \
   /* "Private" (created but not exposed) Builtins needed by Temporal */ \
-  TFJ(StringFixedArrayFromIterable, kJSArgcReceiverSlots, kIterable) \
-  TFJ(TemporalInstantFixedArrayFromIterable, kJSArgcReceiverSlots, kIterable)
+  TFJ(StringFixedArrayFromIterable, kJSArgcReceiverSlots + 1, kReceiver, \
+      kIterable) \
+  TFJ(TemporalInstantFixedArrayFromIterable, kJSArgcReceiverSlots + 1, \
+      kReceiver, kIterable)
 
 #define BUILTIN_LIST_BASE(CPP, TSJ, TFJ, TSC, TFC, TFS, TFH, ASM) \
   BUILTIN_LIST_BASE_TIER0(CPP, TFJ, TFC, TFS, TFH, ASM) \
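The TFJ changes above all follow one pattern: the declared argument count gains `+ 1` and an explicit `kReceiver` parameter, so the receiver slot is counted and named rather than implied. A reading of that bookkeeping, sketched with stand-in constants (not V8's actual definitions):

#include <cassert>

constexpr int kJSArgcReceiverSlots = 1;  // the receiver occupies one slot

int main() {
  // Before: TFJ(..., kJSArgcReceiverSlots, kIterable)
  //   declared count 1, parameter list names only kIterable.
  // After:  TFJ(..., kJSArgcReceiverSlots + 1, kReceiver, kIterable)
  //   declared count 2, parameter list names kReceiver and kIterable,
  //   so the count now equals the number of named parameters.
  constexpr int kDeclaredArgc = kJSArgcReceiverSlots + 1;
  constexpr int kNamedParameters = 2;  // kReceiver, kIterable
  static_assert(kDeclaredArgc == kNamedParameters,
                "receiver slot is explicit after the change");
  return 0;
}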
@@ -253,10 +253,12 @@ BUILTIN(DisposableStackPrototypeMove) {
   new_disposable_stack->set_stack(disposable_stack->stack());
   new_disposable_stack->set_length(disposable_stack->length());
   new_disposable_stack->set_state(DisposableStackState::kPending);
+  new_disposable_stack->set_error(*(isolate->factory()->uninitialized_value()));
 
   // 7. Set disposableStack.[[DisposeCapability]] to NewDisposeCapability().
   disposable_stack->set_stack(ReadOnlyRoots(isolate).empty_fixed_array());
   disposable_stack->set_length(0);
+  disposable_stack->set_error(*(isolate->factory()->uninitialized_value()));
 
   // 8. Set disposableStack.[[DisposableState]] to disposed.
   disposable_stack->set_state(DisposableStackState::kDisposed);
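The two added `set_error(...uninitialized_value())` calls reset the error slot on both stacks when `move()` transfers ownership. The shape of the operation as a self-contained sketch (types simplified; not the real JSDisposableStackBase layout):

#include <utility>
#include <vector>

enum class State { kPending, kDisposed };

struct DisposableStackLike {
  std::vector<int> resources;
  State state = State::kPending;
  bool has_error = false;
};

// Mirrors DisposableStackPrototypeMove: the new stack takes over the
// resources, and the source is left disposed with cleared state.
DisposableStackLike MoveFrom(DisposableStackLike& source) {
  DisposableStackLike target;
  target.resources = std::move(source.resources);
  target.state = State::kPending;
  target.has_error = false;         // fresh error slot on the new stack
  source.resources.clear();         // step 7: new (empty) dispose capability
  source.has_error = false;         // error slot reset on the source too
  source.state = State::kDisposed;  // step 8: source is now disposed
  return target;
}

int main() {
  DisposableStackLike a;
  a.resources.push_back(1);
  DisposableStackLike b = MoveFrom(a);
  return b.resources.size() == 1 && a.state == State::kDisposed ? 0 : 1;
}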
@@ -12,6 +12,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 class GeneratorBuiltinsAssembler : public CodeStubAssembler {
  public:
  explicit GeneratorBuiltinsAssembler(compiler::CodeAssemblerState* state)
@@ -311,5 +313,7 @@ TF_BUILTIN(ResumeGeneratorBaseline, GeneratorBuiltinsAssembler) {
   Return(LoadJSGeneratorObjectInputOrDebugPos(generator));
 }
 
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -9,6 +9,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 // ES #sec-isfinite-number
 TF_BUILTIN(GlobalIsFinite, CodeStubAssembler) {
   auto context = Parameter<Context>(Descriptor::kContext);
@@ -106,5 +108,7 @@ TF_BUILTIN(GlobalIsNaN, CodeStubAssembler) {
   Return(FalseConstant());
 }
 
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -13,6 +13,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 class HandlerBuiltinsAssembler : public CodeStubAssembler {
  public:
  explicit HandlerBuiltinsAssembler(compiler::CodeAssemblerState* state)
@@ -474,5 +476,7 @@ TF_BUILTIN(HasIndexedInterceptorIC, CodeStubAssembler) {
             vector);
 }
 
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -217,6 +217,29 @@ constexpr bool Builtins::IsJSEntryVariant(Builtin builtin) {
   UNREACHABLE();
 }
 
+#ifdef V8_ENABLE_WEBASSEMBLY
+// static
+template <Builtin builtin>
+constexpr size_t Builtins::WasmBuiltinHandleArrayIndex() {
+  constexpr size_t index =
+      std::find(std::begin(Builtins::kWasmIndirectlyCallableBuiltins),
+                std::end(Builtins::kWasmIndirectlyCallableBuiltins), builtin) -
+      std::begin(Builtins::kWasmIndirectlyCallableBuiltins);
+  static_assert(Builtins::kWasmIndirectlyCallableBuiltins[index] == builtin);
+  return index;
+}
+
+// static
+template <Builtin builtin>
+wasm::WasmCodePointerTable::Handle Builtins::WasmBuiltinHandleOf(
+    Isolate* isolate) {
+  return isolate
+      ->wasm_builtin_code_handles()[WasmBuiltinHandleArrayIndex<builtin>()];
+}
+#endif  // V8_ENABLE_WEBASSEMBLY
+
 }  // namespace internal
 }  // namespace v8
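WasmBuiltinHandleArrayIndex computes, at compile time, where a builtin sits in the kWasmIndirectlyCallableBuiltins array; the static_assert turns a lookup of a builtin missing from the array into a build error. A standalone analog of the trick (C++20 for constexpr std::find; the enum and array are placeholders):

#include <algorithm>
#include <array>
#include <cstddef>

enum class Builtin { kA, kB, kC };

constexpr std::array<Builtin, 2> kIndirectlyCallable{Builtin::kB, Builtin::kC};

template <Builtin builtin>
constexpr std::size_t IndexOf() {
  constexpr std::size_t index =
      std::find(kIndirectlyCallable.begin(), kIndirectlyCallable.end(),
                builtin) -
      kIndirectlyCallable.begin();
  // Out-of-range constexpr access fails to compile, so a builtin that is
  // not in the array is rejected at build time -- same idea as the V8 code.
  static_assert(kIndirectlyCallable[index] == builtin);
  return index;
}

static_assert(IndexOf<Builtin::kB>() == 0);
static_assert(IndexOf<Builtin::kC>() == 1);

int main() { return 0; }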
@@ -25,6 +25,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 // -----------------------------------------------------------------------------
 // TurboFan support builtins.
 
@@ -327,7 +329,7 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
       shared_barrier_slow(this), generational_barrier_slow(this);
 
   // During incremental marking we always reach this slow path, so we need to
-  // check whether this is a old-to-new or old-to-shared reference.
+  // check whether this is an old-to-new or old-to-shared reference.
   TNode<IntPtrT> object = BitcastTaggedToWord(
       UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
@@ -1686,5 +1688,7 @@ TF_BUILTIN(GetOwnPropertyDescriptor, CodeStubAssembler) {
             key);
 }
 
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -17,6 +17,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 class IntlBuiltinsAssembler : public CodeStubAssembler {
  public:
  explicit IntlBuiltinsAssembler(compiler::CodeAssemblerState* state)
@@ -289,5 +291,7 @@ TF_BUILTIN(ListFormatPrototypeFormatToParts, IntlBuiltinsAssembler) {
       Runtime::kFormatListToParts, "Intl.ListFormat.prototype.formatToParts");
 }
 
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -18,6 +18,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 using IteratorRecord = TorqueStructIteratorRecord;
 
 TNode<Object> IteratorBuiltinsAssembler::GetIteratorMethod(
@@ -535,5 +537,7 @@ TF_BUILTIN(IterableToFixedArrayWithSymbolLookupSlow,
                                  iterator_fn);
 }
 
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -8,12 +8,14 @@
 #include "src/builtins/builtins.h"
 #include "src/common/globals.h"
 #include "src/objects/code-inl.h"
-#include "src/objects/feedback-vector.h"
+#include "src/objects/feedback-vector-inl.h"
 #include "src/objects/shared-function-info.h"
 
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 void LazyBuiltinsAssembler::GenerateTailCallToJSCode(
     TNode<Code> code, TNode<JSFunction> function) {
   auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
@@ -38,11 +40,10 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
       LoadObjectField<Uint16T>(feedback_vector, FeedbackVector::kFlagsOffset);
 
   // Fall through if no optimization trigger or optimized code.
-  GotoIfNot(
-      IsSetWord32(flags, FeedbackVector::kFlagsHasAnyOptimizedCode |
-                             FeedbackVector::kFlagsTieringStateIsAnyRequested |
-                             FeedbackVector::kFlagsLogNextExecution),
-      &fallthrough);
+  constexpr uint32_t kFlagMask =
+      FeedbackVector::FlagMaskForNeedsProcessingCheckFrom(
+          CodeKind::INTERPRETED_FUNCTION);
+  GotoIfNot(IsSetWord32(flags, kFlagMask), &fallthrough);
 
   GotoIfNot(
       IsSetWord32(flags, FeedbackVector::kFlagsTieringStateIsAnyRequested),
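The hunk above replaces three hand-ORed flag constants with one mask obtained from FeedbackVector::FlagMaskForNeedsProcessingCheckFrom(), keyed by the code kind. The underlying pattern, as an isolated sketch (flag values and the mask derivation invented for illustration):

#include <cstdint>

enum FeedbackFlag : uint32_t {
  kFlagsHasAnyOptimizedCode = 1u << 0,
  kFlagsTieringStateIsAnyRequested = 1u << 1,
  kFlagsLogNextExecution = 1u << 2,
};

enum class CodeKind { kInterpretedFunction, kBaseline };

// Stand-in for FeedbackVector::FlagMaskForNeedsProcessingCheckFrom():
// derive the relevant bits from the code kind in one place, so callers
// cannot drift out of sync with the set of flags that matter.
constexpr uint32_t FlagMaskForNeedsProcessingCheckFrom(CodeKind kind) {
  uint32_t mask = kFlagsTieringStateIsAnyRequested | kFlagsLogNextExecution;
  if (kind == CodeKind::kInterpretedFunction) {
    mask |= kFlagsHasAnyOptimizedCode;
  }
  return mask;
}

bool NeedsProcessing(uint32_t flags, CodeKind kind) {
  const uint32_t kFlagMask = FlagMaskForNeedsProcessingCheckFrom(kind);
  return (flags & kFlagMask) != 0;  // one AND instead of three named bits
}

int main() {
  return NeedsProcessing(kFlagsLogNextExecution,
                         CodeKind::kInterpretedFunction)
             ? 0
             : 1;
}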
@@ -51,12 +52,21 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
 
   BIND(&maybe_needs_logging);
   {
+#ifdef V8_ENABLE_LEAPTIERING
+    // In the leaptiering case, we don't tier up to optimized code through the
+    // feedback vector (but instead through the dispatch table), so we can only
+    // get here if kFlagsLogNextExecution is set.
+    CSA_DCHECK(this,
+               IsSetWord32(flags, FeedbackVector::kFlagsLogNextExecution));
+#else
     GotoIfNot(IsSetWord32(flags, FeedbackVector::kFlagsLogNextExecution),
               &may_have_optimized_code);
+#endif
     GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution,
                                    function);
   }
 
+#ifndef V8_ENABLE_LEAPTIERING
   BIND(&may_have_optimized_code);
   {
     Label heal_optimized_code_slot(this);
@@ -86,6 +96,7 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
     BIND(&heal_optimized_code_slot);
     GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot, function);
   }
+#endif  // V8_ENABLE_LEAPTIERING
 
   // Fall-through if the optimized code cell is clear and the tiering state is
   // kNone.
@@ -112,7 +123,12 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
   CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstantNoHole(BUILTIN_CODE(
                        isolate(), CompileLazy))));
+  USE(sfi_code);
 
+#ifndef V8_ENABLE_LEAPTIERING
+  // In the leaptiering case, the code is installed below, through the
+  // InstallSFICode runtime function.
   StoreCodePointerField(function, JSFunction::kCodeOffset, sfi_code);
+#endif  // V8_ENABLE_LEAPTIERING
 
   Label maybe_use_sfi_code(this);
   // If there is no feedback, don't check for optimized code.
@@ -131,6 +147,13 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
   // A usual case would be the InterpreterEntryTrampoline to start executing
   // existing bytecode.
   BIND(&maybe_use_sfi_code);
+#ifdef V8_ENABLE_LEAPTIERING
+  // In the leaptiering case, we now simply install the code of the SFI on the
+  // function's dispatch table entry and call it. Installing the code is
+  // necessary as the dispatch table entry may still contain the CompileLazy
+  // builtin at this point (we can only update dispatch table code from C++).
+  GenerateTailCallToReturnedCode(Runtime::kInstallSFICode, function);
+#else
   Label tailcall_code(this), baseline(this);
   TVARIABLE(Code, code);
@@ -153,6 +176,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
 
   BIND(&tailcall_code);
   GenerateTailCallToJSCode(code.value(), function);
+#endif  // V8_ENABLE_LEAPTIERING
 
   BIND(&compile_function);
   GenerateTailCallToReturnedCode(Runtime::kCompileLazy, function);
@@ -168,10 +192,14 @@ TF_BUILTIN(CompileLazyDeoptimizedCode, LazyBuiltinsAssembler) {
   auto function = Parameter<JSFunction>(Descriptor::kTarget);
 
   TNode<Code> code = HeapConstantNoHole(BUILTIN_CODE(isolate(), CompileLazy));
+#ifndef V8_ENABLE_LEAPTIERING
   // Set the code slot inside the JSFunction to CompileLazy.
   StoreCodePointerField(function, JSFunction::kCodeOffset, code);
+#endif  // V8_ENABLE_LEAPTIERING
   GenerateTailCallToJSCode(code, function);
 }
 
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
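The V8_ENABLE_LEAPTIERING branches above encode one idea: with leaptiering, a function's executable code is reached through a dispatch-table entry rather than a per-function code field, so CompileLazy and tier-up only have to rewrite the table entry, and only C++ may do so. A deliberately simplified model of that indirection (not V8's actual data structures):

#include <cstdio>

using CodePtr = void (*)();

void CompileLazyStub() { std::puts("compile bytecode, then patch the table"); }
void InterpreterEntry() { std::puts("run in the interpreter"); }

// One shared table; functions hold an index (a "dispatch handle"),
// never a raw code pointer.
CodePtr dispatch_table[] = {CompileLazyStub};

struct JSFunctionLike {
  int dispatch_handle = 0;
  void Call() const { dispatch_table[dispatch_handle](); }
};

int main() {
  JSFunctionLike f;
  f.Call();  // first call lands in the CompileLazy stub
  // "Runtime::kInstallSFICode" analog: only C++ updates the table entry.
  dispatch_table[f.dispatch_handle] = InterpreterEntry;
  f.Call();  // subsequent calls run the installed code
  return 0;
}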
@@ -14,6 +14,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 using compiler::ScopedExceptionHandler;
 
 class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler {
@@ -623,5 +625,7 @@ TF_BUILTIN(RunMicrotasks, MicrotaskQueueBuiltinsAssembler) {
   }
 }
 
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -11,6 +11,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 // -----------------------------------------------------------------------------
 // ES6 section 20.1 Number Objects
 
@@ -118,7 +120,9 @@ DEF_BINOP_RHS_SMI(ShiftRightLogicalSmi_Baseline,
                                                                   \
     Return(result);                                               \
   }
+#ifndef V8_ENABLE_EXPERIMENTAL_TSA_BUILTINS
 DEF_UNOP(BitwiseNot_WithFeedback, Generate_BitwiseNotWithFeedback)
+#endif
 DEF_UNOP(Decrement_WithFeedback, Generate_DecrementWithFeedback)
 DEF_UNOP(Increment_WithFeedback, Generate_IncrementWithFeedback)
 DEF_UNOP(Negate_WithFeedback, Generate_NegateWithFeedback)
@@ -293,5 +297,7 @@ TF_BUILTIN(StrictEqual_Baseline, CodeStubAssembler) {
   Return(result);
 }
 
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
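With the experimental Turboshaft builtin enabled, the CSA DEF_UNOP expansion for BitwiseNot_WithFeedback is compiled out, so the two definitions never coexist. The gating pattern in isolation (toggle the define to switch paths):

#include <cstdio>

// Toggle to emulate v8_enable_experimental_tsa_builtins.
// #define V8_ENABLE_EXPERIMENTAL_TSA_BUILTINS

#ifndef V8_ENABLE_EXPERIMENTAL_TSA_BUILTINS
// Fallback, analogous to the CSA DEF_UNOP expansion.
void BitwiseNotWithFeedback() { std::puts("CSA implementation"); }
#else
// Replacement, analogous to the Turboshaft TS_BUILTIN.
void BitwiseNotWithFeedback() { std::puts("Turboshaft implementation"); }
#endif

int main() {
  BitwiseNotWithFeedback();  // exactly one definition is ever linked in
  return 0;
}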
@@ -0,0 +1,47 @@
+// Copyright 2024 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils-gen.h"
+#include "src/builtins/number-builtins-reducer-inl.h"
+#include "src/codegen/turboshaft-builtins-assembler-inl.h"
+
+namespace v8::internal {
+
+#include "src/compiler/turboshaft/define-assembler-macros.inc"
+
+using namespace compiler::turboshaft;  // NOLINT(build/namespaces)
+
+class NumberBuiltinsAssemblerTS
+    : public TurboshaftBuiltinsAssembler<NumberBuiltinsReducer,
+                                         FeedbackCollectorReducer> {
+ public:
+  using Base = TurboshaftBuiltinsAssembler;
+  using Base::Asm;
+  using Base::Base;
+};
+
+#ifdef V8_ENABLE_EXPERIMENTAL_TSA_BUILTINS
+
+TS_BUILTIN(BitwiseNot_WithFeedback, NumberBuiltinsAssemblerTS) {
+  // TODO(nicohartmann): It would be great to deduce the parameter type from
+  // the Descriptor directly.
+  V<Object> value = Parameter<Object>(Descriptor::kValue);
+  V<Context> context = Parameter<Context>(Descriptor::kContext);
+  V<FeedbackVector> feedback_vector =
+      Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
+  V<WordPtr> slot = Parameter<WordPtr>(Descriptor::kSlot);
+
+  SetFeedbackSlot(slot);
+  SetFeedbackVector(feedback_vector);
+
+  V<Object> result = BitwiseNot(context, value);
+  Return(result);
+}
+
+#endif  // V8_ENABLE_EXPERIMENTAL_TSA_BUILTINS
+
+#include "src/compiler/turboshaft/undef-assembler-macros.inc"
+
+}  // namespace v8::internal
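TurboshaftBuiltinsAssembler<NumberBuiltinsReducer, FeedbackCollectorReducer> composes behavior from a stack of reducers, each templated over the next layer; that is how this new builtin picks up feedback collection without spelling it out. A toy version of the stacking mechanism (no V8 types involved):

#include <iostream>

// Bottom of the stack: actually emits the operation.
struct EmitterBottom {
  void BitwiseNot() { std::cout << "emit BitwiseNot\n"; }
};

// A reducer layers extra work around the next layer's implementation.
template <typename Next>
struct FeedbackCollectorLayer : Next {
  void BitwiseNot() {
    std::cout << "record feedback for BitwiseNot\n";
    Next::BitwiseNot();  // delegate down the reducer stack
  }
};

int main() {
  FeedbackCollectorLayer<EmitterBottom> assembler;
  assembler.BitwiseNot();  // feedback layer runs first, then the emitter
  return 0;
}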
@@ -24,6 +24,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler {
  public:
  explicit ObjectEntriesValuesBuiltinsAssembler(
@@ -430,8 +432,37 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
   TNode<JSReceiver> from = ToObject_Inline(context, source);
   TNode<Map> from_map = LoadMap(from);
 
+  // For the fast case we want the source to be a JSObject.
+  GotoIfNot(IsJSObjectMap(from_map), &slow_path);
+
   TNode<Map> to_map = LoadMap(to);
 
+  // Chances that the fast cloning is possible is very low in case source
+  // and target maps belong to different native contexts (the only case
+  // it'd work is if the |from| object doesn't have enumerable properties)
+  // or if one of them is a remote JS object.
+  // TODO(olivf): Re-Evaluate this once we have a representation for "no
+  // enumerable properties" state in an Object.assign sidestep transition.
+  {
+    TNode<Map> to_meta_map = LoadMap(to_map);
+    GotoIfNot(TaggedEqual(LoadMap(from_map), to_meta_map), &slow_path);
+
+    // For the fast case we want the target to be a fresh empty object
+    // literal from current context.
+    // TODO(olivf): consider extending the fast path to a case when source
+    // and target objects are from the same context but not necessarily from
+    // current one.
+    TNode<NativeContext> native_context = LoadNativeContext(context);
+    TNode<Map> empty_object_literal_map =
+        LoadObjectFunctionInitialMap(native_context);
+    GotoIfNot(TaggedEqual(to_map, empty_object_literal_map), &slow_path);
+
+    // Double-check that the meta map is not contextless.
+    CSA_DCHECK(this,
+               TaggedEqual(native_context,
+                           LoadMapConstructorOrBackPointerOrNativeContext(
+                               to_meta_map)));
+  }
+
   // Chances are very slim that cloning is possible if we have different
   // instance sizes.
   // TODO(olivf): Re-Evaluate this once we have a faster target map lookup
@@ -461,14 +492,6 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
           Word32And(target_field3, field3_descriptors_and_extensible_mask)),
       &slow_path);
 
-  // For the fastcase we want the source to be a JSObject and the target a
-  // fresh empty object literal.
-  TNode<NativeContext> native_context = LoadNativeContext(context);
-  TNode<Map> empty_object_literal_map =
-      LoadObjectFunctionInitialMap(native_context);
-  GotoIfNot(TaggedEqual(to_map, empty_object_literal_map), &slow_path);
-  GotoIfNot(IsJSObjectMap(from_map), &slow_path);
-
   // Check that the source is in fastmode, not a prototype and not deprecated.
   TNode<Uint32T> source_field3 = LoadMapBitField3(from_map);
   TNode<Uint32T> field3_exclusion_mask_const =
@@ -488,12 +511,19 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
   GotoIfNot(TaggedEqual(LoadElements(CAST(to)), EmptyFixedArrayConstant()),
             &slow_path);
 
+  // Ensure the properties field is not used to store a hash.
+  TNode<Object> properties = LoadJSReceiverPropertiesOrHash(to);
+  GotoIf(TaggedIsSmi(properties), &slow_path);
+  CSA_DCHECK(this,
+             Word32Or(TaggedEqual(properties, EmptyFixedArrayConstant()),
+                      IsPropertyArray(CAST(properties))));
+
   Label continue_fast_path(this), runtime_map_lookup(this, Label::kDeferred);
 
   // Check if our particular source->target combination is fast clonable.
   // E.g., this ensures that we only have fast properties and in general that
   // the binary layout is compatible for `FastCloneJSObject`.
-  // If suche a clone map exists then it can be found in the transition array
+  // If such a clone map exists then it can be found in the transition array
   // with object_assign_clone_transition_symbol as a key. If this transition
   // slot is cleared, then the map is not clonable. If the key is missing
   // from the transitions we rely on the runtime function
@@ -1712,5 +1742,8 @@ TNode<HeapObject> ObjectBuiltinsAssembler::GetAccessorOrUndefined(
   BIND(&return_result);
   return result.value();
 }
+
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
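The reordered Object.assign checks above move the cheap disqualifiers first: the source must be a JSObject, source and target must share a meta map (i.e. the same native context), and the target must be the context's fresh empty-object-literal map, all before the costlier bit-field tests run. The gatekeeping shape, reduced to plain C++ (maps modeled as plain structs, not V8's Map objects):

#include <cstdio>

struct Map {
  const Map* meta_map = nullptr;  // stands in for LoadMap(map)
  bool is_js_object = false;
};

// Returns true only if every cheap precondition of the fast clone holds;
// each failure corresponds to a GotoIfNot(..., &slow_path) in the builtin.
bool FastCloneGateOpen(const Map& from_map, const Map& to_map,
                       const Map& empty_object_literal_map) {
  if (!from_map.is_js_object) return false;               // source: JSObject
  if (from_map.meta_map != to_map.meta_map) return false; // same context
  if (&to_map != &empty_object_literal_map) return false; // fresh literal
  return true;  // only now attempt the more expensive layout checks
}

int main() {
  Map meta;
  Map literal{&meta, true};
  Map from{&meta, true};
  std::puts(FastCloneGateOpen(from, literal, literal) ? "fast" : "slow");
  return 0;
}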
@@ -17,6 +17,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 TNode<JSProxy> ProxiesCodeStubAssembler::AllocateProxy(
     TNode<Context> context, TNode<JSReceiver> target,
     TNode<JSReceiver> handler) {
@@ -75,11 +77,10 @@ TNode<Context> ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext(
 TNode<JSFunction> ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(
     TNode<Context> context, TNode<JSProxy> proxy) {
   const TNode<NativeContext> native_context = LoadNativeContext(context);
-
   const TNode<Context> proxy_context =
       CreateProxyRevokeFunctionContext(proxy, native_context);
   return AllocateRootFunctionWithContext(RootIndex::kProxyRevokeSharedFun,
-                                         proxy_context);
+                                         proxy_context, native_context);
 }
 
 TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
@@ -427,5 +428,7 @@ void ProxiesCodeStubAssembler::CheckDeleteTrapResult(TNode<Context> context,
   BIND(&check_passed);
 }
 
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -24,6 +24,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 // Tail calls the regular expression interpreter.
 // static
 void Builtins::Generate_RegExpInterpreterTrampoline(MacroAssembler* masm) {
@@ -1702,5 +1704,7 @@ TNode<JSArray> RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
   return var_result.value();
 }
 
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -12,6 +12,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 class ShadowRealmBuiltinsAssembler : public CodeStubAssembler {
  public:
  explicit ShadowRealmBuiltinsAssembler(compiler::CodeAssemblerState* state)
@@ -82,7 +84,8 @@ ShadowRealmBuiltinsAssembler::AllocateImportValueFulfilledFunction(
       CreateImportValueFulfilledFunctionContext(caller_context, eval_context,
                                                 specifier, export_name);
   return AllocateRootFunctionWithContext(
-      RootIndex::kShadowRealmImportValueFulfilledSharedFun, function_context);
+      RootIndex::kShadowRealmImportValueFulfilledSharedFun, function_context,
+      {});
 }
 
 void ShadowRealmBuiltinsAssembler::CheckAccessor(TNode<DescriptorArray> array,
@@ -423,5 +426,7 @@ TF_BUILTIN(ShadowRealmImportValueRejected, ShadowRealmBuiltinsAssembler) {
                    exception);
 }
 
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -10,6 +10,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
  public:
  explicit SharedArrayBufferBuiltinsAssembler(
@@ -817,5 +819,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
   ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
 }
 
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -20,6 +20,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 TNode<RawPtrT> StringBuiltinsAssembler::DirectStringData(
     TNode<String> string, TNode<Word32T> string_instance_type) {
   // Compute the effective offset of the first character.
@@ -2099,5 +2101,7 @@ TNode<String> StringBuiltinsAssembler::SubString(TNode<String> string,
   return var_result.value();
 }
 
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -137,7 +137,8 @@ class StringBuiltinsReducer : public Next {
 };
 
 class StringBuiltinsAssemblerTS
-    : public TurboshaftBuiltinsAssembler<StringBuiltinsReducer> {
+    : public TurboshaftBuiltinsAssembler<StringBuiltinsReducer,
+                                         NoFeedbackCollectorReducer> {
  public:
   using Base = TurboshaftBuiltinsAssembler;
@@ -13,6 +13,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 class TemporalBuiltinsAssembler : public IteratorBuiltinsAssembler {
  public:
  explicit TemporalBuiltinsAssembler(compiler::CodeAssemblerState* state)
@@ -218,5 +220,7 @@ TF_BUILTIN(TemporalCalendarPrototypeFields, TemporalBuiltinsAssembler) {
   Return(CalendarFieldsArrayFromIterable(context, calendar, iterable));
 }
 
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -17,6 +17,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 // -----------------------------------------------------------------------------
 // ES6 section 22.2 TypedArray Objects
 
@@ -655,5 +657,8 @@ TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
   BIND(&return_undefined);
   Return(UndefinedConstant());
 }
+
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
@@ -58,26 +58,33 @@ class CodeAssemblerState;
   }                                                                         \
   void Name##Assembler::Generate##Name##Impl()
 
 #define TS_BUILTIN(Name, BaseAssembler)                                     \
   class Name##Assembler : public BaseAssembler {                            \
    public:                                                                  \
     using Descriptor = Builtin_##Name##_InterfaceDescriptor;                \
     Name##Assembler(compiler::turboshaft::PipelineData* data,               \
                     Isolate* isolate, compiler::turboshaft::Graph& graph,   \
                     Zone* phase_zone)                                       \
         : BaseAssembler(data, graph, phase_zone) {}                         \
     void Generate##Name##Impl();                                            \
-    using BaseAssembler::Asm;                                               \
   };                                                                        \
   void Builtins::Generate_##Name(                                           \
       compiler::turboshaft::PipelineData* data, Isolate* isolate,           \
       compiler::turboshaft::Graph& graph, Zone* phase_zone) {               \
     Name##Assembler assembler(data, isolate, graph, phase_zone);            \
     assembler.EmitBuiltinProlog(Builtin::k##Name);                          \
+    Block* catch_block = nullptr;                                           \
+    std::optional<Name##Assembler::CatchScope> catch_scope;                 \
+    /* If this builtin collects feedback, we need to setup a catch block */ \
+    if (assembler.HasFeedbackCollector()) {                                 \
+      catch_block = assembler.NewBlock();                                   \
+      catch_scope.emplace(assembler, catch_block);                          \
+    }                                                                       \
+    assembler.Generate##Name##Impl();                                       \
-    assembler.Generate##Name##Impl();                                       \
     /* Builtin definition must generate something! */                       \
     DCHECK_GT(graph.op_id_count(), 0);                                      \
+    assembler.EmitEpilog(catch_block);                                      \
   }                                                                         \
   void Name##Assembler::Generate##Name##Impl()
 
 }  // namespace internal
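The revised TS_BUILTIN expansion sets up a catch block only when the assembler reports a feedback collector, using std::optional so the scope object is constructed conditionally yet still destroyed deterministically. The same RAII pattern outside of V8 (all names here are illustrative):

#include <cstdio>
#include <optional>

struct CatchScope {
  explicit CatchScope(int block_id) {
    std::printf("route exceptions to block %d\n", block_id);
  }
  ~CatchScope() { std::puts("catch scope closed"); }
};

void GenerateBuiltin(bool has_feedback_collector) {
  int catch_block = -1;
  std::optional<CatchScope> catch_scope;  // stays empty unless needed
  if (has_feedback_collector) {           // assembler.HasFeedbackCollector()
    catch_block = 1;                      // assembler.NewBlock()
    catch_scope.emplace(catch_block);     // constructed only on this path
  }
  std::puts("builtin body");              // Generate##Name##Impl()
  // If engaged, catch_scope is torn down here, roughly where the macro
  // calls EmitEpilog(catch_block).
}

int main() {
  GenerateBuiltin(false);
  GenerateBuiltin(true);
  return 0;
}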
Some files were not shown because too many files have changed in this diff.