deps: update V8 to 14.2.231.9

PR-URL: https://github.com/nodejs/node/pull/60111
Reviewed-By: Richard Lau <richard.lau@ibm.com>
Reviewed-By: Joyee Cheung <joyeec9h3@gmail.com>
Reviewed-By: Yagiz Nizipli <yagiz@nizipli.com>

parent 520d8277a8
commit c2843b722c
deps/v8/.ycm_extra_conf.py (vendored): 9 changed lines
@@ -149,9 +149,14 @@ def GetClangCommandFromNinjaForFilename(v8_root, filename):
v8_flags.append(MakeIncludePathAbsolute(flag, "-I", out_dir))
elif flag.startswith('-isystem'):
v8_flags.append(MakeIncludePathAbsolute(flag, "-isystem", out_dir))
elif flag.startswith('-std') or flag.startswith(
'-pthread') or flag.startswith('-no'):
elif any([flag.startswith(p) for p in ['-std', '-pthread', '-no']]):
v8_flags.append(flag)
elif any([
flag.startswith(p) for p in ['-fmodule-map-file=', '-fmodule-file=']
]) or flag == '-fbuiltin-module-map':
# Modules don't play well together with clang/clangd, see
# https://crrev.com/c/6887510.
continue
elif flag.startswith('-') and flag[1] in 'DWFfmgOX':
v8_flags.append(flag)
return v8_flags
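Because the hunk above interleaves the pre- and post-change lines without +/- markers, the following is a minimal standalone sketch of what the flag filtering appears to do after this change. It is an illustration only, not the vendored script: the function name filter_v8_flags is made up, MakeIncludePathAbsolute is assumed to rebase relative include paths onto the build output directory and is passed in as a parameter, and the leading '-I' branch and surrounding loop are inferred from the context lines.

# Illustrative sketch (not the vendored file): filter ninja-reported compiler
# flags down to the set handed to clangd/YCM, per the hunk above.
def filter_v8_flags(clang_flags, out_dir, make_include_path_absolute):
  v8_flags = []
  for flag in clang_flags:
    if flag.startswith('-I'):
      v8_flags.append(make_include_path_absolute(flag, '-I', out_dir))
    elif flag.startswith('-isystem'):
      v8_flags.append(make_include_path_absolute(flag, '-isystem', out_dir))
    elif any(flag.startswith(p) for p in ['-std', '-pthread', '-no']):
      # Language-standard, threading and -no* flags pass through unchanged.
      v8_flags.append(flag)
    elif (any(flag.startswith(p)
              for p in ['-fmodule-map-file=', '-fmodule-file='])
          or flag == '-fbuiltin-module-map'):
      # Module-related flags are dropped: modules don't play well with
      # clang/clangd (https://crrev.com/c/6887510).
      continue
    elif flag.startswith('-') and flag[1] in 'DWFfmgOX':
      # Defines, warning, feature, machine and optimization flags survive.
      v8_flags.append(flag)
  return v8_flags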
deps/v8/AUTHORS (vendored): 2 changed lines
@@ -191,6 +191,7 @@ Keith Smiley <keithbsmiley@gmail.com>
Kevin Gibbons <bakkot@gmail.com>
Keyhan Vakil <kvakil@googlecontrib.kvakil.me>
Kris Selden <kris.selden@gmail.com>
Krishna Ravishankar <krishna.ravi732@gmail.com>
Kyounga Ra <kyounga@alticast.com>
Levi Zim <rsworktech@outlook.com>
LN Liberda <lauren@selfisekai.rocks>

@@ -295,6 +296,7 @@ Tianping Yang <yangtianping@oppo.com>
Timo Teräs <timo.teras@iki.fi>
Tobias Burnus <burnus@net-b.de>
Tobias Nießen <tniessen@tnie.de>
Tomasz Malinowski <tomasz.crowsoftware@gmail.com>
Ujjwal Sharma <usharma1998@gmail.com>
Vadim Gorbachev <bmsdave@gmail.com>
Varun Varada <varuncvarada@gmail.com>
deps/v8/BUILD.bazel (vendored): 26 changed lines
@ -577,6 +577,7 @@ v8_config(
|
|||
}) + select({
|
||||
":enable_pointer_compression_shared_cage": [
|
||||
"V8_COMPRESS_POINTERS_IN_SHARED_CAGE",
|
||||
"V8_CONTIGUOUS_COMPRESSED_RO_SPACE_SIZE_MB=8",
|
||||
],
|
||||
":enable_pointer_compression_multiple_cages": [
|
||||
"V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES",
|
||||
|
|
@ -1461,8 +1462,8 @@ filegroup(
|
|||
"src/codegen/compilation-cache.h",
|
||||
"src/codegen/compiler.cc",
|
||||
"src/codegen/compiler.h",
|
||||
"src/codegen/constant-pool.cc",
|
||||
"src/codegen/constant-pool.h",
|
||||
"src/codegen/constant-pool-entry.h",
|
||||
"src/codegen/constants-arch.h",
|
||||
"src/codegen/cpu-features.h",
|
||||
"src/codegen/external-reference.cc",
|
||||
|
|
@ -1671,6 +1672,8 @@ filegroup(
|
|||
"src/flags/flags-impl.h",
|
||||
"src/flags/flags.cc",
|
||||
"src/flags/flags.h",
|
||||
"src/flags/save-flags.h",
|
||||
"src/flags/save-flags.cc",
|
||||
"src/handles/global-handles.cc",
|
||||
"src/handles/global-handles.h",
|
||||
"src/handles/global-handles-inl.h",
|
||||
|
|
@ -1876,7 +1879,6 @@ filegroup(
|
|||
"src/heap/safepoint.h",
|
||||
"src/heap/scavenger.cc",
|
||||
"src/heap/scavenger.h",
|
||||
"src/heap/scavenger-inl.h",
|
||||
"src/heap/slot-set.cc",
|
||||
"src/heap/slot-set.h",
|
||||
"src/heap/spaces.cc",
|
||||
|
|
@ -2731,6 +2733,8 @@ filegroup(
|
|||
"src/codegen/arm64/assembler-arm64.cc",
|
||||
"src/codegen/arm64/assembler-arm64.h",
|
||||
"src/codegen/arm64/assembler-arm64-inl.h",
|
||||
"src/codegen/arm64/constant-pool-arm64.cc",
|
||||
"src/codegen/arm64/constant-pool-arm64.h",
|
||||
"src/codegen/arm64/constants-arm64.h",
|
||||
"src/codegen/arm64/cpu-arm64.cc",
|
||||
"src/codegen/arm64/decoder-arm64.cc",
|
||||
|
|
@ -2797,6 +2801,8 @@ filegroup(
|
|||
"src/codegen/riscv/base-assembler-riscv.h",
|
||||
"src/codegen/riscv/base-constants-riscv.h",
|
||||
"src/codegen/riscv/base-riscv-i.h",
|
||||
"src/codegen/riscv/constant-pool-riscv.cc",
|
||||
"src/codegen/riscv/constant-pool-riscv.h",
|
||||
"src/codegen/riscv/constant-riscv-a.h",
|
||||
"src/codegen/riscv/constant-riscv-b.h",
|
||||
"src/codegen/riscv/constant-riscv-c.h",
|
||||
|
|
@ -2817,6 +2823,8 @@ filegroup(
|
|||
"src/codegen/riscv/extension-riscv-v.h",
|
||||
"src/codegen/riscv/extension-riscv-zicsr.h",
|
||||
"src/codegen/riscv/extension-riscv-zifencei.h",
|
||||
"src/codegen/riscv/extension-riscv-zimop.cc",
|
||||
"src/codegen/riscv/extension-riscv-zimop.h",
|
||||
"src/codegen/riscv/interface-descriptors-riscv-inl.h",
|
||||
"src/codegen/riscv/macro-assembler-riscv.h",
|
||||
"src/codegen/riscv/register-riscv.h",
|
||||
|
|
@ -2839,6 +2847,8 @@ filegroup(
|
|||
"src/codegen/ppc/assembler-ppc.cc",
|
||||
"src/codegen/ppc/assembler-ppc.h",
|
||||
"src/codegen/ppc/assembler-ppc-inl.h",
|
||||
"src/codegen/ppc/constant-pool-ppc.cc",
|
||||
"src/codegen/ppc/constant-pool-ppc.h",
|
||||
"src/codegen/ppc/constants-ppc.cc",
|
||||
"src/codegen/ppc/constants-ppc.h",
|
||||
"src/codegen/ppc/cpu-ppc.cc",
|
||||
|
|
@ -2907,6 +2917,7 @@ filegroup(
|
|||
"src/maglev/maglev-interpreter-frame-state.h",
|
||||
"src/maglev/maglev-ir-inl.h",
|
||||
"src/maglev/maglev-ir.h",
|
||||
"src/maglev/maglev-kna-processor.h",
|
||||
"src/maglev/maglev-phi-representation-selector.h",
|
||||
"src/maglev/maglev-truncation.h",
|
||||
"src/maglev/maglev-pipeline-statistics.h",
|
||||
|
|
@ -2926,6 +2937,8 @@ filegroup(
|
|||
"src/maglev/maglev-compiler.cc",
|
||||
"src/maglev/maglev-concurrent-dispatcher.cc",
|
||||
"src/maglev/maglev-graph-builder.cc",
|
||||
"src/maglev/maglev-known-node-aspects.cc",
|
||||
"src/maglev/maglev-known-node-aspects.h",
|
||||
"src/maglev/maglev-graph-labeller.cc",
|
||||
"src/maglev/maglev-graph-optimizer.cc",
|
||||
"src/maglev/maglev-graph-printer.cc",
|
||||
|
|
@ -3029,7 +3042,6 @@ filegroup(
|
|||
"src/wasm/leb-helper.h",
|
||||
"src/wasm/local-decl-encoder.cc",
|
||||
"src/wasm/local-decl-encoder.h",
|
||||
"src/wasm/memory-tracing.h",
|
||||
"src/wasm/module-compiler.cc",
|
||||
"src/wasm/module-compiler.h",
|
||||
"src/wasm/module-decoder.cc",
|
||||
|
|
@ -3107,6 +3119,8 @@ filegroup(
|
|||
"src/wasm/wasm-subtyping.cc",
|
||||
"src/wasm/wasm-subtyping.h",
|
||||
"src/wasm/wasm-tier.h",
|
||||
"src/wasm/wasm-tracing.cc",
|
||||
"src/wasm/wasm-tracing.h",
|
||||
"src/wasm/wasm-value.h",
|
||||
"src/wasm/well-known-imports.cc",
|
||||
"src/wasm/well-known-imports.h",
|
||||
|
|
@ -3525,6 +3539,7 @@ filegroup(
|
|||
"src/compiler/turboshaft/select-lowering-reducer.h",
|
||||
"src/compiler/turboshaft/sidetable.cc",
|
||||
"src/compiler/turboshaft/sidetable.h",
|
||||
"src/compiler/turboshaft/simplified-optimization-reducer.h",
|
||||
"src/compiler/turboshaft/simplify-tf-loops.cc",
|
||||
"src/compiler/turboshaft/simplify-tf-loops.h",
|
||||
"src/compiler/turboshaft/snapshot-table.h",
|
||||
|
|
@ -3658,6 +3673,8 @@ filegroup(
|
|||
"src/compiler/turboshaft/wasm-shuffle-reducer.h",
|
||||
"src/compiler/turboshaft/wasm-simd-phase.cc",
|
||||
"src/compiler/turboshaft/wasm-simd-phase.h",
|
||||
"src/compiler/turboshaft/wasm-type-cast-rtt-optimization-helpers.cc",
|
||||
"src/compiler/turboshaft/wasm-type-cast-rtt-optimization-helpers.h",
|
||||
"src/compiler/wasm-address-reassociation.cc",
|
||||
"src/compiler/wasm-address-reassociation.h",
|
||||
"src/compiler/wasm-call-descriptors.cc",
|
||||
|
|
@ -3695,6 +3712,8 @@ filegroup(
|
|||
"src/maglev/maglev-compilation-info.h",
|
||||
"src/maglev/maglev-compilation-unit.cc",
|
||||
"src/maglev/maglev-compilation-unit.h",
|
||||
"src/maglev/maglev-known-node-aspects.cc",
|
||||
"src/maglev/maglev-known-node-aspects.h",
|
||||
"src/maglev/maglev-graph-builder.cc",
|
||||
"src/maglev/maglev-graph-builder.h",
|
||||
"src/maglev/maglev-graph-optimizer.cc",
|
||||
|
|
@ -3714,6 +3733,7 @@ filegroup(
|
|||
"src/maglev/maglev-ir.cc",
|
||||
"src/maglev/maglev-ir.h",
|
||||
"src/maglev/maglev-ir-inl.h",
|
||||
"src/maglev/maglev-kna-processor.h",
|
||||
"src/maglev/maglev-reducer-inl.h",
|
||||
"src/maglev/maglev-reducer.h",
|
||||
"src/maglev/maglev-register-frame-array.h",
|
||||
|
|
|
|||
deps/v8/BUILD.gn (vendored): 189 changed lines
|
|
@ -353,9 +353,6 @@ declare_args() {
|
|||
# Sets -DV8_ENABLE_SANDBOX_HARDWARE_SUPPORT
|
||||
v8_enable_sandbox_hardware_support = false
|
||||
|
||||
# Enable leaptiering
|
||||
v8_enable_leaptiering = true
|
||||
|
||||
# Enable the memory corruption API. Useful for testing the sandbox.
|
||||
# The memory corruption API is only exposed to JavaScript if sandbox testing
|
||||
# mode is enabled at runtime, for example via --sandbox-fuzzing.
|
||||
|
|
@ -460,8 +457,8 @@ declare_args() {
|
|||
# Use the experimental TSA-based definition for some builtins.
|
||||
v8_enable_experimental_tsa_builtins = false
|
||||
|
||||
# Use the experimental encoding of undefined in double values.
|
||||
v8_enable_experimental_undefined_double = false
|
||||
# Use the encoding of undefined in double values.
|
||||
v8_enable_undefined_double = false
|
||||
|
||||
v8_dcheck_always_on = dcheck_always_on
|
||||
|
||||
|
|
@ -479,13 +476,7 @@ declare_args() {
|
|||
# closely mimic in-browser behavior.
|
||||
#
|
||||
# Currently used in d8.
|
||||
# TODO(399144247,400141120,392817524): Temporarily disabled again because of
|
||||
# performance regressions in multi-threaded, Zone-allocation heavy workloads,
|
||||
# e.g., JetStream startup or Wasm eager compilation.
|
||||
v8_enable_partition_alloc =
|
||||
false && !build_with_v8_embedder &&
|
||||
!(defined(build_with_node) && build_with_node) &&
|
||||
(v8_target_cpu != "ppc64" && v8_target_cpu != "s390x")
|
||||
v8_enable_partition_alloc = ""
|
||||
|
||||
# V8 uses Chrome's blink-gc clang plugin for checking V8's internal cppgc
|
||||
# usages. The plugin is not exhaustive but covers most common known pitfalls.
|
||||
|
|
@ -510,7 +501,8 @@ if (v8_enable_verify_heap == "") {
|
|||
}
|
||||
if (v8_enable_verify_write_barriers == "") {
|
||||
v8_enable_verify_write_barriers =
|
||||
v8_enable_debugging_features && !v8_disable_write_barriers
|
||||
(v8_enable_debugging_features || v8_dcheck_always_on) &&
|
||||
!v8_disable_write_barriers
|
||||
}
|
||||
if (v8_enable_object_print == "") {
|
||||
v8_enable_object_print = v8_enable_debugging_features
|
||||
|
|
@ -568,8 +560,7 @@ if (v8_multi_arch_build &&
|
|||
rebase_path(get_label_info(":d8", "root_out_dir"), root_build_dir) ==
|
||||
"clang_x64_fuzzer_experiments") {
|
||||
v8_enable_pointer_compression = !v8_enable_pointer_compression
|
||||
v8_enable_experimental_undefined_double =
|
||||
!v8_enable_experimental_undefined_double
|
||||
v8_enable_undefined_double = !v8_enable_undefined_double
|
||||
v8_lower_limits_mode = !v8_lower_limits_mode
|
||||
}
|
||||
|
||||
|
|
@ -615,11 +606,22 @@ if (v8_enable_maglev == "") {
|
|||
v8_enable_maglev = v8_enable_turbofan &&
|
||||
(v8_current_cpu == "arm" || v8_current_cpu == "x64" ||
|
||||
v8_current_cpu == "arm64" || v8_current_cpu == "s390x" ||
|
||||
v8_current_cpu == "riscv64")
|
||||
v8_current_cpu == "ppc64" || v8_current_cpu == "riscv64")
|
||||
}
|
||||
assert(v8_enable_turbofan || !v8_enable_maglev,
|
||||
"Maglev is not available when Turbofan is disabled.")
|
||||
|
||||
if (v8_enable_partition_alloc == "") {
|
||||
v8_enable_partition_alloc =
|
||||
!build_with_v8_embedder &&
|
||||
!(defined(build_with_node) && build_with_node) &&
|
||||
(v8_target_cpu != "ppc64" && v8_target_cpu != "s390x") &&
|
||||
v8_enable_pointer_compression_shared_cage
|
||||
}
|
||||
|
||||
assert(!v8_enable_partition_alloc || v8_enable_pointer_compression_shared_cage,
|
||||
"partition_alloc is not available for multi-cage mode.")
|
||||
|
||||
is_wasm_interpreter_mode_enabled =
|
||||
!v8_enable_sparkplug && !v8_enable_maglev && !v8_enable_turbofan &&
|
||||
v8_enable_webassembly && v8_enable_drumbrake
|
||||
|
|
@ -760,10 +762,6 @@ assert(!v8_enable_pointer_compression_8gb || v8_enable_pointer_compression,
|
|||
assert(!v8_enable_sandbox || v8_enable_external_code_space,
|
||||
"The sandbox requires the external code space")
|
||||
|
||||
assert(
|
||||
v8_enable_leaptiering,
|
||||
"non-leaptiering is deprecated (see https://groups.google.com/g/v8-dev/c/PYk2_GGP2Wk)")
|
||||
|
||||
assert(!v8_enable_memory_corruption_api || v8_enable_sandbox,
|
||||
"The Memory Corruption API requires the sandbox")
|
||||
|
||||
|
|
@ -1174,9 +1172,16 @@ config("features") {
|
|||
defines += [ "V8_TLS_USED_IN_LIBRARY" ]
|
||||
}
|
||||
|
||||
if (v8_enable_pointer_compression &&
|
||||
!v8_enable_pointer_compression_shared_cage) {
|
||||
defines += [ "V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES" ]
|
||||
if (v8_enable_pointer_compression) {
|
||||
if (v8_enable_pointer_compression_shared_cage) {
|
||||
defines += [
|
||||
# TODO(442942399): Re-enable after bug has been addressed.
|
||||
# "V8_CONTIGUOUS_COMPRESSED_RO_SPACE",
|
||||
"V8_CONTIGUOUS_COMPRESSED_RO_SPACE_SIZE_MB=${v8_contiguous_compressed_ro_space_size_mb}",
|
||||
]
|
||||
} else {
|
||||
defines += [ "V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES" ]
|
||||
}
|
||||
}
|
||||
|
||||
if (v8_embedder_string != "") {
|
||||
|
|
@ -1450,12 +1455,10 @@ config("features") {
|
|||
if (v8_enable_experimental_tsa_builtins) {
|
||||
defines += [ "V8_ENABLE_EXPERIMENTAL_TSA_BUILTINS" ]
|
||||
}
|
||||
if (v8_enable_experimental_undefined_double) {
|
||||
defines += [ "V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE" ]
|
||||
}
|
||||
if (v8_enable_leaptiering) {
|
||||
defines += [ "V8_ENABLE_LEAPTIERING" ]
|
||||
if (v8_enable_undefined_double) {
|
||||
defines += [ "V8_ENABLE_UNDEFINED_DOUBLE" ]
|
||||
}
|
||||
defines += [ "V8_ENABLE_LEAPTIERING" ]
|
||||
if (v8_enable_partition_alloc) {
|
||||
defines += [ "V8_ENABLE_PARTITION_ALLOC" ]
|
||||
}
|
||||
|
|
@ -1793,6 +1796,32 @@ config("always_turbofanimize") {
|
|||
}
|
||||
}
|
||||
|
||||
# Sanitizer defines. V8 will inherit a default `-fsanitize=array-bounds`
|
||||
# from Chromium's `//build/config/`, which prevents clean usage of
|
||||
# `__has_feature(undefined_behavior_sanitizer)` in the short term,
|
||||
# until something like `--lie-about-ubsan-enablement=array-bounds`
|
||||
# can be implemented.
|
||||
#
|
||||
# This config provides a clear signal of "are we sanitizing" tied to
|
||||
# GN configuration.
|
||||
#
|
||||
# See also: https://crbug.com/386992829
|
||||
config("sanitizer_defines") {
|
||||
defines = []
|
||||
if (is_asan) {
|
||||
defines += [ "V8_USE_ADDRESS_SANITIZER" ]
|
||||
}
|
||||
if (is_hwasan) {
|
||||
defines += [ "V8_USE_HWADDRESS_SANITIZER" ]
|
||||
}
|
||||
if (is_msan) {
|
||||
defines += [ "V8_USE_MEMORY_SANITIZER" ]
|
||||
}
|
||||
if (is_ubsan) {
|
||||
defines += [ "V8_USE_UNDEFINED_BEHAVIOR_SANITIZER" ]
|
||||
}
|
||||
}
|
||||
|
||||
###############################################################################
|
||||
# Actions
|
||||
#
|
||||
|
|
@ -2891,7 +2920,6 @@ action("v8_dump_build_config") {
|
|||
"verify_predictable=$v8_enable_verify_predictable",
|
||||
"wasm_random_fuzzers=$v8_wasm_random_fuzzers",
|
||||
"memory_corruption_api=$v8_enable_memory_corruption_api",
|
||||
"leaptiering=$v8_enable_leaptiering",
|
||||
"lower_limits_mode=$v8_lower_limits_mode",
|
||||
|
||||
# Please add new switches also in `build_config_content` in `bazel/defs.bzl`
|
||||
|
|
@ -3373,6 +3401,23 @@ v8_header_set("v8_flags") {
|
|||
]
|
||||
}
|
||||
|
||||
if (v8_enable_temporal_support) {
|
||||
# In cases where we aren't using ICU4C (e.g. v8_enable_i18n_support=false),
|
||||
# we "bake" in a zoneinfo64.res binary for Temporal
|
||||
action("make_temporal_zoneinfo_cpp") {
|
||||
script = "tools/include-file-as-bytes.py"
|
||||
inputs = [ get_label_info("//third_party/icu/", "dir") +
|
||||
"/tzres/zoneinfo64.res" ]
|
||||
outputs =
|
||||
[ "$target_gen_dir/src/builtins/builtins-temporal-zoneinfo64-data.cc" ]
|
||||
args = [
|
||||
rebase_path(inputs[0], root_build_dir),
|
||||
rebase_path(outputs[0], root_build_dir),
|
||||
"zoneinfo64_static_data",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
v8_header_set("v8_internal_headers") {
|
||||
configs = [ ":internal_config" ]
|
||||
|
||||
|
|
@ -3431,6 +3476,7 @@ v8_header_set("v8_internal_headers") {
|
|||
"src/codegen/code-reference.h",
|
||||
"src/codegen/compilation-cache.h",
|
||||
"src/codegen/compiler.h",
|
||||
"src/codegen/constant-pool-entry.h",
|
||||
"src/codegen/constant-pool.h",
|
||||
"src/codegen/constants-arch.h",
|
||||
"src/codegen/cpu-features.h",
|
||||
|
|
@ -3673,6 +3719,7 @@ v8_header_set("v8_internal_headers") {
|
|||
"src/compiler/turboshaft/runtime-call-descriptors.h",
|
||||
"src/compiler/turboshaft/select-lowering-reducer.h",
|
||||
"src/compiler/turboshaft/sidetable.h",
|
||||
"src/compiler/turboshaft/simplified-optimization-reducer.h",
|
||||
"src/compiler/turboshaft/simplify-tf-loops.h",
|
||||
"src/compiler/turboshaft/snapshot-table-opindex.h",
|
||||
"src/compiler/turboshaft/snapshot-table.h",
|
||||
|
|
@ -3780,6 +3827,7 @@ v8_header_set("v8_internal_headers") {
|
|||
"src/extensions/ignition-statistics-extension.h",
|
||||
"src/extensions/statistics-extension.h",
|
||||
"src/extensions/trigger-failure-extension.h",
|
||||
"src/flags/save-flags.h",
|
||||
"src/handles/global-handles-inl.h",
|
||||
"src/handles/global-handles.h",
|
||||
"src/handles/handles-inl.h",
|
||||
|
|
@ -3910,7 +3958,6 @@ v8_header_set("v8_internal_headers") {
|
|||
"src/heap/remembered-set-inl.h",
|
||||
"src/heap/remembered-set.h",
|
||||
"src/heap/safepoint.h",
|
||||
"src/heap/scavenger-inl.h",
|
||||
"src/heap/scavenger.h",
|
||||
"src/heap/slot-set.h",
|
||||
"src/heap/spaces-inl.h",
|
||||
|
|
@ -4414,6 +4461,7 @@ v8_header_set("v8_internal_headers") {
|
|||
sources += [
|
||||
"src/objects/js-temporal-objects-inl.h",
|
||||
"src/objects/js-temporal-objects.h",
|
||||
"src/objects/js-temporal-zoneinfo64.h",
|
||||
]
|
||||
}
|
||||
|
||||
|
|
@ -4460,6 +4508,8 @@ v8_header_set("v8_internal_headers") {
|
|||
"src/maglev/maglev-interpreter-frame-state.h",
|
||||
"src/maglev/maglev-ir-inl.h",
|
||||
"src/maglev/maglev-ir.h",
|
||||
"src/maglev/maglev-kna-processor.h",
|
||||
"src/maglev/maglev-known-node-aspects.h",
|
||||
"src/maglev/maglev-phi-representation-selector.h",
|
||||
"src/maglev/maglev-pipeline-statistics.h",
|
||||
"src/maglev/maglev-post-hoc-optimizations-processors.h",
|
||||
|
|
@ -4483,7 +4533,7 @@ v8_header_set("v8_internal_headers") {
|
|||
} else if (v8_current_cpu == "s390x") {
|
||||
sources += [ "src/maglev/s390/maglev-assembler-s390-inl.h" ]
|
||||
} else if (v8_current_cpu == "ppc64") {
|
||||
sources += [ "src/maglev/ppc64/maglev-assembler-ppc64-inl.h" ]
|
||||
sources += [ "src/maglev/ppc/maglev-assembler-ppc-inl.h" ]
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -4512,6 +4562,7 @@ v8_header_set("v8_internal_headers") {
|
|||
"src/compiler/turboshaft/wasm-shuffle-reducer.h",
|
||||
"src/compiler/turboshaft/wasm-simd-phase.h",
|
||||
"src/compiler/turboshaft/wasm-turboshaft-compiler.h",
|
||||
"src/compiler/turboshaft/wasm-type-cast-rtt-optimization-helpers.h",
|
||||
"src/compiler/wasm-address-reassociation.h",
|
||||
"src/compiler/wasm-call-descriptors.h",
|
||||
"src/compiler/wasm-compiler-definitions.h",
|
||||
|
|
@ -4552,7 +4603,6 @@ v8_header_set("v8_internal_headers") {
|
|||
"src/wasm/jump-table-assembler.h",
|
||||
"src/wasm/leb-helper.h",
|
||||
"src/wasm/local-decl-encoder.h",
|
||||
"src/wasm/memory-tracing.h",
|
||||
"src/wasm/module-compiler.h",
|
||||
"src/wasm/module-decoder-impl.h",
|
||||
"src/wasm/module-decoder.h",
|
||||
|
|
@ -4601,6 +4651,7 @@ v8_header_set("v8_internal_headers") {
|
|||
"src/wasm/wasm-serialization.h",
|
||||
"src/wasm/wasm-subtyping.h",
|
||||
"src/wasm/wasm-tier.h",
|
||||
"src/wasm/wasm-tracing.h",
|
||||
"src/wasm/wasm-value.h",
|
||||
"src/wasm/well-known-imports.h",
|
||||
"src/wasm/wrappers-inl.h",
|
||||
|
|
@ -4777,6 +4828,7 @@ v8_header_set("v8_internal_headers") {
|
|||
### gcmole(arm64) ###
|
||||
"src/codegen/arm64/assembler-arm64-inl.h",
|
||||
"src/codegen/arm64/assembler-arm64.h",
|
||||
"src/codegen/arm64/constant-pool-arm64.h",
|
||||
"src/codegen/arm64/constants-arm64.h",
|
||||
"src/codegen/arm64/decoder-arm64-inl.h",
|
||||
"src/codegen/arm64/decoder-arm64.h",
|
||||
|
|
@ -4880,6 +4932,7 @@ v8_header_set("v8_internal_headers") {
|
|||
### gcmole(ppc64) ###
|
||||
"src/codegen/ppc/assembler-ppc-inl.h",
|
||||
"src/codegen/ppc/assembler-ppc.h",
|
||||
"src/codegen/ppc/constant-pool-ppc.h",
|
||||
"src/codegen/ppc/constants-ppc.h",
|
||||
"src/codegen/ppc/interface-descriptors-ppc-inl.h",
|
||||
"src/codegen/ppc/macro-assembler-ppc.h",
|
||||
|
|
@ -4931,6 +4984,7 @@ v8_header_set("v8_internal_headers") {
|
|||
"src/codegen/riscv/base-assembler-riscv.h",
|
||||
"src/codegen/riscv/base-constants-riscv.h",
|
||||
"src/codegen/riscv/base-riscv-i.h",
|
||||
"src/codegen/riscv/constant-pool-riscv.h",
|
||||
"src/codegen/riscv/constant-riscv-a.h",
|
||||
"src/codegen/riscv/constant-riscv-b.h",
|
||||
"src/codegen/riscv/constant-riscv-c.h",
|
||||
|
|
@ -4953,6 +5007,7 @@ v8_header_set("v8_internal_headers") {
|
|||
"src/codegen/riscv/extension-riscv-zicond.h",
|
||||
"src/codegen/riscv/extension-riscv-zicsr.h",
|
||||
"src/codegen/riscv/extension-riscv-zifencei.h",
|
||||
"src/codegen/riscv/extension-riscv-zimop.h",
|
||||
"src/codegen/riscv/interface-descriptors-riscv-inl.h",
|
||||
"src/codegen/riscv/macro-assembler-riscv.h",
|
||||
"src/codegen/riscv/register-riscv.h",
|
||||
|
|
@ -5009,6 +5064,7 @@ v8_header_set("v8_internal_headers") {
|
|||
"src/codegen/riscv/extension-riscv-v.h",
|
||||
"src/codegen/riscv/extension-riscv-zicsr.h",
|
||||
"src/codegen/riscv/extension-riscv-zifencei.h",
|
||||
"src/codegen/riscv/extension-riscv-zimop.h",
|
||||
"src/codegen/riscv/interface-descriptors-riscv-inl.h",
|
||||
"src/codegen/riscv/macro-assembler-riscv.h",
|
||||
"src/codegen/riscv/register-riscv.h",
|
||||
|
|
@ -5232,6 +5288,7 @@ if (!v8_enable_maglev) {
|
|||
"src/maglev/maglev-inlining.cc",
|
||||
"src/maglev/maglev-interpreter-frame-state.cc",
|
||||
"src/maglev/maglev-ir.cc",
|
||||
"src/maglev/maglev-known-node-aspects.cc",
|
||||
"src/maglev/maglev-phi-representation-selector.cc",
|
||||
"src/maglev/maglev-truncation.cc",
|
||||
]
|
||||
|
|
@ -5328,6 +5385,7 @@ if (v8_enable_webassembly) {
|
|||
"src/compiler/turboshaft/wasm-shuffle-reducer.cc",
|
||||
"src/compiler/turboshaft/wasm-simd-phase.cc",
|
||||
"src/compiler/turboshaft/wasm-turboshaft-compiler.cc",
|
||||
"src/compiler/turboshaft/wasm-type-cast-rtt-optimization-helpers.cc",
|
||||
"src/compiler/wasm-address-reassociation.cc",
|
||||
"src/compiler/wasm-call-descriptors.cc",
|
||||
"src/compiler/wasm-compiler.cc",
|
||||
|
|
@ -5517,7 +5575,6 @@ v8_source_set("v8_base_without_compiler") {
|
|||
"src/codegen/code-reference.cc",
|
||||
"src/codegen/compilation-cache.cc",
|
||||
"src/codegen/compiler.cc",
|
||||
"src/codegen/constant-pool.cc",
|
||||
"src/codegen/external-reference-encoder.cc",
|
||||
"src/codegen/external-reference-table.cc",
|
||||
"src/codegen/external-reference.cc",
|
||||
|
|
@ -5598,6 +5655,7 @@ v8_source_set("v8_base_without_compiler") {
|
|||
"src/extensions/statistics-extension.cc",
|
||||
"src/extensions/trigger-failure-extension.cc",
|
||||
"src/flags/flags.cc",
|
||||
"src/flags/save-flags.cc",
|
||||
"src/handles/global-handles.cc",
|
||||
"src/handles/handles.cc",
|
||||
"src/handles/local-handles.cc",
|
||||
|
|
@ -5947,6 +6005,7 @@ v8_source_set("v8_base_without_compiler") {
|
|||
sources += [
|
||||
"src/builtins/builtins-temporal.cc",
|
||||
"src/objects/js-temporal-objects.cc",
|
||||
"src/objects/js-temporal-zoneinfo64.cc",
|
||||
]
|
||||
}
|
||||
|
||||
|
|
@ -5973,6 +6032,7 @@ v8_source_set("v8_base_without_compiler") {
|
|||
"src/maglev/maglev-inlining.cc",
|
||||
"src/maglev/maglev-interpreter-frame-state.cc",
|
||||
"src/maglev/maglev-ir.cc",
|
||||
"src/maglev/maglev-known-node-aspects.cc",
|
||||
"src/maglev/maglev-phi-representation-selector.cc",
|
||||
"src/maglev/maglev-pipeline-statistics.cc",
|
||||
"src/maglev/maglev-regalloc.cc",
|
||||
|
|
@ -6006,8 +6066,8 @@ v8_source_set("v8_base_without_compiler") {
|
|||
]
|
||||
} else if (v8_current_cpu == "ppc64") {
|
||||
sources += [
|
||||
"src/maglev/ppc64/maglev-assembler-ppc64.cc",
|
||||
"src/maglev/ppc64/maglev-ir-ppc64.cc",
|
||||
"src/maglev/ppc/maglev-assembler-ppc.cc",
|
||||
"src/maglev/ppc/maglev-ir-ppc.cc",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
|
@ -6075,6 +6135,7 @@ v8_source_set("v8_base_without_compiler") {
|
|||
"src/wasm/wasm-result.cc",
|
||||
"src/wasm/wasm-serialization.cc",
|
||||
"src/wasm/wasm-subtyping.cc",
|
||||
"src/wasm/wasm-tracing.cc",
|
||||
"src/wasm/well-known-imports.cc",
|
||||
]
|
||||
if (v8_wasm_random_fuzzers) {
|
||||
|
|
@ -6178,6 +6239,7 @@ v8_source_set("v8_base_without_compiler") {
|
|||
sources += [
|
||||
### gcmole(arm64) ###
|
||||
"src/codegen/arm64/assembler-arm64.cc",
|
||||
"src/codegen/arm64/constant-pool-arm64.cc",
|
||||
"src/codegen/arm64/cpu-arm64.cc",
|
||||
"src/codegen/arm64/decoder-arm64.cc",
|
||||
"src/codegen/arm64/instructions-arm64-constants.cc",
|
||||
|
|
@ -6265,6 +6327,7 @@ v8_source_set("v8_base_without_compiler") {
|
|||
sources += [
|
||||
### gcmole(ppc64) ###
|
||||
"src/codegen/ppc/assembler-ppc.cc",
|
||||
"src/codegen/ppc/constant-pool-ppc.cc",
|
||||
"src/codegen/ppc/constants-ppc.cc",
|
||||
"src/codegen/ppc/cpu-ppc.cc",
|
||||
"src/codegen/ppc/macro-assembler-ppc.cc",
|
||||
|
|
@ -6298,6 +6361,7 @@ v8_source_set("v8_base_without_compiler") {
|
|||
"src/codegen/riscv/base-assembler-riscv.cc",
|
||||
"src/codegen/riscv/base-constants-riscv.cc",
|
||||
"src/codegen/riscv/base-riscv-i.cc",
|
||||
"src/codegen/riscv/constant-pool-riscv.cc",
|
||||
"src/codegen/riscv/cpu-riscv.cc",
|
||||
"src/codegen/riscv/extension-riscv-a.cc",
|
||||
"src/codegen/riscv/extension-riscv-b.cc",
|
||||
|
|
@ -6310,6 +6374,7 @@ v8_source_set("v8_base_without_compiler") {
|
|||
"src/codegen/riscv/extension-riscv-zicond.cc",
|
||||
"src/codegen/riscv/extension-riscv-zicsr.cc",
|
||||
"src/codegen/riscv/extension-riscv-zifencei.cc",
|
||||
"src/codegen/riscv/extension-riscv-zimop.cc",
|
||||
"src/codegen/riscv/macro-assembler-riscv.cc",
|
||||
"src/deoptimizer/riscv/deoptimizer-riscv.cc",
|
||||
"src/diagnostics/riscv/disasm-riscv.cc",
|
||||
|
|
@ -6339,6 +6404,7 @@ v8_source_set("v8_base_without_compiler") {
|
|||
"src/codegen/riscv/base-assembler-riscv.cc",
|
||||
"src/codegen/riscv/base-constants-riscv.cc",
|
||||
"src/codegen/riscv/base-riscv-i.cc",
|
||||
"src/codegen/riscv/constant-pool-riscv.cc",
|
||||
"src/codegen/riscv/cpu-riscv.cc",
|
||||
"src/codegen/riscv/extension-riscv-a.cc",
|
||||
"src/codegen/riscv/extension-riscv-b.cc",
|
||||
|
|
@ -6351,6 +6417,7 @@ v8_source_set("v8_base_without_compiler") {
|
|||
"src/codegen/riscv/extension-riscv-zicond.cc",
|
||||
"src/codegen/riscv/extension-riscv-zicsr.cc",
|
||||
"src/codegen/riscv/extension-riscv-zifencei.cc",
|
||||
"src/codegen/riscv/extension-riscv-zimop.cc",
|
||||
"src/codegen/riscv/macro-assembler-riscv.cc",
|
||||
"src/deoptimizer/riscv/deoptimizer-riscv.cc",
|
||||
"src/diagnostics/riscv/disasm-riscv.cc",
|
||||
|
|
@ -6457,6 +6524,14 @@ v8_source_set("v8_base_without_compiler") {
|
|||
]
|
||||
}
|
||||
|
||||
# In i18n mode, we can use ICU4C to load ICU4C's builtin zoneinfo64.res data
|
||||
# In non-i18n mode, we need to copy that into the binary
|
||||
if (v8_enable_temporal_support && !v8_enable_i18n_support) {
|
||||
sources +=
|
||||
[ "$target_gen_dir/src/builtins/builtins-temporal-zoneinfo64-data.cc" ]
|
||||
deps += [ ":make_temporal_zoneinfo_cpp" ]
|
||||
}
|
||||
|
||||
if (v8_postmortem_support) {
|
||||
sources += [ "$target_gen_dir/debug-support.cc" ]
|
||||
deps += [ ":postmortem-metadata" ]
|
||||
|
|
@ -8132,10 +8207,7 @@ if (v8_enable_webassembly) {
|
|||
":v8_internal_headers",
|
||||
]
|
||||
|
||||
public_deps = [
|
||||
":wasm_fuzzer_common",
|
||||
":wasm_test_common",
|
||||
]
|
||||
public_deps = [ ":wasm_test_common" ]
|
||||
|
||||
configs = [
|
||||
":external_config",
|
||||
|
|
@ -8206,14 +8278,18 @@ if (v8_enable_webassembly) {
|
|||
"test/common/flag-utils.h",
|
||||
"test/common/value-helper.h",
|
||||
"test/common/wasm/flag-utils.h",
|
||||
"test/common/wasm/fuzzer-common.cc",
|
||||
"test/common/wasm/fuzzer-common.h",
|
||||
"test/common/wasm/wasm-macro-gen.h",
|
||||
"test/common/wasm/wasm-module-runner.cc",
|
||||
"test/common/wasm/wasm-module-runner.h",
|
||||
"test/common/wasm/wasm-run-utils.cc",
|
||||
"test/common/wasm/wasm-run-utils.h",
|
||||
"tools/wasm/mjsunit-module-disassembler-impl.h",
|
||||
]
|
||||
|
||||
deps = [
|
||||
":fuzzer_support",
|
||||
":generate_bytecode_builtins_list",
|
||||
":run_torque",
|
||||
":v8_internal_headers",
|
||||
|
|
@ -8234,43 +8310,10 @@ if (v8_enable_webassembly) {
|
|||
]
|
||||
}
|
||||
|
||||
v8_source_set("wasm_fuzzer_common") {
|
||||
testonly = true
|
||||
|
||||
sources = [
|
||||
"test/fuzzer/wasm/fuzzer-common.cc",
|
||||
"test/fuzzer/wasm/fuzzer-common.h",
|
||||
"tools/wasm/mjsunit-module-disassembler-impl.h",
|
||||
]
|
||||
|
||||
deps = [
|
||||
":fuzzer_support",
|
||||
":generate_bytecode_builtins_list",
|
||||
":run_torque",
|
||||
":v8_internal_headers",
|
||||
":v8_maybe_temporal",
|
||||
":v8_tracing",
|
||||
":wasm_test_common",
|
||||
]
|
||||
|
||||
public_deps = [
|
||||
":v8_abseil",
|
||||
":v8_maybe_icu",
|
||||
]
|
||||
|
||||
configs = [
|
||||
":external_config",
|
||||
":internal_config_base",
|
||||
]
|
||||
}
|
||||
|
||||
template("v8_wasm_fuzzer") {
|
||||
forward_variables_from(invoker, "*")
|
||||
v8_fuzzer(target_name) {
|
||||
deps = [
|
||||
":wasm_fuzzer_common",
|
||||
":wasm_test_common",
|
||||
]
|
||||
deps = [ ":wasm_test_common" ]
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
deps/v8/DEPS (vendored): 260 changed lines
|
|
@ -59,7 +59,7 @@ vars = {
|
|||
'checkout_fuchsia_no_hooks': False,
|
||||
|
||||
# reclient CIPD package version
|
||||
'reclient_version': 're_client_version:0.179.0.28341fc7-gomaip',
|
||||
'reclient_version': 're_client_version:0.183.0.3b3097cd-gomaip',
|
||||
|
||||
# Fetch configuration files required for the 'use_remoteexec' gn arg
|
||||
'download_remoteexec_cfg': False,
|
||||
|
|
@ -75,24 +75,24 @@ vars = {
|
|||
'build_with_chromium': False,
|
||||
|
||||
# GN CIPD package version.
|
||||
'gn_version': 'git_revision:5d0a4153b0bcc86c5a23310d5b648a587be3c56d',
|
||||
'gn_version': 'git_revision:81b24e01531ecf0eff12ec9359a555ec3944ec4e',
|
||||
|
||||
# ninja CIPD package version
|
||||
# https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
|
||||
'ninja_version': 'version:3@1.12.1.chromium.4',
|
||||
|
||||
# siso CIPD package version
|
||||
'siso_version': 'git_revision:8863265a67843154872be2be1fc0c37339691405',
|
||||
'siso_version': 'git_revision:f7020b54462c37f1b10a16e68563c338c9f14371',
|
||||
|
||||
# Three lines of non-changing comments so that
|
||||
# the commit queue can handle CLs rolling Fuchsia sdk
|
||||
# and whatever else without interference from each other.
|
||||
'fuchsia_version': 'version:29.20250824.3.1',
|
||||
'fuchsia_version': 'version:29.20250927.0.1',
|
||||
|
||||
# Three lines of non-changing comments so that
|
||||
# the commit queue can handle CLs rolling partition_alloc_version
|
||||
# and whatever else without interference from each other.
|
||||
'partition_alloc_version': '51d0a558ecdf5cac58509d08263c36764c270ca6',
|
||||
'partition_alloc_version': 'cca2b369b2f8895cb14e24740e1f9bf91d5b371e',
|
||||
|
||||
# Three lines of non-changing comments so that
|
||||
# the commit queue can handle CLs rolling android_sdk_build-tools_version
|
||||
|
|
@ -130,9 +130,9 @@ vars = {
|
|||
|
||||
deps = {
|
||||
'build':
|
||||
Var('chromium_url') + '/chromium/src/build.git' + '@' + 'd964efc5f27dcb8690921b9dc4a7780dce83695c',
|
||||
Var('chromium_url') + '/chromium/src/build.git' + '@' + 'dd54bc718b7c5363155660d12b7965ea9f87ada9',
|
||||
'buildtools':
|
||||
Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '2ca9a5b96fbf0a4947d626454781e333b28e275a',
|
||||
Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'fe8567e143162ec1a2fc8d13f85d67a8d2dde1b7',
|
||||
'buildtools/linux64': {
|
||||
'packages': [
|
||||
{
|
||||
|
|
@ -178,9 +178,9 @@ deps = {
|
|||
'test/mozilla/data':
|
||||
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
|
||||
'test/test262/data':
|
||||
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'b947715fdda79a420b253821c1cc52272a77222d',
|
||||
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'a5e69a1534de88d1eb29b76657d84c8541b72df7',
|
||||
'third_party/android_platform': {
|
||||
'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'e97e62b0b5f26315a0cd58ff8772a2483107158e',
|
||||
'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'e3919359f2387399042d31401817db4a02d756ec',
|
||||
'condition': 'checkout_android',
|
||||
},
|
||||
'third_party/android_sdk/public': {
|
||||
|
|
@ -232,7 +232,7 @@ deps = {
|
|||
'dep_type': 'cipd',
|
||||
},
|
||||
'third_party/catapult': {
|
||||
'url': Var('chromium_url') + '/catapult.git' + '@' + '3c5077921dbacc75db5768cf4fc0b1d9ca05d2e0',
|
||||
'url': Var('chromium_url') + '/catapult.git' + '@' + 'e2b34e6c5df90b060797419372b230d5638a3843',
|
||||
'condition': 'checkout_android',
|
||||
},
|
||||
'third_party/clang-format/script':
|
||||
|
|
@ -246,15 +246,15 @@ deps = {
|
|||
'condition': 'checkout_android',
|
||||
},
|
||||
'third_party/depot_tools':
|
||||
Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '6b19831e3cfb16884a36b8045383594955712892',
|
||||
Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '73e46667ed3a1326cf564747737b4e11137d7f29',
|
||||
'third_party/dragonbox/src':
|
||||
Var('chromium_url') + '/external/github.com/jk-jeon/dragonbox.git' + '@' + '6c7c925b571d54486b9ffae8d9d18a822801cbda',
|
||||
'third_party/fp16/src':
|
||||
Var('chromium_url') + '/external/github.com/Maratyszcza/FP16.git' + '@' + 'b3720617faf1a4581ed7e6787cc51722ec7751f0',
|
||||
Var('chromium_url') + '/external/github.com/Maratyszcza/FP16.git' + '@' + '3d2de1816307bac63c16a297e8c4dc501b4076df',
|
||||
'third_party/fast_float/src':
|
||||
Var('chromium_url') + '/external/github.com/fastfloat/fast_float.git' + '@' + 'cb1d42aaa1e14b09e1452cfdef373d051b8c02a4',
|
||||
'third_party/fuchsia-gn-sdk': {
|
||||
'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-gn-sdk.git' + '@' + '7e28b752c19443ee31cb47a7195add4131a1cc09',
|
||||
'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-gn-sdk.git' + '@' + '99294ee55f28f8ae5a3552f4c435528e4c1686b6',
|
||||
'condition': 'checkout_fuchsia',
|
||||
},
|
||||
'third_party/simdutf':
|
||||
|
|
@ -280,11 +280,11 @@ deps = {
|
|||
'third_party/fuzztest':
|
||||
Var('chromium_url') + '/chromium/src/third_party/fuzztest.git' + '@' + 'aa6ba9074b8d66a2e2853a0a0992c25966022e13',
|
||||
'third_party/fuzztest/src':
|
||||
Var('chromium_url') + '/external/github.com/google/fuzztest.git' + '@' + '169baf17795850fd4b2c29e4d52136aa8d955b61',
|
||||
Var('chromium_url') + '/external/github.com/google/fuzztest.git' + '@' + 'e101ca021a40733d0fa76a3bd9b49b5f76da4f8a',
|
||||
'third_party/googletest/src':
|
||||
Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '244cec869d12e53378fa0efb610cd4c32a454ec8',
|
||||
'third_party/highway/src':
|
||||
Var('chromium_url') + '/external/github.com/google/highway.git' + '@' + '00fe003dac355b979f36157f9407c7c46448958e',
|
||||
Var('chromium_url') + '/external/github.com/google/highway.git' + '@' + '84379d1c73de9681b54fbe1c035a23c7bd5d272d',
|
||||
'third_party/icu':
|
||||
Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '1b2e3e8a421efae36141a7b932b41e315b089af8',
|
||||
'third_party/instrumented_libs': {
|
||||
|
|
@ -302,169 +302,169 @@ deps = {
|
|||
'third_party/jsoncpp/source':
|
||||
Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '42e892d96e47b1f6e29844cc705e148ec4856448',
|
||||
'third_party/libc++/src':
|
||||
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '70c21e34ea54ef13377bc6d8283453290e17c7b8',
|
||||
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '4b4a57f5cf627639c041368120af9d69ed40032c',
|
||||
'third_party/libc++abi/src':
|
||||
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'f7f5a32b3e9582092d8a4511acec036a09ae8524',
|
||||
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '864f61dc9253d56586ada34c388278565ef513f6',
|
||||
'third_party/libunwind/src':
|
||||
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '92fb77dfd4d86aa120730359f5e4d6bb47f1c129',
|
||||
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '322be580a5a193a921c349a15747eeeb9a716ad1',
|
||||
'third_party/llvm-libc/src':
|
||||
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libc.git' + '@' + '11725050a2e117625867ec45d70746bb78b170fd',
|
||||
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libc.git' + '@' + '7b45735a2b2c0c11c7c4e02fc6bae12ea95aec48',
|
||||
'third_party/llvm-build/Release+Asserts': {
|
||||
'dep_type': 'gcs',
|
||||
'bucket': 'chromium-browser-clang',
|
||||
'objects': [
|
||||
{
|
||||
'object_name': 'Linux_x64/clang-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': 'a9f5af449672a239366199c17441427c2c4433a120cace9ffd32397e15224c64',
|
||||
'size_bytes': 55087424,
|
||||
'generation': 1754486730635359,
|
||||
'object_name': 'Linux_x64/clang-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '35a8629534f534aa6771470945fc0baa6906b3fffb28433bc08674d343b84c90',
|
||||
'size_bytes': 55674480,
|
||||
'generation': 1758743123214066,
|
||||
'condition': 'host_os == "linux"',
|
||||
},
|
||||
{
|
||||
'object_name': 'Linux_x64/clang-tidy-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': 'c2ce17d666c5124d1b3999e160836b096b22a7c2dbb6f70637be6dceefa4bb86',
|
||||
'size_bytes': 13688944,
|
||||
'generation': 1754486730632975,
|
||||
'object_name': 'Linux_x64/clang-tidy-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '0342c1f9f546b2c87010418c37eaf494b3bcee24e60a351a880046951bf4d47b',
|
||||
'size_bytes': 14059964,
|
||||
'generation': 1758743123322050,
|
||||
'condition': 'host_os == "linux" and checkout_clang_tidy',
|
||||
},
|
||||
{
|
||||
'object_name': 'Linux_x64/clangd-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': 'd42b0b22da85e7a49f239eeb378b0e8cd6eeeb1c685e89155c30a344de219636',
|
||||
'size_bytes': 13982120,
|
||||
'generation': 1754486730644041,
|
||||
'object_name': 'Linux_x64/clangd-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '8b9513babd89f706e928be51b9a4c08a4511dae1c152285808d7a25b299ae94b',
|
||||
'size_bytes': 14210752,
|
||||
'generation': 1758743123414815,
|
||||
'condition': 'host_os == "linux" and checkout_clangd',
|
||||
},
|
||||
{
|
||||
'object_name': 'Linux_x64/llvm-code-coverage-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': '5768970291fb6173bc69c342235e9dcc53c2c475acde8422e7787a8f8170bdd8',
|
||||
'size_bytes': 2251652,
|
||||
'generation': 1754486730690951,
|
||||
'object_name': 'Linux_x64/llvm-code-coverage-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '56bb0093e2e8f71e682f03b0e379d7dac0bacfcc83bfccfd42a4fcd1310fbe75',
|
||||
'size_bytes': 2272396,
|
||||
'generation': 1758743123592944,
|
||||
'condition': 'host_os == "linux" and checkout_clang_coverage_tools',
|
||||
},
|
||||
{
|
||||
'object_name': 'Linux_x64/llvmobjdump-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': '861c331f1bab58556bd84f33632667fd5af90402f94fb104f8b06dc039a8f598',
|
||||
'size_bytes': 5619264,
|
||||
'generation': 1754486730668455,
|
||||
'object_name': 'Linux_x64/llvmobjdump-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '9236697d64fc9444b22c90a112f6b3a76ee1edf5b3891af67de0849deb274514',
|
||||
'size_bytes': 5666148,
|
||||
'generation': 1758743123461779,
|
||||
'condition': '(checkout_linux or checkout_mac or checkout_android) and host_os == "linux"',
|
||||
},
|
||||
{
|
||||
'object_name': 'Mac/clang-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': '484e1b4128566635f123aefd6f9db9f0a1e99f462c247d2393941eb1a6b2efe2',
|
||||
'size_bytes': 52422108,
|
||||
'generation': 1754486732274509,
|
||||
'object_name': 'Mac/clang-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '90e1a97b91d9a39bafc719f5e3b4c3cd8bf457c39f1dc4a27e4bfc59b9331bc5',
|
||||
'size_bytes': 53576996,
|
||||
'generation': 1758743125100350,
|
||||
'condition': 'host_os == "mac" and host_cpu == "x64"',
|
||||
},
|
||||
{
|
||||
'object_name': 'Mac/clang-mac-runtime-library-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': '9a1fc6d92af9af410736066c8fff34cd1f95b3e3696b2b6dd581f8021eb74abc',
|
||||
'size_bytes': 996044,
|
||||
'generation': 1754486741367172,
|
||||
'object_name': 'Mac/clang-mac-runtime-library-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '7140b54db5936c79bb6f216ea176be70c7e6711f0dec2224369fba76cb9c1572',
|
||||
'size_bytes': 1004900,
|
||||
'generation': 1758743135101043,
|
||||
'condition': 'checkout_mac and not host_os == "mac"',
|
||||
},
|
||||
{
|
||||
'object_name': 'Mac/clang-tidy-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': '4a4a9dcfe0b11c50e9cfb86963b7014dedf53e2de951fd573713803d45c3fb0f',
|
||||
'size_bytes': 13749248,
|
||||
'generation': 1754486732350716,
|
||||
'object_name': 'Mac/clang-tidy-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '0ef9124d5c56825ebbd10539298400a0b0d1d8d67e0902a7e89b3fecff7f9b0c',
|
||||
'size_bytes': 14141008,
|
||||
'generation': 1758743125225488,
|
||||
'condition': 'host_os == "mac" and host_cpu == "x64" and checkout_clang_tidy',
|
||||
},
|
||||
{
|
||||
'object_name': 'Mac/clangd-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': 'a26a4bc078745f89a5aee6ba20e3507de4497e236592116e304510ce669d5760',
|
||||
'size_bytes': 15159680,
|
||||
'generation': 1754486732421420,
|
||||
'object_name': 'Mac/clangd-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': 'aec08495162681dbfe4e78bd6c728e6f1f410f3fe6c0e070c095dcf4bfda1382',
|
||||
'size_bytes': 15632104,
|
||||
'generation': 1758743125301839,
|
||||
'condition': 'host_os == "mac" and host_cpu == "x64" and checkout_clangd',
|
||||
},
|
||||
{
|
||||
'object_name': 'Mac/llvm-code-coverage-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': 'f1b13f22aa030969870d72eaee9a3cfa633c41c811d6a4ee442e616ce4836202',
|
||||
'size_bytes': 2283192,
|
||||
'generation': 1754486732574927,
|
||||
'object_name': 'Mac/llvm-code-coverage-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '36b279a1a6dc9d90e932823138f522e3c2741005e34732bce60fea60881a3963',
|
||||
'size_bytes': 2321200,
|
||||
'generation': 1758743125546947,
|
||||
'condition': 'host_os == "mac" and host_cpu == "x64" and checkout_clang_coverage_tools',
|
||||
},
|
||||
{
|
||||
'object_name': 'Mac/llvmobjdump-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': '99dbba5b4f8eb4b7bd6675d0589a4809576bceb4fc857474302d00b545945dcd',
|
||||
'size_bytes': 5489896,
|
||||
'generation': 1754486732472583,
|
||||
'object_name': 'Mac/llvmobjdump-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '29e8b2d333ecb6640cf99d9103b999ff2be0bb13fe8300528b4245bf6b88869c',
|
||||
'size_bytes': 5582716,
|
||||
'generation': 1758743125362967,
|
||||
'condition': 'host_os == "mac" and host_cpu == "x64"',
|
||||
},
|
||||
{
|
||||
'object_name': 'Mac_arm64/clang-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': '7b99ec0bd96307f6eee85abbe9efe97d341051d7572e65d56f99b0e981fdc2c6',
|
||||
'size_bytes': 43856532,
|
||||
'generation': 1754486742864144,
|
||||
'object_name': 'Mac_arm64/clang-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '29d82cb9830396c21b967a5784f838dcb3d62abfebd08d67d36821dba6eb4ce8',
|
||||
'size_bytes': 44576940,
|
||||
'generation': 1758743136591599,
|
||||
'condition': 'host_os == "mac" and host_cpu == "arm64"',
|
||||
},
|
||||
{
|
||||
'object_name': 'Mac_arm64/clang-tidy-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': '9c9538cb6c5e431ff030b524ab456775c914dcff8d29751bd02eb991948fc588',
|
||||
'size_bytes': 11831704,
|
||||
'generation': 1754486742856483,
|
||||
'object_name': 'Mac_arm64/clang-tidy-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '8d7781b19bd032eeda7a94810e5429e0501392ac5585fcd16499a3d72e12ab9e',
|
||||
'size_bytes': 12142468,
|
||||
'generation': 1758743136678250,
|
||||
'condition': 'host_os == "mac" and host_cpu == "arm64" and checkout_clang_tidy',
|
||||
},
|
||||
{
|
||||
'object_name': 'Mac_arm64/clangd-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': '6dbb3d3d584e8d2c778f89f48bf9614bfce8e9d5876e03dbc91747991eec33b1',
|
||||
'size_bytes': 12138872,
|
||||
'generation': 1754486742962580,
|
||||
'object_name': 'Mac_arm64/clangd-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '16617a896e7304ba76af9cbcab00edeb63753804237fc5055810b2049d00b3dc',
|
||||
'size_bytes': 12474420,
|
||||
'generation': 1758743136764487,
|
||||
'condition': 'host_os == "mac" and host_cpu == "arm64" and checkout_clangd',
|
||||
},
|
||||
{
|
||||
'object_name': 'Mac_arm64/llvm-code-coverage-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': '0e58aceeb995192461b4a26f059694346e869ba2c2ed806c38e74ed92a3fcf0f',
|
||||
'size_bytes': 1933704,
|
||||
'generation': 1754486743038880,
|
||||
'object_name': 'Mac_arm64/llvm-code-coverage-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '3ae73edf0d6b69d6aa41247c8268aaf292630f708036d55f3e0e5fa2ce340497',
|
||||
'size_bytes': 1947856,
|
||||
'generation': 1758743136945536,
|
||||
'condition': 'host_os == "mac" and host_cpu == "arm64" and checkout_clang_coverage_tools',
|
||||
},
|
||||
{
|
||||
'object_name': 'Mac_arm64/llvmobjdump-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': 'd197d5d7581336a63a11f3cb8ca3d3f807c9f6032a21616d029573b90633fed5',
|
||||
'size_bytes': 5243848,
|
||||
'generation': 1754486742944902,
|
||||
'object_name': 'Mac_arm64/llvmobjdump-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '069266d0ab2b9029908edc0b958af5d5ec3d9cd939b063da7aeeb53548137df9',
|
||||
'size_bytes': 5277360,
|
||||
'generation': 1758743136838343,
|
||||
'condition': 'host_os == "mac" and host_cpu == "arm64"',
|
||||
},
|
||||
{
|
||||
'object_name': 'Win/clang-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': '1f3dc2b70567abfa52effbcdcd271aa54fbe5e4325e91a2d488748998df79f7e',
|
||||
'size_bytes': 47038772,
|
||||
'generation': 1754486753863077,
|
||||
'object_name': 'Win/clang-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': 'bec899a7163ba0d446a5355e554cf8644b5e3db729404c6defb077549bc9f1b4',
|
||||
'size_bytes': 47645664,
|
||||
'generation': 1758743148772393,
|
||||
'condition': 'host_os == "win"',
|
||||
},
|
||||
{
|
||||
'object_name': 'Win/clang-tidy-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': '0e640abc3d4335945662024d0583017ef073d6db59171fad290ee0b86de099bc',
|
||||
'size_bytes': 13681872,
|
||||
'generation': 1754486754006910,
|
||||
'object_name': 'Win/clang-tidy-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '10770b3b7b34a0e968cbeb1838b1446080897941c2bb5d192aa6596bbb386c27',
|
||||
'size_bytes': 14025008,
|
||||
'generation': 1758743148836717,
|
||||
'condition': 'host_os == "win" and checkout_clang_tidy',
|
||||
},
|
||||
{
|
||||
'object_name': 'Win/clang-win-runtime-library-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': '3e41cf1c8b4d5996e60353e282e0219608f134ca475a16541f536a63bf1a036f',
|
||||
'size_bytes': 2483996,
|
||||
'generation': 1754486763172399,
|
||||
'object_name': 'Win/clang-win-runtime-library-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': 'fedf17614b4cba1c8edc7f3ad1c4636bb79535068e76ad6fed75fe65515dc4b8',
|
||||
'size_bytes': 2503180,
|
||||
'generation': 1758743159444585,
|
||||
'condition': 'checkout_win and not host_os == "win"',
|
||||
},
|
||||
{
|
||||
'object_name': 'Win/clangd-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': 'd65400e92d8d7393511dc6beab1a2c8be2d4a5b5d946f957a6b55f8e39f666a4',
|
||||
'size_bytes': 14175060,
|
||||
'generation': 1754486754078416,
|
||||
'object_name': 'Win/clangd-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '6dcd1c2f3bd7dbd547f8b93b014a3bc9f9d84b0920fc7632f45a6bfc1b359ae1',
|
||||
'size_bytes': 14366920,
|
||||
'generation': 1758743148925930,
|
||||
'condition': 'host_os == "win" and checkout_clangd',
|
||||
},
|
||||
{
|
||||
'object_name': 'Win/llvm-code-coverage-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': '01f7cec8caee5cbc89107f0b287b7f41a4c26979bbec3d88f3eee5faebee4c5e',
|
||||
'size_bytes': 2349144,
|
||||
'generation': 1754486754112875,
|
||||
'object_name': 'Win/llvm-code-coverage-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '57e86c6eeeccb1e6e5b87d87c2231f01e006d9067e2f3ad50530e32674599ad6',
|
||||
'size_bytes': 2366460,
|
||||
'generation': 1758743149180966,
|
||||
'condition': 'host_os == "win" and checkout_clang_coverage_tools',
|
||||
},
|
||||
{
|
||||
'object_name': 'Win/llvmobjdump-llvmorg-21-init-16348-gbd809ffb-17.tar.xz',
|
||||
'sha256sum': 'f4048cb8c08849e3f4ff8228ccaca4cf08789023df28bdf5cbad07aa0e245b45',
|
||||
'size_bytes': 5603744,
|
||||
'generation': 1754486754075834,
|
||||
'object_name': 'Win/llvmobjdump-llvmorg-22-init-8940-g4d4cb757-1.tar.xz',
|
||||
'sha256sum': '3f398db586e4f75a48eda2a508be4577a9c54cda78cf03afa57b454801ed5bde',
|
||||
'size_bytes': 5668924,
|
||||
'generation': 1758743148999346,
|
||||
'condition': '(checkout_linux or checkout_mac or checkout_android) and host_os == "win"',
|
||||
},
|
||||
],
|
||||
|
|
@ -490,7 +490,7 @@ deps = {
|
|||
'third_party/perfetto':
|
||||
Var('android_url') + '/platform/external/perfetto.git' + '@' + '40b529923598b739b2892a536a7692eedbed5685',
|
||||
'third_party/protobuf':
|
||||
Var('chromium_url') + '/chromium/src/third_party/protobuf.git' + '@' + 'e354a43e42a0ecacd1d65caafb8d40b21f4b4286',
|
||||
Var('chromium_url') + '/chromium/src/third_party/protobuf.git' + '@' + 'fef7a765bb0d1122d32b99f588537b83e2dffe7b',
|
||||
'third_party/re2/src':
|
||||
Var('chromium_url') + '/external/github.com/google/re2.git' + '@' + '6569a9a3df256f4c0c3813cb8ee2f8eef6e2c1fb',
|
||||
'third_party/requests': {
|
||||
|
|
@ -498,41 +498,41 @@ deps = {
|
|||
'condition': 'checkout_android',
|
||||
},
|
||||
'tools/rust':
|
||||
Var('chromium_url') + '/chromium/src/tools/rust' + '@' + 'f71995f8f92a8d6b0658fafce0f0dde769edabfa',
|
||||
Var('chromium_url') + '/chromium/src/tools/rust' + '@' + 'f93e7ca2a64938e9b4759ec3297f02ca7b3f605f',
|
||||
'tools/win':
|
||||
Var('chromium_url') + '/chromium/src/tools/win' + '@' + '89d58ebe78f02706d414154f923f759a05a887b6',
|
||||
Var('chromium_url') + '/chromium/src/tools/win' + '@' + '2cbfc8d2e5ef4a6afd9774e9a9eaebd921a9f248',
|
||||
'third_party/rust':
|
||||
Var('chromium_url') + '/chromium/src/third_party/rust' + '@' + 'f058a25008a8a6b48a35656b21f0589535134d0f',
|
||||
Var('chromium_url') + '/chromium/src/third_party/rust' + '@' + '667365a2aef07b2a9065a53beddbc8ea60ff5c6d',
|
||||
'third_party/rust-toolchain': {
|
||||
'dep_type': 'gcs',
|
||||
'bucket': 'chromium-browser-clang',
|
||||
'objects': [
|
||||
{
|
||||
'object_name': 'Linux_x64/rust-toolchain-22be76b7e259f27bf3e55eb931f354cd8b69d55f-4-llvmorg-21-init-16348-gbd809ffb.tar.xz',
|
||||
'sha256sum': '3e5cf980edb893cbdc915d62bce1b29b896eda6df6455e145200bf25a52576b1',
|
||||
'size_bytes': 159517088,
|
||||
'generation': 1756377175296503,
|
||||
'object_name': 'Linux_x64/rust-toolchain-15283f6fe95e5b604273d13a428bab5fc0788f5a-1-llvmorg-22-init-8940-g4d4cb757.tar.xz',
|
||||
'sha256sum': '2bdaea0b11cb11a8f2f4dcb79b0dbb4bf38e2bd22479ff8014f55b9b6890e135',
|
||||
'size_bytes': 142044388,
|
||||
'generation': 1758743116775859,
|
||||
'condition': 'host_os == "linux"',
|
||||
},
|
||||
{
|
||||
'object_name': 'Mac/rust-toolchain-22be76b7e259f27bf3e55eb931f354cd8b69d55f-4-llvmorg-21-init-16348-gbd809ffb.tar.xz',
|
||||
'sha256sum': '8f0d15259a48df6c284ebcfb9dfb0ecba77d8267620aae1ff42d23a2f595ad77',
|
||||
'size_bytes': 132425148,
|
||||
'generation': 1756377177172203,
|
||||
'object_name': 'Mac/rust-toolchain-15283f6fe95e5b604273d13a428bab5fc0788f5a-1-llvmorg-22-init-8940-g4d4cb757.tar.xz',
|
||||
'sha256sum': '351347e1930a900c63b3953cdb10775b73572c6145e389f3820ba920816d46ca',
|
||||
'size_bytes': 135891820,
|
||||
'generation': 1758743118329536,
|
||||
'condition': 'host_os == "mac" and host_cpu == "x64"',
|
||||
},
|
||||
{
|
||||
'object_name': 'Mac_arm64/rust-toolchain-22be76b7e259f27bf3e55eb931f354cd8b69d55f-4-llvmorg-21-init-16348-gbd809ffb.tar.xz',
|
||||
'sha256sum': 'ef0f5795e28fde6b0708647500fc94138e9518f173c3e99321cd8918006f606c',
|
||||
'size_bytes': 120345408,
|
||||
'generation': 1756377179094363,
|
||||
'object_name': 'Mac_arm64/rust-toolchain-15283f6fe95e5b604273d13a428bab5fc0788f5a-1-llvmorg-22-init-8940-g4d4cb757.tar.xz',
|
||||
'sha256sum': '33d6b8cf4fc6617aa98888a46bc1dbef29ae9a9ebd01c3f248ef8c08ec5f198b',
|
||||
'size_bytes': 123302332,
|
||||
'generation': 1758743119839246,
|
||||
'condition': 'host_os == "mac" and host_cpu == "arm64"',
|
||||
},
|
||||
{
|
||||
'object_name': 'Win/rust-toolchain-22be76b7e259f27bf3e55eb931f354cd8b69d55f-4-llvmorg-21-init-16348-gbd809ffb.tar.xz',
|
||||
'sha256sum': '056cfdae49dd3d73b38ca7ef8245dec2105c7a77b47efba99995552ea1d89f6e',
|
||||
'size_bytes': 194943632,
|
||||
'generation': 1756377180954050,
|
||||
'object_name': 'Win/rust-toolchain-15283f6fe95e5b604273d13a428bab5fc0788f5a-1-llvmorg-22-init-8940-g4d4cb757.tar.xz',
|
||||
'sha256sum': '4f6dfa230e5d401bf9aadd804142b412467177b17d50a3f52a8c69c1957aa2db',
|
||||
'size_bytes': 199998880,
|
||||
'generation': 1758743121322555,
|
||||
'condition': 'host_os == "win"',
|
||||
},
|
||||
],
|
||||
|
|
@ -548,13 +548,13 @@ deps = {
|
|||
'condition': 'not build_with_chromium and host_cpu != "s390" and host_os != "zos" and host_cpu != "ppc"',
|
||||
},
|
||||
'third_party/zlib':
|
||||
Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'caf4afa1afc92e16fef429f182444bed98a46a6c',
|
||||
Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '85f05b0835f934e52772efc308baa80cdd491838',
|
||||
'tools/clang':
|
||||
Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '3895bad8d8b0aa864c77deff02fd42ff7b2732d1',
|
||||
Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'b1d66053e6460f04dbe81d77cfeaa9a5d50dee3e',
|
||||
'tools/protoc_wrapper':
|
||||
Var('chromium_url') + '/chromium/src/tools/protoc_wrapper.git' + '@' + '3438d4183bfc7c0d6850e8b970204cc8189f0323',
|
||||
'third_party/abseil-cpp': {
|
||||
'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + '5141e83267542f8869adf18b5bd6440440d6801e',
|
||||
'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + 'c3655ab8bb514aa318207c2685b3ba557a048201',
|
||||
'condition': 'not build_with_chromium',
|
||||
},
|
||||
'third_party/zoslib': {
|
||||
|
|
|
|||
deps/v8/MODULE.bazel (vendored): 2 changed lines
|
|
@ -7,7 +7,7 @@ bazel_dep(name = "bazel_skylib", version = "1.7.1")
|
|||
bazel_dep(name = "rules_cc", version = "0.1.2")
|
||||
bazel_dep(name = "rules_python", version = "1.0.0")
|
||||
bazel_dep(name = "platforms", version = "0.0.11")
|
||||
bazel_dep(name = "abseil-cpp", version = "20240722.0.bcr.2")
|
||||
bazel_dep(name = "abseil-cpp", version = "20250814.0")
|
||||
bazel_dep(name = "highway", version = "1.2.0")
|
||||
|
||||
pip = use_extension("@rules_python//python/extensions:pip.bzl", "pip")
|
||||
|
|
|
|||
deps/v8/PRESUBMIT.py (vendored): 54 changed lines
|
|
@ -172,8 +172,15 @@ def _CheckUnwantedDependencies(input_api, output_api):
|
|||
# eval-ed and thus doesn't have __file__.
|
||||
original_sys_path = sys.path
|
||||
try:
|
||||
sys.path = sys.path + [input_api.os_path.join(
|
||||
input_api.PresubmitLocalPath(), 'buildtools', 'checkdeps')]
|
||||
root = input_api.PresubmitLocalPath()
|
||||
if not os.path.exists(os.path.join(root, 'buildtools')):
|
||||
root = os.path.dirname(root)
|
||||
if not os.path.exists(os.path.join(root, 'buildtools')):
|
||||
raise RuntimeError(
|
||||
"Failed to find //buildtools directory for checkdeps")
|
||||
sys.path = sys.path + [
|
||||
input_api.os_path.join(root, 'buildtools', 'checkdeps')
|
||||
]
|
||||
import checkdeps
|
||||
from cpp_checker import CppChecker
|
||||
from rules import Rule
|
||||
|
|
@ -485,6 +492,7 @@ def _CommonChecks(input_api, output_api):
|
|||
_CheckInlineHeadersIncludeNonInlineHeadersFirst,
|
||||
_CheckJSONFiles,
|
||||
_CheckNoexceptAnnotations,
|
||||
_CheckBannedCpp,
|
||||
_RunTestsWithVPythonSpec,
|
||||
_CheckPythonLiterals,
|
||||
]
|
||||
|
|
@ -607,6 +615,48 @@ def _CheckNoexceptAnnotations(input_api, output_api):
|
|||
return []
|
||||
|
||||
|
||||
def _CheckBannedCpp(input_api, output_api):
|
||||
# We only check for a single pattern right now; feel free to add more, but
|
||||
# potentially change the logic for files_to_skip then (and skip individual
|
||||
# checks for individual files instead).
|
||||
bad_cpp = [
|
||||
('std::bit_cast',
|
||||
'Use base::bit_cast instead, which has additional checks'),
|
||||
]
|
||||
|
||||
def file_filter(x):
|
||||
return input_api.FilterSourceFile(
|
||||
x,
|
||||
files_to_skip=[
|
||||
# The implementation of base::bit_cast uses std::bit_cast.
|
||||
r'src/base/macros\.h',
|
||||
# src/base/numerics is a dependency-free header-only library, hence
|
||||
# uses std::bit_cast directly.
|
||||
r'src/base/numerics/.*'
|
||||
],
|
||||
files_to_check=[r'.*\.h$', r'.*\.cc$'])
|
||||
|
||||
errors = []
|
||||
for f in input_api.AffectedSourceFiles(file_filter):
|
||||
for line_number, line in f.ChangedContents():
|
||||
for pattern, message in bad_cpp:
|
||||
if not pattern in line:
|
||||
continue
|
||||
# Skip if part of a comment.
|
||||
if '//' in line and line.index('//') < line.index(pattern):
|
||||
continue
|
||||
|
||||
# Make sure there are word separators around the pattern.
|
||||
regex = r'\b%s\b' % pattern
|
||||
if not input_api.re.search(regex, line):
|
||||
continue
|
||||
|
||||
errors.append(
|
||||
output_api.PresubmitError('Banned pattern ({}):\n {}:{} {}'.format(
|
||||
regex, f.LocalPath(), line_number, message)))
|
||||
return errors
|
||||
|
||||
|
||||
def CheckChangeOnUpload(input_api, output_api):
|
||||
results = []
|
||||
results.extend(_CommonChecks(input_api, output_api))
|
||||
|
|
|
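As an aside on the banned-pattern check above: a minimal C++ sketch of the pattern it enforces inside V8's own sources, assuming the base::bit_cast helper from src/base/macros.h that the check's message refers to.

#include <cstdint>
#include "src/base/macros.h"  // assumed location of base::bit_cast inside V8

uint64_t DoubleBits(double value) {
  // Preferred inside V8 sources: base::bit_cast layers additional checks on
  // top of the standard facility.
  return v8::base::bit_cast<uint64_t>(value);
  // Banned by the presubmit check above (outside the allow-listed files):
  //   return std::bit_cast<uint64_t>(value);
}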
|||
4
deps/v8/bazel/defs.bzl
vendored
4
deps/v8/bazel/defs.bzl
vendored
|
|
@ -8,6 +8,8 @@ This module contains helper functions to compile V8.
|
|||
|
||||
load("@rules_cc//cc:cc_library.bzl", "cc_library")
|
||||
load("@rules_cc//cc:cc_binary.bzl", "cc_binary")
|
||||
load("@rules_cc//cc/common:cc_common.bzl", "cc_common")
|
||||
load("@rules_cc//cc/common:cc_info.bzl", "CcInfo")
|
||||
|
||||
FlagInfo = provider("The value of an option.",
|
||||
fields = ["value"])
|
||||
|
|
@ -441,7 +443,7 @@ def _v8_target_cpu_transition_impl(settings,
|
|||
"armeabi-v7a": "arm32",
|
||||
"s390x": "s390x",
|
||||
"riscv64": "riscv64",
|
||||
"ppc64": "ppc64le",
|
||||
"ppc": "ppc64le",
|
||||
}
|
||||
v8_target_cpu = mapping[settings["//command_line_option:cpu"]]
|
||||
return {"@v8//bazel/config:v8_target_cpu": v8_target_cpu}
|
||||
|
|
|
|||
6
deps/v8/gni/v8.gni
vendored
6
deps/v8/gni/v8.gni
vendored
|
|
@ -218,6 +218,11 @@ declare_args() {
|
|||
v8_enable_pointer_compression_shared_cage = ""
|
||||
v8_enable_31bit_smis_on_64bit_arch = false
|
||||
|
||||
# When `v8_enable_pointer_compression_shared_cage` is enabled, RO space is placed into a
|
||||
# contiguous area at the front of the cage. In case RO allocation fails, this
|
||||
# size needs to be adjusted.
|
||||
v8_contiguous_compressed_ro_space_size_mb = 8
|
||||
|
||||
# Change code emission and runtime features to be CET shadow-stack compliant
|
||||
# (incomplete and experimental).
|
||||
v8_enable_cet_shadow_stack = false
|
||||
|
|
@ -332,6 +337,7 @@ v8_add_configs = [
|
|||
v8_path_prefix + ":features",
|
||||
v8_path_prefix + ":toolchain",
|
||||
v8_path_prefix + ":strict_warnings",
|
||||
v8_path_prefix + ":sanitizer_defines",
|
||||
]
|
||||
|
||||
if (is_debug && !v8_optimized_debug) {
|
||||
|
|
|
|||
8
deps/v8/include/js_protocol.pdl
vendored
8
deps/v8/include/js_protocol.pdl
vendored
|
|
@ -794,6 +794,8 @@ experimental domain HeapProfiler
|
|||
# Average sample interval in bytes. Poisson distribution is used for the intervals. The
|
||||
# default value is 32768 bytes.
|
||||
optional number samplingInterval
|
||||
# Maximum stack depth. The default value is 128.
|
||||
optional number stackDepth
|
||||
# By default, the sampling heap profiler reports only objects which are
|
||||
# still alive when the profile is returned via getSamplingProfile or
|
||||
# stopSampling, which is useful for determining what functions contribute
|
||||
|
|
@ -1140,6 +1142,8 @@ domain Runtime
|
|||
dataview
|
||||
webassemblymemory
|
||||
wasmvalue
|
||||
# blink's subtypes.
|
||||
trustedtype
|
||||
# Object class (constructor) name. Specified for `object` type values only.
|
||||
optional string className
|
||||
# Remote object value in case of primitive values or JSON values (if it was requested).
|
||||
|
|
@ -1201,6 +1205,8 @@ domain Runtime
|
|||
dataview
|
||||
webassemblymemory
|
||||
wasmvalue
|
||||
# blink's subtypes.
|
||||
trustedtype
|
||||
# String representation of the object.
|
||||
optional string description
|
||||
# True iff some of the properties or entries of the original object did not fit.
|
||||
|
|
@ -1250,6 +1256,8 @@ domain Runtime
|
|||
dataview
|
||||
webassemblymemory
|
||||
wasmvalue
|
||||
# blink's subtypes.
|
||||
trustedtype
|
||||
|
||||
experimental type EntryPreview extends object
|
||||
properties
|
||||
|
|
|
|||
7
deps/v8/include/v8-callbacks.h
vendored
7
deps/v8/include/v8-callbacks.h
vendored
|
|
@ -184,6 +184,7 @@ enum GCCallbackFlags {
|
|||
kGCCallbackFlagCollectAllAvailableGarbage = 1 << 4,
|
||||
kGCCallbackFlagCollectAllExternalMemory = 1 << 5,
|
||||
kGCCallbackScheduleIdleGarbageCollection = 1 << 6,
|
||||
kGCCallbackFlagLastResort = 1 << 7,
|
||||
};
|
||||
|
||||
using GCCallback = void (*)(GCType type, GCCallbackFlags flags);
|
||||
|
|
@ -321,9 +322,6 @@ using WasmAsyncResolvePromiseCallback = void (*)(
|
|||
using WasmLoadSourceMapCallback = Local<String> (*)(Isolate* isolate,
|
||||
const char* name);
|
||||
|
||||
// --- Callback for checking if WebAssembly imported strings are enabled ---
|
||||
using WasmImportedStringsEnabledCallback = bool (*)(Local<Context> context);
|
||||
|
||||
// --- Callback for checking if WebAssembly Custom Descriptors are enabled ---
|
||||
using WasmCustomDescriptorsEnabledCallback = bool (*)(Local<Context> context);
|
||||
|
||||
|
|
@ -331,9 +329,6 @@ using WasmCustomDescriptorsEnabledCallback = bool (*)(Local<Context> context);
|
|||
using SharedArrayBufferConstructorEnabledCallback =
|
||||
bool (*)(Local<Context> context);
|
||||
|
||||
// --- Callback for checking if WebAssembly JSPI is enabled ---
|
||||
using WasmJSPIEnabledCallback = bool (*)(Local<Context> context);
|
||||
|
||||
/**
|
||||
* Import phases in import requests.
|
||||
*/
|
||||
|
|
|
|||
63
deps/v8/include/v8-context.h
vendored
63
deps/v8/include/v8-context.h
vendored
|
|
@ -255,12 +255,6 @@ class V8_EXPORT Context : public Data {
|
|||
*/
|
||||
Maybe<void> DeepFreeze(DeepFreezeDelegate* delegate = nullptr);
|
||||
|
||||
/** Returns the isolate associated with a current context. */
|
||||
V8_DEPRECATED(
|
||||
"Use Isolate::GetCurrent() instead, which is guaranteed to return the "
|
||||
"same isolate since https://crrev.com/c/6458560.")
|
||||
Isolate* GetIsolate();
|
||||
|
||||
/** Returns the microtask queue associated with a current context. */
|
||||
MicrotaskQueue* GetMicrotaskQueue();
|
||||
|
||||
|
|
@ -305,9 +299,30 @@ class V8_EXPORT Context : public Data {
|
|||
* SetAlignedPointerInEmbedderData with the same index. Note that index 0
|
||||
* currently has a special meaning for Chrome's debugger.
|
||||
*/
|
||||
V8_INLINE void* GetAlignedPointerFromEmbedderData(Isolate* isolate, int index,
|
||||
EmbedderDataTypeTag tag);
|
||||
V8_INLINE void* GetAlignedPointerFromEmbedderData(int index,
|
||||
EmbedderDataTypeTag tag);
|
||||
|
||||
V8_DEPRECATE_SOON(
|
||||
"Use GetAlignedPointerFromEmbedderData with EmbedderDataTypeTag "
|
||||
"parameter instead.")
|
||||
V8_INLINE void* GetAlignedPointerFromEmbedderData(Isolate* isolate,
|
||||
int index);
|
||||
V8_INLINE void* GetAlignedPointerFromEmbedderData(int index);
|
||||
int index) {
|
||||
return GetAlignedPointerFromEmbedderData(isolate, index,
|
||||
kEmbedderDataTypeTagDefault);
|
||||
}
|
||||
|
||||
V8_DEPRECATE_SOON(
|
||||
"Use GetAlignedPointerFromEmbedderData with EmbedderDataTypeTag "
|
||||
"parameter instead.")
|
||||
V8_INLINE void* GetAlignedPointerFromEmbedderData(int index) {
|
||||
return GetAlignedPointerFromEmbedderData(index,
|
||||
kEmbedderDataTypeTagDefault);
|
||||
}
|
||||
|
||||
void SetAlignedPointerInEmbedderData(int index, void* value,
|
||||
EmbedderDataTypeTag tag);
|
||||
|
||||
/**
|
||||
* Sets a 2-byte-aligned native pointer in the embedder data with the given
|
||||
|
|
@ -317,10 +332,9 @@ class V8_EXPORT Context : public Data {
|
|||
V8_DEPRECATE_SOON(
|
||||
"Use SetAlignedPointerInEmbedderData with EmbedderDataTypeTag parameter "
|
||||
"instead.")
|
||||
void SetAlignedPointerInEmbedderData(int index, void* value);
|
||||
|
||||
void SetAlignedPointerInEmbedderData(int index, void* value,
|
||||
EmbedderDataTypeTag slot);
|
||||
void SetAlignedPointerInEmbedderData(int index, void* value) {
|
||||
SetAlignedPointerInEmbedderData(index, value, kEmbedderDataTypeTagDefault);
|
||||
}
|
||||
|
||||
/**
|
||||
* Control whether code generation from strings is allowed. Calling
|
||||
|
|
@ -439,7 +453,8 @@ class V8_EXPORT Context : public Data {
|
|||
internal::ValueHelper::InternalRepresentationType GetDataFromSnapshotOnce(
|
||||
size_t index);
|
||||
Local<Value> SlowGetEmbedderData(int index);
|
||||
void* SlowGetAlignedPointerFromEmbedderData(int index);
|
||||
void* SlowGetAlignedPointerFromEmbedderData(int index,
|
||||
EmbedderDataTypeTag tag);
|
||||
};
|
||||
|
||||
// --- Implementation ---
|
||||
|
|
@ -467,7 +482,8 @@ Local<Value> Context::GetEmbedderData(int index) {
|
|||
#endif
|
||||
}
|
||||
|
||||
void* Context::GetAlignedPointerFromEmbedderData(Isolate* isolate, int index) {
|
||||
void* Context::GetAlignedPointerFromEmbedderData(Isolate* isolate, int index,
|
||||
EmbedderDataTypeTag tag) {
|
||||
#if !defined(V8_ENABLE_CHECKS)
|
||||
using A = internal::Address;
|
||||
using I = internal::Internals;
|
||||
|
|
@ -477,16 +493,15 @@ void* Context::GetAlignedPointerFromEmbedderData(Isolate* isolate, int index) {
|
|||
int value_offset = I::kEmbedderDataArrayHeaderSize +
|
||||
(I::kEmbedderDataSlotSize * index) +
|
||||
I::kEmbedderDataSlotExternalPointerOffset;
|
||||
return reinterpret_cast<void*>(
|
||||
I::ReadExternalPointerField<{internal::kFirstEmbedderDataTag,
|
||||
internal::kLastEmbedderDataTag}>(
|
||||
isolate, embedder_data, value_offset));
|
||||
return reinterpret_cast<void*>(I::ReadExternalPointerField(
|
||||
isolate, embedder_data, value_offset, ToExternalPointerTag(tag)));
|
||||
#else
|
||||
return SlowGetAlignedPointerFromEmbedderData(index);
|
||||
return SlowGetAlignedPointerFromEmbedderData(index, tag);
|
||||
#endif
|
||||
}
|
||||
|
||||
void* Context::GetAlignedPointerFromEmbedderData(int index) {
|
||||
void* Context::GetAlignedPointerFromEmbedderData(int index,
|
||||
EmbedderDataTypeTag tag) {
|
||||
#if !defined(V8_ENABLE_CHECKS)
|
||||
using A = internal::Address;
|
||||
using I = internal::Internals;
|
||||
|
|
@ -497,12 +512,10 @@ void* Context::GetAlignedPointerFromEmbedderData(int index) {
|
|||
(I::kEmbedderDataSlotSize * index) +
|
||||
I::kEmbedderDataSlotExternalPointerOffset;
|
||||
Isolate* isolate = I::GetCurrentIsolateForSandbox();
|
||||
return reinterpret_cast<void*>(
|
||||
I::ReadExternalPointerField<{internal::kFirstEmbedderDataTag,
|
||||
internal::kLastEmbedderDataTag}>(
|
||||
isolate, embedder_data, value_offset));
|
||||
return reinterpret_cast<void*>(I::ReadExternalPointerField(
|
||||
isolate, embedder_data, value_offset, ToExternalPointerTag(tag)));
|
||||
#else
|
||||
return SlowGetAlignedPointerFromEmbedderData(index);
|
||||
return SlowGetAlignedPointerFromEmbedderData(index, tag);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
|
|
|||
5
deps/v8/include/v8-data.h
vendored
5
deps/v8/include/v8-data.h
vendored
|
|
@ -52,6 +52,11 @@ class V8_EXPORT Data {
|
|||
*/
|
||||
bool IsFunctionTemplate() const;
|
||||
|
||||
/**
|
||||
* Returns true if this data is a |v8::DictionaryTemplate|.
|
||||
*/
|
||||
bool IsDictionaryTemplate() const;
|
||||
|
||||
/**
|
||||
* Returns true if this data is a |v8::Context|.
|
||||
*/
|
||||
|
|
|
|||
10
deps/v8/include/v8-inspector.h
vendored
10
deps/v8/include/v8-inspector.h
vendored
|
|
@ -288,11 +288,21 @@ class V8_EXPORT V8InspectorClient {
|
|||
|
||||
virtual void installAdditionalCommandLineAPI(v8::Local<v8::Context>,
|
||||
v8::Local<v8::Object>) {}
|
||||
// Deprecated. Use version with contextId.
|
||||
virtual void consoleAPIMessage(int contextGroupId,
|
||||
v8::Isolate::MessageErrorLevel level,
|
||||
const StringView& message,
|
||||
const StringView& url, unsigned lineNumber,
|
||||
unsigned columnNumber, V8StackTrace*) {}
|
||||
virtual void consoleAPIMessage(int contextGroupId, int contextId,
|
||||
v8::Isolate::MessageErrorLevel level,
|
||||
const StringView& message,
|
||||
const StringView& url, unsigned lineNumber,
|
||||
unsigned columnNumber,
|
||||
V8StackTrace* stackTrace) {
|
||||
consoleAPIMessage(contextGroupId, level, message, url, lineNumber,
|
||||
columnNumber, stackTrace);
|
||||
}
|
||||
virtual v8::MaybeLocal<v8::Value> memoryInfo(v8::Isolate*,
|
||||
v8::Local<v8::Context>) {
|
||||
return v8::MaybeLocal<v8::Value>();
|
||||
|
|
|
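A brief sketch of how an embedder's inspector client can override the contextId-aware consoleAPIMessage overload added above; the class name and the logging are illustrative only.

#include <cstdio>
#include <v8-inspector.h>

class LoggingInspectorClient : public v8_inspector::V8InspectorClient {
 public:
  void consoleAPIMessage(int contextGroupId, int contextId,
                         v8::Isolate::MessageErrorLevel level,
                         const v8_inspector::StringView& message,
                         const v8_inspector::StringView& url,
                         unsigned lineNumber, unsigned columnNumber,
                         v8_inspector::V8StackTrace* stackTrace) override {
    // The new overload receives the originating contextId directly; the
    // deprecated overload without it no longer needs to be overridden.
    std::printf("console API call in context %d (group %d), line %u\n",
                contextId, contextGroupId, lineNumber);
  }
};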
|||
72
deps/v8/include/v8-internal.h
vendored
72
deps/v8/include/v8-internal.h
vendored
|
|
@ -421,10 +421,10 @@ constexpr size_t kMaxCppHeapPointers = 0;
|
|||
|
||||
#endif // V8_COMPRESS_POINTERS
|
||||
|
||||
// The number of tags reserved for embedder data. The value is picked
|
||||
// arbitrarily. In Chrome there are 4 embedders, so at least 4 tags are needed.
|
||||
// A generic tag was used for embedder data before, so one tag is used for that.
|
||||
#define V8_EMBEDDER_DATA_TAG_COUNT 5
|
||||
// The number of tags reserved for embedder data stored in internal fields. The
|
||||
// value is picked arbitrarily, and is slightly larger than the number of tags
|
||||
// currently used in Chrome.
|
||||
#define V8_EMBEDDER_DATA_TAG_COUNT 15
|
||||
|
||||
// Generic tag range struct to represent ranges of type tags.
|
||||
//
|
||||
|
|
@ -434,6 +434,19 @@ constexpr size_t kMaxCppHeapPointers = 0;
|
|||
// which all subtypes of a given supertype use contiguous tags. This struct can
|
||||
// then be used to represent such a type range.
|
||||
//
|
||||
// As an example, consider the following type hierarchy:
|
||||
//
|
||||
//        A     F
//       / \
//      B   E
//     / \
//    C   D
|
||||
//
|
||||
// A potential type id assignment for range-based type checks is
|
||||
// {A: 0, B: 1, C: 2, D: 3, E: 4, F: 5}. With that, the type check for type A
|
||||
// would check for the range [A, E], while the check for B would check range
|
||||
// [B, D], and for F it would simply check [F, F].
|
||||
//
|
||||
// In addition, there is an option for performance tweaks: if the size of the
|
||||
// type range corresponding to a supertype is a power of two and starts at a
|
||||
// power of two (e.g. [0x100, 0x13f]), then the compiler can often optimize
|
||||
|
|
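A self-contained illustration of the range-based check the comment above describes. ExampleTagRange and the tag values follow the example assignment {A: 0, B: 1, C: 2, D: 3, E: 4, F: 5} and are not V8's internal types.

#include <cstdint>

// Illustrative only: a contiguous tag range with a single-comparison check.
struct ExampleTagRange {
  uint16_t first;
  uint16_t last;
  constexpr bool Contains(uint16_t tag) const {
    // One unsigned comparison covers the whole [first, last] interval.
    return static_cast<uint16_t>(tag - first) <=
           static_cast<uint16_t>(last - first);
  }
};

constexpr ExampleTagRange kRangeA{0, 4};  // A and its subtypes B, C, D, E
constexpr ExampleTagRange kRangeB{1, 3};  // B and its subtypes C, D
constexpr ExampleTagRange kRangeF{5, 5};  // F only

static_assert(kRangeA.Contains(3), "D is a subtype of A");
static_assert(!kRangeB.Contains(4), "E is not a subtype of B");
static_assert(kRangeF.Contains(5), "F checks [F, F]");

As the comment above notes, the same check can often be optimized further when the range starts at a power of two and its size is a power of two.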
@ -566,7 +579,6 @@ enum ExternalPointerTag : uint16_t {
|
|||
// Placeholders for embedder data.
|
||||
kFirstEmbedderDataTag,
|
||||
kLastEmbedderDataTag = kFirstEmbedderDataTag + V8_EMBEDDER_DATA_TAG_COUNT - 1,
|
||||
kEmbedderDataSlotPayloadTag = kLastEmbedderDataTag,
|
||||
// This tag essentially stands for a `void*` pointer in the V8 API, and it is
|
||||
// the Embedder's responsibility to ensure type safety (against substitution)
|
||||
// and lifetime validity of these objects.
|
||||
|
|
@ -1025,16 +1037,12 @@ class Internals {
|
|||
using Tagged_t = uint32_t;
|
||||
struct StaticReadOnlyRoot {
|
||||
#ifdef V8_ENABLE_WEBASSEMBLY
|
||||
#ifdef V8_INTL_SUPPORT
|
||||
static constexpr Tagged_t kBuildDependentTheHoleValue = 0x67b9;
|
||||
#else
|
||||
static constexpr Tagged_t kBuildDependentTheHoleValue = 0x5b1d;
|
||||
#endif
|
||||
static constexpr Tagged_t kBuildDependentTheHoleValue = 0x20001;
|
||||
#else
|
||||
#ifdef V8_INTL_SUPPORT
|
||||
static constexpr Tagged_t kBuildDependentTheHoleValue = 0x6511;
|
||||
static constexpr Tagged_t kBuildDependentTheHoleValue = 0x6559;
|
||||
#else
|
||||
static constexpr Tagged_t kBuildDependentTheHoleValue = 0x5875;
|
||||
static constexpr Tagged_t kBuildDependentTheHoleValue = 0x58bd;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
|
@ -1328,18 +1336,6 @@ class Internals {
|
|||
#endif
|
||||
}
|
||||
|
||||
V8_DEPRECATED(
|
||||
"Use GetCurrentIsolateForSandbox() instead, which is guaranteed to "
|
||||
"return the same isolate since https://crrev.com/c/6458560.")
|
||||
V8_INLINE static v8::Isolate* GetIsolateForSandbox(Address obj) {
|
||||
#ifdef V8_ENABLE_SANDBOX
|
||||
return GetCurrentIsolate();
|
||||
#else
|
||||
// Not used in non-sandbox mode.
|
||||
return nullptr;
|
||||
#endif
|
||||
}
|
||||
|
||||
// Returns v8::Isolate::Current(), but without needing to include the
|
||||
// v8-isolate.h header.
|
||||
V8_EXPORT static v8::Isolate* GetCurrentIsolate();
|
||||
|
|
@ -1383,6 +1379,34 @@ class Internals {
|
|||
#endif // V8_ENABLE_SANDBOX
|
||||
}
|
||||
|
||||
V8_INLINE static Address ReadExternalPointerField(
|
||||
v8::Isolate* isolate, Address heap_object_ptr, int offset,
|
||||
ExternalPointerTagRange tag_range) {
|
||||
#ifdef V8_ENABLE_SANDBOX
|
||||
// See src/sandbox/external-pointer-table.h. Logic duplicated here so
|
||||
// it can be inlined and doesn't require an additional call.
|
||||
Address* table = IsSharedExternalPointerType(tag_range)
|
||||
? GetSharedExternalPointerTableBase(isolate)
|
||||
: GetExternalPointerTableBase(isolate);
|
||||
internal::ExternalPointerHandle handle =
|
||||
ReadRawField<ExternalPointerHandle>(heap_object_ptr, offset);
|
||||
uint32_t index = handle >> kExternalPointerIndexShift;
|
||||
std::atomic<Address>* ptr =
|
||||
reinterpret_cast<std::atomic<Address>*>(&table[index]);
|
||||
Address entry = std::atomic_load_explicit(ptr, std::memory_order_relaxed);
|
||||
ExternalPointerTag actual_tag = static_cast<ExternalPointerTag>(
|
||||
(entry & kExternalPointerTagMask) >> kExternalPointerTagShift);
|
||||
if (V8_LIKELY(tag_range.Contains(actual_tag))) {
|
||||
return entry & kExternalPointerPayloadMask;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
return entry;
|
||||
#else
|
||||
return ReadRawField<Address>(heap_object_ptr, offset);
|
||||
#endif // V8_ENABLE_SANDBOX
|
||||
}
|
||||
|
||||
#ifdef V8_COMPRESS_POINTERS
|
||||
V8_INLINE static Address GetPtrComprCageBaseFromOnHeapAddress(Address addr) {
|
||||
return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
|
||||
|
|
|
|||
5
deps/v8/include/v8-isolate.h
vendored
5
deps/v8/include/v8-isolate.h
vendored
|
|
@ -1708,17 +1708,12 @@ class V8_EXPORT Isolate {
|
|||
|
||||
void SetWasmLoadSourceMapCallback(WasmLoadSourceMapCallback callback);
|
||||
|
||||
void SetWasmImportedStringsEnabledCallback(
|
||||
WasmImportedStringsEnabledCallback callback);
|
||||
|
||||
void SetWasmCustomDescriptorsEnabledCallback(
|
||||
WasmCustomDescriptorsEnabledCallback callback);
|
||||
|
||||
void SetSharedArrayBufferConstructorEnabledCallback(
|
||||
SharedArrayBufferConstructorEnabledCallback callback);
|
||||
|
||||
void SetWasmJSPIEnabledCallback(WasmJSPIEnabledCallback callback);
|
||||
|
||||
/**
|
||||
* This function can be called by the embedder to signal V8 that the dynamic
|
||||
* enabling of features has finished. V8 can now set up dynamically added
|
||||
|
|
|
|||
8
deps/v8/include/v8-message.h
vendored
8
deps/v8/include/v8-message.h
vendored
|
|
@ -108,14 +108,6 @@ class V8_EXPORT Message {
|
|||
public:
|
||||
Local<String> Get() const;
|
||||
|
||||
/**
|
||||
* Return the isolate to which the Message belongs.
|
||||
*/
|
||||
V8_DEPRECATED(
|
||||
"Use Isolate::GetCurrent() instead, which is guaranteed to return the "
|
||||
"same isolate since https://crrev.com/c/6458560.")
|
||||
Isolate* GetIsolate() const;
|
||||
|
||||
V8_WARN_UNUSED_RESULT MaybeLocal<String> GetSource(
|
||||
Local<Context> context) const;
|
||||
V8_WARN_UNUSED_RESULT MaybeLocal<String> GetSourceLine(
|
||||
|
|
|
|||
146
deps/v8/include/v8-object.h
vendored
146
deps/v8/include/v8-object.h
vendored
|
|
@ -33,6 +33,11 @@ class PropertyCallbackInfo;
|
|||
*/
|
||||
using EmbedderDataTypeTag = uint16_t;
|
||||
|
||||
constexpr EmbedderDataTypeTag kEmbedderDataTypeTagDefault = 0;
|
||||
|
||||
V8_EXPORT internal::ExternalPointerTag ToExternalPointerTag(
|
||||
v8::EmbedderDataTypeTag api_tag);
|
||||
|
||||
/**
|
||||
* A private symbol
|
||||
*
|
||||
|
|
@ -177,10 +182,10 @@ using AccessorNameSetterCallback =
|
|||
* the kind of cross-context access that should be allowed.
|
||||
*
|
||||
*/
|
||||
enum V8_DEPRECATE_SOON(
|
||||
"This enum is no longer used and will be removed in V8 12.9.")
|
||||
enum V8_DEPRECATED(
|
||||
"This enum is no longer used and will be removed in V8 14.3.")
|
||||
AccessControl {
|
||||
DEFAULT V8_ENUM_DEPRECATE_SOON("not used") = 0,
|
||||
DEFAULT V8_ENUM_DEPRECATED("not used") = 0,
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
@ -451,23 +456,10 @@ class V8_EXPORT Object : public Value {
|
|||
*/
|
||||
Local<Value> GetPrototypeV2();
|
||||
|
||||
/**
|
||||
* Set the prototype object. This does not skip objects marked to
|
||||
* be skipped by __proto__ and it does not consult the security
|
||||
* handler.
|
||||
*/
|
||||
V8_DEPRECATED(
|
||||
"V8 will stop providing access to hidden prototype (i.e. "
|
||||
"JSGlobalObject). Use SetPrototypeV2() instead. "
|
||||
"See http://crbug.com/333672197.")
|
||||
V8_WARN_UNUSED_RESULT Maybe<bool> SetPrototype(Local<Context> context,
|
||||
Local<Value> prototype);
|
||||
|
||||
/**
|
||||
* Set the prototype object (same as calling Object.setPrototypeOf(..)).
|
||||
* This does not consult the security handler.
|
||||
* TODO(333672197): rename back to SetPrototype() once the old version goes
|
||||
* through the deprecation process and is removed.
|
||||
* TODO(http://crbug.com/333672197): rename back to SetPrototype().
|
||||
*/
|
||||
V8_WARN_UNUSED_RESULT Maybe<bool> SetPrototypeV2(Local<Context> context,
|
||||
Local<Value> prototype);
|
||||
|
|
@ -531,11 +523,40 @@ class V8_EXPORT Object : public Value {
|
|||
* must have been set by SetAlignedPointerInInternalField, everything else
|
||||
* leads to undefined behavior.
|
||||
*/
|
||||
V8_INLINE void* GetAlignedPointerFromInternalField(int index);
|
||||
V8_INLINE void* GetAlignedPointerFromInternalField(int index,
|
||||
EmbedderDataTypeTag tag);
|
||||
V8_INLINE void* GetAlignedPointerFromInternalField(v8::Isolate* isolate,
|
||||
int index);
|
||||
int index,
|
||||
EmbedderDataTypeTag tag);
|
||||
|
||||
V8_DEPRECATE_SOON(
|
||||
"Use GetAlignedPointerFromInternalField with EmbedderDataTypeTag "
|
||||
"parameter instead.")
|
||||
V8_INLINE void* GetAlignedPointerFromInternalField(int index) {
|
||||
return GetAlignedPointerFromInternalField(index,
|
||||
kEmbedderDataTypeTagDefault);
|
||||
}
|
||||
|
||||
V8_DEPRECATE_SOON(
|
||||
"Use GetAlignedPointerFromInternalField with EmbedderDataTypeTag "
|
||||
"parameter instead.")
|
||||
V8_INLINE void* GetAlignedPointerFromInternalField(v8::Isolate* isolate,
|
||||
int index) {
|
||||
return GetAlignedPointerFromInternalField(isolate, index,
|
||||
kEmbedderDataTypeTagDefault);
|
||||
}
|
||||
|
||||
/** Same as above, but works for PersistentBase. */
|
||||
V8_INLINE static void* GetAlignedPointerFromInternalField(
|
||||
const PersistentBase<Object>& object, int index,
|
||||
EmbedderDataTypeTag tag) {
|
||||
return object.template value<Object>()->GetAlignedPointerFromInternalField(
|
||||
index, tag);
|
||||
}
|
||||
|
||||
V8_DEPRECATE_SOON(
|
||||
"Use GetAlignedPointerFromInternalField with EmbedderDataTypeTag "
|
||||
"parameter instead.")
|
||||
V8_INLINE static void* GetAlignedPointerFromInternalField(
|
||||
const PersistentBase<Object>& object, int index) {
|
||||
return object.template value<Object>()->GetAlignedPointerFromInternalField(
|
||||
|
|
@ -543,6 +564,16 @@ class V8_EXPORT Object : public Value {
|
|||
}
|
||||
|
||||
/** Same as above, but works for TracedReference. */
|
||||
V8_INLINE static void* GetAlignedPointerFromInternalField(
|
||||
const BasicTracedReference<Object>& object, int index,
|
||||
EmbedderDataTypeTag tag) {
|
||||
return object.template value<Object>()->GetAlignedPointerFromInternalField(
|
||||
index, tag);
|
||||
}
|
||||
|
||||
V8_DEPRECATE_SOON(
|
||||
"Use GetAlignedPointerFromInternalField with EmbedderDataTypeTag "
|
||||
"parameter instead.")
|
||||
V8_INLINE static void* GetAlignedPointerFromInternalField(
|
||||
const BasicTracedReference<Object>& object, int index) {
|
||||
return object.template value<Object>()->GetAlignedPointerFromInternalField(
|
||||
|
|
@ -554,13 +585,15 @@ class V8_EXPORT Object : public Value {
|
|||
* a field, GetAlignedPointerFromInternalField must be used, everything else
|
||||
* leads to undefined behavior.
|
||||
*/
|
||||
void SetAlignedPointerInInternalField(int index, void* value,
|
||||
EmbedderDataTypeTag tag);
|
||||
|
||||
V8_DEPRECATE_SOON(
|
||||
"Use SetAlignedPointerInInternalField with EmbedderDataTypeTag parameter "
|
||||
"instead.")
|
||||
void SetAlignedPointerInInternalField(int index, void* value);
|
||||
|
||||
void SetAlignedPointerInInternalField(int index, void* value,
|
||||
EmbedderDataTypeTag tag);
|
||||
void SetAlignedPointerInInternalField(int index, void* value) {
|
||||
SetAlignedPointerInInternalField(index, value, kEmbedderDataTypeTagDefault);
|
||||
}
|
||||
|
||||
V8_DEPRECATE_SOON(
|
||||
"Use SetAlignedPointerInInternalField with EmbedderDataTypeTag "
|
||||
|
|
@ -795,9 +828,27 @@ class V8_EXPORT Object : public Value {
|
|||
* Prefer using version with Isolate parameter if you have an Isolate,
|
||||
* otherwise use the other one.
|
||||
*/
|
||||
void* GetAlignedPointerFromEmbedderDataInCreationContext(
|
||||
v8::Isolate* isolate, int index, EmbedderDataTypeTag tag);
|
||||
void* GetAlignedPointerFromEmbedderDataInCreationContext(
|
||||
int index, EmbedderDataTypeTag tag);
|
||||
|
||||
V8_DEPRECATE_SOON(
|
||||
"Use GetAlignedPointerFromEmbedderDataInCreationContext with "
|
||||
"EmbedderDataTypeTag parameter instead.")
|
||||
void* GetAlignedPointerFromEmbedderDataInCreationContext(v8::Isolate* isolate,
|
||||
int index);
|
||||
void* GetAlignedPointerFromEmbedderDataInCreationContext(int index);
|
||||
int index) {
|
||||
return GetAlignedPointerFromEmbedderDataInCreationContext(
|
||||
isolate, index, kEmbedderDataTypeTagDefault);
|
||||
}
|
||||
|
||||
V8_DEPRECATE_SOON(
|
||||
"Use GetAlignedPointerFromEmbedderDataInCreationContext with "
|
||||
"EmbedderDataTypeTag parameter instead.")
|
||||
void* GetAlignedPointerFromEmbedderDataInCreationContext(int index) {
|
||||
return GetAlignedPointerFromEmbedderDataInCreationContext(
|
||||
index, kEmbedderDataTypeTagDefault);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks whether a callback is set by the
|
||||
|
|
@ -848,21 +899,6 @@ class V8_EXPORT Object : public Value {
|
|||
V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsConstructor(
|
||||
Local<Context> context, int argc, Local<Value> argv[]);
|
||||
|
||||
/**
|
||||
* Return the isolate to which the Object belongs to.
|
||||
*/
|
||||
V8_DEPRECATED(
|
||||
"Use Isolate::GetCurrent() instead, which is guaranteed to return the "
|
||||
"same isolate since https://crrev.com/c/6458560.")
|
||||
Isolate* GetIsolate();
|
||||
|
||||
V8_DEPRECATED(
|
||||
"Use Isolate::GetCurrent() instead, which is guaranteed to return the "
|
||||
"same isolate since https://crrev.com/c/6458560.")
|
||||
V8_INLINE static Isolate* GetIsolate(const TracedReference<Object>& handle) {
|
||||
return handle.template value<Object>()->GetIsolate();
|
||||
}
|
||||
|
||||
/**
|
||||
* If this object is a Set, Map, WeakSet or WeakMap, this returns a
|
||||
* representation of the elements of this object as an array.
|
||||
|
|
@ -909,8 +945,10 @@ class V8_EXPORT Object : public Value {
|
|||
Object();
|
||||
static void CheckCast(Value* obj);
|
||||
Local<Data> SlowGetInternalField(int index);
|
||||
void* SlowGetAlignedPointerFromInternalField(int index);
|
||||
void* SlowGetAlignedPointerFromInternalField(v8::Isolate* isolate, int index);
|
||||
void* SlowGetAlignedPointerFromInternalField(int index,
|
||||
EmbedderDataTypeTag tag);
|
||||
void* SlowGetAlignedPointerFromInternalField(v8::Isolate* isolate, int index,
|
||||
EmbedderDataTypeTag tag);
|
||||
};
|
||||
|
||||
// --- Implementation ---
|
||||
|
|
@ -929,7 +967,7 @@ Local<Data> Object::GetInternalField(int index) {
|
|||
A value = I::ReadRawField<A>(obj, offset);
|
||||
#ifdef V8_COMPRESS_POINTERS
|
||||
// We read the full pointer value and then decompress it in order to avoid
|
||||
// dealing with potential endiannes issues.
|
||||
// dealing with potential endianness issues.
|
||||
value = I::DecompressTaggedField(obj, static_cast<uint32_t>(value));
|
||||
#endif
|
||||
|
||||
|
|
@ -941,7 +979,8 @@ Local<Data> Object::GetInternalField(int index) {
|
|||
}
|
||||
|
||||
void* Object::GetAlignedPointerFromInternalField(v8::Isolate* isolate,
|
||||
int index) {
|
||||
int index,
|
||||
EmbedderDataTypeTag tag) {
|
||||
#if !defined(V8_ENABLE_CHECKS)
|
||||
using A = internal::Address;
|
||||
using I = internal::Internals;
|
||||
|
|
@ -953,17 +992,16 @@ void* Object::GetAlignedPointerFromInternalField(v8::Isolate* isolate,
|
|||
int offset = I::kJSAPIObjectWithEmbedderSlotsHeaderSize +
|
||||
(I::kEmbedderDataSlotSize * index) +
|
||||
I::kEmbedderDataSlotExternalPointerOffset;
|
||||
A value =
|
||||
I::ReadExternalPointerField<{internal::kFirstEmbedderDataTag,
|
||||
internal::kLastEmbedderDataTag}>(
|
||||
isolate, obj, offset);
|
||||
A value = I::ReadExternalPointerField(isolate, obj, offset,
|
||||
ToExternalPointerTag(tag));
|
||||
return reinterpret_cast<void*>(value);
|
||||
}
|
||||
#endif
|
||||
return SlowGetAlignedPointerFromInternalField(isolate, index);
|
||||
return SlowGetAlignedPointerFromInternalField(isolate, index, tag);
|
||||
}
|
||||
|
||||
void* Object::GetAlignedPointerFromInternalField(int index) {
|
||||
void* Object::GetAlignedPointerFromInternalField(int index,
|
||||
EmbedderDataTypeTag tag) {
|
||||
#if !defined(V8_ENABLE_CHECKS)
|
||||
using A = internal::Address;
|
||||
using I = internal::Internals;
|
||||
|
|
@ -976,14 +1014,12 @@ void* Object::GetAlignedPointerFromInternalField(int index) {
|
|||
(I::kEmbedderDataSlotSize * index) +
|
||||
I::kEmbedderDataSlotExternalPointerOffset;
|
||||
Isolate* isolate = I::GetCurrentIsolateForSandbox();
|
||||
A value =
|
||||
I::ReadExternalPointerField<{internal::kFirstEmbedderDataTag,
|
||||
internal::kLastEmbedderDataTag}>(
|
||||
isolate, obj, offset);
|
||||
A value = I::ReadExternalPointerField(isolate, obj, offset,
|
||||
ToExternalPointerTag(tag));
|
||||
return reinterpret_cast<void*>(value);
|
||||
}
|
||||
#endif
|
||||
return SlowGetAlignedPointerFromInternalField(index);
|
||||
return SlowGetAlignedPointerFromInternalField(index, tag);
|
||||
}
|
||||
|
||||
// static
|
||||
|
|
|
|||
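A usage sketch of the tag-parameterized internal-field accessors declared above. MyWrapper and kMyWrapperTag are hypothetical embedder-side names, the tag value is illustrative, and the object is assumed to have been created from a template with at least one internal field.

#include <v8-local-handle.h>
#include <v8-object.h>

// Hypothetical embedder-side names; only the accessor calls below come from
// the API declared above.
constexpr v8::EmbedderDataTypeTag kMyWrapperTag = 1;

struct MyWrapper {
  int payload = 0;
};

void RoundTrip(v8::Local<v8::Object> holder, MyWrapper* wrapper) {
  // Store the pointer together with a type tag ...
  holder->SetAlignedPointerInInternalField(0, wrapper, kMyWrapperTag);
  // ... and read it back with the same tag, so the external-pointer type
  // check can verify it on access.
  auto* back = static_cast<MyWrapper*>(
      holder->GetAlignedPointerFromInternalField(0, kMyWrapperTag));
  (void)back;
}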
45
deps/v8/include/v8-profiler.h
vendored
45
deps/v8/include/v8-profiler.h
vendored
|
|
@ -1041,6 +1041,8 @@ class V8_EXPORT HeapProfiler {
|
|||
|
||||
/**
|
||||
* Callback interface for retrieving user friendly names of global objects.
|
||||
*
|
||||
* This interface will soon be deprecated in favour of ContextNameResolver.
|
||||
*/
|
||||
class ObjectNameResolver {
|
||||
public:
|
||||
|
|
@ -1054,6 +1056,23 @@ class V8_EXPORT HeapProfiler {
|
|||
virtual ~ObjectNameResolver() = default;
|
||||
};
|
||||
|
||||
/**
|
||||
* Callback interface for retrieving user friendly names of v8::Context
|
||||
* objects.
|
||||
*/
|
||||
class ContextNameResolver {
|
||||
public:
|
||||
/**
|
||||
* Returns name to be used in the heap snapshot for a given node. The returned
|
||||
* string must stay alive until snapshot collection is completed.
|
||||
* If no user friendly name is available return nullptr.
|
||||
*/
|
||||
virtual const char* GetName(Local<Context> context) = 0;
|
||||
|
||||
protected:
|
||||
virtual ~ContextNameResolver() = default;
|
||||
};
|
||||
|
||||
enum class HeapSnapshotMode {
|
||||
/**
|
||||
* Heap snapshot for regular developers.
|
||||
|
|
@ -1083,6 +1102,10 @@ class V8_EXPORT HeapProfiler {
|
|||
// NOLINTNEXTLINE
|
||||
HeapSnapshotOptions() {}
|
||||
|
||||
// TODO(https://crbug.com/333672197): remove once ObjectNameResolver is
|
||||
// removed.
|
||||
ALLOW_COPY_AND_MOVE_WITH_DEPRECATED_FIELDS(HeapSnapshotOptions)
|
||||
|
||||
/**
|
||||
* The control used to report intermediate progress to.
|
||||
*/
|
||||
|
|
@ -1090,7 +1113,15 @@ class V8_EXPORT HeapProfiler {
|
|||
/**
|
||||
* The resolver used by the snapshot generator to get names for V8 objects.
|
||||
*/
|
||||
V8_DEPRECATE_SOON("Use context_name_resolver callback instead.")
|
||||
ObjectNameResolver* global_object_name_resolver = nullptr;
|
||||
/**
|
||||
* The resolver used by the snapshot generator to get names for v8::Context
|
||||
* objects.
|
||||
* In case both this and |global_object_name_resolver| callbacks are
|
||||
* provided, this one will be used.
|
||||
*/
|
||||
ContextNameResolver* context_name_resolver = nullptr;
|
||||
/**
|
||||
* Mode for taking the snapshot, see `HeapSnapshotMode`.
|
||||
*/
|
||||
|
|
@ -1120,10 +1151,20 @@ class V8_EXPORT HeapProfiler {
|
|||
*
|
||||
* \returns the snapshot.
|
||||
*/
|
||||
V8_DEPRECATE_SOON("Use overload with ContextNameResolver* resolver instead.")
|
||||
const HeapSnapshot* TakeHeapSnapshot(
|
||||
ActivityControl* control,
|
||||
ObjectNameResolver* global_object_name_resolver = nullptr,
|
||||
ActivityControl* control, ObjectNameResolver* global_object_name_resolver,
|
||||
bool hide_internals = true, bool capture_numeric_value = false);
|
||||
const HeapSnapshot* TakeHeapSnapshot(ActivityControl* control,
|
||||
ContextNameResolver* resolver,
|
||||
bool hide_internals = true,
|
||||
bool capture_numeric_value = false);
|
||||
// TODO(333672197): remove this version once ObjectNameResolver* overload
|
||||
// is removed.
|
||||
const HeapSnapshot* TakeHeapSnapshot(ActivityControl* control,
|
||||
std::nullptr_t resolver = nullptr,
|
||||
bool hide_internals = true,
|
||||
bool capture_numeric_value = false);
|
||||
|
||||
/**
|
||||
* Obtains list of Detached JS Wrapper Objects. This function calls garbage
|
||||
|
|
|
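A minimal sketch of adopting the ContextNameResolver interface and the matching TakeHeapSnapshot overload introduced above; the returned name and the class name are illustrative.

#include <v8-profiler.h>

class EmbedderContextNameResolver final
    : public v8::HeapProfiler::ContextNameResolver {
 public:
  const char* GetName(v8::Local<v8::Context> context) override {
    // The returned string must stay alive until snapshot collection is
    // completed; a string literal trivially satisfies that.
    return "embedder-main-context";
  }
};

void TakeNamedSnapshot(v8::HeapProfiler* profiler) {
  EmbedderContextNameResolver resolver;
  // Uses the new overload: TakeHeapSnapshot(control, resolver, ...).
  profiler->TakeHeapSnapshot(/*control=*/nullptr, &resolver);
}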
|||
2
deps/v8/include/v8-template.h
vendored
2
deps/v8/include/v8-template.h
vendored
|
|
@ -1071,7 +1071,7 @@ class V8_EXPORT ObjectTemplate : public Template {
|
|||
/**
|
||||
* A template to create dictionary objects at runtime.
|
||||
*/
|
||||
class V8_EXPORT DictionaryTemplate final {
|
||||
class V8_EXPORT DictionaryTemplate final : public Data {
|
||||
public:
|
||||
/** Creates a new template. Also declares data properties that can be passed
|
||||
* on instantiation of the template. Properties can only be declared on
|
||||
|
|
|
|||
6
deps/v8/include/v8-version.h
vendored
6
deps/v8/include/v8-version.h
vendored
|
|
@ -9,9 +9,9 @@
|
|||
// NOTE these macros are used by some of the tool scripts and the build
|
||||
// system so their names cannot be changed without changing the scripts.
|
||||
#define V8_MAJOR_VERSION 14
|
||||
#define V8_MINOR_VERSION 1
|
||||
#define V8_BUILD_NUMBER 146
|
||||
#define V8_PATCH_LEVEL 11
|
||||
#define V8_MINOR_VERSION 2
|
||||
#define V8_BUILD_NUMBER 231
|
||||
#define V8_PATCH_LEVEL 9
|
||||
|
||||
// Use 1 for candidates and 0 otherwise.
|
||||
// (Boolean macro values are not supported by all preprocessors.)
|
||||
|
|
|
|||
9
deps/v8/include/v8config.h
vendored
9
deps/v8/include/v8config.h
vendored
|
|
@ -335,7 +335,6 @@ path. Add it with -I<path> to the command line
|
|||
// - [[no_unique_address]] supported
|
||||
// V8_HAS_CPP_ATTRIBUTE_LIFETIME_BOUND - [[clang::lifetimebound]] supported
|
||||
// V8_HAS_BUILTIN_ADD_OVERFLOW - __builtin_add_overflow() supported
|
||||
// V8_HAS_BUILTIN_BIT_CAST - __builtin_bit_cast() supported
|
||||
// V8_HAS_BUILTIN_BSWAP16 - __builtin_bswap16() supported
|
||||
// V8_HAS_BUILTIN_BSWAP32 - __builtin_bswap32() supported
|
||||
// V8_HAS_BUILTIN_BSWAP64 - __builtin_bswap64() supported
|
||||
|
|
@ -418,7 +417,6 @@ path. Add it with -I<path> to the command line
|
|||
# define V8_HAS_BUILTIN_ADD_OVERFLOW (__has_builtin(__builtin_add_overflow))
|
||||
# define V8_HAS_BUILTIN_ASSUME (__has_builtin(__builtin_assume))
|
||||
# define V8_HAS_BUILTIN_ASSUME_ALIGNED (__has_builtin(__builtin_assume_aligned))
|
||||
# define V8_HAS_BUILTIN_BIT_CAST (__has_builtin(__builtin_bit_cast))
|
||||
# define V8_HAS_BUILTIN_BSWAP16 (__has_builtin(__builtin_bswap16))
|
||||
# define V8_HAS_BUILTIN_BSWAP32 (__has_builtin(__builtin_bswap32))
|
||||
# define V8_HAS_BUILTIN_BSWAP64 (__has_builtin(__builtin_bswap64))
|
||||
|
|
@ -473,9 +471,6 @@ path. Add it with -I<path> to the command line
|
|||
// for V8_HAS_CPP_ATTRIBUTE_NODISCARD. See https://crbug.com/v8/11707.
|
||||
|
||||
# define V8_HAS_BUILTIN_ASSUME_ALIGNED 1
|
||||
# if __GNUC__ >= 11
|
||||
# define V8_HAS_BUILTIN_BIT_CAST 1
|
||||
# endif
|
||||
# define V8_HAS_BUILTIN_CLZ 1
|
||||
# define V8_HAS_BUILTIN_CTZ 1
|
||||
# define V8_HAS_BUILTIN_EXPECT 1
|
||||
|
|
@ -605,15 +600,11 @@ path. Add it with -I<path> to the command line
|
|||
// functions.
|
||||
// Use like:
|
||||
// V8_NOINLINE V8_PRESERVE_MOST void UnlikelyMethod();
|
||||
#if V8_OS_WIN
|
||||
# define V8_PRESERVE_MOST
|
||||
#else
|
||||
#if V8_HAS_ATTRIBUTE_PRESERVE_MOST
|
||||
# define V8_PRESERVE_MOST __attribute__((preserve_most))
|
||||
#else
|
||||
# define V8_PRESERVE_MOST /* NOT SUPPORTED */
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
// A macro (V8_DEPRECATED) to mark classes or functions as deprecated.
|
||||
|
|
|
|||
139
deps/v8/infra/mb/mb_config.pyl
vendored
139
deps/v8/infra/mb/mb_config.pyl
vendored
|
|
@ -136,6 +136,7 @@
|
|||
'V8 Linux64 - verify builtins': 'release_x64_verify_builtins',
|
||||
'V8 Linux64 - verify deterministic': 'release_x64_verify_deterministic',
|
||||
'V8 Linux - full debug builder': 'full_debug_x86',
|
||||
'V8 Linux64 - full debug builder': 'full_debug_x64',
|
||||
'V8 Mac64 - full debug builder': 'full_debug_x64',
|
||||
'V8 Random Deopt Fuzzer - debug': 'debug_x64',
|
||||
},
|
||||
|
|
@ -145,44 +146,44 @@
|
|||
'V8 Centipede Linux64 ASAN - release builder':
|
||||
'release_x64_asan_centipede',
|
||||
'V8 Clusterfuzz Win64 ASAN - release builder':
|
||||
'release_x64_asan_no_lsan_verify_heap',
|
||||
'release_x64_asan_no_lsan_verify_heap_undefined_double',
|
||||
# Note this is called a debug builder, but it uses a release build
|
||||
# configuration with dchecks (which enables DEBUG in V8), since win-asan
|
||||
# debug is not supported.
|
||||
'V8 Clusterfuzz Win64 ASAN - debug builder':
|
||||
'release_x64_asan_no_lsan_verify_heap_dchecks',
|
||||
'release_x64_asan_no_lsan_verify_heap_dchecks_undefined_double',
|
||||
'V8 Clusterfuzz Mac64 ASAN - release builder':
|
||||
'release_x64_asan_no_lsan_verify_heap',
|
||||
'release_x64_asan_no_lsan_verify_heap_undefined_double',
|
||||
'V8 Clusterfuzz Mac64 ASAN - debug builder':
|
||||
'debug_x64_asan_no_lsan_static',
|
||||
'debug_x64_asan_no_lsan_static_undefined_double',
|
||||
'V8 Clusterfuzz Linux64 - release builder':
|
||||
'release_x64_correctness_fuzzer',
|
||||
'V8 Clusterfuzz Linux64 - debug builder': 'debug_x64',
|
||||
'release_x64_correctness_fuzzer_undefined_double',
|
||||
'V8 Clusterfuzz Linux64 - debug builder': 'debug_x64_undefined_double',
|
||||
'V8 Clusterfuzz Linux64 ASAN no inline - release builder':
|
||||
'release_x64_asan_symbolized_verify_heap',
|
||||
'release_x64_asan_symbolized_verify_heap_undefined_double',
|
||||
'V8 Clusterfuzz Linux ASAN no inline - release builder':
|
||||
'release_x86_asan_symbolized_verify_heap',
|
||||
'V8 Clusterfuzz Linux64 ASAN - debug builder': 'debug_x64_asan',
|
||||
'release_x86_asan_symbolized_verify_heap_undefined_double',
|
||||
'V8 Clusterfuzz Linux64 ASAN - debug builder': 'debug_x64_asan_undefined_double',
|
||||
'V8 Clusterfuzz Linux64 ASAN - undefined double - debug builder': 'debug_x64_asan_undefined_double',
|
||||
'V8 Clusterfuzz Linux ASAN - debug builder': 'debug_x86_asan',
|
||||
'V8 Clusterfuzz Linux ASAN - debug builder': 'debug_x86_asan_undefined_double',
|
||||
'V8 Clusterfuzz Linux64 ASAN arm64 - debug builder':
|
||||
'debug_simulate_arm64_asan',
|
||||
'V8 Clusterfuzz Linux - debug builder': 'debug_x86',
|
||||
'debug_simulate_arm64_asan_undefined_double',
|
||||
'V8 Clusterfuzz Linux - debug builder': 'debug_x86_undefined_double',
|
||||
'V8 Clusterfuzz Linux ASAN arm - debug builder':
|
||||
'debug_simulate_arm_asan',
|
||||
'debug_simulate_arm_asan_undefined_double',
|
||||
'V8 Clusterfuzz Linux64 CFI - release builder':
|
||||
'release_x64_cfi_clusterfuzz',
|
||||
'release_x64_cfi_clusterfuzz_undefined_double',
|
||||
'V8 Clusterfuzz Linux MSAN no origins':
|
||||
'release_simulate_arm64_msan_no_origins',
|
||||
'release_simulate_arm64_msan_no_origins_undefined_double',
|
||||
'V8 Clusterfuzz Linux MSAN chained origins':
|
||||
'release_simulate_arm64_msan',
|
||||
'V8 Clusterfuzz Linux64 TSAN - release builder': 'release_x64_tsan',
|
||||
'release_simulate_arm64_msan_undefined_double',
|
||||
'V8 Clusterfuzz Linux64 TSAN - release builder': 'release_x64_tsan_undefined_double',
|
||||
'V8 Clusterfuzz Linux64 UBSan - release builder':
|
||||
'release_x64_ubsan_recover',
|
||||
'release_x64_ubsan_recover_undefined_double',
|
||||
'V8 Clusterfuzz Linux64 sandbox testing - release builder':
|
||||
'release_x64_sandbox_testing',
|
||||
'release_x64_sandbox_testing_undefined_double',
|
||||
'V8 Clusterfuzz Linux64 ASAN sandbox testing - release builder':
|
||||
'release_x64_asan_sandbox_testing',
|
||||
'release_x64_asan_sandbox_testing_undefined_double',
|
||||
},
|
||||
'client.v8.perf' : {
|
||||
# Arm
|
||||
|
|
@ -232,7 +233,9 @@
|
|||
'V8 Linux - ppc64 - sim - builder': 'release_simulate_ppc64',
|
||||
'V8 Linux - s390x - sim - builder': 'release_simulate_s390x',
|
||||
# RISC-V
|
||||
'V8 Linux - riscv32 - sim - debug builder': 'debug_simulate_riscv32',
|
||||
'V8 Linux - riscv32 - sim - builder': 'release_simulate_riscv32',
|
||||
'V8 Linux - riscv64 - sim - debug builder': 'debug_simulate_riscv64',
|
||||
'V8 Linux - riscv64 - sim - builder': 'release_simulate_riscv64',
|
||||
'V8 Linux - riscv64 - sim - pointer compression - builder': 'release_simulate_riscv64_pointer_compression',
|
||||
# Loongson
|
||||
|
|
@ -275,6 +278,7 @@
|
|||
'v8_linux64_dict_tracking_compile_dbg': 'debug_x64_dict_tracking_trybot',
|
||||
'v8_linux64_disable_runtime_call_stats_compile_rel': 'release_x64_disable_runtime_call_stats',
|
||||
'v8_linux64_css_compile_dbg': 'debug_x64_conservative_stack_scanning',
|
||||
'v8_linux64_full_compile_dbg': 'full_debug_x64',
|
||||
'v8_linux64_gc_stress_custom_snapshot_compile_dbg': 'debug_x64_trybot_custom',
|
||||
'v8_linux64_gc_stress_compile_dbg': 'debug_x64_trybot',
|
||||
'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc',
|
||||
|
|
@ -300,14 +304,16 @@
|
|||
'v8_linux64_arm64_no_wasm_compile_dbg': 'debug_arm64_webassembly_disabled',
|
||||
'v8_linux64_verify_csa_compile_rel': 'release_x64_verify_csa',
|
||||
'v8_linux64_asan_compile_rel': 'release_x64_asan_minimal_symbols',
|
||||
'v8_linux64_sandbox_testing_compile_rel': 'release_x64_sandbox_testing',
|
||||
'v8_linux64_asan_sandbox_testing_compile_rel': 'release_x64_asan_sandbox_testing',
|
||||
'v8_linux64_sandbox_testing_compile_rel': 'release_x64_sandbox_testing_undefined_double',
|
||||
'v8_linux64_asan_sandbox_testing_compile_rel': 'release_x64_asan_sandbox_testing_undefined_double',
|
||||
'v8_linux64_cfi_compile_rel': 'release_x64_cfi',
|
||||
'v8_linux64_fuzzilli_compile_rel': 'release_x64_fuzzilli',
|
||||
'v8_linux64_loong64_compile_rel': 'release_simulate_loong64',
|
||||
'v8_linux64_lower_limits_compile_rel': 'release_x64_lower_limits',
|
||||
'v8_linux64_msan_compile_rel': 'release_simulate_arm64_msan_minimal_symbols',
|
||||
'v8_linux_riscv32_compile_dbg': 'debug_simulate_riscv32',
|
||||
'v8_linux_riscv32_compile_rel': 'release_simulate_riscv32',
|
||||
'v8_linux64_riscv64_compile_dbg': 'debug_simulate_riscv64',
|
||||
'v8_linux64_riscv64_compile_rel': 'release_simulate_riscv64',
|
||||
'v8_linux64_riscv64_pointer_compression_compile_rel': 'release_simulate_riscv64_pointer_compression',
|
||||
'v8_linux64_sticky_mark_bits_compile_dbg': 'debug_x64_sticky_mark_bits',
|
||||
|
|
@ -351,7 +357,7 @@
|
|||
'v8_linux_arm64_compile_dbg': 'debug_simulate_arm64',
|
||||
'v8_linux_arm64_gc_stress_compile_dbg': 'debug_simulate_arm64',
|
||||
'v8_linux_mips64el_compile_rel': 'release_simulate_mips64el',
|
||||
'v8_numfuzz_asan_compile_rel': 'release_x64_asan_symbolized_verify_heap',
|
||||
'v8_numfuzz_asan_compile_rel': 'release_x64_asan_symbolized_verify_heap_undefined_double',
|
||||
'v8_numfuzz_compile_rel': 'release_x64',
|
||||
'v8_numfuzz_compile_dbg': 'debug_x64',
|
||||
'v8_numfuzz_tsan_compile_rel': 'release_x64_tsan',
|
||||
|
|
@ -450,14 +456,19 @@
|
|||
# Debug configs for simulators.
|
||||
'debug_simulate_arm': [
|
||||
'debug_bot', 'simulate_arm'],
|
||||
'debug_simulate_arm_asan': [
|
||||
'debug_bot', 'simulate_arm', 'asan'],
|
||||
'debug_simulate_arm_asan_undefined_double': [
|
||||
'debug_bot', 'simulate_arm', 'asan', 'v8_enable_undefined_double'],
|
||||
'debug_simulate_arm_lite': [
|
||||
'debug_bot', 'simulate_arm', 'v8_enable_lite_mode'],
|
||||
'debug_simulate_arm64': [
|
||||
'debug_bot', 'simulate_arm64'],
|
||||
'debug_simulate_arm64_asan': [
|
||||
'debug_bot', 'simulate_arm64', 'asan', 'lsan'],
|
||||
'debug_simulate_arm64_asan_undefined_double': [
|
||||
'debug_bot', 'simulate_arm64', 'asan', 'lsan',
|
||||
'v8_enable_undefined_double'],
|
||||
'debug_simulate_riscv32': [
|
||||
'debug_bot', 'simulate_riscv32'],
|
||||
'debug_simulate_riscv64': [
|
||||
'debug_bot', 'simulate_riscv64'],
|
||||
|
||||
# Release configs for simulators.
|
||||
'release_simulate_arm_gcmole': [
|
||||
|
|
@ -481,10 +492,11 @@
|
|||
'release_bot', 'simulate_arm64', 'msan'],
|
||||
'release_simulate_arm64_msan_minimal_symbols': [
|
||||
'release_bot', 'simulate_arm64', 'msan', 'minimal_symbols'],
|
||||
'release_simulate_arm64_msan_no_origins': [
|
||||
'release_bot', 'simulate_arm64', 'msan_no_origins'],
|
||||
'release_simulate_arm64_msan': [
|
||||
'release_bot', 'simulate_arm64', 'msan'],
|
||||
'release_simulate_arm64_msan_no_origins_undefined_double': [
|
||||
'release_bot', 'simulate_arm64', 'msan_no_origins',
|
||||
'v8_enable_undefined_double'],
|
||||
'release_simulate_arm64_msan_undefined_double': [
|
||||
'release_bot', 'simulate_arm64', 'msan', 'v8_enable_undefined_double'],
|
||||
'release_simulate_loong64': [
|
||||
'release_bot', 'simulate_loong64'],
|
||||
'release_simulate_mips64el': [
|
||||
|
|
@ -578,25 +590,27 @@
|
|||
'release_bot', 'x64', 'asan', 'lsan', 'minimal_symbols'],
|
||||
'release_x64_asan_no_lsan': [
|
||||
'release_bot', 'x64', 'asan'],
|
||||
'release_x64_asan_no_lsan_verify_heap': [
|
||||
'release_bot', 'x64', 'asan', 'v8_verify_heap'],
|
||||
'release_x64_asan_no_lsan_verify_heap_dchecks': [
|
||||
'release_x64_asan_no_lsan_verify_heap_undefined_double': [
|
||||
'release_bot', 'x64', 'asan', 'v8_verify_heap',
|
||||
'v8_enable_undefined_double'],
|
||||
'release_x64_asan_no_lsan_verify_heap_dchecks_undefined_double': [
|
||||
'release_bot', 'x64', 'asan', 'dcheck_always_on',
|
||||
'v8_enable_slow_dchecks', 'v8_verify_heap'],
|
||||
'release_x64_sandbox_testing': [
|
||||
'release_bot', 'x64', 'symbolized', 'backtrace', 'v8_enable_memory_corruption_api'],
|
||||
'release_x64_asan_sandbox_testing': [
|
||||
'v8_enable_slow_dchecks', 'v8_verify_heap', 'v8_enable_undefined_double'],
|
||||
'release_x64_sandbox_testing_undefined_double': [
|
||||
'release_bot', 'x64', 'symbolized', 'backtrace', 'v8_enable_memory_corruption_api',
|
||||
'v8_enable_undefined_double'],
|
||||
'release_x64_asan_sandbox_testing_undefined_double': [
|
||||
'release_bot', 'x64', 'asan', 'symbolized',
|
||||
'v8_enable_memory_corruption_api'],
|
||||
'release_x64_asan_symbolized_verify_heap': [
|
||||
'v8_enable_memory_corruption_api', 'v8_enable_undefined_double'],
|
||||
'release_x64_asan_symbolized_verify_heap_undefined_double': [
|
||||
'release_bot', 'x64', 'asan', 'lsan', 'symbolized',
|
||||
'v8_verify_heap'],
|
||||
'v8_verify_heap', 'v8_enable_undefined_double'],
|
||||
'release_x64_cet_shadow_stack': [
|
||||
'release_bot', 'x64', 'cet_shadow_stack'],
|
||||
'release_x64_cfi': [
|
||||
'release_bot', 'x64', 'cfi'],
|
||||
'release_x64_cfi_clusterfuzz': [
|
||||
'release_bot', 'x64', 'cfi_clusterfuzz'],
|
||||
'release_x64_cfi_clusterfuzz_undefined_double': [
|
||||
'release_bot', 'x64', 'cfi_clusterfuzz', 'v8_enable_undefined_double'],
|
||||
'release_x64_coverage': [
|
||||
'release_bot', 'x64', 'clang_coverage'],
|
||||
'release_x64_fuzzilli': [
|
||||
|
|
@ -604,8 +618,9 @@
|
|||
'v8_verify_heap', 'v8_verify_csa', 'fuzzilli'],
|
||||
'release_x64_gcmole': [
|
||||
'release_bot', 'x64', 'gcmole'],
|
||||
'release_x64_correctness_fuzzer' : [
|
||||
'release_bot', 'x64', 'v8_correctness_fuzzer'],
|
||||
'release_x64_correctness_fuzzer_undefined_double' : [
|
||||
'release_bot', 'x64', 'v8_correctness_fuzzer',
|
||||
'v8_enable_undefined_double'],
|
||||
'release_x64_disable_runtime_call_stats': [
|
||||
'release_bot', 'x64', 'v8_disable_runtime_call_stats'],
|
||||
'release_x64_fuchsia': [
|
||||
|
|
@ -641,14 +656,14 @@
|
|||
'release_bot', 'x64', 'tsan', 'disable_concurrent_marking'],
|
||||
'release_x64_tsan_minimal_symbols': [
|
||||
'release_bot', 'x64', 'tsan', 'minimal_symbols'],
|
||||
'release_x64_tsan': [
|
||||
'release_bot', 'x64', 'tsan'],
|
||||
'release_x64_tsan_undefined_double': [
|
||||
'release_bot', 'x64', 'tsan', 'v8_enable_undefined_double'],
|
||||
'release_x64_ubsan': [
|
||||
'release_bot', 'x64', 'ubsan'],
|
||||
'release_x64_ubsan_minimal_symbols': [
|
||||
'release_bot', 'x64', 'ubsan', 'minimal_symbols'],
|
||||
'release_x64_ubsan_recover': [
|
||||
'release_bot', 'x64', 'ubsan_recover'],
|
||||
'release_x64_ubsan_recover_undefined_double': [
|
||||
'release_bot', 'x64', 'ubsan_recover', 'v8_enable_undefined_double'],
|
||||
'release_x64_shared_verify_heap': [
|
||||
'release_bot', 'x64', 'shared', 'v8_verify_heap'],
|
||||
'release_x64_verify_builtins': [
|
||||
|
|
@ -674,13 +689,13 @@
|
|||
# Debug configs for x64.
|
||||
'debug_x64': [
|
||||
'debug_bot', 'x64'],
|
||||
'debug_x64_asan': [
|
||||
'debug_bot', 'x64', 'asan', 'lsan'],
|
||||
'debug_x64_asan_undefined_double': [
|
||||
'debug_bot', 'x64', 'asan', 'lsan', 'v8_enable_undefined_double'],
|
||||
'debug_x64_asan_centipede': [
|
||||
'debug_bot', 'x64', 'asan', 'use_centipede'],
|
||||
'debug_x64_asan_no_lsan_static': [
|
||||
'debug_x64_asan_no_lsan_static_undefined_double': [
|
||||
'debug', 'static', 'remoteexec', 'v8_enable_slow_dchecks',
|
||||
'v8_optimized_debug', 'x64', 'asan'],
|
||||
'v8_optimized_debug', 'x64', 'asan', 'v8_enable_undefined_double'],
|
||||
'debug_x64_asan_undefined_double': [
|
||||
'debug_bot', 'x64', 'asan', 'v8_enable_undefined_double'],
|
||||
'debug_x64_conservative_stack_scanning': [
|
||||
|
|
@ -715,8 +730,8 @@
|
|||
'debug_bot', 'x64', 'v8_enable_sticky_mark_bits'],
|
||||
'debug_x64_trybot': [
|
||||
'debug_trybot', 'x64'],
|
||||
'debug_x64': [
|
||||
'debug_bot', 'x64'],
|
||||
'debug_x64_undefined_double': [
|
||||
'debug_bot', 'x64', 'v8_enable_undefined_double'],
|
||||
'debug_x64_dict_tracking_trybot': [
|
||||
'debug_trybot', 'x64', 'v8_enable_dict_property_const_tracking'],
|
||||
'debug_x64_trybot_custom': [
|
||||
|
|
@ -732,25 +747,25 @@
|
|||
# Debug configs for x86.
|
||||
'debug_x86': [
|
||||
'debug_bot', 'x86'],
|
||||
'debug_x86_asan': [
|
||||
'debug_bot', 'x86', 'asan', 'lsan'],
|
||||
'debug_x86_asan_undefined_double': [
|
||||
'debug_bot', 'x86', 'asan', 'lsan', 'v8_enable_undefined_double'],
|
||||
'debug_x86_minimal_symbols': [
|
||||
'debug_bot', 'x86', 'minimal_symbols'],
|
||||
'debug_x86_no_i18n': [
|
||||
'debug_bot', 'x86', 'v8_no_i18n'],
|
||||
'debug_x86_trybot': [
|
||||
'debug_trybot', 'x86'],
|
||||
'debug_x86': [
|
||||
'debug_bot', 'x86'],
|
||||
'debug_x86_undefined_double': [
|
||||
'debug_bot', 'x86', 'v8_enable_undefined_double'],
|
||||
'debug_x86_vtunejit': [
|
||||
'debug_bot', 'x86', 'v8_enable_vtunejit'],
|
||||
'full_debug_x86': [
|
||||
'debug', 'x86', 'remoteexec', 'v8_enable_slow_dchecks', 'v8_full_debug'],
|
||||
|
||||
# Release configs for x86.
|
||||
'release_x86_asan_symbolized_verify_heap': [
|
||||
'release_x86_asan_symbolized_verify_heap_undefined_double': [
|
||||
'release_bot', 'x86', 'asan', 'lsan', 'symbolized',
|
||||
'v8_verify_heap'],
|
||||
'v8_verify_heap', 'v8_enable_undefined_double'],
|
||||
'release_x86_gcmole': [
|
||||
'release_bot', 'x86', 'gcmole'],
|
||||
'release_x86_gcmole_trybot': [
|
||||
|
|
@ -1136,7 +1151,7 @@
|
|||
},
|
||||
|
||||
'v8_enable_undefined_double': {
|
||||
'gn_args': 'v8_enable_experimental_undefined_double=true',
|
||||
'gn_args': 'v8_enable_undefined_double=true',
|
||||
},
|
||||
|
||||
'v8_enable_verify_predictable': {
|
||||
|
|
|
|||
4
deps/v8/infra/testing/PRESUBMIT.py
vendored
4
deps/v8/infra/testing/PRESUBMIT.py
vendored
|
|
@ -123,9 +123,9 @@ def _check_test(error_msg, test):
|
|||
if not all(isinstance(x, str) for x in test_args):
|
||||
errors += error_msg('If specified, all test_args must be strings')
|
||||
|
||||
# Limit shards to 14 to avoid erroneous resource exhaustion.
|
||||
# Limit shards to avoid erroneous resource exhaustion.
|
||||
errors += _check_int_range(
|
||||
error_msg, test, 'shards', lower_bound=1, upper_bound=14)
|
||||
error_msg, test, 'shards', lower_bound=1, upper_bound=16)
|
||||
|
||||
variant = test.get('variant', 'default')
|
||||
if not variant or not isinstance(variant, str):
|
||||
|
|
|
|||
272
deps/v8/infra/testing/builders.pyl
vendored
272
deps/v8/infra/testing/builders.pyl
vendored
|
|
@ -56,29 +56,28 @@
|
|||
{'name': 'benchmarks', 'variant': 'extra'},
|
||||
{'name': 'mozilla'},
|
||||
{'name': 'mozilla', 'variant': 'extra'},
|
||||
{'name': 'test262', 'variant': 'default', 'shards': 2},
|
||||
{'name': 'test262', 'variant': 'future', 'shards': 2},
|
||||
{'name': 'test262', 'variant': 'extra', 'shards': 7},
|
||||
{'name': 'v8testing', 'shards': 4},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 4},
|
||||
{'name': 'test262', 'variant': 'default', 'shards': 4},
|
||||
{'name': 'test262', 'variant': 'future', 'shards': 4},
|
||||
{'name': 'v8testing', 'shards': 6},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 5},
|
||||
# Noavx.
|
||||
{
|
||||
'name': 'mozilla',
|
||||
'suffix': 'noavx',
|
||||
'test_args': ['--extra-flags', '--noenable-avx']
|
||||
'test_args': ['--extra-flags', '--noenable-avx'],
|
||||
},
|
||||
{
|
||||
'name': 'test262',
|
||||
'suffix': 'noavx',
|
||||
'variant': 'default',
|
||||
'test_args': ['--extra-flags', '--noenable-avx'],
|
||||
'shards': 2
|
||||
'shards': 4,
|
||||
},
|
||||
{
|
||||
'name': 'v8testing',
|
||||
'suffix': 'noavx',
|
||||
'test_args': ['--extra-flags', '--noenable-avx'],
|
||||
'shards': 4
|
||||
'shards': 6,
|
||||
},
|
||||
# Nosse3.
|
||||
{
|
||||
|
|
@ -88,7 +87,7 @@
|
|||
'--extra-flags',
|
||||
'--noenable-sse3 --noenable-ssse3 --noenable-sse4-1 --noenable-avx',
|
||||
],
|
||||
'shards': 4,
|
||||
'shards': 6,
|
||||
},
|
||||
],
|
||||
},
|
||||
|
|
@ -156,7 +155,6 @@
|
|||
{'name': 'mozilla', 'variant': 'extra'},
|
||||
{'name': 'optimize_for_size'},
|
||||
{'name': 'test262', 'shards': 4},
|
||||
{'name': 'test262', 'variant': 'extra', 'shards': 6},
|
||||
{'name': 'v8testing', 'shards': 4},
|
||||
{'name': 'v8testing', 'suffix': 'isolates', 'test_args': ['--isolates'], 'shards': 4},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 4},
|
||||
|
|
@ -393,7 +391,7 @@
|
|||
'os': 'Ubuntu-22.04',
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'shards': 3},
|
||||
{'name': 'v8testing', 'shards': 5},
|
||||
],
|
||||
},
|
||||
'v8_linux64_css_dbg': {
|
||||
|
|
@ -417,37 +415,37 @@
|
|||
{'name': 'benchmarks', 'variant': 'extra', 'shards': 2},
|
||||
{'name': 'mozilla'},
|
||||
{'name': 'mozilla', 'variant': 'extra'},
|
||||
{'name': 'test262', 'variant': 'default', 'shards': 3},
|
||||
{'name': 'test262', 'variant': 'future', 'shards': 3},
|
||||
{'name': 'test262', 'variant': 'extra', 'shards': 14},
|
||||
{'name': 'test262', 'variant': 'default', 'shards': 4},
|
||||
{'name': 'test262', 'variant': 'future', 'shards': 4},
|
||||
{'name': 'v8testing', 'shards': 8},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 5},
|
||||
{'name': 'v8testing', 'variant': 'minor_ms'},
|
||||
{'name': 'v8testing', 'variant': 'conservative_stack_scanning'},
|
||||
{'name': 'v8testing', 'variant': 'precise_pinning'},
|
||||
{'name': 'v8testing', 'variant': 'no_lfa'},
|
||||
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
|
||||
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
|
||||
{'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
|
||||
{'name': 'mjsunit', 'variant': 'stress_maglev_tests_with_turbofan'},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 6},
|
||||
{'name': 'v8testing', 'variant': 'minor_ms', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'conservative_stack_scanning', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'precise_pinning', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'no_lfa', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'stress_concurrent_inlining','shards': 2},
|
||||
{'name': 'mjsunit', 'variant': 'stress_maglev_tests_with_turbofan', 'shards': 2},
|
||||
# Maglev -- move to extra once more architectures are supported.
|
||||
{'name': 'v8testing', 'variant': 'maglev'},
|
||||
{'name': 'v8testing', 'variant': 'maglev_future'},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev'},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_non_eager_inlining'},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_future'},
|
||||
{'name': 'v8testing', 'variant': 'turbolev'},
|
||||
{'name': 'v8testing', 'variant': 'maglev', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'maglev_future', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_non_eager_inlining', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_future', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'turbolev', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'stress_turbolev_future', 'shards': 2},
|
||||
# Code serializer.
|
||||
{'name': 'benchmarks', 'variant': 'code_serializer', 'shards': 1},
|
||||
{'name': 'd8testing', 'variant': 'code_serializer', 'shards': 1},
|
||||
{'name': 'd8testing', 'variant': 'code_serializer', 'shards': 2},
|
||||
{'name': 'mozilla', 'variant': 'code_serializer', 'shards': 1},
|
||||
{'name': 'test262', 'variant': 'code_serializer', 'shards': 5},
|
||||
{'name': 'test262', 'variant': 'code_serializer', 'shards': 6},
|
||||
# GC stress
|
||||
{
|
||||
'name': 'd8testing',
|
||||
'suffix': 'gc-stress',
|
||||
'test_args': ['--gc-stress'],
|
||||
'shards': 7,
|
||||
'shards': 9,
|
||||
},
|
||||
# Jit fuzzing.
|
||||
{'name': 'mjsunit', 'variant': 'jit_fuzzing'},
|
||||
|
|
@ -471,6 +469,14 @@
|
|||
{'name': 'v8testing'},
|
||||
],
|
||||
},
|
||||
'v8_linux64_full_dbg': {
|
||||
'swarming_dimensions': {
|
||||
'os': 'Ubuntu-22.04',
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'variant': 'default', 'shards': 8},
|
||||
],
|
||||
},
|
||||
'v8_linux64_fuzzilli_rel': {
|
||||
'swarming_dimensions' : {
|
||||
'os': 'Ubuntu-22.04',
|
||||
|
|
@ -494,8 +500,12 @@
|
|||
# Experimental regexp engine.
|
||||
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
|
||||
# Variants for maglev.
|
||||
{'name': 'v8testing', 'variant': 'maglev_no_turbofan'},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_no_turbofan'},
|
||||
# Variants maglev + regexp assemble from bc
|
||||
# TODO(437003349) change to maglev_no_turbofan once the regexp project
|
||||
# is complete.
|
||||
{'name': 'v8testing', 'variant': 'maglev_no_turbofan_regexp_from_bc'},
|
||||
{'name': 'mjsunit', 'variant': 'stress_maglev_tracing'},
|
||||
],
|
||||
},
|
||||
'v8_linux64_gc_stress_custom_snapshot_dbg': {
|
||||
|
|
@ -624,9 +634,15 @@
|
|||
{'name': 'v8testing', 'variant': 'stress_maglev_non_eager_inlining'},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_future'},
|
||||
{'name': 'v8testing', 'variant': 'turbolev'},
|
||||
{'name': 'v8testing', 'variant': 'stress_turbolev_future'},
|
||||
{'name': 'v8testing', 'variant': 'minor_ms'},
|
||||
{'name': 'v8testing', 'variant': 'conservative_stack_scanning'},
|
||||
{'name': 'v8testing', 'variant': 'precise_pinning'},
|
||||
{
|
||||
'name': 'v8testing',
|
||||
'suffix': 'noavx',
|
||||
'test_args': ['--extra-flags', '--noenable-avx']
|
||||
},
|
||||
],
|
||||
},
|
||||
'v8_linux64_perfetto_dbg': {
|
||||
|
|
@ -659,7 +675,7 @@
|
|||
'os': 'Ubuntu-22.04',
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'shards': 5},
|
||||
{'name': 'v8testing', 'shards': 7},
|
||||
],
|
||||
},
|
||||
'v8_linux64_official_rel': {
|
||||
|
|
@ -713,7 +729,7 @@
|
|||
'os': 'Ubuntu-22.04',
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'shards': 3},
|
||||
{'name': 'v8testing', 'shards': 7},
|
||||
],
|
||||
},
|
||||
'v8_linux64_sticky_mark_bits_dbg': {
|
||||
|
|
@ -735,7 +751,6 @@
|
|||
{'name': 'mozilla', 'shards': 1},
|
||||
{'name': 'optimize_for_size'},
|
||||
{'name': 'test262', 'shards': 6},
|
||||
{'name': 'test262', 'variant': 'extra', 'shards': 6},
|
||||
{'name': 'v8initializers'},
|
||||
{'name': 'v8testing', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
|
||||
|
|
@ -753,6 +768,7 @@
|
|||
{'name': 'v8testing', 'variant': 'stress_maglev_non_eager_inlining'},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_future'},
|
||||
{'name': 'v8testing', 'variant': 'turbolev'},
|
||||
{'name': 'v8testing', 'variant': 'stress_turbolev_future'},
|
||||
# Code serializer.
|
||||
{'name': 'benchmarks', 'variant': 'code_serializer', 'shards': 1},
|
||||
{'name': 'd8testing', 'variant': 'code_serializer', 'shards': 1},
|
||||
|
|
@ -803,7 +819,7 @@
|
|||
{'name': 'mozilla', 'shards': 2},
|
||||
{'name': 'test262', 'variant': 'default', 'shards': 5},
|
||||
{'name': 'v8testing', 'shards': 6},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 6},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 5},
|
||||
{'name': 'v8testing', 'variant': 'slow_path', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'minor_ms', 'shards': 2},
|
||||
|
|
@ -820,7 +836,7 @@
|
|||
{'name': 'mozilla', 'shards': 4},
|
||||
{'name': 'test262', 'variant': 'default', 'shards': 5},
|
||||
{'name': 'v8testing', 'shards': 12},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 10},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 9},
|
||||
],
|
||||
},
|
||||
'v8_linux64_tsan_no_cm_rel': {
|
||||
|
|
@ -857,7 +873,7 @@
|
|||
'cpu': 'x86-64',
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'shards': 3},
|
||||
{'name': 'v8testing', 'shards': 5},
|
||||
],
|
||||
},
|
||||
'v8_linux64_verify_csa_rel': {
|
||||
|
|
@ -865,7 +881,7 @@
|
|||
'os': 'Ubuntu-22.04',
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'shards': 2},
|
||||
{'name': 'v8testing', 'shards': 3},
|
||||
],
|
||||
},
|
||||
##############################################################################
|
||||
|
|
@ -878,7 +894,7 @@
|
|||
{'name': 'mozilla', 'shards': 4},
|
||||
{'name': 'test262', 'variant': 'default', 'shards': 4},
|
||||
{'name': 'v8testing', 'shards': 14},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 14},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 12},
|
||||
{
|
||||
'name': 'v8testing',
|
||||
'suffix': 'all_features',
|
||||
|
|
@ -905,7 +921,7 @@
|
|||
{'name': 'mozilla', 'shards': 4},
|
||||
{'name': 'test262', 'variant': 'default', 'shards': 4},
|
||||
{'name': 'v8testing', 'shards': 14},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 14},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 12},
|
||||
{
|
||||
'name': 'v8testing',
|
||||
'suffix': 'all_features',
|
||||
|
|
@ -935,6 +951,14 @@
|
|||
},
|
||||
##############################################################################
|
||||
# Linux with RISC-V simulators
|
||||
'v8_linux_riscv32_dbg': {
|
||||
'swarming_dimensions': {
|
||||
'os': 'Ubuntu-22.04',
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'shards': 6},
|
||||
],
|
||||
},
|
||||
'v8_linux_riscv32_rel': {
|
||||
'swarming_dimensions': {
|
||||
'os': 'Ubuntu-22.04',
|
||||
|
|
@ -943,6 +967,14 @@
|
|||
{'name': 'v8testing', 'shards': 3},
|
||||
],
|
||||
},
|
||||
'v8_linux64_riscv64_dbg': {
|
||||
'swarming_dimensions': {
|
||||
'os': 'Ubuntu-22.04',
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'shards': 6},
|
||||
],
|
||||
},
|
||||
'v8_linux64_riscv64_rel': {
|
||||
'swarming_dimensions': {
|
||||
'os': 'Ubuntu-22.04',
|
||||
|
|
@ -1075,12 +1107,12 @@
|
|||
},
|
||||
'tests': [
|
||||
{'name': 'mozilla'},
|
||||
{'name': 'test262', 'variant': 'default', 'shards': 4},
|
||||
{'name': 'v8testing', 'shards': 6},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 6},
|
||||
{'name': 'v8testing', 'variant': 'minor_ms'},
|
||||
{'name': 'v8testing', 'variant': 'conservative_stack_scanning'},
|
||||
{'name': 'v8testing', 'variant': 'precise_pinning'},
|
||||
{'name': 'test262', 'variant': 'default', 'shards': 6},
|
||||
{'name': 'v8testing', 'shards': 8},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 7},
|
||||
{'name': 'v8testing', 'variant': 'minor_ms', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'conservative_stack_scanning', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'precise_pinning', 'shards': 2},
|
||||
],
|
||||
},
|
||||
'v8_mac_arm64_gc_stress_dbg': {
|
||||
|
|
@ -1132,6 +1164,7 @@
|
|||
{'name': 'v8testing', 'variant': 'stress_maglev_non_eager_inlining'},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_future'},
|
||||
{'name': 'v8testing', 'variant': 'turbolev'},
|
||||
{'name': 'v8testing', 'variant': 'stress_turbolev_future'},
|
||||
{'name': 'v8testing', 'variant': 'minor_ms'},
|
||||
{'name': 'v8testing', 'variant': 'conservative_stack_scanning'},
|
||||
{'name': 'v8testing', 'variant': 'precise_pinning'},
|
||||
|
|
@ -1144,7 +1177,7 @@
|
|||
'pool': 'chromium.tests',
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing'},
|
||||
{'name': 'v8testing', 'shards': 2},
|
||||
{'name': 'mjsunit', 'variant': 'stress_maglev_tests_with_turbofan'},
|
||||
# Maglev -- move to extra once more architectures are supported.
|
||||
{'name': 'v8testing', 'variant': 'maglev'},
|
||||
|
|
@ -1153,6 +1186,7 @@
|
|||
{'name': 'v8testing', 'variant': 'stress_maglev_non_eager_inlining'},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_future'},
|
||||
{'name': 'v8testing', 'variant': 'turbolev'},
|
||||
{'name': 'v8testing', 'variant': 'stress_turbolev_future'},
|
||||
{'name': 'v8testing', 'variant': 'minor_ms'},
|
||||
{'name': 'v8testing', 'variant': 'conservative_stack_scanning'},
|
||||
{'name': 'v8testing', 'variant': 'precise_pinning'},
|
||||
|
|
@ -1174,6 +1208,7 @@
|
|||
{'name': 'v8testing', 'variant': 'stress_maglev_non_eager_inlining'},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_future'},
|
||||
{'name': 'v8testing', 'variant': 'turbolev'},
|
||||
{'name': 'v8testing', 'variant': 'stress_turbolev_future'},
|
||||
{'name': 'v8testing', 'variant': 'minor_ms'},
|
||||
{'name': 'v8testing', 'variant': 'conservative_stack_scanning'},
|
||||
{'name': 'v8testing', 'variant': 'precise_pinning'},
|
||||
|
|
@ -1286,13 +1321,13 @@
|
|||
{'name': 'mozilla', 'variant': 'extra'},
|
||||
{'name': 'optimize_for_size'},
|
||||
{'name': 'test262', 'shards': 10},
|
||||
{'name': 'test262', 'variant': 'extra', 'shards': 12},
|
||||
{'name': 'test262', 'variant': 'extra', 'shards': 10},
|
||||
{'name': 'v8testing', 'shards': 3},
|
||||
{
|
||||
'name': 'v8testing',
|
||||
'suffix': 'isolates',
|
||||
'test_args': ['--isolates'],
|
||||
'shards': 4
|
||||
'shards': 6,
|
||||
},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 4},
|
||||
# Nosse3.
|
||||
|
|
@ -1377,6 +1412,11 @@
|
|||
'swarming_dimensions': {
|
||||
'os': 'Ubuntu-22.04',
|
||||
},
|
||||
'swarming_task_attrs': {
|
||||
'expiration': 14400,
|
||||
'hard_timeout': 3600,
|
||||
'priority': 35,
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'variant': 'default', 'shards': 8},
|
||||
],
|
||||
|
|
@ -1481,6 +1521,7 @@
|
|||
{'name': 'v8testing', 'variant': 'stress_maglev_non_eager_inlining'},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_future'},
|
||||
{'name': 'v8testing', 'variant': 'turbolev'},
|
||||
{'name': 'v8testing', 'variant': 'stress_turbolev_future'},
|
||||
# Noavx.
|
||||
{
|
||||
'name': 'mozilla',
|
||||
|
|
@ -1573,7 +1614,7 @@
|
|||
{'name': 'mozilla', 'variant': 'extra'},
|
||||
{'name': 'optimize_for_size'},
|
||||
{'name': 'test262', 'shards': 12},
|
||||
{'name': 'test262', 'variant': 'extra', 'shards': 12},
|
||||
{'name': 'test262', 'variant': 'extra', 'shards': 10},
|
||||
{'name': 'v8testing', 'shards': 5},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 4},
|
||||
{'name': 'v8testing', 'variant': 'minor_ms'},
|
||||
|
|
@ -1592,6 +1633,7 @@
|
|||
{'name': 'v8testing', 'variant': 'stress_maglev_non_eager_inlining'},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_future'},
|
||||
{'name': 'v8testing', 'variant': 'turbolev'},
|
||||
{'name': 'v8testing', 'variant': 'stress_turbolev_future'},
|
||||
# Noavx.
|
||||
{
|
||||
'name': 'mozilla',
|
||||
|
|
@ -1603,7 +1645,7 @@
|
|||
'suffix': 'noavx',
|
||||
'variant': 'default',
|
||||
'test_args': ['--extra-flags', '--noenable-avx'],
|
||||
'shards': 2
|
||||
'shards': 3
|
||||
},
|
||||
{
|
||||
'name': 'v8testing',
|
||||
|
|
@ -1615,12 +1657,20 @@
|
|||
{'name': 'benchmarks', 'variant': 'code_serializer', 'shards': 1},
|
||||
{'name': 'd8testing', 'variant': 'code_serializer', 'shards': 1},
|
||||
{'name': 'mozilla', 'variant': 'code_serializer', 'shards': 1},
|
||||
{'name': 'test262', 'variant': 'code_serializer', 'shards': 3},
|
||||
{'name': 'test262', 'variant': 'code_serializer', 'shards': 4},
|
||||
# Jit fuzzing.
|
||||
{'name': 'mjsunit', 'variant': 'jit_fuzzing'},
|
||||
{'name': 'mjsunit', 'variant': 'jit_fuzzing_maglev'},
|
||||
],
|
||||
},
|
||||
'V8 Linux64 - full debug': {
|
||||
'swarming_dimensions': {
|
||||
'os': 'Ubuntu-22.04',
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'variant': 'default', 'shards': 8},
|
||||
],
|
||||
},
|
||||
'V8 Linux64 - minor mc - debug': {
|
||||
'swarming_dimensions': {
|
||||
'cpu': 'x86-64-avx2',
|
||||
|
|
@ -1630,7 +1680,7 @@
|
|||
{'name': 'v8testing', 'variant': 'minor_ms'},
|
||||
{'name': 'benchmarks', 'variant': 'minor_ms'},
|
||||
{'name': 'mozilla', 'variant': 'minor_ms'},
|
||||
{'name': 'test262', 'variant': 'minor_ms', 'shards': 2},
|
||||
{'name': 'test262', 'variant': 'minor_ms', 'shards': 3},
|
||||
],
|
||||
},
|
||||
'V8 Linux64 - disable runtime call stats': {
|
||||
|
|
@ -1647,7 +1697,7 @@
|
|||
},
|
||||
'tests': [
|
||||
# Infra staging.
|
||||
{'name': 'v8testing', 'variant': 'infra_staging', 'shards': 3},
|
||||
{'name': 'v8testing', 'variant': 'infra_staging', 'shards': 4},
|
||||
# Stress sampling.
|
||||
{'name': 'mjsunit', 'variant': 'stress_sampling'},
|
||||
{'name': 'webkit', 'variant': 'stress_sampling'},
|
||||
|
|
@ -1657,8 +1707,12 @@
|
|||
# Experimental regexp engine.
|
||||
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
|
||||
# Variants for maglev.
|
||||
{'name': 'v8testing', 'variant': 'maglev_no_turbofan'},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_no_turbofan'},
|
||||
# Variants maglev + regexp assemble from bc
|
||||
# TODO(437003349) change to maglev_no_turbofan once the regexp project
|
||||
# is complete.
|
||||
{'name': 'v8testing', 'variant': 'maglev_no_turbofan_regexp_from_bc'},
|
||||
{'name': 'mjsunit', 'variant': 'stress_maglev_tracing'},
|
||||
],
|
||||
},
|
||||
'V8 Linux64 - cppgc-non-default - debug': {
|
||||
|
|
@ -1667,7 +1721,7 @@
|
|||
'os': 'Ubuntu-22.04',
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'shards': 3},
|
||||
{'name': 'v8testing', 'shards': 5},
|
||||
],
|
||||
},
|
||||
'V8 Linux64 - debug - perfetto': {
|
||||
|
|
@ -1712,8 +1766,12 @@
|
|||
# Experimental regexp engine.
|
||||
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
|
||||
# Variants for maglev.
|
||||
{'name': 'v8testing', 'variant': 'maglev_no_turbofan'},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_no_turbofan'},
|
||||
# Variants maglev + regexp assemble from bc
|
||||
# TODO(437003349) change to maglev_no_turbofan once the regexp project
|
||||
# is complete.
|
||||
{'name': 'v8testing', 'variant': 'maglev_no_turbofan_regexp_from_bc'},
|
||||
{'name': 'mjsunit', 'variant': 'stress_maglev_tracing'},
|
||||
],
|
||||
},
|
||||
'V8 Linux64 - PKU': {
|
||||
|
|
@ -1829,7 +1887,7 @@
|
|||
'priority': 35,
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'shards': 4},
|
||||
{'name': 'v8testing', 'shards': 6},
|
||||
],
|
||||
},
|
||||
'V8 Linux64 - no pointer compression': {
|
||||
|
|
@ -1855,7 +1913,7 @@
|
|||
'os': 'Ubuntu-22.04',
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'shards': 3},
|
||||
{'name': 'v8testing', 'shards': 4},
|
||||
],
|
||||
},
|
||||
'V8 Linux64 - sandbox testing': {
|
||||
|
|
@ -1892,7 +1950,7 @@
|
|||
'cpu': 'x86-64',
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'shards': 3},
|
||||
{'name': 'v8testing', 'shards': 5},
|
||||
],
|
||||
},
|
||||
'V8 Linux64 - verify csa': {
|
||||
|
|
@ -1971,7 +2029,7 @@
|
|||
{'name': 'mozilla', 'shards': 4},
|
||||
{'name': 'test262', 'variant': 'default', 'shards': 6},
|
||||
{'name': 'v8testing', 'shards': 12},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 12},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 10},
|
||||
],
|
||||
},
|
||||
'V8 Linux64 TSAN - stress-incremental-marking': {
|
||||
|
|
@ -2059,10 +2117,10 @@
|
|||
{'name': 'mozilla'},
|
||||
{'name': 'test262', 'variant': 'default', 'shards': 4},
|
||||
{'name': 'v8testing', 'shards': 6},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 6},
|
||||
{'name': 'v8testing', 'variant': 'minor_ms'},
|
||||
{'name': 'v8testing', 'variant': 'conservative_stack_scanning'},
|
||||
{'name': 'v8testing', 'variant': 'precise_pinning'},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 5},
|
||||
{'name': 'v8testing', 'variant': 'minor_ms', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'conservative_stack_scanning', 'shards': 2},
|
||||
{'name': 'v8testing', 'variant': 'precise_pinning', 'shards': 2},
|
||||
],
|
||||
},
|
||||
'V8 Mac64 ASAN': {
|
||||
|
|
@ -2100,6 +2158,7 @@
|
|||
{'name': 'v8testing', 'variant': 'stress_maglev_non_eager_inlining'},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_future'},
|
||||
{'name': 'v8testing', 'variant': 'turbolev'},
|
||||
{'name': 'v8testing', 'variant': 'stress_turbolev_future'},
|
||||
{'name': 'v8testing', 'variant': 'minor_ms'},
|
||||
{'name': 'v8testing', 'variant': 'conservative_stack_scanning'},
|
||||
{'name': 'v8testing', 'variant': 'precise_pinning'},
|
||||
|
|
@ -2122,6 +2181,7 @@
|
|||
{'name': 'v8testing', 'variant': 'stress_maglev_non_eager_inlining'},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_future'},
|
||||
{'name': 'v8testing', 'variant': 'turbolev'},
|
||||
{'name': 'v8testing', 'variant': 'stress_turbolev_future'},
|
||||
{'name': 'v8testing', 'variant': 'minor_ms'},
|
||||
{'name': 'v8testing', 'variant': 'conservative_stack_scanning'},
|
||||
{'name': 'v8testing', 'variant': 'precise_pinning'},
|
||||
|
|
@ -2144,6 +2204,7 @@
|
|||
{'name': 'v8testing', 'variant': 'stress_maglev_non_eager_inlining'},
|
||||
{'name': 'v8testing', 'variant': 'stress_maglev_future'},
|
||||
{'name': 'v8testing', 'variant': 'turbolev'},
|
||||
{'name': 'v8testing', 'variant': 'stress_turbolev_future'},
|
||||
{'name': 'v8testing', 'variant': 'minor_ms'},
|
||||
{'name': 'v8testing', 'variant': 'conservative_stack_scanning'},
|
||||
{'name': 'v8testing', 'variant': 'precise_pinning'},
|
||||
|
|
@ -2156,7 +2217,7 @@
|
|||
'pool': 'chromium.tests',
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing'},
|
||||
{'name': 'v8testing', 'shards': 2},
|
||||
],
|
||||
},
|
||||
'V8 Win32': {
|
||||
|
|
@ -2333,7 +2394,7 @@
|
|||
{'name': 'mozilla', 'shards': 3},
|
||||
{'name': 'test262', 'variant': 'default', 'shards': 2},
|
||||
{'name': 'v8testing', 'shards': 10},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 10},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 9},
|
||||
{'name': 'v8testing', 'variant': 'turbolev'},
|
||||
# Armv8-a.
|
||||
{
|
||||
|
|
@ -2423,9 +2484,9 @@
|
|||
},
|
||||
'tests': [
|
||||
{'name': 'mozilla', 'shards': 2},
|
||||
{'name': 'test262', 'variant': 'default', 'shards': 2},
|
||||
{'name': 'test262', 'variant': 'default', 'shards': 3},
|
||||
{'name': 'v8testing', 'shards': 12},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 14},
|
||||
{'name': 'v8testing', 'variant': 'extra', 'shards': 12},
|
||||
{
|
||||
'name': 'v8testing',
|
||||
'suffix': 'all_features',
|
||||
|
|
@ -2448,7 +2509,7 @@
|
|||
{
|
||||
'name': 'd8testing',
|
||||
'test_args': ['--gc-stress', '--extra-flags=--verify-heap-skip-remembered-set'],
|
||||
'shards': 7
|
||||
'shards': 12
|
||||
},
|
||||
],
|
||||
},
|
||||
|
|
@ -2492,6 +2553,19 @@
|
|||
{'name': 'v8testing', 'shards': 3},
|
||||
],
|
||||
},
|
||||
'V8 Linux - riscv32 - sim - debug': {
|
||||
'swarming_dimensions': {
|
||||
'os': 'Ubuntu-22.04',
|
||||
},
|
||||
'swarming_task_attrs': {
|
||||
'expiration': 14400,
|
||||
'hard_timeout': 3600,
|
||||
'priority': 35,
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'shards': 6},
|
||||
],
|
||||
},
|
||||
'V8 Linux - riscv32 - sim': {
|
||||
'swarming_dimensions': {
|
||||
'os': 'Ubuntu-22.04',
|
||||
|
|
@ -2505,6 +2579,19 @@
|
|||
{'name': 'v8testing', 'shards': 3},
|
||||
],
|
||||
},
|
||||
'V8 Linux - riscv64 - sim - debug': {
|
||||
'swarming_dimensions': {
|
||||
'os': 'Ubuntu-22.04',
|
||||
},
|
||||
'swarming_task_attrs': {
|
||||
'expiration': 14400,
|
||||
'hard_timeout': 3600,
|
||||
'priority': 35,
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'shards': 6},
|
||||
],
|
||||
},
|
||||
'V8 Linux - riscv64 - sim': {
|
||||
'swarming_dimensions': {
|
||||
'os': 'Ubuntu-22.04',
|
||||
|
|
@ -2584,7 +2671,7 @@
|
|||
'priority': 35,
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'v8testing', 'shards': 7},
|
||||
{'name': 'v8testing', 'shards': 8},
|
||||
],
|
||||
},
|
||||
##############################################################################
|
||||
|
|
@ -2599,6 +2686,11 @@
|
|||
'priority': 35,
|
||||
},
|
||||
'tests': [
|
||||
{
|
||||
'name': 'numfuzz',
|
||||
'suffix': 'bytecode',
|
||||
'test_args': ['--total-timeout-sec=2100', '--stress-bytecode-budget=1']
|
||||
},
|
||||
{
|
||||
'name': 'numfuzz',
|
||||
'suffix': 'deopt',
|
||||
|
|
@ -2631,6 +2723,11 @@
|
|||
'--extra-flags=--gc-interval=500',
|
||||
]
|
||||
},
|
||||
{
|
||||
'name': 'numfuzz',
|
||||
'suffix': 'bytecode',
|
||||
'test_args': ['--total-timeout-sec=4200', '--stress-bytecode-budget=1']
|
||||
},
|
||||
{
|
||||
'name': 'numfuzz',
|
||||
'suffix': 'marking',
|
||||
|
|
@ -2662,6 +2759,7 @@
|
|||
'test_args': [
|
||||
'--total-timeout-sec=4200',
|
||||
'--allocation-offset=2',
|
||||
'--stress-bytecode-budget=1',
|
||||
'--stress-delay-tasks=4',
|
||||
'--stress-deopt=2',
|
||||
'--stress-compaction=2',
|
||||
|
|
@ -2691,7 +2789,7 @@
|
|||
'priority': 35,
|
||||
},
|
||||
'tests': [
|
||||
{'name': 'd8testing_random_gc'},
|
||||
{'name': 'd8testing_random_gc', 'shards': 2},
|
||||
{
|
||||
'name': 'numfuzz',
|
||||
'suffix': 'allocation',
|
||||
|
|
@ -2699,7 +2797,13 @@
|
|||
'--total-timeout-sec=4200',
|
||||
'--allocation-offset=1',
|
||||
'--extra-flags=--gc-interval=500',
|
||||
]
|
||||
],
|
||||
'shards': 2
|
||||
},
|
||||
{
|
||||
'name': 'numfuzz',
|
||||
'suffix': 'bytecode',
|
||||
'test_args': ['--total-timeout-sec=4200', '--stress-bytecode-budget=1']
|
||||
},
|
||||
{
|
||||
'name': 'numfuzz',
|
||||
|
|
@ -2733,6 +2837,7 @@
|
|||
'test_args': [
|
||||
'--total-timeout-sec=4200',
|
||||
'--allocation-offset=2',
|
||||
'--stress-bytecode-budget=1',
|
||||
'--stress-delay-tasks=4',
|
||||
'--stress-deopt=2',
|
||||
'--stress-compaction=2',
|
||||
|
|
@ -2763,6 +2868,11 @@
|
|||
'os': 'Ubuntu-22.04',
|
||||
},
|
||||
'tests': [
|
||||
{
|
||||
'name': 'numfuzz',
|
||||
'suffix': 'bytecode',
|
||||
'test_args': ['--total-timeout-sec=900', '--stress-bytecode-budget=1']
|
||||
},
|
||||
{
|
||||
'name': 'numfuzz',
|
||||
'suffix': 'deopt',
|
||||
|
|
@ -2789,6 +2899,11 @@
|
|||
'--extra-flags=--gc-interval=500',
|
||||
]
|
||||
},
|
||||
{
|
||||
'name': 'numfuzz',
|
||||
'suffix': 'bytecode',
|
||||
'test_args': ['--total-timeout-sec=900', '--stress-bytecode-budget=1']
|
||||
},
|
||||
{
|
||||
'name': 'numfuzz',
|
||||
'suffix': 'marking',
|
||||
|
|
@ -2820,6 +2935,7 @@
|
|||
'test_args': [
|
||||
'--total-timeout-sec=900',
|
||||
'--allocation-offset=2',
|
||||
'--stress-bytecode-budget=1',
|
||||
'--stress-delay-tasks=4',
|
||||
'--stress-deopt=2',
|
||||
'--stress-compaction=2',
|
||||
|
|
@ -2852,6 +2968,11 @@
|
|||
'--extra-flags=--gc-interval=500',
|
||||
]
|
||||
},
|
||||
{
|
||||
'name': 'numfuzz',
|
||||
'suffix': 'bytecode',
|
||||
'test_args': ['--total-timeout-sec=900', '--stress-bytecode-budget=1']
|
||||
},
|
||||
{
|
||||
'name': 'numfuzz',
|
||||
'suffix': 'marking',
|
||||
|
|
@ -2883,6 +3004,7 @@
|
|||
'test_args': [
|
||||
'--total-timeout-sec=900',
|
||||
'--allocation-offset=2',
|
||||
'--stress-bytecode-budget=1',
|
||||
'--stress-delay-tasks=4',
|
||||
'--stress-deopt=2',
|
||||
'--stress-compaction=2',
|
||||
|
|
|
|||
6
deps/v8/src/api/api-natives.cc
vendored
6
deps/v8/src/api/api-natives.cc
vendored
|
|
@ -331,8 +331,8 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
|
|||
const auto new_js_object_type =
|
||||
constructor->has_initial_map() &&
|
||||
IsJSApiWrapperObjectMap(constructor->initial_map())
|
||||
? NewJSObjectType::kAPIWrapper
|
||||
: NewJSObjectType::kNoAPIWrapper;
|
||||
? NewJSObjectType::kMaybeEmbedderFieldsAndApiWrapper
|
||||
: NewJSObjectType::kMaybeEmbedderFieldsAndNoApiWrapper;
|
||||
Handle<JSObject> object;
|
||||
ASSIGN_RETURN_ON_EXCEPTION(
|
||||
isolate, object,
|
||||
|
|
@ -532,7 +532,7 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
|
|||
|
||||
Handle<JSObject> object = isolate->factory()->NewJSObjectFromMap(
|
||||
object_map, AllocationType::kYoung, DirectHandle<AllocationSite>::null(),
|
||||
NewJSObjectType::kAPIWrapper);
|
||||
NewJSObjectType::kMaybeEmbedderFieldsAndApiWrapper);
|
||||
JSObject::ForceSetPrototype(isolate, object,
|
||||
isolate->factory()->null_value());
|
||||
|
||||
|
|
|
|||
148
deps/v8/src/api/api.cc
vendored
148
deps/v8/src/api/api.cc
vendored
|
|
@ -185,8 +185,6 @@
|
|||
|
||||
namespace v8 {
|
||||
|
||||
namespace {
|
||||
|
||||
i::ExternalPointerTag ToExternalPointerTag(v8::EmbedderDataTypeTag api_tag) {
|
||||
uint16_t tag_value = static_cast<uint16_t>(i::kFirstEmbedderDataTag) +
|
||||
static_cast<uint16_t>(api_tag);
|
||||
|
|
@ -195,16 +193,6 @@ i::ExternalPointerTag ToExternalPointerTag(v8::EmbedderDataTypeTag api_tag) {
|
|||
return static_cast<i::ExternalPointerTag>(tag_value);
|
||||
}
|
||||
|
||||
v8::EmbedderDataTypeTag ToApiEmbedderDataTypeTag(i::ExternalPointerTag tag) {
|
||||
DCHECK_GE(tag, i::kFirstEmbedderDataTag);
|
||||
DCHECK_LE(tag, i::kLastEmbedderDataTag);
|
||||
uint16_t tag_value = static_cast<uint16_t>(tag) -
|
||||
static_cast<uint16_t>(i::kFirstEmbedderDataTag);
|
||||
return static_cast<v8::EmbedderDataTypeTag>(tag_value);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
static OOMErrorCallback g_oom_error_callback = nullptr;
|
||||
|
||||
using RCCId = i::RuntimeCallCounterId;
|
||||
|
|
@ -859,6 +847,10 @@ bool Data::IsFunctionTemplate() const {
|
|||
return i::IsFunctionTemplateInfo(*Utils::OpenDirectHandle(this));
|
||||
}
|
||||
|
||||
bool Data::IsDictionaryTemplate() const {
|
||||
return i::IsDictionaryTemplateInfo(*Utils::OpenDirectHandle(this));
|
||||
}
|
||||
|
||||
bool Data::IsContext() const {
|
||||
return i::IsContext(*Utils::OpenDirectHandle(this));
|
||||
}
|
||||
|
|
@ -976,7 +968,8 @@ void Context::SetEmbedderData(int index, v8::Local<Value> value) {
|
|||
*Utils::OpenDirectHandle(*GetEmbedderData(index)));
|
||||
}
|
||||
|
||||
void* Context::SlowGetAlignedPointerFromEmbedderData(int index) {
|
||||
void* Context::SlowGetAlignedPointerFromEmbedderData(int index,
|
||||
EmbedderDataTypeTag tag) {
|
||||
const char* location = "v8::Context::GetAlignedPointerFromEmbedderData()";
|
||||
i::Isolate* i_isolate = i::Isolate::Current();
|
||||
i::HandleScope handle_scope(i_isolate);
|
||||
|
|
@ -984,17 +977,13 @@ void* Context::SlowGetAlignedPointerFromEmbedderData(int index) {
|
|||
EmbedderDataFor(this, index, false, location);
|
||||
if (data.is_null()) return nullptr;
|
||||
void* result;
|
||||
Utils::ApiCheck(i::EmbedderDataSlot(*data, index)
|
||||
.DeprecatedToAlignedPointer(i_isolate, &result),
|
||||
location, "Pointer is not aligned");
|
||||
Utils::ApiCheck(
|
||||
i::EmbedderDataSlot(*data, index)
|
||||
.ToAlignedPointer(i_isolate, &result, ToExternalPointerTag(tag)),
|
||||
location, "Pointer is not aligned");
|
||||
return result;
|
||||
}
|
||||
|
||||
void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
|
||||
SetAlignedPointerInEmbedderData(
|
||||
index, value, ToApiEmbedderDataTypeTag(i::kEmbedderDataSlotPayloadTag));
|
||||
}
|
||||
|
||||
void Context::SetAlignedPointerInEmbedderData(int index, void* value,
|
||||
EmbedderDataTypeTag tag) {
|
||||
const char* location = "v8::Context::SetAlignedPointerInEmbedderData()";
|
||||
|
|
@ -1005,7 +994,7 @@ void Context::SetAlignedPointerInEmbedderData(int index, void* value,
|
|||
.store_aligned_pointer(i_isolate, *data, value,
|
||||
ToExternalPointerTag(tag));
|
||||
Utils::ApiCheck(ok, location, "Pointer is not aligned");
|
||||
DCHECK_EQ(value, GetAlignedPointerFromEmbedderData(index));
|
||||
DCHECK_EQ(value, GetAlignedPointerFromEmbedderData(index, tag));
|
||||
}
|
||||
|
||||
// --- T e m p l a t e ---
|
||||
|
|
@ -2924,11 +2913,6 @@ Local<String> Message::Get() const {
|
|||
return scope.Escape(result);
|
||||
}
|
||||
|
||||
v8::Isolate* Message::GetIsolate() const {
|
||||
i::Isolate* i_isolate = i::Isolate::Current();
|
||||
return reinterpret_cast<Isolate*>(i_isolate);
|
||||
}
|
||||
|
||||
ScriptOrigin Message::GetScriptOrigin() const {
|
||||
auto self = Utils::OpenDirectHandle(this);
|
||||
i::Isolate* i_isolate = i::Isolate::Current();
|
||||
|
|
@ -4721,12 +4705,6 @@ Maybe<bool> SetPrototypeImpl(v8::Object* this_, Local<Context> context,
|
|||
|
||||
} // namespace
|
||||
|
||||
Maybe<bool> v8::Object::SetPrototype(Local<Context> context,
|
||||
Local<Value> value) {
|
||||
static constexpr bool from_javascript = false;
|
||||
return SetPrototypeImpl(this, context, value, from_javascript);
|
||||
}
|
||||
|
||||
Maybe<bool> v8::Object::SetPrototypeV2(Local<Context> context,
|
||||
Local<Value> value) {
|
||||
static constexpr bool from_javascript = true;
|
||||
|
|
@ -5193,7 +5171,8 @@ Local<v8::Context> v8::Object::GetCreationContextChecked() {
|
|||
namespace {
|
||||
V8_INLINE void* GetAlignedPointerFromEmbedderDataInCreationContextImpl(
|
||||
i::DirectHandle<i::JSReceiver> object,
|
||||
i::IsolateForSandbox i_isolate_for_sandbox, int index) {
|
||||
i::IsolateForSandbox i_isolate_for_sandbox, int index,
|
||||
EmbedderDataTypeTag tag) {
|
||||
const char* location =
|
||||
"v8::Object::GetAlignedPointerFromEmbedderDataInCreationContext()";
|
||||
auto maybe_context = object->GetCreationContext();
|
||||
|
|
@ -5228,10 +5207,10 @@ V8_INLINE void* GetAlignedPointerFromEmbedderDataInCreationContextImpl(
|
|||
if (V8_LIKELY(static_cast<unsigned>(index) <
|
||||
static_cast<unsigned>(data->length()))) {
|
||||
void* result;
|
||||
Utils::ApiCheck(
|
||||
i::EmbedderDataSlot(data, index)
|
||||
.DeprecatedToAlignedPointer(i_isolate_for_sandbox, &result),
|
||||
location, "Pointer is not aligned");
|
||||
Utils::ApiCheck(i::EmbedderDataSlot(data, index)
|
||||
.ToAlignedPointer(i_isolate_for_sandbox, &result,
|
||||
ToExternalPointerTag(tag)),
|
||||
location, "Pointer is not aligned");
|
||||
return result;
|
||||
}
|
||||
// Bad index, report an API error.
|
||||
|
|
@ -5243,19 +5222,19 @@ V8_INLINE void* GetAlignedPointerFromEmbedderDataInCreationContextImpl(
|
|||
} // namespace
|
||||
|
||||
void* v8::Object::GetAlignedPointerFromEmbedderDataInCreationContext(
|
||||
v8::Isolate* isolate, int index) {
|
||||
v8::Isolate* isolate, int index, EmbedderDataTypeTag tag) {
|
||||
auto self = Utils::OpenDirectHandle(this);
|
||||
auto i_isolate = reinterpret_cast<i::Isolate*>(isolate);
|
||||
return GetAlignedPointerFromEmbedderDataInCreationContextImpl(self, i_isolate,
|
||||
index);
|
||||
index, tag);
|
||||
}
|
||||
|
||||
void* v8::Object::GetAlignedPointerFromEmbedderDataInCreationContext(
|
||||
int index) {
|
||||
int index, EmbedderDataTypeTag tag) {
|
||||
auto self = Utils::OpenDirectHandle(this);
|
||||
i::IsolateForSandbox isolate = i::GetCurrentIsolateForSandbox();
|
||||
return GetAlignedPointerFromEmbedderDataInCreationContextImpl(self, isolate,
|
||||
index);
|
||||
index, tag);
|
||||
}
|
||||
|
||||
int v8::Object::GetIdentityHash() {
|
||||
|
|
@ -5953,7 +5932,6 @@ void String::WriteV2(Isolate* v8_isolate, uint32_t offset, uint32_t length,
|
|||
|
||||
void String::WriteOneByteV2(Isolate* v8_isolate, uint32_t offset,
|
||||
uint32_t length, uint8_t* buffer, int flags) const {
|
||||
DCHECK(IsOneByte());
|
||||
WriteHelperV2(reinterpret_cast<i::Isolate*>(v8_isolate), this, buffer, offset,
|
||||
length, flags);
|
||||
}
|
||||
|
|
@ -6280,36 +6258,32 @@ void v8::Object::SetInternalField(int index, v8::Local<Data> value) {
|
|||
i::Cast<i::JSObject>(obj)->SetEmbedderField(index, *val);
|
||||
}
|
||||
|
||||
void* v8::Object::SlowGetAlignedPointerFromInternalField(v8::Isolate* isolate,
|
||||
int index) {
|
||||
void* v8::Object::SlowGetAlignedPointerFromInternalField(
|
||||
v8::Isolate* isolate, int index, EmbedderDataTypeTag tag) {
|
||||
auto obj = Utils::OpenDirectHandle(this);
|
||||
const char* location = "v8::Object::GetAlignedPointerFromInternalField()";
|
||||
if (!InternalFieldOK(obj, index, location)) return nullptr;
|
||||
void* result;
|
||||
Utils::ApiCheck(i::EmbedderDataSlot(i::Cast<i::JSObject>(*obj), index)
|
||||
.DeprecatedToAlignedPointer(
|
||||
reinterpret_cast<i::Isolate*>(isolate), &result),
|
||||
.ToAlignedPointer(reinterpret_cast<i::Isolate*>(isolate),
|
||||
&result, ToExternalPointerTag(tag)),
|
||||
location, "Unaligned pointer");
|
||||
return result;
|
||||
}
|
||||
|
||||
void* v8::Object::SlowGetAlignedPointerFromInternalField(int index) {
|
||||
void* v8::Object::SlowGetAlignedPointerFromInternalField(
|
||||
int index, EmbedderDataTypeTag tag) {
|
||||
auto obj = Utils::OpenDirectHandle(this);
|
||||
const char* location = "v8::Object::GetAlignedPointerFromInternalField()";
|
||||
if (!InternalFieldOK(obj, index, location)) return nullptr;
|
||||
void* result;
|
||||
Utils::ApiCheck(
|
||||
i::EmbedderDataSlot(i::Cast<i::JSObject>(*obj), index)
|
||||
.DeprecatedToAlignedPointer(i::Isolate::Current(), &result),
|
||||
location, "Unaligned pointer");
|
||||
Utils::ApiCheck(i::EmbedderDataSlot(i::Cast<i::JSObject>(*obj), index)
|
||||
.ToAlignedPointer(i::Isolate::Current(), &result,
|
||||
ToExternalPointerTag(tag)),
|
||||
location, "Unaligned pointer");
|
||||
return result;
|
||||
}
|
||||
|
||||
void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
|
||||
SetAlignedPointerInInternalField(
|
||||
index, value, ToApiEmbedderDataTypeTag(i::kEmbedderDataSlotPayloadTag));
|
||||
}
|
||||
|
||||
void v8::Object::SetAlignedPointerInInternalField(int index, void* value,
|
||||
EmbedderDataTypeTag tag) {
|
||||
auto obj = Utils::OpenDirectHandle(this);
|
||||
|
|
@ -6321,13 +6295,12 @@ void v8::Object::SetAlignedPointerInInternalField(int index, void* value,
|
|||
.store_aligned_pointer(i::Isolate::Current(), *obj, value,
|
||||
ToExternalPointerTag(tag)),
|
||||
location, "Unaligned pointer");
|
||||
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
|
||||
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index, tag));
|
||||
}
|
||||
|
||||
void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
|
||||
void* values[]) {
|
||||
EmbedderDataTypeTag tag =
|
||||
ToApiEmbedderDataTypeTag(i::kEmbedderDataSlotPayloadTag);
|
||||
EmbedderDataTypeTag tag = kEmbedderDataTypeTagDefault;
|
||||
|
||||
auto obj = Utils::OpenDirectHandle(this);
|
||||
if (!IsJSObject(*obj)) return;
|
||||
|
|
@ -6347,7 +6320,7 @@ void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
|
|||
.store_aligned_pointer(i::Isolate::Current(), *obj, value,
|
||||
ToExternalPointerTag(tag)),
|
||||
location, "Unaligned pointer");
|
||||
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
|
||||
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index, tag));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -7232,8 +7205,6 @@ Maybe<void> Context::DeepFreeze(DeepFreezeDelegate* delegate) {
|
|||
return JustVoid();
|
||||
}
|
||||
|
||||
v8::Isolate* Context::GetIsolate() { return Isolate::GetCurrent(); }
|
||||
|
||||
v8::MicrotaskQueue* Context::GetMicrotaskQueue() {
|
||||
auto env = Utils::OpenDirectHandle(this);
|
||||
Utils::ApiCheck(i::IsNativeContext(*env), "v8::Context::GetMicrotaskQueue",
|
||||
|
|
@ -7825,11 +7796,6 @@ bool v8::String::StringEquals(Local<String> that) const {
|
|||
return self->Equals(*other);
|
||||
}
|
||||
|
||||
Isolate* v8::Object::GetIsolate() {
|
||||
i::Isolate* i_isolate = i::Isolate::Current();
|
||||
return reinterpret_cast<Isolate*>(i_isolate);
|
||||
}
|
||||
|
||||
Local<v8::Object> v8::Object::New(Isolate* v8_isolate) {
|
||||
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
|
||||
ApiRuntimeCallStatsScope rcs_scope(i_isolate, RCCId::kAPI_Object_New);
|
||||
|
|
@ -8342,10 +8308,10 @@ FastIterateResult FastIterateArray(DirectHandle<JSArray> array,
|
|||
DirectHandle<Object> value;
|
||||
if (elements->is_the_hole(i)) {
|
||||
value = Handle<Object>(isolate->factory()->undefined_value());
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
} else if (elements->is_undefined(i)) {
|
||||
value = Handle<Object>(isolate->factory()->undefined_value());
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
} else {
|
||||
value = isolate->factory()->NewNumber(elements->get_scalar(i));
|
||||
}
|
||||
|
|
@ -9748,6 +9714,13 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemoryImpl(
|
|||
return amount;
|
||||
}
|
||||
|
||||
#if V8_VERIFY_WRITE_BARRIERS
|
||||
// Incrementing the number of allocated bytes may trigger GC.
|
||||
i_isolate->main_thread_local_heap()
|
||||
->allocator()
|
||||
->ResetMostRecentYoungAllocation();
|
||||
#endif
|
||||
|
||||
if (amount > i_isolate->heap()->external_memory_limit_for_interrupt()) {
|
||||
HandleExternalMemoryInterrupt();
|
||||
}
|
||||
|
|
@ -10906,17 +10879,10 @@ CALLBACK_SETTER(WasmAsyncResolvePromiseCallback,
|
|||
CALLBACK_SETTER(WasmLoadSourceMapCallback, WasmLoadSourceMapCallback,
|
||||
wasm_load_source_map_callback)
|
||||
|
||||
CALLBACK_SETTER(WasmImportedStringsEnabledCallback,
|
||||
WasmImportedStringsEnabledCallback,
|
||||
wasm_imported_strings_enabled_callback)
|
||||
|
||||
CALLBACK_SETTER(WasmCustomDescriptorsEnabledCallback,
|
||||
WasmCustomDescriptorsEnabledCallback,
|
||||
wasm_custom_descriptors_enabled_callback)
|
||||
|
||||
CALLBACK_SETTER(WasmJSPIEnabledCallback, WasmJSPIEnabledCallback,
|
||||
wasm_jspi_enabled_callback)
|
||||
|
||||
CALLBACK_SETTER(SharedArrayBufferConstructorEnabledCallback,
|
||||
SharedArrayBufferConstructorEnabledCallback,
|
||||
sharedarraybuffer_constructor_enabled_callback)
|
||||
|
|
@ -11926,6 +11892,34 @@ const HeapSnapshot* HeapProfiler::TakeHeapSnapshot(ActivityControl* control,
|
|||
return TakeHeapSnapshot(options);
|
||||
}
|
||||
|
||||
const HeapSnapshot* HeapProfiler::TakeHeapSnapshot(
|
||||
ActivityControl* control, ContextNameResolver* resolver,
|
||||
bool hide_internals, bool capture_numeric_value) {
|
||||
HeapSnapshotOptions options;
|
||||
options.control = control;
|
||||
options.context_name_resolver = resolver;
|
||||
options.snapshot_mode = hide_internals ? HeapSnapshotMode::kRegular
|
||||
: HeapSnapshotMode::kExposeInternals;
|
||||
options.numerics_mode = capture_numeric_value
|
||||
? NumericsMode::kExposeNumericValues
|
||||
: NumericsMode::kHideNumericValues;
|
||||
return TakeHeapSnapshot(options);
|
||||
}
|
||||
|
||||
const HeapSnapshot* HeapProfiler::TakeHeapSnapshot(ActivityControl* control,
|
||||
std::nullptr_t resolver,
|
||||
bool hide_internals,
|
||||
bool capture_numeric_value) {
|
||||
HeapSnapshotOptions options;
|
||||
options.control = control;
|
||||
options.snapshot_mode = hide_internals ? HeapSnapshotMode::kRegular
|
||||
: HeapSnapshotMode::kExposeInternals;
|
||||
options.numerics_mode = capture_numeric_value
|
||||
? NumericsMode::kExposeNumericValues
|
||||
: NumericsMode::kHideNumericValues;
|
||||
return TakeHeapSnapshot(options);
|
||||
}
|
||||
|
||||
std::vector<v8::Local<v8::Value>> HeapProfiler::GetDetachedJSWrapperObjects() {
|
||||
return reinterpret_cast<i::HeapProfiler*>(this)
|
||||
->GetDetachedJSWrapperObjects();
|
||||
|
|
|
|||
3
deps/v8/src/asmjs/asm-parser.cc
vendored

@ -806,7 +806,8 @@ void AsmJsParser::ValidateFunction() {
}

// Check against limit on number of local variables.
if (locals.size() + function_temp_locals_used_ > kV8MaxWasmFunctionLocals) {
if (locals.size() + function_temp_locals_used_ + params.size() >
kV8MaxWasmFunctionLocals) {
FAIL("Number of local variables exceeds internal limit");
}
|
|
@ -70,11 +70,12 @@ void AstFunctionLiteralIdReindexer::VisitClassLiteral(ClassLiteral* expr) {
for (int i = 0; i < props->length(); ++i) {
ClassLiteralProperty* prop = props->at(i);

// Public fields with computed names have their key
// and value present in instance_members_initializer_function, so they will
// Public fields and auto accessors with computed names have their key and
// value present in instance_members_initializer_function, so they will
// already have been visited.
if (prop->is_computed_name() &&
prop->kind() == ClassLiteralProperty::Kind::FIELD) {
(prop->kind() == ClassLiteralProperty::Kind::FIELD ||
(prop->kind() == ClassLiteralProperty::Kind::AUTO_ACCESSOR))) {
if (!prop->key()->IsLiteral()) {
CheckVisited(prop->key());
}

@ -85,6 +86,10 @@ void AstFunctionLiteralIdReindexer::VisitClassLiteral(ClassLiteral* expr) {
}
Visit(prop->value());
}
if (prop->kind() == ClassLiteralProperty::Kind::AUTO_ACCESSOR) {
Visit(prop->auto_accessor_info()->generated_getter());
Visit(prop->auto_accessor_info()->generated_setter());
}
}
}
5
deps/v8/src/ast/prettyprinter.cc
vendored

@ -631,7 +631,10 @@ void CallPrinter::FindArguments(const ZonePtrList<Expression>* arguments) {
void CallPrinter::PrintLiteral(DirectHandle<Object> value, bool quote) {
if (!ShouldPrint()) return;

if (IsString(*value)) {
if (IsAnyHole(*value)) {
// Holes can occur in array literals, and should show up as empty entries.
Print("");
} else if (IsString(*value)) {
if (quote) Print("\"");
Print(Cast<String>(value));
if (quote) Print("\"");
24
deps/v8/src/base/address-region.h
vendored

@ -30,37 +30,37 @@ class AddressRegion {
constexpr AddressRegion(Address address, size_t size)
: address_(address), size_(size) {}

Address begin() const { return address_; }
Address end() const { return address_ + size_; }
constexpr Address begin() const { return address_; }
constexpr Address end() const { return address_ + size_; }

size_t size() const { return size_; }
constexpr size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }

bool is_empty() const { return size_ == 0; }
constexpr bool is_empty() const { return size_ == 0; }

bool contains(Address address) const {
constexpr bool contains(Address address) const {
static_assert(std::is_unsigned_v<Address>);
return (address - begin()) < size();
}

bool contains(Address address, size_t size) const {
constexpr bool contains(Address address, size_t size) const {
static_assert(std::is_unsigned_v<Address>);
Address offset = address - begin();
const Address offset = address - begin();
return (offset < size_) && (offset + size <= size_);
}

bool contains(AddressRegion region) const {
constexpr bool contains(AddressRegion region) const {
return contains(region.address_, region.size_);
}

base::AddressRegion GetOverlap(AddressRegion region) const {
Address overlap_start = std::max(begin(), region.begin());
Address overlap_end =
constexpr base::AddressRegion GetOverlap(AddressRegion region) const {
const Address overlap_start = std::max(begin(), region.begin());
const Address overlap_end =
std::max(overlap_start, std::min(end(), region.end()));
return {overlap_start, overlap_end - overlap_start};
}

bool operator==(AddressRegion other) const {
constexpr bool operator==(AddressRegion other) const {
return address_ == other.address_ && size_ == other.size_;
}
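The address-region.h hunk above makes the AddressRegion accessors and queries constexpr, so region invariants can be evaluated at compile time. A minimal sketch of what that enables, using a stand-alone mirror of the members shown in the diff rather than the real V8 header (the test addresses below are made up):

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Stand-alone mirror of the constexpr surface shown in the hunk above.
class AddressRegion {
 public:
  using Address = uintptr_t;
  constexpr AddressRegion(Address address, size_t size)
      : address_(address), size_(size) {}
  constexpr Address begin() const { return address_; }
  constexpr Address end() const { return address_ + size_; }
  constexpr size_t size() const { return size_; }
  constexpr bool contains(Address address) const {
    return (address - begin()) < size();
  }
  constexpr AddressRegion GetOverlap(AddressRegion region) const {
    const Address overlap_start = std::max(begin(), region.begin());
    const Address overlap_end =
        std::max(overlap_start, std::min(end(), region.end()));
    return {overlap_start, overlap_end - overlap_start};
  }
  constexpr bool operator==(AddressRegion other) const {
    return address_ == other.address_ && size_ == other.size_;
  }

 private:
  Address address_;
  size_t size_;
};

// With constexpr members, layout checks can now run at compile time.
static_assert(AddressRegion(0x1000, 0x100).contains(0x10ff));
static_assert(!AddressRegion(0x1000, 0x100).contains(0x1100));
static_assert(AddressRegion(0x1000, 0x100).GetOverlap({0x1080, 0x100}) ==
              AddressRegion(0x1080, 0x80));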
19
deps/v8/src/base/atomic-utils.h
vendored

@ -134,6 +134,13 @@ class AsAtomicImpl {
cast_helper<T>::to_storage_type(new_value)));
}

template <typename T>
static T Relaxed_FetchOr(T* addr, std::remove_reference_t<T> bits) {
static_assert(sizeof(T) <= sizeof(AtomicStorageType));
return cast_helper<T>::to_return_type(base::Relaxed_FetchOr(
to_storage_addr(addr), cast_helper<T>::to_storage_type(bits)));
}

template <typename T>
static T AcquireRelease_CompareAndSwap(T* addr,
std::remove_reference_t<T> old_value,

@ -187,6 +194,18 @@ class AsAtomicImpl {
return true;
}

// Atomically sets bits selected by the mask to 1. Returns false if the bits
// are already set as needed.
template <typename T>
static bool Relaxed_SetBits(T* addr, T mask) {
static_assert(sizeof(T) <= sizeof(AtomicStorageType));
T old_value = Relaxed_Load(addr);
if ((old_value & mask) == mask) return false;

T old_value_before_fo = Relaxed_FetchOr(addr, mask);
return (old_value_before_fo | mask) != old_value_before_fo;
}

private:
template <typename U>
struct cast_helper {
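The new Relaxed_SetBits helper above first does a plain relaxed load and only falls back to an atomic fetch-OR when some bit in the mask is still clear, reporting whether this call was the one that newly set a bit. A self-contained sketch of the same idea on top of std::atomic (not the V8 AsAtomicImpl wrapper; names and values here are illustrative):

#include <atomic>
#include <cassert>
#include <cstdint>

// Sets the bits in `mask`; returns true only if this call newly set at least
// one of them, mirroring the Relaxed_SetBits logic in the hunk above.
bool RelaxedSetBits(std::atomic<uint8_t>& cell, uint8_t mask) {
  uint8_t old_value = cell.load(std::memory_order_relaxed);
  if ((old_value & mask) == mask) return false;  // Already fully set.
  uint8_t before = cell.fetch_or(mask, std::memory_order_relaxed);
  return (before | mask) != before;  // Some bit was still clear when we OR'd.
}

int main() {
  std::atomic<uint8_t> flags{0b0001};
  bool first = RelaxedSetBits(flags, 0b0110);   // Newly sets two bits.
  bool second = RelaxedSetBits(flags, 0b0110);  // Already set: no-op.
  assert(first && !second);
  assert(flags.load() == 0b0111);
}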
20
deps/v8/src/base/atomicops.h
vendored

@ -125,6 +125,21 @@ inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
return old_value;
}

inline Atomic8 Relaxed_FetchOr(volatile Atomic8* ptr, Atomic8 bits) {
auto old = helper::to_std_atomic(ptr);
return old->fetch_or(bits, std::memory_order_relaxed);
}

inline Atomic16 Relaxed_FetchOr(volatile Atomic16* ptr, Atomic16 bits) {
auto old = helper::to_std_atomic(ptr);
return old->fetch_or(bits, std::memory_order_relaxed);
}

inline Atomic32 Relaxed_FetchOr(volatile Atomic32* ptr, Atomic32 bits) {
auto old = helper::to_std_atomic(ptr);
return old->fetch_or(bits, std::memory_order_relaxed);
}

inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,

@ -276,6 +291,11 @@ inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
return old_value;
}

inline Atomic64 Relaxed_FetchOr(volatile Atomic64* ptr, Atomic64 bits) {
auto old = helper::to_std_atomic(ptr);
return old->fetch_or(bits, std::memory_order_relaxed);
}

inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
27
deps/v8/src/base/bit-field.h
vendored

@ -8,6 +8,7 @@
#include <stdint.h>

#include <algorithm>
#include <type_traits>

#include "src/base/macros.h"

@ -30,6 +31,15 @@ class BitField final {
static_assert(shift + size <= 8 * sizeof(U));
static_assert(size > 0);

// Make sure we don't create bitfields that are too large for their value.
// Carve out an exception for 32-bit size_t, for uniformity between 32-bit
// and 64-bit code.
static_assert(size <= 8 * sizeof(T) ||
(std::is_same_v<T, size_t> && sizeof(size_t) == 4),
"Bitfield is unnecessarily big!");
static_assert(!std::is_same_v<T, bool> || size == 1,
"Bitfield is unnecessarily big!");

using FieldType = T;
using BaseType = U;

@ -53,7 +63,22 @@ class BitField final {

// Returns a type U with the bit field value encoded.
static constexpr U encode(T value) {
DCHECK(is_valid(value));
if constexpr (std::is_enum_v<T> || sizeof(T) * 8 <= kSize ||
std::is_same_v<T, bool>) {
// For enums, we trust that they are within the valid range, since they
// are typed and we assume that the enum itself has a valid value. DCHECK
// just in case (e.g. in case valid enum values are outside the bitfield
// size).
//
// Similarly, if T fits exactly in the bitfield (either in bytes, or
// because bools can be stored as 1 bit), we trust that they are valid.
DCHECK(is_valid(value));
} else {
// For non-enums (in practice, integers), we don't trust that they are
// valid, since we pass them around without static value interval
// information.
CHECK(is_valid(value));
}
return static_cast<U>(value) << kShift;
}
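The encode() change above keeps the cheap debug-only check for enums, bools, and types that fit the field exactly, but upgrades the range check to an always-on CHECK for plain integers, whose value range is not known statically. A stand-alone sketch of that split, using a simplified bit field rather than the V8 template (field widths and the Tier enum below are made up):

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <type_traits>

// Simplified BitField: encodes a T into `size` bits of U starting at `shift`.
template <class T, int shift, int size, class U = uint32_t>
struct BitField {
  static constexpr U kMask = ((U{1} << size) - 1) << shift;
  static constexpr bool is_valid(T value) {
    return (static_cast<U>(value) & ~((U{1} << size) - 1)) == 0;
  }
  static constexpr U encode(T value) {
    if constexpr (std::is_enum_v<T> || std::is_same_v<T, bool>) {
      // Typed values: trusted, so only a debug-time check (DCHECK above).
      assert(is_valid(value));
    } else {
      // Plain integers: always range-checked (CHECK above).
      if (!is_valid(value)) std::abort();
    }
    return static_cast<U>(value) << shift;
  }
  static constexpr T decode(U word) {
    return static_cast<T>((word & kMask) >> shift);
  }
};

enum class Tier : uint8_t { kBaseline = 0, kOptimized = 1 };
using TierField = BitField<Tier, 0, 1>;       // 1 bit for the enum.
using CountField = BitField<uint32_t, 1, 5>;  // 5 bits for an integer count.

int main() {
  uint32_t word = TierField::encode(Tier::kOptimized) | CountField::encode(17);
  assert(TierField::decode(word) == Tier::kOptimized);
  assert(CountField::decode(word) == 17);
}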
7
deps/v8/src/base/bounded-page-allocator.cc
vendored
7
deps/v8/src/base/bounded-page-allocator.cc
vendored
|
|
@ -330,5 +330,12 @@ const char* BoundedPageAllocator::AllocationStatusToString(
|
|||
}
|
||||
}
|
||||
|
||||
BoundedPageAllocator::Stats BoundedPageAllocator::RecordStats() {
|
||||
MutexGuard guard(&mutex_);
|
||||
return {.free_size = region_allocator_.free_size(),
|
||||
.largest_free_region = region_allocator_.GetLargestFreeRegionSize(),
|
||||
.allocation_status = allocation_status_};
|
||||
}
|
||||
|
||||
} // namespace base
|
||||
} // namespace v8
|
||||
|
|
|
|||
8
deps/v8/src/base/bounded-page-allocator.h
vendored
8
deps/v8/src/base/bounded-page-allocator.h
vendored
|
|
@ -64,6 +64,12 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
|
|||
kHintedAddressTakenOrNotFound,
|
||||
};
|
||||
|
||||
struct Stats {
|
||||
size_t free_size = 0;
|
||||
size_t largest_free_region = 0;
|
||||
AllocationStatus allocation_status = AllocationStatus::kSuccess;
|
||||
};
|
||||
|
||||
using Address = uintptr_t;
|
||||
|
||||
static const char* AllocationStatusToString(AllocationStatus);
|
||||
|
|
@ -132,6 +138,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
|
|||
return allocation_status_;
|
||||
}
|
||||
|
||||
Stats RecordStats();
|
||||
|
||||
private:
|
||||
v8::base::Mutex mutex_;
|
||||
const size_t allocate_page_size_;
|
||||
|
|
|
|||
16
deps/v8/src/base/cpu.cc
vendored
16
deps/v8/src/base/cpu.cc
vendored
|
|
@ -56,6 +56,18 @@
|
|||
#include <windows.h>
|
||||
#endif
|
||||
|
||||
#if V8_HOST_ARCH_RISCV64
|
||||
#include <riscv_vector.h>
|
||||
|
||||
// The __riscv_vlenb intrinsic is only available when compiling with the RVV
|
||||
// extension enabled. Use the 'target' attribute to tell the compiler to
|
||||
// compile this function with RVV enabled.
|
||||
// We must not call this function when RVV is not supported by the CPU.
|
||||
__attribute__((target("arch=+v"))) static unsigned vlen_intrinsic() {
|
||||
return static_cast<unsigned>(__riscv_vlenb() * 8);
|
||||
}
|
||||
#endif
|
||||
|
||||
namespace v8 {
|
||||
namespace base {
|
||||
|
||||
|
|
@ -459,6 +471,7 @@ CPU::CPU()
|
|||
is_running_in_vm_(false),
|
||||
has_msa_(false),
|
||||
riscv_mmu_(RV_MMU_MODE::kRiscvSV48),
|
||||
vlen_(kUnknownVlen),
|
||||
has_rvv_(false),
|
||||
has_zba_(false),
|
||||
has_zbb_(false),
|
||||
|
|
@ -1030,6 +1043,9 @@ CPU::CPU()
|
|||
riscv_mmu_ = RV_MMU_MODE::kRiscvSV57;
|
||||
}
|
||||
#endif
|
||||
if (has_rvv_) {
|
||||
vlen_ = vlen_intrinsic();
|
||||
}
|
||||
#endif // V8_HOST_ARCH_RISCV64
|
||||
}
|
||||
|
||||
|
|
|
|||
3
deps/v8/src/base/cpu.h
vendored
3
deps/v8/src/base/cpu.h
vendored
|
|
@ -132,6 +132,7 @@ class V8_BASE_EXPORT CPU final {
|
|||
bool has_msa() const { return has_msa_; }
|
||||
|
||||
// riscv-specific part codes
|
||||
unsigned vlen() const { return vlen_; }
|
||||
bool has_rvv() const { return has_rvv_; }
|
||||
bool has_zba() const { return has_zba_; }
|
||||
bool has_zbb() const { return has_zbb_; }
|
||||
|
|
@ -142,6 +143,7 @@ class V8_BASE_EXPORT CPU final {
|
|||
kRiscvSV57,
|
||||
};
|
||||
RV_MMU_MODE riscv_mmu() const { return riscv_mmu_; }
|
||||
static const unsigned kUnknownVlen = 0;
|
||||
|
||||
private:
|
||||
#if defined(V8_OS_STARBOARD)
|
||||
|
|
@ -206,6 +208,7 @@ class V8_BASE_EXPORT CPU final {
|
|||
bool is_running_in_vm_;
|
||||
bool has_msa_;
|
||||
RV_MMU_MODE riscv_mmu_;
|
||||
unsigned vlen_;
|
||||
bool has_rvv_;
|
||||
bool has_zba_;
|
||||
bool has_zbb_;
|
||||
|
|
|
|||
42 deps/v8/src/base/macros.h vendored

@@ -5,6 +5,7 @@
#ifndef V8_BASE_MACROS_H_
#define V8_BASE_MACROS_H_

#include <bit>
#include <limits>
#include <type_traits>

@@ -122,6 +123,9 @@ char (&ArraySizeHelper(const T (&array)[N]))[N];
  SUPPRESSED_DANGLING_ELSE_WARNING_IF(init; false) {} \
  SUPPRESSED_DANGLING_ELSE_WARNING_ELSE

// -- Copied from chromium's "base/bit_cast.h", but uses `std::bit_cast` instead
// of `__builtin_bit_cast`.
//
// This is an equivalent to C++20's std::bit_cast<>(), but with additional
// warnings. It morally does what `*reinterpret_cast<Dest*>(&source)` does, but
// the cast/deref pair is undefined behavior, while bit_cast<>() isn't.
@@ -134,7 +138,7 @@ char (&ArraySizeHelper(const T (&array)[N]))[N];
namespace v8::base {

template <class Dest, class Source>
V8_INLINE Dest bit_cast(Source const& source) {
V8_INLINE constexpr Dest bit_cast(Source const& source) noexcept {
  static_assert(!std::is_pointer_v<Source>,
                "bit_cast must not be used on pointer types");
  static_assert(!std::is_pointer_v<Dest>,
@@ -150,13 +154,7 @@ V8_INLINE Dest bit_cast(Source const& source) {
      std::is_trivially_copyable_v<Dest>,
      "bit_cast requires the destination type to be trivially copyable");

#if V8_HAS_BUILTIN_BIT_CAST
  return __builtin_bit_cast(Dest, source);
#else
  Dest dest;
  memcpy(&dest, &source, sizeof(dest));
  return dest;
#endif
  return std::bit_cast<Dest, Source>(source);
}

}  // namespace v8::base
@@ -200,34 +198,6 @@ V8_INLINE Dest bit_cast(Source const& source) {
  void operator delete(void*, size_t) { v8::base::OS::Abort(); } \
  void operator delete[](void*, size_t) { v8::base::OS::Abort(); }

// Define V8_USE_ADDRESS_SANITIZER macro.
#if defined(__has_feature)
#if __has_feature(address_sanitizer)
#define V8_USE_ADDRESS_SANITIZER 1
#endif
#endif

// Define V8_USE_HWADDRESS_SANITIZER macro.
#if defined(__has_feature)
#if __has_feature(hwaddress_sanitizer)
#define V8_USE_HWADDRESS_SANITIZER 1
#endif
#endif

// Define V8_USE_MEMORY_SANITIZER macro.
#if defined(__has_feature)
#if __has_feature(memory_sanitizer)
#define V8_USE_MEMORY_SANITIZER 1
#endif
#endif

// Define V8_USE_UNDEFINED_BEHAVIOR_SANITIZER macro.
#if defined(__has_feature)
#if __has_feature(undefined_behavior_sanitizer)
#define V8_USE_UNDEFINED_BEHAVIOR_SANITIZER 1
#endif
#endif

// Define V8_USE_SAFE_STACK macro.
#if defined(__has_feature)
#if __has_feature(safe_stack)
4 deps/v8/src/base/numbers/double.h vendored

@@ -15,10 +15,10 @@ namespace base {

// We assume that doubles and uint64_t have the same endianness.
inline constexpr uint64_t double_to_uint64(double d) {
  return std::bit_cast<uint64_t>(d);
  return base::bit_cast<uint64_t>(d);
}
inline constexpr double uint64_to_double(uint64_t d64) {
  return std::bit_cast<double>(d64);
  return base::bit_cast<double>(d64);
}

// Helper functions for doubles.
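Taken together, the macros.h and double.h hunks above make v8::base::bit_cast a constexpr, noexcept wrapper over C++20's std::bit_cast, so bit-level double conversions can be evaluated at compile time. A minimal standalone sketch of that effect (illustration only, using std::bit_cast directly rather than V8's wrapper):

// Standalone sketch; the function names here are illustrative, not V8's.
#include <bit>
#include <cstdint>

constexpr uint64_t DoubleToUint64(double d) { return std::bit_cast<uint64_t>(d); }
constexpr double Uint64ToDouble(uint64_t u) { return std::bit_cast<double>(u); }

// Because the conversion is constexpr, the round-trip can be checked at
// compile time.
static_assert(Uint64ToDouble(DoubleToUint64(1.5)) == 1.5);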
6 deps/v8/src/base/platform/mutex.cc vendored

@@ -60,17 +60,17 @@ Mutex::Mutex() {
Mutex::~Mutex() { DCHECK_EQ(0, level_); }

void Mutex::Lock() ABSL_NO_THREAD_SAFETY_ANALYSIS {
  native_handle_.Lock();
  native_handle_.lock();
  AssertUnheldAndMark();
}

void Mutex::Unlock() ABSL_NO_THREAD_SAFETY_ANALYSIS {
  AssertHeldAndUnmark();
  native_handle_.Unlock();
  native_handle_.unlock();
}

bool Mutex::TryLock() ABSL_NO_THREAD_SAFETY_ANALYSIS {
  if (!native_handle_.TryLock()) return false;
  if (!native_handle_.try_lock()) return false;
  AssertUnheldAndMark();
  return true;
}
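The mutex.cc hunk switches the native handle calls from Lock/Unlock/TryLock to the lowercase lock/unlock/try_lock spelling, i.e. the standard Lockable naming. A small sketch of why that spelling is useful; the wrapper type below is hypothetical and is not V8's actual handle type:

#include <mutex>

// Hypothetical handle type: because it exposes lock()/unlock()/try_lock(),
// it satisfies the standard Lockable requirements.
class NativeHandle {
 public:
  void lock() { impl_.lock(); }
  void unlock() { impl_.unlock(); }
  bool try_lock() { return impl_.try_lock(); }

 private:
  std::mutex impl_;
};

void Example() {
  NativeHandle handle;
  // std::lock_guard (and std::scoped_lock, std::unique_lock) only need the
  // lowercase Lockable API, so the handle can be used with them directly.
  std::lock_guard<NativeHandle> guard(handle);
}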
14 deps/v8/src/base/platform/platform-posix.cc vendored

@@ -82,19 +82,7 @@
#define MAP_ANONYMOUS MAP_ANON
#endif

/*
 * NOTE: illumos starting with illumos#14418 (pushed April 20th, 2022)
 * prototypes madvise(3C) properly with a `void *` first argument.
 * The only way to detect this outside of configure-time checking is to
 * check for the existence of MEMCNTL_SHARED, which gets defined for the first
 * time in illumos#14418 under the same circumstances save _STRICT_POSIX, which
 * thankfully neither Solaris nor illumos builds of Node or V8 do.
 *
 * If some future illumos push changes the MEMCNTL_SHARED assumptions made
 * above, the illumos check below will have to be revisited. This check
 * will work on both pre-and-post illumos#14418 illumos environments.
 */
#if defined(V8_OS_SOLARIS) && !(defined(__illumos__) && defined(MEMCNTL_SHARED))
#if defined(V8_OS_SOLARIS)
#if (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE > 2) || defined(__EXTENSIONS__)
extern "C" int madvise(caddr_t, size_t, int);
#else
7 deps/v8/src/base/region-allocator.cc vendored

@@ -76,7 +76,7 @@ RegionAllocator::Region* RegionAllocator::FreeListFindRegion(size_t size) {
}

RegionAllocator::Region* RegionAllocator::FreeListFindLargestRegion(
    size_t size) {
    size_t size) const {
  Region* region = nullptr;
  for (Region* free_region : free_regions_) {
    if (free_region->size() < size) continue;
@@ -87,6 +87,11 @@ RegionAllocator::Region* RegionAllocator::FreeListFindLargestRegion(
  return region;
}

size_t RegionAllocator::GetLargestFreeRegionSize() const {
  Region* region = FreeListFindLargestRegion(0);
  return region != nullptr ? region->size() : 0;
}

void RegionAllocator::FreeListRemoveRegion(Region* region) {
  DCHECK(region->is_free());
  auto iter = free_regions_.find(region);
4 deps/v8/src/base/region-allocator.h vendored

@@ -140,6 +140,8 @@ class V8_BASE_EXPORT RegionAllocator final {
  // Total size of not yet acquired regions.
  size_t free_size() const { return free_size_; }

  size_t GetLargestFreeRegionSize() const;

  // The alignment of the allocated region's addresses and granularity of
  // the allocated region's sizes.
  size_t page_size() const { return page_size_; }
@@ -213,7 +215,7 @@ class V8_BASE_EXPORT RegionAllocator final {
  Region* FreeListFindRegion(size_t size);

  // Finds largest free region for given size.
  Region* FreeListFindLargestRegion(size_t size);
  Region* FreeListFindLargestRegion(size_t size) const;

  // Removes given region from the set of free regions.
  void FreeListRemoveRegion(Region* region);
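For context, the new GetLargestFreeRegionSize accessor is a query over the allocator's free list: it reports the size of the single biggest free region, which a caller can use to predict whether a large contiguous allocation could succeed. A minimal standalone sketch of that idea (not V8's RegionAllocator; the FreeList type and member names here are illustrative only):

#include <cstddef>
#include <cstdint>
#include <map>

struct FreeList {
  std::map<uintptr_t, size_t> regions;  // start address -> region size

  // Scans all free regions and returns the size of the largest one,
  // or 0 when the free list is empty.
  size_t GetLargestFreeRegionSize() const {
    size_t largest = 0;
    for (const auto& [start, size] : regions) {
      if (size > largest) largest = size;
    }
    return largest;
  }
};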
deps/v8/src/base/template-meta-programming/list.h vendored

@@ -6,8 +6,11 @@
#define V8_BASE_TEMPLATE_META_PROGRAMMING_LIST_H_

#include <cstddef>
#include <initializer_list>
#include <limits>
#include <tuple>
#include <type_traits>
#include <utility>

#include "src/base/template-meta-programming/common.h"

@@ -98,6 +101,13 @@ template <typename Head, typename... Tail,
struct all_equal_impl<list<Head, Tail...>, Cmp>
    : std::bool_constant<(Cmp<Head, Tail>::value && ...)> {};

template <typename List, typename T>
struct append_impl;
template <typename... Ts, typename T>
struct append_impl<list<Ts...>, T> {
  using type = list<Ts..., T>;
};

template <size_t I, typename T, typename Before, typename After>
struct insert_at_impl;
template <size_t I, typename T, typename... Before, typename Head,
@@ -154,6 +164,25 @@ struct fold_right1_impl<F, T, list1<>> {
  using type = T;
};

template <typename Tuple>
struct from_tuple_impl;
template <typename... Ts>
struct from_tuple_impl<std::tuple<Ts...>> {
  using type = list<Ts...>;
};

template <typename List, template <typename, size_t> typename Fn, typename Seq>
struct call_foreach_impl;
template <typename... Ts, template <typename, size_t> typename Fn,
          size_t... Indices>
struct call_foreach_impl<list<Ts...>, Fn, std::index_sequence<Indices...>> {
  template <typename... Args>
  static void call(Args&&... args) {
    std::initializer_list<int> _{
        (Fn<Ts, Indices>{}(std::forward<Args>(args)...), 0)...};
  }
};

}  // namespace detail

// length<List>::value is the length of the {List}.
@@ -211,7 +240,13 @@ struct all_equal : detail::all_equal_impl<List, Cmp> {};
template <typename List, template <typename, typename> typename Cmp = equals>
constexpr bool all_equal_v = all_equal<List, Cmp>::value;

// insert_at<List, I, T>::value is identical to {List}, except that {T} is
// append<List, T>::value appends {T} to {List}.
template <typename List, typename T>
struct append : public detail::append_impl<List, T> {};
template <typename List, typename T>
using append_t = append<List, T>::type;

// insert_at<List, I, T>::type is identical to {List}, except that {T} is
// inserted at position {I}. If {I} is larger than the length of the list, {T}
// is simply appended.
template <typename List, size_t I, typename T>
@@ -238,6 +273,23 @@ struct fold_right1 : public detail::fold_right1_impl<F, T, List1> {};
template <template <TYPENAME1, typename> typename F, typename List1, typename T>
using fold_right1_t = fold_right1<F, List1, T>::type;

// from_tuple<Tuple>::type defines a type list of the types contained in the
// tuple (in order).
template <typename Tuple>
struct from_tuple : public detail::from_tuple_impl<Tuple> {};
template <typename Tuple>
using from_tuple_t = from_tuple<Tuple>::type;

// call_foreach instantiates Fn<T, I> for each element T (at index I) of the
// list and then invokes the operator() with the args passed.
template <typename List, template <typename, size_t> typename Fn,
          typename... Args>
void call_foreach(Args&&... args) {
  detail::call_foreach_impl<List, Fn,
                            std::make_index_sequence<length_v<List>>>::
      call(std::forward<Args>(args)...);
}

}  // namespace v8::base::tmp

#undef TYPENAME1
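To make the new from_tuple / call_foreach utilities above concrete, here is a standalone, simplified re-creation of the technique (it does not use V8's header and omits the rest of the tmp library): a functor template Fn<T, I> is instantiated once per list element and index, and each instantiation is invoked with the same arguments. The names PrintSize and the example types are illustrative only.

#include <cstddef>
#include <iostream>
#include <tuple>
#include <utility>

template <typename... Ts>
struct list {};

template <typename Tuple>
struct from_tuple;
template <typename... Ts>
struct from_tuple<std::tuple<Ts...>> {
  using type = list<Ts...>;
};

template <typename List>
struct length;
template <typename... Ts>
struct length<list<Ts...>> {
  static constexpr size_t value = sizeof...(Ts);
};

// Example functor template: instantiated once per (type, index) pair.
template <typename T, size_t I>
struct PrintSize {
  void operator()(const char* prefix) const {
    std::cout << prefix << I << ": " << sizeof(T) << " bytes\n";
  }
};

template <typename List, template <typename, size_t> typename Fn, typename Seq>
struct call_foreach_impl;
template <typename... Ts, template <typename, size_t> typename Fn, size_t... Is>
struct call_foreach_impl<list<Ts...>, Fn, std::index_sequence<Is...>> {
  template <typename... Args>
  static void call(Args&&... args) {
    (Fn<Ts, Is>{}(args...), ...);  // fold expression: one call per element
  }
};

template <typename List, template <typename, size_t> typename Fn,
          typename... Args>
void call_foreach(Args&&... args) {
  call_foreach_impl<List, Fn, std::make_index_sequence<length<List>::value>>::
      call(std::forward<Args>(args)...);
}

int main() {
  using L = from_tuple<std::tuple<int, double, char>>::type;
  call_foreach<L, PrintSize>("element ");  // prints the size of each type
}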
19 deps/v8/src/baseline/baseline-compiler.cc vendored

@@ -634,7 +634,6 @@ constexpr static bool BuiltinMayDeopt(Builtin id) {
    case Builtin::kStoreCurrentContextElementBaseline:
    // This one explicitly skips the construct if the debugger is enabled.
    case Builtin::kFindNonDefaultConstructorOrConstruct:
    case Builtin::kForOfNextBaseline:
      return false;
    default:
      return true;
@@ -1016,8 +1015,22 @@ void BaselineCompiler::VisitStaModuleVariable() {
}

void BaselineCompiler::VisitSetPrototypeProperties() {
  CallRuntime(Runtime::kSetPrototypeProperties, kInterpreterAccumulatorRegister,
              Constant<ObjectBoilerplateDescription>(0));
  BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
  Register feedback_array = scratch_scope.AcquireScratch();
  LoadClosureFeedbackArray(feedback_array);

  CallRuntime(Runtime::kSetPrototypeProperties,
              // The object upon whose prototype boilerplate shall be applied
              kInterpreterAccumulatorRegister,
              // ObjectBoilerplateDescription whose properties will be merged in
              // to the above object
              Constant<ObjectBoilerplateDescription>(0),
              // Array of feedback cells. Needed to instantiate
              // ShareFunctionInfo(s) from the boilerplate
              feedback_array,
              // Index of the feedback cell of the first ShareFunctionInfo. We
              // may assume all other SFI to be tightly packed.
              IndexAsSmi(1));
}

void BaselineCompiler::VisitSetNamedProperty() {
@@ -170,7 +170,7 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
  // todo: compress pointer
  __ AssertSmi(lhs);
  __ AssertSmi(rhs);
  __ CompareTaggedAndBranch(target, cc, lhs, Operand(rhs), distance);
  __ CompareTaggedAndBranch(target, cc, lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
                                     MemOperand operand, Label* target,
@@ -179,7 +179,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  __ LoadWord(scratch, operand);
  __ CompareTaggedAndBranch(target, cc, value, Operand(scratch), distance);
  __ CompareTaggedAndBranch(target, cc, value, Operand(scratch));
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
                                     Register value, Label* target,
@@ -188,7 +188,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  __ LoadWord(scratch, operand);
  __ CompareTaggedAndBranch(target, cc, scratch, Operand(value), distance);
  __ CompareTaggedAndBranch(target, cc, scratch, Operand(value));
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
                                   Label* target, Label::Distance distance) {
@@ -523,7 +523,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
  // We're going to use pc-relative addressing to load from the jump table,
  // so we need to block trampoline pool emission for the entire length of
  // the table including the preamble.
  MacroAssembler::BlockTrampolinePoolScope block(
  MacroAssembler::BlockPoolsScope block_pools(
      masm_, (2 + 5 + num_labels * 2) * kInstrSize);

  int64_t imm64;
17 deps/v8/src/bigint/vector-arithmetic.cc vendored

@@ -13,6 +13,11 @@ namespace bigint {
digit_t AddAndReturnOverflow(RWDigits Z, Digits X) {
  X.Normalize();
  if (X.len() == 0) return 0;
  // Here and below: callers are careful to pass sufficiently large result
  // storage Z. If that ever goes wrong, then something is corrupted; could
  // be a concurrent-mutation attack. So we harden against that with Release-
  // mode CHECKs.
  CHECK(Z.len() >= X.len());
  digit_t carry = 0;
  uint32_t i = 0;
  for (; i < X.len(); i++) {
@@ -27,6 +32,7 @@ digit_t AddAndReturnOverflow(RWDigits Z, Digits X) {
digit_t SubAndReturnBorrow(RWDigits Z, Digits X) {
  X.Normalize();
  if (X.len() == 0) return 0;
  CHECK(Z.len() >= X.len());
  digit_t borrow = 0;
  uint32_t i = 0;
  for (; i < X.len(); i++) {
@@ -39,9 +45,8 @@ digit_t SubAndReturnBorrow(RWDigits Z, Digits X) {
}

void Add(RWDigits Z, Digits X, Digits Y) {
  if (X.len() < Y.len()) {
    return Add(Z, Y, X);
  }
  if (X.len() < Y.len()) std::swap(X, Y);  // Now X.len() >= Y.len().
  CHECK(Z.len() >= X.len());
  uint32_t i = 0;
  digit_t carry = 0;
  for (; i < Y.len(); i++) {
@@ -59,7 +64,7 @@ void Add(RWDigits Z, Digits X, Digits Y) {
void Subtract(RWDigits Z, Digits X, Digits Y) {
  X.Normalize();
  Y.Normalize();
  DCHECK(X.len() >= Y.len());
  CHECK(Z.len() >= X.len() && X.len() >= Y.len());
  uint32_t i = 0;
  digit_t borrow = 0;
  for (; i < Y.len(); i++) {
@@ -73,7 +78,7 @@ void Subtract(RWDigits Z, Digits X, Digits Y) {
}

digit_t AddAndReturnCarry(RWDigits Z, Digits X, Digits Y) {
  DCHECK(Z.len() >= Y.len() && X.len() >= Y.len());
  CHECK(Z.len() >= Y.len() && X.len() >= Y.len());
  digit_t carry = 0;
  for (uint32_t i = 0; i < Y.len(); i++) {
    Z[i] = digit_add3(X[i], Y[i], carry, &carry);
@@ -82,7 +87,7 @@ digit_t AddAndReturnCarry(RWDigits Z, Digits X, Digits Y) {
}

digit_t SubtractAndReturnBorrow(RWDigits Z, Digits X, Digits Y) {
  DCHECK(Z.len() >= Y.len() && X.len() >= Y.len());
  CHECK(Z.len() >= Y.len() && X.len() >= Y.len());
  digit_t borrow = 0;
  for (uint32_t i = 0; i < Y.len(); i++) {
    Z[i] = digit_sub2(X[i], Y[i], borrow, &borrow);
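For readers unfamiliar with the digit loops that the CHECKs above harden, here is a standalone sketch of the underlying add-with-carry pattern. It is an illustration only: the real code works on its own Digits/RWDigits views and uses digit_add3, while this sketch uses std::vector and open-codes the carry handling.

#include <cstdint>
#include <vector>

using digit_t = uint64_t;

// Adds X into Z in place (Z must have at least X.size() digits) and returns
// the final carry, i.e. the "overflow" out of the top digit of X.
digit_t AddInPlace(std::vector<digit_t>& Z, const std::vector<digit_t>& X) {
  digit_t carry = 0;
  size_t i = 0;
  for (; i < X.size(); ++i) {
    digit_t sum = Z[i] + X[i];
    digit_t new_carry = sum < X[i];  // unsigned wrap-around detects overflow
    sum += carry;
    new_carry += (sum < carry);      // adding the old carry may overflow too
    Z[i] = sum;
    carry = new_carry;
  }
  // Propagate a remaining carry into the higher digits of Z, if any.
  for (; i < Z.size() && carry != 0; ++i) {
    digit_t sum = Z[i] + carry;
    carry = (sum < carry);
    Z[i] = sum;
  }
  return carry;
}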
238
deps/v8/src/builtins/arm/builtins-arm.cc
vendored
238
deps/v8/src/builtins/arm/builtins-arm.cc
vendored
|
|
@ -2063,6 +2063,82 @@ void Builtins::Generate_MaglevFunctionEntryStackCheck(MacroAssembler* masm,
|
|||
|
||||
#endif // V8_ENABLE_MAGLEV
|
||||
|
||||
static void GenerateCall(MacroAssembler* masm, Register argc, Register target,
|
||||
ConvertReceiverMode mode,
|
||||
std::optional<RootIndex> error_string_root) {
|
||||
Register map = r4;
|
||||
Register instance_type = r5;
|
||||
Register scratch = r6;
|
||||
DCHECK(!AreAliased(argc, target, map, instance_type, scratch));
|
||||
|
||||
Label non_callable, class_constructor;
|
||||
__ JumpIfSmi(target, &non_callable);
|
||||
__ LoadMap(map, target);
|
||||
__ CompareInstanceTypeRange(map, instance_type, scratch,
|
||||
FIRST_CALLABLE_JS_FUNCTION_TYPE,
|
||||
LAST_CALLABLE_JS_FUNCTION_TYPE);
|
||||
__ TailCallBuiltin(Builtins::CallFunction(mode), ls);
|
||||
__ cmp(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallBoundFunction, eq);
|
||||
|
||||
// Check if target has a [[Call]] internal method.
|
||||
{
|
||||
Register flags = r4;
|
||||
DCHECK(!AreAliased(argc, target, flags));
|
||||
__ ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
|
||||
map = no_reg;
|
||||
__ tst(flags, Operand(Map::Bits1::IsCallableBit::kMask));
|
||||
__ b(eq, &non_callable);
|
||||
}
|
||||
|
||||
// Check if target is a proxy and call CallProxy external builtin
|
||||
__ cmp(instance_type, Operand(JS_PROXY_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallProxy, eq);
|
||||
|
||||
// Check if target is a wrapped function and call CallWrappedFunction external
|
||||
// builtin
|
||||
__ cmp(instance_type, Operand(JS_WRAPPED_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallWrappedFunction, eq);
|
||||
|
||||
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
||||
// Check that the function is not a "classConstructor".
|
||||
__ cmp(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
|
||||
__ b(eq, &class_constructor);
|
||||
|
||||
// 2. Call to something else, which might have a [[Call]] internal method (if
|
||||
// not we raise an exception).
|
||||
// Overwrite the original receiver with the (original) target.
|
||||
__ str(target, __ ReceiverOperand());
|
||||
// Let the "call_as_function_delegate" take care of the rest.
|
||||
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
|
||||
__ TailCallBuiltin(
|
||||
Builtins::CallFunction(ConvertReceiverMode::kNotNullOrUndefined));
|
||||
|
||||
// 3. Call to something that is not callable.
|
||||
__ bind(&non_callable);
|
||||
{
|
||||
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
|
||||
if (!error_string_root.has_value()) {
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowCalledNonCallable);
|
||||
} else {
|
||||
__ LoadRoot(r2, error_string_root.value());
|
||||
__ Push(target, r2);
|
||||
__ CallRuntime(Runtime::kThrowTargetNonFunction);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
}
|
||||
|
||||
// 4. The function is a "classConstructor", need to raise an exception.
|
||||
__ bind(&class_constructor);
|
||||
{
|
||||
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
}
|
||||
|
||||
// static
|
||||
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
||||
// ----------- S t a t e -------------
|
||||
|
|
@ -2109,7 +2185,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
|||
__ bind(&no_arguments);
|
||||
{
|
||||
__ mov(r0, Operand(JSParameterCount(0)));
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
GenerateCall(masm, r0, r1, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_apply_string);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2133,7 +2210,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
|
|||
__ sub(r0, r0, Operand(1));
|
||||
|
||||
// 4. Call the callable.
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
GenerateCall(masm, r0, r1, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_call_string);
|
||||
}
|
||||
|
||||
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
|
||||
|
|
@ -2607,70 +2685,8 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
|
|||
// -- r1 : the target to call (can be any Object).
|
||||
// -----------------------------------
|
||||
Register target = r1;
|
||||
Register map = r4;
|
||||
Register instance_type = r5;
|
||||
Register scratch = r6;
|
||||
DCHECK(!AreAliased(r0, target, map, instance_type));
|
||||
|
||||
Label non_callable, class_constructor;
|
||||
__ JumpIfSmi(target, &non_callable);
|
||||
__ LoadMap(map, target);
|
||||
__ CompareInstanceTypeRange(map, instance_type, scratch,
|
||||
FIRST_CALLABLE_JS_FUNCTION_TYPE,
|
||||
LAST_CALLABLE_JS_FUNCTION_TYPE);
|
||||
__ TailCallBuiltin(Builtins::CallFunction(mode), ls);
|
||||
__ cmp(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallBoundFunction, eq);
|
||||
|
||||
// Check if target has a [[Call]] internal method.
|
||||
{
|
||||
Register flags = r4;
|
||||
__ ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
|
||||
map = no_reg;
|
||||
__ tst(flags, Operand(Map::Bits1::IsCallableBit::kMask));
|
||||
__ b(eq, &non_callable);
|
||||
}
|
||||
|
||||
// Check if target is a proxy and call CallProxy external builtin
|
||||
__ cmp(instance_type, Operand(JS_PROXY_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallProxy, eq);
|
||||
|
||||
// Check if target is a wrapped function and call CallWrappedFunction external
|
||||
// builtin
|
||||
__ cmp(instance_type, Operand(JS_WRAPPED_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallWrappedFunction, eq);
|
||||
|
||||
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
||||
// Check that the function is not a "classConstructor".
|
||||
__ cmp(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
|
||||
__ b(eq, &class_constructor);
|
||||
|
||||
// 2. Call to something else, which might have a [[Call]] internal method (if
|
||||
// not we raise an exception).
|
||||
// Overwrite the original receiver the (original) target.
|
||||
__ str(target, __ ReceiverOperand());
|
||||
// Let the "call_as_function_delegate" take care of the rest.
|
||||
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
|
||||
__ TailCallBuiltin(
|
||||
Builtins::CallFunction(ConvertReceiverMode::kNotNullOrUndefined));
|
||||
|
||||
// 3. Call to something that is not callable.
|
||||
__ bind(&non_callable);
|
||||
{
|
||||
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowCalledNonCallable);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
|
||||
// 4. The function is a "classConstructor", need to raise an exception.
|
||||
__ bind(&class_constructor);
|
||||
{
|
||||
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
GenerateCall(masm, r0, target, mode, std::nullopt);
|
||||
}
|
||||
|
||||
// static
|
||||
|
|
@ -3017,12 +3033,9 @@ void ReloadParentStack(MacroAssembler* masm, Register return_reg,
|
|||
Register parent = tmp2;
|
||||
__ ldr(parent, MemOperand(active_stack, wasm::kStackParentOffset));
|
||||
|
||||
// Update active stack.
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), parent);
|
||||
|
||||
// Switch stack!
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), active_stack,
|
||||
nullptr, no_reg, {return_reg, return_value, context, parent});
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), parent, nullptr,
|
||||
no_reg, {return_reg, return_value, context, parent});
|
||||
LoadJumpBuffer(masm, parent, false, tmp3);
|
||||
}
|
||||
|
||||
|
|
@ -3228,48 +3241,20 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
|
|||
// Set a sentinel value for the spill slots visited by the GC.
|
||||
ResetWasmJspiFrameStackSlots(masm);
|
||||
|
||||
// -------------------------------------------
|
||||
// Save current state in active jump buffer.
|
||||
// -------------------------------------------
|
||||
Label resume;
|
||||
DEFINE_REG(stack);
|
||||
__ LoadRootRelative(stack, IsolateData::active_stack_offset());
|
||||
regs.ResetExcept(suspender, stack);
|
||||
|
||||
DEFINE_REG(suspender_stack);
|
||||
__ ldr(suspender_stack,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kStackOffset));
|
||||
if (v8_flags.debug_code) {
|
||||
// -------------------------------------------
|
||||
// Check that the suspender's stack is the active stack.
|
||||
// -------------------------------------------
|
||||
// TODO(thibaudm): Once we add core stack-switching instructions, this
|
||||
// check will not hold anymore: it's possible that the active stack
|
||||
// changed (due to an internal switch), so we have to update the suspender.
|
||||
__ cmp(suspender_stack, stack);
|
||||
Label ok;
|
||||
__ b(&ok, eq);
|
||||
__ Trap();
|
||||
__ bind(&ok);
|
||||
}
|
||||
// -------------------------------------------
|
||||
// Update roots.
|
||||
// -------------------------------------------
|
||||
DEFINE_REG(caller);
|
||||
__ ldr(caller, MemOperand(suspender_stack, wasm::kStackParentOffset));
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), caller);
|
||||
DEFINE_REG(parent);
|
||||
__ LoadTaggedField(
|
||||
parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
|
||||
__ StoreRootRelative(IsolateData::active_suspender_offset(), parent);
|
||||
regs.ResetExcept(suspender, caller, stack);
|
||||
DEFINE_REG(target_stack);
|
||||
__ ldr(target_stack,
|
||||
FieldMemOperand(parent, WasmSuspenderObject::kStackOffset));
|
||||
|
||||
// -------------------------------------------
|
||||
// Load jump buffer.
|
||||
// -------------------------------------------
|
||||
SwitchStacks(masm, ExternalReference::wasm_suspend_stack(), stack, &resume,
|
||||
no_reg, {caller, suspender});
|
||||
FREE_REG(stack);
|
||||
SwitchStacks(masm, ExternalReference::wasm_suspend_stack(), target_stack,
|
||||
&resume, no_reg, {target_stack, suspender, parent});
|
||||
__ StoreRootRelative(IsolateData::active_suspender_offset(), parent);
|
||||
__ LoadTaggedField(
|
||||
kReturnRegister0,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kPromiseOffset));
|
||||
|
|
@ -3277,7 +3262,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
|
|||
MemOperand(fp, WasmJspiFrameConstants::kGCScanSlotCountOffset);
|
||||
__ Zero(GCScanSlotPlace);
|
||||
DEFINE_REG(scratch);
|
||||
LoadJumpBuffer(masm, caller, true, scratch);
|
||||
LoadJumpBuffer(masm, target_stack, true, scratch);
|
||||
if (v8_flags.debug_code) {
|
||||
__ Trap();
|
||||
}
|
||||
|
|
@ -3340,8 +3325,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
|
|||
__ ldr(target_stack,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kStackOffset));
|
||||
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), target_stack);
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_stack(), active_stack,
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_jspi_stack(), target_stack,
|
||||
&suspend, suspender, {target_stack});
|
||||
regs.ResetExcept(target_stack);
|
||||
|
||||
|
|
@ -3387,6 +3371,30 @@ void Builtins::Generate_WasmReject(MacroAssembler* masm) {
|
|||
Generate_WasmResumeHelper(masm, wasm::OnResume::kThrow);
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmFXResume(MacroAssembler* masm) {
|
||||
__ EnterFrame(StackFrame::WASM_STACK_EXIT);
|
||||
Register target_stack = WasmFXResumeDescriptor::GetRegisterParameter(0);
|
||||
Label suspend;
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_wasmfx_stack(),
|
||||
target_stack, &suspend, no_reg, {target_stack});
|
||||
LoadJumpBuffer(masm, target_stack, true, r1);
|
||||
__ Trap();
|
||||
__ bind(&suspend);
|
||||
__ LeaveFrame(StackFrame::WASM_STACK_EXIT);
|
||||
__ Jump(lr);
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) {
|
||||
Register active_stack = r0;
|
||||
__ LoadRootRelative(active_stack, IsolateData::active_stack_offset());
|
||||
Register parent = r1;
|
||||
__ Move(parent, MemOperand(active_stack, wasm::kStackParentOffset));
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), parent, nullptr,
|
||||
no_reg, {parent});
|
||||
LoadJumpBuffer(masm, parent, true, r2);
|
||||
__ Trap();
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
|
||||
// Only needed on x64.
|
||||
__ Trap();
|
||||
|
|
@ -3399,19 +3407,17 @@ void SwitchToAllocatedStack(MacroAssembler* masm, RegisterAllocator& regs,
|
|||
Label* suspend) {
|
||||
ResetWasmJspiFrameStackSlots(masm);
|
||||
DEFINE_SCOPED(scratch)
|
||||
DEFINE_REG(target_stack)
|
||||
__ LoadRootRelative(target_stack, IsolateData::active_stack_offset());
|
||||
DEFINE_REG(parent_stack)
|
||||
__ ldr(parent_stack, MemOperand(target_stack, wasm::kStackParentOffset));
|
||||
|
||||
SwitchStacks(masm, ExternalReference::wasm_start_stack(), parent_stack,
|
||||
suspend, no_reg, {wasm_instance, wrapper_buffer});
|
||||
|
||||
FREE_REG(parent_stack);
|
||||
DEFINE_REG(stack)
|
||||
__ LoadRootRelative(stack, IsolateData::active_suspender_offset());
|
||||
__ ldr(stack, FieldMemOperand(stack, WasmSuspenderObject::kStackOffset));
|
||||
SwitchStacks(masm, ExternalReference::wasm_start_stack(), stack, suspend,
|
||||
no_reg, {wasm_instance, wrapper_buffer});
|
||||
FREE_REG(stack);
|
||||
// Save the old stack's fp in x9, and use it to access the parameters in
|
||||
// the parent frame.
|
||||
regs.Pinned(r9, &original_fp);
|
||||
__ Move(original_fp, fp);
|
||||
DEFINE_REG(target_stack)
|
||||
__ LoadRootRelative(target_stack, IsolateData::active_stack_offset());
|
||||
LoadTargetJumpBuffer(masm, target_stack, scratch);
|
||||
FREE_REG(target_stack);
|
||||
|
|
|
|||
263
deps/v8/src/builtins/arm64/builtins-arm64.cc
vendored
263
deps/v8/src/builtins/arm64/builtins-arm64.cc
vendored
|
|
@ -2379,6 +2379,85 @@ void Builtins::Generate_MaglevFunctionEntryStackCheck(MacroAssembler* masm,
|
|||
|
||||
#endif // V8_ENABLE_MAGLEV
|
||||
|
||||
static void GenerateCall(MacroAssembler* masm, Register argc, Register target,
|
||||
ConvertReceiverMode mode,
|
||||
std::optional<RootIndex> error_string_root) {
|
||||
Register map = x4;
|
||||
Register instance_type = x5;
|
||||
|
||||
DCHECK(!AreAliased(argc, target, map, instance_type));
|
||||
|
||||
Label non_callable, class_constructor;
|
||||
__ JumpIfSmi(target, &non_callable);
|
||||
__ LoadMap(map, target);
|
||||
__ CompareInstanceTypeRange(map, instance_type,
|
||||
FIRST_CALLABLE_JS_FUNCTION_TYPE,
|
||||
LAST_CALLABLE_JS_FUNCTION_TYPE);
|
||||
__ TailCallBuiltin(Builtins::CallFunction(mode), ls);
|
||||
__ Cmp(instance_type, JS_BOUND_FUNCTION_TYPE);
|
||||
__ TailCallBuiltin(Builtin::kCallBoundFunction, eq);
|
||||
|
||||
// Check if target has a [[Call]] internal method.
|
||||
{
|
||||
Register flags = x4;
|
||||
DCHECK(!AreAliased(argc, target, flags));
|
||||
__ Ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
|
||||
map = no_reg;
|
||||
__ TestAndBranchIfAllClear(flags, Map::Bits1::IsCallableBit::kMask,
|
||||
&non_callable);
|
||||
}
|
||||
|
||||
// Check if target is a proxy and call CallProxy external builtin
|
||||
__ Cmp(instance_type, JS_PROXY_TYPE);
|
||||
__ TailCallBuiltin(Builtin::kCallProxy, eq);
|
||||
|
||||
// Check if target is a wrapped function and call CallWrappedFunction external
|
||||
// builtin
|
||||
__ Cmp(instance_type, JS_WRAPPED_FUNCTION_TYPE);
|
||||
__ TailCallBuiltin(Builtin::kCallWrappedFunction, eq);
|
||||
|
||||
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
||||
// Check that the function is not a "classConstructor".
|
||||
__ Cmp(instance_type, JS_CLASS_CONSTRUCTOR_TYPE);
|
||||
__ B(eq, &class_constructor);
|
||||
|
||||
// 2. Call to something else, which might have a [[Call]] internal method (if
|
||||
// not we raise an exception).
|
||||
// Overwrite the original receiver with the (original) target.
|
||||
__ Poke(target, __ ReceiverOperand());
|
||||
|
||||
// Let the "call_as_function_delegate" take care of the rest.
|
||||
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
|
||||
__ TailCallBuiltin(
|
||||
Builtins::CallFunction(ConvertReceiverMode::kNotNullOrUndefined));
|
||||
|
||||
// 3. Call to something that is not callable.
|
||||
__ Bind(&non_callable);
|
||||
{
|
||||
FrameScope scope(masm, StackFrame::INTERNAL);
|
||||
if (!error_string_root.has_value()) {
|
||||
// Use the simpler error for Generate_Call
|
||||
__ PushArgument(target);
|
||||
__ CallRuntime(Runtime::kThrowCalledNonCallable);
|
||||
} else {
|
||||
// Use the more specific error for Function.prototype.call/apply
|
||||
__ LoadRoot(x2, error_string_root.value());
|
||||
__ Push(target, x2);
|
||||
__ CallRuntime(Runtime::kThrowTargetNonFunction);
|
||||
__ Unreachable();
|
||||
}
|
||||
}
|
||||
|
||||
// 4. The function is a "classConstructor", need to raise an exception.
|
||||
__ Bind(&class_constructor);
|
||||
{
|
||||
FrameScope frame(masm, StackFrame::INTERNAL);
|
||||
__ PushArgument(target);
|
||||
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
|
||||
__ Unreachable();
|
||||
}
|
||||
}
|
||||
|
||||
// static
|
||||
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
||||
// ----------- S t a t e -------------
|
||||
|
|
@ -2443,7 +2522,10 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
|||
{
|
||||
__ Mov(x0, JSParameterCount(0));
|
||||
DCHECK_EQ(receiver, x1);
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
|
||||
Register target = x1;
|
||||
GenerateCall(masm, x0, target, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_apply_string);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2468,7 +2550,10 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
|
|||
// We do not need to overwrite the padding slot above it with anything.
|
||||
__ Poke(scratch, 0);
|
||||
// Call function. The argument count is already zero.
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
|
||||
Register target = function;
|
||||
GenerateCall(masm, x0, target, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_call_string);
|
||||
__ Bind(&non_zero);
|
||||
}
|
||||
|
||||
|
|
@ -2511,7 +2596,10 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
|
|||
// receiver and call the callable.
|
||||
__ Bind(&arguments_ready);
|
||||
__ Sub(argc, argc, 1);
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
|
||||
Register target = function;
|
||||
GenerateCall(masm, x0, target, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_call_string);
|
||||
}
|
||||
|
||||
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
|
||||
|
|
@ -3094,70 +3182,8 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
|
|||
// -- x1 : the target to call (can be any Object).
|
||||
// -----------------------------------
|
||||
Register target = x1;
|
||||
Register map = x4;
|
||||
Register instance_type = x5;
|
||||
DCHECK(!AreAliased(x0, target, map, instance_type));
|
||||
|
||||
Label non_callable, class_constructor;
|
||||
__ JumpIfSmi(target, &non_callable);
|
||||
__ LoadMap(map, target);
|
||||
__ CompareInstanceTypeRange(map, instance_type,
|
||||
FIRST_CALLABLE_JS_FUNCTION_TYPE,
|
||||
LAST_CALLABLE_JS_FUNCTION_TYPE);
|
||||
__ TailCallBuiltin(Builtins::CallFunction(mode), ls);
|
||||
__ Cmp(instance_type, JS_BOUND_FUNCTION_TYPE);
|
||||
__ TailCallBuiltin(Builtin::kCallBoundFunction, eq);
|
||||
|
||||
// Check if target has a [[Call]] internal method.
|
||||
{
|
||||
Register flags = x4;
|
||||
__ Ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
|
||||
map = no_reg;
|
||||
__ TestAndBranchIfAllClear(flags, Map::Bits1::IsCallableBit::kMask,
|
||||
&non_callable);
|
||||
}
|
||||
|
||||
// Check if target is a proxy and call CallProxy external builtin
|
||||
__ Cmp(instance_type, JS_PROXY_TYPE);
|
||||
__ TailCallBuiltin(Builtin::kCallProxy, eq);
|
||||
|
||||
// Check if target is a wrapped function and call CallWrappedFunction external
|
||||
// builtin
|
||||
__ Cmp(instance_type, JS_WRAPPED_FUNCTION_TYPE);
|
||||
__ TailCallBuiltin(Builtin::kCallWrappedFunction, eq);
|
||||
|
||||
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
||||
// Check that the function is not a "classConstructor".
|
||||
__ Cmp(instance_type, JS_CLASS_CONSTRUCTOR_TYPE);
|
||||
__ B(eq, &class_constructor);
|
||||
|
||||
// 2. Call to something else, which might have a [[Call]] internal method (if
|
||||
// not we raise an exception).
|
||||
// Overwrite the original receiver with the (original) target.
|
||||
__ Poke(target, __ ReceiverOperand());
|
||||
|
||||
// Let the "call_as_function_delegate" take care of the rest.
|
||||
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
|
||||
__ TailCallBuiltin(
|
||||
Builtins::CallFunction(ConvertReceiverMode::kNotNullOrUndefined));
|
||||
|
||||
// 3. Call to something that is not callable.
|
||||
__ bind(&non_callable);
|
||||
{
|
||||
FrameScope scope(masm, StackFrame::INTERNAL);
|
||||
__ PushArgument(target);
|
||||
__ CallRuntime(Runtime::kThrowCalledNonCallable);
|
||||
__ Unreachable();
|
||||
}
|
||||
|
||||
// 4. The function is a "classConstructor", need to raise an exception.
|
||||
__ bind(&class_constructor);
|
||||
{
|
||||
FrameScope frame(masm, StackFrame::INTERNAL);
|
||||
__ PushArgument(target);
|
||||
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
|
||||
__ Unreachable();
|
||||
}
|
||||
GenerateCall(masm, x0, target, mode, std::nullopt);
|
||||
}
|
||||
|
||||
// static
|
||||
|
|
@ -3461,6 +3487,7 @@ void SwitchStackPointerAndSimulatorStackLimit(MacroAssembler* masm,
|
|||
Register stack, Register tmp) {
|
||||
if (masm->options().enable_simulator_code) {
|
||||
UseScratchRegisterScope temps(masm);
|
||||
DCHECK_NE(x16, tmp);
|
||||
temps.Exclude(x16);
|
||||
__ Ldr(tmp, MemOperand(stack, wasm::kStackSpOffset));
|
||||
__ Ldr(x16, MemOperand(stack, wasm::kStackLimitOffset));
|
||||
|
|
@ -3540,12 +3567,9 @@ void ReloadParentStack(MacroAssembler* masm, Register return_reg,
|
|||
Register parent = tmp2;
|
||||
__ Ldr(parent, MemOperand(active_stack, wasm::kStackParentOffset));
|
||||
|
||||
// Update active stack.
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), parent);
|
||||
|
||||
// Switch stack!
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), active_stack,
|
||||
nullptr, no_reg, {return_reg, return_value, context, parent});
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), parent, nullptr,
|
||||
no_reg, {return_reg, return_value, context, parent});
|
||||
LoadJumpBuffer(masm, parent, false, tmp3);
|
||||
}
|
||||
|
||||
|
|
@ -3749,59 +3773,27 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
|
|||
// Set a sentinel value for the spill slots visited by the GC.
|
||||
ResetWasmJspiFrameStackSlots(masm);
|
||||
|
||||
// -------------------------------------------
|
||||
// Save current state in active jump buffer.
|
||||
// -------------------------------------------
|
||||
Label resume;
|
||||
DEFINE_REG(stack);
|
||||
__ LoadRootRelative(stack, IsolateData::active_stack_offset());
|
||||
DEFINE_REG(scratch);
|
||||
regs.ResetExcept(suspender, stack);
|
||||
|
||||
DEFINE_REG(suspender_stack);
|
||||
__ LoadExternalPointerField(
|
||||
suspender_stack,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kStackOffset),
|
||||
kWasmStackMemoryTag);
|
||||
if (v8_flags.debug_code) {
|
||||
// -------------------------------------------
|
||||
// Check that the suspender's stack is the active stack.
|
||||
// -------------------------------------------
|
||||
// TODO(thibaudm): Once we add core stack-switching instructions, this
|
||||
// check will not hold anymore: it's possible that the active stack changed
|
||||
// (due to an internal switch), so we have to update the suspender.
|
||||
__ cmp(suspender_stack, stack);
|
||||
Label ok;
|
||||
__ B(&ok, eq);
|
||||
__ Trap();
|
||||
__ bind(&ok);
|
||||
}
|
||||
// -------------------------------------------
|
||||
// Update roots.
|
||||
// -------------------------------------------
|
||||
DEFINE_REG(caller);
|
||||
__ Ldr(caller, MemOperand(suspender_stack, wasm::kStackParentOffset));
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), caller);
|
||||
DEFINE_REG(parent);
|
||||
__ LoadProtectedPointerField(
|
||||
parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
|
||||
__ StoreRootRelative(IsolateData::active_suspender_offset(), parent);
|
||||
regs.ResetExcept(suspender, caller, stack);
|
||||
DEFINE_REG(target_stack);
|
||||
__ LoadExternalPointerField(
|
||||
target_stack, FieldMemOperand(parent, WasmSuspenderObject::kStackOffset),
|
||||
kWasmStackMemoryTag);
|
||||
|
||||
// -------------------------------------------
|
||||
// Load jump buffer.
|
||||
// -------------------------------------------
|
||||
SwitchStacks(masm, ExternalReference::wasm_suspend_stack(), stack, &resume,
|
||||
no_reg, {caller, suspender});
|
||||
FREE_REG(stack);
|
||||
SwitchStacks(masm, ExternalReference::wasm_suspend_stack(), target_stack,
|
||||
&resume, no_reg, {target_stack, suspender, parent});
|
||||
__ StoreRootRelative(IsolateData::active_suspender_offset(), parent);
|
||||
__ LoadTaggedField(
|
||||
kReturnRegister0,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kPromiseOffset));
|
||||
MemOperand GCScanSlotPlace =
|
||||
MemOperand(fp, WasmJspiFrameConstants::kGCScanSlotCountOffset);
|
||||
__ Str(xzr, GCScanSlotPlace);
|
||||
ASSIGN_REG(scratch)
|
||||
LoadJumpBuffer(masm, caller, true, scratch);
|
||||
LoadJumpBuffer(masm, target_stack, true, scratch);
|
||||
__ Trap();
|
||||
__ Bind(&resume, BranchTargetIdentifier::kBtiJump);
|
||||
__ LeaveFrame(StackFrame::WASM_JSPI);
|
||||
|
|
@ -3868,8 +3860,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
|
|||
target_stack,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kStackOffset),
|
||||
kWasmStackMemoryTag);
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), target_stack);
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_stack(), active_stack,
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_jspi_stack(), target_stack,
|
||||
&suspend, suspender, {target_stack});
|
||||
regs.ResetExcept(target_stack);
|
||||
|
||||
|
|
@ -3913,6 +3904,38 @@ void Builtins::Generate_WasmReject(MacroAssembler* masm) {
|
|||
Generate_WasmResumeHelper(masm, wasm::OnResume::kThrow);
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmFXResume(MacroAssembler* masm) {
|
||||
__ EnterFrame(StackFrame::WASM_STACK_EXIT);
|
||||
Register target_stack = WasmFXResumeDescriptor::GetRegisterParameter(0);
|
||||
Label suspend;
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_wasmfx_stack(),
|
||||
target_stack, &suspend, no_reg, {target_stack});
|
||||
// When we resume the stack for the first time, we enter the wrapper. However
|
||||
// the wrapper starts with bti c, not bti j, so we cannot jump to it with an
|
||||
// arbitrary register. We have to use ip0 or ip1 as the target register here
|
||||
// (but ip0=x16 is already used as a fixed register inside LoadJumpBuffer in
|
||||
// simulator builds).
|
||||
// Alternatively we would have to change {CodeGenerator::AssembleCode} to
|
||||
// exceptionally emit bti j instead of bti c at the start of the code for code
|
||||
// kind WASM_STACK_ENTRY.
|
||||
LoadJumpBuffer(masm, target_stack, true, ip1);
|
||||
__ Trap();
|
||||
__ Bind(&suspend, BranchTargetIdentifier::kBtiJump);
|
||||
__ LeaveFrame(StackFrame::WASM_STACK_EXIT);
|
||||
__ Ret(lr);
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) {
|
||||
Register active_stack = x0;
|
||||
__ LoadRootRelative(active_stack, IsolateData::active_stack_offset());
|
||||
Register parent = x1;
|
||||
__ Move(parent, MemOperand(active_stack, wasm::kStackParentOffset));
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), parent, nullptr,
|
||||
no_reg, {parent});
|
||||
LoadJumpBuffer(masm, parent, true, x2);
|
||||
__ Trap();
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
|
||||
// Only needed on x64.
|
||||
__ Trap();
|
||||
|
|
@ -3924,12 +3947,14 @@ void SwitchToAllocatedStack(MacroAssembler* masm, RegisterAllocator& regs,
|
|||
Label* suspend) {
|
||||
ResetWasmJspiFrameStackSlots(masm);
|
||||
DEFINE_SCOPED(scratch)
|
||||
DEFINE_REG(parent_stack)
|
||||
__ LoadRootRelative(parent_stack, IsolateData::active_stack_offset());
|
||||
__ Ldr(parent_stack, MemOperand(parent_stack, wasm::kStackParentOffset));
|
||||
SwitchStacks(masm, ExternalReference::wasm_start_stack(), parent_stack,
|
||||
suspend, no_reg, {wasm_instance, wrapper_buffer});
|
||||
FREE_REG(parent_stack);
|
||||
DEFINE_REG(stack)
|
||||
__ LoadRootRelative(stack, IsolateData::active_suspender_offset());
|
||||
__ LoadExternalPointerField(
|
||||
stack, FieldMemOperand(stack, WasmSuspenderObject::kStackOffset),
|
||||
kWasmStackMemoryTag);
|
||||
SwitchStacks(masm, ExternalReference::wasm_start_stack(), stack, suspend,
|
||||
no_reg, {wasm_instance, wrapper_buffer});
|
||||
FREE_REG(stack);
|
||||
// Save the old stack's fp in x9, and use it to access the parameters in
|
||||
// the parent frame.
|
||||
regs.Pinned(x9, &original_fp);
|
||||
|
|
|
|||
8 deps/v8/src/builtins/array-map.tq vendored

@@ -107,7 +107,7 @@ struct Vector {
    if (this.onlyNumbers) {
      kind = ElementsKind::PACKED_DOUBLE_ELEMENTS;
    } else if (this.onlyNumbersAndUndefined) {
      dcheck(kEnableExperimentalUndefinedDouble);
      dcheck(kEnableUndefinedDouble);
      kind = ElementsKind::HOLEY_DOUBLE_ELEMENTS;
    } else {
      kind = ElementsKind::PACKED_ELEMENTS;
@@ -134,7 +134,7 @@ struct Vector {
        AllocateFixedDoubleArrayWithHoles(SmiUntag(length));
    a = NewJSArray(map, this.fixedArray);
    for (let i: Smi = 0; i < validLength; i++) {
      @if(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE) {
      @if(V8_ENABLE_UNDEFINED_DOUBLE) {
        typeswitch (UnsafeCast<(Number | Undefined | TheHole)>(
            this.fixedArray.objects[i])) {
          case (TheHole): {
@@ -147,7 +147,7 @@ struct Vector {
          }
        }
      }
      @ifnot(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE) {
      @ifnot(V8_ENABLE_UNDEFINED_DOUBLE) {
        typeswitch (
            UnsafeCast<(Number | TheHole)>(this.fixedArray.objects[i])) {
          case (TheHole): {
@@ -182,7 +182,7 @@ struct Vector {
    case (s: Undefined): {
      this.onlySmis = false;
      this.onlyNumbers = false;
      @ifnot(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE) {
      @ifnot(V8_ENABLE_UNDEFINED_DOUBLE) {
        // If we are running without undefined doubles, we don't distinguish
        // undefineds here.
        this.onlyNumbersAndUndefined = false;
33
deps/v8/src/builtins/base.tq
vendored
33
deps/v8/src/builtins/base.tq
vendored
|
|
@ -170,15 +170,15 @@ extern macro RunLazy(Lazy<JSAny>): JSAny;
|
|||
// A Smi value containing a bitfield struct as its integer data.
|
||||
@useParentTypeChecker type SmiTagged<T : type extends uint31> extends Smi;
|
||||
|
||||
const kEnableExperimentalUndefinedDouble:
|
||||
constexpr bool generates 'V8_EXPERIMENTAL_UNDEFINED_DOUBLE_BOOL';
|
||||
const kEnableUndefinedDouble:
|
||||
constexpr bool generates 'V8_UNDEFINED_DOUBLE_BOOL';
|
||||
// WARNING: The memory representation (i.e., in class fields and arrays) of
|
||||
// float64_or_undefined_or_hole is just a float64 that may be the undefined- or
|
||||
// hole-representing signalling NaN bit-pattern. So it's memory size is that of
|
||||
// float64 and loading and storing float64_or_undefined_or_hole emits special
|
||||
// code.
|
||||
struct float64_or_undefined_or_hole {
|
||||
@if(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE)
|
||||
@if(V8_ENABLE_UNDEFINED_DOUBLE)
|
||||
macro Value(): float64 labels IfUndefined, IfHole {
|
||||
if (this.is_undefined) {
|
||||
goto IfUndefined;
|
||||
|
|
@ -189,7 +189,7 @@ struct float64_or_undefined_or_hole {
|
|||
return this.value;
|
||||
}
|
||||
|
||||
@ifnot(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE)
|
||||
@ifnot(V8_ENABLE_UNDEFINED_DOUBLE)
|
||||
macro Value(): float64 labels _IfUndefined, IfHole {
|
||||
if (this.is_hole) {
|
||||
goto IfHole;
|
||||
|
|
@ -198,29 +198,29 @@ struct float64_or_undefined_or_hole {
|
|||
}
|
||||
|
||||
macro ValueUnsafeAssumeNotHole(): float64 {
|
||||
@if(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE) {
|
||||
@if(V8_ENABLE_UNDEFINED_DOUBLE) {
|
||||
dcheck(!this.is_undefined);
|
||||
}
|
||||
dcheck(!this.is_hole);
|
||||
return this.value;
|
||||
}
|
||||
|
||||
@if(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE) is_undefined: bool;
|
||||
@if(V8_ENABLE_UNDEFINED_DOUBLE) is_undefined: bool;
|
||||
is_hole: bool;
|
||||
value: float64;
|
||||
}
|
||||
|
||||
@if(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE)
|
||||
@if(V8_ENABLE_UNDEFINED_DOUBLE)
|
||||
const kDoubleHole: float64_or_undefined_or_hole =
|
||||
float64_or_undefined_or_hole{is_undefined: false, is_hole: true, value: 0};
|
||||
@if(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE)
|
||||
@if(V8_ENABLE_UNDEFINED_DOUBLE)
|
||||
const kDoubleUndefined: float64_or_undefined_or_hole =
|
||||
float64_or_undefined_or_hole{is_undefined: true, is_hole: false, value: 0};
|
||||
|
||||
@ifnot(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE)
|
||||
@ifnot(V8_ENABLE_UNDEFINED_DOUBLE)
|
||||
const kDoubleHole: float64_or_undefined_or_hole =
|
||||
float64_or_undefined_or_hole{is_hole: true, value: 0};
|
||||
@ifnot(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE)
|
||||
@ifnot(V8_ENABLE_UNDEFINED_DOUBLE)
|
||||
const kDoubleUndefined: float64_or_undefined_or_hole =
|
||||
float64_or_undefined_or_hole{is_hole: false, value: 0};
|
||||
|
||||
|
|
@ -710,16 +710,27 @@ extern macro Is64(): constexpr bool;
|
|||
|
||||
extern macro SelectBooleanConstant(bool): Boolean;
|
||||
|
||||
const kCStringNull: constexpr string generates '((const char*)nullptr)';
|
||||
extern macro Print(constexpr string): void;
|
||||
extern macro Print(String, Object): void;
|
||||
extern macro Print(constexpr string, Object): void;
|
||||
extern macro Print(Object): void;
|
||||
extern macro Print(String, uintptr): void;
|
||||
extern macro Print(constexpr string, uintptr): void;
|
||||
extern macro Print(String, uint32): void;
|
||||
extern macro Print(constexpr string, uint32): void;
|
||||
extern macro Print(String, uint64): void;
|
||||
extern macro Print(constexpr string, uint64): void;
|
||||
extern macro Print(String, float32): void;
|
||||
extern macro Print(constexpr string, float32): void;
|
||||
extern macro Print(String, float64): void;
|
||||
extern macro Print(constexpr string, float64): void;
|
||||
extern macro PrintErr(constexpr string): void;
|
||||
extern macro PrintErr(constexpr string, Object): void;
|
||||
extern macro PrintErr(Object): void;
|
||||
extern macro Comment(constexpr string): void;
|
||||
extern macro DebugBreak(): void;
|
||||
extern macro PrintStringSimple(String): void;
|
||||
|
||||
extern macro SetSupportsDynamicParameterCount(
|
||||
JSFunction, DispatchHandle): void;
|
||||
|
|
@ -2094,7 +2105,7 @@ transitioning builtin FastCreateDataProperty(
|
|||
const elements = Cast<FixedArray>(array.elements) otherwise unreachable;
|
||||
elements[index] = smiValue;
|
||||
} else if (IsDoubleElementsKind(kind)) {
|
||||
@if(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE) {
|
||||
@if(V8_ENABLE_UNDEFINED_DOUBLE) {
|
||||
const doubleElements = Cast<FixedDoubleArray>(array.elements)
|
||||
otherwise unreachable;
|
||||
if (kind == ElementsKind::HOLEY_DOUBLE_ELEMENTS) {
|
||||
|
|
|
|||
74
deps/v8/src/builtins/builtins-array-gen.cc
vendored
74
deps/v8/src/builtins/builtins-array-gen.cc
vendored
|
|
@ -337,7 +337,7 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
|
|||
StoreFixedDoubleArrayHole(elements_known_double_array, new_length_intptr);
|
||||
args.PopAndReturn(AllocateHeapNumberWithValue(value));
|
||||
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
BIND(&pop_and_return_undefined);
|
||||
{
|
||||
StoreFixedDoubleArrayHole(elements_known_double_array,
|
||||
|
|
@ -346,7 +346,7 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
|
|||
}
|
||||
#else
|
||||
DCHECK(!pop_and_return_undefined.is_used());
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
}
|
||||
|
||||
BIND(&fast_elements);
|
||||
|
|
@ -383,10 +383,10 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
|
|||
   Label object_push(this, &arg_index);
   Label double_push(this, &arg_index);
   Label double_transition(this);
-#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#ifdef V8_ENABLE_UNDEFINED_DOUBLE
   Label holey_double_push(this, &arg_index);
   Label holey_double_transition(this);
-#endif  // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#endif  // V8_ENABLE_UNDEFINED_DOUBLE
   Label runtime(this, Label::kDeferred);

   auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);

@@ -434,22 +434,22 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
     GotoIf(Word32Equal(elements_kind, Int32Constant(DICTIONARY_ELEMENTS)),
            &default_label);

-#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#ifdef V8_ENABLE_UNDEFINED_DOUBLE
     GotoIfNotNumberOrUndefined(arg, &object_push);
     Branch(IsElementsKindGreaterThan(elements_kind, PACKED_DOUBLE_ELEMENTS),
            &holey_double_push, &double_push);
 #else
     GotoIfNotNumber(arg, &object_push);
     Goto(&double_push);
-#endif  // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#endif  // V8_ENABLE_UNDEFINED_DOUBLE
   }

   BIND(&object_push_pre);
   {
-#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#ifdef V8_ENABLE_UNDEFINED_DOUBLE
     GotoIf(Word32Equal(kind, Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
            &holey_double_push);
-#endif  // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#endif  // V8_ENABLE_UNDEFINED_DOUBLE
     Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS), &double_push,
            &object_push);
   }

@@ -487,14 +487,14 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
     TNode<Int32T> elements_kind = LoadElementsKind(array_receiver);
     GotoIf(Word32Equal(elements_kind, Int32Constant(DICTIONARY_ELEMENTS)),
            &default_label);
-#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#ifdef V8_ENABLE_UNDEFINED_DOUBLE
     GotoIf(Word32Equal(elements_kind, Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
            &holey_double_push);
-#endif  // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#endif  // V8_ENABLE_UNDEFINED_DOUBLE
     Goto(&object_push);
   }

-#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#ifdef V8_ENABLE_UNDEFINED_DOUBLE
   BIND(&holey_double_push);
   {
     TNode<Smi> new_length =

@@ -523,7 +523,7 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
            &default_label);
     Goto(&object_push);
   }
-#endif  // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#endif  // V8_ENABLE_UNDEFINED_DOUBLE

   // Fallback that stores un-processed arguments using the full, heavyweight
   // SetProperty machinery.

@@ -887,16 +887,19 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
       return_found(this), return_not_found(this);

   GotoIfNot(TaggedIsSmi(search_element), &not_smi);
-  search_num = SmiToFloat64(CAST(search_element));
+  search_num = SmiToFloat64(UncheckedCast<Smi>(search_element));
   Goto(&heap_num_loop);

   BIND(&not_smi);
+  TNode<HeapObject> ho_search_element =
+      UncheckedCast<HeapObject>(search_element);
   if (variant == kIncludes) {
-    GotoIf(IsUndefined(search_element), &undef_loop);
+    GotoIf(IsUndefined(ho_search_element), &undef_loop);
   }
-  TNode<Map> map = LoadMap(CAST(search_element));
+  TNode<Map> map = LoadMap(ho_search_element);
   GotoIfNot(IsHeapNumberMap(map), &not_heap_num);
-  search_num = LoadHeapNumberValue(CAST(search_element));
+  search_num =
+      LoadHeapNumberValue(UncheckedCast<HeapNumber>(ho_search_element));
   Goto(&heap_num_loop);

   BIND(&not_heap_num);

@@ -999,11 +1002,12 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
                &return_found, &continue_loop);

         BIND(&element_k_not_smi);
-        GotoIf(IsTheHole(element_k), &continue_loop);
-        GotoIfNot(IsHeapNumber(CAST(element_k)), &continue_loop);
-        Branch(Float64Equal(search_num.value(),
-                            LoadHeapNumberValue(CAST(element_k))),
-               &return_found, &continue_loop);
+        TNode<HeapObject> ho_element_k = UncheckedCast<HeapObject>(element_k);
+        GotoIf(IsTheHole(ho_element_k), &continue_loop);
+        GotoIfNot(IsHeapNumber(ho_element_k), &continue_loop);
+        Branch(
+            Float64Equal(search_num.value(), LoadHeapNumberValue(ho_element_k)),
+            &return_found, &continue_loop);

         BIND(&continue_loop);
         Increment(&index_var);

@@ -1019,9 +1023,10 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
         TNode<Object> element_k =
             UnsafeLoadFixedArrayElement(elements, index_var.value());
         GotoIf(TaggedIsSmi(element_k), &continue_loop);
-        GotoIf(IsTheHole(element_k), &continue_loop);
-        GotoIfNot(IsHeapNumber(CAST(element_k)), &continue_loop);
-        BranchIfFloat64IsNaN(LoadHeapNumberValue(CAST(element_k)), &return_found,
+        TNode<HeapObject> ho_element_k = UncheckedCast<HeapObject>(element_k);
+        GotoIf(IsTheHole(ho_element_k), &continue_loop);
+        GotoIfNot(IsHeapNumber(ho_element_k), &continue_loop);
+        BranchIfFloat64IsNaN(LoadHeapNumberValue(ho_element_k), &return_found,
                              &continue_loop);

         BIND(&continue_loop);

@@ -1044,17 +1049,19 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
         TNode<Object> element_k =
             UnsafeLoadFixedArrayElement(elements, index_var.value());
         GotoIf(TaggedIsSmi(element_k), &continue_loop);
-        GotoIf(TaggedEqual(search_element_string, element_k), &return_found);
-        GotoIf(IsTheHole(element_k), &continue_loop);
-        TNode<Uint16T> element_k_type = LoadInstanceType(CAST(element_k));
+        TNode<HeapObject> ho_element_k = UncheckedCast<HeapObject>(element_k);
+        GotoIf(TaggedEqual(search_element_string, ho_element_k), &return_found);
+        GotoIf(IsTheHole(ho_element_k), &continue_loop);
+        TNode<Uint16T> element_k_type = LoadInstanceType(ho_element_k);
         GotoIfNot(IsStringInstanceType(element_k_type), &continue_loop);
-        Branch(IntPtrEqual(search_length, LoadStringLengthAsWord(CAST(element_k))),
+        TNode<String> string_element_k = UncheckedCast<String>(ho_element_k);
+        Branch(IntPtrEqual(search_length, LoadStringLengthAsWord(string_element_k)),
                &slow_compare, &continue_loop);

         BIND(&slow_compare);
         StringBuiltinsAssembler string_asm(state());
         string_asm.StringEqual_Core(search_element_string, search_type,
-                                    CAST(element_k), element_k_type, search_length,
+                                    string_element_k, element_k_type, search_length,
                                     &return_found, &continue_loop, &runtime);
         BIND(&runtime);
         TNode<Object> result = CallRuntime(Runtime::kStringEqual, context,

@@ -1075,8 +1082,9 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
             UnsafeLoadFixedArrayElement(elements, index_var.value());
         Label continue_loop(this);
         GotoIf(TaggedIsSmi(element_k), &continue_loop);
-        GotoIf(IsTheHole(element_k), &continue_loop);
-        GotoIfNot(IsBigInt(CAST(element_k)), &continue_loop);
+        TNode<HeapObject> ho_element_k = UncheckedCast<HeapObject>(element_k);
+        GotoIf(IsTheHole(ho_element_k), &continue_loop);
+        GotoIfNot(IsBigInt(ho_element_k), &continue_loop);
         TNode<Object> result = CallRuntime(Runtime::kBigIntEqualToBigInt, context,
                                            search_element, element_k);
         Branch(TaggedEqual(result, TrueConstant()), &return_found, &continue_loop);

@@ -1210,7 +1218,7 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(
     Goto(&not_nan_case);

     BIND(&search_notnan);
-    if (variant == kIncludes || V8_EXPERIMENTAL_UNDEFINED_DOUBLE_BOOL) {
+    if (variant == kIncludes || V8_UNDEFINED_DOUBLE_BOOL) {
       GotoIf(IsUndefined(search_element), &hole_loop);
     }
     GotoIfNot(IsHeapNumber(CAST(search_element)), &return_not_found);

@@ -1279,7 +1287,7 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(
   }

   // Array.p.includes treats the hole as undefined.
-  if (variant == kIncludes || V8_EXPERIMENTAL_UNDEFINED_DOUBLE_BOOL) {
+  if (variant == kIncludes || V8_UNDEFINED_DOUBLE_BOOL) {
     BIND(&hole_loop);
     GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
               &return_not_found);
24
deps/v8/src/builtins/builtins-array.cc
vendored

@@ -1306,12 +1306,12 @@ bool IterateElements(Isolate* isolate, DirectHandle<JSReceiver> receiver,
               JSReceiver::GetElement(isolate, array, j), false);
           if (!visitor->visit(j, element_value)) return false;
         }
-#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#ifdef V8_ENABLE_UNDEFINED_DOUBLE
       } else if (elements->is_undefined(j)) {
         DirectHandle<Object> element_value =
             isolate->factory()->undefined_value();
         if (!visitor->visit(j, element_value)) return false;
-#endif  // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#endif  // V8_ENABLE_UNDEFINED_DOUBLE
       } else {
         double double_value = elements->get_scalar(j);
         DirectHandle<Object> element_value =

@@ -1491,24 +1491,24 @@ Tagged<Object> Slow_ArrayConcat(BuiltinArguments* args,
         for (uint32_t k = 0; k < length; k++) {
           const bool is_hole = elements->is_the_hole(k);
           if (is_hole
-#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#ifdef V8_ENABLE_UNDEFINED_DOUBLE
               || elements->is_undefined(k)
-#endif  // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#endif  // V8_ENABLE_UNDEFINED_DOUBLE
           ) {
             if (has_array_prototype &&
                 Protectors::IsNoElementsIntact(isolate)) {
               // If we do not have elements on the prototype chain,
               // we can generate a HOLEY_DOUBLE_ELEMENTS.
               kind = HOLEY_DOUBLE_ELEMENTS;
-#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#ifdef V8_ENABLE_UNDEFINED_DOUBLE
              if (is_hole) {
-#endif  // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#endif  // V8_ENABLE_UNDEFINED_DOUBLE
               double_storage->set_the_hole(j);
-#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#ifdef V8_ENABLE_UNDEFINED_DOUBLE
              } else {
               double_storage->set_undefined(j);
              }
-#endif  // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#endif  // V8_ENABLE_UNDEFINED_DOUBLE
            } else {
              failure = true;
              break;

@@ -1528,7 +1528,7 @@ Tagged<Object> Slow_ArrayConcat(BuiltinArguments* args,
         for (uint32_t k = 0; k < length; k++) {
           Tagged<Object> element = elements->get(k);
           if (element == the_hole) {
-#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#ifdef V8_ENABLE_UNDEFINED_DOUBLE
             if (has_array_prototype &&
                 Protectors::IsNoElementsIntact(isolate)) {
               // If we do not have elements on the prototype chain,

@@ -1536,12 +1536,12 @@ Tagged<Object> Slow_ArrayConcat(BuiltinArguments* args,
               kind = HOLEY_DOUBLE_ELEMENTS;
               double_storage->set_the_hole(j);
             } else {
-#endif  // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#endif  // V8_ENABLE_UNDEFINED_DOUBLE
               failure = true;
               break;
-#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#ifdef V8_ENABLE_UNDEFINED_DOUBLE
             }
-#endif  // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#endif  // V8_ENABLE_UNDEFINED_DOUBLE
           } else {
             int32_t int_value = Smi::ToInt(element);
             double_storage->set(j, int_value);
3
deps/v8/src/builtins/builtins-arraybuffer.cc
vendored

@@ -121,7 +121,8 @@ Tagged<Object> ConstructBuffer(Isolate* isolate,
   DirectHandle<JSObject> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result,
-      JSObject::New(target, new_target, {}, NewJSObjectType::kAPIWrapper));
+      JSObject::New(target, new_target, {},
+                    NewJSObjectType::kMaybeEmbedderFieldsAndApiWrapper));
   auto array_buffer = Cast<JSArrayBuffer>(result);
   const bool backing_store_creation_failed = !backing_store;
   array_buffer->Setup(shared, resizable, std::move(backing_store), isolate);
5
deps/v8/src/builtins/builtins-call-gen.cc
vendored

@@ -12,6 +12,7 @@
 #include "src/common/globals.h"
 #include "src/execution/isolate.h"
 #include "src/execution/protectors.h"
+#include "src/heap/factory-inl.h"
 #include "src/objects/api-callbacks.h"
 #include "src/objects/arguments.h"
 #include "src/objects/property-cell.h"

@@ -179,7 +180,9 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
            &if_target_not_callable);
   BIND(&if_target_not_callable);
   {
-    CallRuntime(Runtime::kThrowApplyNonFunction, context, target);
+    CallRuntime(Runtime::kThrowTargetNonFunction, context, target,
+                HeapConstantNoHole(
+                    isolate()->factory()->Function_prototype_apply_string()));
     Unreachable();
   }
   BIND(&if_target_callable);
207
deps/v8/src/builtins/builtins-collections-gen.cc
vendored

@@ -545,9 +545,9 @@ TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedDoubleArrayElement(
   TNode<Float64T> element = LoadFixedDoubleArrayElement(
       CAST(elements), index, &if_hole_or_undefined, &if_hole_or_undefined);
   {  // not hole
-#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#ifdef V8_ENABLE_UNDEFINED_DOUBLE
     CSA_DCHECK(this, Word32Equal(Int32Constant(0), IsDoubleUndefined(element)));
-#endif  // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
+#endif  // V8_ENABLE_UNDEFINED_DOUBLE
     entry = AllocateHeapNumberWithValue(element);
     Goto(&next);
   }
@ -1195,12 +1195,12 @@ template <typename CollectionType>
|
|||
void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForStringKey(
|
||||
TNode<CollectionType> table, TNode<String> key_tagged,
|
||||
TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) {
|
||||
const TNode<Uint32T> hash = ComputeStringHash(key_tagged);
|
||||
const TNode<Uint32T> hash = LoadNameHash(key_tagged);
|
||||
*result = Signed(ChangeUint32ToWord(hash));
|
||||
FindOrderedHashTableEntry<CollectionType>(
|
||||
table, hash,
|
||||
[&](TNode<Object> other_key, Label* if_same, Label* if_not_same) {
|
||||
SameValueZeroString(key_tagged, other_key, if_same, if_not_same);
|
||||
Branch(TaggedEqual(key_tagged, other_key), if_same, if_not_same);
|
||||
},
|
||||
result, entry_found, not_found);
|
||||
}
|
||||
|
|
@ -1248,34 +1248,6 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForOtherKey(
|
|||
result, entry_found, not_found);
|
||||
}
|
||||
|
||||
TNode<Uint32T> CollectionsBuiltinsAssembler::ComputeStringHash(
|
||||
TNode<String> string_key) {
|
||||
TVARIABLE(Uint32T, var_result);
|
||||
|
||||
Label hash_not_computed(this), done(this, &var_result);
|
||||
const TNode<Uint32T> hash = LoadNameHash(string_key, &hash_not_computed);
|
||||
var_result = hash;
|
||||
Goto(&done);
|
||||
|
||||
BIND(&hash_not_computed);
|
||||
var_result = CallGetHashRaw(string_key);
|
||||
Goto(&done);
|
||||
|
||||
BIND(&done);
|
||||
return var_result.value();
|
||||
}
|
||||
|
||||
void CollectionsBuiltinsAssembler::SameValueZeroString(
|
||||
TNode<String> key_string, TNode<Object> candidate_key, Label* if_same,
|
||||
Label* if_not_same) {
|
||||
// If the candidate is not a string, the keys are not equal.
|
||||
GotoIf(TaggedIsSmi(candidate_key), if_not_same);
|
||||
GotoIfNot(IsString(CAST(candidate_key)), if_not_same);
|
||||
|
||||
GotoIf(TaggedEqual(key_string, candidate_key), if_same);
|
||||
BranchIfStringEqual(key_string, CAST(candidate_key), if_same, if_not_same);
|
||||
}
|
||||
|
||||
void CollectionsBuiltinsAssembler::SameValueZeroBigInt(
|
||||
TNode<BigInt> key, TNode<Object> candidate_key, Label* if_same,
|
||||
Label* if_not_same) {
|
||||
|
|
@ -1607,7 +1579,7 @@ TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) {
|
|||
|
||||
TF_BUILTIN(MapPrototypeHas, CollectionsBuiltinsAssembler) {
|
||||
const auto receiver = Parameter<Object>(Descriptor::kReceiver);
|
||||
const auto key = Parameter<Object>(Descriptor::kKey);
|
||||
const auto key = Parameter<JSAny>(Descriptor::kKey);
|
||||
const auto context = Parameter<Context>(Descriptor::kContext);
|
||||
|
||||
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.has");
|
||||
|
|
@ -1627,7 +1599,7 @@ TF_BUILTIN(MapPrototypeHas, CollectionsBuiltinsAssembler) {
|
|||
|
||||
TNode<BoolT> CollectionsBuiltinsAssembler::TableHasKey(
|
||||
const TNode<Object> context, TNode<OrderedHashMap> table,
|
||||
TNode<Object> key) {
|
||||
TNode<JSAny> key) {
|
||||
TNode<Smi> index =
|
||||
CAST(CallBuiltin(Builtin::kFindOrderedHashMapEntry, context, table, key));
|
||||
|
||||
|
|
@ -1654,7 +1626,7 @@ const TNode<JSAny> CollectionsBuiltinsAssembler::NormalizeNumberKey(
|
|||
|
||||
template <typename CollectionType>
|
||||
TNode<CollectionType> CollectionsBuiltinsAssembler::AddToOrderedHashTable(
|
||||
const TNode<CollectionType> table, const TNode<Object> key,
|
||||
const TNode<CollectionType> table, TVariable<JSAny>* key,
|
||||
const GrowCollection<CollectionType>& grow,
|
||||
const StoreAtEntry<CollectionType>& store_at_new_entry,
|
||||
const StoreAtEntry<CollectionType>& store_at_existing_entry) {
|
||||
|
|
@ -1681,7 +1653,8 @@ TNode<CollectionType> CollectionsBuiltinsAssembler::AddToOrderedHashTable(
|
|||
&add_entry);
|
||||
|
||||
// Otherwise, go to runtime to compute the hash code.
|
||||
entry_start_position_or_hash = SmiUntag(CallGetOrCreateHashRaw(CAST(key)));
|
||||
entry_start_position_or_hash =
|
||||
SmiUntag(CallGetOrCreateHashRaw(CAST(key->value())));
|
||||
Goto(&add_entry);
|
||||
}
|
||||
|
||||
|
|
@ -1733,23 +1706,22 @@ TNode<CollectionType> CollectionsBuiltinsAssembler::AddToOrderedHashTable(
|
|||
|
||||
TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
|
||||
const auto receiver = Parameter<Object>(Descriptor::kReceiver);
|
||||
auto key = Parameter<JSAny>(Descriptor::kKey);
|
||||
auto original_key = Parameter<JSAny>(Descriptor::kKey);
|
||||
const auto value = Parameter<Object>(Descriptor::kValue);
|
||||
const auto context = Parameter<Context>(Descriptor::kContext);
|
||||
|
||||
TVARIABLE(JSAny, key, original_key);
|
||||
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.set");
|
||||
|
||||
key = NormalizeNumberKey(key);
|
||||
|
||||
GrowCollection<OrderedHashMap> grow = [this, context, receiver]() {
|
||||
CallRuntime(Runtime::kMapGrow, context, receiver);
|
||||
return LoadObjectField<OrderedHashMap>(CAST(receiver), JSMap::kTableOffset);
|
||||
};
|
||||
|
||||
StoreAtEntry<OrderedHashMap> store_at_new_entry =
|
||||
[this, key, value](const TNode<OrderedHashMap> table,
|
||||
const TNode<IntPtrT> entry_start) {
|
||||
UnsafeStoreKeyValueInOrderedHashMapEntry(table, key, value,
|
||||
[this, &key, value](const TNode<OrderedHashMap> table,
|
||||
const TNode<IntPtrT> entry_start) {
|
||||
UnsafeStoreKeyValueInOrderedHashMapEntry(table, key.value(), value,
|
||||
entry_start);
|
||||
};
|
||||
|
||||
|
|
@ -1761,7 +1733,7 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
|
|||
|
||||
const TNode<OrderedHashMap> table =
|
||||
LoadObjectField<OrderedHashMap>(CAST(receiver), JSMap::kTableOffset);
|
||||
AddToOrderedHashTable(table, key, grow, store_at_new_entry,
|
||||
AddToOrderedHashTable(table, &key, grow, store_at_new_entry,
|
||||
store_at_existing_entry);
|
||||
Return(receiver);
|
||||
}
|
||||
|
|
@ -1805,7 +1777,7 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashTableNewEntry(
|
|||
// when we are adding new entries from a Set.
|
||||
template <typename CollectionType>
|
||||
void CollectionsBuiltinsAssembler::AddNewToOrderedHashTable(
|
||||
const TNode<CollectionType> table, const TNode<Object> normalised_key,
|
||||
const TNode<CollectionType> table, const TNode<JSAny> normalised_key,
|
||||
const TNode<IntPtrT> number_of_buckets, const TNode<IntPtrT> occupancy,
|
||||
const StoreAtEntry<CollectionType>& store_at_new_entry) {
|
||||
Label if_key_smi(this), if_key_string(this), if_key_heap_number(this),
|
||||
|
|
@ -1836,7 +1808,9 @@ void CollectionsBuiltinsAssembler::AddNewToOrderedHashTable(
|
|||
|
||||
BIND(&if_key_string);
|
||||
{
|
||||
hash = Signed(ChangeUint32ToWord(ComputeStringHash(CAST(normalised_key))));
|
||||
CSA_DCHECK(this, IsInternalizedStringInstanceType(key_instance_type));
|
||||
|
||||
hash = Signed(ChangeUint32ToWord(LoadNameHash(CAST(normalised_key))));
|
||||
Goto(&call_store);
|
||||
}
|
||||
|
||||
|
|
@ -1867,7 +1841,7 @@ void CollectionsBuiltinsAssembler::StoreValueInOrderedHashMapEntry(
|
|||
}
|
||||
|
||||
void CollectionsBuiltinsAssembler::StoreKeyValueInOrderedHashMapEntry(
|
||||
const TNode<OrderedHashMap> table, const TNode<Object> key,
|
||||
const TNode<OrderedHashMap> table, TNode<Object> key,
|
||||
const TNode<Object> value, const TNode<IntPtrT> entry_start,
|
||||
CheckBounds check_bounds) {
|
||||
StoreFixedArrayElement(table, entry_start, key, UPDATE_WRITE_BARRIER,
|
||||
|
|
@ -1878,9 +1852,11 @@ void CollectionsBuiltinsAssembler::StoreKeyValueInOrderedHashMapEntry(
|
|||
|
||||
TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) {
|
||||
const auto receiver = Parameter<Object>(Descriptor::kReceiver);
|
||||
const auto key = Parameter<Object>(Descriptor::kKey);
|
||||
const auto original_key = Parameter<JSAny>(Descriptor::kKey);
|
||||
const auto context = Parameter<Context>(Descriptor::kContext);
|
||||
|
||||
TVARIABLE(JSAny, key, original_key);
|
||||
|
||||
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
|
||||
"Map.prototype.delete");
|
||||
|
||||
|
|
@ -1891,7 +1867,7 @@ TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) {
|
|||
Label entry_found(this), not_found(this);
|
||||
|
||||
TryLookupOrderedHashTableIndex<OrderedHashMap>(
|
||||
table, key, &entry_start_position_or_hash, &entry_found, ¬_found);
|
||||
table, &key, &entry_start_position_or_hash, &entry_found, ¬_found);
|
||||
|
||||
BIND(¬_found);
|
||||
Return(FalseConstant());
|
||||
|
|
@ -1933,12 +1909,12 @@ TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) {
|
|||
|
||||
TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
|
||||
const auto receiver = Parameter<Object>(Descriptor::kReceiver);
|
||||
auto key = Parameter<JSAny>(Descriptor::kKey);
|
||||
auto original_key = Parameter<JSAny>(Descriptor::kKey);
|
||||
const auto context = Parameter<Context>(Descriptor::kContext);
|
||||
|
||||
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.add");
|
||||
TVARIABLE(JSAny, key, original_key);
|
||||
|
||||
key = NormalizeNumberKey(key);
|
||||
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.add");
|
||||
|
||||
GrowCollection<OrderedHashSet> grow = [this, context, receiver]() {
|
||||
CallRuntime(Runtime::kSetGrow, context, receiver);
|
||||
|
|
@ -1946,9 +1922,9 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
|
|||
};
|
||||
|
||||
StoreAtEntry<OrderedHashSet> store_at_new_entry =
|
||||
[this, key](const TNode<OrderedHashSet> table,
|
||||
const TNode<IntPtrT> entry_start) {
|
||||
UnsafeStoreKeyInOrderedHashSetEntry(table, key, entry_start);
|
||||
[this, &key](const TNode<OrderedHashSet> table,
|
||||
const TNode<IntPtrT> entry_start) {
|
||||
UnsafeStoreKeyInOrderedHashSetEntry(table, key.value(), entry_start);
|
||||
};
|
||||
|
||||
StoreAtEntry<OrderedHashSet> store_at_existing_entry =
|
||||
|
|
@ -1958,15 +1934,15 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
|
|||
|
||||
const TNode<OrderedHashSet> table =
|
||||
LoadObjectField<OrderedHashSet>(CAST(receiver), JSSet::kTableOffset);
|
||||
AddToOrderedHashTable(table, key, grow, store_at_new_entry,
|
||||
AddToOrderedHashTable(table, &key, grow, store_at_new_entry,
|
||||
store_at_existing_entry);
|
||||
Return(receiver);
|
||||
}
|
||||
|
||||
TNode<OrderedHashSet> CollectionsBuiltinsAssembler::AddToSetTable(
|
||||
const TNode<Object> context, TNode<OrderedHashSet> table, TNode<JSAny> key,
|
||||
TNode<String> method_name) {
|
||||
key = NormalizeNumberKey(key);
|
||||
const TNode<Object> context, TNode<OrderedHashSet> table,
|
||||
TNode<JSAny> original_key, TNode<String> method_name) {
|
||||
TVARIABLE(JSAny, key, original_key);
|
||||
|
||||
GrowCollection<OrderedHashSet> grow = [this, context, table, method_name]() {
|
||||
TNode<OrderedHashSet> new_table = Cast(
|
||||
|
|
@ -1977,9 +1953,9 @@ TNode<OrderedHashSet> CollectionsBuiltinsAssembler::AddToSetTable(
|
|||
};
|
||||
|
||||
StoreAtEntry<OrderedHashSet> store_at_new_entry =
|
||||
[this, key](const TNode<OrderedHashSet> table,
|
||||
const TNode<IntPtrT> entry_start) {
|
||||
UnsafeStoreKeyInOrderedHashSetEntry(table, key, entry_start);
|
||||
[this, &key](const TNode<OrderedHashSet> table,
|
||||
const TNode<IntPtrT> entry_start) {
|
||||
UnsafeStoreKeyInOrderedHashSetEntry(table, key.value(), entry_start);
|
||||
};
|
||||
|
||||
StoreAtEntry<OrderedHashSet> store_at_existing_entry =
|
||||
|
|
@ -1987,7 +1963,7 @@ TNode<OrderedHashSet> CollectionsBuiltinsAssembler::AddToSetTable(
|
|||
// If the entry was found, there is nothing to do.
|
||||
};
|
||||
|
||||
return AddToOrderedHashTable(table, key, grow, store_at_new_entry,
|
||||
return AddToOrderedHashTable(table, &key, grow, store_at_new_entry,
|
||||
store_at_existing_entry);
|
||||
}
|
||||
|
||||
|
|
@ -2021,7 +1997,7 @@ CollectionsBuiltinsAssembler::LoadValueFromOrderedHashMapEntry(
|
|||
|
||||
TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) {
|
||||
const auto receiver = Parameter<Object>(Descriptor::kReceiver);
|
||||
const auto key = Parameter<Object>(Descriptor::kKey);
|
||||
const auto key = Parameter<JSAny>(Descriptor::kKey);
|
||||
const auto context = Parameter<Context>(Descriptor::kContext);
|
||||
|
||||
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
|
||||
|
|
@ -2056,13 +2032,14 @@ TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) {
|
|||
}
|
||||
|
||||
TNode<Smi> CollectionsBuiltinsAssembler::DeleteFromSetTable(
|
||||
const TNode<Object> context, TNode<OrderedHashSet> table, TNode<Object> key,
|
||||
Label* not_found) {
|
||||
const TNode<Object> context, TNode<OrderedHashSet> table,
|
||||
TNode<JSAny> original_key, Label* not_found) {
|
||||
TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0));
|
||||
Label entry_found(this);
|
||||
|
||||
TVARIABLE(JSAny, key, original_key);
|
||||
TryLookupOrderedHashTableIndex<OrderedHashSet>(
|
||||
table, key, &entry_start_position_or_hash, &entry_found, not_found);
|
||||
table, &key, &entry_start_position_or_hash, &entry_found, not_found);
|
||||
|
||||
BIND(&entry_found);
|
||||
// If we found the entry, mark the entry as deleted.
|
||||
|
|
@ -2258,7 +2235,7 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
|
|||
|
||||
TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) {
|
||||
const auto receiver = Parameter<Object>(Descriptor::kReceiver);
|
||||
const auto key = Parameter<Object>(Descriptor::kKey);
|
||||
const auto key = Parameter<JSAny>(Descriptor::kKey);
|
||||
const auto context = Parameter<Context>(Descriptor::kContext);
|
||||
|
||||
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.has");
|
||||
|
|
@ -2278,7 +2255,7 @@ TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) {
|
|||
|
||||
TNode<BoolT> CollectionsBuiltinsAssembler::TableHasKey(
|
||||
const TNode<Object> context, TNode<OrderedHashSet> table,
|
||||
TNode<Object> key) {
|
||||
TNode<JSAny> key) {
|
||||
TNode<Smi> index =
|
||||
CAST(CallBuiltin(Builtin::kFindOrderedHashSetEntry, context, table, key));
|
||||
|
||||
|
|
@ -2440,57 +2417,88 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
|
|||
|
||||
template <typename CollectionType>
|
||||
void CollectionsBuiltinsAssembler::TryLookupOrderedHashTableIndex(
|
||||
const TNode<CollectionType> table, const TNode<Object> key,
|
||||
const TNode<CollectionType> table, TVariable<JSAny>* key,
|
||||
TVariable<IntPtrT>* result, Label* if_entry_found, Label* if_not_found) {
|
||||
Label if_key_smi(this), if_key_string(this), if_key_heap_number(this),
|
||||
if_key_bigint(this);
|
||||
if_key_bigint(this), if_key_minus_0(this);
|
||||
|
||||
GotoIf(TaggedIsSmi(key), &if_key_smi);
|
||||
GotoIf(TaggedIsSmi(key->value()), &if_key_smi);
|
||||
|
||||
TNode<Map> key_map = LoadMap(CAST(key));
|
||||
TNode<Uint16T> key_instance_type = LoadMapInstanceType(key_map);
|
||||
TNode<Map> key_map = LoadMap(CAST(key->value()));
|
||||
|
||||
GotoIf(IsStringInstanceType(key_instance_type), &if_key_string);
|
||||
GotoIf(IsHeapNumberMap(key_map), &if_key_heap_number);
|
||||
|
||||
TNode<Uint16T> key_instance_type = LoadMapInstanceType(key_map);
|
||||
GotoIf(IsStringInstanceType(key_instance_type), &if_key_string);
|
||||
GotoIf(IsBigIntInstanceType(key_instance_type), &if_key_bigint);
|
||||
|
||||
FindOrderedHashTableEntryForOtherKey<CollectionType>(
|
||||
table, CAST(key), result, if_entry_found, if_not_found);
|
||||
|
||||
BIND(&if_key_smi);
|
||||
{
|
||||
FindOrderedHashTableEntryForSmiKey<CollectionType>(
|
||||
table, CAST(key), result, if_entry_found, if_not_found);
|
||||
}
|
||||
table, CAST(key->value()), result, if_entry_found, if_not_found);
|
||||
|
||||
BIND(&if_key_string);
|
||||
{
|
||||
FindOrderedHashTableEntryForStringKey<CollectionType>(
|
||||
table, CAST(key), result, if_entry_found, if_not_found);
|
||||
Label if_unique(this), if_not_thin(this);
|
||||
GotoIf(IsInternalizedStringInstanceType(key_instance_type), &if_unique);
|
||||
GotoIfNot(IsSetWord32(key_instance_type, kThinStringTagBit), &if_not_thin);
|
||||
{
|
||||
*key = LoadObjectField<String>(CAST(key->value()),
|
||||
offsetof(ThinString, actual_));
|
||||
Goto(&if_unique);
|
||||
}
|
||||
|
||||
BIND(&if_not_thin);
|
||||
{
|
||||
*key = CAST(CallRuntime(Runtime::kInternalizeString, NoContextConstant(),
|
||||
key->value()));
|
||||
Goto(&if_unique);
|
||||
}
|
||||
|
||||
BIND(&if_unique);
|
||||
{
|
||||
FindOrderedHashTableEntryForStringKey<CollectionType>(
|
||||
table, CAST(key->value()), result, if_entry_found, if_not_found);
|
||||
}
|
||||
}
|
||||
|
||||
BIND(&if_key_heap_number);
|
||||
{
|
||||
const TNode<Float64T> number = LoadHeapNumberValue(CAST(key->value()));
|
||||
GotoIf(Float64Equal(number, Float64Constant(0.0)), &if_key_minus_0);
|
||||
// We know the value is zero, so we take the key to be Smi 0.
|
||||
// Another option would be to normalize to Smi here.
|
||||
FindOrderedHashTableEntryForHeapNumberKey<CollectionType>(
|
||||
table, CAST(key), result, if_entry_found, if_not_found);
|
||||
table, CAST(key->value()), result, if_entry_found, if_not_found);
|
||||
}
|
||||
|
||||
BIND(&if_key_minus_0);
|
||||
{
|
||||
*key = SmiConstant(0);
|
||||
Goto(&if_key_smi);
|
||||
}
|
||||
|
||||
BIND(&if_key_smi);
|
||||
{
|
||||
FindOrderedHashTableEntryForSmiKey<CollectionType>(
|
||||
table, CAST(key->value()), result, if_entry_found, if_not_found);
|
||||
}
|
||||
|
||||
BIND(&if_key_bigint);
|
||||
{
|
||||
FindOrderedHashTableEntryForBigIntKey<CollectionType>(
|
||||
table, CAST(key), result, if_entry_found, if_not_found);
|
||||
table, CAST(key->value()), result, if_entry_found, if_not_found);
|
||||
}
|
||||
}
|
||||
|
||||
TF_BUILTIN(FindOrderedHashMapEntry, CollectionsBuiltinsAssembler) {
|
||||
const auto table = Parameter<OrderedHashMap>(Descriptor::kTable);
|
||||
const auto key = Parameter<Object>(Descriptor::kKey);
|
||||
const auto original_key = Parameter<JSAny>(Descriptor::kKey);
|
||||
|
||||
TVARIABLE(JSAny, key, original_key);
|
||||
TVARIABLE(IntPtrT, entry_start_position, IntPtrConstant(0));
|
||||
Label entry_found(this), not_found(this);
|
||||
|
||||
TryLookupOrderedHashTableIndex<OrderedHashMap>(
|
||||
table, key, &entry_start_position, &entry_found, ¬_found);
|
||||
table, &key, &entry_start_position, &entry_found, ¬_found);
|
||||
|
||||
BIND(&entry_found);
|
||||
Return(SmiTag(entry_start_position.value()));
|
||||
|
|
@ -2501,13 +2509,15 @@ TF_BUILTIN(FindOrderedHashMapEntry, CollectionsBuiltinsAssembler) {
|
|||
|
||||
TF_BUILTIN(FindOrderedHashSetEntry, CollectionsBuiltinsAssembler) {
|
||||
const auto table = Parameter<OrderedHashSet>(Descriptor::kTable);
|
||||
const auto key = Parameter<Object>(Descriptor::kKey);
|
||||
const auto original_key = Parameter<JSAny>(Descriptor::kKey);
|
||||
|
||||
TVARIABLE(JSAny, key, original_key);
|
||||
|
||||
TVARIABLE(IntPtrT, entry_start_position, IntPtrConstant(0));
|
||||
Label entry_found(this), not_found(this);
|
||||
|
||||
TryLookupOrderedHashTableIndex<OrderedHashSet>(
|
||||
table, key, &entry_start_position, &entry_found, ¬_found);
|
||||
table, &key, &entry_start_position, &entry_found, ¬_found);
|
||||
|
||||
BIND(&entry_found);
|
||||
Return(SmiTag(entry_start_position.value()));
|
||||
|
|
@ -2518,8 +2528,9 @@ TF_BUILTIN(FindOrderedHashSetEntry, CollectionsBuiltinsAssembler) {
|
|||
|
||||
const TNode<OrderedHashMap> CollectionsBuiltinsAssembler::AddValueToKeyedGroup(
|
||||
const TNode<Context> context, const TNode<OrderedHashMap> groups,
|
||||
const TNode<Object> key, const TNode<Object> value,
|
||||
const TNode<JSAny> original_key, const TNode<Object> value,
|
||||
const TNode<String> methodName) {
|
||||
TVARIABLE(JSAny, key, original_key);
|
||||
GrowCollection<OrderedHashMap> grow = [&]() {
|
||||
TNode<OrderedHashMap> new_groups = CAST(
|
||||
CallRuntime(Runtime::kOrderedHashMapGrow, context, groups, methodName));
|
||||
|
|
@ -2539,24 +2550,26 @@ const TNode<OrderedHashMap> CollectionsBuiltinsAssembler::AddValueToKeyedGroup(
|
|||
};
|
||||
|
||||
StoreAtEntry<OrderedHashMap> store_at_new_entry =
|
||||
[this, key, value](const TNode<OrderedHashMap> table,
|
||||
const TNode<IntPtrT> entry_start) {
|
||||
[this, &key, value](const TNode<OrderedHashMap> table,
|
||||
const TNode<IntPtrT> entry_start) {
|
||||
TNode<ArrayList> array = AllocateArrayList(SmiConstant(1));
|
||||
ArrayListSet(array, SmiConstant(0), value);
|
||||
ArrayListSetLength(array, SmiConstant(1));
|
||||
StoreKeyValueInOrderedHashMapEntry(table, key, array, entry_start);
|
||||
StoreKeyValueInOrderedHashMapEntry(table, key.value(), array,
|
||||
entry_start);
|
||||
};
|
||||
|
||||
StoreAtEntry<OrderedHashMap> store_at_existing_entry =
|
||||
[this, key, value](const TNode<OrderedHashMap> table,
|
||||
const TNode<IntPtrT> entry_start) {
|
||||
[this, &key, value](const TNode<OrderedHashMap> table,
|
||||
const TNode<IntPtrT> entry_start) {
|
||||
TNode<ArrayList> array =
|
||||
CAST(LoadValueFromOrderedHashMapEntry(table, entry_start));
|
||||
TNode<ArrayList> new_array = ArrayListAdd(array, value);
|
||||
StoreKeyValueInOrderedHashMapEntry(table, key, new_array, entry_start);
|
||||
StoreKeyValueInOrderedHashMapEntry(table, key.value(), new_array,
|
||||
entry_start);
|
||||
};
|
||||
|
||||
return AddToOrderedHashTable(groups, key, grow, store_at_new_entry,
|
||||
return AddToOrderedHashTable(groups, &key, grow, store_at_new_entry,
|
||||
store_at_existing_entry);
|
||||
}
|
||||
|
||||
|
|
50
deps/v8/src/builtins/builtins-collections-gen.h
vendored
@ -194,16 +194,16 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
|
|||
|
||||
// Checks if the set/map contains a key.
|
||||
TNode<BoolT> TableHasKey(const TNode<Object> context,
|
||||
TNode<OrderedHashSet> table, TNode<Object> key);
|
||||
TNode<OrderedHashSet> table, TNode<JSAny> key);
|
||||
TNode<BoolT> TableHasKey(const TNode<Object> context,
|
||||
TNode<OrderedHashMap> table, TNode<Object> key);
|
||||
TNode<OrderedHashMap> table, TNode<JSAny> key);
|
||||
|
||||
// Adds {value} to a FixedArray keyed by {key} in {groups}.
|
||||
//
|
||||
// Utility used by Object.groupBy and Map.groupBy.
|
||||
const TNode<OrderedHashMap> AddValueToKeyedGroup(
|
||||
const TNode<Context> context, const TNode<OrderedHashMap> groups,
|
||||
const TNode<Object> key, const TNode<Object> value,
|
||||
const TNode<JSAny> original_key, const TNode<Object> value,
|
||||
const TNode<String> methodName);
|
||||
|
||||
// Normalizes -0 to +0.
|
||||
|
|
@ -219,7 +219,7 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
|
|||
}
|
||||
|
||||
TNode<Smi> DeleteFromSetTable(const TNode<Object> context,
|
||||
TNode<OrderedHashSet> table, TNode<Object> key,
|
||||
TNode<OrderedHashSet> table, TNode<JSAny> key,
|
||||
Label* not_found);
|
||||
|
||||
TorqueStructOrderedHashSetIndexPair TransitionOrderedHashSetNoUpdate(
|
||||
|
|
@ -307,6 +307,16 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
|
|||
TNode<CollectionType> table, TNode<HeapNumber> key_heap_number,
|
||||
TVariable<IntPtrT>* result, Label* entry_found, Label* not_found);
|
||||
|
||||
// Specialization for string.
|
||||
// The {result} variable will contain the entry index if the key was found,
|
||||
// or the hash code otherwise.
|
||||
template <typename CollectionType>
|
||||
void FindOrderedHashTableEntryForStringKey(TNode<CollectionType> table,
|
||||
TNode<String> key_tagged,
|
||||
TVariable<IntPtrT>* result,
|
||||
Label* entry_found,
|
||||
Label* not_found);
|
||||
|
||||
// Specialization for bigints.
|
||||
// The {result} variable will contain the entry index if the key was found,
|
||||
// or the hash code otherwise.
|
||||
|
|
@ -319,19 +329,6 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
|
|||
Label* entry_found,
|
||||
Label* not_found);
|
||||
|
||||
// Specialization for string.
|
||||
// The {result} variable will contain the entry index if the key was found,
|
||||
// or the hash code otherwise.
|
||||
template <typename CollectionType>
|
||||
void FindOrderedHashTableEntryForStringKey(TNode<CollectionType> table,
|
||||
TNode<String> key_tagged,
|
||||
TVariable<IntPtrT>* result,
|
||||
Label* entry_found,
|
||||
Label* not_found);
|
||||
TNode<Uint32T> ComputeStringHash(TNode<String> string_key);
|
||||
void SameValueZeroString(TNode<String> key_string,
|
||||
TNode<Object> candidate_key, Label* if_same,
|
||||
Label* if_not_same);
|
||||
|
||||
// Specialization for non-strings, non-numbers. For those we only need
|
||||
// reference equality to compare the keys.
|
||||
|
|
@ -365,14 +362,14 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
|
|||
const TNode<IntPtrT> entry_start)>;
|
||||
template <typename CollectionType>
|
||||
TNode<CollectionType> AddToOrderedHashTable(
|
||||
const TNode<CollectionType> table, const TNode<Object> key,
|
||||
const TNode<CollectionType> table, TVariable<JSAny>* key,
|
||||
const GrowCollection<CollectionType>& grow,
|
||||
const StoreAtEntry<CollectionType>& store_at_new_entry,
|
||||
const StoreAtEntry<CollectionType>& store_at_existing_entry);
|
||||
|
||||
template <typename CollectionType>
|
||||
void TryLookupOrderedHashTableIndex(const TNode<CollectionType> table,
|
||||
const TNode<Object> key,
|
||||
TVariable<JSAny>* key,
|
||||
TVariable<IntPtrT>* result,
|
||||
Label* if_entry_found,
|
||||
Label* if_not_found);
|
||||
|
|
@ -380,7 +377,7 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
|
|||
// Helper function to store a new entry when constructing sets from sets.
|
||||
template <typename CollectionType>
|
||||
void AddNewToOrderedHashTable(
|
||||
const TNode<CollectionType> table, const TNode<Object> normalised_key,
|
||||
const TNode<CollectionType> table, const TNode<JSAny> normalised_key,
|
||||
const TNode<IntPtrT> number_of_buckets, const TNode<IntPtrT> occupancy,
|
||||
const StoreAtEntry<CollectionType>& store_at_new_entry);
|
||||
|
||||
|
|
@ -388,16 +385,13 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
|
|||
const TNode<JSAny> key,
|
||||
const TNode<IntPtrT> number_of_buckets,
|
||||
const TNode<IntPtrT> occupancy) {
|
||||
TNode<JSAny> normalised_key = NormalizeNumberKey(key);
|
||||
StoreAtEntry<OrderedHashSet> store_at_new_entry =
|
||||
[this, normalised_key](const TNode<OrderedHashSet> table,
|
||||
const TNode<IntPtrT> entry_start) {
|
||||
UnsafeStoreKeyInOrderedHashSetEntry(table, normalised_key,
|
||||
entry_start);
|
||||
[this, key](const TNode<OrderedHashSet> table,
|
||||
const TNode<IntPtrT> entry_start) {
|
||||
UnsafeStoreKeyInOrderedHashSetEntry(table, key, entry_start);
|
||||
};
|
||||
AddNewToOrderedHashTable<OrderedHashSet>(table, normalised_key,
|
||||
number_of_buckets, occupancy,
|
||||
store_at_new_entry);
|
||||
AddNewToOrderedHashTable<OrderedHashSet>(table, key, number_of_buckets,
|
||||
occupancy, store_at_new_entry);
|
||||
}
|
||||
|
||||
// Generates code to store a new entry into {table}, connecting to the bucket
|
||||
|
|
8
deps/v8/src/builtins/builtins-dataview.cc
vendored

@@ -111,13 +111,15 @@ BUILTIN(DataViewConstructor) {
         JSFunction::GetDerivedRabGsabDataViewMap(isolate, new_target));
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
         isolate, result,
-        JSObject::NewWithMap(isolate, initial_map, {},
-                             NewJSObjectType::kAPIWrapper));
+        JSObject::NewWithMap(
+            isolate, initial_map, {},
+            NewJSObjectType::kMaybeEmbedderFieldsAndApiWrapper));
   } else {
     // Create a JSDataView.
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
         isolate, result,
-        JSObject::New(target, new_target, {}, NewJSObjectType::kAPIWrapper));
+        JSObject::New(target, new_target, {},
+                      NewJSObjectType::kMaybeEmbedderFieldsAndApiWrapper));
   }
   auto data_view = Cast<JSDataViewOrRabGsabDataView>(result);
   {
6
deps/v8/src/builtins/builtins-definitions.h
vendored

@@ -720,6 +720,7 @@ constexpr int kGearboxGenericBuiltinIdOffset = -2;
   CPP(FunctionPrototypeBind, kDontAdaptArgumentsSentinel)                      \
   IF_WASM(CPP, WebAssemblyFunctionPrototypeBind, kDontAdaptArgumentsSentinel)  \
   IF_WASM(TFJ, WasmConstructorWrapper, kDontAdaptArgumentsSentinel)            \
+  IF_WASM(TFJ, WasmMethodWrapper, kDontAdaptArgumentsSentinel)                 \
   ASM(FunctionPrototypeCall, JSTrampoline)                                     \
   /* ES6 #sec-function.prototype.tostring */                                   \
   CPP(FunctionPrototypeToString, kDontAdaptArgumentsSentinel)                  \

@@ -1165,7 +1166,7 @@ constexpr int kGearboxGenericBuiltinIdOffset = -2;
                                                                                \
   /* Wasm */                                                                   \
   IF_WASM_DRUMBRAKE(ASM, WasmInterpreterEntry, WasmDummy)                      \
-  IF_WASM_DRUMBRAKE(ASM, GenericJSToWasmInterpreterWrapper, WasmDummy)         \
+  IF_WASM_DRUMBRAKE(ASM, GenericJSToWasmInterpreterWrapper, JSTrampoline)      \
   IF_WASM_DRUMBRAKE(ASM, WasmInterpreterCWasmEntry, WasmDummy)                 \
   IF_WASM_DRUMBRAKE(ASM, GenericWasmToJSInterpreterWrapper, WasmDummy)         \
                                                                                \

@@ -1383,6 +1384,8 @@ constexpr int kGearboxGenericBuiltinIdOffset = -2;
   IF_WASM(TFC, WasmToJsWrapperInvalidSig, WasmToJSWrapper)                     \
   IF_WASM(ASM, WasmSuspend, WasmSuspend)                                       \
   IF_WASM(ASM, WasmResume, JSTrampoline)                                       \
+  IF_WASM(ASM, WasmFXResume, WasmFXResume)                                     \
+  IF_WASM(ASM, WasmFXReturn, WasmFXReturn)                                     \
   IF_WASM(ASM, WasmReject, JSTrampoline)                                       \
   IF_WASM(ASM, WasmTrapHandlerLandingPad, WasmDummy)                           \
   IF_WASM(ASM, WasmCompileLazy, WasmDummy)                                     \

@@ -1493,7 +1496,6 @@ constexpr int kGearboxGenericBuiltinIdOffset = -2;
   ASM(CEntry_Return1_ArgvOnStack_BuiltinExit, CEntry1ArgvOnStack)              \
   ASM(CEntry_Return1_ArgvOnStack_NoBuiltinExit, CEntryDummy)                   \
   ASM(CEntry_Return2_ArgvInRegister_NoBuiltinExit, InterpreterCEntry2)         \
-  ASM(CEntry_Return2_ArgvOnStack_BuiltinExit, CEntryDummy)                     \
   ASM(CEntry_Return2_ArgvOnStack_NoBuiltinExit, CEntryDummy)                   \
   ASM(WasmCEntry, CEntryDummy)                                                 \
   ASM(DirectCEntry, CEntryDummy)                                               \
2
deps/v8/src/builtins/builtins-inl.h
vendored

@@ -181,8 +181,6 @@ constexpr Builtin Builtins::CEntry(int result_size, ArgvMode argv_mode,
     return Builtin::kCEntry_Return1_ArgvInRegister_NoBuiltinExit;
   } else if (rs == 2 && am == ArgvMode::kStack && !be) {
     return Builtin::kCEntry_Return2_ArgvOnStack_NoBuiltinExit;
-  } else if (rs == 2 && am == ArgvMode::kStack && be) {
-    return Builtin::kCEntry_Return2_ArgvOnStack_BuiltinExit;
   } else if (rs == 2 && am == ArgvMode::kRegister && !be) {
     return Builtin::kCEntry_Return2_ArgvInRegister_NoBuiltinExit;
   }
|
|||
|
|
@@ -1423,11 +1423,6 @@ void Builtins::Generate_CEntry_Return2_ArgvOnStack_NoBuiltinExit(
   Generate_CEntry(masm, 2, ArgvMode::kStack, false, false);
 }

-void Builtins::Generate_CEntry_Return2_ArgvOnStack_BuiltinExit(
-    MacroAssembler* masm) {
-  Generate_CEntry(masm, 2, ArgvMode::kStack, true, false);
-}
-
 void Builtins::Generate_CEntry_Return2_ArgvInRegister_NoBuiltinExit(
     MacroAssembler* masm) {
   Generate_CEntry(masm, 2, ArgvMode::kRegister, false, false);
18
deps/v8/src/builtins/builtins-number-tsa.cc
vendored

@@ -282,8 +282,9 @@ class NumberBuiltinsAssemblerTS
         // Fast path where both {lhs} and {rhs} are strings. Since {lhs} is a
         // string we no longer need an Oddball check.
         CombineFeedback(BinaryOperationFeedback::kString);
-        V<Object> result = CallBuiltin_StringAdd_CheckNone(
-            isolate(), context, V<String>::Cast(lhs), V<String>::Cast(rhs));
+        V<Object> result = CallBuiltin<builtin::StringAdd_CheckNone>(
+            context,
+            {.left = V<String>::Cast(lhs), .right = V<String>::Cast(rhs)});
         GOTO(done, result);
       } ELSE IF (IsStringWrapper(rhs_heap_object)) {
         // lhs is a string and rhs is a string wrapper.

@@ -298,8 +299,8 @@ class NumberBuiltinsAssemblerTS
         CombineFeedback(BinaryOperationFeedback::kStringOrStringWrapper);
         V<String> rhs_string = V<String>::Cast(LoadField(
             rhs_heap_object, AccessBuilderTS::ForJSPrimitiveWrapperValue()));
-        V<Object> result = CallBuiltin_StringAdd_CheckNone(
-            isolate(), context, V<String>::Cast(lhs), rhs_string);
+        V<Object> result = CallBuiltin<builtin::StringAdd_CheckNone>(
+            context, {.left = V<String>::Cast(lhs), .right = rhs_string});
         GOTO(done, result);
       } ELSE {
         GOTO(call_with_any_feedback);

@@ -367,8 +368,10 @@ class NumberBuiltinsAssemblerTS
     {
       // Both {lhs} and {rhs} are of BigInt type.
       CombineFeedbackOnException(BinaryOperationFeedback::kAny);
-      V<BigInt> result = CallBuiltin_BigIntAdd(
-          isolate(), context, V<BigInt>::Cast(lhs), V<BigInt>::Cast(rhs));
+      V<BigInt> result = CallBuiltin<builtin::BigIntAdd>(
+          context,
+          {.left = V<BigInt>::Cast(lhs), .right = V<BigInt>::Cast(rhs)});
       CombineFeedback(BinaryOperationFeedback::kBigInt);
       GOTO(done, result);
     }

@@ -381,8 +384,7 @@ class NumberBuiltinsAssemblerTS
     BIND(call_add_stub);
     {
       V<Object> result =
-          CallBuiltin_Add(isolate(), FrameStateForCall::NoFrameState(this),
-                          context, lhs, rhs, compiler::LazyDeoptOnThrow::kNo);
+          CallBuiltin<builtin::Add>(context, {.left = lhs, .right = rhs});
       GOTO(done, result);
     }

4
deps/v8/src/builtins/builtins-temporal.cc
vendored

@@ -484,7 +484,7 @@ TEMPORAL_GET_RUST(ZonedDateTime, zoned_date_time, EpochMilliseconds,
                   epochMilliseconds, epoch_milliseconds, CONVERT_DOUBLE)
 TEMPORAL_PROTOTYPE_METHOD0(ZonedDateTime, EpochNanoseconds, nanoseconds)
 TEMPORAL_GET_RUST(ZonedDateTime, zoned_date_time, DayOfWeek, dayOfWeek,
-                  day_of_week, CONVERT_FALLIBLE_INTEGER_AS_NULLABLE)
+                  day_of_week, CONVERT_SMI)
 TEMPORAL_GET_RUST(ZonedDateTime, zoned_date_time, DayOfYear, dayOfYear,
                   day_of_year, CONVERT_SMI)
 TEMPORAL_GET_RUST(ZonedDateTime, zoned_date_time, WeekOfYear, weekOfYear,

@@ -493,7 +493,7 @@ TEMPORAL_GET_RUST(ZonedDateTime, zoned_date_time, YearOfWeek, YearOfWeek,
                   year_of_week, CONVERT_NULLABLE_INTEGER)
 TEMPORAL_PROTOTYPE_METHOD0(ZonedDateTime, HoursInDay, hoursInDay)
 TEMPORAL_GET_RUST(ZonedDateTime, zoned_date_time, DaysInWeek, daysInWeek,
-                  days_in_week, CONVERT_FALLIBLE_INTEGER_AS_NULLABLE)
+                  days_in_week, CONVERT_SMI)
 TEMPORAL_GET_RUST(ZonedDateTime, zoned_date_time, DaysInMonth, daysInMonth,
                   days_in_month, CONVERT_SMI)
 TEMPORAL_GET_RUST(ZonedDateTime, zoned_date_time, DaysInYear, daysInYear,
13
deps/v8/src/builtins/builtins-typed-array.cc
vendored

@@ -3,6 +3,7 @@
 // found in the LICENSE file.

+#include "src/base/logging.h"
 #include "src/base/macros.h"
 #include "src/builtins/builtins-utils-inl.h"
 #include "src/builtins/builtins.h"
 #include "src/common/message-template.h"

@@ -15,8 +16,7 @@
 #include "src/objects/simd.h"
 #include "third_party/simdutf/simdutf.h"

-namespace v8 {
-namespace internal {
+namespace v8::internal {

 // -----------------------------------------------------------------------------
 // ES6 section 22.2 TypedArray Objects

@@ -837,11 +837,11 @@ BUILTIN(Uint8ArrayPrototypeToBase64) {
   size_t simd_result_size;
   if (uint8array->buffer()->is_shared()) {
     simd_result_size = simdutf::atomic_binary_to_base64(
-        std::bit_cast<const char*>(uint8array->DataPtr()), length,
+        reinterpret_cast<const char*>(uint8array->DataPtr()), length,
         reinterpret_cast<char*>(output->GetChars(no_gc)), alphabet);
   } else {
     simd_result_size = simdutf::binary_to_base64(
-        std::bit_cast<const char*>(uint8array->DataPtr()), length,
+        reinterpret_cast<const char*>(uint8array->DataPtr()), length,
        reinterpret_cast<char*>(output->GetChars(no_gc)), alphabet);
   }
   DCHECK_EQ(simd_result_size, output_length);

@@ -1080,9 +1080,8 @@ BUILTIN(Uint8ArrayPrototypeToHex) {
   //   b. Set hex to StringPad(hex, 2, "0", start).
   //   c. Set out to the string-concatenation of out and hex.
   // 6. Return out.
-  return Uint8ArrayToHex(std::bit_cast<const char*>(uint8array->DataPtr()),
+  return Uint8ArrayToHex(reinterpret_cast<const char*>(uint8array->DataPtr()),
                          length, uint8array->buffer()->is_shared(), output);
 }

-}  // namespace internal
-}  // namespace v8
+}  // namespace v8::internal
42
deps/v8/src/builtins/builtins-wasm-gen.cc
vendored

@@ -177,6 +177,48 @@ TF_BUILTIN(WasmConstructorWrapper, WasmBuiltinsAssembler) {
               target, argc);
 }

+// Similar, but for exported Wasm functions that can be called as methods,
+// i.e. that pass their JS-side receiver as their Wasm-side first parameter.
+// To wrap a Wasm function like the following:
+//
+//   (func (export "bar") (param $recv externref) (param $other ...)
+//     (do-something-with $recv)
+//   )
+//
+// we create wrappers here that behave as if they were created by this
+// JS snippet:
+//
+//   function MakeMethod(wasm_instance, name) {
+//     let wasm_func = wasm_instance.exports[name];
+//     return function(...args) {
+//       return wasm_func(this, ...args);
+//     }
+//   }
+//   Foo.prototype.bar = MakeMethod(..., "bar");
+//
+// So that when called like this:
+//
+//   let foo = new Foo();
+//   foo.bar("other");
+//
+// the Wasm function receives {foo} as $recv and "other" as $other.
+TF_BUILTIN(WasmMethodWrapper, WasmBuiltinsAssembler) {
+  auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
+  CodeStubArguments args(this, argc);
+  TNode<Context> context = Parameter<Context>(Descriptor::kContext);
+  static constexpr int kSlot = wasm::kMethodWrapperContextSlot;
+  TNode<JSFunction> target = CAST(LoadContextElementNoCell(context, kSlot));
+  TNode<Int32T> start_index = Int32Constant(0);
+  TNode<Object> receiver = args.GetReceiver();
+  // We push the receiver twice: once into the usual receiver slot, where
+  // the Wasm function callee ignores it; once more as the first parameter.
+  TNode<Int32T> already_on_stack = Int32Constant(2);
+  TNode<Object> result =
+      CallBuiltin(Builtin::kCallFunctionForwardVarargs, context, target,
+                  already_on_stack, start_index, receiver, receiver);
+  args.PopAndReturn(CAST(result));
+}
+
 TNode<BoolT> WasmBuiltinsAssembler::InSharedSpace(TNode<HeapObject> object) {
   TNode<IntPtrT> address = BitcastTaggedToWord(object);
   return IsPageFlagSet(address, MemoryChunk::kInSharedHeap);
19
deps/v8/src/builtins/builtins.cc
vendored

@@ -207,11 +207,12 @@ CallInterfaceDescriptor Builtins::CallInterfaceDescriptorFor(Builtin builtin) {
 #undef CASE_OTHER
     default:
       Builtins::Kind kind = Builtins::KindOf(builtin);
-      DCHECK_NE(BCH, kind);
-      DCHECK_NE(BCH_TSA, kind);
       if (kind == TFJ_TSA || kind == TFJ || kind == CPP) {
         return JSTrampolineDescriptor{};
       }
+      if (kind == BCH || kind == BCH_TSA) {
+        return InterpreterDispatchDescriptor{};
+      }
       UNREACHABLE();
   }
   return CallInterfaceDescriptor{key};

@@ -225,7 +226,6 @@ Callable Builtins::CallableFor(Isolate* isolate, Builtin builtin) {

 // static
 bool Builtins::HasJSLinkage(Builtin builtin) {
-  DCHECK_NE(BCH, Builtins::KindOf(builtin));
   return CallInterfaceDescriptorFor(builtin) == JSTrampolineDescriptor{};
 }

@@ -516,19 +516,6 @@ bool Builtins::IsCpp(Builtin builtin) {

 // static
 CodeEntrypointTag Builtins::EntrypointTagFor(Builtin builtin) {
-  if (builtin == Builtin::kNoBuiltinId) {
-    // Special case needed for example for tests.
-    return kDefaultCodeEntrypointTag;
-  }
-
-#if V8_ENABLE_DRUMBRAKE
-  if (builtin == Builtin::kGenericJSToWasmInterpreterWrapper) {
-    return kJSEntrypointTag;
-  } else if (builtin == Builtin::kGenericWasmToJSInterpreterWrapper) {
-    return kWasmEntrypointTag;
-  }
-#endif  // V8_ENABLE_DRUMBRAKE
-
   Kind kind = Builtins::KindOf(builtin);
   switch (kind) {
     case CPP:
2
deps/v8/src/builtins/cast.tq
vendored

@@ -335,7 +335,7 @@ Cast<Number|TheHole>(o: Object): Number|TheHole labels CastError {
   }
 }

-@if(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE)
+@if(V8_ENABLE_UNDEFINED_DOUBLE)
 Cast<Number|Undefined|TheHole>(o: Object): Number|Undefined|
     TheHole labels CastError {
   typeswitch (o) {
10
deps/v8/src/builtins/collections.tq
vendored

@@ -38,12 +38,12 @@ extern macro CollectionsBuiltinsAssembler::AddToSetTable(
     implicit context: Context)(OrderedHashSet, JSAny, String): OrderedHashSet;

 extern macro CollectionsBuiltinsAssembler::TableHasKey(
-    implicit context: Context)(OrderedHashSet, Object): bool;
+    implicit context: Context)(OrderedHashSet, JSAny): bool;
 extern macro CollectionsBuiltinsAssembler::TableHasKey(
-    implicit context: Context)(OrderedHashMap, Object): bool;
+    implicit context: Context)(OrderedHashMap, JSAny): bool;

 extern macro CollectionsBuiltinsAssembler::DeleteFromSetTable(
-    implicit context: Context)(OrderedHashSet, Object): Smi labels NotFound;
+    implicit context: Context)(OrderedHashSet, JSAny): Smi labels NotFound;

 extern runtime OrderedHashSetShrink(implicit context: Context)(
     OrderedHashSet): OrderedHashSet;

@@ -393,7 +393,7 @@ struct StableJSSetBackingTableWitness {
         this.unstable, kOrderedHashSetNumberOfElementsIndex);
   }

-  macro HasKey(implicit context: Context)(key: Object): bool {
+  macro HasKey(implicit context: Context)(key: JSAny): bool {
     return TableHasKey(this.unstable, key);
   }

@@ -411,7 +411,7 @@ struct StableJSMapBackingTableWitness {
         this.unstable, kOrderedHashMapNumberOfElementsIndex);
   }

-  macro HasKey(implicit context: Context)(key: Object): bool {
+  macro HasKey(implicit context: Context)(key: JSAny): bool {
     return TableHasKey(this.unstable, key);
   }

4
deps/v8/src/builtins/convert.tq
vendored

@@ -345,14 +345,14 @@ Convert<float64, float32>(f: float32): float64 {
 }
 Convert<float64_or_undefined_or_hole, float64>(f: float64):
     float64_or_undefined_or_hole {
-  @if(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE) {
+  @if(V8_ENABLE_UNDEFINED_DOUBLE) {
     return float64_or_undefined_or_hole{
       is_undefined: false,
       is_hole: false,
       value: f
     };
   }
-  @ifnot(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE) {
+  @ifnot(V8_ENABLE_UNDEFINED_DOUBLE) {
     return float64_or_undefined_or_hole{is_hole: false, value: f};
   }
 }
|
|||
|
|
@@ -17,6 +17,7 @@ extern macro WeakCollectionsBuiltinsAssembler::GotoIfCannotBeHeldWeakly(JSAny):

 transitioning macro PushCell(
     finalizationRegistry: JSFinalizationRegistry, cell: WeakCell): void {
+  dcheck(finalizationRegistry == cell.finalization_registry);
   cell.next = finalizationRegistry.active_cells;
   typeswitch (finalizationRegistry.active_cells) {
     case (Undefined): {

@@ -101,15 +102,12 @@ transitioning javascript builtin FinalizationRegistryRegister(

   // 6. Let cell be the Record { [[WeakRefTarget]] : target, [[HeldValue]]:
   //    heldValue, [[UnregisterToken]]: unregisterToken }.
-  // Allocate the WeakCell object in the old space, because 1) WeakCell weakness
-  // handling is only implemented in the old space 2) they're supposedly
-  // long-living. TODO(marja, gsathya): Support WeakCells in Scavenger.
-  const cell = new (Pretenured) WeakCell{
+  const cell = new WeakCell{
     map: GetWeakCellMap(),
     finalization_registry: finalizationRegistry,
-    holdings: heldValue,
     target: target,
     unregister_token: unregisterToken,
+    holdings: heldValue,
     prev: Undefined,
     next: Undefined,
     key_list_prev: Undefined,
263
deps/v8/src/builtins/ia32/builtins-ia32.cc
vendored
@ -2106,6 +2106,94 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
|
|||
__ ret(1 * kSystemPointerSize); // Remove eax.
|
||||
}
|
||||
|
||||
static void GenerateCall(MacroAssembler* masm, Register argc, Register target,
|
||||
ConvertReceiverMode mode,
|
||||
std::optional<RootIndex> error_string_root) {
|
||||
Register map = ecx;
|
||||
Register instance_type = edx;
|
||||
|
||||
DCHECK(!AreAliased(argc, target, map, instance_type));
|
||||
|
||||
StackArgumentsAccessor args(argc);
|
||||
|
||||
Label non_callable, non_smi, non_callable_jsfunction, non_jsboundfunction,
|
||||
non_proxy, non_wrapped_function, class_constructor;
|
||||
__ JumpIfSmi(target, &non_callable);
|
||||
__ bind(&non_smi);
|
||||
__ LoadMap(map, target);
|
||||
__ CmpInstanceTypeRange(map, instance_type, map,
|
||||
FIRST_CALLABLE_JS_FUNCTION_TYPE,
|
||||
LAST_CALLABLE_JS_FUNCTION_TYPE);
|
||||
__ j(above, &non_callable_jsfunction);
|
||||
__ TailCallBuiltin(Builtins::CallFunction(mode));
|
||||
|
||||
__ bind(&non_callable_jsfunction);
|
||||
__ cmpw(instance_type, Immediate(JS_BOUND_FUNCTION_TYPE));
|
||||
__ j(not_equal, &non_jsboundfunction);
|
||||
__ TailCallBuiltin(Builtin::kCallBoundFunction);
|
||||
|
||||
// Check if target is a proxy and call CallProxy external builtin
|
||||
__ bind(&non_jsboundfunction);
|
||||
__ LoadMap(map, target);
|
||||
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
|
||||
Immediate(Map::Bits1::IsCallableBit::kMask));
|
||||
__ j(zero, &non_callable);
|
||||
|
||||
// Call CallProxy external builtin
|
||||
__ cmpw(instance_type, Immediate(JS_PROXY_TYPE));
|
||||
__ j(not_equal, &non_proxy);
|
||||
__ TailCallBuiltin(Builtin::kCallProxy);
|
||||
|
||||
// Check if target is a wrapped function and call CallWrappedFunction external
|
||||
// builtin
|
||||
__ bind(&non_proxy);
|
||||
__ cmpw(instance_type, Immediate(JS_WRAPPED_FUNCTION_TYPE));
|
||||
__ j(not_equal, &non_wrapped_function);
|
||||
__ TailCallBuiltin(Builtin::kCallWrappedFunction);
|
||||
|
||||
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
||||
// Check that the function is not a "classConstructor".
|
||||
__ bind(&non_wrapped_function);
|
||||
__ cmpw(instance_type, Immediate(JS_CLASS_CONSTRUCTOR_TYPE));
|
||||
__ j(equal, &class_constructor);
|
||||
|
||||
// 2. Call to something else, which might have a [[Call]] internal method (if
|
||||
// not we raise an exception).
|
||||
// Overwrite the original receiver with the (original) target.
|
||||
__ mov(args.GetReceiverOperand(), target);
|
||||
// Let the "call_as_function_delegate" take care of the rest.
|
||||
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
|
||||
__ TailCallBuiltin(
|
||||
Builtins::CallFunction(ConvertReceiverMode::kNotNullOrUndefined));
|
||||
|
||||
// 3. Call to something that is not callable.
|
||||
__ bind(&non_callable);
|
||||
{
|
||||
FrameScope scope(masm, StackFrame::INTERNAL);
|
||||
if (!error_string_root.has_value()) {
|
||||
// Use the simpler error for Generate_Call
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowCalledNonCallable);
|
||||
} else {
|
||||
// Use the more specific error for Function.prototype.call/apply
|
||||
__ LoadRoot(edx, error_string_root.value());
|
||||
__ Push(target);
|
||||
__ Push(edx);
|
||||
__ CallRuntime(Runtime::kThrowTargetNonFunction);
|
||||
__ Trap();
|
||||
}
|
||||
}
|
||||
|
||||
// 4. The function is a "classConstructor", need to raise an exception.
|
||||
__ bind(&class_constructor);
|
||||
{
|
||||
FrameScope frame(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
|
||||
__ Trap();
|
||||
}
|
||||
}
|
||||
|
||||
// static
|
||||
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
||||
// ----------- S t a t e -------------
|
||||
|
|
@ -2167,7 +2255,11 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
|||
__ bind(&no_arguments);
|
||||
{
|
||||
__ Move(eax, JSParameterCount(0));
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
|
||||
Register target = edi;
|
||||
__ movd(target, xmm0);
|
||||
GenerateCall(masm, eax, target, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_apply_string);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2208,7 +2300,10 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
|
|||
__ dec(eax); // One fewer argument (first argument is new receiver).
|
||||
|
||||
// 5. Call the callable.
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
|
||||
Register target = edi;
|
||||
GenerateCall(masm, eax, target, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_call_string);
|
||||
}
|
||||
|
||||
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
|
||||
|
|
@ -2772,81 +2867,9 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
|
|||
// -- eax : the number of arguments
|
||||
// -- edi : the target to call (can be any Object).
|
||||
// -----------------------------------
|
||||
Register argc = eax;
|
||||
Register target = edi;
|
||||
Register map = ecx;
|
||||
Register instance_type = edx;
|
||||
DCHECK(!AreAliased(argc, target, map, instance_type));
|
||||
|
||||
StackArgumentsAccessor args(argc);
|
||||
|
||||
Label non_callable, non_smi, non_callable_jsfunction, non_jsboundfunction,
|
||||
non_proxy, non_wrapped_function, class_constructor;
|
||||
__ JumpIfSmi(target, &non_callable);
|
||||
__ bind(&non_smi);
|
||||
__ LoadMap(map, target);
|
||||
__ CmpInstanceTypeRange(map, instance_type, map,
|
||||
FIRST_CALLABLE_JS_FUNCTION_TYPE,
|
||||
LAST_CALLABLE_JS_FUNCTION_TYPE);
|
||||
__ j(above, &non_callable_jsfunction);
|
||||
__ TailCallBuiltin(Builtins::CallFunction(mode));
|
||||
|
||||
__ bind(&non_callable_jsfunction);
|
||||
__ cmpw(instance_type, Immediate(JS_BOUND_FUNCTION_TYPE));
|
||||
__ j(not_equal, &non_jsboundfunction);
|
||||
__ TailCallBuiltin(Builtin::kCallBoundFunction);
|
||||
|
||||
// Check if target is a proxy and call CallProxy external builtin
|
||||
__ bind(&non_jsboundfunction);
|
||||
__ LoadMap(map, target);
|
||||
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
|
||||
Immediate(Map::Bits1::IsCallableBit::kMask));
|
||||
__ j(zero, &non_callable);
|
||||
|
||||
// Call CallProxy external builtin
|
||||
__ cmpw(instance_type, Immediate(JS_PROXY_TYPE));
|
||||
__ j(not_equal, &non_proxy);
|
||||
__ TailCallBuiltin(Builtin::kCallProxy);
|
||||
|
||||
// Check if target is a wrapped function and call CallWrappedFunction external
|
||||
// builtin
|
||||
__ bind(&non_proxy);
|
||||
__ cmpw(instance_type, Immediate(JS_WRAPPED_FUNCTION_TYPE));
|
||||
__ j(not_equal, &non_wrapped_function);
|
||||
__ TailCallBuiltin(Builtin::kCallWrappedFunction);
|
||||
|
||||
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
||||
// Check that the function is not a "classConstructor".
|
||||
__ bind(&non_wrapped_function);
|
||||
__ cmpw(instance_type, Immediate(JS_CLASS_CONSTRUCTOR_TYPE));
|
||||
__ j(equal, &class_constructor);
|
||||
|
||||
// 2. Call to something else, which might have a [[Call]] internal method (if
|
||||
// not we raise an exception).
|
||||
// Overwrite the original receiver with the (original) target.
|
||||
__ mov(args.GetReceiverOperand(), target);
|
||||
// Let the "call_as_function_delegate" take care of the rest.
|
||||
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
|
||||
__ TailCallBuiltin(
|
||||
Builtins::CallFunction(ConvertReceiverMode::kNotNullOrUndefined));
|
||||
|
||||
// 3. Call to something that is not callable.
|
||||
__ bind(&non_callable);
|
||||
{
|
||||
FrameScope scope(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowCalledNonCallable);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
|
||||
// 4. The function is a "classConstructor", need to raise an exception.
|
||||
__ bind(&class_constructor);
|
||||
{
|
||||
FrameScope frame(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
GenerateCall(masm, eax, target, mode, std::nullopt);
|
||||
}
|
||||
|
||||
// static
|
||||
|
|
@ -3347,18 +3370,12 @@ void ReloadParentStack(MacroAssembler* masm, Register promise,
|
|||
|
||||
DCHECK(!AreAliased(promise, return_value, context, tmp));
|
||||
|
||||
__ Push(promise);
|
||||
|
||||
Register parent = tmp2;
|
||||
__ mov(parent, Operand(active_stack, wasm::kStackParentOffset));
|
||||
|
||||
// Update active stack root.
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), parent);
|
||||
|
||||
__ Pop(promise);
|
||||
// Switch stack!
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), active_stack,
|
||||
nullptr, no_reg, {promise, return_value, context, parent});
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), parent, nullptr,
|
||||
no_reg, {promise, return_value, context, parent});
|
||||
LoadJumpBuffer(masm, parent, false);
|
||||
}
|
||||
|
||||
|
|
@ -3399,12 +3416,12 @@ void SwitchToAllocatedStack(MacroAssembler* masm, Register wrapper_buffer,
|
|||
Register scratch, Register scratch2,
|
||||
Label* suspend) {
|
||||
ResetWasmJspiFrameStackSlots(masm);
|
||||
Register parent_stack = new_wrapper_buffer;
|
||||
__ LoadRootRelative(parent_stack, IsolateData::active_stack_offset());
|
||||
__ Move(parent_stack, Operand(parent_stack, wasm::kStackParentOffset));
|
||||
SwitchStacks(masm, ExternalReference::wasm_start_stack(), parent_stack,
|
||||
suspend, no_reg, {wrapper_buffer});
|
||||
parent_stack = no_reg;
|
||||
Register stack = new_wrapper_buffer;
|
||||
__ LoadRootRelative(stack, IsolateData::active_suspender_offset());
|
||||
__ Move(stack, FieldOperand(stack, WasmSuspenderObject::kStackOffset));
|
||||
SwitchStacks(masm, ExternalReference::wasm_start_stack(), stack, suspend,
|
||||
no_reg, {wrapper_buffer});
|
||||
stack = no_reg;
|
||||
Register target_stack = scratch;
|
||||
__ LoadRootRelative(target_stack, IsolateData::active_stack_offset());
|
||||
// Save the old stack's ebp, and use it to access the parameters in
|
||||
|
|
@ -3792,52 +3809,27 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
|
|||
// Set a sentinel value for the spill slots visited by the GC.
|
||||
ResetWasmJspiFrameStackSlots(masm);
|
||||
|
||||
// -------------------------------------------
|
||||
// Save current state in active jump buffer.
|
||||
// -------------------------------------------
|
||||
Label resume;
|
||||
Register stack = edx;
|
||||
__ LoadRootRelative(stack, IsolateData::active_stack_offset());
|
||||
|
||||
Register suspender_stack = edi;
|
||||
__ Move(suspender_stack,
|
||||
FieldOperand(suspender, WasmSuspenderObject::kStackOffset));
|
||||
#ifdef DEBUG
|
||||
// -------------------------------------------
|
||||
// Check that the suspender's stack is the active stack.
|
||||
// -------------------------------------------
|
||||
// TODO(thibaudm): Once we add core stack-switching instructions, this check
|
||||
// will not hold anymore: it's possible that the active stack changed
|
||||
// (due to an internal switch), so we have to update the suspender.
|
||||
__ cmp(suspender_stack, stack);
|
||||
Label ok;
|
||||
__ j(equal, &ok);
|
||||
__ Trap();
|
||||
__ bind(&ok);
|
||||
#endif
|
||||
|
||||
// -------------------------------------------
|
||||
// Update roots.
|
||||
// -------------------------------------------
|
||||
Register caller = ecx;
|
||||
__ Move(caller, Operand(suspender_stack, wasm::kStackParentOffset));
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), caller);
|
||||
Register parent = edi;
|
||||
__ Move(parent, FieldOperand(suspender, WasmSuspenderObject::kParentOffset));
|
||||
Register target_stack = ecx;
|
||||
__ Move(target_stack,
|
||||
FieldOperand(parent, WasmSuspenderObject::kStackOffset));
|
||||
|
||||
// Switch stacks.
|
||||
SwitchStacks(masm, ExternalReference::wasm_suspend_stack(), target_stack,
|
||||
&resume, no_reg, {target_stack, suspender, parent});
|
||||
__ StoreRootRelative(IsolateData::active_suspender_offset(), parent);
|
||||
parent = no_reg;
|
||||
|
||||
// -------------------------------------------
|
||||
// Load jump buffer.
|
||||
// -------------------------------------------
|
||||
SwitchStacks(masm, ExternalReference::wasm_suspend_stack(), stack, &resume,
|
||||
no_reg, {caller, suspender});
|
||||
__ Move(kReturnRegister0,
|
||||
FieldOperand(suspender, WasmSuspenderObject::kPromiseOffset));
|
||||
MemOperand GCScanSlotPlace =
|
||||
MemOperand(ebp, WasmJspiFrameConstants::kGCScanSlotCountOffset);
|
||||
__ Move(GCScanSlotPlace, Immediate(0));
|
||||
LoadJumpBuffer(masm, caller, true);
|
||||
LoadJumpBuffer(masm, target_stack, true);
|
||||
__ Trap();
|
||||
__ bind(&resume);
|
||||
__ LeaveFrame(StackFrame::WASM_JSPI);
|
||||
|
|
@ -3893,8 +3885,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
|
|||
Register target_stack = edx;
|
||||
__ Move(target_stack,
|
||||
FieldOperand(suspender, WasmSuspenderObject::kStackOffset));
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), target_stack);
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_stack(), active_stack,
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_jspi_stack(), target_stack,
|
||||
&suspend, suspender, {target_stack});
|
||||
suspender = no_reg;
|
||||
|
||||
|
|
@ -3935,6 +3926,30 @@ void Builtins::Generate_WasmReject(MacroAssembler* masm) {
|
|||
Generate_WasmResumeHelper(masm, wasm::OnResume::kThrow);
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmFXResume(MacroAssembler* masm) {
|
||||
__ EnterFrame(StackFrame::WASM_STACK_EXIT);
|
||||
Register target_stack = WasmFXResumeDescriptor::GetRegisterParameter(0);
|
||||
Label suspend;
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_wasmfx_stack(),
|
||||
target_stack, &suspend, no_reg, {target_stack});
|
||||
LoadJumpBuffer(masm, target_stack, true);
|
||||
__ Trap();
|
||||
__ bind(&suspend);
|
||||
__ LeaveFrame(StackFrame::WASM_STACK_EXIT);
|
||||
__ ret(0);
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) {
|
||||
Register active_stack = ecx;
|
||||
__ LoadRootRelative(active_stack, IsolateData::active_stack_offset());
|
||||
Register parent = esi;
|
||||
__ Move(parent, MemOperand(active_stack, wasm::kStackParentOffset));
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), parent, nullptr,
|
||||
no_reg, {parent});
|
||||
LoadJumpBuffer(masm, parent, true);
|
||||
__ Trap();
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
|
||||
// Only needed on x64.
|
||||
__ Trap();
|
||||
|
|
|
|||
2
deps/v8/src/builtins/js-to-wasm.tq
vendored
2
deps/v8/src/builtins/js-to-wasm.tq
vendored
|
|
@ -720,7 +720,7 @@ macro JSToWasmWrapperHelper(
|
|||
// undefined double experiment. In order to silence these false positives,
|
||||
// we temporarily fall back to the old pattern. This should be removed
|
||||
// when the feature is shipped.
|
||||
@if(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE) {
|
||||
@if(V8_ENABLE_UNDEFINED_DOUBLE) {
|
||||
if (param == Undefined) {
|
||||
*RefCast<float64>(toRef) = kCppQuietNaN;
|
||||
}
|
||||
|
|
|
|||
245
deps/v8/src/builtins/loong64/builtins-loong64.cc
vendored
245
deps/v8/src/builtins/loong64/builtins-loong64.cc
vendored
|
|
@ -2119,6 +2119,84 @@ void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
|
|||
D::ExpectedParameterCountRegister());
|
||||
}
|
||||
|
||||
static void GenerateCall(MacroAssembler* masm, Register argc, Register target,
|
||||
ConvertReceiverMode mode,
|
||||
std::optional<RootIndex> error_string_root) {
|
||||
Register map = t1;
|
||||
Register instance_type = t2;
|
||||
Register scratch = t3;
|
||||
DCHECK(!AreAliased(argc, target, map, instance_type, scratch));
|
||||
|
||||
Label non_callable, class_constructor;
|
||||
__ JumpIfSmi(target, &non_callable);
|
||||
__ LoadMap(map, target);
|
||||
__ GetInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
|
||||
scratch);
|
||||
__ TailCallBuiltin(Builtins::CallFunction(mode), ls, scratch,
|
||||
Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
|
||||
FIRST_CALLABLE_JS_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallBoundFunction, eq, instance_type,
|
||||
Operand(JS_BOUND_FUNCTION_TYPE));
|
||||
|
||||
// Check if target has a [[Call]] internal method.
|
||||
{
|
||||
Register flags = t1;
|
||||
DCHECK(!AreAliased(argc, target, flags));
|
||||
__ Ld_bu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
|
||||
map = no_reg;
|
||||
__ And(flags, flags, Operand(Map::Bits1::IsCallableBit::kMask));
|
||||
__ Branch(&non_callable, eq, flags, Operand(zero_reg));
|
||||
}
|
||||
|
||||
__ TailCallBuiltin(Builtin::kCallProxy, eq, instance_type,
|
||||
Operand(JS_PROXY_TYPE));
|
||||
|
||||
// Check if target is a wrapped function and call CallWrappedFunction external
|
||||
// builtin
|
||||
__ TailCallBuiltin(Builtin::kCallWrappedFunction, eq, instance_type,
|
||||
Operand(JS_WRAPPED_FUNCTION_TYPE));
|
||||
|
||||
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
||||
// Check that the function is not a "classConstructor".
|
||||
__ Branch(&class_constructor, eq, instance_type,
|
||||
Operand(JS_CLASS_CONSTRUCTOR_TYPE));
|
||||
|
||||
// 2. Call to something else, which might have a [[Call]] internal method (if
|
||||
// not we raise an exception).
|
||||
// Overwrite the original receiver with the (original) target.
|
||||
__ StoreReceiver(target);
|
||||
// Let the "call_as_function_delegate" take care of the rest.
|
||||
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
|
||||
__ TailCallBuiltin(
|
||||
Builtins::CallFunction(ConvertReceiverMode::kNotNullOrUndefined));
|
||||
|
||||
// 3. Call to something that is not callable.
|
||||
__ bind(&non_callable);
|
||||
{
|
||||
FrameScope scope(masm, StackFrame::INTERNAL);
|
||||
if (!error_string_root.has_value()) {
|
||||
// Use the simpler error for Generate_Call
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowCalledNonCallable);
|
||||
} else {
|
||||
// Use the more specific error for Function.prototype.call/apply
|
||||
__ LoadRoot(t2, error_string_root.value());
|
||||
__ Push(target, t2);
|
||||
__ CallRuntime(Runtime::kThrowTargetNonFunction);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
}
|
||||
|
||||
// 4. The function is a "classConstructor", need to raise an exception.
|
||||
__ bind(&class_constructor);
|
||||
{
|
||||
FrameScope frame(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
}
|
||||
|
||||
// static
|
||||
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
||||
// ----------- S t a t e -------------
|
||||
|
|
@ -2179,7 +2257,10 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
|||
{
|
||||
__ li(a0, JSParameterCount(0));
|
||||
DCHECK(receiver == a1);
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
|
||||
Register target = a1;
|
||||
GenerateCall(masm, a0, target, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_apply_string);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2202,7 +2283,9 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
|
|||
__ addi_d(a0, a0, -1);
|
||||
|
||||
// 4. Call the callable.
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
Register target = a1;
|
||||
GenerateCall(masm, a0, target, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_call_string);
|
||||
}
|
||||
|
||||
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
|
||||
|
|
@ -2689,70 +2772,8 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
|
|||
// -- a0 : the number of arguments
|
||||
// -- a1 : the target to call (can be any Object).
|
||||
// -----------------------------------
|
||||
|
||||
Register target = a1;
|
||||
Register map = t1;
|
||||
Register instance_type = t2;
|
||||
Register scratch = t3;
|
||||
DCHECK(!AreAliased(a0, target, map, instance_type, scratch));
|
||||
|
||||
Label non_callable, class_constructor;
|
||||
__ JumpIfSmi(target, &non_callable);
|
||||
__ LoadMap(map, target);
|
||||
__ GetInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
|
||||
scratch);
|
||||
__ TailCallBuiltin(Builtins::CallFunction(mode), ls, scratch,
|
||||
Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
|
||||
FIRST_CALLABLE_JS_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallBoundFunction, eq, instance_type,
|
||||
Operand(JS_BOUND_FUNCTION_TYPE));
|
||||
|
||||
// Check if target has a [[Call]] internal method.
|
||||
{
|
||||
Register flags = t1;
|
||||
__ Ld_bu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
|
||||
map = no_reg;
|
||||
__ And(flags, flags, Operand(Map::Bits1::IsCallableBit::kMask));
|
||||
__ Branch(&non_callable, eq, flags, Operand(zero_reg));
|
||||
}
|
||||
|
||||
__ TailCallBuiltin(Builtin::kCallProxy, eq, instance_type,
|
||||
Operand(JS_PROXY_TYPE));
|
||||
|
||||
// Check if target is a wrapped function and call CallWrappedFunction external
|
||||
// builtin
|
||||
__ TailCallBuiltin(Builtin::kCallWrappedFunction, eq, instance_type,
|
||||
Operand(JS_WRAPPED_FUNCTION_TYPE));
|
||||
|
||||
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
||||
// Check that the function is not a "classConstructor".
|
||||
__ Branch(&class_constructor, eq, instance_type,
|
||||
Operand(JS_CLASS_CONSTRUCTOR_TYPE));
|
||||
|
||||
// 2. Call to something else, which might have a [[Call]] internal method (if
|
||||
// not we raise an exception).
|
||||
// Overwrite the original receiver with the (original) target.
|
||||
__ StoreReceiver(target);
|
||||
// Let the "call_as_function_delegate" take care of the rest.
|
||||
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
|
||||
__ TailCallBuiltin(
|
||||
Builtins::CallFunction(ConvertReceiverMode::kNotNullOrUndefined));
|
||||
|
||||
// 3. Call to something that is not callable.
|
||||
__ bind(&non_callable);
|
||||
{
|
||||
FrameScope scope(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowCalledNonCallable);
|
||||
}
|
||||
|
||||
// 4. The function is a "classConstructor", need to raise an exception.
|
||||
__ bind(&class_constructor);
|
||||
{
|
||||
FrameScope frame(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
|
||||
}
|
||||
GenerateCall(masm, a0, target, mode, std::nullopt);
|
||||
}
|
||||
|
||||
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
|
||||
|
|
@ -3090,7 +3111,7 @@ void SwitchStackPointer(MacroAssembler* masm, Register stack) {
|
|||
}
|
||||
|
||||
void LoadJumpBuffer(MacroAssembler* masm, Register stack, bool load_pc,
|
||||
Register tmp, wasm::JumpBuffer::StackState expected_state) {
|
||||
Register tmp) {
|
||||
SwitchStackPointer(masm, stack);
|
||||
__ Ld_d(fp, MemOperand(stack, wasm::kStackFpOffset));
|
||||
if (load_pc) {
|
||||
|
|
@ -3102,12 +3123,11 @@ void LoadJumpBuffer(MacroAssembler* masm, Register stack, bool load_pc,
|
|||
}
|
||||
|
||||
void LoadTargetJumpBuffer(MacroAssembler* masm, Register target_stack,
|
||||
Register tmp,
|
||||
wasm::JumpBuffer::StackState expected_state) {
|
||||
Register tmp) {
|
||||
__ St_d(zero_reg,
|
||||
MemOperand(fp, WasmJspiFrameConstants::kGCScanSlotCountOffset));
|
||||
// Switch stack!
|
||||
LoadJumpBuffer(masm, target_stack, false, tmp, expected_state);
|
||||
LoadJumpBuffer(masm, target_stack, false, tmp);
|
||||
}
|
||||
|
||||
// Updates the stack limit and central stack info, and validates the switch.
|
||||
|
|
@ -3171,7 +3191,7 @@ void ReloadParentStack(MacroAssembler* masm, Register return_reg,
|
|||
// Switch stack!
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), active_stack,
|
||||
nullptr, no_reg, {return_reg, return_value, context, parent});
|
||||
LoadJumpBuffer(masm, parent, false, tmp3, wasm::JumpBuffer::Inactive);
|
||||
LoadJumpBuffer(masm, parent, false, tmp3);
|
||||
}
|
||||
|
||||
void RestoreParentSuspender(MacroAssembler* masm, Register tmp1) {
|
||||
|
|
@ -3404,57 +3424,31 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
|
|||
// Set a sentinel value for the spill slots visited by the GC.
|
||||
ResetWasmJspiFrameStackSlots(masm);
|
||||
|
||||
// -------------------------------------------
|
||||
// Save current state in active jump buffer.
|
||||
// -------------------------------------------
|
||||
Label resume;
|
||||
DEFINE_REG(stack);
|
||||
__ LoadRootRelative(stack, IsolateData::active_stack_offset());
|
||||
regs.ResetExcept(suspender, stack);
|
||||
DEFINE_REG(scratch);
|
||||
|
||||
DEFINE_REG(suspender_stack);
|
||||
__ LoadExternalPointerField(
|
||||
suspender_stack,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kStackOffset),
|
||||
kWasmStackMemoryTag);
|
||||
if (v8_flags.debug_code) {
|
||||
// -------------------------------------------
|
||||
// Check that the suspender's stack is the active stack.
|
||||
// -------------------------------------------
|
||||
// TODO(thibaudm): Once we add core stack-switching instructions, this
|
||||
// check will not hold anymore: it's possible that the active stack
|
||||
// changed (due to an internal switch), so we have to update the suspender.
|
||||
Label ok;
|
||||
__ Branch(&ok, eq, suspender_stack, Operand(stack));
|
||||
__ Trap();
|
||||
__ bind(&ok);
|
||||
}
|
||||
// -------------------------------------------
|
||||
// Update roots.
|
||||
// -------------------------------------------
|
||||
DEFINE_REG(caller);
|
||||
__ Ld_d(caller, MemOperand(suspender_stack, wasm::kStackParentOffset));
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), caller);
|
||||
// Update active stack.
|
||||
DEFINE_REG(parent);
|
||||
__ LoadProtectedPointerField(
|
||||
parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
|
||||
__ StoreRootRelative(IsolateData::active_suspender_offset(), parent);
|
||||
regs.ResetExcept(suspender, caller, stack);
|
||||
DEFINE_REG(target_stack);
|
||||
__ LoadExternalPointerField(
|
||||
target_stack, FieldMemOperand(parent, WasmSuspenderObject::kStackOffset),
|
||||
kWasmStackMemoryTag);
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), target_stack);
|
||||
|
||||
// -------------------------------------------
|
||||
// Load jump buffer.
|
||||
// -------------------------------------------
|
||||
SwitchStacks(masm, ExternalReference::wasm_suspend_stack(), stack, &resume,
|
||||
no_reg, {caller, suspender});
|
||||
FREE_REG(stack);
|
||||
no_reg, {target_stack, suspender, parent});
|
||||
__ StoreRootRelative(IsolateData::active_suspender_offset(), parent);
|
||||
__ LoadTaggedField(
|
||||
kReturnRegister0,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kPromiseOffset));
|
||||
MemOperand GCScanSlotPlace =
|
||||
MemOperand(fp, WasmJspiFrameConstants::kGCScanSlotCountOffset);
|
||||
__ St_d(zero_reg, GCScanSlotPlace);
|
||||
DEFINE_REG(scratch)
|
||||
LoadJumpBuffer(masm, caller, true, scratch, wasm::JumpBuffer::Inactive);
|
||||
LoadJumpBuffer(masm, target_stack, true, scratch);
|
||||
__ Trap();
|
||||
__ bind(&resume);
|
||||
__ LeaveFrame(StackFrame::WASM_JSPI);
|
||||
|
|
@ -3520,7 +3514,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
|
|||
kWasmStackMemoryTag);
|
||||
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), target_stack);
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_stack(), active_stack,
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_jspi_stack(), active_stack,
|
||||
&suspend, suspender, {target_stack});
|
||||
|
||||
regs.ResetExcept(target_stack);
|
||||
|
|
@ -3537,8 +3531,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
|
|||
__ St_d(zero_reg, GCScanSlotPlace);
|
||||
if (on_resume == wasm::OnResume::kThrow) {
|
||||
// Switch without restoring the PC.
|
||||
LoadJumpBuffer(masm, target_stack, false, scratch,
|
||||
wasm::JumpBuffer::Suspended);
|
||||
LoadJumpBuffer(masm, target_stack, false, scratch);
|
||||
// Pop this frame now. The unwinder expects that the first STACK_SWITCH
|
||||
// frame is the outermost one.
|
||||
__ LeaveFrame(StackFrame::WASM_JSPI);
|
||||
|
|
@ -3547,8 +3540,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
|
|||
__ CallRuntime(Runtime::kThrow);
|
||||
} else {
|
||||
// Resume the stack normally.
|
||||
LoadJumpBuffer(masm, target_stack, true, scratch,
|
||||
wasm::JumpBuffer::Suspended);
|
||||
LoadJumpBuffer(masm, target_stack, true, scratch);
|
||||
}
|
||||
__ Trap();
|
||||
__ bind(&suspend);
|
||||
|
|
@ -3567,6 +3559,34 @@ void Builtins::Generate_WasmReject(MacroAssembler* masm) {
|
|||
Generate_WasmResumeHelper(masm, wasm::OnResume::kThrow);
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmFXResume(MacroAssembler* masm) {
|
||||
__ EnterFrame(StackFrame::WASM_STACK_EXIT);
|
||||
Register target_stack = WasmFXResumeDescriptor::GetRegisterParameter(0);
|
||||
Label suspend;
|
||||
Register active_stack = a0;
|
||||
__ LoadRootRelative(active_stack, IsolateData::active_stack_offset());
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), target_stack);
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_wasmfx_stack(),
|
||||
active_stack, &suspend, no_reg, {target_stack});
|
||||
LoadJumpBuffer(masm, target_stack, true, a1);
|
||||
__ Trap();
|
||||
__ bind(&suspend);
|
||||
__ LeaveFrame(StackFrame::WASM_STACK_EXIT);
|
||||
__ Ret();
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) {
|
||||
Register active_stack = a0;
|
||||
__ LoadRootRelative(active_stack, IsolateData::active_stack_offset());
|
||||
Register parent = a1;
|
||||
__ Move(parent, MemOperand(active_stack, wasm::kStackParentOffset));
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), parent);
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), active_stack,
|
||||
nullptr, no_reg, {parent});
|
||||
LoadJumpBuffer(masm, parent, true, a2);
|
||||
__ Trap();
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
|
||||
// Only needed on x64.
|
||||
__ Trap();
|
||||
|
|
@ -3593,8 +3613,7 @@ void SwitchToAllocatedStack(MacroAssembler* masm, RegisterAllocator& regs,
|
|||
__ mov(original_fp, fp);
|
||||
DEFINE_REG(target_stack);
|
||||
__ LoadRootRelative(target_stack, IsolateData::active_stack_offset());
|
||||
LoadTargetJumpBuffer(masm, target_stack, scratch,
|
||||
wasm::JumpBuffer::Suspended);
|
||||
LoadTargetJumpBuffer(masm, target_stack, scratch);
|
||||
FREE_REG(target_stack);
|
||||
|
||||
// Push the loaded fp. We know it is null, because there is no frame yet,
|
||||
|
|
|
|||
154
deps/v8/src/builtins/mips64/builtins-mips64.cc
vendored
154
deps/v8/src/builtins/mips64/builtins-mips64.cc
vendored
|
|
@ -2030,6 +2030,84 @@ void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
|
|||
D::ExpectedParameterCountRegister());
|
||||
}
|
||||
|
||||
static void GenerateCall(MacroAssembler* masm, Register argc, Register target,
|
||||
ConvertReceiverMode mode,
|
||||
std::optional<RootIndex> error_string_root) {
|
||||
Register map = t1;
|
||||
Register instance_type = t2;
|
||||
Register scratch = t8;
|
||||
DCHECK(!AreAliased(argc, target, map, instance_type, scratch));
|
||||
|
||||
Label non_callable, class_constructor;
|
||||
__ JumpIfSmi(target, &non_callable);
|
||||
__ LoadMap(map, target);
|
||||
__ GetInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
|
||||
scratch);
|
||||
__ TailCallBuiltin(Builtins::CallFunction(mode), ls, scratch,
|
||||
Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
|
||||
FIRST_CALLABLE_JS_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallBoundFunction, eq, instance_type,
|
||||
Operand(JS_BOUND_FUNCTION_TYPE));
|
||||
|
||||
// Check if target has a [[Call]] internal method.
|
||||
{
|
||||
Register flags = t1;
|
||||
DCHECK(!AreAliased(argc, target, flags));
|
||||
__ Lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
|
||||
map = no_reg;
|
||||
__ And(flags, flags, Operand(Map::Bits1::IsCallableBit::kMask));
|
||||
__ Branch(&non_callable, eq, flags, Operand(zero_reg));
|
||||
}
|
||||
|
||||
__ TailCallBuiltin(Builtin::kCallProxy, eq, instance_type,
|
||||
Operand(JS_PROXY_TYPE));
|
||||
|
||||
// Check if target is a wrapped function and call CallWrappedFunction external
|
||||
// builtin
|
||||
__ TailCallBuiltin(Builtin::kCallWrappedFunction, eq, instance_type,
|
||||
Operand(JS_WRAPPED_FUNCTION_TYPE));
|
||||
|
||||
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
||||
// Check that the function is not a "classConstructor".
|
||||
__ Branch(&class_constructor, eq, instance_type,
|
||||
Operand(JS_CLASS_CONSTRUCTOR_TYPE));
|
||||
|
||||
// 2. Call to something else, which might have a [[Call]] internal method (if
|
||||
// not we raise an exception).
|
||||
// Overwrite the original receiver with the (original) target.
|
||||
__ StoreReceiver(target);
|
||||
// Let the "call_as_function_delegate" take care of the rest.
|
||||
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
|
||||
__ TailCallBuiltin(
|
||||
Builtins::CallFunction(ConvertReceiverMode::kNotNullOrUndefined));
|
||||
|
||||
// 3. Call to something that is not callable.
|
||||
__ bind(&non_callable);
|
||||
{
|
||||
FrameScope scope(masm, StackFrame::INTERNAL);
|
||||
if (!error_string_root.has_value()) {
|
||||
// Use the simpler error for Generate_Call
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowCalledNonCallable);
|
||||
} else {
|
||||
// Use the more specific error for Function.prototype.call/apply
|
||||
__ LoadRoot(t2, error_string_root.value());
|
||||
__ Push(target, t2);
|
||||
__ CallRuntime(Runtime::kThrowTargetNonFunction);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
}
|
||||
|
||||
// 4. The function is a "classConstructor", need to raise an exception.
|
||||
__ bind(&class_constructor);
|
||||
{
|
||||
FrameScope frame(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
}
|
||||
|
||||
// static
|
||||
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
||||
// ----------- S t a t e -------------
|
||||
|
|
@ -2088,7 +2166,10 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
|||
{
|
||||
__ li(a0, JSParameterCount(0));
|
||||
DCHECK(receiver == a1);
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
|
||||
Register target = a1;
|
||||
GenerateCall(masm, a0, target, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_apply_string);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2113,7 +2194,9 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
|
|||
__ daddiu(a0, a0, -1);
|
||||
|
||||
// 4. Call the callable.
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
Register target = a1;
|
||||
GenerateCall(masm, a0, target, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_call_string);
|
||||
}
|
||||
|
||||
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
|
||||
|
|
@ -2587,68 +2670,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
|
|||
// -- a1 : the target to call (can be any Object).
|
||||
// -----------------------------------
|
||||
Register target = a1;
|
||||
Register map = t1;
|
||||
Register instance_type = t2;
|
||||
Register scratch = t8;
|
||||
DCHECK(!AreAliased(a0, target, map, instance_type, scratch));
|
||||
|
||||
Label non_callable, class_constructor;
|
||||
__ JumpIfSmi(target, &non_callable);
|
||||
__ LoadMap(map, target);
|
||||
__ GetInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
|
||||
scratch);
|
||||
__ TailCallBuiltin(Builtins::CallFunction(mode), ls, scratch,
|
||||
Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
|
||||
FIRST_CALLABLE_JS_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallBoundFunction, eq, instance_type,
|
||||
Operand(JS_BOUND_FUNCTION_TYPE));
|
||||
|
||||
// Check if target has a [[Call]] internal method.
|
||||
{
|
||||
Register flags = t1;
|
||||
__ Lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
|
||||
map = no_reg;
|
||||
__ And(flags, flags, Operand(Map::Bits1::IsCallableBit::kMask));
|
||||
__ Branch(&non_callable, eq, flags, Operand(zero_reg));
|
||||
}
|
||||
|
||||
__ TailCallBuiltin(Builtin::kCallProxy, eq, instance_type,
|
||||
Operand(JS_PROXY_TYPE));
|
||||
|
||||
// Check if target is a wrapped function and call CallWrappedFunction external
|
||||
// builtin
|
||||
__ TailCallBuiltin(Builtin::kCallWrappedFunction, eq, instance_type,
|
||||
Operand(JS_WRAPPED_FUNCTION_TYPE));
|
||||
|
||||
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
||||
// Check that the function is not a "classConstructor".
|
||||
__ Branch(&class_constructor, eq, instance_type,
|
||||
Operand(JS_CLASS_CONSTRUCTOR_TYPE));
|
||||
|
||||
// 2. Call to something else, which might have a [[Call]] internal method (if
|
||||
// not we raise an exception).
|
||||
// Overwrite the original receiver with the (original) target.
|
||||
__ StoreReceiver(target);
|
||||
// Let the "call_as_function_delegate" take care of the rest.
|
||||
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
|
||||
__ TailCallBuiltin(
|
||||
Builtins::CallFunction(ConvertReceiverMode::kNotNullOrUndefined));
|
||||
|
||||
// 3. Call to something that is not callable.
|
||||
__ bind(&non_callable);
|
||||
{
|
||||
FrameScope scope(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowCalledNonCallable);
|
||||
}
|
||||
|
||||
// 4. The function is a "classConstructor", need to raise an exception.
|
||||
__ bind(&class_constructor);
|
||||
{
|
||||
FrameScope frame(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
|
||||
}
|
||||
GenerateCall(masm, a0, target, mode, std::nullopt);
|
||||
}
|
||||
|
||||
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
|
||||
|
|
@ -3052,6 +3074,10 @@ void Builtins::Generate_WasmReject(MacroAssembler* masm) {
|
|||
__ Trap();
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmFXResume(MacroAssembler* masm) { __ Trap(); }
|
||||
|
||||
void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) { __ Trap(); }
|
||||
|
||||
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
|
||||
// Only needed on x64.
|
||||
__ Trap();
|
||||
|
|
|
|||
2
deps/v8/src/builtins/object-groupby.tq
vendored
2
deps/v8/src/builtins/object-groupby.tq
vendored
|
|
@ -5,7 +5,7 @@
|
|||
namespace collections {
|
||||
|
||||
extern macro CollectionsBuiltinsAssembler::AddValueToKeyedGroup(
|
||||
implicit context: Context)(OrderedHashMap, Object, Object,
|
||||
implicit context: Context)(OrderedHashMap, JSAny, Object,
|
||||
String): OrderedHashMap;
|
||||
|
||||
extern macro CollectionsBuiltinsAssembler::NormalizeNumberKey(JSAny): JSAny;
|
||||
|
|
|
|||
239
deps/v8/src/builtins/ppc/builtins-ppc.cc
vendored
239
deps/v8/src/builtins/ppc/builtins-ppc.cc
vendored
|
|
@ -2234,6 +2234,82 @@ void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
|
|||
D::ExpectedParameterCountRegister());
|
||||
}
|
||||
|
||||
static void GenerateCall(MacroAssembler* masm, Register argc, Register target,
|
||||
ConvertReceiverMode mode,
|
||||
std::optional<RootIndex> error_string_root) {
|
||||
Register map = r7;
|
||||
Register instance_type = r8;
|
||||
Register scratch = r9;
|
||||
DCHECK(!AreAliased(argc, target, map, instance_type, scratch));
|
||||
|
||||
Label non_callable, class_constructor;
|
||||
__ JumpIfSmi(target, &non_callable);
|
||||
__ LoadMap(map, target);
|
||||
__ CompareInstanceTypeRange(map, instance_type, scratch,
|
||||
FIRST_CALLABLE_JS_FUNCTION_TYPE,
|
||||
LAST_CALLABLE_JS_FUNCTION_TYPE);
|
||||
__ TailCallBuiltin(Builtins::CallFunction(mode), le);
|
||||
__ cmpi(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallBoundFunction, eq);
|
||||
|
||||
// Check if target has a [[Call]] internal method.
|
||||
{
|
||||
Register flags = r7;
|
||||
DCHECK(!AreAliased(argc, target, flags));
|
||||
__ lbz(flags, FieldMemOperand(map, Map::kBitFieldOffset));
|
||||
map = no_reg;
|
||||
__ TestBit(flags, Map::Bits1::IsCallableBit::kShift, r0);
|
||||
__ beq(&non_callable, cr0);
|
||||
}
|
||||
|
||||
// Check if target is a proxy and call CallProxy external builtin
|
||||
__ cmpi(instance_type, Operand(JS_PROXY_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallProxy, eq);
|
||||
|
||||
// Check if target is a wrapped function and call CallWrappedFunction external
|
||||
// builtin
|
||||
__ cmpi(instance_type, Operand(JS_WRAPPED_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallWrappedFunction, eq);
|
||||
|
||||
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
||||
// Check that the function is not a "classConstructor".
|
||||
__ cmpi(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
|
||||
__ beq(&class_constructor);
|
||||
|
||||
// 2. Call to something else, which might have a [[Call]] internal method (if
|
||||
// not we raise an exception).
|
||||
// Overwrite the original receiver wth the (original) target.
|
||||
__ StoreReceiver(target);
|
||||
// Let the "call_as_function_delegate" take care of the rest.
|
||||
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
|
||||
__ TailCallBuiltin(
|
||||
Builtins::CallFunction(ConvertReceiverMode::kNotNullOrUndefined));
|
||||
|
||||
// 3. Call to something that is not callable.
|
||||
__ bind(&non_callable);
|
||||
{
|
||||
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
|
||||
if (!error_string_root.has_value()) {
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowCalledNonCallable);
|
||||
} else {
|
||||
__ LoadRoot(r5, error_string_root.value());
|
||||
__ Push(target, r5);
|
||||
__ CallRuntime(Runtime::kThrowTargetNonFunction);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
}
|
||||
|
||||
// 4. The function is a "classConstructor", need to raise an exception.
|
||||
__ bind(&class_constructor);
|
||||
{
|
||||
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
}
|
||||
|
||||
// static
|
||||
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
||||
// ----------- S t a t e -------------
|
||||
|
|
@ -2286,7 +2362,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
|||
__ bind(&no_arguments);
|
||||
{
|
||||
__ mov(r3, Operand(JSParameterCount(0)));
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
GenerateCall(masm, r3, r4, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_apply_string);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2310,7 +2387,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
|
|||
__ subi(r3, r3, Operand(1));
|
||||
|
||||
// 4. Call the callable.
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
GenerateCall(masm, r3, r4, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_call_string);
|
||||
}
|
||||
|
||||
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
|
||||
|
|
@ -2805,70 +2883,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
|
|||
// -- r4 : the target to call (can be any Object).
|
||||
// -----------------------------------
|
||||
Register target = r4;
|
||||
Register map = r7;
|
||||
Register instance_type = r8;
|
||||
Register scratch = r9;
|
||||
DCHECK(!AreAliased(r3, target, map, instance_type));
|
||||
|
||||
Label non_callable, class_constructor;
|
||||
__ JumpIfSmi(target, &non_callable);
|
||||
__ LoadMap(map, target);
|
||||
__ CompareInstanceTypeRange(map, instance_type, scratch,
|
||||
FIRST_CALLABLE_JS_FUNCTION_TYPE,
|
||||
LAST_CALLABLE_JS_FUNCTION_TYPE);
|
||||
__ TailCallBuiltin(Builtins::CallFunction(mode), le);
|
||||
__ cmpi(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallBoundFunction, eq);
|
||||
|
||||
// Check if target has a [[Call]] internal method.
|
||||
{
|
||||
Register flags = r7;
|
||||
__ lbz(flags, FieldMemOperand(map, Map::kBitFieldOffset));
|
||||
map = no_reg;
|
||||
__ TestBit(flags, Map::Bits1::IsCallableBit::kShift, r0);
|
||||
__ beq(&non_callable, cr0);
|
||||
}
|
||||
|
||||
// Check if target is a proxy and call CallProxy external builtin
|
||||
__ cmpi(instance_type, Operand(JS_PROXY_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallProxy, eq);
|
||||
|
||||
// Check if target is a wrapped function and call CallWrappedFunction external
|
||||
// builtin
|
||||
__ cmpi(instance_type, Operand(JS_WRAPPED_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallWrappedFunction, eq);
|
||||
|
||||
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
||||
// Check that the function is not a "classConstructor".
|
||||
__ cmpi(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
|
||||
__ beq(&class_constructor);
|
||||
|
||||
// 2. Call to something else, which might have a [[Call]] internal method (if
|
||||
// not we raise an exception).
|
||||
// Overwrite the original receiver the (original) target.
|
||||
__ StoreReceiver(target);
|
||||
// Let the "call_as_function_delegate" take care of the rest.
|
||||
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
|
||||
__ TailCallBuiltin(
|
||||
Builtins::CallFunction(ConvertReceiverMode::kNotNullOrUndefined));
|
||||
|
||||
// 3. Call to something that is not callable.
|
||||
__ bind(&non_callable);
|
||||
{
|
||||
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowCalledNonCallable);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
|
||||
// 4. The function is a "classConstructor", need to raise an exception.
|
||||
__ bind(&class_constructor);
|
||||
{
|
||||
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
GenerateCall(masm, r3, target, mode, std::nullopt);
|
||||
}
|
||||
|
||||
// static
|
||||
|
|
@ -3241,12 +3256,9 @@ void ReloadParentStack(MacroAssembler* masm, Register return_reg,
|
|||
Register parent = tmp2;
|
||||
__ LoadU64(parent, MemOperand(active_stack, wasm::kStackParentOffset), r0);
|
||||
|
||||
// Update active stack.
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), parent);
|
||||
|
||||
// Switch stack!
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), active_stack,
|
||||
nullptr, no_reg, {return_reg, return_value, context, parent});
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), parent, nullptr,
|
||||
no_reg, {return_reg, return_value, context, parent});
|
||||
LoadJumpBuffer(masm, parent, false, tmp3);
|
||||
}
|
||||
|
||||
|
|
@ -3454,49 +3466,21 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
|
|||
// Set a sentinel value for the spill slots visited by the GC.
|
||||
ResetWasmJspiFrameStackSlots(masm);
|
||||
|
||||
// -------------------------------------------
|
||||
// Save current state in active jump buffer.
|
||||
// -------------------------------------------
|
||||
Label resume;
|
||||
DEFINE_REG(stack);
|
||||
__ LoadRootRelative(stack, IsolateData::active_stack_offset());
|
||||
regs.ResetExcept(suspender, stack);
|
||||
|
||||
DEFINE_REG(suspender_stack);
|
||||
__ LoadU64(suspender_stack,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kStackOffset), r0);
|
||||
if (v8_flags.debug_code) {
|
||||
// -------------------------------------------
|
||||
// Check that the suspender's stack is the active stack.
|
||||
// -------------------------------------------
|
||||
// TODO(thibaudm): Once we add core stack-switching instructions, this
|
||||
// check will not hold anymore: it's possible that the active stack
|
||||
// changed (due to an internal switch), so we have to update the suspender.
|
||||
__ CmpS64(suspender_stack, stack);
|
||||
Label ok;
|
||||
__ beq(&ok);
|
||||
__ Trap();
|
||||
__ bind(&ok);
|
||||
}
|
||||
// -------------------------------------------
|
||||
// Update roots.
|
||||
// -------------------------------------------
|
||||
DEFINE_REG(caller);
|
||||
__ LoadU64(caller, MemOperand(suspender_stack, wasm::kStackParentOffset), r0);
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), caller);
|
||||
DEFINE_REG(parent);
|
||||
__ LoadTaggedField(
|
||||
parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset),
|
||||
r0);
|
||||
__ StoreRootRelative(IsolateData::active_suspender_offset(), parent);
|
||||
regs.ResetExcept(suspender, caller, stack);
|
||||
DEFINE_REG(target_stack);
|
||||
__ LoadU64(target_stack,
|
||||
FieldMemOperand(parent, WasmSuspenderObject::kStackOffset), r0);
|
||||
|
||||
// -------------------------------------------
|
||||
// Load jump buffer.
|
||||
// -------------------------------------------
|
||||
SwitchStacks(masm, ExternalReference::wasm_suspend_stack(), stack, &resume,
|
||||
no_reg, {caller, suspender});
|
||||
FREE_REG(stack);
|
||||
SwitchStacks(masm, ExternalReference::wasm_suspend_stack(), target_stack,
|
||||
&resume, no_reg, {target_stack, suspender, parent});
|
||||
__ StoreRootRelative(IsolateData::active_suspender_offset(), parent);
|
||||
__ LoadTaggedField(
|
||||
kReturnRegister0,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kPromiseOffset), r0);
|
||||
|
|
@ -3504,7 +3488,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
|
|||
MemOperand(fp, WasmJspiFrameConstants::kGCScanSlotCountOffset);
|
||||
__ Zero(GCScanSlotPlace);
|
||||
DEFINE_REG(scratch);
|
||||
LoadJumpBuffer(masm, caller, true, scratch);
|
||||
LoadJumpBuffer(masm, target_stack, true, scratch);
|
||||
if (v8_flags.debug_code) {
|
||||
__ Trap();
|
||||
}
|
||||
|
|
@ -3571,8 +3555,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
|
|||
__ LoadU64(target_stack,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kStackOffset), r0);
|
||||
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), target_stack);
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_stack(), active_stack,
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_jspi_stack(), target_stack,
|
||||
&suspend, suspender, {target_stack});
|
||||
regs.ResetExcept(target_stack);
|
||||
|
||||
|
|
@ -3618,6 +3601,30 @@ void Builtins::Generate_WasmReject(MacroAssembler* masm) {
|
|||
Generate_WasmResumeHelper(masm, wasm::OnResume::kThrow);
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmFXResume(MacroAssembler* masm) {
|
||||
__ EnterFrame(StackFrame::WASM_STACK_EXIT);
|
||||
Register target_stack = WasmFXResumeDescriptor::GetRegisterParameter(0);
|
||||
Label suspend;
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_wasmfx_stack(),
|
||||
target_stack, &suspend, no_reg, {target_stack});
|
||||
LoadJumpBuffer(masm, target_stack, true, r4);
|
||||
__ Trap();
|
||||
__ bind(&suspend);
|
||||
__ LeaveFrame(StackFrame::WASM_STACK_EXIT);
|
||||
__ blr();
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) {
|
||||
Register active_stack = r3;
|
||||
__ LoadRootRelative(active_stack, IsolateData::active_stack_offset());
|
||||
Register parent = r4;
|
||||
__ Move(parent, MemOperand(active_stack, wasm::kStackParentOffset));
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), parent, nullptr,
|
||||
no_reg, {parent});
|
||||
LoadJumpBuffer(masm, parent, true, r5);
|
||||
__ Trap();
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
|
||||
// Only needed on x64.
|
||||
__ Trap();
|
||||
|
|
@ -3630,20 +3637,18 @@ void SwitchToAllocatedStack(MacroAssembler* masm, RegisterAllocator& regs,
|
|||
Label* suspend) {
|
||||
ResetWasmJspiFrameStackSlots(masm);
|
||||
DEFINE_SCOPED(scratch)
|
||||
DEFINE_REG(target_stack)
|
||||
__ LoadRootRelative(target_stack, IsolateData::active_stack_offset());
|
||||
DEFINE_REG(parent_stack)
|
||||
__ LoadU64(parent_stack, MemOperand(target_stack, wasm::kStackParentOffset),
|
||||
DEFINE_REG(stack)
|
||||
__ LoadRootRelative(stack, IsolateData::active_suspender_offset());
|
||||
__ LoadU64(stack, FieldMemOperand(stack, WasmSuspenderObject::kStackOffset),
|
||||
r0);
|
||||
|
||||
SwitchStacks(masm, ExternalReference::wasm_start_stack(), parent_stack,
|
||||
suspend, no_reg, {wasm_instance, wrapper_buffer});
|
||||
|
||||
FREE_REG(parent_stack);
|
||||
SwitchStacks(masm, ExternalReference::wasm_start_stack(), stack, suspend,
|
||||
no_reg, {wasm_instance, wrapper_buffer});
|
||||
FREE_REG(stack);
|
||||
// Save the old stack's fp in r15, and use it to access the parameters in
|
||||
// the parent frame.
|
||||
regs.Pinned(r15, &original_fp);
|
||||
__ Move(original_fp, fp);
|
||||
DEFINE_REG(target_stack)
|
||||
__ LoadRootRelative(target_stack, IsolateData::active_stack_offset());
|
||||
LoadTargetJumpBuffer(masm, target_stack, scratch);
|
||||
FREE_REG(target_stack);
|
||||
|
|
|
|||
919
deps/v8/src/builtins/riscv/builtins-riscv.cc
vendored
919
deps/v8/src/builtins/riscv/builtins-riscv.cc
vendored
File diff suppressed because it is too large
Load Diff
239
deps/v8/src/builtins/s390/builtins-s390.cc
vendored
239
deps/v8/src/builtins/s390/builtins-s390.cc
vendored
|
|
@ -2256,6 +2256,82 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
|
|||
__ Ret();
|
||||
}
|
||||
|
||||
static void GenerateCall(MacroAssembler* masm, Register argc, Register target,
|
||||
ConvertReceiverMode mode,
|
||||
std::optional<RootIndex> error_string_root) {
|
||||
Register map = r6;
|
||||
Register instance_type = r7;
|
||||
Register scratch = r8;
|
||||
DCHECK(!AreAliased(argc, target, map, instance_type, scratch));
|
||||
|
||||
Label non_callable, class_constructor;
|
||||
__ JumpIfSmi(target, &non_callable);
|
||||
__ LoadMap(map, target);
|
||||
__ CompareInstanceTypeRange(map, instance_type, scratch,
|
||||
FIRST_CALLABLE_JS_FUNCTION_TYPE,
|
||||
LAST_CALLABLE_JS_FUNCTION_TYPE);
|
||||
__ TailCallBuiltin(Builtins::CallFunction(mode), le);
|
||||
__ CmpS64(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallBoundFunction, eq);
|
||||
|
||||
// Check if target has a [[Call]] internal method.
|
||||
{
|
||||
Register flags = r6;
|
||||
DCHECK(!AreAliased(argc, target, flags));
|
||||
__ LoadU8(flags, FieldMemOperand(map, Map::kBitFieldOffset));
|
||||
map = no_reg;
|
||||
__ TestBit(flags, Map::Bits1::IsCallableBit::kShift);
|
||||
__ beq(&non_callable);
|
||||
}
|
||||
|
||||
// Check if target is a proxy and call CallProxy external builtin
|
||||
__ CmpS64(instance_type, Operand(JS_PROXY_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallProxy, eq);
|
||||
|
||||
// Check if target is a wrapped function and call CallWrappedFunction external
|
||||
// builtin
|
||||
__ CmpS64(instance_type, Operand(JS_WRAPPED_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallWrappedFunction, eq);
|
||||
|
||||
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
||||
// Check that the function is not a "classConstructor".
|
||||
__ CmpS64(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
|
||||
__ beq(&class_constructor);
|
||||
|
||||
// 2. Call to something else, which might have a [[Call]] internal method (if
|
||||
// not we raise an exception).
|
||||
// Overwrite the original receiver with the (original) target.
|
||||
__ StoreReceiver(target);
|
||||
// Let the "call_as_function_delegate" take care of the rest.
|
||||
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
|
||||
__ TailCallBuiltin(
|
||||
Builtins::CallFunction(ConvertReceiverMode::kNotNullOrUndefined));
|
||||
|
||||
// 3. Call to something that is not callable.
|
||||
__ bind(&non_callable);
|
||||
{
|
||||
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
|
||||
if (!error_string_root.has_value()) {
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowCalledNonCallable);
|
||||
} else {
|
||||
__ LoadRoot(r4, error_string_root.value());
|
||||
__ Push(target, r4);
|
||||
__ CallRuntime(Runtime::kThrowTargetNonFunction);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
}
|
||||
|
||||
// 4. The function is a "classConstructor", need to raise an exception.
|
||||
__ bind(&class_constructor);
|
||||
{
|
||||
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
}
|
||||
|
||||
// static
|
||||
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
||||
// ----------- S t a t e -------------
|
||||
|
|
@ -2308,7 +2384,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
|||
__ bind(&no_arguments);
|
||||
{
|
||||
__ mov(r2, Operand(JSParameterCount(0)));
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
GenerateCall(masm, r2, r3, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_apply_string);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2332,7 +2409,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
|
|||
__ SubS64(r2, r2, Operand(1));
|
||||
|
||||
// 4. Call the callable.
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
GenerateCall(masm, r2, r3, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_call_string);
|
||||
}
|
||||
|
||||
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
|
||||
|
|
@ -2828,70 +2906,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
|
|||
// -- r3 : the target to call (can be any Object).
|
||||
// -----------------------------------
|
||||
Register target = r3;
|
||||
Register map = r6;
|
||||
Register instance_type = r7;
|
||||
Register scratch = r8;
|
||||
DCHECK(!AreAliased(r2, target, map, instance_type));
|
||||
|
||||
Label non_callable, class_constructor;
|
||||
__ JumpIfSmi(target, &non_callable);
|
||||
__ LoadMap(map, target);
|
||||
__ CompareInstanceTypeRange(map, instance_type, scratch,
|
||||
FIRST_CALLABLE_JS_FUNCTION_TYPE,
|
||||
LAST_CALLABLE_JS_FUNCTION_TYPE);
|
||||
__ TailCallBuiltin(Builtins::CallFunction(mode), le);
|
||||
__ CmpS64(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallBoundFunction, eq);
|
||||
|
||||
// Check if target has a [[Call]] internal method.
|
||||
{
|
||||
Register flags = r6;
|
||||
__ LoadU8(flags, FieldMemOperand(map, Map::kBitFieldOffset));
|
||||
map = no_reg;
|
||||
__ TestBit(flags, Map::Bits1::IsCallableBit::kShift);
|
||||
__ beq(&non_callable);
|
||||
}
|
||||
|
||||
// Check if target is a proxy and call CallProxy external builtin
|
||||
__ CmpS64(instance_type, Operand(JS_PROXY_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallProxy, eq);
|
||||
|
||||
// Check if target is a wrapped function and call CallWrappedFunction external
|
||||
// builtin
|
||||
__ CmpS64(instance_type, Operand(JS_WRAPPED_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallWrappedFunction, eq);
|
||||
|
||||
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
||||
// Check that the function is not a "classConstructor".
|
||||
__ CmpS64(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
|
||||
__ beq(&class_constructor);
|
||||
|
||||
// 2. Call to something else, which might have a [[Call]] internal method (if
|
||||
// not we raise an exception).
|
||||
// Overwrite the original receiver with the (original) target.
|
||||
__ StoreReceiver(target);
|
||||
// Let the "call_as_function_delegate" take care of the rest.
|
||||
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
|
||||
__ TailCallBuiltin(
|
||||
Builtins::CallFunction(ConvertReceiverMode::kNotNullOrUndefined));
|
||||
|
||||
// 3. Call to something that is not callable.
|
||||
__ bind(&non_callable);
|
||||
{
|
||||
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowCalledNonCallable);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
|
||||
// 4. The function is a "classConstructor", need to raise an exception.
|
||||
__ bind(&class_constructor);
|
||||
{
|
||||
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
GenerateCall(masm, r2, target, mode, std::nullopt);
|
||||
}
|
||||
|
||||
// static
|
||||
|
|
@ -3248,12 +3263,9 @@ void ReloadParentStack(MacroAssembler* masm, Register return_reg,
|
|||
Register parent = tmp2;
|
||||
__ LoadU64(parent, MemOperand(active_stack, wasm::kStackParentOffset));
|
||||
|
||||
// Update active stack.
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), parent);
|
||||
|
||||
// Switch stack!
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), active_stack,
|
||||
nullptr, no_reg, {return_reg, return_value, context, parent});
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), parent, nullptr,
|
||||
no_reg, {return_reg, return_value, context, parent});
|
||||
LoadJumpBuffer(masm, parent, false, tmp3);
|
||||
}
|
||||
|
||||
|
|
@ -3460,48 +3472,20 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
|
|||
// Set a sentinel value for the spill slots visited by the GC.
|
||||
ResetWasmJspiFrameStackSlots(masm);
|
||||
|
||||
// -------------------------------------------
|
||||
// Save current state in active jump buffer.
|
||||
// -------------------------------------------
|
||||
Label resume;
|
||||
DEFINE_REG(stack);
|
||||
__ LoadRootRelative(stack, IsolateData::active_stack_offset());
|
||||
regs.ResetExcept(suspender, stack);
|
||||
|
||||
DEFINE_REG(suspender_stack);
|
||||
__ LoadU64(suspender_stack,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kStackOffset));
|
||||
if (v8_flags.debug_code) {
|
||||
// -------------------------------------------
|
||||
// Check that the suspender's stack is the active stack.
|
||||
// -------------------------------------------
|
||||
// TODO(thibaudm): Once we add core stack-switching instructions, this
|
||||
// check will not hold anymore: it's possible that the active stack
|
||||
// changed (due to an internal switch), so we have to update the suspender.
|
||||
__ CmpS64(suspender_stack, stack);
|
||||
Label ok;
|
||||
__ beq(&ok);
|
||||
__ Trap();
|
||||
__ bind(&ok);
|
||||
}
|
||||
// -------------------------------------------
|
||||
// Update roots.
|
||||
// -------------------------------------------
|
||||
DEFINE_REG(caller);
|
||||
__ LoadU64(caller, MemOperand(suspender_stack, wasm::kStackParentOffset));
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), caller);
|
||||
DEFINE_REG(parent);
|
||||
__ LoadTaggedField(
|
||||
parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
|
||||
__ StoreRootRelative(IsolateData::active_suspender_offset(), parent);
|
||||
regs.ResetExcept(suspender, caller, stack);
|
||||
DEFINE_REG(target_stack);
|
||||
__ LoadU64(target_stack,
|
||||
FieldMemOperand(parent, WasmSuspenderObject::kStackOffset));
|
||||
|
||||
// -------------------------------------------
|
||||
// Load jump buffer.
|
||||
// -------------------------------------------
|
||||
SwitchStacks(masm, ExternalReference::wasm_suspend_stack(), stack, &resume,
|
||||
no_reg, {caller, suspender});
|
||||
FREE_REG(stack);
|
||||
SwitchStacks(masm, ExternalReference::wasm_suspend_stack(), target_stack,
|
||||
&resume, no_reg, {target_stack, suspender, parent});
|
||||
__ StoreRootRelative(IsolateData::active_suspender_offset(), parent);
|
||||
__ LoadTaggedField(
|
||||
kReturnRegister0,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kPromiseOffset));
|
||||
|
|
@ -3509,7 +3493,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
|
|||
MemOperand(fp, WasmJspiFrameConstants::kGCScanSlotCountOffset);
|
||||
__ Zero(GCScanSlotPlace);
|
||||
DEFINE_REG(scratch);
|
||||
LoadJumpBuffer(masm, caller, true, scratch);
|
||||
LoadJumpBuffer(masm, target_stack, true, scratch);
|
||||
if (v8_flags.debug_code) {
|
||||
__ Trap();
|
||||
}
|
||||
|
|
@ -3573,8 +3557,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
|
|||
__ LoadU64(target_stack,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kStackOffset));
|
||||
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), target_stack);
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_stack(), active_stack,
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_jspi_stack(), target_stack,
|
||||
&suspend, suspender, {target_stack});
|
||||
regs.ResetExcept(target_stack);
|
||||
|
||||
|
|
@ -3620,6 +3603,30 @@ void Builtins::Generate_WasmReject(MacroAssembler* masm) {
|
|||
Generate_WasmResumeHelper(masm, wasm::OnResume::kThrow);
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmFXResume(MacroAssembler* masm) {
|
||||
__ EnterFrame(StackFrame::WASM_STACK_EXIT);
|
||||
Register target_stack = WasmFXResumeDescriptor::GetRegisterParameter(0);
|
||||
Label suspend;
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_wasmfx_stack(),
|
||||
target_stack, &suspend, no_reg, {target_stack});
|
||||
LoadJumpBuffer(masm, target_stack, true, r3);
|
||||
__ Trap();
|
||||
__ bind(&suspend);
|
||||
__ LeaveFrame(StackFrame::WASM_STACK_EXIT);
|
||||
__ Ret();
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) {
|
||||
Register active_stack = r2;
|
||||
__ LoadRootRelative(active_stack, IsolateData::active_stack_offset());
|
||||
Register parent = r3;
|
||||
__ Move(parent, MemOperand(active_stack, wasm::kStackParentOffset));
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), parent, nullptr,
|
||||
no_reg, {parent});
|
||||
LoadJumpBuffer(masm, parent, true, r4);
|
||||
__ Trap();
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
|
||||
// Only needed on x64.
|
||||
__ Trap();
|
||||
|
|
@ -3632,19 +3639,17 @@ void SwitchToAllocatedStack(MacroAssembler* masm, RegisterAllocator& regs,
|
|||
Label* suspend) {
|
||||
ResetWasmJspiFrameStackSlots(masm);
|
||||
DEFINE_SCOPED(scratch)
|
||||
DEFINE_REG(target_stack)
|
||||
__ LoadRootRelative(target_stack, IsolateData::active_stack_offset());
|
||||
DEFINE_REG(parent_stack)
|
||||
__ LoadU64(parent_stack, MemOperand(target_stack, wasm::kStackParentOffset));
|
||||
|
||||
SwitchStacks(masm, ExternalReference::wasm_start_stack(), parent_stack,
|
||||
suspend, no_reg, {wasm_instance, wrapper_buffer});
|
||||
|
||||
FREE_REG(parent_stack);
|
||||
DEFINE_REG(stack)
|
||||
__ LoadRootRelative(stack, IsolateData::active_suspender_offset());
|
||||
__ LoadU64(stack, FieldMemOperand(stack, WasmSuspenderObject::kStackOffset));
|
||||
SwitchStacks(masm, ExternalReference::wasm_start_stack(), stack, suspend,
|
||||
no_reg, {wasm_instance, wrapper_buffer});
|
||||
FREE_REG(stack);
|
||||
// Save the old stack's fp in r13, and use it to access the parameters in
|
||||
// the parent frame.
|
||||
regs.Pinned(r13, &original_fp);
|
||||
__ Move(original_fp, fp);
|
||||
DEFINE_REG(target_stack)
|
||||
__ LoadRootRelative(target_stack, IsolateData::active_stack_offset());
|
||||
LoadTargetJumpBuffer(masm, target_stack, scratch);
|
||||
FREE_REG(target_stack);
|
||||
|
|
|
|||
deps/v8/src/builtins/torque-internal.tq (12 lines changed, vendored)
|
|
@ -323,13 +323,13 @@ InitializeFieldsFromIterator<char16, UninitializedIterator>(
|
|||
_originIterator: UninitializedIterator): void {}
|
||||
|
||||
extern macro IsDoubleHole(HeapObject, intptr): bool;
|
||||
@if(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE)
|
||||
@if(V8_ENABLE_UNDEFINED_DOUBLE)
|
||||
extern macro IsDoubleUndefined(HeapObject, intptr): bool;
|
||||
extern macro StoreDoubleHole(HeapObject, intptr): void;
|
||||
@if(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE)
|
||||
@if(V8_ENABLE_UNDEFINED_DOUBLE)
|
||||
extern macro StoreDoubleUndefined(HeapObject, intptr): void;
|
||||
|
||||
@if(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE)
|
||||
@if(V8_ENABLE_UNDEFINED_DOUBLE)
|
||||
macro LoadFloat64OrUndefinedOrHole(r:&float64_or_undefined_or_hole):
|
||||
float64_or_undefined_or_hole {
|
||||
return float64_or_undefined_or_hole{
|
||||
|
|
@ -341,7 +341,7 @@ macro LoadFloat64OrUndefinedOrHole(r:&float64_or_undefined_or_hole):
|
|||
};
|
||||
}
|
||||
|
||||
@ifnot(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE)
|
||||
@ifnot(V8_ENABLE_UNDEFINED_DOUBLE)
|
||||
macro LoadFloat64OrHole(r:&float64_or_undefined_or_hole):
|
||||
float64_or_undefined_or_hole {
|
||||
return float64_or_undefined_or_hole{
|
||||
|
|
@ -351,7 +351,7 @@ macro LoadFloat64OrHole(r:&float64_or_undefined_or_hole):
|
|||
};
|
||||
}
|
||||
|
||||
@if(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE)
|
||||
@if(V8_ENABLE_UNDEFINED_DOUBLE)
|
||||
macro StoreFloat64OrUndefinedOrHole(
|
||||
r:&float64_or_undefined_or_hole,
|
||||
value: float64_or_undefined_or_hole): void {
|
||||
|
|
@ -366,7 +366,7 @@ macro StoreFloat64OrUndefinedOrHole(
|
|||
}
|
||||
}
|
||||
|
||||
@ifnot(V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE)
|
||||
@ifnot(V8_ENABLE_UNDEFINED_DOUBLE)
|
||||
macro StoreFloat64OrHole(
|
||||
r:&float64_or_undefined_or_hole,
|
||||
value: float64_or_undefined_or_hole): void {
|
||||
|
|
|
|||
|
|
@ -213,8 +213,34 @@ transitioning macro ConstructByIterable(
|
|||
implicit context: Context)(iterable: JSReceiver,
|
||||
iteratorFn: Callable): never
|
||||
labels IfConstructByArrayLike(JSArray, uintptr) {
|
||||
const array: JSArray =
|
||||
IterableToListConvertHoles(context, iterable, iteratorFn);
|
||||
let array: JSArray;
|
||||
try {
|
||||
array = Cast<JSArray>(iterable) otherwise UseUserProvidedIterator;
|
||||
|
||||
const elementsKind = array.map.elements_kind;
|
||||
// Can only use the fast path for numeric fast elements arrays, since
|
||||
// objects in the array can have side-effects in their ToNumber conversion.
|
||||
if (!IsFastSmiElementsKind(elementsKind) &&
|
||||
!IsDoubleElementsKind(elementsKind)) {
|
||||
goto UseUserProvidedIterator;
|
||||
}
|
||||
|
||||
// Check that the ArrayIterator prototype's "next" method hasn't been
|
||||
// overridden.
|
||||
if (IsArrayIteratorProtectorCellInvalid()) goto UseUserProvidedIterator;
|
||||
|
||||
// Check that the iterator function is exactly
|
||||
// Builtin::kArrayPrototypeValues.
|
||||
const iteratorJSFn =
|
||||
Cast<JSFunction>(iteratorFn) otherwise UseUserProvidedIterator;
|
||||
if (!TaggedEqual(
|
||||
iteratorJSFn.shared_function_info.untrusted_function_data,
|
||||
SmiConstant(kArrayPrototypeValues))) {
|
||||
goto UseUserProvidedIterator;
|
||||
}
|
||||
} label UseUserProvidedIterator deferred {
|
||||
array = IterableToListConvertHoles(context, iterable, iteratorFn);
|
||||
}
|
||||
// Max JSArray length is a valid JSTypedArray length so we just use it.
|
||||
goto IfConstructByArrayLike(array, array.length_uintptr);
|
||||
}
|
||||
|
|
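Restated as a plain-C++ predicate (a sketch, not the Torque macro above), the JSArray fast path that bypasses the iterator protocol is taken only when all three guards pass; otherwise the deferred UseUserProvidedIterator label falls back to IterableToListConvertHoles.

// Sketch only: the booleans stand in for the elements-kind check, the
// ArrayIterator protector cell (invalidated e.g. when %ArrayIteratorPrototype%.next
// or Array.prototype[Symbol.iterator] is overridden), and the identity check
// against Builtin::kArrayPrototypeValues.
bool UseJSArrayFastPath(bool has_fast_smi_or_double_elements,
                        bool array_iterator_protector_valid,
                        bool iterator_is_array_prototype_values) {
  return has_fast_smi_or_double_elements && array_iterator_protector_valid &&
         iterator_is_array_prototype_values;
}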
|
|||
deps/v8/src/builtins/weak-ref.tq (10 lines changed, vendored)
|
|
@ -36,10 +36,16 @@ transitioning javascript builtin WeakRefConstructor(
|
|||
// "%WeakRefPrototype%", « [[WeakRefTarget]] »).
|
||||
const map = GetDerivedMap(target, UnsafeCast<JSReceiver>(newTarget));
|
||||
const weakRef = UnsafeCast<JSWeakRef>(AllocateFastOrSlowJSObjectFromMap(map));
|
||||
// 5. Set weakRef.[[WeakRefTarget]] to target.
|
||||
// We do step 5 and set the target before step 4 which calls
|
||||
// `runtime::JSWeakRefAddToKeptObjects`, as that method can trigger GC and
|
||||
// this reorder is otherwise not observable from JS. That way we can avoid
|
||||
// promoting the `weakRef` from the young generation to the old generation,
|
||||
// and don't need generational barriers for the reference from `weakRef` to
|
||||
// `weakTarget`.
|
||||
weakRef.target = weakTarget;
|
||||
// 4. Perform ! AddToKeptObjects(target).
|
||||
runtime::JSWeakRefAddToKeptObjects(weakTarget);
|
||||
// 5. Set weakRef.[[WeakRefTarget]] to target.
|
||||
weakRef.target = weakTarget;
|
||||
// 6. Return weakRef.
|
||||
return weakRef;
|
||||
}
|
||||
|
|
|
|||
deps/v8/src/builtins/x64/builtins-x64.cc (265 lines changed, vendored)
|
|
@ -2153,6 +2153,86 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
|
|||
__ ret(1 * kSystemPointerSize); // Remove rax.
|
||||
}
|
||||
|
||||
static void GenerateCall(MacroAssembler* masm, Register argc, Register target,
|
||||
ConvertReceiverMode mode,
|
||||
std::optional<RootIndex> error_string_root) {
|
||||
Register map = rcx;
|
||||
Register instance_type = rdx;
|
||||
|
||||
DCHECK(!AreAliased(argc, target, map, instance_type));
|
||||
|
||||
Label non_callable, class_constructor;
|
||||
__ JumpIfSmi(target, &non_callable);
|
||||
__ LoadMap(map, target);
|
||||
__ CmpInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
|
||||
LAST_CALLABLE_JS_FUNCTION_TYPE);
|
||||
__ TailCallBuiltin(Builtins::CallFunction(mode), below_equal);
|
||||
__ cmpw(instance_type, Immediate(JS_BOUND_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallBoundFunction, equal);
|
||||
|
||||
// Check if target has a [[Call]] internal method.
|
||||
{
|
||||
Register flags = rcx;
|
||||
DCHECK(!AreAliased(argc, target, flags));
|
||||
__ movzxbl(flags, FieldOperand(map, Map::kBitFieldOffset));
|
||||
map = no_reg;
|
||||
__ testl(flags, Immediate(Map::Bits1::IsCallableBit::kMask));
|
||||
__ j(zero, &non_callable, Label::kNear);
|
||||
}
|
||||
|
||||
// Check if target is a proxy and call CallProxy external builtin
|
||||
__ cmpw(instance_type, Immediate(JS_PROXY_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallProxy, equal);
|
||||
|
||||
// Check if target is a wrapped function and call CallWrappedFunction external
|
||||
// builtin
|
||||
__ cmpw(instance_type, Immediate(JS_WRAPPED_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallWrappedFunction, equal);
|
||||
|
||||
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
||||
// Check that the function is not a "classConstructor".
|
||||
__ cmpw(instance_type, Immediate(JS_CLASS_CONSTRUCTOR_TYPE));
|
||||
__ j(equal, &class_constructor);
|
||||
|
||||
// 2. Call to something else, which might have a [[Call]] internal method (if
|
||||
// not we raise an exception).
|
||||
// Overwrite the original receiver with the (original) target.
|
||||
StackArgumentsAccessor args(rax);
|
||||
__ movq(args.GetReceiverOperand(), target);
|
||||
|
||||
// Let the "call_as_function_delegate" take care of the rest.
|
||||
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
|
||||
__ TailCallBuiltin(
|
||||
Builtins::CallFunction(ConvertReceiverMode::kNotNullOrUndefined));
|
||||
|
||||
// 3. Call to something that is not callable.
|
||||
__ bind(&non_callable);
|
||||
{
|
||||
FrameScope scope(masm, StackFrame::INTERNAL);
|
||||
if (!error_string_root.has_value()) {
|
||||
// Use the simpler error for Generate_Call
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowCalledNonCallable);
|
||||
} else {
|
||||
// Use the more specific error for Function.prototype.call/apply
|
||||
__ LoadRoot(rdx, error_string_root.value());
|
||||
__ Push(target);
|
||||
__ Push(rdx);
|
||||
__ CallRuntime(Runtime::kThrowTargetNonFunction);
|
||||
__ Trap();
|
||||
}
|
||||
}
|
||||
|
||||
// 4. The function is a "classConstructor", need to raise an exception.
|
||||
__ bind(&class_constructor);
|
||||
{
|
||||
FrameScope frame(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
|
||||
__ Trap();
|
||||
}
|
||||
}
|
||||
|
||||
// static
|
||||
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
||||
// ----------- S t a t e -------------
|
||||
|
|
@ -2210,7 +2290,10 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
|||
__ bind(&no_arguments);
|
||||
{
|
||||
__ Move(rax, JSParameterCount(0));
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
|
||||
Register target = rdi;
|
||||
GenerateCall(masm, rax, target, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_apply_string);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2253,7 +2336,10 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
|
|||
// 5. Call the callable.
|
||||
// Since we did not create a frame for Function.prototype.call() yet,
|
||||
// we use a normal Call builtin here.
|
||||
__ TailCallBuiltin(Builtins::Call());
|
||||
|
||||
Register target = rdi;
|
||||
GenerateCall(masm, rax, target, ConvertReceiverMode::kAny,
|
||||
RootIndex::kFunction_prototype_call_string);
|
||||
}
|
||||
|
||||
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
|
||||
|
|
@ -2689,19 +2775,14 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
|
|||
// -- rbx : the number of [[BoundArguments]] (checked to be non-zero)
|
||||
// -----------------------------------
|
||||
|
||||
// TODO(victor): Use Generate_StackOverflowCheck here.
|
||||
// Check the stack for overflow.
|
||||
{
|
||||
Label stack_overflow;
|
||||
__ StackOverflowCheck(rbx, &stack_overflow, Label::kNear);
|
||||
Label done;
|
||||
__ shlq(rbx, Immediate(kSystemPointerSizeLog2));
|
||||
__ movq(kScratchRegister, rsp);
|
||||
__ subq(kScratchRegister, rbx);
|
||||
__ jmp(&done, Label::kNear);
|
||||
|
||||
// We are not trying to catch interruptions (i.e. debug break and
|
||||
// preemption) here, so check the "real stack limit".
|
||||
__ cmpq(kScratchRegister,
|
||||
__ StackLimitAsOperand(StackLimitKind::kRealStackLimit));
|
||||
__ j(above_equal, &done, Label::kNear);
|
||||
__ bind(&stack_overflow);
|
||||
{
|
||||
FrameScope scope(masm, StackFrame::MANUAL);
|
||||
__ EnterFrame(StackFrame::INTERNAL);
|
||||
|
|
@ -2717,10 +2798,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
|
|||
// Push [[BoundArguments]] to the stack.
|
||||
{
|
||||
Label loop;
|
||||
__ LoadTaggedField(
|
||||
rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
|
||||
__ SmiUntagFieldUnsigned(
|
||||
rbx, FieldOperand(rcx, offsetof(FixedArray, length_)));
|
||||
__ addq(rax, rbx); // Adjust effective number of arguments.
|
||||
__ bind(&loop);
|
||||
// Instead of doing decl(rbx) here subtract kTaggedSize from the header
|
||||
|
|
@ -2772,70 +2849,9 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
|
|||
// -- rax : the number of arguments
|
||||
// -- rdi : the target to call (can be any Object)
|
||||
// -----------------------------------
|
||||
Register argc = rax;
|
||||
Register target = rdi;
|
||||
Register map = rcx;
|
||||
Register instance_type = rdx;
|
||||
DCHECK(!AreAliased(argc, target, map, instance_type));
|
||||
|
||||
StackArgumentsAccessor args(argc);
|
||||
|
||||
Label non_callable, class_constructor;
|
||||
__ JumpIfSmi(target, &non_callable);
|
||||
__ LoadMap(map, target);
|
||||
__ CmpInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
|
||||
LAST_CALLABLE_JS_FUNCTION_TYPE);
|
||||
__ TailCallBuiltin(Builtins::CallFunction(mode), below_equal);
|
||||
|
||||
__ cmpw(instance_type, Immediate(JS_BOUND_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallBoundFunction, equal);
|
||||
|
||||
// Check if target has a [[Call]] internal method.
|
||||
__ testb(FieldOperand(map, Map::kBitFieldOffset),
|
||||
Immediate(Map::Bits1::IsCallableBit::kMask));
|
||||
__ j(zero, &non_callable, Label::kNear);
|
||||
|
||||
// Check if target is a proxy and call CallProxy external builtin
|
||||
__ cmpw(instance_type, Immediate(JS_PROXY_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallProxy, equal);
|
||||
|
||||
// Check if target is a wrapped function and call CallWrappedFunction external
|
||||
// builtin
|
||||
__ cmpw(instance_type, Immediate(JS_WRAPPED_FUNCTION_TYPE));
|
||||
__ TailCallBuiltin(Builtin::kCallWrappedFunction, equal);
|
||||
|
||||
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
||||
// Check that the function is not a "classConstructor".
|
||||
__ cmpw(instance_type, Immediate(JS_CLASS_CONSTRUCTOR_TYPE));
|
||||
__ j(equal, &class_constructor);
|
||||
|
||||
// 2. Call to something else, which might have a [[Call]] internal method (if
|
||||
// not we raise an exception).
|
||||
|
||||
// Overwrite the original receiver with the (original) target.
|
||||
__ movq(args.GetReceiverOperand(), target);
|
||||
// Let the "call_as_function_delegate" take care of the rest.
|
||||
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
|
||||
__ TailCallBuiltin(
|
||||
Builtins::CallFunction(ConvertReceiverMode::kNotNullOrUndefined));
|
||||
|
||||
// 3. Call to something that is not callable.
|
||||
__ bind(&non_callable);
|
||||
{
|
||||
FrameScope scope(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowCalledNonCallable);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
|
||||
// 4. The function is a "classConstructor", need to raise an exception.
|
||||
__ bind(&class_constructor);
|
||||
{
|
||||
FrameScope frame(masm, StackFrame::INTERNAL);
|
||||
__ Push(target);
|
||||
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
|
||||
__ Trap(); // Unreachable.
|
||||
}
|
||||
GenerateCall(masm, rax, target, mode, std::nullopt);
|
||||
}
|
||||
|
||||
// static
|
||||
|
|
@ -3481,10 +3497,9 @@ void ReloadParentStack(MacroAssembler* masm, Register promise,
|
|||
|
||||
Register parent = tmp2;
|
||||
__ Move(parent, MemOperand(active_stack, wasm::kStackParentOffset));
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), parent);
|
||||
// Switch stack!
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), active_stack,
|
||||
nullptr, no_reg, {promise, return_value, context, parent});
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), parent, nullptr,
|
||||
no_reg, {promise, return_value, context, parent});
|
||||
LoadJumpBuffer(masm, parent, false);
|
||||
}
|
||||
|
||||
|
|
@ -3527,12 +3542,13 @@ void SwitchToAllocatedStack(MacroAssembler* masm, Register wasm_instance,
|
|||
Register new_wrapper_buffer, Register scratch,
|
||||
Label* suspend) {
|
||||
ResetWasmJspiFrameStackSlots(masm);
|
||||
Register parent_stack = new_wrapper_buffer;
|
||||
__ LoadRootRelative(parent_stack, IsolateData::active_stack_offset());
|
||||
__ Move(parent_stack, MemOperand(parent_stack, wasm::kStackParentOffset));
|
||||
SwitchStacks(masm, ExternalReference::wasm_start_stack(), parent_stack,
|
||||
suspend, no_reg, {kWasmImplicitArgRegister, wrapper_buffer});
|
||||
parent_stack = no_reg;
|
||||
Register stack = new_wrapper_buffer;
|
||||
__ LoadRootRelative(stack, IsolateData::active_suspender_offset());
|
||||
__ LoadExternalPointerField(
|
||||
stack, FieldOperand(stack, WasmSuspenderObject::kStackOffset),
|
||||
kWasmStackMemoryTag, kScratchRegister);
|
||||
SwitchStacks(masm, ExternalReference::wasm_start_stack(), stack, suspend,
|
||||
no_reg, {kWasmImplicitArgRegister, wrapper_buffer});
|
||||
Register target_stack = scratch;
|
||||
__ LoadRootRelative(target_stack, IsolateData::active_stack_offset());
|
||||
// Save the old stack's rbp in r9, and use it to access the parameters in
|
||||
|
|
@ -3889,58 +3905,27 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
|
|||
// Set a sentinel value for the spill slots visited by the GC.
|
||||
ResetWasmJspiFrameStackSlots(masm);
|
||||
|
||||
// -------------------------------------------
|
||||
// Save current state in active jump buffer.
|
||||
// -------------------------------------------
|
||||
Label resume;
|
||||
Register stack = rbx;
|
||||
__ LoadRootRelative(stack, IsolateData::active_stack_offset());
|
||||
// live: [rax, rbx]
|
||||
|
||||
Register suspender_stack = rdx;
|
||||
__ LoadExternalPointerField(
|
||||
suspender_stack,
|
||||
FieldOperand(suspender, WasmSuspenderObject::kStackOffset),
|
||||
kWasmStackMemoryTag, kScratchRegister);
|
||||
#ifdef DEBUG
|
||||
// -------------------------------------------
|
||||
// Check that the suspender's stack is the active stack.
|
||||
// -------------------------------------------
|
||||
// TODO(thibaudm): Once we add core stack-switching instructions, this check
|
||||
// will not hold anymore: it's possible that the active stack changed
|
||||
// (due to an internal switch), so we have to update the suspender.
|
||||
__ cmpq(suspender_stack, stack);
|
||||
Label ok;
|
||||
__ j(equal, &ok);
|
||||
__ Trap();
|
||||
__ bind(&ok);
|
||||
#endif
|
||||
|
||||
// -------------------------------------------
|
||||
// Update roots.
|
||||
// -------------------------------------------
|
||||
Register caller = rcx;
|
||||
__ Move(caller, MemOperand(suspender_stack, wasm::kStackParentOffset));
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), caller);
|
||||
Register parent = rdx;
|
||||
__ LoadProtectedPointerField(
|
||||
parent, FieldOperand(suspender, WasmSuspenderObject::kParentOffset));
|
||||
Register target_stack = rcx;
|
||||
__ LoadExternalPointerField(
|
||||
target_stack, FieldOperand(parent, WasmSuspenderObject::kStackOffset),
|
||||
kWasmStackMemoryTag, kScratchRegister);
|
||||
|
||||
// Switch stacks.
|
||||
SwitchStacks(masm, ExternalReference::wasm_suspend_stack(), target_stack,
|
||||
&resume, no_reg, {target_stack, suspender, parent});
|
||||
__ StoreRootRelative(IsolateData::active_suspender_offset(), parent);
|
||||
parent = no_reg;
|
||||
// live: [suspender:rax, stack:rbx, caller:rcx]
|
||||
|
||||
// -------------------------------------------
|
||||
// Load jump buffer.
|
||||
// -------------------------------------------
|
||||
SwitchStacks(masm, ExternalReference::wasm_suspend_stack(), stack, &resume,
|
||||
no_reg, {caller, suspender});
|
||||
__ LoadTaggedField(
|
||||
kReturnRegister0,
|
||||
FieldOperand(suspender, WasmSuspenderObject::kPromiseOffset));
|
||||
MemOperand GCScanSlotPlace =
|
||||
MemOperand(rbp, WasmJspiFrameConstants::kGCScanSlotCountOffset);
|
||||
__ Move(GCScanSlotPlace, 0);
|
||||
LoadJumpBuffer(masm, caller, true);
|
||||
LoadJumpBuffer(masm, target_stack, true);
|
||||
__ Trap();
|
||||
__ bind(&resume);
|
||||
__ endbr64();
|
||||
|
|
@ -4004,8 +3989,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
|
|||
__ LoadExternalPointerField(
|
||||
target_stack, FieldOperand(suspender, WasmSuspenderObject::kStackOffset),
|
||||
kWasmStackMemoryTag, kScratchRegister);
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), target_stack);
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_stack(), active_stack,
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_jspi_stack(), target_stack,
|
||||
&suspend, suspender, {target_stack});
|
||||
suspender = no_reg;
|
||||
|
||||
|
|
@ -4046,6 +4030,31 @@ void Builtins::Generate_WasmReject(MacroAssembler* masm) {
|
|||
Generate_WasmResumeHelper(masm, wasm::OnResume::kThrow);
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmFXResume(MacroAssembler* masm) {
|
||||
__ EnterFrame(StackFrame::WASM_STACK_EXIT);
|
||||
Register target_stack = WasmFXResumeDescriptor::GetRegisterParameter(0);
|
||||
Label suspend;
|
||||
SwitchStacks(masm, ExternalReference::wasm_resume_wasmfx_stack(),
|
||||
target_stack, &suspend, no_reg, {target_stack});
|
||||
LoadJumpBuffer(masm, target_stack, true);
|
||||
__ Trap();
|
||||
__ bind(&suspend);
|
||||
__ endbr64();
|
||||
__ LeaveFrame(StackFrame::WASM_STACK_EXIT);
|
||||
__ ret(0);
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) {
|
||||
Register active_stack = rax;
|
||||
__ LoadRootRelative(active_stack, IsolateData::active_stack_offset());
|
||||
Register parent = rbx;
|
||||
__ Move(parent, MemOperand(active_stack, wasm::kStackParentOffset));
|
||||
SwitchStacks(masm, ExternalReference::wasm_return_stack(), parent, nullptr,
|
||||
no_reg, {parent});
|
||||
LoadJumpBuffer(masm, parent, true);
|
||||
__ Trap();
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
|
||||
MemOperand OSRTargetSlot(rbp, -wasm::kOSRTargetOffset);
|
||||
__ movq(kScratchRegister, OSRTargetSlot);
|
||||
|
|
|
|||
deps/v8/src/codegen/arm/assembler-arm.cc (37 lines changed, vendored)
|
|
@ -266,6 +266,24 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
|
|||
CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
|
||||
}
|
||||
|
||||
static bool IsEabiHardFloat() {
|
||||
const char* arm_fpu_abi = v8_flags.mfloat_abi;
|
||||
if (strcmp(arm_fpu_abi, "hardfp") == 0) {
|
||||
return true;
|
||||
} else if (strcmp(arm_fpu_abi, "softfp") == 0) {
|
||||
return false;
|
||||
} else if (strcmp(arm_fpu_abi, "auto") == 0) {
|
||||
#ifdef __arm__
|
||||
return base::OS::ArmUsingHardFloat();
|
||||
#elif USE_EABI_HARDFLOAT
|
||||
return true;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
void CpuFeatures::PrintTarget() {
|
||||
const char* arm_arch = nullptr;
|
||||
const char* arm_target_type = "";
|
||||
|
|
@ -302,13 +320,7 @@ void CpuFeatures::PrintTarget() {
|
|||
arm_fpu = " vfp2";
|
||||
#endif
|
||||
|
||||
#ifdef __arm__
|
||||
arm_float_abi = base::OS::ArmUsingHardFloat() ? "hard" : "softfp";
|
||||
#elif USE_EABI_HARDFLOAT
|
||||
arm_float_abi = "hard";
|
||||
#else
|
||||
arm_float_abi = "softfp";
|
||||
#endif
|
||||
arm_float_abi = IsEabiHardFloat() ? "hard" : "softfp";
|
||||
|
||||
#if defined __arm__ && (defined __thumb__) || (defined __thumb2__)
|
||||
arm_thumb = " thumb";
|
||||
|
|
@ -323,13 +335,7 @@ void CpuFeatures::PrintFeatures() {
|
|||
CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
|
||||
CpuFeatures::IsSupported(VFPv3), CpuFeatures::IsSupported(VFP32DREGS),
|
||||
CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV));
|
||||
#ifdef __arm__
|
||||
bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
|
||||
#elif USE_EABI_HARDFLOAT
|
||||
bool eabi_hardfloat = true;
|
||||
#else
|
||||
bool eabi_hardfloat = false;
|
||||
#endif
|
||||
bool eabi_hardfloat = IsEabiHardFloat();
|
||||
printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
|
||||
}
|
||||
|
||||
|
|
@ -519,7 +525,8 @@ Assembler::Assembler(const AssemblerOptions& options,
|
|||
: AssemblerBase(options, std::move(buffer)),
|
||||
pending_32_bit_constants_(),
|
||||
scratch_register_list_(DefaultTmpList()),
|
||||
scratch_vfp_register_list_(DefaultFPTmpList()) {
|
||||
scratch_vfp_register_list_(DefaultFPTmpList()),
|
||||
use_eabi_hardfloat_(IsEabiHardFloat()) {
|
||||
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
|
||||
constant_pool_deadline_ = kMaxInt;
|
||||
const_pool_blocked_nesting_ = 0;
|
||||
|
|
|
|||
deps/v8/src/codegen/arm/assembler-arm.h (7 lines changed, vendored)
|
|
@ -49,7 +49,7 @@
|
|||
#include "src/codegen/arm/constants-arm.h"
|
||||
#include "src/codegen/arm/register-arm.h"
|
||||
#include "src/codegen/assembler.h"
|
||||
#include "src/codegen/constant-pool.h"
|
||||
#include "src/codegen/constant-pool-entry.h"
|
||||
#include "src/codegen/machine-type.h"
|
||||
#include "src/utils/boxed-float.h"
|
||||
namespace v8 {
|
||||
|
|
@ -337,6 +337,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
|
|||
GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
|
||||
}
|
||||
|
||||
// EABI variant for double arguments in use.
|
||||
bool use_eabi_hardfloat() const { return use_eabi_hardfloat_; }
|
||||
|
||||
// Label operations & relative jumps (PPUM Appendix D)
|
||||
//
|
||||
// Takes a branch opcode (cc) and a label (L) and generates
|
||||
|
|
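A small usage sketch (illustrative; the Assembler construction and options are elided): the ARM Assembler now samples the float ABI once, via IsEabiHardFloat() in its constructor, so this accessor is a plain field read instead of the per-call preprocessor checks that previously lived in MacroAssembler. The result is driven by v8_flags.mfloat_abi, presumably surfaced on the command line as --mfloat-abi ("hardfp", "softfp" or "auto"); only the flag name shown in the diff is certain.

#include <cstdio>

// Illustrative only: report which EABI variant the assembler was built for.
void DescribeFloatAbi(const Assembler& assm) {
  const char* abi = assm.use_eabi_hardfloat() ? "hardfp" : "softfp";
  std::printf("ARM EABI float ABI: %s\n", abi);
}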
@ -1335,6 +1338,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
|
|||
// The bound position, before this we cannot do instruction elimination.
|
||||
int last_bound_pos_;
|
||||
|
||||
const bool use_eabi_hardfloat_;
|
||||
|
||||
V8_INLINE void CheckBuffer();
|
||||
void GrowBuffer();
|
||||
|
||||
|
|
|
|||
deps/v8/src/codegen/arm/macro-assembler-arm.cc (73 lines changed, vendored)
|
|
@ -903,6 +903,24 @@ void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
|
|||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::CallVerifySkippedWriteBarrierStubSaveRegisters(
|
||||
Register object, Register value, SaveFPRegsMode fp_mode) {
|
||||
ASM_CODE_COMMENT(this);
|
||||
DCHECK(kCallerSaved.has(kCArgRegs[0]));
|
||||
DCHECK(kCallerSaved.has(kCArgRegs[1]));
|
||||
PushCallerSaved(fp_mode);
|
||||
CallVerifySkippedWriteBarrierStub(object, value);
|
||||
PopCallerSaved(fp_mode);
|
||||
}
|
||||
|
||||
void MacroAssembler::CallVerifySkippedWriteBarrierStub(Register object,
|
||||
Register value) {
|
||||
ASM_CODE_COMMENT(this);
|
||||
MovePair(kCArgRegs[0], object, kCArgRegs[1], value);
|
||||
PrepareCallCFunction(2);
|
||||
CallCFunction(ExternalReference::verify_skipped_write_barrier(), 2);
|
||||
}
|
||||
|
||||
void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
|
||||
Register object, Operand offset) {
|
||||
DCHECK_NE(dst_object, dst_slot);
|
||||
|
|
@ -2966,6 +2984,61 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
|
|||
UNREACHABLE();
|
||||
}
|
||||
|
||||
void MacroAssembler::PreCheckSkippedWriteBarrier(Register object,
|
||||
Register value,
|
||||
Register scratch, Label* ok) {
|
||||
ASM_CODE_COMMENT(this);
|
||||
DCHECK(!AreAliased(object, scratch));
|
||||
DCHECK(!AreAliased(value, scratch));
|
||||
|
||||
// The most common case: Static write barrier elimination is allowed on the
|
||||
// last young allocation.
|
||||
{
|
||||
UseScratchRegisterScope temps(this);
|
||||
Register scratch1 = temps.Acquire();
|
||||
sub(scratch, object, Operand(kHeapObjectTag));
|
||||
ldr(scratch1,
|
||||
MemOperand(kRootRegister, IsolateData::last_young_allocation_offset()));
|
||||
cmp(scratch, scratch1);
|
||||
b(Condition::kEqual, ok);
|
||||
}
|
||||
|
||||
// Write barrier can also be removed if value is in read-only space.
|
||||
CheckPageFlag(value, scratch, MemoryChunk::kIsInReadOnlyHeapMask, ne, ok);
|
||||
|
||||
Label not_ok;
|
||||
|
||||
// Handle allocation folding, allow WB removal if:
|
||||
// LAB start <= last_young_allocation_ < (object address+1) < LAB top
|
||||
// Note that object has tag bit set, so object == object address+1.
|
||||
|
||||
{
|
||||
UseScratchRegisterScope temps(this);
|
||||
Register scratch1 = temps.Acquire();
|
||||
|
||||
// Check LAB start <= last_young_allocation_.
|
||||
ldr(scratch, MemOperand(kRootRegister,
|
||||
IsolateData::new_allocation_info_start_offset()));
|
||||
ldr(scratch1,
|
||||
MemOperand(kRootRegister, IsolateData::last_young_allocation_offset()));
|
||||
cmp(scratch, scratch1);
|
||||
b(Condition::kUnsignedGreaterThan, ¬_ok);
|
||||
|
||||
// Check last_young_allocation_ < (object address+1).
|
||||
cmp(scratch1, object);
|
||||
b(Condition::kUnsignedGreaterThanEqual, ¬_ok);
|
||||
|
||||
// Check (object address+1) < LAB top.
|
||||
ldr(scratch, MemOperand(kRootRegister,
|
||||
IsolateData::new_allocation_info_top_offset()));
|
||||
cmp(object, scratch);
|
||||
b(Condition::kUnsignedLessThan, ok);
|
||||
}
|
||||
|
||||
// Slow path: Potentially check more cases in C++.
|
||||
bind(¬_ok);
|
||||
}
|
||||
|
||||
void MacroAssembler::ComputeCodeStartAddress(Register dst) {
|
||||
ASM_CODE_COMMENT(this);
|
||||
// We can use the register pc - 8 for the address of the current instruction.
|
||||
|
|
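Restated as a plain-C++ predicate (a sketch under the assumption that addresses fit in uintptr_t; no such helper exists in the tree), PreCheckSkippedWriteBarrier above branches to ok when any of the following holds.

#include <cstdint>

// Sketch only. tagged_object carries the heap-object tag (kHeapObjectTag == 1),
// so it equals the untagged object address + 1, as the comments above note.
bool CanSkipWriteBarrier(uintptr_t lab_start, uintptr_t lab_top,
                         uintptr_t last_young_allocation,
                         uintptr_t tagged_object,
                         bool value_in_read_only_space) {
  uintptr_t object = tagged_object - 1;               // strip the tag
  if (object == last_young_allocation) return true;   // most common case
  if (value_in_read_only_space) return true;          // no barrier for RO values
  // Allocation folding: LAB start <= last_young_allocation_ < object+1 < LAB top.
  return lab_start <= last_young_allocation &&
         last_young_allocation < tagged_object && tagged_object < lab_top;
}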
|
|||
deps/v8/src/codegen/arm/macro-assembler-arm.h (26 lines changed, vendored)
|
|
@ -343,9 +343,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
|
|||
#endif // V8_ENABLE_LEAPTIERING
|
||||
|
||||
// Load the code entry point from the Code object.
|
||||
void LoadCodeInstructionStart(
|
||||
Register destination, Register code_object,
|
||||
CodeEntrypointTag tag = kDefaultCodeEntrypointTag);
|
||||
void LoadCodeInstructionStart(Register destination, Register code_object,
|
||||
CodeEntrypointTag tag = kInvalidEntrypointTag);
|
||||
void CallCodeObject(Register code_object);
|
||||
void JumpCodeObject(Register code_object,
|
||||
JumpMode jump_mode = JumpMode::kJump);
|
||||
|
|
@ -418,6 +417,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
|
|||
CheckPageFlag(object, mask, cc, condition_met);
|
||||
}
|
||||
|
||||
void PreCheckSkippedWriteBarrier(Register object, Register value,
|
||||
Register scratch, Label* ok);
|
||||
|
||||
// Check whether d16-d31 are available on the CPU. The result is given by the
|
||||
// Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
|
||||
void CheckFor32DRegs(Register scratch);
|
||||
|
|
@ -435,6 +437,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
|
|||
Register object, Register slot_address, SaveFPRegsMode fp_mode,
|
||||
StubCallMode mode = StubCallMode::kCallBuiltinPointer);
|
||||
|
||||
void CallVerifySkippedWriteBarrierStubSaveRegisters(Register object,
|
||||
Register value,
|
||||
SaveFPRegsMode fp_mode);
|
||||
void CallVerifySkippedWriteBarrierStub(Register object, Register value);
|
||||
|
||||
// For a given |object| and |offset|:
|
||||
// - Move |object| to |dst_object|.
|
||||
// - Compute the address of the slot pointed to by |offset| in |object| and
|
||||
|
|
@ -725,17 +732,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
|
|||
void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
|
||||
DwVfpRegister double_input, StubCallMode stub_mode);
|
||||
|
||||
// EABI variant for double arguments in use.
|
||||
bool use_eabi_hardfloat() {
|
||||
#ifdef __arm__
|
||||
return base::OS::ArmUsingHardFloat();
|
||||
#elif USE_EABI_HARDFLOAT
|
||||
return true;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
// Compute the start of the generated instruction stream from the current PC.
|
||||
// This is an alternative to embedding the {CodeObject} handle as a reference.
|
||||
void ComputeCodeStartAddress(Register dst);
|
||||
|
|
@ -776,6 +772,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
|
|||
// ---------------------------------------------------------------------------
|
||||
// GC Support
|
||||
|
||||
void MaybeJumpIfReadOnlyOrSmallSmi(Register, Label*) {}
|
||||
|
||||
// Notify the garbage collector that we wrote a pointer into an object.
|
||||
// |object| is the object being stored into, |value| is the object being
|
||||
// stored.
|
||||
|
|
|
|||
deps/v8/src/codegen/arm64/constant-pool-arm64.cc (new file, 261 lines, vendored)
|
|
@ -0,0 +1,261 @@
|
|||
// Copyright 2025 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "src/codegen/assembler-arch.h"
|
||||
#include "src/codegen/assembler-inl.h"
|
||||
#include "src/codegen/constant-pool.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
// Constant Pool.
|
||||
|
||||
ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
|
||||
ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }
|
||||
|
||||
RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
|
||||
RelocInfo::Mode rmode) {
|
||||
ConstantPoolKey key(data, rmode);
|
||||
CHECK(key.is_value32());
|
||||
return RecordKey(std::move(key), assm_->pc_offset());
|
||||
}
|
||||
|
||||
RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
|
||||
RelocInfo::Mode rmode) {
|
||||
ConstantPoolKey key(data, rmode);
|
||||
CHECK(!key.is_value32());
|
||||
return RecordKey(std::move(key), assm_->pc_offset());
|
||||
}
|
||||
|
||||
RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
|
||||
RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
|
||||
if (write_reloc_info == RelocInfoStatus::kMustRecord) {
|
||||
if (key.is_value32()) {
|
||||
if (entry32_count_ == 0) first_use_32_ = offset;
|
||||
++entry32_count_;
|
||||
} else {
|
||||
if (entry64_count_ == 0) first_use_64_ = offset;
|
||||
++entry64_count_;
|
||||
}
|
||||
}
|
||||
entries_.insert(std::make_pair(key, offset));
|
||||
|
||||
if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
|
||||
// Request constant pool emission after the next instruction.
|
||||
SetNextCheckIn(1);
|
||||
}
|
||||
|
||||
return write_reloc_info;
|
||||
}
|
||||
|
||||
RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
|
||||
const ConstantPoolKey& key) {
|
||||
if (key.AllowsDeduplication()) {
|
||||
auto existing = entries_.find(key);
|
||||
if (existing != entries_.end()) {
|
||||
return RelocInfoStatus::kMustOmitForDuplicate;
|
||||
}
|
||||
}
|
||||
return RelocInfoStatus::kMustRecord;
|
||||
}
|
||||
|
||||
void ConstantPool::EmitAndClear(Jump require_jump) {
|
||||
DCHECK(!IsBlocked());
|
||||
// Prevent recursive pool emission.
|
||||
Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip);
|
||||
Alignment require_alignment =
|
||||
IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
|
||||
int size = ComputeSize(require_jump, require_alignment);
|
||||
Label size_check;
|
||||
assm_->bind(&size_check);
|
||||
assm_->RecordConstPool(size);
|
||||
|
||||
// Emit the constant pool. It is preceded by an optional branch if
|
||||
// {require_jump} and a header which will:
|
||||
// 1) Encode the size of the constant pool, for use by the disassembler.
|
||||
// 2) Terminate the program, to try to prevent execution from accidentally
|
||||
// flowing into the constant pool.
|
||||
// 3) align the 64bit pool entries to 64-bit.
|
||||
// TODO(all): Make the alignment part less fragile. Currently code is
|
||||
// allocated as a byte array so there are no guarantees the alignment will
|
||||
// be preserved on compaction. Currently it works as allocation seems to be
|
||||
// 64-bit aligned.
|
||||
|
||||
Label after_pool;
|
||||
if (require_jump == Jump::kRequired) assm_->b(&after_pool);
|
||||
|
||||
assm_->RecordComment("[ Constant Pool");
|
||||
EmitPrologue(require_alignment);
|
||||
if (require_alignment == Alignment::kRequired) assm_->Align(kInt64Size);
|
||||
EmitEntries();
|
||||
assm_->RecordComment("]");
|
||||
|
||||
if (after_pool.is_linked()) assm_->bind(&after_pool);
|
||||
|
||||
DCHECK_EQ(assm_->SizeOfCodeGeneratedSince(&size_check), size);
|
||||
Clear();
|
||||
}
|
||||
|
||||
void ConstantPool::Clear() {
|
||||
entries_.clear();
|
||||
first_use_32_ = -1;
|
||||
first_use_64_ = -1;
|
||||
entry32_count_ = 0;
|
||||
entry64_count_ = 0;
|
||||
next_check_ = 0;
|
||||
old_next_check_ = 0;
|
||||
}
|
||||
|
||||
void ConstantPool::StartBlock() {
|
||||
if (blocked_nesting_ == 0) {
|
||||
// Prevent constant pool checks from happening by setting the next check to
|
||||
// the biggest possible offset.
|
||||
old_next_check_ = next_check_;
|
||||
next_check_ = kMaxInt;
|
||||
}
|
||||
++blocked_nesting_;
|
||||
}
|
||||
|
||||
void ConstantPool::EndBlock() {
|
||||
--blocked_nesting_;
|
||||
if (blocked_nesting_ == 0) {
|
||||
DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));
|
||||
// Restore the old next_check_ value if it's less than the current
|
||||
// next_check_. This accounts for any attempt to emit pools sooner whilst
|
||||
// pools were blocked.
|
||||
next_check_ = std::min(next_check_, old_next_check_);
|
||||
}
|
||||
}
|
||||
|
||||
bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }
|
||||
|
||||
void ConstantPool::SetNextCheckIn(size_t instructions) {
|
||||
next_check_ =
|
||||
assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);
|
||||
}
|
||||
|
||||
void ConstantPool::EmitEntries() {
|
||||
for (auto iter = entries_.begin(); iter != entries_.end();) {
|
||||
DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8));
|
||||
auto range = entries_.equal_range(iter->first);
|
||||
bool shared = iter->first.AllowsDeduplication();
|
||||
for (auto it = range.first; it != range.second; ++it) {
|
||||
SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
|
||||
if (!shared) Emit(it->first);
|
||||
}
|
||||
if (shared) Emit(iter->first);
|
||||
iter = range.second;
|
||||
}
|
||||
}
|
||||
|
||||
void ConstantPool::Emit(const ConstantPoolKey& key) {
|
||||
if (key.is_value32()) {
|
||||
assm_->dd(key.value32());
|
||||
} else {
|
||||
assm_->dq(key.value64());
|
||||
}
|
||||
}
|
||||
|
||||
bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
|
||||
if (IsEmpty()) return false;
|
||||
if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
|
||||
return true;
|
||||
}
|
||||
// We compute {dist32/64}, i.e. the distance from the first instruction
|
||||
// accessing a 32bit/64bit entry in the constant pool to any of the
|
||||
// 32bit/64bit constant pool entries, respectively. This is required because
|
||||
// we do not guarantee that entries are emitted in order of reference, i.e. it
|
||||
// is possible that the entry with the earliest reference is emitted last.
|
||||
// The constant pool should be emitted if either of the following is true:
|
||||
// (A) {dist32/64} will be out of range at the next check in.
|
||||
// (B) Emission can be done behind an unconditional branch and {dist32/64}
|
||||
// exceeds {kOpportunityDist*}.
|
||||
// (C) {dist32/64} exceeds the desired approximate distance to the pool.
|
||||
int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
|
||||
size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
|
||||
size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
|
||||
if (Entry64Count() != 0) {
|
||||
// The 64-bit constants are always emitted before the 32-bit constants, so
|
||||
// we subtract the size of the 32-bit constants from {size}.
|
||||
size_t dist64 = pool_end_64 - first_use_64_;
|
||||
bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
|
||||
bool opportune_emission_without_jump =
|
||||
require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
|
||||
bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
|
||||
if (next_check_too_late || opportune_emission_without_jump ||
|
||||
approximate_distance_exceeded) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
if (Entry32Count() != 0) {
|
||||
size_t dist32 = pool_end_32 - first_use_32_;
|
||||
bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
|
||||
bool opportune_emission_without_jump =
|
||||
require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
|
||||
bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
|
||||
if (next_check_too_late || opportune_emission_without_jump ||
|
||||
approximate_distance_exceeded) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
int ConstantPool::ComputeSize(Jump require_jump,
|
||||
Alignment require_alignment) const {
|
||||
int size_up_to_marker = PrologueSize(require_jump);
|
||||
int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
|
||||
size_t size_after_marker =
|
||||
Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size;
|
||||
return size_up_to_marker + static_cast<int>(size_after_marker);
|
||||
}
|
||||
|
||||
Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
|
||||
int pc_offset) const {
|
||||
int size_up_to_marker = PrologueSize(require_jump);
|
||||
if (Entry64Count() != 0 &&
|
||||
!IsAligned(pc_offset + size_up_to_marker, kInt64Size)) {
|
||||
return Alignment::kRequired;
|
||||
}
|
||||
return Alignment::kOmitted;
|
||||
}
|
||||
|
||||
bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
|
||||
// Check that all entries are in range if the pool is emitted at {pc_offset}.
|
||||
// This ignores kPcLoadDelta (conservatively, since all offsets are positive),
|
||||
// and over-estimates the last entry's address with the pool's end.
|
||||
Alignment require_alignment =
|
||||
IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
|
||||
size_t pool_end_32 =
|
||||
pc_offset + ComputeSize(Jump::kRequired, require_alignment);
|
||||
size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
|
||||
bool entries_in_range_32 =
|
||||
Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
|
||||
bool entries_in_range_64 =
|
||||
Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
|
||||
return entries_in_range_32 && entries_in_range_64;
|
||||
}
|
||||
|
||||
ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
|
||||
: pool_(&assm->constpool_) {
|
||||
pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);
|
||||
pool_->StartBlock();
|
||||
}
|
||||
|
||||
ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
|
||||
: pool_(&assm->constpool_) {
|
||||
DCHECK_EQ(check, PoolEmissionCheck::kSkip);
|
||||
pool_->StartBlock();
|
||||
}
|
||||
|
||||
ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }
|
||||
|
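A minimal usage sketch (the assembler is assumed to be live; the details are illustrative): the margin form may flush the pool up front so that at least margin bytes of code can follow without a pool becoming due, while the PoolEmissionCheck::kSkip form only blocks, which is what EmitAndClear relies on above to avoid emitting a pool while one is already being emitted.

// Illustrative only.
void EmitContiguousSequence(Assembler* assm) {
  ConstantPool::BlockScope block(assm, /*margin=*/64);
  // ... emit instructions that must not be interleaved with a constant pool ...
}  // leaving the scope unblocks the pool and restores the earlier check deadline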
||||
void ConstantPool::MaybeCheck() {
|
||||
if (assm_->pc_offset() >= next_check_) {
|
||||
Check(Emission::kIfNeeded, Jump::kRequired);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
deps/v8/src/codegen/arm64/constant-pool-arm64.h (new file, 204 lines, vendored)
|
|
@ -0,0 +1,204 @@
|
|||
// Copyright 2025 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_CODEGEN_ARM64_CONSTANT_POOL_ARM64_H_
|
||||
#define V8_CODEGEN_ARM64_CONSTANT_POOL_ARM64_H_
|
||||
|
||||
#include <map>
|
||||
|
||||
#include "src/base/numbers/double.h"
|
||||
#include "src/codegen/label.h"
|
||||
#include "src/codegen/reloc-info.h"
|
||||
#include "src/common/globals.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
class Instruction;
|
||||
|
||||
class ConstantPoolKey {
|
||||
public:
|
||||
explicit ConstantPoolKey(uint64_t value,
|
||||
RelocInfo::Mode rmode = RelocInfo::NO_INFO)
|
||||
: is_value32_(false), value64_(value), rmode_(rmode) {}
|
||||
|
||||
explicit ConstantPoolKey(uint32_t value,
|
||||
RelocInfo::Mode rmode = RelocInfo::NO_INFO)
|
||||
: is_value32_(true), value32_(value), rmode_(rmode) {}
|
||||
|
||||
uint64_t value64() const {
|
||||
CHECK(!is_value32_);
|
||||
return value64_;
|
||||
}
|
||||
uint32_t value32() const {
|
||||
CHECK(is_value32_);
|
||||
return value32_;
|
||||
}
|
||||
|
||||
bool is_value32() const { return is_value32_; }
|
||||
RelocInfo::Mode rmode() const { return rmode_; }
|
||||
|
||||
bool AllowsDeduplication() const {
|
||||
DCHECK(rmode_ != RelocInfo::CONST_POOL &&
|
||||
rmode_ != RelocInfo::VENEER_POOL &&
|
||||
rmode_ != RelocInfo::DEOPT_SCRIPT_OFFSET &&
|
||||
rmode_ != RelocInfo::DEOPT_INLINING_ID &&
|
||||
rmode_ != RelocInfo::DEOPT_REASON && rmode_ != RelocInfo::DEOPT_ID &&
|
||||
rmode_ != RelocInfo::DEOPT_NODE_ID);
|
||||
// CODE_TARGETs can be shared because they aren't patched anymore,
|
||||
// and we make sure we emit only one reloc info for them (thus delta
|
||||
// patching will apply the delta only once). At the moment, we do not dedup
|
||||
// code targets if they are wrapped in a heap object request (value == 0).
|
||||
bool is_sharable_code_target =
|
||||
rmode_ == RelocInfo::CODE_TARGET &&
|
||||
(is_value32() ? (value32() != 0) : (value64() != 0));
|
||||
bool is_sharable_embedded_object = RelocInfo::IsEmbeddedObjectMode(rmode_);
|
||||
return RelocInfo::IsShareableRelocMode(rmode_) || is_sharable_code_target ||
|
||||
is_sharable_embedded_object;
|
||||
}
|
||||
|
||||
private:
|
||||
bool is_value32_;
|
||||
union {
|
||||
uint64_t value64_;
|
||||
uint32_t value32_;
|
||||
};
|
||||
RelocInfo::Mode rmode_;
|
||||
};
|
||||
|
||||
// Order for pool entries. 64bit entries go first.
|
||||
inline bool operator<(const ConstantPoolKey& a, const ConstantPoolKey& b) {
|
||||
if (a.is_value32() < b.is_value32()) return true;
|
||||
if (a.is_value32() > b.is_value32()) return false;
|
||||
if (a.rmode() < b.rmode()) return true;
|
||||
if (a.rmode() > b.rmode()) return false;
|
||||
if (a.is_value32()) return a.value32() < b.value32();
|
||||
return a.value64() < b.value64();
|
||||
}
|
||||
|
||||
inline bool operator==(const ConstantPoolKey& a, const ConstantPoolKey& b) {
|
||||
if (a.rmode() != b.rmode() || a.is_value32() != b.is_value32()) {
|
||||
return false;
|
||||
}
|
||||
if (a.is_value32()) return a.value32() == b.value32();
|
||||
return a.value64() == b.value64();
|
||||
}
|
||||
|
||||
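A short sketch of what this ordering buys (the pool's entries_ container behaves like a std::multimap keyed by ConstantPoolKey, as the insert and equal_range calls above suggest; the offsets below are made up): because is_value32() is false for 64-bit keys and false < true, every 64-bit entry sorts, and is therefore emitted, before any 32-bit entry, which matches the 64-bit alignment strategy described in EmitAndClear.

#include <cstdint>
#include <map>

void OrderingExample() {
  std::multimap<ConstantPoolKey, int> entries;
  entries.insert({ConstantPoolKey(uint32_t{0x12345678}), /*pc offset*/ 8});
  entries.insert({ConstantPoolKey(uint64_t{0x0123456789abcdefULL}), /*pc offset*/ 4});
  // Iteration visits the 64-bit key first, even though it was inserted second.
  for (const auto& [key, pc_offset] : entries) {
    (void)pc_offset;
    // key.is_value32() is false on the first iteration, true on the second.
  }
}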
// Constant pool generation
|
||||
enum class Jump { kOmitted, kRequired };
|
||||
enum class Emission { kIfNeeded, kForced };
|
||||
enum class Alignment { kOmitted, kRequired };
|
||||
enum class RelocInfoStatus { kMustRecord, kMustOmitForDuplicate };
|
||||
enum class PoolEmissionCheck { kSkip };
|
||||
|
||||
// Pools are emitted in the instruction stream, preferably after unconditional
|
||||
// jumps or after returns from functions (in dead code locations).
|
||||
// If a long code sequence does not contain unconditional jumps, it is
|
||||
// necessary to emit the constant pool before the pool gets too far from the
|
||||
// location it is accessed from. In this case, we emit a jump over the emitted
|
||||
// constant pool.
|
||||
// Constants in the pool may be addresses of functions that gets relocated;
|
||||
// if so, a relocation info entry is associated to the constant pool entry.
|
||||
class ConstantPool {
|
||||
public:
|
||||
explicit ConstantPool(Assembler* assm);
|
||||
~ConstantPool();
|
||||
|
||||
// Returns true when we need to write RelocInfo and false when we do not.
|
||||
RelocInfoStatus RecordEntry(uint32_t data, RelocInfo::Mode rmode);
|
||||
RelocInfoStatus RecordEntry(uint64_t data, RelocInfo::Mode rmode);
|
||||
|
||||
size_t Entry32Count() const { return entry32_count_; }
|
||||
size_t Entry64Count() const { return entry64_count_; }
|
||||
bool IsEmpty() const { return entries_.empty(); }
|
||||
// Check if pool will be out of range at {pc_offset}.
|
||||
bool IsInImmRangeIfEmittedAt(int pc_offset);
|
||||
// Size in bytes of the constant pool. Depending on parameters, the size will
|
||||
// include the branch over the pool and alignment padding.
|
||||
int ComputeSize(Jump require_jump, Alignment require_alignment) const;
|
||||
|
||||
// Emit the pool at the current pc with a branch over the pool if requested.
|
||||
void EmitAndClear(Jump require);
|
||||
bool ShouldEmitNow(Jump require_jump, size_t margin = 0) const;
|
||||
V8_EXPORT_PRIVATE void Check(Emission force_emission, Jump require_jump,
|
||||
size_t margin = 0);
|
||||
|
||||
V8_EXPORT_PRIVATE void MaybeCheck();
|
||||
void Clear();
|
||||
|
||||
// Constant pool emission can be blocked temporarily.
|
||||
bool IsBlocked() const;
|
||||
|
||||
// Repeated checking whether the constant pool should be emitted is expensive;
|
||||
// only check once a number of instructions have been generated.
|
||||
void SetNextCheckIn(size_t instructions);
|
||||
|
||||
// Class for scoping postponing the constant pool generation.
|
||||
class V8_EXPORT_PRIVATE V8_NODISCARD BlockScope {
|
||||
public:
|
||||
// BlockScope immediatelly emits the pool if necessary to ensure that
|
||||
// during the block scope at least {margin} bytes can be emitted without
|
||||
// pool emission becomming necessary.
|
||||
explicit BlockScope(Assembler* pool, size_t margin = 0);
|
||||
BlockScope(Assembler* pool, PoolEmissionCheck);
|
||||
~BlockScope();
|
||||
|
||||
private:
|
||||
ConstantPool* pool_;
|
||||
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockScope);
|
||||
};
|
||||
|
||||
// Hard limit to the const pool which must not be exceeded.
|
||||
static const size_t kMaxDistToPool32;
|
||||
static const size_t kMaxDistToPool64;
|
||||
// Approximate distance where the pool should be emitted.
|
||||
static const size_t kApproxDistToPool32;
|
||||
V8_EXPORT_PRIVATE static const size_t kApproxDistToPool64;
|
||||
// Approximate distance where the pool may be emitted if
|
||||
// no jump is required (due to a recent unconditional jump).
|
||||
static const size_t kOpportunityDistToPool32;
|
||||
static const size_t kOpportunityDistToPool64;
|
||||
// PC distance between constant pool checks.
|
||||
V8_EXPORT_PRIVATE static const size_t kCheckInterval;
|
||||
// Number of entries in the pool which trigger a check.
|
||||
static const size_t kApproxMaxEntryCount;
|
||||
|
||||
private:
|
||||
void StartBlock();
|
||||
void EndBlock();
|
||||
|
||||
void EmitEntries();
|
||||
void EmitPrologue(Alignment require_alignment);
|
||||
int PrologueSize(Jump require_jump) const;
|
||||
RelocInfoStatus RecordKey(ConstantPoolKey key, int offset);
|
||||
RelocInfoStatus GetRelocInfoStatusFor(const ConstantPoolKey& key);
|
||||
void Emit(const ConstantPoolKey& key);
|
||||
void SetLoadOffsetToConstPoolEntry(int load_offset, Instruction* entry_offset,
|
||||
const ConstantPoolKey& key);
|
||||
Alignment IsAlignmentRequiredIfEmittedAt(Jump require_jump,
|
||||
int pc_offset) const;
|
||||
|
||||
Assembler* assm_;
|
||||
// Keep track of the first instruction requiring a constant pool entry
|
||||
// since the previous constant pool was emitted.
|
||||
int first_use_32_ = -1;
|
||||
int first_use_64_ = -1;
|
||||
// We sort not according to insertion order, but since we do not insert
|
||||
// addresses (for heap objects we insert an index which is created in
|
||||
// increasing order), the order is deterministic. We map each entry to the
|
||||
// pc offset of the load. We use a multimap because we need to record the
|
||||
// pc offset of each load of the same constant so that the immediate of the
|
||||
// loads can be back-patched when the pool is emitted.
|
||||
std::multimap<ConstantPoolKey, int> entries_;
|
||||
size_t entry32_count_ = 0;
|
||||
size_t entry64_count_ = 0;
|
||||
int next_check_ = 0;
|
||||
int old_next_check_ = 0;
|
||||
int blocked_nesting_ = 0;
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
#endif // V8_CODEGEN_ARM64_CONSTANT_POOL_ARM64_H_
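The comparator and AllowsDeduplication() above drive how ConstantPool stores entries in its multimap: 64-bit constants sort ahead of 32-bit ones, and identical sharable keys collapse onto one pool slot while every use site keeps its own pc offset. A minimal standalone sketch of that multimap behaviour (PoolKey and all constants below are made up, not V8 code):

// Standalone illustration only -- PoolKey is a stand-in, not ConstantPoolKey.
#include <cstdint>
#include <iostream>
#include <map>

struct PoolKey {
  bool is_value32;
  uint64_t value;  // Holds either the 32-bit or the 64-bit payload.
  int rmode;       // Stand-in for RelocInfo::Mode.
};

// Same ordering idea as operator< above: 64-bit entries (is_value32 == false)
// sort before 32-bit ones, then by reloc mode, then by value.
bool operator<(const PoolKey& a, const PoolKey& b) {
  if (a.is_value32 != b.is_value32) return a.is_value32 < b.is_value32;
  if (a.rmode != b.rmode) return a.rmode < b.rmode;
  return a.value < b.value;
}

int main() {
  // Each insertion records the pc offset of a load that references the value.
  std::multimap<PoolKey, int> entries;
  entries.insert({{false, 0x1122334455667788ull, 0}, 16});
  entries.insert({{true, 0xCAFEBABEull, 0}, 24});
  entries.insert({{false, 0x1122334455667788ull, 0}, 40});  // Same constant again.

  // Iteration yields the 64-bit entries first; the duplicated key keeps both
  // pc offsets, so every load can be back-patched when the pool is emitted.
  for (const auto& [key, pc_offset] : entries) {
    std::cout << (key.is_value32 ? 32 : 64) << "-bit entry, load at pc offset "
              << pc_offset << "\n";
  }
  return 0;
}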
142 deps/v8/src/codegen/arm64/macro-assembler-arm64.cc (vendored)
|
|
@ -2676,8 +2676,20 @@ void MacroAssembler::ResolveWasmCodePointer(Register target,
|
|||
Register scratch = temps.AcquireX();
|
||||
Mov(scratch, global_jump_table);
|
||||
#ifdef V8_ENABLE_SANDBOX
|
||||
static_assert(sizeof(wasm::WasmCodePointerTableEntry) == 16);
|
||||
Add(target, scratch, Operand(target, LSL, 4));
|
||||
// Mask `target` to be within [0, WasmCodePointerTable::kMaxWasmCodePointers).
|
||||
static_assert(wasm::WasmCodePointerTable::kMaxWasmCodePointers <
|
||||
(kMaxUInt32 / sizeof(wasm::WasmCodePointerTableEntry)));
|
||||
static_assert(base::bits::IsPowerOfTwo(
|
||||
wasm::WasmCodePointerTable::kMaxWasmCodePointers));
|
||||
And(target.W(), target.W(),
|
||||
wasm::WasmCodePointerTable::kMaxWasmCodePointers - 1);
|
||||
|
||||
// Shift to multiply by `sizeof(WasmCodePointerTableEntry)`.
|
||||
Add(target, scratch,
|
||||
Operand(target, LSL,
|
||||
base::bits::WhichPowerOfTwo(
|
||||
sizeof(wasm::WasmCodePointerTableEntry))));
|
||||
|
||||
Ldr(scratch,
|
||||
MemOperand(target, wasm::WasmCodePointerTable::kOffsetOfSignatureHash));
|
||||
bool has_second_tmp = temps.CanAcquire();
|
||||
|
|
@ -2716,9 +2728,21 @@ void MacroAssembler::CallWasmCodePointerNoSignatureCheck(Register target) {
|
|||
UseScratchRegisterScope temps(this);
|
||||
Register scratch = temps.AcquireX();
|
||||
Mov(scratch, global_jump_table);
|
||||
constexpr unsigned int kEntrySizeLog2 =
|
||||
std::bit_width(sizeof(wasm::WasmCodePointerTableEntry)) - 1;
|
||||
Add(target, scratch, Operand(target, LSL, kEntrySizeLog2));
|
||||
|
||||
// Mask `target` to be within [0, WasmCodePointerTable::kMaxWasmCodePointers).
|
||||
static_assert(wasm::WasmCodePointerTable::kMaxWasmCodePointers <
|
||||
(kMaxUInt32 / sizeof(wasm::WasmCodePointerTableEntry)));
|
||||
static_assert(base::bits::IsPowerOfTwo(
|
||||
wasm::WasmCodePointerTable::kMaxWasmCodePointers));
|
||||
And(target.W(), target.W(),
|
||||
wasm::WasmCodePointerTable::kMaxWasmCodePointers - 1);
|
||||
|
||||
// Shift to multiply by `sizeof(WasmCodePointerTableEntry)`.
|
||||
Add(target, scratch,
|
||||
Operand(target, LSL,
|
||||
base::bits::WhichPowerOfTwo(
|
||||
sizeof(wasm::WasmCodePointerTableEntry))));
|
||||
|
||||
Ldr(target, MemOperand(target));
|
||||
|
||||
Call(target);
|
||||
|
|
@ -3895,6 +3919,70 @@ void MacroAssembler::JumpIfNotMarking(Label* not_marking,
|
|||
Cbz(scratch, not_marking);
|
||||
}
|
||||
|
||||
void MacroAssembler::PreCheckSkippedWriteBarrier(Register object,
|
||||
Register value,
|
||||
Register scratch, Label* ok) {
|
||||
ASM_CODE_COMMENT(this);
|
||||
DCHECK(!AreAliased(object, scratch));
|
||||
DCHECK(!AreAliased(value, scratch));
|
||||
|
||||
// The most common case: Static write barrier elimination is allowed on the
|
||||
// last young allocation.
|
||||
{
|
||||
UseScratchRegisterScope temps(this);
|
||||
Register scratch1 = temps.AcquireX();
|
||||
sub(scratch, object, kHeapObjectTag);
|
||||
Ldr(scratch1,
|
||||
MemOperand(kRootRegister, IsolateData::last_young_allocation_offset()));
|
||||
cmp(scratch, scratch1);
|
||||
B(Condition::kEqual, ok);
|
||||
}
|
||||
|
||||
// Write barier can also be removed if value is in read-only space.
|
||||
CheckPageFlag(value, scratch, MemoryChunk::kIsInReadOnlyHeapMask, ne, ok);
|
||||
|
||||
Label not_ok;
|
||||
|
||||
// Handle allocation folding, allow WB removal if:
|
||||
// LAB start <= last_young_allocation_ < (object address+1) < LAB top
|
||||
// Note that object has tag bit set, so object == object address+1.
|
||||
|
||||
{
|
||||
UseScratchRegisterScope temps(this);
|
||||
Register scratch1 = temps.AcquireX();
|
||||
|
||||
// Check LAB start <= last_young_allocation_.
|
||||
ldr(scratch, MemOperand(kRootRegister,
|
||||
IsolateData::new_allocation_info_start_offset()));
|
||||
ldr(scratch1,
|
||||
MemOperand(kRootRegister, IsolateData::last_young_allocation_offset()));
|
||||
cmp(scratch, scratch1);
|
||||
B(Condition::kUnsignedGreaterThan, ¬_ok);
|
||||
|
||||
// Check last_young_allocation_ < (object address+1).
|
||||
cmp(scratch1, object);
|
||||
B(Condition::kUnsignedGreaterThanEqual, ¬_ok);
|
||||
|
||||
// Check (object address+1) < LAB top.
|
||||
ldr(scratch, MemOperand(kRootRegister,
|
||||
IsolateData::new_allocation_info_top_offset()));
|
||||
cmp(object, scratch);
|
||||
B(Condition::kUnsignedLessThan, ok);
|
||||
}
|
||||
|
||||
// Slow path: Potentially check more cases in C++.
|
||||
bind(¬_ok);
|
||||
}
|
||||
|
||||
void MacroAssembler::MaybeJumpIfReadOnlyOrSmallSmi(Register value,
|
||||
Label* dest) {
|
||||
#if V8_STATIC_ROOTS_BOOL
|
||||
// Quick check for Read-only and small Smi values.
|
||||
static_assert(StaticReadOnlyRoot::kLastAllocatedRoot < kRegularPageSize);
|
||||
JumpIfUnsignedLessThan(value, kRegularPageSize, dest);
|
||||
#endif // V8_STATIC_ROOTS_BOOL
|
||||
}
|
||||
|
||||
void MacroAssembler::RecordWriteField(
|
||||
Register object, int offset, Register value, LinkRegisterStatus lr_status,
|
||||
SaveFPRegsMode save_fp, SmiCheck smi_check, ReadOnlyCheck ro_check,
|
||||
|
|
@ -3905,13 +3993,9 @@ void MacroAssembler::RecordWriteField(
|
|||
// catch stores of Smis and read-only objects.
|
||||
Label done;
|
||||
|
||||
#if V8_STATIC_ROOTS_BOOL
|
||||
if (ro_check == ReadOnlyCheck::kInline) {
|
||||
// Quick check for Read-only and small Smi values.
|
||||
static_assert(StaticReadOnlyRoot::kLastAllocatedRoot < kRegularPageSize);
|
||||
JumpIfUnsignedLessThan(value, kRegularPageSize, &done);
|
||||
MaybeJumpIfReadOnlyOrSmallSmi(value, &done);
|
||||
}
|
||||
#endif // V8_STATIC_ROOTS_BOOL
|
||||
|
||||
// Skip the barrier if writing a smi.
|
||||
if (smi_check == SmiCheck::kInline) {
|
||||
|
|
@ -4398,6 +4482,38 @@ void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
|
|||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::CallVerifySkippedWriteBarrierStubSaveRegisters(
|
||||
Register object, Register value, SaveFPRegsMode fp_mode) {
|
||||
ASM_CODE_COMMENT(this);
|
||||
PushCallerSaved(fp_mode);
|
||||
CallVerifySkippedWriteBarrierStub(object, value);
|
||||
PopCallerSaved(fp_mode);
|
||||
}
|
||||
|
||||
void MacroAssembler::CallVerifySkippedWriteBarrierStub(Register object,
|
||||
Register value) {
|
||||
ASM_CODE_COMMENT(this);
|
||||
MovePair(kCArgRegs[0], object, kCArgRegs[1], value);
|
||||
CallCFunction(ExternalReference::verify_skipped_write_barrier(), 2,
|
||||
SetIsolateDataSlots::kNo);
|
||||
}
|
||||
|
||||
void MacroAssembler::CallVerifySkippedIndirectWriteBarrierStubSaveRegisters(
|
||||
Register object, Register value, SaveFPRegsMode fp_mode) {
|
||||
ASM_CODE_COMMENT(this);
|
||||
PushCallerSaved(fp_mode);
|
||||
CallVerifySkippedIndirectWriteBarrierStub(object, value);
|
||||
PopCallerSaved(fp_mode);
|
||||
}
|
||||
|
||||
void MacroAssembler::CallVerifySkippedIndirectWriteBarrierStub(Register object,
|
||||
Register value) {
|
||||
ASM_CODE_COMMENT(this);
|
||||
MovePair(kCArgRegs[0], object, kCArgRegs[1], value);
|
||||
CallCFunction(ExternalReference::verify_skipped_indirect_write_barrier(), 2,
|
||||
SetIsolateDataSlots::kNo);
|
||||
}
|
||||
|
||||
void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
|
||||
Register object, Operand offset) {
|
||||
ASM_CODE_COMMENT(this);
|
||||
|
|
@ -4470,13 +4586,9 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
|
|||
// young generation.
|
||||
Label done;
|
||||
|
||||
#if V8_STATIC_ROOTS_BOOL
|
||||
if (ro_check == ReadOnlyCheck::kInline) {
|
||||
// Quick check for Read-only and small Smi values.
|
||||
static_assert(StaticReadOnlyRoot::kLastAllocatedRoot < kRegularPageSize);
|
||||
JumpIfUnsignedLessThan(value, kRegularPageSize, &done);
|
||||
MaybeJumpIfReadOnlyOrSmallSmi(value, &done);
|
||||
}
|
||||
#endif // V8_STATIC_ROOTS_BOOL
|
||||
|
||||
if (smi_check == SmiCheck::kInline) {
|
||||
DCHECK_EQ(0, kSmiTag);
|
||||
|
|
|
|||
|
|
@ -955,6 +955,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
|
|||
Register object, Register slot_address, SaveFPRegsMode fp_mode,
|
||||
StubCallMode mode = StubCallMode::kCallBuiltinPointer);
|
||||
|
||||
void CallVerifySkippedWriteBarrierStubSaveRegisters(Register object,
|
||||
Register value,
|
||||
SaveFPRegsMode fp_mode);
|
||||
void CallVerifySkippedWriteBarrierStub(Register object, Register value);
|
||||
|
||||
void CallVerifySkippedIndirectWriteBarrierStubSaveRegisters(
|
||||
Register object, Register value, SaveFPRegsMode fp_mode);
|
||||
void CallVerifySkippedIndirectWriteBarrierStub(Register object,
|
||||
Register value);
|
||||
|
||||
// For a given |object| and |offset|:
|
||||
// - Move |object| to |dst_object|.
|
||||
// - Compute the address of the slot pointed to by |offset| in |object| and
|
||||
|
|
@ -1074,6 +1084,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
|
|||
void LoadRootRelative(Register destination, int32_t offset) final;
|
||||
void StoreRootRelative(int32_t offset, Register value) final;
|
||||
|
||||
void PreCheckSkippedWriteBarrier(Register object, Register value,
|
||||
Register scratch, Label* ok);
|
||||
|
||||
// Operand pointing to an external reference.
|
||||
// May emit code to set up the scratch register. The operand is
|
||||
// only guaranteed to be correct as long as the scratch register
|
||||
|
|
@ -2360,6 +2373,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
|
|||
// ---------------------------------------------------------------------------
|
||||
// Garbage collector support (GC).
|
||||
|
||||
// Performs a fast check for whether `value` is a read-only object or a small
|
||||
// Smi. Only enabled in some configurations.
|
||||
void MaybeJumpIfReadOnlyOrSmallSmi(Register value, Label* dest);
|
||||
|
||||
// Notify the garbage collector that we wrote a pointer into an object.
|
||||
// |object| is the object being stored into, |value| is the object being
|
||||
// stored.
|
||||
|
|
|
|||
1 deps/v8/src/codegen/assembler.cc (vendored)
|
|
@ -223,6 +223,7 @@ bool CpuFeatures::supports_cetss_ = false;
|
|||
unsigned CpuFeatures::supported_ = 0;
|
||||
unsigned CpuFeatures::icache_line_size_ = 0;
|
||||
unsigned CpuFeatures::dcache_line_size_ = 0;
|
||||
unsigned CpuFeatures::vlen_ = 0;
|
||||
|
||||
HeapNumberRequest::HeapNumberRequest(double heap_number, int offset)
|
||||
: offset_(offset) {
|
||||
|
|
|
|||
1 deps/v8/src/codegen/bailout-reason.h (vendored)
|
|
@ -94,6 +94,7 @@ namespace internal {
|
|||
V(kUnexpectedStackPointer, "The stack pointer is not the expected value") \
|
||||
V(kUnexpectedValue, "Unexpected value") \
|
||||
V(kUninhabitableType, "Uninhabitable type") \
|
||||
V(kUnreachable, "Unreachable code") \
|
||||
V(kUnsupportedDeopt, \
|
||||
"Lazy deopt after a fast API call with return value is unsupported") \
|
||||
V(kUnsupportedModuleOperation, "Unsupported module operation") \
|
||||
|
|
|
|||
274 deps/v8/src/codegen/code-stub-assembler.cc (vendored)
|
|
@ -38,6 +38,7 @@
|
|||
#include "src/objects/property-descriptor-object.h"
|
||||
#include "src/objects/tagged-field.h"
|
||||
#include "src/roots/roots.h"
|
||||
#include "src/runtime/runtime.h"
|
||||
#include "third_party/v8/codegen/fp16-inl.h"
|
||||
|
||||
namespace v8 {
|
||||
|
|
@ -1595,6 +1596,20 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
|
|||
if (v8_flags.sticky_mark_bits && (flags & AllocationFlag::kPretenured)) {
|
||||
CSA_DCHECK(this, IsMarked(result.value()));
|
||||
}
|
||||
if (v8_flags.verify_write_barriers) {
|
||||
TNode<ExternalReference> last_young_allocation_address = ExternalConstant(
|
||||
ExternalReference::last_young_allocation_address(isolate()));
|
||||
|
||||
if (flags & AllocationFlag::kPretenured) {
|
||||
StoreNoWriteBarrier(MachineType::PointerRepresentation(),
|
||||
last_young_allocation_address, IntPtrConstant(0));
|
||||
} else {
|
||||
StoreNoWriteBarrier(MachineType::PointerRepresentation(),
|
||||
last_young_allocation_address,
|
||||
IntPtrSub(BitcastTaggedToWord(result.value()),
|
||||
IntPtrConstant(kHeapObjectTag)));
|
||||
}
|
||||
}
|
||||
return UncheckedCast<HeapObject>(result.value());
|
||||
}
|
||||
|
||||
|
|
@ -1648,36 +1663,12 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
|
|||
}
|
||||
return heap_object;
|
||||
}
|
||||
TNode<ExternalReference> top_address = ExternalConstant(
|
||||
new_space
|
||||
? ExternalReference::new_space_allocation_top_address(isolate())
|
||||
: ExternalReference::old_space_allocation_top_address(isolate()));
|
||||
|
||||
#ifdef DEBUG
|
||||
// New space is optional and if disabled both top and limit return
|
||||
// kNullAddress.
|
||||
if (ExternalReference::new_space_allocation_top_address(isolate())
|
||||
.address() != kNullAddress) {
|
||||
Address raw_top_address =
|
||||
ExternalReference::new_space_allocation_top_address(isolate())
|
||||
.address();
|
||||
Address raw_limit_address =
|
||||
ExternalReference::new_space_allocation_limit_address(isolate())
|
||||
.address();
|
||||
|
||||
CHECK_EQ(kSystemPointerSize, raw_limit_address - raw_top_address);
|
||||
}
|
||||
|
||||
DCHECK_EQ(kSystemPointerSize,
|
||||
ExternalReference::old_space_allocation_limit_address(isolate())
|
||||
.address() -
|
||||
ExternalReference::old_space_allocation_top_address(isolate())
|
||||
.address());
|
||||
#endif
|
||||
|
||||
TNode<IntPtrT> limit_address =
|
||||
IntPtrAdd(ReinterpretCast<IntPtrT>(top_address),
|
||||
IntPtrConstant(kSystemPointerSize));
|
||||
TNode<ExternalReference> top_address =
|
||||
IsolateField(new_space ? IsolateFieldId::kNewAllocationInfoTop
|
||||
: IsolateFieldId::kOldAllocationInfoTop);
|
||||
TNode<ExternalReference> limit_address =
|
||||
IsolateField(new_space ? IsolateFieldId::kNewAllocationInfoLimit
|
||||
: IsolateFieldId::kOldAllocationInfoLimit);
|
||||
|
||||
if (flags & AllocationFlag::kDoubleAlignment) {
|
||||
return AllocateRawDoubleAligned(size_in_bytes, flags,
|
||||
|
|
@ -2052,16 +2043,7 @@ TNode<Code> CodeStubAssembler::LoadCodeObjectFromJSDispatchTable(
|
|||
TNode<UintPtrT> shifted_value;
|
||||
if (JSDispatchEntry::kObjectPointerOffset == 0) {
|
||||
shifted_value =
|
||||
#if defined(__illumos__) && defined(V8_HOST_ARCH_64_BIT)
|
||||
// Pointers in illumos span both the low 2^47 range and the high 2^47 range
|
||||
// as well. Checking the high bit being set in illumos means all higher bits
|
||||
// need to be set to 1 after shifting right.
|
||||
// Use WordSar() so any high-bit check wouldn't be necessary.
|
||||
UncheckedCast<UintPtrT>(WordSar(UncheckedCast<IntPtrT>(value),
|
||||
IntPtrConstant(JSDispatchEntry::kObjectPointerShift)));
|
||||
#else
|
||||
WordShr(value, UintPtrConstant(JSDispatchEntry::kObjectPointerShift));
|
||||
#endif /* __illumos__ and 64-bit */
|
||||
} else {
|
||||
shifted_value = UintPtrAdd(
|
||||
WordShr(value, UintPtrConstant(JSDispatchEntry::kObjectPointerShift)),
|
||||
|
|
@ -3504,7 +3486,7 @@ TNode<Object> CodeStubAssembler::LoadFixedArrayBaseElementAsTagged(
|
|||
|
||||
BIND(&if_holey_double);
|
||||
{
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
Label if_undefined(this);
|
||||
TNode<Float64T> float_value = LoadFixedDoubleArrayElement(
|
||||
CAST(elements), index, &if_undefined, if_hole);
|
||||
|
|
@ -3520,7 +3502,7 @@ TNode<Object> CodeStubAssembler::LoadFixedArrayBaseElementAsTagged(
|
|||
var_result = AllocateHeapNumberWithValue(
|
||||
LoadFixedDoubleArrayElement(CAST(elements), index, nullptr, if_hole));
|
||||
Goto(&done);
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
}
|
||||
|
||||
BIND(&if_dictionary);
|
||||
|
|
@ -3550,7 +3532,7 @@ TNode<BoolT> CodeStubAssembler::IsDoubleHole(TNode<Object> base,
|
|||
}
|
||||
}
|
||||
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
TNode<BoolT> CodeStubAssembler::IsDoubleUndefined(TNode<Object> base,
|
||||
TNode<IntPtrT> offset) {
|
||||
// TODO(ishell): Compare only the upper part for the hole once the
|
||||
|
|
@ -3576,9 +3558,9 @@ TNode<BoolT> CodeStubAssembler::IsDoubleUndefined(TNode<Float64T> value) {
|
|||
return Word32Equal(bits_upper, Int32Constant(kUndefinedNanUpper32));
|
||||
}
|
||||
}
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
TNode<Float64T> CodeStubAssembler::LoadDoubleWithUndefinedAndHoleCheck(
|
||||
TNode<Object> base, TNode<IntPtrT> offset, Label* if_undefined,
|
||||
Label* if_hole, MachineType machine_type) {
|
||||
|
|
@ -3607,7 +3589,7 @@ TNode<Float64T> CodeStubAssembler::LoadDoubleWithUndefinedAndHoleCheck(
|
|||
}
|
||||
return UncheckedCast<Float64T>(Load(machine_type, base, offset));
|
||||
}
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
|
||||
TNode<ScopeInfo> CodeStubAssembler::LoadScopeInfo(TNode<Context> context) {
|
||||
return CAST(LoadContextElementNoCell(context, Context::SCOPE_INFO_INDEX));
|
||||
|
|
@ -4312,7 +4294,7 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
|
|||
return var_tagged_length.value();
|
||||
}
|
||||
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind, Label* bailout,
|
||||
TNode<FixedArrayBase> elements,
|
||||
TNode<BInt> index,
|
||||
|
|
@ -5966,7 +5948,7 @@ void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind,
|
|||
LoopUnrollingMode::kYes);
|
||||
} else {
|
||||
DCHECK_EQ(value_root_index, RootIndex::kUndefinedValue);
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
BuildFastArrayForEach(
|
||||
array, kind, from_index, to_index,
|
||||
[this](TNode<HeapObject> array, TNode<IntPtrT> offset) {
|
||||
|
|
@ -6019,7 +6001,7 @@ void CodeStubAssembler::StoreDoubleHole(TNode<HeapObject> object,
|
|||
}
|
||||
}
|
||||
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
void CodeStubAssembler::StoreDoubleUndefined(TNode<HeapObject> object,
|
||||
TNode<IntPtrT> offset) {
|
||||
TNode<UintPtrT> double_undefined =
|
||||
|
|
@ -6040,7 +6022,7 @@ void CodeStubAssembler::StoreDoubleUndefined(TNode<HeapObject> object,
|
|||
double_undefined);
|
||||
}
|
||||
}
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
|
||||
void CodeStubAssembler::StoreFixedDoubleArrayHole(TNode<FixedDoubleArray> array,
|
||||
TNode<IntPtrT> index) {
|
||||
|
|
@ -6054,7 +6036,7 @@ void CodeStubAssembler::StoreFixedDoubleArrayHole(TNode<FixedDoubleArray> array,
|
|||
StoreDoubleHole(array, offset);
|
||||
}
|
||||
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
template <typename TIndex>
|
||||
requires(std::is_same_v<TIndex, Smi> || std::is_same_v<TIndex, UintPtrT> ||
|
||||
std::is_same_v<TIndex, IntPtrT>)
|
||||
|
|
@ -6074,7 +6056,7 @@ void CodeStubAssembler::StoreFixedDoubleArrayUndefined(
|
|||
template V8_EXPORT_PRIVATE void
|
||||
CodeStubAssembler::StoreFixedDoubleArrayUndefined<Smi>(
|
||||
TNode<FixedDoubleArray>, TNode<Smi>);
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
|
||||
void CodeStubAssembler::FillFixedArrayWithSmiZero(ElementsKind kind,
|
||||
TNode<FixedArray> array,
|
||||
|
|
@ -6597,7 +6579,7 @@ TNode<Object> CodeStubAssembler::LoadElementAndPrepareForStore(
|
|||
CSA_DCHECK(this, IsFixedArrayWithKind(array, from_kind));
|
||||
DCHECK(!IsDoubleElementsKind(to_kind));
|
||||
if (IsDoubleElementsKind(from_kind)) {
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
Label if_undefined(this);
|
||||
Label done(this);
|
||||
TVARIABLE(Object, result);
|
||||
|
|
@ -6619,7 +6601,7 @@ TNode<Object> CodeStubAssembler::LoadElementAndPrepareForStore(
|
|||
TNode<Float64T> value = LoadDoubleWithUndefinedAndHoleCheck(
|
||||
array, offset, nullptr, if_hole, MachineType::Float64());
|
||||
return AllocateHeapNumberWithValue(value);
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
} else {
|
||||
TNode<Object> value = Load<Object>(array, offset);
|
||||
if (if_hole) {
|
||||
|
|
@ -6798,18 +6780,18 @@ TNode<IntPtrT> CodeStubAssembler::TryTaggedToInt32AsIntPtr(
|
|||
|
||||
TNode<Float64T> CodeStubAssembler::TryTaggedToFloat64(
|
||||
TNode<Object> value,
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
Label* if_valueisundefined,
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
Label* if_valueisnotnumber) {
|
||||
return Select<Float64T>(
|
||||
TaggedIsSmi(value), [&]() { return SmiToFloat64(CAST(value)); },
|
||||
[&]() {
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
if (if_valueisundefined) {
|
||||
GotoIf(IsUndefined(value), if_valueisundefined);
|
||||
}
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
GotoIfNot(IsHeapNumber(CAST(value)), if_valueisnotnumber);
|
||||
return LoadHeapNumberValue(CAST(value));
|
||||
});
|
||||
|
|
@ -6832,9 +6814,9 @@ TNode<Float64T> CodeStubAssembler::TruncateTaggedToFloat64(
|
|||
// Convert {value} to Float64 if it is a number and convert it to a number
|
||||
// otherwise.
|
||||
var_result = TryTaggedToFloat64(value,
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
nullptr,
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
&if_valueisnotnumber);
|
||||
Goto(&done_loop);
|
||||
|
||||
|
|
@ -14193,7 +14175,7 @@ void CodeStubAssembler::EmitElementStore(
|
|||
if (IsSmiElementsKind(elements_kind)) {
|
||||
GotoIfNot(TaggedIsSmi(value), bailout);
|
||||
} else if (IsDoubleElementsKind(elements_kind)) {
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
Label float_done(this), is_undefined(this);
|
||||
TVARIABLE(Float64T, float_var);
|
||||
TVARIABLE(BoolT, float_is_undefined_var, BoolConstant(false));
|
||||
|
|
@ -14219,7 +14201,7 @@ void CodeStubAssembler::EmitElementStore(
|
|||
float_is_undefined_value = float_is_undefined_var.value();
|
||||
#else
|
||||
float_value = TryTaggedToFloat64(value, bailout);
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
}
|
||||
|
||||
TNode<Smi> smi_length = Select<Smi>(
|
||||
|
|
@ -14267,12 +14249,12 @@ void CodeStubAssembler::EmitElementStore(
|
|||
StoreElement(elements, elements_kind, intptr_key, float_value.value());
|
||||
Goto(&store_done);
|
||||
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
Bind(&store_undefined);
|
||||
StoreFixedDoubleArrayUndefined(
|
||||
TNode<FixedDoubleArray>::UncheckedCast(elements), intptr_key);
|
||||
Goto(&store_done);
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
|
||||
Bind(&store_done);
|
||||
} else {
|
||||
|
|
@ -14419,8 +14401,8 @@ void CodeStubAssembler::TrapAllocationMemento(TNode<JSObject> object,
|
|||
Label no_memento_found(this);
|
||||
Label top_check(this), map_check(this);
|
||||
|
||||
TNode<ExternalReference> new_space_top_address = ExternalConstant(
|
||||
ExternalReference::new_space_allocation_top_address(isolate()));
|
||||
TNode<ExternalReference> new_space_top_address =
|
||||
IsolateField(IsolateFieldId::kNewAllocationInfoTop);
|
||||
const int kMementoMapOffset =
|
||||
ALIGN_TO_ALLOCATION_ALIGNMENT(JSArray::kHeaderSize);
|
||||
const int kMementoLastWordOffset =
|
||||
|
|
@ -17239,7 +17221,7 @@ void CodeStubAssembler::GotoIfNotNumber(TNode<Object> input,
|
|||
BIND(&is_number);
|
||||
}
|
||||
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
void CodeStubAssembler::GotoIfNotNumberOrUndefined(
|
||||
TNode<Object> input, Label* is_not_number_or_undefined) {
|
||||
Label is_number_or_undefined(this);
|
||||
|
|
@ -17255,7 +17237,7 @@ void CodeStubAssembler::GotoIfNumberOrUndefined(TNode<Object> input,
|
|||
GotoIf(IsHeapNumber(CAST(input)), is_number_or_undefined);
|
||||
GotoIf(IsUndefined(input), is_number_or_undefined);
|
||||
}
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
|
||||
void CodeStubAssembler::GotoIfNumber(TNode<Object> input, Label* is_number) {
|
||||
GotoIf(TaggedIsSmi(input), is_number);
|
||||
|
|
@ -17394,6 +17376,12 @@ TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResultForEntry(
|
|||
return CAST(result);
|
||||
}
|
||||
|
||||
TNode<Object> CodeStubAssembler::GetResultValueForHole(TNode<Object> value) {
|
||||
return Select<Object>(
|
||||
IsTheHole(value), [this] { return UndefinedConstant(); },
|
||||
[&] { return value; });
|
||||
}
|
||||
|
||||
std::pair<TNode<Object>, TNode<Object>> CodeStubAssembler::CallIteratorNext(
|
||||
TNode<Object> iterator, TNode<Object> next_method, TNode<Context> context) {
|
||||
Label callable(this), not_callable(this, Label::kDeferred);
|
||||
|
|
@ -17409,7 +17397,7 @@ std::pair<TNode<Object>, TNode<Object>> CodeStubAssembler::CallIteratorNext(
|
|||
Call(context, next_method, ConvertReceiverMode::kAny, CAST(iterator));
|
||||
|
||||
Label if_js_receiver(this), if_not_js_receiver(this, Label::kDeferred);
|
||||
Branch(IsJSReceiver(CAST(result)), &if_js_receiver, &if_not_js_receiver);
|
||||
BranchIfJSReceiver(result, &if_js_receiver, &if_not_js_receiver);
|
||||
|
||||
BIND(&if_not_js_receiver);
|
||||
{
|
||||
|
|
@ -17498,7 +17486,7 @@ ForOfNextResult CodeStubAssembler::ForOfNextHelper(TNode<Context> context,
|
|||
BIND(&load_entry);
|
||||
{
|
||||
var_element_value = AllocateJSIteratorResultValueForEntry(
|
||||
context, smi_index, var_element_value.value());
|
||||
context, smi_index, GetResultValueForHole(var_element_value.value()));
|
||||
Goto(&element_value_resolved);
|
||||
}
|
||||
|
||||
|
|
@ -17507,7 +17495,7 @@ ForOfNextResult CodeStubAssembler::ForOfNextHelper(TNode<Context> context,
|
|||
StoreObjectFieldNoWriteBarrier(array_iterator,
|
||||
JSArrayIterator::kNextIndexOffset,
|
||||
SmiAdd(smi_index, SmiConstant(1)));
|
||||
var_value = var_element_value;
|
||||
var_value = GetResultValueForHole(var_element_value.value());
|
||||
var_done = FalseConstant();
|
||||
Goto(&return_result);
|
||||
}
|
||||
|
|
@ -18599,19 +18587,56 @@ void CodeStubAssembler::PrintToStream(const char* s, int stream) {
|
|||
StringConstant(formatted.c_str()), SmiConstant(stream));
|
||||
}
|
||||
|
||||
void CodeStubAssembler::Print(TNode<String> prefix, TNode<Object> value) {
|
||||
std::array<TNode<Object>, 4> chunks{value, SmiConstant(0), SmiConstant(0),
|
||||
SmiConstant(0)};
|
||||
PrintToStream(prefix, DebugPrintValueType::kTagged, chunks, fileno(stdout));
|
||||
}
|
||||
|
||||
void CodeStubAssembler::Print(const char* prefix,
|
||||
TNode<MaybeObject> tagged_value) {
|
||||
PrintToStream(prefix, tagged_value, fileno(stdout));
|
||||
}
|
||||
|
||||
void CodeStubAssembler::Print(TNode<String> prefix, TNode<Uint32T> value) {
|
||||
auto chunks = EncodeValueForDebugPrint(value);
|
||||
PrintToStream(prefix, DebugPrintValueType::kWord32, chunks, fileno(stdout));
|
||||
}
|
||||
|
||||
void CodeStubAssembler::Print(const char* prefix, TNode<Uint32T> value) {
|
||||
PrintToStream(prefix, value, fileno(stdout));
|
||||
auto chunks = EncodeValueForDebugPrint(value);
|
||||
PrintToStream(prefix, DebugPrintValueType::kWord32, chunks, fileno(stdout));
|
||||
}
|
||||
|
||||
void CodeStubAssembler::Print(TNode<String> prefix, TNode<Uint64T> value) {
|
||||
auto chunks = EncodeValueForDebugPrint(value);
|
||||
PrintToStream(prefix, DebugPrintValueType::kWord64, chunks, fileno(stdout));
|
||||
}
|
||||
|
||||
void CodeStubAssembler::Print(const char* prefix, TNode<Uint64T> value) {
|
||||
auto chunks = EncodeValueForDebugPrint(value);
|
||||
PrintToStream(prefix, DebugPrintValueType::kWord64, chunks, fileno(stdout));
|
||||
}
|
||||
|
||||
void CodeStubAssembler::Print(const char* prefix, TNode<UintPtrT> value) {
|
||||
PrintToStream(prefix, value, fileno(stdout));
|
||||
}
|
||||
|
||||
void CodeStubAssembler::Print(TNode<String> prefix, TNode<Float32T> value) {
|
||||
auto chunks = EncodeValueForDebugPrint(value);
|
||||
PrintToStream(prefix, DebugPrintValueType::kFloat32, chunks, fileno(stdout));
|
||||
}
|
||||
|
||||
void CodeStubAssembler::Print(const char* prefix, TNode<Float32T> value) {
|
||||
auto chunks = EncodeValueForDebugPrint(value);
|
||||
PrintToStream(prefix, DebugPrintValueType::kFloat32, chunks, fileno(stdout));
|
||||
}
|
||||
|
||||
void CodeStubAssembler::Print(TNode<String> prefix, TNode<Float64T> value) {
|
||||
auto chunks = EncodeValueForDebugPrint(value);
|
||||
PrintToStream(prefix, DebugPrintValueType::kFloat64, chunks, fileno(stdout));
|
||||
}
|
||||
|
||||
void CodeStubAssembler::Print(const char* prefix, TNode<Float64T> value) {
|
||||
PrintToStream(prefix, value, fileno(stdout));
|
||||
}
|
||||
|
|
@ -18688,6 +18713,93 @@ void CodeStubAssembler::PrintToStream(const char* prefix, TNode<UintPtrT> value,
|
|||
chunks[2], chunks[1], chunks[0], SmiConstant(stream));
|
||||
}
|
||||
|
||||
std::array<TNode<Object>, 4> CodeStubAssembler::EncodeValueForDebugPrint(
|
||||
TNode<Word64T> value) {
|
||||
std::array<TNode<Object>, 4> result;
|
||||
|
||||
// We use 16 bit per chunk.
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
result[i] = SmiFromUint32(ReinterpretCast<Uint32T>(
|
||||
Word64And(ReinterpretCast<Uint64T>(value), Int64Constant(0xFFFF))));
|
||||
value = Word64Shr(value, Int64Constant(16));
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
std::array<TNode<Object>, 4> CodeStubAssembler::EncodeValueForDebugPrint(
|
||||
TNode<Word32T> value) {
|
||||
std::array<TNode<Object>, 4> result;
|
||||
|
||||
// We use 16 bit per chunk.
|
||||
result[0] = SmiFromUint32(ReinterpretCast<Uint32T>(Word32And(value, 0xFFFF)));
|
||||
result[1] = SmiFromUint32(ReinterpretCast<Uint32T>(
|
||||
Word32And(Word32Shr(value, Int32Constant(16)), 0xFFFF)));
|
||||
result[2] = SmiConstant(0);
|
||||
result[3] = SmiConstant(0);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
std::array<TNode<Object>, 4> CodeStubAssembler::EncodeValueForDebugPrint(
|
||||
TNode<Float32T> value) {
|
||||
std::array<TNode<Object>, 4> result;
|
||||
|
||||
TNode<Uint32T> low = BitcastFloat32ToInt32(value);
|
||||
|
||||
// We use 16 bit per chunk.
|
||||
result[0] = SmiFromUint32(ReinterpretCast<Uint32T>(Word32And(low, 0xFFFF)));
|
||||
result[1] = SmiFromUint32(ReinterpretCast<Uint32T>(
|
||||
Word32And(Word32Shr(low, Int32Constant(16)), 0xFFFF)));
|
||||
result[2] = SmiConstant(0);
|
||||
result[3] = SmiConstant(0);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
std::array<TNode<Object>, 4> CodeStubAssembler::EncodeValueForDebugPrint(
|
||||
TNode<Float64T> value) {
|
||||
std::array<TNode<Object>, 4> result;
|
||||
|
||||
// We use word32 extraction instead of `BitcastFloat64ToInt64` to support 32
|
||||
// bit architectures, too.
|
||||
TNode<Uint32T> high = Float64ExtractHighWord32(value);
|
||||
TNode<Uint32T> low = Float64ExtractLowWord32(value);
|
||||
|
||||
// We use 16 bit per chunk.
|
||||
result[0] = SmiFromUint32(ReinterpretCast<Uint32T>(Word32And(low, 0xFFFF)));
|
||||
result[1] = SmiFromUint32(ReinterpretCast<Uint32T>(
|
||||
Word32And(Word32Shr(low, Int32Constant(16)), 0xFFFF)));
|
||||
result[2] = SmiFromUint32(ReinterpretCast<Uint32T>(Word32And(high, 0xFFFF)));
|
||||
result[3] = SmiFromUint32(ReinterpretCast<Uint32T>(
|
||||
Word32And(Word32Shr(high, Int32Constant(16)), 0xFFFF)));
|
||||
|
||||
return result;
|
||||
}
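The EncodeValueForDebugPrint helpers above split a value into four 16-bit chunks so that each chunk fits in a Smi argument to Runtime::kDebugPrintGeneric; the float64 variant goes through the two 32-bit halves so the same code also works on 32-bit targets. A rough plain-C++ equivalent of just the bit manipulation (no Smi boxing or runtime call):

#include <array>
#include <cstdint>
#include <cstdio>
#include <cstring>

std::array<uint16_t, 4> EncodeFloat64(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // Bit-preserving reinterpret.
  uint32_t low = static_cast<uint32_t>(bits);         // Float64ExtractLowWord32
  uint32_t high = static_cast<uint32_t>(bits >> 32);  // Float64ExtractHighWord32
  // 16 bits per chunk, low chunk first, as in the CSA helpers above.
  return {static_cast<uint16_t>(low & 0xFFFF),
          static_cast<uint16_t>((low >> 16) & 0xFFFF),
          static_cast<uint16_t>(high & 0xFFFF),
          static_cast<uint16_t>((high >> 16) & 0xFFFF)};
}

int main() {
  std::array<uint16_t, 4> chunks = EncodeFloat64(1.0);  // 0x3FF0000000000000
  // Printed high chunk first this reads 3ff0 0000 0000 0000, matching the
  // "<bits 63-48>, ..., <bits 15-0>" argument order of the runtime call.
  std::printf("%04x %04x %04x %04x\n", chunks[3], chunks[2], chunks[1],
              chunks[0]);
  return 0;
}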
|
||||
|
||||
void CodeStubAssembler::PrintToStream(
|
||||
const char* prefix, DebugPrintValueType value_type,
|
||||
const std::array<TNode<Object>, 4>& chunks, int stream) {
|
||||
TNode<Object> prefix_string = UndefinedConstant();
|
||||
if (prefix != nullptr) {
|
||||
std::string formatted(prefix);
|
||||
formatted += ": ";
|
||||
prefix_string = HeapConstantNoHole(
|
||||
isolate()->factory()->InternalizeString(formatted.c_str()));
|
||||
}
|
||||
CallRuntime(Runtime::kDebugPrintGeneric, NoContextConstant(), prefix_string,
|
||||
SmiConstant(Smi::FromEnum(value_type)), chunks[3], chunks[2],
|
||||
chunks[1], chunks[0], SmiConstant(stream));
|
||||
}
|
||||
|
||||
void CodeStubAssembler::PrintToStream(
|
||||
TNode<String> prefix, DebugPrintValueType value_type,
|
||||
const std::array<TNode<Object>, 4>& chunks, int stream) {
|
||||
CallRuntime(Runtime::kDebugPrintGeneric, NoContextConstant(), prefix,
|
||||
SmiConstant(Smi::FromEnum(value_type)), chunks[3], chunks[2],
|
||||
chunks[1], chunks[0], SmiConstant(stream));
|
||||
}
|
||||
|
||||
void CodeStubAssembler::PrintToStream(const char* prefix, TNode<Float64T> value,
|
||||
int stream) {
|
||||
if (prefix != nullptr) {
|
||||
|
|
@ -18699,25 +18811,19 @@ void CodeStubAssembler::PrintToStream(const char* prefix, TNode<Float64T> value,
|
|||
HeapConstantNoHole(string), SmiConstant(stream));
|
||||
}
|
||||
|
||||
// We use word32 extraction instead of `BitcastFloat64ToInt64` to support 32
|
||||
// bit architectures, too.
|
||||
TNode<Uint32T> high = Float64ExtractHighWord32(value);
|
||||
TNode<Uint32T> low = Float64ExtractLowWord32(value);
|
||||
|
||||
// We use 16 bit per chunk.
|
||||
TNode<Smi> chunks[4];
|
||||
chunks[0] = SmiFromUint32(ReinterpretCast<Uint32T>(Word32And(low, 0xFFFF)));
|
||||
chunks[1] = SmiFromUint32(ReinterpretCast<Uint32T>(
|
||||
Word32And(Word32Shr(low, Int32Constant(16)), 0xFFFF)));
|
||||
chunks[2] = SmiFromUint32(ReinterpretCast<Uint32T>(Word32And(high, 0xFFFF)));
|
||||
chunks[3] = SmiFromUint32(ReinterpretCast<Uint32T>(
|
||||
Word32And(Word32Shr(high, Int32Constant(16)), 0xFFFF)));
|
||||
std::array<TNode<Object>, 4> chunks = EncodeValueForDebugPrint(value);
|
||||
|
||||
// Args are: <bits 63-48>, <bits 47-32>, <bits 31-16>, <bits 15-0>, stream.
|
||||
CallRuntime(Runtime::kDebugPrintFloat, NoContextConstant(), chunks[3],
|
||||
chunks[2], chunks[1], chunks[0], SmiConstant(stream));
|
||||
}
|
||||
|
||||
void CodeStubAssembler::PrintStringSimple(TNode<String> s) {
|
||||
std::array<TNode<Object>, 4> chunks{s, SmiConstant(0), SmiConstant(0),
|
||||
SmiConstant(0)};
|
||||
PrintToStream(nullptr, DebugPrintValueType::kTagged, chunks, fileno(stdout));
|
||||
}
|
||||
|
||||
IntegerLiteral CodeStubAssembler::ConstexprIntegerLiteralAdd(
|
||||
const IntegerLiteral& lhs, const IntegerLiteral& rhs) {
|
||||
return lhs + rhs;
|
||||
|
|
|
|||
42 deps/v8/src/codegen/code-stub-assembler.h (vendored)
|
|
@ -619,12 +619,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
|
|||
TNode<Number> NumberAdd(TNode<Number> a, TNode<Number> b);
|
||||
TNode<Number> NumberSub(TNode<Number> a, TNode<Number> b);
|
||||
void GotoIfNotNumber(TNode<Object> value, Label* is_not_number);
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
void GotoIfNumberOrUndefined(TNode<Object> value,
|
||||
Label* is_number_or_undefined);
|
||||
void GotoIfNotNumberOrUndefined(TNode<Object> value,
|
||||
Label* is_not_number_or_undefined);
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
void GotoIfNumber(TNode<Object> value, Label* is_number);
|
||||
TNode<Number> SmiToNumber(TNode<Smi> v) { return v; }
|
||||
|
||||
|
|
@ -1650,12 +1650,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
|
|||
Label* if_undefined, Label* if_hole);
|
||||
|
||||
TNode<BoolT> IsDoubleHole(TNode<Object> base, TNode<IntPtrT> offset);
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
TNode<BoolT> IsDoubleUndefined(TNode<Object> base, TNode<IntPtrT> offset);
|
||||
TNode<BoolT> IsDoubleUndefined(TNode<Float64T> value);
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
TNode<Float64T> LoadDoubleWithUndefinedAndHoleCheck(
|
||||
TNode<Object> base, TNode<IntPtrT> offset, Label* if_undefined,
|
||||
Label* if_hole, MachineType machine_type = MachineType::Float64());
|
||||
|
|
@ -1666,7 +1666,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
|
|||
TNode<Float64T> LoadDoubleWithUndefinedAndHoleCheck(
|
||||
TNode<Object> base, TNode<IntPtrT> offset, Label* if_undefined,
|
||||
Label* if_hole, MachineType machine_type = MachineType::Float64());
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
TNode<Numeric> LoadFixedTypedArrayElementAsTagged(TNode<RawPtrT> data_pointer,
|
||||
TNode<UintPtrT> index,
|
||||
ElementsKind elements_kind);
|
||||
|
|
@ -1982,18 +1982,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
|
|||
TNode<Float64T> value, CheckBounds check_bounds = CheckBounds::kAlways);
|
||||
|
||||
void StoreDoubleHole(TNode<HeapObject> object, TNode<IntPtrT> offset);
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
void StoreDoubleUndefined(TNode<HeapObject> object, TNode<IntPtrT> offset);
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
void StoreFixedDoubleArrayHole(TNode<FixedDoubleArray> array,
|
||||
TNode<IntPtrT> index);
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
template <typename TIndex>
|
||||
requires(std::is_same_v<TIndex, Smi> || std::is_same_v<TIndex, UintPtrT> ||
|
||||
std::is_same_v<TIndex, IntPtrT>)
|
||||
void StoreFixedDoubleArrayUndefined(TNode<FixedDoubleArray> array,
|
||||
TNode<TIndex> index);
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
void StoreFeedbackVectorSlot(
|
||||
TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
|
||||
TNode<AnyTaggedT> value,
|
||||
|
|
@ -2307,7 +2307,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
|
|||
TNode<JSObject> AllocateJSIteratorResultForEntry(TNode<Context> context,
|
||||
TNode<Object> key,
|
||||
TNode<Object> value);
|
||||
|
||||
TNode<Object> GetResultValueForHole(TNode<Object> value);
|
||||
// Calls the next method of an iterator and returns the pair of
|
||||
// {value, done} properties of the result.
|
||||
std::pair<TNode<Object>, TNode<Object>> CallIteratorNext(
|
||||
|
|
@ -2663,9 +2663,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
|
|||
TNode<IntPtrT> TryTaggedToInt32AsIntPtr(TNode<Object> value,
|
||||
Label* if_not_possible);
|
||||
TNode<Float64T> TryTaggedToFloat64(TNode<Object> value,
|
||||
#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#ifdef V8_ENABLE_UNDEFINED_DOUBLE
|
||||
Label* if_valueisundefined,
|
||||
#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
|
||||
#endif // V8_ENABLE_UNDEFINED_DOUBLE
|
||||
Label* if_valueisnotnumber);
|
||||
TNode<Float64T> TruncateTaggedToFloat64(TNode<Context> context,
|
||||
TNode<Object> value);
|
||||
|
|
@ -4411,12 +4411,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
|
|||
|
||||
// Support for printf-style debugging
|
||||
void Print(const char* s);
|
||||
void Print(TNode<String>, TNode<Object> value);
|
||||
void Print(const char* prefix, TNode<MaybeObject> tagged_value);
|
||||
void Print(TNode<MaybeObject> tagged_value) {
|
||||
return Print(nullptr, tagged_value);
|
||||
}
|
||||
void Print(TNode<String> prefix, TNode<Uint32T> value);
|
||||
void Print(const char* prefix, TNode<Uint32T> value);
|
||||
void Print(TNode<String> prefix, TNode<Uint64T> value);
|
||||
void Print(const char* prefix, TNode<Uint64T> value);
|
||||
void Print(const char* prefix, TNode<UintPtrT> value);
|
||||
void Print(TNode<String> prefix, TNode<Float32T> value);
|
||||
void Print(const char* prefix, TNode<Float32T> value);
|
||||
void Print(TNode<String> prefix, TNode<Float64T> value);
|
||||
void Print(const char* prefix, TNode<Float64T> value);
|
||||
void PrintErr(const char* s);
|
||||
void PrintErr(const char* prefix, TNode<MaybeObject> tagged_value);
|
||||
|
|
@ -4429,6 +4436,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
|
|||
void PrintToStream(const char* prefix, TNode<Uint32T> value, int stream);
|
||||
void PrintToStream(const char* prefix, TNode<UintPtrT> value, int stream);
|
||||
void PrintToStream(const char* prefix, TNode<Float64T> value, int stream);
|
||||
std::array<TNode<Object>, 4> EncodeValueForDebugPrint(TNode<Word32T> value);
|
||||
std::array<TNode<Object>, 4> EncodeValueForDebugPrint(TNode<Word64T> value);
|
||||
std::array<TNode<Object>, 4> EncodeValueForDebugPrint(TNode<Float32T> value);
|
||||
std::array<TNode<Object>, 4> EncodeValueForDebugPrint(TNode<Float64T> value);
|
||||
void PrintToStream(const char* prefix, DebugPrintValueType value_type,
|
||||
const std::array<TNode<Object>, 4>& chunks, int stream);
|
||||
void PrintToStream(TNode<String> prefix, DebugPrintValueType value_type,
|
||||
const std::array<TNode<Object>, 4>& chunks, int stream);
|
||||
void PrintStringSimple(TNode<String> string);
|
||||
|
||||
template <class... TArgs>
|
||||
TNode<HeapObject> MakeTypeError(MessageTemplate message,
|
||||
|
|
|
|||
4 deps/v8/src/codegen/compilation-cache.cc (vendored)
|
|
@ -122,8 +122,8 @@ void CompilationCacheEvalOrScript::Clear() {
|
|||
}
|
||||
|
||||
void CompilationCacheRegExp::Clear() {
|
||||
MemsetPointer(reinterpret_cast<Address*>(tables_),
|
||||
ReadOnlyRoots(isolate()).undefined_value().ptr(), kGenerations);
|
||||
MemsetPointer(FullObjectSlot(tables_),
|
||||
ReadOnlyRoots(isolate()).undefined_value(), kGenerations);
|
||||
}
|
||||
|
||||
void CompilationCacheEvalOrScript::Remove(
|
||||
|
|
|
|||
82 deps/v8/src/codegen/constant-pool-entry.h (vendored, new file)
@ -0,0 +1,82 @@
// Copyright 2025 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_CONSTANT_POOL_ENTRY_H_
#define V8_CODEGEN_CONSTANT_POOL_ENTRY_H_

#include "src/base/numbers/double.h"
#include "src/codegen/reloc-info.h"
#include "src/common/globals.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Constant pool support

class ConstantPoolEntry {
 public:
  ConstantPoolEntry() = default;
  ConstantPoolEntry(int position, intptr_t value, bool sharing_ok,
                    RelocInfo::Mode rmode = RelocInfo::NO_INFO)
      : position_(position),
        merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
        value_(value),
        rmode_(rmode) {}
  ConstantPoolEntry(int position, base::Double value,
                    RelocInfo::Mode rmode = RelocInfo::NO_INFO)
      : position_(position),
        merged_index_(SHARING_ALLOWED),
        value64_(value.AsUint64()),
        rmode_(rmode) {}

  int position() const { return position_; }
  bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
  bool is_merged() const { return merged_index_ >= 0; }
  int merged_index() const {
    DCHECK(is_merged());
    return merged_index_;
  }
  void set_merged_index(int index) {
    DCHECK(sharing_ok());
    merged_index_ = index;
    DCHECK(is_merged());
  }
  int offset() const {
    DCHECK_GE(merged_index_, 0);
    return merged_index_;
  }
  void set_offset(int offset) {
    DCHECK_GE(offset, 0);
    merged_index_ = offset;
  }
  intptr_t value() const { return value_; }
  uint64_t value64() const { return value64_; }
  RelocInfo::Mode rmode() const { return rmode_; }

  enum Type { INTPTR, DOUBLE, NUMBER_OF_TYPES };

  static int size(Type type) {
    return (type == INTPTR) ? kSystemPointerSize : kDoubleSize;
  }

  enum Access { REGULAR, OVERFLOWED };

 private:
  int position_;
  int merged_index_;
  union {
    intptr_t value_;
    uint64_t value64_;
  };
  // TODO(leszeks): The way we use this, it could probably be packed into
  // merged_index_ if size is a concern.
  RelocInfo::Mode rmode_;
  enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
};

}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_CONSTANT_POOL_ENTRY_H_
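In the new header above, merged_index_ doubles as a sharing flag and, after pool layout, as the entry's offset; the sentinels at the bottom of the class encode the two unmerged states. A small standalone sketch of those state transitions (only the sentinel values are taken from the header, everything else is made up):

#include <cassert>

constexpr int kSharingProhibited = -2;  // SHARING_PROHIBITED
constexpr int kSharingAllowed = -1;     // SHARING_ALLOWED

struct EntryState {
  int merged_index;
  bool sharing_ok() const { return merged_index != kSharingProhibited; }
  bool is_merged() const { return merged_index >= 0; }
};

int main() {
  // A sharable entry starts out unmerged...
  EntryState entry{kSharingAllowed};
  assert(entry.sharing_ok() && !entry.is_merged());

  // ...and becomes merged once it records the index of an earlier identical
  // entry; after pool layout the same field is reused for the byte offset.
  entry.merged_index = 3;
  assert(entry.is_merged());

  // A non-sharable entry can never be merged.
  EntryState unique{kSharingProhibited};
  assert(!unique.sharing_ok() && !unique.is_merged());
  return 0;
}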
724 deps/v8/src/codegen/constant-pool.cc (vendored)
|
|
@ -1,724 +0,0 @@
|
|||
// Copyright 2018 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "src/codegen/constant-pool.h"
|
||||
#include "src/codegen/assembler-arch.h"
|
||||
#include "src/codegen/assembler-inl.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
#if defined(V8_TARGET_ARCH_PPC64)
|
||||
|
||||
ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
|
||||
int double_reach_bits) {
|
||||
info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
|
||||
info_[ConstantPoolEntry::INTPTR].regular_reach_bits = ptr_reach_bits;
|
||||
info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits;
|
||||
}
|
||||
|
||||
ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
|
||||
ConstantPoolEntry::Type type) const {
|
||||
const PerTypeEntryInfo& info = info_[type];
|
||||
|
||||
if (info.overflow()) return ConstantPoolEntry::OVERFLOWED;
|
||||
|
||||
int dbl_count = info_[ConstantPoolEntry::DOUBLE].regular_count;
|
||||
int dbl_offset = dbl_count * kDoubleSize;
|
||||
int ptr_count = info_[ConstantPoolEntry::INTPTR].regular_count;
|
||||
int ptr_offset = ptr_count * kSystemPointerSize + dbl_offset;
|
||||
|
||||
if (type == ConstantPoolEntry::DOUBLE) {
|
||||
// Double overflow detection must take into account the reach for both types
|
||||
int ptr_reach_bits = info_[ConstantPoolEntry::INTPTR].regular_reach_bits;
|
||||
if (!is_uintn(dbl_offset, info.regular_reach_bits) ||
|
||||
(ptr_count > 0 &&
|
||||
!is_uintn(ptr_offset + kDoubleSize - kSystemPointerSize,
|
||||
ptr_reach_bits))) {
|
||||
return ConstantPoolEntry::OVERFLOWED;
|
||||
}
|
||||
} else {
|
||||
DCHECK(type == ConstantPoolEntry::INTPTR);
|
||||
if (!is_uintn(ptr_offset, info.regular_reach_bits)) {
|
||||
return ConstantPoolEntry::OVERFLOWED;
|
||||
}
|
||||
}
|
||||
|
||||
return ConstantPoolEntry::REGULAR;
|
||||
}
|
||||
|
||||
ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
|
||||
ConstantPoolEntry* entry, ConstantPoolEntry::Type type) {
|
||||
DCHECK(!emitted_label_.is_bound());
|
||||
PerTypeEntryInfo& info = info_[type];
|
||||
const int entry_size = ConstantPoolEntry::size(type);
|
||||
bool merged = false;
|
||||
|
||||
if (entry->sharing_ok()) {
|
||||
// Try to merge entries
|
||||
std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin();
|
||||
int end = static_cast<int>(info.shared_entries.size());
|
||||
for (int i = 0; i < end; i++, it++) {
|
||||
if ((entry_size == kSystemPointerSize)
|
||||
? entry->value() == it->value()
|
||||
: entry->value64() == it->value64()) {
|
||||
// Merge with found entry.
|
||||
entry->set_merged_index(i);
|
||||
merged = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// By definition, merged entries have regular access.
|
||||
DCHECK(!merged || entry->merged_index() < info.regular_count);
|
||||
ConstantPoolEntry::Access access =
|
||||
(merged ? ConstantPoolEntry::REGULAR : NextAccess(type));
|
||||
|
||||
// Enforce an upper bound on search time by limiting the search to
|
||||
// unique sharable entries which fit in the regular section.
|
||||
if (entry->sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
|
||||
info.shared_entries.push_back(*entry);
|
||||
} else {
|
||||
info.entries.push_back(*entry);
|
||||
}
|
||||
|
||||
// We're done if we found a match or have already triggered the
|
||||
// overflow state.
|
||||
if (merged || info.overflow()) return access;
|
||||
|
||||
if (access == ConstantPoolEntry::REGULAR) {
|
||||
info.regular_count++;
|
||||
} else {
|
||||
info.overflow_start = static_cast<int>(info.entries.size()) - 1;
|
||||
}
|
||||
|
||||
return access;
|
||||
}

void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
                                            ConstantPoolEntry::Type type) {
  PerTypeEntryInfo& info = info_[type];
  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
  const int entry_size = ConstantPoolEntry::size(type);
  int base = emitted_label_.pos();
  DCHECK_GT(base, 0);
  int shared_end = static_cast<int>(shared_entries.size());
  std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin();
  for (int i = 0; i < shared_end; i++, shared_it++) {
    int offset = assm->pc_offset() - base;
    shared_it->set_offset(offset);  // Save offset for merged entries.
    if (entry_size == kSystemPointerSize) {
      assm->dp(shared_it->value());
    } else {
      assm->dq(shared_it->value64());
    }
    DCHECK(is_uintn(offset, info.regular_reach_bits));

    // Patch load sequence with correct offset.
    assm->PatchConstantPoolAccessInstruction(shared_it->position(), offset,
                                             ConstantPoolEntry::REGULAR, type);
  }
}

void ConstantPoolBuilder::EmitGroup(Assembler* assm,
                                    ConstantPoolEntry::Access access,
                                    ConstantPoolEntry::Type type) {
  PerTypeEntryInfo& info = info_[type];
  const bool overflow = info.overflow();
  std::vector<ConstantPoolEntry>& entries = info.entries;
  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
  const int entry_size = ConstantPoolEntry::size(type);
  int base = emitted_label_.pos();
  DCHECK_GT(base, 0);
  int begin;
  int end;

  if (access == ConstantPoolEntry::REGULAR) {
    // Emit any shared entries first
    EmitSharedEntries(assm, type);
  }

  if (access == ConstantPoolEntry::REGULAR) {
    begin = 0;
    end = overflow ? info.overflow_start : static_cast<int>(entries.size());
  } else {
    DCHECK(access == ConstantPoolEntry::OVERFLOWED);
    if (!overflow) return;
    begin = info.overflow_start;
    end = static_cast<int>(entries.size());
  }

  std::vector<ConstantPoolEntry>::iterator it = entries.begin();
  if (begin > 0) std::advance(it, begin);
  for (int i = begin; i < end; i++, it++) {
    // Update constant pool if necessary and get the entry's offset.
    int offset;
    ConstantPoolEntry::Access entry_access;
    if (!it->is_merged()) {
      // Emit new entry
      offset = assm->pc_offset() - base;
      entry_access = access;
      if (entry_size == kSystemPointerSize) {
        assm->dp(it->value());
      } else {
        assm->dq(it->value64());
      }
    } else {
      // Retrieve offset from shared entry.
      offset = shared_entries[it->merged_index()].offset();
      entry_access = ConstantPoolEntry::REGULAR;
    }

    DCHECK(entry_access == ConstantPoolEntry::OVERFLOWED ||
           is_uintn(offset, info.regular_reach_bits));

    // Patch load sequence with correct offset.
    assm->PatchConstantPoolAccessInstruction(it->position(), offset,
                                             entry_access, type);
  }
}

// Emit and return size of pool.
int ConstantPoolBuilder::Emit(Assembler* assm) {
  bool emitted = emitted_label_.is_bound();
  bool empty = IsEmpty();

  if (!emitted) {
    // Mark start of constant pool. Align if necessary.
    if (!empty) assm->DataAlign(kDoubleSize);
    assm->bind(&emitted_label_);
    if (!empty) {
      // Emit in groups based on access and type.
      // Emit doubles first for alignment purposes.
      EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE);
      EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR);
      if (info_[ConstantPoolEntry::DOUBLE].overflow()) {
        assm->DataAlign(kDoubleSize);
        EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
                  ConstantPoolEntry::DOUBLE);
      }
      if (info_[ConstantPoolEntry::INTPTR].overflow()) {
        EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
                  ConstantPoolEntry::INTPTR);
      }
    }
  }

  return !empty ? (assm->pc_offset() - emitted_label_.pos()) : 0;
}

#endif  // defined(V8_TARGET_ARCH_PPC64)

#if defined(V8_TARGET_ARCH_ARM64)

// Constant Pool.

ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }

RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  CHECK(key.is_value32());
  return RecordKey(std::move(key), assm_->pc_offset());
}

RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  CHECK(!key.is_value32());
  return RecordKey(std::move(key), assm_->pc_offset());
}

RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
  RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
  if (write_reloc_info == RelocInfoStatus::kMustRecord) {
    if (key.is_value32()) {
      if (entry32_count_ == 0) first_use_32_ = offset;
      ++entry32_count_;
    } else {
      if (entry64_count_ == 0) first_use_64_ = offset;
      ++entry64_count_;
    }
  }
  entries_.insert(std::make_pair(key, offset));

  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
    // Request constant pool emission after the next instruction.
    SetNextCheckIn(1);
  }

  return write_reloc_info;
}

RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
    const ConstantPoolKey& key) {
  if (key.AllowsDeduplication()) {
    auto existing = entries_.find(key);
    if (existing != entries_.end()) {
      return RelocInfoStatus::kMustOmitForDuplicate;
    }
  }
  return RelocInfoStatus::kMustRecord;
}

void ConstantPool::EmitAndClear(Jump require_jump) {
  DCHECK(!IsBlocked());
  // Prevent recursive pool emission.
  Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip);
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
  int size = ComputeSize(require_jump, require_alignment);
  Label size_check;
  assm_->bind(&size_check);
  assm_->RecordConstPool(size);

  // Emit the constant pool. It is preceded by an optional branch if
  // {require_jump} and a header which will:
  // 1) Encode the size of the constant pool, for use by the disassembler.
  // 2) Terminate the program, to try to prevent execution from accidentally
  //    flowing into the constant pool.
  // 3) align the 64bit pool entries to 64-bit.
  // TODO(all): Make the alignment part less fragile. Currently code is
  // allocated as a byte array so there are no guarantees the alignment will
  // be preserved on compaction. Currently it works as allocation seems to be
  // 64-bit aligned.

  Label after_pool;
  if (require_jump == Jump::kRequired) assm_->b(&after_pool);

  assm_->RecordComment("[ Constant Pool");
  EmitPrologue(require_alignment);
  if (require_alignment == Alignment::kRequired) assm_->Align(kInt64Size);
  EmitEntries();
  assm_->RecordComment("]");

  if (after_pool.is_linked()) assm_->bind(&after_pool);

  DCHECK_EQ(assm_->SizeOfCodeGeneratedSince(&size_check), size);
  Clear();
}

void ConstantPool::Clear() {
  entries_.clear();
  first_use_32_ = -1;
  first_use_64_ = -1;
  entry32_count_ = 0;
  entry64_count_ = 0;
  next_check_ = 0;
  old_next_check_ = 0;
}

void ConstantPool::StartBlock() {
  if (blocked_nesting_ == 0) {
    // Prevent constant pool checks from happening by setting the next check to
    // the biggest possible offset.
    old_next_check_ = next_check_;
    next_check_ = kMaxInt;
  }
  ++blocked_nesting_;
}

void ConstantPool::EndBlock() {
  --blocked_nesting_;
  if (blocked_nesting_ == 0) {
    DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));
    // Restore the old next_check_ value if it's less than the current
    // next_check_. This accounts for any attempt to emit pools sooner whilst
    // pools were blocked.
    next_check_ = std::min(next_check_, old_next_check_);
  }
}

bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }

void ConstantPool::SetNextCheckIn(size_t instructions) {
  next_check_ =
      assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);
}

void ConstantPool::EmitEntries() {
  for (auto iter = entries_.begin(); iter != entries_.end();) {
    DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8));
    auto range = entries_.equal_range(iter->first);
    bool shared = iter->first.AllowsDeduplication();
    for (auto it = range.first; it != range.second; ++it) {
      SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
      if (!shared) Emit(it->first);
    }
    if (shared) Emit(iter->first);
    iter = range.second;
  }
}

void ConstantPool::Emit(const ConstantPoolKey& key) {
  if (key.is_value32()) {
    assm_->dd(key.value32());
  } else {
    assm_->dq(key.value64());
  }
}

bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
  if (IsEmpty()) return false;
  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
    return true;
  }
  // We compute {dist32/64}, i.e. the distance from the first instruction
  // accessing a 32bit/64bit entry in the constant pool to any of the
  // 32bit/64bit constant pool entries, respectively. This is required because
  // we do not guarantee that entries are emitted in order of reference, i.e. it
  // is possible that the entry with the earliest reference is emitted last.
  // The constant pool should be emitted if either of the following is true:
  // (A) {dist32/64} will be out of range at the next check in.
  // (B) Emission can be done behind an unconditional branch and {dist32/64}
  //     exceeds {kOpportunityDist*}.
  // (C) {dist32/64} exceeds the desired approximate distance to the pool.
  int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
  size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  if (Entry64Count() != 0) {
    // The 64-bit constants are always emitted before the 32-bit constants, so
    // we subtract the size of the 32-bit constants from {size}.
    size_t dist64 = pool_end_64 - first_use_64_;
    bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
    bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
      return true;
    }
  }
  if (Entry32Count() != 0) {
    size_t dist32 = pool_end_32 - first_use_32_;
    bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
    bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
      return true;
    }
  }
  return false;
}
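
A hedged, worked instance of criterion (A) above. The constants are made-up illustration values, not V8's real architecture-specific limits:

#include <cstddef>

// Illustration only: assume the pool, if emitted now, would end 800 bytes past
// the first load of a 64-bit entry, with a 128-byte check interval and a
// 1024-byte hard limit.
constexpr size_t kCheckInterval = 128;
constexpr size_t kMaxDistToPool64 = 1024;
constexpr size_t dist64 = 800;
// Waiting two more check intervals could push the load out of range
// (800 + 2 * 128 = 1056 >= 1024), so ShouldEmitNow() would return true here.
static_assert(dist64 + 2 * kCheckInterval >= kMaxDistToPool64,
              "criterion (A) fires: emit the pool now");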

int ConstantPool::ComputeSize(Jump require_jump,
                              Alignment require_alignment) const {
  int size_up_to_marker = PrologueSize(require_jump);
  int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
  size_t size_after_marker =
      Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size;
  return size_up_to_marker + static_cast<int>(size_after_marker);
}

Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
                                                       int pc_offset) const {
  int size_up_to_marker = PrologueSize(require_jump);
  if (Entry64Count() != 0 &&
      !IsAligned(pc_offset + size_up_to_marker, kInt64Size)) {
    return Alignment::kRequired;
  }
  return Alignment::kOmitted;
}

bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
  // Check that all entries are in range if the pool is emitted at {pc_offset}.
  // This ignores kPcLoadDelta (conservatively, since all offsets are positive),
  // and over-estimates the last entry's address with the pool's end.
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
  size_t pool_end_32 =
      pc_offset + ComputeSize(Jump::kRequired, require_alignment);
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  bool entries_in_range_32 =
      Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
  bool entries_in_range_64 =
      Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
  return entries_in_range_32 && entries_in_range_64;
}

ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
    : pool_(&assm->constpool_) {
  pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);
  pool_->StartBlock();
}

ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
    : pool_(&assm->constpool_) {
  DCHECK_EQ(check, PoolEmissionCheck::kSkip);
  pool_->StartBlock();
}

ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }

void ConstantPool::MaybeCheck() {
  if (assm_->pc_offset() >= next_check_) {
    Check(Emission::kIfNeeded, Jump::kRequired);
  }
}

#endif  // defined(V8_TARGET_ARCH_ARM64)
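
For orientation, a hypothetical usage sketch of the blocking mechanism defined above. The function name, the assembler placeholder, and the 64-byte margin are invented for the example; they are not taken from this commit:

void EmitContiguousSequence(Assembler* masm) {
  // Emit the pool first if fewer than 64 bytes could otherwise be generated
  // before a pool became due, then block pool emission for the scope.
  ConstantPool::BlockScope block_pools(masm, /*margin=*/64);
  // ... emit a fixed-size sequence that a constant pool must not interrupt ...
}  // ~BlockScope() unblocks emission and restores the pending check.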

#if defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_RISCV32)

// Constant Pool.

ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }

RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  CHECK(key.is_value32());
  return RecordKey(std::move(key), assm_->pc_offset());
}

RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  CHECK(!key.is_value32());
  return RecordKey(std::move(key), assm_->pc_offset());
}

RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
  RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
  if (write_reloc_info == RelocInfoStatus::kMustRecord) {
    if (key.is_value32()) {
      if (entry32_count_ == 0) first_use_32_ = offset;
      ++entry32_count_;
    } else {
      if (entry64_count_ == 0) first_use_64_ = offset;
      ++entry64_count_;
    }
  }
  entries_.insert(std::make_pair(key, offset));

  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
    // Request constant pool emission after the next instruction.
    SetNextCheckIn(1);
  }

  return write_reloc_info;
}

RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
    const ConstantPoolKey& key) {
  if (key.AllowsDeduplication()) {
    auto existing = entries_.find(key);
    if (existing != entries_.end()) {
      return RelocInfoStatus::kMustOmitForDuplicate;
    }
  }
  return RelocInfoStatus::kMustRecord;
}

void ConstantPool::EmitAndClear(Jump require_jump) {
  DCHECK(!IsBlocked());
  // Prevent recursive pool emission. We conservatively assume that we will
  // have to add padding for alignment, so the margin is guaranteed to be
  // at least as large as the actual size of the constant pool.
  int margin = ComputeSize(require_jump, Alignment::kRequired);
  Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip,
                                         margin);

  // The pc offset may have changed as a result of blocking pools. We can
  // now go ahead and compute the required alignment and the correct size.
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
  int size = ComputeSize(require_jump, require_alignment);
  DCHECK_LE(size, margin);
  Label size_check;
  assm_->bind(&size_check);
  assm_->RecordConstPool(size);

  // Emit the constant pool. It is preceded by an optional branch if
  // {require_jump} and a header which will:
  // 1) Encode the size of the constant pool, for use by the disassembler.
  // 2) Terminate the program, to try to prevent execution from accidentally
  //    flowing into the constant pool.
  // 3) align the 64bit pool entries to 64-bit.
  // TODO(all): Make the alignment part less fragile. Currently code is
  // allocated as a byte array so there are no guarantees the alignment will
  // be preserved on compaction. Currently it works as allocation seems to be
  // 64-bit aligned.
  DEBUG_PRINTF("\tConstant Pool start\n")
  Label after_pool;
  if (require_jump == Jump::kRequired) assm_->b(&after_pool);

  assm_->RecordComment("[ Constant Pool");

  EmitPrologue(require_alignment);
  if (require_alignment == Alignment::kRequired) assm_->DataAlign(kInt64Size);
  EmitEntries();
  assm_->RecordComment("]");
  assm_->bind(&after_pool);
  DEBUG_PRINTF("\tConstant Pool end\n")

  DCHECK_LE(assm_->SizeOfCodeGeneratedSince(&size_check) - size, 3);
  Clear();
}

void ConstantPool::Clear() {
  entries_.clear();
  first_use_32_ = -1;
  first_use_64_ = -1;
  entry32_count_ = 0;
  entry64_count_ = 0;
  next_check_ = 0;
}

void ConstantPool::StartBlock() {
  if (blocked_nesting_ == 0) {
    // Prevent constant pool checks from happening by setting the next check to
    // the biggest possible offset.
    next_check_ = kMaxInt;
  }
  ++blocked_nesting_;
}

void ConstantPool::EndBlock() {
  --blocked_nesting_;
  if (blocked_nesting_ == 0) {
    DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));
    // Make sure a check happens quickly after getting unblocked.
    next_check_ = 0;
  }
}

bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }

void ConstantPool::SetNextCheckIn(size_t instructions) {
  next_check_ =
      assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);
}

void ConstantPool::EmitEntries() {
  for (auto iter = entries_.begin(); iter != entries_.end();) {
    DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8));
    auto range = entries_.equal_range(iter->first);
    bool shared = iter->first.AllowsDeduplication();
    for (auto it = range.first; it != range.second; ++it) {
      SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
      if (!shared) Emit(it->first);
    }
    if (shared) Emit(iter->first);
    iter = range.second;
  }
}

void ConstantPool::Emit(const ConstantPoolKey& key) {
  if (key.is_value32()) {
    assm_->dd(key.value32());
  } else {
    assm_->dq(key.value64());
  }
}

bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
  if (IsEmpty()) return false;
  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
    return true;
  }
  // We compute {dist32/64}, i.e. the distance from the first instruction
  // accessing a 32bit/64bit entry in the constant pool to any of the
  // 32bit/64bit constant pool entries, respectively. This is required because
  // we do not guarantee that entries are emitted in order of reference, i.e. it
  // is possible that the entry with the earliest reference is emitted last.
  // The constant pool should be emitted if either of the following is true:
  // (A) {dist32/64} will be out of range at the next check in.
  // (B) Emission can be done behind an unconditional branch and {dist32/64}
  //     exceeds {kOpportunityDist*}.
  // (C) {dist32/64} exceeds the desired approximate distance to the pool.
  int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
  size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  if (Entry64Count() != 0) {
    // The 64-bit constants are always emitted before the 32-bit constants, so
    // we subtract the size of the 32-bit constants from {size}.
    size_t dist64 = pool_end_64 - first_use_64_;
    bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
    bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
      return true;
    }
  }
  if (Entry32Count() != 0) {
    size_t dist32 = pool_end_32 - first_use_32_;
    bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
    bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
      return true;
    }
  }
  return false;
}

int ConstantPool::ComputeSize(Jump require_jump,
                              Alignment require_alignment) const {
  int size_up_to_marker = PrologueSize(require_jump);
  int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
  size_t size_after_marker =
      Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size;
  return size_up_to_marker + static_cast<int>(size_after_marker);
}

Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
                                                       int pc_offset) const {
  int size_up_to_marker = PrologueSize(require_jump);
  if (Entry64Count() != 0 &&
      !IsAligned(pc_offset + size_up_to_marker, kInt64Size)) {
    return Alignment::kRequired;
  }
  return Alignment::kOmitted;
}

bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
  // Check that all entries are in range if the pool is emitted at {pc_offset}.
  // This ignores kPcLoadDelta (conservatively, since all offsets are positive),
  // and over-estimates the last entry's address with the pool's end.
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
  size_t pool_end_32 =
      pc_offset + ComputeSize(Jump::kRequired, require_alignment);
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  bool entries_in_range_32 =
      Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
  bool entries_in_range_64 =
      Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
  return entries_in_range_32 && entries_in_range_64;
}

ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
    : pool_(&assm->constpool_) {
  pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);
  pool_->StartBlock();
}

ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
    : pool_(&assm->constpool_) {
  DCHECK_EQ(check, PoolEmissionCheck::kSkip);
  pool_->StartBlock();
}

ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }

void ConstantPool::MaybeCheck() {
  if (assm_->pc_offset() >= next_check_) {
    Check(Emission::kIfNeeded, Jump::kRequired);
  }
}

#endif  // defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_RISCV32)
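
Both EmitEntries() implementations above rely on std::multimap::equal_range to emit a deduplicatable constant once while back-patching every load that referenced it. Below is a self-contained sketch of that iteration pattern with simplified stand-in types; it is illustrative only and not V8 code:

#include <cstdio>
#include <map>

int main() {
  // Key: constant value. Mapped value: pc offset of a load referencing it.
  std::multimap<int, int> entries = {{7, 4}, {7, 20}, {9, 36}};
  for (auto iter = entries.begin(); iter != entries.end();) {
    auto range = entries.equal_range(iter->first);
    for (auto it = range.first; it != range.second; ++it) {
      std::printf("patch load at pc %d to point at constant %d\n",
                  it->second, it->first);
    }
    std::printf("emit constant %d once\n", iter->first);
    iter = range.second;  // skip all duplicates of this key
  }
  return 0;
}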

}  // namespace internal
}  // namespace v8

352 deps/v8/src/codegen/constant-pool.h vendored

@@ -5,352 +5,12 @@
#ifndef V8_CODEGEN_CONSTANT_POOL_H_
#define V8_CODEGEN_CONSTANT_POOL_H_

#include <map>

#include "src/base/numbers/double.h"
#include "src/codegen/label.h"
#include "src/codegen/reloc-info.h"
#include "src/common/globals.h"

namespace v8 {
namespace internal {

class Instruction;

// -----------------------------------------------------------------------------
// Constant pool support

class ConstantPoolEntry {
 public:
  ConstantPoolEntry() = default;
  ConstantPoolEntry(int position, intptr_t value, bool sharing_ok,
                    RelocInfo::Mode rmode = RelocInfo::NO_INFO)
      : position_(position),
        merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
        value_(value),
        rmode_(rmode) {}
  ConstantPoolEntry(int position, base::Double value,
                    RelocInfo::Mode rmode = RelocInfo::NO_INFO)
      : position_(position),
        merged_index_(SHARING_ALLOWED),
        value64_(value.AsUint64()),
        rmode_(rmode) {}

  int position() const { return position_; }
  bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
  bool is_merged() const { return merged_index_ >= 0; }
  int merged_index() const {
    DCHECK(is_merged());
    return merged_index_;
  }
  void set_merged_index(int index) {
    DCHECK(sharing_ok());
    merged_index_ = index;
    DCHECK(is_merged());
  }
  int offset() const {
    DCHECK_GE(merged_index_, 0);
    return merged_index_;
  }
  void set_offset(int offset) {
    DCHECK_GE(offset, 0);
    merged_index_ = offset;
  }
  intptr_t value() const { return value_; }
  uint64_t value64() const { return value64_; }
  RelocInfo::Mode rmode() const { return rmode_; }

  enum Type { INTPTR, DOUBLE, NUMBER_OF_TYPES };

  static int size(Type type) {
    return (type == INTPTR) ? kSystemPointerSize : kDoubleSize;
  }

  enum Access { REGULAR, OVERFLOWED };

 private:
  int position_;
  int merged_index_;
  union {
    intptr_t value_;
    uint64_t value64_;
  };
  // TODO(leszeks): The way we use this, it could probably be packed into
  // merged_index_ if size is a concern.
  RelocInfo::Mode rmode_;
  enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
};
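
A small hypothetical walk-through (not part of the header) of how merged_index_ doubles as a state tag and, later, as an index or offset; the values are invented:

// Values are invented for illustration.
ConstantPoolEntry e(/*position=*/12, /*value=*/intptr_t{0x1234}, /*sharing_ok=*/true);
// Initially: e.sharing_ok() == true, e.is_merged() == false
// (merged_index_ holds SHARING_ALLOWED == -1).
e.set_merged_index(3);
// Now e.is_merged() == true and e.merged_index() == 3: the entry will reuse
// the shared entry at index 3 instead of emitting its own pool slot.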

#if defined(V8_TARGET_ARCH_PPC64)

// -----------------------------------------------------------------------------
// Embedded constant pool support

class ConstantPoolBuilder {
 public:
  ConstantPoolBuilder(int ptr_reach_bits, int double_reach_bits);

#ifdef DEBUG
  ~ConstantPoolBuilder() {
    // Unused labels to prevent DCHECK failures.
    emitted_label_.Unuse();
    emitted_label_.UnuseNear();
  }
#if V8_TARGET_ARCH_ARM64
#include "src/codegen/arm64/constant-pool-arm64.h"
#elif V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/constant-pool-ppc.h"
#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
#include "src/codegen/riscv/constant-pool-riscv.h"
#endif

  // Add pointer-sized constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, intptr_t value,
                                     bool sharing_ok) {
    ConstantPoolEntry entry(position, value, sharing_ok);
    return AddEntry(&entry, ConstantPoolEntry::INTPTR);
  }

  // Add double constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, base::Double value) {
    ConstantPoolEntry entry(position, value);
    return AddEntry(&entry, ConstantPoolEntry::DOUBLE);
  }

  // Add double constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, double value) {
    return AddEntry(position, base::Double(value));
  }

  // Previews the access type required for the next new entry to be added.
  ConstantPoolEntry::Access NextAccess(ConstantPoolEntry::Type type) const;

  bool IsEmpty() {
    return info_[ConstantPoolEntry::INTPTR].entries.empty() &&
           info_[ConstantPoolEntry::INTPTR].shared_entries.empty() &&
           info_[ConstantPoolEntry::DOUBLE].entries.empty() &&
           info_[ConstantPoolEntry::DOUBLE].shared_entries.empty();
  }

  // Emit the constant pool. Invoke only after all entries have been
  // added and all instructions have been emitted.
  // Returns position of the emitted pool (zero implies no constant pool).
  int Emit(Assembler* assm);

  // Returns the label associated with the start of the constant pool.
  // Linking to this label in the function prologue may provide an
  // efficient means of constant pool pointer register initialization
  // on some architectures.
  inline Label* EmittedPosition() { return &emitted_label_; }

 private:
  ConstantPoolEntry::Access AddEntry(ConstantPoolEntry* entry,
                                     ConstantPoolEntry::Type type);
  void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type);
  void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access,
                 ConstantPoolEntry::Type type);

  struct PerTypeEntryInfo {
    PerTypeEntryInfo() : regular_count(0), overflow_start(-1) {}
    bool overflow() const {
      return (overflow_start >= 0 &&
              overflow_start < static_cast<int>(entries.size()));
    }
    int regular_reach_bits;
    int regular_count;
    int overflow_start;
    std::vector<ConstantPoolEntry> entries;
    std::vector<ConstantPoolEntry> shared_entries;
  };

  Label emitted_label_;  // Records pc_offset of emitted pool
  PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
};

#endif  // defined(V8_TARGET_ARCH_PPC64)

#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_RISCV64) || \
    defined(V8_TARGET_ARCH_RISCV32)

class ConstantPoolKey {
 public:
  explicit ConstantPoolKey(uint64_t value,
                           RelocInfo::Mode rmode = RelocInfo::NO_INFO)
      : is_value32_(false), value64_(value), rmode_(rmode) {}

  explicit ConstantPoolKey(uint32_t value,
                           RelocInfo::Mode rmode = RelocInfo::NO_INFO)
      : is_value32_(true), value32_(value), rmode_(rmode) {}

  uint64_t value64() const {
    CHECK(!is_value32_);
    return value64_;
  }
  uint32_t value32() const {
    CHECK(is_value32_);
    return value32_;
  }

  bool is_value32() const { return is_value32_; }
  RelocInfo::Mode rmode() const { return rmode_; }

  bool AllowsDeduplication() const {
    DCHECK(rmode_ != RelocInfo::CONST_POOL &&
           rmode_ != RelocInfo::VENEER_POOL &&
           rmode_ != RelocInfo::DEOPT_SCRIPT_OFFSET &&
           rmode_ != RelocInfo::DEOPT_INLINING_ID &&
           rmode_ != RelocInfo::DEOPT_REASON && rmode_ != RelocInfo::DEOPT_ID &&
           rmode_ != RelocInfo::DEOPT_NODE_ID);
    // CODE_TARGETs can be shared because they aren't patched anymore,
    // and we make sure we emit only one reloc info for them (thus delta
    // patching) will apply the delta only once. At the moment, we do not dedup
    // code targets if they are wrapped in a heap object request (value == 0).
    bool is_sharable_code_target =
        rmode_ == RelocInfo::CODE_TARGET &&
        (is_value32() ? (value32() != 0) : (value64() != 0));
    bool is_sharable_embedded_object = RelocInfo::IsEmbeddedObjectMode(rmode_);
    return RelocInfo::IsShareableRelocMode(rmode_) || is_sharable_code_target ||
           is_sharable_embedded_object;
  }

 private:
  bool is_value32_;
  union {
    uint64_t value64_;
    uint32_t value32_;
  };
  RelocInfo::Mode rmode_;
};

// Order for pool entries. 64bit entries go first.
inline bool operator<(const ConstantPoolKey& a, const ConstantPoolKey& b) {
  if (a.is_value32() < b.is_value32()) return true;
  if (a.is_value32() > b.is_value32()) return false;
  if (a.rmode() < b.rmode()) return true;
  if (a.rmode() > b.rmode()) return false;
  if (a.is_value32()) return a.value32() < b.value32();
  return a.value64() < b.value64();
}

inline bool operator==(const ConstantPoolKey& a, const ConstantPoolKey& b) {
  if (a.rmode() != b.rmode() || a.is_value32() != b.is_value32()) {
    return false;
  }
  if (a.is_value32()) return a.value32() == b.value32();
  return a.value64() == b.value64();
}
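
A quick illustration of the ordering above (values invented): 64-bit keys report is_value32() == false, which compares less than true, so they group first in the multimap, matching the "64bit entries go first" comment.

ConstantPoolKey k64(uint64_t{42});  // is_value32() == false
ConstantPoolKey k32(uint32_t{42});  // is_value32() == true
// operator< compares is_value32() first, so k64 < k32 holds and all 64-bit
// entries sort (and are emitted) before the 32-bit ones.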

// Constant pool generation
enum class Jump { kOmitted, kRequired };
enum class Emission { kIfNeeded, kForced };
enum class Alignment { kOmitted, kRequired };
enum class RelocInfoStatus { kMustRecord, kMustOmitForDuplicate };
enum class PoolEmissionCheck { kSkip };

// Pools are emitted in the instruction stream, preferably after unconditional
// jumps or after returns from functions (in dead code locations).
// If a long code sequence does not contain unconditional jumps, it is
// necessary to emit the constant pool before the pool gets too far from the
// location it is accessed from. In this case, we emit a jump over the emitted
// constant pool.
// Constants in the pool may be addresses of functions that gets relocated;
// if so, a relocation info entry is associated to the constant pool entry.
class ConstantPool {
 public:
  explicit ConstantPool(Assembler* assm);
  ~ConstantPool();

  // Returns true when we need to write RelocInfo and false when we do not.
  RelocInfoStatus RecordEntry(uint32_t data, RelocInfo::Mode rmode);
  RelocInfoStatus RecordEntry(uint64_t data, RelocInfo::Mode rmode);

  size_t Entry32Count() const { return entry32_count_; }
  size_t Entry64Count() const { return entry64_count_; }
  bool IsEmpty() const { return entries_.empty(); }
  // Check if pool will be out of range at {pc_offset}.
  bool IsInImmRangeIfEmittedAt(int pc_offset);
  // Size in bytes of the constant pool. Depending on parameters, the size will
  // include the branch over the pool and alignment padding.
  int ComputeSize(Jump require_jump, Alignment require_alignment) const;

  // Emit the pool at the current pc with a branch over the pool if requested.
  void EmitAndClear(Jump require);
  bool ShouldEmitNow(Jump require_jump, size_t margin = 0) const;
  V8_EXPORT_PRIVATE void Check(Emission force_emission, Jump require_jump,
                               size_t margin = 0);

  V8_EXPORT_PRIVATE void MaybeCheck();
  void Clear();

  // Constant pool emission can be blocked temporarily.
  bool IsBlocked() const;

  // Repeated checking whether the constant pool should be emitted is expensive;
  // only check once a number of instructions have been generated.
  void SetNextCheckIn(size_t instructions);

  // Class for scoping postponing the constant pool generation.
  class V8_EXPORT_PRIVATE V8_NODISCARD BlockScope {
   public:
    // BlockScope immediatelly emits the pool if necessary to ensure that
    // during the block scope at least {margin} bytes can be emitted without
    // pool emission becomming necessary.
    explicit BlockScope(Assembler* pool, size_t margin = 0);
    BlockScope(Assembler* pool, PoolEmissionCheck);
    ~BlockScope();

   private:
    ConstantPool* pool_;
    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockScope);
  };

  // Hard limit to the const pool which must not be exceeded.
  static const size_t kMaxDistToPool32;
  static const size_t kMaxDistToPool64;
  // Approximate distance where the pool should be emitted.
  static const size_t kApproxDistToPool32;
  V8_EXPORT_PRIVATE static const size_t kApproxDistToPool64;
  // Approximate distance where the pool may be emitted if
  // no jump is required (due to a recent unconditional jump).
  static const size_t kOpportunityDistToPool32;
  static const size_t kOpportunityDistToPool64;
  // PC distance between constant pool checks.
  V8_EXPORT_PRIVATE static const size_t kCheckInterval;
  // Number of entries in the pool which trigger a check.
  static const size_t kApproxMaxEntryCount;

 private:
  void StartBlock();
  void EndBlock();

  void EmitEntries();
  void EmitPrologue(Alignment require_alignment);
  int PrologueSize(Jump require_jump) const;
  RelocInfoStatus RecordKey(ConstantPoolKey key, int offset);
  RelocInfoStatus GetRelocInfoStatusFor(const ConstantPoolKey& key);
  void Emit(const ConstantPoolKey& key);
  void SetLoadOffsetToConstPoolEntry(int load_offset, Instruction* entry_offset,
                                     const ConstantPoolKey& key);
  Alignment IsAlignmentRequiredIfEmittedAt(Jump require_jump,
                                           int pc_offset) const;

  Assembler* assm_;
  // Keep track of the first instruction requiring a constant pool entry
  // since the previous constant pool was emitted.
  int first_use_32_ = -1;
  int first_use_64_ = -1;
  // We sort not according to insertion order, but since we do not insert
  // addresses (for heap objects we insert an index which is created in
  // increasing order), the order is deterministic. We map each entry to the
  // pc offset of the load. We use a multimap because we need to record the
  // pc offset of each load of the same constant so that the immediate of the
  // loads can be back-patched when the pool is emitted.
  std::multimap<ConstantPoolKey, int> entries_;
  size_t entry32_count_ = 0;
  size_t entry64_count_ = 0;
  int next_check_ = 0;
  int old_next_check_ = 0;
  int blocked_nesting_ = 0;
};

#endif  // defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_RISCV64) ||
        // defined(V8_TARGET_ARCH_RISCV32)

}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_CONSTANT_POOL_H_

8 deps/v8/src/codegen/cpu-features.h vendored

@@ -100,6 +100,7 @@ enum CpuFeature {
  ZBB,
  ZBS,
  ZICOND,
  ZICFISS,
#endif

  NUMBER_OF_CPU_FEATURES

@@ -152,6 +153,11 @@ class V8_EXPORT_PRIVATE CpuFeatures : public AllStatic {
    return dcache_line_size_;
  }

  static inline unsigned vlen() {
    DCHECK_NE(vlen_, 0);
    return vlen_;
  }

  static void PrintTarget();
  static void PrintFeatures();

@@ -173,6 +179,8 @@ class V8_EXPORT_PRIVATE CpuFeatures : public AllStatic {
  // CpuFeatures::SupportWasmSimd128().
  static bool supports_wasm_simd_128_;
  static bool supports_cetss_;
  // VLEN is the length in bits of the vector registers on RISC-V.
  static unsigned vlen_;
};

}  // namespace internal
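
An illustrative use of the new vlen() accessor (assumed scenario, not from this commit): deriving a lane count for RISC-V vector code once CPU feature probing has filled in vlen_.

// Hypothetical: vlen() DCHECKs that probing already set a non-zero VLEN.
unsigned vlen_bits = CpuFeatures::vlen();
unsigned int32_lanes = vlen_bits / 32;  // e.g. VLEN = 128 gives 4 x int32 lanes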