Mirror of https://github.com/zebrajr/node.git, synced 2025-12-06 00:20:08 +01:00.
deps: update V8 to 11.9.169.7
PR-URL: https://github.com/nodejs/node/pull/50115
Reviewed-By: Rafael Gonzaga <rafael.nunu@hotmail.com>
Reviewed-By: Michael Dawson <midawson@redhat.com>
parent 084d761dfc
commit d8c97e4857
deps/v8/.gitignore (vendored, 2 changes)

@@ -51,7 +51,7 @@
 /src/inspector/build/closure-compiler
 /src/inspector/build/closure-compiler.tar.gz
 /test/benchmarks/data
-/test/fuzzer/wasm_corpus
+/test/fuzzer/wasm_corpus/
 /test/fuzzer/wasm_corpus.tar.gz
 !/test/mjsunit/tools/*.log
 /test/mozilla/data
deps/v8/.gn (vendored, 4 changes)

@@ -25,10 +25,6 @@ no_check_targets = [
]

default_args = {
  # Overwrite default args declared in the Fuchsia sdk
  # Please maintain this in sync with Chromium version in src/.gn
  fuchsia_target_api_level = 9

  # Disable rust dependencies.
  enable_rust = false
}
deps/v8/AUTHORS (vendored, 2 changes)

@@ -177,8 +177,8 @@ Kris Selden <kris.selden@gmail.com>
Kyounga Ra <kyounga@alticast.com>
Loo Rong Jie <loorongjie@gmail.com>
Lu Yahan <yahan@iscas.ac.cn>
Ludovic Mermod <ludovic.mermod@gmail.com>
Luis Reis <luis.m.reis@gmail.com>
Luke Albao <lukealbao@gmail.com>
Luke Zarko <lukezarko@gmail.com>
Ma Aiguo <maaiguo@uniontech.com>
Maciej Małecki <me@mmalecki.com>
deps/v8/BUILD.bazel (vendored, 27 changes)

@@ -829,6 +829,7 @@ filegroup(
         "src/builtins/array-findlastindex.tq",
         "src/builtins/array-foreach.tq",
         "src/builtins/array-from.tq",
+        "src/builtins/array-from-async.tq",
         "src/builtins/array-isarray.tq",
         "src/builtins/array-join.tq",
         "src/builtins/array-lastindexof.tq",

@@ -1010,6 +1011,7 @@ filegroup(
         "src/objects/name.tq",
         "src/objects/oddball.tq",
         "src/objects/hole.tq",
+        "src/objects/trusted-object.tq",
         "src/objects/ordered-hash-table.tq",
         "src/objects/primitive-heap-object.tq",
         "src/objects/promise.tq",

@@ -1516,6 +1518,7 @@ filegroup(
        "src/heap/cppgc-js/wrappable-info-inl.h",
        "src/heap/ephemeron-remembered-set.h",
        "src/heap/ephemeron-remembered-set.cc",
        "src/heap/evacuation-allocator.cc",
        "src/heap/evacuation-allocator.h",
        "src/heap/evacuation-allocator-inl.h",
        "src/heap/evacuation-verifier.cc",

@@ -1572,6 +1575,9 @@ filegroup(
         "src/heap/local-heap.cc",
         "src/heap/local-heap.h",
         "src/heap/local-heap-inl.h",
+        "src/heap/main-allocator.cc",
+        "src/heap/main-allocator.h",
+        "src/heap/main-allocator-inl.h",
         "src/heap/mark-compact.cc",
         "src/heap/mark-compact.h",
         "src/heap/mark-compact-inl.h",

@@ -1945,6 +1951,8 @@ filegroup(
         "src/objects/oddball.h",
         "src/objects/oddball-inl.h",
         "src/objects/hole.h",
+        "src/objects/trusted-object.h",
+        "src/objects/trusted-object-inl.h",
         "src/objects/hole-inl.h",
         "src/objects/option-utils.cc",
         "src/objects/option-utils.h",

@@ -2197,9 +2205,13 @@ filegroup(
         "src/sandbox/code-pointer-table.cc",
         "src/sandbox/code-pointer-table.h",
         "src/sandbox/code-pointer-table-inl.h",
+        "src/sandbox/indirect-pointer-table.cc",
+        "src/sandbox/indirect-pointer-table.h",
+        "src/sandbox/indirect-pointer-table-inl.h",
         "src/sandbox/code-pointer.h",
         "src/sandbox/code-pointer-inl.h",
         "src/sandbox/indirect-pointer.h",
+        "src/sandbox/indirect-pointer-tag.h",
         "src/sandbox/indirect-pointer-inl.h",
         "src/sandbox/external-entity-table.h",
         "src/sandbox/external-entity-table-inl.h",

@@ -2578,6 +2590,8 @@ filegroup(
         "src/asmjs/asm-scanner.h",
         "src/asmjs/asm-types.cc",
         "src/asmjs/asm-types.h",
+        "src/compiler/turboshaft/wasm-dead-code-elimination-phase.cc",
+        "src/compiler/turboshaft/wasm-dead-code-elimination-phase.h",
         "src/debug/debug-wasm-objects.cc",
         "src/debug/debug-wasm-objects.h",
         "src/debug/debug-wasm-objects-inl.h",

@@ -2650,6 +2664,7 @@ filegroup(
         "src/wasm/value-type.cc",
         "src/wasm/value-type.h",
         "src/wasm/wasm-arguments.h",
+        "src/wasm/wasm-builtin-list.h",
         "src/wasm/wasm-code-manager.cc",
         "src/wasm/wasm-code-manager.h",
         "src/wasm/wasm-debug.cc",

@@ -3020,6 +3035,10 @@ filegroup(
         "src/compiler/turboshaft/late-load-elimination-reducer.cc",
         "src/compiler/turboshaft/late-load-elimination-reducer.h",
         "src/compiler/turboshaft/layered-hash-map.h",
+        "src/compiler/turboshaft/loop-unrolling-phase.cc",
+        "src/compiler/turboshaft/loop-unrolling-phase.h",
+        "src/compiler/turboshaft/loop-unrolling-reducer.cc",
+        "src/compiler/turboshaft/loop-unrolling-reducer.h",
         "src/compiler/turboshaft/machine-lowering-phase.cc",
         "src/compiler/turboshaft/machine-lowering-phase.h",
         "src/compiler/turboshaft/machine-lowering-reducer.h",

@@ -3052,6 +3071,8 @@ filegroup(
        "src/compiler/turboshaft/simplify-tf-loops.cc",
        "src/compiler/turboshaft/simplify-tf-loops.h",
        "src/compiler/turboshaft/snapshot-table.h",
        "src/compiler/turboshaft/snapshot-table-opindex.h",
        "src/compiler/turboshaft/stack-check-reducer.h",
        "src/compiler/turboshaft/store-store-elimination-phase.cc",
        "src/compiler/turboshaft/store-store-elimination-phase.h",
        "src/compiler/turboshaft/store-store-elimination-reducer.h",

@@ -3158,6 +3179,10 @@ filegroup(
         "src/compiler/turboshaft/int64-lowering-phase.h",
         "src/compiler/turboshaft/int64-lowering-reducer.h",
         "src/compiler/turboshaft/wasm-lowering-reducer.h",
+        "src/compiler/turboshaft/wasm-gc-optimize-phase.cc",
+        "src/compiler/turboshaft/wasm-gc-optimize-phase.h",
+        "src/compiler/turboshaft/wasm-gc-type-reducer.cc",
+        "src/compiler/turboshaft/wasm-gc-type-reducer.h",
         "src/compiler/turboshaft/wasm-optimize-phase.cc",
         "src/compiler/turboshaft/wasm-optimize-phase.h",
         "src/compiler/turboshaft/wasm-turboshaft-compiler.cc",

@@ -3401,6 +3426,8 @@ filegroup(
         "src/heap/base/bytes.h",
+        "src/heap/base/incremental-marking-schedule.cc",
+        "src/heap/base/incremental-marking-schedule.h",
         "src/heap/base/memory-tagging.h",
         "src/heap/base/memory-tagging.cc",
         "src/heap/base/stack.cc",
         "src/heap/base/stack.h",
         "src/heap/base/worklist.cc",
deps/v8/BUILD.gn (vendored, 72 changes)

@@ -337,10 +337,6 @@ declare_args() {
   # Sets -DV8_ENABLE_SANDBOX.
   v8_enable_sandbox = ""
 
-  # Enable experimental code pointer sandboxing for the V8 sandbox.
-  # Sets -DV8_CODE_POINTER_SANDBOXING
-  v8_code_pointer_sandboxing = ""
-
   # Expose the memory corruption API to JavaScript. Useful for testing the sandbox.
   # WARNING This will expose builtins that (by design) cause memory corruption.
   # Sets -DV8_EXPOSE_MEMORY_CORRUPTION_API

@@ -568,11 +564,6 @@ if (v8_enable_sandbox == "") {
     v8_enable_external_code_space && target_os != "fuchsia"
 }
 
-if (v8_code_pointer_sandboxing == "") {
-  # By default, enable code pointer sandboxing if the sandbox is enabled.
-  v8_code_pointer_sandboxing = v8_enable_sandbox
-}
-
 if (v8_enable_static_roots == "") {
   # Static roots are only valid for builds with pointer compression and a
   # shared read-only heap.

@@ -674,8 +665,8 @@ assert(!v8_enable_sandbox || v8_enable_pointer_compression_shared_cage,
 assert(!v8_enable_sandbox || v8_enable_external_code_space,
        "The sandbox requires the external code space")
 
-assert(!v8_code_pointer_sandboxing || v8_enable_sandbox,
-       "Code pointer sandboxing requires the sandbox")
+assert(!v8_enable_sandbox || !v8_enable_third_party_heap,
+       "The sandbox is incompatible with the third-party heap")
 
 assert(!v8_expose_memory_corruption_api || v8_enable_sandbox,
        "The Memory Corruption API requires the sandbox")

@@ -769,7 +760,7 @@ config("internal_config") {
 config("v8_tracing_config") {
   if (v8_use_perfetto) {
     include_dirs = [
-      "third_party/perfetto/include",
+      "//third_party/perfetto/include",
       "$root_gen_dir/third_party/perfetto",
       "$root_gen_dir/third_party/perfetto/build_config",
     ]

@@ -1210,9 +1201,6 @@ config("features") {
   if (v8_enable_wasm_simd256_revec) {
     defines += [ "V8_ENABLE_WASM_SIMD256_REVEC" ]
   }
-  if (v8_code_pointer_sandboxing) {
-    defines += [ "V8_CODE_POINTER_SANDBOXING" ]
-  }
   if (v8_enable_maglev_graph_printer) {
     defines += [ "V8_ENABLE_MAGLEV_GRAPH_PRINTER" ]
   }

@@ -1842,6 +1830,7 @@ torque_files = [
   "src/builtins/array-findlast.tq",
   "src/builtins/array-findlastindex.tq",
   "src/builtins/array-foreach.tq",
+  "src/builtins/array-from-async.tq",
   "src/builtins/array-from.tq",
   "src/builtins/array-isarray.tq",
   "src/builtins/array-join.tq",

@@ -2025,6 +2014,7 @@ torque_files = [
   "src/objects/name.tq",
   "src/objects/oddball.tq",
   "src/objects/hole.tq",
+  "src/objects/trusted-object.tq",
   "src/objects/ordered-hash-table.tq",
   "src/objects/primitive-heap-object.tq",
   "src/objects/promise.tq",

@@ -3332,6 +3322,8 @@ v8_header_set("v8_internal_headers") {
   "src/compiler/turboshaft/late-escape-analysis-reducer.h",
   "src/compiler/turboshaft/late-load-elimination-reducer.h",
   "src/compiler/turboshaft/layered-hash-map.h",
+  "src/compiler/turboshaft/loop-unrolling-phase.h",
+  "src/compiler/turboshaft/loop-unrolling-reducer.h",
   "src/compiler/turboshaft/machine-lowering-phase.h",
   "src/compiler/turboshaft/machine-lowering-reducer.h",
   "src/compiler/turboshaft/machine-optimization-reducer.h",

@@ -3352,7 +3344,9 @@ v8_header_set("v8_internal_headers") {
  "src/compiler/turboshaft/select-lowering-reducer.h",
  "src/compiler/turboshaft/sidetable.h",
  "src/compiler/turboshaft/simplify-tf-loops.h",
  "src/compiler/turboshaft/snapshot-table-opindex.h",
  "src/compiler/turboshaft/snapshot-table.h",
  "src/compiler/turboshaft/stack-check-reducer.h",
  "src/compiler/turboshaft/store-store-elimination-phase.h",
  "src/compiler/turboshaft/store-store-elimination-reducer.h",
  "src/compiler/turboshaft/structural-optimization-reducer.h",

@@ -3371,6 +3365,7 @@ v8_header_set("v8_internal_headers") {
   "src/compiler/turboshaft/utils.h",
   "src/compiler/turboshaft/value-numbering-reducer.h",
   "src/compiler/turboshaft/variable-reducer.h",
+  "src/compiler/turboshaft/wasm-dead-code-elimination-phase.h",
   "src/compiler/type-cache.h",
   "src/compiler/type-narrowing-reducer.h",
   "src/compiler/typed-optimization.h",

@@ -3523,6 +3518,8 @@ v8_header_set("v8_internal_headers") {
   "src/heap/local-factory.h",
   "src/heap/local-heap-inl.h",
   "src/heap/local-heap.h",
+  "src/heap/main-allocator-inl.h",
+  "src/heap/main-allocator.h",
   "src/heap/mark-compact-inl.h",
   "src/heap/mark-compact.h",
   "src/heap/mark-sweep-utilities.h",

@@ -3844,6 +3841,8 @@ v8_header_set("v8_internal_headers") {
   "src/objects/torque-defined-classes.h",
   "src/objects/transitions-inl.h",
   "src/objects/transitions.h",
+  "src/objects/trusted-object-inl.h",
+  "src/objects/trusted-object.h",
   "src/objects/turbofan-types-inl.h",
   "src/objects/turbofan-types.h",
   "src/objects/turboshaft-types-inl.h",

@@ -3931,6 +3930,9 @@ v8_header_set("v8_internal_headers") {
   "src/sandbox/external-pointer-table.h",
   "src/sandbox/external-pointer.h",
   "src/sandbox/indirect-pointer-inl.h",
+  "src/sandbox/indirect-pointer-table-inl.h",
+  "src/sandbox/indirect-pointer-table.h",
+  "src/sandbox/indirect-pointer-tag.h",
   "src/sandbox/indirect-pointer.h",
   "src/sandbox/sandbox.h",
   "src/sandbox/sandboxed-pointer-inl.h",

@@ -4071,6 +4073,8 @@ v8_header_set("v8_internal_headers") {
   "src/compiler/int64-lowering.h",
   "src/compiler/turboshaft/int64-lowering-phase.h",
   "src/compiler/turboshaft/int64-lowering-reducer.h",
+  "src/compiler/turboshaft/wasm-gc-optimize-phase.h",
+  "src/compiler/turboshaft/wasm-gc-type-reducer.h",
   "src/compiler/turboshaft/wasm-js-lowering-reducer.h",
   "src/compiler/turboshaft/wasm-lowering-reducer.h",
   "src/compiler/turboshaft/wasm-optimize-phase.h",

@@ -4130,6 +4134,7 @@ v8_header_set("v8_internal_headers") {
   "src/wasm/turboshaft-graph-interface.h",
   "src/wasm/value-type.h",
   "src/wasm/wasm-arguments.h",
+  "src/wasm/wasm-builtin-list.h",
   "src/wasm/wasm-code-manager.h",
   "src/wasm/wasm-debug.h",
   "src/wasm/wasm-disassembler-impl.h",

@@ -4379,6 +4384,17 @@ v8_header_set("v8_internal_headers") {
     "src/regexp/loong64/regexp-macro-assembler-loong64.h",
     "src/wasm/baseline/loong64/liftoff-assembler-loong64.h",
   ]
+  if (v8_enable_webassembly) {
+    # Trap handling is enabled on loong64 Linux and in simulators on
+    # x64 on Linux.
+    if ((current_cpu == "loong64" && is_linux) ||
+        (current_cpu == "x64" && is_linux)) {
+      sources += [ "src/trap-handler/handler-inside-posix.h" ]
+    }
+    if (current_cpu == "x64" && is_linux) {
+      sources += [ "src/trap-handler/trap-handler-simulator.h" ]
+    }
+  }
 } else if (v8_current_cpu == "ppc") {
   sources += [
     ### gcmole(ppc) ###

@@ -4738,6 +4754,8 @@ if (v8_enable_webassembly) {
   v8_compiler_sources += [
     "src/compiler/int64-lowering.cc",
     "src/compiler/turboshaft/int64-lowering-phase.cc",
+    "src/compiler/turboshaft/wasm-gc-optimize-phase.cc",
+    "src/compiler/turboshaft/wasm-gc-type-reducer.cc",
     "src/compiler/turboshaft/wasm-optimize-phase.cc",
     "src/compiler/turboshaft/wasm-turboshaft-compiler.cc",
     "src/compiler/wasm-address-reassociation.cc",

@@ -4847,6 +4865,8 @@ v8_source_set("v8_turboshaft") {
   "src/compiler/turboshaft/instruction-selection-phase.cc",
   "src/compiler/turboshaft/late-escape-analysis-reducer.cc",
   "src/compiler/turboshaft/late-load-elimination-reducer.cc",
+  "src/compiler/turboshaft/loop-unrolling-phase.cc",
+  "src/compiler/turboshaft/loop-unrolling-reducer.cc",
   "src/compiler/turboshaft/machine-lowering-phase.cc",
   "src/compiler/turboshaft/memory-optimization-reducer.cc",
   "src/compiler/turboshaft/operations.cc",

@@ -5082,6 +5102,7 @@ v8_source_set("v8_base_without_compiler") {
  "src/heap/cppgc-js/unified-heap-marking-verifier.cc",
  "src/heap/cppgc-js/unified-heap-marking-visitor.cc",
  "src/heap/ephemeron-remembered-set.cc",
  "src/heap/evacuation-allocator.cc",
  "src/heap/evacuation-verifier.cc",
  "src/heap/factory-base.cc",
  "src/heap/factory.cc",

@@ -5102,6 +5123,7 @@ v8_source_set("v8_base_without_compiler") {
   "src/heap/large-spaces.cc",
   "src/heap/local-factory.cc",
   "src/heap/local-heap.cc",
+  "src/heap/main-allocator.cc",
   "src/heap/mark-compact.cc",
   "src/heap/mark-sweep-utilities.cc",
   "src/heap/marking-barrier.cc",

@@ -5327,6 +5349,7 @@ v8_source_set("v8_base_without_compiler") {
   "src/runtime/runtime.cc",
   "src/sandbox/code-pointer-table.cc",
   "src/sandbox/external-pointer-table.cc",
+  "src/sandbox/indirect-pointer-table.cc",
   "src/sandbox/sandbox.cc",
   "src/sandbox/testing.cc",
   "src/snapshot/code-serializer.cc",

@@ -5427,6 +5450,7 @@ v8_source_set("v8_base_without_compiler") {
   "src/asmjs/asm-parser.cc",
   "src/asmjs/asm-scanner.cc",
   "src/asmjs/asm-types.cc",
+  "src/compiler/turboshaft/wasm-dead-code-elimination-phase.cc",
   "src/debug/debug-wasm-objects.cc",
   "src/runtime/runtime-test-wasm.cc",
   "src/runtime/runtime-wasm.cc",

@@ -5640,6 +5664,20 @@ v8_source_set("v8_base_without_compiler") {
     "src/execution/loong64/simulator-loong64.cc",
     "src/regexp/loong64/regexp-macro-assembler-loong64.cc",
   ]
+  if (v8_enable_webassembly) {
+    # Trap handling is enabled on loong64 Linux and in simulators on
+    # x64 on Linux.
+    if ((current_cpu == "loong64" && is_linux) ||
+        (current_cpu == "x64" && is_linux)) {
+      sources += [
+        "src/trap-handler/handler-inside-posix.cc",
+        "src/trap-handler/handler-outside-posix.cc",
+      ]
+    }
+    if (current_cpu == "x64" && is_linux) {
+      sources += [ "src/trap-handler/handler-outside-simulator.cc" ]
+    }
+  }
 } else if (v8_current_cpu == "ppc") {
   sources += [
     ### gcmole(ppc) ###

@@ -6391,6 +6429,7 @@ v8_header_set("v8_heap_base_headers") {
   "src/heap/base/basic-slot-set.h",
   "src/heap/base/bytes.h",
+  "src/heap/base/incremental-marking-schedule.h",
   "src/heap/base/memory-tagging.h",
   "src/heap/base/stack.h",
   "src/heap/base/worklist.h",
 ]

@@ -6404,6 +6443,7 @@ v8_source_set("v8_heap_base") {
 sources = [
   "src/heap/base/active-system-pages.cc",
+  "src/heap/base/incremental-marking-schedule.cc",
   "src/heap/base/memory-tagging.cc",
   "src/heap/base/stack.cc",
   "src/heap/base/worklist.cc",
 ]

@@ -6440,6 +6480,8 @@ v8_source_set("v8_heap_base") {
 configs = [ ":internal_config" ]

 deps = [ ":v8_config_headers" ]

 public_deps = [
   ":v8_heap_base_headers",
   ":v8_libbase",
deps/v8/DEPS (vendored, 62 changes)

@@ -24,7 +24,7 @@ vars = {
   # Since the images are hundreds of MB, default to only downloading the image
   # most commonly useful for developers. Bots and developers that need to use
   # other images (e.g., qemu.arm64) can override this with additional images.
-  'checkout_fuchsia_boot_images': "terminal.qemu-x64",
+  'checkout_fuchsia_boot_images': "terminal.qemu-x64,terminal.x64",
   'checkout_fuchsia_product_bundles': '"{checkout_fuchsia_boot_images}" != ""',
 
   'checkout_instrumented_libraries': False,

@@ -57,23 +57,33 @@ vars = {
   # reclient CIPD package version
   'reclient_version': 're_client_version:0.113.0.8b45b89-gomaip',
 
+  # Fetch configuration files required for the 'use_remoteexec' gn arg
+  'download_remoteexec_cfg': False,
+
+  # RBE instance to use for running remote builds
+  'rbe_instance': Str('projects/rbe-chrome-untrusted/instances/default_instance'),
+
+  # RBE project to download rewrapper config files for. Only needed if
+  # different from the project used in 'rbe_instance'
+  'rewrapper_cfg_project': Str(''),
+
+  # This variable is overrided in Chromium's DEPS file.
+  'build_with_chromium': False,
+
   # GN CIPD package version.
-  'gn_version': 'git_revision:cc56a0f98bb34accd5323316e0292575ff17a5d4',
+  'gn_version': 'git_revision:991530ce394efb58fcd848195469022fa17ae126',
 
   # ninja CIPD package version
   # https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
   'ninja_version': 'version:2@1.11.1.chromium.6',
 
   # luci-go CIPD package version.
-  'luci_go': 'git_revision:fe3cfd422b1012c2c8cf00d65cdb11aa2c26cd66',
+  'luci_go': 'git_revision:589d8654cfa7808816a6ecb4284ed2fd72c2f6d5',
 
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling Fuchsia sdk
   # and whatever else without interference from each other.
-  'fuchsia_version': 'version:14.20230902.2.1',
+  'fuchsia_version': 'version:15.20230930.1.1',
 
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling android_sdk_build-tools_version

@@ -111,11 +121,11 @@ vars = {
 
 deps = {
   'base/trace_event/common':
-    Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '147f65333c38ddd1ebf554e89965c243c8ce50b3',
+    Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '29ac73db520575590c3aceb0a6f1f58dda8934f6',
   'build':
-    Var('chromium_url') + '/chromium/src/build.git' + '@' + 'e14e0cc3b60c6ba8901741da3f9c18b7fa983880',
+    Var('chromium_url') + '/chromium/src/build.git' + '@' + 'b3ac98b5aa5333fa8b1059b5bf19885923dfe050',
   'buildtools':
-    Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'b2043d4f435131d0a1bdd5342c17753ef9236572',
+    Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '50c348906cbd450e031bc3123b657f833f8455b7',
   'buildtools/linux64': {
     'packages': [
       {

@@ -161,9 +171,9 @@ deps = {
   'test/mozilla/data':
     Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
   'test/test262/data':
-    Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '9efb4f8e531efbc297680145a7fa67d7415d0a4a',
+    Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '6789b50cce139af4ca819feb8ce3a9c77ba4098a',
   'third_party/android_platform': {
-    'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '4b4eee2d24ec8002602e1b82d63a586d46507501',
+    'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'eeb2d566f963bb66212fdc0d9bbe1dde550b4969',
     'condition': 'checkout_android',
   },
   'third_party/android_sdk/public': {

@@ -208,14 +218,14 @@ deps = {
     'packages': [
       {
         'package': 'chromium/third_party/android_toolchain/android_toolchain',
-        'version': 'R_8suM8m0oHbZ1awdxGXvKEFpAOETscbfZxkkMthyk8C',
+        'version': 'NSOM616pOQCfRfDAhC72ltgjyUQp9lAWCMzlmgB18dAC',
       },
     ],
     'condition': 'checkout_android',
     'dep_type': 'cipd',
   },
   'third_party/catapult': {
-    'url': Var('chromium_url') + '/catapult.git' + '@' + 'f16ca3c78e46cefa982100444844da3fcb25390e',
+    'url': Var('chromium_url') + '/catapult.git' + '@' + '4f81c1e295978227d83f1b42ceff40b4f9b5b08c',
     'condition': 'checkout_android',
   },
   'third_party/clang-format/script':

@@ -229,7 +239,7 @@ deps = {
     'condition': 'checkout_android',
   },
   'third_party/depot_tools':
-    Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '693e0b312171685d34de77b39bc90b8271ad6541',
+    Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '90a30a5b5357636fa05bb315c393275be7ca705c',
   'third_party/fuchsia-gn-sdk': {
     'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-gn-sdk.git' + '@' + 'd1e0ff4350f77c7f6b246ff62c232318a73c8176',
     'condition': 'checkout_fuchsia',

@@ -266,11 +276,11 @@ deps = {
   'third_party/jsoncpp/source':
     Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '42e892d96e47b1f6e29844cc705e148ec4856448',
   'third_party/libc++/src':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '84fb809dd6dae36d556dc0bb702c6cc2ce9d4b80',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '7cf98622abaf832e2d4784889ebc69d5b6fde4d8',
   'third_party/libc++abi/src':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '331847d7a5e6f8706689cf5d468e6e58c868fa10',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'e8e4eb8f1c413ea4365256b2b83a6093c95d2d86',
   'third_party/libunwind/src':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'a321409e66c212098e755cfae1a978bbcff1ccbb',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '43e5a34c5b7066a7ee15c74f09dc37b4b9b5630e',
   'third_party/logdog/logdog':
     Var('chromium_url') + '/infra/luci/luci-py/client/libs/logdog' + '@' + '0b2078a90f7a638d576b3a7c407d136f2fb62399',
   'third_party/markupsafe':

@@ -294,9 +304,9 @@ deps = {
     'condition': 'checkout_android',
   },
   'third_party/zlib':
-    Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'f5fd0ad2663e239a31184ad4c9919991dda16f46',
+    Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '3f0af7f1d5ca6bb9d247f40b861346627c3032a1',
   'tools/clang':
-    Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '9fc887ccded86c9355f1abbe80c651271c59632f',
+    Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'dc3593cbb8b6e77c06e17697ea5b34b38d54a7ba',
   'tools/luci-go': {
     'packages': [
       {

@@ -312,7 +322,7 @@ deps = {
     'dep_type': 'cipd',
   },
   'third_party/abseil-cpp': {
-    'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + '7affa303ea4ebf4d4de65b3f20f230c7bb16a2ed',
+    'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + '7207ed23d56aa19796ffd08b8203f7af7f3b5f29',
     'condition': 'not build_with_chromium',
   }
 }

@@ -680,4 +690,20 @@ hooks = [
     'condition': 'host_os == "win"',
     'action': ['python3', 'build/del_ninja_deps_cache.py'],
   },
+  # Configure remote exec cfg files
+  {
+    'name': 'configure_reclient_cfgs',
+    'pattern': '.',
+    'condition': 'download_remoteexec_cfg and not build_with_chromium',
+    'action': ['python3',
+               'buildtools/reclient_cfgs/configure_reclient_cfgs.py',
+               '--rbe_instance',
+               Var('rbe_instance'),
+               '--reproxy_cfg_template',
+               'reproxy.cfg.template',
+               '--rewrapper_cfg_project',
+               Var('rewrapper_cfg_project'),
+               '--quiet',
+    ],
+  },
 ]
deps/v8/base/trace_event/common/trace_event_common.h (vendored, 2 changes; the file header was lost in extraction, and the path is inferred from the file naming itself in the hunk below)

@@ -195,7 +195,7 @@
 // use_perfetto_client_library GN arg. If that flag is disabled, we fall back to
 // the legacy implementation in the latter half of this file (and
 // trace_event.h).
-// TODO(skyostil): Remove the legacy macro implementation.
+// TODO(skyostil, crbug.com/1006541): Remove the legacy macro implementation.
 
 // Normally we'd use BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY) for this, but
 // because v8 includes trace_event_common.h directly (in non-Perfetto mode), we
deps/v8/include/js_protocol.pdl (vendored, 24 changes)

@@ -1014,8 +1014,7 @@ domain Runtime
   # Unique script identifier.
   type ScriptId extends string
 
-  # Represents options for serialization. Overrides `generatePreview`, `returnByValue` and
-  # `generateWebDriverValue`.
+  # Represents options for serialization. Overrides `generatePreview` and `returnByValue`.
   type SerializationOptions extends object
     properties
       enum serialization

@@ -1027,8 +1026,7 @@ domain Runtime
       # `returnByValue: true`. Overrides `returnByValue`.
       json
       # Only remote object id is put in the result. Same bahaviour as if no
-      # `serializationOptions`, `generatePreview`, `returnByValue` nor `generateWebDriverValue`
-      # are provided.
+      # `serializationOptions`, `generatePreview` nor `returnByValue` are provided.
       idOnly
 
   # Deep serialization depth. Default is full depth. Respected only in `deep` serialization mode.

@@ -1066,6 +1064,7 @@ domain Runtime
       arraybuffer
       node
+      window
       generator
     optional any value
     optional string objectId
     # Set if value reference met more then once during serialization. In such

@@ -1125,8 +1124,6 @@ domain Runtime
     optional UnserializableValue unserializableValue
     # String representation of the object.
     optional string description
-    # Deprecated. Use `deepSerializedValue` instead. WebDriver BiDi representation of the value.
-    deprecated optional DeepSerializedValue webDriverValue
     # Deep serialized value.
     experimental optional DeepSerializedValue deepSerializedValue
     # Unique object identifier (for non-primitive values).

@@ -1442,13 +1439,8 @@ domain Runtime
     # boundaries).
     # This is mutually exclusive with `executionContextId`.
     experimental optional string uniqueContextId
-    # Deprecated. Use `serializationOptions: {serialization:"deep"}` instead.
-    # Whether the result should contain `webDriverValue`, serialized according to
-    # https://w3c.github.io/webdriver-bidi. This is mutually exclusive with `returnByValue`, but
-    # resulting `objectId` is still provided.
-    deprecated optional boolean generateWebDriverValue
     # Specifies the result serialization. If provided, overrides
-    # `generatePreview`, `returnByValue` and `generateWebDriverValue`.
+    # `generatePreview` and `returnByValue`.
     experimental optional SerializationOptions serializationOptions
 
   returns

@@ -1536,14 +1528,8 @@ domain Runtime
     # boundaries).
     # This is mutually exclusive with `contextId`.
     experimental optional string uniqueContextId
-    # Deprecated. Use `serializationOptions: {serialization:"deep"}` instead.
-    # Whether the result should contain `webDriverValue`, serialized
-    # according to
-    # https://w3c.github.io/webdriver-bidi. This is mutually exclusive with `returnByValue`, but
-    # resulting `objectId` is still provided.
-    deprecated optional boolean generateWebDriverValue
     # Specifies the result serialization. If provided, overrides
-    # `generatePreview`, `returnByValue` and `generateWebDriverValue`.
+    # `generatePreview` and `returnByValue`.
     experimental optional SerializationOptions serializationOptions
   returns
     # Evaluation result.
deps/v8/include/v8-container.h (vendored, 36 changes)

@@ -43,6 +43,42 @@ class V8_EXPORT Array : public Object {
     return static_cast<Array*>(value);
   }
 
+  enum class CallbackResult {
+    kException,
+    kBreak,
+    kContinue,
+  };
+  using IterationCallback = CallbackResult (*)(uint32_t index,
+                                               Local<Value> element,
+                                               void* data);
+
+  /**
+   * Calls {callback} for every element of this array, passing {callback_data}
+   * as its {data} parameter.
+   * This function will typically be faster than calling {Get()} repeatedly.
+   * As a consequence of being optimized for low overhead, the provided
+   * callback must adhere to the following restrictions:
+   *  - It must not allocate any V8 objects and continue iterating; it may
+   *    allocate (e.g. an error message/object) and then immediately terminate
+   *    the iteration.
+   *  - It must not modify the array being iterated.
+   *  - It must not call back into V8 (unless it can guarantee that such a
+   *    call does not violate the above restrictions, which is difficult).
+   *  - The {Local<Value> element} must not "escape", i.e. must not be assigned
+   *    to any other {Local}. Creating a {Global} from it, or updating a
+   *    v8::TypecheckWitness with it, is safe.
+   * These restrictions may be lifted in the future if use cases arise that
+   * justify a slower but more robust implementation.
+   *
+   * Returns {Nothing} on exception; use a {TryCatch} to catch and handle this
+   * exception.
+   * When the {callback} returns {kException}, iteration is terminated
+   * immediately, returning {Nothing}. By returning {kBreak}, the callback
+   * can request non-exceptional early termination of the iteration.
+   */
+  Maybe<void> Iterate(Local<Context> context, IterationCallback callback,
+                      void* callback_data);
+
  private:
   Array();
   static void CheckCast(Value* obj);
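The new Array::Iterate is built for tight loops over array elements under the restrictions documented in the hunk above. A minimal usage sketch, assuming only the declarations shown there; the helper names (CountData, CountElement, CountDefined) and the surrounding context/array values are illustrative, not part of the commit:

#include <v8.h>

// Count the non-undefined elements of an array. The callback does not
// allocate, does not modify the array, and does not let the element Local
// escape, so it satisfies the documented restrictions.
struct CountData {
  uint32_t count = 0;
};

static v8::Array::CallbackResult CountElement(uint32_t /*index*/,
                                              v8::Local<v8::Value> element,
                                              void* data) {
  if (!element->IsUndefined()) ++static_cast<CountData*>(data)->count;
  return v8::Array::CallbackResult::kContinue;
}

v8::Maybe<uint32_t> CountDefined(v8::Local<v8::Context> context,
                                 v8::Local<v8::Array> array) {
  CountData data;
  if (array->Iterate(context, &CountElement, &data).IsNothing()) {
    return v8::Nothing<uint32_t>();  // Exception pending; handle via TryCatch.
  }
  return v8::Just(data.count);
}

Returning kBreak instead of kContinue from the callback would stop the loop early without raising an exception.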
deps/v8/include/v8-exception.h (vendored, 23 changes)

@@ -30,14 +30,21 @@ class ThreadLocalTop;
  */
 class V8_EXPORT Exception {
  public:
-  static Local<Value> RangeError(Local<String> message);
-  static Local<Value> ReferenceError(Local<String> message);
-  static Local<Value> SyntaxError(Local<String> message);
-  static Local<Value> TypeError(Local<String> message);
-  static Local<Value> WasmCompileError(Local<String> message);
-  static Local<Value> WasmLinkError(Local<String> message);
-  static Local<Value> WasmRuntimeError(Local<String> message);
-  static Local<Value> Error(Local<String> message);
+  static Local<Value> RangeError(Local<String> message,
+                                 Local<Value> options = {});
+  static Local<Value> ReferenceError(Local<String> message,
+                                     Local<Value> options = {});
+  static Local<Value> SyntaxError(Local<String> message,
+                                  Local<Value> options = {});
+  static Local<Value> TypeError(Local<String> message,
+                                Local<Value> options = {});
+  static Local<Value> WasmCompileError(Local<String> message,
+                                       Local<Value> options = {});
+  static Local<Value> WasmLinkError(Local<String> message,
+                                    Local<Value> options = {});
+  static Local<Value> WasmRuntimeError(Local<String> message,
+                                       Local<Value> options = {});
+  static Local<Value> Error(Local<String> message, Local<Value> options = {});
 
   /**
    * Creates an error message for the given exception.
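Each Exception factory now accepts an optional options value, which mirrors the options bag of JavaScript's Error constructor. A sketch of passing a cause, on the assumption that the options argument corresponds to the { cause: ... } bag (the diff itself does not spell this out, and isolate, context, and cause are assumed to be in scope):

#include <v8.h>

// Build a TypeError carrying a cause, then let the caller throw it.
v8::Local<v8::Value> MakeTypeErrorWithCause(v8::Isolate* isolate,
                                            v8::Local<v8::Context> context,
                                            v8::Local<v8::Value> cause) {
  v8::Local<v8::Object> options = v8::Object::New(isolate);
  options
      ->Set(context, v8::String::NewFromUtf8Literal(isolate, "cause"), cause)
      .Check();
  return v8::Exception::TypeError(
      v8::String::NewFromUtf8Literal(isolate, "operation failed"), options);
}

Because options defaults to an empty handle, existing single-argument callers keep compiling unchanged.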
deps/v8/include/v8-inspector.h (vendored, 17 changes)

@@ -217,17 +217,6 @@ class V8_EXPORT V8InspectorSession {
   virtual void stop() = 0;
 };
 
-// Deprecated.
-// TODO(crbug.com/1420968): remove.
-class V8_EXPORT WebDriverValue {
- public:
-  explicit WebDriverValue(std::unique_ptr<StringBuffer> type,
-                          v8::MaybeLocal<v8::Value> value = {})
-      : type(std::move(type)), value(value) {}
-  std::unique_ptr<StringBuffer> type;
-  v8::MaybeLocal<v8::Value> value;
-};
-
 struct V8_EXPORT DeepSerializedValue {
   explicit DeepSerializedValue(std::unique_ptr<StringBuffer> type,
                                v8::MaybeLocal<v8::Value> value = {})

@@ -266,12 +255,6 @@ class V8_EXPORT V8InspectorClient {
   virtual void beginUserGesture() {}
   virtual void endUserGesture() {}
 
-  // Deprecated. Use `deepSerialize` instead.
-  // TODO(crbug.com/1420968): remove.
-  virtual std::unique_ptr<WebDriverValue> serializeToWebDriverValue(
-      v8::Local<v8::Value> v8Value, int maxDepth) {
-    return nullptr;
-  }
   virtual std::unique_ptr<DeepSerializationResult> deepSerialize(
       v8::Local<v8::Value> v8Value, int maxDepth,
       v8::Local<v8::Object> additionalParameters) {
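Embedders that overrode the removed serializeToWebDriverValue must migrate to deepSerialize, whose signature appears in the hunk above. A minimal sketch; the assumption that returning nullptr defers to V8's built-in deep serialization (as the removed method did) is an inference, not something this diff states:

#include <memory>
#include <v8-inspector.h>

class MyInspectorClient : public v8_inspector::V8InspectorClient {
 public:
  // Replaces a former serializeToWebDriverValue() override.
  std::unique_ptr<v8_inspector::DeepSerializationResult> deepSerialize(
      v8::Local<v8::Value> v8_value, int max_depth,
      v8::Local<v8::Object> additional_parameters) override {
    // Assumed: nullptr falls back to the default deep-serialization path.
    return nullptr;
  }
};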
deps/v8/include/v8-internal.h (vendored, 84 changes)

@@ -484,65 +484,74 @@ PER_ISOLATE_EXTERNAL_POINTER_TAGS(CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS)
// Indirect Pointers.
//
// When the sandbox is enabled, indirect pointers are used to reference
// HeapObjects that live outside of the sandbox (but are still managed through
// the GC). When object A references an object B through an indirect pointer,
// object A will contain a IndirectPointerHandle, i.e. a shifted 32-bit index,
// which identifies an entry in a pointer table (such as the CodePointerTable).
// HeapObjects that live outside of the sandbox (but are still managed by V8's
// garbage collector). When object A references an object B through an indirect
// pointer, object A will contain a IndirectPointerHandle, i.e. a shifted
// 32-bit index, which identifies an entry in a pointer table (generally an
// indirect pointer table, or the code pointer table if it is a Code object).
// This table entry then contains the actual pointer to object B. Further,
// object B owns this pointer table entry, and it is responsible for updating
// the "self-pointer" in the entry when it is relocated in memory. This way, in
// contrast to "normal" pointers, indirect pointers never need to be tracked by
// the GC (i.e. there is no remembered set for them).
// Currently there is only one type of object referenced through indirect
// pointers (Code objects), but once there are different types of such objects,
// the pointer table entry would probably also contain the type of the target
// object (e.g. by XORing the instance type into the top bits of the pointer).

// An IndirectPointerHandle represents a 32-bit index into a pointer table.
using IndirectPointerHandle = uint32_t;

// The size of the virtual memory reservation for the indirect pointer table.
// As with the external pointer table, a maximum table size in combination with
// shifted indices allows omitting bounds checks.
constexpr size_t kIndirectPointerTableReservationSize = 8 * MB;

// The indirect pointer handles are stores shifted to the left by this amount
// to guarantee that they are smaller than the maximum table size.
constexpr uint32_t kIndirectPointerHandleShift = 6;
constexpr uint32_t kIndirectPointerHandleShift = 12;

// A null handle always references an entry that contains nullptr.
constexpr IndirectPointerHandle kNullIndirectPointerHandle = 0;

// Currently only Code objects can be referenced through indirect pointers and
// various places rely on that assumption. They will all static_assert against
// this constant to make them easy to find and fix once we reference other types
// of objects indirectly.
constexpr bool kAllIndirectPointerObjectsAreCode = true;
// The maximum number of entries in an indirect pointer table.
constexpr int kIndirectPointerTableEntrySize = 8;
constexpr int kIndirectPointerTableEntrySizeLog2 = 3;
constexpr size_t kMaxIndirectPointers =
    kIndirectPointerTableReservationSize / kIndirectPointerTableEntrySize;
static_assert((1 << (32 - kIndirectPointerHandleShift)) == kMaxIndirectPointers,
              "kIndirectPointerTableReservationSize and "
              "kIndirectPointerHandleShift don't match");

//
// Code Pointers.
//
// When the sandbox is enabled, Code objects are referenced from inside the
// sandbox through indirect pointers that reference entries in the code pointer
// table (CPT). Each entry in the CPT contains both a pointer to a Code object
// as well as a pointer to the Code's entrypoint. This allows calling/jumping
// into Code with one fewer memory access (compared to the case where the
// entrypoint pointer needs to be loaded from the Code object).
// As such, a CodePointerHandle can be used both to obtain the referenced Code
// object and to directly load its entrypoint pointer.
// table (CPT) instead of the indirect pointer table (IPT). Each entry in the
// CPT contains both a pointer to a Code object as well as a pointer to the
// Code's entrypoint. This allows calling/jumping into Code with one fewer
// memory access (compared to the case where the entrypoint pointer needs to be
// loaded from the Code object). As such, a CodePointerHandle can be used both
// to obtain the referenced Code object and to directly load its entrypoint
// pointer.
using CodePointerHandle = IndirectPointerHandle;
constexpr uint32_t kCodePointerHandleShift = kIndirectPointerHandleShift;
constexpr CodePointerHandle kNullCodePointerHandle = 0;

// The size of the virtual memory reservation for code pointer table.
// This determines the maximum number of entries in a table. Using a maximum
// size allows omitting bounds checks on table accesses if the indices are
// guaranteed (e.g. through shifting) to be below the maximum index. This
// value must be a power of two.
// The size of the virtual memory reservation for the code pointer table.
// As with the other tables, a maximum table size in combination with shifted
// indices allows omitting bounds checks.
constexpr size_t kCodePointerTableReservationSize = 1 * GB;

// The maximum number of entries in an external pointer table.
// Code pointer handles are shifted by a different amount than indirect pointer
// handles as the tables have a different maximum size.
constexpr uint32_t kCodePointerHandleShift = 6;

// A null handle always references an entry that contains nullptr.
constexpr CodePointerHandle kNullCodePointerHandle = 0;

// The maximum number of entries in a code pointer table.
constexpr int kCodePointerTableEntrySize = 16;
constexpr int kCodePointerTableEntrySizeLog2 = 4;
constexpr size_t kMaxCodePointers =
    kCodePointerTableReservationSize / kCodePointerTableEntrySize;
static_assert(
    (1 << (32 - kIndirectPointerHandleShift)) == kMaxCodePointers,
    (1 << (32 - kCodePointerHandleShift)) == kMaxCodePointers,
    "kCodePointerTableReservationSize and kCodePointerHandleShift don't match");

constexpr int kCodePointerTableEntryEntrypointOffset = 0;

@@ -602,9 +611,11 @@ class Internals {
   static const int kHandleScopeDataSize =
       2 * kApiSystemPointerSize + 2 * kApiInt32Size;
 
-  // ExternalPointerTable layout guarantees.
+  // ExternalPointerTable and IndirectPointerTable layout guarantees.
   static const int kExternalPointerTableBasePointerOffset = 0;
   static const int kExternalPointerTableSize = 2 * kApiSystemPointerSize;
+  static const int kIndirectPointerTableSize = 2 * kApiSystemPointerSize;
+  static const int kIndirectPointerTableBasePointerOffset = 0;
 
   // IsolateData layout guarantees.
   static const int kIsolateCageBaseOffset = 0;

@@ -639,8 +650,10 @@ class Internals {
       kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize;
   static const int kIsolateSharedExternalPointerTableAddressOffset =
       kIsolateExternalPointerTableOffset + kExternalPointerTableSize;
-  static const int kIsolateApiCallbackThunkArgumentOffset =
+  static const int kIsolateIndirectPointerTableOffset =
       kIsolateSharedExternalPointerTableAddressOffset + kApiSystemPointerSize;
+  static const int kIsolateApiCallbackThunkArgumentOffset =
+      kIsolateIndirectPointerTableOffset + kIndirectPointerTableSize;
 #else
   static const int kIsolateApiCallbackThunkArgumentOffset =
       kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize;

@@ -763,6 +776,15 @@ class Internals {
     return ReadRawField<uint16_t>(map, kMapInstanceTypeOffset);
   }
 
+  V8_INLINE static Address LoadMap(Address obj) {
+    if (!HasHeapObjectTag(obj)) return kNullAddress;
+    Address map = ReadTaggedPointerField(obj, kHeapObjectMapOffset);
+#ifdef V8_MAP_PACKING
+    map = UnpackMapWord(map);
+#endif
+    return map;
+  }
+
   V8_INLINE static int GetOddballKind(Address obj) {
     return SmiValue(ReadTaggedSignedField(obj, kOddballKindOffset));
   }
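The static_asserts above encode one invariant: reservation size divided by entry size must equal the number of handles expressible after the shift. A standalone check of that arithmetic, with the values copied from this diff (this is a worked example, not V8 source):

#include <cstddef>
#include <cstdint>

constexpr size_t MB = 1024 * 1024;
constexpr size_t GB = 1024 * MB;

// Indirect pointer table: 8 MB of 8-byte entries is 2^20 entries, which is
// exactly what a 12-bit shift leaves of a 32-bit handle (32 - 12 = 20).
static_assert((8 * MB) / 8 == size_t{1} << (32 - 12));

// Code pointer table: 1 GB of 16-byte entries is 2^26 entries, matching the
// 6-bit kCodePointerHandleShift (32 - 6 = 26).
static_assert((1 * GB) / 16 == size_t{1} << (32 - 6));

This also explains why the shift values diverge in this version: the two tables now have different maximum sizes, so each needs its own shift.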
deps/v8/include/v8-isolate.h (vendored, 64 changes)

@@ -421,36 +421,36 @@ class V8_EXPORT Isolate {
   enum UseCounterFeature {
     kUseAsm = 0,
     kBreakIterator = 1,
-    kLegacyConst V8_DEPRECATE_SOON("unused") = 2,
-    kMarkDequeOverflow V8_DEPRECATE_SOON("unused") = 3,
-    kStoreBufferOverflow V8_DEPRECATE_SOON("unused") = 4,
-    kSlotsBufferOverflow V8_DEPRECATE_SOON("unused") = 5,
-    kObjectObserve V8_DEPRECATE_SOON("unused") = 6,
+    kLegacyConst V8_DEPRECATED("unused") = 2,
+    kMarkDequeOverflow V8_DEPRECATED("unused") = 3,
+    kStoreBufferOverflow V8_DEPRECATED("unused") = 4,
+    kSlotsBufferOverflow V8_DEPRECATED("unused") = 5,
+    kObjectObserve V8_DEPRECATED("unused") = 6,
     kForcedGC = 7,
     kSloppyMode = 8,
     kStrictMode = 9,
-    kStrongMode V8_DEPRECATE_SOON("unused") = 10,
+    kStrongMode V8_DEPRECATED("unused") = 10,
     kRegExpPrototypeStickyGetter = 11,
     kRegExpPrototypeToString = 12,
     kRegExpPrototypeUnicodeGetter = 13,
-    kIntlV8Parse V8_DEPRECATE_SOON("unused") = 14,
-    kIntlPattern V8_DEPRECATE_SOON("unused") = 15,
-    kIntlResolved V8_DEPRECATE_SOON("unused") = 16,
-    kPromiseChain V8_DEPRECATE_SOON("unused") = 17,
-    kPromiseAccept V8_DEPRECATE_SOON("unused") = 18,
-    kPromiseDefer V8_DEPRECATE_SOON("unused") = 19,
+    kIntlV8Parse V8_DEPRECATED("unused") = 14,
+    kIntlPattern V8_DEPRECATED("unused") = 15,
+    kIntlResolved V8_DEPRECATED("unused") = 16,
+    kPromiseChain V8_DEPRECATED("unused") = 17,
+    kPromiseAccept V8_DEPRECATED("unused") = 18,
+    kPromiseDefer V8_DEPRECATED("unused") = 19,
     kHtmlCommentInExternalScript = 20,
     kHtmlComment = 21,
     kSloppyModeBlockScopedFunctionRedefinition = 22,
     kForInInitializer = 23,
-    kArrayProtectorDirtied V8_DEPRECATE_SOON("unused") = 24,
+    kArrayProtectorDirtied V8_DEPRECATED("unused") = 24,
     kArraySpeciesModified = 25,
     kArrayPrototypeConstructorModified = 26,
-    kArrayInstanceProtoModified V8_DEPRECATE_SOON("unused") = 27,
+    kArrayInstanceProtoModified V8_DEPRECATED("unused") = 27,
     kArrayInstanceConstructorModified = 28,
-    kLegacyFunctionDeclaration V8_DEPRECATE_SOON("unused") = 29,
-    kRegExpPrototypeSourceGetter V8_DEPRECATE_SOON("unused") = 30,
-    kRegExpPrototypeOldFlagGetter V8_DEPRECATE_SOON("unused") = 31,
+    kLegacyFunctionDeclaration V8_DEPRECATED("unused") = 29,
+    kRegExpPrototypeSourceGetter V8_DEPRECATED("unused") = 30,
+    kRegExpPrototypeOldFlagGetter V8_DEPRECATED("unused") = 31,
     kDecimalWithLeadingZeroInStrictMode = 32,
     kLegacyDateParser = 33,
     kDefineGetterOrSetterWouldThrow = 34,

@@ -458,22 +458,21 @@ class V8_EXPORT Isolate {
     kAssigmentExpressionLHSIsCallInSloppy = 36,
     kAssigmentExpressionLHSIsCallInStrict = 37,
     kPromiseConstructorReturnedUndefined = 38,
-    kConstructorNonUndefinedPrimitiveReturn V8_DEPRECATE_SOON("unused") = 39,
-    kLabeledExpressionStatement V8_DEPRECATE_SOON("unused") = 40,
-    kLineOrParagraphSeparatorAsLineTerminator V8_DEPRECATE_SOON("unused") = 41,
+    kConstructorNonUndefinedPrimitiveReturn V8_DEPRECATED("unused") = 39,
+    kLabeledExpressionStatement V8_DEPRECATED("unused") = 40,
+    kLineOrParagraphSeparatorAsLineTerminator V8_DEPRECATED("unused") = 41,
     kIndexAccessor = 42,
     kErrorCaptureStackTrace = 43,
     kErrorPrepareStackTrace = 44,
     kErrorStackTraceLimit = 45,
     kWebAssemblyInstantiation = 46,
     kDeoptimizerDisableSpeculation = 47,
-    kArrayPrototypeSortJSArrayModifiedPrototype V8_DEPRECATE_SOON("unused") =
-        48,
+    kArrayPrototypeSortJSArrayModifiedPrototype V8_DEPRECATED("unused") = 48,
     kFunctionTokenOffsetTooLongForToString = 49,
     kWasmSharedMemory = 50,
     kWasmThreadOpcodes = 51,
-    kAtomicsNotify V8_DEPRECATE_SOON("unused") = 52,
-    kAtomicsWake V8_DEPRECATE_SOON("unused") = 53,
+    kAtomicsNotify V8_DEPRECATED("unused") = 52,
+    kAtomicsWake V8_DEPRECATED("unused") = 53,
     kCollator = 54,
     kNumberFormat = 55,
     kDateTimeFormat = 56,

@@ -483,7 +482,7 @@ class V8_EXPORT Isolate {
     kListFormat = 60,
     kSegmenter = 61,
     kStringLocaleCompare = 62,
-    kStringToLocaleUpperCase V8_DEPRECATE_SOON("unused") = 63,
+    kStringToLocaleUpperCase V8_DEPRECATED("unused") = 63,
     kStringToLocaleLowerCase = 64,
     kNumberToLocaleString = 65,
     kDateToLocaleString = 66,

@@ -491,14 +490,14 @@ class V8_EXPORT Isolate {
     kDateToLocaleTimeString = 68,
     kAttemptOverrideReadOnlyOnPrototypeSloppy = 69,
     kAttemptOverrideReadOnlyOnPrototypeStrict = 70,
-    kOptimizedFunctionWithOneShotBytecode V8_DEPRECATE_SOON("unused") = 71,
+    kOptimizedFunctionWithOneShotBytecode V8_DEPRECATED("unused") = 71,
     kRegExpMatchIsTrueishOnNonJSRegExp = 72,
     kRegExpMatchIsFalseishOnJSRegExp = 73,
-    kDateGetTimezoneOffset V8_DEPRECATE_SOON("unused") = 74,
+    kDateGetTimezoneOffset V8_DEPRECATED("unused") = 74,
     kStringNormalize = 75,
     kCallSiteAPIGetFunctionSloppyCall = 76,
     kCallSiteAPIGetThisSloppyCall = 77,
-    kRegExpMatchAllWithNonGlobalRegExp V8_DEPRECATE_SOON("unused") = 78,
+    kRegExpMatchAllWithNonGlobalRegExp V8_DEPRECATED("unused") = 78,
     kRegExpExecCalledOnSlowRegExp = 79,
     kRegExpReplaceCalledOnSlowRegExp = 80,
     kDisplayNames = 81,

@@ -529,9 +528,9 @@ class V8_EXPORT Isolate {
     kWasmSimdOpcodes = 106,
     kVarRedeclaredCatchBinding = 107,
     kWasmRefTypes = 108,
-    kWasmBulkMemory V8_DEPRECATE_SOON(
+    kWasmBulkMemory V8_DEPRECATED(
         "Unused since 2021 (https://crrev.com/c/2622913)") = 109,
-    kWasmMultiValue V8_DEPRECATE_SOON(
+    kWasmMultiValue V8_DEPRECATED(
         "Unused since 2021 (https://crrev.com/c/2817790)") = 110,
     kWasmExceptionHandling = 111,
     kInvalidatedMegaDOMProtector = 112,

@@ -541,8 +540,8 @@ class V8_EXPORT Isolate {
     kAsyncStackTaggingCreateTaskCall = 116,
     kDurationFormat = 117,
     kInvalidatedNumberStringNotRegexpLikeProtector = 118,
-    kRegExpUnicodeSetIncompatibilitiesWithUnicodeMode V8_DEPRECATE_SOON(
-        "unused") = 119,
+    kRegExpUnicodeSetIncompatibilitiesWithUnicodeMode V8_DEPRECATED("unused") =
+        119,
     kImportAssertionDeprecatedSyntax = 120,
     kLocaleInfoObsoletedGetters = 121,
     kLocaleInfoFunctions = 122,

@@ -551,6 +550,7 @@ class V8_EXPORT Isolate {
     kWasmMemory64 = 125,
     kWasmMultiMemory = 126,
     kWasmGC = 127,
+    kWasmImportedStrings = 128,
 
     // If you add new values here, you'll also need to update Chromium's:
     // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
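Embedders consume these UseCounterFeature values through Isolate::SetUseCounterCallback, so hardening the deprecations from V8_DEPRECATE_SOON to V8_DEPRECATED now produces compiler warnings in callbacks that still reference the unused entries. A minimal sketch of the callback pattern (the metric-recording body is left out and is the embedder's concern):

#include <v8.h>

// Observe use counters reported by V8; only react to features the embedder
// recognizes, since deprecated entries are never reported anymore.
static void OnUseCounter(v8::Isolate* isolate,
                         v8::Isolate::UseCounterFeature feature) {
  if (feature == v8::Isolate::kWasmExceptionHandling) {
    // Record telemetry for the embedder here.
  }
}

void InstallUseCounterCallback(v8::Isolate* isolate) {
  isolate->SetUseCounterCallback(OnUseCounter);
}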
154
deps/v8/include/v8-memory-span.h
vendored
154
deps/v8/include/v8-memory-span.h
vendored
|
|
@ -7,12 +7,16 @@
|
|||
|
||||
#include <stddef.h>
|
||||
|
||||
#include <array>
|
||||
#include <iterator>
|
||||
#include <type_traits>
|
||||
|
||||
#include "v8config.h" // NOLINT(build/include_directory)
|
||||
|
||||
namespace v8 {
|
||||
|
||||
/**
|
||||
* Points to an unowned continous buffer holding a known number of elements.
|
||||
* Points to an unowned contiguous buffer holding a known number of elements.
|
||||
*
|
||||
* This is similar to std::span (under consideration for C++20), but does not
|
||||
* require advanced C++ support. In the (far) future, this may be replaced with
|
||||
|
|
@ -23,21 +27,167 @@ namespace v8 {
|
|||
*/
|
||||
template <typename T>
|
||||
class V8_EXPORT MemorySpan {
|
||||
private:
|
||||
/** Some C++ machinery, brought from the future. */
|
||||
template <typename From, typename To>
|
||||
using is_array_convertible = std::is_convertible<From (*)[], To (*)[]>;
|
||||
template <typename From, typename To>
|
||||
static constexpr bool is_array_convertible_v =
|
||||
is_array_convertible<From, To>::value;
|
||||
|
||||
template <typename It>
|
||||
using iter_reference_t = decltype(*std::declval<It&>());
|
||||
|
||||
template <typename It, typename = void>
|
||||
struct is_compatible_iterator : std::false_type {};
|
||||
template <typename It>
|
||||
struct is_compatible_iterator<
|
||||
It,
|
||||
std::void_t<
|
||||
std::is_base_of<std::random_access_iterator_tag,
|
||||
typename std::iterator_traits<It>::iterator_category>,
|
||||
is_array_convertible<std::remove_reference_t<iter_reference_t<It>>,
|
||||
T>>> : std::true_type {};
|
||||
template <typename It>
|
||||
static constexpr bool is_compatible_iterator_v =
|
||||
is_compatible_iterator<It>::value;
  template <typename U>
  static constexpr U* to_address(U* p) noexcept {
    return p;
  }

  template <typename It,
            typename = std::void_t<decltype(std::declval<It&>().operator->())>>
  static constexpr auto to_address(It it) noexcept {
    return it.operator->();
  }

 public:
  /** The default constructor creates an empty span. */
  constexpr MemorySpan() = default;

  constexpr MemorySpan(T* data, size_t size) : data_(data), size_(size) {}

  /** Constructor from nullptr and count, for backwards compatibility.
   * This is not compatible with C++20 std::span.
   */
  constexpr MemorySpan(std::nullptr_t, size_t) {}

  /** Constructor from "iterator" and count. */
  template <typename Iterator,
            std::enable_if_t<is_compatible_iterator_v<Iterator>, bool> = true>
  constexpr MemorySpan(Iterator first,
                       size_t count)  // NOLINT(runtime/explicit)
      : data_(to_address(first)), size_(count) {}

  /** Constructor from two "iterators". */
  template <typename Iterator,
            std::enable_if_t<is_compatible_iterator_v<Iterator> &&
                                 !std::is_convertible_v<Iterator, size_t>,
                             bool> = true>
  constexpr MemorySpan(Iterator first,
                       Iterator last)  // NOLINT(runtime/explicit)
      : data_(to_address(first)), size_(last - first) {}

  /** Implicit conversion from C-style array. */
  template <size_t N>
  constexpr MemorySpan(T (&a)[N]) noexcept  // NOLINT(runtime/explicit)
      : data_(a), size_(N) {}

  /** Implicit conversion from std::array. */
  template <typename U, size_t N,
            std::enable_if_t<is_array_convertible_v<U, T>, bool> = true>
  constexpr MemorySpan(
      std::array<U, N>& a) noexcept  // NOLINT(runtime/explicit)
      : data_(a.data()), size_{N} {}

  /** Implicit conversion from const std::array. */
  template <typename U, size_t N,
            std::enable_if_t<is_array_convertible_v<const U, T>, bool> = true>
  constexpr MemorySpan(
      const std::array<U, N>& a) noexcept  // NOLINT(runtime/explicit)
      : data_(a.data()), size_{N} {}

  /** Returns a pointer to the beginning of the buffer. */
  constexpr T* data() const { return data_; }
  /** Returns the number of elements that the buffer holds. */
  constexpr size_t size() const { return size_; }

  constexpr T& operator[](size_t i) const { return data_[i]; }

  class Iterator {
   public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = T;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type*;
    using reference = value_type&;

    T& operator*() const { return *ptr_; }
    T* operator->() const { return ptr_; }

    bool operator==(Iterator other) const { return ptr_ == other.ptr_; }
    bool operator!=(Iterator other) const { return !(*this == other); }

    Iterator& operator++() {
      ++ptr_;
      return *this;
    }

    Iterator operator++(int) {
      Iterator temp(*this);
      ++(*this);
      return temp;
    }

   private:
    explicit Iterator(T* ptr) : ptr_(ptr) {}

    T* ptr_ = nullptr;
  };

  Iterator begin() const { return Iterator(data_); }
  Iterator end() const { return Iterator(data_ + size_); }

 private:
  T* data_ = nullptr;
  size_t size_ = 0;
};

/**
 * Helper function template to create an array of fixed length, initialized by
 * the provided initializer list, without explicitly specifying the array size,
 * e.g.
 *
 *   auto arr = v8::to_array<Local<String>>({v8_str("one"), v8_str("two")});
 *
 * In the future, this may be replaced with or aliased to std::to_array (under
 * consideration for C++20).
 */

namespace detail {
template <class T, std::size_t N, std::size_t... I>
constexpr std::array<std::remove_cv_t<T>, N> to_array_lvalue_impl(
    T (&a)[N], std::index_sequence<I...>) {
  return {{a[I]...}};
}

template <class T, std::size_t N, std::size_t... I>
constexpr std::array<std::remove_cv_t<T>, N> to_array_rvalue_impl(
    T (&&a)[N], std::index_sequence<I...>) {
  return {{std::move(a[I])...}};
}
}  // namespace detail

template <class T, std::size_t N>
constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N]) {
  return detail::to_array_lvalue_impl(a, std::make_index_sequence<N>{});
}

template <class T, std::size_t N>
constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&&a)[N]) {
  return detail::to_array_rvalue_impl(std::move(a),
                                      std::make_index_sequence<N>{});
}

}  // namespace v8

#endif  // INCLUDE_V8_MEMORY_SPAN_H_
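A minimal sketch of how these constructors and the to_array helper compose (illustrative only; plain int data is used here purely for demonstration, it is not part of the diff):

  // All MemorySpan constructors produce the same non-owning (data, size) view.
  int c_array[3] = {1, 2, 3};
  std::array<int, 3> std_array = {4, 5, 6};

  v8::MemorySpan<int> from_c(c_array);      // implicit, size deduced from N
  v8::MemorySpan<int> from_std(std_array);  // implicit, via is_array_convertible
  v8::MemorySpan<int> prefix(c_array, 2);   // pointer + explicit count

  int sum = 0;
  for (int v : from_c) sum += v;  // uses the nested forward Iterator

  // to_array deduces the array length from the initializer list:
  auto arr = v8::to_array<int>({7, 8, 9});  // std::array<int, 3>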
1 deps/v8/include/v8-metrics.h vendored

@@ -55,6 +55,7 @@ struct GarbageCollectionFullCycle {
  double efficiency_cpp_in_bytes_per_us = -1.0;
  double main_thread_efficiency_in_bytes_per_us = -1.0;
  double main_thread_efficiency_cpp_in_bytes_per_us = -1.0;
  int64_t incremental_marking_start_stop_wall_clock_duration_in_us = -1;
};

struct GarbageCollectionFullMainThreadIncrementalMark {
2 deps/v8/include/v8-object.h vendored

@@ -174,7 +174,7 @@ enum AccessControl {
  DEFAULT = 0,
  ALL_CAN_READ = 1,
  ALL_CAN_WRITE = 1 << 1,
  PROHIBITS_OVERWRITING V8_ENUM_DEPRECATE_SOON("unused") = 1 << 2
  PROHIBITS_OVERWRITING V8_ENUM_DEPRECATED("unused") = 1 << 2
};

/**
2 deps/v8/include/v8-persistent-handle.h vendored

@@ -241,7 +241,7 @@ class NonCopyablePersistentTraits {
 * This will clone the contents of storage cell, but not any of the flags, etc.
 */
template <class T>
struct CopyablePersistentTraits {
struct V8_DEPRECATED("Use v8::Global instead") CopyablePersistentTraits {
  using CopyablePersistent = Persistent<T, CopyablePersistentTraits<T>>;
  static const bool kResetInDestructor = true;
  template <class S, class M>
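A sketch of the migration this deprecation points at: replacing a copyable Persistent with the move-only v8::Global (names here are illustrative, not from the diff):

  // Before: implicitly copyable, now deprecated.
  // v8::Persistent<v8::Object, v8::CopyablePersistentTraits<v8::Object>> p;

  // After: v8::Global is move-only; duplicate explicitly when needed.
  v8::Global<v8::Object> g(isolate, local_object);
  v8::Global<v8::Object> moved = std::move(g);               // transfer ownership
  v8::Global<v8::Object> copy(isolate, moved.Get(isolate));  // explicit copy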
10 deps/v8/include/v8-profiler.h vendored

@@ -921,22 +921,12 @@ class V8_EXPORT EmbedderGraph {
  virtual ~EmbedderGraph() = default;
};

class QueryObjectPredicate {
 public:
  virtual ~QueryObjectPredicate() = default;
  virtual bool Filter(v8::Local<v8::Object> object) = 0;
};

/**
 * Interface for controlling heap profiling. Instance of the
 * profiler can be retrieved using v8::Isolate::GetHeapProfiler.
 */
class V8_EXPORT HeapProfiler {
 public:
  void QueryObjects(v8::Local<v8::Context> context,
                    QueryObjectPredicate* predicate,
                    std::vector<v8::Global<v8::Object>>* objects);

  enum SamplingFlags {
    kSamplingNoFlags = 0,
    kSamplingForceGC = 1 << 0,
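With QueryObjectPredicate hoisted above HeapProfiler, embedders can filter heap objects roughly like this (a sketch; the wrapper-only predicate is just an example, not from the diff):

  class WrapperPredicate final : public v8::QueryObjectPredicate {
   public:
    // Keep only objects that carry embedder internal fields.
    bool Filter(v8::Local<v8::Object> object) override {
      return object->InternalFieldCount() > 0;
    }
  };

  WrapperPredicate predicate;
  std::vector<v8::Global<v8::Object>> matches;
  isolate->GetHeapProfiler()->QueryObjects(context, &predicate, &matches);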
6 deps/v8/include/v8-script.h vendored

@@ -16,6 +16,7 @@
#include "v8-data.h"          // NOLINT(build/include_directory)
#include "v8-local-handle.h"  // NOLINT(build/include_directory)
#include "v8-maybe.h"         // NOLINT(build/include_directory)
#include "v8-memory-span.h"   // NOLINT(build/include_directory)
#include "v8-message.h"       // NOLINT(build/include_directory)
#include "v8config.h"         // NOLINT(build/include_directory)

@@ -285,10 +286,15 @@ class V8_EXPORT Module : public Data {
   * module_name is used solely for logging/debugging and doesn't affect module
   * behavior.
   */
  V8_DEPRECATE_SOON("Please use the version that takes a MemorySpan")
  static Local<Module> CreateSyntheticModule(
      Isolate* isolate, Local<String> module_name,
      const std::vector<Local<String>>& export_names,
      SyntheticModuleEvaluationSteps evaluation_steps);
  static Local<Module> CreateSyntheticModule(
      Isolate* isolate, Local<String> module_name,
      const MemorySpan<const Local<String>>& export_names,
      SyntheticModuleEvaluationSteps evaluation_steps);

  /**
   * Set this module's exported value for the name export_name to the specified
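Migrating a caller to the non-deprecated overload is mostly mechanical. A sketch, where v8_str and MyEvaluationSteps stand in for the embedder's own helpers:

  // std::array converts implicitly to MemorySpan<const Local<String>>.
  auto export_names = v8::to_array<v8::Local<v8::String>>(
      {v8_str("default"), v8_str("version")});
  v8::Local<v8::Module> module = v8::Module::CreateSyntheticModule(
      isolate, v8_str("embedder:config"), export_names, MyEvaluationSteps);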
104 deps/v8/include/v8-typed-array.h vendored

@@ -5,14 +5,14 @@
#ifndef INCLUDE_V8_TYPED_ARRAY_H_
#define INCLUDE_V8_TYPED_ARRAY_H_

#include <limits>

#include "v8-array-buffer.h"  // NOLINT(build/include_directory)
#include "v8-local-handle.h"  // NOLINT(build/include_directory)
#include "v8config.h"         // NOLINT(build/include_directory)

namespace v8 {

class SharedArrayBuffer;

/**
 * A base class for an instance of TypedArray series of constructors
 * (ES6 draft 15.13.6).

@@ -20,12 +20,25 @@ class SharedArrayBuffer;
class V8_EXPORT TypedArray : public ArrayBufferView {
 public:
  /*
   * The largest typed array size that can be constructed using New.
   * The largest supported typed array byte size. Each subclass defines a
   * type-specific kMaxLength for the maximum length that can be passed to New.
   */
  static constexpr size_t kMaxLength =
      internal::kApiSystemPointerSize == 4
          ? internal::kSmiMaxValue
          : static_cast<size_t>(uint64_t{1} << 32);
#if V8_ENABLE_SANDBOX
  static constexpr size_t kMaxByteLength =
      internal::kMaxSafeBufferSizeForSandbox;
#elif V8_HOST_ARCH_32_BIT
  static constexpr size_t kMaxByteLength = std::numeric_limits<int>::max();
#else
  // The maximum safe integer (2^53 - 1).
  static constexpr size_t kMaxByteLength =
      static_cast<size_t>((uint64_t{1} << 53) - 1);
#endif

  /*
   * Deprecated: Use |kMaxByteLength| or the type-specific |kMaxLength| fields.
   */
  V8_DEPRECATE_SOON("Use kMaxByteLength")
  static constexpr size_t kMaxLength = kMaxByteLength;

  /**
   * Number of elements in this typed array

@@ -50,6 +63,13 @@ class V8_EXPORT TypedArray : public ArrayBufferView {
 */
class V8_EXPORT Uint8Array : public TypedArray {
 public:
  /*
   * The largest Uint8Array size that can be constructed using New.
   */
  static constexpr size_t kMaxLength =
      TypedArray::kMaxByteLength / sizeof(uint8_t);
  static_assert(sizeof(uint8_t) == 1);

  static Local<Uint8Array> New(Local<ArrayBuffer> array_buffer,
                               size_t byte_offset, size_t length);
  static Local<Uint8Array> New(Local<SharedArrayBuffer> shared_array_buffer,

@@ -71,6 +91,13 @@ class V8_EXPORT Uint8Array : public TypedArray {
 */
class V8_EXPORT Uint8ClampedArray : public TypedArray {
 public:
  /*
   * The largest Uint8ClampedArray size that can be constructed using New.
   */
  static constexpr size_t kMaxLength =
      TypedArray::kMaxByteLength / sizeof(uint8_t);
  static_assert(sizeof(uint8_t) == 1);

  static Local<Uint8ClampedArray> New(Local<ArrayBuffer> array_buffer,
                                      size_t byte_offset, size_t length);
  static Local<Uint8ClampedArray> New(

@@ -93,6 +120,13 @@ class V8_EXPORT Uint8ClampedArray : public TypedArray {
 */
class V8_EXPORT Int8Array : public TypedArray {
 public:
  /*
   * The largest Int8Array size that can be constructed using New.
   */
  static constexpr size_t kMaxLength =
      TypedArray::kMaxByteLength / sizeof(int8_t);
  static_assert(sizeof(int8_t) == 1);

  static Local<Int8Array> New(Local<ArrayBuffer> array_buffer,
                              size_t byte_offset, size_t length);
  static Local<Int8Array> New(Local<SharedArrayBuffer> shared_array_buffer,

@@ -114,6 +148,13 @@ class V8_EXPORT Int8Array : public TypedArray {
 */
class V8_EXPORT Uint16Array : public TypedArray {
 public:
  /*
   * The largest Uint16Array size that can be constructed using New.
   */
  static constexpr size_t kMaxLength =
      TypedArray::kMaxByteLength / sizeof(uint16_t);
  static_assert(sizeof(uint16_t) == 2);

  static Local<Uint16Array> New(Local<ArrayBuffer> array_buffer,
                                size_t byte_offset, size_t length);
  static Local<Uint16Array> New(Local<SharedArrayBuffer> shared_array_buffer,

@@ -135,6 +176,13 @@ class V8_EXPORT Uint16Array : public TypedArray {
 */
class V8_EXPORT Int16Array : public TypedArray {
 public:
  /*
   * The largest Int16Array size that can be constructed using New.
   */
  static constexpr size_t kMaxLength =
      TypedArray::kMaxByteLength / sizeof(int16_t);
  static_assert(sizeof(int16_t) == 2);

  static Local<Int16Array> New(Local<ArrayBuffer> array_buffer,
                               size_t byte_offset, size_t length);
  static Local<Int16Array> New(Local<SharedArrayBuffer> shared_array_buffer,

@@ -156,6 +204,13 @@ class V8_EXPORT Int16Array : public TypedArray {
 */
class V8_EXPORT Uint32Array : public TypedArray {
 public:
  /*
   * The largest Uint32Array size that can be constructed using New.
   */
  static constexpr size_t kMaxLength =
      TypedArray::kMaxByteLength / sizeof(uint32_t);
  static_assert(sizeof(uint32_t) == 4);

  static Local<Uint32Array> New(Local<ArrayBuffer> array_buffer,
                                size_t byte_offset, size_t length);
  static Local<Uint32Array> New(Local<SharedArrayBuffer> shared_array_buffer,

@@ -177,6 +232,13 @@ class V8_EXPORT Uint32Array : public TypedArray {
 */
class V8_EXPORT Int32Array : public TypedArray {
 public:
  /*
   * The largest Int32Array size that can be constructed using New.
   */
  static constexpr size_t kMaxLength =
      TypedArray::kMaxByteLength / sizeof(int32_t);
  static_assert(sizeof(int32_t) == 4);

  static Local<Int32Array> New(Local<ArrayBuffer> array_buffer,
                               size_t byte_offset, size_t length);
  static Local<Int32Array> New(Local<SharedArrayBuffer> shared_array_buffer,

@@ -198,6 +260,13 @@ class V8_EXPORT Int32Array : public TypedArray {
 */
class V8_EXPORT Float32Array : public TypedArray {
 public:
  /*
   * The largest Float32Array size that can be constructed using New.
   */
  static constexpr size_t kMaxLength =
      TypedArray::kMaxByteLength / sizeof(float);
  static_assert(sizeof(float) == 4);

  static Local<Float32Array> New(Local<ArrayBuffer> array_buffer,
                                 size_t byte_offset, size_t length);
  static Local<Float32Array> New(Local<SharedArrayBuffer> shared_array_buffer,

@@ -219,6 +288,13 @@ class V8_EXPORT Float32Array : public TypedArray {
 */
class V8_EXPORT Float64Array : public TypedArray {
 public:
  /*
   * The largest Float64Array size that can be constructed using New.
   */
  static constexpr size_t kMaxLength =
      TypedArray::kMaxByteLength / sizeof(double);
  static_assert(sizeof(double) == 8);

  static Local<Float64Array> New(Local<ArrayBuffer> array_buffer,
                                 size_t byte_offset, size_t length);
  static Local<Float64Array> New(Local<SharedArrayBuffer> shared_array_buffer,

@@ -240,6 +316,13 @@ class V8_EXPORT Float64Array : public TypedArray {
 */
class V8_EXPORT BigInt64Array : public TypedArray {
 public:
  /*
   * The largest BigInt64Array size that can be constructed using New.
   */
  static constexpr size_t kMaxLength =
      TypedArray::kMaxByteLength / sizeof(int64_t);
  static_assert(sizeof(int64_t) == 8);

  static Local<BigInt64Array> New(Local<ArrayBuffer> array_buffer,
                                  size_t byte_offset, size_t length);
  static Local<BigInt64Array> New(Local<SharedArrayBuffer> shared_array_buffer,

@@ -261,6 +344,13 @@ class V8_EXPORT BigInt64Array : public TypedArray {
 */
class V8_EXPORT BigUint64Array : public TypedArray {
 public:
  /*
   * The largest BigUint64Array size that can be constructed using New.
   */
  static constexpr size_t kMaxLength =
      TypedArray::kMaxByteLength / sizeof(uint64_t);
  static_assert(sizeof(uint64_t) == 8);

  static Local<BigUint64Array> New(Local<ArrayBuffer> array_buffer,
                                   size_t byte_offset, size_t length);
  static Local<BigUint64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
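The practical consequence of this split: the length limit is now per element type, derived from the shared byte limit. A sketch of a guard an embedder might write (buffer is an assumed Local<ArrayBuffer>, not from the diff):

  // Float64Array::kMaxLength == TypedArray::kMaxByteLength / sizeof(double).
  size_t length = 1000;
  if (length <= v8::Float64Array::kMaxLength) {
    v8::Local<v8::Float64Array> view =
        v8::Float64Array::New(buffer, /*byte_offset=*/0, length);
  }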
45 deps/v8/include/v8-value.h vendored

@@ -391,7 +391,7 @@ class V8_EXPORT Value : public Data {
  V8_WARN_UNUSED_RESULT MaybeLocal<String> ToDetailString(
      Local<Context> context) const;
  /**
   * Perform the equivalent of `Object(value)` in JS.
   * Perform the equivalent of `Tagged<Object>(value)` in JS.
   */
  V8_WARN_UNUSED_RESULT MaybeLocal<Object> ToObject(
      Local<Context> context) const;

@@ -469,6 +469,41 @@ class V8_EXPORT Value : public Data {
  static void CheckCast(Data* that);
};

/**
 * Can be used to avoid repeated expensive type checks for groups of objects
 * that are expected to be similar (e.g. when Blink converts a bunch of
 * JavaScript objects to "ScriptWrappable" after a "HasInstance" check) by
 * making use of V8-internal "hidden classes". An object that has passed the
 * full check can be remembered via {Update}; further objects can be queried
 * using {Matches}.
 * Note that the answer will be conservative/"best-effort": when {Matches}
 * returns true, then the {candidate} can be relied upon to have the same
 * shape/constructor/prototype/etc. as the {baseline}. Otherwise, no reliable
 * statement can be made (the objects might still have indistinguishable shapes
 * for all intents and purposes, but this mechanism, being optimized for speed,
 * couldn't determine that quickly).
 */
class V8_EXPORT TypecheckWitness {
 public:
  explicit TypecheckWitness(Isolate* isolate);

  /**
   * Checks whether {candidate} can cheaply be identified as being "similar"
   * to the {baseline} that was passed to {Update} earlier.
   * It's safe to call this on an uninitialized {TypecheckWitness} instance:
   * it will then return {false} for any input.
   */
  V8_INLINE bool Matches(Local<Value> candidate) const;

  /**
   * Remembers a new baseline for future {Matches} queries.
   */
  void Update(Local<Value> baseline);

 private:
  Local<Data> cached_map_;
};

template <>
V8_INLINE Value* Value::Cast(Data* value) {
#ifdef V8_ENABLE_CHECKS

@@ -562,6 +597,14 @@ bool Value::QuickIsString() const {
#endif  // V8_STATIC_ROOTS_BOOL
}

bool TypecheckWitness::Matches(Local<Value> candidate) const {
  internal::Address obj = internal::ValueHelper::ValueAsAddress(*candidate);
  internal::Address obj_map = internal::Internals::LoadMap(obj);
  internal::Address cached =
      internal::ValueHelper::ValueAsAddress(*cached_map_);
  return obj_map == cached;
}

}  // namespace v8

#endif  // INCLUDE_V8_VALUE_H_
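A usage sketch for the new witness, following the doc comment above (SlowFullCheck and UseWrappable are placeholders for the embedder's expensive check and its consumer):

  v8::TypecheckWitness witness(isolate);
  for (v8::Local<v8::Value> candidate : values) {
    if (!witness.Matches(candidate)) {
      if (!SlowFullCheck(candidate)) continue;  // full type check failed
      witness.Update(candidate);  // remember the verified hidden class
    }
    UseWrappable(candidate);  // fast path: same map as the baseline
  }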
6 deps/v8/include/v8-version.h vendored

@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 11
#define V8_MINOR_VERSION 8
#define V8_BUILD_NUMBER 172
#define V8_PATCH_LEVEL 17
#define V8_MINOR_VERSION 9
#define V8_BUILD_NUMBER 169
#define V8_PATCH_LEVEL 7

// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
3 deps/v8/infra/mb/mb_config.pyl vendored

@@ -288,6 +288,7 @@
    'v8_linux64_arm64_no_wasm_compile_dbg': 'debug_arm64_webassembly_disabled',
    'v8_linux64_verify_csa_compile_rel': 'release_x64_verify_csa',
    'v8_linux64_asan_compile_rel': 'release_x64_asan_minimal_symbols',
    'v8_linux64_asan_sandbox_compile_rel': 'release_x64_asan_symbolized_expose_memory_corruption',
    'v8_linux64_cfi_compile_rel': 'release_x64_cfi',
    'v8_linux64_fuzzilli_compile_rel': 'release_x64_fuzzilli',
    'v8_linux64_loong64_compile_rel': 'release_simulate_loong64',

@@ -750,7 +751,7 @@
  'mixins': {
    'android': {
      'gn_args': 'target_os="android" v8_android_log_stdout=true default_min_sdk_version=19',
      'gn_args': 'target_os="android" v8_android_log_stdout=true default_min_sdk_version=21',
    },

    'android_strip_outputs': {
13 deps/v8/infra/testing/builders.pyl vendored

@@ -489,6 +489,7 @@
        {'name': 'webkit', 'variant': 'stress_sampling'},
        # Stress snapshot.
        {'name': 'mjsunit', 'variant': 'stress_snapshot'},
        {'name': 'mjsunit', 'variant': 'rehash_snapshot'},
        # Experimental regexp engine.
        {'name': 'mjsunit', 'variant': 'experimental_regexp'},
        # Variants for maglev.

@@ -554,11 +555,6 @@
        {'name': 'mozilla', 'variant': 'minor_ms'},
        {'name': 'test262', 'variant': 'minor_ms', 'shards': 2},
        {'name': 'mjsunit', 'variant': 'minor_ms'},
        {'name': 'v8testing', 'variant': 'concurrent_minor_ms'},
        {'name': 'benchmarks', 'variant': 'concurrent_minor_ms'},
        {'name': 'mozilla', 'variant': 'concurrent_minor_ms'},
        {'name': 'test262', 'variant': 'concurrent_minor_ms', 'shards': 2},
        {'name': 'mjsunit', 'variant': 'concurrent_minor_ms'},
      ],
    },
    'v8_linux64_msan_rel': {

@@ -1534,11 +1530,6 @@
        {'name': 'mozilla', 'variant': 'minor_ms'},
        {'name': 'test262', 'variant': 'minor_ms', 'shards': 2},
        {'name': 'mjsunit', 'variant': 'minor_ms'},
        {'name': 'v8testing', 'variant': 'concurrent_minor_ms'},
        {'name': 'benchmarks', 'variant': 'concurrent_minor_ms'},
        {'name': 'mozilla', 'variant': 'concurrent_minor_ms'},
        {'name': 'test262', 'variant': 'concurrent_minor_ms', 'shards': 2},
        {'name': 'mjsunit', 'variant': 'concurrent_minor_ms'},
      ],
    },
    'V8 Linux64 - disable runtime call stats': {

@@ -1561,6 +1552,7 @@
        {'name': 'webkit', 'variant': 'stress_sampling'},
        # Stress snapshot.
        {'name': 'mjsunit', 'variant': 'stress_snapshot'},
        {'name': 'mjsunit', 'variant': 'rehash_snapshot'},
        # Experimental regexp engine.
        {'name': 'mjsunit', 'variant': 'experimental_regexp'},
        # Variants for maglev.

@@ -1624,6 +1616,7 @@
        {'name': 'webkit', 'variant': 'stress_sampling'},
        # Stress snapshot.
        {'name': 'mjsunit', 'variant': 'stress_snapshot'},
        {'name': 'mjsunit', 'variant': 'rehash_snapshot'},
        # Experimental regexp engine.
        {'name': 'mjsunit', 'variant': 'experimental_regexp'},
        # Variants for maglev.
2 deps/v8/src/api/api-arguments-inl.h vendored

@@ -32,7 +32,7 @@ CustomArgumentsBase::CustomArgumentsBase(Isolate* isolate)

template <typename T>
CustomArguments<T>::~CustomArguments() {
  slot_at(kReturnValueIndex).store(Object(kHandleZapValue));
  slot_at(kReturnValueIndex).store(Tagged<Object>(kHandleZapValue));
}

template <typename T>
6 deps/v8/src/api/api-arguments.cc vendored

@@ -21,7 +21,8 @@ PropertyCallbackArguments::PropertyCallbackArguments(
  slot_at(T::kThisIndex).store(self);
  slot_at(T::kHolderIndex).store(holder);
  slot_at(T::kDataIndex).store(data);
  slot_at(T::kIsolateIndex).store(Object(reinterpret_cast<Address>(isolate)));
  slot_at(T::kIsolateIndex)
      .store(Tagged<Object>(reinterpret_cast<Address>(isolate)));
  int value = Internals::kInferShouldThrowMode;
  if (should_throw.IsJust()) {
    value = should_throw.FromJust();

@@ -45,7 +46,8 @@ FunctionCallbackArguments::FunctionCallbackArguments(
  slot_at(T::kDataIndex).store(data);
  slot_at(T::kHolderIndex).store(holder);
  slot_at(T::kNewTargetIndex).store(new_target);
  slot_at(T::kIsolateIndex).store(Object(reinterpret_cast<Address>(isolate)));
  slot_at(T::kIsolateIndex)
      .store(Tagged<Object>(reinterpret_cast<Address>(isolate)));
  // Here the hole is set as default value. It's converted to and not
  // directly exposed to js.
  // TODO(cbruni): Remove and/or use custom sentinel value.
8 deps/v8/src/api/api-inl.h vendored

@@ -117,7 +117,7 @@ TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)
      const v8::From* that, bool allow_empty_handle) { \
    DCHECK(allow_empty_handle || !v8::internal::ValueHelper::IsEmpty(that)); \
    DCHECK(v8::internal::ValueHelper::IsEmpty(that) || \
           Is##To(v8::internal::Object( \
           Is##To(v8::internal::Tagged<v8::internal::Object>( \
               v8::internal::ValueHelper::ValueAsAddress(that)))); \
    if (v8::internal::ValueHelper::IsEmpty(that)) { \
      return v8::internal::Handle<v8::internal::To>::null(); \

@@ -131,7 +131,7 @@ TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)
      const v8::From* that, bool allow_empty_handle) { \
    DCHECK(allow_empty_handle || !v8::internal::ValueHelper::IsEmpty(that)); \
    DCHECK(v8::internal::ValueHelper::IsEmpty(that) || \
           Is##To(v8::internal::Object( \
           Is##To(v8::internal::Tagged<v8::internal::Object>( \
               v8::internal::ValueHelper::ValueAsAddress(that)))); \
    return v8::internal::DirectHandle<v8::internal::To>( \
        v8::internal::ValueHelper::ValueAsAddress(that)); \

@@ -149,7 +149,7 @@ TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)
      const v8::From* that, bool allow_empty_handle) { \
    DCHECK(allow_empty_handle || !v8::internal::ValueHelper::IsEmpty(that)); \
    DCHECK(v8::internal::ValueHelper::IsEmpty(that) || \
           Is##To(v8::internal::Object( \
           Is##To(v8::internal::Tagged<v8::internal::Object>( \
               v8::internal::ValueHelper::ValueAsAddress(that)))); \
    return v8::internal::Handle<v8::internal::To>( \
        reinterpret_cast<v8::internal::Address*>( \

@@ -312,7 +312,7 @@ bool CopyAndConvertArrayToCppBuffer(Local<Array> src, T* dst,
  }

  i::DisallowGarbageCollection no_gc;
  i::Tagged<i::JSArray> obj = *reinterpret_cast<i::JSArray*>(*src);
  i::Tagged<i::JSArray> obj = *Utils::OpenHandle(*src);
  if (i::Object::IterationHasObservableEffects(obj)) {
    // The array has a custom iterator.
    return false;
49 deps/v8/src/api/api-natives.cc vendored

@@ -182,7 +182,7 @@ Tagged<Object> GetIntrinsic(Isolate* isolate, v8::Intrinsic intrinsic) {
    V8_INTRINSICS_LIST(GET_INTRINSIC_VALUE)
#undef GET_INTRINSIC_VALUE
  }
  return Object();
  return Tagged<Object>();
}

template <typename TemplateInfoT>

@@ -195,13 +195,13 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,

  // Walk the inheritance chain and copy all accessors to current object.
  int max_number_of_properties = 0;
  TemplateInfoT info = *data;
  Tagged<TemplateInfoT> info = *data;
  while (!info.is_null()) {
    Tagged<Object> props = info.property_accessors();
    Tagged<Object> props = info->property_accessors();
    if (!IsUndefined(props, isolate)) {
      max_number_of_properties += TemplateList::cast(props)->length();
      max_number_of_properties += ArrayList::cast(props)->Length();
    }
    info = info.GetParent(isolate);
    info = info->GetParent(isolate);
  }

  if (max_number_of_properties > 0) {

@@ -210,7 +210,9 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
    Handle<FixedArray> array =
        isolate->factory()->NewFixedArray(max_number_of_properties);

    for (Handle<TemplateInfoT> temp(*data, isolate); !temp->is_null();
    // TODO(leszeks): Avoid creating unnecessary handles for cases where we
    // don't need to append anything.
    for (Handle<TemplateInfoT> temp(*data, isolate); !(*temp).is_null();
         temp = handle(temp->GetParent(isolate), isolate)) {
      // Accumulate accessors.
      Tagged<Object> maybe_properties = temp->property_accessors();

@@ -233,28 +235,27 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,

  Tagged<Object> maybe_property_list = data->property_list();
  if (IsUndefined(maybe_property_list, isolate)) return obj;
  Handle<TemplateList> properties(TemplateList::cast(maybe_property_list),
                                  isolate);
  if (properties->length() == 0) return obj;
  Handle<ArrayList> properties(ArrayList::cast(maybe_property_list), isolate);
  if (properties->Length() == 0) return obj;

  int i = 0;
  for (int c = 0; c < data->number_of_properties(); c++) {
    auto name = handle(Name::cast(properties->get(i++)), isolate);
    Tagged<Object> bit = properties->get(i++);
    auto name = handle(Name::cast(properties->Get(i++)), isolate);
    Tagged<Object> bit = properties->Get(i++);
    if (IsSmi(bit)) {
      PropertyDetails details(Smi::cast(bit));
      PropertyAttributes attributes = details.attributes();
      PropertyKind kind = details.kind();

      if (kind == PropertyKind::kData) {
        auto prop_data = handle(properties->get(i++), isolate);
        auto prop_data = handle(properties->Get(i++), isolate);
        RETURN_ON_EXCEPTION(
            isolate,
            DefineDataProperty(isolate, obj, name, prop_data, attributes),
            JSObject);
      } else {
        auto getter = handle(properties->get(i++), isolate);
        auto setter = handle(properties->get(i++), isolate);
        auto getter = handle(properties->Get(i++), isolate);
        auto setter = handle(properties->Get(i++), isolate);
        RETURN_ON_EXCEPTION(isolate,
                            DefineAccessorProperty(isolate, obj, name, getter,
                                                   setter, attributes),

@@ -263,12 +264,12 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
    } else {
      // Intrinsic data property --- Get appropriate value from the current
      // context.
      PropertyDetails details(Smi::cast(properties->get(i++)));
      PropertyDetails details(Smi::cast(properties->Get(i++)));
      PropertyAttributes attributes = details.attributes();
      DCHECK_EQ(PropertyKind::kData, details.kind());

      v8::Intrinsic intrinsic =
          static_cast<v8::Intrinsic>(Smi::ToInt(properties->get(i++)));
          static_cast<v8::Intrinsic>(Smi::ToInt(properties->Get(i++)));
      auto prop_data = handle(GetIntrinsic(isolate, intrinsic), isolate);

      RETURN_ON_EXCEPTION(

@@ -560,11 +561,11 @@ MaybeHandle<JSFunction> InstantiateFunction(
void AddPropertyToPropertyList(Isolate* isolate, Handle<TemplateInfo> templ,
                               int length, Handle<Object>* data) {
  Tagged<Object> maybe_list = templ->property_list();
  Handle<TemplateList> list;
  Handle<ArrayList> list;
  if (IsUndefined(maybe_list, isolate)) {
    list = TemplateList::New(isolate, length);
    list = ArrayList::New(isolate, length, AllocationType::kOld);
  } else {
    list = handle(TemplateList::cast(maybe_list), isolate);
    list = handle(ArrayList::cast(maybe_list), isolate);
  }
  templ->set_number_of_properties(templ->number_of_properties() + 1);
  for (int i = 0; i < length; i++) {

@@ -572,7 +573,7 @@ void AddPropertyToPropertyList(Isolate* isolate, Handle<TemplateInfo> templ,
        data[i].is_null()
            ? Handle<Object>::cast(isolate->factory()->undefined_value())
            : data[i];
    list = TemplateList::Add(isolate, list, value);
    list = ArrayList::Add(isolate, list, value);
  }
  templ->set_property_list(*list);
}

@@ -678,13 +679,13 @@ void ApiNatives::AddNativeDataProperty(Isolate* isolate,
                                       Handle<TemplateInfo> info,
                                       Handle<AccessorInfo> property) {
  Tagged<Object> maybe_list = info->property_accessors();
  Handle<TemplateList> list;
  Handle<ArrayList> list;
  if (IsUndefined(maybe_list, isolate)) {
    list = TemplateList::New(isolate, 1);
    list = ArrayList::New(isolate, 1, AllocationType::kOld);
  } else {
    list = handle(TemplateList::cast(maybe_list), isolate);
    list = handle(ArrayList::cast(maybe_list), isolate);
  }
  list = TemplateList::Add(isolate, list, property);
  list = ArrayList::Add(isolate, list, property);
  info->set_property_accessors(*list);
}
326 deps/v8/src/api/api.cc vendored

@@ -731,7 +731,7 @@ i::Address* GlobalizeTracedReference(i::Isolate* i_isolate, i::Address value,
  auto result = i_isolate->traced_handles()->Create(value, slot, store_mode);
#ifdef VERIFY_HEAP
  if (i::v8_flags.verify_heap) {
    Object::ObjectVerify(i::Object(value), i_isolate);
    Object::ObjectVerify(i::Tagged<i::Object>(value), i_isolate);
  }
#endif  // VERIFY_HEAP
  return result.location();

@@ -800,7 +800,7 @@ i::Address* GlobalizeReference(i::Isolate* i_isolate, i::Address value) {
  i::Handle<i::Object> result = i_isolate->global_handles()->Create(value);
#ifdef VERIFY_HEAP
  if (i::v8_flags.verify_heap) {
    i::Object::ObjectVerify(i::Object(value), i_isolate);
    i::Object::ObjectVerify(i::Tagged<i::Object>(value), i_isolate);
  }
#endif  // VERIFY_HEAP
  return result.location();

@@ -920,8 +920,9 @@ EscapableHandleScope::EscapableHandleScope(Isolate* v8_isolate) {

i::Address* EscapableHandleScope::Escape(i::Address* escape_value) {
  i::Heap* heap = reinterpret_cast<i::Isolate*>(GetIsolate())->heap();
  Utils::ApiCheck(i::IsTheHole(i::Object(*escape_slot_), heap->isolate()),
                  "EscapableHandleScope::Escape", "Escape value set twice");
  Utils::ApiCheck(
      i::IsTheHole(i::Tagged<i::Object>(*escape_slot_), heap->isolate()),
      "EscapableHandleScope::Escape", "Escape value set twice");
  if (escape_value == nullptr) {
    *escape_slot_ = i::ReadOnlyRoots(heap).undefined_value().ptr();
    return nullptr;

@@ -1805,8 +1806,8 @@ void ObjectTemplate::SetAccessCheckCallback(AccessCheckCallback callback,
      i::Handle<i::AccessCheckInfo>::cast(struct_info);

  SET_FIELD_WRAPPED(i_isolate, info, set_callback, callback);
  info->set_named_interceptor(i::Object());
  info->set_indexed_interceptor(i::Object());
  info->set_named_interceptor(i::Tagged<i::Object>());
  info->set_indexed_interceptor(i::Tagged<i::Object>());

  if (data.IsEmpty()) {
    data = v8::Undefined(reinterpret_cast<v8::Isolate*>(i_isolate));

@@ -2443,7 +2444,17 @@ MaybeLocal<Value> Module::Evaluate(Local<Context> context) {

Local<Module> Module::CreateSyntheticModule(
    Isolate* v8_isolate, Local<String> module_name,
    const std::vector<Local<v8::String>>& export_names,
    const std::vector<Local<String>>& export_names,
    v8::Module::SyntheticModuleEvaluationSteps evaluation_steps) {
  return CreateSyntheticModule(
      v8_isolate, module_name,
      MemorySpan<const Local<String>>(export_names.begin(), export_names.end()),
      evaluation_steps);
}

Local<Module> Module::CreateSyntheticModule(
    Isolate* v8_isolate, Local<String> module_name,
    const MemorySpan<const Local<String>>& export_names,
    v8::Module::SyntheticModuleEvaluationSteps evaluation_steps) {
  auto i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);

@@ -2981,8 +2992,9 @@ void v8::TryCatch::operator delete(void*, size_t) { base::OS::Abort(); }
void v8::TryCatch::operator delete[](void*, size_t) { base::OS::Abort(); }

bool v8::TryCatch::HasCaught() const {
  return !IsTheHole(i::Object(reinterpret_cast<i::Address>(exception_)),
                    i_isolate_);
  return !IsTheHole(
      i::Tagged<i::Object>(reinterpret_cast<i::Address>(exception_)),
      i_isolate_);
}

bool v8::TryCatch::CanContinue() const { return can_continue_; }

@@ -3970,7 +3982,8 @@ MaybeLocal<Uint32> Value::ToUint32(Local<Context> context) const {
}

i::Isolate* i::IsolateFromNeverReadOnlySpaceObject(i::Address obj) {
  return i::GetIsolateFromWritableObject(i::HeapObject::cast(i::Object(obj)));
  return i::GetIsolateFromWritableObject(
      i::HeapObject::cast(i::Tagged<i::Object>(obj)));
}

bool i::ShouldThrowOnError(i::Isolate* i_isolate) {

@@ -4225,8 +4238,6 @@ void v8::ArrayBufferView::CheckCast(Value* that) {
                  "Value is not an ArrayBufferView");
}

constexpr size_t v8::TypedArray::kMaxLength;

void v8::TypedArray::CheckCast(Value* that) {
  i::Handle<i::Object> obj = Utils::OpenHandle(that);
  Utils::ApiCheck(i::IsJSTypedArray(*obj), "v8::TypedArray::Cast()",

@@ -6716,9 +6727,10 @@ MaybeLocal<Object> v8::Context::NewRemoteContext(
  i::Handle<i::AccessCheckInfo> access_check_info = i::handle(
      i::AccessCheckInfo::cast(global_constructor->GetAccessCheckInfo()),
      i_isolate);
  Utils::ApiCheck(access_check_info->named_interceptor() != i::Object(),
                  "v8::Context::NewRemoteContext",
                  "Global template needs to have access check handlers");
  Utils::ApiCheck(
      access_check_info->named_interceptor() != i::Tagged<i::Object>(),
      "v8::Context::NewRemoteContext",
      "Global template needs to have access check handlers");
  i::Handle<i::JSObject> global_proxy = CreateEnvironment<i::JSGlobalProxy>(
      i_isolate, nullptr, global_template, global_object, 0,
      DeserializeInternalFieldsCallback(), nullptr);

@@ -7024,7 +7036,7 @@ class ObjectVisitorDeepFreezer : i::ObjectVisitor {

  i::Isolate* isolate_;
  Context::DeepFreezeDelegate* delegate_;
  std::unordered_set<i::Object, i::Object::Hasher> done_list_;
  std::unordered_set<i::Tagged<i::Object>, i::Object::Hasher> done_list_;
  std::vector<i::Handle<i::JSReceiver>> objects_to_freeze_;
  std::vector<i::Handle<i::AccessorPair>> lazy_accessor_pairs_to_freeze_;
  base::Optional<ErrorInfo> error_;

@@ -7320,9 +7332,10 @@ MaybeLocal<v8::Object> FunctionTemplate::NewRemoteInstance() {
                  "InstanceTemplate needs to have access checks enabled");
  i::Handle<i::AccessCheckInfo> access_check_info = i::handle(
      i::AccessCheckInfo::cast(constructor->GetAccessCheckInfo()), i_isolate);
  Utils::ApiCheck(access_check_info->named_interceptor() != i::Object(),
                  "v8::FunctionTemplate::NewRemoteInstance",
                  "InstanceTemplate needs to have access check handlers");
  Utils::ApiCheck(
      access_check_info->named_interceptor() != i::Tagged<i::Object>(),
      "v8::FunctionTemplate::NewRemoteInstance",
      "InstanceTemplate needs to have access check handlers");
  i::Handle<i::JSObject> object;
  if (!i::ApiNatives::InstantiateRemoteObject(
          Utils::OpenHandle(*InstanceTemplate()))

@@ -7996,16 +8009,213 @@ Local<v8::Array> v8::Array::New(Isolate* v8_isolate, Local<Value>* elements,
          factory->NewJSArrayWithElements(result, i::PACKED_ELEMENTS, len));
}

namespace internal {

uint32_t GetLength(Tagged<JSArray> array) {
  Tagged<Object> length = array->length();
  if (IsSmi(length)) return Smi::ToInt(length);
  return static_cast<uint32_t>(Object::Number(length));
}

}  // namespace internal

uint32_t v8::Array::Length() const {
  i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
  i::Tagged<i::Object> length = obj->length();
  if (i::IsSmi(length)) {
    return i::Smi::ToInt(length);
  } else {
    return static_cast<uint32_t>(i::Object::Number(length));
  return i::GetLength(*obj);
}

namespace internal {

bool CanUseFastIteration(Isolate* isolate, Handle<JSArray> array) {
  if (IsCustomElementsReceiverMap(array->map())) return false;
  if (array->GetElementsAccessor()->HasAccessors(*array)) return false;
  if (!JSObject::PrototypeHasNoElements(isolate, *array)) return false;
  return true;
}

enum class FastIterateResult {
  kException = static_cast<int>(v8::Array::CallbackResult::kException),
  kBreak = static_cast<int>(v8::Array::CallbackResult::kBreak),
  kSlowPath,
  kFinished,
};

FastIterateResult FastIterateArray(Handle<JSArray> array, Isolate* isolate,
                                   v8::Array::IterationCallback callback,
                                   void* callback_data) {
  // Instead of relying on callers to check condition, this function returns
  // {kSlowPath} for situations it can't handle.
  // Most code paths below don't allocate, and rely on {callback} not allocating
  // either, but this isn't enforced with {DisallowHeapAllocation} to allow
  // embedders to allocate error objects before terminating the iteration.
  // Since {callback} must not allocate anyway, we can get away with fake
  // handles, reducing per-element overhead.
  if (!CanUseFastIteration(isolate, array)) return FastIterateResult::kSlowPath;
  using Result = v8::Array::CallbackResult;
  DisallowJavascriptExecution no_js(isolate);
  uint32_t length = GetLength(*array);
  if (length == 0) return FastIterateResult::kFinished;
  switch (array->GetElementsKind()) {
    case PACKED_SMI_ELEMENTS:
    case PACKED_ELEMENTS:
    case PACKED_FROZEN_ELEMENTS:
    case PACKED_SEALED_ELEMENTS:
    case PACKED_NONEXTENSIBLE_ELEMENTS: {
      Tagged<FixedArray> elements = FixedArray::cast(array->elements());
      for (uint32_t i = 0; i < length; i++) {
        Tagged<Object> element = elements->get(static_cast<int>(i));
        // TODO(13270): When we switch to CSS, we can pass {element} to
        // the callback directly, without {fake_handle}.
        Handle<Object> fake_handle(reinterpret_cast<Address*>(&element));
        Result result = callback(i, Utils::ToLocal(fake_handle), callback_data);
        if (result != Result::kContinue) {
          return static_cast<FastIterateResult>(result);
        }
        DCHECK(CanUseFastIteration(isolate, array));
      }
      return FastIterateResult::kFinished;
    }
    case HOLEY_SMI_ELEMENTS:
    case HOLEY_FROZEN_ELEMENTS:
    case HOLEY_SEALED_ELEMENTS:
    case HOLEY_NONEXTENSIBLE_ELEMENTS:
    case HOLEY_ELEMENTS: {
      Tagged<FixedArray> elements = FixedArray::cast(array->elements());
      for (uint32_t i = 0; i < length; i++) {
        Tagged<Object> element = elements->get(static_cast<int>(i));
        if (IsTheHole(element)) continue;
        // TODO(13270): When we switch to CSS, we can pass {element} to
        // the callback directly, without {fake_handle}.
        Handle<Object> fake_handle(reinterpret_cast<Address*>(&element));
        Result result = callback(i, Utils::ToLocal(fake_handle), callback_data);
        if (result != Result::kContinue) {
          return static_cast<FastIterateResult>(result);
        }
        DCHECK(CanUseFastIteration(isolate, array));
      }
      return FastIterateResult::kFinished;
    }
    case HOLEY_DOUBLE_ELEMENTS:
    case PACKED_DOUBLE_ELEMENTS: {
      DCHECK_NE(length, 0);  // Cast to FixedDoubleArray would be invalid.
      Handle<FixedDoubleArray> elements(
          FixedDoubleArray::cast(array->elements()), isolate);
      FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, i = 0, i, i < length, i++, {
        if (elements->is_the_hole(i)) continue;
        double element = elements->get_scalar(i);
        Handle<Object> value = isolate->factory()->NewNumber(element);
        Result result = callback(i, Utils::ToLocal(value), callback_data);
        if (result != Result::kContinue) {
          return static_cast<FastIterateResult>(result);
        }
        DCHECK(CanUseFastIteration(isolate, array));
      });
      return FastIterateResult::kFinished;
    }
    case DICTIONARY_ELEMENTS: {
      DisallowGarbageCollection no_gc;
      Tagged<NumberDictionary> dict = array->element_dictionary();
      struct Entry {
        uint32_t index;
        InternalIndex entry;
      };
      std::vector<Entry> sorted;
      sorted.reserve(dict->NumberOfElements());
      ReadOnlyRoots roots(isolate);
      for (InternalIndex i : dict->IterateEntries()) {
        Tagged<Object> key = dict->KeyAt(isolate, i);
        if (!dict->IsKey(roots, key)) continue;
        uint32_t index = static_cast<uint32_t>(Object::Number(key));
        sorted.push_back({index, i});
      }
      std::sort(
          sorted.begin(), sorted.end(),
          [](const Entry& a, const Entry& b) { return a.index < b.index; });
      for (const Entry& entry : sorted) {
        Tagged<Object> value = dict->ValueAt(entry.entry);
        // TODO(13270): When we switch to CSS, we can pass {element} to
        // the callback directly, without {fake_handle}.
        Handle<Object> fake_handle(reinterpret_cast<Address*>(&value));
        Result result =
            callback(entry.index, Utils::ToLocal(fake_handle), callback_data);
        if (result != Result::kContinue) {
          return static_cast<FastIterateResult>(result);
        }
        SLOW_DCHECK(CanUseFastIteration(isolate, array));
      }
      return FastIterateResult::kFinished;
    }
    case NO_ELEMENTS:
      return FastIterateResult::kFinished;
    case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
    case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      // Probably not worth implementing. Take the slow path.
      return FastIterateResult::kSlowPath;
    case WASM_ARRAY_ELEMENTS:
    case FAST_STRING_WRAPPER_ELEMENTS:
    case SLOW_STRING_WRAPPER_ELEMENTS:
    case SHARED_ARRAY_ELEMENTS:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
      TYPED_ARRAYS(TYPED_ARRAY_CASE)
      RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
      // These are never used by v8::Array instances.
      UNREACHABLE();
  }
}

}  // namespace internal

Maybe<void> v8::Array::Iterate(Local<Context> context,
                               v8::Array::IterationCallback callback,
                               void* callback_data) {
  i::Handle<i::JSArray> array = Utils::OpenHandle(this);
  i::Isolate* isolate = array->GetIsolate();
  i::FastIterateResult fast_result =
      i::FastIterateArray(array, isolate, callback, callback_data);
  if (fast_result == i::FastIterateResult::kException) return Nothing<void>();
  // Early breaks and completed iteration both return successfully.
  if (fast_result != i::FastIterateResult::kSlowPath) return JustVoid();

  // Slow path: retrieving elements could have side effects.
  ENTER_V8(isolate, context, Array, Iterate, Nothing<void>(), i::HandleScope);
  for (uint32_t i = 0; i < i::GetLength(*array); ++i) {
    i::Handle<i::Object> element;
    has_pending_exception =
        !i::JSReceiver::GetElement(isolate, array, i).ToHandle(&element);
    RETURN_ON_FAILED_EXECUTION_PRIMITIVE(void);
    using Result = v8::Array::CallbackResult;
    Result result = callback(i, Utils::ToLocal(element), callback_data);
    if (result == Result::kException) return Nothing<void>();
    if (result == Result::kBreak) return JustVoid();
  }
  return JustVoid();
}

v8::TypecheckWitness::TypecheckWitness(Isolate* isolate)
    // We need to reserve a handle that we can patch later.
    // TODO(13270): When we switch to CSS, we can use a direct pointer
    // instead of a handle.
    : cached_map_(v8::Number::New(isolate, 1)) {}

void v8::TypecheckWitness::Update(Local<Value> baseline) {
  i::Tagged<i::Object> obj = *Utils::OpenHandle(*baseline);
  i::Tagged<i::Object> map = i::Smi::zero();
  if (!IsSmi(obj)) map = i::HeapObject::cast(obj)->map();
  // Design overview: in the {TypecheckWitness} constructor, we create
  // a single handle for the witness value. Whenever {Update} is called, we
  // make this handle point at the fresh baseline/witness; the intention is
  // to allow having short-lived HandleScopes (e.g. in {FastIterateArray}
  // above) while a {TypecheckWitness} is alive: it therefore cannot hold
  // on to one of the short-lived handles.
  // Calling {OpenHandle} on the {cached_map_} only serves to "reinterpret_cast"
  // it to an {i::Handle} on which we can call {PatchValue}.
  // TODO(13270): When we switch to CSS, this can become simpler: we can
  // then simply overwrite the direct pointer.
  i::Handle<i::Object> cache = Utils::OpenHandle(*cached_map_);
  cache.PatchValue(map);
}

Local<v8::Map> v8::Map::New(Isolate* v8_isolate) {
  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
  API_RCS_SCOPE(i_isolate, Map, New);

@@ -8113,11 +8323,12 @@ i::Handle<i::JSArray> MapAsArray(i::Isolate* i_isolate,
  int result_index = 0;
  {
    i::DisallowGarbageCollection no_gc;
    i::Tagged<i::Hole> the_hole = i::ReadOnlyRoots(i_isolate).the_hole_value();
    i::Tagged<i::Hole> hash_table_hole =
        i::ReadOnlyRoots(i_isolate).hash_table_hole_value();
    for (int i = offset; i < capacity; ++i) {
      i::InternalIndex entry(i);
      i::Tagged<i::Object> key = table->KeyAt(entry);
      if (key == the_hole) continue;
      if (key == hash_table_hole) continue;
      if (collect_keys) result->set(result_index++, key);
      if (collect_values) result->set(result_index++, table->ValueAt(entry));
    }

@@ -8218,11 +8429,12 @@ i::Handle<i::JSArray> SetAsArray(i::Isolate* i_isolate,
  int result_index = 0;
  {
    i::DisallowGarbageCollection no_gc;
    i::Tagged<i::Hole> the_hole = i::ReadOnlyRoots(i_isolate).the_hole_value();
    i::Tagged<i::Hole> hash_table_hole =
        i::ReadOnlyRoots(i_isolate).hash_table_hole_value();
    for (int i = offset; i < capacity; ++i) {
      i::InternalIndex entry(i);
      i::Tagged<i::Object> key = table->KeyAt(entry);
      if (key == the_hole) continue;
      if (key == hash_table_hole) continue;
      result->set(result_index++, key);
      if (collect_key_values) result->set(result_index++, key);
    }

@@ -8776,9 +8988,9 @@ size_t v8::TypedArray::Length() {
  return obj->WasDetached() ? 0 : obj->GetLength();
}

static_assert(
    v8::TypedArray::kMaxLength == i::JSTypedArray::kMaxLength,
    "v8::TypedArray::kMaxLength must match i::JSTypedArray::kMaxLength");
static_assert(v8::TypedArray::kMaxByteLength == i::JSTypedArray::kMaxByteLength,
              "v8::TypedArray::kMaxByteLength must match "
              "i::JSTypedArray::kMaxByteLength");

#define TYPED_ARRAY_NEW(Type, type, TYPE, ctype) \
  Local<Type##Array> Type##Array::New(Local<ArrayBuffer> array_buffer, \

@@ -10234,13 +10446,13 @@ void v8::Isolate::LocaleConfigurationChangeNotification() {
#endif  // V8_INTL_SUPPORT
}

#if defined(V8_OS_WIN)
#if defined(V8_OS_WIN) && defined(V8_ENABLE_ETW_STACK_WALKING)
void Isolate::SetFilterETWSessionByURLCallback(
    FilterETWSessionByURLCallback callback) {
  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
  i_isolate->SetFilterETWSessionByURLCallback(callback);
}
#endif  // V8_OS_WIN
#endif  // V8_OS_WIN && V8_ENABLE_ETW_STACK_WALKING

bool v8::Object::IsCodeLike(v8::Isolate* v8_isolate) const {
  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate);

@@ -10354,20 +10566,25 @@ String::Value::Value(v8::Isolate* v8_isolate, v8::Local<v8::Value> obj)

String::Value::~Value() { i::DeleteArray(str_); }

#define DEFINE_ERROR(NAME, name) \
  Local<Value> Exception::NAME(v8::Local<v8::String> raw_message) { \
    i::Isolate* i_isolate = i::Isolate::Current(); \
    API_RCS_SCOPE(i_isolate, NAME, New); \
    ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); \
    i::Object error; \
    { \
      i::HandleScope scope(i_isolate); \
      i::Handle<i::String> message = Utils::OpenHandle(*raw_message); \
      i::Handle<i::JSFunction> constructor = i_isolate->name##_function(); \
      error = *i_isolate->factory()->NewError(constructor, message); \
    } \
    i::Handle<i::Object> result(error, i_isolate); \
    return Utils::ToLocal(result); \
#define DEFINE_ERROR(NAME, name) \
  Local<Value> Exception::NAME(v8::Local<v8::String> raw_message, \
                               v8::Local<v8::Value> raw_options) { \
    i::Isolate* i_isolate = i::Isolate::Current(); \
    API_RCS_SCOPE(i_isolate, NAME, New); \
    ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); \
    i::Tagged<i::Object> error; \
    { \
      i::HandleScope scope(i_isolate); \
      i::Handle<i::Object> options; \
      if (!raw_options.IsEmpty()) { \
        options = Utils::OpenHandle(*raw_options); \
      } \
      i::Handle<i::String> message = Utils::OpenHandle(*raw_message); \
      i::Handle<i::JSFunction> constructor = i_isolate->name##_function(); \
      error = *i_isolate->factory()->NewError(constructor, message, options); \
    } \
    i::Handle<i::Object> result(error, i_isolate); \
    return Utils::ToLocal(result); \
  }

DEFINE_ERROR(RangeError, range_error)

@@ -10917,16 +11134,6 @@ int HeapProfiler::GetSnapshotCount() {
  return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotsCount();
}

void HeapProfiler::QueryObjects(Local<Context> v8_context,
                                QueryObjectPredicate* predicate,
                                std::vector<Global<Object>>* objects) {
  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_context->GetIsolate());
  i::HeapProfiler* profiler = reinterpret_cast<i::HeapProfiler*>(this);
  DCHECK_EQ(isolate, profiler->isolate());
  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
  profiler->QueryObjects(Utils::OpenHandle(*v8_context), predicate, objects);
}

const HeapSnapshot* HeapProfiler::GetHeapSnapshot(int index) {
  return reinterpret_cast<const HeapSnapshot*>(
      reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshot(index));

@@ -11285,8 +11492,9 @@ void InvokeAccessorGetterCallback(
  {
    Address arg = i_isolate->isolate_data()->api_callback_thunk_argument();
    // Currently we don't call InterceptorInfo callbacks via CallApiGetter.
    DCHECK(IsAccessorInfo(Object(arg)));
    Tagged<AccessorInfo> accessor_info = AccessorInfo::cast(Object(arg));
    DCHECK(IsAccessorInfo(Tagged<Object>(arg)));
    Tagged<AccessorInfo> accessor_info =
        AccessorInfo::cast(Tagged<Object>(arg));
    getter = reinterpret_cast<v8::AccessorNameGetterCallback>(
        accessor_info->getter(i_isolate));
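A sketch of the embedder-facing side of the new iteration API; note the comment above that the callback must not allocate on the fast path (the summing callback here is illustrative, not from the diff):

  struct SumState {
    double total = 0;
  };

  v8::Array::CallbackResult SumElement(uint32_t index,
                                       v8::Local<v8::Value> element,
                                       void* data) {
    auto* state = static_cast<SumState*>(data);
    if (element->IsNumber()) {
      state->total += element.As<v8::Number>()->Value();
    }
    return v8::Array::CallbackResult::kContinue;
  }

  SumState state;
  if (array->Iterate(context, SumElement, &state).IsNothing()) {
    // kException: an element getter threw; propagate the pending exception.
  }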
4 deps/v8/src/api/api.h vendored

@@ -408,11 +408,11 @@ class HandleScopeImplementer {
  // `is_microtask_context_[i]` is 1.
  // TODO(tzik): Remove |is_microtask_context_| after the deprecated
  // v8::Isolate::GetEnteredContext() is removed.
  DetachableVector<NativeContext> entered_contexts_;
  DetachableVector<Tagged<NativeContext>> entered_contexts_;
  DetachableVector<int8_t> is_microtask_context_;

  // Used as a stack to keep track of saved contexts.
  DetachableVector<Context> saved_contexts_;
  DetachableVector<Tagged<Context>> saved_contexts_;
  Address* spare_;
  Address* last_handle_before_deferred_block_;
  // This is only used for threading support.
3 deps/v8/src/asmjs/asm-js.cc vendored

@@ -72,7 +72,8 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle<JSReceiver> stdlib,
                          base::StaticCharVector(#fname))); \
    Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
    if (!IsJSFunction(*value)) return false; \
    SharedFunctionInfo shared = Handle<JSFunction>::cast(value)->shared(); \
    Tagged<SharedFunctionInfo> shared = \
        Handle<JSFunction>::cast(value)->shared(); \
    if (!shared->HasBuiltinId() || \
        shared->builtin_id() != Builtin::kMath##FName) { \
      return false; \
8 deps/v8/src/ast/scopes.cc vendored

@@ -204,7 +204,7 @@ ClassScope::ClassScope(IsolateT* isolate, Zone* zone,
  // If the class variable is context-allocated and its index is
  // saved for deserialization, deserialize it.
  if (scope_info->HasSavedClassVariable()) {
    String name;
    Tagged<String> name;
    int index;
    std::tie(name, index) = scope_info->SavedClassVariable();
    DCHECK_EQ(scope_info->ContextLocalMode(index), VariableMode::kConst);

@@ -475,7 +475,7 @@ Scope* Scope::DeserializeScopeChain(IsolateT* isolate, Zone* zone,
      DCHECK_EQ(scope_info->ContextLocalMode(0), VariableMode::kVar);
      DCHECK_EQ(scope_info->ContextLocalInitFlag(0), kCreatedInitialized);
      DCHECK(scope_info->HasInlinedLocalNames());
      String name = scope_info->ContextInlinedLocalName(0);
      Tagged<String> name = scope_info->ContextInlinedLocalName(0);
      MaybeAssignedFlag maybe_assigned =
          scope_info->ContextLocalMaybeAssignedFlag(0);
      outer_scope =

@@ -976,8 +976,8 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
  DCHECK_NULL(cache->variables_.Lookup(name));
  DisallowGarbageCollection no_gc;

  String name_handle = *name->string();
  ScopeInfo scope_info = *scope_info_;
  Tagged<String> name_handle = *name->string();
  Tagged<ScopeInfo> scope_info = *scope_info_;
  // The Scope is backed up by ScopeInfo. This means it cannot operate in a
  // heap-independent mode, and all strings must be internalized immediately. So
  // it's ok to get the Handle<String> here.
32
deps/v8/src/base/cpu.cc
vendored
32
deps/v8/src/base/cpu.cc
vendored
|
|
@ -14,7 +14,7 @@
|
|||
#if V8_OS_LINUX
|
||||
#include <linux/auxvec.h> // AT_HWCAP
|
||||
#endif
|
||||
#if V8_GLIBC_PREREQ(2, 16)
|
||||
#if V8_GLIBC_PREREQ(2, 16) || V8_OS_ANDROID
|
||||
#include <sys/auxv.h> // getauxval()
|
||||
#endif
|
||||
#if V8_OS_QNX
|
||||
|
|
@ -163,17 +163,27 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
|
|||
#define HWCAP_SB (1 << 29)
|
||||
#define HWCAP_PACA (1 << 30)
|
||||
#define HWCAP_PACG (1UL << 31)
|
||||
|
||||
// See <uapi/asm/hwcap.h> kernel header.
|
||||
/*
|
||||
* HWCAP2 flags - for elf_hwcap2 (in kernel) and AT_HWCAP2
|
||||
*/
|
||||
#define HWCAP2_MTE (1 << 18)
|
||||
#endif // V8_HOST_ARCH_ARM64
|
||||
|
||||
#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64
|
||||
|
||||
static uint32_t ReadELFHWCaps() {
|
||||
uint32_t result = 0;
|
||||
#if V8_GLIBC_PREREQ(2, 16)
|
||||
result = static_cast<uint32_t>(getauxval(AT_HWCAP));
|
||||
static std::tuple<uint32_t, uint32_t> ReadELFHWCaps() {
|
||||
uint32_t hwcap = 0;
|
||||
uint32_t hwcap2 = 0;
|
||||
#if defined(AT_HWCAP)
|
||||
hwcap = static_cast<uint32_t>(getauxval(AT_HWCAP));
|
||||
#if defined(AT_HWCAP2)
|
||||
hwcap2 = static_cast<uint32_t>(getauxval(AT_HWCAP2));
|
||||
#endif // AT_HWCAP2
|
||||
#else
|
||||
// Read the ELF HWCAP flags by parsing /proc/self/auxv.
|
||||
// If getauxval is not available, the kernel/libc is also not new enough to
|
||||
// expose hwcap2.
|
||||
FILE* fp = base::Fopen("/proc/self/auxv", "r");
|
||||
if (fp != nullptr) {
|
||||
struct {
|
||||
|
|
@ -193,7 +203,7 @@ static uint32_t ReadELFHWCaps() {
|
|||
base::Fclose(fp);
|
||||
}
|
||||
#endif
|
||||
return result;
|
||||
return std::make_tuple(hwcap, hwcap2);
|
||||
}
|
||||
|
||||
#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64
|
||||
|
|
@ -406,6 +416,7 @@ CPU::CPU()
|
|||
has_jscvt_(false),
|
||||
has_dot_prod_(false),
|
||||
has_lse_(false),
|
||||
has_mte_(false),
|
||||
is_fp64_mode_(false),
|
||||
has_non_stop_time_stamp_counter_(false),
|
||||
is_running_in_vm_(false),
|
||||
|
|
@ -628,7 +639,8 @@ CPU::CPU()
|
|||
}
|
||||
|
||||
// Try to extract the list of CPU features from ELF hwcaps.
|
||||
uint32_t hwcaps = ReadELFHWCaps();
|
||||
uint32_t hwcaps, hwcaps2;
|
||||
std::tie(hwcaps, hwcaps2) = ReadELFHWCaps();
|
||||
if (hwcaps != 0) {
|
||||
has_idiva_ = (hwcaps & HWCAP_IDIVA) != 0;
|
||||
has_neon_ = (hwcaps & HWCAP_NEON) != 0;
|
||||
|
|
@ -740,7 +752,9 @@ CPU::CPU()
|
|||
|
||||
#elif V8_OS_LINUX
|
||||
// Try to extract the list of CPU features from ELF hwcaps.
|
||||
uint32_t hwcaps = ReadELFHWCaps();
|
||||
uint32_t hwcaps, hwcaps2;
|
||||
std::tie(hwcaps, hwcaps2) = ReadELFHWCaps();
|
||||
has_mte_ = (hwcaps2 & HWCAP2_MTE) != 0;
|
||||
if (hwcaps != 0) {
|
||||
has_jscvt_ = (hwcaps & HWCAP_JSCVT) != 0;
|
||||
has_dot_prod_ = (hwcaps & HWCAP_ASIMDDP) != 0;
|
||||
|
|
|
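The reworked ReadELFHWCaps returns both AT_HWCAP and AT_HWCAP2 so the MTE bit can be probed. The same probing works outside V8 with plain getauxval; a minimal Linux-only sketch (HWCAP2_MTE mirrors the kernel header value used above):

// Standalone sketch of the AT_HWCAP/AT_HWCAP2 probing above (Linux only).
#include <sys/auxv.h>
#include <cstdint>
#include <cstdio>

#ifndef HWCAP2_MTE
#define HWCAP2_MTE (1 << 18)  // value from <uapi/asm/hwcap.h>, arm64
#endif

int main() {
  uint32_t hwcap = 0, hwcap2 = 0;
#if defined(AT_HWCAP)
  hwcap = static_cast<uint32_t>(getauxval(AT_HWCAP));
#endif
#if defined(AT_HWCAP2)
  hwcap2 = static_cast<uint32_t>(getauxval(AT_HWCAP2));
#endif
  std::printf("hwcap=%#x hwcap2=%#x mte=%d\n", hwcap, hwcap2,
              (hwcap2 & HWCAP2_MTE) != 0);
}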
2 deps/v8/src/base/cpu.h vendored
@@ -125,6 +125,7 @@ class V8_BASE_EXPORT CPU final {
  bool has_jscvt() const { return has_jscvt_; }
  bool has_dot_prod() const { return has_dot_prod_; }
  bool has_lse() const { return has_lse_; }
  bool has_mte() const { return has_mte_; }

  // mips features
  bool is_fp64_mode() const { return is_fp64_mode_; }
@@ -186,6 +187,7 @@ class V8_BASE_EXPORT CPU final {
  bool has_jscvt_;
  bool has_dot_prod_;
  bool has_lse_;
  bool has_mte_;
  bool is_fp64_mode_;
  bool has_non_stop_time_stamp_counter_;
  bool is_running_in_vm_;
68 deps/v8/src/base/iterator.h vendored
@@ -6,6 +6,8 @@
#define V8_BASE_ITERATOR_H_

#include <iterator>
#include <tuple>
#include <utility>

namespace v8 {
namespace base {
@@ -68,7 +70,7 @@ struct DerefPtrIterator : base::iterator<std::bidirectional_iterator_tag, T> {

  explicit DerefPtrIterator(T* const* ptr) : ptr(ptr) {}

  T& operator*() { return **ptr; }
  T& operator*() const { return **ptr; }
  DerefPtrIterator& operator++() {
    ++ptr;
    return *this;
@@ -77,7 +79,12 @@ struct DerefPtrIterator : base::iterator<std::bidirectional_iterator_tag, T> {
    --ptr;
    return *this;
  }
  bool operator!=(DerefPtrIterator other) { return ptr != other.ptr; }
  bool operator!=(const DerefPtrIterator& other) const {
    return ptr != other.ptr;
  }
  bool operator==(const DerefPtrIterator& other) const {
    return ptr == other.ptr;
  }
};

// {Reversed} returns a container adapter usable in a range-based "for"
@@ -130,6 +137,63 @@ auto IterateWithoutLast(const iterator_range<T>& t) {
  return IterateWithoutLast(range_copy);
}

// TupleIterator is an iterator wrapping around multiple iterators. It is used
// by the `zip` function below to iterate over multiple containers at once.
template <class... Iterators>
class TupleIterator
    : public base::iterator<
          std::bidirectional_iterator_tag,
          std::tuple<typename std::iterator_traits<Iterators>::reference...>> {
 public:
  using value_type =
      std::tuple<typename std::iterator_traits<Iterators>::reference...>;

  explicit TupleIterator(Iterators... its) : its_(its...) {}

  TupleIterator& operator++() {
    std::apply([](auto&... iterators) { (++iterators, ...); }, its_);
    return *this;
  }

  template <class Other>
  bool operator!=(const Other& other) const {
    return not_equal_impl(other, std::index_sequence_for<Iterators...>{});
  }

  value_type operator*() const {
    return std::apply(
        [](auto&... this_iterators) { return value_type{*this_iterators...}; },
        its_);
  }

 private:
  template <class Other, size_t... indices>
  bool not_equal_impl(const Other& other,
                      std::index_sequence<indices...>) const {
    return (... || (std::get<indices>(its_) != std::get<indices>(other.its_)));
  }

  std::tuple<Iterators...> its_;
};

// `zip` creates an iterator_range from multiple containers. It can be used to
// iterate over multiple containers at once. For instance:
//
// std::vector<int> arr = { 2, 4, 6 };
// std::set<double> set = { 3.5, 4.5, 5.5 };
// for (auto [i, d] : base::zip(arr, set)) {
//   std::cout << i << " and " << d << std::endl;
// }
//
// Prints "2 and 3.5", "4 and 4.5" and "6 and 5.5".
template <class... Containers>
auto zip(Containers&... containers) {
  using TupleIt =
      TupleIterator<decltype(std::declval<Containers>().begin())...>;
  return base::make_iterator_range(TupleIt(containers.begin()...),
                                   TupleIt(containers.end()...));
}

}  // namespace base
}  // namespace v8
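The zip pattern above can be reproduced with only the C++17 standard library. A self-contained sketch with illustrative names (ZipIterator, Zip) rather than V8's; like base::zip, it assumes the zipped ranges have equal length:

// Self-contained sketch of the zip pattern, C++17 standard library only.
#include <iostream>
#include <set>
#include <tuple>
#include <vector>

template <class... Its>
class ZipIterator {
 public:
  explicit ZipIterator(Its... its) : its_(its...) {}

  ZipIterator& operator++() {
    // Advance all wrapped iterators in lock step.
    std::apply([](auto&... it) { (++it, ...); }, its_);
    return *this;
  }

  bool operator!=(const ZipIterator& other) const {
    // Elementwise tuple comparison: keep iterating while any pair differs.
    // Like base::zip, this assumes equal-length ranges.
    return its_ != other.its_;
  }

  auto operator*() const {
    // Yield a tuple of references to the current elements.
    return std::apply(
        [](auto&... it) { return std::forward_as_tuple(*it...); }, its_);
  }

 private:
  std::tuple<Its...> its_;
};

template <class... Containers>
auto Zip(Containers&... cs) {
  using It = ZipIterator<decltype(cs.begin())...>;
  struct Range {
    It b, e;
    It begin() const { return b; }
    It end() const { return e; }
  };
  return Range{It(cs.begin()...), It(cs.end()...)};
}

int main() {
  std::vector<int> arr = {2, 4, 6};
  std::set<double> set = {3.5, 4.5, 5.5};
  for (auto [i, d] : Zip(arr, set)) {
    std::cout << i << " and " << d << '\n';  // 2 and 3.5, 4 and 4.5, 6 and 5.5
  }
}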
8 deps/v8/src/base/macros.h vendored
@@ -389,9 +389,9 @@ bool is_inbounds(float_t v) {
// Setup for Windows shared library export.
#define V8_EXPORT_ENUM
#ifdef BUILDING_V8_SHARED
#define V8_EXPORT_PRIVATE
#define V8_EXPORT_PRIVATE __declspec(dllexport)
#elif USING_V8_SHARED
#define V8_EXPORT_PRIVATE
#define V8_EXPORT_PRIVATE __declspec(dllimport)
#else
#define V8_EXPORT_PRIVATE
#endif  // BUILDING_V8_SHARED
@@ -401,8 +401,8 @@ bool is_inbounds(float_t v) {
// Setup for Linux shared library export.
#if V8_HAS_ATTRIBUTE_VISIBILITY
#ifdef BUILDING_V8_SHARED
#define V8_EXPORT_PRIVATE
#define V8_EXPORT_ENUM
#define V8_EXPORT_PRIVATE __attribute__((visibility("default")))
#define V8_EXPORT_ENUM V8_EXPORT_PRIVATE
#else
#define V8_EXPORT_PRIVATE
#define V8_EXPORT_ENUM
@@ -32,6 +32,9 @@ class V8_BASE_EXPORT MemoryProtectionKey {
  // mprotect().
  static constexpr int kNoMemoryProtectionKey = -1;

  // The default ProtectionKey can be used to remove pkey assignments.
  static constexpr int kDefaultProtectionKey = 0;

  // Permissions for memory protection keys on top of the page's permissions.
  // NOTE: Since there is no executable bit, the executable permission cannot be
  // withdrawn by memory protection keys.
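The new kDefaultProtectionKey constant matches how the Linux pkey API treats key 0: pages start out associated with it, so re-assigning key 0 undoes an earlier pkey assignment. A minimal sketch using the glibc wrappers (Linux, glibc >= 2.27, _GNU_SOURCE; error handling elided):

// Sketch of "assigning the default key removes the pkey tag".
#define _GNU_SOURCE
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  void* p = mmap(nullptr, page, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  int key = pkey_alloc(0, 0);                           // a fresh key
  pkey_mprotect(p, page, PROT_READ | PROT_WRITE, key);  // tag the page
  // Key 0 is the default key every page starts with; re-assigning it
  // effectively removes the tagging above.
  pkey_mprotect(p, page, PROT_READ | PROT_WRITE, 0);
  pkey_free(key);
  munmap(p, page);
}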
25 deps/v8/src/base/platform/platform-posix.cc vendored
@@ -370,9 +370,9 @@ void* OS::GetRandomMmapAddr() {
  // this address for RISC-V. https://github.com/v8-riscv/v8/issues/375
  raw_addr &= 0x3FFFF000;
#elif V8_TARGET_ARCH_LOONG64
  // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance
  // to fulfill request.
  raw_addr &= uint64_t{0xFFFFFF0000};
  // 40 or 47 bits of virtual addressing. Truncate to 38 bits to allow kernel
  // chance to fulfill request.
  raw_addr &= uint64_t{0x3FFFFF0000};
#else
  raw_addr &= 0x3FFFF000;
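The new LOONG64 mask 0x3FFFFF0000 keeps bits 16 through 37, i.e. a 38-bit hint aligned to 64 KiB. A quick standalone check of that arithmetic:

// Quick check of the mask above: 0x3FFFFF0000 yields 38-bit addresses
// aligned to 64 KiB, leaving the kernel room to honor the hint.
#include <cassert>
#include <cstdint>

int main() {
  constexpr uint64_t kMask = uint64_t{0x3FFFFF0000};
  const uint64_t raw_addr = 0xDEADBEEFCAFEBABEull;
  const uint64_t addr = raw_addr & kMask;
  assert(addr < (uint64_t{1} << 38));  // truncated to 38 bits
  assert(addr % 0x10000 == 0);         // 64 KiB aligned
}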
@@ -849,6 +849,25 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
}
#endif

int OS::GetPeakMemoryUsageKb() {
#if defined(V8_OS_FUCHSIA)
  // Fuchsia does not implement getrusage().
  return -1;
#else
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;

#if defined(V8_OS_MACOS) || defined(V8_OS_IOS)
  constexpr int KB = 1024;
  // macOS and iOS report ru_maxrss in bytes.
  return static_cast<int>(usage.ru_maxrss / KB);
#else
  // Most other cases (at least Linux) return kilobytes.
  return static_cast<int>(usage.ru_maxrss);
#endif  // defined(V8_OS_MACOS) || defined(V8_OS_IOS)
#endif  // defined(V8_OS_FUCHSIA)
}

double OS::TimeCurrentMillis() {
  return Time::Now().ToJsTime();
}
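The POSIX path can be exercised standalone; the Darwin special case mirrors the bytes-versus-kilobytes difference noted in the comments above:

// Standalone sketch of the getrusage() path above (POSIX only).
#include <sys/resource.h>
#include <cstdio>

static int PeakMemoryUsageKb() {
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
#if defined(__APPLE__)
  return static_cast<int>(usage.ru_maxrss / 1024);  // Darwin reports bytes
#else
  return static_cast<int>(usage.ru_maxrss);         // Linux reports KiB
#endif
}

int main() { std::printf("peak RSS: %d KB\n", PeakMemoryUsageKb()); }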
13 deps/v8/src/base/platform/platform-win32.cc vendored
@@ -22,6 +22,7 @@
#include <dbghelp.h>   // For SymLoadModule64 and al.
#include <malloc.h>    // For _msize()
#include <mmsystem.h>  // For timeGetTime().
#include <psapi.h>     // For GetProcessMemoryInfo().
#include <tlhelp32.h>  // For Module32First and al.

#include <limits>
@@ -487,6 +488,18 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
  return 0;
}

int OS::GetPeakMemoryUsageKb() {
  constexpr int KB = 1024;

  PROCESS_MEMORY_COUNTERS mem_counters;
  int ret;

  ret = GetProcessMemoryInfo(GetCurrentProcess(), &mem_counters,
                             sizeof(mem_counters));
  if (ret == 0) return -1;

  return static_cast<int>(mem_counters.PeakWorkingSetSize / KB);
}

// Returns current time as the number of milliseconds since
// 00:00:00 UTC, January 1, 1970.
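The Win32 path is equally small when taken out of V8; a sketch (link against psapi.lib; the helper name is illustrative):

// Standalone sketch of the Win32 path above. PeakWorkingSetSize is
// reported in bytes, hence the division by 1024.
#include <windows.h>
#include <psapi.h>
#include <cstdio>

static int PeakMemoryUsageKb() {
  PROCESS_MEMORY_COUNTERS counters;
  if (GetProcessMemoryInfo(GetCurrentProcess(), &counters,
                           sizeof(counters)) == 0) {
    return -1;
  }
  return static_cast<int>(counters.PeakWorkingSetSize / 1024);
}

int main() { std::printf("peak working set: %d KB\n", PeakMemoryUsageKb()); }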
3 deps/v8/src/base/platform/platform.h vendored
@@ -162,6 +162,9 @@ class V8_BASE_EXPORT OS {
  // micro-second resolution.
  static int GetUserTime(uint32_t* secs, uint32_t* usecs);

  // Obtain the peak memory usage in kilobytes.
  static int GetPeakMemoryUsageKb();

  // Returns current time as the number of milliseconds since
  // 00:00:00 UTC, January 1, 1970.
  static double TimeCurrentMillis();
15 deps/v8/src/base/small-vector.h vendored
@@ -27,6 +27,7 @@ class SmallVector {

 public:
  static constexpr size_t kInlineSize = kSize;
  using value_type = T;

  SmallVector() = default;
  explicit SmallVector(const Allocator& allocator) : allocator_(allocator) {}
@@ -197,9 +198,17 @@ class SmallVector {
    end_ = begin_ + new_size;
  }

  void reserve_no_init(size_t new_capacity) {
    // Resizing without initialization is safe if T is trivially copyable.
    ASSERT_TRIVIALLY_COPYABLE(T);
  void resize_and_init(size_t new_size) {
    static_assert(std::is_trivially_destructible_v<T>);
    if (new_size > capacity()) Grow(new_size);
    T* new_end = begin_ + new_size;
    if (new_end > end_) {
      std::uninitialized_fill(end_, new_end, T{});
    }
    end_ = new_end;
  }

  void reserve(size_t new_capacity) {
    if (new_capacity > capacity()) Grow(new_capacity);
  }
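resize_and_init value-initializes only the elements between the old and the new end. A small standalone illustration of that contract, with raw storage standing in for SmallVector's buffer:

// Only the new tail is value-initialized via uninitialized_fill; old
// elements are left untouched. Illustrative, not V8 code.
#include <cassert>
#include <memory>
#include <type_traits>

int main() {
  static_assert(std::is_trivially_destructible_v<int>);
  alignas(int) unsigned char buf[8 * sizeof(int)];  // uninitialized storage
  int* begin = reinterpret_cast<int*>(buf);
  int* end = begin + 3;  // three live elements
  begin[0] = 1; begin[1] = 2; begin[2] = 3;

  int* new_end = begin + 6;                      // "resize_and_init(6)"
  std::uninitialized_fill(end, new_end, int{});  // zero only the new tail
  end = new_end;

  assert(begin[0] == 1 && begin[2] == 3);  // old values preserved
  assert(begin[3] == 0 && begin[5] == 0);  // tail value-initialized
}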
@@ -63,6 +63,9 @@ void BaselineAssembler::RegisterFrameAddress(
MemOperand BaselineAssembler::FeedbackVectorOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
MemOperand BaselineAssembler::FeedbackCellOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp);
}

void BaselineAssembler::Bind(Label* label) { __ bind(label); }

@@ -400,9 +403,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedField(feedback_cell, feedback_cell,
                  JSFunction::kFeedbackCellOffset);
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ ldr(interrupt_budget,
@@ -423,9 +424,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedField(feedback_cell, feedback_cell,
                  JSFunction::kFeedbackCellOffset);
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ ldr(interrupt_budget,
@@ -62,6 +62,9 @@ void BaselineAssembler::RegisterFrameAddress(
MemOperand BaselineAssembler::FeedbackVectorOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
MemOperand BaselineAssembler::FeedbackCellOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp);
}

void BaselineAssembler::Bind(Label* label) { __ Bind(label); }

@@ -458,9 +461,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedField(feedback_cell, feedback_cell,
                  JSFunction::kFeedbackCellOffset);
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch().W();
  __ Ldr(interrupt_budget,
@@ -481,9 +482,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedField(feedback_cell, feedback_cell,
                  JSFunction::kFeedbackCellOffset);
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch().W();
  __ Ldr(interrupt_budget,
@@ -49,32 +49,25 @@ void BaselineCompiler::PrologueFillFrame() {
  const int kLoopUnrollSize = 8;
  const int new_target_index = new_target_or_generator_register.index();
  const bool has_new_target = new_target_index != kMaxInt;
  // BaselineOutOfLinePrologue already pushed one undefined.
  register_count -= 1;
  if (has_new_target) {
    if (new_target_index == 0) {
      // Oops, need to fix up that undefined that BaselineOutOfLinePrologue
      // pushed.
      __ masm()->Poke(kJavaScriptCallNewTargetRegister, Operand(0));
    } else {
      DCHECK_LE(new_target_index, register_count);
      int index = 1;
      for (; index + 2 <= new_target_index; index += 2) {
      int before_new_target_count = 0;
      for (; before_new_target_count + 2 <= new_target_index;
           before_new_target_count += 2) {
        __ masm()->Push(kInterpreterAccumulatorRegister,
                        kInterpreterAccumulatorRegister);
      }
      if (index == new_target_index) {
      if (before_new_target_count == new_target_index) {
        __ masm()->Push(kJavaScriptCallNewTargetRegister,
                        kInterpreterAccumulatorRegister);
      } else {
        DCHECK_EQ(index, new_target_index - 1);
        DCHECK_EQ(before_new_target_count + 1, new_target_index);
        __ masm()->Push(kInterpreterAccumulatorRegister,
                        kJavaScriptCallNewTargetRegister);
      }
      // We pushed "index" registers, minus the one the prologue pushed, plus
      // the two registers that included new_target.
      register_count -= (index - 1 + 2);
    }
    // We pushed before_new_target_count registers, plus the two registers
    // that included new_target.
    register_count -= (before_new_target_count + 2);
  }
  if (register_count < 2 * kLoopUnrollSize) {
    // If the frame is small enough, just unroll the frame fill completely.
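The rewritten bookkeeping can be sanity-checked in isolation: full pairs are pushed before new_target, then one more pair containing it, so before_new_target_count + 2 registers are consumed. An illustrative helper (not V8 code) that mirrors the counting:

// Sanity check of the frame-fill bookkeeping above.
#include <cassert>

int RemainingRegisters(int register_count, int new_target_index) {
  int before_new_target_count = 0;
  while (before_new_target_count + 2 <= new_target_index) {
    before_new_target_count += 2;  // full pairs of filler registers
  }
  // One more pair is pushed that contains new_target itself.
  return register_count - (before_new_target_count + 2);
}

int main() {
  assert(RemainingRegisters(10, 3) == 6);  // pairs [0,1], [2,new_target]
  assert(RemainingRegisters(10, 4) == 4);  // pairs [0,1], [2,3], [new_target,_]
}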
@@ -139,6 +139,13 @@ void BaselineAssembler::StoreRegister(interpreter::Register output,
  Move(output, value);
}

void BaselineAssembler::LoadFeedbackCell(Register output) {
  Move(output, FeedbackCellOperand());
  ScratchRegisterScope scratch_scope(this);
  Register scratch = scratch_scope.AcquireScratch();
  __ AssertFeedbackCell(output, scratch);
}

template <typename Field>
void BaselineAssembler::DecodeField(Register reg) {
  __ DecodeField<Field>(reg);
4 deps/v8/src/baseline/baseline-assembler.h vendored
@@ -30,6 +30,7 @@ class BaselineAssembler {
  inline MemOperand ContextOperand();
  inline MemOperand FunctionOperand();
  inline MemOperand FeedbackVectorOperand();
  inline MemOperand FeedbackCellOperand();

  inline void GetCode(LocalIsolate* isolate, CodeDesc* desc);
  inline int pc_offset() const;
@@ -232,6 +233,9 @@ class BaselineAssembler {
  inline void LoadContext(Register output);
  inline void StoreContext(Register context);

  inline void LoadFeedbackCell(Register output);
  inline void AssertFeedbackCell(Register object);

  inline static void EmitReturn(MacroAssembler* masm);

  MacroAssembler* masm() { return masm_; }
6 deps/v8/src/baseline/baseline-compiler.cc vendored
@@ -78,7 +78,7 @@ namespace detail {
#ifdef DEBUG
bool Clobbers(Register target, Register reg) { return target == reg; }
bool Clobbers(Register target, Handle<Object> handle) { return false; }
bool Clobbers(Register target, Smi smi) { return false; }
bool Clobbers(Register target, Tagged<Smi> smi) { return false; }
bool Clobbers(Register target, Tagged<TaggedIndex> index) { return false; }
bool Clobbers(Register target, int32_t imm) { return false; }
bool Clobbers(Register target, RootIndex index) { return false; }
@@ -92,7 +92,7 @@ bool MachineTypeMatches(MachineType type, MemOperand reg) { return true; }
bool MachineTypeMatches(MachineType type, Handle<HeapObject> handle) {
  return type.IsTagged() && !type.IsTaggedSigned();
}
bool MachineTypeMatches(MachineType type, Smi handle) {
bool MachineTypeMatches(MachineType type, Tagged<Smi> handle) {
  return type.IsTagged() && !type.IsTaggedPointer();
}
bool MachineTypeMatches(MachineType type, Tagged<TaggedIndex> handle) {
@@ -712,7 +712,7 @@ void BaselineCompiler::VisitLdaZero() {
}

void BaselineCompiler::VisitLdaSmi() {
  Smi constant = Smi::FromInt(iterator().GetImmediateOperand(0));
  Tagged<Smi> constant = Smi::FromInt(iterator().GetImmediateOperand(0));
  __ Move(kInterpreterAccumulatorRegister, constant);
}
@@ -84,7 +84,7 @@ class V8_EXPORT_PRIVATE BytecodeOffsetIterator {
  Address current_pc_start_offset_;
  Address current_pc_end_offset_;
  int current_bytecode_offset_;
  BytecodeArray bytecode_handle_storage_;
  Tagged<BytecodeArray> bytecode_handle_storage_;
  interpreter::BytecodeArrayIterator bytecode_iterator_;
  LocalHeap* local_heap_;
  base::Optional<DisallowGarbageCollection> no_gc_;
@@ -68,6 +68,9 @@ void BaselineAssembler::RegisterFrameAddress(
MemOperand BaselineAssembler::FeedbackVectorOperand() {
  return MemOperand(ebp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
MemOperand BaselineAssembler::FeedbackCellOperand() {
  return MemOperand(ebp, BaselineFrameConstants::kFeedbackCellFromFp);
}

void BaselineAssembler::Bind(Label* label) { __ bind(label); }

@@ -384,9 +387,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedField(feedback_cell, feedback_cell,
                  JSFunction::kFeedbackCellOffset);
  LoadFeedbackCell(feedback_cell);
  __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
         Immediate(weight));
  if (skip_interrupt_label) {
@@ -401,9 +402,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  DCHECK(!AreAliased(feedback_cell, weight));
  LoadFunction(feedback_cell);
  LoadTaggedField(feedback_cell, feedback_cell,
                  JSFunction::kFeedbackCellOffset);
  LoadFeedbackCell(feedback_cell);
  __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
         weight);
  if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
@@ -61,6 +61,9 @@ void BaselineAssembler::RegisterFrameAddress(
MemOperand BaselineAssembler::FeedbackVectorOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
MemOperand BaselineAssembler::FeedbackCellOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp);
}

void BaselineAssembler::Bind(Label* label) { __ bind(label); }

@@ -389,9 +392,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedField(feedback_cell, feedback_cell,
                  JSFunction::kFeedbackCellOffset);
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ Ld_w(interrupt_budget,
@@ -409,9 +410,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedField(feedback_cell, feedback_cell,
                  JSFunction::kFeedbackCellOffset);
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ Ld_w(interrupt_budget,
@@ -61,6 +61,9 @@ void BaselineAssembler::RegisterFrameAddress(
MemOperand BaselineAssembler::FeedbackVectorOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
MemOperand BaselineAssembler::FeedbackCellOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp);
}

void BaselineAssembler::Bind(Label* label) { __ bind(label); }

@@ -387,9 +390,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedField(feedback_cell, feedback_cell,
                  JSFunction::kFeedbackCellOffset);
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ Lw(interrupt_budget,
@@ -407,9 +408,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedField(feedback_cell, feedback_cell,
                  JSFunction::kFeedbackCellOffset);
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ Lw(interrupt_budget,
@@ -88,6 +88,9 @@ void BaselineAssembler::RegisterFrameAddress(
MemOperand BaselineAssembler::FeedbackVectorOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
MemOperand BaselineAssembler::FeedbackCellOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp);
}

void BaselineAssembler::Bind(Label* label) { __ bind(label); }

@@ -471,9 +474,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedField(feedback_cell, feedback_cell,
                  JSFunction::kFeedbackCellOffset);
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ LoadU32(
@@ -496,9 +497,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedField(feedback_cell, feedback_cell,
                  JSFunction::kFeedbackCellOffset);
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ LoadU32(
@@ -60,6 +60,9 @@ void BaselineAssembler::RegisterFrameAddress(
MemOperand BaselineAssembler::FeedbackVectorOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
MemOperand BaselineAssembler::FeedbackCellOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp);
}

void BaselineAssembler::Bind(Label* label) { __ bind(label); }

@@ -395,9 +398,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedField(feedback_cell, feedback_cell,
                  JSFunction::kFeedbackCellOffset);
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ Lw(interrupt_budget,
@@ -417,9 +418,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedField(feedback_cell, feedback_cell,
                  JSFunction::kFeedbackCellOffset);
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ Lw(interrupt_budget,
@@ -87,6 +87,9 @@ void BaselineAssembler::RegisterFrameAddress(
MemOperand BaselineAssembler::FeedbackVectorOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
MemOperand BaselineAssembler::FeedbackCellOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp);
}

void BaselineAssembler::Bind(Label* label) { __ bind(label); }

@@ -488,9 +491,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedField(feedback_cell, feedback_cell,
                  JSFunction::kFeedbackCellOffset);
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ LoadU32(
@@ -513,9 +514,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedField(feedback_cell, feedback_cell,
                  JSFunction::kFeedbackCellOffset);
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ LoadU32(
@@ -70,6 +70,9 @@ void BaselineAssembler::RegisterFrameAddress(
MemOperand BaselineAssembler::FeedbackVectorOperand() {
  return MemOperand(rbp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
MemOperand BaselineAssembler::FeedbackCellOperand() {
  return MemOperand(rbp, BaselineFrameConstants::kFeedbackCellFromFp);
}

void BaselineAssembler::Bind(Label* label) { __ bind(label); }

@@ -398,11 +401,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  // Decompresses pointer by complex addressing mode when necessary.
  TaggedRegister tagged(feedback_cell);
  LoadTaggedField(tagged, feedback_cell, JSFunction::kFeedbackCellOffset);
  __ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset),
  LoadFeedbackCell(feedback_cell);
  __ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
          Immediate(weight));
  if (skip_interrupt_label) {
    DCHECK_LT(weight, 0);
@@ -415,11 +415,9 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  // Decompresses pointer by complex addressing mode when necessary.
  TaggedRegister tagged(feedback_cell);
  LoadTaggedField(tagged, feedback_cell, JSFunction::kFeedbackCellOffset);
  __ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset), weight);
  LoadFeedbackCell(feedback_cell);
  __ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
          weight);
  if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
}

88 deps/v8/src/builtins/arm/builtins-arm.cc vendored
@@ -933,18 +933,19 @@ void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  UseScratchRegisterScope temps(masm);
  // Need a few extra registers
  temps.Include(r8, r9);
  temps.Include({r4, r8, r9});

  auto descriptor =
      Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
  Register closure = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kClosure);
  // Load the feedback vector from the closure.
  // Load the feedback cell and vector from the closure.
  Register feedback_cell = temps.Acquire();
  Register feedback_vector = temps.Acquire();
  __ ldr(feedback_vector,
  __ ldr(feedback_cell,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ ldr(feedback_vector,
         FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));
         FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));
  __ AssertFeedbackVector(feedback_vector);

  // Check the tiering state.
@@ -1004,9 +1005,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
    Register bytecodeArray = descriptor.GetRegisterParameter(
        BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
    __ Push(argc, bytecodeArray);

    // Baseline code frames store the feedback vector where interpreter would
    // store the bytecode offset.
    if (v8_flags.debug_code) {
      UseScratchRegisterScope temps(masm);
      Register scratch = temps.Acquire();
@@ -1014,6 +1012,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
                           FEEDBACK_VECTOR_TYPE);
      __ Assert(eq, AbortReason::kExpectedFeedbackVector);
    }
    __ Push(feedback_cell);
    __ Push(feedback_vector);
  }

@@ -1075,9 +1074,9 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
  // We're here because we got deopted during BaselineOutOfLinePrologue's stack
  // check. Undo all its frame creation and call into the interpreter instead.

  // Drop bytecode offset (was the feedback vector but got replaced during
  // deopt) and bytecode array.
  __ Drop(2);
  // Drop the feedback vector, the bytecode offset (was the feedback vector but
  // got replaced during deopt) and bytecode array.
  __ Drop(3);

  // Context, closure, argc.
  __ Pop(kContextRegister, kJavaScriptCallTargetRegister,
@@ -1127,35 +1126,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(
                       BYTECODE_ARRAY_TYPE);
  __ b(ne, &compile_lazy);

#ifndef V8_JITLESS
  // Load the feedback vector from the closure.
  Register feedback_vector = r2;
  __ ldr(feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ ldr(feedback_vector,
         FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));

  Label push_stack_frame;
  // Check if feedback vector is valid. If valid, check for optimized code
  // and update invocation count. Otherwise, setup the stack frame.
  __ ldr(r4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
  __ ldrh(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
  __ cmp(r4, Operand(FEEDBACK_VECTOR_TYPE));
  __ b(ne, &push_stack_frame);
  Register feedback_vector = r2;
  __ LoadFeedbackVector(feedback_vector, closure, r4, &push_stack_frame);

#ifndef V8_JITLESS
  // If feedback vector is valid, check for optimized code and update invocation
  // count.
  Register flags = r4;
  Label flags_need_processing;
  __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
      flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
      &flags_need_processing);

  {
    UseScratchRegisterScope temps(masm);
    ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire());
  }

  Label not_optimized;
  __ bind(&not_optimized);
  ResetFeedbackVectorOsrUrgency(masm, feedback_vector, r4);

  // Increment invocation count for the function.
  __ ldr(r9, FieldMemOperand(feedback_vector,
@@ -1167,13 +1151,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(
  // Open a frame scope to indicate that there is a frame on the stack. The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done below).
  __ bind(&push_stack_frame);
#else
  // Note: By omitting the above code in jitless mode we also disable:
  // - kFlagsLogNextExecution: only used for logging/profiling; and
  // - kInvocationCountOffset: only used for tiering heuristics and code
  //   coverage.
#endif  // !V8_JITLESS

  __ bind(&push_stack_frame);
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ PushStandardFrame(closure);

@@ -1183,7 +1168,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(

  // Push bytecode array and Smi tagged bytecode array offset.
  __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
  __ Push(kInterpreterBytecodeArrayRegister, r4);
  __ Push(kInterpreterBytecodeArrayRegister, r4, feedback_vector);

  // Allocate the local and temporary register file on the stack.
  Label stack_overflow;
@@ -3517,9 +3502,9 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm,
    // from the API function here.
    MemOperand stack_space_operand =
        ExitFrameStackSlotOperand(FCA::kLengthOffset + kSlotsToDropOnStackSize);
    __ mov(scratch, Operand((FCA::kArgsLength + 1 /* receiver */ +
                             exit_frame_params_count) *
                            kPointerSize));
    __ mov(scratch,
           Operand((FCA::kArgsLengthWithReceiver + exit_frame_params_count) *
                   kPointerSize));
    __ add(scratch, scratch, Operand(argc, LSL, kPointerSizeLog2));
    __ str(scratch, stack_space_operand);

@@ -3540,9 +3525,10 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm,

  const bool with_profiling =
      mode != CallApiCallbackMode::kOptimizedNoProfiling;
  Label* no_done = nullptr;
  CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
                           thunk_ref, thunk_arg, kUseStackSpaceOperand,
                           &stack_space_operand, return_value_operand);
                           &stack_space_operand, return_value_operand, no_done);
}

void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
@@ -3638,9 +3624,10 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
  MemOperand* const kUseStackSpaceConstant = nullptr;

  const bool with_profiling = true;
  CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
                           thunk_ref, thunk_arg, kStackUnwindSpace,
                           kUseStackSpaceConstant, return_value_operand);
  Label* no_done = nullptr;
  CallApiFunctionAndReturn(
      masm, with_profiling, api_function_address, thunk_ref, thunk_arg,
      kStackUnwindSpace, kUseStackSpaceConstant, return_value_operand, no_done);
}

void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
@@ -3977,12 +3964,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
    AssertCodeIsBaseline(masm, code_obj, r3);
  }

  // Load the feedback vector.
  Register feedback_vector = r2;
  __ ldr(feedback_vector,
  // Load the feedback cell and vector.
  Register feedback_cell = r2;
  Register feedback_vector = r9;
  __ ldr(feedback_cell,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ ldr(feedback_vector,
         FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));
         FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));

  Label install_baseline_code;
  // Check if feedback vector is valid. If not, call prepare for baseline to
@@ -3994,9 +3982,17 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
  __ ldr(kInterpreterBytecodeOffsetRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
  // Replace BytecodeOffset with the feedback vector.
  // Replace bytecode offset with feedback cell.
  static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp ==
                BaselineFrameConstants::kFeedbackCellFromFp);
  __ str(feedback_cell,
         MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp));
  feedback_cell = no_reg;
  // Update feedback vector cache.
  static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp ==
                BaselineFrameConstants::kFeedbackVectorFromFp);
  __ str(feedback_vector,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
         MemOperand(fp, InterpreterFrameConstants::kFeedbackVectorFromFp));
  feedback_vector = no_reg;

  // Compute baseline pc for bytecode offset.
124 deps/v8/src/builtins/arm64/builtins-arm64.cc vendored
@@ -1101,19 +1101,20 @@ void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  UseScratchRegisterScope temps(masm);
  // Need a few extra registers
  temps.Include(x14, x15);
  temps.Include(CPURegList(kXRegSizeInBits, {x14, x15, x22}));

  auto descriptor =
      Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
  Register closure = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kClosure);
  // Load the feedback vector from the closure.
  // Load the feedback cell and vector from the closure.
  Register feedback_cell = temps.AcquireX();
  Register feedback_vector = temps.AcquireX();
  __ LoadTaggedField(feedback_vector,
  __ LoadTaggedField(feedback_cell,
                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedField(
      feedback_vector,
      FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));
      FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));
  __ AssertFeedbackVector(feedback_vector, x4);

  // Check the tiering state.
@@ -1165,16 +1166,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
    // the frame, so load it into a register.
    Register bytecode_array = descriptor.GetRegisterParameter(
        BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
    __ Push(argc, bytecode_array);

    // Baseline code frames store the feedback vector where interpreter would
    // store the bytecode offset.
    __ Push(argc, bytecode_array, feedback_cell, feedback_vector);
    __ AssertFeedbackVector(feedback_vector, x4);
    // Our stack is currently aligned. We have to push something along with
    // the feedback vector to keep it that way -- we may as well start
    // initialising the register frame.
    __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
    __ Push(feedback_vector, kInterpreterAccumulatorRegister);
  }

  Label call_stack_guard;
@@ -1198,11 +1191,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  }

  // Do "fast" return to the caller pc in lr.
  if (v8_flags.debug_code) {
    // The accumulator should already be "undefined", we don't have to load it.
    __ CompareRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
    __ Assert(eq, AbortReason::kUnexpectedValue);
  }
  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
  __ Ret();

  __ bind(&flags_need_processing);
@@ -1237,9 +1226,8 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
  // We're here because we got deopted during BaselineOutOfLinePrologue's stack
  // check. Undo all its frame creation and call into the interpreter instead.

  // Drop the accumulator register (we already started building the register
  // frame) and bytecode offset (was the feedback vector but got replaced
  // during deopt).
  // Drop the feedback vector and the bytecode offset (was the feedback vector
  // but got replaced during deopt).
  __ Drop(2);

  // Bytecode array, argc, Closure, Context.
@@ -1291,38 +1279,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(
                       BYTECODE_ARRAY_TYPE);
  __ B(ne, &compile_lazy);

#ifndef V8_JITLESS
  // Load the feedback vector from the closure.
  Register feedback_vector = x2;
  __ LoadTaggedField(feedback_vector,
                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedField(
      feedback_vector,
      FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));

  Label push_stack_frame;
  // Check if feedback vector is valid. If valid, check for optimized code
  // and update invocation count. Otherwise, setup the stack frame.
  __ LoadTaggedField(x7,
                     FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
  __ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
  __ Cmp(x7, FEEDBACK_VECTOR_TYPE);
  __ B(ne, &push_stack_frame);
  Register feedback_vector = x2;
  __ LoadFeedbackVector(feedback_vector, closure, x7, &push_stack_frame);

  // Check the tiering state.
#ifndef V8_JITLESS
  // If feedback vector is valid, check for optimized code and update invocation
  // count.
  Label flags_need_processing;
  Register flags = w7;
  __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
      flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
      &flags_need_processing);

  {
    UseScratchRegisterScope temps(masm);
    ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.AcquireW());
  }

  Label not_optimized;
  __ bind(&not_optimized);
  ResetFeedbackVectorOsrUrgency(masm, feedback_vector, w7);

  // Increment invocation count for the function.
  __ Ldr(w10, FieldMemOperand(feedback_vector,
@@ -1334,13 +1304,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(
  // Open a frame scope to indicate that there is a frame on the stack. The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done below).
  __ Bind(&push_stack_frame);
#else
  // Note: By omitting the above code in jitless mode we also disable:
  // - kFlagsLogNextExecution: only used for logging/profiling; and
  // - kInvocationCountOffset: only used for tiering heuristics and code
  //   coverage.
#endif  // !V8_JITLESS

  __ Bind(&push_stack_frame);
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ Push<MacroAssembler::kSignLR>(lr, fp);
  __ mov(fp, sp);
@@ -1351,12 +1322,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));

  // Push actual argument count, bytecode array, Smi tagged bytecode array
  // offset and an undefined (to properly align the stack pointer).
  static_assert(MacroAssembler::kExtraSlotClaimedByPrologue == 1);
  // offset and the feedback vector.
  __ SmiTag(x6, kInterpreterBytecodeOffsetRegister);
  __ Push(kJavaScriptCallArgCountRegister, kInterpreterBytecodeArrayRegister);
  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
  __ Push(x6, kInterpreterAccumulatorRegister);
  __ Push(x6, feedback_vector);

  // Allocate the local and temporary register file on the stack.
  Label stack_overflow;
@@ -1380,9 +1349,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(
    // register in the register file.
    Label loop_header;
    __ Lsr(x11, x11, kSystemPointerSizeLog2);
    // Round down (since we already have an undefined in the stack) the number
    // of registers to a multiple of 2, to align the stack to 16 bytes.
    // Round up the number of registers to a multiple of 2, to align the stack
    // to 16 bytes.
    __ Add(x11, x11, 1);
    __ Bic(x11, x11, 1);
    __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
    __ PushMultipleTimes(kInterpreterAccumulatorRegister, x11);
    __ Bind(&loop_header);
  }
@@ -2528,6 +2499,8 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
    __ Bind(&even);
    __ Cbz(slots_to_claim, &exit);
    __ Claim(slots_to_claim);
    // An alignment slot may have been allocated above. If the number of stack
    // parameters is 0, then we have to initialize the alignment slot.
    __ Cbz(slots_to_copy, &init);

    // Move the arguments already in the stack including the receiver.
@@ -2548,21 +2521,11 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
  // call.
  {
    __ Bind(&init);
    // Unconditionally initialize the last parameter slot. If `len` is odd, then
    // it is an alignment slot that we have to initialize to avoid issues in the
    // GC. If `len` is even, then the write is unnecessary, but faster than a
    // check + jump.
    // This code here is only reached when the number of stack parameters is 0.
    // In that case we have to initialize the alignment slot if there is one.
    __ Tbz(len, 0, &exit);
    __ Str(xzr, MemOperand(sp, len, LSL, kSystemPointerSizeLog2));
  }
  // Fill a possible alignment slot with a meaningful value.
  {
    Register total_num_args = x10;
    __ Add(total_num_args, argc, len);
    // If the sum is even, then there are no alignment slots that need
    // initialization.
    __ Tbz(total_num_args, 0, &exit);
    __ Str(xzr, MemOperand(sp, total_num_args, LSL, kSystemPointerSizeLog2));
  }
  __ Bind(&exit);
}

@@ -4849,9 +4812,10 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm,

  const bool with_profiling =
      mode != CallApiCallbackMode::kOptimizedNoProfiling;
  Label* no_done = nullptr;
  CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
                           thunk_ref, thunk_arg, kUseStackSpaceOperand,
                           &stack_space_operand, return_value_operand);
                           &stack_space_operand, return_value_operand, no_done);
}

void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
@@ -4966,9 +4930,10 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
  MemOperand* const kUseStackSpaceConstant = nullptr;

  const bool with_profiling = true;
  CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
                           thunk_ref, thunk_arg, kStackUnwindSpace,
                           kUseStackSpaceConstant, return_value_operand);
  Label* no_done = nullptr;
  CallApiFunctionAndReturn(
      masm, with_profiling, api_function_address, thunk_ref, thunk_arg,
      kStackUnwindSpace, kUseStackSpaceConstant, return_value_operand, no_done);
}

void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
@@ -5328,13 +5293,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
    AssertCodeIsBaseline(masm, code_obj, x3);
  }

  // Load the feedback vector.
  Register feedback_vector = x2;
  __ LoadTaggedField(feedback_vector,
  // Load the feedback cell and vector.
  Register feedback_cell = x2;
  Register feedback_vector = x15;
  __ LoadTaggedField(feedback_cell,
                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedField(
      feedback_vector,
      FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));
      FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));

  Label install_baseline_code;
  // Check if feedback vector is valid. If not, call prepare for baseline to
@@ -5345,9 +5311,17 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
  // Save BytecodeOffset from the stack frame.
  __ SmiUntag(kInterpreterBytecodeOffsetRegister,
              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  // Replace BytecodeOffset with the feedback vector.
  // Replace bytecode offset with feedback cell.
  static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp ==
                BaselineFrameConstants::kFeedbackCellFromFp);
  __ Str(feedback_cell,
         MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp));
  feedback_cell = no_reg;
  // Update feedback vector cache.
  static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp ==
                BaselineFrameConstants::kFeedbackVectorFromFp);
  __ Str(feedback_vector,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
         MemOperand(fp, InterpreterFrameConstants::kFeedbackVectorFromFp));
  feedback_vector = no_reg;

  // Compute baseline pc for bytecode offset.
|||
646
deps/v8/src/builtins/array-from-async.tq
vendored
Normal file
646
deps/v8/src/builtins/array-from-async.tq
vendored
Normal file
|
|
@ -0,0 +1,646 @@
// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

namespace array {

extern enum ArrayFromAsyncLabels extends uint31
    constexpr 'ArrayBuiltins::ArrayFromAsyncLabels' {
  kGetIteratorStep,
  kCheckIteratorValueAndMapping,
  kIteratorMapping,
  kGetIteratorValueWithMapping,
  kAddIteratorValueToTheArray,
  kGetArrayLikeValue,
  kCheckArrayLikeValueAndMapping,
  kGetArrayLikeValueWithMapping,
  kAddArrayLikeValueToTheArray,
  kDoneAndResolvePromise,
  kCloseAsyncIterator,
  kRejectPromise
}

struct ArrayFromAsyncResumeState {
  step: ArrayFromAsyncLabels;
  awaitedValue: JSAny;
  len: Smi;
  index: Smi;
}

type ArrayFromAsyncResolveContext extends FunctionContext;
extern enum ArrayFromAsyncResolveContextSlots extends intptr
    constexpr 'ArrayBuiltins::ArrayFromAsyncResolveContextSlots' {
  kArrayFromAsyncResolveResumeStateStepSlot:
      Slot<ArrayFromAsyncResolveContext, Smi>,
  kArrayFromAsyncResolveResumeStateAwaitedValueSlot:
      Slot<ArrayFromAsyncResolveContext, JSAny>,
  kArrayFromAsyncResolveResumeStateLenSlot:
      Slot<ArrayFromAsyncResolveContext, Smi>,
  kArrayFromAsyncResolveResumeStateIndexSlot:
      Slot<ArrayFromAsyncResolveContext, Smi>,
  kArrayFromAsyncResolvePromiseSlot:
      Slot<ArrayFromAsyncResolveContext, JSPromise>,
  kArrayFromAsyncResolvePromiseFunctionSlot:
      Slot<ArrayFromAsyncResolveContext, JSReceiver>,
  kArrayFromAsyncResolveOnFulfilledFunctionSlot:
      Slot<ArrayFromAsyncResolveContext, JSFunction|Undefined>,
  kArrayFromAsyncResolveOnRejectedFunctionSlot:
      Slot<ArrayFromAsyncResolveContext, JSFunction|Undefined>,
  kArrayFromAsyncResolveResultArraySlot:
      Slot<ArrayFromAsyncResolveContext, JSReceiver>,
  kArrayFromAsyncResolveIteratorSlot:
      Slot<ArrayFromAsyncResolveContext, JSReceiver>,
  kArrayFromAsyncResolveNextMethodSlot:
      Slot<ArrayFromAsyncResolveContext, JSAny>,
  kArrayFromAsyncResolveErrorSlot: Slot<ArrayFromAsyncResolveContext, JSAny>,
  kArrayFromAsyncResolveMapfnSlot: Slot<ArrayFromAsyncResolveContext, JSAny>,
  kArrayFromAsyncResolveThisArgSlot: Slot<ArrayFromAsyncResolveContext, JSAny>,
  kArrayFromAsyncResolveLength
}

macro CreateArrayFromAsyncResolveContext(
    implicit context: Context)(resumeState: ArrayFromAsyncResumeState,
    promise: JSPromise, promiseFun: JSReceiver, map: Map, iterator: JSReceiver,
    next: JSAny, arr: JSReceiver, error: JSAny, mapfn: JSAny, thisArg: JSAny,
    nativeContext: NativeContext): ArrayFromAsyncResolveContext {
  const resolveContext = %RawDownCast<ArrayFromAsyncResolveContext>(
      AllocateSyntheticFunctionContext(
          nativeContext,
          ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveLength));
  InitContextSlot(
      resolveContext,
      ArrayFromAsyncResolveContextSlots::
          kArrayFromAsyncResolveResumeStateStepSlot,
      SmiTag<ArrayFromAsyncLabels>(resumeState.step));
  InitContextSlot(
      resolveContext,
      ArrayFromAsyncResolveContextSlots::
          kArrayFromAsyncResolveResumeStateAwaitedValueSlot,
      resumeState.awaitedValue);
  InitContextSlot(
      resolveContext,
      ArrayFromAsyncResolveContextSlots::
          kArrayFromAsyncResolveResumeStateLenSlot,
      resumeState.len);
  InitContextSlot(
      resolveContext,
      ArrayFromAsyncResolveContextSlots::
          kArrayFromAsyncResolveResumeStateIndexSlot,
      resumeState.index);
  InitContextSlot(
      resolveContext,
      ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolvePromiseSlot,
      promise);
  InitContextSlot(
      resolveContext,
      ArrayFromAsyncResolveContextSlots::
          kArrayFromAsyncResolvePromiseFunctionSlot,
      promiseFun);
  InitContextSlot(
      resolveContext,
      ArrayFromAsyncResolveContextSlots::
          kArrayFromAsyncResolveOnFulfilledFunctionSlot,
      promise::AllocateFunctionWithMapAndContext(
          map, ArrayFromAsyncOnFulfilledSharedFunConstant(), resolveContext));
  InitContextSlot(
      resolveContext,
      ArrayFromAsyncResolveContextSlots::
          kArrayFromAsyncResolveOnRejectedFunctionSlot,
      promise::AllocateFunctionWithMapAndContext(
          map, ArrayFromAsyncOnRejectedSharedFunConstant(), resolveContext));
  InitContextSlot(
      resolveContext,
      ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveResultArraySlot,
      arr);
  InitContextSlot(
      resolveContext,
      ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveIteratorSlot,
      iterator);
  InitContextSlot(
      resolveContext,
      ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveNextMethodSlot,
      next);
  InitContextSlot(
      resolveContext,
      ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveErrorSlot,
      error);
  InitContextSlot(
      resolveContext,
      ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveMapfnSlot,
      mapfn);
  InitContextSlot(
      resolveContext,
      ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveThisArgSlot,
      thisArg);
  return resolveContext;
}

macro GetIteratorRecordFromArrayFromAsyncResolveContext(
    context: ArrayFromAsyncResolveContext): iterator::IteratorRecord {
  const iterator = *ContextSlot(
      context,
      ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveIteratorSlot);

  const nextMethod = *ContextSlot(
      context,
      ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveNextMethodSlot);

  return iterator::IteratorRecord{object: iterator, next: nextMethod};
}

transitioning macro CreateArrayFromIterableAsynchronously(
    context: ArrayFromAsyncResolveContext): JSAny {
  try {
    const fastIteratorResultMap = GetIteratorResultMap();

    const mapfn = *ContextSlot(
        context,
        ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveMapfnSlot);

    const thisArg = *ContextSlot(
        context,
        ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveThisArgSlot);

    const arr = *ContextSlot(
        context,
        ArrayFromAsyncResolveContextSlots::
            kArrayFromAsyncResolveResultArraySlot);

    let resumeState = ArrayFromAsyncResumeState{
      step: SmiUntag<ArrayFromAsyncLabels>(
          %RawDownCast<SmiTagged<ArrayFromAsyncLabels>>(*ContextSlot(
              context,
              ArrayFromAsyncResolveContextSlots::
                  kArrayFromAsyncResolveResumeStateStepSlot))),
      awaitedValue: *ContextSlot(
          context,
          ArrayFromAsyncResolveContextSlots::
              kArrayFromAsyncResolveResumeStateAwaitedValueSlot),
      len: *ContextSlot(
          context,
          ArrayFromAsyncResolveContextSlots::
              kArrayFromAsyncResolveResumeStateLenSlot),
      index: *ContextSlot(
          context,
          ArrayFromAsyncResolveContextSlots::
              kArrayFromAsyncResolveResumeStateIndexSlot)
    };

    let mappedValue: JSAny = Undefined;
    let nextValue: JSAny = Undefined;

    // TODO(v8:14290): Replace `if/else` with `switch/case` when the support
    // for `switch` is added.

    while (true) {
      if (resumeState.step == ArrayFromAsyncLabels::kGetIteratorStep) {
        const iteratorRecord =
            GetIteratorRecordFromArrayFromAsyncResolveContext(context);
        let next: JSAny;
        // https://github.com/tc39/proposal-array-from-async/issues/33#issuecomment-1279296963
        // 3. Let nextResult be ? Call(iteratorRecord.[[NextMethod]],
        //    iteratorRecord.[[Iterator]]).
        // 4. Set nextResult to ? Await(nextResult).
        next = Call(context, iteratorRecord.next, iteratorRecord.object);

        return ArrayFromAsyncAwaitPoint(
            ArrayFromAsyncLabels::kCheckIteratorValueAndMapping, next);
      } else if (
          resumeState.step ==
          ArrayFromAsyncLabels::kCheckIteratorValueAndMapping) {
        // 5. If nextResult is not an Object, throw a TypeError exception.
        const nextJSReceiver = Cast<JSReceiver>(resumeState.awaitedValue)
            otherwise ThrowTypeError(
            MessageTemplate::kIteratorResultNotAnObject, 'Array.fromAsync');

        try {
          // 6. Let done be ? IteratorComplete(nextResult).
          iterator::IteratorComplete(nextJSReceiver, fastIteratorResultMap)
              otherwise Done;

          // 8. Let nextValue be ? IteratorValue(nextResult).
          nextValue =
              iterator::IteratorValue(nextJSReceiver, fastIteratorResultMap);

          // When mapfn is not undefined, it is guaranteed to be callable as
          // checked upon entry.
          const mapping: bool = (mapfn != Undefined);

          // 9. If mapping is true, then
          if (mapping) {
            resumeState.step = ArrayFromAsyncLabels::kIteratorMapping;
          } else {
            // 10. Else, let mappedValue be nextValue.
            mappedValue = nextValue;
            resumeState.step =
                ArrayFromAsyncLabels::kAddIteratorValueToTheArray;
          }
        } label Done {
          // 7. If done is true,
          //    a. Perform ? Set(A, "length", 𝔽(k), true).
          array::SetPropertyLength(arr, resumeState.index);
          //    b. Return Completion Record { [[Type]]: return, [[Value]]: A,
          //       [[Target]]: empty }.
          resumeState.step = ArrayFromAsyncLabels::kDoneAndResolvePromise;
        }
      } else if (resumeState.step == ArrayFromAsyncLabels::kIteratorMapping) {
        // a. Let mappedValue be Call(mapfn, thisArg, « nextValue, 𝔽(k)
        //    »).
        // b. IfAbruptCloseAsyncIterator(mappedValue,
        //    iteratorRecord).
        const mapResult = Call(
            context, UnsafeCast<Callable>(mapfn), thisArg, nextValue,
            resumeState.index);

        // c. Set mappedValue to Await(mappedValue).
        // d. IfAbruptCloseAsyncIterator(mappedValue, iteratorRecord).
        return ArrayFromAsyncAwaitPoint(
            ArrayFromAsyncLabels::kGetIteratorValueWithMapping, mapResult);
      } else if (
          resumeState.step ==
          ArrayFromAsyncLabels::kGetIteratorValueWithMapping) {
        mappedValue = resumeState.awaitedValue;
        resumeState.step = ArrayFromAsyncLabels::kAddIteratorValueToTheArray;
      } else if (
          resumeState.step ==
          ArrayFromAsyncLabels::kAddIteratorValueToTheArray) {
        // 11. Let defineStatus be CreateDataPropertyOrThrow(A, Pk,
        //     mappedValue).
        // 12. If defineStatus is an abrupt completion, return ?
        //     AsyncIteratorClose(iteratorRecord, defineStatus).
        FastCreateDataProperty(arr, resumeState.index, mappedValue);

        // 13. Set k to k + 1.
        resumeState.index++;

        *ContextSlot(
            context,
            ArrayFromAsyncResolveContextSlots::
                kArrayFromAsyncResolveResumeStateIndexSlot) = resumeState.index;

        resumeState.step = ArrayFromAsyncLabels::kGetIteratorStep;
      } else if (resumeState.step == ArrayFromAsyncLabels::kGetArrayLikeValue) {
        // vii. Repeat, while k < len,
        //   1. Let Pk be ! ToString(𝔽(k)).
        //   2. Let kValue be ? Get(arrayLike, Pk).

        resumeState.step = ArrayFromAsyncLabels::kCheckArrayLikeValueAndMapping;

        *ContextSlot(
            context,
            ArrayFromAsyncResolveContextSlots::
                kArrayFromAsyncResolveResumeStateStepSlot) =
            SmiTag<ArrayFromAsyncLabels>(resumeState.step);

        resumeState.index++;
        *ContextSlot(
            context,
            ArrayFromAsyncResolveContextSlots::
                kArrayFromAsyncResolveResumeStateIndexSlot) = resumeState.index;

        // item.then((result) => asyncFunction(result));
        return Undefined;
      } else if (
          resumeState.step ==
          ArrayFromAsyncLabels::kCheckArrayLikeValueAndMapping) {
        if (resumeState.index == resumeState.len) {
          resumeState.step = ArrayFromAsyncLabels::kDoneAndResolvePromise;
        }

        let mapping: bool;
        // a. If mapfn is undefined, let mapping be false.
        if (mapfn == Undefined) {
          mapping = false;
        } else {
          // b. Else,
          //   i. If IsCallable(mapfn) is false, throw a TypeError exception.
          if (!Is<Callable>(mapfn)) deferred {
            ThrowTypeError(MessageTemplate::kCalledNonCallable, mapfn);
          }
          //   ii. Let mapping be true.
          mapping = true;
        }

        // 4. If mapping is true, then
        if (mapping) {
          resumeState.step =
              ArrayFromAsyncLabels::kGetArrayLikeValueWithMapping;
        } else {
          resumeState.step = ArrayFromAsyncLabels::kAddArrayLikeValueToTheArray;
        }
      } else if (
          resumeState.step ==
          ArrayFromAsyncLabels::kGetArrayLikeValueWithMapping) {
        // a. Let mappedValue be ? Call(mapfn, thisArg, « kValue, 𝔽(k)
        //    »). b. Set mappedValue to ? Await(mappedValue).
        const mapResult = Call(
            context, UnsafeCast<Callable>(mapfn), thisArg,
            resumeState.awaitedValue, resumeState.index);
        return ArrayFromAsyncAwaitPoint(
            ArrayFromAsyncLabels::kAddArrayLikeValueToTheArray, mapResult);
      } else if (
          resumeState.step ==
          ArrayFromAsyncLabels::kAddArrayLikeValueToTheArray) {
        // 5. Else, let mappedValue be kValue.
        // 6. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
        mappedValue = resumeState.awaitedValue;
        FastCreateDataProperty(arr, resumeState.index, mappedValue);
        resumeState.step = ArrayFromAsyncLabels::kGetArrayLikeValue;
      } else if (
          resumeState.step == ArrayFromAsyncLabels::kDoneAndResolvePromise) {
        const promise = *ContextSlot(
            context,
            ArrayFromAsyncResolveContextSlots::
                kArrayFromAsyncResolvePromiseSlot);

        promise::ResolvePromise(promise, arr);
        return Undefined;
      } else if (
          resumeState.step == ArrayFromAsyncLabels::kCloseAsyncIterator) {
        resumeState.step = ArrayFromAsyncLabels::kRejectPromise;

        const iteratorRecord =
            GetIteratorRecordFromArrayFromAsyncResolveContext(context);
        try {
          ArrayFromAsyncAsyncIteratorCloseOnException(iteratorRecord)
              otherwise RejectPromise;
          return Undefined;
        } label RejectPromise {
          // Do nothing so the codeflow continues to the kRejectPromise label.
        }
      } else if (resumeState.step == ArrayFromAsyncLabels::kRejectPromise) {
        return RejectArrayFromAsyncPromise();
      }
    }
  } catch (e, _message) {
    *ContextSlot(
        context,
        ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveErrorSlot) = e;

    const iteratorRecord =
        GetIteratorRecordFromArrayFromAsyncResolveContext(context);
    try {
      ArrayFromAsyncAsyncIteratorCloseOnException(iteratorRecord)
          otherwise RejectPromise;
    } label RejectPromise {
      return RejectArrayFromAsyncPromise();
    }
  }
  return Undefined;
}

transitioning macro ArrayFromAsyncAwaitPoint(
    implicit context: Context)(step: ArrayFromAsyncLabels,
    value: JSAny): JSAny {
  const context = %RawDownCast<ArrayFromAsyncResolveContext>(context);
  *ContextSlot(
      context,
      ArrayFromAsyncResolveContextSlots::
          kArrayFromAsyncResolveResumeStateStepSlot) =
      SmiTag<ArrayFromAsyncLabels>(step);

  const promiseFun = *ContextSlot(
      context,
      ArrayFromAsyncResolveContextSlots::
          kArrayFromAsyncResolvePromiseFunctionSlot);
  const resolve = *ContextSlot(
      context,
      ArrayFromAsyncResolveContextSlots::
          kArrayFromAsyncResolveOnFulfilledFunctionSlot);
  const reject = *ContextSlot(
      context,
      ArrayFromAsyncResolveContextSlots::
          kArrayFromAsyncResolveOnRejectedFunctionSlot);

  // TODO(v8:13321): Add a fast path for values that are already
  // built-in promises.
  const resultPromise = promise::PromiseResolve(promiseFun, value);

  promise::PerformPromiseThenImpl(
      UnsafeCast<JSPromise>(resultPromise), resolve, reject, Undefined);

  return Undefined;
}

// `ArrayFromAsyncFulfilled` is the callback function for the fulfilled case of
// the promise in `then` handler.
transitioning javascript builtin ArrayFromAsyncOnFulfilled(
    js-implicit context: Context, receiver: JSAny, target: JSFunction)(
    result: JSAny): JSAny {
  const context = %RawDownCast<ArrayFromAsyncResolveContext>(context);
  *ContextSlot(
      context,
      ArrayFromAsyncResolveContextSlots::
          kArrayFromAsyncResolveResumeStateAwaitedValueSlot) = result;

  return CreateArrayFromIterableAsynchronously(context);
}

// `ArrayFromAsyncRejected` is the callback function for the rejected case of
// the promise in `then` handler.
transitioning javascript builtin ArrayFromAsyncOnRejected(
    js-implicit context: Context, receiver: JSAny, target: JSFunction)(
    result: JSAny): JSAny {
  const context = %RawDownCast<ArrayFromAsyncResolveContext>(context);

  *ContextSlot(
      context,
      ArrayFromAsyncResolveContextSlots::
          kArrayFromAsyncResolveResumeStateStepSlot) =
      SmiTag<ArrayFromAsyncLabels>(ArrayFromAsyncLabels::kCloseAsyncIterator);
  *ContextSlot(
      context,
      ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveErrorSlot) =
      result;

  return CreateArrayFromIterableAsynchronously(context);
}

// This macro reject the promise if any exception occurs in the execution of
// the asynchronous code.
transitioning macro RejectArrayFromAsyncPromise(
    implicit context: Context)(): JSAny {
  const context = %RawDownCast<ArrayFromAsyncResolveContext>(context);
  const error = *ContextSlot(
      context,
      ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolveErrorSlot);
  const promise = *ContextSlot(
      context,
      ArrayFromAsyncResolveContextSlots::kArrayFromAsyncResolvePromiseSlot);

  return promise::RejectPromise(promise, error, False);
}

// This is the specialized implementation of `IfAbruptCloseAsyncIterator` for
// Array.fromAsync
// https://tc39.es/proposal-array-from-async/#sec-ifabruptcloseasynciterator
transitioning macro ArrayFromAsyncAsyncIteratorCloseOnException(
    implicit context: Context)(
    iterator: iterator::IteratorRecord): void labels RejectPromise {
  try {
    const context = %RawDownCast<ArrayFromAsyncResolveContext>(context);
    // 3. Let innerResult be GetMethod(iterator, "return").
    const method = GetProperty(iterator.object, kReturnString);

    // 4. If innerResult.[[Type]] is normal, then
    //   a. Let return be innerResult.[[Value]].
    //   b. If return is undefined, return Completion(completion).
    if (method == Undefined || method == Null) {
      goto RejectPromise;
    }

    //   c. Set innerResult to Call(return, iterator).
    // If an exception occurs, the original exception remains bound
    const innerResult = Call(context, method, iterator.object);

    // d. If innerResult.[[Type]] is normal, set innerResult to
    //    Completion(Await(innerResult.[[Value]])).
    const step = ArrayFromAsyncLabels::kRejectPromise;

    ArrayFromAsyncAwaitPoint(step, innerResult);
  } catch (_e, _message) {
    // Swallow the exception.
  }

  // (5. If completion.[[Type]] is throw) return Completion(completion).
}

// https://tc39.es/proposal-array-from-async/#sec-array.fromAsync
// Array.fromAsync ( asyncItems [ , mapfn [ , thisArg ] ] )
// Since we do not have support for `await` in torque, we handled
// asynchronous execution flow manually in torque. More information
// is available in go/array-from-async-implementation.
transitioning javascript builtin ArrayFromAsync(
    js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
  // 1. Let C be the this value.
  const c = HasBuiltinSubclassingFlag() ? receiver : GetArrayFunction();

  const items = arguments[0];
  const mapfn = arguments[1];
  const thisArg = arguments[2];

  // 2. Let promiseCapability be ! NewPromiseCapability(%Promise%).
  const promise = promise::NewJSPromise();

  // 3. Let fromAsyncClosure be a new Abstract Closure with no parameters that
  //    captures C, mapfn, and thisArg and performs the following steps when
  //    called:

  let usingAsyncIterator: JSAny = Undefined;
  let usingSyncIterator: JSAny = Undefined;
  let iteratorRecordObject: JSReceiver;
  let iteratorRecordNext: JSAny;
  let arr: JSReceiver;
  let firstStep: ArrayFromAsyncLabels;

  try {
    if (mapfn != Undefined) {
      // i. If IsCallable(mapfn) is false, throw a TypeError exception.
      if (!Is<Callable>(mapfn)) deferred {
        ThrowTypeError(MessageTemplate::kCalledNonCallable, mapfn);
      }
    }

    try {
      // c. Let usingAsyncIterator be ?
      //    GetMethod(asyncItems, @@asyncIterator).
      usingAsyncIterator = GetMethod(items, AsyncIteratorSymbolConstant())
          otherwise AsyncIteratorIsUndefined, AsyncIteratorNotCallable;
    } label AsyncIteratorIsUndefined {
      // d. If usingAsyncIterator is undefined, then
      //   i. Let usingSyncIterator be ?
      //      GetMethod(asyncItems, @@iterator).

      usingSyncIterator = GetMethod(items, IteratorSymbolConstant())
          otherwise SyncIteratorIsUndefined, SyncIteratorNotCallable;
    } label SyncIteratorIsUndefined deferred {
      // i. Else, (iteratorRecord is undefined)
      //   i. NOTE: asyncItems is neither an AsyncIterable nor an
      //      Iterable so assume it is an array-like object.
      //   ii. Let arrayLike be ! ToObject(asyncItems).
      //   iii. Let len be ? LengthOfArrayLike(arrayLike).
      //   iv. If IsConstructor(C) is
      //       true, then
      //     1. Let A be ? Construct(C, « 𝔽(len) »).
      //   v. Else,
      //     1. Let A be ? ArrayCreate(len).
      //   vi. Let k be 0.

      // TODO(v8:13321): Array-like path will be implemented later.
      // That means code inside the following labels are incomplete:
      // kGetArrayLikeValue, kCheckArrayLikeValueAndMapping,
      // kGetArrayLikeValueWithMapping, kAddArrayLikeValueToTheArray.

      // firstStep = ArrayFromAsyncLabels::kGetArrayLikeValue;
    } label SyncIteratorNotCallable(_value: JSAny)
    deferred {
      ThrowTypeError(
          MessageTemplate::kFirstArgumentIteratorSymbolNonCallable,
          'Array.fromAsync');
    } label AsyncIteratorNotCallable(_value: JSAny)
    deferred {
      ThrowTypeError(
          MessageTemplate::kFirstArgumentAsyncIteratorSymbolNonCallable,
          'Array.fromAsync');
    }

    // e. Let iteratorRecord be undefined.
    // f. If usingAsyncIterator is not undefined, then
    //   i. Set iteratorRecord to ? GetIterator(asyncItems, async,
    //      usingAsyncIterator).
    // g. Else if usingSyncIterator is not undefined, then
    //   i. Set iteratorRecord to ?
    //      CreateAsyncFromSyncIterator(GetIterator(asyncItems, sync,
    //      usingSyncIterator)).

    const iteratorRecord = (usingAsyncIterator != Undefined) ?
        iterator::GetIterator(items, usingAsyncIterator) :
        iterator::GetIteratorRecordAfterCreateAsyncFromSyncIterator(
            iterator::GetIterator(items, usingSyncIterator));

    iteratorRecordObject = iteratorRecord.object;
    iteratorRecordNext = iteratorRecord.next;

    // h. If iteratorRecord is not undefined, then
    typeswitch (c) {
      case (c: Constructor): {
        // i. If IsConstructor(C) is true, then
        //   1. Let A be ? Construct(C).
        arr = Construct(c);
      }
      case (JSAny): {
        // ii. Else,
        //   1. Let A be ! ArrayCreate(0).
        arr = ArrayCreate(0);
      }
    }

    firstStep = ArrayFromAsyncLabels::kGetIteratorStep;
  } catch (e, _message) {
    promise::RejectPromise(promise, e, False);
    return promise;
  }

  let resumeState = ArrayFromAsyncResumeState{
    step: firstStep,
    awaitedValue: Undefined,
    len: 0,
    index: 0
  };

  const promiseFun = *NativeContextSlot(
      context, ContextSlot::PROMISE_FUNCTION_INDEX);
  const map = *NativeContextSlot(
      context, ContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);

  const resolveContext = CreateArrayFromAsyncResolveContext(
      resumeState, promise, promiseFun, map, iteratorRecordObject,
      iteratorRecordNext, arr, Undefined, mapfn, thisArg, context);

  CreateArrayFromIterableAsynchronously(resolveContext);
  return promise;
}

extern macro ArrayFromAsyncOnFulfilledSharedFunConstant(): SharedFunctionInfo;
extern macro ArrayFromAsyncOnRejectedSharedFunConstant(): SharedFunctionInfo;
}
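The state machine above hand-rolls `await` because Torque has no await support: each `ArrayFromAsyncAwaitPoint` stores the next label in the context and chains the resumption onto a promise. For orientation, the observable behavior it implements can be sketched in TypeScript. This is a simplified model of the async-iterator path only (the function name `arrayFromAsync` and its exact signature are illustrative, not V8 code; the real builtin also handles sync iterables, subclassing via C, and iterator close-on-error):

// Simplified model of Array.fromAsync's async-iterator path.
async function arrayFromAsync<T, U>(
    items: AsyncIterable<T>,
    mapfn?: (value: T, index: number) => U | Promise<U>,
    thisArg?: unknown): Promise<(T | U)[]> {
  if (mapfn !== undefined && typeof mapfn !== 'function') {
    throw new TypeError('mapfn is not callable');      // kCalledNonCallable
  }
  const result: (T | U)[] = [];
  let k = 0;
  // for await = Call(next) + Await + IteratorComplete + IteratorValue.
  for await (const value of items) {
    // Steps 9/10: apply mapfn if present, awaiting its result
    // (kIteratorMapping / kGetIteratorValueWithMapping).
    result.push(mapfn ? await mapfn.call(thisArg, value, k) : value);
    k++;
  }
  return result;                                       // kDoneAndResolvePromise
}

// Usage:
// arrayFromAsync((async function* () { yield 1; yield 2; })(), (x) => x * 2)
//     .then(console.log);  // [2, 4]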
5 deps/v8/src/builtins/base.tq vendored
@@ -470,6 +470,8 @@ extern enum MessageTemplate {
   kArgumentIsNonObject,
   kKeysMethodInvalid,
   kGeneratorRunning,
+  kFirstArgumentAsyncIteratorSymbolNonCallable,
+  kIteratorResultNotAnObject,
   ...
 }

@@ -485,8 +487,6 @@ extern enum PropertyAttributes extends int31 {

-const kArrayBufferMaxByteLength:
-    constexpr uintptr generates 'JSArrayBuffer::kMaxByteLength';
 const kTypedArrayMaxLength:
     constexpr uintptr generates 'JSTypedArray::kMaxLength';
 const kMaxTypedArrayInHeap:
     constexpr int31 generates 'JSTypedArray::kMaxSizeInHeap';
 // CSA does not support 64-bit types on 32-bit platforms so as a workaround the

@@ -551,6 +551,7 @@ extern macro Int32FalseConstant(): bool;
 extern macro Int32TrueConstant(): bool;
 extern macro IteratorSymbolConstant(): PublicSymbol;
 extern macro KeysStringConstant(): String;
+extern macro AsyncIteratorSymbolConstant(): PublicSymbol;
 extern macro LengthStringConstant(): String;
 extern macro MatchSymbolConstant(): Symbol;
 extern macro MessageStringConstant(): String;
2 deps/v8/src/builtins/builtins-array-gen.cc vendored
@@ -1675,7 +1675,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
       // 2. Perform ? CreateDataPropertyOrThrow(target,
       //                                        ! ToString(targetIndex),
       //                                        element).
-      CallRuntime(Runtime::kCreateDataProperty, context, target,
+      CallBuiltin(Builtin::kFastCreateDataProperty, context, target,
                   target_index, element);

       // 3. Increase targetIndex by 1.
36 deps/v8/src/builtins/builtins-array-gen.h vendored
@@ -124,6 +124,42 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
   ElementsKind source_elements_kind_ = ElementsKind::NO_ELEMENTS;
 };

+class ArrayBuiltins {
+ public:
+  enum ArrayFromAsyncResolveContextSlots {
+    kArrayFromAsyncResolveResumeStateStepSlot = Context::MIN_CONTEXT_SLOTS,
+    kArrayFromAsyncResolveResumeStateAwaitedValueSlot,
+    kArrayFromAsyncResolveResumeStateLenSlot,
+    kArrayFromAsyncResolveResumeStateIndexSlot,
+    kArrayFromAsyncResolvePromiseSlot,
+    kArrayFromAsyncResolvePromiseFunctionSlot,
+    kArrayFromAsyncResolveOnFulfilledFunctionSlot,
+    kArrayFromAsyncResolveOnRejectedFunctionSlot,
+    kArrayFromAsyncResolveResultArraySlot,
+    kArrayFromAsyncResolveIteratorSlot,
+    kArrayFromAsyncResolveNextMethodSlot,
+    kArrayFromAsyncResolveErrorSlot,
+    kArrayFromAsyncResolveMapfnSlot,
+    kArrayFromAsyncResolveThisArgSlot,
+    kArrayFromAsyncResolveLength
+  };
+
+  enum ArrayFromAsyncLabels {
+    kGetIteratorStep,
+    kCheckIteratorValueAndMapping,
+    kIteratorMapping,
+    kGetIteratorValueWithMapping,
+    kAddIteratorValueToTheArray,
+    kGetArrayLikeValue,
+    kCheckArrayLikeValueAndMapping,
+    kGetArrayLikeValueWithMapping,
+    kAddArrayLikeValueToTheArray,
+    kDoneAndResolvePromise,
+    kCloseAsyncIterator,
+    kRejectPromise
+  };
+};
+
 }  // namespace internal
 }  // namespace v8
7 deps/v8/src/builtins/builtins-array.cc vendored
@@ -1589,12 +1589,5 @@ BUILTIN(ArrayConcat) {
   return Slow_ArrayConcat(&args, species, isolate);
 }

-BUILTIN(ArrayFromAsync) {
-  HandleScope scope(isolate);
-  DCHECK(v8_flags.harmony_array_from_async);
-
-  return ReadOnlyRoots(isolate).undefined_value();
-}
-
 }  // namespace internal
 }  // namespace v8
11 deps/v8/src/builtins/builtins-arraybuffer.cc vendored
@@ -73,15 +73,10 @@ Tagged<Object> ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
         BackingStore::Allocate(isolate, byte_length, shared, initialized);
     max_byte_length = byte_length;
   } else {
-    // We need to check the max length against both
-    // JSArrayBuffer::kMaxByteLength and JSTypedArray::kMaxLength, since it's
-    // possible to create length-tracking TypedArrays and resize the underlying
-    // buffer. If the max byte length was larger than JSTypedArray::kMaxLength,
-    // that'd result in having a TypedArray with length larger than
-    // JSTypedArray::kMaxLength.
+    static_assert(JSArrayBuffer::kMaxByteLength ==
+                  JSTypedArray::kMaxByteLength);
     if (!TryNumberToSize(*max_length, &max_byte_length) ||
-        max_byte_length > JSArrayBuffer::kMaxByteLength ||
-        max_byte_length > JSTypedArray::kMaxLength) {
+        max_byte_length > JSArrayBuffer::kMaxByteLength) {
       THROW_NEW_ERROR_RETURN_FAILURE(
           isolate,
           NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength));
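Because the two byte-length limits are now asserted equal, a single comparison suffices. The check is observable when requesting a resizable buffer; a hedged sketch in TypeScript (the concrete limits vary by platform and build configuration, and resizable ArrayBuffers require an ES2024-level runtime):

// maxByteLength is validated against the engine's maximum up front.
const buf = new ArrayBuffer(8, { maxByteLength: 1024 });  // ok
buf.resize(512);                                          // grows/shrinks in place

try {
  // An absurdly large maximum is rejected before any allocation.
  new ArrayBuffer(8, { maxByteLength: Number.MAX_SAFE_INTEGER });
} catch (e) {
  console.log(e instanceof RangeError);  // true: kInvalidArrayBufferMaxLength
}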
4 deps/v8/src/builtins/builtins-async-gen.cc vendored
@@ -181,8 +181,8 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(
   TNode<Smi> builtin_id = LoadObjectField<Smi>(
       shared_info, SharedFunctionInfo::kFunctionDataOffset);
   TNode<Code> code = LoadBuiltin(builtin_id);
-  StoreMaybeIndirectPointerFieldNoWriteBarrier(function,
-                                               JSFunction::kCodeOffset, code);
+  StoreMaybeIndirectPointerFieldNoWriteBarrier(
+      function, JSFunction::kCodeOffset, kCodeIndirectPointerTag, code);
 }

 TNode<JSFunction> AsyncBuiltinsAssembler::CreateUnwrapClosure(
60 deps/v8/src/builtins/builtins-collections-gen.cc vendored
@@ -28,7 +28,7 @@ void BaseCollectionsAssembler::AddConstructorEntry(
     Label* if_may_have_side_effects, Label* if_exception,
     TVariable<Object>* var_exception) {
   compiler::ScopedExceptionHandler handler(this, if_exception, var_exception);
-  CSA_DCHECK(this, Word32BinaryNot(IsTheHole(key_value)));
+  CSA_DCHECK(this, Word32BinaryNot(IsHashTableHole(key_value)));
   if (variant == kMap || variant == kWeakMap) {
     TorqueStructKeyValuePair pair =
         if_may_have_side_effects != nullptr

@@ -874,7 +874,7 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList(
     TNode<IntPtrT> entry_start_position;
     TNode<IntPtrT> cur_index;
     std::tie(entry_key, entry_start_position, cur_index) =
-        NextSkipHoles<OrderedHashMap>(table, var_index.value(), &done);
+        NextSkipHashTableHoles<OrderedHashMap>(table, var_index.value(), &done);

     // Decide to write key or value.
     Branch(

@@ -981,7 +981,8 @@ TNode<JSArray> CollectionsBuiltinsAssembler::SetOrSetIteratorToList(
     TNode<IntPtrT> entry_start_position;
     TNode<IntPtrT> cur_index;
     std::tie(entry_key, entry_start_position, cur_index) =
-        NextSkipHoles<OrderedHashSet>(table, var_index.value(), &finalize);
+        NextSkipHashTableHoles<OrderedHashSet>(table, var_index.value(),
+                                               &finalize);

     Store(elements, var_offset.value(), entry_key);

@@ -1284,9 +1285,9 @@ CollectionsBuiltinsAssembler::TransitionOrderedHashSetNoUpdate(

 template <typename TableType>
 std::tuple<TNode<Object>, TNode<IntPtrT>, TNode<IntPtrT>>
-CollectionsBuiltinsAssembler::NextSkipHoles(TNode<TableType> table,
-                                            TNode<IntPtrT> index,
-                                            Label* if_end) {
+CollectionsBuiltinsAssembler::NextSkipHashTableHoles(TNode<TableType> table,
+                                                     TNode<IntPtrT> index,
+                                                     Label* if_end) {
   // Compute the used capacity for the {table}.
   TNode<Int32T> number_of_buckets = LoadAndUntagToWord32ObjectField(
       table, TableType::NumberOfBucketsOffset());

@@ -1297,16 +1298,15 @@ CollectionsBuiltinsAssembler::NextSkipHoles(TNode<TableType> table,
   TNode<Int32T> used_capacity =
       Int32Add(number_of_elements, number_of_deleted_elements);

-  return NextSkipHoles(table, number_of_buckets, used_capacity, index, if_end);
+  return NextSkipHashTableHoles(table, number_of_buckets, used_capacity, index,
+                                if_end);
 }

 template <typename TableType>
 std::tuple<TNode<Object>, TNode<IntPtrT>, TNode<IntPtrT>>
-CollectionsBuiltinsAssembler::NextSkipHoles(TNode<TableType> table,
-                                            TNode<Int32T> number_of_buckets,
-                                            TNode<Int32T> used_capacity,
-                                            TNode<IntPtrT> index,
-                                            Label* if_end) {
+CollectionsBuiltinsAssembler::NextSkipHashTableHoles(
+    TNode<TableType> table, TNode<Int32T> number_of_buckets,
+    TNode<Int32T> used_capacity, TNode<IntPtrT> index, Label* if_end) {
   CSA_DCHECK(this, Word32Equal(number_of_buckets,
                                LoadAndUntagToWord32ObjectField(
                                    table, TableType::NumberOfBucketsOffset())));

@@ -1333,7 +1333,7 @@ CollectionsBuiltinsAssembler::NextSkipHashTableHoles(
     entry_key = UnsafeLoadKeyFromOrderedHashTableEntry(
         table, ChangePositiveInt32ToIntPtr(entry_start_position));
     var_index = Int32Add(var_index.value(), Int32Constant(1));
-    Branch(IsTheHole(entry_key), &loop, &done_loop);
+    Branch(IsHashTableHole(entry_key), &loop, &done_loop);
   }

   BIND(&done_loop);

@@ -1356,8 +1356,8 @@ CollectionsBuiltinsAssembler::NextKeyIndexPairUnmodifiedTable(
   TNode<IntPtrT> entry_start_position;
   TNode<IntPtrT> next_index;

-  std::tie(key, entry_start_position, next_index) =
-      NextSkipHoles(table, number_of_buckets, used_capacity, index, if_end);
+  std::tie(key, entry_start_position, next_index) = NextSkipHashTableHoles(
+      table, number_of_buckets, used_capacity, index, if_end);

   return TorqueStructKeyIndexPair{key, next_index};
 }

@@ -1382,7 +1382,7 @@ TorqueStructKeyIndexPair CollectionsBuiltinsAssembler::NextKeyIndexPair(
   TNode<IntPtrT> next_index;

   std::tie(key, entry_start_position, next_index) =
-      NextSkipHoles<CollectionType>(table, index, if_end);
+      NextSkipHashTableHoles<CollectionType>(table, index, if_end);

   return TorqueStructKeyIndexPair{key, next_index};
 }

@@ -1405,8 +1405,8 @@ CollectionsBuiltinsAssembler::NextKeyValueIndexTupleUnmodifiedTable(
   TNode<IntPtrT> entry_start_position;
   TNode<IntPtrT> next_index;

-  std::tie(key, entry_start_position, next_index) =
-      NextSkipHoles(table, number_of_buckets, used_capacity, index, if_end);
+  std::tie(key, entry_start_position, next_index) = NextSkipHashTableHoles(
+      table, number_of_buckets, used_capacity, index, if_end);

   TNode<Object> value =
       UnsafeLoadValueFromOrderedHashMapEntry(table, entry_start_position);

@@ -1423,7 +1423,7 @@ CollectionsBuiltinsAssembler::NextKeyValueIndexTuple(
   TNode<IntPtrT> next_index;

   std::tie(key, entry_start_position, next_index) =
-      NextSkipHoles(table, index, if_end);
+      NextSkipHashTableHoles(table, index, if_end);

   TNode<Object> value =
       UnsafeLoadValueFromOrderedHashMapEntry(table, entry_start_position);

@@ -1677,9 +1677,6 @@ TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) {
   ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
                          "Map.prototype.delete");

-  // This check breaks a known exploitation technique. See crbug.com/1263462
-  CSA_CHECK(this, TaggedNotEqual(key, TheHoleConstant()));
-
   const TNode<OrderedHashMap> table =
       LoadObjectField<OrderedHashMap>(CAST(receiver), JSMap::kTableOffset);

@@ -1694,8 +1691,8 @@ TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) {

   BIND(&entry_found);
   // If we found the entry, mark the entry as deleted.
-  StoreKeyValueInOrderedHashMapEntry(table, TheHoleConstant(),
-                                     TheHoleConstant(),
+  StoreKeyValueInOrderedHashMapEntry(table, HashTableHoleConstant(),
+                                     HashTableHoleConstant(),
                                      entry_start_position_or_hash.value());

   // Decrement the number of elements, increment the number of deleted elements.

@@ -1823,7 +1820,7 @@ TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) {
                          "Set.prototype.delete");

   // This check breaks a known exploitation technique. See crbug.com/1263462
-  CSA_CHECK(this, TaggedNotEqual(key, TheHoleConstant()));
+  CSA_CHECK(this, TaggedNotEqual(key, HashTableHoleConstant()));

   const TNode<OrderedHashSet> table =
       LoadObjectField<OrderedHashSet>(CAST(receiver), JSMap::kTableOffset);

@@ -1853,9 +1850,6 @@ TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) {
 TNode<Smi> CollectionsBuiltinsAssembler::DeleteFromSetTable(
     const TNode<Object> context, TNode<OrderedHashSet> table, TNode<Object> key,
     Label* not_found) {
-  // This check breaks a known exploitation technique. See crbug.com/1263462
-  CSA_CHECK(this, TaggedNotEqual(key, TheHoleConstant()));
-
   TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0));
   Label entry_found(this);

@@ -1864,7 +1858,7 @@ TNode<Smi> CollectionsBuiltinsAssembler::DeleteFromSetTable(

   BIND(&entry_found);
   // If we found the entry, mark the entry as deleted.
-  StoreKeyInOrderedHashSetEntry(table, TheHoleConstant(),
+  StoreKeyInOrderedHashSetEntry(table, HashTableHoleConstant(),
                                 entry_start_position_or_hash.value());

   // Decrement the number of elements, increment the number of deleted elements.

@@ -1937,7 +1931,7 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
     TNode<Object> entry_key;
     TNode<IntPtrT> entry_start_position;
     std::tie(entry_key, entry_start_position, index) =
-        NextSkipHoles<OrderedHashMap>(table, index, &done_loop);
+        NextSkipHashTableHoles<OrderedHashMap>(table, index, &done_loop);

     // Load the entry value as well.
     TNode<Object> entry_value =

@@ -2019,7 +2013,7 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
   TNode<Object> entry_key;
   TNode<IntPtrT> entry_start_position;
   std::tie(entry_key, entry_start_position, index) =
-      NextSkipHoles<OrderedHashMap>(table, index, &return_end);
+      NextSkipHashTableHoles<OrderedHashMap>(table, index, &return_end);
   StoreObjectFieldNoWriteBarrier(receiver, JSMapIterator::kIndexOffset,
                                  SmiTag(index));
   var_value = entry_key;

@@ -2136,7 +2130,7 @@ TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
     TNode<Object> entry_key;
     TNode<IntPtrT> entry_start_position;
     std::tie(entry_key, entry_start_position, index) =
-        NextSkipHoles<OrderedHashSet>(table, index, &done_loop);
+        NextSkipHashTableHoles<OrderedHashSet>(table, index, &done_loop);

     // Invoke the {callback} passing the {entry_key} (twice) and the {receiver}.
     Call(context, callback, this_arg, entry_key, entry_key, receiver);

@@ -2204,7 +2198,7 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
   TNode<Object> entry_key;
   TNode<IntPtrT> entry_start_position;
   std::tie(entry_key, entry_start_position, index) =
-      NextSkipHoles<OrderedHashSet>(table, index, &return_end);
+      NextSkipHashTableHoles<OrderedHashSet>(table, index, &return_end);
   StoreObjectFieldNoWriteBarrier(receiver, JSSetIterator::kIndexOffset,
                                  SmiTag(index));
   var_value = entry_key;
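These renames (NextSkipHoles to NextSkipHashTableHoles, TheHole to a dedicated hash-table hole sentinel) are internal only: from JavaScript, a deleted entry still behaves like a skipped hole during iteration. A quick TypeScript illustration of the behavior the iterators must preserve:

// Deleted entries become "hash table holes" that live iterators skip over.
const m = new Map<string, number>([['a', 1], ['b', 2], ['c', 3]]);
const it = m[Symbol.iterator]();
console.log(it.next().value);  // ['a', 1]
m.delete('b');                 // entry is marked as a hole, not compacted yet
console.log(it.next().value);  // ['c', 3]: the hole is skipped
console.log(it.next().done);   // true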
13 deps/v8/src/builtins/builtins-collections-gen.h vendored
@@ -245,12 +245,15 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
       const TNode<IteratorType> iterator);

   template <typename TableType>
-  std::tuple<TNode<Object>, TNode<IntPtrT>, TNode<IntPtrT>> NextSkipHoles(
-      TNode<TableType> table, TNode<IntPtrT> index, Label* if_end);
+  std::tuple<TNode<Object>, TNode<IntPtrT>, TNode<IntPtrT>>
+  NextSkipHashTableHoles(TNode<TableType> table, TNode<IntPtrT> index,
+                         Label* if_end);
   template <typename TableType>
-  std::tuple<TNode<Object>, TNode<IntPtrT>, TNode<IntPtrT>> NextSkipHoles(
-      TNode<TableType> table, TNode<Int32T> number_of_buckets,
-      TNode<Int32T> used_capacity, TNode<IntPtrT> index, Label* if_end);
+  std::tuple<TNode<Object>, TNode<IntPtrT>, TNode<IntPtrT>>
+  NextSkipHashTableHoles(TNode<TableType> table,
+                         TNode<Int32T> number_of_buckets,
+                         TNode<Int32T> used_capacity, TNode<IntPtrT> index,
+                         Label* if_end);

   // Specialization for Smi.
   // The {result} variable will contain the entry index if the key was found,
@@ -253,7 +253,8 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
                                  shared_function_info);
   StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
   TNode<Code> lazy_builtin = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
-  StoreMaybeIndirectPointerField(result, JSFunction::kCodeOffset, lazy_builtin);
+  StoreMaybeIndirectPointerField(result, JSFunction::kCodeOffset,
+                                 kCodeIndirectPointerTag, lazy_builtin);
   Return(result);
 }
4 deps/v8/src/builtins/builtins-date.cc vendored
@@ -58,8 +58,8 @@ double ParseDateTimeString(Isolate* isolate, Handle<String> str) {
   return DateCache::TimeClip(date);
 }

-Object SetLocalDateValue(Isolate* isolate, Handle<JSDate> date,
-                         double time_val) {
+Tagged<Object> SetLocalDateValue(Isolate* isolate, Handle<JSDate> date,
+                                 double time_val) {
   if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
       time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
     time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
8 deps/v8/src/builtins/builtins-definitions.h vendored
@@ -55,8 +55,8 @@ namespace internal {

 #define BUILTIN_LIST_BASE_TIER1(CPP, TFJ, TFC, TFS, TFH, ASM)          \
   /* GC write barriers */                                              \
-  TFC(IndirectPointerBarrierSaveFP, WriteBarrier)                      \
-  TFC(IndirectPointerBarrierIgnoreFP, WriteBarrier)                    \
+  TFC(IndirectPointerBarrierSaveFP, IndirectPointerWriteBarrier)       \
+  TFC(IndirectPointerBarrierIgnoreFP, IndirectPointerWriteBarrier)     \
                                                                        \
   /* TSAN support for stores in generated code. */                     \
   IF_TSAN(TFC, TSANRelaxedStore8IgnoreFP, TSANStore)                   \

@@ -414,7 +414,6 @@ namespace internal {
   CPP(ArrayShift)                                                      \
   /* ES6 #sec-array.prototype.unshift */                               \
   CPP(ArrayUnshift)                                                    \
-  CPP(ArrayFromAsync)                                                  \
   /* Support for Array.from and other array-copying idioms */          \
   TFS(CloneFastJSArray, NeedsContext::kYes, kSource)                   \
   TFS(CloneFastJSArrayFillingHoles, NeedsContext::kYes, kSource)       \

@@ -656,11 +655,9 @@ namespace internal {
   TFH(LoadSuperICBaseline, LoadWithReceiverBaseline)                   \
   TFH(KeyedLoadIC, KeyedLoadWithVector)                                \
   TFH(KeyedLoadIC_Megamorphic, KeyedLoadWithVector)                    \
-  TFH(KeyedLoadIC_MegamorphicStringKey, KeyedLoadWithVector)           \
   TFH(KeyedLoadICTrampoline, KeyedLoad)                                \
   TFH(KeyedLoadICBaseline, KeyedLoadBaseline)                          \
   TFH(KeyedLoadICTrampoline_Megamorphic, KeyedLoad)                    \
-  TFH(KeyedLoadICTrampoline_MegamorphicStringKey, KeyedLoad)           \
   TFH(StoreGlobalIC, StoreGlobalWithVector)                            \
   TFH(StoreGlobalICTrampoline, StoreGlobal)                            \
   TFH(StoreGlobalICBaseline, StoreGlobalBaseline)                      \

@@ -709,6 +706,7 @@ namespace internal {
   TFS(IterableToFixedArrayWithSymbolLookupSlow, NeedsContext::kYes, kIterable) \
   TFS(IterableToListMayPreserveHoles, NeedsContext::kYes, kIterable,   \
       kIteratorFn)                                                     \
+  TFS(IterableToListConvertHoles, NeedsContext::kYes, kIterable, kIteratorFn) \
   IF_WASM(TFS, IterableToFixedArrayForWasm, NeedsContext::kYes, kIterable, \
           kExpectedLength)                                             \
                                                                        \
10 deps/v8/src/builtins/builtins-ic-gen.cc vendored
@@ -58,11 +58,6 @@ void Builtins::Generate_KeyedLoadIC_Megamorphic(
   AccessorAssembler assembler(state);
   assembler.GenerateKeyedLoadIC_Megamorphic();
 }
-void Builtins::Generate_KeyedLoadIC_MegamorphicStringKey(
-    compiler::CodeAssemblerState* state) {
-  AccessorAssembler assembler(state);
-  assembler.GenerateKeyedLoadIC_MegamorphicStringKey();
-}
 void Builtins::Generate_KeyedLoadIC_PolymorphicName(
     compiler::CodeAssemblerState* state) {
   AccessorAssembler assembler(state);

@@ -83,11 +78,6 @@ void Builtins::Generate_KeyedLoadICTrampoline_Megamorphic(
   AccessorAssembler assembler(state);
   assembler.GenerateKeyedLoadICTrampoline_Megamorphic();
 }
-void Builtins::Generate_KeyedLoadICTrampoline_MegamorphicStringKey(
-    compiler::CodeAssemblerState* state) {
-  AccessorAssembler assembler(state);
-  assembler.GenerateKeyedLoadICTrampoline_MegamorphicStringKey();
-}
 void Builtins::Generate_LoadGlobalIC_NoFeedback(
     compiler::CodeAssemblerState* state) {
   AccessorAssembler assembler(state);
26 deps/v8/src/builtins/builtins-internal-gen.cc vendored
@@ -270,7 +270,7 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
     BIND(&next);
   }

-  void PointerTableWriteBarrier(SaveFPRegsMode fp_mode) {
+  void IndirectPointerWriteBarrier(SaveFPRegsMode fp_mode) {
     // Currently, only objects living in (local) old space are referenced
     // through a pointer table indirection and we have DCHECKs in the CPP write
    // barrier code to check that. This simplifies the write barrier code for

@@ -281,11 +281,15 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
     BIND(&marking_is_on);

     // For this barrier, the slot contains an index into a pointer table and not
-    // directly a pointer to a HeapObject.
-    TNode<IntPtrT> slot =
-        UncheckedParameter<IntPtrT>(WriteBarrierDescriptor::kSlotAddress);
-    TNode<IntPtrT> object = BitcastTaggedToWord(
-        UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
+    // directly a pointer to a HeapObject. Further, the slot address is tagged
+    // with the indirect pointer tag of the slot, so it cannot directly be
+    // dereferenced but needs to be decoded first.
+    TNode<IntPtrT> slot = UncheckedParameter<IntPtrT>(
+        IndirectPointerWriteBarrierDescriptor::kSlotAddress);
+    TNode<IntPtrT> object = BitcastTaggedToWord(UncheckedParameter<Object>(
+        IndirectPointerWriteBarrierDescriptor::kObject));
+    TNode<IntPtrT> tag = UncheckedParameter<IntPtrT>(
+        IndirectPointerWriteBarrierDescriptor::kIndirectPointerTag);

     TNode<ExternalReference> function = ExternalConstant(
         ExternalReference::

@@ -293,7 +297,8 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
     CallCFunctionWithCallerSavedRegisters(
         function, MachineTypeOf<Int32T>::value, fp_mode,
         std::make_pair(MachineTypeOf<IntPtrT>::value, object),
-        std::make_pair(MachineTypeOf<IntPtrT>::value, slot));
+        std::make_pair(MachineTypeOf<IntPtrT>::value, slot),
+        std::make_pair(MachineTypeOf<IntPtrT>::value, tag));
     Goto(&next);

     BIND(&next);

@@ -561,7 +566,12 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
       return;
    }

-    PointerTableWriteBarrier(fp_mode);
+    if (!V8_ENABLE_SANDBOX_BOOL) {
+      Unreachable();
+      return;
+    }
+
+    IndirectPointerWriteBarrier(fp_mode);
     IncrementCounter(isolate()->counters()->write_barriers(), 1);
     Return(TrueConstant());
   }
65 deps/v8/src/builtins/builtins-iterator-gen.cc vendored
@@ -71,6 +71,30 @@ TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(
   GotoIf(TaggedIsSmi(result), &if_notobject);
   TNode<HeapObject> heap_object_result = CAST(result);
   TNode<Map> result_map = LoadMap(heap_object_result);
+  GotoIfNot(JSAnyIsNotPrimitiveMap(result_map), &if_notobject);
+
+  // IteratorComplete
+  // 2. Return ToBoolean(? Get(iterResult, "done")).
+  IteratorComplete(context, heap_object_result, if_done,
+                   fast_iterator_result_map);
+  Goto(&return_result);
+
+  BIND(&if_notobject);
+  CallRuntime(Runtime::kThrowIteratorResultNotAnObject, context, result);
+  Unreachable();
+
+  BIND(&return_result);
+  return CAST(heap_object_result);
+}
+
+void IteratorBuiltinsAssembler::IteratorComplete(
+    TNode<Context> context, const TNode<HeapObject> iterator, Label* if_done,
+    base::Optional<TNode<Map>> fast_iterator_result_map) {
+  DCHECK_NOT_NULL(if_done);
+
+  Label return_result(this);
+
+  TNode<Map> result_map = LoadMap(iterator);

   if (fast_iterator_result_map) {
     // Fast iterator result case:

@@ -79,10 +103,9 @@ TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(
     // 4. Return result.
     GotoIfNot(TaggedEqual(result_map, *fast_iterator_result_map), &if_generic);

-    // IteratorComplete
     // 2. Return ToBoolean(? Get(iterResult, "done")).
     TNode<Object> done =
-        LoadObjectField(heap_object_result, JSIteratorResult::kDoneOffset);
+        LoadObjectField(iterator, JSIteratorResult::kDoneOffset);
     BranchIfToBooleanIsTrue(done, if_done, &return_result);

     BIND(&if_generic);

@@ -90,22 +113,14 @@ TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(

   // Generic iterator result case:
   {
-    // 3. If Type(result) is not Object, throw a TypeError exception.
-    GotoIfNot(JSAnyIsNotPrimitiveMap(result_map), &if_notobject);
-
-    // IteratorComplete
     // 2. Return ToBoolean(? Get(iterResult, "done")).
     TNode<Object> done =
-        GetProperty(context, heap_object_result, factory()->done_string());
+        GetProperty(context, iterator, factory()->done_string());
     BranchIfToBooleanIsTrue(done, if_done, &return_result);
   }

-  BIND(&if_notobject);
-  CallRuntime(Runtime::kThrowIteratorResultNotAnObject, context, result);
-  Unreachable();
-
   BIND(&return_result);
-  return CAST(heap_object_result);
+  return;
 }

 TNode<Object> IteratorBuiltinsAssembler::IteratorValue(

@@ -340,7 +355,8 @@ TF_BUILTIN(StringFixedArrayFromIterable, IteratorBuiltinsAssembler) {
 // will be copied to the new array, which is inconsistent with the behavior of
 // an actual iteration, where holes should be replaced with undefined (if the
 // prototype has no elements). To maintain the correct behavior for holey
-// arrays, use the builtins IterableToList or IterableToListWithSymbolLookup.
+// arrays, use the builtins IterableToList or IterableToListWithSymbolLookup or
+// IterableToListConvertHoles.
 TF_BUILTIN(IterableToListMayPreserveHoles, IteratorBuiltinsAssembler) {
   auto context = Parameter<Context>(Descriptor::kContext);
   auto iterable = Parameter<Object>(Descriptor::kIterable);

@@ -357,6 +373,29 @@ TF_BUILTIN(IterableToListMayPreserveHoles, IteratorBuiltinsAssembler) {
   TailCallBuiltin(Builtin::kIterableToList, context, iterable, iterator_fn);
 }

+// This builtin always returns a new JSArray and is thus safe to use even in the
+// presence of code that may call back into user-JS. This builtin will take the
+// fast path if the iterable is a fast array and the Array prototype and the
+// Symbol.iterator is untouched. The fast path skips the iterator and copies the
+// backing store to the new array. Note that if the array has holes, the holes
+// will be converted to undefined values in the new array (unlike
+// IterableToListMayPreserveHoles builtin).
+TF_BUILTIN(IterableToListConvertHoles, IteratorBuiltinsAssembler) {
+  auto context = Parameter<Context>(Descriptor::kContext);
+  auto iterable = Parameter<Object>(Descriptor::kIterable);
+  auto iterator_fn = Parameter<Object>(Descriptor::kIteratorFn);
+
+  Label slow_path(this);
+
+  GotoIfNot(IsFastJSArrayWithNoCustomIteration(context, iterable), &slow_path);
+
+  // The fast path will convert holes to undefined values in the new array.
+  TailCallBuiltin(Builtin::kCloneFastJSArrayFillingHoles, context, iterable);
+
+  BIND(&slow_path);
+  TailCallBuiltin(Builtin::kIterableToList, context, iterable, iterator_fn);
+}
+
 void IteratorBuiltinsAssembler::FastIterableToList(
     TNode<Context> context, TNode<Object> iterable,
     TVariable<JSArray>* var_result, Label* slow) {
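The difference between the two list-building builtins is visible when copying holey arrays: real iteration converts holes to `undefined`, because reading an absent index through the iterator protocol yields `undefined`. A small TypeScript illustration of the behavior the new `IterableToListConvertHoles` fast path must preserve:

const holey: (number | undefined)[] = [1, , 3];  // index 1 is a hole

// Iteration-based copying converts the hole to a real undefined element...
console.log([...holey]);       // [1, undefined, 3]
console.log(1 in [...holey]);  // true: index 1 now exists

// ...whereas a raw backing-store copy could keep the hole, which is what
// IterableToListMayPreserveHoles is allowed to do on its fast path.
console.log(1 in holey);       // false: still a hole in the original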
12 deps/v8/src/builtins/builtins-iterator-gen.h vendored
@@ -42,6 +42,18 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
     return IteratorStep(context, iterator, if_done, fast_iterator_result_map);
   }

+  // https://tc39.es/ecma262/#sec-iteratorcomplete
+  void IteratorComplete(
+      TNode<Context> context, const TNode<HeapObject> iterator, Label* if_done,
+      base::Optional<TNode<Map>> fast_iterator_result_map = base::nullopt);
+  void IteratorComplete(TNode<Context> context,
+                        const TNode<HeapObject> iterator,
+                        base::Optional<TNode<Map>> fast_iterator_result_map,
+                        Label* if_done) {
+    return IteratorComplete(context, iterator, if_done,
+                            fast_iterator_result_map);
+  }
+
   // https://tc39.github.io/ecma262/#sec-iteratorvalue
   // Return the `value` field from an iterator.
   // `fast_iterator_result_map` refers to the map for the JSIteratorResult
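The factored-out `IteratorComplete` mirrors the spec operation of the same name: given an iterator result object, branch on `ToBoolean(? Get(result, "done"))`. In userland TypeScript terms (the function below is a sketch of the spec operation, not V8 code):

// Spec-style IteratorComplete: truthiness of `done` decides completion.
function iteratorComplete(result: unknown): boolean {
  if (typeof result !== 'object' || result === null) {
    throw new TypeError('Iterator result is not an object');
  }
  return Boolean((result as { done?: unknown }).done);
}

console.log(iteratorComplete({ done: false, value: 1 }));  // false
console.log(iteratorComplete({ value: 1 }));               // false: absent done
console.log(iteratorComplete({ done: 'yes' }));            // true: truthy done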
8 deps/v8/src/builtins/builtins-lazy-gen.cc vendored
@@ -75,7 +75,7 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
   // Optimized code is good, get it into the closure and link the closure into
   // the optimized functions list, then tail call the optimized code.
   StoreMaybeIndirectPointerField(function, JSFunction::kCodeOffset,
-                                 optimized_code);
+                                 kCodeIndirectPointerTag, optimized_code);
   Comment("MaybeTailCallOptimizedCodeSlot:: GenerateTailCallToJSCode");
   GenerateTailCallToJSCode(optimized_code, function);

@@ -111,7 +111,8 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {

   CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODE(
                        isolate(), CompileLazy))));
-  StoreMaybeIndirectPointerField(function, JSFunction::kCodeOffset, sfi_code);
+  StoreMaybeIndirectPointerField(function, JSFunction::kCodeOffset,
+                                 kCodeIndirectPointerTag, sfi_code);

   Label maybe_use_sfi_code(this);
   // If there is no feedback, don't check for optimized code.

@@ -168,7 +169,8 @@ TF_BUILTIN(CompileLazyDeoptimizedCode, LazyBuiltinsAssembler) {

   TNode<Code> code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
   // Set the code slot inside the JSFunction to CompileLazy.
-  StoreMaybeIndirectPointerField(function, JSFunction::kCodeOffset, code);
+  StoreMaybeIndirectPointerField(function, JSFunction::kCodeOffset,
+                                 kCodeIndirectPointerTag, code);
   GenerateTailCallToJSCode(code, function);
 }
@@ -356,7 +356,7 @@ void MicrotaskQueueBuiltinsAssembler::IncrementFinishedMicrotaskCount(
 TNode<Context> MicrotaskQueueBuiltinsAssembler::GetCurrentContext() {
   auto ref = ExternalReference::Create(kContextAddress, isolate());
   // TODO(delphick): Add a checked cast. For now this is not possible as context
-  // can actually be Smi(0).
+  // can actually be Tagged<Smi>(0).
   return TNode<Context>::UncheckedCast(LoadFullTagged(ExternalConstant(ref)));
 }
@@ -125,6 +125,9 @@ inline size_t GetAddress32(size_t index, size_t byte_offset) {
// ES #sec-atomics.notify
// Atomics.notify( typedArray, index, count )
BUILTIN(AtomicsNotify) {
// TODO(clemensb): This builtin only allocates (an exception) in the case of
// an error; we could try to avoid allocating the HandleScope in the non-error
// case.
HandleScope scope(isolate);
Handle<Object> array = args.atOrUndefined(isolate, 1);
Handle<Object> index = args.atOrUndefined(isolate, 2);

@@ -163,20 +166,21 @@ BUILTIN(AtomicsNotify) {
// 10. If IsSharedArrayBuffer(buffer) is false, return 0.
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
size_t wake_addr;
if (V8_UNLIKELY(!sta->GetBuffer()->is_shared())) {
return Smi::FromInt(0);
if (V8_UNLIKELY(!array_buffer->is_shared())) {
return Smi::zero();
}

// Steps 11-17 performed in FutexEmulation::Wake.
size_t wake_addr;
if (sta->type() == kExternalBigInt64Array) {
wake_addr = GetAddress64(i, sta->byte_offset());
} else {
DCHECK(sta->type() == kExternalInt32Array);
wake_addr = GetAddress32(i, sta->byte_offset());
}
return FutexEmulation::Wake(array_buffer, wake_addr, c);
int num_waiters_woken = FutexEmulation::Wake(*array_buffer, wake_addr, c);
return Smi::FromInt(num_waiters_woken);
}

Tagged<Object> DoWait(Isolate* isolate, FutexEmulation::WaitMode mode,
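A note on the AtomicsNotify hunk above: the refactor fetches the buffer handle once, returns the canonical `Smi::zero()` on the non-shared path, and converts the raw waiter count to a Smi at a single point. A self-contained C++ analogue of that shape (illustrative only; `SharedBufferLike` and `WakeWaiters` are hypothetical stand-ins, not V8 API):

    #include <cstdio>

    // Hypothetical stand-ins for this sketch; none of these are V8 types.
    struct SharedBufferLike {
      bool shared;
      bool is_shared() const { return shared; }
    };

    // Pretend futex wake: reports how many waiters were woken.
    static int WakeWaiters(const SharedBufferLike&, int count) { return count; }

    // Analogue of the refactored builtin: one buffer fetch, one early return,
    // one conversion point for the int result (Smi::FromInt in the real code).
    static int AtomicsNotifyLike(SharedBufferLike& buffer, int count) {
      if (!buffer.is_shared()) return 0;  // like returning Smi::zero()
      int num_waiters_woken = WakeWaiters(buffer, count);
      return num_waiters_woken;
    }

    int main() {
      SharedBufferLike sab{true};
      std::printf("woken: %d\n", AtomicsNotifyLike(sab, 2));
    }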
19
deps/v8/src/builtins/builtins-string.cc
vendored

@@ -244,9 +244,9 @@ inline bool ToUpperOverflows(base::uc32 character) {
}

template <class Converter>
V8_WARN_UNUSED_RESULT static Object ConvertCaseHelper(
Isolate* isolate, String string, SeqString result, int result_length,
unibrow::Mapping<Converter, 128>* mapping) {
V8_WARN_UNUSED_RESULT static Tagged<Object> ConvertCaseHelper(
Isolate* isolate, Tagged<String> string, Tagged<SeqString> result,
int result_length, unibrow::Mapping<Converter, 128>* mapping) {
DisallowGarbageCollection no_gc;
// We try this twice, once with the assumption that the result is no longer
// than the input and, if that assumption breaks, again with the exact

@@ -272,16 +272,16 @@ V8_WARN_UNUSED_RESULT static Tagged<Object> ConvertCaseHelper(
int char_length = mapping->get(current, next, chars);
if (char_length == 0) {
// The case conversion of this character is the character itself.
result.Set(i, current);
result->Set(i, current);
i++;
} else if (char_length == 1 &&
(ignore_overflow || !ToUpperOverflows(current))) {
// Common case: converting the letter resulted in one character.
DCHECK(static_cast<base::uc32>(chars[0]) != current);
result.Set(i, chars[0]);
result->Set(i, chars[0]);
has_changed_character = true;
i++;
} else if (result_length == string.length()) {
} else if (result_length == string->length()) {
bool overflows = ToUpperOverflows(current);
// We've assumed that the result would be as long as the
// input but here is a character that converts to several

@@ -322,7 +322,7 @@ V8_WARN_UNUSED_RESULT static Tagged<Object> ConvertCaseHelper(
: Smi::FromInt(current_length);
} else {
for (int j = 0; j < char_length; j++) {
result.Set(i, chars[j]);
result->Set(i, chars[j]);
i++;
}
has_changed_character = true;

@@ -341,7 +341,7 @@ V8_WARN_UNUSED_RESULT static Tagged<Object> ConvertCaseHelper(
}

template <class Converter>
V8_WARN_UNUSED_RESULT static Object ConvertCase(
V8_WARN_UNUSED_RESULT static Tagged<Object> ConvertCase(
Handle<String> s, Isolate* isolate,
unibrow::Mapping<Converter, 128>* mapping) {
s = String::Flatten(isolate, s);

@@ -379,7 +379,8 @@ V8_WARN_UNUSED_RESULT static Tagged<Object> ConvertCase(
result = isolate->factory()->NewRawTwoByteString(length).ToHandleChecked();
}

Object answer = ConvertCaseHelper(isolate, *s, *result, length, mapping);
Tagged<Object> answer =
ConvertCaseHelper(isolate, *s, *result, length, mapping);
if (IsException(answer, isolate) || IsString(answer)) return answer;

DCHECK(IsSmi(answer));
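The recurring change in this file, and throughout the commit, is the migration from raw object classes (`Object`, `String`, `SeqString`) to the `Tagged<T>` wrapper, which is why member access flips from `result.Set(...)` to `result->Set(...)`. A minimal sketch of such a wrapper (illustrative only, not V8's actual `Tagged` definition):

    #include <cstdint>
    #include <iostream>

    // Minimal Tagged<T>-style wrapper sketch: it stores one tagged word and
    // hands back the wrapped object through operator->, so call sites use
    // pointer syntax. V8's real wrapper also encodes/decodes heap tags here.
    template <typename T>
    class Tagged {
     public:
      explicit Tagged(T* raw) : ptr_(reinterpret_cast<uintptr_t>(raw)) {}
      T* operator->() const { return reinterpret_cast<T*>(ptr_); }

     private:
      uintptr_t ptr_;  // the tagged pointer word
    };

    struct SeqString {
      void Set(int index, uint16_t code_unit) {
        std::cout << "Set(" << index << ", " << code_unit << ")\n";
      }
    };

    int main() {
      SeqString backing;
      Tagged<SeqString> result(&backing);
      result->Set(0, 'a');  // pointer-style access through the wrapper
    }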
@@ -51,9 +51,9 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
// - Set BitField to 0.
// - Set IsExternal and IsDetachable bits of BitFieldSlot.
// - Set the byte_length field to zero.
// - Set backing_store to null/Smi(0).
// - Set backing_store to null/Tagged<Smi>(0).
// - Set extension to null.
// - Set all embedder fields to Smi(0).
// - Set all embedder fields to Tagged<Smi>(0).
if (FIELD_SIZE(JSArrayBuffer::kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(JSArrayBuffer::kOptionalPaddingOffset));
StoreObjectFieldNoWriteBarrier(
12
deps/v8/src/builtins/builtins-utils.h
vendored

@@ -23,12 +23,12 @@ class BuiltinArguments : public JavaScriptArguments {
: Arguments(length, arguments) {
// Check we have at least the receiver.
DCHECK_LE(1, this->length());
DCHECK(Object((*at(0)).ptr()).IsObject());
DCHECK(Tagged<Object>((*at(0)).ptr()).IsObject());
}

Tagged<Object> operator[](int index) const {
DCHECK_LT(index, length());
return Object(*address_of_arg_at(index + kArgsOffset));
return Tagged<Object>(*address_of_arg_at(index + kArgsOffset));
}

template <class S = Object>

@@ -104,7 +104,7 @@ static_assert(BuiltinArguments::kNumExtraArgsWithReceiver ==
// TODO(cbruni): add global flag to check whether any tracing events have been
// enabled.
#define BUILTIN_RCS(name) \
V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
V8_WARN_UNUSED_RESULT static Tagged<Object> Builtin_Impl_##name( \
BuiltinArguments args, Isolate* isolate); \
\
V8_NOINLINE static Address Builtin_Impl_Stats_##name( \

@@ -126,11 +126,11 @@ static_assert(BuiltinArguments::kNumExtraArgsWithReceiver ==
return BUILTIN_CONVERT_RESULT(Builtin_Impl_##name(args, isolate)); \
} \
\
V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
V8_WARN_UNUSED_RESULT static Tagged<Object> Builtin_Impl_##name( \
BuiltinArguments args, Isolate* isolate)

#define BUILTIN_NO_RCS(name) \
V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
V8_WARN_UNUSED_RESULT static Tagged<Object> Builtin_Impl_##name( \
BuiltinArguments args, Isolate* isolate); \
\
V8_WARN_UNUSED_RESULT Address Builtin_##name( \

@@ -140,7 +140,7 @@ static_assert(BuiltinArguments::kNumExtraArgsWithReceiver ==
return BUILTIN_CONVERT_RESULT(Builtin_Impl_##name(args, isolate)); \
} \
\
V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
V8_WARN_UNUSED_RESULT static Tagged<Object> Builtin_Impl_##name( \
BuiltinArguments args, Isolate* isolate)

#ifdef V8_RUNTIME_CALL_STATS
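For readers unfamiliar with the macro pair being edited above: `BUILTIN_RCS` declares an instrumented wrapper plus the implementation, `BUILTIN_NO_RCS` declares the implementation with a plain forwarding wrapper, and `V8_RUNTIME_CALL_STATS` picks between them. A stripped-down, self-contained analogue of that pattern (hypothetical names, not V8's macros):

    #include <cstdio>

    // Instrumented variant: a wrapper counts/logs, then calls the impl.
    #define MY_BUILTIN_RCS(name)                            \
      static int Impl_##name(int arg);                      \
      static int name(int arg) {                            \
        std::printf("stats: entering %s\n", #name);         \
        return Impl_##name(arg);                            \
      }                                                     \
      static int Impl_##name(int arg)

    // Plain variant: the wrapper forwards directly.
    #define MY_BUILTIN_NO_RCS(name)                         \
      static int Impl_##name(int arg);                      \
      static int name(int arg) { return Impl_##name(arg); } \
      static int Impl_##name(int arg)

    #ifdef MY_RUNTIME_CALL_STATS
    #define MY_BUILTIN(name) MY_BUILTIN_RCS(name)
    #else
    #define MY_BUILTIN(name) MY_BUILTIN_NO_RCS(name)
    #endif

    // The braced body after the macro becomes the implementation.
    MY_BUILTIN(Square) { return arg * arg; }

    int main() { return Square(3) == 9 ? 0 : 1; }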
7
deps/v8/src/builtins/builtins.cc
vendored

@@ -188,7 +188,7 @@ void Builtins::set_code(Builtin builtin, Tagged<Code> code) {
Tagged<Code> Builtins::code(Builtin builtin) {
Address ptr = isolate_->builtin_table()[Builtins::ToInt(builtin)];
return Code::cast(Object(ptr));
return Code::cast(Tagged<Object>(ptr));
}

Handle<Code> Builtins::code_handle(Builtin builtin) {

@@ -262,6 +262,11 @@ const char* Builtins::NameForStackTrace(Builtin builtin) {
case Builtin::kStringPrototypeIndexOf:
case Builtin::kThrowIndexOfCalledOnNull:
return "String.indexOf";
case Builtin::kDataViewPrototypeGetInt32:
case Builtin::kThrowDataViewGetInt32DetachedError:
case Builtin::kThrowDataViewGetInt32OutOfBounds:
case Builtin::kThrowDataViewGetInt32TypeError:
return "DataView.getInt32";
#if V8_INTL_SUPPORT
case Builtin::kStringPrototypeToLowerCaseIntl:
#endif
33
deps/v8/src/builtins/builtins.h
vendored

@@ -126,23 +126,22 @@ class Builtins {
static BytecodeOffset GetContinuationBytecodeOffset(Builtin builtin);
static Builtin GetBuiltinFromBytecodeOffset(BytecodeOffset);

static constexpr Builtin GetRecordWriteStub(
SaveFPRegsMode fp_mode, PointerType type = PointerType::kDirect) {
switch (type) {
case PointerType::kDirect:
switch (fp_mode) {
case SaveFPRegsMode::kIgnore:
return Builtin::kRecordWriteIgnoreFP;
case SaveFPRegsMode::kSave:
return Builtin::kRecordWriteSaveFP;
}
case PointerType::kIndirect:
switch (fp_mode) {
case SaveFPRegsMode::kIgnore:
return Builtin::kIndirectPointerBarrierIgnoreFP;
case SaveFPRegsMode::kSave:
return Builtin::kIndirectPointerBarrierSaveFP;
}
static constexpr Builtin GetRecordWriteStub(SaveFPRegsMode fp_mode) {
switch (fp_mode) {
case SaveFPRegsMode::kIgnore:
return Builtin::kRecordWriteIgnoreFP;
case SaveFPRegsMode::kSave:
return Builtin::kRecordWriteSaveFP;
}
}

static constexpr Builtin GetIndirectPointerBarrierStub(
SaveFPRegsMode fp_mode) {
switch (fp_mode) {
case SaveFPRegsMode::kIgnore:
return Builtin::kIndirectPointerBarrierIgnoreFP;
case SaveFPRegsMode::kSave:
return Builtin::kIndirectPointerBarrierSaveFP;
}
}
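The builtins.h hunk above replaces one function that switched on both the FP-save mode and a pointer-type parameter with two single-purpose constexpr lookups, so each call site names the barrier it wants. A self-contained sketch of the same refactor shape (hypothetical enum values mirroring the diff, not V8's headers):

    enum class SaveFPRegsMode { kIgnore, kSave };
    enum class Builtin {
      kRecordWriteIgnoreFP,
      kRecordWriteSaveFP,
      kIndirectPointerBarrierIgnoreFP,
      kIndirectPointerBarrierSaveFP,
    };

    // One lookup per stub kind instead of a nested switch on (type, fp_mode).
    constexpr Builtin GetRecordWriteStub(SaveFPRegsMode fp_mode) {
      return fp_mode == SaveFPRegsMode::kIgnore ? Builtin::kRecordWriteIgnoreFP
                                                : Builtin::kRecordWriteSaveFP;
    }

    constexpr Builtin GetIndirectPointerBarrierStub(SaveFPRegsMode fp_mode) {
      return fp_mode == SaveFPRegsMode::kIgnore
                 ? Builtin::kIndirectPointerBarrierIgnoreFP
                 : Builtin::kIndirectPointerBarrierSaveFP;
    }

    static_assert(GetRecordWriteStub(SaveFPRegsMode::kSave) ==
                      Builtin::kRecordWriteSaveFP,
                  "direct pointer barrier");
    static_assert(GetIndirectPointerBarrierStub(SaveFPRegsMode::kIgnore) ==
                      Builtin::kIndirectPointerBarrierIgnoreFP,
                  "indirect pointer barrier");

    int main() {}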
6
deps/v8/src/builtins/convert.tq
vendored

@@ -117,12 +117,6 @@ FromConstexpr<int8, constexpr int31>(i: constexpr int31): int8 {
FromConstexpr<char8, constexpr int31>(i: constexpr int31): char8 {
return %RawDownCast<char8>(FromConstexpr<uint8>(i));
}
FromConstexpr<Number, constexpr Smi>(s: constexpr Smi): Number {
return SmiConstant(s);
}
FromConstexpr<Smi, constexpr Smi>(s: constexpr Smi): Smi {
return SmiConstant(s);
}
FromConstexpr<uint32, constexpr int31>(i: constexpr int31): uint32 {
return Unsigned(Int32Constant(i));
}
2
deps/v8/src/builtins/data-view.tq
vendored

@@ -77,7 +77,7 @@ macro ValidateDataView(context: Context, o: JSAny, method: String):
return UnsafeCast<JSRabGsabDataView>(o);
}
case (_x: JSAny): {
ThrowTypeError(MessageTemplate::kIncompatibleMethodReceiver, method);
ThrowTypeError(MessageTemplate::kIncompatibleMethodReceiver, method, o);
}
}
}
117
deps/v8/src/builtins/ia32/builtins-ia32.cc
vendored

@@ -902,39 +902,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, eax);
__ j(not_equal, &compile_lazy);

#ifndef V8_JITLESS
Register closure = edi;
Register feedback_vector = ecx;
Label push_stack_frame;
// Load feedback vector and check if it is valid. If valid, check for
// optimized code and update invocation count. Otherwise, setup the stack
// frame.
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector,
FieldOperand(feedback_vector, FeedbackCell::kValueOffset));
__ mov(eax, FieldOperand(feedback_vector, HeapObject::kMapOffset));
__ CmpInstanceType(eax, FEEDBACK_VECTOR_TYPE);
__ j(not_equal, &push_stack_frame);
Register feedback_vector = ecx;
Register closure = edi;
Register scratch = eax;
__ LoadFeedbackVector(feedback_vector, closure, scratch, &push_stack_frame,
Label::kNear);

// Load the optimization state from the feedback vector and re-use the
#ifndef V8_JITLESS
// If feedback vector is valid, check for optimized code and update invocation
// count. Load the optimization state from the feedback vector and re-use the
// register.
Label flags_need_processing;
Register flags = ecx;
XMMRegister saved_feedback_vector = xmm1;
__ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
flags, xmm1, CodeKind::INTERPRETED_FUNCTION, &flags_need_processing);
flags, saved_feedback_vector, CodeKind::INTERPRETED_FUNCTION,
&flags_need_processing);

// Reload the feedback vector.
// TODO(jgruber): Don't clobber it above.
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector,
FieldOperand(feedback_vector, FeedbackCell::kValueOffset));
__ movd(feedback_vector, saved_feedback_vector);

{
static constexpr Register scratch = eax;
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, scratch);
}
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, scratch);

// Increment the invocation count.
__ inc(FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));

@@ -942,13 +931,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set
// up the frame (that is done below).
__ bind(&push_stack_frame);
#else
// Note: By omitting the above code in jitless mode we also disable:
// - kFlagsLogNextExecution: only used for logging/profiling; and
// - kInvocationCountOffset: only used for tiering heuristics and code
// coverage.
#endif  // !V8_JITLESS

__ bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ push(ebp);  // Caller's frame pointer.
__ mov(ebp, esp);

@@ -979,6 +969,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ push(kInterpreterBytecodeArrayRegister);
// Push Smi tagged initial bytecode array offset.
__ push(Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag)));
__ push(feedback_vector);

// Allocate the local and temporary register file on the stack.
Label stack_overflow;

@@ -1603,7 +1594,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Label builtin_trampoline, trampoline_loaded;
Smi interpreter_entry_return_pc_offset(
Tagged<Smi> interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());

@@ -1743,22 +1734,25 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
XMMRegister saved_arg_count = xmm0;
XMMRegister saved_bytecode_array = xmm1;
XMMRegister saved_frame_size = xmm2;
XMMRegister saved_feedback_vector = xmm3;
XMMRegister saved_feedback_cell = xmm3;
XMMRegister saved_feedback_vector = xmm4;
__ movd(saved_arg_count, arg_count);
__ movd(saved_frame_size, frame_size);

// Use the arg count (eax) as the scratch register.
Register scratch = arg_count;

// Load the feedback vector from the closure.
Register feedback_vector = ecx;
// Load the feedback cell and vector from the closure.
Register closure = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kClosure);
Register feedback_cell = ecx;
__ mov(feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ movd(saved_feedback_cell, feedback_cell);
Register feedback_vector = ecx;
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector,
FieldOperand(feedback_vector, FeedbackCell::kValueOffset));
FieldOperand(feedback_cell, FeedbackCell::kValueOffset));
__ AssertFeedbackVector(feedback_vector, scratch);
feedback_cell = no_reg;

// Load the optimization state from the feedback vector and re-use the
// register.

@@ -1779,7 +1773,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// Increment the invocation count.
__ inc(FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));

XMMRegister return_address = xmm4;
XMMRegister return_address = xmm5;
// Save the return address, so that we can push it to the end of the newly
// set-up frame once we're done setting it up.
__ PopReturnAddressTo(return_address, scratch);

@@ -1803,12 +1797,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {

// We'll use the bytecode for both code age/OSR resetting, and pushing onto
// the frame, so load it into a register.
Register bytecode_array = scratch;
__ movd(bytecode_array, saved_bytecode_array);
__ Push(bytecode_array);

// Baseline code frames store the feedback vector where interpreter would
// store the bytecode offset.
__ Push(saved_bytecode_array, scratch);
__ Push(saved_feedback_cell, scratch);
__ Push(saved_feedback_vector, scratch);
}

@@ -1877,6 +1867,8 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
// We're here because we got deopted during BaselineOutOfLinePrologue's stack
// check. Undo all its frame creation and call into the interpreter instead.

// Drop the feedback vector.
__ Pop(ecx);
// Drop bytecode offset (was the feedback vector but got replaced during
// deopt).
__ Pop(ecx);

@@ -3788,7 +3780,7 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm,
// FunctionCallbackInfo::values_ (points at the first varargs argument
// passed on the stack).
__ lea(holder,
Operand(holder, (FCA::kArgsLength + 1) * kSystemPointerSize));
Operand(holder, FCA::kArgsLengthWithReceiver * kSystemPointerSize));
__ mov(ExitFrameStackSlotOperand(kApiArgsSize + FCA::kValuesOffset),
holder);

@@ -3802,10 +3794,10 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm,
constexpr int kBytesToDropOffset = FCA::kLengthOffset + kSystemPointerSize;
static_assert(kBytesToDropOffset ==
(kApiStackSpace - 1) * kSystemPointerSize);
__ lea(scratch, Operand(argc, times_system_pointer_size,
(FCA::kArgsLength + 1 /* receiver */ +
exit_frame_params_count) *
kSystemPointerSize));
__ lea(scratch,
Operand(argc, times_system_pointer_size,
(FCA::kArgsLengthWithReceiver + exit_frame_params_count) *
kSystemPointerSize));
__ mov(ExitFrameStackSlotOperand(kApiArgsSize + kBytesToDropOffset), scratch);

__ RecordComment("v8::FunctionCallback's argument.");

@@ -4581,12 +4573,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
AssertCodeIsBaseline(masm, code_obj, ecx);
}

// Load the feedback vector.
// Load the feedback cell and vector.
Register feedback_cell = eax;
Register feedback_vector = ecx;
__ mov(feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
closure = no_reg;
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector,
FieldOperand(feedback_vector, FeedbackCell::kValueOffset));
FieldOperand(feedback_cell, FeedbackCell::kValueOffset));

Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to

@@ -4599,8 +4592,16 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ mov(kInterpreterBytecodeOffsetRegister,
MemOperand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Replace BytecodeOffset with the feedback vector.
__ mov(MemOperand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
// Replace bytecode offset with feedback cell.
static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp ==
BaselineFrameConstants::kFeedbackCellFromFp);
__ mov(MemOperand(ebp, BaselineFrameConstants::kFeedbackCellFromFp),
feedback_cell);
feedback_cell = no_reg;
// Update feedback vector cache.
static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp ==
BaselineFrameConstants::kFeedbackVectorFromFp);
__ mov(MemOperand(ebp, InterpreterFrameConstants::kFeedbackVectorFromFp),
feedback_vector);
feedback_vector = no_reg;

@@ -4651,6 +4652,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ pop(kInterpreterAccumulatorRegister);

if (is_osr) {
DCHECK_EQ(feedback_cell, no_reg);
closure = ecx;
__ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset));
ResetJSFunctionAge(masm, closure, closure);
Generate_OSREntry(masm, code_obj);

@@ -4673,21 +4676,19 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

__ bind(&install_baseline_code);
// Pop/re-push the accumulator so that it's spilled within the below frame
// scope, to keep the stack valid. Use ecx for this -- we can't save it in
// kInterpreterAccumulatorRegister because that aliases with closure.
DCHECK(!AreAliased(ecx, kContextRegister, closure));
__ pop(ecx);
// scope, to keep the stack valid.
__ pop(kInterpreterAccumulatorRegister);
// Restore the clobbered context register.
__ mov(kContextRegister,
Operand(ebp, StandardFrameConstants::kContextOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(ecx);
__ Push(kInterpreterAccumulatorRegister);
// Reload closure.
closure = eax;
__ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset));
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
// Now that we're restarting, we don't have to worry about closure and
// accumulator aliasing, so pop the spilled accumulator directly back into
// the right register.
__ Pop(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
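The common thread in the trampoline changes above (and in the other architecture files below) is the new baseline frame layout: the feedback cell now lives in the slot where interpreter frames keep the bytecode offset, and the feedback vector is cached in its own slot; the `static_assert`s pin that aliasing down before any store relies on it. A self-contained sketch of the assertion pattern (hypothetical offsets, not V8's real frame constants):

    // Hypothetical frame constants: two frame layouts that deliberately alias
    // a slot, with static_asserts guarding the aliasing so a store through one
    // name is provably a store through the other.
    struct InterpreterFrameConstants {
      static constexpr int kBytecodeOffsetFromFp = -3 * 8;  // assumed offset
      static constexpr int kFeedbackVectorFromFp = -4 * 8;  // assumed offset
    };

    struct BaselineFrameConstants {
      // Baseline reuses the interpreter's bytecode-offset slot for the cell.
      static constexpr int kFeedbackCellFromFp = -3 * 8;
      static constexpr int kFeedbackVectorFromFp = -4 * 8;
    };

    static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp ==
                      BaselineFrameConstants::kFeedbackCellFromFp,
                  "cell slot aliases the bytecode-offset slot");
    static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp ==
                      BaselineFrameConstants::kFeedbackVectorFromFp,
                  "vector slot is shared by both layouts");

    // With the aliasing proven, a trampoline may overwrite the bytecode-offset
    // slot with the feedback cell and the frame stays well formed.
    int main() {}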
18
deps/v8/src/builtins/iterator.tq
vendored

@@ -31,6 +31,10 @@ extern macro IteratorBuiltinsAssembler::IteratorStep(
extern macro IteratorBuiltinsAssembler::IteratorStep(
implicit context: Context)(IteratorRecord, Map): JSReceiver
labels Done;
extern macro IteratorBuiltinsAssembler::IteratorComplete(
implicit context: Context)(JSReceiver): void labels Done;
extern macro IteratorBuiltinsAssembler::IteratorComplete(
implicit context: Context)(JSReceiver, Map): void labels Done;

extern macro IteratorBuiltinsAssembler::IteratorValue(
implicit context: Context)(JSReceiver): JSAny;

@@ -97,6 +101,20 @@ transitioning builtin CreateAsyncFromSyncIteratorBaseline(syncIterator: JSAny):
return CreateAsyncFromSyncIterator(context, syncIterator);
}

@export
transitioning macro GetIteratorRecordAfterCreateAsyncFromSyncIterator(
asyncIterator: IteratorRecord): IteratorRecord {
const context: Context = LoadContextFromBaseline();

const iterator = CreateAsyncFromSyncIterator(context, asyncIterator.object);

const nextMethod = GetProperty(iterator, kNextString);
return IteratorRecord{
object: UnsafeCast<JSReceiver>(iterator),
next: nextMethod
};
}

macro GetLazyReceiver(receiver: JSAny): JSAny {
return receiver;
}
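The new Torque macro above wraps a sync iterator and then rebuilds the iterator record, because a record caches the `next` method alongside the object and the wrapper's `next` differs from the cached one. A self-contained C++ analogue of that rebuild step (hypothetical types, not V8 or Torque source):

    #include <functional>

    // Hypothetical iterator-record analogue: the record caches "next" at the
    // time it is built, so wrapping the object invalidates the old record.
    struct Iterator {
      std::function<int()> next;
    };

    struct IteratorRecord {
      Iterator* object;           // the iterator itself
      std::function<int()> next;  // "next" as fetched when the record was made
    };

    // Stand-in for CreateAsyncFromSyncIterator: the wrapper has its own next().
    static Iterator* WrapSyncIterator(Iterator* sync) {
      static Iterator wrapper;
      wrapper.next = [sync] { return sync->next() + 100; };
      return &wrapper;
    }

    // Analogue of GetIteratorRecordAfterCreateAsyncFromSyncIterator: wrap,
    // then re-fetch "next" from the wrapper to form a fresh record.
    static IteratorRecord RecordAfterWrap(const IteratorRecord& syncRecord) {
      Iterator* wrapped = WrapSyncIterator(syncRecord.object);
      return IteratorRecord{wrapped, wrapped->next};
    }

    int main() {
      Iterator sync{[] { return 1; }};
      IteratorRecord record{&sync, sync.next};
      IteratorRecord asyncRecord = RecordAfterWrap(record);
      return asyncRecord.next() == 101 ? 0 : 1;
    }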
86
deps/v8/src/builtins/loong64/builtins-loong64.cc
vendored

@@ -921,19 +921,20 @@ void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
temps.Include({s1, s2});
temps.Include({s1, s2, s3});
temps.Exclude({t7});
auto descriptor =
Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
Register closure = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
// Load the feedback cell and vector from the closure.
Register feedback_cell = temps.Acquire();
Register feedback_vector = temps.Acquire();
__ LoadTaggedField(feedback_vector,
__ LoadTaggedField(feedback_cell,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(
feedback_vector,
FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));
FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();

@@ -991,22 +992,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// the frame, so load it into a register.
Register bytecode_array = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
__ Push(argc, bytecode_array);
__ Push(argc, bytecode_array, feedback_cell, feedback_vector);

// Baseline code frames store the feedback vector where interpreter would
// store the bytecode offset.
{
UseScratchRegisterScope temps(masm);
Register invocation_count = temps.Acquire();
__ AssertFeedbackVector(feedback_vector, invocation_count);
}
// Our stack is currently aligned. We have have to push something along with
// the feedback vector to keep it that way -- we may as well start
// initialising the register frame.
// TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
// `undefined` in the accumulator register, to skip the load in the baseline
// code.
__ Push(feedback_vector);
}

Label call_stack_guard;

@@ -1057,7 +1049,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ Pop(kJavaScriptCallNewTargetRegister);
}
__ Ret();
temps.Exclude({s1, s2});
temps.Exclude({s1, s2, s3});
}

// static

@@ -1065,9 +1057,9 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
// We're here because we got deopted during BaselineOutOfLinePrologue's stack
// check. Undo all its frame creation and call into the interpreter instead.

// Drop bytecode offset (was the feedback vector but got replaced during
// deopt) and bytecode array.
__ Drop(2);
// Drop the feedback vector, the bytecode offset (was the feedback vector
// but got replaced during deopt) and bytecode array.
__ Drop(3);

// Context, closure, argc.
__ Pop(kContextRegister, kJavaScriptCallTargetRegister,

@@ -1119,22 +1111,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ JumpIfObjectType(&compile_lazy, ne, kInterpreterBytecodeArrayRegister,
BYTECODE_ARRAY_TYPE, kScratchReg);

#ifndef V8_JITLESS
// Load the feedback vector from the closure.
Register feedback_vector = a2;
__ LoadTaggedField(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(
feedback_vector,
FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));

Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
__ LoadTaggedField(a4,
FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ Ld_hu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
__ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
Register feedback_vector = a2;
__ LoadFeedbackVector(feedback_vector, closure, a4, &push_stack_frame);

#ifndef V8_JITLESS
// If feedback vector is valid, check for optimized code and update invocation
// count.

// Check the tiering state.
Label flags_need_processing;

@@ -1143,13 +1126,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
&flags_need_processing);

{
UseScratchRegisterScope temps(masm);
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire());
}

Label not_optimized;
__ bind(&not_optimized);
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, a4);

// Increment invocation count for the function.
__ Ld_w(a4, FieldMemOperand(feedback_vector,

@@ -1161,13 +1138,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
__ bind(&push_stack_frame);
#else
// Note: By omitting the above code in jitless mode we also disable:
// - kFlagsLogNextExecution: only used for logging/profiling; and
// - kInvocationCountOffset: only used for tiering heuristics and code
// coverage.
#endif  // !V8_JITLESS

__ bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);

@@ -1175,9 +1153,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ li(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));

// Push bytecode array and Smi tagged bytecode array offset.
// Push bytecode array, Smi tagged bytecode array offset and the feedback
// vector.
__ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
__ Push(kInterpreterBytecodeArrayRegister, a4);
__ Push(kInterpreterBytecodeArrayRegister, a4, feedback_vector);

// Allocate the local and temporary register file on the stack.
Label stack_overflow;

@@ -3795,13 +3774,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
AssertCodeIsBaseline(masm, code_obj, t2);
}

// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
__ LoadTaggedField(feedback_vector,
// Load the feedback cell and vector.
Register feedback_cell = a2;
Register feedback_vector = t8;
__ LoadTaggedField(feedback_cell,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedField(
feedback_vector,
FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));
FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));

Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to

@@ -3812,9 +3792,17 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Save BytecodeOffset from the stack frame.
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
// Replace BytecodeOffset with the feedback vector.
// Replace bytecode offset with feedback cell.
static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp ==
BaselineFrameConstants::kFeedbackCellFromFp);
__ St_d(feedback_cell,
MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp));
feedback_cell = no_reg;
// Update feedback vector cache.
static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp ==
BaselineFrameConstants::kFeedbackVectorFromFp);
__ St_d(feedback_vector,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
MemOperand(fp, InterpreterFrameConstants::kFeedbackVectorFromFp));
feedback_vector = no_reg;

// Compute baseline pc for bytecode offset.
82
deps/v8/src/builtins/mips64/builtins-mips64.cc
vendored

@@ -902,17 +902,18 @@ void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
temps.Include({s1, s2});
temps.Include({s1, s2, s3});
auto descriptor =
Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
Register closure = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
Register feedback_cell = temps.Acquire();
Register feedback_vector = temps.Acquire();
__ Ld(feedback_vector,
__ Ld(feedback_cell,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ld(feedback_vector,
FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));
FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();

@@ -970,22 +971,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// the frame, so load it into a register.
Register bytecode_array = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
__ Push(argc, bytecode_array);
__ Push(argc, bytecode_array, feedback_cell, feedback_vector);

// Baseline code frames store the feedback vector where interpreter would
// store the bytecode offset.
{
UseScratchRegisterScope temps(masm);
Register invocation_count = temps.Acquire();
__ AssertFeedbackVector(feedback_vector, invocation_count);
}
// Our stack is currently aligned. We have have to push something along with
// the feedback vector to keep it that way -- we may as well start
// initialising the register frame.
// TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
// `undefined` in the accumulator register, to skip the load in the baseline
// code.
__ Push(feedback_vector);
}

Label call_stack_guard;

@@ -1036,7 +1028,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ Pop(kJavaScriptCallNewTargetRegister);
}
__ Ret();
temps.Exclude({kScratchReg, kScratchReg2});
temps.Exclude({s1, s2, s3});
}

// static

@@ -1044,9 +1036,9 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
// We're here because we got deopted during BaselineOutOfLinePrologue's stack
// check. Undo all its frame creation and call into the interpreter instead.

// Drop bytecode offset (was the feedback vector but got replaced during
// deopt) and bytecode array.
__ Drop(2);
// Drop the feedback vector, the bytecode offset (was the feedback vector
// but got replaced during deopt) and bytecode array.
__ Drop(3);

// Context, closure, argc.
__ Pop(kContextRegister, kJavaScriptCallTargetRegister,

@@ -1095,20 +1087,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
__ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));

#ifndef V8_JITLESS
// Load the feedback vector from the closure.
Register feedback_vector = a2;
__ Ld(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ld(feedback_vector,
FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));

Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
__ Ld(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
__ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
Register feedback_vector = a2;
__ LoadFeedbackVector(feedback_vector, closure, a4, &push_stack_frame);

#ifndef V8_JITLESS
// If feedback vector is valid, check for optimized code and update invocation
// count.

// Check the tiering state.
Label flags_need_processing;

@@ -1117,13 +1102,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
&flags_need_processing);

{
UseScratchRegisterScope temps(masm);
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire());
}

Label not_optimized;
__ bind(&not_optimized);
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, a4);

// Increment invocation count for the function.
__ Lw(a4, FieldMemOperand(feedback_vector,

@@ -1135,13 +1114,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
__ bind(&push_stack_frame);
#else
// Note: By omitting the above code in jitless mode we also disable:
// - kFlagsLogNextExecution: only used for logging/profiling; and
// - kInvocationCountOffset: only used for tiering heuristics and code
// coverage.
#endif  // !V8_JITLESS

__ bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);

@@ -1149,9 +1129,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ li(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));

// Push bytecode array and Smi tagged bytecode array offset.
// Push bytecode array, Smi tagged bytecode array offset, and the feedback
// vector.
__ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
__ Push(kInterpreterBytecodeArrayRegister, a4);
__ Push(kInterpreterBytecodeArrayRegister, a4, feedback_vector);

// Allocate the local and temporary register file on the stack.
Label stack_overflow;

@@ -3822,12 +3803,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
AssertCodeIsBaseline(masm, code_obj, t2);
}

// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
__ Ld(feedback_vector,
// Load the feedback cell and vector.
Register feedback_cell = a2;
Register feedback_vector = t8;
__ Ld(feedback_cell,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ld(feedback_vector,
FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));
FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));

Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to

@@ -3838,9 +3820,17 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Save BytecodeOffset from the stack frame.
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
// Replace BytecodeOffset with the feedback vector.
// Replace BytecodeOffset with feedback cell.
static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp ==
BaselineFrameConstants::kFeedbackCellFromFp);
__ Sd(feedback_cell,
MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp));
feedback_cell = no_reg;
// Update feedback vector cache.
static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp ==
BaselineFrameConstants::kFeedbackVectorFromFp);
__ Sd(feedback_vector,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
MemOperand(fp, InterpreterFrameConstants::kFeedbackVectorFromFp));
feedback_vector = no_reg;

// Compute baseline pc for bytecode offset.
84
deps/v8/src/builtins/ppc/builtins-ppc.cc
vendored

@@ -173,14 +173,15 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
AssertCodeIsBaseline(masm, code_obj, r6);
}

// Load the feedback vector.
Register feedback_vector = r5;
__ LoadTaggedField(feedback_vector,
// Load the feedback cell and vector.
Register feedback_cell = r5;
Register feedback_vector = ip;
__ LoadTaggedField(feedback_cell,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
r0);
__ LoadTaggedField(
feedback_vector,
FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset), r0);
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset),
r0);

Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to

@@ -192,9 +193,17 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ LoadU64(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Replace BytecodeOffset with the feedback vector.
// Replace bytecode offset with feedback cell.
static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp ==
BaselineFrameConstants::kFeedbackCellFromFp);
__ StoreU64(feedback_cell,
MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp));
feedback_cell = no_reg;
// Update feedback vector cache.
static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp ==
BaselineFrameConstants::kFeedbackVectorFromFp);
__ StoreU64(feedback_vector,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
MemOperand(fp, InterpreterFrameConstants::kFeedbackVectorFromFp));
feedback_vector = no_reg;

// Compute baseline pc for bytecode offset.

@@ -1201,14 +1210,15 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
Register closure = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
// Load the feedback cell and vector from the closure.
Register feedback_cell = r7;
Register feedback_vector = ip;
__ LoadTaggedField(feedback_vector,
__ LoadTaggedField(feedback_cell,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
r0);
__ LoadTaggedField(
feedback_vector,
FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset), r0);
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset),
r0);
__ AssertFeedbackVector(feedback_vector, r11);

// Check for an tiering state.

@@ -1260,14 +1270,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {

__ Push(argc, bytecodeArray);

// Baseline code frames store the feedback vector where interpreter would
// store the bytecode offset.
if (v8_flags.debug_code) {
Register scratch = r11;
__ CompareObjectType(feedback_vector, scratch, scratch,
FEEDBACK_VECTOR_TYPE);
__ Assert(eq, AbortReason::kExpectedFeedbackVector);
}
__ Push(feedback_cell);
__ Push(feedback_vector);
}

@@ -1331,9 +1340,9 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
// We're here because we got deopted during BaselineOutOfLinePrologue's stack
// check. Undo all its frame creation and call into the interpreter instead.

// Drop bytecode offset (was the feedback vector but got replaced during
// deopt) and bytecode array.
__ Drop(2);
// Drop the feedback vector, the bytecode offset (was the feedback vector but
// got replaced during deopt) and bytecode array.
__ Drop(3);

// Context, closure, argc.
__ Pop(kContextRegister, kJavaScriptCallTargetRegister,

@@ -1387,24 +1396,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(
BYTECODE_ARRAY_TYPE);
__ bne(&compile_lazy);

#ifndef V8_JITLESS
// Load the feedback vector from the closure.
Register feedback_vector = r5;
__ LoadTaggedField(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
r0);
__ LoadTaggedField(
feedback_vector,
FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset), r0);

Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
__ LoadTaggedField(
r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0);
__ LoadU16(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
__ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));
__ bne(&push_stack_frame);
Register feedback_vector = r5;
__ LoadFeedbackVector(feedback_vector, closure, r7, &push_stack_frame);

#ifndef V8_JITLESS
// If feedback vector is valid, check for optimized code and update invocation
// count.

Register flags = r7;
Label flags_need_processing;

@@ -1412,13 +1410,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
&flags_need_processing);

{
UseScratchRegisterScope temps(masm);
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, ip, r0);
}

Label not_optimized;
__ bind(&not_optimized);

// Increment invocation count for the function.
__ LoadU32(

@@ -1435,7 +1427,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).

__ bind(&push_stack_frame);
#else
// Note: By omitting the above code in jitless mode we also disable:
// - kFlagsLogNextExecution: only used for logging/profiling; and

@@ -1443,6 +1434,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// coverage.
#endif  // !V8_JITLESS

__ bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);

@@ -1452,7 +1444,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(

// Push bytecode array and Smi tagged bytecode array offset.
__ SmiTag(r7, kInterpreterBytecodeOffsetRegister);
__ Push(kInterpreterBytecodeArrayRegister, r7);
__ Push(kInterpreterBytecodeArrayRegister, r7, feedback_vector);

// Allocate the local and temporary register file on the stack.
Label stack_overflow;

@@ -3616,9 +3608,9 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm,
// from the API function here.
MemOperand stack_space_operand =
ExitFrameStackSlotOperand(FCA::kLengthOffset + kSlotsToDropOnStackSize);
__ mov(scratch, Operand((FCA::kArgsLength + 1 /* receiver */ +
exit_frame_params_count) *
kSystemPointerSize));
__ mov(scratch,
Operand((FCA::kArgsLengthWithReceiver + exit_frame_params_count) *
kSystemPointerSize));
__ ShiftLeftU64(ip, argc, Operand(kSystemPointerSizeLog2));
__ add(scratch, scratch, ip);
__ StoreU64(scratch, stack_space_operand);
2
deps/v8/src/builtins/regexp-replace.tq
vendored

@@ -253,7 +253,7 @@ transitioning javascript builtin RegExpPrototypeReplace(
// If Type(rx) is not Object, throw a TypeError exception.
const rx = Cast<JSReceiver>(receiver)
otherwise ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, methodName);
MessageTemplate::kIncompatibleMethodReceiver, methodName, receiver);

// Let S be ? ToString(string).
const s = ToString_Inline(string);
2
deps/v8/src/builtins/regexp-test.tq
vendored

@@ -14,7 +14,7 @@ transitioning javascript builtin RegExpPrototypeTest(
const methodName: constexpr string = 'RegExp.prototype.test';
const receiver = Cast<JSReceiver>(receiver)
otherwise ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, methodName);
MessageTemplate::kIncompatibleMethodReceiver, methodName, receiver);
const str: String = ToString_Inline(string);
if (IsFastRegExpPermissive(receiver)) {
RegExpPrototypeExecBodyWithoutResultFast(
81
deps/v8/src/builtins/riscv/builtins-riscv.cc
vendored
81
deps/v8/src/builtins/riscv/builtins-riscv.cc
vendored
|
|
@ -959,7 +959,7 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
|
|||
|
||||
// Drop bytecode offset (was the feedback vector but got replaced during
|
||||
// deopt) and bytecode array.
|
||||
__ AddWord(sp, sp, Operand(2 * kSystemPointerSize));
|
||||
__ AddWord(sp, sp, Operand(3 * kSystemPointerSize));
|
||||
|
||||
// Context, closure, argc.
|
||||
__ Pop(kContextRegister, kJavaScriptCallTargetRegister,
|
||||
|
|
@ -974,18 +974,19 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
|
|||
|
||||
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
|
||||
UseScratchRegisterScope temps(masm);
|
||||
temps.Include({kScratchReg, kScratchReg2});
|
||||
temps.Include({kScratchReg, kScratchReg2, s1});
|
||||
auto descriptor =
|
||||
Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
|
||||
Register closure = descriptor.GetRegisterParameter(
|
||||
BaselineOutOfLinePrologueDescriptor::kClosure);
|
||||
// Load the feedback vector from the closure.
|
||||
// Load the feedback cell and vector from the closure.
|
||||
Register feedback_cell = temps.Acquire();
|
||||
Register feedback_vector = temps.Acquire();
|
||||
__ LoadTaggedField(feedback_vector,
|
||||
__ LoadTaggedField(feedback_cell,
|
||||
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
|
||||
__ LoadTaggedField(
|
||||
feedback_vector,
|
||||
FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));
|
||||
FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));
|
||||
{
|
||||
UseScratchRegisterScope temp(masm);
|
||||
Register type = temps.Acquire();
|
||||
|
|
@ -1039,7 +1040,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
|
|||
// the frame, so load it into a register.
|
||||
Register bytecode_array = descriptor.GetRegisterParameter(
|
||||
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
|
||||
__ Push(argc, bytecode_array);
|
||||
__ Push(argc, bytecode_array, feedback_cell, feedback_vector);
|
||||
// Baseline code frames store the feedback vector where interpreter would
|
||||
// store the bytecode offset.
|
||||
{
|
||||
|
|
@ -1047,13 +1048,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
|
|||
Register type = temps.Acquire();
|
||||
__ AssertFeedbackVector(feedback_vector, type);
|
||||
}
|
||||
// Our stack is currently aligned. We have have to push something along with
|
||||
// the feedback vector to keep it that way -- we may as well start
|
||||
// initialising the register frame.
|
||||
// TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
|
||||
// `undefined` in the accumulator register, to skip the load in the baseline
|
||||
// code.
|
||||
__ Push(feedback_vector);
|
||||
}
|
||||
|
||||
Label call_stack_guard;
|
||||
|
|
@ -1101,7 +1095,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
|
|||
__ Pop(kJavaScriptCallNewTargetRegister);
|
||||
}
|
||||
__ Ret();
|
||||
temps.Exclude({kScratchReg, kScratchReg2});
|
||||
temps.Exclude({kScratchReg, kScratchReg2, s1});
|
||||
}
|
||||
|
||||
// Generate code for entering a JS function with the interpreter.
|
||||
|
|
@ -1141,23 +1135,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(
|
|||
  __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
  __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));

#ifndef V8_JITLESS
  Register feedback_vector = a2;
  // Load the feedback vector from the closure.
  __ LoadTaggedField(feedback_vector,
                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedField(
      feedback_vector,
      FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));

  Label push_stack_frame;
  // Check if feedback vector is valid. If valid, check for optimized code
  // and update invocation count. Otherwise, setup the stack frame.
  __ LoadTaggedField(a4,
                     FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
  __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
  __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE),
            Label::Distance::kNear);
  Register feedback_vector = a2;
  __ LoadFeedbackVector(feedback_vector, closure, a4, &push_stack_frame);

#ifndef V8_JITLESS
  // If feedback vector is valid, check for optimized code and update invocation
  // count.

  // Check the tiering state.
  Label flags_need_processing;
@@ -1165,12 +1149,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
  __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
      flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
      &flags_need_processing);
  {
    UseScratchRegisterScope temps(masm);
    ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire());
  }
  Label not_optimized;
  __ bind(&not_optimized);
  ResetFeedbackVectorOsrUrgency(masm, feedback_vector, a4);

  // Increment invocation count for the function.
  __ Lw(a4, FieldMemOperand(feedback_vector,
@@ -1182,13 +1161,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(
  // Open a frame scope to indicate that there is a frame on the stack. The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done below).
  __ bind(&push_stack_frame);
#else
  // Note: By omitting the above code in jitless mode we also disable:
  // - kFlagsLogNextExecution: only used for logging/profiling; and
  // - kInvocationCountOffset: only used for tiering heuristics and code
  //   coverage.
#endif  // !V8_JITLESS

  __ bind(&push_stack_frame);
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ PushStandardFrame(closure);

@@ -1196,9 +1176,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
  __ li(kInterpreterBytecodeOffsetRegister,
        Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));

  // Push bytecode array and Smi tagged bytecode array offset.
  // Push bytecode array, Smi tagged bytecode array offset, and the feedback
  // vector.
  __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
  __ Push(kInterpreterBytecodeArrayRegister, a4);
  __ Push(kInterpreterBytecodeArrayRegister, a4, feedback_vector);

  // Allocate the local and temporary register file on the stack.
  Label stack_overflow;
@@ -3913,13 +3894,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
    AssertCodeIsBaseline(masm, code_obj, scratch);
  }

  // Replace BytecodeOffset with the feedback vector.
  Register feedback_vector = a2;
  __ LoadTaggedField(feedback_vector,
  // Load the feedback cell and vector.
  Register feedback_cell = a2;
  Register feedback_vector = t4;
  __ LoadTaggedField(feedback_cell,
                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedField(
      feedback_vector,
      FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));
      FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));
  Label install_baseline_code;
  // Check if feedback vector is valid. If not, call prepare for baseline to
  // allocate it.
@@ -3930,10 +3912,18 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
  // Save BytecodeOffset from the stack frame.
  __ SmiUntag(kInterpreterBytecodeOffsetRegister,
              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  // Replace BytecodeOffset with the feedback vector.
  // Replace bytecode offset with feedback cell.
  static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp ==
                BaselineFrameConstants::kFeedbackCellFromFp);
  __ StoreWord(feedback_cell,
               MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp));
  feedback_cell = no_reg;
  // Update feedback vector cache.
  static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp ==
                BaselineFrameConstants::kFeedbackVectorFromFp);
  __ StoreWord(
      feedback_vector,
      MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
      MemOperand(fp, InterpreterFrameConstants::kFeedbackVectorFromFp));
  feedback_vector = no_reg;

  // Compute baseline pc for bytecode offset.
@@ -3975,6 +3965,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
    __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
    __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ PrepareCallCFunction(3, 0, a4);
    __ CallCFunction(get_baseline_pc, 3, 0);
  }
  __ LoadCodeInstructionStart(code_obj, code_obj);
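The hunks above depend on the interpreter and baseline frames agreeing on slot positions, which the new static_asserts pin down: the baseline feedback-cell slot aliases the interpreter's bytecode-offset slot, and both layouts place the feedback vector at the same offset. A minimal standalone C++ sketch of that invariant, using made-up offset values rather than V8's real frame constants:

#include <cstdint>

namespace interpreter_frame {
// Hypothetical offsets from the frame pointer, not V8's real values.
constexpr std::intptr_t kBytecodeOffsetFromFp = -4 * 8;
constexpr std::intptr_t kFeedbackVectorFromFp = -5 * 8;
}  // namespace interpreter_frame

namespace baseline_frame {
constexpr std::intptr_t kFeedbackCellFromFp = -4 * 8;    // hypothetical
constexpr std::intptr_t kFeedbackVectorFromFp = -5 * 8;  // hypothetical
}  // namespace baseline_frame

// Mirrors the static_asserts in the diff: overwriting the interpreter's
// bytecode-offset slot with the feedback cell is only legal because both
// layouts agree on where that slot lives.
static_assert(interpreter_frame::kBytecodeOffsetFromFp ==
                  baseline_frame::kFeedbackCellFromFp,
              "slots must alias");
static_assert(interpreter_frame::kFeedbackVectorFromFp ==
                  baseline_frame::kFeedbackVectorFromFp,
              "slots must alias");

int main() { return 0; }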
79
deps/v8/src/builtins/s390/builtins-s390.cc
vendored
@@ -171,13 +171,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
    AssertCodeIsBaseline(masm, code_obj, r5);
  }

  // Load the feedback vector.
  Register feedback_vector = r4;
  __ LoadTaggedField(feedback_vector,
  // Load the feedback cell and vector.
  Register feedback_cell = r4;
  Register feedback_vector = r1;
  __ LoadTaggedField(feedback_cell,
                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedField(
      feedback_vector,
      FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));
      FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));

  Label install_baseline_code;
  // Check if feedback vector is valid. If not, call prepare for baseline to
@@ -189,9 +190,17 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
  __ LoadU64(kInterpreterBytecodeOffsetRegister,
             MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
  // Replace BytecodeOffset with the feedback vector.
  // Replace bytecode offset with feedback cell.
  static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp ==
                BaselineFrameConstants::kFeedbackCellFromFp);
  __ StoreU64(feedback_cell,
              MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp));
  feedback_cell = no_reg;
  // Update feedback vector cache.
  static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp ==
                BaselineFrameConstants::kFeedbackVectorFromFp);
  __ StoreU64(feedback_vector,
              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
              MemOperand(fp, InterpreterFrameConstants::kFeedbackVectorFromFp));
  feedback_vector = no_reg;

  // Compute baseline pc for bytecode offset.
@@ -1239,13 +1248,14 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
      Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
  Register closure = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kClosure);
  // Load the feedback vector from the closure.
  // Load the feedback cell and vector from the closure.
  Register feedback_cell = r6;
  Register feedback_vector = ip;
  __ LoadTaggedField(feedback_vector,
  __ LoadTaggedField(feedback_cell,
                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedField(
      feedback_vector,
      FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));
      FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));
  __ AssertFeedbackVector(feedback_vector, r1);

  // Check for an tiering state.
@@ -1298,14 +1308,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {

    __ Push(argc, bytecodeArray);

    // Baseline code frames store the feedback vector where interpreter would
    // store the bytecode offset.
    if (v8_flags.debug_code) {
      Register scratch = r1;
      __ CompareObjectType(feedback_vector, scratch, scratch,
                           FEEDBACK_VECTOR_TYPE);
      __ Assert(eq, AbortReason::kExpectedFeedbackVector);
    }
    __ Push(feedback_cell);
    __ Push(feedback_vector);
  }

@@ -1364,9 +1373,9 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
  // We're here because we got deopted during BaselineOutOfLinePrologue's stack
  // check. Undo all its frame creation and call into the interpreter instead.

  // Drop bytecode offset (was the feedback vector but got replaced during
  // deopt) and bytecode array.
  __ Drop(2);
  // Drop the feedback vector, the bytecode offset (was the feedback vector but
  // got replaced during deopt) and bytecode array.
  __ Drop(3);

  // Context, closure, argc.
  __ Pop(kContextRegister, kJavaScriptCallTargetRegister,
@@ -1420,23 +1429,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(
                       BYTECODE_ARRAY_TYPE);
  __ bne(&compile_lazy);

#ifndef V8_JITLESS
  // Load the feedback vector from the closure.
  Register feedback_vector = r4;
  __ LoadTaggedField(feedback_vector,
                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedField(
      feedback_vector,
      FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));

  Label push_stack_frame;
  // Check if feedback vector is valid. If valid, check for optimized code
  // and update invocation count. Otherwise, setup the stack frame.
  __ LoadTaggedField(r6,
                     FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
  __ LoadU16(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
  __ CmpS64(r6, Operand(FEEDBACK_VECTOR_TYPE));
  __ bne(&push_stack_frame);
  Register feedback_vector = r4;
  __ LoadFeedbackVector(feedback_vector, closure, r6, &push_stack_frame);

#ifndef V8_JITLESS
  // If feedback vector is valid, check for optimized code and update invocation
  // count.

  Register flags = r6;
  Label flags_need_processing;
@@ -1444,13 +1443,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
      flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
      &flags_need_processing);

  {
    UseScratchRegisterScope temps(masm);
    ResetFeedbackVectorOsrUrgency(masm, feedback_vector, r1);
  }

  Label not_optimized;
  __ bind(&not_optimized);

  // Increment invocation count for the function.
  __ LoadS32(r1, FieldMemOperand(feedback_vector,
@@ -1462,13 +1455,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(
  // Open a frame scope to indicate that there is a frame on the stack. The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done below).
  __ bind(&push_stack_frame);

#else
  // Note: By omitting the above code in jitless mode we also disable:
  // - kFlagsLogNextExecution: only used for logging/profiling; and
  // - kInvocationCountOffset: only used for tiering heuristics and code
  //   coverage.
#endif  // !V8_JITLESS

  __ bind(&push_stack_frame);
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ PushStandardFrame(closure);

@@ -1477,8 +1472,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
        Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));

  // Push bytecode array and Smi tagged bytecode array offset.
  __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
  __ Push(kInterpreterBytecodeArrayRegister, r4);
  __ SmiTag(r0, kInterpreterBytecodeOffsetRegister);
  __ Push(kInterpreterBytecodeArrayRegister, r0, feedback_vector);

  // Allocate the local and temporary register file on the stack.
  Label stack_overflow;
@@ -3590,9 +3585,9 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm,
  // from the API function here.
  MemOperand stack_space_operand =
      ExitFrameStackSlotOperand(FCA::kLengthOffset + kSlotsToDropOnStackSize);
  __ mov(scratch, Operand((FCA::kArgsLength + 1 /* receiver */ +
                           exit_frame_params_count) *
                          kSystemPointerSize));
  __ mov(scratch,
         Operand((FCA::kArgsLengthWithReceiver + exit_frame_params_count) *
                 kSystemPointerSize));
  __ ShiftLeftU64(r1, argc, Operand(kSystemPointerSizeLog2));
  __ AddS64(scratch, r1);
  __ StoreU64(scratch, stack_space_operand);
10
deps/v8/src/builtins/setup-builtins-internal.cc
vendored
@@ -242,10 +242,14 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
       ++builtin) {
    Tagged<Code> code = builtins->code(builtin);
    Tagged<InstructionStream> istream = code->instruction_stream();
    CodePageMemoryModificationScope code_modification_scope(istream);
    WritableJitAllocation jit_allocation = ThreadIsolation::LookupJitAllocation(
        istream.address(), istream->Size(),
        ThreadIsolation::JitAllocationType::kInstructionStream);
    bool flush_icache = false;
    for (RelocIterator it(code, kRelocMask); !it.done(); it.next()) {
      RelocInfo* rinfo = it.rinfo();
    for (WritableRelocIterator it(jit_allocation, istream,
                                  code->constant_pool(), kRelocMask);
         !it.done(); it.next()) {
      WritableRelocInfo* rinfo = it.rinfo();
      if (RelocInfo::IsCodeTargetMode(rinfo->rmode())) {
        Tagged<Code> target_code =
            Code::FromTargetAddress(rinfo->target_address());
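The ReplacePlaceholders change above routes every relocation patch through a WritableJitAllocation and a WritableRelocIterator instead of a plain RelocIterator, so writes to the instruction stream only happen via an explicitly writable handle. A toy C++ model of that pattern (none of these types are V8's; the point is the shape of the loop):

#include <cstddef>
#include <cstdint>
#include <vector>

struct Reloc {
  std::size_t offset;    // word index into the instruction stream
  std::uint32_t target;  // new code target to patch in
};

// Stands in for WritableJitAllocation: the only handle through which the
// instruction stream may be written.
struct WritableView {
  std::vector<std::uint32_t>& words;
  void Patch(std::size_t offset, std::uint32_t value) { words[offset] = value; }
};

// Mirrors the loop in ReplacePlaceholders: look up a writable view once,
// then walk the relocations and patch through it.
void ReplacePlaceholders(std::vector<std::uint32_t>& code,
                         const std::vector<Reloc>& relocs) {
  WritableView view{code};
  for (const Reloc& r : relocs) view.Patch(r.offset, r.target);
}

int main() {
  std::vector<std::uint32_t> code{0, 0, 0};
  ReplacePlaceholders(code, {{1, 0xBEEF}});
  return code[1] == 0xBEEF ? 0 : 1;
}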
@@ -5,8 +5,7 @@
#include 'src/builtins/builtins-constructor-gen.h'

namespace typed_array {
extern builtin IterableToListMayPreserveHoles(Context, Object, Callable):
    JSArray;
extern builtin IterableToListConvertHoles(Context, Object, Callable): JSArray;

extern macro TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
    implicit context: Context)(): JSArrayBuffer;
@@ -201,7 +200,7 @@ transitioning macro ConstructByIterable(
    iteratorFn: Callable): never
    labels IfConstructByArrayLike(JSArray, uintptr) {
  const array: JSArray =
      IterableToListMayPreserveHoles(context, iterable, iteratorFn);
      IterableToListConvertHoles(context, iterable, iteratorFn);
  // Max JSArray length is a valid JSTypedArray length so we just use it.
  goto IfConstructByArrayLike(array, array.length_uintptr);
}
@@ -298,8 +297,7 @@ transitioning macro ConstructByArrayBuffer(
    if (bufferByteLength < offset) goto IfInvalidOffset;

    newByteLength = bufferByteLength - offset;
    newLength = elementsInfo.CalculateLength(newByteLength)
        otherwise IfInvalidLength;
    newLength = elementsInfo.CalculateLength(newByteLength);
  } else {
    // b. Else,
    //   i. Let newByteLength be newLength × elementSize.
2
deps/v8/src/builtins/typed-array-subarray.tq
vendored
@@ -13,7 +13,7 @@ transitioning javascript builtin TypedArrayPrototypeSubArray(
  // 2. Perform ? RequireInternalSlot(O, [[TypedArrayName]]).
  const source = Cast<JSTypedArray>(receiver)
      otherwise ThrowTypeError(
          MessageTemplate::kIncompatibleMethodReceiver, methodName);
          MessageTemplate::kIncompatibleMethodReceiver, methodName, receiver);

  // 3. Assert: O has a [[ViewedArrayBuffer]] internal slot.
  // 4. Let buffer be O.[[ViewedArrayBuffer]].
7
deps/v8/src/builtins/typed-array.tq
vendored
@@ -25,7 +25,6 @@ type RabGsabUint8Elements extends ElementsKind;
struct TypedArrayElementsInfo {
  // Calculates the number of bytes required for specified number of elements.
  macro CalculateByteLength(length: uintptr): uintptr labels IfInvalid {
    if (length > kTypedArrayMaxLength) goto IfInvalid;
    const maxArrayLength = kArrayBufferMaxByteLength >>> this.sizeLog2;
    if (length > maxArrayLength) goto IfInvalid;
    const byteLength = length << this.sizeLog2;
@@ -34,10 +33,8 @@ struct TypedArrayElementsInfo {

  // Calculates the maximum number of elements supported by a specified number
  // of bytes.
  macro CalculateLength(byteLength: uintptr): uintptr labels IfInvalid {
    const length = byteLength >>> this.sizeLog2;
    if (length > kTypedArrayMaxLength) goto IfInvalid;
    return length;
  macro CalculateLength(byteLength: uintptr): uintptr {
    return byteLength >>> this.sizeLog2;
  }

  // Determines if `bytes` (byte offset or length) cannot be evenly divided by
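After this change, CalculateLength no longer re-checks kTypedArrayMaxLength and cannot fail; it is a plain shift, with range validation left to CalculateByteLength and the callers. A standalone C++ sketch with a worked value (the function name mirrors the Torque macro; the numbers are illustrative):

#include <cstdint>
#include <iostream>

// Mirrors the simplified Torque macro: a pure shift, no max-length check.
std::uint64_t CalculateLength(std::uint64_t byte_length,
                              std::uint32_t size_log2) {
  return byte_length >> size_log2;  // byteLength >>> this.sizeLog2
}

int main() {
  // Example for a 4-byte element type (sizeLog2 == 2, as for an Int32Array):
  // 4096 bytes hold 1024 elements.
  std::cout << CalculateLength(4096, 2) << "\n";  // prints 1024
  return 0;
}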
12
deps/v8/src/builtins/wasm-to-js.tq
vendored
@@ -62,7 +62,10 @@ transitioning macro WasmToJSWrapper(ref: WasmApiFunctionRef): WasmToJSResult {
  const paramTypes = Subslice(serializedSig, returnCount + 1, paramCount)
      otherwise unreachable;

  const outParams = WasmAllocateFixedArray(paramCount + 1);
  // The number of parameters that get pushed on the stack is (at least) the
  // number of incoming parameters plus the receiver.
  const numStackParams = paramCount + 1;
  const outParams = WasmAllocateZeroedFixedArray(numStackParams);
  let nextIndex: intptr = 0;
  // Set the receiver to `Undefined` as the default. If the receiver would be
  // different, e.g. the global proxy for sloppy functions, then the CallVarargs
@@ -134,18 +137,17 @@ transitioning macro WasmToJSWrapper(ref: WasmApiFunctionRef): WasmToJSResult {

  const context = ref.native_context;
  // Reset the signature on the stack, so that incoming parameters don't get
  // scanned anymore. This performance optimization is possible because the
  // incoming parameters are not accessed anymore.
  // scanned anymore.
  *GetRefAt<intptr>(sigSlot, 0) = 0;

  const result = CallVarargs(
      context, target, 0, Convert<int32>(paramCount) + 1, outParams);
      context, target, 0, Convert<int32>(numStackParams), outParams);

  // Put a marker on the stack to indicate to the frame iterator that the call
  // to JavaScript is finished. For asm.js source positions it is important to
  // know if an exception happened in the call to JS, or in the ToNumber
  // conversion afterwards.
  *GetRefAt<intptr>(sigSlot, 0) = BitcastTaggedToWord(SmiConstant(2));
  *GetRefAt<intptr>(sigSlot, 0) = BitcastTaggedToWord(SmiConstant(-1));
  let resultFixedArray: FixedArray;
  if (returnCount > 1) {
    resultFixedArray =
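The wrapper now names the outgoing argument count explicitly as numStackParams = paramCount + 1 (the extra slot is the receiver) and hands that same count to CallVarargs, instead of recomputing paramCount + 1 inline. A trivial C++ illustration of the arithmetic, with an example parameter count:

#include <cstdint>
#include <iostream>

int main() {
  const std::int64_t param_count = 3;  // incoming wasm parameters (example)
  // One extra stack slot is always reserved for the receiver.
  const std::int64_t num_stack_params = param_count + 1;
  // CallVarargs receives this precomputed count instead of paramCount + 1.
  std::cout << num_stack_params << "\n";  // prints 4
  return 0;
}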
30
deps/v8/src/builtins/wasm.tq
vendored
@@ -23,7 +23,9 @@ extern runtime WasmFunctionTableSet(
    Context, WasmInstanceObject, Smi, Smi, Object): JSAny;
extern runtime ThrowRangeError(Context, Smi): never;
extern runtime ThrowWasmError(Context, Smi): never;
extern runtime WasmThrowRangeError(Context, Smi): never;
extern runtime WasmThrowTypeError(Context, Smi, JSAny): never;
extern runtime WasmThrowTypeErrorTwoArgs(Context, Smi, JSAny, JSAny): never;
extern runtime WasmThrow(Context, Object, FixedArray): JSAny;
extern runtime WasmReThrow(Context, Object): JSAny;
extern runtime WasmTriggerTierUp(Context, WasmInstanceObject): JSAny;
@@ -331,6 +333,14 @@ builtin WasmInternalFunctionCreateExternal(
  return runtime::WasmInternalFunctionCreateExternal(context, func);
}

builtin WasmAllocateZeroedFixedArray(size: intptr): FixedArray {
  if (size == 0) return kEmptyFixedArray;
  const result = UnsafeCast<FixedArray>(AllocateFixedArray(
      ElementsKind::PACKED_ELEMENTS, size, AllocationFlag::kNone));
  FillEntireFixedArrayWithSmiZero(ElementsKind::PACKED_ELEMENTS, result, size);
  return result;
}

builtin WasmAllocateFixedArray(size: intptr): FixedArray {
  if (size == 0) return kEmptyFixedArray;
  return UnsafeCast<FixedArray>(AllocateFixedArray(
@@ -1074,6 +1084,26 @@ builtin ThrowIndexOfCalledOnNull(): JSAny {
  runtime::WasmThrowTypeError(context, SmiConstant(error), name);
}

builtin ThrowDataViewGetInt32DetachedError(): JSAny {
  const context = LoadContextFromFrame();
  const error = MessageTemplate::kDetachedOperation;
  const name = StringConstant('DataView.prototype.getInt32');
  runtime::WasmThrowTypeError(context, SmiConstant(error), name);
}

builtin ThrowDataViewGetInt32OutOfBounds(): JSAny {
  const context = LoadContextFromFrame();
  const error = MessageTemplate::kInvalidDataViewAccessorOffset;
  runtime::WasmThrowRangeError(context, SmiConstant(error));
}

builtin ThrowDataViewGetInt32TypeError(value: JSAny): JSAny {
  const context = LoadContextFromFrame();
  const error = MessageTemplate::kIncompatibleMethodReceiver;
  const name = StringConstant('DataView.prototype.getInt32');
  runtime::WasmThrowTypeErrorTwoArgs(context, SmiConstant(error), name, value);
}

builtin WasmStringConcat(a: String, b: String): String {
  const context = LoadContextFromFrame();
  tail StringAdd_CheckNone(a, b);
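The new WasmAllocateZeroedFixedArray ensures the out-parameter array used by the wasm-to-JS wrapper is fully initialized (every slot holds Smi zero) before any real argument is copied in, so a garbage collector that scans the array mid-setup never observes an uninitialized slot. A plain C++ analogue of that guarantee (std::vector stands in for FixedArray; this is not V8 code):

#include <cstddef>
#include <cstdint>
#include <vector>

// 0 stands in for Smi zero; an empty vector for kEmptyFixedArray.
std::vector<std::intptr_t> AllocateZeroedFixedArray(std::size_t size) {
  if (size == 0) return {};
  // Zero-fill on construction, analogous to FillEntireFixedArrayWithSmiZero.
  return std::vector<std::intptr_t>(size, 0);
}

int main() {
  auto out_params = AllocateZeroedFixedArray(4);
  // Every slot is a valid (zero) value before any parameter is written.
  return out_params[0] == 0 ? 0 : 1;
}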
77
deps/v8/src/builtins/x64/builtins-x64.cc
vendored
@@ -1029,22 +1029,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(
                   kScratchRegister);
  __ j(not_equal, &compile_lazy);

#ifndef V8_JITLESS
  // Load the feedback vector from the closure.
  Register feedback_vector = rbx;
  TaggedRegister feedback_cell(feedback_vector);
  __ LoadTaggedField(feedback_cell,
                     FieldOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedField(feedback_vector,
                     FieldOperand(feedback_cell, FeedbackCell::kValueOffset));

  Label push_stack_frame;
  // Check if feedback vector is valid. If valid, check for optimized code
  // and update invocation count. Otherwise, setup the stack frame.
  __ IsObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, rcx);
  __ j(not_equal, &push_stack_frame);
  Register feedback_vector = rbx;
  __ LoadFeedbackVector(feedback_vector, closure, &push_stack_frame,
                        Label::kNear);

  // Check the tiering state.
#ifndef V8_JITLESS
  // If feedback vector is valid, check for optimized code and update invocation
  // count.
  Label flags_need_processing;
  __ CheckFeedbackVectorFlagsAndJumpIfNeedsProcessing(
      feedback_vector, CodeKind::INTERPRETED_FUNCTION, &flags_need_processing);
@@ -1058,13 +1050,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(
  // Open a frame scope to indicate that there is a frame on the stack. The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done below).
  __ bind(&push_stack_frame);
#else
  // Note: By omitting the above code in jitless mode we also disable:
  // - kFlagsLogNextExecution: only used for logging/profiling; and
  // - kInvocationCountOffset: only used for tiering heuristics and code
  //   coverage.
#endif  // !V8_JITLESS

  __ bind(&push_stack_frame);
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ pushq(rbp);  // Caller's frame pointer.
  __ movq(rbp, rsp);
@@ -1081,6 +1074,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(
  __ SmiTag(rcx, kInterpreterBytecodeOffsetRegister);
  __ Push(rcx);

  // Push feedback vector.
  __ Push(feedback_vector);

  // Allocate the local and temporary register file on the stack.
  Label stack_overflow;
  {
@@ -1710,7 +1706,8 @@ void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {

// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  Register feedback_vector = r8;
  Register feedback_cell = r8;
  Register feedback_vector = r11;
  Register return_address = r15;

#ifdef DEBUG
@@ -1723,8 +1720,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
      Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
  Register closure = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kClosure);
  // Load the feedback vector from the closure.
  TaggedRegister feedback_cell(feedback_vector);
  // Load the feedback cell and vector from the closure.
  __ LoadTaggedField(feedback_cell,
                     FieldOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedField(feedback_vector,
@@ -1769,9 +1765,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
    Register bytecode_array = descriptor.GetRegisterParameter(
        BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
    __ Push(bytecode_array);

    // Baseline code frames store the feedback vector where interpreter would
    // store the bytecode offset.
    __ Push(feedback_cell);
    __ Push(feedback_vector);
  }

@@ -1845,6 +1839,8 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
  // We're here because we got deopted during BaselineOutOfLinePrologue's stack
  // check. Undo all its frame creation and call into the interpreter instead.

  // Drop feedback vector.
  __ Pop(kScratchRegister);
  // Drop bytecode offset (was the feedback vector but got replaced during
  // deopt).
  __ Pop(kScratchRegister);
@@ -2416,7 +2412,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
    Label convert_to_object, convert_receiver;
    __ movq(rcx, args.GetReceiverOperand());
    __ JumpIfSmi(rcx, &convert_to_object, Label::kNear);
    __ JumpIfJSAnyIsNotPrimitive(rcx, rbx, &done_convert, Label::kNear);
    __ JumpIfJSAnyIsNotPrimitive(rcx, rbx, &done_convert,
                                 DEBUG_BOOL ? Label::kFar : Label::kNear);
    if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
      Label convert_global_proxy;
      __ JumpIfRoot(rcx, RootIndex::kUndefinedValue, &convert_global_proxy,
@@ -4336,7 +4333,7 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm,
  // FunctionCallbackInfo::values_ (points at the first varargs argument
  // passed on the stack).
  __ leaq(holder,
          Operand(holder, (FCA::kArgsLength + 1) * kSystemPointerSize));
          Operand(holder, FCA::kArgsLengthWithReceiver * kSystemPointerSize));
  __ movq(ExitFrameStackSlotOperand(FCA::kValuesOffset), holder);

  // FunctionCallbackInfo::length_.
@@ -4350,9 +4347,8 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm,
                   (kApiStackSpace - 1) * kSystemPointerSize);
  __ leaq(kScratchRegister,
          Operand(argc, times_system_pointer_size,
                  (FCA::kArgsLength + exit_frame_params_count) *
                          kSystemPointerSize +
                      kReceiverOnStackSize));
                  (FCA::kArgsLengthWithReceiver + exit_frame_params_count) *
                      kSystemPointerSize));
  __ movq(ExitFrameStackSlotOperand(kBytesToDropOffset), kScratchRegister);

  __ RecordComment("v8::FunctionCallback's argument.");
@@ -4374,9 +4370,10 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm,

  const bool with_profiling =
      mode != CallApiCallbackMode::kOptimizedNoProfiling;
  Label* no_done = nullptr;
  CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
                           thunk_ref, thunk_arg, kUseExitFrameStackSlotOperand,
                           &stack_space_operand, return_value_operand);
                           &stack_space_operand, return_value_operand, no_done);
}

void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
@@ -4491,9 +4488,10 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
  Operand* const kUseStackSpaceConstant = nullptr;

  const bool with_profiling = true;
  CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
                           thunk_ref, thunk_arg, kStackUnwindSpace,
                           kUseStackSpaceConstant, return_value_operand);
  Label* no_done = nullptr;
  CallApiFunctionAndReturn(
      masm, with_profiling, api_function_address, thunk_ref, thunk_arg,
      kStackUnwindSpace, kUseStackSpaceConstant, return_value_operand, no_done);
}

void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
@@ -4759,10 +4757,9 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
    AssertCodeIsBaseline(masm, code_obj, r11);
  }

  // Load the feedback vector.
  // Load the feedback cell and feedback vector.
  Register feedback_cell = r8;
  Register feedback_vector = r11;

  TaggedRegister feedback_cell(feedback_vector);
  __ LoadTaggedField(feedback_cell,
                     FieldOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedField(feedback_vector,
@@ -4774,12 +4771,20 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
  __ IsObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
  __ j(not_equal, &install_baseline_code);

  // Save BytecodeOffset from the stack frame.
  // Save bytecode offset from the stack frame.
  __ SmiUntagUnsigned(
      kInterpreterBytecodeOffsetRegister,
      MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  // Replace BytecodeOffset with the feedback vector.
  __ movq(MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
  // Replace bytecode offset with feedback cell.
  static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp ==
                BaselineFrameConstants::kFeedbackCellFromFp);
  __ movq(MemOperand(rbp, BaselineFrameConstants::kFeedbackCellFromFp),
          feedback_cell);
  feedback_cell = no_reg;
  // Update feedback vector cache.
  static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp ==
                BaselineFrameConstants::kFeedbackVectorFromFp);
  __ movq(MemOperand(rbp, InterpreterFrameConstants::kFeedbackVectorFromFp),
          feedback_vector);
  feedback_vector = no_reg;
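Several hunks above replace the pattern FCA::kArgsLength + 1 (with a "receiver" comment) by the single named constant FCA::kArgsLengthWithReceiver. A small self-contained C++ check of the bytes-to-drop arithmetic, using hypothetical values for the constants rather than V8's real ones:

#include <cstdint>
#include <iostream>

constexpr int kSystemPointerSize = 8;
constexpr int kArgsLength = 6;                            // hypothetical
constexpr int kArgsLengthWithReceiver = kArgsLength + 1;  // folds the "+ 1"

// Mirrors the stack-space computation: varargs plus the fixed frame slots.
std::int64_t BytesToDrop(std::int64_t argc, int exit_frame_params_count) {
  return argc * kSystemPointerSize +
         (kArgsLengthWithReceiver + exit_frame_params_count) *
             kSystemPointerSize;
}

int main() {
  // With 3 varargs and 2 exit-frame params: (3 + 7 + 2) * 8 = 96 bytes.
  std::cout << BytesToDrop(3, 2) << "\n";  // prints 96
  return 0;
}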
21
deps/v8/src/codegen/arm/assembler-arm-inl.h
vendored
@@ -53,7 +53,7 @@ int DoubleRegister::SupportedRegisterCount() {
  return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
}

void RelocInfo::apply(intptr_t delta) {
void WritableRelocInfo::apply(intptr_t delta) {
  if (RelocInfo::IsInternalReference(rmode_)) {
    // absolute code pointer inside code object moves with the code object.
    int32_t* p = reinterpret_cast<int32_t*>(pc_);
@@ -94,7 +94,7 @@ int RelocInfo::target_address_size() { return kPointerSize; }
Tagged<HeapObject> RelocInfo::target_object(PtrComprCageBase cage_base) {
  DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
  return HeapObject::cast(
      Object(Assembler::target_address_at(pc_, constant_pool_)));
      Tagged<Object>(Assembler::target_address_at(pc_, constant_pool_)));
}

Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
@@ -106,8 +106,8 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
  return origin->relative_code_target_object_handle_at(pc_);
}

void RelocInfo::set_target_object(Tagged<HeapObject> target,
                                  ICacheFlushMode icache_flush_mode) {
void WritableRelocInfo::set_target_object(Tagged<HeapObject> target,
                                          ICacheFlushMode icache_flush_mode) {
  DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
  Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
                                   icache_flush_mode);
@@ -118,7 +118,7 @@ Address RelocInfo::target_external_reference() {
  return Assembler::target_address_at(pc_, constant_pool_);
}

void RelocInfo::set_target_external_reference(
void WritableRelocInfo::set_target_external_reference(
    Address target, ICacheFlushMode icache_flush_mode) {
  DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
  Assembler::set_target_address_at(pc_, constant_pool_, target,
@@ -142,17 +142,6 @@ Address RelocInfo::target_off_heap_target() {
  return Assembler::target_address_at(pc_, constant_pool_);
}

void RelocInfo::WipeOut() {
  DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
         IsExternalReference(rmode_) || IsInternalReference(rmode_) ||
         IsOffHeapTarget(rmode_));
  if (IsInternalReference(rmode_)) {
    Memory<Address>(pc_) = kNullAddress;
  } else {
    Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
  }
}

Handle<Code> Assembler::relative_code_target_object_handle_at(
    Address pc) const {
  Instruction* branch = Instruction::At(pc);
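The assembler-arm-inl.h hunks move all mutation (apply, set_target_object, set_target_external_reference) from RelocInfo onto WritableRelocInfo, leaving the base class read-only. A compact C++ sketch of that const/writable split (toy types, not V8's; a raw pointer slot stands in for the relocation target):

#include <cstdint>

// Read-only view: queries only.
class RelocInfoView {
 public:
  explicit RelocInfoView(std::uintptr_t* slot) : slot_(slot) {}
  std::uintptr_t target_address() const { return *slot_; }

 protected:
  std::uintptr_t* slot_;
};

// Writable view: the only type that can patch, mirroring WritableRelocInfo.
class WritableRelocInfoView : public RelocInfoView {
 public:
  using RelocInfoView::RelocInfoView;
  void set_target_address(std::uintptr_t target) { *slot_ = target; }
  void WipeOut() { *slot_ = 0; }  // analogous to writing kNullAddress
};

int main() {
  std::uintptr_t slot = 0x1234;
  WritableRelocInfoView w(&slot);
  w.set_target_address(0x5678);
  RelocInfoView r(&slot);  // readers never mutate
  return r.target_address() == 0x5678 ? 0 : 1;
}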
Some files were not shown because too many files have changed in this diff.