deps: patch V8 to 14.2.231.14

Refs: https://github.com/v8/v8/compare/14.2.231.9...14.2.231.14
PR-URL: https://github.com/nodejs/node/pull/60413
Reviewed-By: Michaël Zasso <targos@protonmail.com>
Reviewed-By: Marco Ippolito <marcoippolito54@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Antoine du Hamel <duhamelantoine1995@gmail.com>
This commit is contained in:
parent 1f6b681bf2
commit e0ca993514

deps/v8/include/v8-version.h (vendored, 2 lines changed)
@@ -11,7 +11,7 @@
 #define V8_MAJOR_VERSION 14
 #define V8_MINOR_VERSION 2
 #define V8_BUILD_NUMBER 231
-#define V8_PATCH_LEVEL 9
+#define V8_PATCH_LEVEL 14
 
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
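Note (an illustration, not part of the diff): the whole version bump is the single macro change above. A minimal standalone C++ sketch of how the four macros combine into the dotted version string "14.2.231.14":

// Sketch using the post-patch values of the macros shown above.
#include <cstdio>

#define V8_MAJOR_VERSION 14
#define V8_MINOR_VERSION 2
#define V8_BUILD_NUMBER 231
#define V8_PATCH_LEVEL 14

int main() {
  std::printf("%d.%d.%d.%d\n", V8_MAJOR_VERSION, V8_MINOR_VERSION,
              V8_BUILD_NUMBER, V8_PATCH_LEVEL);
  return 0;
}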
deps/v8/src/builtins/loong64/builtins-loong64.cc (vendored, 45 lines changed)
@@ -3185,12 +3185,9 @@ void ReloadParentStack(MacroAssembler* masm, Register return_reg,
   Register parent = tmp2;
   __ Ld_d(parent, MemOperand(active_stack, wasm::kStackParentOffset));
 
-  // Update active stack.
-  __ StoreRootRelative(IsolateData::active_stack_offset(), parent);
-
   // Switch stack!
-  SwitchStacks(masm, ExternalReference::wasm_return_stack(), active_stack,
-               nullptr, no_reg, {return_reg, return_value, context, parent});
+  SwitchStacks(masm, ExternalReference::wasm_return_stack(), parent, nullptr,
+               no_reg, {return_reg, return_value, context, parent});
   LoadJumpBuffer(masm, parent, false, tmp3);
 }
@@ -3425,11 +3422,8 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
   ResetWasmJspiFrameStackSlots(masm);
 
   Label resume;
-  DEFINE_REG(stack);
-  __ LoadRootRelative(stack, IsolateData::active_stack_offset());
   DEFINE_REG(scratch);
 
-  // Update active stack.
   DEFINE_REG(parent);
   __ LoadProtectedPointerField(
       parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
@@ -3437,10 +3431,9 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
   __ LoadExternalPointerField(
       target_stack, FieldMemOperand(parent, WasmSuspenderObject::kStackOffset),
       kWasmStackMemoryTag);
-  __ StoreRootRelative(IsolateData::active_stack_offset(), target_stack);
 
-  SwitchStacks(masm, ExternalReference::wasm_suspend_stack(), stack, &resume,
-               no_reg, {target_stack, suspender, parent});
+  SwitchStacks(masm, ExternalReference::wasm_suspend_stack(), target_stack,
+               &resume, no_reg, {target_stack, suspender, parent});
   __ StoreRootRelative(IsolateData::active_suspender_offset(), parent);
   __ LoadTaggedField(
       kReturnRegister0,
@@ -3512,11 +3505,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
       target_stack,
       FieldMemOperand(suspender, WasmSuspenderObject::kStackOffset),
       kWasmStackMemoryTag);
-
-  __ StoreRootRelative(IsolateData::active_stack_offset(), target_stack);
-  SwitchStacks(masm, ExternalReference::wasm_resume_jspi_stack(), active_stack,
+  SwitchStacks(masm, ExternalReference::wasm_resume_jspi_stack(), target_stack,
                &suspend, suspender, {target_stack});
-
   regs.ResetExcept(target_stack);
 
   // -------------------------------------------
@@ -3563,11 +3553,8 @@ void Builtins::Generate_WasmFXResume(MacroAssembler* masm) {
   __ EnterFrame(StackFrame::WASM_STACK_EXIT);
   Register target_stack = WasmFXResumeDescriptor::GetRegisterParameter(0);
   Label suspend;
-  Register active_stack = a0;
-  __ LoadRootRelative(active_stack, IsolateData::active_stack_offset());
-  __ StoreRootRelative(IsolateData::active_stack_offset(), target_stack);
   SwitchStacks(masm, ExternalReference::wasm_resume_wasmfx_stack(),
-               active_stack, &suspend, no_reg, {target_stack});
+               target_stack, &suspend, no_reg, {target_stack});
   LoadJumpBuffer(masm, target_stack, true, a1);
   __ Trap();
   __ bind(&suspend);
@@ -3580,9 +3567,8 @@ void Builtins::Generate_WasmFXReturn(MacroAssembler* masm) {
   __ LoadRootRelative(active_stack, IsolateData::active_stack_offset());
   Register parent = a1;
   __ Move(parent, MemOperand(active_stack, wasm::kStackParentOffset));
-  __ StoreRootRelative(IsolateData::active_stack_offset(), parent);
-  SwitchStacks(masm, ExternalReference::wasm_return_stack(), active_stack,
-               nullptr, no_reg, {parent});
+  SwitchStacks(masm, ExternalReference::wasm_return_stack(), parent, nullptr,
+               no_reg, {parent});
   LoadJumpBuffer(masm, parent, true, a2);
   __ Trap();
 }
@@ -3599,14 +3585,15 @@ void SwitchToAllocatedStack(MacroAssembler* masm, RegisterAllocator& regs,
                             Label* suspend) {
   ResetWasmJspiFrameStackSlots(masm);
   DEFINE_SCOPED(scratch)
+  DEFINE_REG(parent_stack)
+  __ LoadRootRelative(parent_stack, IsolateData::active_stack_offset());
+  __ Ld_d(parent_stack, MemOperand(parent_stack, wasm::kStackParentOffset));
-  DEFINE_REG(stack)
-  __ LoadRootRelative(stack, IsolateData::active_suspender_offset());
-  __ LoadExternalPointerField(
-      stack, FieldMemOperand(stack, WasmSuspenderObject::kStackOffset),
-      kWasmStackMemoryTag);
-  SwitchStacks(masm, ExternalReference::wasm_start_stack(), stack, suspend,
-               no_reg, {wasm_instance, wrapper_buffer});
-  FREE_REG(stack);
+
+  SwitchStacks(masm, ExternalReference::wasm_start_stack(), parent_stack,
+               suspend, no_reg, {wasm_instance, wrapper_buffer});
+
+  FREE_REG(parent_stack);
   // Save the old stack's fp in t0, and use it to access the parameters in
   // the parent frame.
   regs.Pinned(t1, &original_fp);
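A single convention change runs through the hunks above: SwitchStacks is now passed the stack being switched to (parent, target_stack) rather than the stack being switched away from, and the callers' explicit __ StoreRootRelative(IsolateData::active_stack_offset(), ...) updates disappear. The diff suggests the helper (or the runtime call inside it) now owns that bookkeeping. A hedged C++ model of the idea; StackMemory, SwitchStacksTo, and g_active_stack are invented names, not V8's API:

#include <cassert>

struct StackMemory {
  StackMemory* parent = nullptr;
};

// Stands in for the IsolateData::active_stack_offset() root slot.
StackMemory* g_active_stack = nullptr;

// Post-patch shape: the switch helper receives the target stack and updates
// the active-stack slot itself, so a caller can no longer switch stacks while
// forgetting the update (the failure mode the old split responsibility allowed).
void SwitchStacksTo(StackMemory* target) {
  assert(target != nullptr);
  g_active_stack = target;  // previously a separate store in every caller
  // ... saving/restoring jump buffers would happen here ...
}

// Usage mirroring ReloadParentStack above: return to the parent stack.
void ReturnToParent() { SwitchStacksTo(g_active_stack->parent); }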
deps/v8/src/codegen/loong64/macro-assembler-loong64.cc (vendored)
@@ -142,6 +142,58 @@ void MacroAssembler::PushStandardFrame(Register function_reg) {
   Add_d(fp, sp, Operand(offset));
 }
 
+void MacroAssembler::PreCheckSkippedWriteBarrier(Register object,
+                                                 Register value,
+                                                 Register scratch, Label* ok) {
+  ASM_CODE_COMMENT(this);
+  DCHECK(!AreAliased(object, scratch));
+  DCHECK(!AreAliased(value, scratch));
+
+  // The most common case: Static write barrier elimination is allowed on the
+  // last young allocation.
+  {
+    UseScratchRegisterScope temps(this);
+    Register scratch1 = temps.Acquire();
+    Sub_d(scratch, object, kHeapObjectTag);
+    Ld_d(scratch1, MemOperand(kRootRegister,
+                              IsolateData::last_young_allocation_offset()));
+    Branch(ok, Condition::kEqual, scratch, Operand(scratch1));
+  }
+
+  // Write barier can also be removed if value is in read-only space.
+  CheckPageFlag(value, MemoryChunk::kIsInReadOnlyHeapMask, ne, ok);
+
+  Label not_ok;
+
+  // Handle allocation folding, allow WB removal if:
+  // LAB start <= last_young_allocation_ < (object address+1) < LAB top
+  // Note that object has tag bit set, so object == object address+1.
+  {
+    UseScratchRegisterScope temps(this);
+    Register scratch1 = temps.Acquire();
+
+    // Check LAB start <= last_young_allocation_.
+    Ld_d(scratch, MemOperand(kRootRegister,
+                             IsolateData::new_allocation_info_start_offset()));
+    Ld_d(scratch1, MemOperand(kRootRegister,
+                              IsolateData::last_young_allocation_offset()));
+    Branch(&not_ok, Condition::kUnsignedGreaterThan, scratch,
+           Operand(scratch1));
+
+    // Check last_young_allocation_ < (object address+1).
+    Branch(&not_ok, Condition::kUnsignedGreaterThanEqual, scratch1,
+           Operand(object));
+
+    // Check (object address+1) < LAB top.
+    Ld_d(scratch, MemOperand(kRootRegister,
+                             IsolateData::new_allocation_info_top_offset()));
+    Branch(ok, Condition::kUnsignedLessThan, object, Operand(scratch));
+  }
+
+  // Slow path: Potentially check more cases in C++.
+  bind(&not_ok);
+}
+
 // Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
 // The register 'object' contains a heap object pointer. The heap object
 // tag is shifted away.
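The new PreCheckSkippedWriteBarrier tries three cheap proofs before giving up and letting its caller invoke the C++ verification stub. The same predicate restated as plain C++ (a sketch; the function and parameter names are ours, and the CheckPageFlag test is collapsed into a bool):

#include <cstdint>

bool CanProveWriteBarrierSkippable(uintptr_t object,  // tagged pointer
                                   bool value_in_read_only_space,
                                   uintptr_t lab_start, uintptr_t lab_top,
                                   uintptr_t last_young_allocation) {
  constexpr uintptr_t kHeapObjectTag = 1;
  // 1. Most common case: the object is exactly the last young allocation.
  if (object - kHeapObjectTag == last_young_allocation) return true;
  // 2. Values in read-only space never need a write barrier
  //    (the CheckPageFlag test above).
  if (value_in_read_only_space) return true;
  // 3. Allocation folding, as in the comment above:
  //    LAB start <= last_young_allocation_ < (object address + 1) < LAB top,
  //    where the tagged `object` already equals object address + 1.
  return lab_start <= last_young_allocation &&
         last_young_allocation < object && object < lab_top;
}
// Failing all three is the not_ok path: the caller falls through to
// CallVerifySkippedWriteBarrierStubSaveRegisters for the slow C++ re-check.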
@@ -582,11 +634,33 @@ void MacroAssembler::CallVerifySkippedWriteBarrierStubSaveRegisters(
 void MacroAssembler::CallVerifySkippedWriteBarrierStub(Register object,
                                                        Register value) {
   ASM_CODE_COMMENT(this);
   UseScratchRegisterScope temps(this);
   Register scratch = temps.Acquire();
   PrepareCallCFunction(2, scratch);
   MovePair(kCArgRegs[0], object, kCArgRegs[1], value);
   CallCFunction(ExternalReference::verify_skipped_write_barrier(), 2,
                 SetIsolateDataSlots::kNo);
 }
 
+void MacroAssembler::CallVerifySkippedIndirectWriteBarrierStubSaveRegisters(
+    Register object, Register value, SaveFPRegsMode fp_mode) {
+  ASM_CODE_COMMENT(this);
+  PushCallerSaved(fp_mode);
+  CallVerifySkippedIndirectWriteBarrierStub(object, value);
+  PopCallerSaved(fp_mode);
+}
+
+void MacroAssembler::CallVerifySkippedIndirectWriteBarrierStub(Register object,
+                                                               Register value) {
+  ASM_CODE_COMMENT(this);
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  PrepareCallCFunction(2, scratch);
+  MovePair(kCArgRegs[0], object, kCArgRegs[1], value);
+  CallCFunction(ExternalReference::verify_skipped_indirect_write_barrier(), 2,
+                SetIsolateDataSlots::kNo);
+}
+
 void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
                                        Register object, Operand offset) {
   ASM_CODE_COMMENT(this);
deps/v8/src/codegen/loong64/macro-assembler-loong64.h (vendored)
@@ -181,6 +181,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
   void LoadRootRelative(Register destination, int32_t offset) final;
   void StoreRootRelative(int32_t offset, Register value) final;
 
+  void PreCheckSkippedWriteBarrier(Register object, Register value,
+                                   Register scratch, Label* ok);
+
   // Operand pointing to an external reference.
   // May emit code to set up the scratch register. The operand is
   // only guaranteed to be correct as long as the scratch register

@@ -360,6 +363,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
                                                  SaveFPRegsMode fp_mode);
   void CallVerifySkippedWriteBarrierStub(Register object, Register value);
 
+  void CallVerifySkippedIndirectWriteBarrierStubSaveRegisters(
+      Register object, Register value, SaveFPRegsMode fp_mode);
+  void CallVerifySkippedIndirectWriteBarrierStub(Register object,
+                                                 Register value);
+
   // For a given |object| and |offset|:
   // - Move |object| to |dst_object|.
   // - Compute the address of the slot pointed to by |offset| in |object| and
deps/v8/src/codegen/source-position-table.h (vendored, 4 lines changed)
@@ -36,6 +36,8 @@ struct PositionTableEntry {
   int code_offset;
   bool is_statement;
   bool is_breakable;
+
+  bool operator==(const PositionTableEntry&) const = default;
 };
 
 class V8_EXPORT_PRIVATE SourcePositionTableBuilder {

@@ -95,6 +97,8 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator {
     PositionTableEntry position_;
     IterationFilter iteration_filter_;
     FunctionEntryFilter function_entry_filter_;
+
+    bool operator==(const IndexAndPositionState&) const = default;
   };
 
   // We expose three flavours of the iterator, depending on the argument passed
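These defaulted comparisons are what the Maglev change further down relies on: GetState() snapshots an IndexAndPositionState, and a DCHECK_EQ after RestoreState() needs memberwise equality. A minimal sketch (ours, not V8 code; compile as C++20, which defaulted operator== requires):

#include <cassert>

struct Entry {
  int source_position = 0;
  int code_offset = 0;
  bool is_statement = false;
  bool is_breakable = true;
  // One declaration buys memberwise ==, so state snapshots compare directly.
  bool operator==(const Entry&) const = default;
};

int main() {
  Entry saved{10, 42, true, true};
  Entry current = saved;     // stands in for "RestoreState"
  assert(saved == current);  // the DCHECK_EQ pattern used below
  return 0;
}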
deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc (vendored)
@@ -156,7 +156,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
 #if V8_ENABLE_WEBASSEMBLY
         stub_mode_(stub_mode),
 #endif  // V8_ENABLE_WEBASSEMBLY
-        must_save_lr_(!gen->frame_access_state()->has_frame()),
+        must_save_ra_(!gen->frame_access_state()->has_frame()),
         zone_(gen->zone()),
         indirect_pointer_tag_(indirect_pointer_tag) {
   }

@@ -175,7 +175,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
     SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
                                             ? SaveFPRegsMode::kSave
                                             : SaveFPRegsMode::kIgnore;
-    if (must_save_lr_) {
+    if (must_save_ra_) {
       // We need to save and restore ra if the frame was elided.
       __ Push(ra);
     }

@@ -196,7 +196,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
     } else {
       __ CallRecordWriteStubSaveRegisters(object_, offset_, save_fp_mode);
     }
-    if (must_save_lr_) {
+    if (must_save_ra_) {
       __ Pop(ra);
     }
   }

@@ -209,7 +209,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
 #if V8_ENABLE_WEBASSEMBLY
   StubCallMode const stub_mode_;
 #endif  // V8_ENABLE_WEBASSEMBLY
-  bool must_save_lr_;
+  bool must_save_ra_;
   Zone* zone_;
   IndirectPointerTag indirect_pointer_tag_;
 };
@@ -294,10 +294,12 @@ void RecordTrapInfoIfNeeded(Zone* zone, CodeGenerator* codegen,
 class OutOfLineVerifySkippedWriteBarrier final : public OutOfLineCode {
  public:
   OutOfLineVerifySkippedWriteBarrier(CodeGenerator* gen, Register object,
-                                     Register value)
+                                     Register value, Register scratch)
       : OutOfLineCode(gen),
         object_(object),
         value_(value),
+        scratch_(scratch),
+        must_save_ra_(!gen->frame_access_state()->has_frame()),
         zone_(gen->zone()) {}
 
   void Generate() final {

@@ -305,12 +307,49 @@ class OutOfLineVerifySkippedWriteBarrier final : public OutOfLineCode {
       __ DecompressTagged(value_, value_);
     }
 
+    if (must_save_ra_) {
+      // We need to save and restore ra if the frame was elided.
+      __ Push(ra);
+    }
+
+    __ PreCheckSkippedWriteBarrier(object_, value_, scratch_, exit());
+
     SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
                                             ? SaveFPRegsMode::kSave
                                             : SaveFPRegsMode::kIgnore;
 
     __ CallVerifySkippedWriteBarrierStubSaveRegisters(object_, value_,
                                                       save_fp_mode);
+
+    if (must_save_ra_) {
+      __ Pop(ra);
+    }
   }
 
  private:
   Register const object_;
   Register const value_;
+  Register const scratch_;
+  const bool must_save_ra_;
   Zone* zone_;
 };
 
+class OutOfLineVerifySkippedIndirectWriteBarrier final : public OutOfLineCode {
+ public:
+  OutOfLineVerifySkippedIndirectWriteBarrier(CodeGenerator* gen,
+                                             Register object, Register value)
+      : OutOfLineCode(gen),
+        object_(object),
+        value_(value),
+        zone_(gen->zone()) {}
+
+  void Generate() final {
+    SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
+                                            ? SaveFPRegsMode::kSave
+                                            : SaveFPRegsMode::kIgnore;
+
+    __ CallVerifySkippedIndirectWriteBarrierStubSaveRegisters(object_, value_,
+                                                              save_fp_mode);
+  }
+
+ private:
@@ -1025,12 +1064,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                  Operand(kClearedWeakHeapObjectLower32));
       }
 
-      if (v8_flags.verify_write_barriers) {
-        auto ool = zone()->New<OutOfLineVerifySkippedWriteBarrier>(this, object,
-                                                                   value);
-        __ JumpIfNotSmi(value, ool->entry());
-        __ bind(ool->exit());
-      }
+      DCHECK(v8_flags.verify_write_barriers);
+      Register scratch = i.TempRegister(0);
+      auto ool = zone()->New<OutOfLineVerifySkippedWriteBarrier>(
+          this, object, value, scratch);
+      __ JumpIfNotSmi(value, ool->entry());
+      __ bind(ool->exit());
 
       MacroAssemblerBase::BlockTrampolinePoolScope block_trampoline_pool(
           masm());

@@ -1085,12 +1124,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       Register temp = i.TempRegister(0);
       __ Add_d(temp, object, offset);
 
-      if (v8_flags.verify_write_barriers) {
-        auto ool = zone()->New<OutOfLineVerifySkippedWriteBarrier>(this, object,
-                                                                   value);
-        __ JumpIfNotSmi(value, ool->entry());
-        __ bind(ool->exit());
-      }
+      DCHECK(v8_flags.verify_write_barriers);
+      Register scratch = i.TempRegister(1);
+      auto ool = zone()->New<OutOfLineVerifySkippedWriteBarrier>(
+          this, object, value, scratch);
+      __ JumpIfNotSmi(value, ool->entry());
+      __ bind(ool->exit());
 
       MacroAssemblerBase::BlockTrampolinePoolScope block_trampoline_pool(
           masm());

@@ -1150,6 +1189,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       DCHECK(IsValidIndirectPointerTag(tag));
 #endif  // DEBUG
 
+      DCHECK(v8_flags.verify_write_barriers);
+      auto ool = zone()->New<OutOfLineVerifySkippedIndirectWriteBarrier>(
+          this, object, value);
+      __ jmp(ool->entry());
+      __ bind(ool->exit());
+
       MacroAssemblerBase::BlockTrampolinePoolScope block_trampoline_pool(
           masm());
       Operand offset(0);
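Note (a condensed reading, not part of the diff): the three AssembleArchInstruction hunks above share one shape. The inline path filters out stores that can never need verification (Smi values), everything else jumps to the out-of-line class, and the out-of-line code may still bail out early through the new PreCheckSkippedWriteBarrier before paying for the C++ stub call. As a decision ladder, with our naming:

// Sketch of the emitted control flow, not V8 code.
void VerifySkippedWriteBarrier(bool value_is_smi, bool precheck_proves_safe) {
  if (value_is_smi) return;          // inline: __ JumpIfNotSmi(value, entry)
  if (precheck_proves_safe) return;  // OOL: PreCheck... branches to exit()
  // OOL slow path: CallVerifySkippedWriteBarrierStubSaveRegisters(...),
  // i.e. a C++ function re-checks whether skipping the barrier was legal.
}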
deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc (vendored)
@@ -89,6 +89,7 @@ class Loong64OperandGenerator final : public OperandGenerator {
   bool CanBeImmediate(int64_t value, InstructionCode opcode) {
     switch (ArchOpcodeField::decode(opcode)) {
       case kArchAtomicStoreWithWriteBarrier:
+      case kArchAtomicStoreSkippedWriteBarrier:
         return false;
       case kLoong64Cmp32:
       case kLoong64Cmp64:

@@ -619,7 +620,9 @@ void InstructionSelector::VisitStore(OpIndex node) {
     DCHECK(write_barrier_kind == kIndirectPointerWriteBarrier ||
            write_barrier_kind == kSkippedWriteBarrier);
     // In this case we need to add the IndirectPointerTag as additional input.
-    code = kArchStoreIndirectWithWriteBarrier;
+    code = write_barrier_kind == kSkippedWriteBarrier
+               ? kArchStoreIndirectSkippedWriteBarrier
+               : kArchStoreIndirectWithWriteBarrier;
     code |= RecordWriteModeField::encode(
         RecordWriteMode::kValueIsIndirectPointer);
     IndirectPointerTag tag = store_view.indirect_pointer_tag();

@@ -636,7 +639,13 @@ void InstructionSelector::VisitStore(OpIndex node) {
     if (store_view.is_store_trap_on_null()) {
      code |= AccessModeField::encode(kMemoryAccessProtectedNullDereference);
     }
-    Emit(code, 0, nullptr, input_count, inputs);
+
+    InstructionOperand temps[1];
+    size_t temp_count = 0;
+    if (write_barrier_kind == kSkippedWriteBarrier) {
+      temps[temp_count++] = g.TempRegister();
+    }
+    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
     return;
   }
 

@@ -1794,6 +1803,10 @@ void VisitAtomicStore(InstructionSelector* selector, OpIndex node,
     write_barrier_kind = kFullWriteBarrier;
   }
 
+  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
+                                 g.UseRegisterOrImmediateZero(value)};
+  InstructionOperand temps[2] = {};
+  size_t temp_count = 0;
   InstructionCode code;
 
   if (write_barrier_kind != kNoWriteBarrier &&

@@ -1804,6 +1817,8 @@ void VisitAtomicStore(InstructionSelector* selector, OpIndex node,
     if (write_barrier_kind == kSkippedWriteBarrier) {
       code = kArchAtomicStoreSkippedWriteBarrier;
       code |= RecordWriteModeField::encode(RecordWriteMode::kValueIsAny);
+      temps[temp_count++] = g.TempRegister();
+      temps[temp_count++] = g.TempRegister();
     } else {
       RecordWriteMode record_write_mode =
           WriteBarrierKindToRecordWriteMode(write_barrier_kind);

@@ -1849,15 +1864,14 @@ void VisitAtomicStore(InstructionSelector* selector, OpIndex node,
   }
 
   if (g.CanBeImmediate(index, code)) {
+    inputs[1] = g.UseImmediate(index);
     selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
                        AtomicWidthField::encode(width),
-                   g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
-                   g.UseRegisterOrImmediateZero(value));
+                   0, nullptr, arraysize(inputs), inputs, temp_count, temps);
   } else {
     selector->Emit(code | AddressingModeField::encode(kMode_MRR) |
                        AtomicWidthField::encode(width),
-                   g.NoOutput(), g.UseRegister(base), g.UseRegister(index),
-                   g.UseRegisterOrImmediateZero(value));
+                   0, nullptr, arraysize(inputs), inputs, temp_count, temps);
   }
 }
deps/v8/src/json/json-parser.cc (vendored, 58 lines changed)
@@ -130,6 +130,15 @@ static const constexpr uint8_t character_json_scan_flags[256] = {
 #undef CALL_GET_SCAN_FLAGS
 };
 
+#define EXPECT_RETURN_ON_ERROR(token, msg, ret) \
+  if (V8_UNLIKELY(!Expect(token, msg))) {       \
+    return ret;                                 \
+  }
+#define EXPECT_NEXT_RETURN_ON_ERROR(token, msg, ret) \
+  if (V8_UNLIKELY(!ExpectNext(token, msg))) {        \
+    return ret;                                      \
+  }
+
 }  // namespace
 
 MaybeHandle<Object> JsonParseInternalizer::Internalize(
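What the two macros buy: every converted call site turns a formerly ignored failure into an immediate early return. A miniature model (ours, not the real parser) of the same pattern:

#include <iostream>

enum class JsonToken { COLON, STRING, RBRACE };

#define EXPECT_RETURN_ON_ERROR(token, ret) \
  if (!Expect(token)) {                    \
    return ret;                            \
  }

struct MiniParser {
  JsonToken next;
  bool Expect(JsonToken token) { return next == token; }
  bool ParsePropertyValue() {
    EXPECT_RETURN_ON_ERROR(JsonToken::COLON, false);  // bail on bad input
    return true;  // ... parse the value ...
  }
};

int main() {
  MiniParser p{JsonToken::STRING};
  std::cout << p.ParsePropertyValue() << "\n";  // prints 0: stopped at colon
  return 0;
}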
@@ -1506,8 +1515,9 @@ bool JsonParser<Char>::FastKeyMatch(const uint8_t* key_chars,
 
 template <typename Char>
 bool JsonParser<Char>::ParseJsonPropertyValue(const JsonString& key) {
-  ExpectNext(JsonToken::COLON,
-             MessageTemplate::kJsonParseExpectedColonAfterPropertyName);
+  EXPECT_NEXT_RETURN_ON_ERROR(
+      JsonToken::COLON,
+      MessageTemplate::kJsonParseExpectedColonAfterPropertyName, false);
   Handle<Object> value;
   if (V8_UNLIKELY(!ParseJsonValueRecursive().ToHandle(&value))) return false;
   property_stack_.emplace_back(key, value);

@@ -1553,7 +1563,7 @@ bool JsonParser<Char>::ParseJsonObjectProperties(
   using FastIterableState = DescriptorArray::FastIterableState;
   if constexpr (fast_iterable_state == FastIterableState::kJsonSlow) {
     do {
-      ExpectNext(JsonToken::STRING, first_token_msg);
+      EXPECT_NEXT_RETURN_ON_ERROR(JsonToken::STRING, first_token_msg, false);
       first_token_msg =
           MessageTemplate::kJsonParseExpectedDoubleQuotedPropertyName;
       JsonString key = ScanJsonPropertyKey(cont);

@@ -1563,7 +1573,7 @@ bool JsonParser<Char>::ParseJsonObjectProperties(
     DCHECK_GT(descriptors->number_of_descriptors(), 0);
     InternalIndex idx{0};
     do {
-      ExpectNext(JsonToken::STRING, first_token_msg);
+      EXPECT_NEXT_RETURN_ON_ERROR(JsonToken::STRING, first_token_msg, false);
       first_token_msg =
           MessageTemplate::kJsonParseExpectedDoubleQuotedPropertyName;
       bool key_match;

@@ -1731,7 +1741,8 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonObject(Handle<Map> feedback) {
     return {};
   }
 
-  Expect(JsonToken::RBRACE, MessageTemplate::kJsonParseExpectedCommaOrRBrace);
+  EXPECT_RETURN_ON_ERROR(JsonToken::RBRACE,
+                         MessageTemplate::kJsonParseExpectedCommaOrRBrace, {});
   Handle<Object> result = BuildJsonObject<false>(cont, feedback);
   property_stack_.resize(cont.index);
   return cont.scope.CloseAndEscape(result);

@@ -1777,8 +1788,9 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonArray() {
       SkipWhitespace();
       continue;
     } else {
-      Expect(JsonToken::RBRACK,
-             MessageTemplate::kJsonParseExpectedCommaOrRBrack);
+      EXPECT_RETURN_ON_ERROR(JsonToken::RBRACK,
+                             MessageTemplate::kJsonParseExpectedCommaOrRBrack,
+                             {});
       success = true;
       break;
     }

@@ -1846,7 +1858,8 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonArray() {
     element_stack_.emplace_back(value);
   }
 
-  Expect(JsonToken::RBRACK, MessageTemplate::kJsonParseExpectedCommaOrRBrack);
+  EXPECT_RETURN_ON_ERROR(JsonToken::RBRACK,
+                         MessageTemplate::kJsonParseExpectedCommaOrRBrack, {});
   Handle<Object> result = BuildJsonArray(start);
   element_stack_.resize(start);
   return handle_scope.CloseAndEscape(result);

@@ -1956,15 +1969,17 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue() {
                          property_stack_.size());
 
         // Parse the property key.
-        ExpectNext(JsonToken::STRING,
-                   MessageTemplate::kJsonParseExpectedPropNameOrRBrace);
+        EXPECT_NEXT_RETURN_ON_ERROR(
+            JsonToken::STRING,
+            MessageTemplate::kJsonParseExpectedPropNameOrRBrace, {});
         property_stack_.emplace_back(ScanJsonPropertyKey(&cont));
         if constexpr (should_track_json_source) {
           property_val_node_stack.emplace_back(Handle<Object>());
         }
 
-        ExpectNext(JsonToken::COLON,
-                   MessageTemplate::kJsonParseExpectedColonAfterPropertyName);
+        EXPECT_NEXT_RETURN_ON_ERROR(
+            JsonToken::COLON,
+            MessageTemplate::kJsonParseExpectedColonAfterPropertyName, {});
 
         // Continue to start producing the first property value.
         continue;

@@ -2060,17 +2075,18 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue() {
 
             if (V8_LIKELY(Check(JsonToken::COMMA))) {
               // Parse the property key.
-              ExpectNext(
+              EXPECT_NEXT_RETURN_ON_ERROR(
                   JsonToken::STRING,
-                  MessageTemplate::kJsonParseExpectedDoubleQuotedPropertyName);
+                  MessageTemplate::kJsonParseExpectedDoubleQuotedPropertyName,
+                  {});
 
               property_stack_.emplace_back(ScanJsonPropertyKey(&cont));
               if constexpr (should_track_json_source) {
                 property_val_node_stack.emplace_back(Handle<Object>());
               }
-              ExpectNext(
+              EXPECT_NEXT_RETURN_ON_ERROR(
                   JsonToken::COLON,
-                  MessageTemplate::kJsonParseExpectedColonAfterPropertyName);
+                  MessageTemplate::kJsonParseExpectedColonAfterPropertyName, {});
 
               // Break to start producing the subsequent property value.
               break;

@@ -2090,8 +2106,9 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue() {
         }
       }
       value = BuildJsonObject<should_track_json_source>(cont, feedback);
-      Expect(JsonToken::RBRACE,
-             MessageTemplate::kJsonParseExpectedCommaOrRBrace);
+      EXPECT_RETURN_ON_ERROR(
+          JsonToken::RBRACE,
+          MessageTemplate::kJsonParseExpectedCommaOrRBrace, {});
       // Return the object.
       if constexpr (should_track_json_source) {
         size_t start = cont.index;

@@ -2141,8 +2158,9 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue() {
       if (V8_LIKELY(Check(JsonToken::COMMA))) break;
 
      value = BuildJsonArray(cont.index);
-      Expect(JsonToken::RBRACK,
-             MessageTemplate::kJsonParseExpectedCommaOrRBrack);
+      EXPECT_RETURN_ON_ERROR(
+          JsonToken::RBRACK,
+          MessageTemplate::kJsonParseExpectedCommaOrRBrack, {});
       // Return the array.
       if constexpr (should_track_json_source) {
         size_t start = cont.index;
deps/v8/src/json/json-parser.h (vendored, 21 lines changed)
@@ -242,23 +242,26 @@ class JsonParser final {
     advance();
   }
 
-  void Expect(JsonToken token,
-              std::optional<MessageTemplate> errorMessage = std::nullopt) {
+  V8_WARN_UNUSED_RESULT bool Expect(
+      JsonToken token,
+      std::optional<MessageTemplate> errorMessage = std::nullopt) {
     if (V8_LIKELY(peek() == token)) {
       advance();
-    } else {
-      errorMessage ? ReportUnexpectedToken(peek(), errorMessage.value())
-                   : ReportUnexpectedToken(peek());
+      return true;
     }
+    errorMessage ? ReportUnexpectedToken(peek(), errorMessage.value())
+                 : ReportUnexpectedToken(peek());
+    return false;
   }
 
-  void ExpectNext(JsonToken token,
-                  std::optional<MessageTemplate> errorMessage = std::nullopt) {
+  V8_WARN_UNUSED_RESULT bool ExpectNext(
+      JsonToken token,
+      std::optional<MessageTemplate> errorMessage = std::nullopt) {
     SkipWhitespace();
-    errorMessage ? Expect(token, errorMessage.value()) : Expect(token);
+    return errorMessage ? Expect(token, errorMessage.value()) : Expect(token);
   }
 
-  bool Check(JsonToken token) {
+  V8_WARN_UNUSED_RESULT bool Check(JsonToken token) {
     SkipWhitespace();
     if (next_ != token) return false;
     advance();
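The signature change is the enforcement half of the json-parser.cc macros above: once Expect/ExpectNext return bool and carry V8_WARN_UNUSED_RESULT, a call site that drops the result draws a compiler warning instead of silently parsing on. A sketch of the effect using [[nodiscard]], which plays the same role (ours, not V8's macro definition):

[[nodiscard]] static bool Expect(int token) { return token == 42; }

void ParseSomething() {
  // Expect(41);       // would now warn: ignoring return value
  if (!Expect(41)) {   // OK: the result is consumed, as the new macros do
    return;
  }
}

int main() {
  ParseSomething();
  return 0;
}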
deps/v8/src/maglev/maglev-graph-builder.cc (vendored, 64 lines changed)
@@ -6924,15 +6924,18 @@ MaglevGraphBuilder::FindContinuationForPolymorphicPropertyLoad() {
   }
 
   int start_offset = iterator_.current_offset();
-#ifdef DEBUG
   SourcePositionTableIterator::IndexAndPositionState
       start_source_position_iterator_state =
           source_position_iterator_.GetState();
-#endif
 
   std::optional<ContinuationOffsets> continuation =
       FindContinuationForPolymorphicPropertyLoadImpl();
 
   iterator_.SetOffset(start_offset);
+  source_position_iterator_.RestoreState(start_source_position_iterator_state);
   DCHECK_EQ(start_source_position_iterator_state,
             source_position_iterator_.GetState());
   return continuation;
 }
@@ -6949,11 +6952,64 @@ MaglevGraphBuilder::FindContinuationForPolymorphicPropertyLoadImpl()
   // Where <allowed bytecodes> are:
   // - not affecting control flow
   // - not storing into REG
+  // - not the start or end of a try block
   // and the continuation is limited in length.
 
-  // Skip GetnamedProperty.
+  // Try-block starts are not visible as control flow or basic blocks, so detect
+  // them using the bytecode offset.
+  int next_handler_change = kMaxInt;
+  HandlerTable table(*bytecode().object());
+  if (next_handler_table_index_ < table.NumberOfRangeEntries()) {
+    next_handler_change = table.GetRangeStart(next_handler_table_index_);
+  }
+  // Try-block ends are detected via the top end offset in the current handler
+  // stack.
+  if (IsInsideTryBlock()) {
+    const HandlerTableEntry& entry = catch_block_stack_.top();
+    next_handler_change = std::min(next_handler_change, entry.end);
+  }
+
+  auto IsOffsetAPolymorphicContinuationInterrupt =
+      [this, next_handler_change](int offset) {
+        // We can't continue a polymorphic load over a merge, since the
+        // other side of the merge will observe the call without the load.
+        //
+        // TODO(leszeks): I guess we could split that merge if we wanted to,
+        // introducing a new merge that has the polymorphic loads+calls on one
+        // side and the generic call on the other.
+        if (IsOffsetAMergePoint(offset)) return true;
+
+        // We currently can't continue a polymorphic load across a peeled
+        // loop header -- not because of any actual semantic reason, a peeled
+        // loop should be just like straightline code, but just because this
+        // iteration isn't compatible with the PeelLoop iteration.
+        //
+        // TODO(leszeks): We could probably make loop peeling work happen on the
+        // JumpLoop rather than loop header, and then this continuation code
+        // would work. Only for the first peeled iteration though, not for
+        // speeling.
+        if (loop_headers_to_peel_.Contains(offset)) return true;
+
+        // Loop peeling should be the only reason there was no merge point for a
+        // loop header.
+        DCHECK(!bytecode_analysis_.IsLoopHeader(offset));
+
+        // We can't currently continue a polymorphic load over a try-catch
+        // start/end -- again, not for any semantic reason, but just because
+        // this iteration doesn't consider the catch handler stack.
+        //
+        // TODO(leszeks): If this saved/restore the handler stack, it would
+        // probably work, but we'd need to confirm that later phases don't need
+        // strict nesting of handlers (since the first polymorphic call would
+        // be inside the handler range, but the second polymorphic load after it
+        // in linear scan order would be outside of the handler range).
+        if (offset >= next_handler_change) return true;
+        return false;
+      };
+
+  // Skip GetNamedProperty.
   iterator_.Advance();
-  if (IsOffsetAMergePointOrLoopHeapder(iterator_.current_offset())) {
+  if (IsOffsetAPolymorphicContinuationInterrupt(iterator_.current_offset())) {
     return {};
   }

@@ -6975,7 +7031,7 @@ MaglevGraphBuilder::FindContinuationForPolymorphicPropertyLoadImpl()
   int limit = 20;
   while (--limit > 0) {
     iterator_.Advance();
-    if (IsOffsetAMergePointOrLoopHeapder(iterator_.current_offset())) {
+    if (IsOffsetAPolymorphicContinuationInterrupt(iterator_.current_offset())) {
      return {};
    }
 
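The try-block detection above reduces to one number: the next bytecode offset at which the handler stack changes, taken as the minimum of the next handler-table range start and the innermost active range's end. A condensed restatement (our names and types, not Maglev's):

#include <algorithm>
#include <climits>
#include <vector>

struct Range { int start; int end; };

int NextHandlerChange(const std::vector<Range>& handler_table, int next_index,
                      const std::vector<Range>& active_try_stack) {
  int next = INT_MAX;  // kMaxInt in the hunk above
  if (next_index < static_cast<int>(handler_table.size())) {
    next = handler_table[next_index].start;  // next try-block start
  }
  if (!active_try_stack.empty()) {
    next = std::min(next, active_try_stack.back().end);  // innermost try end
  }
  return next;
}
// Any continuation offset >= NextHandlerChange(...) aborts the polymorphic
// load continuation, mirroring `offset >= next_handler_change` above.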
deps/v8/src/maglev/maglev-graph-builder.h (vendored, 7 lines changed)
@@ -441,15 +441,10 @@ class MaglevGraphBuilder {
 
   // Return true if the given offset is a merge point, i.e. there are jumps
   // targetting it.
-  bool IsOffsetAMergePoint(int offset) {
+  bool IsOffsetAMergePoint(int offset) const {
     return merge_states_[offset] != nullptr;
   }
 
-  bool IsOffsetAMergePointOrLoopHeapder(int offset) {
-    return IsOffsetAMergePoint(offset) ||
-           bytecode_analysis().IsLoopHeader(offset);
-  }
-
   ValueNode* GetContextAtDepth(ValueNode* context, size_t depth);
   bool CheckContextExtensions(size_t depth);
deps/v8/src/regexp/regexp-bytecode-generator.cc (vendored)
@@ -204,8 +204,7 @@ void RegExpBytecodeGenerator::LoadCurrentCharacterImpl(int cp_offset,
     check_bounds = false;  // Load below doesn't need to check.
   }
 
-  DCHECK_LE(kMinCPOffset, cp_offset);
-  DCHECK_GE(kMaxCPOffset, cp_offset);
+  CHECK(base::IsInRange(cp_offset, kMinCPOffset, kMaxCPOffset));
   int bytecode;
   if (check_bounds) {
     if (characters == 4) {
deps/v8/src/regexp/regexp-compiler.cc (vendored, 7 lines changed)
@@ -2313,6 +2313,7 @@ void AssertionNode::BacktrackIfPrevious(
   // If we've already checked that we are not at the start of input, it's okay
   // to load the previous character without bounds checks.
   const bool can_skip_bounds_check = !may_be_at_or_before_subject_string_start;
+  static_assert(Trace::kCPOffsetSlack == 1);
   assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1, non_word,
                                   can_skip_bounds_check);
   EmitWordCheck(assembler, word, non_word, backtrack_if_previous == kIsNonWord);

@@ -2567,6 +2568,7 @@ void TextNode::Emit(RegExpCompiler* compiler, Trace* trace) {
   }
 
   bool first_elt_done = false;
+  static_assert(Trace::kCPOffsetSlack == 1);
   int bound_checked_to = trace->cp_offset() - 1;
   bound_checked_to += trace->bound_checked_up_to();
 

@@ -2611,7 +2613,10 @@ void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
   // characters by means of mask and compare.
   quick_check_performed_.Advance(by, compiler->one_byte());
   cp_offset_ += by;
-  if (cp_offset_ > RegExpMacroAssembler::kMaxCPOffset) {
+  static_assert(RegExpMacroAssembler::kMaxCPOffset ==
+                -RegExpMacroAssembler::kMinCPOffset);
+  if (std::abs(cp_offset_) + kCPOffsetSlack >
+      RegExpMacroAssembler::kMaxCPOffset) {
     compiler->SetRegExpTooBig();
     cp_offset_ = 0;
   }
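Why the bound check changed shape: callers nudge cp_offset by one past the checked value (the `cp_offset() - 1` sites that now carry static_asserts above), and a lookbehind can drive the offset negative, so the abort test must be symmetric and leave slack. A sketch with illustrative constants (V8's actual limits are not restated here):

#include <cstdlib>

constexpr int kMaxCPOffset = (1 << 15) - 1;  // illustrative value only
constexpr int kMinCPOffset = -kMaxCPOffset;
static_assert(kMinCPOffset == -kMaxCPOffset);  // symmetry the hunk asserts
constexpr int kCPOffsetSlack = 1;

bool MustAbortCompilation(int cp_offset) {
  // Old test: cp_offset > kMaxCPOffset. It missed large negative offsets
  // (regress-451663011.js below builds one with a 32767-character lookbehind)
  // and the +/-1 adjustments callers apply.
  return std::abs(cp_offset) + kCPOffsetSlack > kMaxCPOffset;
}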
deps/v8/src/regexp/regexp-compiler.h (vendored, 10 lines changed)
@@ -278,7 +278,17 @@ class Trace {
   };
   void Flush(RegExpCompiler* compiler, RegExpNode* successor,
              FlushMode mode = kFlushFull);
+
+  // Some callers add/subtract 1 from cp_offset, assuming that the result is
+  // still valid. That's obviously not the case when our `cp_offset` is only
+  // checked against kMinCPOffset/kMaxCPOffset, so we need to apply the some
+  // slack.
+  // TODO(jgruber): It would be better if all callers checked against limits
+  // themselves when doing so; but unfortunately not all callers have
+  // abort-compilation mechanisms.
+  static constexpr int kCPOffsetSlack = 1;
   int cp_offset() const { return cp_offset_; }
+
   // Does any trace in the chain have an action?
   bool has_any_actions() const { return has_any_actions_; }
   // Does this particular trace object have an action?
deps/v8/src/regexp/regexp-macro-assembler.cc (vendored, 2 lines changed)
@@ -261,7 +261,7 @@ void NativeRegExpMacroAssembler::LoadCurrentCharacterImpl(
   // path requires a large number of characters, but not the reverse.
   DCHECK_GE(eats_at_least, characters);
 
-  DCHECK(base::IsInRange(cp_offset, kMinCPOffset, kMaxCPOffset));
+  CHECK(base::IsInRange(cp_offset, kMinCPOffset, kMaxCPOffset));
   if (check_bounds) {
     if (cp_offset >= 0) {
       CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
deps/v8/test/mjsunit/regress/regress-449549329.js (vendored, new file, 49 lines)
@@ -0,0 +1,49 @@
+// Copyright 2025 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+// Create two distinct iterator types which both have a getter for `next`.
+class Iterator1 {
+  get next() {
+    return () => ({ done: true });
+  }
+}
+class Iterator2 {
+  get next() {
+    return () => ({ done: true });
+  }
+}
+
+// Create two iterables which return instances of these two distinct iterators.
+const iterable1 = {
+  [Symbol.iterator]() {
+    return new Iterator1();
+  },
+};
+const iterable2 = {
+  [Symbol.iterator]() {
+    return new Iterator2();
+  },
+};
+
+// Iterate the iterable using for-of.
+function foo(iterable) {
+  for (const x of iterable) {
+    return x;
+  }
+}
+
+// Make foo polymorphic in the iterator, specifically so that the feedback for
+// the iterator.next named load is polymorphic, with the feedback being two
+// distinct getters.
+%PrepareFunctionForOptimization(foo);
+foo(iterable1);
+foo(iterable2);
+
+// The optimization should be successful and not trigger any DCHECKs, despite
+// the iterator.next load being before the for-of's implicit try block, and the
+// iterator.next() call being inside it.
+%OptimizeMaglevOnNextCall(foo);
+foo(iterable1);
deps/v8/test/mjsunit/regress/regress-451663011.js (vendored, new file, 9 lines)
@@ -0,0 +1,9 @@
+// Copyright 2025 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const length = 32767;
+const pattern_body = "^" + "a".repeat(length);
+const pattern = new RegExp("(?<=" + pattern_body + ")", "m");
+const input = "a".repeat(length) + "b" + '\n';
+assertThrows(() => pattern.exec(input));