diff --git a/BUILD.gn b/BUILD.gn index f7ca30dd95c7057c9617115d11bdf43442660e61..6cad57b74c05286248b814090929517940958772 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -2893,7 +2893,7 @@ v8_header_set("v8_flags") { ] } -# This is split out to ioslate intructions on armv8.4 +# This is split out to isolate instructions on ARMv8.4 v8_header_set("pac_sign_feature") { sources = [ "src/codegen/arm64/pac-sign-ctx.h", diff --git a/gni/v8.gni b/gni/v8.gni index 77ae6f1e149691c72f818ee4615bb50cb8f36844..ac9e8e763521f3438961cc894de17d8fd80e0021 100644 --- a/gni/v8.gni +++ b/gni/v8.gni @@ -33,7 +33,7 @@ declare_args() { # Use external files for startup data blobs: # the JS builtins sources and the start snapshot. v8_use_external_startup_data = "" - + # v8_use_external_startup_data shared version. v8_use_external_startup_data_shared = false diff --git a/include/v8-internal.h b/include/v8-internal.h index 947dbef1dfc6b7d07a8ada3b6e8a9c08d412559c..1e9bbcbc8d9325b2e3baecc70f3d2b5107450cbf 100644 --- a/include/v8-internal.h +++ b/include/v8-internal.h @@ -517,7 +517,7 @@ class Internals { static const int kExternalOneByteRepresentationTag = 0x0a; static const uint32_t kNumIsolateDataSlots = 4; - static const int kStackGuardSize = 7 * kApiSystemPointerSize; + static const int kStackGuardSize = 8 * kApiSystemPointerSize; static const int kBuiltinTier0EntryTableSize = 7 * kApiSystemPointerSize; static const int kBuiltinTier0TableSize = 7 * kApiSystemPointerSize; static const int kLinearAllocationAreaSize = 3 * kApiSystemPointerSize; diff --git a/include/v8-primitive.h b/include/v8-primitive.h index ad3386b8bbe438632b0244eee0d79513fb572def..aa664de219243e7323e7987161c43b4c4575d135 100644 --- a/include/v8-primitive.h +++ b/include/v8-primitive.h @@ -516,7 +516,8 @@ class V8_EXPORT String : public Name { */ class V8_EXPORT Utf8Value { public: - Utf8Value(Isolate* isolate, Local<Value> obj); + Utf8Value(Isolate* isolate, Local<Value> obj, + WriteOptions options = REPLACE_INVALID_UTF8); ~Utf8Value(); char* operator*() { return str_; } const char* operator*() const { return str_; } diff --git a/src/api/api-natives.cc b/src/api/api-natives.cc index 905f29bf253c443204f3f9f36fd4c948ed434f15..07cbe818c6d63c2f119952a1ce7efc746a803454 100644 --- a/src/api/api-natives.cc +++ b/src/api/api-natives.cc @@ -137,7 +137,7 @@ void DisableAccessChecks(Isolate* isolate, Handle<JSObject> object) { // Copy the map so it won't interfere with the constructor's initial map.
Handle<Map> new_map = Map::Copy(isolate, old_map, "DisableAccessChecks"); new_map->set_is_access_check_needed(false); - JSObject::MigrateToMap(isolate, Handle<JSObject>::cast(object), new_map); + JSObject::MigrateToMap(isolate, object, new_map); } void EnableAccessChecks(Isolate* isolate, Handle<JSObject> object) { diff --git a/src/api/api.cc b/src/api/api.cc index 6cdd75d93e6850aa15d2b0d78c9fb3accf07cef0..e702d70bf8b58e293cefc3bc87ebc5ca34e36245 100644 --- a/src/api/api.cc +++ b/src/api/api.cc @@ -4583,15 +4583,7 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context, i::PropertyKey lookup_key(i_isolate, key_obj); i::LookupIterator it(i_isolate, self, lookup_key, i::LookupIterator::OWN); - if (self->IsJSProxy()) { - ENTER_V8(i_isolate, context, Object, CreateDataProperty, Nothing<bool>(), - i::HandleScope); - Maybe<bool> result = - i::JSReceiver::CreateDataProperty(&it, value_obj, Just(i::kDontThrow)); - has_pending_exception = result.IsNothing(); - RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); - return result; - } else { + if (self->IsJSObject()) { ENTER_V8_NO_SCRIPT(i_isolate, context, Object, CreateDataProperty, Nothing<bool>(), i::HandleScope); Maybe<bool> result = @@ -4600,6 +4592,14 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context, RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); return result; } + // JSProxy or WasmObject or other non-JSObject. + ENTER_V8(i_isolate, context, Object, CreateDataProperty, Nothing<bool>(), + i::HandleScope); + Maybe<bool> result = + i::JSReceiver::CreateDataProperty(&it, value_obj, Just(i::kDontThrow)); + has_pending_exception = result.IsNothing(); + RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); + return result; } Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context, @@ -4610,15 +4610,7 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context, i::Handle<i::Object> value_obj = Utils::OpenHandle(*value); i::LookupIterator it(i_isolate, self, index, self, i::LookupIterator::OWN); - if (self->IsJSProxy()) { - ENTER_V8(i_isolate, context, Object, CreateDataProperty, Nothing<bool>(), - i::HandleScope); - Maybe<bool> result = - i::JSReceiver::CreateDataProperty(&it, value_obj, Just(i::kDontThrow)); - has_pending_exception = result.IsNothing(); - RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); - return result; - } else { + if (self->IsJSObject()) { ENTER_V8_NO_SCRIPT(i_isolate, context, Object, CreateDataProperty, Nothing<bool>(), i::HandleScope); Maybe<bool> result = @@ -4627,6 +4619,14 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context, RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); return result; } + // JSProxy or WasmObject or other non-JSObject. + ENTER_V8(i_isolate, context, Object, CreateDataProperty, Nothing<bool>(), + i::HandleScope); + Maybe<bool> result = + i::JSReceiver::CreateDataProperty(&it, value_obj, Just(i::kDontThrow)); + has_pending_exception = result.IsNothing(); + RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); + return result; } struct v8::PropertyDescriptor::PrivateData { @@ -4737,15 +4737,7 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context, desc.set_configurable(!(attributes & v8::DontDelete)); desc.set_value(value_obj); - if (self->IsJSProxy()) { - ENTER_V8(i_isolate, context, Object, DefineOwnProperty, Nothing<bool>(), - i::HandleScope); - Maybe<bool> success = i::JSReceiver::DefineOwnProperty( - i_isolate, self, key_obj, &desc, Just(i::kDontThrow)); - // Even though we said kDontThrow, there might be accessors that do throw. - RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); - return success; - } else { + if (self->IsJSObject()) { // If it's not a JSProxy, i::JSReceiver::DefineOwnProperty should never run // a script.
ENTER_V8_NO_SCRIPT(i_isolate, context, Object, DefineOwnProperty, @@ -4755,6 +4747,14 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context, RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); return success; } + // JSProxy or WasmObject or other non-JSObject. + ENTER_V8(i_isolate, context, Object, DefineOwnProperty, Nothing<bool>(), + i::HandleScope); + Maybe<bool> success = i::JSReceiver::DefineOwnProperty( + i_isolate, self, key_obj, &desc, Just(i::kDontThrow)); + // Even though we said kDontThrow, there might be accessors that do throw. + RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); + return success; } Maybe<bool> v8::Object::DefineProperty(v8::Local<v8::Context> context, @@ -4781,6 +4781,15 @@ Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key, auto self = Utils::OpenHandle(this); auto key_obj = Utils::OpenHandle(reinterpret_cast<Name*>(*key)); auto value_obj = Utils::OpenHandle(*value); + if (self->IsJSObject()) { + auto js_object = i::Handle<i::JSObject>::cast(self); + i::LookupIterator it(i_isolate, js_object, key_obj, js_object); + has_pending_exception = i::JSObject::DefineOwnPropertyIgnoreAttributes( + &it, value_obj, i::DONT_ENUM) + .is_null(); + RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); + return Just(true); + } if (self->IsJSProxy()) { i::PropertyDescriptor desc; desc.set_writable(true); @@ -4791,13 +4800,8 @@ Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key, i_isolate, i::Handle<i::JSProxy>::cast(self), i::Handle<i::Name>::cast(key_obj), &desc, Just(i::kDontThrow)); } - auto js_object = i::Handle<i::JSObject>::cast(self); - i::LookupIterator it(i_isolate, js_object, key_obj, js_object); - has_pending_exception = i::JSObject::DefineOwnPropertyIgnoreAttributes( - &it, value_obj, i::DONT_ENUM) - .is_null(); - RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); - return Just(true); + // Wasm object, or other kind of special object not supported here. + return Just(false); } MaybeLocal<Value> v8::Object::Get(Local<v8::Context> context, @@ -5075,6 +5079,7 @@ Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) { } else { // If it's not a JSProxy, i::Runtime::DeleteObjectProperty should never run // a script.
+ DCHECK(self->IsJSObject() || self->IsWasmObject()); ENTER_V8_NO_SCRIPT(i_isolate, context, Object, Delete, Nothing<bool>(), i::HandleScope); Maybe<bool> result = i::Runtime::DeleteObjectProperty( @@ -5495,7 +5500,7 @@ bool v8::Object::IsApiWrapper() const { } bool v8::Object::IsUndetectable() const { - auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this)); + auto self = Utils::OpenHandle(this); return self->IsUndetectable(); } @@ -6452,7 +6457,7 @@ void v8::Object::SetAlignedPointerInInternalField(int index, void* value) { void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[], void* values[]) { i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this); - + if (!obj->IsJSObject()) return; i::DisallowGarbageCollection no_gc; const char* location = "v8::Object::SetAlignedPointerInInternalFields()"; i::JSObject js_obj = i::JSObject::cast(*obj); @@ -8635,8 +8640,7 @@ MaybeLocal<WasmModuleObject> WasmModuleObject::FromCompiledModule( i::wasm::GetWasmEngine()->ImportNativeModule( i_isolate, compiled_module.native_module_, base::VectorOf(compiled_module.source_url())); - return Local<WasmModuleObject>::Cast( - Utils::ToLocal(i::Handle<i::JSObject>::cast(module_object))); + return Utils::ToLocal(module_object); #else UNREACHABLE(); #endif // V8_ENABLE_WEBASSEMBLY @@ -8651,7 +8655,7 @@ MaybeLocal<WasmModuleObject> WasmModuleObject::Compile( if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) { return MaybeLocal<WasmModuleObject>(); } - i::MaybeHandle<i::JSObject> maybe_compiled; + i::MaybeHandle<i::WasmModuleObject> maybe_compiled; { i::wasm::ErrorThrower thrower(i_isolate, "WasmModuleObject::Compile()"); auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate); @@ -8664,8 +8668,7 @@ MaybeLocal<WasmModuleObject> WasmModuleObject::Compile( i_isolate->OptionalRescheduleException(false); return MaybeLocal<WasmModuleObject>(); } - return Local<WasmModuleObject>::Cast( - Utils::ToLocal(maybe_compiled.ToHandleChecked())); + return Utils::ToLocal(maybe_compiled.ToHandleChecked()); #else Utils::ApiCheck(false, "WasmModuleObject::Compile", "WebAssembly support is not enabled"); @@ -10458,7 +10461,8 @@ bool MicrotasksScope::IsRunningMicrotasks(Isolate* v8_isolate) { return microtask_queue->IsRunningMicrotasks(); } -String::Utf8Value::Utf8Value(v8::Isolate* v8_isolate, v8::Local<v8::Value> obj) +String::Utf8Value::Utf8Value(v8::Isolate* v8_isolate, v8::Local<v8::Value> obj, + WriteOptions options) : str_(nullptr), length_(0) { if (obj.IsEmpty()) return; i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate); @@ -10470,7 +10474,7 @@ String::Utf8Value::Utf8Value(v8::Isolate* v8_isolate, v8::Local<v8::Value> obj) if (!obj->ToString(context).ToLocal(&str)) return; length_ = str->Utf8Length(v8_isolate); str_ = i::NewArray<char>(length_ + 1); - str->WriteUtf8(v8_isolate, str_); + str->WriteUtf8(v8_isolate, str_, -1, nullptr, options); } String::Utf8Value::~Utf8Value() { i::DeleteArray(str_); } diff --git a/src/api/api.h b/src/api/api.h index 461016c658574448a2f215c89e5da3d2f0a33ee7..c2065453f673f1d9a73a07eccabac042f4343bc7 100644 --- a/src/api/api.h +++ b/src/api/api.h @@ -132,7 +132,8 @@ class RegisteredExtension { V(ToLocalPrimitive, Object, Primitive) \ V(FixedArrayToLocal, FixedArray, FixedArray) \ V(PrimitiveArrayToLocal, FixedArray, PrimitiveArray) \ - V(ToLocal, ScriptOrModule, ScriptOrModule) + V(ToLocal, ScriptOrModule, ScriptOrModule) \ + IF_WASM(V, ToLocal, WasmModuleObject, WasmModuleObject) #define OPEN_HANDLE_LIST(V) \ V(Template, TemplateInfo) \ diff --git a/src/ast/scopes.cc b/src/ast/scopes.cc index 9cdc30a01135258c5b93272f37cc164cc2d97d41..db977c8e228c838f7cf3044b422f2f2bd45285c6 100644 --- a/src/ast/scopes.cc +++ b/src/ast/scopes.cc @@ -1525,26 +1525,22 @@ DeclarationScope*
Scope::GetConstructorScope() { } Scope* Scope::GetHomeObjectScope() { - Scope* scope = this; - while (scope != nullptr && !scope->is_home_object_scope()) { - if (scope->is_function_scope()) { - FunctionKind function_kind = scope->AsDeclarationScope()->function_kind(); - // "super" in arrow functions binds outside the arrow function. But if we - // find a function which doesn't bind "super" (is not a method etc.) and - // not an arrow function, we know "super" here doesn't bind anywhere and - // we can return nullptr. - if (!IsArrowFunction(function_kind) && !BindsSuper(function_kind)) { - return nullptr; - } - } - if (scope->private_name_lookup_skips_outer_class()) { - DCHECK(scope->outer_scope()->is_class_scope()); - scope = scope->outer_scope()->outer_scope(); - } else { - scope = scope->outer_scope(); - } - } - return scope; + Scope* scope = GetReceiverScope(); + DCHECK(scope->is_function_scope()); + FunctionKind kind = scope->AsDeclarationScope()->function_kind(); + // "super" in arrow functions binds outside the arrow function. Arrow + // functions are also never receiver scopes since they close over the + // receiver. + DCHECK(!IsArrowFunction(kind)); + // If we find a function which doesn't bind "super" (is not a method etc.), we + // know "super" here doesn't bind anywhere and we can return nullptr. + if (!BindsSuper(kind)) return nullptr; + // Functions that bind "super" can only syntactically occur nested inside home + // object scopes (i.e. class scopes and object literal scopes), so directly + // return the outer scope. + Scope* outer_scope = scope->outer_scope(); + CHECK(outer_scope->is_home_object_scope()); + return outer_scope; } DeclarationScope* Scope::GetScriptScope() { @@ -2297,7 +2293,7 @@ void Scope::ResolveVariable(VariableProxy* proxy) { // // Because of the above, start resolving home objects directly at the home // object scope instead of the current scope. - Scope* scope = GetDeclarationScope()->GetHomeObjectScope(); + Scope* scope = GetHomeObjectScope(); DCHECK_NOT_NULL(scope); if (scope->scope_info_.is_null()) { var = Lookup(proxy, scope, nullptr); diff --git a/src/base/logging.h b/src/base/logging.h index e333aefd0cadaf09adcaf79b9461af3a72e49309..3571f308fb4098040c635040251e45c353c1b564 100644 --- a/src/base/logging.h +++ b/src/base/logging.h @@ -428,4 +428,35 @@ DEFINE_CHECK_OP_IMPL(GT, > ) #define DCHECK_IMPLIES(v1, v2) ((void) 0) #endif +// When the sandbox is enabled, a SBXCHECK behaves exactly like a CHECK, but +// indicates that the check is required for the sandbox, i.e. prevents a +// sandbox bypass. When the sandbox is off, it becomes a DCHECK. +// +// As an example, consider a scenario where an in-sandbox object stores an +// index into an out-of-sandbox array (or a similar data structure). While +// under normal circumstances it can be guaranteed that the index will always +// be in bounds, with the sandbox attacker model, we have to assume that the +// in-sandbox object can be corrupted by an attacker and so the access can go +// out-of-bounds. In that case, a SBXCHECK can be used to both prevent memory +// corruption outside of the sandbox and document that there is a +// security-critical invariant that may be violated when an attacker can +// corrupt memory inside the sandbox, but otherwise holds true. 
+#ifdef V8_ENABLE_SANDBOX +#define SBXCHECK(condition) CHECK(condition) +#define SBXCHECK_EQ(lhs, rhs) CHECK_EQ(lhs, rhs) +#define SBXCHECK_NE(lhs, rhs) CHECK_NE(lhs, rhs) +#define SBXCHECK_GT(lhs, rhs) CHECK_GT(lhs, rhs) +#define SBXCHECK_GE(lhs, rhs) CHECK_GE(lhs, rhs) +#define SBXCHECK_LT(lhs, rhs) CHECK_LT(lhs, rhs) +#define SBXCHECK_LE(lhs, rhs) CHECK_LE(lhs, rhs) +#else +#define SBXCHECK(condition) DCHECK(condition) +#define SBXCHECK_EQ(lhs, rhs) DCHECK_EQ(lhs, rhs) +#define SBXCHECK_NE(lhs, rhs) DCHECK_NE(lhs, rhs) +#define SBXCHECK_GT(lhs, rhs) DCHECK_GT(lhs, rhs) +#define SBXCHECK_GE(lhs, rhs) DCHECK_GE(lhs, rhs) +#define SBXCHECK_LT(lhs, rhs) DCHECK_LT(lhs, rhs) +#define SBXCHECK_LE(lhs, rhs) DCHECK_LE(lhs, rhs) +#endif + #endif // V8_BASE_LOGGING_H_ diff --git a/src/builtins/builtins-array.cc b/src/builtins/builtins-array.cc index 3a36fa4e60fe96835ce6ec21a8bdd2085aca055a..d6ec472ff06c89dd9016dfdde0cc9ed19f7a6ef9 100644 --- a/src/builtins/builtins-array.cc +++ b/src/builtins/builtins-array.cc @@ -49,7 +49,7 @@ inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver receiver) { DisallowGarbageCollection no_gc; PrototypeIterator iter(isolate, receiver, kStartAtReceiver); for (; !iter.IsAtEnd(); iter.Advance()) { - if (!iter.GetCurrent().IsObject()) return false; + if (!iter.GetCurrent().IsJSObject()) return false; JSObject current = iter.GetCurrent<JSObject>(); if (!HasSimpleElements(current)) return false; } @@ -1073,6 +1073,8 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object, if (!iter.IsAtEnd()) { // The prototype will usually have no inherited element indices, // but we have to check. + // Casting to JSObject is safe because we ran {HasOnlySimpleElements} on + // the receiver before, which checks the prototype chain. CollectElementIndices( isolate, PrototypeIterator::GetCurrent<JSObject>(iter), range, indices); } diff --git a/src/builtins/builtins-intl-gen.cc b/src/builtins/builtins-intl-gen.cc index c2a873ae0ef4a27b777b97437e08602037ce8497..2ad29253b7e72f6b4b3d71219ec8f164abc7d4b8 100644 --- a/src/builtins/builtins-intl-gen.cc +++ b/src/builtins/builtins-intl-gen.cc @@ -231,71 +231,71 @@ void IntlBuiltinsAssembler::ToLowerCaseImpl( ReturnFct(result); } - BIND(&two_byte_string); - { - const TNode<String> dst = AllocateSeqTwoByteString(length); - const TNode<RawPtrT> dst_ptr = PointerToSeqStringData(dst); - const TNode<ExternalReference> to_lower_table_addr = - ExternalConstant(ExternalReference::intl_to_latin1_lower_table()); - TVARIABLE(IntPtrT, var_cursor, IntPtrConstant(0)); - const int kMaxShortStringLength = 24; // Determined empirically.
- GotoIf(Uint32GreaterThan(length, Uint32Constant(kMaxShortStringLength)), - &runtime); - const TNode<IntPtrT> start_address = - ReinterpretCast<IntPtrT>(to_direct.PointerToData(&runtime)); - const TNode<IntPtrT> end_address = - Signed(IntPtrAdd(start_address, IntPtrMul(IntPtrConstant(kUInt16Size), - ChangeUint32ToWord(length)))); - - TVARIABLE(Word32T, var_did_change, Int32Constant(0)); - - VariableList push_vars({&var_cursor, &var_did_change}, zone()); - - BuildFastLoop<IntPtrT>( - push_vars, start_address, end_address, - [&](TNode<IntPtrT> current) { - TNode<Uint16T> c = Load<Uint16T>(current); - - Label is_ascii(this), is_not_ascii(this), inc_offset(this); - - Branch(Uint32LessThanOrEqual(c, Uint32Constant(0x00FF)), &is_ascii, - &is_not_ascii); - - BIND(&is_ascii); - { - // For an ASCII character, convert to lower case. - TNode<Uint8T> lower = - Load<Uint8T>(to_lower_table_addr, ChangeInt32ToIntPtr(c)); - StoreNoWriteBarrier(MachineRepresentation::kWord16, dst_ptr, - var_cursor.value(), lower); - var_did_change = - Word32Or(Word32NotEqual(c, lower), var_did_change.value()); - Goto(&inc_offset); - } - - BIND(&is_not_ascii); - { - // For a non-ASCII character, check if it is a Chinese character. - GotoIfNot(IsChinese(c), &runtime); - StoreNoWriteBarrier(MachineRepresentation::kWord16, dst_ptr, - var_cursor.value(), c); - Goto(&inc_offset); - } - - BIND(&inc_offset); - { - // Advance the cursor into the dst string. - Increment(&var_cursor, kUInt16Size); - } - }, - kUInt16Size, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost); - - // Return the original string if it remained unchanged in order to preserve - // e.g. internalization and private symbols (such as the preserved object - // hash) on the source string. - GotoIfNot(var_did_change.value(), &return_string); - ReturnFct(dst); - } + BIND(&two_byte_string); + { + const TNode<String> dst = AllocateSeqTwoByteString(length); + const TNode<RawPtrT> dst_ptr = PointerToSeqStringData(dst); + const TNode<ExternalReference> to_lower_table_addr = + ExternalConstant(ExternalReference::intl_to_latin1_lower_table()); + TVARIABLE(IntPtrT, var_cursor, IntPtrConstant(0)); + const int kMaxShortStringLength = 24; // Determined empirically.
+ GotoIf(Uint32GreaterThan(length, Uint32Constant(kMaxShortStringLength)), + &runtime); + const TNode<IntPtrT> start_address = + ReinterpretCast<IntPtrT>(to_direct.PointerToData(&runtime)); + const TNode<IntPtrT> end_address = + Signed(IntPtrAdd(start_address, IntPtrMul(IntPtrConstant(kUInt16Size), + ChangeUint32ToWord(length)))); + + TVARIABLE(Word32T, var_did_change, Int32Constant(0)); + + VariableList push_vars({&var_cursor, &var_did_change}, zone()); + + BuildFastLoop<IntPtrT>( + push_vars, start_address, end_address, + [&](TNode<IntPtrT> current) { + TNode<Uint16T> c = Load<Uint16T>(current); + + Label is_ascii(this), is_not_ascii(this), inc_offset(this); + + Branch(Uint32LessThanOrEqual(c, Uint32Constant(0x00FF)), &is_ascii, + &is_not_ascii); + + BIND(&is_ascii); + { + // For an ASCII character, convert to lower case. + TNode<Uint8T> lower = + Load<Uint8T>(to_lower_table_addr, ChangeInt32ToIntPtr(c)); + StoreNoWriteBarrier(MachineRepresentation::kWord16, dst_ptr, + var_cursor.value(), lower); + var_did_change = + Word32Or(Word32NotEqual(c, lower), var_did_change.value()); + Goto(&inc_offset); + } + + BIND(&is_not_ascii); + { + // For a non-ASCII character, check if it is a Chinese character. + GotoIfNot(IsChinese(c), &runtime); + StoreNoWriteBarrier(MachineRepresentation::kWord16, dst_ptr, + var_cursor.value(), c); + Goto(&inc_offset); + } + + BIND(&inc_offset); + { + // Advance the cursor into the dst string. + Increment(&var_cursor, kUInt16Size); + } + }, + kUInt16Size, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost); + + // Return the original string if it remained unchanged in order to preserve + // e.g. internalization and private symbols (such as the preserved object + // hash) on the source string. + GotoIfNot(var_did_change.value(), &return_string); + ReturnFct(dst); + } BIND(&return_string); ReturnFct(string); diff --git a/src/builtins/promise-any.tq b/src/builtins/promise-any.tq index 45bafac0e6b09143b69b21a7292f9ed6b9c46239..d531d57a375ba33bf11ccf698da5918f1e25f38c 100644 --- a/src/builtins/promise-any.tq +++ b/src/builtins/promise-any.tq @@ -106,9 +106,10 @@ PromiseAnyRejectElementClosure( const index = identityHash - 1; // 6. Let errors be F.[[Errors]]. - let errors = *ContextSlot( + let errorsRef:&FixedArray = ContextSlot( context, PromiseAnyRejectElementContextSlots::kPromiseAnyRejectElementErrorsSlot); + let errors = *errorsRef; // 7. Let promiseCapability be F.[[Capability]]. @@ -134,10 +135,7 @@ PromiseAnyRejectElementClosure( IntPtrMax(SmiUntag(remainingElementsCount) - 1, index + 1); if (newCapacity > errors.length_intptr) deferred { errors = ExtractFixedArray(errors, 0, errors.length_intptr, newCapacity); - *ContextSlot( - context, - PromiseAnyRejectElementContextSlots:: - kPromiseAnyRejectElementErrorsSlot) = errors; + *errorsRef = errors; } errors.objects[index] = value; @@ -155,6 +153,10 @@ PromiseAnyRejectElementClosure( // b. Set error.[[AggregateErrors]] to errors. const error = ConstructAggregateError(errors); + + // After this point, errors escapes to user code. Clear the slot. + *errorsRef = kEmptyFixedArray; + // c. Return ? Call(promiseCapability.[[Reject]], undefined, « error »).
const capability = *ContextSlot( context, diff --git a/src/codegen/arm64/assembler-arm64-inl.h b/src/codegen/arm64/assembler-arm64-inl.h index ef4c608e5cbca39e1c3833407dd604a9f5e5afec..13fa09242c6a51fde852e161ebd6cb0ef99c1b52 100644 --- a/src/codegen/arm64/assembler-arm64-inl.h +++ b/src/codegen/arm64/assembler-arm64-inl.h @@ -535,7 +535,7 @@ void Assembler::set_embedded_object_index_referenced_from( Address target = target_pointer_address_at(pc); #ifdef V8_ENABLE_JIT_CODE_SIGN TryPatchInstruction(jit_code_signer_, reinterpret_cast<void*>(target), - static_cast<Instr>(data)); + static_cast<Instr>(data)); #endif WriteUnalignedValue(target, static_cast<EmbeddedObjectIndex>(data)); diff --git a/src/codegen/arm64/assembler-arm64.h b/src/codegen/arm64/assembler-arm64.h index bf7f78cb0c13a96d7f78969aefccc1635a98d5a6..a6d162e136d058e81664ad970248928a3b84522b 100644 --- a/src/codegen/arm64/assembler-arm64.h +++ b/src/codegen/arm64/assembler-arm64.h @@ -3307,7 +3307,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { static_assert(AssemblerBase::kMinimalBufferSize >= 2 * kGap); #ifdef V8_ENABLE_JIT_CODE_SIGN - // Jit code signer for signing instruction + // Jit code signer for signing instructions JitCodeSignerBase *jit_code_signer_ = nullptr; #endif @@ -3443,7 +3443,7 @@ class PatchingAssembler : public Assembler { TrySetCompileMode(jit_code_signer_, static_cast<int>(CompileMode::PATCH)); } - void ReleaseJitCodeSigner() { + void ReleaseJitCodeSigner() { if (jit_code_signer_ != nullptr) { TrySetCompileMode(jit_code_signer_, static_cast<int>(CompileMode::APPEND)); jit_code_signer_ = nullptr; diff --git a/src/codegen/arm64/instructions-arm64.cc b/src/codegen/arm64/instructions-arm64.cc index 4842f7481715b6e7958b462660f0ea1cf258004a..6093185c615ecf749e7db9f8db813011e6a2e766 100644 --- a/src/codegen/arm64/instructions-arm64.cc +++ b/src/codegen/arm64/instructions-arm64.cc @@ -257,8 +257,8 @@ void Instruction::SetImmPCOffsetTarget(const AssemblerOptions& options, #ifdef V8_ENABLE_JIT_CODE_SIGN void Instruction::SetPCRelImmTarget(const AssemblerOptions& options, - Instruction* target, - JitCodeSignerBase* patch_signer) { + Instruction* target, + JitCodeSignerBase* patch_signer) { #else void Instruction::SetPCRelImmTarget(const AssemblerOptions& options, Instruction* target) { @@ -322,7 +322,7 @@ void Instruction::SetBranchImmTarget(Instruction* target) { } Instr insn = Mask(~imm_mask) | branch_imm; #ifdef V8_ENABLE_JIT_CODE_SIGN - TryPatchInstruction(patch_signer, reinterpret_cast<void*>(this), insn); + TryPatchInstruction(patch_signer, reinterpret_cast<void*>(this), insn); #endif SetInstructionBits(insn); } @@ -353,7 +353,7 @@ #ifdef V8_ENABLE_JIT_CODE_SIGN void Instruction::SetImmLLiteral(Instruction* source, - JitCodeSignerBase* patch_signer) { + JitCodeSignerBase* patch_signer) { #else void Instruction::SetImmLLiteral(Instruction* source) { #endif @@ -366,7 +366,7 @@ void Instruction::SetImmLLiteral(Instruction* source) { Instr insn = Mask(~mask) | imm; #ifdef V8_ENABLE_JIT_CODE_SIGN - TryPatchInstruction(patch_signer, reinterpret_cast<void*>(this), insn); + TryPatchInstruction(patch_signer, reinterpret_cast<void*>(this), insn); #endif SetInstructionBits(insn); } diff --git a/src/codegen/arm64/jit-code-signer-base.cc b/src/codegen/arm64/jit-code-signer-base.cc index 5925e8a3b8351c8a72a403f202181167b2e1debd..2563d2e5c2951346381810949974c64853525b3a 100644 --- a/src/codegen/arm64/jit-code-signer-base.cc +++ b/src/codegen/arm64/jit-code-signer-base.cc @@ -15,7 +15,6 @@ #include
"src/codegen/arm64/jit-code-signer-base.h" #include - namespace v8 { namespace internal { @@ -54,7 +53,7 @@ int JitCodeSignerBase::SignInstruction(void *buffer, Instr insn) return PatchInstruction(buffer, insn); } return CS_SUCCESS; - + } int JitCodeSignerBase::SignData(void *buffer, const void *const data, uint32_t size) @@ -149,7 +148,7 @@ bool JitCodeSignerBase::ConvertPatchOffsetToIndex(const int offset, int &cur_ind if (static_cast(cur_index) >= sign_table_.size()) { #ifdef JIT_CODE_SIGN_DEBUGGABLE LOG_ERROR("Offset is out of range, index = %d, signTable size = %zu", - cur_index, sign_table_.size()); + cur_index, sign_table_.size()); #endif return false; } @@ -165,14 +164,14 @@ int32_t JitCodeSignerBase::CheckDataCopy(Instr *jit_memory, void *tmp_buffer, in return CS_ERR_TMP_BUFFER; } - //update tmp buffer + // update tmp buffer tmp_buffer_ = tmp_buffer; if (((size & UNALIGNMENT_MASK) != 0) || - (static_cast(size) > sign_table_.size() * INSTRUCTION_SIZE)) { + (static_cast(size) > sign_table_.size() * INSTRUCTION_SIZE)) { #ifdef JIT_CODE_SIGN_DEBUGGABLE LOG_ERROR("Range invalid, size = %d, table size = %zu", - size, sign_table_.size()); + size, sign_table_.size()); #endif return CS_ERR_JIT_SIGN_SIZE; } diff --git a/src/codegen/arm64/jit-code-signer-base.h b/src/codegen/arm64/jit-code-signer-base.h index ef820184976750e86a7ab7d8978c942b515e7743..f4537035ff1759f7e692346209e67ccfb01cebb8 100644 --- a/src/codegen/arm64/jit-code-signer-base.h +++ b/src/codegen/arm64/jit-code-signer-base.h @@ -67,7 +67,7 @@ public: int32_t SignData(const void *data, uint32_t size); int32_t PatchInstruction(void *jit_buffer, Instr insn); int32_t PatchData(int offset, const void *const data, uint32_t size); - int32_t PatchData(void *buffer, const void *const data, uint32_t size); + int32_t PatchData(void *buffer, const void *const data, uint32_t size); protected: bool ConvertPatchOffsetToIndex(const int offset, int &cur_index); diff --git a/src/codegen/arm64/jit-code-signer-helper.cc b/src/codegen/arm64/jit-code-signer-helper.cc index 6fa46e311ae6436530c6d1ab1585776f98a90a27..57a0ec4741ed7fa998af77f6f944751ea03163aa 100644 --- a/src/codegen/arm64/jit-code-signer-helper.cc +++ b/src/codegen/arm64/jit-code-signer-helper.cc @@ -133,7 +133,7 @@ void TryValidateCodeCopy(JitCodeSignerBase *jit_code_signer, void *jit_memory, void *tmp_buffer, int size) { if (jit_code_signer != nullptr) { V8_LIKELY(jit_code_signer->ValidateCodeCopy(reinterpret_cast(jit_memory), - tmp_buffer, size)); + tmp_buffer, size)); } } } diff --git a/src/codegen/arm64/jit-code-signer-hybrid.cc b/src/codegen/arm64/jit-code-signer-hybrid.cc index cfaf38fc0d2fbd1d27d037935051f51f231eada7..11b11345ed39c44caf400302221f26e8605b11ad 100644 --- a/src/codegen/arm64/jit-code-signer-hybrid.cc +++ b/src/codegen/arm64/jit-code-signer-hybrid.cc @@ -70,9 +70,9 @@ int32_t JitCodeSignerHybrid::PatchInstruction(int offset, Instr insn) { #ifdef JIT_CODE_SIGN_DEBUGGABLE if (std::find(skipped_offset_.begin(), skipped_offset_.end(), offset) - == skipped_offset_.end()) { - LOG_ERROR("Update no skipped instruction failed at offset" \ - "= %x", offset); + == skipped_offset_.end()) { + LOG_ERROR("Update no skipped instruction failed at offset" \ + "= %x", offset); } #endif int cur_index = 0; @@ -102,7 +102,7 @@ int32_t JitCodeSignerHybrid::ValidateSubCode(Instr *jit_memory, PACSignCtx &veri uint32_t signature = verify_ctx.Update(*insn_ptr); if (signature != sign_table_[index]) { #ifdef JIT_CODE_SIGN_DEBUGGABLE - LOG_ERROR("Validate insn (%8x) failed at offset = %x, "\ + 
LOG_ERROR("Validate insn (%8x) failed at offset = %x, " \ "signature(%x) != wanted(%{pucblic}x)", *(insn_ptr), index * INSTRUCTION_SIZE, signature, sign_table_[index]); #endif @@ -156,7 +156,7 @@ int32_t JitCodeSignerHybrid::ValidateCodeCopy(Instr *jit_memory, if (ValidateSubCode(jit_memory, verify_ctx, tmp_buffer_, offset, size - offset) != CS_SUCCESS) { - return CS_ERR_VALIDATE_CODE; + return CS_ERR_VALIDATE_CODE; } return CS_SUCCESS; } diff --git a/src/codegen/arm64/pac-sign-ctx.h b/src/codegen/arm64/pac-sign-ctx.h index 5fe01840a72037f870fb5c080c13bf442999f0c2..e0aae9a88afd8f8812da311b7094957cf8983f0c 100644 --- a/src/codegen/arm64/pac-sign-ctx.h +++ b/src/codegen/arm64/pac-sign-ctx.h @@ -21,8 +21,8 @@ namespace v8 { namespace internal { enum CTXConfig { - SIGN_AND_AUTH, // auth context when signing - SIGN_NO_AUTH, //no auth context when signing + SIGN_AND_AUTH, // auth context when signing + SIGN_NO_AUTH, // no auth context when signing }; enum ContextType { diff --git a/src/codegen/assembler.cc b/src/codegen/assembler.cc index 1a82d0fd9d0084a4fa5c93a641480097261be2d0..af52a931b67e136160ce02bdcb1be69da3a00973 100644 --- a/src/codegen/assembler.cc +++ b/src/codegen/assembler.cc @@ -96,7 +96,7 @@ class DefaultAssemblerBuffer : public AssemblerBuffer { public: #ifdef V8_ENABLE_JIT_CODE_SIGN explicit DefaultAssemblerBuffer(int size, - std::unique_ptr signer = nullptr) + std::unique_ptr signer = nullptr) : buffer_(base::OwnedVector::NewForOverwrite( std::max(AssemblerBase::kMinimalBufferSize, size))), jit_code_signer_(std::move(signer)) { @@ -166,7 +166,7 @@ class ExternalAssemblerBufferImpl : public AssemblerBuffer { private: byte* const start_; const int size_; - + }; static thread_local std::aligned_storage_tGetJitCodeSigner(), 1); #endif diff --git a/src/codegen/external-reference.cc b/src/codegen/external-reference.cc index 9c38472f10d217003be0122535134cbdf6d00259..6c0fe6c6e08c5d6c63d979389027967b7f93fb45 100644 --- a/src/codegen/external-reference.cc +++ b/src/codegen/external-reference.cc @@ -514,6 +514,15 @@ ExternalReference ExternalReference::address_of_jslimit(Isolate* isolate) { return ExternalReference(address); } +ExternalReference ExternalReference::address_of_no_heap_write_interrupt_request( + Isolate* isolate) { + Address address = isolate->stack_guard()->address_of_interrupt_request( + StackGuard::InterruptLevel::kNoHeapWrites); + // For efficient generated code, this should be root-register-addressable. + DCHECK(isolate->root_register_addressable_region().contains(address)); + return ExternalReference(address); +} + ExternalReference ExternalReference::address_of_real_jslimit(Isolate* isolate) { Address address = isolate->stack_guard()->address_of_real_jslimit(); // For efficient generated code, this should be root-register-addressable. 
diff --git a/src/codegen/external-reference.h b/src/codegen/external-reference.h index 3a5e82832ff1fe3a35fb712eed824a26d4fa1f8d..1a9ba781cc809f01b7bcad500ca89358a4504c8b 100644 --- a/src/codegen/external-reference.h +++ b/src/codegen/external-reference.h @@ -38,6 +38,9 @@ class StatsCounter; V(isolate_root, "Isolate::isolate_root()") \ V(allocation_sites_list_address, "Heap::allocation_sites_list_address()") \ V(address_of_jslimit, "StackGuard::address_of_jslimit()") \ + V(address_of_no_heap_write_interrupt_request, \ + "StackGuard::address_of_interrupt_request(StackGuard::InterruptLevel::" \ + "kNoHeapWrites)") \ V(address_of_real_jslimit, "StackGuard::address_of_real_jslimit()") \ V(heap_is_marking_flag_address, "heap_is_marking_flag_address") \ V(heap_is_minor_marking_flag_address, "heap_is_minor_marking_flag_address") \ diff --git a/src/compiler/backend/instruction-selector.cc b/src/compiler/backend/instruction-selector.cc index 3c485b77e318112488a2dfb3d8a2a7471856e552..9b2e4319425928f210f0a78dd6e75c3a7374144e 100644 --- a/src/compiler/backend/instruction-selector.cc +++ b/src/compiler/backend/instruction-selector.cc @@ -3391,6 +3391,14 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64(Node* node, const int kMaxRecursionDepth = 100; if (node->opcode() == IrOpcode::kPhi) { + // Intermediate results from previous calls are not necessarily correct. + if (recursion_depth == 0) { + static_assert(sizeof(Upper32BitsState) == 1); + memset(phi_states_.data(), + static_cast<int>(Upper32BitsState::kNotYetChecked), + phi_states_.size()); + } + Upper32BitsState current = phi_states_[node->id()]; if (current != Upper32BitsState::kNotYetChecked) { return current == Upper32BitsState::kUpperBitsGuaranteedZero; diff --git a/src/compiler/branch-elimination.cc b/src/compiler/branch-elimination.cc index 7a8d91cf89a870fe177f311fc5df7f73a1037c2e..0e60169f0c0ad092b69728b68558e49bd0e3e74c 100644 --- a/src/compiler/branch-elimination.cc +++ b/src/compiler/branch-elimination.cc @@ -279,8 +279,7 @@ Reduction BranchElimination::ReduceTrapConditional(Node* node) { // graph()->end(). ReplaceWithValue(node, dead(), dead(), dead()); Node* control = graph()->NewNode(common()->Throw(), node, node); - NodeProperties::MergeControlToEnd(graph(), common(), control); - Revisit(graph()->end()); + MergeControlToEnd(graph(), common(), control); return Changed(node); } else { // This will not trap, remove it by relaxing effect/control. @@ -322,9 +321,7 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) { } else { control = graph()->NewNode(common()->Deoptimize(p.reason(), p.feedback()), frame_state, effect, control); - // TODO(bmeurer): This should be on the AdvancedReducer somehow. - NodeProperties::MergeControlToEnd(graph(), common(), control); - Revisit(graph()->end()); + MergeControlToEnd(graph(), common(), control); } return Replace(dead()); } diff --git a/src/compiler/common-operator-reducer.cc b/src/compiler/common-operator-reducer.cc index e190595d6ffd5b451b626958eba0ae7a5d4d35fc..493a8199e7118fc7b436ded5dbdba2c4a112f861 100644 --- a/src/compiler/common-operator-reducer.cc +++ b/src/compiler/common-operator-reducer.cc @@ -172,9 +172,7 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) { } else { control = graph()->NewNode(common()->Deoptimize(p.reason(), p.feedback()), frame_state, effect, control); - // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), control); - Revisit(graph()->end()); + MergeControlToEnd(graph(), common(), control); } return Replace(dead()); } @@ -394,7 +392,7 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) { // the reducer logic will visit {end} again. Node* ret = graph()->NewNode(node->op(), pop_count, value_inputs[i], effect, control_inputs[i]); - NodeProperties::MergeControlToEnd(graph(), common(), ret); + MergeControlToEnd(graph(), common(), ret); } // Mark the Merge {control} and Return {node} as {dead}. Replace(control, dead()); @@ -410,7 +408,7 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) { // the reducer logic will visit {end} again. Node* ret = graph()->NewNode(node->op(), pop_count, value_inputs[i], effect_inputs[i], control_inputs[i]); - NodeProperties::MergeControlToEnd(graph(), common(), ret); + MergeControlToEnd(graph(), common(), ret); } // Mark the Merge {control} and Return {node} as {dead}. Replace(control, dead()); @@ -526,8 +524,7 @@ Reduction CommonOperatorReducer::ReduceTrapConditional(Node* trap) { // graph()->end(). ReplaceWithValue(trap, dead(), dead(), dead()); Node* control = graph()->NewNode(common()->Throw(), trap, trap); - NodeProperties::MergeControlToEnd(graph(), common(), control); - Revisit(graph()->end()); + MergeControlToEnd(graph(), common(), control); return Changed(trap); } else { // This will not trap, remove it by relaxing effect/control. diff --git a/src/compiler/dead-code-elimination.cc b/src/compiler/dead-code-elimination.cc index 170b3d89d63c477f4a91b878c4ed6989713a73ff..fbbf356ba4859ddb0ea6b7327736ea5640c0dbcd 100644 --- a/src/compiler/dead-code-elimination.cc +++ b/src/compiler/dead-code-elimination.cc @@ -247,11 +247,10 @@ Reduction DeadCodeElimination::ReduceEffectPhi(Node* node) { // phi nodes. Node* control = NodeProperties::GetControlInput(merge, i); Node* throw_node = graph_->NewNode(common_->Throw(), effect, control); - NodeProperties::MergeControlToEnd(graph_, common_, throw_node); + MergeControlToEnd(graph_, common_, throw_node); NodeProperties::ReplaceEffectInput(node, dead_, i); NodeProperties::ReplaceControlInput(merge, dead_, i); Revisit(merge); - Revisit(graph_->end()); reduction = Changed(node); } } diff --git a/src/compiler/graph-reducer.h b/src/compiler/graph-reducer.h index 0877945bb539e557a907978441583fac5361a534..ae8def719f3c66c26f0de583861f3e69857025a8 100644 --- a/src/compiler/graph-reducer.h +++ b/src/compiler/graph-reducer.h @@ -6,7 +6,9 @@ #define V8_COMPILER_GRAPH_REDUCER_H_ #include "src/base/compiler-specific.h" +#include "src/compiler/graph.h" #include "src/compiler/node-marker.h" +#include "src/compiler/node-properties.h" #include "src/zone/zone-containers.h" namespace v8 { @@ -133,6 +135,12 @@ class AdvancedReducer : public Reducer { ReplaceWithValue(node, node, node, nullptr); } + void MergeControlToEnd(Graph* graph, CommonOperatorBuilder* common, + Node* node) { + NodeProperties::MergeControlToEnd(graph, common, node); + Revisit(graph->end()); + } + private: Editor* const editor_; }; diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc index b6eca3f86f9ab0e8b74825ffcbbfcf8f1484ba4b..8e392f66be3016689cd30f52b439732f9d2151f0 100644 --- a/src/compiler/js-call-reducer.cc +++ b/src/compiler/js-call-reducer.cc @@ -3230,7 +3230,7 @@ Reduction JSCallReducer::ReduceReflectGet(Node* node) { // Connect the throwing path to end. 
if_false = graph()->NewNode(common()->Throw(), efalse, if_false); - NodeProperties::MergeControlToEnd(graph(), common(), if_false); + MergeControlToEnd(graph(), common(), if_false); // Continue on the regular path. ReplaceWithValue(node, vtrue, etrue, if_true); @@ -3296,7 +3296,7 @@ Reduction JSCallReducer::ReduceReflectHas(Node* node) { // Connect the throwing path to end. if_false = graph()->NewNode(common()->Throw(), efalse, if_false); - NodeProperties::MergeControlToEnd(graph(), common(), if_false); + MergeControlToEnd(graph(), common(), if_false); // Continue on the regular path. ReplaceWithValue(node, vtrue, etrue, if_true); @@ -4007,7 +4007,7 @@ void JSCallReducer::CheckIfConstructor(Node* construct) { // simply connect the successful completion to the graph end. Node* throw_node = graph()->NewNode(common()->Throw(), check_throw, check_fail); - NodeProperties::MergeControlToEnd(graph(), common(), throw_node); + MergeControlToEnd(graph(), common(), throw_node); } namespace { @@ -5549,9 +5549,7 @@ Reduction JSCallReducer::ReduceForInsufficientFeedback( Node* deoptimize = graph()->NewNode(common()->Deoptimize(reason, FeedbackSource()), frame_state, effect, control); - // TODO(bmeurer): This should be on the AdvancedReducer somehow. - NodeProperties::MergeControlToEnd(graph(), common(), deoptimize); - Revisit(graph()->end()); + MergeControlToEnd(graph(), common(), deoptimize); node->TrimInputCount(0); NodeProperties::ChangeOp(node, common()->Dead()); return Changed(node); @@ -5935,7 +5933,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) { Node* eloop = graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop); Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop); - NodeProperties::MergeControlToEnd(graph(), common(), terminate); + MergeControlToEnd(graph(), common(), terminate); Node* index = graph()->NewNode( common()->Phi(MachineRepresentation::kTagged, 2), @@ -7653,7 +7651,7 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext( Node* eloop = effect = graph()->NewNode(common()->EffectPhi(2), effect, effect, loop); Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop); - NodeProperties::MergeControlToEnd(graph(), common(), terminate); + MergeControlToEnd(graph(), common(), terminate); // Check if reached the final table of the {receiver}. 
Node* table = effect = graph()->NewNode( @@ -7749,7 +7747,7 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext( Node* eloop = graph()->NewNode(common()->EffectPhi(2), effect, effect, loop); Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop); - NodeProperties::MergeControlToEnd(graph(), common(), terminate); + MergeControlToEnd(graph(), common(), terminate); Node* iloop = graph()->NewNode( common()->Phi(MachineRepresentation::kTagged, 2), index, index, loop); diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc index 794884520c439737d9a4beec2baa14bcc2073612..81fae7ddfa3329e095963d7d44d64af213d0ae4d 100644 --- a/src/compiler/js-generic-lowering.cc +++ b/src/compiler/js-generic-lowering.cc @@ -1141,15 +1141,28 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) { Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); - Node* limit = effect = - graph()->NewNode(machine()->Load(MachineType::Pointer()), - jsgraph()->ExternalConstant( - ExternalReference::address_of_jslimit(isolate())), - jsgraph()->IntPtrConstant(0), effect, control); - StackCheckKind stack_check_kind = StackCheckKindOfJSStackCheck(node->op()); - Node* check = effect = graph()->NewNode( - machine()->StackPointerGreaterThan(stack_check_kind), limit, effect); + + Node* check; + if (stack_check_kind == StackCheckKind::kJSIterationBody) { + check = effect = graph()->NewNode( + machine()->Load(MachineType::Uint8()), + jsgraph()->ExternalConstant( + ExternalReference::address_of_no_heap_write_interrupt_request( + isolate())), + jsgraph()->IntPtrConstant(0), effect, control); + check = graph()->NewNode(machine()->Word32Equal(), check, + jsgraph()->Int32Constant(0)); + } else { + Node* limit = effect = + graph()->NewNode(machine()->Load(MachineType::Pointer()), + jsgraph()->ExternalConstant( + ExternalReference::address_of_jslimit(isolate())), + jsgraph()->IntPtrConstant(0), effect, control); + + check = effect = graph()->NewNode( + machine()->StackPointerGreaterThan(stack_check_kind), limit, effect); + } Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control); @@ -1193,6 +1206,8 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) { node->InsertInput(zone(), 0, graph()->NewNode(machine()->LoadStackCheckOffset())); ReplaceWithRuntimeCall(node, Runtime::kStackGuardWithGap); + } else if (stack_check_kind == StackCheckKind::kJSIterationBody) { + ReplaceWithRuntimeCall(node, Runtime::kHandleNoHeapWritesInterrupts); } else { ReplaceWithRuntimeCall(node, Runtime::kStackGuard); } diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc index 3f70ece4909392cf32a0f761fa7801f6964de642..ce67b8704f06d6e10f95888e5720a79adfbebda3 100644 --- a/src/compiler/js-inlining.cc +++ b/src/compiler/js-inlining.cc @@ -210,8 +210,7 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context, case IrOpcode::kDeoptimize: case IrOpcode::kTerminate: case IrOpcode::kThrow: - NodeProperties::MergeControlToEnd(graph(), common(), input); - Revisit(graph()->end()); + MergeControlToEnd(graph(), common(), input); break; default: UNREACHABLE(); @@ -795,7 +794,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) { branch_is_receiver_false = graph()->NewNode(common()->Throw(), branch_is_receiver_false, branch_is_receiver_false); - NodeProperties::MergeControlToEnd(graph(), common(), + MergeControlToEnd(graph(), common(), branch_is_receiver_false); ReplaceWithValue(node_success, 
node_success, node_success, diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc index a628f66f9c2e4d9d492b2f34019e42f24361334a..729cc80c5dbfb15e75d4d66310cb713fd140121f 100644 --- a/src/compiler/js-intrinsic-lowering.cc +++ b/src/compiler/js-intrinsic-lowering.cc @@ -124,12 +124,10 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) { Node* const effect = NodeProperties::GetEffectInput(node); Node* const control = NodeProperties::GetControlInput(node); - // TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer. Node* deoptimize = graph()->NewNode( common()->Deoptimize(DeoptimizeReason::kDeoptimizeNow, FeedbackSource()), frame_state, effect, control); - NodeProperties::MergeControlToEnd(graph(), common(), deoptimize); - Revisit(graph()->end()); + MergeControlToEnd(graph(), common(), deoptimize); node->TrimInputCount(0); NodeProperties::ChangeOp(node, common()->Dead()); diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc index fe7254ba11f3feb62a90a2929a65f74dbf483500..75b3ba8b988c5b4844e04ec168396d6b46c0aa20 100644 --- a/src/compiler/js-native-context-specialization.cc +++ b/src/compiler/js-native-context-specialization.cc @@ -1953,7 +1953,7 @@ Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) { } Node* throw_node = graph()->NewNode(common()->Throw(), call_runtime, control_not_iterator); - NodeProperties::MergeControlToEnd(graph(), common(), throw_node); + MergeControlToEnd(graph(), common(), throw_node); } control = graph()->NewNode(common()->IfFalse(), branch); @@ -2022,7 +2022,7 @@ Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) { } Node* throw_node = graph()->NewNode(common()->Throw(), call_runtime, control_not_receiver); - NodeProperties::MergeControlToEnd(graph(), common(), throw_node); + MergeControlToEnd(graph(), common(), throw_node); } Node* if_receiver = graph()->NewNode(common()->IfTrue(), branch_node); ReplaceWithValue(node, call_property, effect, if_receiver); @@ -2519,9 +2519,8 @@ Reduction JSNativeContextSpecialization::ReduceEagerDeoptimize( Node* deoptimize = graph()->NewNode(common()->Deoptimize(reason, FeedbackSource()), frame_state, effect, control); - // TODO(bmeurer): This should be on the AdvancedReducer somehow. - NodeProperties::MergeControlToEnd(graph(), common(), deoptimize); - Revisit(graph()->end()); + + MergeControlToEnd(graph(), common(), deoptimize); node->TrimInputCount(0); NodeProperties::ChangeOp(node, common()->Dead()); return Changed(node); diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc index 08b1dba801074327874117332187bd40d07f5f2b..10ad530b39c9558eb25b16b81bb956482a504aaf 100644 --- a/src/compiler/js-operator.cc +++ b/src/compiler/js-operator.cc @@ -10,6 +10,7 @@ #include "src/compiler/js-graph.h" #include "src/compiler/js-heap-broker.h" #include "src/compiler/node-matchers.h" +#include "src/compiler/operator-properties.h" #include "src/compiler/operator.h" #include "src/handles/handles-inl.h" #include "src/objects/objects-inl.h" diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc index a940783e47d7d01e132f60484207930ef1f31f10..69ab2bbc5ad4aaf9fa319253d15b47f1d6e7241c 100644 --- a/src/compiler/js-typed-lowering.cc +++ b/src/compiler/js-typed-lowering.cc @@ -660,9 +660,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) { // throw, making it impossible to return a successful completion in this // case. 
We simply connect the successful completion to the graph end. if_false = graph()->NewNode(common()->Throw(), efalse, if_false); - // TODO(bmeurer): This should be on the AdvancedReducer somehow. - NodeProperties::MergeControlToEnd(graph(), common(), if_false); - Revisit(graph()->end()); + MergeControlToEnd(graph(), common(), if_false); } control = graph()->NewNode(common()->IfTrue(), branch); length = effect = @@ -1280,7 +1278,7 @@ Reduction JSTypedLowering::ReduceJSHasInPrototypeChain(Node* node) { Node* eloop = effect = graph()->NewNode(common()->EffectPhi(2), effect, effect, loop); Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop); - NodeProperties::MergeControlToEnd(graph(), common(), terminate); + MergeControlToEnd(graph(), common(), terminate); Node* vloop = value = graph()->NewNode( common()->Phi(MachineRepresentation::kTagged, 2), value, value, loop); NodeProperties::SetType(vloop, Type::NonInternal()); diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc index 5f02ab86b07cf901b5c3566869679b06861a9608..07fd4495e4f47ec9f2e5ce208722622925c67a89 100644 --- a/src/compiler/linkage.cc +++ b/src/compiler/linkage.cc @@ -249,11 +249,14 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone, if (!info->closure().is_null()) { // If we are compiling a JS function, use a JS call descriptor, // plus the receiver. - SharedFunctionInfo shared = info->closure()->shared(); - return GetJSCallDescriptor( - zone, info->is_osr(), - shared.internal_formal_parameter_count_with_receiver(), - CallDescriptor::kCanUseRoots); + DCHECK(info->has_bytecode_array()); + DCHECK_EQ(info->closure() + ->shared() + .internal_formal_parameter_count_with_receiver(), + info->bytecode_array()->parameter_count()); + return GetJSCallDescriptor(zone, info->is_osr(), + info->bytecode_array()->parameter_count(), + CallDescriptor::kCanUseRoots); } return nullptr; // TODO(titzer): ? 
} diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc index 9eb44f180f17f0c061cbad06c0b508b63030d9d6..987037ca481c7eaeec1bd2dc638df76a88dfdfe5 100644 --- a/src/compiler/machine-operator.cc +++ b/src/compiler/machine-operator.cc @@ -1443,7 +1443,6 @@ struct MachineOperatorGlobalCache { StackPointerGreaterThan##Kind##Operator kStackPointerGreaterThan##Kind; STACK_POINTER_GREATER_THAN(JSFunctionEntry) - STACK_POINTER_GREATER_THAN(JSIterationBody) STACK_POINTER_GREATER_THAN(CodeStubAssembler) STACK_POINTER_GREATER_THAN(Wasm) #undef STACK_POINTER_GREATER_THAN @@ -1814,12 +1813,12 @@ const Operator* MachineOperatorBuilder::StackPointerGreaterThan( switch (kind) { case StackCheckKind::kJSFunctionEntry: return &cache_.kStackPointerGreaterThanJSFunctionEntry; - case StackCheckKind::kJSIterationBody: - return &cache_.kStackPointerGreaterThanJSIterationBody; case StackCheckKind::kCodeStubAssembler: return &cache_.kStackPointerGreaterThanCodeStubAssembler; case StackCheckKind::kWasm: return &cache_.kStackPointerGreaterThanWasm; + case StackCheckKind::kJSIterationBody: + UNREACHABLE(); } UNREACHABLE(); } diff --git a/src/compiler/operation-typer.cc b/src/compiler/operation-typer.cc index bd57b79ed4906d51fe1c5fd8c15f1ca4164ca7c8..55049efaa16cd0e8ee63794fdd0fe23873c02563 100644 --- a/src/compiler/operation-typer.cc +++ b/src/compiler/operation-typer.cc @@ -1261,6 +1261,7 @@ Type JSType(Type type) { } // namespace Type OperationTyper::SameValue(Type lhs, Type rhs) { + if (lhs.IsNone() || rhs.IsNone()) return Type::None(); if (!JSType(lhs).Maybe(JSType(rhs))) return singleton_false(); if (lhs.Is(Type::NaN())) { if (rhs.Is(Type::NaN())) return singleton_true(); diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc index ed017f0c27412aa7375da949f8dfd483777071ed..cf956cb9608535fedf3699c9a019212664f4d2be 100644 --- a/src/compiler/pipeline.cc +++ b/src/compiler/pipeline.cc @@ -2167,10 +2167,11 @@ struct WasmGCOptimizationPhase { temp_zone); WasmGCOperatorReducer wasm_gc(&graph_reducer, temp_zone, mcgraph, module, data->source_positions()); - // Note: if we want to add DeadCodeElimination here, we'll have to update - // the existing reducers to handle kDead and kDeadValue nodes everywhere. 
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), + data->common(), temp_zone); AddReducer(data, &graph_reducer, &load_elimination); AddReducer(data, &graph_reducer, &wasm_gc); + AddReducer(data, &graph_reducer, &dead_code_elimination); graph_reducer.ReduceGraph(); } }; diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc index aada8a8cfbcb46ceb57c71509066e61f1ac26111..6fdf51694c55e5b8e9edb4fa093d485998878ba1 100644 --- a/src/compiler/simplified-lowering.cc +++ b/src/compiler/simplified-lowering.cc @@ -2021,6 +2021,107 @@ class RepresentationSelector { SetOutput(node, MachineRepresentation::kTagged); } + template <Phase T> + bool TryOptimizeBigInt64Shift(Node* node, const Truncation& truncation, + SimplifiedLowering* lowering) { + DCHECK(Is64()); + if (!truncation.IsUsedAsWord64()) return false; + + Type input_type = GetUpperBound(node->InputAt(0)); + Type shift_amount_type = GetUpperBound(node->InputAt(1)); + + if (!shift_amount_type.IsHeapConstant()) return false; + HeapObjectRef ref = shift_amount_type.AsHeapConstant()->Ref(); + if (!ref.IsBigInt()) return false; + BigIntRef bigint = ref.AsBigInt(); + bool lossless = false; + int64_t shift_amount = bigint.AsInt64(&lossless); + // We bail out if we cannot represent the shift amount correctly. + if (!lossless) return false; + + // Canonicalize {shift_amount}. + bool is_shift_left = + node->opcode() == IrOpcode::kSpeculativeBigIntShiftLeft; + if (shift_amount < 0) { + // A shift amount of abs(std::numeric_limits<int64_t>::min()) is not + // representable. + if (shift_amount == std::numeric_limits<int64_t>::min()) return false; + is_shift_left = !is_shift_left; + shift_amount = -shift_amount; + DCHECK_GT(shift_amount, 0); + } + DCHECK_GE(shift_amount, 0); + + // If the operation is a *real* left shift, propagate truncation. + // If it is a *real* right shift, the output representation is + // word64 only if we know the input type is BigInt64. + // Otherwise, fall through to using BigIntOperationHint.
+ if (is_shift_left) { + VisitBinop<T>(node, + UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}), + UseInfo::Any(), MachineRepresentation::kWord64); + if (lower()) { + if (shift_amount > 63) { + DeferReplacement(node, jsgraph_->Int64Constant(0)); + } else if (shift_amount == 0) { + DeferReplacement(node, node->InputAt(0)); + } else { + DCHECK_GE(shift_amount, 1); + DCHECK_LE(shift_amount, 63); + ReplaceWithPureNode( + node, graph()->NewNode(lowering->machine()->Word64Shl(), + node->InputAt(0), + jsgraph_->Int64Constant(shift_amount))); + } + } + return true; + } else if (input_type.Is(Type::SignedBigInt64())) { + VisitBinop<T>(node, + UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}), + UseInfo::Any(), MachineRepresentation::kWord64); + if (lower()) { + if (shift_amount > 63) { + ReplaceWithPureNode( + node, + graph()->NewNode(lowering->machine()->Word64Sar(), + node->InputAt(0), jsgraph_->Int64Constant(63))); + } else if (shift_amount == 0) { + DeferReplacement(node, node->InputAt(0)); + } else { + DCHECK_GE(shift_amount, 1); + DCHECK_LE(shift_amount, 63); + ReplaceWithPureNode( + node, graph()->NewNode(lowering->machine()->Word64Sar(), + node->InputAt(0), + jsgraph_->Int64Constant(shift_amount))); + } + } + return true; + } else if (input_type.Is(Type::UnsignedBigInt64())) { + VisitBinop<T>(node, + UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}), + UseInfo::Any(), MachineRepresentation::kWord64); + if (lower()) { + if (shift_amount > 63) { + DeferReplacement(node, jsgraph_->Int64Constant(0)); + } else if (shift_amount == 0) { + DeferReplacement(node, node->InputAt(0)); + } else { + DCHECK_GE(shift_amount, 1); + DCHECK_LE(shift_amount, 63); + ReplaceWithPureNode( + node, graph()->NewNode(lowering->machine()->Word64Shr(), + node->InputAt(0), + jsgraph_->Int64Constant(shift_amount))); + } + } + return true; + } + + // None of the cases above applies, so we cannot optimize here. + return false; + } + #if V8_ENABLE_WEBASSEMBLY static MachineType MachineTypeForWasmReturnType(wasm::ValueType type) { switch (type.kind()) { @@ -3385,111 +3486,18 @@ class RepresentationSelector { VisitUnused(node); return; } - if (truncation.IsUsedAsWord64()) { - Type input_type = GetUpperBound(node->InputAt(0)); - Type shift_amount_type = GetUpperBound(node->InputAt(1)); - - if (shift_amount_type.IsHeapConstant()) { - HeapObjectRef ref = shift_amount_type.AsHeapConstant()->Ref(); - if (ref.IsBigInt()) { - BigIntRef bigint = ref.AsBigInt(); - bool lossless = false; - int64_t shift_amount = bigint.AsInt64(&lossless); - - // Canonicalize {shift_amount}. - bool is_shift_left = - node->opcode() == IrOpcode::kSpeculativeBigIntShiftLeft; - if (shift_amount < 0) { - is_shift_left = !is_shift_left; - shift_amount = -shift_amount; - } - DCHECK_GE(shift_amount, 0); - - // If the operation is a *real* left shift, propagate truncation. - // If it is a *real* right shift, the output representation is - // word64 only if we know the input type is BigInt64. - // Otherwise, fall through to using BigIntOperationHint.
- if (is_shift_left) { - VisitBinop( - node, - UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}), - UseInfo::Any(), MachineRepresentation::kWord64); - if (lower()) { - if (!lossless || shift_amount > 63) { - DeferReplacement(node, jsgraph_->Int64Constant(0)); - } else if (shift_amount == 0) { - DeferReplacement(node, node->InputAt(0)); - } else { - DCHECK_GE(shift_amount, 1); - DCHECK_LE(shift_amount, 63); - ReplaceWithPureNode( - node, - graph()->NewNode( - lowering->machine()->Word64Shl(), node->InputAt(0), - jsgraph_->Int64Constant(shift_amount))); - } - } - return; - } else if (input_type.Is(Type::SignedBigInt64())) { - VisitBinop(node, UseInfo::Word64(), UseInfo::Any(), - MachineRepresentation::kWord64); - if (lower()) { - if (!lossless || shift_amount > 63) { - ReplaceWithPureNode( - node, graph()->NewNode(lowering->machine()->Word64Sar(), - node->InputAt(0), - jsgraph_->Int64Constant(63))); - } else if (shift_amount == 0) { - DeferReplacement(node, node->InputAt(0)); - } else { - DCHECK_GE(shift_amount, 1); - DCHECK_LE(shift_amount, 63); - ReplaceWithPureNode( - node, - graph()->NewNode( - lowering->machine()->Word64Sar(), node->InputAt(0), - jsgraph_->Int64Constant(shift_amount))); - } - } - return; - } else if (input_type.Is(Type::UnsignedBigInt64())) { - VisitBinop(node, UseInfo::Word64(), UseInfo::Any(), - MachineRepresentation::kWord64); - if (lower()) { - if (!lossless || shift_amount > 63) { - DeferReplacement(node, jsgraph_->Int64Constant(0)); - } else if (shift_amount == 0) { - DeferReplacement(node, node->InputAt(0)); - } else { - DCHECK_GE(shift_amount, 1); - DCHECK_LE(shift_amount, 63); - ReplaceWithPureNode( - node, - graph()->NewNode( - lowering->machine()->Word64Shr(), node->InputAt(0), - jsgraph_->Int64Constant(shift_amount))); - } - } - return; - } - } - } + if (TryOptimizeBigInt64Shift(node, truncation, lowering)) { + return; } - BigIntOperationHint hint = BigIntOperationHintOf(node->op()); - switch (hint) { - case BigIntOperationHint::kBigInt64: - // Do not collect or use BigInt64 feedback for shift operations. 
- UNREACHABLE(); - case BigIntOperationHint::kBigInt: { - VisitBinop( - node, UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}), - MachineRepresentation::kTaggedPointer); - if (lower()) { - ChangeOp(node, BigIntOp(node)); - } - return; - } + DCHECK_EQ(BigIntOperationHintOf(node->op()), + BigIntOperationHint::kBigInt); + VisitBinop(node, + UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}), + MachineRepresentation::kTaggedPointer); + if (lower()) { + ChangeOp(node, BigIntOp(node)); } + return; } case IrOpcode::kSpeculativeBigIntEqual: case IrOpcode::kSpeculativeBigIntLessThan: diff --git a/src/compiler/wasm-gc-operator-reducer.cc b/src/compiler/wasm-gc-operator-reducer.cc index ded568a5671d4e1aa077bb2d707cad20bff545b6..1fbf9a59f21c9b175e6f958e46e4a6cabb05cdea 100644 --- a/src/compiler/wasm-gc-operator-reducer.cc +++ b/src/compiler/wasm-gc-operator-reducer.cc @@ -191,6 +191,7 @@ Reduction WasmGCOperatorReducer::ReduceIf(Node* node, bool condition) { DCHECK(node->opcode() == IrOpcode::kIfTrue || node->opcode() == IrOpcode::kIfFalse); Node* branch = NodeProperties::GetControlInput(node); + if (branch->opcode() == IrOpcode::kDead) return NoChange(); DCHECK_EQ(branch->opcode(), IrOpcode::kBranch); if (!IsReduced(branch)) return NoChange(); ControlPathTypes parent_state = GetState(branch); @@ -317,11 +318,9 @@ Reduction WasmGCOperatorReducer::ReduceCheckNull(Node* node) { Reduction WasmGCOperatorReducer::ReduceWasmExternInternalize(Node* node) { DCHECK_EQ(node->opcode(), IrOpcode::kWasmExternInternalize); // Remove redundant extern.internalize(extern.externalize(...)) pattern. - // TODO(mliedtke): Currently this doesn't get fully removed, probably due to - // not running dead code elimination in this pipeline step. What would it cost - // us to run it here? - if (NodeProperties::GetValueInput(node, 0)->opcode() == - IrOpcode::kWasmExternExternalize) { + Node* object = NodeProperties::GetValueInput(node, 0); + if (object->opcode() == IrOpcode::kDead) return NoChange(); + if (object->opcode() == IrOpcode::kWasmExternExternalize) { Node* externalize = node->InputAt(0); Node* input = externalize->InputAt(0); ReplaceWithValue(node, input); diff --git a/src/compiler/wasm-inlining.cc b/src/compiler/wasm-inlining.cc index e6820b1081e0bd039296fb5db4a2f95559eca93f..7d9cb8a065b97c72aac8f04f334f2cdbdaff6719 100644 --- a/src/compiler/wasm-inlining.cc +++ b/src/compiler/wasm-inlining.cc @@ -287,7 +287,7 @@ void WasmInliner::InlineTailCall(Node* call, Node* callee_start, // inlined graph to the end of the caller graph. 
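// Note: the kDead early-outs added in the wasm reducers above exist because
// the wasm pipeline phase at the top of this patch now runs
// DeadCodeElimination in the same GraphReducer; a node's inputs can be
// replaced by Dead while other reducers still have the node queued. The
// recurring guard pattern is:

Node* input = NodeProperties::GetValueInput(node, 0);
if (input->opcode() == IrOpcode::kDead) return NoChange();

// Without it, a reducer could read types or operators off a node that is no
// longer part of the live graph.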
for (Node* const input : callee_end->inputs()) { DCHECK(IrOpcode::IsGraphTerminator(input->opcode())); - NodeProperties::MergeControlToEnd(graph(), common(), input); + MergeControlToEnd(graph(), common(), input); } for (Edge edge_to_end : call->use_edges()) { DCHECK_EQ(edge_to_end.from(), graph()->end()); @@ -321,8 +321,7 @@ void WasmInliner::InlineCall(Node* call, Node* callee_start, Node* callee_end, case IrOpcode::kDeoptimize: case IrOpcode::kTerminate: case IrOpcode::kThrow: - NodeProperties::MergeControlToEnd(graph(), common(), input); - Revisit(graph()->end()); + MergeControlToEnd(graph(), common(), input); break; case IrOpcode::kTailCall: { // A tail call in the callee inlined in a regular call in the caller has diff --git a/src/compiler/wasm-load-elimination.cc b/src/compiler/wasm-load-elimination.cc index 7292d61d74cbeb469539b2bb6d5836824488e3ba..1feffb84a9be3a0dfbe801ea55a05abd7ec2817b 100644 --- a/src/compiler/wasm-load-elimination.cc +++ b/src/compiler/wasm-load-elimination.cc @@ -146,6 +146,7 @@ Reduction WasmLoadElimination::ReduceWasmStructGet(Node* node) { Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); + if (object->opcode() == IrOpcode::kDead) return NoChange(); AbstractState const* state = node_states_.Get(effect); if (state == nullptr) return NoChange(); @@ -168,16 +169,11 @@ Reduction WasmLoadElimination::ReduceWasmStructGet(Node* node) { !(is_mutable ? state->immutable_state : state->mutable_state) .LookupField(field_info.field_index, object) .IsEmpty()) { - Node* unreachable = - graph()->NewNode(jsgraph()->common()->Unreachable(), effect, control); - MachineRepresentation rep = - field_info.type->field(field_info.field_index).machine_representation(); - Node* dead_value = - graph()->NewNode(jsgraph()->common()->DeadValue(rep), unreachable); - NodeProperties::SetType(dead_value, NodeProperties::GetType(node)); - ReplaceWithValue(node, dead_value, unreachable, control); + ReplaceWithValue(node, dead(), dead(), dead()); + MergeControlToEnd(graph(), common(), + graph()->NewNode(common()->Throw(), effect, control)); node->Kill(); - return Replace(dead_value); + return Replace(dead()); } // If the input type is not (ref null? none) or bottom and we don't have type // inconsistencies, then the result type must be valid. @@ -217,6 +213,7 @@ Reduction WasmLoadElimination::ReduceWasmStructSet(Node* node) { Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); + if (object->opcode() == IrOpcode::kDead) return NoChange(); AbstractState const* state = node_states_.Get(effect); if (state == nullptr) return NoChange(); @@ -240,11 +237,11 @@ Reduction WasmLoadElimination::ReduceWasmStructSet(Node* node) { !(is_mutable ? 
state->immutable_state : state->mutable_state) .LookupField(field_info.field_index, object) .IsEmpty()) { - Node* unreachable = - graph()->NewNode(jsgraph()->common()->Unreachable(), effect, control); - ReplaceWithValue(node, unreachable, unreachable, control); + ReplaceWithValue(node, dead(), dead(), dead()); + MergeControlToEnd(graph(), common(), + graph()->NewNode(common()->Throw(), effect, control)); node->Kill(); - return Replace(unreachable); + return Replace(dead()); } if (is_mutable) { @@ -302,6 +299,7 @@ Reduction WasmLoadElimination::ReduceWasmArrayInitializeLength(Node* node) { Node* value = NodeProperties::GetValueInput(node, 1); Node* effect = NodeProperties::GetEffectInput(node); + if (object->opcode() == IrOpcode::kDead) return NoChange(); AbstractState const* state = node_states_.Get(effect); if (state == nullptr) return NoChange(); @@ -321,6 +319,7 @@ Reduction WasmLoadElimination::ReduceStringPrepareForGetCodeunit(Node* node) { Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); + if (object->opcode() == IrOpcode::kDead) return NoChange(); AbstractState const* state = node_states_.Get(effect); if (state == nullptr) return NoChange(); @@ -484,6 +483,12 @@ WasmLoadElimination::AbstractState const* WasmLoadElimination::ComputeLoopState( if (visited.insert(current).second) { if (current->opcode() == IrOpcode::kWasmStructSet) { Node* object = NodeProperties::GetValueInput(current, 0); + if (object->opcode() == IrOpcode::kDead || + object->opcode() == IrOpcode::kDeadValue) { + // We are in dead code. Bail out with no mutable state. + return zone()->New(HalfState(zone()), + state->immutable_state); + } WasmFieldInfo field_info = OpParameter(current->op()); bool is_mutable = field_info.type->mutability(field_info.field_index); if (is_mutable) { @@ -529,6 +534,7 @@ WasmLoadElimination::WasmLoadElimination(Editor* editor, JSGraph* jsgraph, empty_state_(zone), node_states_(jsgraph->graph()->NodeCount(), zone), jsgraph_(jsgraph), + dead_(jsgraph->Dead()), zone_(zone) {} CommonOperatorBuilder* WasmLoadElimination::common() const { diff --git a/src/compiler/wasm-load-elimination.h b/src/compiler/wasm-load-elimination.h index 76be09d27c5ef467639c5800fd86ad830b8a7347..b50a02ca213d1fed28d2383eaeece776ffa17e3b 100644 --- a/src/compiler/wasm-load-elimination.h +++ b/src/compiler/wasm-load-elimination.h @@ -141,12 +141,14 @@ class V8_EXPORT_PRIVATE WasmLoadElimination final Isolate* isolate() const; Graph* graph() const; JSGraph* jsgraph() const { return jsgraph_; } + Node* dead() const { return dead_; } Zone* zone() const { return zone_; } AbstractState const* empty_state() const { return &empty_state_; } AbstractState const empty_state_; NodeAuxData node_states_; JSGraph* const jsgraph_; + Node* dead_; Zone* zone_; }; diff --git a/src/debug/debug-evaluate.cc b/src/debug/debug-evaluate.cc index 4068b081220994652469764d402bf0d716d70b86..c43b90dcfb87ba9c81a34e940d9983a169de1ab3 100644 --- a/src/debug/debug-evaluate.cc +++ b/src/debug/debug-evaluate.cc @@ -385,6 +385,7 @@ bool DebugEvaluate::IsSideEffectFreeIntrinsic(Runtime::FunctionId id) { V(ObjectIsExtensible) \ V(RegExpInitializeAndCompile) \ V(StackGuard) \ + V(HandleNoHeapWritesInterrupts) \ V(StringAdd) \ V(StringCharCodeAt) \ V(StringEqual) \ diff --git a/src/debug/debug-scopes.cc b/src/debug/debug-scopes.cc index 18425b96cd54979c1c7d6bb51f5cf70bf0aeef2e..944bf5f4ee31e99a1374eddfa5699bfbd7d077c0 100644 --- a/src/debug/debug-scopes.cc +++ b/src/debug/debug-scopes.cc @@ -954,7 
+954,9 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode, // a proxy, return an empty object. Handle ScopeIterator::WithContextExtension() { DCHECK(context_->IsWithContext()); - if (context_->extension_receiver().IsJSProxy()) { + if (!context_->extension_receiver().IsJSObject()) { + DCHECK((context_->extension_receiver().IsJSProxy()) || + (context_->extension_receiver().IsWasmObject())); return isolate_->factory()->NewSlowJSObjectWithNullProto(); } return handle(JSObject::cast(context_->extension_receiver()), isolate_); diff --git a/src/execution/stack-guard.cc b/src/execution/stack-guard.cc index ff64beb8b22cc92c2710800c0b2c41fd200a3170..eb7f5794d3c29209caebb49c170b10e630a6465f 100644 --- a/src/execution/stack-guard.cc +++ b/src/execution/stack-guard.cc @@ -26,16 +26,22 @@ namespace v8 { namespace internal { -void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) { +void StackGuard::update_interrupt_requests_and_stack_limits( + const ExecutionAccess& lock) { DCHECK_NOT_NULL(isolate_); - thread_local_.set_jslimit(kInterruptLimit); - thread_local_.set_climit(kInterruptLimit); -} - -void StackGuard::reset_limits(const ExecutionAccess& lock) { - DCHECK_NOT_NULL(isolate_); - thread_local_.set_jslimit(thread_local_.real_jslimit_); - thread_local_.set_climit(thread_local_.real_climit_); + if (has_pending_interrupts(lock)) { + thread_local_.set_jslimit(kInterruptLimit); + thread_local_.set_climit(kInterruptLimit); + } else { + thread_local_.set_jslimit(thread_local_.real_jslimit_); + thread_local_.set_climit(thread_local_.real_climit_); + } + for (InterruptLevel level : + std::array{InterruptLevel::kNoGC, InterruptLevel::kNoHeapWrites, + InterruptLevel::kAnyEffect}) { + thread_local_.set_interrupt_requested( + level, InterruptLevelMask(level) & thread_local_.interrupt_flags_); + } } void StackGuard::SetStackLimit(uintptr_t limit) { @@ -64,18 +70,6 @@ void StackGuard::AdjustStackLimitForSimulator() { } } -void StackGuard::EnableInterrupts() { - ExecutionAccess access(isolate_); - if (has_pending_interrupts(access)) { - set_interrupt_limits(access); - } -} - -void StackGuard::DisableInterrupts() { - ExecutionAccess access(isolate_); - reset_limits(access); -} - void StackGuard::PushInterruptsScope(InterruptsScope* scope) { ExecutionAccess access(isolate_); DCHECK_NE(scope->mode_, InterruptsScope::kNoop); @@ -96,9 +90,8 @@ void StackGuard::PushInterruptsScope(InterruptsScope* scope) { } thread_local_.interrupt_flags_ |= restored_flags; - if (has_pending_interrupts(access)) set_interrupt_limits(access); } - if (!has_pending_interrupts(access)) reset_limits(access); + update_interrupt_requests_and_stack_limits(access); // Add scope to the chain. scope->prev_ = thread_local_.interrupt_scopes_; thread_local_.interrupt_scopes_ = scope; @@ -126,7 +119,7 @@ void StackGuard::PopInterruptsScope() { } } } - if (has_pending_interrupts(access)) set_interrupt_limits(access); + update_interrupt_requests_and_stack_limits(access); // Remove scope from chain. thread_local_.interrupt_scopes_ = top->prev_; } @@ -146,7 +139,7 @@ void StackGuard::RequestInterrupt(InterruptFlag flag) { // Not intercepted. Set as active interrupt flag. thread_local_.interrupt_flags_ |= flag; - set_interrupt_limits(access); + update_interrupt_requests_and_stack_limits(access); // If this isolate is waiting in a futex, notify it to wake up. 
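// Note: update_interrupt_requests_and_stack_limits() above replaces the old
// set_interrupt_limits()/reset_limits() pair with one idempotent recompute:
// the JS/C limits are forced to kInterruptLimit iff any interrupt is pending,
// and one "requested" byte per InterruptLevel is derived from the flag word.
// A simplified standalone sketch of the per-level recompute (types reduced to
// plain ints; InterruptLevelMask is defined in stack-guard.h below):

#include <cstdint>

enum class InterruptLevel { kNoGC, kNoHeapWrites, kAnyEffect };
constexpr int kNumberOfInterruptLevels = 3;

void RecomputeRequested(uint32_t flags,
                        bool requested[kNumberOfInterruptLevels],
                        uint32_t (*level_mask)(InterruptLevel)) {
  for (int i = 0; i < kNumberOfInterruptLevels; i++) {
    // A level is "requested" iff any pending flag belongs to that level.
    requested[i] = (level_mask(static_cast<InterruptLevel>(i)) & flags) != 0;
  }
}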
isolate_->futex_wait_list_node()->NotifyWake(); @@ -162,37 +155,36 @@ void StackGuard::ClearInterrupt(InterruptFlag flag) { // Clear the interrupt flag from the active interrupt flags. thread_local_.interrupt_flags_ &= ~flag; - if (!has_pending_interrupts(access)) reset_limits(access); + update_interrupt_requests_and_stack_limits(access); } bool StackGuard::HasTerminationRequest() { + if (!thread_local_.has_interrupt_requested(InterruptLevel::kNoGC)) { + return false; + } ExecutionAccess access(isolate_); if ((thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) != 0) { thread_local_.interrupt_flags_ &= ~TERMINATE_EXECUTION; - if (!has_pending_interrupts(access)) reset_limits(access); + update_interrupt_requests_and_stack_limits(access); return true; } return false; } -int StackGuard::FetchAndClearInterrupts() { +int StackGuard::FetchAndClearInterrupts(InterruptLevel level) { ExecutionAccess access(isolate_); - - int result = 0; + InterruptFlag mask = InterruptLevelMask(level); if ((thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) != 0) { // The TERMINATE_EXECUTION interrupt is special, since it terminates // execution but should leave V8 in a resumable state. If it exists, we only // fetch and clear that bit. On resume, V8 can continue processing other // interrupts. - result = TERMINATE_EXECUTION; - thread_local_.interrupt_flags_ &= ~TERMINATE_EXECUTION; - if (!has_pending_interrupts(access)) reset_limits(access); - } else { - result = static_cast(thread_local_.interrupt_flags_); - thread_local_.interrupt_flags_ = 0; - reset_limits(access); + mask = TERMINATE_EXECUTION; } + int result = static_cast(thread_local_.interrupt_flags_ & mask); + thread_local_.interrupt_flags_ &= ~mask; + update_interrupt_requests_and_stack_limits(access); return result; } @@ -264,7 +256,7 @@ class V8_NODISCARD ShouldBeZeroOnReturnScope final { } // namespace -Object StackGuard::HandleInterrupts() { +Object StackGuard::HandleInterrupts(InterruptLevel level) { TRACE_EVENT0("v8.execute", "V8.HandleInterrupts"); #if DEBUG @@ -278,7 +270,7 @@ Object StackGuard::HandleInterrupts() { // Fetch and clear interrupt bits in one go. See comments inside the method // for special handling of TERMINATE_EXECUTION. - int interrupt_flags = FetchAndClearInterrupts(); + int interrupt_flags = FetchAndClearInterrupts(level); // All interrupts should be fully processed when returning from this method. ShouldBeZeroOnReturnScope should_be_zero_on_return(&interrupt_flags); diff --git a/src/execution/stack-guard.h b/src/execution/stack-guard.h index 8cdf755c0d29bfcc1c7b01cc6e80c3e1b2a2282e..d5d0cbc90bdf4900e67719f7d7b49f0edcd37474 100644 --- a/src/execution/stack-guard.h +++ b/src/execution/stack-guard.h @@ -45,20 +45,30 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final { // Sets up the default stack guard for this thread. 
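// Note: with the per-interrupt levels introduced below, InterruptLevelMask is
// cumulative because the macro compares `interrupt_level <= level` and the
// enum is ordered kNoGC < kNoHeapWrites < kAnyEffect. Worked example from the
// new list: TERMINATE_EXECUTION is the only kNoGC interrupt, so
//
//   InterruptLevelMask(InterruptLevel::kNoGC) == TERMINATE_EXECUTION
//
// while InterruptLevelMask(InterruptLevel::kNoHeapWrites) additionally
// contains GC_REQUEST, API_INTERRUPT, DEOPT_MARKED_ALLOCATION_SITES,
// WASM_CODE_GC and GLOBAL_SAFEPOINT, and the kAnyEffect mask is
// ALL_INTERRUPTS.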
void InitThread(const ExecutionAccess& lock); -#define INTERRUPT_LIST(V) \ - V(TERMINATE_EXECUTION, TerminateExecution, 0) \ - V(GC_REQUEST, GC, 1) \ - V(INSTALL_CODE, InstallCode, 2) \ - V(INSTALL_BASELINE_CODE, InstallBaselineCode, 3) \ - V(API_INTERRUPT, ApiInterrupt, 4) \ - V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 5) \ - V(GROW_SHARED_MEMORY, GrowSharedMemory, 6) \ - V(LOG_WASM_CODE, LogWasmCode, 7) \ - V(WASM_CODE_GC, WasmCodeGC, 8) \ - V(INSTALL_MAGLEV_CODE, InstallMaglevCode, 9) \ - V(GLOBAL_SAFEPOINT, GlobalSafepoint, 10) - -#define V(NAME, Name, id) \ + // Code locations that check for interrupts might only handle a subset of the + // available interrupts, expressed as an `InterruptLevel`. These levels are + // also associated with side effects that are allowed for the respective + // level. The levels are inclusive, which is specified using the order in the + // enum. For example, a site that handles `kAnyEffect` will also handle the + // preceding levels. + enum class InterruptLevel { kNoGC, kNoHeapWrites, kAnyEffect }; + static constexpr int kNumberOfInterruptLevels = 3; + +#define INTERRUPT_LIST(V) \ + V(TERMINATE_EXECUTION, TerminateExecution, 0, InterruptLevel::kNoGC) \ + V(GC_REQUEST, GC, 1, InterruptLevel::kNoHeapWrites) \ + V(INSTALL_CODE, InstallCode, 2, InterruptLevel::kAnyEffect) \ + V(INSTALL_BASELINE_CODE, InstallBaselineCode, 3, InterruptLevel::kAnyEffect) \ + V(API_INTERRUPT, ApiInterrupt, 4, InterruptLevel::kNoHeapWrites) \ + V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 5, \ + InterruptLevel::kNoHeapWrites) \ + V(GROW_SHARED_MEMORY, GrowSharedMemory, 6, InterruptLevel::kAnyEffect) \ + V(LOG_WASM_CODE, LogWasmCode, 7, InterruptLevel::kAnyEffect) \ + V(WASM_CODE_GC, WasmCodeGC, 8, InterruptLevel::kNoHeapWrites) \ + V(INSTALL_MAGLEV_CODE, InstallMaglevCode, 9, InterruptLevel::kAnyEffect) \ + V(GLOBAL_SAFEPOINT, GlobalSafepoint, 10, InterruptLevel::kNoHeapWrites) + +#define V(NAME, Name, id, interrupt_level) \ inline bool Check##Name() { return CheckInterrupt(NAME); } \ inline void Request##Name() { RequestInterrupt(NAME); } \ inline void Clear##Name() { ClearInterrupt(NAME); } @@ -67,16 +77,23 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final { // Flag used to set the interrupt causes. enum InterruptFlag : uint32_t { -#define V(NAME, Name, id) NAME = (1 << id), +#define V(NAME, Name, id, interrupt_level) NAME = (1 << id), INTERRUPT_LIST(V) #undef V -#define V(NAME, Name, id) NAME | +#define V(NAME, Name, id, interrupt_level) NAME | ALL_INTERRUPTS = INTERRUPT_LIST(V) 0 #undef V }; static_assert(InterruptFlag::ALL_INTERRUPTS < std::numeric_limits::max()); + static constexpr InterruptFlag InterruptLevelMask(InterruptLevel level) { +#define V(NAME, Name, id, interrupt_level) \ + | (interrupt_level <= level ? NAME : 0) + return static_cast(0 INTERRUPT_LIST(V)); +#undef V + } + uintptr_t climit() { return thread_local_.climit(); } uintptr_t jslimit() { return thread_local_.jslimit(); } // This provides an asynchronous read of the stack limits for the current @@ -90,17 +107,23 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final { Address address_of_real_jslimit() { return reinterpret_cast
(&thread_local_.real_jslimit_); } + Address address_of_interrupt_request(InterruptLevel level) { + return reinterpret_cast
( + &thread_local_.interrupt_requested_[static_cast(level)]); + } // If the stack guard is triggered, but it is not an actual // stack overflow, then handle the interruption accordingly. - Object HandleInterrupts(); + // Only interrupts that match the given `InterruptLevel` will be handled, + // leaving other interrupts pending as if this method had not been called. + Object HandleInterrupts(InterruptLevel level = InterruptLevel::kAnyEffect); // Special case of {HandleInterrupts}: checks for termination requests only. // This is guaranteed to never cause GC, so can be used to interrupt // long-running computations that are not GC-safe. bool HasTerminationRequest(); - static constexpr int kSizeInBytes = 7 * kSystemPointerSize; + static constexpr int kSizeInBytes = 8 * kSystemPointerSize; static char* Iterate(RootVisitor* v, char* thread_storage) { return thread_storage + ArchiveSpacePerThread(); @@ -110,7 +133,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final { bool CheckInterrupt(InterruptFlag flag); void RequestInterrupt(InterruptFlag flag); void ClearInterrupt(InterruptFlag flag); - int FetchAndClearInterrupts(); + int FetchAndClearInterrupts(InterruptLevel level); // You should hold the ExecutionAccess lock when calling this method. bool has_pending_interrupts(const ExecutionAccess& lock) { @@ -118,15 +141,8 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final { } // You should hold the ExecutionAccess lock when calling this method. - inline void set_interrupt_limits(const ExecutionAccess& lock); - - // Reset limits to actual values. For example after handling interrupt. - // You should hold the ExecutionAccess lock when calling this method. - inline void reset_limits(const ExecutionAccess& lock); - - // Enable or disable interrupts. - void EnableInterrupts(); - void DisableInterrupts(); + inline void update_interrupt_requests_and_stack_limits( + const ExecutionAccess& lock); #if V8_TARGET_ARCH_64_BIT static const uintptr_t kInterruptLimit = uintptr_t{0xfffffffffffffffe}; @@ -180,6 +196,21 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final { static_cast(limit)); } + + // Interrupt request bytes can be read without any lock. + // Writing requires the ExecutionAccess lock. + base::Atomic8 interrupt_requested_[kNumberOfInterruptLevels] = { + false, false, false}; + + void set_interrupt_requested(InterruptLevel level, bool requested) { + base::Relaxed_Store(&interrupt_requested_[static_cast(level)], + requested); + } + + bool has_interrupt_requested(InterruptLevel level) { + return base::Relaxed_Load(&interrupt_requested_[static_cast(level)]); + } + InterruptsScope* interrupt_scopes_ = nullptr; uint32_t interrupt_flags_ = 0; }; diff --git a/src/heap/concurrent-marking.cc b/src/heap/concurrent-marking.cc index 6f7adf9cb9db5d05e2d5e424d296d7aea1d8a0a4..27ee56600fe7a43e60c0f70dc1f2863821c4ebc7 100644 --- a/src/heap/concurrent-marking.cc +++ b/src/heap/concurrent-marking.cc @@ -383,6 +383,8 @@ void ConcurrentMarking::RunMajor(JobDelegate* delegate, local_marking_worklists.PushOnHold(object); } else { Map map = object.map(isolate, kAcquireLoad); + // The marking worklist should never contain filler objects. 
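// Note: this CHECK (and the matching one in mark-compact.cc below) replaces
// the old tolerant "skip fillers" handling. Left trimming no longer puts
// marked fillers on the worklist, so encountering one now indicates heap
// corruption, and crashing safely is preferred over silently skipping:
//
//   DCHECK(cond)  -> compiled away outside debug builds
//   CHECK(cond)   -> always evaluated; the process traps if it fails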
+ CHECK(!InstanceTypeChecker::IsFreeSpaceOrFiller(map)); if (is_per_context_mode) { Address context; if (native_context_inferrer.Infer(isolate, map, object, &context)) { diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index 27ef0618f01823134b67c36479081784cb941fd5..f90c2c196c1441299f9c313f1f283200b5b983bb 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -2292,7 +2292,11 @@ void MarkCompactCollector::MarkTransitiveClosureLinear() { GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR); // This phase doesn't support parallel marking. DCHECK(heap()->concurrent_marking()->IsStopped()); - std::unordered_multimap<HeapObject, HeapObject, Object::Hasher> key_to_values; + // We must use the full pointer comparison here as this map will be queried + // with objects from different cages (e.g. code- or trusted cage). + std::unordered_multimap<HeapObject, HeapObject, Object::Hasher, Object::KeyEqualSafe> + key_to_values; Ephemeron ephemeron; DCHECK( @@ -2410,21 +2414,8 @@ std::pair MarkCompactCollector::ProcessMarkingWorklist( while (local_marking_worklists()->Pop(&object) || local_marking_worklists()->PopOnHold(&object)) { - // Left trimming may result in grey or black filler objects on the marking - // worklist. Ignore these objects. - if (object.IsFreeSpaceOrFiller(cage_base)) { - // Due to copying mark bits and the fact that grey and black have their - // first bit set, one word fillers are always black. - DCHECK_IMPLIES(object.map(cage_base) == - ReadOnlyRoots(isolate).one_pointer_filler_map(), - marking_state()->IsMarked(object)); - // Other fillers may be black or grey depending on the color of the object - // that was trimmed. - DCHECK_IMPLIES(object.map(cage_base) != - ReadOnlyRoots(isolate).one_pointer_filler_map(), - marking_state()->IsMarked(object)); - continue; - } + // The marking worklist should never contain filler objects. + CHECK(!object.IsFreeSpaceOrFiller(cage_base)); DCHECK(object.IsHeapObject()); DCHECK(heap()->Contains(object)); DCHECK(!(marking_state()->IsUnmarked(object))); diff --git a/src/ic/ic.cc b/src/ic/ic.cc index b8f55270acc78c5c26a8c0acff99e5bff81ffd2e..f117eb028f1c2fb0a0740981ca33a2549f4a1683 100644 --- a/src/ic/ic.cc +++ b/src/ic/ic.cc @@ -1788,7 +1788,7 @@ MaybeHandle StoreIC::Store(Handle object, Handle name, // present. We can also skip this for private names since they are not // bound by configurability or extensibility checks, and errors would've // been thrown if the private field already exists in the object. - if (IsAnyDefineOwn() && !name->IsPrivateName() && !object->IsJSProxy() && + if (IsAnyDefineOwn() && !name->IsPrivateName() && object->IsJSObject() && !Handle::cast(object)->HasNamedInterceptor()) { Maybe can_define = JSObject::CheckIfCanDefineAsConfigurable( isolate(), &it, value, Nothing()); @@ -2262,15 +2262,16 @@ Handle KeyedStoreIC::StoreElementHandler( receiver_map->MayHaveReadOnlyElementsInPrototypeChain(isolate()), IsStoreInArrayLiteralIC()); - if (receiver_map->IsJSProxyMap()) { + if (!receiver_map->IsJSObjectMap()) { // DefineKeyedOwnIC, which is used to define computed fields in instances, - // should be handled by the slow stub. - if (IsDefineKeyedOwnIC()) { - TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub); - return StoreHandler::StoreSlow(isolate(), store_mode); + // should be handled by the slow stub below instead of the proxy stub. + if (receiver_map->IsJSProxyMap() && !IsDefineKeyedOwnIC()) { + return StoreHandler::StoreProxy(isolate()); } - return StoreHandler::StoreProxy(isolate()); + // Wasm objects or other kinds of special objects go through the slow stub.
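// Note: receiver dispatch in StoreElementHandler after this change, as a
// table (previously only JSProxy maps were special-cased, so non-JSObject,
// non-proxy receivers such as Wasm objects could reach JSObject-only fast
// paths):
//
//   receiver map         IC kind              handler
//   JSObject map         any                  fast element handlers below
//   JSProxy map          not DefineKeyedOwn   StoreHandler::StoreProxy
//   JSProxy map          DefineKeyedOwn       StoreHandler::StoreSlow
//   other (e.g. Wasm)    any                  StoreHandler::StoreSlow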
+ TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub); + return StoreHandler::StoreSlow(isolate(), store_mode); } // TODO(ishell): move to StoreHandler::StoreElement(). @@ -2490,14 +2491,12 @@ MaybeHandle KeyedStoreIC::Store(Handle object, Handle receiver = Handle::cast(object); old_receiver_map = handle(receiver->map(), isolate()); is_arguments = receiver->IsJSArgumentsObject(); - bool is_proxy = receiver->IsJSProxy(); + bool is_jsobject = receiver->IsJSObject(); size_t index; key_is_valid_index = IntPtrKeyToSize(maybe_index, receiver, &index); - if (!is_arguments && !is_proxy) { - if (key_is_valid_index) { + if (is_jsobject && !is_arguments && key_is_valid_index) { Handle receiver_object = Handle::cast(object); store_mode = GetStoreMode(receiver_object, index); - } } } diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc index 139ed00b4b3f9766a392b84db1ee9f32af57c146..6b656c4a53abacb884a3f23f871229d6e63e9ea1 100644 --- a/src/interpreter/bytecode-generator.cc +++ b/src/interpreter/bytecode-generator.cc @@ -2422,8 +2422,16 @@ void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) { VisitIterationBody(stmt, &loop_builder); builder()->SetExpressionAsStatementPosition(stmt->cond()); BytecodeLabels loop_backbranch(zone()); - VisitForTest(stmt->cond(), &loop_backbranch, loop_builder.break_labels(), - TestFallthrough::kThen); + if (!loop_builder.break_labels()->empty()) { + // The test may be conditionally executed if there was a break statement + // inside the loop body, and therefore requires its own elision scope. + HoleCheckElisionScope elider(this); + VisitForTest(stmt->cond(), &loop_backbranch, loop_builder.break_labels(), + TestFallthrough::kThen); + } else { + VisitForTest(stmt->cond(), &loop_backbranch, loop_builder.break_labels(), + TestFallthrough::kThen); + } loop_backbranch.Bind(builder()); } } diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc index fe89983f5478f21b6c01525058ed50a4c68d0e05..703d1a3432b49ecca6b40bc1fa841e32a0602126 100644 --- a/src/interpreter/interpreter.cc +++ b/src/interpreter/interpreter.cc @@ -292,6 +292,9 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::DoFinalizeJobImpl( } #ifdef DEBUG + if (parse_info()->literal()->shared_function_info().is_null()) { + parse_info()->literal()->set_shared_function_info(shared_info); + } CheckAndPrintBytecodeMismatch( isolate, handle(Script::cast(shared_info->script()), isolate), bytecodes); #endif diff --git a/src/json/json-stringifier.cc b/src/json/json-stringifier.cc index f718bcd9cf94c6c9762985e21a471ff235914742..347dbebd04b0ee0bdde149ea7773a80733b0a436 100644 --- a/src/json/json-stringifier.cc +++ b/src/json/json-stringifier.cc @@ -633,6 +633,8 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle object, if (InstanceTypeChecker::IsJSProxy(instance_type)) { return SerializeJSProxy(Handle::cast(object), key); } + // WASM_{STRUCT,ARRAY}_TYPE are handled in `case:` blocks above. 
+ DCHECK(object->IsJSObject()); return SerializeJSObject(Handle::cast(object), key); } } diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index c29af103ae52b348f8ccbf8968dde789a034b821..f6f34d672b06d4432b4c533bd964f2b8cba6fdf1 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -621,8 +621,8 @@ DeoptFrame MaglevGraphBuilder::GetDeoptFrameForLazyDeoptHelper( // Currently only support builtin continuations for bytecodes that write to // the accumulator - DCHECK( - interpreter::Bytecodes::WritesAccumulator(iterator_.current_bytecode())); + DCHECK(interpreter::Bytecodes::WritesOrClobbersAccumulator( + iterator_.current_bytecode())); return BuiltinContinuationDeoptFrame( continuation_scope->continuation(), {}, GetContext(), // Mark the accumulator dead in parent frames since we know that the @@ -4635,7 +4635,7 @@ void MaglevGraphBuilder::VisitFindNonDefaultConstructorOrConstruct() { TryGetConstant(new_target); if (kind == FunctionKind::kDefaultBaseConstructor) { ValueNode* object; - if (new_target_function && new_target_function->IsJSFunction() && + if (new_target_function && new_target_function->IsJSFunction() && HasValidInitialMap(new_target_function->AsJSFunction(), current_function)) { object = BuildAllocateFastObject( diff --git a/src/objects/code.cc b/src/objects/code.cc index a3c6e810efed56c13eba8b103824e829f7ae3d8a..ecf3e6722bc37bcb96455511892b89ecc3d48793 100644 --- a/src/objects/code.cc +++ b/src/objects/code.cc @@ -83,10 +83,10 @@ void Code::CopyFromNoFlush(ByteArray reloc_info, Heap* heap, #ifdef V8_ENABLE_JIT_CODE_SIGN if (IsSupportJitCodeSigner()) { CHECK(desc.jit_code_signer->ValidateCodeCopy(reinterpret_cast(instruction_start()), - desc.buffer, desc.instr_size) == 0); + desc.buffer, desc.instr_size) == 0); } else { CopyBytes(reinterpret_cast(instruction_start()), desc.buffer, - static_cast(desc.instr_size)); + static_cast(desc.instr_size)); } #else CopyBytes(reinterpret_cast(instruction_start()), desc.buffer, diff --git a/src/objects/elements.cc b/src/objects/elements.cc index 4d5fa8f1f9a08942685499b0ed0318c02ff67b2c..47cfa50ba446435b2c78213f5ba75a77d93b7273 100644 --- a/src/objects/elements.cc +++ b/src/objects/elements.cc @@ -3836,7 +3836,8 @@ class TypedElementsAccessor // them. if (source_proto.IsNull(isolate)) return false; if (source_proto.IsJSProxy()) return true; - if (!context.native_context().is_initial_array_prototype( + if (source_proto.IsJSObject() && + !context.native_context().is_initial_array_prototype( JSObject::cast(source_proto))) { return true; } diff --git a/src/objects/js-objects.cc b/src/objects/js-objects.cc index 0c0815338b854393e28b3f43dc88de89c8adc943..a57252d9cce1e45cb74252bfeea53d6ef7ff51aa 100644 --- a/src/objects/js-objects.cc +++ b/src/objects/js-objects.cc @@ -1458,8 +1458,6 @@ Maybe JSReceiver::ValidateAndApplyPropertyDescriptor( Maybe should_throw, Handle property_name) { // We either need a LookupIterator, or a property name. 
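// Note: the DCHECK below expresses "exactly one of the two is provided" as a
// boolean inequality, which is XOR:
//
//   it provided, name empty   -> (false != true)  -> passes
//   it null, name provided    -> (true != false)  -> passes
//   both, or neither          -> operands equal   -> fails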
DCHECK((it == nullptr) != property_name.is_null()); - Handle<JSObject> object; - if (it != nullptr) object = Handle<JSObject>::cast(it->GetReceiver()); bool desc_is_data_descriptor = PropertyDescriptor::IsDataDescriptor(desc); bool desc_is_accessor_descriptor = PropertyDescriptor::IsAccessorDescriptor(desc); @@ -3592,6 +3590,7 @@ void JSObject::AddProperty(Isolate* isolate, Handle object, #ifdef DEBUG uint32_t index; DCHECK(!object->IsJSProxy()); + DCHECK(!object->IsWasmObject()); DCHECK(!name->AsArrayIndex(&index)); Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it); DCHECK(maybe.IsJust()); @@ -5175,7 +5174,7 @@ Maybe JSObject::SetPrototype(Isolate* isolate, Handle object, DCHECK(!object->IsAccessCheckNeeded()); } - // Silently ignore the change if value is not a JSObject or null. + // Silently ignore the change if value is not a JSReceiver or null. // SpiderMonkey behaves this way. if (!value->IsJSReceiver() && !value->IsNull(isolate)) return Just(true); diff --git a/src/objects/keys.cc b/src/objects/keys.cc index ab9b931fcca98e3d5559c195c3be11b4f48e66eb..dc603b032a8e45952439be62deeb56aa2b0f00bc 100644 --- a/src/objects/keys.cc +++ b/src/objects/keys.cc @@ -314,9 +314,8 @@ void TrySettingEmptyEnumCache(JSReceiver object) { Map map = object.map(); DCHECK_EQ(kInvalidEnumCacheSentinel, map.EnumLength()); if (!map.OnlyHasSimpleProperties()) return; - if (map.IsJSProxyMap()) return; + DCHECK(map.IsJSObjectMap()); // Implied by {OnlyHasSimpleProperties}. if (map.NumberOfEnumerableProperties() > 0) return; - DCHECK(object.IsJSObject()); map.SetEnumLength(0); } diff --git a/src/objects/module.cc b/src/objects/module.cc index fccb1c238db19aabde7023e0967c09b9e64a076b..3a100e80c9951792c35407dd395a1db97234334e 100644 --- a/src/objects/module.cc +++ b/src/objects/module.cc @@ -354,8 +354,7 @@ Handle Module::GetModuleNamespace(Isolate* isolate, // Turbofan can use this for inlining the access. JSObject::OptimizeAsPrototype(ns); - Handle<PrototypeInfo> proto_info = - Map::GetOrCreatePrototypeInfo(Handle<JSObject>::cast(ns), isolate); + Handle<PrototypeInfo> proto_info = Map::GetOrCreatePrototypeInfo(ns, isolate); proto_info->set_module_namespace(*ns); return ns; } diff --git a/src/objects/objects.cc b/src/objects/objects.cc index e13773d9af4aa76811f32e9915fec3bac2f64ee3..10a305cbb655218b5c1809e9e3e66da914827400 100644 --- a/src/objects/objects.cc +++ b/src/objects/objects.cc @@ -459,14 +459,27 @@ Handle NoSideEffectsErrorToString(Isolate* isolate, if (name_str->length() == 0) return msg_str; if (msg_str->length() == 0) return name_str; - IncrementalStringBuilder builder(isolate); - builder.AppendString(name_str); - builder.AppendCStringLiteral(": "); + constexpr const char error_suffix[] = "<a very large string>"; + constexpr int error_suffix_size = sizeof(error_suffix); + int suffix_size = std::min(error_suffix_size, msg_str->length()); - if (builder.Length() + msg_str->length() <= String::kMaxLength) { - builder.AppendString(msg_str); + IncrementalStringBuilder builder(isolate); + if (name_str->length() + suffix_size + 2 /* ": " */ > String::kMaxLength) { + constexpr const char connector[] = "... : ";
+ int connector_size = sizeof(connector); + Handle<String> truncated_name = isolate->factory()->NewProperSubString( + name_str, 0, name_str->length() - error_suffix_size - connector_size); + builder.AppendString(truncated_name); + builder.AppendCStringLiteral(connector); + builder.AppendCStringLiteral(error_suffix); } else { - builder.AppendCStringLiteral("<a very large string>"); + builder.AppendString(name_str); + builder.AppendCStringLiteral(": "); + if (builder.Length() + msg_str->length() <= String::kMaxLength) { + builder.AppendString(msg_str); + } else { + builder.AppendCStringLiteral(error_suffix); + } } return builder.Finish().ToHandleChecked(); diff --git a/src/objects/objects.h b/src/objects/objects.h index 7c08126e79d07c9135964043232548e8de08ac5a..1c125e83e0c100e806dcaeb44e323565dba5957c 100644 --- a/src/objects/objects.h +++ b/src/objects/objects.h @@ -693,8 +693,9 @@ class Object : public TaggedImpl { } }; - // For use with std::unordered_set/unordered_map when using both - // InstructionStream and non-InstructionStream objects as keys. + // For use with std::unordered_set/unordered_map when one of the objects may + // be located outside the main pointer compression cage, for example in + // trusted space. In this case, we must use full pointer comparison. struct KeyEqualSafe { bool operator()(const Object a, const Object b) const { return a.SafeEquals(b); } diff --git a/src/objects/value-serializer.cc b/src/objects/value-serializer.cc index 2efca82aaaa00da30fdab8d66d97275e3b162a51..581fcd3166c7acc7c0988b7fd5f345b9e441f10d 100644 --- a/src/objects/value-serializer.cc +++ b/src/objects/value-serializer.cc @@ -1098,11 +1098,8 @@ Maybe ValueSerializer::WriteWasmModule(Handle object) { return ThrowDataCloneError(MessageTemplate::kDataCloneError, object); } - // TODO(titzer): introduce a Utils::ToLocal for WasmModuleObject. Maybe<uint32_t> transfer_id = delegate_->GetWasmModuleTransferId( - reinterpret_cast<v8::Isolate*>(isolate_), - v8::Local<v8::WasmModuleObject>::Cast( - Utils::ToLocal(Handle<JSObject>::cast(object)))); + reinterpret_cast<v8::Isolate*>(isolate_), Utils::ToLocal(object)); RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate_, Nothing<bool>()); uint32_t id = 0; if (transfer_id.To(&id)) { diff --git a/src/regexp/arm/regexp-macro-assembler-arm.cc b/src/regexp/arm/regexp-macro-assembler-arm.cc index 4cf2fcf8d54e08171bc7b3bd409a81ab0ee82a04..7abc77fbdf0ac2307a1431d58ad20c1b3b638d33 100644 --- a/src/regexp/arm/regexp-macro-assembler-arm.cc +++ b/src/regexp/arm/regexp-macro-assembler-arm.cc @@ -754,11 +754,13 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { __ mov(r0, Operand(stack_limit)); __ ldr(r0, MemOperand(r0)); __ sub(r0, sp, r0, SetCC); + Operand extra_space_for_variables(num_registers_ * kSystemPointerSize); + // Handle it if the stack pointer is already below the stack limit. __ b(ls, &stack_limit_hit); // Check if there is room for the variable number of registers above // the stack limit. - __ cmp(r0, Operand(num_registers_ * kSystemPointerSize)); + __ cmp(r0, extra_space_for_variables); __ b(hs, &stack_ok); // Exit with OutOfMemory exception. There is not enough space on the stack // for our working registers. @@ -766,7 +768,7 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { __ jmp(&return_r0); __ bind(&stack_limit_hit); - CallCheckStackGuardState(); + CallCheckStackGuardState(extra_space_for_variables); __ cmp(r0, Operand::Zero()); // If returned value is non-zero, we exit with the returned value as result.
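// Note: the pattern above repeats for every backend in this patch (arm,
// arm64, ia32, x64): GetCode now forwards the size of the register area,
// num_registers_ * kSystemPointerSize, into CheckStackGuardState, which in
// turn passes it as `gap` to StackLimitCheck::JsHasOverflowed(gap) (see
// regexp-macro-assembler.cc below). The stack check thus accounts for memory
// the regexp frame is about to claim, conceptually:

bool WouldOverflow(uintptr_t sp, uintptr_t limit, uintptr_t gap) {
  return sp - gap < limit;  // illustrative sketch, not V8's exact check
}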
__ b(ne, &return_r0); @@ -1157,16 +1159,18 @@ void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) { // Private methods: -void RegExpMacroAssemblerARM::CallCheckStackGuardState() { +void RegExpMacroAssemblerARM::CallCheckStackGuardState(Operand extra_space) { DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins()); DCHECK(!masm_->options().isolate_independent_code); - __ PrepareCallCFunction(3); + __ PrepareCallCFunction(4); + // Extra space for variables to consider in stack check. + __ mov(arg_reg_4, extra_space); // RegExp code frame pointer. - __ mov(r2, frame_pointer()); + __ mov(arg_reg_3, frame_pointer()); // InstructionStream of self. - __ mov(r1, Operand(masm_->CodeObject())); + __ mov(arg_reg_2, Operand(masm_->CodeObject())); // We need to make room for the return address on the stack. int stack_alignment = base::OS::ActivationFrameAlignment(); @@ -1194,7 +1198,6 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState() { __ mov(code_pointer(), Operand(masm_->CodeObject())); } - // Helper function for reading a value out of a stack frame. template static T& frame_entry(Address re_frame, int frame_offset) { @@ -1209,7 +1212,8 @@ static T* frame_entry_address(Address re_frame, int frame_offset) { int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address, Address raw_code, - Address re_frame) { + Address re_frame, + uintptr_t extra_space) { InstructionStream re_code = InstructionStream::cast(Object(raw_code)); return NativeRegExpMacroAssembler::CheckStackGuardState( frame_entry(re_frame, kIsolateOffset), @@ -1219,10 +1223,10 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address, return_address, re_code, frame_entry_address
(re_frame, kInputStringOffset), frame_entry_address(re_frame, kInputStartOffset), - frame_entry_address(re_frame, kInputEndOffset)); + frame_entry_address(re_frame, kInputEndOffset), + extra_space); } - MemOperand RegExpMacroAssemblerARM::register_location(int register_index) { DCHECK(register_index < (1<<30)); if (num_registers_ <= register_index) { diff --git a/src/regexp/arm/regexp-macro-assembler-arm.h b/src/regexp/arm/regexp-macro-assembler-arm.h index 44be0d920b66d8eaa25774057e85f94e7a84dd7c..e8d9f6d76de1b7a76e6abe4d9fc10d0d604d085e 100644 --- a/src/regexp/arm/regexp-macro-assembler-arm.h +++ b/src/regexp/arm/regexp-macro-assembler-arm.h @@ -88,7 +88,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM // returning. // {raw_code} is an Address because this is called via ExternalReference. static int CheckStackGuardState(Address* return_address, Address raw_code, - Address re_frame); + Address re_frame, uintptr_t extra_space); private: // Offsets from frame_pointer() of function parameters and stored registers. @@ -152,7 +152,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM // Check whether we are exceeding the stack limit on the backtrack stack. void CheckStackLimit(); - void CallCheckStackGuardState(); + void CallCheckStackGuardState( + Operand extra_space_for_variables = Operand::Zero()); void CallIsCharacterInRangeArray(const ZoneList* ranges); // The ebp-relative location of a regexp register. diff --git a/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/src/regexp/arm64/regexp-macro-assembler-arm64.cc index fe1b0f6e0411252e9e43dfc76f3c5479ef1f6959..0f070e36ccf2be4c02a5aef73ffce542bfcb61cb 100644 --- a/src/regexp/arm64/regexp-macro-assembler-arm64.cc +++ b/src/regexp/arm64/regexp-macro-assembler-arm64.cc @@ -866,13 +866,14 @@ Handle RegExpMacroAssemblerARM64::GetCode(Handle source) { __ Mov(x10, stack_limit); __ Ldr(x10, MemOperand(x10)); __ Subs(x10, sp, x10); + Operand extra_space_for_variables(num_wreg_to_allocate * kWRegSize); // Handle it if the stack pointer is already below the stack limit. __ B(ls, &stack_limit_hit); // Check if there is room for the variable number of registers above // the stack limit. - __ Cmp(x10, num_wreg_to_allocate * kWRegSize); + __ Cmp(x10, extra_space_for_variables); __ B(hs, &stack_ok); // Exit with OutOfMemory exception. There is not enough space on the stack @@ -881,7 +882,7 @@ Handle RegExpMacroAssemblerARM64::GetCode(Handle source) { __ B(&return_w0); __ Bind(&stack_limit_hit); - CallCheckStackGuardState(x10); + CallCheckStackGuardState(x10, extra_space_for_variables); // If returned value is non-zero, we exit with the returned value as result. __ Cbnz(w0, &return_w0); @@ -1432,7 +1433,8 @@ static T* frame_entry_address(Address re_frame, int frame_offset) { int RegExpMacroAssemblerARM64::CheckStackGuardState( Address* return_address, Address raw_code, Address re_frame, - int start_index, const byte** input_start, const byte** input_end) { + int start_index, const byte** input_start, const byte** input_end, + uintptr_t extra_space) { InstructionStream re_code = InstructionStream::cast(Object(raw_code)); return NativeRegExpMacroAssembler::CheckStackGuardState( frame_entry(re_frame, kIsolateOffset), start_index, @@ -1440,7 +1442,7 @@ int RegExpMacroAssemblerARM64::CheckStackGuardState( frame_entry(re_frame, kDirectCallOffset)), return_address, re_code, frame_entry_address
(re_frame, kInputStringOffset), input_start, - input_end); + input_end, extra_space); } @@ -1459,7 +1461,8 @@ void RegExpMacroAssemblerARM64::CheckPosition(int cp_offset, // Private methods: -void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) { +void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch, + Operand extra_space) { DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins()); DCHECK(!masm_->options().isolate_independent_code); @@ -1474,6 +1477,7 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) { __ Claim(xreg_to_claim); + __ Mov(x6, extra_space); // CheckStackGuardState needs the end and start addresses of the input string. __ Poke(input_end(), 2 * kSystemPointerSize); __ Add(x5, sp, 2 * kSystemPointerSize); diff --git a/src/regexp/arm64/regexp-macro-assembler-arm64.h b/src/regexp/arm64/regexp-macro-assembler-arm64.h index a5164472b71f9037a3a0e6a0fd9c9c71d4f7e5e7..05b4eb5bd7b3762be44394cebbae380288181f6d 100644 --- a/src/regexp/arm64/regexp-macro-assembler-arm64.h +++ b/src/regexp/arm64/regexp-macro-assembler-arm64.h @@ -95,7 +95,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64 static int CheckStackGuardState(Address* return_address, Address raw_code, Address re_frame, int start_offset, const byte** input_start, - const byte** input_end); + const byte** input_end, + uintptr_t extra_space); private: static constexpr int kFramePointerOffset = 0; @@ -174,7 +175,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64 // Check whether we are exceeding the stack limit on the backtrack stack. void CheckStackLimit(); - void CallCheckStackGuardState(Register scratch); + void CallCheckStackGuardState(Register scratch, + Operand extra_space = Operand(0)); void CallIsCharacterInRangeArray(const ZoneList* ranges); // Location of a 32 bit position register. diff --git a/src/regexp/experimental/experimental-interpreter.cc b/src/regexp/experimental/experimental-interpreter.cc index 095cbd3a103c132409bd9d286b2952126701a0c5..456647fa30f2cb19677d5a9246f06beee7f75d2a 100644 --- a/src/regexp/experimental/experimental-interpreter.cc +++ b/src/regexp/experimental/experimental-interpreter.cc @@ -372,6 +372,8 @@ class NfaInterpreter { // the current input index. All remaining `active_threads_` are discarded. 
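// Note: the SBXCHECKs added below bound the interpreter thread's program
// counter before it indexes bytecode_. With a pc corrupted from inside the
// sandbox, IsPcProcessed() and the bytecode_ accesses would otherwise go out
// of bounds. SBXCHECK is a CHECK in sandbox-enabled builds and a DCHECK
// otherwise, so the bound is enforced exactly where the sandbox threat model
// requires it.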
void RunActiveThread(InterpreterThread t) { while (true) { + SBXCHECK_GE(t.pc, 0); + SBXCHECK_LT(t.pc, bytecode_.length()); if (IsPcProcessed(t.pc)) return; MarkPcProcessed(t.pc); diff --git a/src/regexp/experimental/experimental.cc b/src/regexp/experimental/experimental.cc index 8bbf32265e7d64e565ed512e9ba9a443526a8df3..64b3447f611a26f53c1dd589972aaae7fac0c4d0 100644 --- a/src/regexp/experimental/experimental.cc +++ b/src/regexp/experimental/experimental.cc @@ -165,7 +165,7 @@ int32_t ExperimentalRegExp::ExecRaw(Isolate* isolate, int32_t* output_registers, int32_t output_register_count, int32_t subject_index) { - DCHECK(v8_flags.enable_experimental_regexp_engine); + CHECK(v8_flags.enable_experimental_regexp_engine); DisallowGarbageCollection no_gc; if (v8_flags.trace_experimental_regexp_engine) { @@ -262,7 +262,7 @@ int32_t ExperimentalRegExp::OneshotExecRaw(Isolate* isolate, int32_t* output_registers, int32_t output_register_count, int32_t subject_index) { - DCHECK(v8_flags.enable_experimental_regexp_engine_on_excessive_backtracks); + CHECK(v8_flags.enable_experimental_regexp_engine_on_excessive_backtracks); if (v8_flags.trace_experimental_regexp_engine) { StdoutStream{} << "Experimental execution (oneshot) of regexp " diff --git a/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/src/regexp/ia32/regexp-macro-assembler-ia32.cc index 70037dc169ced0e1dd3751426a729d0cbbd8c47b..3937e96c4d9508d76bf35047eb30e4f4308ea542 100644 --- a/src/regexp/ia32/regexp-macro-assembler-ia32.cc +++ b/src/regexp/ia32/regexp-macro-assembler-ia32.cc @@ -801,11 +801,13 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { ExternalReference::address_of_jslimit(isolate()); __ mov(eax, esp); __ sub(eax, StaticVariable(stack_limit)); + Immediate extra_space_for_variables(num_registers_ * kSystemPointerSize); + // Handle it if the stack pointer is already below the stack limit. __ j(below_equal, &stack_limit_hit); // Check if there is room for the variable number of registers above // the stack limit. - __ cmp(eax, num_registers_ * kSystemPointerSize); + __ cmp(eax, extra_space_for_variables); __ j(above_equal, &stack_ok); // Exit with OutOfMemory exception. There is not enough space on the stack // for our working registers. @@ -814,7 +816,7 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { __ bind(&stack_limit_hit); __ push(backtrack_stackpointer()); - CallCheckStackGuardState(ebx); + CallCheckStackGuardState(ebx, extra_space_for_variables); __ pop(backtrack_stackpointer()); __ or_(eax, eax); // If returned value is non-zero, we exit with the returned value as result. @@ -1213,9 +1215,12 @@ void RegExpMacroAssemblerIA32::ClearRegisters(int reg_from, int reg_to) { // Private methods: -void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) { - static const int num_arguments = 3; +void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch, + Immediate extra_space) { + static const int num_arguments = 4; __ PrepareCallCFunction(num_arguments, scratch); + // Extra space for variables. + __ mov(Operand(esp, 3 * kSystemPointerSize), extra_space); // RegExp code frame pointer. __ mov(Operand(esp, 2 * kSystemPointerSize), ebp); // InstructionStream of self. 
@@ -1246,7 +1251,8 @@ static T* frame_entry_address(Address re_frame, int frame_offset) { int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address, Address raw_code, - Address re_frame) { + Address re_frame, + uintptr_t extra_space) { InstructionStream re_code = InstructionStream::cast(Object(raw_code)); return NativeRegExpMacroAssembler::CheckStackGuardState( frame_entry(re_frame, kIsolateOffset), @@ -1256,10 +1262,10 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address, return_address, re_code, frame_entry_address
(re_frame, kInputStringOffset), frame_entry_address(re_frame, kInputStartOffset), - frame_entry_address(re_frame, kInputEndOffset)); + frame_entry_address(re_frame, kInputEndOffset), + extra_space); } - Operand RegExpMacroAssemblerIA32::register_location(int register_index) { DCHECK(register_index < (1<<30)); if (num_registers_ <= register_index) { diff --git a/src/regexp/ia32/regexp-macro-assembler-ia32.h b/src/regexp/ia32/regexp-macro-assembler-ia32.h index 649c61d880e7dd7773914c6a418379597ac65295..a33b687c8c3349e70073a644549a2fa840caa0a3 100644 --- a/src/regexp/ia32/regexp-macro-assembler-ia32.h +++ b/src/regexp/ia32/regexp-macro-assembler-ia32.h @@ -89,7 +89,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32 // returning. // {raw_code} is an Address because this is called via ExternalReference. static int CheckStackGuardState(Address* return_address, Address raw_code, - Address re_frame); + Address re_frame, uintptr_t extra_space); private: Operand StaticVariable(const ExternalReference& ext); @@ -159,7 +159,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32 // Check whether we are exceeding the stack limit on the backtrack stack. void CheckStackLimit(); - void CallCheckStackGuardState(Register scratch); + void CallCheckStackGuardState(Register scratch, + Immediate extra_space = Immediate(0)); void CallIsCharacterInRangeArray(const ZoneList* ranges); // The ebp-relative location of a regexp register. diff --git a/src/regexp/regexp-compiler-tonode.cc b/src/regexp/regexp-compiler-tonode.cc index 3258bb51492aaa230907a3bae70fb1d48d4f9854..44f611907242e3569fbc3ce36668f52e518ef1c3 100644 --- a/src/regexp/regexp-compiler-tonode.cc +++ b/src/regexp/regexp-compiler-tonode.cc @@ -1190,6 +1190,8 @@ RegExpNode* RegExpLookaround::Builder::ForMatch(RegExpNode* match) { RegExpNode* RegExpLookaround::ToNode(RegExpCompiler* compiler, RegExpNode* on_success) { + compiler->ToNodeMaybeCheckForStackOverflow(); + int stack_pointer_register = compiler->AllocateRegister(); int position_register = compiler->AllocateRegister(); diff --git a/src/regexp/regexp-interpreter.cc b/src/regexp/regexp-interpreter.cc index 57eff39e7daed9137269ea8003577cccba473524..a2c67b1f58df09debdd081c8a7c0216901fd797b 100644 --- a/src/regexp/regexp-interpreter.cc +++ b/src/regexp/regexp-interpreter.cc @@ -177,22 +177,30 @@ class InterpreterRegisters { int output_register_count) : registers_(total_register_count), output_registers_(output_registers), + total_register_count_(total_register_count), output_register_count_(output_register_count) { // TODO(jgruber): Use int32_t consistently for registers. Currently, CSA // uses int32_t while runtime uses int. static_assert(sizeof(int) == sizeof(int32_t)); - DCHECK_GE(output_register_count, 2); // At least 2 for the match itself. - DCHECK_GE(total_register_count, output_register_count); - DCHECK_LE(total_register_count, RegExpMacroAssembler::kMaxRegisterCount); + SBXCHECK_GE(output_register_count, 2); // At least 2 for the match itself. + SBXCHECK_GE(total_register_count, output_register_count); + SBXCHECK_LE(total_register_count, RegExpMacroAssembler::kMaxRegisterCount); DCHECK_NOT_NULL(output_registers); // Initialize the output register region to -1 signifying 'no match'. 
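// Note: besides promoting the constructor's DCHECKs to SBXCHECKs, the
// accessors below gain a per-access bound check against the total register
// count. The standalone shape of such an accessor (illustrative, not V8
// code):

template <typename RegisterT>
class CheckedRegisters {
 public:
  RegisterT& operator[](size_t index) {
    SBXCHECK_LT(index, total_register_count_);  // trap instead of OOB access
    return registers_[index];
  }

 private:
  RegisterT* registers_;
  size_t total_register_count_;
};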
std::memset(registers_.data(), -1, output_register_count * sizeof(RegisterT)); + USE(total_register_count_); } - const RegisterT& operator[](size_t index) const { return registers_[index]; } - RegisterT& operator[](size_t index) { return registers_[index]; } + const RegisterT& operator[](size_t index) const { + SBXCHECK_LT(index, total_register_count_); + return registers_[index]; + } + RegisterT& operator[](size_t index) { + SBXCHECK_LT(index, total_register_count_); + return registers_[index]; + } void CopyToOutputRegisters() { MemCopy(output_registers_, registers_.data(), @@ -203,6 +211,7 @@ class InterpreterRegisters { static constexpr int kStaticCapacity = 64; // Arbitrary. base::SmallVector registers_; RegisterT* const output_registers_; + const int total_register_count_; const int output_register_count_; }; diff --git a/src/regexp/regexp-macro-assembler.cc b/src/regexp/regexp-macro-assembler.cc index 8a248aaed581b2039f1d1d9f57fa8e5186f2b06a..5aff2b0fa5333fef2e43ee99932e0498b724ce29 100644 --- a/src/regexp/regexp-macro-assembler.cc +++ b/src/regexp/regexp-macro-assembler.cc @@ -284,14 +284,14 @@ bool NativeRegExpMacroAssembler::CanReadUnaligned() const { int NativeRegExpMacroAssembler::CheckStackGuardState( Isolate* isolate, int start_index, RegExp::CallOrigin call_origin, Address* return_address, InstructionStream re_code, Address* subject, - const byte** input_start, const byte** input_end) { + const byte** input_start, const byte** input_end, uintptr_t gap) { DisallowGarbageCollection no_gc; Address old_pc = PointerAuthentication::AuthenticatePC(return_address, 0); DCHECK_LE(re_code.instruction_start(), old_pc); DCHECK_LE(old_pc, re_code.code(kAcquireLoad).instruction_end()); StackLimitCheck check(isolate); - bool js_has_overflowed = check.JsHasOverflowed(); + bool js_has_overflowed = check.JsHasOverflowed(gap); if (call_origin == RegExp::CallOrigin::kFromJs) { // Direct calls from JavaScript can be interrupted in two ways: diff --git a/src/regexp/regexp-macro-assembler.h b/src/regexp/regexp-macro-assembler.h index 2ba9e2d28d0bc82a3f9ac7b0cd2cb346322c9dc1..4d16f8a98ad458e219a911c54003cd2182d9a9ed 100644 --- a/src/regexp/regexp-macro-assembler.h +++ b/src/regexp/regexp-macro-assembler.h @@ -334,7 +334,7 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler { Address* return_address, InstructionStream re_code, Address* subject, const byte** input_start, - const byte** input_end); + const byte** input_end, uintptr_t gap); static Address word_character_map_address() { return reinterpret_cast
(&word_character_map[0]); diff --git a/src/regexp/regexp.cc b/src/regexp/regexp.cc index e6e7d140a7a285c63056b586e90602051dabf4e2..c8bd9bee198b8d313c77b271ab30a6cf2aa5c2df 100644 --- a/src/regexp/regexp.cc +++ b/src/regexp/regexp.cc @@ -1201,6 +1201,15 @@ int32_t* RegExpGlobalCache::FetchNext() { if (num_matches_ <= 0) { return nullptr; } + + // Number of matches can't exceed maximum matches. + // This check is enough to prevent OOB accesses to register_array_ in the + // else branch below, since current_match_index < num_matches_ in this + // branch, it follows that current_match_index < max_matches_. And since + // max_matches_ = register_array_size_ / registers_per_match it follows + // that current_match_index * registers_per_match_ < register_array_size_. + SBXCHECK_LE(num_matches_, max_matches_); + current_match_index_ = 0; return register_array_; } else { diff --git a/src/regexp/x64/regexp-macro-assembler-x64.cc b/src/regexp/x64/regexp-macro-assembler-x64.cc index 53b2f5ab52c294740159d4bb9851e212bf0c266f..5a535ee57f8baa9b23ca306023bab9229c970b2f 100644 --- a/src/regexp/x64/regexp-macro-assembler-x64.cc +++ b/src/regexp/x64/regexp-macro-assembler-x64.cc @@ -842,11 +842,13 @@ Handle RegExpMacroAssemblerX64::GetCode(Handle source) { __ movq(r9, rsp); __ Move(kScratchRegister, stack_limit); __ subq(r9, Operand(kScratchRegister, 0)); + Immediate extra_space_for_variables(num_registers_ * kSystemPointerSize); + // Handle it if the stack pointer is already below the stack limit. __ j(below_equal, &stack_limit_hit); // Check if there is room for the variable number of registers above // the stack limit. - __ cmpq(r9, Immediate(num_registers_ * kSystemPointerSize)); + __ cmpq(r9, extra_space_for_variables); __ j(above_equal, &stack_ok); // Exit with OutOfMemory exception. There is not enough space on the stack // for our working registers. @@ -856,7 +858,8 @@ Handle RegExpMacroAssemblerX64::GetCode(Handle source) { __ bind(&stack_limit_hit); __ Move(code_object_pointer(), masm_.CodeObject()); __ pushq(backtrack_stackpointer()); - CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp. + // CallCheckStackGuardState preserves no registers beside rbp and rsp. + CallCheckStackGuardState(extra_space_for_variables); __ popq(backtrack_stackpointer()); __ testq(rax, rax); // If returned value is non-zero, we exit with the returned value as result. @@ -1266,35 +1269,38 @@ void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) { // Private methods: -void RegExpMacroAssemblerX64::CallCheckStackGuardState() { +void RegExpMacroAssemblerX64::CallCheckStackGuardState(Immediate extra_space) { // This function call preserves no register values. Caller should // store anything volatile in a C call or overwritten by this function. - static const int num_arguments = 3; + static const int num_arguments = 4; __ PrepareCallCFunction(num_arguments); #ifdef V8_TARGET_OS_WIN + // Fourth argument: Extra space for variables. + __ movq(arg_reg_4, extra_space); // Second argument: InstructionStream of self. (Do this before overwriting - // r8). - __ movq(rdx, code_object_pointer()); + // r8 (arg_reg_3)). + __ movq(arg_reg_2, code_object_pointer()); // Third argument: RegExp code frame pointer. - __ movq(r8, rbp); + __ movq(arg_reg_3, rbp); // First argument: Next address on the stack (will be address of // return address). - __ leaq(rcx, Operand(rsp, -kSystemPointerSize)); + __ leaq(arg_reg_1, Operand(rsp, -kSystemPointerSize)); #else + // Fourth argument: Extra space for variables. 
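// Note on the SBXCHECK_LE(num_matches_, max_matches_) added in
// RegExpGlobalCache::FetchNext above, with concrete numbers: suppose
// registers_per_match_ = 2 and register_array_size_ = 8, so max_matches_ = 4.
// The else branch reads register_array_ + current_match_index_ *
// registers_per_match_; with current_match_index_ < num_matches_ <= 4, the
// highest offset touched is 3 * 2 + 1 = 7, inside the 8-register buffer. A
// corrupted num_matches_ = 6 would otherwise allow offset 5 * 2 = 10, past
// the end of the buffer.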
+ __ movq(arg_reg_4, extra_space); // Third argument: RegExp code frame pointer. - __ movq(rdx, rbp); + __ movq(arg_reg_3, rbp); // Second argument: InstructionStream of self. - __ movq(rsi, code_object_pointer()); + __ movq(arg_reg_2, code_object_pointer()); // First argument: Next address on the stack (will be address of // return address). - __ leaq(rdi, Operand(rsp, -kSystemPointerSize)); + __ leaq(arg_reg_1, Operand(rsp, -kSystemPointerSize)); #endif ExternalReference stack_check = ExternalReference::re_check_stack_guard_state(); CallCFunctionFromIrregexpCode(stack_check, num_arguments); } - // Helper function for reading a value out of a stack frame. template static T& frame_entry(Address re_frame, int frame_offset) { @@ -1309,7 +1315,8 @@ static T* frame_entry_address(Address re_frame, int frame_offset) { int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address, Address raw_code, - Address re_frame) { + Address re_frame, + uintptr_t extra_space) { InstructionStream re_code = InstructionStream::cast(Object(raw_code)); return NativeRegExpMacroAssembler::CheckStackGuardState( frame_entry(re_frame, kIsolateOffset), @@ -1319,10 +1326,10 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address, return_address, re_code, frame_entry_address
(re_frame, kInputStringOffset), frame_entry_address(re_frame, kInputStartOffset), - frame_entry_address(re_frame, kInputEndOffset)); + frame_entry_address(re_frame, kInputEndOffset), + extra_space); } - Operand RegExpMacroAssemblerX64::register_location(int register_index) { DCHECK(register_index < (1<<30)); if (num_registers_ <= register_index) { diff --git a/src/regexp/x64/regexp-macro-assembler-x64.h b/src/regexp/x64/regexp-macro-assembler-x64.h index bfe8290a19c0fb510010c7f8cf8b6595ef099cd4..85dacfddf6ab8892248f782c45e44cfda1603060 100644 --- a/src/regexp/x64/regexp-macro-assembler-x64.h +++ b/src/regexp/x64/regexp-macro-assembler-x64.h @@ -88,7 +88,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64 // returning. // {raw_code} is an Address because this is called via ExternalReference. static int CheckStackGuardState(Address* return_address, Address raw_code, - Address re_frame); + Address re_frame, uintptr_t extra_space); private: // Offsets from rbp of function parameters and stored registers. @@ -198,7 +198,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64 // Check whether we are exceeding the stack limit on the backtrack stack. void CheckStackLimit(); - void CallCheckStackGuardState(); + void CallCheckStackGuardState(Immediate extra_space = Immediate(0)); void CallIsCharacterInRangeArray(const ZoneList* ranges); // The rbp-relative location of a regexp register. diff --git a/src/runtime/runtime-internal.cc b/src/runtime/runtime-internal.cc index c54debfed7cb9a23ca9066694819644cf7212350..dce33fe161f4d83639d48945ecf0297a74bcb013 100644 --- a/src/runtime/runtime-internal.cc +++ b/src/runtime/runtime-internal.cc @@ -346,7 +346,23 @@ RUNTIME_FUNCTION(Runtime_StackGuard) { return isolate->StackOverflow(); } - return isolate->stack_guard()->HandleInterrupts(); + return isolate->stack_guard()->HandleInterrupts( + StackGuard::InterruptLevel::kAnyEffect); +} + +RUNTIME_FUNCTION(Runtime_HandleNoHeapWritesInterrupts) { + SealHandleScope shs(isolate); + DCHECK_EQ(0, args.length()); + TRACE_EVENT0("v8.execute", "V8.StackGuard"); + + // First check if this is a real stack overflow. 
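The `extra_space` plumbed through `CheckStackGuardState` above ends up as the `gap` argument to `StackLimitCheck::JsHasOverflowed` (see the regexp-macro-assembler.cc hunk earlier), reserving room for the regexp code's variable-sized register area before the limit check passes. A minimal model of a downward-growing stack-limit check with a gap; names and address arithmetic are illustrative:

```cpp
#include <cassert>
#include <cstdint>

bool js_has_overflowed(uintptr_t sp, uintptr_t jslimit, uintptr_t gap) {
  // Stacks grow down: we have overflowed once fewer than `gap` bytes
  // remain between the stack pointer and the real limit.
  if (jslimit > UINTPTR_MAX - gap) return true;  // limit + gap would wrap
  return sp < jslimit + gap;
}

int main() {
  assert(!js_has_overflowed(/*sp=*/8192, /*jslimit=*/1024, /*gap=*/0));
  assert(js_has_overflowed(/*sp=*/8192, /*jslimit=*/1024, /*gap=*/16384));
}
```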
+ StackLimitCheck check(isolate); + if (check.JsHasOverflowed()) { + return isolate->StackOverflow(); + } + + return isolate->stack_guard()->HandleInterrupts( + StackGuard::InterruptLevel::kNoHeapWrites); } RUNTIME_FUNCTION(Runtime_StackGuardWithGap) { @@ -361,7 +377,8 @@ RUNTIME_FUNCTION(Runtime_StackGuardWithGap) { return isolate->StackOverflow(); } - return isolate->stack_guard()->HandleInterrupts(); + return isolate->stack_guard()->HandleInterrupts( + StackGuard::InterruptLevel::kAnyEffect); } namespace { diff --git a/src/runtime/runtime-regexp.cc b/src/runtime/runtime-regexp.cc index 4399f4bb8faa47d0681f0847d59d071ac6b5b4b6..b561298aa056a30e13ed0025002ea95fe2beb940 100644 --- a/src/runtime/runtime-regexp.cc +++ b/src/runtime/runtime-regexp.cc @@ -1155,7 +1155,20 @@ Handle ConstructNamedCaptureGroupsObject( Handle capture_value(f_get_capture(capture_ix), isolate); DCHECK(capture_value->IsUndefined(isolate) || capture_value->IsString()); - JSObject::AddProperty(isolate, groups, capture_name, capture_value, NONE); + LookupIterator it(isolate, groups, capture_name, groups, + LookupIterator::OWN_SKIP_INTERCEPTOR); + if (it.IsFound()) { + DCHECK(v8_flags.js_regexp_duplicate_named_groups); + if (!capture_value->IsUndefined(isolate)) { + DCHECK(it.GetDataValue()->IsUndefined(isolate)); + CHECK(Object::SetDataProperty(&it, capture_value).ToChecked()); + } + } else { + CHECK(Object::AddDataProperty(&it, capture_value, NONE, + Just(ShouldThrow::kThrowOnError), + StoreOrigin::kNamed) + .IsJust()); + } } return groups; diff --git a/src/runtime/runtime-wasm.cc b/src/runtime/runtime-wasm.cc index 9b625f7e1c8acff3a742625c8a364d1e33f30a18..a63325dbd315baa9de121ee0212484ce17aa0a35 100644 --- a/src/runtime/runtime-wasm.cc +++ b/src/runtime/runtime-wasm.cc @@ -246,7 +246,8 @@ RUNTIME_FUNCTION(Runtime_WasmStackGuard) { StackLimitCheck check(isolate); if (check.JsHasOverflowed()) return isolate->StackOverflow(); - return isolate->stack_guard()->HandleInterrupts(); + return isolate->stack_guard()->HandleInterrupts( + StackGuard::InterruptLevel::kAnyEffect); } RUNTIME_FUNCTION(Runtime_WasmCompileLazy) { diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h index ab3ecfac97c9a165eae3cdc572b456fbc90a56f6..e09d6205ba9fd469a29f9ac381f43b88adffba9a 100644 --- a/src/runtime/runtime.h +++ b/src/runtime/runtime.h @@ -248,6 +248,7 @@ namespace internal { F(PerformMicrotaskCheckpoint, 0, 1) \ F(SharedValueBarrierSlow, 1, 1) \ F(StackGuard, 0, 1) \ + F(HandleNoHeapWritesInterrupts, 0, 1) \ F(StackGuardWithGap, 1, 1) \ F(Throw, 1, 1) \ F(ThrowApplyNonFunction, 1, 1) \ diff --git a/src/strings/string-stream.cc b/src/strings/string-stream.cc index 2c96c7cf99a89ad6f33390e868e41d80c6ab2980..618b138b9505eae1520d9eda8778bfb263b6bfcd 100644 --- a/src/strings/string-stream.cc +++ b/src/strings/string-stream.cc @@ -416,7 +416,7 @@ void StringStream::PrintPrototype(JSFunction fun, Object receiver) { bool print_name = false; Isolate* isolate = fun.GetIsolate(); if (receiver.IsNullOrUndefined(isolate) || receiver.IsTheHole(isolate) || - receiver.IsJSProxy()) { + receiver.IsJSProxy() || receiver.IsWasmObject()) { print_name = true; } else if (!isolate->context().is_null()) { if (!receiver.IsJSObject()) { @@ -426,7 +426,7 @@ void StringStream::PrintPrototype(JSFunction fun, Object receiver) { for (PrototypeIterator iter(isolate, JSObject::cast(receiver), kStartAtReceiver); !iter.IsAtEnd(); iter.Advance()) { - if (iter.GetCurrent().IsJSProxy()) break; + if (!iter.GetCurrent().IsJSObject()) break; Object key =
iter.GetCurrent().SlowReverseLookup(fun); if (!key.IsUndefined(isolate)) { if (!name.IsString() || !key.IsString() || diff --git a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h index 121581c76cacf61f721e710f30d243d00df4a7f4..5f739ebfcbf60e4ac897d399b22ed637cad1b62d 100644 --- a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h +++ b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h @@ -342,9 +342,9 @@ void LiftoffAssembler::PatchPrepareStackFrame( // Emit the unconditional branch in the function prologue (from {offset} to // {pc_offset()}). patching_assembler.b((pc_offset() - offset) >> kInstrSizeLog2); -#ifdef V8_ENABLE_JIT_CODE_SIGN - patching_assembler.ReleaseJitCodeSigner(); -#endif + #ifdef V8_ENABLE_JIT_CODE_SIGN + patching_assembler.ReleaseJitCodeSigner(); + #endif // If the frame is bigger than the stack, we throw the stack overflow // exception unconditionally. Thereby we can avoid the integer overflow diff --git a/src/wasm/baseline/liftoff-assembler.cc b/src/wasm/baseline/liftoff-assembler.cc index 20a708623a169fe63f6f9e860b05fd0ed758e47f..e5e2083a83976e93e2095ef04d70ca310e7eb35f 100644 --- a/src/wasm/baseline/liftoff-assembler.cc +++ b/src/wasm/baseline/liftoff-assembler.cc @@ -765,29 +765,10 @@ void LiftoffAssembler::DropExceptionValueAtOffset(int offset) { cache_state_.stack_state.pop_back(); } -void LiftoffAssembler::PrepareLoopArgs(int num) { - for (int i = 0; i < num; ++i) { - VarState& slot = cache_state_.stack_state.end()[-1 - i]; - if (slot.is_stack()) continue; - RegClass rc = reg_class_for(slot.kind()); - if (slot.is_reg()) { - if (cache_state_.get_use_count(slot.reg()) > 1) { - // If the register is used more than once, we cannot use it for the - // merge. Move it to an unused register instead. - LiftoffRegList pinned; - pinned.set(slot.reg()); - LiftoffRegister dst_reg = GetUnusedRegister(rc, pinned); - Move(dst_reg, slot.reg(), slot.kind()); - cache_state_.dec_used(slot.reg()); - cache_state_.inc_used(dst_reg); - slot.MakeRegister(dst_reg); - } - continue; - } - LiftoffRegister reg = GetUnusedRegister(rc, {}); - LoadConstant(reg, slot.constant()); - slot.MakeRegister(reg); - cache_state_.inc_used(reg); +void LiftoffAssembler::SpillLoopArgs(int num) { + for (VarState& slot : + base::VectorOf(cache_state_.stack_state.end() - num, num)) { + Spill(&slot); } } @@ -979,14 +960,14 @@ void LiftoffAssembler::Spill(VarState* slot) { } void LiftoffAssembler::SpillLocals() { - for (uint32_t i = 0; i < num_locals_; ++i) { - Spill(&cache_state_.stack_state[i]); + for (VarState& local_slot : + base::VectorOf(cache_state_.stack_state.data(), num_locals_)) { + Spill(&local_slot); } } void LiftoffAssembler::SpillAllRegisters() { - for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) { - auto& slot = cache_state_.stack_state[i]; + for (VarState& slot : cache_state_.stack_state) { if (!slot.is_reg()) continue; Spill(slot.offset(), slot.reg(), slot.kind()); slot.MakeStack(); diff --git a/src/wasm/baseline/liftoff-assembler.h b/src/wasm/baseline/liftoff-assembler.h index d2258af7f39f87333aed24e6055a7dd72ac6dc09..a78df8c7b4b6f24369f32d297a263b9a8f42d04b 100644 --- a/src/wasm/baseline/liftoff-assembler.h +++ b/src/wasm/baseline/liftoff-assembler.h @@ -549,9 +549,9 @@ class LiftoffAssembler : public MacroAssembler { // the bottom of the stack. 
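The `SpillLoopArgs` rewrite above drops `PrepareLoopArgs`' register juggling in favour of unconditionally spilling the top `num` values, so every back-edge meets the loop header in the same all-on-stack state. A toy model of the new loop, using a three-state location tag in place of Liftoff's real `VarState`:

```cpp
#include <vector>

enum class Loc { kStack, kRegister, kConstant };

struct VarState {
  Loc loc = Loc::kRegister;
  void MakeStack() { loc = Loc::kStack; }
};

// Walk only the top `num` slots (the loop's inputs) and force each one
// into its stack slot; the real code also emits the store instruction.
void SpillLoopArgs(std::vector<VarState>& stack_state, int num) {
  for (auto it = stack_state.end() - num; it != stack_state.end(); ++it) {
    if (it->loc == Loc::kStack) continue;  // already spilled
    it->MakeStack();
  }
}

int main() {
  std::vector<VarState> stack(5);
  SpillLoopArgs(stack, 2);  // touches slots 3 and 4 only
}
```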
void DropExceptionValueAtOffset(int offset); - // Ensure that the loop inputs are either in a register or spilled to the - // stack, so that we can merge different values on the back-edge. - void PrepareLoopArgs(int num); + // Spill all loop inputs to the stack to free registers and to ensure that we + // can merge different values on the back-edge. + void SpillLoopArgs(int num); V8_INLINE static int NextSpillOffset(ValueKind kind, int top_spill_offset) { int offset = top_spill_offset + SlotSizeForType(kind); diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc index e58aeedf43a4fc13f5e1c5e18e29fdced244538a..3160eae6618a7f9450e671ee95ab401315233e6d 100644 --- a/src/wasm/baseline/liftoff-compiler.cc +++ b/src/wasm/baseline/liftoff-compiler.cc @@ -1262,7 +1262,7 @@ class LiftoffCompiler { // pre-analysis of the function. __ SpillLocals(); - __ PrepareLoopArgs(loop->start_merge.arity); + __ SpillLoopArgs(loop->start_merge.arity); // Loop labels bind at the beginning of the block. __ bind(loop->label.get()); @@ -1882,7 +1882,7 @@ class LiftoffCompiler { } case kExprExternExternalize: { LiftoffRegList pinned; - LiftoffRegister ref = pinned.set(__ PopToRegister(pinned)); + LiftoffRegister ref = pinned.set(__ PopToModifiableRegister(pinned)); LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned); LoadNullValueForCompare(null.gp(), pinned, kWasmAnyRef); Label label; diff --git a/src/wasm/canonical-types.cc b/src/wasm/canonical-types.cc index 253e888526a73d9709d73b80af69d0b24de06292..a4cf1cd2b20f11a5fc799064986ad5ec1234fe48 100644 --- a/src/wasm/canonical-types.cc +++ b/src/wasm/canonical-types.cc @@ -15,11 +15,21 @@ TypeCanonicalizer* GetTypeCanonicalizer() { return GetWasmEngine()->type_canonicalizer(); } -// We currently store canonical indices in {ValueType} instances, so they -// must fit into the range of valid module-relative (non-canonical type -// indices. -// TODO(jkummerow): Raise this limit, to make long-lived WasmEngines scale -// better. Plan: stop constructing ValueTypes from canonical type indices. +// Inside the TypeCanonicalizer, we use ValueType instances constructed +// from canonical type indices, so we can't let them get bigger than what +// we have storage space for. Code outside the TypeCanonicalizer already +// supports up to Smi range for canonical type indices. +// TODO(jkummerow): Raise this limit. Possible options: +// - increase the size of ValueType::HeapTypeField, using currently-unused bits. +// - change the encoding of ValueType: one bit says whether it's a ref type, +// the other bits then encode the index or the kind of non-ref type. +// - refactor the TypeCanonicalizer's internals to no longer use ValueTypes +// and related infrastructure, and use a wider encoding of canonicalized +// type indices only here. +// - wait for 32-bit platforms to no longer be relevant, and increase the +// size of ValueType to 64 bits. +// None of this seems urgent, as we have no evidence of the current limit +// being an actual limitation in practice. 
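To make the storage constraint described in the comment above concrete: the next chunk pins `kMaxCanonicalTypes` to the module-relative limit and backs it with a `static_assert` against the bitfield width plus a runtime `CHECK_LT` when an indexed type is built. A condensed sketch of that guard pattern; the field width and the one-million limit here are illustrative, not authoritative:

```cpp
#include <cstdint>
#include <cstdlib>

// Illustrative: a 32-bit type word split into kind bits and an index field.
constexpr uint32_t kHeapTypeBits = 20;
constexpr size_t kMaxCanonicalTypes = 1'000'000;

// Compile-time guarantee: every permissible index fits the bitfield.
static_assert(kMaxCanonicalTypes <= (1u << kHeapTypeBits));

uint32_t encode_index(uint32_t index) {
  // Runtime guard mirroring the CHECK_LT added to ValueType::FromIndex:
  // refuse to build a type whose index would be silently truncated.
  if (index >= kMaxCanonicalTypes) std::abort();
  return index;  // real code would also OR in the kind bits
}

int main() { return static_cast<int>(encode_index(7)); }
```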
static constexpr size_t kMaxCanonicalTypes = kV8MaxWasmTypes; void TypeCanonicalizer::CheckMaxCanonicalIndex() const { @@ -101,7 +111,7 @@ uint32_t TypeCanonicalizer::AddRecursiveGroup(const FunctionSig* sig) { group.types[0].is_relative_supertype = false; canonical_groups_.emplace(group, canonical_index); canonical_supertypes_.emplace_back(kNoSuperType); - DCHECK_LE(canonical_supertypes_.size(), kMaxCanonicalTypes); + CheckMaxCanonicalIndex(); } return canonical_index; } @@ -110,6 +120,7 @@ ValueType TypeCanonicalizer::CanonicalizeValueType( const WasmModule* module, ValueType type, uint32_t recursive_group_start) const { if (!type.has_index()) return type; + static_assert(kMaxCanonicalTypes <= (1u << ValueType::kHeapTypeBits)); return type.ref_index() >= recursive_group_start ? ValueType::CanonicalWithRelativeIndex( type.kind(), type.ref_index() - recursive_group_start) diff --git a/src/wasm/function-body-decoder-impl.h b/src/wasm/function-body-decoder-impl.h index f93a66bc30cb3663fc2f24f9050152b92f7dfc94..8529646124e1e9c7dfd6fe3da9e9c226279193ca 100644 --- a/src/wasm/function-body-decoder-impl.h +++ b/src/wasm/function-body-decoder-impl.h @@ -3113,7 +3113,7 @@ class WasmFullDecoder : public WasmDecoder { } DECODE(BrOnNonNull) { - CHECK_PROTOTYPE_OPCODE(gc); + CHECK_PROTOTYPE_OPCODE(typed_funcref); BranchDepthImmediate imm(this, this->pc_ + 1, validate); if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0; Value ref_object = Peek(0); @@ -3693,6 +3693,11 @@ class WasmFullDecoder : public WasmDecoder { CHECK_PROTOTYPE_OPCODE(return_call); SigIndexImmediate imm(this, this->pc_ + 1, validate); if (!this->Validate(this->pc_ + 1, imm)) return 0; + if (!VALIDATE(this->CanReturnCall(imm.sig))) { + this->DecodeError("%s: %s", WasmOpcodes::OpcodeName(kExprReturnCallRef), + "tail call return types mismatch"); + return 0; + } Value func_ref = Peek(0, 0, ValueType::RefNull(imm.index)); ArgVector args = PeekArgs(imm.sig, 1); CALL_INTERFACE_IF_OK_AND_REACHABLE(ReturnCallRef, func_ref, imm.sig, @@ -4693,6 +4698,15 @@ class WasmFullDecoder : public WasmDecoder { if (!this->ValidateElementSegment(elem_index_pc, elem_segment)) { return 0; } + ValueType segment_type = + this->module_->elem_segments[elem_segment.index].type; + if (!VALIDATE(IsSubtypeOf(segment_type, element_type, this->module_))) { + this->DecodeError( + "array.init_elem: segment type %s is not a subtype of array " + "element type %s", + segment_type.name().c_str(), element_type.name().c_str()); + return 0; + } Value array = Peek(3, 0, ValueType::RefNull(array_imm.index)); Value array_index = Peek(2, 1, kWasmI32); diff --git a/src/wasm/value-type.h b/src/wasm/value-type.h index a977e044c7ca6af130e657b2f56a0ab27f054fbb..ec81455785dcf8f6cf1d07e4d0ecfd6d0de83565 100644 --- a/src/wasm/value-type.h +++ b/src/wasm/value-type.h @@ -395,6 +395,7 @@ class ValueType { static constexpr ValueType FromIndex(ValueKind kind, uint32_t index) { DCHECK(kind == kRefNull || kind == kRef || kind == kRtt); + CHECK_LT(index, kV8MaxWasmTypes); return ValueType(KindField::encode(kind) | HeapTypeField::encode(index)); } diff --git a/src/wasm/wasm-js.cc b/src/wasm/wasm-js.cc index bf11ef74f26671ace899ac74962a529ff7e9d38d..c7f9d43b2e6bb6531f4aefcc307ad647dc2926e1 100644 --- a/src/wasm/wasm-js.cc +++ b/src/wasm/wasm-js.cc @@ -761,7 +761,7 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo& info) { } v8::ReturnValue return_value = info.GetReturnValue(); - return_value.Set(Utils::ToLocal(i::Handle::cast(module_obj))); + 
return_value.Set(Utils::ToLocal(module_obj)); } // WebAssembly.Module.imports(module) -> Array diff --git a/src/wasm/wasm-objects.cc b/src/wasm/wasm-objects.cc index be3e3a33f26ed81ee08c032e097e60cbd4552ffc..897d33098a297dd6c3767865d3b802175b1457e7 100644 --- a/src/wasm/wasm-objects.cc +++ b/src/wasm/wasm-objects.cc @@ -91,7 +91,7 @@ Handle WasmModuleObject::ExtractUtf8StringFromModuleBytes( base::Vector name_vec = wire_bytes.SubVector(ref.offset(), ref.end_offset()); // UTF8 validation happens at decode time. - DCHECK(unibrow::Utf8::ValidateEncoding(name_vec.begin(), name_vec.length())); + SBXCHECK(unibrow::Utf8::ValidateEncoding(name_vec.begin(), name_vec.length())); auto* factory = isolate->factory(); return internalize ? factory->InternalizeUtf8String( @@ -112,7 +112,7 @@ MaybeHandle WasmModuleObject::GetModuleNameOrNull( MaybeHandle WasmModuleObject::GetFunctionNameOrNull( Isolate* isolate, Handle module_object, uint32_t func_index) { - DCHECK_LT(func_index, module_object->module()->functions.size()); + SBXCHECK_LT(func_index, module_object->module()->functions.size()); wasm::WireBytesRef name = module_object->module()->lazily_generated_names.LookupFunctionName( wasm::ModuleWireBytes(module_object->native_module()->wire_bytes()), @@ -127,7 +127,7 @@ base::Vector WasmModuleObject::GetRawFunctionName( if (func_index == wasm::kAnonymousFuncIndex) { return base::Vector({nullptr, 0}); } - DCHECK_GT(module()->functions.size(), func_index); + SBXCHECK_GT(module()->functions.size(), func_index); wasm::ModuleWireBytes wire_bytes(native_module()->wire_bytes()); wasm::WireBytesRef name_ref = module()->lazily_generated_names.LookupFunctionName(wire_bytes, @@ -179,7 +179,7 @@ void WasmTableObject::AddDispatchTable(Isolate* isolate, int table_index) { Handle dispatch_tables(table_obj->dispatch_tables(), isolate); int old_length = dispatch_tables->length(); - DCHECK_EQ(0, old_length % kDispatchTableNumElements); + SBXCHECK_EQ(0, old_length % kDispatchTableNumElements); if (instance.is_null()) return; // TODO(titzer): use weak cells here to avoid leaking instances. @@ -208,13 +208,13 @@ int WasmTableObject::Grow(Isolate* isolate, Handle table, max_size = v8_flags.wasm_max_table_size; } max_size = std::min(max_size, v8_flags.wasm_max_table_size.value()); - DCHECK_LE(old_size, max_size); + SBXCHECK_LE(old_size, max_size); if (max_size - old_size < count) return -1; uint32_t new_size = old_size + count; // Even with 2x over-allocation, there should not be an integer overflow. static_assert(wasm::kV8MaxWasmTableSize <= kMaxInt / 2); - DCHECK_GE(kMaxInt, new_size); + SBXCHECK_GE(kMaxInt, new_size); int old_capacity = table->entries().length(); if (new_size > static_cast(old_capacity)) { int grow = static_cast(new_size) - old_capacity; @@ -229,7 +229,7 @@ int WasmTableObject::Grow(Isolate* isolate, Handle table, table->set_current_length(new_size); Handle dispatch_tables(table->dispatch_tables(), isolate); - DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements); + SBXCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements); // Tables are stored in the instance object, no code patching is // necessary. We simply have to grow the raw tables in each instance // that has imported this table. 
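From this point the patch systematically upgrades `DCHECK`s in wasm-objects.cc to `SBXCHECK`s. The difference, modelled below with simplified stand-in macros: a `DCHECK` compiles away outside debug builds, so it offers no release-mode protection when the checked value (a size or index read from in-sandbox memory) may be attacker-controlled, whereas a sandbox check stays live everywhere:

```cpp
#include <cstdlib>

#ifdef DEBUG
#define MODEL_DCHECK(cond) ((cond) ? (void)0 : std::abort())
#else
#define MODEL_DCHECK(cond) ((void)0)  // compiled out: no release protection
#endif

// Always on, in debug and release builds alike:
#define MODEL_SBXCHECK(cond) ((cond) ? (void)0 : std::abort())

unsigned checked_index(unsigned index, unsigned size) {
  MODEL_DCHECK(index < size);   // vanishes in release builds
  MODEL_SBXCHECK(index < size); // survives, catching corrupted metadata
  return index;
}

int main() { return static_cast<int>(checked_index(1, 4)); }
```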
@@ -244,7 +244,7 @@ int WasmTableObject::Grow(Isolate* isolate, Handle table, Handle instance( WasmInstanceObject::cast(dispatch_tables->get(i)), isolate); - DCHECK_EQ(old_size, + SBXCHECK_EQ(old_size, instance->GetIndirectFunctionTable(isolate, table_index)->size()); WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize( instance, table_index, new_size); @@ -264,7 +264,7 @@ MaybeHandle WasmTableObject::JSToWasmElement( Isolate* isolate, Handle table, Handle entry, const char** error_message) { // Any `entry` has to be in its JS representation. - DCHECK(!entry->IsWasmInternalFunction()); + SBXCHECK(!entry->IsWasmInternalFunction()); const WasmModule* module = !table->instance().IsUndefined() ? WasmInstanceObject::cast(table->instance()).module() @@ -278,7 +278,7 @@ void WasmTableObject::SetFunctionTableEntry(Isolate* isolate, Handle entries, int entry_index, Handle entry) { - if (entry->IsWasmNull(isolate)) { + if (entry->IsWasmNull(isolate) || entry->IsNull(isolate)) { ClearDispatchTables(isolate, table, entry_index); // Degenerate case. entries->set(entry_index, ReadOnlyRoots(isolate).wasm_null()); return; @@ -291,14 +291,16 @@ void WasmTableObject::SetFunctionTableEntry(Isolate* isolate, Handle target_instance(exported_function->instance(), isolate); int func_index = exported_function->function_index(); - auto* wasm_function = &target_instance->module()->functions[func_index]; + const WasmModule* module = target_instance->module(); + SBXCHECK_LT(func_index, module->functions.size()); + auto* wasm_function = module->functions.data() + func_index; UpdateDispatchTables(isolate, *table, entry_index, wasm_function, *target_instance); } else if (WasmJSFunction::IsWasmJSFunction(*external)) { UpdateDispatchTables(isolate, table, entry_index, Handle::cast(external)); } else { - DCHECK(WasmCapiFunction::IsWasmCapiFunction(*external)); + SBXCHECK(WasmCapiFunction::IsWasmCapiFunction(*external)); UpdateDispatchTables(isolate, table, entry_index, Handle::cast(external)); } @@ -309,7 +311,7 @@ void WasmTableObject::SetFunctionTableEntry(Isolate* isolate, void WasmTableObject::Set(Isolate* isolate, Handle table, uint32_t index, Handle entry) { // Callers need to perform bounds checks, type check, and error handling. - DCHECK(table->is_in_bounds(index)); + SBXCHECK(table->is_in_bounds(index)); Handle entries(table->entries(), isolate); // The FixedArray is addressed with int's. @@ -337,7 +339,7 @@ void WasmTableObject::Set(Isolate* isolate, Handle table, case wasm::HeapType::kBottom: UNREACHABLE(); default: - DCHECK(!table->instance().IsUndefined()); + SBXCHECK(!table->instance().IsUndefined()); if (WasmInstanceObject::cast(table->instance()) .module() ->has_signature(table->type().ref_index())) { @@ -354,7 +356,7 @@ Handle WasmTableObject::Get(Isolate* isolate, uint32_t index) { Handle entries(table->entries(), isolate); // Callers need to perform bounds checks and error handling. - DCHECK(table->is_in_bounds(index)); + SBXCHECK(table->is_in_bounds(index)); // The FixedArray is addressed with int's. 
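The `SetFunctionTableEntry` change just above shows the core pattern: validate the in-sandbox `func_index` against the trusted size before any pointer arithmetic into the functions array. A minimal sketch with a placeholder `WasmFunction`:

```cpp
#include <cstdlib>
#include <vector>

struct WasmFunction { int sig_index; };

const WasmFunction* checked_function(
    const std::vector<WasmFunction>& functions, size_t func_index) {
  if (func_index >= functions.size()) std::abort();  // SBXCHECK_LT
  // Only now is `data() + func_index` known to stay in bounds:
  return functions.data() + func_index;
}

int main() {
  std::vector<WasmFunction> funcs = {{0}, {1}};
  return checked_function(funcs, 1)->sig_index;
}
```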
int entry_index = static_cast(index); @@ -386,14 +388,14 @@ Handle WasmTableObject::Get(Isolate* isolate, case wasm::HeapType::kBottom: UNREACHABLE(); default: - DCHECK(!table->instance().IsUndefined()); + SBXCHECK(!table->instance().IsUndefined()); const WasmModule* module = WasmInstanceObject::cast(table->instance()).module(); if (module->has_array(table->type().ref_index()) || module->has_struct(table->type().ref_index())) { return entry; } - DCHECK(module->has_signature(table->type().ref_index())); + SBXCHECK(module->has_signature(table->type().ref_index())); if (entry->IsWasmInternalFunction()) return entry; break; } @@ -417,9 +419,9 @@ void WasmTableObject::Fill(Isolate* isolate, Handle table, uint32_t start, Handle entry, uint32_t count) { // Bounds checks must be done by the caller. - DCHECK_LE(start, table->current_length()); - DCHECK_LE(count, table->current_length()); - DCHECK_LE(start + count, table->current_length()); + SBXCHECK_LE(start, table->current_length()); + SBXCHECK_LE(count, table->current_length()); + SBXCHECK_LE(start + count, table->current_length()); for (uint32_t i = 0; i < count; i++) { WasmTableObject::Set(isolate, table, start + i, entry); @@ -437,7 +439,7 @@ void WasmTableObject::UpdateDispatchTables(Isolate* isolate, // We simply need to update the IFTs for each instance that imports // this table. FixedArray dispatch_tables = table.dispatch_tables(); - DCHECK_EQ(0, dispatch_tables.length() % kDispatchTableNumElements); + SBXCHECK_EQ(0, dispatch_tables.length() % kDispatchTableNumElements); Object call_ref = func->imported @@ -469,11 +471,10 @@ void WasmTableObject::UpdateDispatchTables(Isolate* isolate, Handle table, int entry_index, Handle function) { - // We simply need to update the IFTs for each instance that imports - // this table. Handle dispatch_tables(table->dispatch_tables(), isolate); - DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements); + SBXCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements); + // Update the dispatch table for each instance that imports this table. for (int i = 0; i < dispatch_tables->length(); i += kDispatchTableNumElements) { int table_index = @@ -494,7 +495,7 @@ void WasmTableObject::UpdateDispatchTables( // We simply need to update the IFTs for each instance that imports // this table. Handle dispatch_tables(table->dispatch_tables(), isolate); - DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements); + SBXCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements); // Reconstruct signature. // TODO(jkummerow): Unify with "SignatureHelper" in c-api.cc. @@ -514,6 +515,7 @@ void WasmTableObject::UpdateDispatchTables( int param_count = total_count - result_count; wasm::FunctionSig sig(result_count, param_count, reps.get()); + // Update the dispatch table for each instance that imports this table. 
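Why `Fill` needs all three `SBXCHECK_LE`s above rather than only the last one: `start + count` is evaluated in `uint32_t` and could wrap on its own. Once the first two checks bound each operand by the table length, and assuming the length itself stays far below 2^31 (as V8's table-size limits ensure), the sum can no longer wrap and the third comparison is meaningful. A sketch under those assumptions:

```cpp
#include <cassert>
#include <cstdint>

bool fill_args_ok(uint32_t start, uint32_t count, uint32_t length) {
  if (start > length) return false;  // SBXCHECK_LE(start, length)
  if (count > length) return false;  // SBXCHECK_LE(count, length)
  return start + count <= length;    // sum < 2 * length, so no wrap here
}

int main() {
  assert(fill_args_ok(2, 3, 10));
  assert(!fill_args_ok(0xffffffffu, 2, 10));  // rejected by the first check
}
```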
for (int i = 0; i < dispatch_tables->length(); i += kDispatchTableNumElements) { int table_index = @@ -556,7 +558,7 @@ void WasmTableObject::ClearDispatchTables(Isolate* isolate, Handle table, int index) { Handle dispatch_tables(table->dispatch_tables(), isolate); - DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements); + SBXCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements); for (int i = 0; i < dispatch_tables->length(); i += kDispatchTableNumElements) { int table_index = @@ -567,7 +569,7 @@ void WasmTableObject::ClearDispatchTables(Isolate* isolate, isolate); Handle function_table = target_instance->GetIndirectFunctionTable(isolate, table_index); - DCHECK_LT(index, function_table->size()); + SBXCHECK_LT(index, function_table->size()); function_table->Clear(index); } } @@ -591,8 +593,8 @@ void WasmTableObject::GetFunctionTableEntry( int entry_index, bool* is_valid, bool* is_null, MaybeHandle* instance, int* function_index, MaybeHandle* maybe_js_function) { - DCHECK(wasm::IsSubtypeOf(table->type(), wasm::kWasmFuncRef, module)); - DCHECK_LT(entry_index, table->current_length()); + SBXCHECK(wasm::IsSubtypeOf(table->type(), wasm::kWasmFuncRef, module)); + SBXCHECK_LT(entry_index, table->current_length()); // We initialize {is_valid} with {true}. We may change it later. *is_valid = true; Handle element(table->entries().get(entry_index), isolate); @@ -846,7 +848,7 @@ void WasmMemoryObject::update_instances(Isolate* isolate, WasmInstanceObject::cast(heap_object), isolate); SetInstanceMemory(instance, buffer); } else { - DCHECK(elem->IsCleared()); + SBXCHECK(elem->IsCleared()); } } } @@ -872,7 +874,7 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate, // {GrowWasmMemoryInPlace} because memory is never allocated with more // capacity than that limit. size_t old_size = old_buffer->byte_length(); - DCHECK_EQ(0, old_size % wasm::kWasmPageSize); + SBXCHECK_EQ(0, old_size % wasm::kWasmPageSize); size_t old_pages = old_size / wasm::kWasmPageSize; size_t max_pages = memory_object->is_memory64() ? wasm::max_mem64_pages() : wasm::max_mem32_pages(); @@ -880,7 +882,7 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate, max_pages = std::min(max_pages, static_cast(memory_object->maximum_pages())); } - DCHECK_GE(max_pages, old_pages); + SBXCHECK_GE(max_pages, old_pages); if (pages > max_pages - old_pages) return -1; base::Optional result_inplace = @@ -927,12 +929,12 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate, Handle symbol = isolate->factory()->array_buffer_wasm_memory_symbol(); JSObject::SetProperty(isolate, new_buffer, symbol, memory_object).Check(); - DCHECK_EQ(result_inplace.value(), old_pages); + SBXCHECK_EQ(result_inplace.value(), old_pages); return static_cast(result_inplace.value()); // success } size_t new_pages = old_pages + pages; - DCHECK_LT(old_pages, new_pages); + SBXCHECK_LT(old_pages, new_pages); // Try allocating a new backing store and copying. // To avoid overall quadratic complexity of many small grow operations, we // grow by at least 0.5 MB + 12.5% of the existing memory size. @@ -943,7 +945,7 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate, // {min_growth} can be bigger than {max_pages}, and in that case we want to // cap to {max_pages}. 
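The `Grow` path above keeps the same overflow discipline: it tests `pages > max_pages - old_pages` instead of `old_pages + pages > max_pages`, because the subtraction cannot wrap once `SBXCHECK_GE(max_pages, old_pages)` has passed, whereas the addition could. Condensed model:

```cpp
#include <cassert>
#include <cstddef>

bool can_grow(size_t old_pages, size_t pages, size_t max_pages) {
  if (old_pages > max_pages) return false;  // SBXCHECK_GE(max_pages, old_pages)
  return pages <= max_pages - old_pages;    // wrap-free by construction
}

int main() {
  assert(can_grow(10, 6, 16));
  assert(!can_grow(10, 7, 16));
}
```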
size_t new_capacity = std::min(max_pages, std::max(new_pages, min_growth)); - DCHECK_LT(old_pages, new_capacity); + SBXCHECK_LT(old_pages, new_capacity); std::unique_ptr new_backing_store = backing_store->CopyWasmMemory(isolate, new_pages, new_capacity, memory_object->is_memory64() @@ -989,7 +991,7 @@ MaybeHandle WasmGlobalObject::New( } if (type.is_reference()) { - DCHECK(maybe_untagged_buffer.is_null()); + SBXCHECK(maybe_untagged_buffer.is_null()); Handle tagged_buffer; if (!maybe_tagged_buffer.ToHandle(&tagged_buffer)) { // If no buffer was provided, create one. @@ -999,7 +1001,7 @@ MaybeHandle WasmGlobalObject::New( } global_obj->set_tagged_buffer(*tagged_buffer); } else { - DCHECK(maybe_tagged_buffer.is_null()); + SBXCHECK(maybe_tagged_buffer.is_null()); uint32_t type_size = type.value_kind_size(); Handle untagged_buffer; @@ -1044,7 +1046,7 @@ void ImportedFunctionEntry::SetWasmToJs( ", target=%p}\n", instance_->ptr(), index_, callable->ptr(), wasm_to_js_wrapper->instructions().begin()); - DCHECK(wasm_to_js_wrapper->kind() == wasm::WasmCode::kWasmToJsWrapper || + SBXCHECK(wasm_to_js_wrapper->kind() == wasm::WasmCode::kWasmToJsWrapper || wasm_to_js_wrapper->kind() == wasm::WasmCode::kWasmToCapiWrapper); Handle ref = isolate->factory()->NewWasmApiFunctionRef(callable, suspend, instance_); @@ -1090,7 +1092,7 @@ bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize( Handle instance, int table_index, uint32_t minimum_size) { Isolate* isolate = instance->GetIsolate(); - DCHECK_LT(table_index, instance->indirect_function_tables().length()); + SBXCHECK_LT(table_index, instance->indirect_function_tables().length()); Handle table = instance->GetIndirectFunctionTable(isolate, table_index); WasmIndirectFunctionTable::Resize(isolate, table, minimum_size); @@ -1217,7 +1219,7 @@ void WasmInstanceObject::InitDataSegmentArrays( // since they cannot be used (since the validator checks that number of // declared data segments when validating the memory.init and memory.drop // instructions). - DCHECK(num_data_segments == 0 || + SBXCHECK(num_data_segments == 0 || num_data_segments == module->data_segments.size()); for (uint32_t i = 0; i < num_data_segments; ++i) { const wasm::WasmDataSegment& segment = module->data_segments[i]; @@ -1245,7 +1247,7 @@ Address WasmInstanceObject::GetCallTarget(uint32_t func_index) { Handle WasmInstanceObject::GetIndirectFunctionTable( Isolate* isolate, uint32_t table_index) { - DCHECK_LT(table_index, indirect_function_tables().length()); + SBXCHECK_LT(table_index, indirect_function_tables().length()); return handle(WasmIndirectFunctionTable::cast( indirect_function_tables().get(table_index)), isolate); @@ -1478,7 +1480,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable( wasm::WasmImportData resolved(callable, sig, canonical_sig_index); wasm::ImportCallKind kind = resolved.kind(); callable = resolved.callable(); // Update to ultimate target. - DCHECK_NE(wasm::ImportCallKind::kLinkError, kind); + SBXCHECK_NE(wasm::ImportCallKind::kLinkError, kind); wasm::CompilationEnv env = native_module->CreateCompilationEnv(); // {expected_arity} should only be used if kind != kJSFunctionArityMismatch. 
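The global-buffer checks in this region (in `WasmGlobalObject::New` above and in `GetGlobalStorage`/`GetGlobalBufferAndIndex` in the next chunk) all enforce a single invariant: a reference-typed global lives in the tagged, GC-scanned buffer and a value-typed global in the untagged byte buffer, never both. A model using `std::variant` as a stand-in for the two V8 buffer kinds:

```cpp
#include <cstdint>
#include <variant>
#include <vector>

struct TaggedBuffer { std::vector<void*> slots; };     // GC-visible refs
struct UntaggedBuffer { std::vector<uint8_t> bytes; };  // raw value bits

struct Global {
  bool is_reference;
  std::variant<TaggedBuffer, UntaggedBuffer> buffer;

  // The SBXCHECK pairs (tagged implies no untagged buffer, and vice
  // versa) collapse to this single predicate:
  bool invariant_holds() const {
    return is_reference ? std::holds_alternative<TaggedBuffer>(buffer)
                        : std::holds_alternative<UntaggedBuffer>(buffer);
  }
};

int main() {
  Global g{/*is_reference=*/true, TaggedBuffer{}};
  return g.invariant_holds() ? 0 : 1;
}
```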
int expected_arity = -1; @@ -1518,7 +1520,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable( // static uint8_t* WasmInstanceObject::GetGlobalStorage( Handle instance, const wasm::WasmGlobal& global) { - DCHECK(!global.type.is_reference()); + SBXCHECK(!global.type.is_reference()); if (global.mutability && global.imported) { return reinterpret_cast( instance->imported_mutable_globals().get_sandboxed_pointer( @@ -1532,7 +1534,7 @@ uint8_t* WasmInstanceObject::GetGlobalStorage( std::pair, uint32_t> WasmInstanceObject::GetGlobalBufferAndIndex(Handle instance, const wasm::WasmGlobal& global) { - DCHECK(global.type.is_reference()); + SBXCHECK(global.type.is_reference()); Isolate* isolate = instance->GetIsolate(); if (global.mutability && global.imported) { Handle buffer( @@ -1541,7 +1543,7 @@ WasmInstanceObject::GetGlobalBufferAndIndex(Handle instance, isolate); Address idx = instance->imported_mutable_globals().get_int( global.index * kSystemPointerSize); - DCHECK_LE(idx, std::numeric_limits::max()); + SBXCHECK_LE(idx, std::numeric_limits::max()); return {buffer, static_cast(idx)}; } return {handle(instance->tagged_globals_buffer(), isolate), global.offset}; @@ -1627,7 +1629,7 @@ wasm::WasmValue WasmArray::GetElement(uint32_t index) { void WasmArray::SetTaggedElement(uint32_t index, Handle value, WriteBarrierMode mode) { - DCHECK(type()->element_type().is_reference()); + SBXCHECK(type()->element_type().is_reference()); TaggedField::store(*this, element_offset(index), *value); CONDITIONAL_WRITE_BARRIER(*this, element_offset(index), *value, mode); } @@ -1641,8 +1643,8 @@ Handle WasmTagObject::New(Isolate* isolate, isolate); // Serialize the signature. - DCHECK_EQ(0, sig->return_count()); - DCHECK_LE(sig->parameter_count(), std::numeric_limits::max()); + SBXCHECK_EQ(0, sig->return_count()); + SBXCHECK_LE(sig->parameter_count(), std::numeric_limits::max()); int sig_size = static_cast(sig->parameter_count()); Handle> serialized_sig = PodArray::New(isolate, sig_size, AllocationType::kOld); @@ -1749,7 +1751,7 @@ Handle WasmExceptionPackage::GetExceptionValues( isolate, exception_package, isolate->factory()->wasm_exception_values_symbol()) .ToHandle(&values)) { - DCHECK_IMPLIES(!values->IsUndefined(), values->IsFixedArray()); + CHECK_IMPLIES(!values->IsUndefined(), values->IsFixedArray()); return values; } return ReadOnlyRoots(isolate).undefined_value_handle(); @@ -1859,8 +1861,8 @@ constexpr uint32_t kBytesPerExceptionValuesArrayElement = 2; size_t ComputeEncodedElementSize(wasm::ValueType type) { size_t byte_size = type.value_kind_size(); - DCHECK_EQ(byte_size % kBytesPerExceptionValuesArrayElement, 0); - DCHECK_LE(1, byte_size / kBytesPerExceptionValuesArrayElement); + SBXCHECK_EQ(byte_size % kBytesPerExceptionValuesArrayElement, 0); + SBXCHECK_LE(1, byte_size / kBytesPerExceptionValuesArrayElement); return byte_size / kBytesPerExceptionValuesArrayElement; } @@ -1916,7 +1918,7 @@ bool WasmExportedFunction::IsWasmExportedFunction(Object object) { code.builtin_id() != Builtin::kWasmReturnPromiseOnSuspend) { return false; } - DCHECK(js_function.shared().HasWasmExportedFunctionData()); + SBXCHECK(js_function.shared().HasWasmExportedFunctionData()); return true; } @@ -1928,7 +1930,7 @@ bool WasmCapiFunction::IsWasmCapiFunction(Object object) { // if (js_function->code()->kind() != CodeKind::WASM_TO_CAPI_FUNCTION) { // return false; // } - // DCHECK(js_function->shared()->HasWasmCapiFunctionData()); + // SBXCHECK(js_function->shared()->HasWasmCapiFunctionData()); // return true; return 
js_function.shared().HasWasmCapiFunctionData(); } @@ -1970,7 +1972,7 @@ Handle WasmExportedFunction::New( Isolate* isolate, Handle instance, Handle internal, int func_index, int arity, Handle export_wrapper) { - DCHECK( + SBXCHECK( CodeKind::JS_TO_WASM_FUNCTION == export_wrapper->kind() || (export_wrapper->is_builtin() && (export_wrapper->builtin_id() == Builtin::kGenericJSToWasmWrapper || @@ -2031,7 +2033,7 @@ Handle WasmExportedFunction::New( // According to the spec, exported functions should not have a [[Construct]] // method. This does not apply to functions exported from asm.js however. - DCHECK_EQ(is_asm_js_module, js_function->IsConstructor()); + SBXCHECK_EQ(is_asm_js_module, js_function->IsConstructor()); shared->set_length(arity); shared->set_internal_formal_parameter_count(JSParameterCount(arity)); shared->set_script(instance->module_object().script()); @@ -2077,7 +2079,7 @@ Handle WasmJSFunction::New(Isolate* isolate, const wasm::FunctionSig* sig, Handle callable, wasm::Suspend suspend) { - DCHECK_LE(sig->all().size(), kMaxInt); + SBXCHECK_LE(sig->all().size(), kMaxInt); int sig_size = static_cast(sig->all().size()); int return_count = static_cast(sig->return_count()); int parameter_count = static_cast(sig->parameter_count()); @@ -2269,7 +2271,7 @@ namespace wasm { MaybeHandle JSToWasmObject(Isolate* isolate, Handle value, ValueType expected_canonical, const char** error_message) { - DCHECK(expected_canonical.is_object_reference()); + SBXCHECK(expected_canonical.is_object_reference()); if (expected_canonical.kind() == kRefNull && value->IsNull(isolate)) { switch (expected_canonical.heap_representation()) { case HeapType::kStringViewWtf8: @@ -2466,7 +2468,7 @@ MaybeHandle WasmToJSObject(Isolate* isolate, Handle value, if (value->IsWasmNull()) { return isolate->factory()->null_value(); } else { - DCHECK(value->IsWasmInternalFunction()); + SBXCHECK(value->IsWasmInternalFunction()); return i::WasmInternalFunction::GetOrCreateExternal( i::Handle::cast(value)); } diff --git a/src/wasm/wasm-serialization.cc b/src/wasm/wasm-serialization.cc index de5b7a2740f19ae8853494276390766a2b049577..d6e01f0f9632b1008390df2006bb6848c62d4271 100644 --- a/src/wasm/wasm-serialization.cc +++ b/src/wasm/wasm-serialization.cc @@ -153,7 +153,7 @@ void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) { DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); #ifdef V8_ENABLE_JIT_CODE_SIGN instr->SetBranchImmTarget( - reinterpret_cast(rinfo->pc() + tag * kInstrSize), nullptr); + reinterpret_cast(rinfo->pc() + tag * kInstrSize), nullptr); #else instr->SetBranchImmTarget( reinterpret_cast(rinfo->pc() + tag * kInstrSize)); @@ -997,4 +997,4 @@ MaybeHandle DeserializeNativeModule( } // namespace wasm } // namespace internal -} // namespace v8 \ No newline at end of file +} // namespace v8 diff --git a/test/mjsunit/compiler/bigint-shift-left.js b/test/mjsunit/compiler/bigint-shift-left.js index 1f17eeba6d4e8dfcfaa0257eeec327709c6d7689..2afce4b235e6267c24bd5ef3e9eb549693520683 100644 --- a/test/mjsunit/compiler/bigint-shift-left.js +++ b/test/mjsunit/compiler/bigint-shift-left.js @@ -87,7 +87,7 @@ })(); (function OptimizeAndTest() { - const bi = 2n ** 64n; + const bi = 2n ** 62n; function ShiftLeftByLarge(a) { return BigInt.asIntN(62, a << bi); } diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status index 5408bddf59b43acc2ed1e0721039b27b1426d78b..b0576934594f1e808d74f12e1f0c65c902a06efe 100644 --- a/test/mjsunit/mjsunit.status +++ b/test/mjsunit/mjsunit.status @@ -1634,6 +1634,7 @@ 
'regress/wasm/regress-1417516': [SKIP], 'regress/wasm/regress-13732': [SKIP], 'regress/wasm/regress-1408337': [SKIP], + 'regress/wasm/regress-343748812': [SKIP], 'regress/wasm/regress-crbug-1338980': [SKIP], 'regress/wasm/regress-crbug-1355070': [SKIP], 'regress/wasm/regress-crbug-1356718': [SKIP], diff --git a/test/mjsunit/regress/regress-345960102.js b/test/mjsunit/regress/regress-345960102.js new file mode 100644 index 0000000000000000000000000000000000000000..896277a7f7ac6f0c4db4e74177eb8fc9988432e7 --- /dev/null +++ b/test/mjsunit/regress/regress-345960102.js @@ -0,0 +1,19 @@ +// Copyright 2024 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --allow-natives-syntax + +y = BigInt("0xffffffffffffffff"); + +function test() { + let x = BigInt.asIntN(64, -1n); + let result = x >> (y); + return BigInt.asIntN(64, result); +} + +%PrepareFunctionForOptimization(test); +assertEquals(-1n, test()); +assertEquals(-1n, test()); +%OptimizeFunctionOnNextCall(test) +assertEquals(-1n, test()); diff --git a/test/mjsunit/regress/wasm/regress-14047.js b/test/mjsunit/regress/wasm/regress-14047.js new file mode 100644 index 0000000000000000000000000000000000000000..71ea130a991f2f12ea419ec966f1152d8dd63b68 --- /dev/null +++ b/test/mjsunit/regress/wasm/regress-14047.js @@ -0,0 +1,32 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --experimental-wasm-gc + +d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js'); + +let builder = new WasmModuleBuilder(); + +let struct_type = builder.addStruct([makeField(kWasmI32, true)]); + +builder.addFunction('main', kSig_v_v).exportFunc() + .addBody([ + kExprRefNull, struct_type, + kExprRefAsNonNull, + kGCPrefix, kExprStructGet, struct_type, 0, + kExprDrop, + kExprI32Const, 1, + ...wasmF32Const(42), + kExprF32Const, 0xd7, 0xff, 0xff, 0xff, // -nan:0x7fffd7 + kExprF32Gt, + kExprI32DivU, + kExprIf, kWasmVoid, + kExprUnreachable, + kExprEnd, +]); + +let main = builder.instantiate().exports.main; +assertThrows( + () => main(), WebAssembly.RuntimeError, /dereferencing a null pointer/); + diff --git a/test/mjsunit/regress/wasm/regress-343748812.js b/test/mjsunit/regress/wasm/regress-343748812.js new file mode 100644 index 0000000000000000000000000000000000000000..8dc456c413665e97c5f8e48f95a65370cf051753 --- /dev/null +++ b/test/mjsunit/regress/wasm/regress-343748812.js @@ -0,0 +1,30 @@ +// Copyright 2024 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
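regress-345960102.js above pins down the semantics being fixed: an arithmetic right shift of a negative BigInt by an arbitrarily large count must saturate to -1n (all sign bits), and of a non-negative one to 0n. The same rule expressed on `int64_t`, where a raw `>>` by 64 or more would be undefined behaviour, so the count has to be saturated explicitly:

```cpp
#include <cassert>
#include <cstdint>

int64_t sar_saturating(int64_t value, uint64_t shift) {
  // Shift counts of 64+ saturate to "all sign bits" instead of hitting UB.
  if (shift >= 64) return value < 0 ? -1 : 0;
  return value >> shift;  // arithmetic shift on two's-complement targets
}

int main() {
  assert(sar_saturating(-1, 0xffffffffffffffffull) == -1);  // the test case
  assert(sar_saturating(42, 64) == 0);
}
```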
+ +d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js'); + +const builder = new WasmModuleBuilder(); +let $sig0 = builder.addType(kSig_v_v); +let $sig7 = builder.addType( + makeSig([], [ kWasmExternRef, kWasmS128, kWasmExternRef ])); +let $func0 = builder.addImport('imports', 'func0', $sig0); +builder.addFunction("main", $sig0).exportFunc() + .addLocals(kWasmExternRef, 3) + .addBody([ + kExprTry, $sig7, + kExprCallFunction, $func0, + kExprUnreachable, + kExprCatchAll, + kExprRefNull, kExternRefCode, + ...wasmS128Const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), + kExprRefNull, kExternRefCode, + kExprEnd, + kExprDrop, + kExprDrop, + kExprDrop, + ]); + +var instance = builder.instantiate({'imports': { 'func0': () => {} }}); + +assertThrows(instance.exports.main, WebAssembly.RuntimeError, /unreachable/); diff --git a/test/unittests/compiler/common-operator-reducer-unittest.cc b/test/unittests/compiler/common-operator-reducer-unittest.cc index c76423a17496ca4f85bb59d72b9a99db837250ee..fb375507a2a9bc376c3a876b4152e25bff9540d5 100644 --- a/test/unittests/compiler/common-operator-reducer-unittest.cc +++ b/test/unittests/compiler/common-operator-reducer-unittest.cc @@ -377,6 +377,7 @@ TEST_F(CommonOperatorReducerTest, ReturnWithPhiAndEffectPhiAndMerge) { graph()->SetEnd(graph()->NewNode(common()->End(1), ret)); StrictMock editor; EXPECT_CALL(editor, Replace(merge, IsDead())); + EXPECT_CALL(editor, Revisit(graph()->end())).Times(2); Reduction const r = Reduce(&editor, ret, BranchSemantics::kJS); ASSERT_TRUE(r.Changed()); EXPECT_THAT(r.replacement(), IsDead()); diff --git a/test/unittests/interpreter/bytecode-generator-unittest.cc b/test/unittests/interpreter/bytecode-generator-unittest.cc index 55315b2db8076161d02b97f4534fc2c56002a13f..14e4b28c0e963b178ecb0bc71b19d9e8fe267ef1 100644 --- a/test/unittests/interpreter/bytecode-generator-unittest.cc +++ b/test/unittests/interpreter/bytecode-generator-unittest.cc @@ -3237,6 +3237,10 @@ TEST_F(BytecodeGeneratorTest, ElideRedundantHoleChecks) { "do { x; } while (y);\n" "x; y;\n", + // do-while with break + "do { x; break; } while (y);\n" + "x; y;\n", + // C-style for "for (x; y; z) { w; }\n" "x; y; z; w;\n", diff --git a/test/unittests/interpreter/bytecode_expectations/ElideRedundantHoleChecks.golden b/test/unittests/interpreter/bytecode_expectations/ElideRedundantHoleChecks.golden index 2aeaf6f4aeb9bc8e03f44aa33a8f5dc3723ae61a..8d71dbb36878ddd068a9ca238d06a00bfe7d1a44 100644 --- a/test/unittests/interpreter/bytecode_expectations/ElideRedundantHoleChecks.golden +++ b/test/unittests/interpreter/bytecode_expectations/ElideRedundantHoleChecks.golden @@ -176,6 +176,38 @@ constant pool: [ handlers: [ ] +--- +snippet: " + { + f = function f(a) { + do { x; break; } while (y); + x; y; + } + let w, x, y, z; + f(); + } +" +frame size: 0 +parameter count: 2 +bytecode array length: 16 +bytecodes: [ + /* 29 S> */ B(LdaImmutableCurrentContextSlot), U8(2), + B(ThrowReferenceErrorIfHole), U8(0), + /* 32 S> */ B(Jump), U8(2), + /* 52 S> */ B(LdaImmutableCurrentContextSlot), U8(2), + B(ThrowReferenceErrorIfHole), U8(0), + /* 55 S> */ B(LdaImmutableCurrentContextSlot), U8(3), + B(ThrowReferenceErrorIfHole), U8(1), + B(LdaUndefined), + /* 60 S> */ B(Return), +] +constant pool: [ + ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"], + ONE_BYTE_INTERNALIZED_STRING_TYPE ["y"], +] +handlers: [ +] + --- snippet: " { diff --git a/test/unittests/wasm/function-body-decoder-unittest.cc b/test/unittests/wasm/function-body-decoder-unittest.cc index 
ac3de26645d9f97d30ed83785eff3b6b71209d7d..b1327223bbba19c7e2a988ade3a875b6bc00e206 100644 --- a/test/unittests/wasm/function-body-decoder-unittest.cc +++ b/test/unittests/wasm/function-body-decoder-unittest.cc @@ -1128,7 +1128,7 @@ TEST_F(FunctionBodyDecoderTest, UnreachableRefTypes) { ExpectValidates(sigs.i_v(), {WASM_UNREACHABLE, kExprCallRef, sig_index}); ExpectValidates(sigs.i_v(), {WASM_UNREACHABLE, WASM_REF_FUNC(function_index), kExprCallRef, sig_index}); - ExpectValidates(sigs.v_v(), + ExpectValidates(sigs.i_v(), {WASM_UNREACHABLE, kExprReturnCallRef, sig_index}); ExpectValidates(sigs.v_v(), diff --git a/third_party/inspector_protocol/BUILD.gn b/third_party/inspector_protocol/BUILD.gn index 09c7d3ec121fe2283883b980856adbffe5912cad..47ffe18625330db8339b96f22e6b11f2be4eb236 100644 --- a/third_party/inspector_protocol/BUILD.gn +++ b/third_party/inspector_protocol/BUILD.gn @@ -40,7 +40,6 @@ v8_source_set_shared("crdtp_shared") { deps = [ ":crdtp_platform_shared" ] } -# A small adapter library which only :crdtp_shared may depend on. v8_source_set_shared("crdtp_platform_shared") { visibility = [ ":crdtp_shared" ] sources = [ diff --git a/tools/cp_v8_include.py b/tools/cp_v8_include.py index a192643278d9c69b6c250e5fdf601b0c740f5bf3..3c1b0cdf71e1cba36474788b211587f6439f718f 100644 --- a/tools/cp_v8_include.py +++ b/tools/cp_v8_include.py @@ -7,4 +7,4 @@ dst_folder = current_path + '/v8-include' if os.path.exists(dst_folder): shutil.rmtree(dst_folder) -shutil.copytree(src_folder, dst_folder) +shutil.copytree(src_folder, dst_folder) \ No newline at end of file diff --git a/v8_shared/BUILD.gn b/v8_shared/BUILD.gn index 260b30fec43e4795683ec86a00f256e0358cb1d9..2f177c945491d13609073215920c72ee9c2f2510 100644 --- a/v8_shared/BUILD.gn +++ b/v8_shared/BUILD.gn @@ -1741,7 +1741,7 @@ v8_header_set_shared("torque_runtime_support_shared") { sources = [ "../src/torque/runtime-support.h" ] - configs = [ ":v8_features_shared", ":internal_config_shared" ] + configs = [ ":internal_config_shared", ":v8_features_shared" ] } torque_files = [ @@ -2803,7 +2803,7 @@ v8_source_set_shared("v8_snapshot_shared") { } action("v8_dump_build_config") { - script = "tools/testrunner/utils/dump_build_config.py" + script = "../tools/testrunner/utils/dump_build_config.py" outputs = [ "$root_out_dir/../v8_build_config.json" ] is_DEBUG_defined = v8_enable_debugging_features || dcheck_always_on is_full_debug = v8_enable_debugging_features && !v8_optimized_debug @@ -5902,7 +5902,7 @@ v8_source_set_shared("v8_base_without_compiler_shared") { ] if (v8_enable_i18n_support_shared) { - deps += [ "//v8:run_gen-regexp-special-case" ] + deps += [ ":run_gen-regexp-special-case_shared" ] sources += [ "$target_gen_dir/src/regexp/special-case.cc" ] if (is_win) { deps += [ "$v8_icu_path:icudata" ]
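A closing note on the unittest fix above (`sigs.v_v()` becomes `sigs.i_v()` for the unreachable `kExprReturnCallRef` case): it follows directly from the `CanReturnCall` validation added in function-body-decoder-impl.h, since a tail call returns straight to the caller's caller and therefore must produce the calling function's declared results. A stripped-down version of that rule; V8's real check compares result types with subtyping, which plain equality only approximates here:

```cpp
#include <cassert>
#include <vector>

enum class ValType { i32, i64, f32, f64, externref };

// return_call / return_call_ref validity: the callee's results must
// satisfy the calling function's declared results.
bool CanReturnCall(const std::vector<ValType>& callee_results,
                   const std::vector<ValType>& caller_results) {
  return callee_results == caller_results;
}

int main() {
  // A void function may not tail-call an i32-returning callee...
  assert(!CanReturnCall({ValType::i32}, {}));
  // ...but an i32-returning function may, hence sigs.i_v() in the test.
  assert(CanReturnCall({ValType::i32}, {ValType::i32}));
}
```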