From d1d1c5ecec86b7da9ac2d0f32046ad94657fbe1a Mon Sep 17 00:00:00 2001
From: belong326
Date: Tue, 24 Sep 2024 20:38:38 +0800
Subject: [PATCH 1/2] [Blue-yellow sync] Sync the yellow zone code
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Change-Id: Ie6b213781ff8bc83d08cf4586eabd710bae28a10
---
 BUILD.gn                                      |   2 +-
 gni/v8.gni                                    |   2 +-
 include/v8-internal.h                         |   2 +-
 include/v8-primitive.h                        |   3 +-
 src/api/api-natives.cc                        |   2 +-
 src/api/api.cc                                |  90 ++++----
 src/api/api.h                                 |   3 +-
 src/ast/scopes.cc                             |  38 ++--
 src/base/logging.h                            |  31 +++
 src/builtins/builtins-array.cc                |   4 +-
 src/builtins/builtins-intl-gen.cc             | 130 +++++------
 src/builtins/promise-any.tq                   |  12 +-
 src/codegen/arm64/assembler-arm64-inl.h       |   2 +-
 src/codegen/arm64/assembler-arm64.h           |   4 +-
 src/codegen/arm64/instructions-arm64.cc       |  10 +-
 src/codegen/arm64/jit-code-signer-base.cc     |  11 +-
 src/codegen/arm64/jit-code-signer-base.h      |   2 +-
 src/codegen/arm64/jit-code-signer-helper.cc   |  45 ++--
 src/codegen/arm64/jit-code-signer-hybrid.cc   |  10 +-
 src/codegen/arm64/pac-sign-ctx.h              |   4 +-
 src/codegen/assembler.cc                      |   4 +-
 src/codegen/code-desc.h                       |   2 +-
 src/codegen/constant-pool.cc                  |   1 +
 src/codegen/external-reference.cc             |   9 +
 src/codegen/external-reference.h              |   3 +
 src/compiler/backend/instruction-selector.cc  |   8 +
 src/compiler/branch-elimination.cc            |   7 +-
 src/compiler/common-operator-reducer.cc       |  11 +-
 src/compiler/dead-code-elimination.cc         |   3 +-
 src/compiler/graph-reducer.h                  |   8 +
 src/compiler/js-call-reducer.cc               |  16 +-
 src/compiler/js-generic-lowering.cc           |  31 ++-
 src/compiler/js-inlining.cc                   |   5 +-
 src/compiler/js-intrinsic-lowering.cc         |   4 +-
 .../js-native-context-specialization.cc       |   9 +-
 src/compiler/js-operator.cc                   |   1 +
 src/compiler/js-typed-lowering.cc             |   6 +-
 src/compiler/linkage.cc                       |  13 +-
 src/compiler/machine-operator.cc              |   5 +-
 src/compiler/operation-typer.cc               |   1 +
 src/compiler/pipeline.cc                      |   5 +-
 src/compiler/simplified-lowering.cc           | 214 +++++++++---------
 src/compiler/wasm-gc-operator-reducer.cc      |   9 +-
 src/compiler/wasm-inlining.cc                 |   5 +-
 src/compiler/wasm-load-elimination.cc         |  32 +--
 src/compiler/wasm-load-elimination.h          |   2 +
 src/debug/debug-evaluate.cc                   |   1 +
 src/debug/debug-scopes.cc                     |   4 +-
 src/execution/stack-guard.cc                  |  70 +++---
 src/execution/stack-guard.h                   |  87 ++++---
 src/heap/concurrent-marking.cc                |   2 +
 src/heap/mark-compact.cc                      |  23 +-
 src/ic/ic.cc                                  |  21 +-
 src/interpreter/bytecode-generator.cc         |  12 +-
 src/interpreter/interpreter.cc                |   3 +
 src/json/json-stringifier.cc                  |   2 +
 src/maglev/maglev-graph-builder.cc            |   6 +-
 src/objects/code.cc                           |   4 +-
 src/objects/elements.cc                       |   3 +-
 src/objects/js-objects.cc                     |   5 +-
 src/objects/keys.cc                           |   3 +-
 src/objects/module.cc                         |   3 +-
 src/objects/objects.cc                        |  25 +-
 src/objects/objects.h                         |   5 +-
 src/objects/value-serializer.cc               |   5 +-
 src/regexp/arm/regexp-macro-assembler-arm.cc  |  24 +-
 src/regexp/arm/regexp-macro-assembler-arm.h   |   5 +-
 .../arm64/regexp-macro-assembler-arm64.cc     |  14 +-
 .../arm64/regexp-macro-assembler-arm64.h      |   6 +-
 .../experimental/experimental-interpreter.cc  |   2 +
 src/regexp/experimental/experimental.cc       |   4 +-
 .../ia32/regexp-macro-assembler-ia32.cc       |  20 +-
 src/regexp/ia32/regexp-macro-assembler-ia32.h |   5 +-
 src/regexp/regexp-compiler-tonode.cc          |   2 +
 src/regexp/regexp-interpreter.cc              |  19 +-
 src/regexp/regexp-macro-assembler.cc          |   4 +-
 src/regexp/regexp-macro-assembler.h           |   2 +-
 src/regexp/regexp.cc                          |   9 +
 src/regexp/x64/regexp-macro-assembler-x64.cc  |  37 +--
 src/regexp/x64/regexp-macro-assembler-x64.h   |   4 +-
 src/runtime/runtime-internal.cc               |  21 +-
 src/runtime/runtime-regexp.cc                 |  15 +-
 src/runtime/runtime-wasm.cc                   |   3 +-
 src/runtime/runtime.h                         |   1 +
 src/strings/string-stream.cc                  |   4 +-
 .../baseline/arm64/liftoff-assembler-arm64.h  |   6 +-
 src/wasm/baseline/liftoff-assembler.cc        |  35 +--
 src/wasm/baseline/liftoff-assembler.h         |   6 +-
 src/wasm/baseline/liftoff-compiler.cc         |   4 +-
 src/wasm/canonical-types.cc                   |  23 +-
 src/wasm/function-body-decoder-impl.h         |  16 +-
 src/wasm/value-type.h                         |   1 +
 src/wasm/wasm-js.cc                           |   2 +-
 src/wasm/wasm-objects.cc                      | 118 +++++-----
 src/wasm/wasm-serialization.cc                |   4 +-
 test/mjsunit/compiler/bigint-shift-left.js    |   2 +-
 test/mjsunit/mjsunit.status                   |   1 +
 .../common-operator-reducer-unittest.cc       |   1 +
 .../bytecode-generator-unittest.cc            |   4 +
 .../ElideRedundantHoleChecks.golden           |  32 +++
 .../wasm/function-body-decoder-unittest.cc    |   2 +-
 third_party/inspector_protocol/BUILD.gn       |   1 -
 tools/cp_v8_include.py                        |   2 +-
 v8_shared/BUILD.gn                            |   6 +-
 104 files changed, 917 insertions(+), 655 deletions(-)

diff --git a/BUILD.gn b/BUILD.gn
index f7ca30dd9..6cad57b74 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -2893,7 +2893,7 @@ v8_header_set("v8_flags") {
   ]
 }
 
-# This is split out to ioslate intructions on armv8.4
+# This is split out to isolate instructions on ARMv8.4
 v8_header_set("pac_sign_feature") {
   sources = [
     "src/codegen/arm64/pac-sign-ctx.h",
diff --git a/gni/v8.gni b/gni/v8.gni
index 77ae6f1e1..ac9e8e763 100644
--- a/gni/v8.gni
+++ b/gni/v8.gni
@@ -33,7 +33,7 @@ declare_args() {
   # Use external files for startup data blobs:
   # the JS builtins sources and the start snapshot.
   v8_use_external_startup_data = ""
-
+
   # v8_use_external_startup_data shared version.
   v8_use_external_startup_data_shared = false
diff --git a/include/v8-internal.h b/include/v8-internal.h
index 947dbef1d..1e9bbcbc8 100644
--- a/include/v8-internal.h
+++ b/include/v8-internal.h
@@ -517,7 +517,7 @@ class Internals {
   static const int kExternalOneByteRepresentationTag = 0x0a;
 
   static const uint32_t kNumIsolateDataSlots = 4;
-  static const int kStackGuardSize = 7 * kApiSystemPointerSize;
+  static const int kStackGuardSize = 8 * kApiSystemPointerSize;
   static const int kBuiltinTier0EntryTableSize = 7 * kApiSystemPointerSize;
   static const int kBuiltinTier0TableSize = 7 * kApiSystemPointerSize;
   static const int kLinearAllocationAreaSize = 3 * kApiSystemPointerSize;
diff --git a/include/v8-primitive.h b/include/v8-primitive.h
index ad3386b8b..aa664de21 100644
--- a/include/v8-primitive.h
+++ b/include/v8-primitive.h
@@ -516,7 +516,8 @@ class V8_EXPORT String : public Name {
    */
   class V8_EXPORT Utf8Value {
    public:
-    Utf8Value(Isolate* isolate, Local<v8::Value> obj);
+    Utf8Value(Isolate* isolate, Local<v8::Value> obj,
+              WriteOptions options = REPLACE_INVALID_UTF8);
     ~Utf8Value();
     char* operator*() { return str_; }
     const char* operator*() const { return str_; }
diff --git a/src/api/api-natives.cc b/src/api/api-natives.cc
index 905f29bf2..07cbe818c 100644
--- a/src/api/api-natives.cc
+++ b/src/api/api-natives.cc
@@ -137,7 +137,7 @@ void DisableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
   // Copy map so it won't interfere constructor's initial map.
   Handle<Map> new_map = Map::Copy(isolate, old_map, "DisableAccessChecks");
   new_map->set_is_access_check_needed(false);
-  JSObject::MigrateToMap(isolate, Handle<JSObject>::cast(object), new_map);
+  JSObject::MigrateToMap(isolate, object, new_map);
 }
 
 void EnableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
diff --git a/src/api/api.cc b/src/api/api.cc
index 6cdd75d93..e702d70bf 100644
--- a/src/api/api.cc
+++ b/src/api/api.cc
@@ -4583,15 +4583,7 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
   i::PropertyKey lookup_key(i_isolate, key_obj);
   i::LookupIterator it(i_isolate, self, lookup_key, i::LookupIterator::OWN);
-  if (self->IsJSProxy()) {
-    ENTER_V8(i_isolate, context, Object, CreateDataProperty, Nothing<bool>(),
-             i::HandleScope);
-    Maybe<bool> result =
-        i::JSReceiver::CreateDataProperty(&it, value_obj, Just(i::kDontThrow));
-    has_pending_exception = result.IsNothing();
-    RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
-    return result;
-  } else {
+  if (self->IsJSObject()) {
     ENTER_V8_NO_SCRIPT(i_isolate, context, Object, CreateDataProperty,
                        Nothing<bool>(), i::HandleScope);
     Maybe<bool> result =
@@ -4600,6 +4592,14 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
     RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
     return result;
   }
+  // JSProxy or WasmObject or other non-JSObject.
+  ENTER_V8(i_isolate, context, Object, CreateDataProperty, Nothing<bool>(),
+           i::HandleScope);
+  Maybe<bool> result =
+      i::JSReceiver::CreateDataProperty(&it, value_obj, Just(i::kDontThrow));
+  has_pending_exception = result.IsNothing();
+  RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+  return result;
 }
 
 Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
@@ -4610,15 +4610,7 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
   i::LookupIterator it(i_isolate, self, index, self, i::LookupIterator::OWN);
-  if (self->IsJSProxy()) {
-    ENTER_V8(i_isolate, context, Object, CreateDataProperty, Nothing<bool>(),
-             i::HandleScope);
-    Maybe<bool> result =
-        i::JSReceiver::CreateDataProperty(&it, value_obj, Just(i::kDontThrow));
-    has_pending_exception = result.IsNothing();
-    RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
-    return result;
-  } else {
+  if (self->IsJSObject()) {
     ENTER_V8_NO_SCRIPT(i_isolate, context, Object, CreateDataProperty,
                        Nothing<bool>(), i::HandleScope);
     Maybe<bool> result =
@@ -4627,6 +4619,14 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
     RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
     return result;
   }
+  // JSProxy or WasmObject or other non-JSObject.
+  ENTER_V8(i_isolate, context, Object, CreateDataProperty, Nothing<bool>(),
+           i::HandleScope);
+  Maybe<bool> result =
+      i::JSReceiver::CreateDataProperty(&it, value_obj, Just(i::kDontThrow));
+  has_pending_exception = result.IsNothing();
+  RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+  return result;
 }
 
 struct v8::PropertyDescriptor::PrivateData {
@@ -4737,15 +4737,7 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
   desc.set_configurable(!(attributes & v8::DontDelete));
   desc.set_value(value_obj);
 
-  if (self->IsJSProxy()) {
-    ENTER_V8(i_isolate, context, Object, DefineOwnProperty, Nothing<bool>(),
-             i::HandleScope);
-    Maybe<bool> success = i::JSReceiver::DefineOwnProperty(
-        i_isolate, self, key_obj, &desc, Just(i::kDontThrow));
-    // Even though we said kDontThrow, there might be accessors that do throw.
-    RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
-    return success;
-  } else {
+  if (self->IsJSObject()) {
     // If it's not a JSProxy, i::JSReceiver::DefineOwnProperty should never run
     // a script.
     ENTER_V8_NO_SCRIPT(i_isolate, context, Object, DefineOwnProperty,
@@ -4755,6 +4747,14 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
     RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
     return success;
   }
+  // JSProxy or WasmObject or other non-JSObject.
+  ENTER_V8(i_isolate, context, Object, DefineOwnProperty, Nothing<bool>(),
+           i::HandleScope);
+  Maybe<bool> success = i::JSReceiver::DefineOwnProperty(
+      i_isolate, self, key_obj, &desc, Just(i::kDontThrow));
+  // Even though we said kDontThrow, there might be accessors that do throw.
+  RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+  return success;
 }
 
 Maybe<bool> v8::Object::DefineProperty(v8::Local<v8::Context> context,
@@ -4781,6 +4781,15 @@ Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
   auto self = Utils::OpenHandle(this);
   auto key_obj = Utils::OpenHandle(reinterpret_cast<Name*>(*key));
   auto value_obj = Utils::OpenHandle(*value);
+  if (self->IsJSObject()) {
+    auto js_object = i::Handle<i::JSObject>::cast(self);
+    i::LookupIterator it(i_isolate, js_object, key_obj, js_object);
+    has_pending_exception = i::JSObject::DefineOwnPropertyIgnoreAttributes(
+                                &it, value_obj, i::DONT_ENUM)
+                                .is_null();
+    RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+    return Just(true);
+  }
   if (self->IsJSProxy()) {
     i::PropertyDescriptor desc;
     desc.set_writable(true);
     desc.set_enumerable(false);
     desc.set_configurable(true);
     desc.set_value(value_obj);
     return i::JSReceiver::DefineOwnProperty(
         i_isolate, i::Handle<i::JSProxy>::cast(self),
         i::Handle<i::Name>::cast(key_obj), &desc, Just(i::kDontThrow));
   }
-  auto js_object = i::Handle<i::JSObject>::cast(self);
-  i::LookupIterator it(i_isolate, js_object, key_obj, js_object);
-  has_pending_exception = i::JSObject::DefineOwnPropertyIgnoreAttributes(
-                              &it, value_obj, i::DONT_ENUM)
-                              .is_null();
-  RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
-  return Just(true);
+  // Wasm object, or other kind of special object not supported here.
+  return Just(false);
 }
 
 MaybeLocal<Value> v8::Object::Get(Local<v8::Context> context,
@@ -5075,6 +5079,7 @@ Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) {
   } else {
     // If it's not a JSProxy, i::Runtime::DeleteObjectProperty should never run
     // a script.
+    DCHECK(self->IsJSObject() || self->IsWasmObject());
     ENTER_V8_NO_SCRIPT(i_isolate, context, Object, Delete, Nothing<bool>(),
                        i::HandleScope);
     Maybe<bool> result = i::Runtime::DeleteObjectProperty(
@@ -5495,7 +5500,7 @@ bool v8::Object::IsApiWrapper() const {
 }
 
 bool v8::Object::IsUndetectable() const {
-  auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
+  auto self = Utils::OpenHandle(this);
   return self->IsUndetectable();
 }
 
@@ -6452,7 +6457,7 @@ void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
 void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
                                                    void* values[]) {
   i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
-
+  if (!obj->IsJSObject()) return;
   i::DisallowGarbageCollection no_gc;
   const char* location = "v8::Object::SetAlignedPointerInInternalFields()";
   i::JSObject js_obj = i::JSObject::cast(*obj);
@@ -8635,8 +8640,7 @@ MaybeLocal<WasmModuleObject> WasmModuleObject::FromCompiledModule(
       i::wasm::GetWasmEngine()->ImportNativeModule(
           i_isolate, compiled_module.native_module_,
           base::VectorOf(compiled_module.source_url()));
-  return Local<WasmModuleObject>::Cast(
-      Utils::ToLocal(i::Handle<i::JSObject>::cast(module_object)));
+  return Utils::ToLocal(module_object);
 #else
   UNREACHABLE();
 #endif  // V8_ENABLE_WEBASSEMBLY
@@ -8651,7 +8655,7 @@ MaybeLocal<WasmModuleObject> WasmModuleObject::Compile(
   if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
     return MaybeLocal<WasmModuleObject>();
   }
-  i::MaybeHandle<i::JSObject> maybe_compiled;
+  i::MaybeHandle<i::WasmModuleObject> maybe_compiled;
   {
     i::wasm::ErrorThrower thrower(i_isolate, "WasmModuleObject::Compile()");
     auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
@@ -8664,8 +8668,7 @@ MaybeLocal<WasmModuleObject> WasmModuleObject::Compile(
     i_isolate->OptionalRescheduleException(false);
     return MaybeLocal<WasmModuleObject>();
   }
-  return Local<WasmModuleObject>::Cast(
-      Utils::ToLocal(maybe_compiled.ToHandleChecked()));
+  return Utils::ToLocal(maybe_compiled.ToHandleChecked());
 #else
   Utils::ApiCheck(false, "WasmModuleObject::Compile",
                   "WebAssembly support is not enabled");
@@ -10458,7 +10461,8 @@ bool MicrotasksScope::IsRunningMicrotasks(Isolate* v8_isolate) {
   return microtask_queue->IsRunningMicrotasks();
 }
 
-String::Utf8Value::Utf8Value(v8::Isolate* v8_isolate, v8::Local<v8::Value> obj)
+String::Utf8Value::Utf8Value(v8::Isolate* v8_isolate, v8::Local<v8::Value> obj,
+                             WriteOptions options)
     : str_(nullptr), length_(0) {
   if (obj.IsEmpty()) return;
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
@@ -10470,7 +10474,7 @@ String::Utf8Value::Utf8Value(v8::Isolate* v8_isolate, v8::Local<v8::Value> obj,
   if (!obj->ToString(context).ToLocal(&str)) return;
   length_ = str->Utf8Length(v8_isolate);
   str_ = i::NewArray<char>(length_ + 1);
-  str->WriteUtf8(v8_isolate, str_);
+  str->WriteUtf8(v8_isolate, str_, -1, nullptr, options);
 }
 
 String::Utf8Value::~Utf8Value() { i::DeleteArray(str_); }
diff --git a/src/api/api.h b/src/api/api.h
index 461016c65..c2065453f 100644
--- a/src/api/api.h
+++ b/src/api/api.h
@@ -132,7 +132,8 @@ class RegisteredExtension {
   V(ToLocalPrimitive, Object, Primitive)               \
   V(FixedArrayToLocal, FixedArray, FixedArray)         \
   V(PrimitiveArrayToLocal, FixedArray, PrimitiveArray) \
-  V(ToLocal, ScriptOrModule, ScriptOrModule)
+  V(ToLocal, ScriptOrModule, ScriptOrModule)           \
+  IF_WASM(V, ToLocal, WasmModuleObject, WasmModuleObject)
 
 #define OPEN_HANDLE_LIST(V)                    \
   V(Template, TemplateInfo)                    \
diff --git a/src/ast/scopes.cc b/src/ast/scopes.cc
index 9cdc30a01..db977c8e2 100644
--- a/src/ast/scopes.cc
+++ b/src/ast/scopes.cc
@@ -1525,26 +1525,22 @@ DeclarationScope* Scope::GetConstructorScope() {
 }
 
 Scope* Scope::GetHomeObjectScope() {
-  Scope* scope = this;
-  while (scope != nullptr &&
-         !scope->is_home_object_scope()) {
-    if (scope->is_function_scope()) {
-      FunctionKind function_kind = scope->AsDeclarationScope()->function_kind();
-      // "super" in arrow functions binds outside the arrow function. But if we
-      // find a function which doesn't bind "super" (is not a method etc.) and
-      // not an arrow function, we know "super" here doesn't bind anywhere and
-      // we can return nullptr.
-      if (!IsArrowFunction(function_kind) && !BindsSuper(function_kind)) {
-        return nullptr;
-      }
-    }
-    if (scope->private_name_lookup_skips_outer_class()) {
-      DCHECK(scope->outer_scope()->is_class_scope());
-      scope = scope->outer_scope()->outer_scope();
-    } else {
-      scope = scope->outer_scope();
-    }
-  }
-  return scope;
+  Scope* scope = GetReceiverScope();
+  DCHECK(scope->is_function_scope());
+  FunctionKind kind = scope->AsDeclarationScope()->function_kind();
+  // "super" in arrow functions binds outside the arrow function. Arrow
+  // functions are also never receiver scopes since they close over the
+  // receiver.
+  DCHECK(!IsArrowFunction(kind));
+  // If we find a function which doesn't bind "super" (is not a method etc.),
+  // we know "super" here doesn't bind anywhere and we can return nullptr.
+  if (!BindsSuper(kind)) return nullptr;
+  // Functions that bind "super" can only syntactically occur nested inside
+  // home object scopes (i.e. class scopes and object literal scopes), so
+  // directly return the outer scope.
+  Scope* outer_scope = scope->outer_scope();
+  CHECK(outer_scope->is_home_object_scope());
+  return outer_scope;
 }
 
 DeclarationScope* Scope::GetScriptScope() {
@@ -2297,7 +2293,7 @@ void Scope::ResolveVariable(VariableProxy* proxy) {
     //
     // Because of the above, start resolving home objects directly at the home
     // object scope instead of the current scope.
-    Scope* scope = GetDeclarationScope()->GetHomeObjectScope();
+    Scope* scope = GetHomeObjectScope();
     DCHECK_NOT_NULL(scope);
     if (scope->scope_info_.is_null()) {
       var = Lookup(proxy, scope, nullptr);
diff --git a/src/base/logging.h b/src/base/logging.h
index e333aefd0..3571f308f 100644
--- a/src/base/logging.h
+++ b/src/base/logging.h
@@ -428,4 +428,35 @@ DEFINE_CHECK_OP_IMPL(GT, > )
 #define DCHECK_IMPLIES(v1, v2) ((void) 0)
 #endif
 
+// When the sandbox is enabled, a SBXCHECK behaves exactly like a CHECK, but
+// indicates that the check is required for the sandbox, i.e. prevents a
+// sandbox bypass. When the sandbox is off, it becomes a DCHECK.
+//
+// As an example, consider a scenario where an in-sandbox object stores an
+// index into an out-of-sandbox array (or a similar data structure). While
+// under normal circumstances it can be guaranteed that the index will always
+// be in bounds, with the sandbox attacker model, we have to assume that the
+// in-sandbox object can be corrupted by an attacker and so the access can go
+// out-of-bounds. In that case, a SBXCHECK can be used to both prevent memory
+// corruption outside of the sandbox and document that there is a
+// security-critical invariant that may be violated when an attacker can
+// corrupt memory inside the sandbox, but otherwise holds true.
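+//
+// A minimal illustrative sketch of that scenario (the names below are
+// hypothetical and not part of this patch):
+//
+//   size_t index = in_sandbox_object->index();  // may be attacker-corrupted
+//   SBXCHECK_LT(index, out_of_sandbox_array_size);
+//   out_of_sandbox_array[index] = value;        // safe only after the check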
+#ifdef V8_ENABLE_SANDBOX
+#define SBXCHECK(condition) CHECK(condition)
+#define SBXCHECK_EQ(lhs, rhs) CHECK_EQ(lhs, rhs)
+#define SBXCHECK_NE(lhs, rhs) CHECK_NE(lhs, rhs)
+#define SBXCHECK_GT(lhs, rhs) CHECK_GT(lhs, rhs)
+#define SBXCHECK_GE(lhs, rhs) CHECK_GE(lhs, rhs)
+#define SBXCHECK_LT(lhs, rhs) CHECK_LT(lhs, rhs)
+#define SBXCHECK_LE(lhs, rhs) CHECK_LE(lhs, rhs)
+#else
+#define SBXCHECK(condition) DCHECK(condition)
+#define SBXCHECK_EQ(lhs, rhs) DCHECK_EQ(lhs, rhs)
+#define SBXCHECK_NE(lhs, rhs) DCHECK_NE(lhs, rhs)
+#define SBXCHECK_GT(lhs, rhs) DCHECK_GT(lhs, rhs)
+#define SBXCHECK_GE(lhs, rhs) DCHECK_GE(lhs, rhs)
+#define SBXCHECK_LT(lhs, rhs) DCHECK_LT(lhs, rhs)
+#define SBXCHECK_LE(lhs, rhs) DCHECK_LE(lhs, rhs)
+#endif
+
 #endif  // V8_BASE_LOGGING_H_
diff --git a/src/builtins/builtins-array.cc b/src/builtins/builtins-array.cc
index 3a36fa4e6..d6ec472ff 100644
--- a/src/builtins/builtins-array.cc
+++ b/src/builtins/builtins-array.cc
@@ -49,7 +49,7 @@ inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver receiver) {
   DisallowGarbageCollection no_gc;
   PrototypeIterator iter(isolate, receiver, kStartAtReceiver);
   for (; !iter.IsAtEnd(); iter.Advance()) {
-    if (!iter.GetCurrent().IsObject()) return false;
+    if (!iter.GetCurrent().IsJSObject()) return false;
     JSObject current = iter.GetCurrent<JSObject>();
     if (!HasSimpleElements(current)) return false;
   }
@@ -1073,6 +1073,8 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
   if (!iter.IsAtEnd()) {
     // The prototype will usually have no inherited element indices,
     // but we have to check.
+    // Casting to JSObject is safe because we ran {HasOnlySimpleElements} on
+    // the receiver before, which checks the prototype chain.
     CollectElementIndices(
         isolate, PrototypeIterator::GetCurrent<JSObject>(iter), range, indices);
   }
diff --git a/src/builtins/builtins-intl-gen.cc b/src/builtins/builtins-intl-gen.cc
index c2a873ae0..2ad29253b 100644
--- a/src/builtins/builtins-intl-gen.cc
+++ b/src/builtins/builtins-intl-gen.cc
@@ -231,71 +231,71 @@ void IntlBuiltinsAssembler::ToLowerCaseImpl(
     ReturnFct(result);
   }
 
-    BIND(&two_byte_string);
-    {
-      const TNode<String> dst = AllocateSeqTwoByteString(length);
-      const TNode<RawPtrT> dst_ptr = PointerToSeqStringData(dst);
-      const TNode<ExternalReference> to_lower_table_addr =
-          ExternalConstant(ExternalReference::intl_to_latin1_lower_table());
-      TVARIABLE(IntPtrT, var_cursor, IntPtrConstant(0));
-      const int kMaxShortStringLength = 24;  // Determined empirically.
-      GotoIf(Uint32GreaterThan(length, Uint32Constant(kMaxShortStringLength)),
-             &runtime);
-      const TNode<IntPtrT> start_address =
-          ReinterpretCast<IntPtrT>(to_direct.PointerToData(&runtime));
-      const TNode<IntPtrT> end_address =
-          Signed(IntPtrAdd(start_address, IntPtrMul(IntPtrConstant(kUInt16Size),
-                                                    ChangeUint32ToWord(length))));
-
-      TVARIABLE(Word32T, var_did_change, Int32Constant(0));
-
-      VariableList push_vars({&var_cursor, &var_did_change}, zone());
-
-      BuildFastLoop<IntPtrT>(
-          push_vars, start_address, end_address,
-          [&](TNode<IntPtrT> current) {
-            TNode<Uint16T> c = Load<Uint16T>(current);
-
-            Label is_assic(this), is_not_assic(this), inc_offset(this);
-
-            Branch(Uint32LessThanOrEqual(c, Uint32Constant(0x00FF)), &is_assic,
-                   &is_not_assic);
-
-            BIND(&is_assic);
-            {
-              // For assic character, convert to lower case
-              TNode<Uint8T> lower =
-                  Load<Uint8T>(to_lower_table_addr, ChangeInt32ToIntPtr(c));
-              StoreNoWriteBarrier(MachineRepresentation::kWord16, dst_ptr,
-                                  var_cursor.value(), lower);
-              var_did_change =
-                  Word32Or(Word32NotEqual(c, lower), var_did_change.value());
-              Goto(&inc_offset);
-            }
-
-            BIND(&is_not_assic);
-            {
-              // For non-assic character, check if is a Chinese character
-              GotoIfNot(IsChinese(c), &runtime);
-              StoreNoWriteBarrier(MachineRepresentation::kWord16, dst_ptr,
-                                  var_cursor.value(), c);
-              Goto(&inc_offset);
-            }
-
-            BIND(&inc_offset);
-            {
-              // Store to dst string
-              Increment(&var_cursor, kUInt16Size);
-            }
-          },
-          kUInt16Size, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost);
-
-      // Return the original string if it remained unchanged in order to preserve
-      // e.g. internalization and private symbols (such as the preserved object
-      // hash) on the source string.
-      GotoIfNot(var_did_change.value(), &return_string);
-      ReturnFct(dst);
-    }
+  BIND(&two_byte_string);
+  {
+    const TNode<String> dst = AllocateSeqTwoByteString(length);
+    const TNode<RawPtrT> dst_ptr = PointerToSeqStringData(dst);
+    const TNode<ExternalReference> to_lower_table_addr =
+        ExternalConstant(ExternalReference::intl_to_latin1_lower_table());
+    TVARIABLE(IntPtrT, var_cursor, IntPtrConstant(0));
+    const int kMaxShortStringLength = 24;  // Determined empirically.
+    GotoIf(Uint32GreaterThan(length, Uint32Constant(kMaxShortStringLength)),
+           &runtime);
+    const TNode<IntPtrT> start_address =
+        ReinterpretCast<IntPtrT>(to_direct.PointerToData(&runtime));
+    const TNode<IntPtrT> end_address =
+        Signed(IntPtrAdd(start_address, IntPtrMul(IntPtrConstant(kUInt16Size),
+                                                  ChangeUint32ToWord(length))));
+
+    TVARIABLE(Word32T, var_did_change, Int32Constant(0));
+
+    VariableList push_vars({&var_cursor, &var_did_change}, zone());
+
+    BuildFastLoop<IntPtrT>(
+        push_vars, start_address, end_address,
+        [&](TNode<IntPtrT> current) {
+          TNode<Uint16T> c = Load<Uint16T>(current);
+
+          Label is_assic(this), is_not_assic(this), inc_offset(this);
+
+          Branch(Uint32LessThanOrEqual(c, Uint32Constant(0x00FF)), &is_assic,
+                 &is_not_assic);
+
+          BIND(&is_assic);
+          {
+            // For assic character, convert to lower case
+            TNode<Uint8T> lower =
+                Load<Uint8T>(to_lower_table_addr, ChangeInt32ToIntPtr(c));
+            StoreNoWriteBarrier(MachineRepresentation::kWord16, dst_ptr,
+                                var_cursor.value(), lower);
+            var_did_change =
+                Word32Or(Word32NotEqual(c, lower), var_did_change.value());
+            Goto(&inc_offset);
+          }
+
+          BIND(&is_not_assic);
+          {
+            // For non-assic character, check if is a Chinese character
+            GotoIfNot(IsChinese(c), &runtime);
+            StoreNoWriteBarrier(MachineRepresentation::kWord16, dst_ptr,
+                                var_cursor.value(), c);
+            Goto(&inc_offset);
+          }
+
+          BIND(&inc_offset);
+          {
+            // Store to dst string
+            Increment(&var_cursor, kUInt16Size);
+          }
+        },
+        kUInt16Size, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost);
+
+    // Return the original string if it remained unchanged in order to preserve
+    // e.g. internalization and private symbols (such as the preserved object
+    // hash) on the source string.
+    GotoIfNot(var_did_change.value(), &return_string);
+    ReturnFct(dst);
+  }
 
   BIND(&return_string);
   ReturnFct(string);
diff --git a/src/builtins/promise-any.tq b/src/builtins/promise-any.tq
index 45bafac0e..d531d57a3 100644
--- a/src/builtins/promise-any.tq
+++ b/src/builtins/promise-any.tq
@@ -106,9 +106,10 @@ PromiseAnyRejectElementClosure(
   const index = identityHash - 1;
 
   // 6. Let errors be F.[[Errors]].
-  let errors = *ContextSlot(
+  let errorsRef: &FixedArray = ContextSlot(
       context,
       PromiseAnyRejectElementContextSlots::kPromiseAnyRejectElementErrorsSlot);
+  let errors = *errorsRef;
 
   // 7. Let promiseCapability be F.[[Capability]].
 
@@ -134,10 +135,7 @@ PromiseAnyRejectElementClosure(
       IntPtrMax(SmiUntag(remainingElementsCount) - 1, index + 1);
   if (newCapacity > errors.length_intptr) deferred {
       errors = ExtractFixedArray(errors, 0, errors.length_intptr, newCapacity);
-      *ContextSlot(
-          context,
-          PromiseAnyRejectElementContextSlots::
-              kPromiseAnyRejectElementErrorsSlot) = errors;
+      *errorsRef = errors;
     }
   errors.objects[index] = value;
 
@@ -155,6 +153,10 @@ PromiseAnyRejectElementClosure(
     // b. Set error.[[AggregateErrors]] to errors.
     const error = ConstructAggregateError(errors);
+
+    // After this point, errors escapes to user code. Clear the slot.
+    *errorsRef = kEmptyFixedArray;
+
     // c. Return ? Call(promiseCapability.[[Reject]], undefined, « error »).
     const capability = *ContextSlot(
         context,
diff --git a/src/codegen/arm64/assembler-arm64-inl.h b/src/codegen/arm64/assembler-arm64-inl.h
index ef4c608e5..13fa09242 100644
--- a/src/codegen/arm64/assembler-arm64-inl.h
+++ b/src/codegen/arm64/assembler-arm64-inl.h
@@ -535,7 +535,7 @@ void Assembler::set_embedded_object_index_referenced_from(
   Address target = target_pointer_address_at(pc);
 #ifdef V8_ENABLE_JIT_CODE_SIGN
   TryPatchInstruction(jit_code_signer_, reinterpret_cast<Address>(target),
-                      static_cast<Instr>(data));
+                      static_cast<uint32_t>(data));
 #endif
   WriteUnalignedValue(target, static_cast<EmbeddedObjectIndex>(data));
diff --git a/src/codegen/arm64/assembler-arm64.h b/src/codegen/arm64/assembler-arm64.h
index bf7f78cb0..a6d162e13 100644
--- a/src/codegen/arm64/assembler-arm64.h
+++ b/src/codegen/arm64/assembler-arm64.h
@@ -3307,7 +3307,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   static_assert(AssemblerBase::kMinimalBufferSize >= 2 * kGap);
 
 #ifdef V8_ENABLE_JIT_CODE_SIGN
-  // Jit code signer for signing instruction
+  // Jit code signer for signing instructions
   JitCodeSignerBase *jit_code_signer_ = nullptr;
 #endif
@@ -3443,7 +3443,7 @@ class PatchingAssembler : public Assembler {
     TrySetCompileMode(jit_code_signer_, static_cast<int>(CompileMode::PATCH));
   }
 
-  void ReleaseJitCodeSigner() { 
+  void ReleaseJitCodeSigner() {
     if (jit_code_signer_ != nullptr) {
       TrySetCompileMode(jit_code_signer_, static_cast<int>(CompileMode::APPEND));
       jit_code_signer_ = nullptr;
diff --git a/src/codegen/arm64/instructions-arm64.cc b/src/codegen/arm64/instructions-arm64.cc
index 4842f7481..6093185c6 100644
--- a/src/codegen/arm64/instructions-arm64.cc
+++ b/src/codegen/arm64/instructions-arm64.cc
@@ -257,8 +257,8 @@ void Instruction::SetImmPCOffsetTarget(const AssemblerOptions& options,
 #ifdef V8_ENABLE_JIT_CODE_SIGN
 void Instruction::SetPCRelImmTarget(const AssemblerOptions& options,
-          Instruction* target,
-          JitCodeSignerBase* patch_signer) {
+                                    Instruction* target,
+                                    JitCodeSignerBase* patch_signer) {
 #else
 void Instruction::SetPCRelImmTarget(const AssemblerOptions& options,
                                     Instruction* target) {
@@ -322,7 +322,7 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
   }
   Instr insn = Mask(~imm_mask) | branch_imm;
 #ifdef V8_ENABLE_JIT_CODE_SIGN
-  TryPatchInstruction(patch_signer, reinterpret_cast<Instr*>(this), insn);
+  TryPatchInstruction(patch_signer, reinterpret_cast<Address>(this), insn);
 #endif
   SetInstructionBits(insn);
 }
@@ -353,7 +353,7 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(
 #ifdef V8_ENABLE_JIT_CODE_SIGN
 void Instruction::SetImmLLiteral(Instruction* source,
-          JitCodeSignerBase* patch_signer) {
+                                 JitCodeSignerBase* patch_signer) {
 #else
 void Instruction::SetImmLLiteral(Instruction* source) {
 #endif
@@ -366,7 +366,7 @@ void Instruction::SetImmLLiteral(Instruction* source) {
   Instr insn = Mask(~mask) | imm;
 #ifdef V8_ENABLE_JIT_CODE_SIGN
-  TryPatchInstruction(patch_signer, reinterpret_cast<Instr*>(this), insn);
+  TryPatchInstruction(patch_signer, reinterpret_cast<Address>(this), insn);
 #endif
   SetInstructionBits(insn);
 }
diff --git a/src/codegen/arm64/jit-code-signer-base.cc b/src/codegen/arm64/jit-code-signer-base.cc
index 5925e8a3b..2563d2e5c 100644
--- a/src/codegen/arm64/jit-code-signer-base.cc
+++ b/src/codegen/arm64/jit-code-signer-base.cc
@@ -15,7 +15,6 @@
 #include "src/codegen/arm64/jit-code-signer-base.h"
 
 #include
-
 namespace v8 {
 namespace internal {
@@ -54,7 +53,7 @@ int JitCodeSignerBase::SignInstruction(void *buffer, Instr insn)
         return PatchInstruction(buffer, insn);
     }
     return CS_SUCCESS;
-
 }
 int JitCodeSignerBase::SignData(void *buffer, const void *const data, uint32_t size)
@@ -149,7 +148,7 @@ bool JitCodeSignerBase::ConvertPatchOffsetToIndex(const int offset, int &cur_index)
     if (static_cast<uint32_t>(cur_index) >= sign_table_.size()) {
 #ifdef JIT_CODE_SIGN_DEBUGGABLE
         LOG_ERROR("Offset is out of range, index = %d, signTable size = %zu",
-            cur_index, sign_table_.size());
+                  cur_index, sign_table_.size());
 #endif
         return false;
     }
@@ -165,14 +164,14 @@ int32_t JitCodeSignerBase::CheckDataCopy(Instr *jit_memory, void *tmp_buffer, int size)
         return CS_ERR_TMP_BUFFER;
     }
 
-    //update tmp buffer
+    // update tmp buffer
     tmp_buffer_ = tmp_buffer;
 
     if (((size & UNALIGNMENT_MASK) != 0) ||
-        (static_cast<uint32_t>(size) > sign_table_.size() * INSTRUCTION_SIZE)) {
+            (static_cast<uint32_t>(size) > sign_table_.size() * INSTRUCTION_SIZE)) {
 #ifdef JIT_CODE_SIGN_DEBUGGABLE
         LOG_ERROR("Range invalid, size = %d, table size = %zu",
-            size, sign_table_.size());
+                  size, sign_table_.size());
 #endif
         return CS_ERR_JIT_SIGN_SIZE;
     }
diff --git a/src/codegen/arm64/jit-code-signer-base.h b/src/codegen/arm64/jit-code-signer-base.h
index ef8201849..f4537035f 100644
--- a/src/codegen/arm64/jit-code-signer-base.h
+++ b/src/codegen/arm64/jit-code-signer-base.h
@@ -67,7 +67,7 @@ public:
     int32_t SignData(const void *data, uint32_t size);
     int32_t PatchInstruction(void *jit_buffer, Instr insn);
     int32_t PatchData(int offset, const void *const data, uint32_t size);
-    int32_t PatchData(void *buffer, const void *const data, uint32_t size); 
+    int32_t PatchData(void *buffer, const void *const data, uint32_t size);
 
 protected:
     bool ConvertPatchOffsetToIndex(const int offset, int &cur_index);
diff --git a/src/codegen/arm64/jit-code-signer-helper.cc b/src/codegen/arm64/jit-code-signer-helper.cc
index 49a8822af..57a0ec474 100644
--- a/src/codegen/arm64/jit-code-signer-helper.cc
+++ b/src/codegen/arm64/jit-code-signer-helper.cc
@@ -13,8 +13,8 @@
  * limitations under the License.
 */
 #include <asm/hwcap.h>
-#include <sys/auxv.h>
+#include <sys/syscall.h>
 #include "src/codegen/arm64/jit-code-signer-helper.h"
 #include "src/codegen/arm64/jit-code-signer-base.h"
 #include "src/codegen/arm64/jit-code-signer-hybrid.h"
@@ -22,24 +22,37 @@
 namespace v8 {
 namespace internal {
 
-enum class JitCodeSignerStatus {
-    UNINITIALIZED,
-    SUPPORT,
-    UNSUPPORT
-};
+#define JITFORT_PRCTL_OPTION 0x6a6974
+#define JITFORT_CPU_FEATURES 7
 
-static JitCodeSignerStatus g_jitCodeSignerStatus = JitCodeSignerStatus::UNINITIALIZED;
+static inline long Syscall(
+    unsigned long n, unsigned long a, unsigned long b,
+    unsigned long c, unsigned long d, unsigned long e)
+{
+    register unsigned long x8 __asm__("x8") = n;
+    register unsigned long x0 __asm__("x0") = a;
+    register unsigned long x1 __asm__("x1") = b;
+    register unsigned long x2 __asm__("x2") = c;
+    register unsigned long x3 __asm__("x3") = d;
+    register unsigned long x4 __asm__("x4") = e;
+    asm volatile("svc 0" : "=r"(x0) : "r"(x8), "0"(x0), "r"(x1), \
+        "r"(x2), "r"(x3), "r"(x4) : "memory", "cc");
+    return x0;
+}
+
+static long inline PrctlWrapper(
+    int op, unsigned long a, unsigned long b = 0)
+{
+    return Syscall(SYS_prctl, op, a, b, 0, 0);
+}
 
 bool IsSupportJitCodeSigner()
 {
-    if (g_jitCodeSignerStatus == JitCodeSignerStatus::UNINITIALIZED) {
-        unsigned long hwcaps = getauxval(AT_HWCAP);
-        if ((hwcaps & HWCAP_PACA) && (hwcaps & HWCAP_PACG)) {
-            g_jitCodeSignerStatus = JitCodeSignerStatus::SUPPORT;
-        } else {
-            g_jitCodeSignerStatus = JitCodeSignerStatus::UNSUPPORT;
-        }
+    unsigned long hwcaps = static_cast<unsigned long>(PrctlWrapper(
+        JITFORT_PRCTL_OPTION, JITFORT_CPU_FEATURES, 0));
+    if ((hwcaps & HWCAP_PACA) && (hwcaps & HWCAP_PACG)) {
+        return true;
     }
-    return g_jitCodeSignerStatus == JitCodeSignerStatus::SUPPORT;
+    return false;
 }
 
 void TryRegisterTmpBuffer(JitCodeSignerBase *jit_code_signer, void *tmp_buffer)
@@ -120,7 +133,7 @@ void TryValidateCodeCopy(JitCodeSignerBase *jit_code_signer, void *jit_memory,
     void *tmp_buffer, int size)
 {
     if (jit_code_signer != nullptr) {
         V8_LIKELY(jit_code_signer->ValidateCodeCopy(reinterpret_cast<Instr*>(jit_memory),
-            tmp_buffer, size));
+                                                    tmp_buffer, size));
     }
 }
diff --git a/src/codegen/arm64/jit-code-signer-hybrid.cc b/src/codegen/arm64/jit-code-signer-hybrid.cc
index cfaf38fc0..11b11345e 100644
--- a/src/codegen/arm64/jit-code-signer-hybrid.cc
+++ b/src/codegen/arm64/jit-code-signer-hybrid.cc
@@ -70,9 +70,9 @@ int32_t JitCodeSignerHybrid::PatchInstruction(int offset, Instr insn)
 {
 #ifdef JIT_CODE_SIGN_DEBUGGABLE
     if (std::find(skipped_offset_.begin(), skipped_offset_.end(), offset)
-            == skipped_offset_.end()) {
-        LOG_ERROR("Update no skipped instruction failed at offset" \
-            "= %x", offset);
+        == skipped_offset_.end()) {
+        LOG_ERROR("Update no skipped instruction failed at offset" \
+                  "= %x", offset);
     }
 #endif
     int cur_index = 0;
@@ -102,7 +102,7 @@ int32_t JitCodeSignerHybrid::ValidateSubCode(Instr *jit_memory, PACSignCtx &verify_ctx,
         uint32_t signature = verify_ctx.Update(*insn_ptr);
         if (signature != sign_table_[index]) {
 #ifdef JIT_CODE_SIGN_DEBUGGABLE
-            LOG_ERROR("Validate insn (%8x) failed at offset = %x, "\
+            LOG_ERROR("Validate insn (%8x) failed at offset = %x, " \
                 "signature(%x) != wanted(%{pucblic}x)",
                 *(insn_ptr), index * INSTRUCTION_SIZE, signature, sign_table_[index]);
 #endif
@@ -156,7 +156,7 @@ int32_t JitCodeSignerHybrid::ValidateCodeCopy(Instr *jit_memory,
     if (ValidateSubCode(jit_memory, verify_ctx, tmp_buffer_, offset,
                         size - offset) != CS_SUCCESS) {
-            return CS_ERR_VALIDATE_CODE;
+        return CS_ERR_VALIDATE_CODE;
     }
     return CS_SUCCESS;
 }
diff --git a/src/codegen/arm64/pac-sign-ctx.h b/src/codegen/arm64/pac-sign-ctx.h
index 5fe01840a..e0aae9a88 100644
--- a/src/codegen/arm64/pac-sign-ctx.h
+++ b/src/codegen/arm64/pac-sign-ctx.h
@@ -21,8 +21,8 @@ namespace v8 {
 namespace internal {
 
 enum CTXConfig {
-    SIGN_AND_AUTH, // auth context when signing
-    SIGN_NO_AUTH, //no auth context when signing
+    SIGN_AND_AUTH,  // auth context when signing
+    SIGN_NO_AUTH,   // no auth context when signing
 };
 
 enum ContextType {
diff --git a/src/codegen/assembler.cc b/src/codegen/assembler.cc
index 1a82d0fd9..af52a931b 100644
--- a/src/codegen/assembler.cc
+++ b/src/codegen/assembler.cc
@@ -96,7 +96,7 @@ class DefaultAssemblerBuffer : public AssemblerBuffer {
  public:
 #ifdef V8_ENABLE_JIT_CODE_SIGN
   explicit DefaultAssemblerBuffer(int size,
-      std::unique_ptr<JitCodeSignerBase> signer = nullptr)
+                                  std::unique_ptr<JitCodeSignerBase> signer = nullptr)
       : buffer_(base::OwnedVector<uint8_t>::NewForOverwrite(
             std::max(AssemblerBase::kMinimalBufferSize, size))),
         jit_code_signer_(std::move(signer)) {
@@ -166,7 +166,7 @@ class ExternalAssemblerBufferImpl : public AssemblerBuffer {
  private:
   byte* const start_;
   const int size_;
-
+
 };
 
 static thread_local std::aligned_storage_tGetJitCodeSigner(), 1);
 #endif
diff --git a/src/codegen/external-reference.cc b/src/codegen/external-reference.cc
index 9c38472f1..6c0fe6c6e 100644
--- a/src/codegen/external-reference.cc
+++ b/src/codegen/external-reference.cc
@@ -514,6 +514,15 @@ ExternalReference ExternalReference::address_of_jslimit(Isolate* isolate) {
   return ExternalReference(address);
 }
 
+ExternalReference ExternalReference::address_of_no_heap_write_interrupt_request(
+    Isolate* isolate) {
+  Address address = isolate->stack_guard()->address_of_interrupt_request(
+      StackGuard::InterruptLevel::kNoHeapWrites);
+  // For efficient generated code, this should be root-register-addressable.
+  DCHECK(isolate->root_register_addressable_region().contains(address));
+  return ExternalReference(address);
+}
+
 ExternalReference ExternalReference::address_of_real_jslimit(Isolate* isolate) {
   Address address = isolate->stack_guard()->address_of_real_jslimit();
   // For efficient generated code, this should be root-register-addressable.
diff --git a/src/codegen/external-reference.h b/src/codegen/external-reference.h
index 3a5e82832..1a9ba781c 100644
--- a/src/codegen/external-reference.h
+++ b/src/codegen/external-reference.h
@@ -38,6 +38,9 @@ class StatsCounter;
   V(isolate_root, "Isolate::isolate_root()")                                \
   V(allocation_sites_list_address, "Heap::allocation_sites_list_address()") \
   V(address_of_jslimit, "StackGuard::address_of_jslimit()")                 \
+  V(address_of_no_heap_write_interrupt_request,                             \
+    "StackGuard::address_of_interrupt_request(StackGuard::InterruptLevel::" \
+    "kNoHeapWrites)")                                                       \
   V(address_of_real_jslimit, "StackGuard::address_of_real_jslimit()")       \
   V(heap_is_marking_flag_address, "heap_is_marking_flag_address")           \
   V(heap_is_minor_marking_flag_address, "heap_is_minor_marking_flag_address") \
diff --git a/src/compiler/backend/instruction-selector.cc b/src/compiler/backend/instruction-selector.cc
index 3c485b77e..9b2e43194 100644
--- a/src/compiler/backend/instruction-selector.cc
+++ b/src/compiler/backend/instruction-selector.cc
@@ -3391,6 +3391,14 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64(Node* node,
   const int kMaxRecursionDepth = 100;
 
   if (node->opcode() == IrOpcode::kPhi) {
+    // Intermediate results from previous calls are not necessarily correct.
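+    // (They are memoized in {phi_states_} per node id, but only hold for the
+    // query that computed them, so wipe the table before each fresh
+    // top-level query.)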
+    if (recursion_depth == 0) {
+      static_assert(sizeof(Upper32BitsState) == 1);
+      memset(phi_states_.data(),
+             static_cast<int>(Upper32BitsState::kNotYetChecked),
+             phi_states_.size());
+    }
+
     Upper32BitsState current = phi_states_[node->id()];
     if (current != Upper32BitsState::kNotYetChecked) {
       return current == Upper32BitsState::kUpperBitsGuaranteedZero;
diff --git a/src/compiler/branch-elimination.cc b/src/compiler/branch-elimination.cc
index 7a8d91cf8..0e60169f0 100644
--- a/src/compiler/branch-elimination.cc
+++ b/src/compiler/branch-elimination.cc
@@ -279,8 +279,7 @@ Reduction BranchElimination::ReduceTrapConditional(Node* node) {
     //   graph()->end().
     ReplaceWithValue(node, dead(), dead(), dead());
     Node* control = graph()->NewNode(common()->Throw(), node, node);
-    NodeProperties::MergeControlToEnd(graph(), common(), control);
-    Revisit(graph()->end());
+    MergeControlToEnd(graph(), common(), control);
     return Changed(node);
   } else {
     // This will not trap, remove it by relaxing effect/control.
@@ -322,9 +321,7 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
   } else {
     control = graph()->NewNode(common()->Deoptimize(p.reason(), p.feedback()),
                                frame_state, effect, control);
-    // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-    NodeProperties::MergeControlToEnd(graph(), common(), control);
-    Revisit(graph()->end());
+    MergeControlToEnd(graph(), common(), control);
   }
   return Replace(dead());
 }
diff --git a/src/compiler/common-operator-reducer.cc b/src/compiler/common-operator-reducer.cc
index e190595d6..493a8199e 100644
--- a/src/compiler/common-operator-reducer.cc
+++ b/src/compiler/common-operator-reducer.cc
@@ -172,9 +172,7 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
   } else {
     control = graph()->NewNode(common()->Deoptimize(p.reason(), p.feedback()),
                                frame_state, effect, control);
-    // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-    NodeProperties::MergeControlToEnd(graph(), common(), control);
-    Revisit(graph()->end());
+    MergeControlToEnd(graph(), common(), control);
   }
   return Replace(dead());
 }
@@ -394,7 +392,7 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
     //   the reducer logic will visit {end} again.
     Node* ret = graph()->NewNode(node->op(), pop_count, value_inputs[i],
                                  effect, control_inputs[i]);
-    NodeProperties::MergeControlToEnd(graph(), common(), ret);
+    MergeControlToEnd(graph(), common(), ret);
   }
   // Mark the Merge {control} and Return {node} as {dead}.
   Replace(control, dead());
@@ -410,7 +408,7 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
     //   the reducer logic will visit {end} again.
     Node* ret = graph()->NewNode(node->op(), pop_count, value_inputs[i],
                                  effect_inputs[i], control_inputs[i]);
-    NodeProperties::MergeControlToEnd(graph(), common(), ret);
+    MergeControlToEnd(graph(), common(), ret);
   }
   // Mark the Merge {control} and Return {node} as {dead}.
   Replace(control, dead());
@@ -526,8 +524,7 @@ Reduction CommonOperatorReducer::ReduceTrapConditional(Node* trap) {
     //   graph()->end().
     ReplaceWithValue(trap, dead(), dead(), dead());
     Node* control = graph()->NewNode(common()->Throw(), trap, trap);
-    NodeProperties::MergeControlToEnd(graph(), common(), control);
-    Revisit(graph()->end());
+    MergeControlToEnd(graph(), common(), control);
    return Changed(trap);
   } else {
     // This will not trap, remove it by relaxing effect/control.
diff --git a/src/compiler/dead-code-elimination.cc b/src/compiler/dead-code-elimination.cc
index 170b3d89d..fbbf356ba 100644
--- a/src/compiler/dead-code-elimination.cc
+++ b/src/compiler/dead-code-elimination.cc
@@ -247,11 +247,10 @@ Reduction DeadCodeElimination::ReduceEffectPhi(Node* node) {
       // phi nodes.
       Node* control = NodeProperties::GetControlInput(merge, i);
       Node* throw_node = graph_->NewNode(common_->Throw(), effect, control);
-      NodeProperties::MergeControlToEnd(graph_, common_, throw_node);
+      MergeControlToEnd(graph_, common_, throw_node);
       NodeProperties::ReplaceEffectInput(node, dead_, i);
       NodeProperties::ReplaceControlInput(merge, dead_, i);
       Revisit(merge);
-      Revisit(graph_->end());
       reduction = Changed(node);
     }
   }
diff --git a/src/compiler/graph-reducer.h b/src/compiler/graph-reducer.h
index 0877945bb..ae8def719 100644
--- a/src/compiler/graph-reducer.h
+++ b/src/compiler/graph-reducer.h
@@ -6,7 +6,9 @@
 #define V8_COMPILER_GRAPH_REDUCER_H_
 
 #include "src/base/compiler-specific.h"
+#include "src/compiler/graph.h"
 #include "src/compiler/node-marker.h"
+#include "src/compiler/node-properties.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -133,6 +135,12 @@ class AdvancedReducer : public Reducer {
     ReplaceWithValue(node, node, node, nullptr);
   }
 
+  void MergeControlToEnd(Graph* graph, CommonOperatorBuilder* common,
+                         Node* node) {
+    NodeProperties::MergeControlToEnd(graph, common, node);
+    Revisit(graph->end());
+  }
+
  private:
   Editor* const editor_;
 };
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc
index b6eca3f86..8e392f66b 100644
--- a/src/compiler/js-call-reducer.cc
+++ b/src/compiler/js-call-reducer.cc
@@ -3230,7 +3230,7 @@ Reduction JSCallReducer::ReduceReflectGet(Node* node) {
 
   // Connect the throwing path to end.
   if_false = graph()->NewNode(common()->Throw(), efalse, if_false);
-  NodeProperties::MergeControlToEnd(graph(), common(), if_false);
+  MergeControlToEnd(graph(), common(), if_false);
 
   // Continue on the regular path.
   ReplaceWithValue(node, vtrue, etrue, if_true);
@@ -3296,7 +3296,7 @@ Reduction JSCallReducer::ReduceReflectHas(Node* node) {
 
   // Connect the throwing path to end.
   if_false = graph()->NewNode(common()->Throw(), efalse, if_false);
-  NodeProperties::MergeControlToEnd(graph(), common(), if_false);
+  MergeControlToEnd(graph(), common(), if_false);
 
   // Continue on the regular path.
   ReplaceWithValue(node, vtrue, etrue, if_true);
@@ -4007,7 +4007,7 @@ void JSCallReducer::CheckIfConstructor(Node* construct) {
   //  simply connect the successful completion to the graph end.
   Node* throw_node =
       graph()->NewNode(common()->Throw(), check_throw, check_fail);
-  NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+  MergeControlToEnd(graph(), common(), throw_node);
 }
 
 namespace {
@@ -5549,9 +5549,7 @@ Reduction JSCallReducer::ReduceForInsufficientFeedback(
   Node* deoptimize =
       graph()->NewNode(common()->Deoptimize(reason, FeedbackSource()),
                        frame_state, effect, control);
-  // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-  NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-  Revisit(graph()->end());
+  MergeControlToEnd(graph(), common(), deoptimize);
   node->TrimInputCount(0);
   NodeProperties::ChangeOp(node, common()->Dead());
   return Changed(node);
@@ -5935,7 +5933,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
       Node* eloop =
           graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop);
       Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
-      NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+      MergeControlToEnd(graph(), common(), terminate);
 
       Node* index = graph()->NewNode(
           common()->Phi(MachineRepresentation::kTagged, 2),
@@ -7653,7 +7651,7 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
     Node* eloop = effect =
         graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
     Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
-    NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+    MergeControlToEnd(graph(), common(), terminate);
 
     // Check if reached the final table of the {receiver}.
     Node* table = effect = graph()->NewNode(
@@ -7749,7 +7747,7 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
       Node* eloop =
          graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
       Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
-      NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+      MergeControlToEnd(graph(), common(), terminate);
 
       Node* iloop = graph()->NewNode(
           common()->Phi(MachineRepresentation::kTagged, 2), index, index, loop);
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index 794884520..81fae7ddf 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -1141,15 +1141,28 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) {
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
-  Node* limit = effect =
-      graph()->NewNode(machine()->Load(MachineType::Pointer()),
-                       jsgraph()->ExternalConstant(
-                           ExternalReference::address_of_jslimit(isolate())),
-                       jsgraph()->IntPtrConstant(0), effect, control);
-
   StackCheckKind stack_check_kind = StackCheckKindOfJSStackCheck(node->op());
-  Node* check = effect = graph()->NewNode(
-      machine()->StackPointerGreaterThan(stack_check_kind), limit, effect);
+
+  Node* check;
+  if (stack_check_kind == StackCheckKind::kJSIterationBody) {
+    check = effect = graph()->NewNode(
+        machine()->Load(MachineType::Uint8()),
+        jsgraph()->ExternalConstant(
+            ExternalReference::address_of_no_heap_write_interrupt_request(
+                isolate())),
+        jsgraph()->IntPtrConstant(0), effect, control);
+    check = graph()->NewNode(machine()->Word32Equal(), check,
+                             jsgraph()->Int32Constant(0));
+  } else {
+    Node* limit = effect =
+        graph()->NewNode(machine()->Load(MachineType::Pointer()),
+                         jsgraph()->ExternalConstant(
+                             ExternalReference::address_of_jslimit(isolate())),
+                         jsgraph()->IntPtrConstant(0), effect, control);
+
+    check = effect = graph()->NewNode(
+        machine()->StackPointerGreaterThan(stack_check_kind), limit, effect);
+  }
   Node* branch =
       graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
@@ -1193,6 +1206,8 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) {
     node->InsertInput(zone(), 0,
                       graph()->NewNode(machine()->LoadStackCheckOffset()));
     ReplaceWithRuntimeCall(node, Runtime::kStackGuardWithGap);
+  } else if (stack_check_kind == StackCheckKind::kJSIterationBody) {
+    ReplaceWithRuntimeCall(node, Runtime::kHandleNoHeapWritesInterrupts);
   } else {
     ReplaceWithRuntimeCall(node, Runtime::kStackGuard);
   }
 }
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index 3f70ece49..ce67b8704 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -210,8 +210,7 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
       case IrOpcode::kDeoptimize:
       case IrOpcode::kTerminate:
      case IrOpcode::kThrow:
-        NodeProperties::MergeControlToEnd(graph(), common(), input);
-        Revisit(graph()->end());
+        MergeControlToEnd(graph(), common(), input);
         break;
       default:
         UNREACHABLE();
@@ -795,7 +794,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
       branch_is_receiver_false =
           graph()->NewNode(common()->Throw(), branch_is_receiver_false,
                            branch_is_receiver_false);
-      NodeProperties::MergeControlToEnd(graph(), common(),
-                                        branch_is_receiver_false);
+      MergeControlToEnd(graph(), common(), branch_is_receiver_false);
 
       ReplaceWithValue(node_success, node_success, node_success,
diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc
index a628f66f9..729cc80c5 100644
--- a/src/compiler/js-intrinsic-lowering.cc
+++ b/src/compiler/js-intrinsic-lowering.cc
@@ -124,12 +124,10 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
   Node* const effect = NodeProperties::GetEffectInput(node);
   Node* const control = NodeProperties::GetControlInput(node);
 
-  // TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer.
   Node* deoptimize = graph()->NewNode(
       common()->Deoptimize(DeoptimizeReason::kDeoptimizeNow, FeedbackSource()),
       frame_state, effect, control);
-  NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-  Revisit(graph()->end());
+  MergeControlToEnd(graph(), common(), deoptimize);
 
   node->TrimInputCount(0);
   NodeProperties::ChangeOp(node, common()->Dead());
diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc
index fe7254ba1..75b3ba8b9 100644
--- a/src/compiler/js-native-context-specialization.cc
+++ b/src/compiler/js-native-context-specialization.cc
@@ -1953,7 +1953,7 @@ Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) {
     }
     Node* throw_node =
         graph()->NewNode(common()->Throw(), call_runtime, control_not_iterator);
-    NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+    MergeControlToEnd(graph(), common(), throw_node);
   }
 
   control = graph()->NewNode(common()->IfFalse(), branch);
@@ -2022,7 +2022,7 @@ Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) {
     }
     Node* throw_node =
         graph()->NewNode(common()->Throw(), call_runtime, control_not_receiver);
-    NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+    MergeControlToEnd(graph(), common(), throw_node);
   }
   Node* if_receiver = graph()->NewNode(common()->IfTrue(), branch_node);
   ReplaceWithValue(node, call_property, effect, if_receiver);
@@ -2519,9 +2519,8 @@ Reduction JSNativeContextSpecialization::ReduceEagerDeoptimize(
   Node* deoptimize =
       graph()->NewNode(common()->Deoptimize(reason, FeedbackSource()),
                        frame_state, effect, control);
-  // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-  NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-  Revisit(graph()->end());
+
+  MergeControlToEnd(graph(), common(), deoptimize);
   node->TrimInputCount(0);
   NodeProperties::ChangeOp(node, common()->Dead());
   return Changed(node);
diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc
index 08b1dba80..10ad530b3 100644
--- a/src/compiler/js-operator.cc
+++ b/src/compiler/js-operator.cc
@@ -10,6 +10,7 @@
 #include "src/compiler/js-graph.h"
 #include "src/compiler/js-heap-broker.h"
 #include "src/compiler/node-matchers.h"
+#include "src/compiler/operator-properties.h"
 #include "src/compiler/operator.h"
 #include "src/handles/handles-inl.h"
 #include "src/objects/objects-inl.h"
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index a940783e4..69ab2bbc5 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -660,9 +660,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
       // throw, making it impossible to return a successful completion in this
       // case. We simply connect the successful completion to the graph end.
       if_false = graph()->NewNode(common()->Throw(), efalse, if_false);
-      // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-      NodeProperties::MergeControlToEnd(graph(), common(), if_false);
-      Revisit(graph()->end());
+      MergeControlToEnd(graph(), common(), if_false);
     }
     control = graph()->NewNode(common()->IfTrue(), branch);
     length = effect =
@@ -1280,7 +1278,7 @@ Reduction JSTypedLowering::ReduceJSHasInPrototypeChain(Node* node) {
   Node* eloop = effect =
       graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
   Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
-  NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+  MergeControlToEnd(graph(), common(), terminate);
   Node* vloop = value = graph()->NewNode(
       common()->Phi(MachineRepresentation::kTagged, 2), value, value, loop);
   NodeProperties::SetType(vloop, Type::NonInternal());
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index 5f02ab86b..07fd4495e 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -249,11 +249,14 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone,
   if (!info->closure().is_null()) {
     // If we are compiling a JS function, use a JS call descriptor,
     // plus the receiver.
-    SharedFunctionInfo shared = info->closure()->shared();
-    return GetJSCallDescriptor(
-        zone, info->is_osr(),
-        shared.internal_formal_parameter_count_with_receiver(),
-        CallDescriptor::kCanUseRoots);
+    DCHECK(info->has_bytecode_array());
+    DCHECK_EQ(info->closure()
+                  ->shared()
+                  .internal_formal_parameter_count_with_receiver(),
+              info->bytecode_array()->parameter_count());
+    return GetJSCallDescriptor(zone, info->is_osr(),
+                               info->bytecode_array()->parameter_count(),
+                               CallDescriptor::kCanUseRoots);
   }
   return nullptr;  // TODO(titzer): ?
 }
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index 9eb44f180..987037ca4 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -1443,7 +1443,6 @@ struct MachineOperatorGlobalCache {
   StackPointerGreaterThan##Kind##Operator kStackPointerGreaterThan##Kind;
 
   STACK_POINTER_GREATER_THAN(JSFunctionEntry)
-  STACK_POINTER_GREATER_THAN(JSIterationBody)
   STACK_POINTER_GREATER_THAN(CodeStubAssembler)
   STACK_POINTER_GREATER_THAN(Wasm)
 #undef STACK_POINTER_GREATER_THAN
@@ -1814,12 +1813,12 @@ const Operator* MachineOperatorBuilder::StackPointerGreaterThan(
   switch (kind) {
     case StackCheckKind::kJSFunctionEntry:
       return &cache_.kStackPointerGreaterThanJSFunctionEntry;
-    case StackCheckKind::kJSIterationBody:
-      return &cache_.kStackPointerGreaterThanJSIterationBody;
     case StackCheckKind::kCodeStubAssembler:
       return &cache_.kStackPointerGreaterThanCodeStubAssembler;
     case StackCheckKind::kWasm:
       return &cache_.kStackPointerGreaterThanWasm;
+    case StackCheckKind::kJSIterationBody:
+      UNREACHABLE();
   }
   UNREACHABLE();
 }
diff --git a/src/compiler/operation-typer.cc b/src/compiler/operation-typer.cc
index bd57b79ed..55049efaa 100644
--- a/src/compiler/operation-typer.cc
+++ b/src/compiler/operation-typer.cc
@@ -1261,6 +1261,7 @@ Type JSType(Type type) {
 }  // namespace
 
 Type OperationTyper::SameValue(Type lhs, Type rhs) {
+  if (lhs.IsNone() || rhs.IsNone()) return Type::None();
   if (!JSType(lhs).Maybe(JSType(rhs))) return singleton_false();
   if (lhs.Is(Type::NaN())) {
     if (rhs.Is(Type::NaN())) return singleton_true();
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index ed017f0c2..cf956cb96 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -2167,10 +2167,11 @@ struct WasmGCOptimizationPhase {
                              temp_zone);
     WasmGCOperatorReducer wasm_gc(&graph_reducer, temp_zone, mcgraph, module,
                                   data->source_positions());
-    // Note: if we want to add DeadCodeElimination here, we'll have to update
-    // the existing reducers to handle kDead and kDeadValue nodes everywhere.
+    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+                                              data->common(), temp_zone);
     AddReducer(data, &graph_reducer, &load_elimination);
     AddReducer(data, &graph_reducer, &wasm_gc);
+    AddReducer(data, &graph_reducer, &dead_code_elimination);
     graph_reducer.ReduceGraph();
   }
 };
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index aada8a8cf..6fdf51694 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -2021,6 +2021,107 @@ class RepresentationSelector {
     SetOutput<T>(node, MachineRepresentation::kTagged);
   }
 
+  template <Phase T>
+  bool TryOptimizeBigInt64Shift(Node* node, const Truncation& truncation,
+                                SimplifiedLowering* lowering) {
+    DCHECK(Is64());
+    if (!truncation.IsUsedAsWord64()) return false;
+
+    Type input_type = GetUpperBound(node->InputAt(0));
+    Type shift_amount_type = GetUpperBound(node->InputAt(1));
+
+    if (!shift_amount_type.IsHeapConstant()) return false;
+    HeapObjectRef ref = shift_amount_type.AsHeapConstant()->Ref();
+    if (!ref.IsBigInt()) return false;
+    BigIntRef bigint = ref.AsBigInt();
+    bool lossless = false;
+    int64_t shift_amount = bigint.AsInt64(&lossless);
+    // We bail out if we cannot represent the shift amount correctly.
+    if (!lossless) return false;
+
+    // Canonicalize {shift_amount}.
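+    // A negative amount flips the shift direction, e.g. `x << -5n` is
+    // handled as `x >> 5n` below (and vice versa).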
+ bool is_shift_left = + node->opcode() == IrOpcode::kSpeculativeBigIntShiftLeft; + if (shift_amount < 0) { + // A shift amount of abs(std::numeric_limits::min()) is not + // representable. + if (shift_amount == std::numeric_limits::min()) return false; + is_shift_left = !is_shift_left; + shift_amount = -shift_amount; + DCHECK_GT(shift_amount, 0); + } + DCHECK_GE(shift_amount, 0); + + // If the operation is a *real* left shift, propagate truncation. + // If it is a *real* right shift, the output representation is + // word64 only if we know the input type is BigInt64. + // Otherwise, fall through to using BigIntOperationHint. + if (is_shift_left) { + VisitBinop(node, + UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}), + UseInfo::Any(), MachineRepresentation::kWord64); + if (lower()) { + if (shift_amount > 63) { + DeferReplacement(node, jsgraph_->Int64Constant(0)); + } else if (shift_amount == 0) { + DeferReplacement(node, node->InputAt(0)); + } else { + DCHECK_GE(shift_amount, 1); + DCHECK_LE(shift_amount, 63); + ReplaceWithPureNode( + node, graph()->NewNode(lowering->machine()->Word64Shl(), + node->InputAt(0), + jsgraph_->Int64Constant(shift_amount))); + } + } + return true; + } else if (input_type.Is(Type::SignedBigInt64())) { + VisitBinop(node, + UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}), + UseInfo::Any(), MachineRepresentation::kWord64); + if (lower()) { + if (shift_amount > 63) { + ReplaceWithPureNode( + node, + graph()->NewNode(lowering->machine()->Word64Sar(), + node->InputAt(0), jsgraph_->Int64Constant(63))); + } else if (shift_amount == 0) { + DeferReplacement(node, node->InputAt(0)); + } else { + DCHECK_GE(shift_amount, 1); + DCHECK_LE(shift_amount, 63); + ReplaceWithPureNode( + node, graph()->NewNode(lowering->machine()->Word64Sar(), + node->InputAt(0), + jsgraph_->Int64Constant(shift_amount))); + } + } + return true; + } else if (input_type.Is(Type::UnsignedBigInt64())) { + VisitBinop(node, + UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}), + UseInfo::Any(), MachineRepresentation::kWord64); + if (lower()) { + if (shift_amount > 63) { + DeferReplacement(node, jsgraph_->Int64Constant(0)); + } else if (shift_amount == 0) { + DeferReplacement(node, node->InputAt(0)); + } else { + DCHECK_GE(shift_amount, 1); + DCHECK_LE(shift_amount, 63); + ReplaceWithPureNode( + node, graph()->NewNode(lowering->machine()->Word64Shr(), + node->InputAt(0), + jsgraph_->Int64Constant(shift_amount))); + } + } + return true; + } + + // None of the cases we can optimize here. + return false; + } + #if V8_ENABLE_WEBASSEMBLY static MachineType MachineTypeForWasmReturnType(wasm::ValueType type) { switch (type.kind()) { @@ -3385,111 +3486,18 @@ class RepresentationSelector { VisitUnused(node); return; } - if (truncation.IsUsedAsWord64()) { - Type input_type = GetUpperBound(node->InputAt(0)); - Type shift_amount_type = GetUpperBound(node->InputAt(1)); - - if (shift_amount_type.IsHeapConstant()) { - HeapObjectRef ref = shift_amount_type.AsHeapConstant()->Ref(); - if (ref.IsBigInt()) { - BigIntRef bigint = ref.AsBigInt(); - bool lossless = false; - int64_t shift_amount = bigint.AsInt64(&lossless); - - // Canonicalize {shift_amount}. - bool is_shift_left = - node->opcode() == IrOpcode::kSpeculativeBigIntShiftLeft; - if (shift_amount < 0) { - is_shift_left = !is_shift_left; - shift_amount = -shift_amount; - } - DCHECK_GE(shift_amount, 0); - - // If the operation is a *real* left shift, propagate truncation. 
- // If it is a *real* right shift, the output representation is - // word64 only if we know the input type is BigInt64. - // Otherwise, fall through to using BigIntOperationHint. - if (is_shift_left) { - VisitBinop( - node, - UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}), - UseInfo::Any(), MachineRepresentation::kWord64); - if (lower()) { - if (!lossless || shift_amount > 63) { - DeferReplacement(node, jsgraph_->Int64Constant(0)); - } else if (shift_amount == 0) { - DeferReplacement(node, node->InputAt(0)); - } else { - DCHECK_GE(shift_amount, 1); - DCHECK_LE(shift_amount, 63); - ReplaceWithPureNode( - node, - graph()->NewNode( - lowering->machine()->Word64Shl(), node->InputAt(0), - jsgraph_->Int64Constant(shift_amount))); - } - } - return; - } else if (input_type.Is(Type::SignedBigInt64())) { - VisitBinop(node, UseInfo::Word64(), UseInfo::Any(), - MachineRepresentation::kWord64); - if (lower()) { - if (!lossless || shift_amount > 63) { - ReplaceWithPureNode( - node, graph()->NewNode(lowering->machine()->Word64Sar(), - node->InputAt(0), - jsgraph_->Int64Constant(63))); - } else if (shift_amount == 0) { - DeferReplacement(node, node->InputAt(0)); - } else { - DCHECK_GE(shift_amount, 1); - DCHECK_LE(shift_amount, 63); - ReplaceWithPureNode( - node, - graph()->NewNode( - lowering->machine()->Word64Sar(), node->InputAt(0), - jsgraph_->Int64Constant(shift_amount))); - } - } - return; - } else if (input_type.Is(Type::UnsignedBigInt64())) { - VisitBinop(node, UseInfo::Word64(), UseInfo::Any(), - MachineRepresentation::kWord64); - if (lower()) { - if (!lossless || shift_amount > 63) { - DeferReplacement(node, jsgraph_->Int64Constant(0)); - } else if (shift_amount == 0) { - DeferReplacement(node, node->InputAt(0)); - } else { - DCHECK_GE(shift_amount, 1); - DCHECK_LE(shift_amount, 63); - ReplaceWithPureNode( - node, - graph()->NewNode( - lowering->machine()->Word64Shr(), node->InputAt(0), - jsgraph_->Int64Constant(shift_amount))); - } - } - return; - } - } - } + if (TryOptimizeBigInt64Shift(node, truncation, lowering)) { + return; } - BigIntOperationHint hint = BigIntOperationHintOf(node->op()); - switch (hint) { - case BigIntOperationHint::kBigInt64: - // Do not collect or use BigInt64 feedback for shift operations. 
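// The constant replacements chosen above, modeled as plain C++, assuming
// C++20 arithmetic right shift for signed operands; these helpers are
// illustrative, not V8 APIs. For amounts above 63, a 64-bit left or logical
// right shift yields 0, while an arithmetic right shift saturates to the
// sign word (x >> 63), which is exactly why Word64Shl/Word64Shr get replaced
// by the constant 0 and Word64Sar by a shift of 63.
#include <cstdint>

int64_t Shl64(int64_t x, int64_t amount) {
  if (amount > 63) return 0;
  return static_cast<int64_t>(static_cast<uint64_t>(x) << amount);
}

int64_t Sar64(int64_t x, int64_t amount) {
  if (amount > 63) return x >> 63;  // 0 or -1, depending on the sign of x.
  return x >> amount;
}

uint64_t Shr64(uint64_t x, int64_t amount) {
  if (amount > 63) return 0;
  return x >> amount;
}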
- UNREACHABLE(); - case BigIntOperationHint::kBigInt: { - VisitBinop( - node, UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}), - MachineRepresentation::kTaggedPointer); - if (lower()) { - ChangeOp(node, BigIntOp(node)); - } - return; - } + DCHECK_EQ(BigIntOperationHintOf(node->op()), + BigIntOperationHint::kBigInt); + VisitBinop(node, + UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}), + MachineRepresentation::kTaggedPointer); + if (lower()) { + ChangeOp(node, BigIntOp(node)); } + return; } case IrOpcode::kSpeculativeBigIntEqual: case IrOpcode::kSpeculativeBigIntLessThan: diff --git a/src/compiler/wasm-gc-operator-reducer.cc b/src/compiler/wasm-gc-operator-reducer.cc index ded568a56..1fbf9a59f 100644 --- a/src/compiler/wasm-gc-operator-reducer.cc +++ b/src/compiler/wasm-gc-operator-reducer.cc @@ -191,6 +191,7 @@ Reduction WasmGCOperatorReducer::ReduceIf(Node* node, bool condition) { DCHECK(node->opcode() == IrOpcode::kIfTrue || node->opcode() == IrOpcode::kIfFalse); Node* branch = NodeProperties::GetControlInput(node); + if (branch->opcode() == IrOpcode::kDead) return NoChange(); DCHECK_EQ(branch->opcode(), IrOpcode::kBranch); if (!IsReduced(branch)) return NoChange(); ControlPathTypes parent_state = GetState(branch); @@ -317,11 +318,9 @@ Reduction WasmGCOperatorReducer::ReduceCheckNull(Node* node) { Reduction WasmGCOperatorReducer::ReduceWasmExternInternalize(Node* node) { DCHECK_EQ(node->opcode(), IrOpcode::kWasmExternInternalize); // Remove redundant extern.internalize(extern.externalize(...)) pattern. - // TODO(mliedtke): Currently this doesn't get fully removed, probably due to - // not running dead code elimination in this pipeline step. What would it cost - // us to run it here? - if (NodeProperties::GetValueInput(node, 0)->opcode() == - IrOpcode::kWasmExternExternalize) { + Node* object = NodeProperties::GetValueInput(node, 0); + if (object->opcode() == IrOpcode::kDead) return NoChange(); + if (object->opcode() == IrOpcode::kWasmExternExternalize) { Node* externalize = node->InputAt(0); Node* input = externalize->InputAt(0); ReplaceWithValue(node, input); diff --git a/src/compiler/wasm-inlining.cc b/src/compiler/wasm-inlining.cc index e6820b108..7d9cb8a06 100644 --- a/src/compiler/wasm-inlining.cc +++ b/src/compiler/wasm-inlining.cc @@ -287,7 +287,7 @@ void WasmInliner::InlineTailCall(Node* call, Node* callee_start, // inlined graph to the end of the caller graph. 
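// Several hunks in this patch fold the NodeProperties::MergeControlToEnd +
// Revisit(graph()->end()) pair into a single MergeControlToEnd helper on
// the reducer. A self-contained toy of that pattern; ToyNode, ToyGraph, and
// the revisit queue are stand-ins, not the real TurboFan types.
#include <vector>

struct ToyNode {
  std::vector<ToyNode*> inputs;
};

struct ToyGraph {
  ToyNode* end;
};

class ToyAdvancedReducer {
 public:
  explicit ToyAdvancedReducer(std::vector<ToyNode*>* revisit_queue)
      : revisit_queue_(revisit_queue) {}

  // One call now both merges the terminator into the graph's end node and
  // re-queues the end node so the reduction fixpoint sees the new input.
  void MergeControlToEnd(ToyGraph* graph, ToyNode* terminator) {
    graph->end->inputs.push_back(terminator);
    Revisit(graph->end);
  }

 private:
  void Revisit(ToyNode* node) { revisit_queue_->push_back(node); }
  std::vector<ToyNode*>* revisit_queue_;
};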
for (Node* const input : callee_end->inputs()) { DCHECK(IrOpcode::IsGraphTerminator(input->opcode())); - NodeProperties::MergeControlToEnd(graph(), common(), input); + MergeControlToEnd(graph(), common(), input); } for (Edge edge_to_end : call->use_edges()) { DCHECK_EQ(edge_to_end.from(), graph()->end()); @@ -321,8 +321,7 @@ void WasmInliner::InlineCall(Node* call, Node* callee_start, Node* callee_end, case IrOpcode::kDeoptimize: case IrOpcode::kTerminate: case IrOpcode::kThrow: - NodeProperties::MergeControlToEnd(graph(), common(), input); - Revisit(graph()->end()); + MergeControlToEnd(graph(), common(), input); break; case IrOpcode::kTailCall: { // A tail call in the callee inlined in a regular call in the caller has diff --git a/src/compiler/wasm-load-elimination.cc b/src/compiler/wasm-load-elimination.cc index 7292d61d7..1feffb84a 100644 --- a/src/compiler/wasm-load-elimination.cc +++ b/src/compiler/wasm-load-elimination.cc @@ -146,6 +146,7 @@ Reduction WasmLoadElimination::ReduceWasmStructGet(Node* node) { Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); + if (object->opcode() == IrOpcode::kDead) return NoChange(); AbstractState const* state = node_states_.Get(effect); if (state == nullptr) return NoChange(); @@ -168,16 +169,11 @@ Reduction WasmLoadElimination::ReduceWasmStructGet(Node* node) { !(is_mutable ? state->immutable_state : state->mutable_state) .LookupField(field_info.field_index, object) .IsEmpty()) { - Node* unreachable = - graph()->NewNode(jsgraph()->common()->Unreachable(), effect, control); - MachineRepresentation rep = - field_info.type->field(field_info.field_index).machine_representation(); - Node* dead_value = - graph()->NewNode(jsgraph()->common()->DeadValue(rep), unreachable); - NodeProperties::SetType(dead_value, NodeProperties::GetType(node)); - ReplaceWithValue(node, dead_value, unreachable, control); + ReplaceWithValue(node, dead(), dead(), dead()); + MergeControlToEnd(graph(), common(), + graph()->NewNode(common()->Throw(), effect, control)); node->Kill(); - return Replace(dead_value); + return Replace(dead()); } // If the input type is not (ref null? none) or bottom and we don't have type // inconsistencies, then the result type must be valid. @@ -217,6 +213,7 @@ Reduction WasmLoadElimination::ReduceWasmStructSet(Node* node) { Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); + if (object->opcode() == IrOpcode::kDead) return NoChange(); AbstractState const* state = node_states_.Get(effect); if (state == nullptr) return NoChange(); @@ -240,11 +237,11 @@ Reduction WasmLoadElimination::ReduceWasmStructSet(Node* node) { !(is_mutable ? 
state->immutable_state : state->mutable_state) .LookupField(field_info.field_index, object) .IsEmpty()) { - Node* unreachable = - graph()->NewNode(jsgraph()->common()->Unreachable(), effect, control); - ReplaceWithValue(node, unreachable, unreachable, control); + ReplaceWithValue(node, dead(), dead(), dead()); + MergeControlToEnd(graph(), common(), + graph()->NewNode(common()->Throw(), effect, control)); node->Kill(); - return Replace(unreachable); + return Replace(dead()); } if (is_mutable) { @@ -302,6 +299,7 @@ Reduction WasmLoadElimination::ReduceWasmArrayInitializeLength(Node* node) { Node* value = NodeProperties::GetValueInput(node, 1); Node* effect = NodeProperties::GetEffectInput(node); + if (object->opcode() == IrOpcode::kDead) return NoChange(); AbstractState const* state = node_states_.Get(effect); if (state == nullptr) return NoChange(); @@ -321,6 +319,7 @@ Reduction WasmLoadElimination::ReduceStringPrepareForGetCodeunit(Node* node) { Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); + if (object->opcode() == IrOpcode::kDead) return NoChange(); AbstractState const* state = node_states_.Get(effect); if (state == nullptr) return NoChange(); @@ -484,6 +483,12 @@ WasmLoadElimination::AbstractState const* WasmLoadElimination::ComputeLoopState( if (visited.insert(current).second) { if (current->opcode() == IrOpcode::kWasmStructSet) { Node* object = NodeProperties::GetValueInput(current, 0); + if (object->opcode() == IrOpcode::kDead || + object->opcode() == IrOpcode::kDeadValue) { + // We are in dead code. Bail out with no mutable state. + return zone()->New(HalfState(zone()), + state->immutable_state); + } WasmFieldInfo field_info = OpParameter(current->op()); bool is_mutable = field_info.type->mutability(field_info.field_index); if (is_mutable) { @@ -529,6 +534,7 @@ WasmLoadElimination::WasmLoadElimination(Editor* editor, JSGraph* jsgraph, empty_state_(zone), node_states_(jsgraph->graph()->NodeCount(), zone), jsgraph_(jsgraph), + dead_(jsgraph->Dead()), zone_(zone) {} CommonOperatorBuilder* WasmLoadElimination::common() const { diff --git a/src/compiler/wasm-load-elimination.h b/src/compiler/wasm-load-elimination.h index 76be09d27..b50a02ca2 100644 --- a/src/compiler/wasm-load-elimination.h +++ b/src/compiler/wasm-load-elimination.h @@ -141,12 +141,14 @@ class V8_EXPORT_PRIVATE WasmLoadElimination final Isolate* isolate() const; Graph* graph() const; JSGraph* jsgraph() const { return jsgraph_; } + Node* dead() const { return dead_; } Zone* zone() const { return zone_; } AbstractState const* empty_state() const { return &empty_state_; } AbstractState const empty_state_; NodeAuxData node_states_; JSGraph* const jsgraph_; + Node* dead_; Zone* zone_; }; diff --git a/src/debug/debug-evaluate.cc b/src/debug/debug-evaluate.cc index 4068b0812..c43b90dcf 100644 --- a/src/debug/debug-evaluate.cc +++ b/src/debug/debug-evaluate.cc @@ -385,6 +385,7 @@ bool DebugEvaluate::IsSideEffectFreeIntrinsic(Runtime::FunctionId id) { V(ObjectIsExtensible) \ V(RegExpInitializeAndCompile) \ V(StackGuard) \ + V(HandleNoHeapWritesInterrupts) \ V(StringAdd) \ V(StringCharCodeAt) \ V(StringEqual) \ diff --git a/src/debug/debug-scopes.cc b/src/debug/debug-scopes.cc index 18425b96c..944bf5f4e 100644 --- a/src/debug/debug-scopes.cc +++ b/src/debug/debug-scopes.cc @@ -954,7 +954,9 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode, // a proxy, return an empty object. 
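// With DeadCodeElimination now running in the same phase (see the
// pipeline.cc hunk earlier), the wasm reducers above must tolerate kDead
// inputs instead of asserting on them. A minimal sketch of the guard
// pattern, using stand-in types rather than the real TurboFan ones.
enum class ToyOpcode { kDead, kWasmStructGet, kOther };

struct ToyGraphNode {
  ToyOpcode opcode;
  ToyGraphNode* object_input;
};

enum class ToyReduction { kNoChange, kChanged };

ToyReduction ReduceWasmStructGetSketch(ToyGraphNode* node) {
  ToyGraphNode* object = node->object_input;
  // Dead inputs can reach the reducer once dead code elimination runs in
  // the same graph-reduction fixpoint; bail out early.
  if (object->opcode == ToyOpcode::kDead) return ToyReduction::kNoChange;
  // ... the actual field-lookup and elimination logic would follow ...
  return ToyReduction::kNoChange;
}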
Handle ScopeIterator::WithContextExtension() { DCHECK(context_->IsWithContext()); - if (context_->extension_receiver().IsJSProxy()) { + if (!context_->extension_receiver().IsJSObject()) { + DCHECK((context_->extension_receiver().IsJSProxy()) || + (context_->extension_receiver().IsWasmObject())); return isolate_->factory()->NewSlowJSObjectWithNullProto(); } return handle(JSObject::cast(context_->extension_receiver()), isolate_); diff --git a/src/execution/stack-guard.cc b/src/execution/stack-guard.cc index ff64beb8b..eb7f5794d 100644 --- a/src/execution/stack-guard.cc +++ b/src/execution/stack-guard.cc @@ -26,16 +26,22 @@ namespace v8 { namespace internal { -void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) { +void StackGuard::update_interrupt_requests_and_stack_limits( + const ExecutionAccess& lock) { DCHECK_NOT_NULL(isolate_); - thread_local_.set_jslimit(kInterruptLimit); - thread_local_.set_climit(kInterruptLimit); -} - -void StackGuard::reset_limits(const ExecutionAccess& lock) { - DCHECK_NOT_NULL(isolate_); - thread_local_.set_jslimit(thread_local_.real_jslimit_); - thread_local_.set_climit(thread_local_.real_climit_); + if (has_pending_interrupts(lock)) { + thread_local_.set_jslimit(kInterruptLimit); + thread_local_.set_climit(kInterruptLimit); + } else { + thread_local_.set_jslimit(thread_local_.real_jslimit_); + thread_local_.set_climit(thread_local_.real_climit_); + } + for (InterruptLevel level : + std::array{InterruptLevel::kNoGC, InterruptLevel::kNoHeapWrites, + InterruptLevel::kAnyEffect}) { + thread_local_.set_interrupt_requested( + level, InterruptLevelMask(level) & thread_local_.interrupt_flags_); + } } void StackGuard::SetStackLimit(uintptr_t limit) { @@ -64,18 +70,6 @@ void StackGuard::AdjustStackLimitForSimulator() { } } -void StackGuard::EnableInterrupts() { - ExecutionAccess access(isolate_); - if (has_pending_interrupts(access)) { - set_interrupt_limits(access); - } -} - -void StackGuard::DisableInterrupts() { - ExecutionAccess access(isolate_); - reset_limits(access); -} - void StackGuard::PushInterruptsScope(InterruptsScope* scope) { ExecutionAccess access(isolate_); DCHECK_NE(scope->mode_, InterruptsScope::kNoop); @@ -96,9 +90,8 @@ void StackGuard::PushInterruptsScope(InterruptsScope* scope) { } thread_local_.interrupt_flags_ |= restored_flags; - if (has_pending_interrupts(access)) set_interrupt_limits(access); } - if (!has_pending_interrupts(access)) reset_limits(access); + update_interrupt_requests_and_stack_limits(access); // Add scope to the chain. scope->prev_ = thread_local_.interrupt_scopes_; thread_local_.interrupt_scopes_ = scope; @@ -126,7 +119,7 @@ void StackGuard::PopInterruptsScope() { } } } - if (has_pending_interrupts(access)) set_interrupt_limits(access); + update_interrupt_requests_and_stack_limits(access); // Remove scope from chain. thread_local_.interrupt_scopes_ = top->prev_; } @@ -146,7 +139,7 @@ void StackGuard::RequestInterrupt(InterruptFlag flag) { // Not intercepted. Set as active interrupt flag. thread_local_.interrupt_flags_ |= flag; - set_interrupt_limits(access); + update_interrupt_requests_and_stack_limits(access); // If this isolate is waiting in a futex, notify it to wake up. isolate_->futex_wait_list_node()->NotifyWake(); @@ -162,37 +155,36 @@ void StackGuard::ClearInterrupt(InterruptFlag flag) { // Clear the interrupt flag from the active interrupt flags. 
thread_local_.interrupt_flags_ &= ~flag; - if (!has_pending_interrupts(access)) reset_limits(access); + update_interrupt_requests_and_stack_limits(access); } bool StackGuard::HasTerminationRequest() { + if (!thread_local_.has_interrupt_requested(InterruptLevel::kNoGC)) { + return false; + } ExecutionAccess access(isolate_); if ((thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) != 0) { thread_local_.interrupt_flags_ &= ~TERMINATE_EXECUTION; - if (!has_pending_interrupts(access)) reset_limits(access); + update_interrupt_requests_and_stack_limits(access); return true; } return false; } -int StackGuard::FetchAndClearInterrupts() { +int StackGuard::FetchAndClearInterrupts(InterruptLevel level) { ExecutionAccess access(isolate_); - - int result = 0; + InterruptFlag mask = InterruptLevelMask(level); if ((thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) != 0) { // The TERMINATE_EXECUTION interrupt is special, since it terminates // execution but should leave V8 in a resumable state. If it exists, we only // fetch and clear that bit. On resume, V8 can continue processing other // interrupts. - result = TERMINATE_EXECUTION; - thread_local_.interrupt_flags_ &= ~TERMINATE_EXECUTION; - if (!has_pending_interrupts(access)) reset_limits(access); - } else { - result = static_cast(thread_local_.interrupt_flags_); - thread_local_.interrupt_flags_ = 0; - reset_limits(access); + mask = TERMINATE_EXECUTION; } + int result = static_cast(thread_local_.interrupt_flags_ & mask); + thread_local_.interrupt_flags_ &= ~mask; + update_interrupt_requests_and_stack_limits(access); return result; } @@ -264,7 +256,7 @@ class V8_NODISCARD ShouldBeZeroOnReturnScope final { } // namespace -Object StackGuard::HandleInterrupts() { +Object StackGuard::HandleInterrupts(InterruptLevel level) { TRACE_EVENT0("v8.execute", "V8.HandleInterrupts"); #if DEBUG @@ -278,7 +270,7 @@ Object StackGuard::HandleInterrupts() { // Fetch and clear interrupt bits in one go. See comments inside the method // for special handling of TERMINATE_EXECUTION. - int interrupt_flags = FetchAndClearInterrupts(); + int interrupt_flags = FetchAndClearInterrupts(level); // All interrupts should be fully processed when returning from this method. ShouldBeZeroOnReturnScope should_be_zero_on_return(&interrupt_flags); diff --git a/src/execution/stack-guard.h b/src/execution/stack-guard.h index 8cdf755c0..d5d0cbc90 100644 --- a/src/execution/stack-guard.h +++ b/src/execution/stack-guard.h @@ -45,20 +45,30 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final { // Sets up the default stack guard for this thread. void InitThread(const ExecutionAccess& lock); -#define INTERRUPT_LIST(V) \ - V(TERMINATE_EXECUTION, TerminateExecution, 0) \ - V(GC_REQUEST, GC, 1) \ - V(INSTALL_CODE, InstallCode, 2) \ - V(INSTALL_BASELINE_CODE, InstallBaselineCode, 3) \ - V(API_INTERRUPT, ApiInterrupt, 4) \ - V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 5) \ - V(GROW_SHARED_MEMORY, GrowSharedMemory, 6) \ - V(LOG_WASM_CODE, LogWasmCode, 7) \ - V(WASM_CODE_GC, WasmCodeGC, 8) \ - V(INSTALL_MAGLEV_CODE, InstallMaglevCode, 9) \ - V(GLOBAL_SAFEPOINT, GlobalSafepoint, 10) - -#define V(NAME, Name, id) \ + // Code locations that check for interrupts might only handle a subset of the + // available interrupts, expressed as an `InterruptLevel`. These levels are + // also associated with side effects that are allowed for the respective + // level. The levels are inclusive, which is specified using the order in the + // enum. 
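// An expansion of the level scheme described in this comment, with the
// X-macro folded by hand for three representative flags; the flag/level
// pairs are taken from the INTERRUPT_LIST below, everything else is an
// illustrative reduction, not V8 code.
#include <cstdint>

enum class Level { kNoGC, kNoHeapWrites, kAnyEffect };

constexpr uint32_t kTerminateExecution = 1 << 0;  // Level::kNoGC
constexpr uint32_t kGcRequest = 1 << 1;           // Level::kNoHeapWrites
constexpr uint32_t kInstallCode = 1 << 2;         // Level::kAnyEffect

// Inclusive levels: the mask for level L contains every flag whose own
// level is <= L, so kAnyEffect handles everything the lower levels do.
constexpr uint32_t LevelMask(Level level) {
  return (Level::kNoGC <= level ? kTerminateExecution : 0) |
         (Level::kNoHeapWrites <= level ? kGcRequest : 0) |
         (Level::kAnyEffect <= level ? kInstallCode : 0);
}

static_assert(LevelMask(Level::kNoGC) == kTerminateExecution);
static_assert(LevelMask(Level::kAnyEffect) ==
              (kTerminateExecution | kGcRequest | kInstallCode));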
For example, a site that handles `kAnyEffect` will also handle the + // preceding levels. + enum class InterruptLevel { kNoGC, kNoHeapWrites, kAnyEffect }; + static constexpr int kNumberOfInterruptLevels = 3; + +#define INTERRUPT_LIST(V) \ + V(TERMINATE_EXECUTION, TerminateExecution, 0, InterruptLevel::kNoGC) \ + V(GC_REQUEST, GC, 1, InterruptLevel::kNoHeapWrites) \ + V(INSTALL_CODE, InstallCode, 2, InterruptLevel::kAnyEffect) \ + V(INSTALL_BASELINE_CODE, InstallBaselineCode, 3, InterruptLevel::kAnyEffect) \ + V(API_INTERRUPT, ApiInterrupt, 4, InterruptLevel::kNoHeapWrites) \ + V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 5, \ + InterruptLevel::kNoHeapWrites) \ + V(GROW_SHARED_MEMORY, GrowSharedMemory, 6, InterruptLevel::kAnyEffect) \ + V(LOG_WASM_CODE, LogWasmCode, 7, InterruptLevel::kAnyEffect) \ + V(WASM_CODE_GC, WasmCodeGC, 8, InterruptLevel::kNoHeapWrites) \ + V(INSTALL_MAGLEV_CODE, InstallMaglevCode, 9, InterruptLevel::kAnyEffect) \ + V(GLOBAL_SAFEPOINT, GlobalSafepoint, 10, InterruptLevel::kNoHeapWrites) + +#define V(NAME, Name, id, interrupt_level) \ inline bool Check##Name() { return CheckInterrupt(NAME); } \ inline void Request##Name() { RequestInterrupt(NAME); } \ inline void Clear##Name() { ClearInterrupt(NAME); } @@ -67,16 +77,23 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final { // Flag used to set the interrupt causes. enum InterruptFlag : uint32_t { -#define V(NAME, Name, id) NAME = (1 << id), +#define V(NAME, Name, id, interrupt_level) NAME = (1 << id), INTERRUPT_LIST(V) #undef V -#define V(NAME, Name, id) NAME | +#define V(NAME, Name, id, interrupt_level) NAME | ALL_INTERRUPTS = INTERRUPT_LIST(V) 0 #undef V }; static_assert(InterruptFlag::ALL_INTERRUPTS < std::numeric_limits::max()); + static constexpr InterruptFlag InterruptLevelMask(InterruptLevel level) { +#define V(NAME, Name, id, interrupt_level) \ + | (interrupt_level <= level ? NAME : 0) + return static_cast(0 INTERRUPT_LIST(V)); +#undef V + } + uintptr_t climit() { return thread_local_.climit(); } uintptr_t jslimit() { return thread_local_.jslimit(); } // This provides an asynchronous read of the stack limits for the current @@ -90,17 +107,23 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final { Address address_of_real_jslimit() { return reinterpret_cast
(&thread_local_.real_jslimit_); } + Address address_of_interrupt_request(InterruptLevel level) { + return reinterpret_cast<Address>
( + &thread_local_.interrupt_requested_[static_cast(level)]); + } // If the stack guard is triggered, but it is not an actual // stack overflow, then handle the interruption accordingly. - Object HandleInterrupts(); + // Only interrupts that match the given `InterruptLevel` will be handled, + // leaving other interrupts pending as if this method had not been called. + Object HandleInterrupts(InterruptLevel level = InterruptLevel::kAnyEffect); // Special case of {HandleInterrupts}: checks for termination requests only. // This is guaranteed to never cause GC, so can be used to interrupt // long-running computations that are not GC-safe. bool HasTerminationRequest(); - static constexpr int kSizeInBytes = 7 * kSystemPointerSize; + static constexpr int kSizeInBytes = 8 * kSystemPointerSize; static char* Iterate(RootVisitor* v, char* thread_storage) { return thread_storage + ArchiveSpacePerThread(); @@ -110,7 +133,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final { bool CheckInterrupt(InterruptFlag flag); void RequestInterrupt(InterruptFlag flag); void ClearInterrupt(InterruptFlag flag); - int FetchAndClearInterrupts(); + int FetchAndClearInterrupts(InterruptLevel level); // You should hold the ExecutionAccess lock when calling this method. bool has_pending_interrupts(const ExecutionAccess& lock) { @@ -118,15 +141,8 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final { } // You should hold the ExecutionAccess lock when calling this method. - inline void set_interrupt_limits(const ExecutionAccess& lock); - - // Reset limits to actual values. For example after handling interrupt. - // You should hold the ExecutionAccess lock when calling this method. - inline void reset_limits(const ExecutionAccess& lock); - - // Enable or disable interrupts. - void EnableInterrupts(); - void DisableInterrupts(); + inline void update_interrupt_requests_and_stack_limits( + const ExecutionAccess& lock); #if V8_TARGET_ARCH_64_BIT static const uintptr_t kInterruptLimit = uintptr_t{0xfffffffffffffffe}; @@ -180,6 +196,21 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final { static_cast(limit)); } + + // Interrupt request bytes can be read without any lock. + // Writing requires the ExecutionAccess lock. + base::Atomic8 interrupt_requested_[kNumberOfInterruptLevels] = { + false, false, false}; + + void set_interrupt_requested(InterruptLevel level, bool requested) { + base::Relaxed_Store(&interrupt_requested_[static_cast(level)], + requested); + } + + bool has_interrupt_requested(InterruptLevel level) { + return base::Relaxed_Load(&interrupt_requested_[static_cast(level)]); + } + InterruptsScope* interrupt_scopes_ = nullptr; uint32_t interrupt_flags_ = 0; }; diff --git a/src/heap/concurrent-marking.cc b/src/heap/concurrent-marking.cc index 6f7adf9cb..27ee56600 100644 --- a/src/heap/concurrent-marking.cc +++ b/src/heap/concurrent-marking.cc @@ -383,6 +383,8 @@ void ConcurrentMarking::RunMajor(JobDelegate* delegate, local_marking_worklists.PushOnHold(object); } else { Map map = object.map(isolate, kAcquireLoad); + // The marking worklist should never contain filler objects. 
+ CHECK(!InstanceTypeChecker::IsFreeSpaceOrFiller(map)); if (is_per_context_mode) { Address context; if (native_context_inferrer.Infer(isolate, map, object, &context)) { diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index 27ef0618f..f90c2c196 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -2292,7 +2292,11 @@ void MarkCompactCollector::MarkTransitiveClosureLinear() { GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR); // This phase doesn't support parallel marking. DCHECK(heap()->concurrent_marking()->IsStopped()); - std::unordered_multimap<HeapObject, HeapObject, Object::Hasher> key_to_values; + // We must use the full pointer comparison here as this map will be queried + // with objects from different cages (e.g. code- or trusted cage). + std::unordered_multimap<HeapObject, HeapObject, Object::Hasher, + Object::KeyEqualSafe> + key_to_values; Ephemeron ephemeron; DCHECK( @@ -2410,21 +2414,8 @@ std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist( while (local_marking_worklists()->Pop(&object) || local_marking_worklists()->PopOnHold(&object)) { - // Left trimming may result in grey or black filler objects on the marking - // worklist. Ignore these objects. - if (object.IsFreeSpaceOrFiller(cage_base)) { - // Due to copying mark bits and the fact that grey and black have their - // first bit set, one word fillers are always black. - DCHECK_IMPLIES(object.map(cage_base) == - ReadOnlyRoots(isolate).one_pointer_filler_map(), - marking_state()->IsMarked(object)); - // Other fillers may be black or grey depending on the color of the object - // that was trimmed. - DCHECK_IMPLIES(object.map(cage_base) != - ReadOnlyRoots(isolate).one_pointer_filler_map(), - marking_state()->IsMarked(object)); - continue; - } + // The marking worklist should never contain filler objects. + CHECK(!object.IsFreeSpaceOrFiller(cage_base)); DCHECK(object.IsHeapObject()); DCHECK(heap()->Contains(object)); DCHECK(!(marking_state()->IsUnmarked(object))); diff --git a/src/ic/ic.cc b/src/ic/ic.cc index b8f55270a..f117eb028 100644 --- a/src/ic/ic.cc +++ b/src/ic/ic.cc @@ -1788,7 +1788,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name, // present. We can also skip this for private names since they are not // bound by configurability or extensibility checks, and errors would've // been thrown if the private field already exists in the object. - if (IsAnyDefineOwn() && !name->IsPrivateName() && !object->IsJSProxy() && + if (IsAnyDefineOwn() && !name->IsPrivateName() && object->IsJSObject() && !Handle<JSObject>::cast(object)->HasNamedInterceptor()) { Maybe<bool> can_define = JSObject::CheckIfCanDefineAsConfigurable( isolate(), &it, value, Nothing<bool>()); @@ -2262,15 +2262,16 @@ Handle<Object> KeyedStoreIC::StoreElementHandler( receiver_map->MayHaveReadOnlyElementsInPrototypeChain(isolate()), IsStoreInArrayLiteralIC()); - if (receiver_map->IsJSProxyMap()) { + if (!receiver_map->IsJSObjectMap()) { // DefineKeyedOwnIC, which is used to define computed fields in instances, - // should be handled by the slow stub. - if (IsDefineKeyedOwnIC()) { - TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub); - return StoreHandler::StoreSlow(isolate(), store_mode); + // should be handled by the slow stub below instead of the proxy stub. + if (receiver_map->IsJSProxyMap() && !IsDefineKeyedOwnIC()) { + return StoreHandler::StoreProxy(isolate()); } - return StoreHandler::StoreProxy(isolate()); + // Wasm objects or other kinds of special objects go through the slow stub.
+ TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub); + return StoreHandler::StoreSlow(isolate(), store_mode); } // TODO(ishell): move to StoreHandler::StoreElement(). @@ -2490,14 +2491,12 @@ MaybeHandle KeyedStoreIC::Store(Handle object, Handle receiver = Handle::cast(object); old_receiver_map = handle(receiver->map(), isolate()); is_arguments = receiver->IsJSArgumentsObject(); - bool is_proxy = receiver->IsJSProxy(); + bool is_jsobject = receiver->IsJSObject(); size_t index; key_is_valid_index = IntPtrKeyToSize(maybe_index, receiver, &index); - if (!is_arguments && !is_proxy) { - if (key_is_valid_index) { + if (is_jsobject && !is_arguments && key_is_valid_index) { Handle receiver_object = Handle::cast(object); store_mode = GetStoreMode(receiver_object, index); - } } } diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc index 139ed00b4..6b656c4a5 100644 --- a/src/interpreter/bytecode-generator.cc +++ b/src/interpreter/bytecode-generator.cc @@ -2422,8 +2422,16 @@ void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) { VisitIterationBody(stmt, &loop_builder); builder()->SetExpressionAsStatementPosition(stmt->cond()); BytecodeLabels loop_backbranch(zone()); - VisitForTest(stmt->cond(), &loop_backbranch, loop_builder.break_labels(), - TestFallthrough::kThen); + if (!loop_builder.break_labels()->empty()) { + // The test may be conditionally executed if there was a break statement + // inside the loop body, and therefore requires its own elision scope. + HoleCheckElisionScope elider(this); + VisitForTest(stmt->cond(), &loop_backbranch, loop_builder.break_labels(), + TestFallthrough::kThen); + } else { + VisitForTest(stmt->cond(), &loop_backbranch, loop_builder.break_labels(), + TestFallthrough::kThen); + } loop_backbranch.Bind(builder()); } } diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc index fe89983f5..703d1a343 100644 --- a/src/interpreter/interpreter.cc +++ b/src/interpreter/interpreter.cc @@ -292,6 +292,9 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::DoFinalizeJobImpl( } #ifdef DEBUG + if (parse_info()->literal()->shared_function_info().is_null()) { + parse_info()->literal()->set_shared_function_info(shared_info); + } CheckAndPrintBytecodeMismatch( isolate, handle(Script::cast(shared_info->script()), isolate), bytecodes); #endif diff --git a/src/json/json-stringifier.cc b/src/json/json-stringifier.cc index f718bcd9c..347dbebd0 100644 --- a/src/json/json-stringifier.cc +++ b/src/json/json-stringifier.cc @@ -633,6 +633,8 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle object, if (InstanceTypeChecker::IsJSProxy(instance_type)) { return SerializeJSProxy(Handle::cast(object), key); } + // WASM_{STRUCT,ARRAY}_TYPE are handled in `case:` blocks above. 
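// Sketch of the receiver classification that the ic.cc hunk above and the
// json-stringifier.cc hunk here converge on: test for JSObject rather than
// special-casing JSProxy, so that Wasm structs/arrays and any other exotic
// receiver fall through to a conservative slow path. The enum and handler
// names are illustrative, not V8 APIs.
enum class ReceiverKind { kJSObject, kJSProxy, kWasmObject };

enum class StoreHandlerKind { kElementHandler, kProxy, kSlow };

StoreHandlerKind SelectStoreHandler(ReceiverKind kind,
                                    bool is_define_keyed_own) {
  if (kind != ReceiverKind::kJSObject) {
    // DefineKeyedOwn must use the slow stub even for proxies.
    if (kind == ReceiverKind::kJSProxy && !is_define_keyed_own) {
      return StoreHandlerKind::kProxy;
    }
    return StoreHandlerKind::kSlow;  // Wasm objects and other special cases.
  }
  return StoreHandlerKind::kElementHandler;
}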
+ DCHECK(object->IsJSObject()); return SerializeJSObject(Handle::cast(object), key); } } diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index c29af103a..f6f34d672 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -621,8 +621,8 @@ DeoptFrame MaglevGraphBuilder::GetDeoptFrameForLazyDeoptHelper( // Currently only support builtin continuations for bytecodes that write to // the accumulator - DCHECK( - interpreter::Bytecodes::WritesAccumulator(iterator_.current_bytecode())); + DCHECK(interpreter::Bytecodes::WritesOrClobbersAccumulator( + iterator_.current_bytecode())); return BuiltinContinuationDeoptFrame( continuation_scope->continuation(), {}, GetContext(), // Mark the accumulator dead in parent frames since we know that the @@ -4635,7 +4635,7 @@ void MaglevGraphBuilder::VisitFindNonDefaultConstructorOrConstruct() { TryGetConstant(new_target); if (kind == FunctionKind::kDefaultBaseConstructor) { ValueNode* object; - if (new_target_function && new_target_function->IsJSFunction() && + if (new_target_function && new_target_function->IsJSFunction() && HasValidInitialMap(new_target_function->AsJSFunction(), current_function)) { object = BuildAllocateFastObject( diff --git a/src/objects/code.cc b/src/objects/code.cc index a3c6e810e..ecf3e6722 100644 --- a/src/objects/code.cc +++ b/src/objects/code.cc @@ -83,10 +83,10 @@ void Code::CopyFromNoFlush(ByteArray reloc_info, Heap* heap, #ifdef V8_ENABLE_JIT_CODE_SIGN if (IsSupportJitCodeSigner()) { CHECK(desc.jit_code_signer->ValidateCodeCopy(reinterpret_cast(instruction_start()), - desc.buffer, desc.instr_size) == 0); + desc.buffer, desc.instr_size) == 0); } else { CopyBytes(reinterpret_cast(instruction_start()), desc.buffer, - static_cast(desc.instr_size)); + static_cast(desc.instr_size)); } #else CopyBytes(reinterpret_cast(instruction_start()), desc.buffer, diff --git a/src/objects/elements.cc b/src/objects/elements.cc index 4d5fa8f1f..47cfa50ba 100644 --- a/src/objects/elements.cc +++ b/src/objects/elements.cc @@ -3836,7 +3836,8 @@ class TypedElementsAccessor // them. if (source_proto.IsNull(isolate)) return false; if (source_proto.IsJSProxy()) return true; - if (!context.native_context().is_initial_array_prototype( + if (source_proto.IsJSObject() && + !context.native_context().is_initial_array_prototype( JSObject::cast(source_proto))) { return true; } diff --git a/src/objects/js-objects.cc b/src/objects/js-objects.cc index 0c0815338..a57252d9c 100644 --- a/src/objects/js-objects.cc +++ b/src/objects/js-objects.cc @@ -1458,8 +1458,6 @@ Maybe JSReceiver::ValidateAndApplyPropertyDescriptor( Maybe should_throw, Handle property_name) { // We either need a LookupIterator, or a property name. DCHECK((it == nullptr) != property_name.is_null()); - Handle object; - if (it != nullptr) object = Handle::cast(it->GetReceiver()); bool desc_is_data_descriptor = PropertyDescriptor::IsDataDescriptor(desc); bool desc_is_accessor_descriptor = PropertyDescriptor::IsAccessorDescriptor(desc); @@ -3592,6 +3590,7 @@ void JSObject::AddProperty(Isolate* isolate, Handle object, #ifdef DEBUG uint32_t index; DCHECK(!object->IsJSProxy()); + DCHECK(!object->IsWasmObject()); DCHECK(!name->AsArrayIndex(&index)); Maybe maybe = GetPropertyAttributes(&it); DCHECK(maybe.IsJust()); @@ -5175,7 +5174,7 @@ Maybe JSObject::SetPrototype(Isolate* isolate, Handle object, DCHECK(!object->IsAccessCheckNeeded()); } - // Silently ignore the change if value is not a JSObject or null. 
+ // Silently ignore the change if value is not a JSReceiver or null. // SpiderMonkey behaves this way. if (!value->IsJSReceiver() && !value->IsNull(isolate)) return Just(true); diff --git a/src/objects/keys.cc b/src/objects/keys.cc index ab9b931fc..dc603b032 100644 --- a/src/objects/keys.cc +++ b/src/objects/keys.cc @@ -314,9 +314,8 @@ void TrySettingEmptyEnumCache(JSReceiver object) { Map map = object.map(); DCHECK_EQ(kInvalidEnumCacheSentinel, map.EnumLength()); if (!map.OnlyHasSimpleProperties()) return; - if (map.IsJSProxyMap()) return; + DCHECK(map.IsJSObjectMap()); // Implied by {OnlyHasSimpleProperties}. if (map.NumberOfEnumerableProperties() > 0) return; - DCHECK(object.IsJSObject()); map.SetEnumLength(0); } diff --git a/src/objects/module.cc b/src/objects/module.cc index fccb1c238..3a100e80c 100644 --- a/src/objects/module.cc +++ b/src/objects/module.cc @@ -354,8 +354,7 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate, // Turbofan can use this for inlining the access. JSObject::OptimizeAsPrototype(ns); - Handle<PrototypeInfo> proto_info = - Map::GetOrCreatePrototypeInfo(Handle<JSObject>::cast(ns), isolate); + Handle<PrototypeInfo> proto_info = Map::GetOrCreatePrototypeInfo(ns, isolate); proto_info->set_module_namespace(*ns); return ns; } diff --git a/src/objects/objects.cc b/src/objects/objects.cc index e13773d9a..10a305cbb 100644 --- a/src/objects/objects.cc +++ b/src/objects/objects.cc @@ -459,14 +459,27 @@ Handle<String> NoSideEffectsErrorToString(Isolate* isolate, if (name_str->length() == 0) return msg_str; if (msg_str->length() == 0) return name_str; - IncrementalStringBuilder builder(isolate); - builder.AppendString(name_str); - builder.AppendCStringLiteral(": "); + constexpr const char error_suffix[] = "<a very large string>"; + constexpr int error_suffix_size = sizeof(error_suffix); + int suffix_size = std::min(error_suffix_size, msg_str->length()); - if (builder.Length() + msg_str->length() <= String::kMaxLength) { - builder.AppendString(msg_str); + IncrementalStringBuilder builder(isolate); + if (name_str->length() + suffix_size + 2 /* ": " */ > String::kMaxLength) { + constexpr const char connector[] = "... : "; + int connector_size = sizeof(connector); + Handle<String> truncated_name = isolate->factory()->NewProperSubString( + name_str, 0, name_str->length() - error_suffix_size - connector_size); + builder.AppendString(truncated_name); + builder.AppendCStringLiteral(connector); + builder.AppendCStringLiteral(error_suffix); } else { - builder.AppendCStringLiteral("<a very large string>"); + builder.AppendString(name_str); + builder.AppendCStringLiteral(": "); + if (builder.Length() + msg_str->length() <= String::kMaxLength) { + builder.AppendString(msg_str); + } else { + builder.AppendCStringLiteral(error_suffix); + } } return builder.Finish().ToHandleChecked(); diff --git a/src/objects/objects.h b/src/objects/objects.h index 7c08126e7..1c125e83e 100644 --- a/src/objects/objects.h +++ b/src/objects/objects.h @@ -693,8 +693,9 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> { } }; - // For use with std::unordered_set/unordered_map when using both - // InstructionStream and non-InstructionStream objects as keys. + // For use with std::unordered_set/unordered_map when one of the objects may + // be located outside the main pointer compression cage, for example in + // trusted space. In this case, we must use full pointer comparison.
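// Why full pointer comparison matters here, as a self-contained sketch:
// under pointer compression a fast equality may compare only the 32-bit
// offsets within a cage, so two objects at the same offset in different
// cages would alias. The addresses below are made up for illustration.
#include <cstdint>

constexpr bool CompressedEquals(uint64_t a, uint64_t b) {
  return static_cast<uint32_t>(a) == static_cast<uint32_t>(b);
}

constexpr bool FullPointerEquals(uint64_t a, uint64_t b) { return a == b; }

// Same offset 0x1000 in two different 4GB cages:
static_assert(CompressedEquals(0x1000'0000'1000u, 0x2000'0000'1000u));
static_assert(!FullPointerEquals(0x1000'0000'1000u, 0x2000'0000'1000u));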
struct KeyEqualSafe { bool operator()(const Object a, const Object b) const { return a.SafeEquals(b); diff --git a/src/objects/value-serializer.cc b/src/objects/value-serializer.cc index 2efca82aa..581fcd316 100644 --- a/src/objects/value-serializer.cc +++ b/src/objects/value-serializer.cc @@ -1098,11 +1098,8 @@ Maybe ValueSerializer::WriteWasmModule(Handle object) { return ThrowDataCloneError(MessageTemplate::kDataCloneError, object); } - // TODO(titzer): introduce a Utils::ToLocal for WasmModuleObject. Maybe transfer_id = delegate_->GetWasmModuleTransferId( - reinterpret_cast(isolate_), - v8::Local::Cast( - Utils::ToLocal(Handle::cast(object)))); + reinterpret_cast(isolate_), Utils::ToLocal(object)); RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate_, Nothing()); uint32_t id = 0; if (transfer_id.To(&id)) { diff --git a/src/regexp/arm/regexp-macro-assembler-arm.cc b/src/regexp/arm/regexp-macro-assembler-arm.cc index 4cf2fcf8d..7abc77fbd 100644 --- a/src/regexp/arm/regexp-macro-assembler-arm.cc +++ b/src/regexp/arm/regexp-macro-assembler-arm.cc @@ -754,11 +754,13 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { __ mov(r0, Operand(stack_limit)); __ ldr(r0, MemOperand(r0)); __ sub(r0, sp, r0, SetCC); + Operand extra_space_for_variables(num_registers_ * kSystemPointerSize); + // Handle it if the stack pointer is already below the stack limit. __ b(ls, &stack_limit_hit); // Check if there is room for the variable number of registers above // the stack limit. - __ cmp(r0, Operand(num_registers_ * kSystemPointerSize)); + __ cmp(r0, extra_space_for_variables); __ b(hs, &stack_ok); // Exit with OutOfMemory exception. There is not enough space on the stack // for our working registers. @@ -766,7 +768,7 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { __ jmp(&return_r0); __ bind(&stack_limit_hit); - CallCheckStackGuardState(); + CallCheckStackGuardState(extra_space_for_variables); __ cmp(r0, Operand::Zero()); // If returned value is non-zero, we exit with the returned value as result. __ b(ne, &return_r0); @@ -1157,16 +1159,18 @@ void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) { // Private methods: -void RegExpMacroAssemblerARM::CallCheckStackGuardState() { +void RegExpMacroAssemblerARM::CallCheckStackGuardState(Operand extra_space) { DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins()); DCHECK(!masm_->options().isolate_independent_code); - __ PrepareCallCFunction(3); + __ PrepareCallCFunction(4); + // Extra space for variables to consider in stack check. + __ mov(arg_reg_4, extra_space); // RegExp code frame pointer. - __ mov(r2, frame_pointer()); + __ mov(arg_reg_3, frame_pointer()); // InstructionStream of self. - __ mov(r1, Operand(masm_->CodeObject())); + __ mov(arg_reg_2, Operand(masm_->CodeObject())); // We need to make room for the return address on the stack. int stack_alignment = base::OS::ActivationFrameAlignment(); @@ -1194,7 +1198,6 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState() { __ mov(code_pointer(), Operand(masm_->CodeObject())); } - // Helper function for reading a value out of a stack frame. 
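// The extra_space plumbing above in one line of arithmetic: the generated
// stack check must fail not only when sp is already at or below the limit,
// but also when the register area about to be reserved would push it below.
// Standalone sketch, assuming the gap is num_registers * pointer size as in
// the ARM code above; the names are illustrative.
#include <cstdint>

bool StackCheckFails(uintptr_t sp, uintptr_t limit, uintptr_t gap) {
  // Matches the generated sequence: subtract, branch if sp <= limit, then
  // compare the remaining headroom against the space still to be claimed.
  return sp <= limit || (sp - limit) < gap;
}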
template <typename T> static T& frame_entry(Address re_frame, int frame_offset) { @@ -1209,7 +1212,8 @@ static T* frame_entry_address(Address re_frame, int frame_offset) { int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address, Address raw_code, - Address re_frame) { + Address re_frame, + uintptr_t extra_space) { InstructionStream re_code = InstructionStream::cast(Object(raw_code)); return NativeRegExpMacroAssembler::CheckStackGuardState( frame_entry<Isolate*>(re_frame, kIsolateOffset), @@ -1219,10 +1223,10 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address, return_address, re_code, frame_entry_address<Address>
(re_frame, kInputStringOffset), frame_entry_address(re_frame, kInputStartOffset), - frame_entry_address(re_frame, kInputEndOffset)); + frame_entry_address(re_frame, kInputEndOffset), + extra_space); } - MemOperand RegExpMacroAssemblerARM::register_location(int register_index) { DCHECK(register_index < (1<<30)); if (num_registers_ <= register_index) { diff --git a/src/regexp/arm/regexp-macro-assembler-arm.h b/src/regexp/arm/regexp-macro-assembler-arm.h index 44be0d920..e8d9f6d76 100644 --- a/src/regexp/arm/regexp-macro-assembler-arm.h +++ b/src/regexp/arm/regexp-macro-assembler-arm.h @@ -88,7 +88,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM // returning. // {raw_code} is an Address because this is called via ExternalReference. static int CheckStackGuardState(Address* return_address, Address raw_code, - Address re_frame); + Address re_frame, uintptr_t extra_space); private: // Offsets from frame_pointer() of function parameters and stored registers. @@ -152,7 +152,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM // Check whether we are exceeding the stack limit on the backtrack stack. void CheckStackLimit(); - void CallCheckStackGuardState(); + void CallCheckStackGuardState( + Operand extra_space_for_variables = Operand::Zero()); void CallIsCharacterInRangeArray(const ZoneList* ranges); // The ebp-relative location of a regexp register. diff --git a/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/src/regexp/arm64/regexp-macro-assembler-arm64.cc index fe1b0f6e0..0f070e36c 100644 --- a/src/regexp/arm64/regexp-macro-assembler-arm64.cc +++ b/src/regexp/arm64/regexp-macro-assembler-arm64.cc @@ -866,13 +866,14 @@ Handle RegExpMacroAssemblerARM64::GetCode(Handle source) { __ Mov(x10, stack_limit); __ Ldr(x10, MemOperand(x10)); __ Subs(x10, sp, x10); + Operand extra_space_for_variables(num_wreg_to_allocate * kWRegSize); // Handle it if the stack pointer is already below the stack limit. __ B(ls, &stack_limit_hit); // Check if there is room for the variable number of registers above // the stack limit. - __ Cmp(x10, num_wreg_to_allocate * kWRegSize); + __ Cmp(x10, extra_space_for_variables); __ B(hs, &stack_ok); // Exit with OutOfMemory exception. There is not enough space on the stack @@ -881,7 +882,7 @@ Handle RegExpMacroAssemblerARM64::GetCode(Handle source) { __ B(&return_w0); __ Bind(&stack_limit_hit); - CallCheckStackGuardState(x10); + CallCheckStackGuardState(x10, extra_space_for_variables); // If returned value is non-zero, we exit with the returned value as result. __ Cbnz(w0, &return_w0); @@ -1432,7 +1433,8 @@ static T* frame_entry_address(Address re_frame, int frame_offset) { int RegExpMacroAssemblerARM64::CheckStackGuardState( Address* return_address, Address raw_code, Address re_frame, - int start_index, const byte** input_start, const byte** input_end) { + int start_index, const byte** input_start, const byte** input_end, + uintptr_t extra_space) { InstructionStream re_code = InstructionStream::cast(Object(raw_code)); return NativeRegExpMacroAssembler::CheckStackGuardState( frame_entry(re_frame, kIsolateOffset), start_index, @@ -1440,7 +1442,7 @@ int RegExpMacroAssemblerARM64::CheckStackGuardState( frame_entry(re_frame, kDirectCallOffset)), return_address, re_code, frame_entry_address
(re_frame, kInputStringOffset), input_start, - input_end); + input_end, extra_space); } @@ -1459,7 +1461,8 @@ void RegExpMacroAssemblerARM64::CheckPosition(int cp_offset, // Private methods: -void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) { +void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch, + Operand extra_space) { DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins()); DCHECK(!masm_->options().isolate_independent_code); @@ -1474,6 +1477,7 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) { __ Claim(xreg_to_claim); + __ Mov(x6, extra_space); // CheckStackGuardState needs the end and start addresses of the input string. __ Poke(input_end(), 2 * kSystemPointerSize); __ Add(x5, sp, 2 * kSystemPointerSize); diff --git a/src/regexp/arm64/regexp-macro-assembler-arm64.h b/src/regexp/arm64/regexp-macro-assembler-arm64.h index a5164472b..05b4eb5bd 100644 --- a/src/regexp/arm64/regexp-macro-assembler-arm64.h +++ b/src/regexp/arm64/regexp-macro-assembler-arm64.h @@ -95,7 +95,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64 static int CheckStackGuardState(Address* return_address, Address raw_code, Address re_frame, int start_offset, const byte** input_start, - const byte** input_end); + const byte** input_end, + uintptr_t extra_space); private: static constexpr int kFramePointerOffset = 0; @@ -174,7 +175,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64 // Check whether we are exceeding the stack limit on the backtrack stack. void CheckStackLimit(); - void CallCheckStackGuardState(Register scratch); + void CallCheckStackGuardState(Register scratch, + Operand extra_space = Operand(0)); void CallIsCharacterInRangeArray(const ZoneList* ranges); // Location of a 32 bit position register. diff --git a/src/regexp/experimental/experimental-interpreter.cc b/src/regexp/experimental/experimental-interpreter.cc index 095cbd3a1..456647fa3 100644 --- a/src/regexp/experimental/experimental-interpreter.cc +++ b/src/regexp/experimental/experimental-interpreter.cc @@ -372,6 +372,8 @@ class NfaInterpreter { // the current input index. All remaining `active_threads_` are discarded. 
void RunActiveThread(InterpreterThread t) { while (true) { + SBXCHECK_GE(t.pc, 0); + SBXCHECK_LT(t.pc, bytecode_.length()); if (IsPcProcessed(t.pc)) return; MarkPcProcessed(t.pc); diff --git a/src/regexp/experimental/experimental.cc b/src/regexp/experimental/experimental.cc index 8bbf32265..64b3447f6 100644 --- a/src/regexp/experimental/experimental.cc +++ b/src/regexp/experimental/experimental.cc @@ -165,7 +165,7 @@ int32_t ExperimentalRegExp::ExecRaw(Isolate* isolate, int32_t* output_registers, int32_t output_register_count, int32_t subject_index) { - DCHECK(v8_flags.enable_experimental_regexp_engine); + CHECK(v8_flags.enable_experimental_regexp_engine); DisallowGarbageCollection no_gc; if (v8_flags.trace_experimental_regexp_engine) { @@ -262,7 +262,7 @@ int32_t ExperimentalRegExp::OneshotExecRaw(Isolate* isolate, int32_t* output_registers, int32_t output_register_count, int32_t subject_index) { - DCHECK(v8_flags.enable_experimental_regexp_engine_on_excessive_backtracks); + CHECK(v8_flags.enable_experimental_regexp_engine_on_excessive_backtracks); if (v8_flags.trace_experimental_regexp_engine) { StdoutStream{} << "Experimental execution (oneshot) of regexp " diff --git a/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/src/regexp/ia32/regexp-macro-assembler-ia32.cc index 70037dc16..3937e96c4 100644 --- a/src/regexp/ia32/regexp-macro-assembler-ia32.cc +++ b/src/regexp/ia32/regexp-macro-assembler-ia32.cc @@ -801,11 +801,13 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { ExternalReference::address_of_jslimit(isolate()); __ mov(eax, esp); __ sub(eax, StaticVariable(stack_limit)); + Immediate extra_space_for_variables(num_registers_ * kSystemPointerSize); + // Handle it if the stack pointer is already below the stack limit. __ j(below_equal, &stack_limit_hit); // Check if there is room for the variable number of registers above // the stack limit. - __ cmp(eax, num_registers_ * kSystemPointerSize); + __ cmp(eax, extra_space_for_variables); __ j(above_equal, &stack_ok); // Exit with OutOfMemory exception. There is not enough space on the stack // for our working registers. @@ -814,7 +816,7 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { __ bind(&stack_limit_hit); __ push(backtrack_stackpointer()); - CallCheckStackGuardState(ebx); + CallCheckStackGuardState(ebx, extra_space_for_variables); __ pop(backtrack_stackpointer()); __ or_(eax, eax); // If returned value is non-zero, we exit with the returned value as result. @@ -1213,9 +1215,12 @@ void RegExpMacroAssemblerIA32::ClearRegisters(int reg_from, int reg_to) { // Private methods: -void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) { - static const int num_arguments = 3; +void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch, + Immediate extra_space) { + static const int num_arguments = 4; __ PrepareCallCFunction(num_arguments, scratch); + // Extra space for variables. + __ mov(Operand(esp, 3 * kSystemPointerSize), extra_space); // RegExp code frame pointer. __ mov(Operand(esp, 2 * kSystemPointerSize), ebp); // InstructionStream of self. 
@@ -1246,7 +1251,8 @@ static T* frame_entry_address(Address re_frame, int frame_offset) { int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address, Address raw_code, - Address re_frame) { + Address re_frame, + uintptr_t extra_space) { InstructionStream re_code = InstructionStream::cast(Object(raw_code)); return NativeRegExpMacroAssembler::CheckStackGuardState( frame_entry<Isolate*>(re_frame, kIsolateOffset), @@ -1256,10 +1262,10 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address, return_address, re_code, frame_entry_address<Address>
(re_frame, kInputStringOffset), frame_entry_address(re_frame, kInputStartOffset), - frame_entry_address(re_frame, kInputEndOffset)); + frame_entry_address(re_frame, kInputEndOffset), + extra_space); } - Operand RegExpMacroAssemblerIA32::register_location(int register_index) { DCHECK(register_index < (1<<30)); if (num_registers_ <= register_index) { diff --git a/src/regexp/ia32/regexp-macro-assembler-ia32.h b/src/regexp/ia32/regexp-macro-assembler-ia32.h index 649c61d88..a33b687c8 100644 --- a/src/regexp/ia32/regexp-macro-assembler-ia32.h +++ b/src/regexp/ia32/regexp-macro-assembler-ia32.h @@ -89,7 +89,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32 // returning. // {raw_code} is an Address because this is called via ExternalReference. static int CheckStackGuardState(Address* return_address, Address raw_code, - Address re_frame); + Address re_frame, uintptr_t extra_space); private: Operand StaticVariable(const ExternalReference& ext); @@ -159,7 +159,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32 // Check whether we are exceeding the stack limit on the backtrack stack. void CheckStackLimit(); - void CallCheckStackGuardState(Register scratch); + void CallCheckStackGuardState(Register scratch, + Immediate extra_space = Immediate(0)); void CallIsCharacterInRangeArray(const ZoneList* ranges); // The ebp-relative location of a regexp register. diff --git a/src/regexp/regexp-compiler-tonode.cc b/src/regexp/regexp-compiler-tonode.cc index 3258bb514..44f611907 100644 --- a/src/regexp/regexp-compiler-tonode.cc +++ b/src/regexp/regexp-compiler-tonode.cc @@ -1190,6 +1190,8 @@ RegExpNode* RegExpLookaround::Builder::ForMatch(RegExpNode* match) { RegExpNode* RegExpLookaround::ToNode(RegExpCompiler* compiler, RegExpNode* on_success) { + compiler->ToNodeMaybeCheckForStackOverflow(); + int stack_pointer_register = compiler->AllocateRegister(); int position_register = compiler->AllocateRegister(); diff --git a/src/regexp/regexp-interpreter.cc b/src/regexp/regexp-interpreter.cc index 57eff39e7..a2c67b1f5 100644 --- a/src/regexp/regexp-interpreter.cc +++ b/src/regexp/regexp-interpreter.cc @@ -177,22 +177,30 @@ class InterpreterRegisters { int output_register_count) : registers_(total_register_count), output_registers_(output_registers), + total_register_count_(total_register_count), output_register_count_(output_register_count) { // TODO(jgruber): Use int32_t consistently for registers. Currently, CSA // uses int32_t while runtime uses int. static_assert(sizeof(int) == sizeof(int32_t)); - DCHECK_GE(output_register_count, 2); // At least 2 for the match itself. - DCHECK_GE(total_register_count, output_register_count); - DCHECK_LE(total_register_count, RegExpMacroAssembler::kMaxRegisterCount); + SBXCHECK_GE(output_register_count, 2); // At least 2 for the match itself. + SBXCHECK_GE(total_register_count, output_register_count); + SBXCHECK_LE(total_register_count, RegExpMacroAssembler::kMaxRegisterCount); DCHECK_NOT_NULL(output_registers); // Initialize the output register region to -1 signifying 'no match'. 
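// Sketch of the hardening pattern in the surrounding interpreter hunks:
// register counts originate from data an attacker inside the sandbox may
// corrupt, so the DCHECKs become always-on checks (SBXCHECK behaves like
// CHECK with the sandbox enabled) and every register access is bounds
// checked. Toy types; ALWAYS_CHECK stands in for SBXCHECK.
#include <cstdlib>
#include <vector>

#define ALWAYS_CHECK(cond)     \
  do {                         \
    if (!(cond)) std::abort(); \
  } while (false)

class ToyRegisters {
 public:
  ToyRegisters(int total_count, int output_count) : total_count_(total_count) {
    ALWAYS_CHECK(output_count >= 2);  // At least 2 for the match itself.
    ALWAYS_CHECK(total_count >= output_count);
    regs_.resize(static_cast<size_t>(total_count), -1);  // -1: 'no match'.
  }

  int& operator[](size_t index) {
    ALWAYS_CHECK(index < static_cast<size_t>(total_count_));  // No OOB access.
    return regs_[index];
  }

 private:
  std::vector<int> regs_;
  const int total_count_;
};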
std::memset(registers_.data(), -1, output_register_count * sizeof(RegisterT)); + USE(total_register_count_); } - const RegisterT& operator[](size_t index) const { return registers_[index]; } - RegisterT& operator[](size_t index) { return registers_[index]; } + const RegisterT& operator[](size_t index) const { + SBXCHECK_LT(index, total_register_count_); + return registers_[index]; + } + RegisterT& operator[](size_t index) { + SBXCHECK_LT(index, total_register_count_); + return registers_[index]; + } void CopyToOutputRegisters() { MemCopy(output_registers_, registers_.data(), @@ -203,6 +211,7 @@ class InterpreterRegisters { static constexpr int kStaticCapacity = 64; // Arbitrary. base::SmallVector registers_; RegisterT* const output_registers_; + const int total_register_count_; const int output_register_count_; }; diff --git a/src/regexp/regexp-macro-assembler.cc b/src/regexp/regexp-macro-assembler.cc index 8a248aaed..5aff2b0fa 100644 --- a/src/regexp/regexp-macro-assembler.cc +++ b/src/regexp/regexp-macro-assembler.cc @@ -284,14 +284,14 @@ bool NativeRegExpMacroAssembler::CanReadUnaligned() const { int NativeRegExpMacroAssembler::CheckStackGuardState( Isolate* isolate, int start_index, RegExp::CallOrigin call_origin, Address* return_address, InstructionStream re_code, Address* subject, - const byte** input_start, const byte** input_end) { + const byte** input_start, const byte** input_end, uintptr_t gap) { DisallowGarbageCollection no_gc; Address old_pc = PointerAuthentication::AuthenticatePC(return_address, 0); DCHECK_LE(re_code.instruction_start(), old_pc); DCHECK_LE(old_pc, re_code.code(kAcquireLoad).instruction_end()); StackLimitCheck check(isolate); - bool js_has_overflowed = check.JsHasOverflowed(); + bool js_has_overflowed = check.JsHasOverflowed(gap); if (call_origin == RegExp::CallOrigin::kFromJs) { // Direct calls from JavaScript can be interrupted in two ways: diff --git a/src/regexp/regexp-macro-assembler.h b/src/regexp/regexp-macro-assembler.h index 2ba9e2d28..4d16f8a98 100644 --- a/src/regexp/regexp-macro-assembler.h +++ b/src/regexp/regexp-macro-assembler.h @@ -334,7 +334,7 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler { Address* return_address, InstructionStream re_code, Address* subject, const byte** input_start, - const byte** input_end); + const byte** input_end, uintptr_t gap); static Address word_character_map_address() { return reinterpret_cast
(&word_character_map[0]); diff --git a/src/regexp/regexp.cc b/src/regexp/regexp.cc index e6e7d140a..c8bd9bee1 100644 --- a/src/regexp/regexp.cc +++ b/src/regexp/regexp.cc @@ -1201,6 +1201,15 @@ int32_t* RegExpGlobalCache::FetchNext() { if (num_matches_ <= 0) { return nullptr; } + + // Number of matches can't exceed maximum matches. + // This check is enough to prevent OOB accesses to register_array_ in the + // else branch below, since current_match_index < num_matches_ in this + // branch, it follows that current_match_index < max_matches_. And since + // max_matches_ = register_array_size_ / registers_per_match it follows + // that current_match_index * registers_per_match_ < register_array_size_. + SBXCHECK_LE(num_matches_, max_matches_); + current_match_index_ = 0; return register_array_; } else { diff --git a/src/regexp/x64/regexp-macro-assembler-x64.cc b/src/regexp/x64/regexp-macro-assembler-x64.cc index 53b2f5ab5..5a535ee57 100644 --- a/src/regexp/x64/regexp-macro-assembler-x64.cc +++ b/src/regexp/x64/regexp-macro-assembler-x64.cc @@ -842,11 +842,13 @@ Handle RegExpMacroAssemblerX64::GetCode(Handle source) { __ movq(r9, rsp); __ Move(kScratchRegister, stack_limit); __ subq(r9, Operand(kScratchRegister, 0)); + Immediate extra_space_for_variables(num_registers_ * kSystemPointerSize); + // Handle it if the stack pointer is already below the stack limit. __ j(below_equal, &stack_limit_hit); // Check if there is room for the variable number of registers above // the stack limit. - __ cmpq(r9, Immediate(num_registers_ * kSystemPointerSize)); + __ cmpq(r9, extra_space_for_variables); __ j(above_equal, &stack_ok); // Exit with OutOfMemory exception. There is not enough space on the stack // for our working registers. @@ -856,7 +858,8 @@ Handle RegExpMacroAssemblerX64::GetCode(Handle source) { __ bind(&stack_limit_hit); __ Move(code_object_pointer(), masm_.CodeObject()); __ pushq(backtrack_stackpointer()); - CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp. + // CallCheckStackGuardState preserves no registers beside rbp and rsp. + CallCheckStackGuardState(extra_space_for_variables); __ popq(backtrack_stackpointer()); __ testq(rax, rax); // If returned value is non-zero, we exit with the returned value as result. @@ -1266,35 +1269,38 @@ void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) { // Private methods: -void RegExpMacroAssemblerX64::CallCheckStackGuardState() { +void RegExpMacroAssemblerX64::CallCheckStackGuardState(Immediate extra_space) { // This function call preserves no register values. Caller should // store anything volatile in a C call or overwritten by this function. - static const int num_arguments = 3; + static const int num_arguments = 4; __ PrepareCallCFunction(num_arguments); #ifdef V8_TARGET_OS_WIN + // Fourth argument: Extra space for variables. + __ movq(arg_reg_4, extra_space); // Second argument: InstructionStream of self. (Do this before overwriting - // r8). - __ movq(rdx, code_object_pointer()); + // r8 (arg_reg_3)). + __ movq(arg_reg_2, code_object_pointer()); // Third argument: RegExp code frame pointer. - __ movq(r8, rbp); + __ movq(arg_reg_3, rbp); // First argument: Next address on the stack (will be address of // return address). - __ leaq(rcx, Operand(rsp, -kSystemPointerSize)); + __ leaq(arg_reg_1, Operand(rsp, -kSystemPointerSize)); #else + // Fourth argument: Extra space for variables. + __ movq(arg_reg_4, extra_space); // Third argument: RegExp code frame pointer. 
- __ movq(rdx, rbp); + __ movq(arg_reg_3, rbp); // Second argument: InstructionStream of self. - __ movq(rsi, code_object_pointer()); + __ movq(arg_reg_2, code_object_pointer()); // First argument: Next address on the stack (will be address of // return address). - __ leaq(rdi, Operand(rsp, -kSystemPointerSize)); + __ leaq(arg_reg_1, Operand(rsp, -kSystemPointerSize)); #endif ExternalReference stack_check = ExternalReference::re_check_stack_guard_state(); CallCFunctionFromIrregexpCode(stack_check, num_arguments); } - // Helper function for reading a value out of a stack frame. template static T& frame_entry(Address re_frame, int frame_offset) { @@ -1309,7 +1315,8 @@ static T* frame_entry_address(Address re_frame, int frame_offset) { int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address, Address raw_code, - Address re_frame) { + Address re_frame, + uintptr_t extra_space) { InstructionStream re_code = InstructionStream::cast(Object(raw_code)); return NativeRegExpMacroAssembler::CheckStackGuardState( frame_entry(re_frame, kIsolateOffset), @@ -1319,10 +1326,10 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address, return_address, re_code, frame_entry_address
(re_frame, kInputStringOffset), frame_entry_address(re_frame, kInputStartOffset), - frame_entry_address(re_frame, kInputEndOffset)); + frame_entry_address(re_frame, kInputEndOffset), + extra_space); } - Operand RegExpMacroAssemblerX64::register_location(int register_index) { DCHECK(register_index < (1<<30)); if (num_registers_ <= register_index) { diff --git a/src/regexp/x64/regexp-macro-assembler-x64.h b/src/regexp/x64/regexp-macro-assembler-x64.h index bfe8290a1..85dacfddf 100644 --- a/src/regexp/x64/regexp-macro-assembler-x64.h +++ b/src/regexp/x64/regexp-macro-assembler-x64.h @@ -88,7 +88,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64 // returning. // {raw_code} is an Address because this is called via ExternalReference. static int CheckStackGuardState(Address* return_address, Address raw_code, - Address re_frame); + Address re_frame, uintptr_t extra_space); private: // Offsets from rbp of function parameters and stored registers. @@ -198,7 +198,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64 // Check whether we are exceeding the stack limit on the backtrack stack. void CheckStackLimit(); - void CallCheckStackGuardState(); + void CallCheckStackGuardState(Immediate extra_space = Immediate(0)); void CallIsCharacterInRangeArray(const ZoneList* ranges); // The rbp-relative location of a regexp register. diff --git a/src/runtime/runtime-internal.cc b/src/runtime/runtime-internal.cc index c54debfed..dce33fe16 100644 --- a/src/runtime/runtime-internal.cc +++ b/src/runtime/runtime-internal.cc @@ -346,7 +346,23 @@ RUNTIME_FUNCTION(Runtime_StackGuard) { return isolate->StackOverflow(); } - return isolate->stack_guard()->HandleInterrupts(); + return isolate->stack_guard()->HandleInterrupts( + StackGuard::InterruptLevel::kAnyEffect); +} + +RUNTIME_FUNCTION(Runtime_HandleNoHeapWritesInterrupts) { + SealHandleScope shs(isolate); + DCHECK_EQ(0, args.length()); + TRACE_EVENT0("v8.execute", "V8.StackGuard"); + + // First check if this is a real stack overflow. 
+ StackLimitCheck check(isolate); + if (check.JsHasOverflowed()) { + return isolate->StackOverflow(); + } + + return isolate->stack_guard()->HandleInterrupts( + StackGuard::InterruptLevel::kNoHeapWrites); } RUNTIME_FUNCTION(Runtime_StackGuardWithGap) { @@ -361,7 +377,8 @@ RUNTIME_FUNCTION(Runtime_StackGuardWithGap) { return isolate->StackOverflow(); } - return isolate->stack_guard()->HandleInterrupts(); + return isolate->stack_guard()->HandleInterrupts( + StackGuard::InterruptLevel::kAnyEffect); } namespace { diff --git a/src/runtime/runtime-regexp.cc b/src/runtime/runtime-regexp.cc index 4399f4bb8..b561298aa 100644 --- a/src/runtime/runtime-regexp.cc +++ b/src/runtime/runtime-regexp.cc @@ -1155,7 +1155,20 @@ Handle ConstructNamedCaptureGroupsObject( Handle capture_value(f_get_capture(capture_ix), isolate); DCHECK(capture_value->IsUndefined(isolate) || capture_value->IsString()); - JSObject::AddProperty(isolate, groups, capture_name, capture_value, NONE); + LookupIterator it(isolate, groups, capture_name, groups, + LookupIterator::OWN_SKIP_INTERCEPTOR); + if (it.IsFound()) { + DCHECK(v8_flags.js_regexp_duplicate_named_groups); + if (!capture_value->IsUndefined(isolate)) { + DCHECK(IsUndefined(*it.GetDataValue(), isolate)); + CHECK(Object::SetDataProperty(&it, capture_value).ToChecked()); + } + } else { + CHECK(Object::AddDataProperty(&it, capture_value, NONE, + Just(ShouldThrow::kThrowOnError), + StoreOrigin::kNamed) + .IsJust()); + } } return groups; diff --git a/src/runtime/runtime-wasm.cc b/src/runtime/runtime-wasm.cc index 9b625f7e1..a63325dbd 100644 --- a/src/runtime/runtime-wasm.cc +++ b/src/runtime/runtime-wasm.cc @@ -246,7 +246,8 @@ RUNTIME_FUNCTION(Runtime_WasmStackGuard) { StackLimitCheck check(isolate); if (check.JsHasOverflowed()) return isolate->StackOverflow(); - return isolate->stack_guard()->HandleInterrupts(); + return isolate->stack_guard()->HandleInterrupts( + StackGuard::InterruptLevel::kAnyEffect); } RUNTIME_FUNCTION(Runtime_WasmCompileLazy) { diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h index ab3ecfac9..e09d6205b 100644 --- a/src/runtime/runtime.h +++ b/src/runtime/runtime.h @@ -248,6 +248,7 @@ namespace internal { F(PerformMicrotaskCheckpoint, 0, 1) \ F(SharedValueBarrierSlow, 1, 1) \ F(StackGuard, 0, 1) \ + F(HandleNoHeapWritesInterrupts, 0, 1) \ F(StackGuardWithGap, 1, 1) \ F(Throw, 1, 1) \ F(ThrowApplyNonFunction, 1, 1) \ diff --git a/src/strings/string-stream.cc b/src/strings/string-stream.cc index 2c96c7cf9..618b138b9 100644 --- a/src/strings/string-stream.cc +++ b/src/strings/string-stream.cc @@ -416,7 +416,7 @@ void StringStream::PrintPrototype(JSFunction fun, Object receiver) { bool print_name = false; Isolate* isolate = fun.GetIsolate(); if (receiver.IsNullOrUndefined(isolate) || receiver.IsTheHole(isolate) || - receiver.IsJSProxy()) { + receiver.IsJSProxy() || receiver.IsWasmObject()) { print_name = true; } else if (!isolate->context().is_null()) { if (!receiver.IsJSObject()) { @@ -426,7 +426,7 @@ void StringStream::PrintPrototype(JSFunction fun, Object receiver) { for (PrototypeIterator iter(isolate, JSObject::cast(receiver), kStartAtReceiver); !iter.IsAtEnd(); iter.Advance()) { - if (iter.GetCurrent().IsJSProxy()) break; + if (!iter.GetCurrent().IsJSObject()) break; Object key = iter.GetCurrent().SlowReverseLookup(fun); if (!key.IsUndefined(isolate)) { if (!name.IsString() || !key.IsString() || diff --git a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h index 121581c76..5f739ebfc 100644 
--- a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h +++ b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h @@ -342,9 +342,9 @@ void LiftoffAssembler::PatchPrepareStackFrame( // Emit the unconditional branch in the function prologue (from {offset} to // {pc_offset()}). patching_assembler.b((pc_offset() - offset) >> kInstrSizeLog2); -#ifdef V8_ENABLE_JIT_CODE_SIGN - patching_assembler.ReleaseJitCodeSigner(); -#endif + #ifdef V8_ENABLE_JIT_CODE_SIGN + patching_assembler.ReleaseJitCodeSigner(); + #endif // If the frame is bigger than the stack, we throw the stack overflow // exception unconditionally. Thereby we can avoid the integer overflow diff --git a/src/wasm/baseline/liftoff-assembler.cc b/src/wasm/baseline/liftoff-assembler.cc index 20a708623..e5e2083a8 100644 --- a/src/wasm/baseline/liftoff-assembler.cc +++ b/src/wasm/baseline/liftoff-assembler.cc @@ -765,29 +765,10 @@ void LiftoffAssembler::DropExceptionValueAtOffset(int offset) { cache_state_.stack_state.pop_back(); } -void LiftoffAssembler::PrepareLoopArgs(int num) { - for (int i = 0; i < num; ++i) { - VarState& slot = cache_state_.stack_state.end()[-1 - i]; - if (slot.is_stack()) continue; - RegClass rc = reg_class_for(slot.kind()); - if (slot.is_reg()) { - if (cache_state_.get_use_count(slot.reg()) > 1) { - // If the register is used more than once, we cannot use it for the - // merge. Move it to an unused register instead. - LiftoffRegList pinned; - pinned.set(slot.reg()); - LiftoffRegister dst_reg = GetUnusedRegister(rc, pinned); - Move(dst_reg, slot.reg(), slot.kind()); - cache_state_.dec_used(slot.reg()); - cache_state_.inc_used(dst_reg); - slot.MakeRegister(dst_reg); - } - continue; - } - LiftoffRegister reg = GetUnusedRegister(rc, {}); - LoadConstant(reg, slot.constant()); - slot.MakeRegister(reg); - cache_state_.inc_used(reg); +void LiftoffAssembler::SpillLoopArgs(int num) { + for (VarState& slot : + base::VectorOf(cache_state_.stack_state.end() - num, num)) { + Spill(&slot); } } @@ -979,14 +960,14 @@ void LiftoffAssembler::Spill(VarState* slot) { } void LiftoffAssembler::SpillLocals() { - for (uint32_t i = 0; i < num_locals_; ++i) { - Spill(&cache_state_.stack_state[i]); + for (VarState& local_slot : + base::VectorOf(cache_state_.stack_state.data(), num_locals_)) { + Spill(&local_slot); } } void LiftoffAssembler::SpillAllRegisters() { - for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) { - auto& slot = cache_state_.stack_state[i]; + for (VarState& slot : cache_state_.stack_state) { if (!slot.is_reg()) continue; Spill(slot.offset(), slot.reg(), slot.kind()); slot.MakeStack(); diff --git a/src/wasm/baseline/liftoff-assembler.h b/src/wasm/baseline/liftoff-assembler.h index d2258af7f..a78df8c7b 100644 --- a/src/wasm/baseline/liftoff-assembler.h +++ b/src/wasm/baseline/liftoff-assembler.h @@ -549,9 +549,9 @@ class LiftoffAssembler : public MacroAssembler { // the bottom of the stack. void DropExceptionValueAtOffset(int offset); - // Ensure that the loop inputs are either in a register or spilled to the - // stack, so that we can merge different values on the back-edge. - void PrepareLoopArgs(int num); + // Spill all loop inputs to the stack to free registers and to ensure that we + // can merge different values on the back-edge. 
+ void SpillLoopArgs(int num); V8_INLINE static int NextSpillOffset(ValueKind kind, int top_spill_offset) { int offset = top_spill_offset + SlotSizeForType(kind); diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc index e58aeedf4..3160eae66 100644 --- a/src/wasm/baseline/liftoff-compiler.cc +++ b/src/wasm/baseline/liftoff-compiler.cc @@ -1262,7 +1262,7 @@ class LiftoffCompiler { // pre-analysis of the function. __ SpillLocals(); - __ PrepareLoopArgs(loop->start_merge.arity); + __ SpillLoopArgs(loop->start_merge.arity); // Loop labels bind at the beginning of the block. __ bind(loop->label.get()); @@ -1882,7 +1882,7 @@ class LiftoffCompiler { } case kExprExternExternalize: { LiftoffRegList pinned; - LiftoffRegister ref = pinned.set(__ PopToRegister(pinned)); + LiftoffRegister ref = pinned.set(__ PopToModifiableRegister(pinned)); LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned); LoadNullValueForCompare(null.gp(), pinned, kWasmAnyRef); Label label; diff --git a/src/wasm/canonical-types.cc b/src/wasm/canonical-types.cc index 253e88852..a4cf1cd2b 100644 --- a/src/wasm/canonical-types.cc +++ b/src/wasm/canonical-types.cc @@ -15,11 +15,21 @@ TypeCanonicalizer* GetTypeCanonicalizer() { return GetWasmEngine()->type_canonicalizer(); } -// We currently store canonical indices in {ValueType} instances, so they -// must fit into the range of valid module-relative (non-canonical type -// indices. -// TODO(jkummerow): Raise this limit, to make long-lived WasmEngines scale -// better. Plan: stop constructing ValueTypes from canonical type indices. +// Inside the TypeCanonicalizer, we use ValueType instances constructed +// from canonical type indices, so we can't let them get bigger than what +// we have storage space for. Code outside the TypeCanonicalizer already +// supports up to Smi range for canonical type indices. +// TODO(jkummerow): Raise this limit. Possible options: +// - increase the size of ValueType::HeapTypeField, using currently-unused bits. +// - change the encoding of ValueType: one bit says whether it's a ref type, +// the other bits then encode the index or the kind of non-ref type. +// - refactor the TypeCanonicalizer's internals to no longer use ValueTypes +// and related infrastructure, and use a wider encoding of canonicalized +// type indices only here. +// - wait for 32-bit platforms to no longer be relevant, and increase the +// size of ValueType to 64 bits. +// None of this seems urgent, as we have no evidence of the current limit +// being an actual limitation in practice. static constexpr size_t kMaxCanonicalTypes = kV8MaxWasmTypes; void TypeCanonicalizer::CheckMaxCanonicalIndex() const { @@ -101,7 +111,7 @@ uint32_t TypeCanonicalizer::AddRecursiveGroup(const FunctionSig* sig) { group.types[0].is_relative_supertype = false; canonical_groups_.emplace(group, canonical_index); canonical_supertypes_.emplace_back(kNoSuperType); - DCHECK_LE(canonical_supertypes_.size(), kMaxCanonicalTypes); + CheckMaxCanonicalIndex(); } return canonical_index; } @@ -110,6 +120,7 @@ ValueType TypeCanonicalizer::CanonicalizeValueType( const WasmModule* module, ValueType type, uint32_t recursive_group_start) const { if (!type.has_index()) return type; + static_assert(kMaxCanonicalTypes <= (1u << ValueType::kHeapTypeBits)); return type.ref_index() >= recursive_group_start ? 
ValueType::CanonicalWithRelativeIndex( type.kind(), type.ref_index() - recursive_group_start) diff --git a/src/wasm/function-body-decoder-impl.h b/src/wasm/function-body-decoder-impl.h index f93a66bc3..852964612 100644 --- a/src/wasm/function-body-decoder-impl.h +++ b/src/wasm/function-body-decoder-impl.h @@ -3113,7 +3113,7 @@ class WasmFullDecoder : public WasmDecoder { } DECODE(BrOnNonNull) { - CHECK_PROTOTYPE_OPCODE(gc); + CHECK_PROTOTYPE_OPCODE(typed_funcref); BranchDepthImmediate imm(this, this->pc_ + 1, validate); if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0; Value ref_object = Peek(0); @@ -3693,6 +3693,11 @@ class WasmFullDecoder : public WasmDecoder { CHECK_PROTOTYPE_OPCODE(return_call); SigIndexImmediate imm(this, this->pc_ + 1, validate); if (!this->Validate(this->pc_ + 1, imm)) return 0; + if (!VALIDATE(this->CanReturnCall(imm.sig))) { + this->DecodeError("%s: %s", WasmOpcodes::OpcodeName(kExprReturnCallRef), + "tail call return types mismatch"); + return 0; + } Value func_ref = Peek(0, 0, ValueType::RefNull(imm.index)); ArgVector args = PeekArgs(imm.sig, 1); CALL_INTERFACE_IF_OK_AND_REACHABLE(ReturnCallRef, func_ref, imm.sig, @@ -4693,6 +4698,15 @@ class WasmFullDecoder : public WasmDecoder { if (!this->ValidateElementSegment(elem_index_pc, elem_segment)) { return 0; } + ValueType segment_type = + this->module_->elem_segments[elem_segment.index].type; + if (!VALIDATE(IsSubtypeOf(segment_type, element_type, this->module_))) { + this->DecodeError( + "array.init_elem: segment type %s is not a subtype of array " + "element type %s", + segment_type.name().c_str(), element_type.name().c_str()); + return 0; + } Value array = Peek(3, 0, ValueType::RefNull(array_imm.index)); Value array_index = Peek(2, 1, kWasmI32); diff --git a/src/wasm/value-type.h b/src/wasm/value-type.h index a977e044c..ec8145578 100644 --- a/src/wasm/value-type.h +++ b/src/wasm/value-type.h @@ -395,6 +395,7 @@ class ValueType { static constexpr ValueType FromIndex(ValueKind kind, uint32_t index) { DCHECK(kind == kRefNull || kind == kRef || kind == kRtt); + CHECK_LT(index, kV8MaxWasmTypes); return ValueType(KindField::encode(kind) | HeapTypeField::encode(index)); } diff --git a/src/wasm/wasm-js.cc b/src/wasm/wasm-js.cc index bf11ef74f..c7f9d43b2 100644 --- a/src/wasm/wasm-js.cc +++ b/src/wasm/wasm-js.cc @@ -761,7 +761,7 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo& info) { } v8::ReturnValue return_value = info.GetReturnValue(); - return_value.Set(Utils::ToLocal(i::Handle::cast(module_obj))); + return_value.Set(Utils::ToLocal(module_obj)); } // WebAssembly.Module.imports(module) -> Array diff --git a/src/wasm/wasm-objects.cc b/src/wasm/wasm-objects.cc index be3e3a33f..897d33098 100644 --- a/src/wasm/wasm-objects.cc +++ b/src/wasm/wasm-objects.cc @@ -91,7 +91,7 @@ Handle WasmModuleObject::ExtractUtf8StringFromModuleBytes( base::Vector name_vec = wire_bytes.SubVector(ref.offset(), ref.end_offset()); // UTF8 validation happens at decode time. - DCHECK(unibrow::Utf8::ValidateEncoding(name_vec.begin(), name_vec.length())); + SBXCHECK(unibrow::Utf8::ValidateEncoding(name_vec.begin(), name_vec.length())); auto* factory = isolate->factory(); return internalize ? 
factory->InternalizeUtf8String( @@ -112,7 +112,7 @@ MaybeHandle WasmModuleObject::GetModuleNameOrNull( MaybeHandle WasmModuleObject::GetFunctionNameOrNull( Isolate* isolate, Handle module_object, uint32_t func_index) { - DCHECK_LT(func_index, module_object->module()->functions.size()); + SBXCHECK_LT(func_index, module_object->module()->functions.size()); wasm::WireBytesRef name = module_object->module()->lazily_generated_names.LookupFunctionName( wasm::ModuleWireBytes(module_object->native_module()->wire_bytes()), @@ -127,7 +127,7 @@ base::Vector WasmModuleObject::GetRawFunctionName( if (func_index == wasm::kAnonymousFuncIndex) { return base::Vector({nullptr, 0}); } - DCHECK_GT(module()->functions.size(), func_index); + SBXCHECK_GT(module()->functions.size(), func_index); wasm::ModuleWireBytes wire_bytes(native_module()->wire_bytes()); wasm::WireBytesRef name_ref = module()->lazily_generated_names.LookupFunctionName(wire_bytes, @@ -179,7 +179,7 @@ void WasmTableObject::AddDispatchTable(Isolate* isolate, int table_index) { Handle dispatch_tables(table_obj->dispatch_tables(), isolate); int old_length = dispatch_tables->length(); - DCHECK_EQ(0, old_length % kDispatchTableNumElements); + SBXCHECK_EQ(0, old_length % kDispatchTableNumElements); if (instance.is_null()) return; // TODO(titzer): use weak cells here to avoid leaking instances. @@ -208,13 +208,13 @@ int WasmTableObject::Grow(Isolate* isolate, Handle table, max_size = v8_flags.wasm_max_table_size; } max_size = std::min(max_size, v8_flags.wasm_max_table_size.value()); - DCHECK_LE(old_size, max_size); + SBXCHECK_LE(old_size, max_size); if (max_size - old_size < count) return -1; uint32_t new_size = old_size + count; // Even with 2x over-allocation, there should not be an integer overflow. static_assert(wasm::kV8MaxWasmTableSize <= kMaxInt / 2); - DCHECK_GE(kMaxInt, new_size); + SBXCHECK_GE(kMaxInt, new_size); int old_capacity = table->entries().length(); if (new_size > static_cast(old_capacity)) { int grow = static_cast(new_size) - old_capacity; @@ -229,7 +229,7 @@ int WasmTableObject::Grow(Isolate* isolate, Handle table, table->set_current_length(new_size); Handle dispatch_tables(table->dispatch_tables(), isolate); - DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements); + SBXCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements); // Tables are stored in the instance object, no code patching is // necessary. We simply have to grow the raw tables in each instance // that has imported this table. @@ -244,7 +244,7 @@ int WasmTableObject::Grow(Isolate* isolate, Handle table, Handle instance( WasmInstanceObject::cast(dispatch_tables->get(i)), isolate); - DCHECK_EQ(old_size, + SBXCHECK_EQ(old_size, instance->GetIndirectFunctionTable(isolate, table_index)->size()); WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize( instance, table_index, new_size); @@ -264,7 +264,7 @@ MaybeHandle WasmTableObject::JSToWasmElement( Isolate* isolate, Handle table, Handle entry, const char** error_message) { // Any `entry` has to be in its JS representation. - DCHECK(!entry->IsWasmInternalFunction()); + SBXCHECK(!entry->IsWasmInternalFunction()); const WasmModule* module = !table->instance().IsUndefined() ? 
WasmInstanceObject::cast(table->instance()).module() @@ -278,7 +278,7 @@ void WasmTableObject::SetFunctionTableEntry(Isolate* isolate, Handle entries, int entry_index, Handle entry) { - if (entry->IsWasmNull(isolate)) { + if (entry->IsWasmNull(isolate) || entry->IsNull(isolate)) { ClearDispatchTables(isolate, table, entry_index); // Degenerate case. entries->set(entry_index, ReadOnlyRoots(isolate).wasm_null()); return; @@ -291,14 +291,16 @@ void WasmTableObject::SetFunctionTableEntry(Isolate* isolate, Handle target_instance(exported_function->instance(), isolate); int func_index = exported_function->function_index(); - auto* wasm_function = &target_instance->module()->functions[func_index]; + const WasmModule* module = target_instance->module(); + SBXCHECK_LT(func_index, module->functions.size()); + auto* wasm_function = module->functions.data() + func_index; UpdateDispatchTables(isolate, *table, entry_index, wasm_function, *target_instance); } else if (WasmJSFunction::IsWasmJSFunction(*external)) { UpdateDispatchTables(isolate, table, entry_index, Handle::cast(external)); } else { - DCHECK(WasmCapiFunction::IsWasmCapiFunction(*external)); + SBXCHECK(WasmCapiFunction::IsWasmCapiFunction(*external)); UpdateDispatchTables(isolate, table, entry_index, Handle::cast(external)); } @@ -309,7 +311,7 @@ void WasmTableObject::SetFunctionTableEntry(Isolate* isolate, void WasmTableObject::Set(Isolate* isolate, Handle table, uint32_t index, Handle entry) { // Callers need to perform bounds checks, type check, and error handling. - DCHECK(table->is_in_bounds(index)); + SBXCHECK(table->is_in_bounds(index)); Handle entries(table->entries(), isolate); // The FixedArray is addressed with int's. @@ -337,7 +339,7 @@ void WasmTableObject::Set(Isolate* isolate, Handle table, case wasm::HeapType::kBottom: UNREACHABLE(); default: - DCHECK(!table->instance().IsUndefined()); + SBXCHECK(!table->instance().IsUndefined()); if (WasmInstanceObject::cast(table->instance()) .module() ->has_signature(table->type().ref_index())) { @@ -354,7 +356,7 @@ Handle WasmTableObject::Get(Isolate* isolate, uint32_t index) { Handle entries(table->entries(), isolate); // Callers need to perform bounds checks and error handling. - DCHECK(table->is_in_bounds(index)); + SBXCHECK(table->is_in_bounds(index)); // The FixedArray is addressed with int's. int entry_index = static_cast(index); @@ -386,14 +388,14 @@ Handle WasmTableObject::Get(Isolate* isolate, case wasm::HeapType::kBottom: UNREACHABLE(); default: - DCHECK(!table->instance().IsUndefined()); + SBXCHECK(!table->instance().IsUndefined()); const WasmModule* module = WasmInstanceObject::cast(table->instance()).module(); if (module->has_array(table->type().ref_index()) || module->has_struct(table->type().ref_index())) { return entry; } - DCHECK(module->has_signature(table->type().ref_index())); + SBXCHECK(module->has_signature(table->type().ref_index())); if (entry->IsWasmInternalFunction()) return entry; break; } @@ -417,9 +419,9 @@ void WasmTableObject::Fill(Isolate* isolate, Handle table, uint32_t start, Handle entry, uint32_t count) { // Bounds checks must be done by the caller. 
- DCHECK_LE(start, table->current_length()); - DCHECK_LE(count, table->current_length()); - DCHECK_LE(start + count, table->current_length()); + SBXCHECK_LE(start, table->current_length()); + SBXCHECK_LE(count, table->current_length()); + SBXCHECK_LE(start + count, table->current_length()); for (uint32_t i = 0; i < count; i++) { WasmTableObject::Set(isolate, table, start + i, entry); @@ -437,7 +439,7 @@ void WasmTableObject::UpdateDispatchTables(Isolate* isolate, // We simply need to update the IFTs for each instance that imports // this table. FixedArray dispatch_tables = table.dispatch_tables(); - DCHECK_EQ(0, dispatch_tables.length() % kDispatchTableNumElements); + SBXCHECK_EQ(0, dispatch_tables.length() % kDispatchTableNumElements); Object call_ref = func->imported @@ -469,11 +471,10 @@ void WasmTableObject::UpdateDispatchTables(Isolate* isolate, Handle table, int entry_index, Handle function) { - // We simply need to update the IFTs for each instance that imports - // this table. Handle dispatch_tables(table->dispatch_tables(), isolate); - DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements); + SBXCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements); + // Update the dispatch table for each instance that imports this table. for (int i = 0; i < dispatch_tables->length(); i += kDispatchTableNumElements) { int table_index = @@ -494,7 +495,7 @@ void WasmTableObject::UpdateDispatchTables( // We simply need to update the IFTs for each instance that imports // this table. Handle dispatch_tables(table->dispatch_tables(), isolate); - DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements); + SBXCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements); // Reconstruct signature. // TODO(jkummerow): Unify with "SignatureHelper" in c-api.cc. @@ -514,6 +515,7 @@ void WasmTableObject::UpdateDispatchTables( int param_count = total_count - result_count; wasm::FunctionSig sig(result_count, param_count, reps.get()); + // Update the dispatch table for each instance that imports this table. for (int i = 0; i < dispatch_tables->length(); i += kDispatchTableNumElements) { int table_index = @@ -556,7 +558,7 @@ void WasmTableObject::ClearDispatchTables(Isolate* isolate, Handle table, int index) { Handle dispatch_tables(table->dispatch_tables(), isolate); - DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements); + SBXCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements); for (int i = 0; i < dispatch_tables->length(); i += kDispatchTableNumElements) { int table_index = @@ -567,7 +569,7 @@ void WasmTableObject::ClearDispatchTables(Isolate* isolate, isolate); Handle function_table = target_instance->GetIndirectFunctionTable(isolate, table_index); - DCHECK_LT(index, function_table->size()); + SBXCHECK_LT(index, function_table->size()); function_table->Clear(index); } } @@ -591,8 +593,8 @@ void WasmTableObject::GetFunctionTableEntry( int entry_index, bool* is_valid, bool* is_null, MaybeHandle* instance, int* function_index, MaybeHandle* maybe_js_function) { - DCHECK(wasm::IsSubtypeOf(table->type(), wasm::kWasmFuncRef, module)); - DCHECK_LT(entry_index, table->current_length()); + SBXCHECK(wasm::IsSubtypeOf(table->type(), wasm::kWasmFuncRef, module)); + SBXCHECK_LT(entry_index, table->current_length()); // We initialize {is_valid} with {true}. We may change it later. 
*is_valid = true; Handle element(table->entries().get(entry_index), isolate); @@ -846,7 +848,7 @@ void WasmMemoryObject::update_instances(Isolate* isolate, WasmInstanceObject::cast(heap_object), isolate); SetInstanceMemory(instance, buffer); } else { - DCHECK(elem->IsCleared()); + SBXCHECK(elem->IsCleared()); } } } @@ -872,7 +874,7 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate, // {GrowWasmMemoryInPlace} because memory is never allocated with more // capacity than that limit. size_t old_size = old_buffer->byte_length(); - DCHECK_EQ(0, old_size % wasm::kWasmPageSize); + SBXCHECK_EQ(0, old_size % wasm::kWasmPageSize); size_t old_pages = old_size / wasm::kWasmPageSize; size_t max_pages = memory_object->is_memory64() ? wasm::max_mem64_pages() : wasm::max_mem32_pages(); @@ -880,7 +882,7 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate, max_pages = std::min(max_pages, static_cast(memory_object->maximum_pages())); } - DCHECK_GE(max_pages, old_pages); + SBXCHECK_GE(max_pages, old_pages); if (pages > max_pages - old_pages) return -1; base::Optional result_inplace = @@ -927,12 +929,12 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate, Handle symbol = isolate->factory()->array_buffer_wasm_memory_symbol(); JSObject::SetProperty(isolate, new_buffer, symbol, memory_object).Check(); - DCHECK_EQ(result_inplace.value(), old_pages); + SBXCHECK_EQ(result_inplace.value(), old_pages); return static_cast(result_inplace.value()); // success } size_t new_pages = old_pages + pages; - DCHECK_LT(old_pages, new_pages); + SBXCHECK_LT(old_pages, new_pages); // Try allocating a new backing store and copying. // To avoid overall quadratic complexity of many small grow operations, we // grow by at least 0.5 MB + 12.5% of the existing memory size. @@ -943,7 +945,7 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate, // {min_growth} can be bigger than {max_pages}, and in that case we want to // cap to {max_pages}. size_t new_capacity = std::min(max_pages, std::max(new_pages, min_growth)); - DCHECK_LT(old_pages, new_capacity); + SBXCHECK_LT(old_pages, new_capacity); std::unique_ptr new_backing_store = backing_store->CopyWasmMemory(isolate, new_pages, new_capacity, memory_object->is_memory64() @@ -989,7 +991,7 @@ MaybeHandle WasmGlobalObject::New( } if (type.is_reference()) { - DCHECK(maybe_untagged_buffer.is_null()); + SBXCHECK(maybe_untagged_buffer.is_null()); Handle tagged_buffer; if (!maybe_tagged_buffer.ToHandle(&tagged_buffer)) { // If no buffer was provided, create one. 
@@ -999,7 +1001,7 @@ MaybeHandle WasmGlobalObject::New( } global_obj->set_tagged_buffer(*tagged_buffer); } else { - DCHECK(maybe_tagged_buffer.is_null()); + SBXCHECK(maybe_tagged_buffer.is_null()); uint32_t type_size = type.value_kind_size(); Handle untagged_buffer; @@ -1044,7 +1046,7 @@ void ImportedFunctionEntry::SetWasmToJs( ", target=%p}\n", instance_->ptr(), index_, callable->ptr(), wasm_to_js_wrapper->instructions().begin()); - DCHECK(wasm_to_js_wrapper->kind() == wasm::WasmCode::kWasmToJsWrapper || + SBXCHECK(wasm_to_js_wrapper->kind() == wasm::WasmCode::kWasmToJsWrapper || wasm_to_js_wrapper->kind() == wasm::WasmCode::kWasmToCapiWrapper); Handle ref = isolate->factory()->NewWasmApiFunctionRef(callable, suspend, instance_); @@ -1090,7 +1092,7 @@ bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize( Handle instance, int table_index, uint32_t minimum_size) { Isolate* isolate = instance->GetIsolate(); - DCHECK_LT(table_index, instance->indirect_function_tables().length()); + SBXCHECK_LT(table_index, instance->indirect_function_tables().length()); Handle table = instance->GetIndirectFunctionTable(isolate, table_index); WasmIndirectFunctionTable::Resize(isolate, table, minimum_size); @@ -1217,7 +1219,7 @@ void WasmInstanceObject::InitDataSegmentArrays( // since they cannot be used (since the validator checks that number of // declared data segments when validating the memory.init and memory.drop // instructions). - DCHECK(num_data_segments == 0 || + SBXCHECK(num_data_segments == 0 || num_data_segments == module->data_segments.size()); for (uint32_t i = 0; i < num_data_segments; ++i) { const wasm::WasmDataSegment& segment = module->data_segments[i]; @@ -1245,7 +1247,7 @@ Address WasmInstanceObject::GetCallTarget(uint32_t func_index) { Handle WasmInstanceObject::GetIndirectFunctionTable( Isolate* isolate, uint32_t table_index) { - DCHECK_LT(table_index, indirect_function_tables().length()); + SBXCHECK_LT(table_index, indirect_function_tables().length()); return handle(WasmIndirectFunctionTable::cast( indirect_function_tables().get(table_index)), isolate); @@ -1478,7 +1480,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable( wasm::WasmImportData resolved(callable, sig, canonical_sig_index); wasm::ImportCallKind kind = resolved.kind(); callable = resolved.callable(); // Update to ultimate target. - DCHECK_NE(wasm::ImportCallKind::kLinkError, kind); + SBXCHECK_NE(wasm::ImportCallKind::kLinkError, kind); wasm::CompilationEnv env = native_module->CreateCompilationEnv(); // {expected_arity} should only be used if kind != kJSFunctionArityMismatch. 
int expected_arity = -1; @@ -1518,7 +1520,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable( // static uint8_t* WasmInstanceObject::GetGlobalStorage( Handle instance, const wasm::WasmGlobal& global) { - DCHECK(!global.type.is_reference()); + SBXCHECK(!global.type.is_reference()); if (global.mutability && global.imported) { return reinterpret_cast( instance->imported_mutable_globals().get_sandboxed_pointer( @@ -1532,7 +1534,7 @@ uint8_t* WasmInstanceObject::GetGlobalStorage( std::pair, uint32_t> WasmInstanceObject::GetGlobalBufferAndIndex(Handle instance, const wasm::WasmGlobal& global) { - DCHECK(global.type.is_reference()); + SBXCHECK(global.type.is_reference()); Isolate* isolate = instance->GetIsolate(); if (global.mutability && global.imported) { Handle buffer( @@ -1541,7 +1543,7 @@ WasmInstanceObject::GetGlobalBufferAndIndex(Handle instance, isolate); Address idx = instance->imported_mutable_globals().get_int( global.index * kSystemPointerSize); - DCHECK_LE(idx, std::numeric_limits::max()); + SBXCHECK_LE(idx, std::numeric_limits::max()); return {buffer, static_cast(idx)}; } return {handle(instance->tagged_globals_buffer(), isolate), global.offset}; @@ -1627,7 +1629,7 @@ wasm::WasmValue WasmArray::GetElement(uint32_t index) { void WasmArray::SetTaggedElement(uint32_t index, Handle value, WriteBarrierMode mode) { - DCHECK(type()->element_type().is_reference()); + SBXCHECK(type()->element_type().is_reference()); TaggedField::store(*this, element_offset(index), *value); CONDITIONAL_WRITE_BARRIER(*this, element_offset(index), *value, mode); } @@ -1641,8 +1643,8 @@ Handle WasmTagObject::New(Isolate* isolate, isolate); // Serialize the signature. - DCHECK_EQ(0, sig->return_count()); - DCHECK_LE(sig->parameter_count(), std::numeric_limits::max()); + SBXCHECK_EQ(0, sig->return_count()); + SBXCHECK_LE(sig->parameter_count(), std::numeric_limits::max()); int sig_size = static_cast(sig->parameter_count()); Handle> serialized_sig = PodArray::New(isolate, sig_size, AllocationType::kOld); @@ -1749,7 +1751,7 @@ Handle WasmExceptionPackage::GetExceptionValues( isolate, exception_package, isolate->factory()->wasm_exception_values_symbol()) .ToHandle(&values)) { - DCHECK_IMPLIES(!values->IsUndefined(), values->IsFixedArray()); + CHECK_IMPLIES(!values->IsUndefined(), values->IsFixedArray()); return values; } return ReadOnlyRoots(isolate).undefined_value_handle(); @@ -1859,8 +1861,8 @@ constexpr uint32_t kBytesPerExceptionValuesArrayElement = 2; size_t ComputeEncodedElementSize(wasm::ValueType type) { size_t byte_size = type.value_kind_size(); - DCHECK_EQ(byte_size % kBytesPerExceptionValuesArrayElement, 0); - DCHECK_LE(1, byte_size / kBytesPerExceptionValuesArrayElement); + SBXCHECK_EQ(byte_size % kBytesPerExceptionValuesArrayElement, 0); + SBXCHECK_LE(1, byte_size / kBytesPerExceptionValuesArrayElement); return byte_size / kBytesPerExceptionValuesArrayElement; } @@ -1916,7 +1918,7 @@ bool WasmExportedFunction::IsWasmExportedFunction(Object object) { code.builtin_id() != Builtin::kWasmReturnPromiseOnSuspend) { return false; } - DCHECK(js_function.shared().HasWasmExportedFunctionData()); + SBXCHECK(js_function.shared().HasWasmExportedFunctionData()); return true; } @@ -1928,7 +1930,7 @@ bool WasmCapiFunction::IsWasmCapiFunction(Object object) { // if (js_function->code()->kind() != CodeKind::WASM_TO_CAPI_FUNCTION) { // return false; // } - // DCHECK(js_function->shared()->HasWasmCapiFunctionData()); + // SBXCHECK(js_function->shared()->HasWasmCapiFunctionData()); // return true; return 
js_function.shared().HasWasmCapiFunctionData(); } @@ -1970,7 +1972,7 @@ Handle WasmExportedFunction::New( Isolate* isolate, Handle instance, Handle internal, int func_index, int arity, Handle export_wrapper) { - DCHECK( + SBXCHECK( CodeKind::JS_TO_WASM_FUNCTION == export_wrapper->kind() || (export_wrapper->is_builtin() && (export_wrapper->builtin_id() == Builtin::kGenericJSToWasmWrapper || @@ -2031,7 +2033,7 @@ Handle WasmExportedFunction::New( // According to the spec, exported functions should not have a [[Construct]] // method. This does not apply to functions exported from asm.js however. - DCHECK_EQ(is_asm_js_module, js_function->IsConstructor()); + SBXCHECK_EQ(is_asm_js_module, js_function->IsConstructor()); shared->set_length(arity); shared->set_internal_formal_parameter_count(JSParameterCount(arity)); shared->set_script(instance->module_object().script()); @@ -2077,7 +2079,7 @@ Handle WasmJSFunction::New(Isolate* isolate, const wasm::FunctionSig* sig, Handle callable, wasm::Suspend suspend) { - DCHECK_LE(sig->all().size(), kMaxInt); + SBXCHECK_LE(sig->all().size(), kMaxInt); int sig_size = static_cast(sig->all().size()); int return_count = static_cast(sig->return_count()); int parameter_count = static_cast(sig->parameter_count()); @@ -2269,7 +2271,7 @@ namespace wasm { MaybeHandle JSToWasmObject(Isolate* isolate, Handle value, ValueType expected_canonical, const char** error_message) { - DCHECK(expected_canonical.is_object_reference()); + SBXCHECK(expected_canonical.is_object_reference()); if (expected_canonical.kind() == kRefNull && value->IsNull(isolate)) { switch (expected_canonical.heap_representation()) { case HeapType::kStringViewWtf8: @@ -2466,7 +2468,7 @@ MaybeHandle WasmToJSObject(Isolate* isolate, Handle value, if (value->IsWasmNull()) { return isolate->factory()->null_value(); } else { - DCHECK(value->IsWasmInternalFunction()); + SBXCHECK(value->IsWasmInternalFunction()); return i::WasmInternalFunction::GetOrCreateExternal( i::Handle::cast(value)); } diff --git a/src/wasm/wasm-serialization.cc b/src/wasm/wasm-serialization.cc index de5b7a274..d6e01f0f9 100644 --- a/src/wasm/wasm-serialization.cc +++ b/src/wasm/wasm-serialization.cc @@ -153,7 +153,7 @@ void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) { DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); #ifdef V8_ENABLE_JIT_CODE_SIGN instr->SetBranchImmTarget( - reinterpret_cast(rinfo->pc() + tag * kInstrSize), nullptr); + reinterpret_cast(rinfo->pc() + tag * kInstrSize), nullptr); #else instr->SetBranchImmTarget( reinterpret_cast(rinfo->pc() + tag * kInstrSize)); @@ -997,4 +997,4 @@ MaybeHandle DeserializeNativeModule( } // namespace wasm } // namespace internal -} // namespace v8 \ No newline at end of file +} // namespace v8 diff --git a/test/mjsunit/compiler/bigint-shift-left.js b/test/mjsunit/compiler/bigint-shift-left.js index 1f17eeba6..2afce4b23 100644 --- a/test/mjsunit/compiler/bigint-shift-left.js +++ b/test/mjsunit/compiler/bigint-shift-left.js @@ -87,7 +87,7 @@ })(); (function OptimizeAndTest() { - const bi = 2n ** 64n; + const bi = 2n ** 62n; function ShiftLeftByLarge(a) { return BigInt.asIntN(62, a << bi); } diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status index 5408bddf5..b05769345 100644 --- a/test/mjsunit/mjsunit.status +++ b/test/mjsunit/mjsunit.status @@ -1634,6 +1634,7 @@ 'regress/wasm/regress-1417516': [SKIP], 'regress/wasm/regress-13732': [SKIP], 'regress/wasm/regress-1408337': [SKIP], + 'regress/wasm/regress-343748812': [SKIP], 
'regress/wasm/regress-crbug-1338980': [SKIP], 'regress/wasm/regress-crbug-1355070': [SKIP], 'regress/wasm/regress-crbug-1356718': [SKIP], diff --git a/test/unittests/compiler/common-operator-reducer-unittest.cc b/test/unittests/compiler/common-operator-reducer-unittest.cc index c76423a17..fb375507a 100644 --- a/test/unittests/compiler/common-operator-reducer-unittest.cc +++ b/test/unittests/compiler/common-operator-reducer-unittest.cc @@ -377,6 +377,7 @@ TEST_F(CommonOperatorReducerTest, ReturnWithPhiAndEffectPhiAndMerge) { graph()->SetEnd(graph()->NewNode(common()->End(1), ret)); StrictMock editor; EXPECT_CALL(editor, Replace(merge, IsDead())); + EXPECT_CALL(editor, Revisit(graph()->end())).Times(2); Reduction const r = Reduce(&editor, ret, BranchSemantics::kJS); ASSERT_TRUE(r.Changed()); EXPECT_THAT(r.replacement(), IsDead()); diff --git a/test/unittests/interpreter/bytecode-generator-unittest.cc b/test/unittests/interpreter/bytecode-generator-unittest.cc index 55315b2db..14e4b28c0 100644 --- a/test/unittests/interpreter/bytecode-generator-unittest.cc +++ b/test/unittests/interpreter/bytecode-generator-unittest.cc @@ -3237,6 +3237,10 @@ TEST_F(BytecodeGeneratorTest, ElideRedundantHoleChecks) { "do { x; } while (y);\n" "x; y;\n", + // do-while with break + "do { x; break; } while (y);\n" + "x; y;\n", + // C-style for "for (x; y; z) { w; }\n" "x; y; z; w;\n", diff --git a/test/unittests/interpreter/bytecode_expectations/ElideRedundantHoleChecks.golden b/test/unittests/interpreter/bytecode_expectations/ElideRedundantHoleChecks.golden index 2aeaf6f4a..8d71dbb36 100644 --- a/test/unittests/interpreter/bytecode_expectations/ElideRedundantHoleChecks.golden +++ b/test/unittests/interpreter/bytecode_expectations/ElideRedundantHoleChecks.golden @@ -176,6 +176,38 @@ constant pool: [ handlers: [ ] +--- +snippet: " + { + f = function f(a) { + do { x; break; } while (y); + x; y; + } + let w, x, y, z; + f(); + } +" +frame size: 0 +parameter count: 2 +bytecode array length: 16 +bytecodes: [ + /* 29 S> */ B(LdaImmutableCurrentContextSlot), U8(2), + B(ThrowReferenceErrorIfHole), U8(0), + /* 32 S> */ B(Jump), U8(2), + /* 52 S> */ B(LdaImmutableCurrentContextSlot), U8(2), + B(ThrowReferenceErrorIfHole), U8(0), + /* 55 S> */ B(LdaImmutableCurrentContextSlot), U8(3), + B(ThrowReferenceErrorIfHole), U8(1), + B(LdaUndefined), + /* 60 S> */ B(Return), +] +constant pool: [ + ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"], + ONE_BYTE_INTERNALIZED_STRING_TYPE ["y"], +] +handlers: [ +] + --- snippet: " { diff --git a/test/unittests/wasm/function-body-decoder-unittest.cc b/test/unittests/wasm/function-body-decoder-unittest.cc index ac3de2664..b1327223b 100644 --- a/test/unittests/wasm/function-body-decoder-unittest.cc +++ b/test/unittests/wasm/function-body-decoder-unittest.cc @@ -1128,7 +1128,7 @@ TEST_F(FunctionBodyDecoderTest, UnreachableRefTypes) { ExpectValidates(sigs.i_v(), {WASM_UNREACHABLE, kExprCallRef, sig_index}); ExpectValidates(sigs.i_v(), {WASM_UNREACHABLE, WASM_REF_FUNC(function_index), kExprCallRef, sig_index}); - ExpectValidates(sigs.v_v(), + ExpectValidates(sigs.i_v(), {WASM_UNREACHABLE, kExprReturnCallRef, sig_index}); ExpectValidates(sigs.v_v(), diff --git a/third_party/inspector_protocol/BUILD.gn b/third_party/inspector_protocol/BUILD.gn index 09c7d3ec1..47ffe1862 100644 --- a/third_party/inspector_protocol/BUILD.gn +++ b/third_party/inspector_protocol/BUILD.gn @@ -40,7 +40,6 @@ v8_source_set_shared("crdtp_shared") { deps = [ ":crdtp_platform_shared" ] } -# A small adapter library which only 
:crdtp_shared may depend on. v8_source_set_shared("crdtp_platform_shared") { visibility = [ ":crdtp_shared" ] sources = [ diff --git a/tools/cp_v8_include.py b/tools/cp_v8_include.py index a19264327..3c1b0cdf7 100644 --- a/tools/cp_v8_include.py +++ b/tools/cp_v8_include.py @@ -7,4 +7,4 @@ dst_folder = current_path + '/v8-include' if os.path.exists(dst_folder): shutil.rmtree(dst_folder) -shutil.copytree(src_folder, dst_folder) +shutil.copytree(src_folder, dst_folder) \ No newline at end of file diff --git a/v8_shared/BUILD.gn b/v8_shared/BUILD.gn index 260b30fec..2f177c945 100644 --- a/v8_shared/BUILD.gn +++ b/v8_shared/BUILD.gn @@ -1741,7 +1741,7 @@ v8_header_set_shared("torque_runtime_support_shared") { sources = [ "../src/torque/runtime-support.h" ] - configs = [ ":v8_features_shared", ":internal_config_shared" ] + configs = [ ":internal_config_shared", ":v8_features_shared" ] } torque_files = [ @@ -2803,7 +2803,7 @@ v8_source_set_shared("v8_snapshot_shared") { } action("v8_dump_build_config") { - script = "tools/testrunner/utils/dump_build_config.py" + script = "../tools/testrunner/utils/dump_build_config.py" outputs = [ "$root_out_dir/../v8_build_config.json" ] is_DEBUG_defined = v8_enable_debugging_features || dcheck_always_on is_full_debug = v8_enable_debugging_features && !v8_optimized_debug @@ -5902,7 +5902,7 @@ v8_source_set_shared("v8_base_without_compiler_shared") { ] if (v8_enable_i18n_support_shared) { - deps += [ "//v8:run_gen-regexp-special-case" ] + deps += [ ":run_gen-regexp-special-case_shared" ] sources += [ "$target_gen_dir/src/regexp/special-case.cc" ] if (is_win) { deps += [ "$v8_icu_path:icudata" ] -- Gitee From a9d8c7012ccc4ed5d6dbeed7ba0cf79ca6d2d96c Mon Sep 17 00:00:00 2001 From: belong326 Date: Tue, 24 Sep 2024 20:39:01 +0800 Subject: [PATCH 2/2] =?UTF-8?q?=E3=80=90=E8=93=9D=E9=BB=84=E5=90=8C?= =?UTF-8?q?=E6=AD=A5=E3=80=91=E5=90=8C=E6=AD=A5=E9=BB=84=E5=8C=BA=E4=BB=A3?= =?UTF-8?q?=E7=A0=81=20=E6=9B=B4=E6=96=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change-Id: I0229e172ec699d6b85d52d24055b0bac822984b1 --- test/mjsunit/regress/regress-345960102.js | 19 +++++++++++ test/mjsunit/regress/wasm/regress-14047.js | 32 +++++++++++++++++++ .../mjsunit/regress/wasm/regress-343748812.js | 30 +++++++++++++++++ 3 files changed, 81 insertions(+) create mode 100644 test/mjsunit/regress/regress-345960102.js create mode 100644 test/mjsunit/regress/wasm/regress-14047.js create mode 100644 test/mjsunit/regress/wasm/regress-343748812.js diff --git a/test/mjsunit/regress/regress-345960102.js b/test/mjsunit/regress/regress-345960102.js new file mode 100644 index 000000000..896277a7f --- /dev/null +++ b/test/mjsunit/regress/regress-345960102.js @@ -0,0 +1,19 @@ +// Copyright 2024 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +// Flags: --allow-natives-syntax + +y = BigInt("0xffffffffffffffff"); + +function test() { + let x = BigInt.asIntN(64, -1n); + let result = x >> (y); + return BigInt.asIntN(64, result); +} + +%PrepareFunctionForOptimization(test); +assertEquals(-1n, test()); +assertEquals(-1n, test()); +%OptimizeFunctionOnNextCall(test) +assertEquals(-1n, test()); diff --git a/test/mjsunit/regress/wasm/regress-14047.js b/test/mjsunit/regress/wasm/regress-14047.js new file mode 100644 index 000000000..71ea130a9 --- /dev/null +++ b/test/mjsunit/regress/wasm/regress-14047.js @@ -0,0 +1,32 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --experimental-wasm-gc + +d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js'); + +let builder = new WasmModuleBuilder(); + +let struct_type = builder.addStruct([makeField(kWasmI32, true)]); + +builder.addFunction('main', kSig_v_v).exportFunc() + .addBody([ + kExprRefNull, struct_type, + kExprRefAsNonNull, + kGCPrefix, kExprStructGet, struct_type, 0, + kExprDrop, + kExprI32Const, 1, + ...wasmF32Const(42), + kExprF32Const, 0xd7, 0xff, 0xff, 0xff, // -nan:0x7fffd7 + kExprF32Gt, + kExprI32DivU, + kExprIf, kWasmVoid, + kExprUnreachable, + kExprEnd, +]); + +let main = builder.instantiate().exports.main; +assertThrows( + () => main(), WebAssembly.RuntimeError, /dereferencing a null pointer/); + diff --git a/test/mjsunit/regress/wasm/regress-343748812.js b/test/mjsunit/regress/wasm/regress-343748812.js new file mode 100644 index 000000000..8dc456c41 --- /dev/null +++ b/test/mjsunit/regress/wasm/regress-343748812.js @@ -0,0 +1,30 @@ +// Copyright 2024 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js'); + +const builder = new WasmModuleBuilder(); +let $sig0 = builder.addType(kSig_v_v); +let $sig7 = builder.addType( + makeSig([], [ kWasmExternRef, kWasmS128, kWasmExternRef ])); +let $func0 = builder.addImport('imports', 'func0', $sig0); +builder.addFunction("main", $sig0).exportFunc() + .addLocals(kWasmExternRef, 3) + .addBody([ + kExprTry, $sig7, + kExprCallFunction, $func0, + kExprUnreachable, + kExprCatchAll, + kExprRefNull, kExternRefCode, + ...wasmS128Const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), + kExprRefNull, kExternRefCode, + kExprEnd, + kExprDrop, + kExprDrop, + kExprDrop, + ]); + +var instance = builder.instantiate({'imports': { 'func0': () => {} }}); + +assertThrows(instance.exports.main, WebAssembly.RuntimeError, /unreachable/); -- Gitee
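
Appendix: the recurring pattern behind the DCHECK-to-SBXCHECK changes above (regexp register counts and indices, wasm table sizes, function indices, dispatch-table lengths) is that assertions on values reachable from inside the V8 sandbox must stay live outside debug builds. A DCHECK compiles away in release builds, so a corrupted index would silently read or write out of bounds; an SBXCHECK is compiled in whenever the sandbox is enabled, so the same corruption crashes safely instead. The sketch below is a minimal stand-alone illustration, not V8 code: SBXCHECK_LT here is a toy local macro standing in for V8's internal one, and CheckedRegisters is a hypothetical class mirroring the shape of the hardened InterpreterRegisters in src/regexp/regexp-interpreter.cc.

  #include <cstdio>
  #include <cstdlib>
  #include <vector>

  // Toy stand-in for V8's SBXCHECK_LT: unlike a DCHECK, it remains active in
  // release builds, so a bad index aborts instead of corrupting memory.
  #define SBXCHECK_LT(a, b)                                              \
    do {                                                                 \
      if (!((a) < (b))) {                                                \
        std::fprintf(stderr, "sandbox check failed: %s < %s\n", #a, #b); \
        std::abort();                                                    \
      }                                                                  \
    } while (0)

  // Mirrors the hardened InterpreterRegisters: record the total register
  // count at construction time and validate every indexed access against it.
  class CheckedRegisters {
   public:
    explicit CheckedRegisters(int total_register_count)
        : registers_(total_register_count),
          total_register_count_(total_register_count) {}

    int& operator[](size_t index) {
      SBXCHECK_LT(index, static_cast<size_t>(total_register_count_));
      return registers_[index];
    }

   private:
    std::vector<int> registers_;
    const int total_register_count_;
  };

  int main() {
    CheckedRegisters regs(4);
    regs[3] = 42;                // In bounds: fine.
    std::printf("%d\n", regs[3]);
    // regs[4] = 1;              // Out of bounds: SBXCHECK_LT would abort.
    return 0;
  }

The same reasoning explains the SBXCHECK_LE(num_matches_, max_matches_) added to RegExpGlobalCache::FetchNext: num_matches_ comes back from generated regexp code, so it is treated as untrusted before it is used to index register_array_.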
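
The extra_space plumbing in the regexp changes (CallCheckStackGuardState, CheckStackGuardState, JsHasOverflowed(gap)) all serves one check: a match that needs num_registers_ * kSystemPointerSize bytes of working registers must verify that this much headroom exists above the stack limit, not merely that the limit has not yet been crossed. A minimal sketch of that comparison follows, using plain integers in place of V8's StackLimitCheck; the function name is made up for illustration.

  #include <cstdint>

  // The stack grows downwards: sp must be above limit, and the gap between
  // them must still fit `extra_space` bytes of regexp working registers.
  // This mirrors the prologue emitted in RegExpMacroAssemblerX64::GetCode:
  // compute sp - stack_limit, take the slow path if already below the limit,
  // then compare the remaining room against the register area size.
  bool HasEnoughStackFor(uintptr_t sp, uintptr_t limit, uintptr_t extra_space) {
    if (sp <= limit) return false;       // Limit already hit.
    return (sp - limit) >= extra_space;  // Enough room for the registers?
  }

  int main() {
    // 0x100 bytes of headroom covers 0x80 bytes of registers...
    bool ok = HasEnoughStackFor(0x1100, 0x1000, 0x80);
    // ...but not 0x200 bytes.
    bool too_big = HasEnoughStackFor(0x1100, 0x1000, 0x200);
    return (ok && !too_big) ? 0 : 1;
  }

On the slow path the same byte count travels down as the gap argument of JsHasOverflowed, so the C++-side stack check enforces the identical headroom requirement before the generated code is resumed.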
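
Finally, the interrupt handling in Runtime_StackGuard, Runtime_StackGuardWithGap, Runtime_WasmStackGuard, and the new Runtime_HandleNoHeapWritesInterrupts now passes an explicit StackGuard::InterruptLevel, so callers that must not write to the heap only service interrupts that are safe at their level. The sketch below shows the masking idea only: the enumerators kNoHeapWrites and kAnyEffect are taken from the patch, while the flag names and the assignment of flags to levels are assumptions for illustration.

  #include <cstdint>

  enum class InterruptLevel { kNoHeapWrites, kAnyEffect };

  // Hypothetical interrupt bits: terminating execution does not need to
  // touch the heap, while a GC request clearly does.
  enum InterruptFlag : uint32_t {
    kTerminateExecution = 1u << 0,  // Safe at kNoHeapWrites.
    kGCRequest = 1u << 1,           // Requires kAnyEffect.
  };

  constexpr uint32_t AllowedAt(InterruptLevel level) {
    return level == InterruptLevel::kAnyEffect
               ? (kTerminateExecution | kGCRequest)
               : kTerminateExecution;
  }

  // Consume only the interrupts permitted at `level`; anything stronger
  // stays pending until a caller that may have heap effects services it.
  uint32_t TakeInterrupts(uint32_t* pending, InterruptLevel level) {
    uint32_t taken = *pending & AllowedAt(level);
    *pending &= ~taken;
    return taken;
  }

  int main() {
    uint32_t pending = kTerminateExecution | kGCRequest;
    TakeInterrupts(&pending, InterruptLevel::kNoHeapWrites);
    return pending == kGCRequest ? 0 : 1;  // GC request is still pending.
  }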