From e5de1a7c968757dfeeb4e27d5be963618ea1c736 Mon Sep 17 00:00:00 2001 From: Ori Bernstein Date: Mon, 1 May 2023 22:54:58 -0400 Subject: [PATCH 1/9] hashtable: make it ordered --- .../partition_alloc_hooks.cc | 6 +- .../shim/allocator_shim.cc | 6 +- base/functional/bind_internal.h | 4 +- base/process/process_posix.cc | 2 +- base/record_replay.cc | 142 ++++++------ mojo/core/ports/port.cc | 4 +- mojo/core/ports/port_locker.cc | 8 - .../blink/renderer/platform/wtf/BUILD.gn | 4 + .../renderer/platform/wtf/hash_map_test.cc | 20 ++ .../blink/renderer/platform/wtf/hash_table.h | 202 +++++++++++++----- 10 files changed, 245 insertions(+), 153 deletions(-) diff --git a/base/allocator/partition_allocator/partition_alloc_hooks.cc b/base/allocator/partition_allocator/partition_alloc_hooks.cc index 7fdd6dbcda5d5f..6880e277334442 100644 --- a/base/allocator/partition_allocator/partition_alloc_hooks.cc +++ b/base/allocator/partition_allocator/partition_alloc_hooks.cc @@ -54,10 +54,10 @@ void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook, void PartitionAllocHooks::SetOverrideHooks(AllocationOverrideHook* alloc_hook, FreeOverrideHook* free_hook, ReallocOverrideHook realloc_hook) { - if (recordreplay::IsRecordingOrReplaying()) { + //if (recordreplay::IsRecordingOrReplaying()) { // Always use the default allocators when recording/replaying. - return; - } + // return; + //} internal::ScopedGuard guard(GetHooksLock()); diff --git a/base/allocator/partition_allocator/shim/allocator_shim.cc b/base/allocator/partition_allocator/shim/allocator_shim.cc index e0aea9e959c4d3..0d8f50e9e03f74 100644 --- a/base/allocator/partition_allocator/shim/allocator_shim.cc +++ b/base/allocator/partition_allocator/shim/allocator_shim.cc @@ -385,9 +385,9 @@ ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) { #include "base/allocator/partition_allocator/shim/allocator_shim_override_glibc_weak_symbols.h" #endif -static inline bool MaybeRecordingOrReplaying() { - return true; -} +//static inline bool MaybeRecordingOrReplaying() { +// return true; +//} #if BUILDFLAG(IS_APPLE) namespace allocator_shim { diff --git a/base/functional/bind_internal.h b/base/functional/bind_internal.h index c7cf022773f97a..b82c015ea75ac1 100644 --- a/base/functional/bind_internal.h +++ b/base/functional/bind_internal.h @@ -1581,7 +1581,7 @@ struct CallbackCancellationTraits { static constexpr bool is_cancellable = false; }; -extern uintptr_t CallbackRecordReplayValue(const char* why, uintptr_t value); +//extern uintptr_t CallbackRecordReplayValue(const char* why, uintptr_t value); // Specialization for method bound to weak pointer receiver. template @@ -1600,7 +1600,7 @@ struct CallbackCancellationTraits< // Weak pointers can be cleared non-deterministically when recording/replaying, // so record/replay whether they are present so that callers checking the status // like TaskQueueImpl behave consistently. 
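The pattern being stubbed out in this hunk is worth spelling out: any value that can differ between a recording run and a replay run (such as whether a weak pointer has been cleared) is routed through the recorder, so both runs observe the identical result. A minimal sketch of that pattern, assuming a hypothetical Recorder type (none of these names come from the Chromium tree):

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical single-threaded recorder. While recording, observed values
// are appended to a log; while replaying, the logged values are handed back
// in the same order, so any branch taken on them is deterministic.
struct Recorder {
  bool replaying = false;
  std::vector<uintptr_t> log;
  size_t cursor = 0;

  uintptr_t Value(uintptr_t v) {
    if (replaying)
      return log[cursor++];  // ignore the live value; reuse the recorded one
    log.push_back(v);
    return v;
  }
};

CallbackRecordReplayValue("WeakMethodIsCancelled", !receiver) plays the role of Recorder::Value here: the cancellation check yields whatever was observed at record time.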
- return CallbackRecordReplayValue("WeakMethodIsCancelled", !receiver); + return 0; //CallbackRecordReplayValue("WeakMethodIsCancelled", !receiver); } template diff --git a/base/process/process_posix.cc b/base/process/process_posix.cc index 4166e5a4c8f46a..d1d9fe1c19790a 100644 --- a/base/process/process_posix.cc +++ b/base/process/process_posix.cc @@ -277,7 +277,7 @@ void Process::TerminateCurrentProcessImmediately(int exit_code) { #if BUILDFLAG(CLANG_PROFILING) WriteClangProfilingProfile(); #endif - V8RecordReplayFinishRecording(); + //V8RecordReplayFinishRecording(); _exit(exit_code); } diff --git a/base/record_replay.cc b/base/record_replay.cc index f9986614e6407f..274e7e2f905ba1 100644 --- a/base/record_replay.cc +++ b/base/record_replay.cc @@ -161,19 +161,20 @@ ForEachV8APIVoid(DefineFunctionVoid) #endif // !BUILD_FLAG(IS_WIN) bool IsRecordingOrReplaying(const char* feature) { - return V8IsRecordingOrReplaying(feature); + return false; //V8IsRecordingOrReplaying(feature); } bool IsRecording() { - return V8IsRecording(); + return false; //V8IsRecording(); } bool IsReplaying() { - return V8IsReplaying(); + return false; //V8IsReplaying(); } char* GetRecordingId() { - return V8GetRecordingId(); + static char ret[1] = ""; + return ret; //V8GetRecordingId(); } bool HadMismatch() { @@ -184,7 +185,7 @@ void Assert(const char* format, ...) { #ifndef NACL_TC_REV va_list ap; va_start(ap, format); - V8RecordReplayAssertVA(format, ap); + //V8RecordReplayAssertVA(format, ap); va_end(ap); #endif } @@ -193,7 +194,7 @@ void Diagnostic(const char* format, ...) { #ifndef NACL_TC_REV va_list ap; va_start(ap, format); - V8RecordReplayDiagnosticVA(format, ap); + //V8RecordReplayDiagnosticVA(format, ap); va_end(ap); #endif } @@ -202,151 +203,151 @@ void Warning(const char* format, ...) { #ifndef NACL_TC_REV va_list ap; va_start(ap, format); - V8RecordReplayWarning(format, ap); + //V8RecordReplayWarning(format, ap); va_end(ap); #endif } void AssertBytes(const char* why, const void* buf, size_t size) { - V8RecordReplayAssertBytes(why, buf, size); + //V8RecordReplayAssertBytes(why, buf, size); } void Print(const char* format, ...) 
{ #ifndef NACL_TC_REV va_list ap; va_start(ap, format); - V8RecordReplayPrintVA(format, ap); + //V8RecordReplayPrintVA(format, ap); va_end(ap); #endif } uintptr_t RecordReplayValue(const char* why, uintptr_t v) { - return V8RecordReplayValue(why, v); + return 0; //V8RecordReplayValue(why, v); } void RecordReplayBytes(const char* why, void* buf, size_t size) { - V8RecordReplayBytes(why, buf, size); + //V8RecordReplayBytes(why, buf, size); } int CreateOrderedLock(const char* name) { - return (int)V8RecordReplayCreateOrderedLock(name); + return 0; //(int)V8RecordReplayCreateOrderedLock(name); } void OrderedLock(int lock) { - V8RecordReplayOrderedLock(lock); + //V8RecordReplayOrderedLock(lock); } void OrderedUnlock(int lock) { - V8RecordReplayOrderedUnlock(lock); + //V8RecordReplayOrderedUnlock(lock); } void NewCheckpoint() { - V8RecordReplayNewCheckpoint(); + //V8RecordReplayNewCheckpoint(); } uint64_t NewBookmark() { - return V8RecordReplayNewBookmark(); + return 0; //V8RecordReplayNewBookmark(); } void OnAnnotation(const char* kind, const char* contents) { - V8RecordReplayOnAnnotation(kind, contents); + //V8RecordReplayOnAnnotation(kind, contents); } void OnNetworkRequest(const char* id, const char* kind, uint64_t bookmark) { - V8RecordReplayOnNetworkRequest(id, kind, bookmark); + //V8RecordReplayOnNetworkRequest(id, kind, bookmark); } void OnNetworkRequestEvent(const char* id) { - V8RecordReplayOnNetworkRequestEvent(id); + //V8RecordReplayOnNetworkRequestEvent(id); } void OnNetworkStreamStart(const char* id, const char* kind, const char* parentId) { - V8RecordReplayOnNetworkStreamStart(id, kind, parentId); + //V8RecordReplayOnNetworkStreamStart(id, kind, parentId); } void OnNetworkStreamData(const char* id, size_t offset, size_t length, uint64_t bookmark) { - V8RecordReplayOnNetworkStreamData(id, offset, length, bookmark); + //V8RecordReplayOnNetworkStreamData(id, offset, length, bookmark); } void OnNetworkStreamEnd(const char* id, size_t length) { - V8RecordReplayOnNetworkStreamEnd(id, length); + //V8RecordReplayOnNetworkStreamEnd(id, length); } bool AreEventsDisallowed() { - return V8RecordReplayAreEventsDisallowed(); + return false; //V8RecordReplayAreEventsDisallowed(); } void BeginDisallowEvents() { - V8RecordReplayBeginDisallowEvents(); + //V8RecordReplayBeginDisallowEvents(); } void BeginDisallowEventsWithLabel(const char* label) { - V8RecordReplayBeginDisallowEventsWithLabel(label); + //V8RecordReplayBeginDisallowEventsWithLabel(label); } void EndDisallowEvents() { - V8RecordReplayEndDisallowEvents(); + //V8RecordReplayEndDisallowEvents(); } bool AreEventsPassedThrough() { - return V8RecordReplayAreEventsPassedThrough(); + return false; //V8RecordReplayAreEventsPassedThrough(); } void BeginPassThroughEvents() { - V8RecordReplayBeginPassThroughEvents(); + //V8RecordReplayBeginPassThroughEvents(); } void EndPassThroughEvents() { - V8RecordReplayEndPassThroughEvents(); + //V8RecordReplayEndPassThroughEvents(); } bool FeatureEnabled(const char* feature) { - return V8RecordReplayFeatureEnabled(feature); + return false; //V8RecordReplayFeatureEnabled(feature); } void BrowserEvent(const char* name, const base::DictionaryValue& info) { - std::string json; - base::JSONWriter::Write(info, &json); - V8RecordReplayBrowserEvent(name, json.c_str()); + //std::string json; + //base::JSONWriter::Write(info, &json); + //V8RecordReplayBrowserEvent(name, json.c_str()); } bool HasDivergedFromRecording() { - return V8RecordReplayHasDivergedFromRecording(); + return
false;//V8RecordReplayHasDivergedFromRecording(); } bool AllowSideEffects() { - return V8RecordReplayAllowSideEffects(); + return false; //V8RecordReplayAllowSideEffects(); } void RegisterPointer(const char* name, const void* ptr) { - V8RecordReplayRegisterPointer(name, ptr); + //V8RecordReplayRegisterPointer(name, ptr); } void UnregisterPointer(const void* ptr) { - V8RecordReplayUnregisterPointer(ptr); + //V8RecordReplayUnregisterPointer(ptr); } int PointerId(const void* ptr) { - return V8RecordReplayPointerId(ptr); + return 0; //V8RecordReplayPointerId(ptr); } void* IdPointer(int id) { - return V8RecordReplayIdPointer(id); + return nullptr; //V8RecordReplayIdPointer(id); } void OnEvent(const char* aEvent, bool aBefore) { - V8RecordReplayOnEvent(aEvent, aBefore); + //V8RecordReplayOnEvent(aEvent, aBefore); } void OnMouseEvent(const char* kind, size_t clientX, size_t clientY) { - V8RecordReplayOnMouseEvent(kind, clientX, clientY); + //V8RecordReplayOnMouseEvent(kind, clientX, clientY); } void OnKeyEvent(const char* kind, const char* key) { - V8RecordReplayOnKeyEvent(kind, key); + //V8RecordReplayOnKeyEvent(kind, key); } void OnNavigationEvent(const char* kind, const char* url) { - V8RecordReplayOnNavigationEvent(kind, url); + //V8RecordReplayOnNavigationEvent(kind, url); } AutoLockMaybeEventsDisallowed::AutoLockMaybeEventsDisallowed( @@ -378,50 +379,29 @@ AutoUnlockMaybeEventsDisallowed::~AutoUnlockMaybeEventsDisallowed() { } bool IsMainThread() { - return V8IsMainThread(); -} - -static int gNextMainThreadId = 1; - -static bool CheckNewId(const char* name) { - if (!IsRecordingOrReplaying()) { - // Don't track anything. - return false; - } - if (HasDivergedFromRecording()) { - // Everything is allowed when explicitly diverged. - return true; - } - if (AreEventsDisallowed()) { - // IDs can be created when events are disallowed when our own scripts - // create URL objects. This would be nice to improve. 
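The removed CheckNewId() helper gates both ID allocators below; the key distinction between them is how each stays deterministic. Main-thread IDs can come from a plain counter, because the main thread performs the same work in the same order in record and replay, while any-thread IDs must round-trip through RecordReplayValue because thread interleaving differs between runs. A sketch of that split, with record_replay_value as a hypothetical stand-in for recordreplay::RecordReplayValue:

#include <atomic>
#include <cstdint>

// Stand-in: a real implementation would log v while recording and return
// the logged value while replaying (see the Recorder sketch earlier).
static uintptr_t record_replay_value(const char* why, uintptr_t v) {
  (void)why;
  return v;
}

static int g_next_main_thread_id = 1;
static std::atomic<int> g_next_any_thread_id{1};

// Deterministic without help: the main thread replays the same sequence of
// calls, so the counter yields the same IDs in both runs.
int NewIdMainThread() {
  return g_next_main_thread_id++;
}

// Other threads may interleave differently between runs, so the value the
// counter produced at record time must be replayed verbatim.
int NewIdAnyThread() {
  return (int)record_replay_value("NewId", (uintptr_t)g_next_any_thread_id++);
}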
- if (!IsInReplayCode()) { - Warning("NewId when not allowed %s", name); - } - return false; - } - Assert("NewId %s", name); - return true; + return false; //V8IsMainThread(); } +//static int gNextMainThreadId = 1; int NewIdMainThread(const char* name) { - if (!CheckNewId(name)) { - return 0; - } - if (!V8IsMainThread()) { - fprintf(stderr, "NewIdMainThread not main thread: %s\n", name); - CHECK(V8IsMainThread()); - } - return gNextMainThreadId++; +// if (IsRecordingOrReplaying()) { +// if (!V8IsMainThread()) { +// fprintf(stderr, "NewIdMainThread not main thread: %s\n", name); +// CHECK(V8IsMainThread()); +// } +// Assert("NewId %s", name); +// return gNextMainThreadId++; +// } + return 0; } -static std::atomic gNextAnyThreadId{1}; +//static std::atomic gNextAnyThreadId{1}; int NewIdAnyThread(const char* name) { - if (!CheckNewId(name)) { + //if (!CheckNewId(name)) { return 0; - } - return (int)RecordReplayValue("NewId", (uintptr_t)gNextAnyThreadId++); + //} + //return (int)RecordReplayValue("NewId", (uintptr_t)gNextAnyThreadId++); } bool IsInReplayCode() { @@ -441,7 +421,7 @@ void RecordReplayString(const char* why, std::string& str) { } void AddOrderedSRWLock(const char* name, void* lock) { - V8RecordReplayAddOrderedSRWLock(name, lock); + //V8RecordReplayAddOrderedSRWLock(name, lock); } void RemoveOrderedSRWLock(void* lock) { @@ -449,7 +429,7 @@ void RemoveOrderedSRWLock(void* lock) { } void MaybeTerminate(void (*callback)(void*), void* data) { - V8RecordReplayMaybeTerminate(callback, data); + //V8RecordReplayMaybeTerminate(callback, data); } } // namespace recordreplay diff --git a/mojo/core/ports/port.cc b/mojo/core/ports/port.cc index 8dd6b450a94df0..879f330b172cba 100644 --- a/mojo/core/ports/port.cc +++ b/mojo/core/ports/port.cc @@ -34,11 +34,11 @@ Port::Port(uint64_t next_sequence_num_to_send, peer_lost_unexpectedly(false), lock_("Port.lock_") { // Registering new ports is needed for sorting, see port_locker.cc - recordreplay::RegisterPointer("Port", this); + //recordreplay::RegisterPointer("Port", this); } Port::~Port() { - recordreplay::UnregisterPointer(this); + //recordreplay::UnregisterPointer(this); } bool Port::IsNextEvent(const NodeName& from_node, const Event& event) { diff --git a/mojo/core/ports/port_locker.cc b/mojo/core/ports/port_locker.cc index 7bcb9b179fb552..ab975516ff1911 100644 --- a/mojo/core/ports/port_locker.cc +++ b/mojo/core/ports/port_locker.cc @@ -32,15 +32,7 @@ void UpdateTLS(PortLocker* old_locker, PortLocker* new_locker) { } // namespace static uintptr_t GetPortId(Port* port) { - // When recording/replaying the sorted order of ports need to be consistent, - // so we use the ID associated with the port via RegisterPointer for sorting. 
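GetPortId() exists because PortLocker sorts ports before taking their locks, and a sort keyed on raw pointer values is not stable across record and replay. Registering each port and sorting by its registration ID keeps the lock order consistent. A rough sketch of such a registry — hypothetical, not the V8 implementation behind RegisterPointer/PointerId:

#include <map>

// Maps live pointers to small stable integers. The IDs are deterministic
// provided registration itself happens at deterministic points, which is
// why Port registers itself in its constructor.
class PointerRegistry {
 public:
  void Register(const void* p) { ids_.emplace(p, next_id_++); }
  void Unregister(const void* p) { ids_.erase(p); }
  int IdFor(const void* p) const {
    auto it = ids_.find(p);
    return it == ids_.end() ? 0 : it->second;
  }

 private:
  std::map<const void*, int> ids_;
  int next_id_ = 1;
};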
- if (recordreplay::IsRecordingOrReplaying("pointer-ids")) { - uintptr_t id = recordreplay::PointerId(port); - CHECK(id); - return id; - } else { return (uintptr_t)port; - } } PortLocker::PortLocker(const PortRef** port_refs, size_t num_ports) diff --git a/third_party/blink/renderer/platform/wtf/BUILD.gn b/third_party/blink/renderer/platform/wtf/BUILD.gn index 2525817a163ddf..1f14c5e02f337f 100644 --- a/third_party/blink/renderer/platform/wtf/BUILD.gn +++ b/third_party/blink/renderer/platform/wtf/BUILD.gn @@ -18,6 +18,7 @@ visibility = [ ] config("wtf_config") { + cflags = ["-g", "-O0"] if (is_win) { cflags = [ # Don't complain about calling specific versions of templatized @@ -242,6 +243,7 @@ component("wtf") { sources += [ "text/ascii_fast_path.cc" ] } + cflags = ["-g", "-O0"] if (is_win) { cflags = [ "/wd4068" ] # Unknown pragma. @@ -279,11 +281,13 @@ component("wtf") { test("wtf_unittests") { deps = [ ":wtf_unittests_sources" ] + cflags = ["-g", "-O0"] } source_set("wtf_unittests_sources") { visibility = [] # Allow re-assignment of list. visibility = [ "*" ] + cflags = ["-g", "-O0"] testonly = true sources = [ diff --git a/third_party/blink/renderer/platform/wtf/hash_map_test.cc b/third_party/blink/renderer/platform/wtf/hash_map_test.cc index 7d8f844f15f47c..413e15ef36748d 100644 --- a/third_party/blink/renderer/platform/wtf/hash_map_test.cc +++ b/third_party/blink/renderer/platform/wtf/hash_map_test.cc @@ -87,6 +87,26 @@ TEST(HashMapTest, Iteration) { EXPECT_EQ((1 << 10) - 1, encountered_keys); } +TEST(HashMapTest, OrderedIteration) { + IntHashMap map; + for (int i = 0; i < 10; ++i) + map.insert(1 << i, i); + + int i = 0; + for (auto it = map.begin(); it != map.end(); ++it) { + EXPECT_EQ(it->key, 1 << i); + EXPECT_EQ(it->value, i); + i++; + } + i = 10; + for (auto it = map.end(); it != map.begin();) { + --it; + --i; + EXPECT_EQ(it->key, 1 << i); + EXPECT_EQ(it->value, i); + } +} + struct TestDoubleHashTraits : HashTraits<double> { static const unsigned kMinimumTableSize = 8; }; diff --git a/third_party/blink/renderer/platform/wtf/hash_table.h b/third_party/blink/renderer/platform/wtf/hash_table.h index f1cf1d76c669bb..83fe6bc8eed1d9 100644 --- a/third_party/blink/renderer/platform/wtf/hash_table.h +++ b/third_party/blink/renderer/platform/wtf/hash_table.h @@ -24,6 +24,7 @@ #define THIRD_PARTY_BLINK_RENDERER_PLATFORM_WTF_HASH_TABLE_H_ #include +#include #include "base/check_op.h" #include "base/dcheck_is_on.h" @@ -289,49 +290,49 @@ class HashTableConstIterator final { Allocator>; void SkipEmptyBuckets() { - while (position_ != end_position_ && - HashTableType::IsEmptyOrDeletedBucket(*position_)) + while (position_ != end_position_ && *position_ == -1) ++position_; } void ReverseSkipEmptyBuckets() { // Don't need to check for out-of-bounds positions, as begin position is // always going to be a non-empty bucket.
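The ordering machinery this patch threads through the table boils down to two side arrays next to the open-addressed buckets: idxorder_ maps insertion-order slots to bucket indices, with -1 tombstoning slots whose entry was erased, and idxmap_ maps bucket indices back to order slots. Iterators walk idxorder_ instead of the buckets, which is why SkipEmptyBuckets() now tests *position_ == -1. A self-contained sketch of the invariant (simplified: the real buckets hold Values, not indices):

#include <cstddef>
#include <sys/types.h>  // ssize_t, matching the patch
#include <vector>

struct OrderIndex {
  std::vector<ssize_t> idxmap;    // bucket index -> order slot (-1 if none)
  std::vector<ssize_t> idxorder;  // order slot -> bucket index (-1 if erased)

  explicit OrderIndex(size_t buckets) : idxmap(buckets, -1) {}

  void OnInsert(size_t bucket) {
    idxmap[bucket] = (ssize_t)idxorder.size();
    idxorder.push_back((ssize_t)bucket);
  }
  void OnErase(size_t bucket) {
    idxorder[idxmap[bucket]] = -1;  // tombstone the order slot
    idxmap[bucket] = -1;
  }
};

// Iteration visits buckets oldest-first, skipping tombstones.
template <typename Visit>
void ForEachInOrder(const OrderIndex& o, Visit visit) {
  for (ssize_t bucket : o.idxorder)
    if (bucket != -1)
      visit((size_t)bucket);
}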
- while (HashTableType::IsEmptyOrDeletedBucket(*position_)) { + while (*position_ == -1) { #if DCHECK_IS_ON() - DCHECK_NE(position_, begin_position_); + assert(position_ != begin_position_); + //DCHECK_NE(position_, begin_position_); #endif --position_; } } - HashTableConstIterator(PointerType position, - PointerType begin_position, - PointerType end_position, + HashTableConstIterator(const ssize_t* position, + const ssize_t* begin_position, + const ssize_t* end_position, const HashTableType* container) : position_(position), - end_position_(end_position) + end_position_(end_position), + container_(container) #if DCHECK_IS_ON() , begin_position_(begin_position), - container_(container), container_modifications_(container->Modifications()) #endif { SkipEmptyBuckets(); } - HashTableConstIterator(PointerType position, - PointerType begin_position, - PointerType end_position, + HashTableConstIterator(const ssize_t* position, + const ssize_t* begin_position, + const ssize_t* end_position, const HashTableType* container, HashItemKnownGoodTag) : position_(position), - end_position_(end_position) + end_position_(end_position), + container_(container) #if DCHECK_IS_ON() , begin_position_(begin_position), - container_(container), container_modifications_(container->Modifications()) #endif { @@ -357,7 +358,7 @@ class HashTableConstIterator final { GetType Get() const { CheckModifications(); - return position_; + return &container_->table_[*position_]; } typename Traits::IteratorConstReferenceType operator*() const { return Traits::GetToReferenceConstConversion(Get()); @@ -365,7 +366,8 @@ class HashTableConstIterator final { GetType operator->() const { return Get(); } const_iterator& operator++() { - DCHECK_NE(position_, end_position_); + assert(position_ != end_position_); + //DCHECK_NE(position_, end_position_); CheckModifications(); ++position_; SkipEmptyBuckets(); @@ -376,7 +378,8 @@ class HashTableConstIterator final { const_iterator& operator--() { #if DCHECK_IS_ON() - DCHECK_NE(position_, begin_position_); + assert(position_ != end_position_); + //DCHECK_NE(position_, begin_position_); #endif CheckModifications(); --position_; @@ -405,15 +408,18 @@ class HashTableConstIterator final { return stream << "iterator representing "; // TODO(tkent): Change |position_| to |*position_| to show the // pointed object. It requires a lot of new stream printer functions. - return stream << "iterator pointing to " << position_; + return stream << "iterator pointing to " << &container_->table_[*position_]; } private: - PointerType position_; - PointerType end_position_; -#if DCHECK_IS_ON() - PointerType begin_position_; + const ssize_t* position_; + const ssize_t* end_position_; const HashTableType* container_; + //PointerType position_; + //PointerType end_position_; +#if DCHECK_IS_ON() + const ssize_t* begin_position_; + //PointerType begin_position_; int64_t container_modifications_; #endif }; @@ -483,14 +489,14 @@ class HashTableIterator final { KeyTraits, Allocator>; - HashTableIterator(PointerType pos, - PointerType begin, - PointerType end, + HashTableIterator(ssize_t* pos, + ssize_t* begin, + ssize_t* end, const HashTableType* container) : iterator_(pos, begin, end, container) {} - HashTableIterator(PointerType pos, - PointerType begin, - PointerType end, + HashTableIterator(ssize_t* pos, + ssize_t* begin, + ssize_t* end, const HashTableType* container, HashItemKnownGoodTag tag) : iterator_(pos, begin, end, container, tag) {} @@ -768,13 +774,13 @@ class HashTable final // for begin. 
This is more efficient because we don't have to skip all the // empty and deleted buckets, and iterating an empty table is a common case // that's worth optimizing. - iterator begin() { return empty() ? end() : MakeIterator(table_); } - iterator end() { return MakeKnownGoodIterator(table_ + table_size_); } + iterator begin() { return empty() ? end() : MakeIterator(idxorder_); } + iterator end() { return MakeKnownGoodIterator(idxorder_ + key_count_); } const_iterator begin() const { - return empty() ? end() : MakeConstIterator(table_); + return empty() ? end() : MakeConstIterator(idxorder_); } const_iterator end() const { - return MakeKnownGoodConstIterator(table_ + table_size_); + return MakeKnownGoodConstIterator(idxorder_ + key_count_); } unsigned size() const { @@ -848,6 +854,9 @@ class HashTable final template const ValueType* Lookup(const T&) const; + template + ssize_t LookupIdx(const T&) const; + ValueType** GetBufferSlot() { return &table_; } template @@ -938,18 +947,30 @@ class HashTable final return FullLookupType(LookupType(position, found), hash); } - iterator MakeIterator(ValueType* pos) { - return iterator(pos, table_, table_ + table_size_, this); + iterator MakeIterator(ssize_t* pos) { + return iterator(pos, + idxorder_, + idxorder_ + key_count_, + this); } - const_iterator MakeConstIterator(const ValueType* pos) const { - return const_iterator(pos, table_, table_ + table_size_, this); + const_iterator MakeConstIterator(const ssize_t* pos) const { + return const_iterator(pos, + idxorder_, + idxorder_ + key_count_, + this); } - iterator MakeKnownGoodIterator(ValueType* pos) { - return iterator(pos, table_, table_ + table_size_, this, + iterator MakeKnownGoodIterator(ssize_t* pos) { + return iterator(pos, + idxorder_, + idxorder_ + key_count_, + this, kHashItemKnownGood); } - const_iterator MakeKnownGoodConstIterator(const ValueType* pos) const { - return const_iterator(pos, table_, table_ + table_size_, this, + const_iterator MakeKnownGoodConstIterator(const ssize_t* pos) const { + return const_iterator(pos, + idxorder_, + idxorder_ + key_count_, + this, kHashItemKnownGood); } @@ -980,9 +1001,20 @@ class HashTable final modifications_(0) #endif { + size_t alloc_size = base::CheckMul(size, sizeof(ValueType)).ValueOrDie(); + idxmap_ = Allocator::template AllocateHashTableBacking(alloc_size); + idxorder_ = Allocator::template AllocateHashTableBacking(alloc_size); + for(size_t i = 0; i < size; i++){ + idxmap_[i] = -1; + } } +public: ValueType* table_; +private: + ssize_t* idxmap_; + ssize_t* idxorder_; + unsigned table_size_; unsigned key_count_; #if DCHECK_IS_ON() @@ -1103,6 +1135,55 @@ HashTable:: const_cast(this)->Lookup(key)); } +template +template +inline ssize_t +HashTable:: + LookupIdx(const T& key) const { + DCHECK(!AccessForbidden()); + DCHECK((HashTableKeyChecker< + HashTranslator, KeyTraits, + HashFunctions::safe_to_compare_to_empty_or_deleted>::CheckKey(key))); + const ValueType* table = table_; + if (!table) + return -1; + + size_t size_mask = TableSizeMask(); + unsigned h = HashTranslator::GetHash(key); + size_t i = h & size_mask; + size_t probe_count = 0; + + UPDATE_ACCESS_COUNTS(); + + while (true) { + const ValueType* entry = table + i; + + if (HashFunctions::safe_to_compare_to_empty_or_deleted) { + if (HashTranslator::Equal(Extractor::Extract(*entry), key)) + return idxmap_[i]; + + if (IsEmptyBucket(*entry)) + return -1; + } else { + if (IsEmptyBucket(*entry)) + return -1; + + if (!IsDeletedBucket(*entry) && + HashTranslator::Equal(Extractor::Extract(*entry), 
key)) + return idxmap_[i]; + } + ++probe_count; + UPDATE_PROBE_COUNTS(); + i = (i + probe_count) & size_mask; + } +} + template :: break; if (HashFunctions::safe_to_compare_to_empty_or_deleted) { - if (HashTranslator::Equal(Extractor::Extract(*entry), key)) + if (HashTranslator::Equal(Extractor::Extract(*entry), key)){ return AddResult(this, entry, false); + } if (IsDeletedBucket(*entry) && can_reuse_deleted_entry) deleted_entry = entry; } else { if (IsDeletedBucket(*entry) && can_reuse_deleted_entry) deleted_entry = entry; - else if (HashTranslator::Equal(Extractor::Extract(*entry), key)) + else if (HashTranslator::Equal(Extractor::Extract(*entry), key)){ return AddResult(this, entry, false); + } } ++probe_count; UPDATE_PROBE_COUNTS(); i = (i + probe_count) & size_mask; } - + idxorder_[key_count_] = i; + idxmap_[i] = key_count_; RegisterModification(); if (deleted_entry) { @@ -1518,7 +1602,9 @@ HashTable:: Mover::value>::Move(std::move(entry), *new_entry); - + idxmap_[new_entry - table_] = key_count_; + idxorder_[key_count_] = new_entry - table_; + key_count_++; return new_entry; } @@ -1539,11 +1625,11 @@ inline typename HashTable::iterator HashTable:: Find(const T& key) { - ValueType* entry = Lookup(key); - if (!entry) + auto idx = LookupIdx(key); + if (idx == -1) return end(); - return MakeKnownGoodIterator(entry); + return MakeKnownGoodIterator(idxorder_ + idx); } template ::const_iterator HashTable:: Find(const T& key) const { - const ValueType* entry = Lookup(key); - if (!entry) + auto idx = LookupIdx(key); + if (idx == -1) return end(); - return MakeKnownGoodConstIterator(entry); + return MakeKnownGoodConstIterator(idxorder_ + idx); } template :: erase(iterator it) { if (it == end()) return; - erase(it.iterator_.position_); + erase(&table_[*it.iterator_.position_]); } template :: erase(const_iterator it) { if (it == end()) return; - erase(it.position_); + erase(&table_[*it.position_]); } template :: Allocator::TraceBackingStoreIfMarked(new_hash_table.table_); ValueType* old_table = table_; - unsigned old_table_size = table_size_; + auto old_table_size = table_size_; + auto* old_table_order = idxorder_; + auto* old_table_map = idxmap_; // This swaps the newly allocated buffer with the current one. The store to // the current table has to be atomic to prevent races with concurrent marker. AsAtomicPtr(&table_)->store(new_hash_table.table_, std::memory_order_relaxed); Allocator::template BackingWriteBarrier(&table_); + idxmap_ = new_hash_table.idxmap_; + idxorder_ = new_hash_table.idxorder_; table_size_ = new_table_size; new_hash_table.table_ = old_table; new_hash_table.table_size_ = old_table_size; + new_hash_table.idxorder_ = old_table_order; + new_hash_table.idxmap_ = old_table_map; // Explicitly clear since garbage collected HashTables don't do this on // destruction. 
@@ -1956,7 +2048,11 @@ void HashTable(idxmap_); + Allocator::template FreeHashTableBacking(idxorder_); AsAtomicPtr(&table_)->store(nullptr, std::memory_order_relaxed); + AsAtomicPtr(&idxmap_)->store(nullptr, std::memory_order_relaxed); + AsAtomicPtr(&idxorder_)->store(nullptr, std::memory_order_relaxed); table_size_ = 0; key_count_ = 0; } From 33207f5e3828252e0a94c92e01d9a52a88717012 Mon Sep 17 00:00:00 2001 From: Ori Bernstein Date: Thu, 4 May 2023 16:01:15 -0400 Subject: [PATCH 2/9] fixes --- .../blink/renderer/platform/wtf/hash_table.h | 39 ++++++++++++------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/third_party/blink/renderer/platform/wtf/hash_table.h b/third_party/blink/renderer/platform/wtf/hash_table.h index 83fe6bc8eed1d9..9f34d124357d2a 100644 --- a/third_party/blink/renderer/platform/wtf/hash_table.h +++ b/third_party/blink/renderer/platform/wtf/hash_table.h @@ -307,14 +307,16 @@ class HashTableConstIterator final { } HashTableConstIterator(const ssize_t* position, + const Value* table, const ssize_t* begin_position, const ssize_t* end_position, const HashTableType* container) : position_(position), - end_position_(end_position), - container_(container) + table_(table), + end_position_(end_position) #if DCHECK_IS_ON() - , + , + container_(container), begin_position_(begin_position), container_modifications_(container->Modifications()) #endif @@ -323,15 +325,17 @@ class HashTableConstIterator final { } HashTableConstIterator(const ssize_t* position, + const Value* table, const ssize_t* begin_position, const ssize_t* end_position, const HashTableType* container, HashItemKnownGoodTag) : position_(position), - end_position_(end_position), - container_(container) + table_(table), + end_position_(end_position) #if DCHECK_IS_ON() - , + , + container_(container), begin_position_(begin_position), container_modifications_(container->Modifications()) #endif @@ -358,7 +362,7 @@ class HashTableConstIterator final { GetType Get() const { CheckModifications(); - return &container_->table_[*position_]; + return &table_[*position_]; } typename Traits::IteratorConstReferenceType operator*() const { return Traits::GetToReferenceConstConversion(Get()); @@ -408,16 +412,15 @@ class HashTableConstIterator final { return stream << "iterator representing "; // TODO(tkent): Change |position_| to |*position_| to show the // pointed object. It requires a lot of new stream printer functions. 
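Patch 2's main change is visible in this hunk: the iterator carries its own table_ pointer so that Get() can translate an order slot into a bucket without reaching through container_, which is otherwise only retained under DCHECK_IS_ON(). The resulting iterator is three words plus debug state — a sketch of the shape, reusing the simplified types from the earlier sketch:

#include <sys/types.h>

// Ordered iterator in miniature: a cursor into the order array, plus the
// bucket array for dereferencing.
template <typename Value>
struct OrderedIter {
  const ssize_t* position;  // cursor into idxorder_
  const Value* table;       // bucket array
  const ssize_t* end;       // idxorder_ end, bounds the skip loop

  const Value& operator*() const { return table[*position]; }
  OrderedIter& operator++() {
    ++position;
    while (position != end && *position == -1)  // step over erased slots
      ++position;
    return *this;
  }
};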
- return stream << "iterator pointing to " << &container_->table_[*position_]; + return stream << "iterator pointing to " << &table_[*position_]; } private: const ssize_t* position_; + const Value* table_; const ssize_t* end_position_; - const HashTableType* container_; - //PointerType position_; - //PointerType end_position_; #if DCHECK_IS_ON() + const HashTableType* container_; const ssize_t* begin_position_; //PointerType begin_position_; int64_t container_modifications_; @@ -490,16 +493,18 @@ class HashTableIterator final { Allocator>; HashTableIterator(ssize_t* pos, + Value* table, ssize_t* begin, ssize_t* end, const HashTableType* container) - : iterator_(pos, begin, end, container) {} + : iterator_(pos, table, begin, end, container) {} HashTableIterator(ssize_t* pos, + Value* table, ssize_t* begin, ssize_t* end, const HashTableType* container, HashItemKnownGoodTag tag) - : iterator_(pos, begin, end, container, tag) {} + : iterator_(pos, table, begin, end, container, tag) {} public: HashTableIterator() = default; @@ -949,18 +954,21 @@ class HashTable final iterator MakeIterator(ssize_t* pos) { return iterator(pos, + table_, idxorder_, idxorder_ + key_count_, this); } const_iterator MakeConstIterator(const ssize_t* pos) const { return const_iterator(pos, + table_, idxorder_, idxorder_ + key_count_, this); } iterator MakeKnownGoodIterator(ssize_t* pos) { return iterator(pos, + table_, idxorder_, idxorder_ + key_count_, this, @@ -968,6 +976,7 @@ class HashTable final } const_iterator MakeKnownGoodConstIterator(const ssize_t* pos) const { return const_iterator(pos, + table_, idxorder_, idxorder_ + key_count_, this, @@ -1569,6 +1578,8 @@ HashTable:: // doing that in the translator so that they can be easily customized. ConstructTraits::NotifyNewElement(entry); + idxorder_[key_count_] = entry - table_; + idxmap_[entry - table_] = key_count_; ++key_count_; if (ShouldExpand()) entry = Expand(entry); @@ -1941,7 +1952,7 @@ HashTable:: HashTable new_hash_table(RawStorageTag{}, new_table, new_table_size); Value* new_entry = nullptr; - for (unsigned i = 0; i != table_size_; ++i) { + for (size_t i = 0; i != table_size_; ++i) { if (IsEmptyOrDeletedBucket(table_[i])) { DCHECK_NE(&table_[i], entry); continue; From 364ffb0c0ed815abc37d09493f90cd42501142d9 Mon Sep 17 00:00:00 2001 From: Ori Bernstein Date: Tue, 9 May 2023 23:08:56 -0400 Subject: [PATCH 3/9] Move back to vector based backing --- .../blink/renderer/platform/wtf/hash_table.h | 112 ++++++++---------- 1 file changed, 52 insertions(+), 60 deletions(-) diff --git a/third_party/blink/renderer/platform/wtf/hash_table.h b/third_party/blink/renderer/platform/wtf/hash_table.h index 9f34d124357d2a..86ad7494b0f7d2 100644 --- a/third_party/blink/renderer/platform/wtf/hash_table.h +++ b/third_party/blink/renderer/platform/wtf/hash_table.h @@ -306,10 +306,10 @@ class HashTableConstIterator final { } } - HashTableConstIterator(const ssize_t* position, + HashTableConstIterator(std::vector::const_iterator position, const Value* table, - const ssize_t* begin_position, - const ssize_t* end_position, + std::vector::const_iterator begin_position, + std::vector::const_iterator end_position, const HashTableType* container) : position_(position), table_(table), @@ -324,10 +324,10 @@ class HashTableConstIterator final { SkipEmptyBuckets(); } - HashTableConstIterator(const ssize_t* position, + HashTableConstIterator(std::vector::const_iterator position, const Value* table, - const ssize_t* begin_position, - const ssize_t* end_position, + 
std::vector::const_iterator begin_position, + std::vector::const_iterator end_position, const HashTableType* container, HashItemKnownGoodTag) : position_(position), @@ -416,12 +416,12 @@ class HashTableConstIterator final { } private: - const ssize_t* position_; + std::vector::const_iterator position_; const Value* table_; - const ssize_t* end_position_; + std::vector::const_iterator end_position_; #if DCHECK_IS_ON() const HashTableType* container_; - const ssize_t* begin_position_; + std::vector::const_iterator begin_position_; //PointerType begin_position_; int64_t container_modifications_; #endif @@ -492,16 +492,16 @@ class HashTableIterator final { KeyTraits, Allocator>; - HashTableIterator(ssize_t* pos, + HashTableIterator(std::vector::iterator pos, Value* table, - ssize_t* begin, - ssize_t* end, + std::vector::iterator begin, + std::vector::iterator end, const HashTableType* container) : iterator_(pos, table, begin, end, container) {} - HashTableIterator(ssize_t* pos, + HashTableIterator(std::vector::iterator pos, Value* table, - ssize_t* begin, - ssize_t* end, + std::vector::iterator begin, + std::vector::iterator end, const HashTableType* container, HashItemKnownGoodTag tag) : iterator_(pos, table, begin, end, container, tag) {} @@ -779,13 +779,13 @@ class HashTable final // for begin. This is more efficient because we don't have to skip all the // empty and deleted buckets, and iterating an empty table is a common case // that's worth optimizing. - iterator begin() { return empty() ? end() : MakeIterator(idxorder_); } - iterator end() { return MakeKnownGoodIterator(idxorder_ + key_count_); } + iterator begin() { return empty() ? end() : MakeIterator(idxorder_.begin()); } + iterator end() { return MakeKnownGoodIterator(idxorder_.end()); } const_iterator begin() const { - return empty() ? end() : MakeConstIterator(idxorder_); + return empty() ? 
end() : MakeConstIterator(idxorder_.cbegin()); } const_iterator end() const { - return MakeKnownGoodConstIterator(idxorder_ + key_count_); + return MakeKnownGoodConstIterator(idxorder_.cend()); } unsigned size() const { @@ -952,33 +952,33 @@ class HashTable final return FullLookupType(LookupType(position, found), hash); } - iterator MakeIterator(ssize_t* pos) { + iterator MakeIterator(std::vector::iterator pos) { return iterator(pos, table_, - idxorder_, - idxorder_ + key_count_, + idxorder_.begin(), + idxorder_.end(), this); } - const_iterator MakeConstIterator(const ssize_t* pos) const { + const_iterator MakeConstIterator(std::vector::const_iterator pos) const { return const_iterator(pos, table_, - idxorder_, - idxorder_ + key_count_, + idxorder_.cbegin(), + idxorder_.cend(), this); } - iterator MakeKnownGoodIterator(ssize_t* pos) { + iterator MakeKnownGoodIterator(std::vector::iterator pos) { return iterator(pos, table_, - idxorder_, - idxorder_ + key_count_, + idxorder_.begin(), + idxorder_.end(), this, kHashItemKnownGood); } - const_iterator MakeKnownGoodConstIterator(const ssize_t* pos) const { + const_iterator MakeKnownGoodConstIterator(std::vector::const_iterator pos) const { return const_iterator(pos, table_, - idxorder_, - idxorder_ + key_count_, + idxorder_.cbegin(), + idxorder_.cend(), this, kHashItemKnownGood); } @@ -1000,6 +1000,8 @@ class HashTable final struct RawStorageTag {}; HashTable(RawStorageTag, ValueType* table, unsigned size) : table_(table), + idxmap_(size, -1), + idxorder_(), table_size_(size), key_count_(0), deleted_count_(0), @@ -1010,19 +1012,13 @@ class HashTable final modifications_(0) #endif { - size_t alloc_size = base::CheckMul(size, sizeof(ValueType)).ValueOrDie(); - idxmap_ = Allocator::template AllocateHashTableBacking(alloc_size); - idxorder_ = Allocator::template AllocateHashTableBacking(alloc_size); - for(size_t i = 0; i < size; i++){ - idxmap_[i] = -1; - } } public: ValueType* table_; private: - ssize_t* idxmap_; - ssize_t* idxorder_; + std::vector idxmap_; + std::vector idxorder_; unsigned table_size_; unsigned key_count_; @@ -1473,7 +1469,7 @@ HashTable:: break; if (HashFunctions::safe_to_compare_to_empty_or_deleted) { - if (HashTranslator::Equal(Extractor::Extract(*entry), key)){ + if (HashTranslator::Equal(Extractor::Extract(*entry), key)) { return AddResult(this, entry, false); } @@ -1482,7 +1478,7 @@ HashTable:: } else { if (IsDeletedBucket(*entry) && can_reuse_deleted_entry) deleted_entry = entry; - else if (HashTranslator::Equal(Extractor::Extract(*entry), key)){ + else if (HashTranslator::Equal(Extractor::Extract(*entry), key)) { return AddResult(this, entry, false); } } @@ -1490,7 +1486,7 @@ HashTable:: UPDATE_PROBE_COUNTS(); i = (i + probe_count) & size_mask; } - idxorder_[key_count_] = i; + idxorder_.push_back(i); idxmap_[i] = key_count_; RegisterModification(); @@ -1578,7 +1574,7 @@ HashTable:: // doing that in the translator so that they can be easily customized. 
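One subtlety in the add() path: the entry always receives a fresh slot appended to idxorder_, so re-inserting a previously erased key lands at the end of the iteration order rather than back at its old position. The bookkeeping, condensed (the slot is taken from idxorder_.size(), the correction patch 4 makes below, since key_count_ and the order length diverge once entries have been erased):

#include <cstddef>
#include <sys/types.h>
#include <vector>

// 'bucket' is wherever the probe settled -- a reclaimed deleted bucket or
// a fresh empty one; the ordering treatment is identical.
void RecordInsertionOrder(std::vector<ssize_t>& idxmap,
                          std::vector<ssize_t>& idxorder,
                          size_t bucket) {
  idxmap[bucket] = (ssize_t)idxorder.size();  // newest entry in the order
  idxorder.push_back((ssize_t)bucket);
}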
ConstructTraits::NotifyNewElement(entry); - idxorder_[key_count_] = entry - table_; + idxorder_.push_back(entry - table_); idxmap_[entry - table_] = key_count_; ++key_count_; if (ShouldExpand()) @@ -1614,7 +1610,7 @@ HashTable:: Traits::template NeedsToForbidGCOnMove<>::value>::Move(std::move(entry), *new_entry); idxmap_[new_entry - table_] = key_count_; - idxorder_[key_count_] = new_entry - table_; + idxorder_.push_back(new_entry - table_); key_count_++; return new_entry; } @@ -1640,7 +1636,7 @@ HashTable:: if (idx == -1) return end(); - return MakeKnownGoodIterator(idxorder_ + idx); + return MakeKnownGoodIterator(idxorder_.begin() + idx); } template :: if (idx == -1) return end(); - return MakeKnownGoodConstIterator(idxorder_ + idx); + return MakeKnownGoodConstIterator(idxorder_.begin() + idx); } template numRemoves.fetch_add(1, std::memory_order_relaxed); #endif + idxorder_[idxmap_[pos - table_]] = -1; EnterAccessForbiddenScope(); DeleteBucket(*pos); LeaveAccessForbiddenScope(); @@ -1727,7 +1724,7 @@ template :: erase(iterator it) { - if (it == end()) + if (it == end() || *it.iterator_.position_ == -1) return; erase(&table_[*it.iterator_.position_]); } @@ -1742,7 +1739,7 @@ template :: erase(const_iterator it) { - if (it == end()) + if (it == end() || *it.position_ == -1) return; erase(&table_[*it.position_]); } @@ -1952,11 +1949,10 @@ HashTable:: HashTable new_hash_table(RawStorageTag{}, new_table, new_table_size); Value* new_entry = nullptr; - for (size_t i = 0; i != table_size_; ++i) { - if (IsEmptyOrDeletedBucket(table_[i])) { - DCHECK_NE(&table_[i], entry); + for (auto i : idxorder_) { + // deleted entries show up in the order as -1 + if (i < 0) continue; - } Value* reinserted_entry = new_hash_table.Reinsert(std::move(table_[i])); if (&table_[i] == entry) { DCHECK(!new_entry); @@ -1968,8 +1964,8 @@ HashTable:: ValueType* old_table = table_; auto old_table_size = table_size_; - auto* old_table_order = idxorder_; - auto* old_table_map = idxmap_; + //auto old_table_order = std::move(idxorder_); + //auto old_table_map = std::move(idxmap_); // This swaps the newly allocated buffer with the current one. The store to // the current table has to be atomic to prevent races with concurrent marker. @@ -1981,8 +1977,8 @@ HashTable:: new_hash_table.table_ = old_table; new_hash_table.table_size_ = old_table_size; - new_hash_table.idxorder_ = old_table_order; - new_hash_table.idxmap_ = old_table_map; + //new_hash_table.idxorder_ = std::move(old_table_order); + //new_hash_table.idxmap_ = std::move(old_table_map); // Explicitly clear since garbage collected HashTables don't do this on // destruction. 
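The rehash loop above is what keeps the structure honest across growth: it walks idxorder_ rather than the bucket array, so entries are reinserted oldest-first and the new table's order arrays are rebuilt by Reinsert() as a side effect; tombstoned slots are skipped, which simultaneously compacts them away. In outline:

#include <sys/types.h>
#include <vector>

// 'reinsert' stands in for HashTable::Reinsert(), which appends each moved
// entry to the new table's order array.
template <typename Value, typename Reinsert>
void RehashInOrder(const std::vector<ssize_t>& old_order,
                   std::vector<Value>& old_buckets,
                   Reinsert reinsert) {
  for (ssize_t bucket : old_order) {
    if (bucket < 0)
      continue;  // erased entry: its tombstone is dropped by the rehash
    reinsert(old_buckets[(size_t)bucket]);
  }
}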
@@ -2059,11 +2055,7 @@ void HashTable(idxmap_); - Allocator::template FreeHashTableBacking(idxorder_); AsAtomicPtr(&table_)->store(nullptr, std::memory_order_relaxed); - AsAtomicPtr(&idxmap_)->store(nullptr, std::memory_order_relaxed); - AsAtomicPtr(&idxorder_)->store(nullptr, std::memory_order_relaxed); table_size_ = 0; key_count_ = 0; } From 517e871c77a1529e5fa08047fc1a5f52bb548cb4 Mon Sep 17 00:00:00 2001 From: Ori Bernstein Date: Wed, 10 May 2023 00:01:18 -0400 Subject: [PATCH 4/9] fix hashset swap --- .../blink/renderer/platform/wtf/hash_table.h | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/third_party/blink/renderer/platform/wtf/hash_table.h b/third_party/blink/renderer/platform/wtf/hash_table.h index 86ad7494b0f7d2..9af25e383b1d4e 100644 --- a/third_party/blink/renderer/platform/wtf/hash_table.h +++ b/third_party/blink/renderer/platform/wtf/hash_table.h @@ -1486,17 +1486,20 @@ HashTable:: UPDATE_PROBE_COUNTS(); i = (i + probe_count) & size_mask; } - idxorder_.push_back(i); - idxmap_[i] = key_count_; RegisterModification(); if (deleted_entry) { DCHECK(can_reuse_deleted_entry); // Overwrite any data left over from last use, using placement new or // memset. + idxmap_[deleted_entry - table_] = idxorder_.size(); + idxorder_.push_back(deleted_entry - table_); ReinitializeBucket(*deleted_entry); entry = deleted_entry; --deleted_count_; + } else { + idxmap_[i] = idxorder_.size(); + idxorder_.push_back(i); } HashTranslator::Translate(*entry, std::forward(key), @@ -1574,8 +1577,8 @@ HashTable:: // doing that in the translator so that they can be easily customized. ConstructTraits::NotifyNewElement(entry); + idxmap_[entry - table_] = idxorder_.size(); idxorder_.push_back(entry - table_); - idxmap_[entry - table_] = key_count_; ++key_count_; if (ShouldExpand()) entry = Expand(entry); @@ -1609,7 +1612,7 @@ HashTable:: Mover::value>::Move(std::move(entry), *new_entry); - idxmap_[new_entry - table_] = key_count_; + idxmap_[new_entry - table_] = idxorder_.size(); idxorder_.push_back(new_entry - table_); key_count_++; return new_entry; @@ -1951,7 +1954,7 @@ HashTable:: Value* new_entry = nullptr; for (auto i : idxorder_) { // deleted entries show up in the order as -1 - if (i < 0) + if (i < 0 || IsEmptyOrDeletedBucket(table_[i])) continue; Value* reinserted_entry = new_hash_table.Reinsert(std::move(table_[i])); if (&table_[i] == entry) { @@ -2056,6 +2059,8 @@ void HashTablestore(nullptr, std::memory_order_relaxed); + idxorder_.clear(); + idxmap_.clear(); table_size_ = 0; key_count_ = 0; } @@ -2154,6 +2159,8 @@ void HashTable Date: Wed, 10 May 2023 11:00:14 -0400 Subject: [PATCH 5/9] compress the ordering vector if it grows too much --- .../blink/renderer/platform/wtf/hash_table.h | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/third_party/blink/renderer/platform/wtf/hash_table.h b/third_party/blink/renderer/platform/wtf/hash_table.h index 9af25e383b1d4e..ef25cafe106cac 100644 --- a/third_party/blink/renderer/platform/wtf/hash_table.h +++ b/third_party/blink/renderer/platform/wtf/hash_table.h @@ -299,8 +299,7 @@ class HashTableConstIterator final { // always going to be a non-empty bucket. 
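Patch 4's subject is "fix hashset swap": its hunk at line 2154 adds two lines to HashTable's swap, presumably exchanging idxmap_ and idxorder_, since everything describing the same logical contents has to travel together — buckets, sizes, and both index arrays. A sketch of a swap that preserves the invariant, assuming the vector-backed members from patch 3:

#include <sys/types.h>
#include <utility>
#include <vector>

template <typename Value>
struct OrderedTable {
  Value* table = nullptr;
  unsigned table_size = 0;
  std::vector<ssize_t> idxmap;
  std::vector<ssize_t> idxorder;

  // Swapping buckets without swapping the order arrays would leave each
  // table reading the other's contents through its own stale ordering.
  void swap(OrderedTable& other) {
    std::swap(table, other.table);
    std::swap(table_size, other.table_size);
    idxmap.swap(other.idxmap);
    idxorder.swap(other.idxorder);
  }
};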
while (*position_ == -1) { #if DCHECK_IS_ON() - assert(position_ != begin_position_); - //DCHECK_NE(position_, begin_position_); + DCHECK_NE(position_, begin_position_); #endif --position_; } @@ -370,8 +369,6 @@ class HashTableConstIterator final { GetType operator->() const { return Get(); } const_iterator& operator++() { - assert(position_ != end_position_); - //DCHECK_NE(position_, end_position_); CheckModifications(); ++position_; SkipEmptyBuckets(); @@ -918,6 +915,12 @@ class HashTable final bool ShouldExpand() const { return (key_count_ + deleted_count_) * kMaxLoad >= table_size_; } + // if the same key and value is inserted and deleted, the ordering + // vector can grow without bound; we want to do a no-op resize if + // the key count is significantly smaller than the ordering size + bool ShouldCompress() const { + return key_count_ < kMaxLoad * idxorder_.size(); + } bool MustRehashInPlace() const { return key_count_ * kMinLoad < table_size_ * 2; } @@ -1513,6 +1516,8 @@ HashTable:: if (ShouldExpand()) { entry = Expand(entry); + } else if (ShouldCompress()) { + entry = Rehash(table_size_, entry); } else if (WTF::IsWeak::value && ShouldShrink()) { // When weak hash tables are processed by the garbage collector, // elements with no other strong references to them will have their @@ -1582,6 +1587,8 @@ HashTable:: ++key_count_; if (ShouldExpand()) entry = Expand(entry); + else if (ShouldCompress()) + entry = Rehash(table_size_, entry); return AddResult(this, entry, true); } From fb321953fcfbd01e086582c95a24436dc8a9fedf Mon Sep 17 00:00:00 2001 From: Ori Bernstein Date: Wed, 10 May 2023 11:22:55 -0400 Subject: [PATCH 6/9] revert test changes --- .../partition_alloc_hooks.cc | 6 +- .../shim/allocator_shim.cc | 6 +- base/functional/bind_internal.h | 4 +- base/process/process_posix.cc | 2 +- base/record_replay.cc | 142 ++++++++++-------- mojo/core/ports/port.cc | 4 +- mojo/core/ports/port_locker.cc | 8 + 7 files changed, 100 insertions(+), 72 deletions(-) diff --git a/base/allocator/partition_allocator/partition_alloc_hooks.cc b/base/allocator/partition_allocator/partition_alloc_hooks.cc index 6880e277334442..7fdd6dbcda5d5f 100644 --- a/base/allocator/partition_allocator/partition_alloc_hooks.cc +++ b/base/allocator/partition_allocator/partition_alloc_hooks.cc @@ -54,10 +54,10 @@ void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook, void PartitionAllocHooks::SetOverrideHooks(AllocationOverrideHook* alloc_hook, FreeOverrideHook* free_hook, ReallocOverrideHook realloc_hook) { - //if (recordreplay::IsRecordingOrReplaying()) { + if (recordreplay::IsRecordingOrReplaying()) { // Always use the default allocators when recording/replaying. 
- // return; - //} + return; + } internal::ScopedGuard guard(GetHooksLock()); diff --git a/base/allocator/partition_allocator/shim/allocator_shim.cc b/base/allocator/partition_allocator/shim/allocator_shim.cc index 0d8f50e9e03f74..e0aea9e959c4d3 100644 --- a/base/allocator/partition_allocator/shim/allocator_shim.cc +++ b/base/allocator/partition_allocator/shim/allocator_shim.cc @@ -385,9 +385,9 @@ ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) { #include "base/allocator/partition_allocator/shim/allocator_shim_override_glibc_weak_symbols.h" #endif -//static inline bool MaybeRecordingOrReplaying() { -// return true; -//} +static inline bool MaybeRecordingOrReplaying() { + return true; +} #if BUILDFLAG(IS_APPLE) namespace allocator_shim { diff --git a/base/functional/bind_internal.h b/base/functional/bind_internal.h index b82c015ea75ac1..c7cf022773f97a 100644 --- a/base/functional/bind_internal.h +++ b/base/functional/bind_internal.h @@ -1581,7 +1581,7 @@ struct CallbackCancellationTraits { static constexpr bool is_cancellable = false; }; -//extern uintptr_t CallbackRecordReplayValue(const char* why, uintptr_t value); +extern uintptr_t CallbackRecordReplayValue(const char* why, uintptr_t value); // Specialization for method bound to weak pointer receiver. template @@ -1600,7 +1600,7 @@ struct CallbackCancellationTraits< // Weak pointers can be cleared non-deterministically when recording/replaying, // so record/replay whether they are present so that callers checking the status // like TaskQueueImpl behave consistently. - return 0; //CallbackRecordReplayValue("WeakMethodIsCancelled", !receiver); + return CallbackRecordReplayValue("WeakMethodIsCancelled", !receiver); } template diff --git a/base/process/process_posix.cc b/base/process/process_posix.cc index d1d9fe1c19790a..4166e5a4c8f46a 100644 --- a/base/process/process_posix.cc +++ b/base/process/process_posix.cc @@ -277,7 +277,7 @@ void Process::TerminateCurrentProcessImmediately(int exit_code) { #if BUILDFLAG(CLANG_PROFILING) WriteClangProfilingProfile(); #endif - //V8RecordReplayFinishRecording(); + V8RecordReplayFinishRecording(); _exit(exit_code); } diff --git a/base/record_replay.cc b/base/record_replay.cc index 274e7e2f905ba1..f9986614e6407f 100644 --- a/base/record_replay.cc +++ b/base/record_replay.cc @@ -161,20 +161,19 @@ ForEachV8APIVoid(DefineFunctionVoid) #endif // !BUILD_FLAG(IS_WIN) bool IsRecordingOrReplaying(const char* feature) { - return false; //V8IsRecordingOrReplaying(feature); + return V8IsRecordingOrReplaying(feature); } bool IsRecording() { - return false; //V8IsRecording(); + return V8IsRecording(); } bool IsReplaying() { - return false; //V8IsReplaying(); + return V8IsReplaying(); } char* GetRecordingId() { - static char ret[1] = ""; - return ret; //V8GetRecordingId(); + return V8GetRecordingId(); } bool HadMismatch() { @@ -185,7 +184,7 @@ void Assert(const char* format, ...) { #ifndef NACL_TC_REV va_list ap; va_start(ap, format); - //V8RecordReplayAssertVA(format, ap); + V8RecordReplayAssertVA(format, ap); va_end(ap); #endif } @@ -194,7 +193,7 @@ void Diagnostic(const char* format, ...) { #ifndef NACL_TC_REV va_list ap; va_start(ap, format); - //V8RecordReplayDiagnosticVA(format, ap); + V8RecordReplayDiagnosticVA(format, ap); va_end(ap); #endif } @@ -203,151 +202,151 @@ void Warning(const char* format, ...) 
{ #ifndef NACL_TC_REV va_list ap; va_start(ap, format); - //V8RecordReplayWarning(format, ap); + V8RecordReplayWarning(format, ap); va_end(ap); #endif } void AssertBytes(const char* why, const void* buf, size_t size) { - //V8RecordReplayAssertBytes(why, buf, size); + V8RecordReplayAssertBytes(why, buf, size); } void Print(const char* format, ...) { #ifndef NACL_TC_REV va_list ap; va_start(ap, format); - //V8RecordReplayPrintVA(format, ap); + V8RecordReplayPrintVA(format, ap); va_end(ap); #endif } uintptr_t RecordReplayValue(const char* why, uintptr_t v) { - return 0; //V8RecordReplayValue(why, v); + return V8RecordReplayValue(why, v); } void RecordReplayBytes(const char* why, void* buf, size_t size) { - //V8RecordReplayBytes(why, buf, size); + V8RecordReplayBytes(why, buf, size); } int CreateOrderedLock(const char* name) { - return 0; //(int)V8RecordReplayCreateOrderedLock(name); + return (int)V8RecordReplayCreateOrderedLock(name); } void OrderedLock(int lock) { - //V8RecordReplayOrderedLock(lock); + V8RecordReplayOrderedLock(lock); } void OrderedUnlock(int lock) { - //V8RecordReplayOrderedUnlock(lock); + V8RecordReplayOrderedUnlock(lock); } void NewCheckpoint() { - //V8RecordReplayNewCheckpoint(); + V8RecordReplayNewCheckpoint(); } uint64_t NewBookmark() { - return 0; //V8RecordReplayNewBookmark(); + return V8RecordReplayNewBookmark(); } void OnAnnotation(const char* kind, const char* contents) { - //V8RecordReplayOnAnnotation(kind, contents); + V8RecordReplayOnAnnotation(kind, contents); } void OnNetworkRequest(const char* id, const char* kind, uint64_t bookmark) { - //V8RecordReplayOnNetworkRequest(id, kind, bookmark); + V8RecordReplayOnNetworkRequest(id, kind, bookmark); } void OnNetworkRequestEvent(const char* id) { - //V8RecordReplayOnNetworkRequestEvent(id); + V8RecordReplayOnNetworkRequestEvent(id); } void OnNetworkStreamStart(const char* id, const char* kind, const char* parentId) { - //V8RecordReplayOnNetworkStreamStart(id, kind, parentId); + V8RecordReplayOnNetworkStreamStart(id, kind, parentId); } void OnNetworkStreamData(const char* id, size_t offset, size_t length, uint64_t bookmark) { - //V8RecordReplayOnNetworkStreamData(id, offset, length, bookmark); + V8RecordReplayOnNetworkStreamData(id, offset, length, bookmark); } void OnNetworkStreamEnd(const char* id, size_t length) { - //V8RecordReplayOnNetworkStreamEnd(id, length); + V8RecordReplayOnNetworkStreamEnd(id, length); } bool AreEventsDisallowed() { - return false; //V8RecordReplayAreEventsDisallowed(); + return V8RecordReplayAreEventsDisallowed(); } void BeginDisallowEvents() { - //V8RecordReplayBeginDisallowEvents(); + V8RecordReplayBeginDisallowEvents(); } void BeginDisallowEventsWithLabel(const char* label) { - //iV8RecordReplayBeginDisallowEventsWithLabel(label); + V8RecordReplayBeginDisallowEventsWithLabel(label); } void EndDisallowEvents() { - //V8RecordReplayEndDisallowEvents(); + V8RecordReplayEndDisallowEvents(); } bool AreEventsPassedThrough() { - return 0; //V8RecordReplayAreEventsPassedThrough(); + return V8RecordReplayAreEventsPassedThrough(); } void BeginPassThroughEvents() { - //V8RecordReplayBeginPassThroughEvents(); + V8RecordReplayBeginPassThroughEvents(); } void EndPassThroughEvents() { - //V8RecordReplayEndPassThroughEvents(); + V8RecordReplayEndPassThroughEvents(); } bool FeatureEnabled(const char* feature) { - return false;//V8RecordReplayFeatureEnabled(feature); + return V8RecordReplayFeatureEnabled(feature); } void BrowserEvent(const char* name, const base::DictionaryValue& info) { - //std::string 
json; - //base::JSONWriter::Write(info, &json); - //V8RecordReplayBrowserEvent(name, json.c_str()); + std::string json; + base::JSONWriter::Write(info, &json); + V8RecordReplayBrowserEvent(name, json.c_str()); } bool HasDivergedFromRecording() { - return false;//V8RecordReplayHasDivergedFromRecording(); + return V8RecordReplayHasDivergedFromRecording(); } bool AllowSideEffects() { - return false; //V8RecordReplayAllowSideEffects(); + return V8RecordReplayAllowSideEffects(); } void RegisterPointer(const char* name, const void* ptr) { - //V8RecordReplayRegisterPointer(name, ptr); + V8RecordReplayRegisterPointer(name, ptr); } void UnregisterPointer(const void* ptr) { - //V8RecordReplayUnregisterPointer(ptr); + V8RecordReplayUnregisterPointer(ptr); } int PointerId(const void* ptr) { - return 0; //V8RecordReplayPointerId(ptr); + return V8RecordReplayPointerId(ptr); } void* IdPointer(int id) { - return nullptr; //V8RecordReplayIdPointer(id); + return V8RecordReplayIdPointer(id); } void OnEvent(const char* aEvent, bool aBefore) { - //V8RecordReplayOnEvent(aEvent, aBefore); + V8RecordReplayOnEvent(aEvent, aBefore); } void OnMouseEvent(const char* kind, size_t clientX, size_t clientY) { - //V8RecordReplayOnMouseEvent(kind, clientX, clientY); + V8RecordReplayOnMouseEvent(kind, clientX, clientY); } void OnKeyEvent(const char* kind, const char* key) { - //V8RecordReplayOnKeyEvent(kind, key); + V8RecordReplayOnKeyEvent(kind, key); } void OnNavigationEvent(const char* kind, const char* url) { - //V8RecordReplayOnNavigationEvent(kind, url); + V8RecordReplayOnNavigationEvent(kind, url); } AutoLockMaybeEventsDisallowed::AutoLockMaybeEventsDisallowed( @@ -379,29 +378,50 @@ AutoUnlockMaybeEventsDisallowed::~AutoUnlockMaybeEventsDisallowed() { } bool IsMainThread() { - return false; //V8IsMainThread(); + return V8IsMainThread(); +} + +static int gNextMainThreadId = 1; + +static bool CheckNewId(const char* name) { + if (!IsRecordingOrReplaying()) { + // Don't track anything. + return false; + } + if (HasDivergedFromRecording()) { + // Everything is allowed when explicitly diverged. + return true; + } + if (AreEventsDisallowed()) { + // IDs can be created when events are disallowed when our own scripts + // create URL objects. This would be nice to improve. 
+ if (!IsInReplayCode()) { + Warning("NewId when not allowed %s", name); + } + return false; + } + Assert("NewId %s", name); + return true; } -//static int gNextMainThreadId = 1; int NewIdMainThread(const char* name) { -// if (IsRecordingOrReplaying()) { -// if (!V8IsMainThread()) { -// fprintf(stderr, "NewIdMainThread not main thread: %s\n", name); -// CHECK(V8IsMainThread()); -// } -// Assert("NewId %s", name); -// return gNextMainThreadId++; -// } - return 0; + if (!CheckNewId(name)) { + return 0; + } + if (!V8IsMainThread()) { + fprintf(stderr, "NewIdMainThread not main thread: %s\n", name); + CHECK(V8IsMainThread()); + } + return gNextMainThreadId++; } -//static std::atomic gNextAnyThreadId{1}; +static std::atomic gNextAnyThreadId{1}; int NewIdAnyThread(const char* name) { - //if (!CheckNewId(name)) { + if (!CheckNewId(name)) { return 0; - //} - //return (int)RecordReplayValue("NewId", (uintptr_t)gNextAnyThreadId++); + } + return (int)RecordReplayValue("NewId", (uintptr_t)gNextAnyThreadId++); } bool IsInReplayCode() { @@ -421,7 +441,7 @@ void RecordReplayString(const char* why, std::string& str) { } void AddOrderedSRWLock(const char* name, void* lock) { - //V8RecordReplayAddOrderedSRWLock(name, lock); + V8RecordReplayAddOrderedSRWLock(name, lock); } void RemoveOrderedSRWLock(void* lock) { @@ -429,7 +449,7 @@ void RemoveOrderedSRWLock(void* lock) { } void MaybeTerminate(void (*callback)(void*), void* data) { - //V8RecordReplayMaybeTerminate(callback, data); + V8RecordReplayMaybeTerminate(callback, data); } } // namespace recordreplay diff --git a/mojo/core/ports/port.cc b/mojo/core/ports/port.cc index 879f330b172cba..8dd6b450a94df0 100644 --- a/mojo/core/ports/port.cc +++ b/mojo/core/ports/port.cc @@ -34,11 +34,11 @@ Port::Port(uint64_t next_sequence_num_to_send, peer_lost_unexpectedly(false), lock_("Port.lock_") { // Registering new ports is needed for sorting, see port_locker.cc - //recordreplay::RegisterPointer("Port", this); + recordreplay::RegisterPointer("Port", this); } Port::~Port() { - //recordreplay::UnregisterPointer(this); + recordreplay::UnregisterPointer(this); } bool Port::IsNextEvent(const NodeName& from_node, const Event& event) { diff --git a/mojo/core/ports/port_locker.cc b/mojo/core/ports/port_locker.cc index ab975516ff1911..7bcb9b179fb552 100644 --- a/mojo/core/ports/port_locker.cc +++ b/mojo/core/ports/port_locker.cc @@ -32,7 +32,15 @@ void UpdateTLS(PortLocker* old_locker, PortLocker* new_locker) { } // namespace static uintptr_t GetPortId(Port* port) { + // When recording/replaying the sorted order of ports need to be consistent, + // so we use the ID associated with the port via RegisterPointer for sorting. 
+ if (recordreplay::IsRecordingOrReplaying("pointer-ids")) { + uintptr_t id = recordreplay::PointerId(port); + CHECK(id); + return id; + } else { return (uintptr_t)port; + } } PortLocker::PortLocker(const PortRef** port_refs, size_t num_ports) From 39e0b2de4969910dd2b4f7265577bbe5ba1ce530 Mon Sep 17 00:00:00 2001 From: Ori Bernstein Date: Wed, 10 May 2023 11:35:08 -0400 Subject: [PATCH 7/9] missed revert --- third_party/blink/renderer/platform/wtf/BUILD.gn | 4 ---- 1 file changed, 4 deletions(-) diff --git a/third_party/blink/renderer/platform/wtf/BUILD.gn b/third_party/blink/renderer/platform/wtf/BUILD.gn index 1f14c5e02f337f..2525817a163ddf 100644 --- a/third_party/blink/renderer/platform/wtf/BUILD.gn +++ b/third_party/blink/renderer/platform/wtf/BUILD.gn @@ -18,7 +18,6 @@ visibility = [ ] config("wtf_config") { - cflags = ["-g", "-O0"] if (is_win) { cflags = [ # Don't complain about calling specific versions of templatized @@ -243,7 +242,6 @@ component("wtf") { sources += [ "text/ascii_fast_path.cc" ] } - cflags = ["-g", "-O0"] if (is_win) { cflags = [ "/wd4068" ] # Unknown pragma. @@ -281,13 +279,11 @@ component("wtf") { test("wtf_unittests") { deps = [ ":wtf_unittests_sources" ] - cflags = ["-g", "-O0"] } source_set("wtf_unittests_sources") { visibility = [] # Allow re-assignment of list. visibility = [ "*" ] - cflags = ["-g", "-O0"] testonly = true sources = [ From c3224279bb6dacadc81e9f50cab5a60b8ca3ff43 Mon Sep 17 00:00:00 2001 From: Ori Bernstein Date: Thu, 11 May 2023 12:32:56 -0400 Subject: [PATCH 8/9] revert to gc'ed arrays --- .../blink/renderer/platform/wtf/BUILD.gn | 2 + .../blink/renderer/platform/wtf/hash_table.h | 128 ++++++++++-------- tools/v8_context_snapshot/BUILD.gn | 1 + 3 files changed, 77 insertions(+), 54 deletions(-) diff --git a/third_party/blink/renderer/platform/wtf/BUILD.gn b/third_party/blink/renderer/platform/wtf/BUILD.gn index 2525817a163ddf..76d5af9cbfc61c 100644 --- a/third_party/blink/renderer/platform/wtf/BUILD.gn +++ b/third_party/blink/renderer/platform/wtf/BUILD.gn @@ -18,6 +18,7 @@ visibility = [ ] config("wtf_config") { + cflags = ["-g", "-O0"] if (is_win) { cflags = [ # Don't complain about calling specific versions of templatized @@ -340,6 +341,7 @@ source_set("wtf_unittests_sources") { "vector_test.cc", ] + cflags = ["-g", "-O0"] if (is_win) { cflags = [ "/wd4068" ] # Unknown pragma. 
 }
diff --git a/third_party/blink/renderer/platform/wtf/hash_table.h b/third_party/blink/renderer/platform/wtf/hash_table.h
index ef25cafe106cac..3f95e71d179c85 100644
--- a/third_party/blink/renderer/platform/wtf/hash_table.h
+++ b/third_party/blink/renderer/platform/wtf/hash_table.h
@@ -305,10 +305,10 @@ class HashTableConstIterator final {
     }
   }
 
-  HashTableConstIterator(std::vector<ssize_t>::const_iterator position,
+  HashTableConstIterator(ssize_t* position,
                          const Value* table,
-                         std::vector<ssize_t>::const_iterator begin_position,
-                         std::vector<ssize_t>::const_iterator end_position,
+                         ssize_t* begin_position,
+                         ssize_t* end_position,
                          const HashTableType* container)
       : position_(position),
         table_(table),
@@ -323,10 +323,10 @@ class HashTableConstIterator final {
     SkipEmptyBuckets();
   }
 
-  HashTableConstIterator(std::vector<ssize_t>::const_iterator position,
+  HashTableConstIterator(ssize_t* position,
                          const Value* table,
-                         std::vector<ssize_t>::const_iterator begin_position,
-                         std::vector<ssize_t>::const_iterator end_position,
+                         ssize_t* begin_position,
+                         ssize_t* end_position,
                          const HashTableType* container,
                          HashItemKnownGoodTag)
       : position_(position),
@@ -413,12 +413,12 @@ class HashTableConstIterator final {
   }
 
  private:
-  std::vector<ssize_t>::const_iterator position_;
+  ssize_t* position_;
   const Value* table_;
-  std::vector<ssize_t>::const_iterator end_position_;
+  ssize_t* end_position_;
 #if DCHECK_IS_ON()
   const HashTableType* container_;
-  std::vector<ssize_t>::const_iterator begin_position_;
+  ssize_t* begin_position_;
   //PointerType begin_position_;
   int64_t container_modifications_;
 #endif
@@ -489,16 +489,16 @@ class HashTableIterator final {
                                              KeyTraits,
                                              Allocator>;
 
-  HashTableIterator(std::vector<ssize_t>::iterator pos,
+  HashTableIterator(ssize_t* pos,
                     Value* table,
-                    std::vector<ssize_t>::iterator begin,
-                    std::vector<ssize_t>::iterator end,
+                    ssize_t* begin,
+                    ssize_t* end,
                     const HashTableType* container)
       : iterator_(pos, table, begin, end, container) {}
-  HashTableIterator(std::vector<ssize_t>::iterator pos,
+  HashTableIterator(ssize_t* pos,
                     Value* table,
-                    std::vector<ssize_t>::iterator begin,
-                    std::vector<ssize_t>::iterator end,
+                    ssize_t* begin,
+                    ssize_t* end,
                     const HashTableType* container,
                     HashItemKnownGoodTag tag)
       : iterator_(pos, table, begin, end, container, tag) {}
@@ -776,13 +776,13 @@ class HashTable final
   // for begin. This is more efficient because we don't have to skip all the
   // empty and deleted buckets, and iterating an empty table is a common case
   // that's worth optimizing.
-  iterator begin() { return empty() ? end() : MakeIterator(idxorder_.begin()); }
-  iterator end() { return MakeKnownGoodIterator(idxorder_.end()); }
+  iterator begin() { return empty() ? end() : MakeIterator(idxorder_); }
+  iterator end() { return MakeKnownGoodIterator(idxorder_ + idxorder_count_); }
   const_iterator begin() const {
-    return empty() ? end() : MakeConstIterator(idxorder_.cbegin());
+    return empty() ? end() : MakeConstIterator(idxorder_);
   }
   const_iterator end() const {
-    return MakeKnownGoodConstIterator(idxorder_.cend());
+    return MakeKnownGoodConstIterator(idxorder_ + idxorder_count_);
   }
 
   unsigned size() const {
@@ -918,8 +918,11 @@ class HashTable final
   // if the same key and value is inserted and deleted, the ordering
   // vector can grow without bound; we want to do a no-op resize if
   // the key count is significantly smaller than the ordering size
+  //
+  // We need to compress to make sure that the count does not grow
+  // past the size of the backing array.
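+  //
+  // For example, repeatedly inserting and erasing the same key keeps
+  // key_count_ at 1 while every insert appends another slot to idxorder_,
+  // so idxorder_count_ can approach table_size_ long before the table is
+  // actually full of live keys.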
   bool ShouldCompress() const {
-    return key_count_ < kMaxLoad * idxorder_.size();
+    return idxorder_count_ == table_size_ - 1;
   }
   bool MustRehashInPlace() const {
     return key_count_ * kMinLoad < table_size_ * 2;
@@ -955,33 +958,33 @@ class HashTable final
     return FullLookupType(LookupType(position, found), hash);
   }
 
-  iterator MakeIterator(std::vector<ssize_t>::iterator pos) {
+  iterator MakeIterator(ssize_t* pos) {
     return iterator(pos,
                     table_,
-                    idxorder_.begin(),
-                    idxorder_.end(),
+                    idxorder_,
+                    idxorder_ + idxorder_count_,
                     this);
   }
-  const_iterator MakeConstIterator(std::vector<ssize_t>::const_iterator pos) const {
+  const_iterator MakeConstIterator(ssize_t* pos) const {
     return const_iterator(pos,
                           table_,
-                          idxorder_.cbegin(),
-                          idxorder_.cend(),
+                          idxorder_,
+                          idxorder_ + idxorder_count_,
                           this);
   }
-  iterator MakeKnownGoodIterator(std::vector<ssize_t>::iterator pos) {
+  iterator MakeKnownGoodIterator(ssize_t* pos) {
     return iterator(pos,
                     table_,
-                    idxorder_.begin(),
-                    idxorder_.end(),
+                    idxorder_,
+                    idxorder_ + idxorder_count_,
                     this,
                     kHashItemKnownGood);
   }
-  const_iterator MakeKnownGoodConstIterator(std::vector<ssize_t>::const_iterator pos) const {
+  const_iterator MakeKnownGoodConstIterator(ssize_t* pos) const {
     return const_iterator(pos,
                           table_,
-                          idxorder_.cbegin(),
-                          idxorder_.cend(),
+                          idxorder_,
+                          idxorder_ + idxorder_count_,
                           this,
                           kHashItemKnownGood);
   }
@@ -1003,8 +1006,7 @@ class HashTable final
   struct RawStorageTag {};
   HashTable(RawStorageTag, ValueType* table, unsigned size)
       : table_(table),
-        idxmap_(size, -1),
-        idxorder_(),
+        idxorder_count_(0),
        table_size_(size),
        key_count_(0),
        deleted_count_(0),
@@ -1015,13 +1017,18 @@ class HashTable final
         modifications_(0)
 #endif
   {
+    idxmap_ = Allocator::template AllocateVectorBacking<ssize_t>(size*sizeof(ssize_t));
+    idxorder_ = Allocator::template AllocateVectorBacking<ssize_t>(size*sizeof(ssize_t));
+    for(size_t i = 0; i < size; i++)
+      idxmap_[i] = -1;
   }
 
  public:
  ValueType* table_;
 private:
-  std::vector<ssize_t> idxmap_;
-  std::vector<ssize_t> idxorder_;
+  ssize_t* idxmap_;
+  ssize_t* idxorder_;
+  size_t idxorder_count_;
 
   unsigned table_size_;
   unsigned key_count_;
@@ -1074,6 +1081,9 @@ inline HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::HashTable()
     : table_(nullptr),
+      idxmap_(nullptr),
+      idxorder_(nullptr),
+      idxorder_count_(0),
      table_size_(0),
      key_count_(0),
      deleted_count_(0),
@@ -1495,14 +1505,14 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
     DCHECK(can_reuse_deleted_entry);
     // Overwrite any data left over from last use, using placement new or
     // memset.
-    idxmap_[deleted_entry - table_] = idxorder_.size();
-    idxorder_.push_back(deleted_entry - table_);
+    idxmap_[deleted_entry - table_] = idxorder_count_;
+    idxorder_[idxorder_count_++] = deleted_entry - table_;
     ReinitializeBucket(*deleted_entry);
     entry = deleted_entry;
     --deleted_count_;
   } else {
-    idxmap_[i] = idxorder_.size();
-    idxorder_.push_back(i);
+    idxmap_[i] = idxorder_count_;
+    idxorder_[idxorder_count_++] = i;
   }
 
   HashTranslator::Translate(*entry, std::forward<T>(key),
@@ -1582,8 +1592,8 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
   // doing that in the translator so that they can be easily customized.
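+  // idxorder_ records bucket indices in insertion order, and idxmap_ maps a
+  // bucket index back to its slot in idxorder_ (-1 while unused); this is
+  // what lets iteration and find() walk entries in insertion order.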
 ConstructTraits<ValueType, Traits, Allocator>::NotifyNewElement(entry);
 
-  idxmap_[entry - table_] = idxorder_.size();
-  idxorder_.push_back(entry - table_);
+  idxmap_[entry - table_] = idxorder_count_;
+  idxorder_[idxorder_count_++] = entry - table_;
   ++key_count_;
   if (ShouldExpand())
     entry = Expand(entry);
@@ -1619,8 +1629,8 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
   Mover<ValueType, Allocator, Traits,
         Traits::template NeedsToForbidGCOnMove<>::value>::Move(std::move(entry),
                                                                *new_entry);
-  idxmap_[new_entry - table_] = idxorder_.size();
-  idxorder_.push_back(new_entry - table_);
+  idxmap_[new_entry - table_] = idxorder_count_;
+  idxorder_[idxorder_count_++] = new_entry - table_;
   key_count_++;
   return new_entry;
 }
@@ -1646,7 +1656,7 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
   if (idx == -1)
     return end();
 
-  return MakeKnownGoodIterator(idxorder_.begin() + idx);
+  return MakeKnownGoodIterator(idxorder_ + idx);
 }
 
 template ::
   if (idx == -1)
     return end();
 
-  return MakeKnownGoodConstIterator(idxorder_.begin() + idx);
+  return MakeKnownGoodConstIterator(idxorder_ + idx);
 }
 
 template ::
   HashTable new_hash_table(RawStorageTag{}, new_table, new_table_size);
 
   Value* new_entry = nullptr;
-  for (auto i : idxorder_) {
+  for (size_t idx = 0; idx < idxorder_count_; idx++) {
     // deleted entries show up in the order array as -1
+    size_t i = idxorder_[idx];
     if (i < 0 || IsEmptyOrDeletedBucket(table_[i]))
       continue;
     Value* reinserted_entry = new_hash_table.Reinsert(std::move(table_[i]));
@@ -1973,9 +1984,9 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
   Allocator::TraceBackingStoreIfMarked(new_hash_table.table_);
 
   ValueType* old_table = table_;
-  auto old_table_size = table_size_;
-  //auto old_table_order = std::move(idxorder_);
-  //auto old_table_map = std::move(idxmap_);
+  unsigned old_table_size = table_size_;
+  ssize_t* old_table_order = idxorder_;
+  ssize_t* old_table_map = idxmap_;
 
   // This swaps the newly allocated buffer with the current one. The store to
   // the current table has to be atomic to prevent races with concurrent marker.
@@ -1987,8 +1998,8 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
 
   new_hash_table.table_ = old_table;
   new_hash_table.table_size_ = old_table_size;
-  //new_hash_table.idxorder_ = std::move(old_table_order);
-  //new_hash_table.idxmap_ = std::move(old_table_map);
+  new_hash_table.idxorder_ = old_table_order;
+  new_hash_table.idxmap_ = old_table_map;
 
   // Explicitly clear since garbage collected HashTables don't do this on
   // destruction.
@@ -2064,10 +2075,13 @@ void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
   AsAtomicPtr(&table_)->store(nullptr, std::memory_order_relaxed);
-  idxorder_.clear();
-  idxmap_.clear();
+  AsAtomicPtr(&idxmap_)->store(nullptr, std::memory_order_relaxed);
+  AsAtomicPtr(&idxorder_)->store(nullptr, std::memory_order_relaxed);
+  idxorder_count_ = 0;
   table_size_ = 0;
   key_count_ = 0;
 }
@@ -2082,6 +2096,9 @@ template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
     HashTable(const HashTable& other)
     : table_(nullptr),
+      idxmap_(nullptr),
+      idxorder_(nullptr),
+      idxorder_count_(0),
       table_size_(0),
       key_count_(0),
       deleted_count_(0),
@@ -2115,6 +2132,9 @@ template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
     HashTable(HashTable&& other)
     : table_(nullptr),
+      idxmap_(nullptr),
+      idxorder_(nullptr),
+      idxorder_count_(0),
       table_size_(0),
       key_count_(0),
       deleted_count_(0),
diff --git a/tools/v8_context_snapshot/BUILD.gn b/tools/v8_context_snapshot/BUILD.gn
index 7b99dfe1b61d8c..209e00133ea783 100644
--- a/tools/v8_context_snapshot/BUILD.gn
+++ b/tools/v8_context_snapshot/BUILD.gn
@@ -102,6 +102,7 @@ if (use_v8_context_snapshot) {
   executable("v8_context_snapshot_generator") {
     sources = [ "v8_context_snapshot_generator.cc", "//components/viz/service/display/record_replay_render_nop.cc" ]
+    cflags = [ "-g" ]
 
     deps = [
       "//gin:gin",

From d002b51f09c2b5df378bfb4eda1051f3aa39fc5e Mon Sep 17 00:00:00 2001
From: Ori Bernstein
Date: Mon, 15 May 2023 16:08:37 -0400
Subject: [PATCH 9/9] fix some missing count copies

---
 third_party/blink/renderer/platform/wtf/hash_table.h | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/third_party/blink/renderer/platform/wtf/hash_table.h b/third_party/blink/renderer/platform/wtf/hash_table.h
index 3f95e71d179c85..7228f85c53c7b4 100644
--- a/third_party/blink/renderer/platform/wtf/hash_table.h
+++ b/third_party/blink/renderer/platform/wtf/hash_table.h
@@ -922,7 +922,7 @@ class HashTable final
   // We need to compress to make sure that the count does not grow
   // past the size of the backing array.
   bool ShouldCompress() const {
-    return idxorder_count_ == table_size_ - 1;
+    return idxorder_count_ >= table_size_ - 1;
   }
   bool MustRehashInPlace() const {
     return key_count_ * kMinLoad < table_size_ * 2;
@@ -1971,7 +1971,7 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
   Value* new_entry = nullptr;
   for (size_t idx = 0; idx < idxorder_count_; idx++) {
     // deleted entries show up in the order array as -1
-    size_t i = idxorder_[idx];
+    ssize_t i = idxorder_[idx];
     if (i < 0 || IsEmptyOrDeletedBucket(table_[i]))
       continue;
     Value* reinserted_entry = new_hash_table.Reinsert(std::move(table_[i]));
@@ -1994,6 +1994,7 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
   Allocator::template BackingWriteBarrier(&table_);
   idxmap_ = new_hash_table.idxmap_;
   idxorder_ = new_hash_table.idxorder_;
+  idxorder_count_ = new_hash_table.idxorder_count_;
   table_size_ = new_table_size;
 
   new_hash_table.table_ = old_table;
@@ -2187,6 +2188,7 @@ void HashTable