hazard_pointer_registry::get_thread_list(): the original listing is fragmentary, so the elided lines are filled in below as a plausible reconstruction (anything not present in the fragments is marked "reconstruction" in the comments).

thread_hazard_list* get_thread_list() {
    static thread_local thread_hazard_list* thread_list = nullptr;  // reconstruction

    if (thread_list == nullptr) {
        // Reconstruction: first try to adopt an inactive list left behind by a
        // terminated thread, claiming it by flipping its active flag to true.
        for (auto* curr = head_.load(std::memory_order_acquire);
             curr != nullptr; curr = curr->next) {
            bool expected = false;
            if (curr->active.compare_exchange_strong(expected, true,
                                                     std::memory_order_acq_rel,
                                                     std::memory_order_relaxed)) {
                thread_list = curr;  // reconstruction
                break;
            }
        }
    }

    if (thread_list == nullptr) {
        // Reconstruction: no reusable list, so allocate a fresh one and
        // prepend it to the global list with a CAS loop.
        thread_list = new thread_hazard_list();
        auto* old_head = head_.load(std::memory_order_relaxed);
        do {
            thread_list->next = old_head;
        } while (!head_.compare_exchange_weak(old_head, thread_list,
                                              std::memory_order_acq_rel,
                                              std::memory_order_relaxed));
    }
    // At thread exit the destructor of this thread_local object hands the
    // list back to the registry for reuse.
    static thread_local struct thread_cleanup {
        thread_hazard_list* list;  // reconstruction: member implied by cleanup{thread_list}
        ~thread_cleanup() {
            hazard_pointer_registry::instance().mark_inactive();
        }
    } cleanup{thread_list};

    return thread_list;  // reconstruction
}
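The cleanup hook works because the C++ standard guarantees that destructors of thread_local objects run at thread exit. The same idiom in self-contained form (the names here are illustrative, not from the library):

#include <cstdio>
#include <thread>

void worker() {
    static thread_local struct at_thread_exit {
        ~at_thread_exit() { std::puts("thread exiting: cleanup runs here"); }
    } hook;
    (void)hook;  // touching it forces construction on the first call
}

int main() {
    std::thread t(worker);
    t.join();  // the destructor has already run inside the thread
}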
hazard_pointer_registry::mark_inactive(), with the elided framing reconstructed:

void mark_inactive() {
    thread_hazard_list* thread_list = get_thread_list();  // reconstruction
    // Clear every slot so no stale pointer keeps a retired node alive...
    for (auto& h : thread_list->hazards) {
        h.store(nullptr, std::memory_order_release);
    }
    // ...then publish the list as reusable by future threads.
    thread_list->active.store(false, std::memory_order_release);
}
hazard_pointer_registry::scan_hazard_pointers(), elided lines reconstructed:

std::vector<void*> scan_hazard_pointers() {
    std::vector<void*> protected_ptrs;
    protected_ptrs.reserve(256);

    // Slots holding this sentinel are claimed but protect nothing yet,
    // so the scan skips them along with empty slots.
    const void* SLOT_OWNED_MARKER = reinterpret_cast<void*>(0x1);

    // Amortized housekeeping: only every CLEANUP_INTERVAL-th scan also
    // looks at inactive thread lists.
    static thread_local size_t scan_counter = 0;
    static constexpr size_t CLEANUP_INTERVAL = 100;
    bool should_cleanup = (++scan_counter % CLEANUP_INTERVAL == 0);

    size_t inactive_count = 0;
    // Reconstruction: walk every registered thread list.
    for (auto* curr = head_.load(std::memory_order_acquire);
         curr != nullptr; curr = curr->next) {
        for (auto& hazard : curr->hazards) {
            void* ptr = hazard.load(std::memory_order_acquire);
            if (ptr != nullptr && ptr != SLOT_OWNED_MARKER) {
                protected_ptrs.push_back(ptr);
            }
        }
        if (!curr->active.load(std::memory_order_acquire)) {
            ++inactive_count;  // reconstruction: the cleanup work gated by
                               // should_cleanup is elided in the original
        }
    }

    // Sort and deduplicate so reclaim() can use std::binary_search.
    std::sort(protected_ptrs.begin(), protected_ptrs.end());
    protected_ptrs.erase(std::unique(protected_ptrs.begin(), protected_ptrs.end()),
                         protected_ptrs.end());
    return protected_ptrs;
}
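Sorting and deduplicating here is what lets reclaim() below use std::binary_search instead of a linear scan per retired node. A self-contained check of that invariant (the addresses are illustrative only):

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
    std::vector<void*> ptrs = {
        reinterpret_cast<void*>(0x30), reinterpret_cast<void*>(0x10),
        reinterpret_cast<void*>(0x30), reinterpret_cast<void*>(0x20)};
    std::sort(ptrs.begin(), ptrs.end());
    ptrs.erase(std::unique(ptrs.begin(), ptrs.end()), ptrs.end());
    assert(ptrs.size() == 3);  // the duplicate 0x30 was removed
    assert(std::binary_search(ptrs.begin(), ptrs.end(),
                              reinterpret_cast<void*>(0x20)));
    assert(!std::binary_search(ptrs.begin(), ptrs.end(),
                               reinterpret_cast<void*>(0x40)));
}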
global_reclamation_manager::add_orphaned_nodes(), elided lines reconstructed:

void add_orphaned_nodes(retire_node* head, size_t count) {
    if (head == nullptr) return;  // reconstruction
    // Reconstruction: locate the tail of the incoming segment, then splice
    // the entire segment onto the global list with one CAS loop.
    retire_node* tail = head;
    while (tail->next != nullptr) tail = tail->next;
    retire_node* old_head = head_.load(std::memory_order_relaxed);
    do {
        tail->next = old_head;
    } while (!head_.compare_exchange_weak(old_head, head,
                                          std::memory_order_acq_rel,
                                          std::memory_order_relaxed));
    count_.fetch_add(count, std::memory_order_relaxed);
}
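The splice is the standard lock-free prepend, generalized from one node to a whole segment. The single-node version in self-contained form (Node and push are illustrative, not library names):

#include <atomic>
#include <cstdio>

struct Node {
    int value;
    Node* next;
};

std::atomic<Node*> list_head{nullptr};

void push(Node* n) {
    Node* old_head = list_head.load(std::memory_order_relaxed);
    do {
        n->next = old_head;  // on CAS failure, old_head is reloaded automatically
    } while (!list_head.compare_exchange_weak(old_head, n,
                                              std::memory_order_acq_rel,
                                              std::memory_order_relaxed));
}

int main() {
    static Node a{1, nullptr};
    static Node b{2, nullptr};
    push(&a);
    push(&b);
    for (Node* n = list_head.load(); n != nullptr; n = n->next) {
        std::printf("%d\n", n->value);  // prints 2 then 1
    }
}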
global_reclamation_manager::reclaim(), elided lines reconstructed:

size_t reclaim(const std::vector<void*>& protected_ptrs) {
    // Reconstruction: detach the whole orphan list, then partition it.
    retire_node* curr = head_.exchange(nullptr, std::memory_order_acq_rel);
    retire_node* keep_head = nullptr;
    retire_node* keep_tail = nullptr;
    size_t reclaimed = 0;
    size_t keep_count = 0;
    while (curr != nullptr) {
        retire_node* next = curr->next;
        bool is_protected = false;
        if (std::binary_search(protected_ptrs.begin(), protected_ptrs.end(),
                               curr->ptr)) {
            is_protected = true;  // valid because the input is sorted and deduplicated
        }
        if (is_protected) {  // still referenced: move onto the keep list
            if (keep_head == nullptr) keep_head = curr;
            else keep_tail->next = curr;
            keep_tail = curr;
            curr->next = nullptr;
            ++keep_count;
        } else {             // unprotected: free via the stored deleter
            curr->deleter(curr->ptr);
            delete curr;
            ++reclaimed;
        }
        curr = next;
    }
    count_.fetch_sub(reclaimed + keep_count, std::memory_order_relaxed);
    // Reconstruction: survivors are re-queued, which restores keep_count.
    if (keep_head != nullptr) add_orphaned_nodes(keep_head, keep_count);
    return reclaimed;
}
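Taken together with scan_hazard_pointers(), a periodic reclamation pass would look roughly like this; a sketch assuming the declarations above are in scope (when to run it is up to the caller, not fixed by the library):

#include <vector>

void reclamation_pass() {
    // Snapshot every pointer any thread currently protects
    // (returned sorted and deduplicated).
    std::vector<void*> protected_ptrs =
        hazard_pointer_registry::instance().scan_hazard_pointers();

    // Free all orphaned nodes whose pointers are absent from the snapshot.
    size_t freed = global_reclamation_manager::instance().reclaim(protected_ptrs);
    (void)freed;  // e.g., feed into a metric or an adaptive schedule
}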
global_reclamation_manager::get_orphaned_count():

size_t get_orphaned_count() const {
    return count_.load(std::memory_order_relaxed);
}
hazard_pointer::hazard_pointer(), elided lines reconstructed; the desired value of the CAS is not in the fragments, and installing SLOT_OWNED_MARKER is an assumption consistent with the scan above:

hazard_pointer::hazard_pointer() {
    auto* thread_list = hazard_pointer_registry::instance().get_thread_list();  // reconstruction
    for (size_t i = 0; i < thread_hazard_list::MAX_HAZARDS_PER_THREAD; ++i) {   // reconstruction
        void* expected = nullptr;
        // Claim the first free slot by installing the ownership marker (assumed).
        if (thread_list->hazards[i].compare_exchange_strong(
                expected, const_cast<void*>(SLOT_OWNED_MARKER),
                std::memory_order_acq_rel,
                std::memory_order_relaxed)) {
            slot_ = &thread_list->hazards[i];
            slot_index_ = i;  // reconstruction
            return;
        }
    }
    throw std::runtime_error("Hazard pointer slots exhausted");
}
hazard_pointer move constructor (signature reconstructed):

hazard_pointer::hazard_pointer(hazard_pointer&& other) noexcept
    : slot_(other.slot_), slot_index_(other.slot_index_) {
    other.slot_ = nullptr;
    other.slot_index_ = 0;
}
hazard_pointer move assignment, with the elided release of the current slot reconstructed:

hazard_pointer& hazard_pointer::operator=(hazard_pointer&& other) noexcept {
    if (this != &other) {
        // Reconstruction: give up our own slot before taking over the other's.
        if (slot_ != nullptr) {
            slot_->store(nullptr, std::memory_order_release);
        }
        slot_ = other.slot_;
        slot_index_ = other.slot_index_;
        other.slot_ = nullptr;
        other.slot_index_ = 0;
    }
    return *this;
}
The destructor (the fragment sits between the move assignment and the getters; the null check for moved-from objects is reconstructed):

hazard_pointer::~hazard_pointer() {
    if (slot_ != nullptr) {  // reconstruction: moved-from objects own no slot
        // Storing nullptr drops protection and returns the slot to the pool.
        slot_->store(nullptr, std::memory_order_release);
    }
}
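reset() is documented as releasing protection, but its body is not in the excerpt. Given the marker scheme, a plausible implementation would restore SLOT_OWNED_MARKER rather than nullptr, clearing the protection while keeping the slot claimed; this is an assumption, not the source's code:

// Hypothetical reconstruction: keep slot ownership, drop only the protection.
void hazard_pointer::reset() noexcept {
    if (slot_ != nullptr) {
        slot_->store(const_cast<void*>(SLOT_OWNED_MARKER),
                     std::memory_order_release);
    }
}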
hazard_pointer::get_protected(); filtering out the ownership marker is an assumption consistent with "may be null":

void* hazard_pointer::get_protected() const noexcept {
    if (slot_ == nullptr) return nullptr;  // reconstruction
    void* ptr = slot_->load(std::memory_order_acquire);
    return (ptr == SLOT_OWNED_MARKER) ? nullptr : ptr;  // reconstruction
}
hazard_pointer::is_protected(), with the final check reconstructed:

bool hazard_pointer::is_protected() const noexcept {
    if (slot_ == nullptr) return false;  // reconstruction
    void* ptr = slot_->load(std::memory_order_acquire);
    return ptr != nullptr && ptr != SLOT_OWNED_MARKER;  // reconstruction
}
Summary of the documented classes and members:

class global_reclamation_manager: global manager for orphaned nodes from terminated threads.
    static global_reclamation_manager& instance(): access the process-wide singleton.
    void add_orphaned_nodes(retire_node* head, size_t count): add a list of retired nodes to the global orphanage.
    size_t reclaim(const std::vector<void*>& protected_ptrs): reclaim orphaned nodes that are no longer protected; returns the number of nodes freed.
    size_t get_orphaned_count() const: get statistics (the current number of orphaned nodes).
    std::atomic<retire_node*> head_: head of the orphaned-node list.
    std::atomic<size_t> count_: number of nodes currently orphaned.
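A thread that exits while still holding retired-but-unreclaimed nodes would hand them over roughly like this; a sketch assuming the API above (orphan_pending and its arguments are illustrative):

#include <cstddef>

void orphan_pending(retire_node* local_head, std::size_t local_count) {
    if (local_head != nullptr) {
        global_reclamation_manager::instance().add_orphaned_nodes(local_head,
                                                                  local_count);
    }
}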
class hazard_pointer_registry: global hazard pointer registry; manages all thread-local hazard lists.
    static hazard_pointer_registry& instance(): access the process-wide singleton.
    thread_hazard_list* get_thread_list(): get or create the calling thread's hazard list.
    void mark_inactive(): mark the current thread's list as inactive so a new thread can reuse it.
    std::vector<void*> scan_hazard_pointers(): scan all hazard pointers and collect the protected pointers (returned sorted and deduplicated).
    size_t get_active_thread_count() const: get the total number of active threads.
    std::atomic<thread_hazard_list*> head_: head of the global list of per-thread hazard lists.
    std::atomic<size_t> thread_count_: number of registered thread lists.
class hazard_pointer: a single hazard pointer that protects one object from reclamation. Uses the RAII pattern: the slot is acquired on construction and automatically released on destruction.
    hazard_pointer(): default constructor; acquires a hazard pointer slot (throws std::runtime_error if the thread's slots are exhausted).
    hazard_pointer(hazard_pointer&& other) noexcept: move constructor; transfers slot ownership.
    hazard_pointer& operator=(hazard_pointer&& other) noexcept: move assignment.
    ~hazard_pointer(): destructor; automatically releases protection.
    void reset() noexcept: release protection.
    void* get_protected() const noexcept: get the protected pointer (may be null).
    bool is_protected() const noexcept: check if currently protecting a pointer.
    static const void* SLOT_OWNED_MARKER: sentinel value for a slot that is claimed but not yet protecting an object.
    std::atomic<void*>* slot_: the slot this object owns in its thread's hazard list.
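The intended reader-side pattern is protect-then-revalidate: publish the candidate pointer, then re-read the shared location to confirm it was not retired in between. A minimal sketch, assuming the class above plus a hypothetical protect(ptr) setter (the member list shows only the getters):

#include <atomic>

struct node { int value; std::atomic<node*> next; };  // illustrative type
std::atomic<node*> stack_head{nullptr};

int read_top(int fallback) {
    hazard_pointer hp;  // RAII: claims a slot, releases it on scope exit
    node* top;
    do {
        top = stack_head.load(std::memory_order_acquire);
        if (top == nullptr) return fallback;
        hp.protect(top);  // hypothetical setter: stores top into the slot
        // Revalidate: if the head moved, top may already have been retired.
    } while (top != stack_head.load(std::memory_order_acquire));
    return top->value;  // safe to dereference while hp protects it
}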
File overview: hazard pointer implementation for lock-free memory reclamation, part of the core threading foundation of the thread system library.
struct retire_node: a retired node awaiting deletion.
    void* ptr: the retired object (compared against the protected set in reclaim()).
    retire_node* next: intrusive list link.
    std::function<void(void*)> deleter: invoked on ptr when the node is finally reclaimed.
struct thread_hazard_list: thread-local hazard pointer list; each thread maintains a small array of hazard pointers.
    static constexpr size_t MAX_HAZARDS_PER_THREAD: number of hazard slots per thread.
    std::atomic<void*> hazards[MAX_HAZARDS_PER_THREAD]: the hazard slots themselves.
    std::atomic<bool> active: true while a live thread owns this list.
    thread_hazard_list* next: link in the registry's global list.
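For orientation, the documented members assemble into roughly the following layout; a sketch, with the value of MAX_HAZARDS_PER_THREAD assumed (the excerpt does not show it):

#include <atomic>
#include <cstddef>

struct thread_hazard_list {
    static constexpr std::size_t MAX_HAZARDS_PER_THREAD = 8;  // assumed value
    std::atomic<void*> hazards[MAX_HAZARDS_PER_THREAD]{};     // one slot per hazard_pointer
    std::atomic<bool> active{true};                           // false once the owner exits
    thread_hazard_list* next{nullptr};                        // link in the registry's global list
};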