26class hazard_pointer_domain;
41 h.store(
nullptr, std::memory_order_relaxed);
77 std::atomic<thread_hazard_list*>
head_{
nullptr};
94 size_t reclaim(
const std::vector<void*>& protected_ptrs);
102 std::atomic<retire_node*>
head_{
nullptr};
136 template <
typename T>
139 slot_->store(
static_cast<void*
>(ptr), std::memory_order_seq_cst);
146 void reset() noexcept;
196 stats get_stats()
const;
213 static constexpr size_t BASE_RECLAIM_THRESHOLD = 64;
214 static constexpr size_t RECLAIM_THRESHOLD_PER_THREAD = 16;
217 auto& registry = detail::hazard_pointer_registry::instance();
218 size_t active_threads = registry.get_active_thread_count();
220 return std::min(
size_t(512),
221 BASE_RECLAIM_THRESHOLD + active_threads * RECLAIM_THRESHOLD_PER_THREAD);
225 size_t scan_and_reclaim(
const std::vector<void*>& protected_ptrs);
236 mutable std::atomic<size_t> objects_retired_{0};
237 mutable std::atomic<size_t> objects_reclaimed_{0};
238 mutable std::atomic<size_t> scan_count_{0};
249 auto& retire_list = get_thread_retire_list();
250 retire_list.add(ptr);
251 objects_retired_.fetch_add(1, std::memory_order_relaxed);
255 if (retire_list.count >= retire_list.get_adaptive_threshold()) {
262 scan_count_.fetch_add(1, std::memory_order_relaxed);
266 auto protected_ptrs = registry.scan_hazard_pointers();
269 auto& retire_list = get_thread_retire_list();
270 size_t reclaimed = retire_list.scan_and_reclaim(protected_ptrs);
276 objects_reclaimed_.fetch_add(reclaimed, std::memory_order_relaxed);
284 return stats{.hazard_pointers_allocated = registry.get_active_thread_count() *
286 .objects_retired = objects_retired_.load(std::memory_order_relaxed),
287 .objects_reclaimed = objects_reclaimed_.load(std::memory_order_relaxed),
288 .scan_count = scan_count_.load(std::memory_order_relaxed)};
294 auto& retire_list = get_thread_retire_list();
295 retire_list.reclaim_all();
301 [](
void* p) {
delete static_cast<T*
>(p); });
310 const std::vector<void*>& protected_ptrs) {
311 size_t reclaimed = 0;
318 if (std::binary_search(protected_ptrs.begin(), protected_ptrs.end(), (*curr)->ptr)) {
325 *curr = (*curr)->
next;
334 curr = &(*curr)->
next;
345 auto protected_ptrs = registry.scan_hazard_pointers();
346 scan_and_reclaim(protected_ptrs);
Global manager for orphaned nodes from terminated threads.
std::atomic< size_t > count_
std::atomic< retire_node * > head_
void add_orphaned_nodes(retire_node *head, size_t count)
Add a list of retired nodes to the global orphanage.
global_reclamation_manager()=default
size_t get_orphaned_count() const
Get statistics.
static global_reclamation_manager & instance()
size_t reclaim(const std::vector< void * > &protected_ptrs)
Reclaim orphaned nodes that are no longer protected.
Global hazard pointer registry. Manages all thread-local hazard lists.
static hazard_pointer_registry & instance()
std::atomic< thread_hazard_list * > head_
std::vector< void * > scan_hazard_pointers()
Scan all hazard pointers and collect protected pointers.
thread_hazard_list * get_thread_list()
Get or create thread-local hazard list.
std::atomic< size_t > thread_count_
hazard_pointer_registry()=default
void mark_inactive()
Mark current thread's list as inactive.
size_t get_active_thread_count() const
Get total number of active threads.
Domain managing hazard pointers and retirement for a specific type.
hazard_pointer acquire()
Acquire a hazard pointer for this domain.
static thread_retire_list & get_thread_retire_list()
size_t reclaim()
Force reclamation scan (optional, for testing)
void retire(T *ptr)
Retire an object for later reclamation.
hazard_pointer_domain()=default
~hazard_pointer_domain()
Destructor - ensures all retired objects are reclaimed.
static hazard_pointer_domain & global()
Get the global domain instance for type T.
Single hazard pointer that protects one object from reclamation. Uses RAII pattern - automatically releases protection on destruction.
void reset() noexcept
Release protection.
~hazard_pointer()
Destructor - automatically releases protection.
hazard_pointer & operator=(const hazard_pointer &)=delete
static const void * SLOT_OWNED_MARKER
void protect(T *ptr) noexcept
Protect a pointer from reclamation.
void * get_protected() const noexcept
Get the protected pointer (may be null)
bool is_protected() const noexcept
Check if currently protecting a pointer.
hazard_pointer(const hazard_pointer &)=delete
Non-copyable.
hazard_pointer()
Default constructor - acquires a hazard pointer slot.
hazard_pointer & operator=(hazard_pointer &&other) noexcept
Move assignment.
std::atomic< void * > * slot_
Core threading foundation of the thread system library.
Retire node for pending deletion.
retire_node(void *p, std::function< void(void *)> d)
std::function< void(void *)> deleter
Thread-local hazard pointer list. Each thread maintains a small array of hazard pointers.
static constexpr size_t MAX_HAZARDS_PER_THREAD
std::atomic< bool > active
std::atomic< void * > hazards[MAX_HAZARDS_PER_THREAD]
thread_hazard_list * next
size_t hazard_pointers_allocated
size_t get_adaptive_threshold() const
size_t scan_and_reclaim(const std::vector< void * > &protected_ptrs)