| affinity_tracker_ | kcenon::thread::numa_work_stealer | private |
| backoff_calculator_ | kcenon::thread::numa_work_stealer | private |
| calculate_batch_size(std::size_t victim_queue_size) const -> std::size_t | kcenon::thread::numa_work_stealer | private |
| config_ | kcenon::thread::numa_work_stealer | private |
| cpu_accessor_ | kcenon::thread::numa_work_stealer | private |
| cpu_accessor_fn typedef | kcenon::thread::numa_work_stealer | |
| deque_accessor_ | kcenon::thread::numa_work_stealer | private |
| deque_accessor_fn typedef | kcenon::thread::numa_work_stealer | |
| get_config() const -> const enhanced_work_stealing_config & | kcenon::thread::numa_work_stealer | |
| get_stats() const -> const work_stealing_stats & | kcenon::thread::numa_work_stealer | |
| get_stats_snapshot() const -> work_stealing_stats_snapshot | kcenon::thread::numa_work_stealer | |
| get_topology() const -> const numa_topology & | kcenon::thread::numa_work_stealer | |
| get_worker_cpu(std::size_t worker_id) const -> int | kcenon::thread::numa_work_stealer | private |
| numa_work_stealer(std::size_t worker_count, deque_accessor_fn deque_accessor, cpu_accessor_fn cpu_accessor, enhanced_work_stealing_config config={}) | kcenon::thread::numa_work_stealer | |
| numa_work_stealer(const numa_work_stealer &)=delete | kcenon::thread::numa_work_stealer | |
| numa_work_stealer(numa_work_stealer &&)=delete | kcenon::thread::numa_work_stealer | |
| operator=(const numa_work_stealer &)=delete | kcenon::thread::numa_work_stealer | |
| operator=(numa_work_stealer &&)=delete | kcenon::thread::numa_work_stealer | |
| record_steal(std::size_t thief_id, std::size_t victim_id) | kcenon::thread::numa_work_stealer | private |
| reset_stats() | kcenon::thread::numa_work_stealer | |
| rng_ | kcenon::thread::numa_work_stealer | mutable private |
| round_robin_index_ | kcenon::thread::numa_work_stealer | mutable private |
| select_victims(std::size_t requester_id, std::size_t count) -> std::vector< std::size_t > | kcenon::thread::numa_work_stealer | private |
| select_victims_adaptive(std::size_t requester_id, std::size_t count) -> std::vector< std::size_t > | kcenon::thread::numa_work_stealer | private |
| select_victims_hierarchical(std::size_t requester_id, std::size_t count) -> std::vector< std::size_t > | kcenon::thread::numa_work_stealer | private |
| select_victims_locality_aware(std::size_t requester_id, std::size_t count) -> std::vector< std::size_t > | kcenon::thread::numa_work_stealer | private |
| select_victims_numa_aware(std::size_t requester_id, std::size_t count) -> std::vector< std::size_t > | kcenon::thread::numa_work_stealer | private |
| select_victims_random(std::size_t requester_id, std::size_t count) -> std::vector< std::size_t > | kcenon::thread::numa_work_stealer | private |
| select_victims_round_robin(std::size_t requester_id, std::size_t count) -> std::vector< std::size_t > | kcenon::thread::numa_work_stealer | private |
| set_config(const enhanced_work_stealing_config &config) | kcenon::thread::numa_work_stealer | |
| stats_ | kcenon::thread::numa_work_stealer | private |
| steal_batch_for(std::size_t worker_id, std::size_t max_count) -> std::vector< job * > | kcenon::thread::numa_work_stealer | |
| steal_for(std::size_t worker_id) -> job * | kcenon::thread::numa_work_stealer | |
| topology_ | kcenon::thread::numa_work_stealer | private |
| worker_count_ | kcenon::thread::numa_work_stealer | private |
| workers_on_same_node(std::size_t worker_a, std::size_t worker_b) const -> bool | kcenon::thread::numa_work_stealer | private |
| ~numa_work_stealer()=default | kcenon::thread::numa_work_stealer | |