Monitoring System 0.1.0
System resource monitoring with pluggable collectors and alerting
Loading...
Searching...
No Matches
memory_pool.h
Go to the documentation of this file.
1// BSD 3-Clause License
2// Copyright (c) 2021-2025, 🍀☀🌕🌥 🌊
3// See the LICENSE file in the project root for full license information.
4
11#pragma once
12
13#include <atomic>
14#include <cstddef>
15#include <cstdlib>
16#include <memory>
17#include <mutex>
18#include <new>
19#include <vector>
20
21#ifdef _MSC_VER
22#include <malloc.h> // For _aligned_malloc/_aligned_free on Windows
23#endif
24
26
27namespace detail {
28
/**
 * @brief Platform-specific aligned memory allocation.
 *
 * Both the MSVC CRT (_aligned_malloc) and POSIX/C11 (std::aligned_alloc)
 * allocators require the size to be a multiple of the alignment, so the
 * round-up is computed once here instead of being duplicated per branch.
 *
 * @param alignment Required alignment in bytes; must be a power of two
 *                  (enforced upstream by memory_pool_config::validate()).
 * @param size      Requested size in bytes; rounded up to a multiple of
 *                  alignment before allocating.
 * @return Pointer to aligned storage, or nullptr on failure. Release with
 *         aligned_free_impl().
 */
inline void* aligned_alloc_impl(size_t alignment, size_t size) {
    // Round size up to the next multiple of alignment. The mask trick is
    // valid only because alignment is a power of two.
    const size_t aligned_size = (size + alignment - 1) & ~(alignment - 1);
#ifdef _MSC_VER
    // MSVC doesn't support std::aligned_alloc; use the CRT equivalent.
    return _aligned_malloc(aligned_size, alignment);
#else
    return std::aligned_alloc(alignment, aligned_size);
#endif
}
47
/**
 * @brief Platform-specific aligned memory deallocation.
 *
 * Releases storage obtained from aligned_alloc_impl(). Passing nullptr is
 * a no-op on every platform, matching free()/_aligned_free() semantics.
 *
 * @param ptr Pointer returned by aligned_alloc_impl() (may be nullptr).
 */
inline void aligned_free_impl(void* ptr) {
#ifdef _MSC_VER
    // Memory from _aligned_malloc must go back through _aligned_free.
    _aligned_free(ptr);
#else
    // std::aligned_alloc memory is released with the ordinary free.
    std::free(ptr);
#endif
}
59
60} // namespace detail
61
62namespace kcenon::monitoring {
63
68 size_t initial_blocks = 256;
69 size_t max_blocks = 4096;
70 size_t block_size = 64;
71 size_t alignment = 8;
73
78 bool validate() const {
79 if (initial_blocks == 0) {
80 return false;
81 }
82 if (max_blocks != 0 && max_blocks < initial_blocks) {
83 return false;
84 }
85 if (block_size == 0) {
86 return false;
87 }
88 // Block size must be 8-byte aligned
89 if (block_size % 8 != 0) {
90 return false;
91 }
92 // Alignment must be power of 2
93 if (alignment == 0 || (alignment & (alignment - 1)) != 0) {
94 return false;
95 }
96 return true;
97 }
98};
99
104 std::atomic<size_t> total_allocations{0};
105 std::atomic<size_t> total_deallocations{0};
106 std::atomic<size_t> allocation_failures{0};
107 std::atomic<size_t> peak_usage{0};
108
115
117 if (this != &other) {
118 total_allocations.store(other.total_allocations.load());
119 total_deallocations.store(other.total_deallocations.load());
120 allocation_failures.store(other.allocation_failures.load());
121 peak_usage.store(other.peak_usage.load());
122 }
123 return *this;
124 }
125
127 : total_allocations(other.total_allocations.load())
128 , total_deallocations(other.total_deallocations.load())
129 , allocation_failures(other.allocation_failures.load())
130 , peak_usage(other.peak_usage.load()) {}
131
133 if (this != &other) {
134 total_allocations.store(other.total_allocations.load());
135 total_deallocations.store(other.total_deallocations.load());
136 allocation_failures.store(other.allocation_failures.load());
137 peak_usage.store(other.peak_usage.load());
138 }
139 return *this;
140 }
141
147 auto total = total_allocations.load() + allocation_failures.load();
148 if (total == 0) {
149 return 100.0;
150 }
151 return (static_cast<double>(total_allocations.load()) / static_cast<double>(total)) * 100.0;
152 }
153
157 void reset() {
158 total_allocations.store(0);
159 total_deallocations.store(0);
160 allocation_failures.store(0);
161 peak_usage.store(0);
162 }
163};
164
172public:
177
182 explicit memory_pool(const memory_pool_config& config)
183 : config_(config)
184 , block_size_(config.block_size)
185 , total_blocks_(config.initial_blocks) {
187 }
188
190 for (auto* chunk : memory_chunks_) {
192 }
193 }
194
195 // Disable copy
196 memory_pool(const memory_pool&) = delete;
198
199 // Enable move
201 : config_(other.config_)
202 , block_size_(other.block_size_)
203 , total_blocks_(other.total_blocks_)
204 , free_blocks_(std::move(other.free_blocks_))
205 , memory_chunks_(std::move(other.memory_chunks_))
206 , stats_(std::move(other.stats_)) {
207 other.total_blocks_ = 0;
208 }
209
211 if (this != &other) {
212 for (auto* chunk : memory_chunks_) {
214 }
215 config_ = other.config_;
216 block_size_ = other.block_size_;
217 total_blocks_ = other.total_blocks_;
218 free_blocks_ = std::move(other.free_blocks_);
219 memory_chunks_ = std::move(other.memory_chunks_);
220 stats_ = std::move(other.stats_);
221 other.total_blocks_ = 0;
222 }
223 return *this;
224 }
225
// Allocate one fixed-size block from the pool.
// Thread-safe: the entire operation runs under mutex_.
// Returns resource_unavailable when the free list is empty and the pool
// cannot grow any further.
230 common::Result<void*> allocate() {
231 std::lock_guard<std::mutex> lock(mutex_);
232
233 if (free_blocks_.empty()) {
234 // Try to grow the pool
235 if (!grow_pool()) {
// NOTE(review): lines 236 and 244-245 are not visible in this extract —
// presumably stats_ failure/success counters and peak usage are updated
// there; confirm against the full source.
237 return common::Result<void*>::err(error_info(monitoring_error_code::resource_unavailable, "Memory pool exhausted").to_common_error());
238 }
239 }
240
// Hand out the most recently returned block (the vector is used as a
// LIFO stack).
241 void* block = free_blocks_.back();
242 free_blocks_.pop_back();
243
246
247 return common::ok(block);
248 }
249
// Return a block to the pool's free list.
// Rejects nullptr and pointers that were not handed out by this pool
// (wrong chunk, or not on a block boundary).
255 common::VoidResult deallocate(void* ptr) {
256 if (ptr == nullptr) {
257 return common::VoidResult::err(error_info(monitoring_error_code::invalid_argument, "Cannot deallocate null pointer").to_common_error());
258 }
259
260 std::lock_guard<std::mutex> lock(mutex_);
261
262 // Verify the pointer belongs to this pool
263 if (!is_owned_block(ptr)) {
264 return common::VoidResult::err(error_info(monitoring_error_code::invalid_argument, "Pointer does not belong to this pool").to_common_error());
265 }
266
// NOTE(review): no double-free detection — a block already on the free
// list is accepted again. Line 268 is not visible in this extract;
// presumably stats_.total_deallocations is incremented there.
267 free_blocks_.push_back(ptr);
269
270 return common::ok();
271 }
272
280 template<typename T, typename... Args>
281 common::Result<T*> allocate_object(Args&&... args) {
282 if (sizeof(T) > block_size_) {
283 return common::Result<T*>::err(error_info(monitoring_error_code::invalid_argument, "Object size exceeds block size").to_common_error());
284 }
285
286 auto result = allocate();
287 if (result.is_err()) {
288 return common::Result<T*>::err(error_info(monitoring_error_code::resource_unavailable, "Failed to allocate memory for object").to_common_error());
289 }
290
291 void* ptr = result.value();
292 T* obj = new (ptr) T(std::forward<Args>(args)...);
293
294 return common::ok(obj);
295 }
296
303 template<typename T>
304 common::VoidResult deallocate_object(T* obj) {
305 if (obj == nullptr) {
306 return common::VoidResult::err(error_info(monitoring_error_code::invalid_argument, "Cannot deallocate null object").to_common_error());
307 }
308
309 obj->~T();
310 return deallocate(static_cast<void*>(obj));
311 }
312
317 size_t available_blocks() const {
318 std::lock_guard<std::mutex> lock(mutex_);
319 return free_blocks_.size();
320 }
321
326 size_t total_blocks() const {
327 return total_blocks_;
328 }
329
334 size_t block_size() const {
335 return block_size_;
336 }
337
343 return stats_;
344 }
345
350 stats_.reset();
351 }
352
353private:
355 size_t chunk_size = total_blocks_ * block_size_;
356 void* chunk = detail::aligned_alloc_impl(config_.alignment, chunk_size);
357
358 if (chunk == nullptr) {
359 throw std::bad_alloc();
360 }
361
362 memory_chunks_.push_back(chunk);
363
364 // Initialize free block list
366 char* ptr = static_cast<char*>(chunk);
367 for (size_t i = 0; i < total_blocks_; ++i) {
368 free_blocks_.push_back(ptr + i * block_size_);
369 }
370 }
371
// Grow the pool by allocating another chunk of blocks.
// Called from allocate() with mutex_ already held.
// Returns false when the pool may not grow further or the underlying
// aligned allocation fails; true once the new blocks are on the free list.
372 bool grow_pool() {
// NOTE(review): line 373 is not visible in this extract — presumably the
// guard (max_blocks cap reached) whose body is the return below; confirm
// against the full source.
374 return false;
375 }
376
// Grow by the current size (i.e. double), clamped to the remaining
// headroom under config_.max_blocks.
// NOTE(review): when max_blocks == 0 ("unlimited") the subtraction wraps
// to a huge unsigned value and std::min() still picks total_blocks_ —
// correct in effect, but it relies on unsigned wraparound; an explicit
// max_blocks == 0 branch would be clearer.
377 size_t new_blocks = std::min(total_blocks_, config_.max_blocks - total_blocks_);
378 if (new_blocks == 0) {
379 new_blocks = total_blocks_; // Double the size
380 }
381
382 size_t chunk_size = new_blocks * block_size_;
383 void* chunk = detail::aligned_alloc_impl(config_.alignment, chunk_size);
384
// Growth failure is reported to the caller rather than thrown.
385 if (chunk == nullptr) {
386 return false;
387 }
388
389 memory_chunks_.push_back(chunk);
390
// Carve the new chunk into blocks and push each onto the free list.
391 char* ptr = static_cast<char*>(chunk);
392 for (size_t i = 0; i < new_blocks; ++i) {
393 free_blocks_.push_back(ptr + i * block_size_);
394 }
395
396 total_blocks_ += new_blocks;
397 return true;
398 }
399
// Check whether ptr points at a block belonging to one of this pool's
// chunks and sits exactly on a block boundary.
// Caller (deallocate()) holds mutex_.
400 bool is_owned_block(void* ptr) const {
401 for (size_t i = 0; i < memory_chunks_.size(); ++i) {
402 char* chunk_start = static_cast<char*>(memory_chunks_[i]);
// NOTE(review): line 404 (the block count used for chunks other than the
// first) is not visible in this extract; confirm it matches the sizes
// produced by grow_pool(), otherwise chunk_end is wrong for grown chunks.
403 size_t chunk_blocks = (i == 0) ? config_.initial_blocks :
405 char* chunk_end = chunk_start + chunk_blocks * block_size_;
406
// NOTE(review): relational comparison of pointers from unrelated
// allocations is technically unspecified behavior; it works on flat
// address spaces, but comparing as uintptr_t would be cleaner.
407 if (ptr >= chunk_start && ptr < chunk_end) {
408 // Verify alignment
409 size_t offset = static_cast<char*>(ptr) - chunk_start;
410 if (offset % block_size_ == 0) {
411 return true;
412 }
413 }
414 }
415 return false;
416 }
417
419 size_t current_usage = total_blocks_ - free_blocks_.size();
420 size_t peak = stats_.peak_usage.load();
421 while (current_usage > peak) {
422 if (stats_.peak_usage.compare_exchange_weak(peak, current_usage)) {
423 break;
424 }
425 }
426 }
427
431 std::vector<void*> free_blocks_;
432 std::vector<void*> memory_chunks_;
433 mutable std::mutex mutex_;
435};
436
441inline std::unique_ptr<memory_pool> make_memory_pool() {
442 return std::make_unique<memory_pool>();
443}
444
450inline std::unique_ptr<memory_pool> make_memory_pool(const memory_pool_config& config) {
451 return std::make_unique<memory_pool>(config);
452}
453
458inline std::vector<memory_pool_config> create_default_pool_configs() {
459 return {
460 // Small objects pool
461 {.initial_blocks = 512, .max_blocks = 2048, .block_size = 32, .alignment = 8},
462 // Medium objects pool
463 {.initial_blocks = 256, .max_blocks = 1024, .block_size = 128, .alignment = 16},
464 // Large objects pool
465 {.initial_blocks = 64, .max_blocks = 256, .block_size = 512, .alignment = 32},
466 // Thread-local cache enabled pool
467 {.initial_blocks = 256, .max_blocks = 1024, .block_size = 64, .alignment = 8, .use_thread_local_cache = true}
468 };
469}
470
471} // namespace kcenon::monitoring
Thread-safe fixed-size block memory allocator.
void reset_statistics()
Reset statistics.
common::VoidResult deallocate_object(T *obj)
Destroy and deallocate an object.
std::vector< void * > free_blocks_
size_t block_size() const
Get block size.
memory_pool & operator=(memory_pool &&other) noexcept
const memory_pool_statistics & get_statistics() const
Get pool statistics.
memory_pool(const memory_pool &)=delete
bool is_owned_block(void *ptr) const
size_t available_blocks() const
Get number of available blocks.
memory_pool(memory_pool &&other) noexcept
memory_pool(const memory_pool_config &config)
Construct with configuration.
std::vector< void * > memory_chunks_
size_t total_blocks() const
Get total number of blocks.
memory_pool()
Default constructor with default configuration.
common::Result< void * > allocate()
Allocate a memory block.
common::VoidResult deallocate(void *ptr)
Deallocate a memory block.
memory_pool_statistics stats_
common::Result< T * > allocate_object(Args &&... args)
Allocate and construct an object.
memory_pool & operator=(const memory_pool &)=delete
Internal implementation details - not part of public API.
Definition memory_pool.h:27
void * aligned_alloc_impl(size_t alignment, size_t size)
Platform-specific aligned memory allocation.
Definition memory_pool.h:35
void aligned_free_impl(void *ptr)
Platform-specific aligned memory deallocation.
Definition memory_pool.h:52
std::unique_ptr< memory_pool > make_memory_pool()
Create a memory pool with default configuration.
std::vector< memory_pool_config > create_default_pool_configs()
Create default pool configurations for different use cases.
Result pattern type definitions for monitoring system.
Extended error information with context.
Configuration for memory pool.
Definition memory_pool.h:67
size_t block_size
Size of each block in bytes.
Definition memory_pool.h:70
bool use_thread_local_cache
Use thread-local caching.
Definition memory_pool.h:72
size_t alignment
Memory alignment (must be power of 2)
Definition memory_pool.h:71
size_t max_blocks
Maximum number of blocks (0 = unlimited)
Definition memory_pool.h:69
size_t initial_blocks
Initial number of blocks.
Definition memory_pool.h:68
bool validate() const
Validate configuration.
Definition memory_pool.h:78
Statistics for memory pool operations.
double get_allocation_success_rate() const
Get allocation success rate.
memory_pool_statistics(memory_pool_statistics &&other) noexcept
memory_pool_statistics & operator=(memory_pool_statistics &&other) noexcept
memory_pool_statistics & operator=(const memory_pool_statistics &other)
memory_pool_statistics(const memory_pool_statistics &other)
void reset()
Reset all statistics.