// --- High-load stress test (fragment; enclosing TEST(...) header not visible here) ---
// Spawns NUM_THREADS workers, each performing OPERATIONS_PER_THREAD traced
// operations, then reports throughput and latency percentiles.
// NOTE(review): NUM_THREADS/OPERATIONS_PER_THREAD/TEST_DURATION are defined
// twice (orig. lines 124-126 and 128-130) — presumably alternate #if/#else
// build-config branches whose preprocessor lines are not visible. Confirm
// against the original file.
124 const int NUM_THREADS = 50;
125 const int OPERATIONS_PER_THREAD = 5000;
126 const auto TEST_DURATION = 60s;
128 const int NUM_THREADS = 100;
129 const int OPERATIONS_PER_THREAD = 10000;
130 const auto TEST_DURATION = 30s;
// distributed_tracer is a project type; shared by all workers via [&] capture.
134 auto tracer = std::make_unique<distributed_tracer>();
// Cross-thread counters are atomics; raw latency samples go into a
// mutex-guarded vector for percentile computation after the workers join.
138 std::atomic<int64_t> total_operations{0};
139 std::atomic<int64_t> failed_operations{0};
140 std::atomic<int64_t> total_latency_us{0};
141 std::vector<double> latencies;
142 std::mutex latency_mutex;
145 auto start_time = std::chrono::steady_clock::now();
148 std::vector<std::thread> workers;
149 for (
int t = 0; t < NUM_THREADS; ++t) {
150 workers.emplace_back([&, t]() {
// Per-thread RNG: random jitter of 1-100 us per operation.
151 std::random_device rd;
152 std::mt19937 gen(rd());
153 std::uniform_int_distribution<> dis(1, 100);
155 for (
int i = 0; i < OPERATIONS_PER_THREAD; ++i) {
156 auto op_start = std::chrono::high_resolution_clock::now();
// start_span returns a result-like type; only tag the span on success.
159 auto span_result = tracer->start_span(
160 "stress_op_" + std::to_string(t) +
"_" + std::to_string(i));
162 if (span_result.is_ok()) {
// Simulate work between span start and tagging.
164 std::this_thread::sleep_for(std::chrono::microseconds(dis(gen)));
167 span_result.value()->tags[
"thread_id"] = std::to_string(t);
168 span_result.value()->tags[
"operation_id"] = std::to_string(i);
175 auto op_end = std::chrono::high_resolution_clock::now();
176 auto latency = std::chrono::duration_cast<std::chrono::microseconds>(
177 op_end - op_start).count();
179 total_latency_us += latency;
// Guarded push_back; the enclosing scope braces (orig. lines 180-187)
// are not visible in this fragment.
183 std::lock_guard<std::mutex> lock(latency_mutex);
184 latencies.push_back(
static_cast<double>(latency));
// Early-exit once the wall-clock budget is exhausted (body not visible;
// presumably breaks out of the operation loop — confirm).
188 if (std::chrono::steady_clock::now() - start_time > TEST_DURATION) {
// Join all workers (join call on orig. lines 197-198 not visible).
196 for (
auto& worker : workers) {
201 auto end_time = std::chrono::steady_clock::now();
202 auto duration = std::chrono::duration_cast<std::chrono::seconds>(end_time - start_time);
// Percentiles over the sorted sample set; the empty() guard avoids
// indexing an empty vector.
205 std::sort(latencies.begin(), latencies.end());
206 double p50 = latencies.empty() ? 0 : latencies[
static_cast<size_t>(latencies.size() * 0.5)];
207 double p95 = latencies.empty() ? 0 : latencies[
static_cast<size_t>(latencies.size() * 0.95)];
208 double p99 = latencies.empty() ? 0 : latencies[
static_cast<size_t>(latencies.size() * 0.99)];
// NOTE(review): both divisions below are undefined if duration.count() or
// total_operations is 0 (e.g. all spans failed) — consider guarding.
211 double throughput =
static_cast<double>(total_operations) / duration.count();
212 double avg_latency =
static_cast<double>(total_latency_us) / total_operations;
215 std::cout <<
"\n=== High Load Stress Test Results ===" << std::endl;
216 std::cout <<
"Duration: " << duration.count() <<
" seconds" << std::endl;
217 std::cout <<
"Total operations: " << total_operations << std::endl;
218 std::cout <<
"Failed operations: " << failed_operations << std::endl;
219 std::cout <<
"Throughput: " << throughput <<
" ops/sec" << std::endl;
220 std::cout <<
"Average latency: " << avg_latency <<
" μs" << std::endl;
221 std::cout <<
"P50 latency: " << p50 <<
" μs" << std::endl;
222 std::cout <<
"P95 latency: " << p95 <<
" μs" << std::endl;
223 std::cout <<
"P99 latency: " << p99 <<
" μs" << std::endl;
// Pass criteria: >=1K ops/sec, <1% failures, p99 under a threshold
// (p99_threshold is declared outside this fragment).
228 EXPECT_GT(throughput, 1000.0);
229 EXPECT_LT(failed_operations, total_operations * 0.01);
230 EXPECT_LT(p99, p99_threshold);
// --- Memory-leak detection test (fragment; TEST(...) header not visible) ---
// Repeatedly constructs and destroys batches of tracers/circuit-breakers,
// samples process memory every 100 iterations, and fails if memory growth
// trends upward with iteration count.
238 const int ITERATIONS = 1000;
239 const int OBJECTS_PER_ITERATION = 100;
242 std::vector<size_t> memory_samples;
244 for (
int iter = 0; iter < ITERATIONS; ++iter) {
// Vectors are scoped to the iteration, so all objects created below are
// destroyed at the end of each loop pass.
246 std::vector<std::unique_ptr<distributed_tracer>> tracers;
247 std::vector<std::unique_ptr<circuit_breaker>> breakers;
// cb_config's declaration (orig. lines 248-249) is not visible here.
250 cb_config.failure_threshold = 3;
251 cb_config.timeout = 100ms;
253 for (
int i = 0; i < OBJECTS_PER_ITERATION; ++i) {
254 tracers.push_back(std::make_unique<distributed_tracer>());
255 breakers.push_back(std::make_unique<circuit_breaker>(cb_config));
// NOTE(review): span.value() is used without a visible is_ok() check
// (orig. line 259 is missing) — confirm the guard exists in the original.
258 auto span = tracers.back()->start_span(
"test_span_" + std::to_string(i));
260 span.value()->tags[
"iteration"] = std::to_string(iter);
// Sample memory every 100 iterations; GetCurrentMemoryUsage is a helper
// defined outside this fragment.
269 if (iter % 100 == 0) {
270 memory_samples.push_back(GetCurrentMemoryUsage());
275 if (memory_samples.size() > 2) {
// Linear-trend statistic: x = sample index, y = memory usage.
// NOTE(review): mean_x uses size()/2.0, not (size()-1)/2.0, so it is not
// the exact mean of indices 0..size-1; and the final expression divides
// only by sqrt(sum_xx) — it omits the sqrt(sum_yy) factor of a true
// Pearson correlation, so "correlation" is really a scaled covariance
// and the 0.8 threshold below is unit-dependent. Worth fixing upstream.
277 double correlation = 0;
278 double mean_x = memory_samples.size() / 2.0;
279 double mean_y = std::accumulate(memory_samples.begin(), memory_samples.end(), 0.0) / memory_samples.size();
281 double sum_xy = 0, sum_xx = 0;
282 for (
size_t i = 0; i < memory_samples.size(); ++i) {
283 sum_xy += (i - mean_x) * (memory_samples[i] - mean_y);
284 sum_xx += (i - mean_x) * (i - mean_x);
288 correlation = sum_xy / sqrt(sum_xx);
292 EXPECT_LT(correlation, 0.8) <<
"Potential memory leak detected";
// --- Concurrent-storage race test (fragment; TEST(...) header not visible) ---
// NUM_THREADS threads rendezvous on a condition variable, then hammer a
// shared storage backend simultaneously to flush out races.
301 const int NUM_THREADS = 50;
302 const int OPERATIONS = 1000;
// NOTE(review): the config selects storage_backend_type::memory_buffer but a
// file_storage_backend is constructed from it — looks inconsistent; confirm
// whether file_storage_backend honors the type field or this is a bug.
306 config.
type = storage_backend_type::memory_buffer;
308 auto storage = std::make_unique<file_storage_backend>(config);
311 std::atomic<bool> race_detected{
false};
// Start barrier: plain bool start_flag is protected by start_mutex (the wait
// predicate reads it under the lock); the code that sets it true is outside
// this fragment — confirm the setter also holds start_mutex.
314 std::mutex start_mutex;
315 std::condition_variable start_cv;
316 std::atomic<int> ready_threads{0};
317 bool start_flag =
false;
320 std::vector<std::thread> threads;
321 for (
int t = 0; t < NUM_THREADS; ++t) {
322 threads.emplace_back([&, t]() {
// Each thread registers ready (increment on orig. line ~326 not visible),
// the last one notifies, and all wait for start_flag.
325 std::unique_lock<std::mutex> lock(start_mutex);
327 if (ready_threads == NUM_THREADS) {
329 start_cv.notify_all();
331 start_cv.wait(lock, [&] {
return start_flag; });
335 for (
int i = 0; i < OPERATIONS; ++i) {
// snapshot's declaration (orig. lines 336-337) is not visible here.
338 snapshot.
add_metric(
"thread_" + std::to_string(t), i);
340 auto result =
storage->store(snapshot);
// Any failed store is treated as evidence of a race in the backend.
344 if (!result.is_ok()) {
345 race_detected =
true;
// Join all threads (join call not visible in this fragment).
352 for (
auto& thread : threads) {
357 EXPECT_FALSE(race_detected) <<
"Race condition detected";
// counter is declared/updated outside this fragment; presumably incremented
// once per successful store — confirm.
358 EXPECT_EQ(
counter, NUM_THREADS * OPERATIONS);
// --- Sustained-load test (fragment; TEST(...) header not visible) ---
// Workers loop in 100-op cycles with a 10ms pause until TEST_DURATION
// elapses, then average throughput is checked.
399 const auto TEST_DURATION = 5s;
400 const int NUM_THREADS = 10;
401 const int OPS_PER_CYCLE = 100;
403 auto tracer = std::make_unique<distributed_tracer>();
405 std::atomic<int64_t> total_operations{0};
406 std::atomic<int64_t> failed_operations{0};
409 std::vector<std::thread> workers;
410 auto start_time = std::chrono::steady_clock::now();
412 for (
int t = 0; t < NUM_THREADS; ++t) {
413 workers.emplace_back([&, t]() {
// Duration-bounded outer loop: re-checks the clock once per cycle.
414 while (std::chrono::steady_clock::now() - start_time < TEST_DURATION) {
416 for (
int i = 0; i < OPS_PER_CYCLE; ++i) {
417 auto span = tracer->start_span(
"sustained_op");
// NOTE(review): span.value() is dereferenced here; the is_ok() guard
// (orig. line 418) is not visible — confirm it exists, otherwise a
// failed start_span would be UB/throw.
419 span.value()->tags[
"thread"] = std::to_string(t);
// Throttle between cycles so load is sustained rather than bursty.
427 std::this_thread::sleep_for(10ms);
// Join all workers (join call not visible in this fragment).
433 for (
auto& worker : workers) {
438 auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(
439 std::chrono::steady_clock::now() - start_time);
// ops/sec = ops * 1000 / elapsed_ms; duration is >= TEST_DURATION so the
// divisor is non-zero here.
440 double avg_throughput = (
static_cast<double>(total_operations) * 1000.0) / duration.count();
442 std::cout <<
"\n=== Sustained Load Test Results ===" << std::endl;
443 std::cout <<
"Duration: " << duration.count() <<
" ms" << std::endl;
444 std::cout <<
"Total operations: " << total_operations << std::endl;
445 std::cout <<
"Failed operations: " << failed_operations << std::endl;
446 std::cout <<
"Average throughput: " << avg_throughput <<
" ops/sec" << std::endl;
// Pass criteria: zero failures, some work done, >=1K ops/sec sustained.
449 EXPECT_EQ(failed_operations, 0);
450 EXPECT_GT(total_operations, 0);
451 EXPECT_GT(avg_throughput, 1000.0);
// --- Burst-load test (fragment; TEST(...) header not visible) ---
// Fires NUM_BURSTS bursts of BURST_SIZE async operations, measuring how long
// each burst takes to complete, with a quiet interval between bursts.
459 auto tracer = std::make_unique<distributed_tracer>();
// NOTE(review): std::launch::async per operation means up to 10,000 threads
// per burst on conforming implementations — intentional for stress, but
// heavyweight; confirm this is the desired load shape.
461 const int BURST_SIZE = 10000;
462 const int NUM_BURSTS = 10;
463 const auto BURST_INTERVAL = 5s;
465 std::vector<double> burst_latencies;
467 for (
int burst = 0; burst < NUM_BURSTS; ++burst) {
468 auto burst_start = std::chrono::high_resolution_clock::now();
471 std::vector<std::future<bool>> futures;
472 for (
int i = 0; i < BURST_SIZE; ++i) {
// Each task starts one span; the lambda body's tail (orig. lines
// 475-480), which presumably returns success/failure, is not visible.
473 futures.push_back(std::async(std::launch::async, [&tracer, i]() {
474 auto span = tracer->start_span(
"burst_op_" + std::to_string(i));
// Drain all futures; the .get() accumulation into `successful`
// (orig. lines 482-486) is not visible in this fragment.
481 for (
auto& future : futures) {
487 auto burst_end = std::chrono::high_resolution_clock::now();
488 auto burst_duration = std::chrono::duration_cast<std::chrono::milliseconds>(
489 burst_end - burst_start).count();
491 burst_latencies.push_back(
static_cast<double>(burst_duration));
493 std::cout <<
"Burst " << burst <<
": " << successful <<
"/" << BURST_SIZE
494 <<
" successful, duration: " << burst_duration <<
"ms" << std::endl;
// Cool-down so bursts are independent rather than back-to-back.
497 std::this_thread::sleep_for(BURST_INTERVAL);
// burst_latencies has NUM_BURSTS entries here, so size() is non-zero and
// max_element is valid.
501 double avg_latency = std::accumulate(burst_latencies.begin(), burst_latencies.end(), 0.0)
502 / burst_latencies.size();
503 double max_latency = *std::max_element(burst_latencies.begin(), burst_latencies.end());
505 std::cout <<
"\n=== Burst Load Test Results ===" << std::endl;
506 std::cout <<
"Average burst latency: " << avg_latency <<
"ms" << std::endl;
507 std::cout <<
"Max burst latency: " << max_latency <<
"ms" << std::endl;
// avg_threshold / max_threshold are declared outside this fragment.
513 EXPECT_LT(avg_latency, avg_threshold);
514 EXPECT_LT(max_latency, max_threshold);
// --- Deadlock-detection test (fragment; TEST(...) header not visible) ---
// Threads deliberately acquire two mutexes in OPPOSITE orders (classic ABBA
// pattern); try_lock_for timeouts keep the test live, and deadlock_timeouts
// counts how often a lock could not be taken in time.
522 const int NUM_THREADS = 10;
523 const int ITERATIONS = 100;
526 std::timed_mutex mutex1, mutex2;
527 std::atomic<int> deadlock_timeouts{0};
529 std::vector<std::thread> threads;
530 for (
int t = 0; t < NUM_THREADS; ++t) {
531 threads.emplace_back([&, t]() {
532 for (
int i = 0; i < ITERATIONS; ++i) {
// Branch A (presumably selected by thread parity on the missing orig.
// lines 533-535): lock mutex1 then mutex2. adopt_lock hands the
// already-acquired lock to the guard for RAII release.
536 if (mutex1.try_lock_for(100ms)) {
537 std::lock_guard<std::timed_mutex> lock1(mutex1, std::adopt_lock);
538 if (mutex2.try_lock_for(100ms)) {
539 std::lock_guard<std::timed_mutex> lock2(mutex2, std::adopt_lock);
// Hold both locks briefly to widen the contention window.
541 std::this_thread::sleep_for(1ms);
// Branch B: opposite order — mutex2 then mutex1 — creating the ABBA
// hazard the test is probing for.
550 if (mutex2.try_lock_for(100ms)) {
551 std::lock_guard<std::timed_mutex> lock2(mutex2, std::adopt_lock);
552 if (mutex1.try_lock_for(100ms)) {
553 std::lock_guard<std::timed_mutex> lock1(mutex1, std::adopt_lock);
555 std::this_thread::sleep_for(1ms);
// Join with an overall deadline: if joining takes longer than 30s the test
// declares a potential deadlock. NOTE(review): the elapsed check at orig.
// line 577 appears after/around the join loop — as visible, a blocked
// join() would still hang; confirm the original polls rather than blocks.
568 auto start = std::chrono::steady_clock::now();
569 bool all_finished =
true;
571 for (
auto& thread : threads) {
572 if (thread.joinable()) {
577 if (std::chrono::steady_clock::now() - start > 30s) {
578 all_finished =
false;
583 EXPECT_TRUE(all_finished) <<
"Potential deadlock detected - test timed out";
584 std::cout <<
"Deadlock timeouts encountered: " << deadlock_timeouts << std::endl;
// --- Graceful-degradation test (fragment; TEST(...) header not visible) ---
// Runs the tracer at increasing load levels (LoadLevel is declared outside
// this fragment; its initializer list at orig. lines 602-609 is not visible)
// and records per-level throughput and average latency.
592 auto tracer = std::make_unique<distributed_tracer>();
601 std::vector<LoadLevel> load_levels = {
610 for (
auto& level : load_levels) {
611 std::atomic<int64_t> total_latency_us{0};
612 std::atomic<int> completed_ops{0};
614 auto start_time = std::chrono::high_resolution_clock::now();
616 std::vector<std::thread> threads;
617 for (
int t = 0; t < level.threads; ++t) {
618 threads.emplace_back([&]() {
// Work is split evenly: integer division drops the remainder, so up to
// level.threads-1 operations may be skipped per level — acceptable for
// a stress test, but worth knowing.
619 for (
int i = 0; i < level.operations / level.threads; ++i) {
620 auto op_start = std::chrono::high_resolution_clock::now();
622 auto span = tracer->start_span(
"degradation_op");
// NOTE(review): span.value() dereferenced without a visible is_ok()
// guard (orig. line 623 missing) — confirm in the original.
624 span.value()->tags[
"load_level"] = std::to_string(level.threads);
628 auto op_end = std::chrono::high_resolution_clock::now();
629 total_latency_us += std::chrono::duration_cast<std::chrono::microseconds>(
630 op_end - op_start).count();
// Join this level's threads (join call not visible in this fragment).
635 for (
auto& thread : threads) {
639 auto end_time = std::chrono::high_resolution_clock::now();
640 auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(
641 end_time - start_time).count();
// NOTE(review): divides by completed_ops and duration — undefined if either
// is 0 (all ops failed, or sub-millisecond run); consider guarding.
643 level.avg_latency =
static_cast<double>(total_latency_us) / completed_ops;
644 level.throughput = (completed_ops * 1000.0) / duration;
646 std::cout <<
"Load level " << level.threads <<
" threads: "
647 <<
"throughput=" << level.throughput <<
" ops/sec, "
648 <<
"avg_latency=" << level.avg_latency <<
" μs" << std::endl;
// Sanity checks per level, then a floor on the highest-load throughput.
653 for (
const auto& level : load_levels) {
654 EXPECT_GT(level.throughput, 0) <<
"Load level " << level.threads
655 <<
" should have non-zero throughput";
656 EXPECT_GT(level.avg_latency, 0) <<
"Load level " << level.threads
657 <<
" should have measurable latency";
661 EXPECT_GT(load_levels.back().throughput, 10000.0)
662 <<
"System should maintain >10K ops/sec at high load";