Monitoring System 0.1.0
System resource monitoring with pluggable collectors and alerting
Loading...
Searching...
No Matches
graceful_degradation_example.cpp

This example demonstrates the circuit breaker pattern, retry with exponential backoff, and the combination of both reliability patterns for graceful degradation.

// BSD 3-Clause License
// Copyright (c) 2021-2025, 🍀☀🌕🌥 🌊
// See the LICENSE file in the project root for full license information.
#include <atomic>
#include <chrono>
#include <iostream>
#include <random>
#include <thread>
using namespace kcenon::monitoring;
using namespace std::chrono_literals;
// Simulated unreliable service
private:
// PRNG seeded from std::random_device; drives the simulated failures.
std::mt19937 rng_;
// Uniform draw in [0.0, 1.0); presumably compared against failure_rate_
// in call() — the comparison line appears stripped in this extract.
std::uniform_real_distribution<> dist_;
// Probability in [0, 1] that a call() fails; mutable via set_failure_rate().
double failure_rate_;
// Invocation counter. NOTE(review): not visibly read anywhere in this extract.
std::atomic<int> call_count_{0};
public:
// Construct a simulated service that fails with the given probability.
// failure_rate: expected fraction of calls returning an error (0.0 - 1.0).
explicit unreliable_service(double failure_rate)
: rng_(std::random_device{}())
, dist_(0.0, 1.0)
, failure_rate_(failure_rate) {}
// Simulate one remote call: ~50ms latency, then fail with probability
// failure_rate_, otherwise succeed.
// NOTE(review): the original extract returned the error unconditionally and
// had an unmatched '}' — the probability guard was evidently lost; restored
// here from the members (dist_, rng_, failure_rate_) it clearly used.
// Returns: ok("Service response: SUCCESS") or a service_unavailable error.
kcenon::common::Result<std::string> call() {
    ++call_count_;  // track invocations (member was otherwise unused)
    std::this_thread::sleep_for(50ms);  // simulated network latency
    if (dist_(rng_) < failure_rate_) {
        return kcenon::common::Result<std::string>::err(
            error_info{monitoring_error_code::service_unavailable,
                       "Service temporarily unavailable"}.to_common_error()
        );
    }
    return kcenon::common::ok(std::string("Service response: SUCCESS"));
}
// Adjust the simulated failure probability at runtime.
// rate: new probability in [0, 1] that call() fails.
// NOTE(review): plain (non-atomic) write — fine for this single-threaded
// example; not safe if call() runs concurrently on another thread.
void set_failure_rate(double rate) {
failure_rate_ = rate;
}
};
// Demonstrate circuit breaker pattern
std::cout << "=== Circuit Breaker Pattern ===" << std::endl;
std::cout << std::endl;
unreliable_service service(0.7); // 70% failure rate
config.failure_threshold = 3;
config.timeout = 5000ms;
circuit_breaker breaker(config);
std::cout << "Circuit Breaker Configuration:" << std::endl;
std::cout << "- Failure threshold: " << config.failure_threshold << std::endl;
std::cout << "- Reset timeout: 5000ms" << std::endl;
std::cout << std::endl;
std::cout << "Making calls to unreliable service (70% failure rate):" << std::endl;
std::cout << std::endl;
for (int i = 0; i < 10; ++i) {
std::cout << "Call " << (i + 1) << ": ";
auto result = execute_with_circuit_breaker<std::string>(breaker, "external_service", [&service]() {
return service.call();
});
if (result.is_ok()) {
std::cout << "SUCCESS" << std::endl;
} else {
std::cout << "FAILED" << std::endl;
}
std::this_thread::sleep_for(200ms);
}
std::cout << std::endl;
auto stats = breaker.get_stats();
std::cout << "Circuit Breaker Stats:" << std::endl;
for (const auto& [key, val] : stats) {
std::visit([&key](const auto& v) {
std::cout << "- " << key << ": " << v << std::endl;
}, val);
}
std::cout << std::endl;
}
// Demonstrate retry policy
std::cout << "=== Retry Policy with Exponential Backoff ===" << std::endl;
std::cout << std::endl;
unreliable_service service(0.4); // 40% failure rate
retry_config retry_cfg;
retry_cfg.max_attempts = 5;
retry_cfg.strategy = retry_strategy::exponential_backoff;
retry_cfg.initial_delay = 100ms;
retry_cfg.backoff_multiplier = 2.0;
retry_executor<std::string> policy("service_retry", retry_cfg);
std::cout << "Retry Policy Configuration:" << std::endl;
std::cout << "- Strategy: Exponential backoff" << std::endl;
std::cout << "- Max attempts: 5" << std::endl;
std::cout << "- Initial delay: 100ms" << std::endl;
std::cout << std::endl;
std::cout << "Making calls with retry policy:" << std::endl;
std::cout << std::endl;
for (int i = 0; i < 5; ++i) {
std::cout << "Request " << (i + 1) << ": ";
auto result = policy.execute([&service]() {
return service.call();
});
if (result.is_ok()) {
std::cout << "SUCCESS" << std::endl;
} else {
std::cout << "FAILED after retries" << std::endl;
}
}
std::cout << std::endl;
auto metrics = policy.get_metrics();
std::cout << "Retry Policy Metrics:" << std::endl;
std::cout << "- Total executions: " << metrics.total_executions << std::endl;
std::cout << "- Successful: " << metrics.successful_executions << std::endl;
std::cout << "- Failed: " << metrics.failed_executions << std::endl;
std::cout << "- Total retries: " << metrics.total_retries << std::endl;
std::cout << std::endl;
}
// Demonstrate combined patterns
std::cout << "=== Combined Reliability Patterns ===" << std::endl;
std::cout << std::endl;
unreliable_service primary_service(0.5);
cb_config.failure_threshold = 3;
circuit_breaker breaker(cb_config);
retry_config retry_cfg2;
retry_cfg2.max_attempts = 3;
retry_cfg2.strategy = retry_strategy::exponential_backoff;
retry_cfg2.initial_delay = 100ms;
retry_executor<std::string> policy2("combined_retry", retry_cfg2);
std::cout << "Combining Circuit Breaker + Retry Policy" << std::endl;
std::cout << std::endl;
for (int i = 0; i < 10; ++i) {
std::cout << "Request " << (i + 1) << ": ";
auto result = execute_with_circuit_breaker<std::string>(breaker, "primary", [&]() {
return policy2.execute([&]() {
return primary_service.call();
});
});
if (result.is_ok()) {
std::cout << "SUCCESS" << std::endl;
} else {
std::cout << "FAILED" << std::endl;
}
std::this_thread::sleep_for(300ms);
}
std::cout << std::endl;
auto cb_stats = breaker.get_stats();
std::cout << "Circuit Breaker Stats:" << std::endl;
for (const auto& [key, val] : cb_stats) {
std::visit([&key](const auto& v) {
std::cout << "- " << key << ": " << v << std::endl;
}, val);
}
std::cout << std::endl;
auto retry_metrics = policy2.get_metrics();
std::cout << "Retry Policy:" << std::endl;
std::cout << "- Total executions: " << retry_metrics.total_executions << std::endl;
std::cout << "- Total retries: " << retry_metrics.total_retries << std::endl;
std::cout << std::endl;
}
int main() {
std::cout << "=== Graceful Degradation and Reliability Patterns ===" << std::endl;
std::cout << std::endl;
try {
std::cout << std::string(70, '=') << std::endl;
std::cout << std::endl;
std::cout << std::string(70, '=') << std::endl;
std::cout << std::endl;
std::cout << std::string(70, '=') << std::endl;
std::cout << std::endl;
std::cout << "=== All Reliability Patterns Demonstrated Successfully ===" << std::endl;
} catch (const std::exception& e) {
std::cerr << "Exception: " << e.what() << std::endl;
return 1;
}
return 0;
}
Circuit breaker integration for monitoring_system.
Retry executor template class.
kcenon::common::Result< std::string > call()
std::uniform_real_distribution dist_
unreliable_service(double failure_rate)
Monitoring system specific error codes.
Fault tolerance manager coordinating circuit breakers and retries.
void demonstrate_retry_policy()
void demonstrate_circuit_breaker()
void demonstrate_combined_patterns()
common::resilience::circuit_breaker circuit_breaker
common::resilience::circuit_breaker_config circuit_breaker_config
Result pattern type definitions for monitoring system.
Retry strategies with backoff for monitoring operations.
Extended error information with context.
common::error_info to_common_error() const
Convert to common_system error_info.
std::chrono::milliseconds initial_delay