Monitoring System 0.1.0
System resource monitoring with pluggable collectors and alerting
graceful_degradation_example.cpp
// BSD 3-Clause License
// Copyright (c) 2021-2025, 🍀☀🌕🌥 🌊
// See the LICENSE file in the project root for full license information.

#include <atomic>
#include <chrono>
#include <iostream>
#include <random>
#include <thread>

// Project headers (elided in this listing): circuit breaker integration,
// retry strategies with backoff, Result pattern types, and monitoring
// error codes.

using namespace kcenon::monitoring;
using namespace std::chrono_literals;

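// Result<T> is an either-style return type: built with kcenon::common::ok()
// on success or Result<T>::err(...) on failure, and queried with is_ok().
// The example uses it instead of exceptions so the reliability wrappers
// below can observe and count failures directly.
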
// Simulated unreliable service that fails a configurable fraction of calls
class unreliable_service {
private:
    std::mt19937 rng_;
    std::uniform_real_distribution<> dist_;
    std::atomic<double> failure_rate_;  // fraction of calls that fail (0.0-1.0)
    std::atomic<int> call_count_{0};

public:
    explicit unreliable_service(double failure_rate)
        : rng_(std::random_device{}())
        , dist_(0.0, 1.0)
        , failure_rate_(failure_rate) {}

    kcenon::common::Result<std::string> call() {
        call_count_++;                      // track total calls made
        std::this_thread::sleep_for(50ms);  // simulate network latency

        if (dist_(rng_) < failure_rate_) {
            return kcenon::common::Result<std::string>::err(
                error_info{monitoring_error_code::service_unavailable,
                           "Service temporarily unavailable"}.to_common_error()
            );
        }

        return kcenon::common::ok(std::string("Service response: SUCCESS"));
    }

    void set_failure_rate(double rate) {
        failure_rate_ = rate;
    }
};

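// A circuit breaker typically moves through three states: closed (calls pass
// through and failures are counted), open (calls are rejected immediately
// once the failure threshold is reached), and half-open (after the reset
// timeout, a trial call probes whether the service has recovered). With
// failure_threshold = 3 and a 5000ms timeout, three consecutive failures
// short-circuit further calls for roughly five seconds.
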
// Demonstrate circuit breaker pattern
void demonstrate_circuit_breaker() {
    std::cout << "=== Circuit Breaker Pattern ===" << std::endl;
    std::cout << std::endl;

    unreliable_service service(0.7); // 70% failure rate

    circuit_breaker_config config;
    config.failure_threshold = 3;
    config.timeout = 5000ms;

    circuit_breaker breaker(config);

    std::cout << "Circuit Breaker Configuration:" << std::endl;
    std::cout << "- Failure threshold: " << config.failure_threshold << std::endl;
    std::cout << "- Reset timeout: 5000ms" << std::endl;
    std::cout << std::endl;

    std::cout << "Making calls to unreliable service (70% failure rate):" << std::endl;
    std::cout << std::endl;

    for (int i = 0; i < 10; ++i) {
        std::cout << "Call " << (i + 1) << ": ";

        auto result = execute_with_circuit_breaker<std::string>(
            breaker, "external_service", [&service]() {
                return service.call();
            });

        if (result.is_ok()) {
            std::cout << "SUCCESS" << std::endl;
        } else {
            std::cout << "FAILED" << std::endl;
        }

        std::this_thread::sleep_for(200ms);
    }

    std::cout << std::endl;

    // Stats come back as a name -> variant map; print whichever type is held.
    auto stats = breaker.get_stats();
    std::cout << "Circuit Breaker Stats:" << std::endl;
    for (const auto& [key, val] : stats) {
        std::visit([&key](const auto& v) {
            std::cout << "- " << key << ": " << v << std::endl;
        }, val);
    }
    std::cout << std::endl;
}

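// With exponential backoff, the wait before retry k is
// initial_delay * backoff_multiplier^(k - 1). For the configuration below
// (100ms initial delay, multiplier 2.0, 5 attempts), the delays between
// attempts are 100ms, 200ms, 400ms, and 800ms. Assuming independent
// failures at the 40% rate used here, all five attempts fail with
// probability 0.4^5 ≈ 1%, so each request succeeds roughly 99% of the time.
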
// Demonstrate retry policy
void demonstrate_retry_policy() {
    std::cout << "=== Retry Policy with Exponential Backoff ===" << std::endl;
    std::cout << std::endl;

    unreliable_service service(0.4); // 40% failure rate

    retry_config retry_cfg;
    retry_cfg.max_attempts = 5;
    retry_cfg.strategy = retry_strategy::exponential_backoff;
    retry_cfg.initial_delay = 100ms;
    retry_cfg.backoff_multiplier = 2.0;

    retry_executor<std::string> policy("service_retry", retry_cfg);

    std::cout << "Retry Policy Configuration:" << std::endl;
    std::cout << "- Strategy: Exponential backoff" << std::endl;
    std::cout << "- Max attempts: 5" << std::endl;
    std::cout << "- Initial delay: 100ms" << std::endl;
    std::cout << std::endl;

    std::cout << "Making calls with retry policy:" << std::endl;
    std::cout << std::endl;

    for (int i = 0; i < 5; ++i) {
        std::cout << "Request " << (i + 1) << ": ";

        auto result = policy.execute([&service]() {
            return service.call();
        });

        if (result.is_ok()) {
            std::cout << "SUCCESS" << std::endl;
        } else {
            std::cout << "FAILED after retries" << std::endl;
        }
    }

    std::cout << std::endl;

    auto metrics = policy.get_metrics();
    std::cout << "Retry Policy Metrics:" << std::endl;
    std::cout << "- Total executions: " << metrics.total_executions << std::endl;
    std::cout << "- Successful: " << metrics.successful_executions << std::endl;
    std::cout << "- Failed: " << metrics.failed_executions << std::endl;
    std::cout << "- Total retries: " << metrics.total_retries << std::endl;
    std::cout << std::endl;
}

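// Ordering matters when the two patterns are composed: below, the retry
// executor runs inside the circuit breaker, so the breaker only observes
// the final outcome of each request after up to three attempts. Transient
// failures are absorbed by the retries, and only requests that exhaust all
// attempts count toward the breaker's failure threshold. Assuming
// independent failures at the 50% rate used here, a request fails after
// all retries with probability 0.5^3 = 12.5%, so the breaker (threshold 3)
// should open only under sustained degradation.
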
// Demonstrate combined patterns
void demonstrate_combined_patterns() {
    std::cout << "=== Combined Reliability Patterns ===" << std::endl;
    std::cout << std::endl;

    unreliable_service primary_service(0.5);

    circuit_breaker_config cb_config;
    cb_config.failure_threshold = 3;
    circuit_breaker breaker(cb_config);

    retry_config retry_cfg2;
    retry_cfg2.max_attempts = 3;
    retry_cfg2.strategy = retry_strategy::exponential_backoff;
    retry_cfg2.initial_delay = 100ms;

    retry_executor<std::string> policy2("combined_retry", retry_cfg2);

    std::cout << "Combining Circuit Breaker + Retry Policy" << std::endl;
    std::cout << std::endl;

    for (int i = 0; i < 10; ++i) {
        std::cout << "Request " << (i + 1) << ": ";

        // Breaker outermost, retries innermost: each breaker-visible call
        // may internally retry up to max_attempts times.
        auto result = execute_with_circuit_breaker<std::string>(breaker, "primary", [&]() {
            return policy2.execute([&]() {
                return primary_service.call();
            });
        });

        if (result.is_ok()) {
            std::cout << "SUCCESS" << std::endl;
        } else {
            std::cout << "FAILED" << std::endl;
        }

        std::this_thread::sleep_for(300ms);
    }

    std::cout << std::endl;

    auto cb_stats = breaker.get_stats();
    std::cout << "Circuit Breaker Stats:" << std::endl;
    for (const auto& [key, val] : cb_stats) {
        std::visit([&key](const auto& v) {
            std::cout << "- " << key << ": " << v << std::endl;
        }, val);
    }
    std::cout << std::endl;

    auto retry_metrics = policy2.get_metrics();
    std::cout << "Retry Policy:" << std::endl;
    std::cout << "- Total executions: " << retry_metrics.total_executions << std::endl;
    std::cout << "- Total retries: " << retry_metrics.total_retries << std::endl;
    std::cout << std::endl;
}

int main() {
    std::cout << "=== Graceful Degradation and Reliability Patterns ===" << std::endl;
    std::cout << std::endl;

    try {
        demonstrate_circuit_breaker();
        std::cout << std::string(70, '=') << std::endl;
        std::cout << std::endl;

        demonstrate_retry_policy();
        std::cout << std::string(70, '=') << std::endl;
        std::cout << std::endl;

        demonstrate_combined_patterns();
        std::cout << std::string(70, '=') << std::endl;
        std::cout << std::endl;

        std::cout << "=== All Reliability Patterns Demonstrated Successfully ===" << std::endl;

    } catch (const std::exception& e) {
        std::cerr << "Exception: " << e.what() << std::endl;
        return 1;
    }

    return 0;
}