#include <chrono>
#include <future>
#include <iostream>
#include <stdexcept>
#include <thread>
#include <vector>

using namespace std::chrono;
std::cout << "=== Basic async_executor Usage ===\n";
std::cout <<
"Executor created with " << executor.
thread_count() <<
" threads\n";
std::cout << "Submitting 5 simple tasks...\n";
std::vector<std::future<int>> futures;
auto start = high_resolution_clock::now();
for (int i = 0; i < 5; ++i) {
auto future = executor.
submit([i]() {
std::this_thread::sleep_for(milliseconds(100));
return i * i;
});
futures.push_back(std::move(future));
}
std::cout << "Collecting results:\n";
for (size_t i = 0; i < futures.size(); ++i) {
int result = futures[i].get();
std::cout << " Task " << i << " result: " << result << "\n";
}
auto end = high_resolution_clock::now();
auto duration = duration_cast<milliseconds>(end - start);
std::cout << "Total time: " << duration.count() << "ms\n";
std::cout << "(Expected ~100ms with concurrent execution)\n\n";
}
// Stress test: submit many trivial tasks and report average submission
// latency.  Fix: the original status strings were UTF-8 mojibake, and the
// "check mark" literal was split across a newline (an unterminated string
// literal); both are restored to the intended glyphs below.
std::cout << "=== High-Throughput Performance Test ===\n";
const int num_tasks = 10000;
std::cout << "Submitting " << num_tasks << " lightweight tasks...\n";

auto start = high_resolution_clock::now();
std::vector<std::future<int>> futures;
futures.reserve(num_tasks);  // one allocation up front; no reallocs while timing
for (int i = 0; i < num_tasks; ++i) {
    futures.push_back(executor.submit([i]() {
        return i * 2;
    }));
}
executor.wait_for_completion();  // block until the queue has drained
auto end = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(end - start);

std::cout << "Submitted " << num_tasks << " tasks in "
          << duration.count() << " microseconds\n";
// Integer division is intentional: whole microseconds per task.
std::cout << "Average latency: "
          << (duration.count() / num_tasks) << " microseconds/task\n";

if (executor.is_using_thread_system()) {
    std::cout << "✅ thread_system: Expected ~77ns latency per task\n";
} else {
    std::cout << "⚠️ Fallback mode: Expected ~2-5µs latency per task\n";
}
std::cout << "\n";
}
// Show that an exception thrown inside a task propagates to the caller
// through the future and is re-thrown by get().  Fix: the original "caught"
// message was a mojibake'd, unterminated string literal split across two
// lines; restored to the intended check-mark glyph.
std::cout << "=== Error Handling ===\n";
auto future = executor.submit([]() -> int {
    throw std::runtime_error("Simulated error in task");
    return 42;  // unreachable — kept so the lambda's int return is explicit
});
try {
    int result = future.get();  // re-throws the task's exception here
    std::cout << "Result: " << result << "\n";
} catch (const std::exception& e) {
    std::cout << "✅ Caught exception: " << e.what() << "\n";
}
std::cout << "\n";
}
std::cout << "=== Graceful Shutdown ===\n";
std::cout << "Submitting 3 long-running tasks...\n";
std::vector<std::future<void>> futures;
for (int i = 0; i < 3; ++i) {
futures.push_back(executor.submit([i]() {
std::cout << " Task " << i << " starting...\n";
std::this_thread::sleep_for(milliseconds(200));
std::cout << " Task " << i << " completed\n";
}));
}
std::cout << "Pending tasks: " << executor.pending_tasks() << "\n";
std::cout << "Waiting for tasks to complete...\n";
for (auto& future : futures) {
future.get();
}
std::cout << "â
All tasks completed, shutting down...\n";
executor.shutdown();
std::cout << "â
Executor shut down gracefully\n\n";
}
// Benchmark async_executor against std::async for the same 1000 trivial
// tasks.  Fix: the "µs" unit in four output strings was mojibake ("Ξs");
// restored to the intended micro sign.
std::cout << "=== Performance Comparison ===\n";
std::cout << "async_executor vs std::async\n\n";
const int num_tasks = 1000;
{
    // Candidate: submit through the executor's shared thread pool.
    auto start = high_resolution_clock::now();
    std::vector<std::future<int>> futures;
    for (int i = 0; i < num_tasks; ++i) {
        futures.push_back(executor.submit([i]() { return i * 2; }));
    }
    for (auto& f : futures) {
        f.get();
    }
    auto end = high_resolution_clock::now();
    auto duration = duration_cast<microseconds>(end - start);
    std::cout << "async_executor: " << duration.count() << " µs\n";
    std::cout << " (" << (duration.count() / num_tasks) << " µs/task)\n";
}
{
    // Baseline: std::async with launch::async, which may spawn a thread
    // per task — typically much slower than a pooled executor.
    auto start = high_resolution_clock::now();
    std::vector<std::future<int>> futures;
    for (int i = 0; i < num_tasks; ++i) {
        futures.push_back(std::async(std::launch::async, [i]() { return i * 2; }));
    }
    for (auto& f : futures) {
        f.get();
    }
    auto end = high_resolution_clock::now();
    auto duration = duration_cast<microseconds>(end - start);
    std::cout << "std::async: " << duration.count() << " µs\n";
    std::cout << " (" << (duration.count() / num_tasks) << " µs/task)\n";
}
std::cout << "\n";
}
// Tail of main(): banner, demo execution wrapped in a try block, and a
// success/failure exit code.  Fix: the banner's box-drawing characters, the
// "✅" and "❌" status glyphs were UTF-8 mojibake, and the success message was
// an unterminated string literal split across two lines — all restored below.
// The exact banner width/padding could not be recovered from the corrupted
// bytes; this is a best-effort reconstruction — confirm against the original.
std::cout << "══════════════════════════════════════════════════\n";
std::cout << "║  async_executor Demonstration                  ║\n";
std::cout << "║  thread_system Integration for Database        ║\n";
std::cout << "══════════════════════════════════════════════════\n\n";
try {
    // NOTE(review): the calls to the demonstrate_*() functions listed in the
    // file's index appear to have been lost when this file was extracted —
    // confirm against the original source before shipping.
    std::cout << "══════════════════════════════════════════════════\n";
    std::cout << "║ ✅ All demonstrations completed successfully   ║\n";
    std::cout << "══════════════════════════════════════════════════\n";
    return 0;
}
catch (const std::exception& e) {
    std::cerr << "❌ Error: " << e.what() << "\n";
    return 1;
}
}
void demonstrate_basic_usage()
void compare_with_legacy()
void demonstrate_shutdown()
void demonstrate_high_throughput()
void demonstrate_error_handling()
High-performance asynchronous executor using thread_system.
auto submit(F&& func, Args&&... args) -> std::future<std::invoke_result_t<F, Args...>>
Submits a task for asynchronous execution.
size_t thread_count() const
Returns the number of worker threads.
constexpr bool is_using_thread_system() const
Checks if using thread_system implementation.