PACS System 0.1.0
PACS DICOM system library
ai_service_connector.h
// BSD 3-Clause License
// Copyright (c) 2021-2025, 🍀☀🌕🌥 🌊
// See the LICENSE file in the project root for full license information.

#pragma once

#include <chrono>
#include <cstdint>
#include <filesystem>
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <variant>  // std::monostate, used in Result<std::monostate> return types
#include <vector>

// KCENON_HAS_COMMON_SYSTEM is defined by CMake when common_system is available
#ifndef KCENON_HAS_COMMON_SYSTEM
#define KCENON_HAS_COMMON_SYSTEM 0
#endif

#if KCENON_HAS_COMMON_SYSTEM
#include <kcenon/common/patterns/result.h>
#endif

namespace kcenon::pacs::ai {

// =============================================================================
// Result Type (fallback when common_system is unavailable)
// =============================================================================

#if KCENON_HAS_COMMON_SYSTEM
/// Result type alias for operations returning a value.
template <typename T>
using Result = kcenon::common::Result<T>;

/// Error information type.
using error_info = kcenon::common::error_info;
#else
/// Simple error info for fallback when common_system is unavailable.
struct error_info {
    int code = -1;
    std::string message;
    std::string module;

    error_info() = default;
    explicit error_info(const std::string& msg) : message(msg) {}
    error_info(int c, const std::string& msg, const std::string& mod = "")
        : code(c), message(msg), module(mod) {}
};

/// Simple result type for error handling.
template <typename T>
class Result {
public:
    Result(T value) : data_(std::move(value)), has_value_(true) {}
    Result(const error_info& err) : error_(err), has_value_(false) {}

    [[nodiscard]] bool is_ok() const noexcept { return has_value_; }
    [[nodiscard]] bool is_err() const noexcept { return !has_value_; }
    [[nodiscard]] T& value() & { return data_; }
    [[nodiscard]] const T& value() const& { return data_; }
    [[nodiscard]] T&& value() && { return std::move(data_); }
    [[nodiscard]] const error_info& error() const { return error_; }

private:
    T data_{};
    error_info error_{};
    bool has_value_{false};
};
#endif
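// ---------------------------------------------------------------------------
// Usage sketch (added for illustration, not part of the original header):
// how the fallback Result/error_info pair above is typically produced and
// consumed. The function names and values here are hypothetical.
#if !KCENON_HAS_COMMON_SYSTEM
inline auto example_parse_priority(int raw) -> Result<int> {
    if (raw < 0) {
        // Error branch: code, message, and originating module.
        return Result<int>(error_info{-1, "priority must be non-negative", "ai"});
    }
    // Success branch: wrap the value.
    return Result<int>(raw);
}

inline void example_consume_result() {
    auto r = example_parse_priority(5);
    if (r.is_ok()) {
        int priority = r.value();         // access the stored value
        (void)priority;
    } else {
        const error_info& e = r.error();  // inspect code/message/module
        (void)e;
    }
}
#endif
// ---------------------------------------------------------------------------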
// =============================================================================
// Enumerations
// =============================================================================

/// Status codes for AI inference jobs.
enum class inference_status_code {
    pending,    ///< Job is queued but not started.
    running,    ///< Job is currently processing.
    completed,  ///< Job completed successfully.
    failed,     ///< Job failed with an error.
    cancelled,  ///< Job was cancelled before completion.
    timeout     ///< Job exceeded its allowed time.
};

/// Types of authentication for AI services.
enum class authentication_type {
    none,          ///< No authentication.
    api_key,       ///< API key authentication.
    bearer_token,  ///< OAuth2 bearer token.
    basic          ///< HTTP basic authentication.
};

// =============================================================================
// Configuration Structures
// =============================================================================

/// Configuration for AI service connection.
struct ai_service_config {
    /// Base URL of the AI service (e.g., "https://ai.example.com/v1").
    std::string base_url;

    /// Service name for identification.
    std::string service_name{"ai_service"};

    /// Authentication type.
    authentication_type auth_type{authentication_type::none};

    /// API key (for api_key auth type).
    std::string api_key;

    /// Username (for basic auth).
    std::string username;

    /// Password (for basic auth).
    std::string password;

    /// Bearer token (for bearer_token auth type).
    std::string bearer_token;

    /// Connection timeout.
    std::chrono::milliseconds connection_timeout{30000};

    /// Request timeout for inference operations.
    std::chrono::milliseconds request_timeout{300000};  // 5 minutes

    /// Maximum retry attempts on failure.
    std::size_t max_retries{3};

    /// Delay between retries (exponential backoff applied).
    std::chrono::milliseconds retry_delay{1000};

    /// Enable TLS certificate verification.
    bool verify_ssl{true};

    /// Path to CA certificate bundle (optional).
    std::optional<std::filesystem::path> ca_cert_path;

    /// Status polling interval.
    std::chrono::milliseconds polling_interval{5000};
};
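// ---------------------------------------------------------------------------
// Configuration sketch (added for illustration; the endpoint, token, and
// timing values below are assumptions, not project defaults): populating
// ai_service_config for a bearer-token protected service.
inline auto example_make_config() -> ai_service_config {
    ai_service_config cfg;
    cfg.base_url = "https://ai.example.com/v1";        // hypothetical endpoint
    cfg.service_name = "chest-ct-triage";              // hypothetical name
    cfg.auth_type = authentication_type::bearer_token;
    cfg.bearer_token = "<token-from-secret-store>";    // placeholder secret
    cfg.request_timeout = std::chrono::minutes{10};    // long-running inference
    cfg.max_retries = 5;
    cfg.retry_delay = std::chrono::milliseconds{2000}; // base delay before backoff
    cfg.verify_ssl = true;
    return cfg;
}
// ---------------------------------------------------------------------------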
/// Request structure for AI inference.
struct inference_request {
    /// Study Instance UID to process.
    std::string study_instance_uid;

    /// Series Instance UID (optional, for series-level inference).
    std::optional<std::string> series_instance_uid;

    /// Model ID to use for inference.
    std::string model_id;

    /// Custom parameters for the model.
    std::map<std::string, std::string> parameters;

    /// Priority level (higher = more urgent).
    int priority{0};

    /// Callback URL for result notification (optional).
    std::optional<std::string> callback_url;

    /// Custom metadata to include with the request.
    std::map<std::string, std::string> metadata;
};
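// ---------------------------------------------------------------------------
// Request sketch (added for illustration; the UID, model ID, and parameter
// below are made up): a study-level request with one model parameter.
inline auto example_make_request() -> inference_request {
    inference_request req;
    req.study_instance_uid = "1.2.840.113619.2.55.3.604688119.971.1";  // fake UID
    req.model_id = "lung-nodule-detector-v2";                          // fake model ID
    req.parameters["threshold"] = "0.85";  // model-specific, stringly-typed knob
    req.priority = 10;                     // higher value = more urgent
    req.metadata["requested_by"] = "worklist-router";                  // fake tag
    return req;
}
// ---------------------------------------------------------------------------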
/// Status information for an inference job.
struct inference_status {
    /// Unique job identifier.
    std::string job_id;

    /// Current status code.
    inference_status_code status{inference_status_code::pending};

    /// Progress percentage (0-100).
    int progress{0};

    /// Human-readable status message.
    std::string message;

    /// Error message (if status is failed).
    std::optional<std::string> error_message;

    /// Time when the job was created.
    std::chrono::system_clock::time_point created_at;

    /// Time when the job started processing.
    std::optional<std::chrono::system_clock::time_point> started_at;

    /// Time when the job completed.
    std::optional<std::chrono::system_clock::time_point> completed_at;

    /// Result UIDs (if completed successfully).
    std::vector<std::string> result_uids;
};
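// ---------------------------------------------------------------------------
// Inspection sketch (added for illustration, not part of the original API):
// derive a coarse wall-clock duration for a job from the optional timestamps,
// treating a job that has not finished as "elapsed so far".
inline auto example_job_duration(const inference_status& st)
    -> std::chrono::milliseconds {
    const auto start = st.started_at.value_or(st.created_at);
    const auto end = st.completed_at.value_or(std::chrono::system_clock::now());
    return std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
}
// ---------------------------------------------------------------------------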
/// Information about an available AI model.
struct model_info {
    /// Unique model identifier.
    std::string model_id;

    /// Human-readable model name.
    std::string name;

    /// Model description.
    std::string description;

    /// Model version.
    std::string version;

    /// Supported modalities (e.g., "CT", "MR", "CR").
    std::vector<std::string> supported_modalities;

    /// Supported SOP classes.
    std::vector<std::string> supported_sop_classes;

    /// Output types (e.g., "SR", "SEG", "PR").
    std::vector<std::string> output_types;

    /// Whether the model is currently available.
    bool available{true};
};
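// ---------------------------------------------------------------------------
// Selection sketch (added for illustration, not part of the original API):
// pick the first available model advertising support for a given modality.
inline auto example_pick_model(const std::vector<model_info>& models,
                               const std::string& modality)
    -> std::optional<model_info> {
    for (const auto& m : models) {
        if (!m.available) {
            continue;  // skip models the service reports as unavailable
        }
        for (const auto& supported : m.supported_modalities) {
            if (supported == modality) {
                return m;  // first match wins
            }
        }
    }
    return std::nullopt;
}
// ---------------------------------------------------------------------------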
// =============================================================================
// AI Service Connector Class
// =============================================================================

/// Connector for external AI inference services.
///
/// All operations are exposed as static member functions backed by a single
/// PIMPL instance; the class cannot be instantiated.
class ai_service_connector {
public:
    // =========================================================================
    // Type Aliases
    // =========================================================================

    /// Callback type for status updates.
    using status_callback = std::function<void(const inference_status&)>;

    /// Callback type for completion notification.
    using completion_callback = std::function<void(const std::string& job_id,
                                                   bool success,
                                                   const std::vector<std::string>& result_uids)>;

    // =========================================================================
    // Initialization
    // =========================================================================

    /// Initialize the AI service connector.
    [[nodiscard]] static auto initialize(const ai_service_config& config)
        -> Result<std::monostate>;

    /// Shutdown the AI service connector.
    static void shutdown();

    /// Check if the connector is initialized.
    [[nodiscard]] static auto is_initialized() noexcept -> bool;

    // =========================================================================
    // Inference Operations
    // =========================================================================

    /// Request AI inference for a study.
    [[nodiscard]] static auto request_inference(const inference_request& request)
        -> Result<std::string>;

    /// Check the status of an inference job.
    [[nodiscard]] static auto check_status(const std::string& job_id)
        -> Result<inference_status>;

    /// Cancel an inference job.
    [[nodiscard]] static auto cancel(const std::string& job_id)
        -> Result<std::monostate>;

    /// Wait for a job to complete.
    [[nodiscard]] static auto wait_for_completion(
        const std::string& job_id,
        std::chrono::milliseconds timeout = std::chrono::minutes{30},
        status_callback callback = nullptr)
        -> Result<inference_status>;

    /// List active inference jobs.
    [[nodiscard]] static auto list_active_jobs()
        -> Result<std::vector<inference_status>>;

    // =========================================================================
    // Model Management
    // =========================================================================

    /// List available AI models.
    [[nodiscard]] static auto list_models()
        -> Result<std::vector<model_info>>;

    /// Get information about a specific model.
    [[nodiscard]] static auto get_model_info(const std::string& model_id)
        -> Result<model_info>;

    // =========================================================================
    // Health Check
    // =========================================================================

    /// Check AI service health.
    [[nodiscard]] static auto check_health() -> bool;

    /// Get current latency to the AI service.
    [[nodiscard]] static auto get_latency()
        -> std::optional<std::chrono::milliseconds>;

    // =========================================================================
    // Configuration
    // =========================================================================

    /// Get the current configuration.
    [[nodiscard]] static auto get_config() -> const ai_service_config&;

    /// Update authentication credentials.
    [[nodiscard]] static auto update_credentials(
        authentication_type auth_type,
        const std::string& credentials)
        -> Result<std::monostate>;

private:
    // Prevent instantiation
    ai_service_connector() = delete;
    ai_service_connector(const ai_service_connector&) = delete;
    ai_service_connector& operator=(const ai_service_connector&) = delete;

    // PIMPL implementation
    class impl;
    static std::unique_ptr<impl> pimpl_;
};
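// ---------------------------------------------------------------------------
// End-to-end sketch (added for illustration; assumes the Result interface
// shown in the fallback above, i.e. is_ok()/is_err()/value(), and uses a
// made-up timeout and simplified error handling): initialize the connector,
// submit one request, block until it finishes while observing progress, then
// shut down.
inline auto example_run_inference(const ai_service_config& cfg,
                                  const inference_request& req)
    -> std::vector<std::string> {
    auto init = ai_service_connector::initialize(cfg);
    if (init.is_err()) {
        return {};  // could not reach or authenticate against the service
    }

    auto job = ai_service_connector::request_inference(req);
    if (job.is_err()) {
        ai_service_connector::shutdown();
        return {};
    }

    // The lambda matches the status_callback alias declared above and simply
    // observes progress; wait_for_completion polls until a terminal state.
    auto done = ai_service_connector::wait_for_completion(
        job.value(),
        std::chrono::minutes{15},
        [](const inference_status& st) {
            (void)st.progress;  // e.g. forward st.progress / st.message to a UI
        });

    std::vector<std::string> uids;
    if (done.is_ok() && done.value().status == inference_status_code::completed) {
        uids = done.value().result_uids;  // UIDs of generated result objects
    }

    ai_service_connector::shutdown();
    return uids;
}
// ---------------------------------------------------------------------------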
// =============================================================================
// Helper Functions
// =============================================================================

/// Convert inference status code to string.
[[nodiscard]] inline auto to_string(inference_status_code status) -> std::string {
    switch (status) {
        case inference_status_code::pending:
            return "pending";
        case inference_status_code::running:
            return "running";
        case inference_status_code::completed:
            return "completed";
        case inference_status_code::failed:
            return "failed";
        case inference_status_code::cancelled:
            return "cancelled";
        case inference_status_code::timeout:
            return "timeout";
        default:
            return "unknown";
    }
}

/// Convert authentication type to string.
[[nodiscard]] inline auto to_string(authentication_type type) -> std::string {
    switch (type) {
        case authentication_type::none:
            return "none";
        case authentication_type::api_key:
            return "api_key";
        case authentication_type::bearer_token:
            return "bearer_token";
        case authentication_type::basic:
            return "basic";
        default:
            return "unknown";
    }
}
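// ---------------------------------------------------------------------------
// Formatting sketch (added for illustration): build a one-line log summary of
// a job using the to_string helper above.
inline auto example_format_status_line(const inference_status& st) -> std::string {
    return "job " + st.job_id + " [" + to_string(st.status) + "] " +
           std::to_string(st.progress) + "% - " + st.message;
}
// ---------------------------------------------------------------------------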
} // namespace kcenon::pacs::ai