#ifndef KCENON_HAS_COMMON_SYSTEM
#define KCENON_HAS_COMMON_SYSTEM 0
#if KCENON_HAS_COMMON_SYSTEM
#include <kcenon/common/patterns/result.h>
#if KCENON_HAS_COMMON_SYSTEM
error_info(int c, const std::string& msg, const std::string& mod = "")
[[nodiscard]] T&& value() && { return std::move(data_); }
std::optional<std::chrono::system_clock::time_point> started_at;
const std::vector<std::string>& result_uids)>;
[[nodiscard]] static auto cancel(const std::string& job_id) -> Result<std::monostate>;
const std::string& job_id,
std::chrono::milliseconds timeout = std::chrono::minutes{30},
-> std::optional<std::chrono::milliseconds>;
const std::string& credentials)
return "bearer_token";
Simple result type for error handling.
bool is_err() const noexcept
const error_info & error() const
const T & value() const &
Result(const error_info &err)
bool is_ok() const noexcept
Connector for external AI inference services.
std::function< void(const inference_status &)> status_callback
Callback type for status updates.
static std::unique_ptr< impl > pimpl_
static auto update_credentials(authentication_type auth_type, const std::string &credentials) -> Result< std::monostate >
Update authentication credentials.
ai_service_connector()=delete
static auto request_inference(const inference_request &request) -> Result< std::string >
Request AI inference for a study.
static auto check_status(const std::string &job_id) -> Result< inference_status >
Check the status of an inference job.
ai_service_connector(const ai_service_connector &)=delete
static auto list_active_jobs() -> Result< std::vector< inference_status > >
List active inference jobs.
static auto is_initialized() noexcept -> bool
Check if the connector is initialized.
static auto initialize(const ai_service_config &config) -> Result< std::monostate >
Initialize the AI service connector.
static auto list_models() -> Result< std::vector< model_info > >
List available AI models.
static auto get_model_info(const std::string &model_id) -> Result< model_info >
Get information about a specific model.
static void shutdown()
Shutdown the AI service connector.
std::function< void(const std::string &job_id, bool success, const std::vector< std::string > &result_uids)> completion_callback
Callback type for completion notification.
~ai_service_connector()=delete
static auto get_config() -> const ai_service_config &
Get the current configuration.
ai_service_connector & operator=(const ai_service_connector &)=delete
static auto cancel(const std::string &job_id) -> Result< std::monostate >
Cancel an inference job.
static auto get_latency() -> std::optional< std::chrono::milliseconds >
Get current latency to the AI service.
static auto check_health() -> bool
Check AI service health.
static auto wait_for_completion(const std::string &job_id, std::chrono::milliseconds timeout=std::chrono::minutes{30}, status_callback callback=nullptr) -> Result< inference_status >
Wait for a job to complete.
authentication_type
Types of authentication for AI services.
@ bearer_token
OAuth2 bearer token.
@ api_key
API key in header.
@ basic
HTTP basic authentication.
kcenon::common::Result< T > Result
Result type alias for operations returning a value.
auto to_string(inference_status_code status) -> std::string
Convert inference status code to string.
inference_status_code
Status codes for AI inference jobs.
@ failed
Job failed with error.
@ cancelled
Job was cancelled.
@ running
Job is currently processing.
@ pending
Job is queued but not started.
@ completed
Job completed successfully.
kcenon::common::error_info error_info
Error information type.
Configuration for AI service connection.
std::string bearer_token
Bearer token (for bearer_token auth type)
std::size_t max_retries
Maximum retry attempts on failure.
authentication_type auth_type
Authentication type.
std::chrono::milliseconds request_timeout
Request timeout for inference operations.
std::string password
Password (for basic auth)
std::string username
Username (for basic auth)
std::chrono::milliseconds connection_timeout
Connection timeout.
std::chrono::milliseconds polling_interval
Status polling interval.
std::string api_key
API key (for api_key auth type)
std::string base_url
Base URL of the AI service (e.g., "https://ai.example.com/v1")
bool verify_ssl
Enable TLS certificate verification.
std::optional< std::filesystem::path > ca_cert_path
Path to CA certificate bundle (optional)
std::string service_name
Service name for identification.
std::chrono::milliseconds retry_delay
Delay between retries (exponential backoff applied)
Simple error info for fallback when common_system is unavailable.
error_info(const std::string &msg)
error_info(int c, const std::string &msg, const std::string &mod="")
Request structure for AI inference.
std::optional< std::string > series_instance_uid
Series Instance UID (optional, for series-level inference)
std::string model_id
Model ID to use for inference.
int priority
Priority level (higher = more urgent)
std::map< std::string, std::string > metadata
Custom metadata to include with request.
std::string study_instance_uid
Study Instance UID to process.
std::map< std::string, std::string > parameters
Custom parameters for the model.
std::optional< std::string > callback_url
Callback URL for result notification (optional)
Status information for an inference job.
inference_status_code status
Current status code.
std::optional< std::chrono::system_clock::time_point > started_at
Time when job started processing.
std::optional< std::string > error_message
Error message (if status is failed)
std::string message
Human-readable status message.
int progress
Progress percentage (0-100)
std::optional< std::chrono::system_clock::time_point > completed_at
Time when job completed.
std::chrono::system_clock::time_point created_at
Time when job was created.
std::vector< std::string > result_uids
Result UIDs (if completed successfully)
std::string job_id
Unique job identifier.
Information about an available AI model.
std::vector< std::string > supported_modalities
Supported modalities (e.g., "CT", "MR", "CR")
std::string version
Model version.
std::vector< std::string > supported_sop_classes
Supported SOP classes.
std::vector< std::string > output_types
Output types (e.g., "SR", "SEG", "PR")
std::string description
Model description.
std::string model_id
Unique model identifier.
bool available
Whether model is currently available.
std::string name
Human-readable model name.