// ai_service_connector — connector for external AI inference services
// (fragmented listing; elided spans are marked with "// ...").

#include <sys/socket.h>

#include <condition_variable>
#include <shared_mutex>
#include <unordered_map>
// json_util::escape_string — escape special characters for embedding in a
// JSON string (fragment; the per-character dispatch is elided in this listing).
std::string escape_string(std::string_view str) {
    std::string result;
    result.reserve(str.size() + 10);
    // ... (per-character loop; named escapes such as \" and \\ elided)
    // Control characters below 0x20 become \u00XX escapes:
    if (static_cast<unsigned char>(c) < 0x20) {
        char buf[8];  // buffer declaration elided in the listing; 8 bytes assumed
        std::snprintf(buf, sizeof(buf), "\\u%04x",
                      static_cast<unsigned char>(c));
        result += buf;
    }
    // ...
    return result;
}
// json_util::to_iso8601 — format a time_point as an ISO 8601 UTC string.
std::string to_iso8601(std::chrono::system_clock::time_point tp) {
    const auto time_t_val = std::chrono::system_clock::to_time_t(tp);
    std::tm tm_val{};
#ifdef _WIN32
    gmtime_s(&tm_val, &time_t_val);
#else
    gmtime_r(&time_t_val, &tm_val);
#endif
    std::ostringstream oss;
    oss << std::put_time(&tm_val, "%Y-%m-%dT%H:%M:%SZ");
    return oss.str();
}
// json_util::from_iso8601 — parse an ISO 8601 UTC string; nullopt on failure.
[[nodiscard]]
inline std::optional<std::chrono::system_clock::time_point>
from_iso8601(const std::string& str) {
    std::tm tm_val{};
    std::istringstream iss(str);
    iss >> std::get_time(&tm_val, "%Y-%m-%dT%H:%M:%SZ");
    if (iss.fail()) {
        return std::nullopt;
    }
#ifdef _WIN32
    auto time_t_val = _mkgmtime(&tm_val);
#else
    auto time_t_val = timegm(&tm_val);
#endif
    return std::chrono::system_clock::from_time_t(time_t_val);
}
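// Example (sketch, not part of the original source; assert from <cassert>):
// round-tripping a timestamp. to_time_t truncates to whole seconds, so
// sub-second precision is lost by design, but the text form is stable.
inline void example_iso8601_round_trip() {
    const auto now = std::chrono::system_clock::now();
    const std::string text = to_iso8601(now);  // e.g. "2024-05-01T12:00:00Z"
    const auto parsed = from_iso8601(text);
    assert(parsed && to_iso8601(*parsed) == text);
}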
// json_util::extract_string — naive extractor for "key":"value" pairs.
[[nodiscard]] inline std::optional<std::string> extract_string(
    const std::string& json,
    const std::string& key) {
    std::string search_key = "\"" + key + "\"";
    auto pos = json.find(search_key);
    if (pos == std::string::npos) {
        return std::nullopt;
    }
    pos = json.find(':', pos);
    if (pos == std::string::npos) {
        return std::nullopt;
    }
    pos = json.find('"', pos);
    if (pos == std::string::npos) {
        return std::nullopt;
    }
    auto start = pos + 1;
    auto end = json.find('"', start);
    if (end == std::string::npos) {
        return std::nullopt;
    }
    return json.substr(start, end - start);
}
// json_util::extract_int — naive extractor for "key":<number> pairs.
[[nodiscard]] inline std::optional<int> extract_int(
    const std::string& json,
    const std::string& key) {
    std::string search_key = "\"" + key + "\"";
    auto pos = json.find(search_key);
    if (pos == std::string::npos) {
        return std::nullopt;
    }
    pos = json.find(':', pos);
    if (pos == std::string::npos) {
        return std::nullopt;
    }
    ++pos;
    while (pos < json.size() &&
           std::isspace(static_cast<unsigned char>(json[pos]))) {
        ++pos;
    }
    if (pos >= json.size()) {
        return std::nullopt;
    }
    try {  // stoi guard assumed; elided in the listing
        std::size_t end_pos = 0;
        int value = std::stoi(json.substr(pos), &end_pos);
        return value;
    } catch (const std::exception&) {
        return std::nullopt;
    }
}
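// Example (sketch, not part of the original source; assert from <cassert>):
// the extractors do plain substring scanning, so they are reliable only for
// flat, well-formed JSON without escaped quotes or duplicate keys.
inline void example_extractors() {
    const std::string json = R"({"job_id":"abc-123","priority":5})";
    auto id = extract_string(json, "job_id");
    auto prio = extract_int(json, "priority");
    assert(id && *id == "abc-123");
    assert(prio && *prio == 5);
}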
// json_util::build_request_json — serialize an inference_request to one flat
// JSON object (fragment; the study/model fields, value emission inside the
// loops, and the closing braces are elided in this listing).
std::string build_request_json(const inference_request& request) {
    std::ostringstream oss;
    // ... (study_instance_uid and model_id emitted first)
    oss << R"(,"series_instance_uid":")" /* << escaped value << '"' */;
    // ...
    oss << R"(,"priority":)" << request.priority;
    // ...
    oss << R"(,"parameters":{)";
    bool first = true;
    for (const auto& [key, value] : request.parameters) {
        if (!first) oss << ",";
        // ... (emit "key":"value", set first = false)
    }
    // ...
    oss << R"(,"metadata":{)";
    first = true;
    for (const auto& [key, value] : request.metadata) {
        if (!first) oss << ",";
        // ... (emit "key":"value", set first = false)
    }
    // ...
}
// json_util::parse_status_json — parse inference status from a JSON response
// (fragment; job_id, status_str, progress, message, error, and tp come from
// extract_string/extract_int/from_iso8601 calls whose guards are elided).
std::optional<inference_status> parse_status_json(const std::string& json) {
    inference_status status;
    // ...
    status.job_id = *job_id;
    // ...
    if (*status_str == "pending") {
        status.status = inference_status_code::pending;
    } else if (*status_str == "running") {
        status.status = inference_status_code::running;
    } else if (*status_str == "completed") {
        status.status = inference_status_code::completed;
    } else if (*status_str == "failed") {
        status.status = inference_status_code::failed;
    } else if (*status_str == "cancelled") {
        status.status = inference_status_code::cancelled;
    } else if (*status_str == "timeout") {
        status.status = inference_status_code::timeout;
    }
    // ...
    status.progress = *progress;
    // ...
    status.message = *message;
    // ...
    status.error_message = *error;
    // ... (timestamps parsed via from_iso8601)
    status.created_at = *tp;
    // ...
    status.started_at = *tp;
    // ...
    status.completed_at = *tp;
    // ...
    return status;
}
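// Example (sketch, not part of the original source; UIDs and values are
// placeholders, assert from <cassert>): serializing a request with
// build_request_json and parsing a status payload with parse_status_json.
inline void example_request_and_status_json() {
    inference_request request;
    request.study_instance_uid = "1.2.840.113619.2.55.3";
    request.model_id = "chest-ct-nodule-v2";
    request.priority = 5;
    request.parameters["threshold"] = "0.75";
    const std::string body = build_request_json(request);
    // body is one flat JSON object, e.g.:
    //   {...,"priority":5,"parameters":{"threshold":"0.75"},"metadata":{}}

    const std::string reply = R"({"job_id":"abc-123","status":"running",)"
                              R"("progress":42,"message":"processing"})";
    auto status = parse_status_json(reply);
    assert(status && status->status == inference_status_code::running);
    assert(status->progress == 42);
}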
// json_util::parse_model_json — parse model info from JSON (fragment; the
// field extraction guards are elided in this listing).
std::optional<model_info> parse_model_json(const std::string& json) {
    model_info info;
    // ... (model_id, description, version extracted via extract_string)
    info.model_id = *model_id;
    // ...
    info.description = *description;
    // ...
    info.version = *version;
    // ...
    return info;
}
// json_util::base64_encode — encode data to Base64 (RFC 4648 alphabet).
std::string base64_encode(std::string_view input) {
    static constexpr char table[] =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

    std::string result;
    result.reserve(((input.size() + 2) / 3) * 4);

    auto ptr = reinterpret_cast<const unsigned char*>(input.data());
    auto len = input.size();

    for (std::size_t i = 0; i < len; i += 3) {
        uint32_t octet_a = ptr[i];
        uint32_t octet_b = (i + 1 < len) ? ptr[i + 1] : 0;
        uint32_t octet_c = (i + 2 < len) ? ptr[i + 2] : 0;

        uint32_t triple = (octet_a << 16) | (octet_b << 8) | octet_c;

        result += table[(triple >> 18) & 0x3F];
        result += table[(triple >> 12) & 0x3F];
        result += (i + 1 < len) ? table[(triple >> 6) & 0x3F] : '=';
        result += (i + 2 < len) ? table[triple & 0x3F] : '=';
    }
    return result;
}
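// Example (sketch, not part of the original source; assert from <cassert>):
// RFC 4648 test vectors covering the '=' padding branches above.
inline void example_base64_encode() {
    assert(base64_encode("Man") == "TWFu");  // no padding
    assert(base64_encode("Ma") == "TWE=");   // one '=' pad
    assert(base64_encode("M") == "TQ==");    // two '=' pads
}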
// json_util::extract_json_array_objects — split a JSON array into the raw
// text of its top-level objects. (The '{' branch is elided in the listing
// but is required for the depth counter to work; reconstructed here.)
std::vector<std::string> extract_json_array_objects(const std::string& json) {
    std::vector<std::string> objects;

    auto arr_start = json.find('[');
    if (arr_start == std::string::npos) {
        return objects;
    }

    int depth = 0;
    std::size_t obj_start = std::string::npos;

    for (std::size_t i = arr_start + 1; i < json.size(); ++i) {
        char c = json[i];

        if (c == '"') {
            // Skip string literals, honoring backslash escapes.
            ++i;
            while (i < json.size() && json[i] != '"') {
                if (json[i] == '\\') ++i;
                ++i;
            }
        } else if (c == '{') {
            if (depth == 0) obj_start = i;
            ++depth;
        } else if (c == '}') {
            --depth;
            if (depth == 0 && obj_start != std::string::npos) {
                objects.push_back(json.substr(obj_start, i - obj_start + 1));
                obj_start = std::string::npos;
            }
        } else if (c == ']' && depth == 0) {
            break;
        }
    }
    return objects;
}
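// Example (sketch, not part of the original source; assert from <cassert>):
// the splitter returns the raw text of each top-level object; nested braces
// and quoted strings with escaped quotes do not confuse the depth counter.
inline void example_extract_array_objects() {
    const std::string json = R"([{"a":1},{"b":{"c":"x\"y"}}])";
    auto objects = extract_json_array_objects(json);
    assert(objects.size() == 2);
    assert(objects[0] == R"({"a":1})");
    assert(objects[1] == R"({"b":{"c":"x\"y"}})");
}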
// http_response::is_success — true for any 2xx status code.
bool is_success() const noexcept {
    return status_code >= 200 && status_code < 300;
}
// socket_handle — RAII wrapper for a platform socket handle (fragment; the
// POSIX invalid_value branch and the open/close calls are elided in this
// listing, so the destructor body below is reconstructed as comments).
static constexpr native_type invalid_value = INVALID_SOCKET;  // Windows branch

socket_handle(socket_handle&& other) noexcept : fd_(other.fd_) {
    other.fd_ = invalid_value;
}

socket_handle& operator=(socket_handle&& other) noexcept {
    if (this != &other) {
        // ... (close the currently held handle, then take ownership)
        other.fd_ = invalid_value;
    }
    return *this;
}

[[nodiscard]] bool valid() const noexcept { return fd_ != invalid_value; }

~socket_handle() {
    if (fd_ != invalid_value) {
        // ... (closesocket() on Windows, ::close() on POSIX)
    }
}
// parsed_url::parse — split a URL into host, port, and path components.
static std::optional<parsed_url> parse(const std::string& url) {
    parsed_url result;

    auto scheme_end = url.find("://");
    if (scheme_end == std::string::npos) return std::nullopt;

    auto host_start = scheme_end + 3;
    auto path_start = url.find('/', host_start);

    std::string host_port;
    if (path_start == std::string::npos) {
        host_port = url.substr(host_start);
    } else {
        host_port = url.substr(host_start, path_start - host_start);
        result.path = url.substr(path_start);
    }

    auto colon = host_port.find(':');
    if (colon != std::string::npos) {
        result.host = host_port.substr(0, colon);
        result.port = host_port.substr(colon + 1);
    } else {
        result.host = host_port;
    }
    return result;
}
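// Example (sketch, not part of the original source; assert from <cassert>):
// splitting a service URL. Only the "://" separator is required; this
// fragment shows no default-port logic, so pass the port explicitly.
inline void example_parse_url() {
    auto url = parsed_url::parse("http://ai-service.local:8080/inference");
    assert(url);
    assert(url->host == "ai-service.local");
    assert(url->port == "8080");
    assert(url->path == "/inference");
}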
// http_client request helpers. The debug/info/warn/error calls below are the
// project's static logging helpers; their class qualifier is elided in this
// listing.
[[nodiscard]] auto post(const std::string& path,
                        const std::string& body,
                        const std::string& content_type = "application/json")
    -> Result<http_response> {
    debug("AI service POST request: {} (body size: {} bytes)",
          config_.base_url + path, body.size());

    auto headers = build_headers();
    headers["Content-Type"] = content_type;
    return send_request("POST", path, headers, body);
}

[[nodiscard]] auto get(const std::string& path) -> Result<http_response> {
    debug("AI service GET request: {}", config_.base_url + path);

    auto headers = build_headers();
    return send_request("GET", path, headers, "");
}

[[nodiscard]] auto del(const std::string& path) -> Result<http_response> {
    debug("AI service DELETE request: {}", config_.base_url + path);

    auto headers = build_headers();
    return send_request("DELETE", path, headers, "");
}

// True when GET /health succeeds with a 2xx status.
auto check_connectivity() -> bool {
    auto result = get("/health");
    return result.is_ok() && result.value().is_success();
}
// Round-trip latency of GET /health, or nullopt when the request fails.
auto measure_latency() -> std::optional<std::chrono::milliseconds> {
    auto start = std::chrono::steady_clock::now();
    auto result = get("/health");
    auto end = std::chrono::steady_clock::now();

    if (!result.is_ok()) {
        return std::nullopt;
    }
    return std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
}
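// Example (sketch, not part of the original source): probing the service
// before submitting work. Both helpers ride on GET /health, so a single
// unreachable endpoint makes both report failure.
inline void example_probe_service(http_client& client) {
    if (!client.check_connectivity()) {
        return;  // service unreachable; skip submission
    }
    if (auto latency = client.measure_latency()) {
        // latency->count() is the /health round trip in milliseconds
        (void)*latency;
    }
}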
// http_client::send_request — one-shot HTTP/1.1 exchange over a blocking
// socket ("Connection: close", read until EOF).
auto send_request(const std::string& method,
                  const std::string& path,
                  const std::map<std::string, std::string>& headers,
                  const std::string& body) -> Result<http_response> {
    std::string full_url = config_.base_url + path;

    auto url = parsed_url::parse(full_url);
    if (!url) {
        return error_info(-1, "Invalid URL: " + full_url,
                          "ai_service_connector");
    }

    // Resolve the host/port to a TCP endpoint.
    struct addrinfo hints{};
    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;
    hints.ai_protocol = IPPROTO_TCP;

    struct addrinfo* addr_result = nullptr;
    int gai_err = getaddrinfo(url->host.c_str(), url->port.c_str(),
                              &hints, &addr_result);
    if (gai_err != 0) {
        return error_info(-1,
                          "DNS resolution failed for " + url->host + ": " +
                              gai_strerror(gai_err),
                          "ai_service_connector");
    }

    auto addr_cleanup = [](struct addrinfo* p) { freeaddrinfo(p); };
    std::unique_ptr<struct addrinfo, decltype(addr_cleanup)>
        addr_guard(addr_result, addr_cleanup);

    socket_handle sock(
        ::socket(addr_result->ai_family, addr_result->ai_socktype,
                 addr_result->ai_protocol));
    if (!sock.valid()) {
        return error_info(-1, "Failed to create socket",
                          "ai_service_connector");
    }

    // Apply the configured send/receive timeouts (default 30 s).
    auto timeout_sec = std::chrono::duration_cast<std::chrono::seconds>(
        config_.connection_timeout).count();
    if (timeout_sec <= 0) timeout_sec = 30;

#ifdef _WIN32
    DWORD tv = static_cast<DWORD>(timeout_sec * 1000);
    setsockopt(sock.get(), SOL_SOCKET, SO_RCVTIMEO,
               reinterpret_cast<const char*>(&tv), sizeof(tv));
    setsockopt(sock.get(), SOL_SOCKET, SO_SNDTIMEO,
               reinterpret_cast<const char*>(&tv), sizeof(tv));
#else
    struct timeval tv{};
    tv.tv_sec = timeout_sec;
    setsockopt(sock.get(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
    setsockopt(sock.get(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
#endif

    if (::connect(sock.get(), addr_result->ai_addr,
                  static_cast<int>(addr_result->ai_addrlen)) != 0) {
        return error_info(-1,
                          "Connection failed to " + url->host + ":" + url->port,
                          "ai_service_connector");
    }

    // Build the request: status line, headers, blank line, body.
    std::ostringstream request_stream;
    request_stream << method << " " << url->path << " HTTP/1.1\r\n";
    request_stream << "Host: " << url->host << "\r\n";
    request_stream << "Connection: close\r\n";

    for (const auto& [name, value] : headers) {
        request_stream << name << ": " << value << "\r\n";
    }

    request_stream << "Content-Length: " << body.size() << "\r\n";
    request_stream << "\r\n";
    request_stream << body;

    std::string request_data = request_stream.str();

    // Send the full request, looping over partial writes.
    auto total_sent = std::size_t{0};
    while (total_sent < request_data.size()) {
        auto sent = ::send(sock.get(),
                           request_data.data() + total_sent,
                           static_cast<int>(request_data.size() - total_sent),
                           0);
        if (sent <= 0) {
            return error_info(-1, "Failed to send HTTP request",
                              "ai_service_connector");
        }
        total_sent += static_cast<std::size_t>(sent);
    }

    // Read until the server closes the connection.
    std::string response_data;
    char buffer[4096];  // buffer size elided in the listing; 4096 assumed
    for (;;) {
        auto received = ::recv(sock.get(), buffer, sizeof(buffer), 0);
        if (received < 0) {
            return error_info(-1, "Failed to receive HTTP response",
                              "ai_service_connector");
        }
        if (received == 0) break;
        response_data.append(buffer, static_cast<std::size_t>(received));
    }

    return parse_http_response(response_data);
}
// http_client::parse_http_response — split a raw HTTP/1.1 response into
// status code, headers, and body.
static auto parse_http_response(const std::string& raw) -> Result<http_response> {
    http_response response;

    auto header_end = raw.find("\r\n\r\n");
    if (header_end == std::string::npos) {
        return error_info(-1, "Malformed HTTP response",
                          "ai_service_connector");
    }

    auto first_line_end = raw.find("\r\n");
    std::string status_line = raw.substr(0, first_line_end);

    auto first_space = status_line.find(' ');
    if (first_space == std::string::npos) {
        return error_info(-1, "Invalid HTTP status line",
                          "ai_service_connector");
    }
    auto code_start = first_space + 1;
    try {  // stoi guard assumed; elided in the listing
        response.status_code = std::stoi(status_line.substr(code_start));
    } catch (const std::exception&) {
        return error_info(-1, "Invalid HTTP status code",
                          "ai_service_connector");
    }

    auto headers_str = raw.substr(first_line_end + 2,
                                  header_end - first_line_end - 2);
    std::istringstream header_stream(headers_str);
    std::string line;
    while (std::getline(header_stream, line)) {
        if (!line.empty() && line.back() == '\r') line.pop_back();
        auto colon = line.find(':');
        if (colon != std::string::npos) {
            auto name = line.substr(0, colon);
            auto value = line.substr(colon + 1);
            // Trim leading spaces from the header value.
            auto val_start = value.find_first_not_of(' ');
            if (val_start != std::string::npos) {
                value = value.substr(val_start);
            }
            response.headers[name] = value;
        }
    }

    response.body = raw.substr(header_end + 4);
    return response;
}
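// Example (sketch, not part of the original source; assumes the static
// parse_http_response helper is accessible from here, assert from <cassert>):
// header values keep everything after the first ':' with leading spaces
// trimmed, and the body is everything after the blank line.
inline void example_parse_http_response() {
    const std::string raw =
        "HTTP/1.1 200 OK\r\n"
        "Content-Type: application/json\r\n"
        "\r\n"
        R"({"job_id":"abc-123"})";
    auto result = http_client::parse_http_response(raw);
    assert(result.is_ok());
    assert(result.value().status_code == 200);
    assert(result.value().headers.at("Content-Type") == "application/json");
    assert(result.value().body == R"({"job_id":"abc-123"})");
}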
// http_client::build_headers — base headers plus the configured auth scheme.
std::map<std::string, std::string> build_headers() const {
    std::map<std::string, std::string> headers;
    headers["Content-Type"] = "application/json";

    switch (config_.auth_type) {
        case authentication_type::api_key:
            headers["X-API-Key"] = config_.api_key;
            break;
        case authentication_type::bearer_token:
            headers["Authorization"] = "Bearer " + config_.bearer_token;
            break;
        case authentication_type::basic: {
            std::string credentials = config_.username + ":" + config_.password;
            headers["Authorization"] =
                "Basic " + json_util::base64_encode(credentials);
            break;
        }
        case authentication_type::none:
            break;
    }
    return headers;
}
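// Example (sketch, not part of the original source; assert from <cassert>):
// for basic auth the header takes the RFC 7617 form, so "user:pass" becomes
// "Authorization: Basic dXNlcjpwYXNz".
inline void example_basic_auth_header() {
    assert(json_util::base64_encode("user:pass") == "dXNlcjpwYXNz");
}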
// job_tracker — tracks active inference jobs. Writes take the exclusive lock,
// reads the shared one. The set_gauge/record_timing/increment_counter calls
// are the project's static metrics helpers; their class qualifier is elided
// in this listing.
void add_job(const std::string& job_id, const inference_request& request) {
    std::lock_guard lock(mutex_);

    inference_status status;
    status.job_id = job_id;
    status.status = inference_status_code::pending;
    status.message = "Job submitted";
    status.created_at = std::chrono::system_clock::now();

    jobs_[job_id] = status;
    request_map_[job_id] = request;

    set_gauge(metrics::active_jobs, static_cast<double>(jobs_.size()));
}

void update_status(const std::string& job_id, const inference_status& status) {
    std::lock_guard lock(mutex_);

    auto it = jobs_.find(job_id);
    if (it != jobs_.end()) {
        it->second = status;

        // Record timing and outcome counters once a job reaches a terminal state.
        if (status.status == inference_status_code::completed ||
            status.status == inference_status_code::failed) {
            (void)request_map_[job_id];
            auto duration =
                status.completed_at.value_or(std::chrono::system_clock::now())
                - status.created_at;  // subtrahend elided in the listing; created_at assumed
            record_timing(
                metrics::inference_duration,
                std::chrono::duration_cast<std::chrono::nanoseconds>(duration));

            if (status.status == inference_status_code::completed) {
                increment_counter(metrics::inference_requests_success);
            } else {
                increment_counter(metrics::inference_requests_failed);
            }
        }
    }
}

void remove_job(const std::string& job_id) {
    std::lock_guard lock(mutex_);
    jobs_.erase(job_id);
    request_map_.erase(job_id);

    set_gauge(metrics::active_jobs, static_cast<double>(jobs_.size()));
}

[[nodiscard]] std::optional<inference_status>
get_status(const std::string& job_id) const {
    std::shared_lock lock(mutex_);
    auto it = jobs_.find(job_id);
    if (it != jobs_.end()) {
        return it->second;
    }
    return std::nullopt;
}

[[nodiscard]] std::vector<inference_status> get_active_jobs() const {
    std::shared_lock lock(mutex_);
    std::vector<inference_status> result;
    for (const auto& [id, status] : jobs_) {
        if (status.status == inference_status_code::pending ||
            status.status == inference_status_code::running) {
            result.push_back(status);
        }
    }
    return result;
}

// job_tracker data members:
mutable std::shared_mutex mutex_;
std::unordered_map<std::string, inference_status> jobs_;
std::unordered_map<std::string, inference_request> request_map_;
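// Example (sketch, not part of the original source; job ID is a placeholder):
// one job's life cycle in the tracker. add_job stores a pending status plus
// the originating request; a terminal update_status records timing and
// outcome counters.
inline void example_track_job(job_tracker& tracker,
                              const inference_request& request) {
    tracker.add_job("abc-123", request);

    inference_status done;
    done.job_id = "abc-123";
    done.status = inference_status_code::completed;
    done.completed_at = std::chrono::system_clock::now();
    tracker.update_status("abc-123", done);

    auto status = tracker.get_status("abc-123");  // cached terminal status
    (void)status;
    tracker.remove_job("abc-123");                // drop it and update the gauge
}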
// impl::initialize — idempotent; a second call returns success immediately.
auto initialize(const ai_service_config& config) -> Result<std::monostate> {
    std::lock_guard lock(mutex_);

    if (initialized_) {
        return std::monostate{};
    }

    if (config.base_url.empty()) {
        return error_info(-1, "Base URL is required", "ai_service_connector");
    }

    config_ = config;
    http_client_ = std::make_unique<http_client>(config);

    // Connectivity problems are logged but do not fail initialization.
    if (!http_client_->check_connectivity()) {
        warn("AI service at {} is not responding, continuing anyway",
             config.base_url);
    }

    job_tracker_ = std::make_unique<job_tracker>();
    initialized_ = true;

    info("AI service connector initialized: url={}, auth={}",
         config.base_url, to_string(config.auth_type));

    return std::monostate{};
}

void shutdown() {
    std::lock_guard lock(mutex_);
    http_client_.reset();
    job_tracker_.reset();
    initialized_ = false;
}

bool is_initialized() const noexcept {
    return initialized_.load();
}
// impl::request_inference — validate, POST /inference, and start tracking.
auto request_inference(const inference_request& request) -> Result<std::string> {
    if (!initialized_) {
        return error_info(-1, "AI service connector not initialized",
                          "ai_service_connector");
    }
    if (request.study_instance_uid.empty()) {
        return error_info(-1, "Study Instance UID is required",
                          "ai_service_connector");
    }
    if (request.model_id.empty()) {
        return error_info(-1, "Model ID is required", "ai_service_connector");
    }

    std::string body = json_util::build_request_json(request);

    auto response = http_client_->post("/inference", body);
    if (response.is_err()) {
        error("Failed to submit inference request: {}",
              response.error().message);
        return response.error();
    }

    if (!response.value().is_success()) {
        return error_info(response.value().status_code,
                          "AI service returned error: " + response.value().body,
                          "ai_service_connector");
    }

    auto job_id = json_util::extract_string(response.value().body, "job_id");
    if (!job_id) {
        return error_info(-1, "Failed to parse job ID from response",
                          "ai_service_connector");
    }

    job_tracker_->add_job(*job_id, request);

    info("Inference request submitted: job_id={}, study={}, model={}",
         *job_id, request.study_instance_uid, request.model_id);

    increment_counter(metrics::inference_requests_total);

    return *job_id;
}
// impl::check_status — serve terminal states from the local cache, otherwise
// query the service and refresh the tracker.
auto check_status(const std::string& job_id) -> Result<inference_status> {
    if (!initialized_) {
        return error_info(-1, "AI service connector not initialized",
                          "ai_service_connector");
    }

    auto cached = job_tracker_->get_status(job_id);
    if (cached && (cached->status == inference_status_code::completed ||
                   cached->status == inference_status_code::failed ||
                   cached->status == inference_status_code::cancelled)) {
        return *cached;
    }

    auto response = http_client_->get("/inference/" + job_id);
    if (response.is_err()) {
        return response.error();
    }

    if (!response.value().is_success()) {
        if (response.value().status_code == 404) {
            return error_info(404, "Job not found: " + job_id,
                              "ai_service_connector");
        }
        return error_info(response.value().status_code,
                          "Failed to get job status",
                          "ai_service_connector");
    }

    auto status = json_util::parse_status_json(response.value().body);
    if (!status) {
        return error_info(-1, "Failed to parse status response",
                          "ai_service_connector");
    }

    job_tracker_->update_status(job_id, *status);
    return *status;
}
// impl::cancel — DELETE the job server-side (404 counts as already gone) and
// mark it cancelled locally.
auto cancel(const std::string& job_id) -> Result<std::monostate> {
    if (!initialized_) {
        return error_info(-1, "AI service connector not initialized",
                          "ai_service_connector");
    }

    auto response = http_client_->del("/inference/" + job_id);
    if (response.is_err()) {
        return response.error();
    }

    if (!response.value().is_success() && response.value().status_code != 404) {
        return error_info(response.value().status_code,
                          "Failed to cancel job",
                          "ai_service_connector");
    }

    inference_status status;
    status.job_id = job_id;
    status.status = inference_status_code::cancelled;
    status.message = "Job cancelled by user";
    status.completed_at = std::chrono::system_clock::now();
    job_tracker_->update_status(job_id, status);

    return std::monostate{};
}
// impl::wait_for_completion — poll check_status until a terminal state or the
// local deadline; the error path and callback invocation inside the loop are
// elided in this listing and reconstructed with comments below.
auto wait_for_completion(const std::string& job_id,
                         std::chrono::milliseconds timeout,
                         status_callback callback) -> Result<inference_status> {
    if (!initialized_) {
        return error_info(-1, "AI service connector not initialized",
                          "ai_service_connector");
    }

    auto start_time = std::chrono::steady_clock::now();
    auto deadline = start_time + timeout;

    while (std::chrono::steady_clock::now() < deadline) {
        auto result = check_status(job_id);
        if (result.is_err()) {
            return result.error();  // assumed; elided in the listing
        }

        auto& status = result.value();
        if (callback) callback(status);  // assumed; elided in the listing

        if (status.status == inference_status_code::completed ||
            status.status == inference_status_code::failed ||
            status.status == inference_status_code::cancelled ||
            status.status == inference_status_code::timeout) {
            return status;
        }

        std::this_thread::sleep_for(config_.polling_interval);
    }

    // Deadline passed without the job reaching a terminal state.
    inference_status timeout_status;
    timeout_status.job_id = job_id;
    timeout_status.status = inference_status_code::timeout;
    timeout_status.message = "Timed out waiting for job completion";
    return timeout_status;
}
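// Example (sketch, not part of the original source; job ID is a placeholder
// and the callback signature void(const inference_status&) is assumed):
// polling with a progress callback. The deadline is enforced client-side, so
// on timeout the remote job keeps running unless cancel() is called.
inline void example_wait_for_completion() {
    auto result = ai_service_connector::wait_for_completion(
        "abc-123",
        std::chrono::minutes{5},
        [](const inference_status& s) {
            (void)s;  // invoked with the latest status on each poll
        });
    if (result.is_ok() &&
        result.value().status == inference_status_code::timeout) {
        (void)ai_service_connector::cancel("abc-123");
    }
}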
auto list_active_jobs() -> Result<std::vector<inference_status>> {
    if (!initialized_) {
        return error_info(-1, "AI service connector not initialized",
                          "ai_service_connector");
    }
    return job_tracker_->get_active_jobs();
}
// impl::list_models — GET /models and parse each object in the returned array.
auto list_models() -> Result<std::vector<model_info>> {
    if (!initialized_) {
        return error_info(-1, "AI service connector not initialized",
                          "ai_service_connector");
    }

    auto response = http_client_->get("/models");
    if (response.is_err()) {
        return response.error();
    }

    if (!response.value().is_success()) {
        return error_info(response.value().status_code,
                          "Failed to list models",
                          "ai_service_connector");
    }

    std::vector<model_info> models;
    auto model_objects = json_util::extract_json_array_objects(
        response.value().body);

    for (const auto& obj : model_objects) {
        auto info = json_util::parse_model_json(obj);
        if (info) {
            models.push_back(std::move(*info));
        }
    }
    return models;
}
auto get_model_info(const std::string& model_id) -> Result<model_info> {
    if (!initialized_) {
        return error_info(-1, "AI service connector not initialized",
                          "ai_service_connector");
    }

    auto response = http_client_->get("/models/" + model_id);
    if (response.is_err()) {
        return response.error();
    }

    if (!response.value().is_success()) {
        return error_info(response.value().status_code,
                          "Failed to get model info",
                          "ai_service_connector");
    }

    auto info = json_util::parse_model_json(response.value().body);
    if (!info) {
        return error_info(-1, "Failed to parse model info",
                          "ai_service_connector");
    }
    return *info;
}
auto check_health() -> bool {
    if (!initialized_ || !http_client_) {
        return false;
    }
    return http_client_->check_connectivity();
}

[[nodiscard]] auto get_latency() -> std::optional<std::chrono::milliseconds> {
    if (!initialized_ || !http_client_) {
        return std::nullopt;
    }
    return http_client_->measure_latency();
}
// impl::update_credentials — swap auth settings and rebuild the HTTP client
// so the next request already carries the new header.
auto update_credentials(authentication_type auth_type,
                        const std::string& credentials) -> Result<std::monostate> {
    std::lock_guard lock(mutex_);

    config_.auth_type = auth_type;

    switch (auth_type) {
        case authentication_type::api_key:
            config_.api_key = credentials;
            break;
        case authentication_type::bearer_token:
            config_.bearer_token = credentials;
            break;
        case authentication_type::basic: {
            auto colon_pos = credentials.find(':');
            if (colon_pos != std::string::npos) {
                config_.username = credentials.substr(0, colon_pos);
                config_.password = credentials.substr(colon_pos + 1);
            } else {
                return error_info(-1,
                    "Basic auth credentials must be in format 'username:password'",
                    "ai_service_connector");
            }
            break;
        }
        case authentication_type::none:
            break;
    }

    http_client_ = std::make_unique<http_client>(config_);

    info("AI service credentials updated: auth={}", to_string(auth_type));

    return std::monostate{};
}

// impl data member (fragment):
std::atomic<bool> initialized_{false};
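// Example (sketch, not part of the original source; token value is a
// placeholder): rotating credentials at runtime. The HTTP client is rebuilt,
// so the next request already carries the new header, and malformed basic
// credentials are rejected with an error Result.
inline void example_rotate_credentials() {
    auto ok = ai_service_connector::update_credentials(
        authentication_type::bearer_token, "new-token-value");
    auto bad = ai_service_connector::update_credentials(
        authentication_type::basic, "missing-colon");
    // ok.is_ok() == true; bad.is_err() == true
    (void)ok; (void)bad;
}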
// ai_service_connector public facade: one eagerly created impl instance, with
// every static method forwarding to it.
std::unique_ptr<ai_service_connector::impl> ai_service_connector::pimpl_ =
    std::make_unique<ai_service_connector::impl>();

auto ai_service_connector::initialize(const ai_service_config& config)
    -> Result<std::monostate> { return pimpl_->initialize(config); }

auto ai_service_connector::is_initialized() noexcept -> bool {
    return pimpl_->is_initialized();
}

auto ai_service_connector::request_inference(const inference_request& request)
    -> Result<std::string> { return pimpl_->request_inference(request); }

auto ai_service_connector::check_status(const std::string& job_id)
    -> Result<inference_status> { return pimpl_->check_status(job_id); }

auto ai_service_connector::cancel(const std::string& job_id)
    -> Result<std::monostate> { return pimpl_->cancel(job_id); }

auto ai_service_connector::wait_for_completion(
    const std::string& job_id,
    std::chrono::milliseconds timeout,
    status_callback callback) -> Result<inference_status> {
    return pimpl_->wait_for_completion(job_id, timeout, std::move(callback));
}

auto ai_service_connector::list_active_jobs()
    -> Result<std::vector<inference_status>> { return pimpl_->list_active_jobs(); }

auto ai_service_connector::list_models()
    -> Result<std::vector<model_info>> { return pimpl_->list_models(); }

auto ai_service_connector::get_model_info(const std::string& model_id)
    -> Result<model_info> { return pimpl_->get_model_info(model_id); }

auto ai_service_connector::check_health() -> bool {
    return pimpl_->check_health();
}

auto ai_service_connector::get_latency()
    -> std::optional<std::chrono::milliseconds> { return pimpl_->get_latency(); }

auto ai_service_connector::get_config() -> const ai_service_config& {
    return pimpl_->get_config();
}

auto ai_service_connector::update_credentials(authentication_type auth_type,
                                              const std::string& credentials)
    -> Result<std::monostate> {
    return pimpl_->update_credentials(auth_type, credentials);
}
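// Example (sketch, not part of the original source; URL, UID, and model ID
// are placeholders): end-to-end use of the static facade, from initialize
// through inference to shutdown. wait_for_completion uses its documented
// default timeout of 30 minutes here.
inline void example_end_to_end() {
    ai_service_config config;
    config.base_url = "http://ai-service.local:8080";
    if (ai_service_connector::initialize(config).is_err()) {
        return;
    }

    inference_request request;
    request.study_instance_uid = "1.2.840.113619.2.55.3";
    request.model_id = "chest-ct-nodule-v2";

    if (auto job_id = ai_service_connector::request_inference(request);
        job_id.is_ok()) {
        auto status = ai_service_connector::wait_for_completion(job_id.value());
        // status.value().status is terminal: completed/failed/cancelled/timeout
        (void)status;
    }

    ai_service_connector::shutdown();
}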