PACS System 0.1.0
PACS DICOM system library
Loading...
Searching...
No Matches
migration_runner.cpp
Go to the documentation of this file.
1// BSD 3-Clause License
2// Copyright (c) 2021-2025, 🍀☀🌕🌥 🌊
3// See the LICENSE file in the project root for full license information.
4
17
18#include <sqlite3.h>
19
21
22#ifdef PACS_WITH_DATABASE_SYSTEM
24#endif
25
26namespace kcenon::pacs::storage {
27
28// Use common_system's ok() function
29using kcenon::common::ok;
30using kcenon::common::make_error;
31
32// ============================================================================
33// Construction
34// ============================================================================
35
37 // Register all migrations (SQLite version)
38 migrations_.push_back({1, [this](sqlite3* db) { return migrate_v1(db); }});
39 migrations_.push_back({2, [this](sqlite3* db) { return migrate_v2(db); }});
40 migrations_.push_back({3, [this](sqlite3* db) { return migrate_v3(db); }});
41 migrations_.push_back({4, [this](sqlite3* db) { return migrate_v4(db); }});
42 migrations_.push_back({5, [this](sqlite3* db) { return migrate_v5(db); }});
43 migrations_.push_back({6, [this](sqlite3* db) { return migrate_v6(db); }});
44 migrations_.push_back({7, [this](sqlite3* db) { return migrate_v7(db); }});
45 migrations_.push_back({8, [this](sqlite3* db) { return migrate_v8(db); }});
46 migrations_.push_back({9, [this](sqlite3* db) { return migrate_v9(db); }});
47
48#ifdef PACS_WITH_DATABASE_SYSTEM
49 // Register all migrations (pacs_database_adapter version)
50 adapter_migrations_.push_back(
51 {1, [this](pacs_database_adapter& db) { return migrate_v1(db); }});
52 adapter_migrations_.push_back(
53 {2, [this](pacs_database_adapter& db) { return migrate_v2(db); }});
54 adapter_migrations_.push_back(
55 {3, [this](pacs_database_adapter& db) { return migrate_v3(db); }});
56 adapter_migrations_.push_back(
57 {4, [this](pacs_database_adapter& db) { return migrate_v4(db); }});
58 adapter_migrations_.push_back(
59 {5, [this](pacs_database_adapter& db) { return migrate_v5(db); }});
60 adapter_migrations_.push_back(
61 {6, [this](pacs_database_adapter& db) { return migrate_v6(db); }});
62 adapter_migrations_.push_back(
63 {7, [this](pacs_database_adapter& db) { return migrate_v7(db); }});
64 adapter_migrations_.push_back(
65 {8, [this](pacs_database_adapter& db) { return migrate_v8(db); }});
66 adapter_migrations_.push_back(
67 {9, [this](pacs_database_adapter& db) { return migrate_v9(db); }});
68#endif
69}
70
71// ============================================================================
72// Migration Operations
73// ============================================================================
74
75auto migration_runner::run_migrations(sqlite3* db) -> VoidResult {
76 return run_migrations_to(db, LATEST_VERSION);
77}
78
79auto migration_runner::run_migrations_to(sqlite3* db, int target_version)
80 -> VoidResult {
81 if (target_version > LATEST_VERSION) {
82 return make_error<std::monostate>(
83 -1,
84 kcenon::pacs::compat::format("Target version {} exceeds latest version {}",
85 target_version, LATEST_VERSION),
86 "storage");
87 }
88
89 // Ensure schema_version table exists
90 auto ensure_result = ensure_schema_version_table(db);
91 if (ensure_result.is_err()) {
92 return ensure_result;
93 }
94
95 auto current_version = get_current_version(db);
96
97 // Nothing to do if already at or past target
98 if (current_version >= target_version) {
99 return ok();
100 }
101
102 // Apply each migration in a transaction
103 while (current_version < target_version) {
104 auto next_version = current_version + 1;
105
106 // Begin transaction
107 auto begin_result = execute_sql(db, "BEGIN TRANSACTION;");
108 if (begin_result.is_err()) {
109 return begin_result;
110 }
111
112 // Apply migration
113 auto migration_result = apply_migration(db, next_version);
114 if (migration_result.is_err()) {
115 // Rollback on failure
116 (void)execute_sql(db, "ROLLBACK;");
117 return migration_result;
118 }
119
120 // Commit transaction
121 auto commit_result = execute_sql(db, "COMMIT;");
122 if (commit_result.is_err()) {
123 (void)execute_sql(db, "ROLLBACK;");
124 return commit_result;
125 }
126
127 current_version = next_version;
128 }
129
130 return ok();
131}
132
133// ============================================================================
134// Version Information
135// ============================================================================
136
137auto migration_runner::get_current_version(sqlite3* db) const -> int {
138 // Check if schema_version table exists
139 const char* check_sql =
140 "SELECT name FROM sqlite_master WHERE type='table' AND name='schema_version';";
141
142 sqlite3_stmt* stmt = nullptr;
143 auto rc = sqlite3_prepare_v2(db, check_sql, -1, &stmt, nullptr);
144 if (rc != SQLITE_OK) {
145 return 0;
146 }
147
148 rc = sqlite3_step(stmt);
149 sqlite3_finalize(stmt);
150
151 if (rc != SQLITE_ROW) {
152 // Table doesn't exist
153 return 0;
154 }
155
156 // Get max version from table
157 const char* version_sql = "SELECT MAX(version) FROM schema_version;";
158 rc = sqlite3_prepare_v2(db, version_sql, -1, &stmt, nullptr);
159 if (rc != SQLITE_OK) {
160 return 0;
161 }
162
163 int version = 0;
164 if (sqlite3_step(stmt) == SQLITE_ROW) {
165 // sqlite3_column_int returns 0 for NULL
166 version = sqlite3_column_int(stmt, 0);
167 }
168 sqlite3_finalize(stmt);
169
170 return version;
171}
172
173auto migration_runner::get_latest_version() const noexcept -> int {
174 return LATEST_VERSION;
175}
176
177auto migration_runner::needs_migration(sqlite3* db) const -> bool {
178 return get_current_version(db) < LATEST_VERSION;
179}
180
181// ============================================================================
182// Migration History
183// ============================================================================
184
185auto migration_runner::get_history(sqlite3* db) const
186 -> std::vector<migration_record> {
187 std::vector<migration_record> history;
188
189 const char* sql =
190 "SELECT version, description, applied_at FROM schema_version ORDER BY version;";
191
192 sqlite3_stmt* stmt = nullptr;
193 auto rc = sqlite3_prepare_v2(db, sql, -1, &stmt, nullptr);
194 if (rc != SQLITE_OK) {
195 return history;
196 }
197
198 while (sqlite3_step(stmt) == SQLITE_ROW) {
199 migration_record record;
200 record.version = sqlite3_column_int(stmt, 0);
201
202 const auto* desc = reinterpret_cast<const char*>(sqlite3_column_text(stmt, 1));
203 record.description = desc ? desc : "";
204
205 const auto* applied = reinterpret_cast<const char*>(sqlite3_column_text(stmt, 2));
206 record.applied_at = applied ? applied : "";
207
208 history.push_back(std::move(record));
209 }
210
211 sqlite3_finalize(stmt);
212 return history;
213}
214
215// ============================================================================
216// Internal Implementation
217// ============================================================================
218
219auto migration_runner::ensure_schema_version_table(sqlite3* db) -> VoidResult {
220 const char* sql = R"(
221 CREATE TABLE IF NOT EXISTS schema_version (
222 version INTEGER PRIMARY KEY,
223 description TEXT NOT NULL,
224 applied_at TEXT NOT NULL DEFAULT (datetime('now'))
225 );
226 )";
227
228 return execute_sql(db, sql);
229}
230
231auto migration_runner::apply_migration(sqlite3* db, int version) -> VoidResult {
232 // Find the migration function
233 for (const auto& [ver, func] : migrations_) {
234 if (ver == version) {
235 return func(db);
236 }
237 }
238
239 return make_error<std::monostate>(
240 -1,
241 kcenon::pacs::compat::format("Migration for version {} not found", version),
242 "storage");
243}
244
245auto migration_runner::record_migration(sqlite3* db, int version,
246 std::string_view description)
247 -> VoidResult {
248 const char* sql =
249 "INSERT INTO schema_version (version, description) VALUES (?, ?);";
250
251 sqlite3_stmt* stmt = nullptr;
252 auto rc = sqlite3_prepare_v2(db, sql, -1, &stmt, nullptr);
253 if (rc != SQLITE_OK) {
254 return make_error<std::monostate>(
255 rc,
256 kcenon::pacs::compat::format("Failed to prepare statement: {}",
257 sqlite3_errmsg(db)),
258 "storage");
259 }
260
261 sqlite3_bind_int(stmt, 1, version);
262 sqlite3_bind_text(stmt, 2, description.data(),
263 static_cast<int>(description.size()), SQLITE_TRANSIENT);
264
265 rc = sqlite3_step(stmt);
266 sqlite3_finalize(stmt);
267
268 if (rc != SQLITE_DONE) {
269 return make_error<std::monostate>(
270 rc,
271 kcenon::pacs::compat::format("Failed to record migration: {}",
272 sqlite3_errmsg(db)),
273 "storage");
274 }
275
276 return ok();
277}
278
279auto migration_runner::execute_sql(sqlite3* db, std::string_view sql)
280 -> VoidResult {
281 char* errmsg = nullptr;
282 auto rc = sqlite3_exec(db, sql.data(), nullptr, nullptr, &errmsg);
283
284 if (rc != SQLITE_OK) {
285 auto error_str = errmsg ? std::string(errmsg) : "Unknown error";
286 sqlite3_free(errmsg);
287
288 return make_error<std::monostate>(
289 rc, kcenon::pacs::compat::format("SQL execution failed: {}", error_str),
290 "storage");
291 }
292
293 return ok();
294}
295
296// ============================================================================
297// Migration Implementations
298// ============================================================================
299
300auto migration_runner::migrate_v1(sqlite3* db) -> VoidResult {
301 // V1: Initial schema - Create all base tables
302 const char* sql = R"(
303 -- =====================================================================
304 -- PATIENTS TABLE
305 -- =====================================================================
306 CREATE TABLE IF NOT EXISTS patients (
307 patient_pk INTEGER PRIMARY KEY AUTOINCREMENT,
308 patient_id TEXT NOT NULL UNIQUE,
309 patient_name TEXT,
310 birth_date TEXT,
311 sex TEXT,
312 other_ids TEXT,
313 ethnic_group TEXT,
314 comments TEXT,
315 created_at TEXT NOT NULL DEFAULT (datetime('now')),
316 updated_at TEXT NOT NULL DEFAULT (datetime('now')),
317 CHECK (length(patient_id) <= 64)
318 );
319
320 CREATE INDEX IF NOT EXISTS idx_patients_name ON patients(patient_name);
321 CREATE INDEX IF NOT EXISTS idx_patients_birth ON patients(birth_date);
322
323 -- =====================================================================
324 -- STUDIES TABLE
325 -- =====================================================================
326 CREATE TABLE IF NOT EXISTS studies (
327 study_pk INTEGER PRIMARY KEY AUTOINCREMENT,
328 patient_pk INTEGER NOT NULL REFERENCES patients(patient_pk)
329 ON DELETE CASCADE,
330 study_uid TEXT NOT NULL UNIQUE,
331 study_id TEXT,
332 study_date TEXT,
333 study_time TEXT,
334 accession_number TEXT,
335 referring_physician TEXT,
336 study_description TEXT,
337 modalities_in_study TEXT,
338 num_series INTEGER DEFAULT 0,
339 num_instances INTEGER DEFAULT 0,
340 created_at TEXT NOT NULL DEFAULT (datetime('now')),
341 updated_at TEXT NOT NULL DEFAULT (datetime('now')),
342 CHECK (length(study_uid) <= 64)
343 );
344
345 CREATE INDEX IF NOT EXISTS idx_studies_patient ON studies(patient_pk);
346 CREATE INDEX IF NOT EXISTS idx_studies_date ON studies(study_date);
347 CREATE INDEX IF NOT EXISTS idx_studies_accession ON studies(accession_number);
348
349 -- =====================================================================
350 -- SERIES TABLE
351 -- =====================================================================
352 CREATE TABLE IF NOT EXISTS series (
353 series_pk INTEGER PRIMARY KEY AUTOINCREMENT,
354 study_pk INTEGER NOT NULL REFERENCES studies(study_pk)
355 ON DELETE CASCADE,
356 series_uid TEXT NOT NULL UNIQUE,
357 series_number INTEGER,
358 modality TEXT,
359 series_description TEXT,
360 body_part_examined TEXT,
361 station_name TEXT,
362 num_instances INTEGER DEFAULT 0,
363 created_at TEXT NOT NULL DEFAULT (datetime('now')),
364 updated_at TEXT NOT NULL DEFAULT (datetime('now')),
365 CHECK (length(series_uid) <= 64)
366 );
367
368 CREATE INDEX IF NOT EXISTS idx_series_study ON series(study_pk);
369 CREATE INDEX IF NOT EXISTS idx_series_modality ON series(modality);
370
371 -- =====================================================================
372 -- INSTANCES TABLE
373 -- =====================================================================
374 CREATE TABLE IF NOT EXISTS instances (
375 instance_pk INTEGER PRIMARY KEY AUTOINCREMENT,
376 series_pk INTEGER NOT NULL REFERENCES series(series_pk)
377 ON DELETE CASCADE,
378 sop_uid TEXT NOT NULL UNIQUE,
379 sop_class_uid TEXT NOT NULL,
380 instance_number INTEGER,
381 transfer_syntax TEXT,
382 content_date TEXT,
383 content_time TEXT,
384 rows INTEGER,
385 columns INTEGER,
386 bits_allocated INTEGER,
387 number_of_frames INTEGER,
388 file_path TEXT NOT NULL,
389 file_size INTEGER NOT NULL,
390 file_hash TEXT,
391 created_at TEXT NOT NULL DEFAULT (datetime('now')),
392 CHECK (length(sop_uid) <= 64),
393 CHECK (file_size >= 0)
394 );
395
396 CREATE INDEX IF NOT EXISTS idx_instances_series ON instances(series_pk);
397 CREATE INDEX IF NOT EXISTS idx_instances_sop_class ON instances(sop_class_uid);
398 CREATE INDEX IF NOT EXISTS idx_instances_number ON instances(instance_number);
399 CREATE INDEX IF NOT EXISTS idx_instances_created ON instances(created_at);
400
401 -- =====================================================================
402 -- MPPS TABLE (Modality Performed Procedure Step)
403 -- =====================================================================
404 CREATE TABLE IF NOT EXISTS mpps (
405 mpps_pk INTEGER PRIMARY KEY AUTOINCREMENT,
406 mpps_uid TEXT NOT NULL UNIQUE,
407 status TEXT NOT NULL,
408 start_datetime TEXT,
409 end_datetime TEXT,
410 station_ae TEXT,
411 station_name TEXT,
412 modality TEXT,
413 study_uid TEXT,
414 accession_no TEXT,
415 scheduled_step_id TEXT,
416 requested_proc_id TEXT,
417 performed_series TEXT,
418 created_at TEXT NOT NULL DEFAULT (datetime('now')),
419 updated_at TEXT NOT NULL DEFAULT (datetime('now')),
420 CHECK (status IN ('IN PROGRESS', 'COMPLETED', 'DISCONTINUED'))
421 );
422
423 CREATE INDEX IF NOT EXISTS idx_mpps_status ON mpps(status);
424 CREATE INDEX IF NOT EXISTS idx_mpps_station ON mpps(station_ae);
425 CREATE INDEX IF NOT EXISTS idx_mpps_study ON mpps(study_uid);
426 CREATE INDEX IF NOT EXISTS idx_mpps_date ON mpps(start_datetime);
427
428 -- =====================================================================
429 -- WORKLIST TABLE (Modality Worklist)
430 -- =====================================================================
431 CREATE TABLE IF NOT EXISTS worklist (
432 worklist_pk INTEGER PRIMARY KEY AUTOINCREMENT,
433 step_id TEXT NOT NULL,
434 step_status TEXT DEFAULT 'SCHEDULED',
435 patient_id TEXT NOT NULL,
436 patient_name TEXT,
437 birth_date TEXT,
438 sex TEXT,
439 accession_no TEXT,
440 requested_proc_id TEXT,
441 study_uid TEXT,
442 scheduled_datetime TEXT NOT NULL,
443 station_ae TEXT,
444 station_name TEXT,
445 modality TEXT NOT NULL,
446 procedure_desc TEXT,
447 protocol_code TEXT,
448 referring_phys TEXT,
449 referring_phys_id TEXT,
450 created_at TEXT NOT NULL DEFAULT (datetime('now')),
451 updated_at TEXT NOT NULL DEFAULT (datetime('now')),
452 UNIQUE (step_id, accession_no)
453 );
454
455 CREATE INDEX IF NOT EXISTS idx_worklist_station ON worklist(station_ae);
456 CREATE INDEX IF NOT EXISTS idx_worklist_modality ON worklist(modality);
457 CREATE INDEX IF NOT EXISTS idx_worklist_scheduled ON worklist(scheduled_datetime);
458 CREATE INDEX IF NOT EXISTS idx_worklist_patient ON worklist(patient_id);
459 CREATE INDEX IF NOT EXISTS idx_worklist_accession ON worklist(accession_no);
460 CREATE INDEX IF NOT EXISTS idx_worklist_status ON worklist(step_status);
461 CREATE INDEX IF NOT EXISTS idx_worklist_station_date_mod
462 ON worklist(station_ae, scheduled_datetime, modality);
463
464 -- =====================================================================
465 -- TRIGGERS FOR PARENT COUNT UPDATES
466 -- =====================================================================
467 CREATE TRIGGER IF NOT EXISTS trg_instances_insert
468 AFTER INSERT ON instances
469 BEGIN
470 UPDATE series
471 SET num_instances = num_instances + 1,
472 updated_at = datetime('now')
473 WHERE series_pk = NEW.series_pk;
474
475 UPDATE studies
476 SET num_instances = num_instances + 1,
477 updated_at = datetime('now')
478 WHERE study_pk = (SELECT study_pk FROM series WHERE series_pk = NEW.series_pk);
479 END;
480
481 CREATE TRIGGER IF NOT EXISTS trg_instances_delete
482 AFTER DELETE ON instances
483 BEGIN
484 UPDATE series
485 SET num_instances = num_instances - 1,
486 updated_at = datetime('now')
487 WHERE series_pk = OLD.series_pk;
488
489 UPDATE studies
490 SET num_instances = num_instances - 1,
491 updated_at = datetime('now')
492 WHERE study_pk = (SELECT study_pk FROM series WHERE series_pk = OLD.series_pk);
493 END;
494
495 CREATE TRIGGER IF NOT EXISTS trg_series_insert
496 AFTER INSERT ON series
497 BEGIN
498 UPDATE studies
499 SET num_series = num_series + 1,
500 updated_at = datetime('now')
501 WHERE study_pk = NEW.study_pk;
502 END;
503
504 CREATE TRIGGER IF NOT EXISTS trg_series_delete
505 AFTER DELETE ON series
506 BEGIN
507 UPDATE studies
508 SET num_series = num_series - 1,
509 updated_at = datetime('now')
510 WHERE study_pk = OLD.study_pk;
511 END;
512 )";
513
514 auto result = execute_sql(db, sql);
515 if (result.is_err()) {
516 return result;
517 }
518
519 return record_migration(db, 1, "Initial schema creation");
520}
521
522auto migration_runner::migrate_v2(sqlite3* db) -> VoidResult {
523 // V2: Add audit_log table for REST API audit endpoints
524 const char* sql = R"(
525 -- =====================================================================
526 -- AUDIT_LOG TABLE (for REST API and HIPAA compliance)
527 -- =====================================================================
528 CREATE TABLE IF NOT EXISTS audit_log (
529 audit_pk INTEGER PRIMARY KEY AUTOINCREMENT,
530 event_type TEXT NOT NULL,
531 outcome TEXT DEFAULT 'SUCCESS',
532 timestamp TEXT NOT NULL DEFAULT (datetime('now')),
533 user_id TEXT,
534 source_ae TEXT,
535 target_ae TEXT,
536 source_ip TEXT,
537 patient_id TEXT,
538 study_uid TEXT,
539 message TEXT,
540 details TEXT,
541 CHECK (outcome IN ('SUCCESS', 'FAILURE', 'WARNING'))
542 );
543
544 CREATE INDEX IF NOT EXISTS idx_audit_event_type ON audit_log(event_type);
545 CREATE INDEX IF NOT EXISTS idx_audit_timestamp ON audit_log(timestamp);
546 CREATE INDEX IF NOT EXISTS idx_audit_user ON audit_log(user_id);
547 CREATE INDEX IF NOT EXISTS idx_audit_source_ae ON audit_log(source_ae);
548 CREATE INDEX IF NOT EXISTS idx_audit_patient ON audit_log(patient_id);
549 CREATE INDEX IF NOT EXISTS idx_audit_study ON audit_log(study_uid);
550 CREATE INDEX IF NOT EXISTS idx_audit_outcome ON audit_log(outcome);
551 )";
552
553 auto result = execute_sql(db, sql);
554 if (result.is_err()) {
555 return result;
556 }
557
558 return record_migration(db, 2, "Add audit_log table");
559}
560
561auto migration_runner::migrate_v3(sqlite3* db) -> VoidResult {
562 // V3: Add remote_nodes table for PACS client remote node management
563 const char* sql = R"(
564 -- =====================================================================
565 -- REMOTE_NODES TABLE (for PACS client SCU operations)
566 -- =====================================================================
567 CREATE TABLE IF NOT EXISTS remote_nodes (
568 pk INTEGER PRIMARY KEY AUTOINCREMENT,
569 node_id TEXT NOT NULL UNIQUE,
570 name TEXT,
571 ae_title TEXT NOT NULL,
572 host TEXT NOT NULL,
573 port INTEGER NOT NULL DEFAULT 104,
574 supports_find INTEGER NOT NULL DEFAULT 1,
575 supports_move INTEGER NOT NULL DEFAULT 1,
576 supports_get INTEGER NOT NULL DEFAULT 0,
577 supports_store INTEGER NOT NULL DEFAULT 1,
578 supports_worklist INTEGER NOT NULL DEFAULT 0,
579 connection_timeout_sec INTEGER NOT NULL DEFAULT 30,
580 dimse_timeout_sec INTEGER NOT NULL DEFAULT 60,
581 max_associations INTEGER NOT NULL DEFAULT 4,
582 status TEXT NOT NULL DEFAULT 'unknown',
583 last_verified TEXT,
584 last_error TEXT,
585 last_error_message TEXT,
586 created_at TEXT NOT NULL DEFAULT (datetime('now')),
587 updated_at TEXT NOT NULL DEFAULT (datetime('now')),
588 CHECK (port > 0 AND port <= 65535),
589 CHECK (status IN ('unknown', 'online', 'offline', 'error', 'verifying'))
590 );
591
592 CREATE INDEX IF NOT EXISTS idx_remote_nodes_ae_title ON remote_nodes(ae_title);
593 CREATE INDEX IF NOT EXISTS idx_remote_nodes_host ON remote_nodes(host);
594 CREATE INDEX IF NOT EXISTS idx_remote_nodes_status ON remote_nodes(status);
595 )";
596
597 auto result = execute_sql(db, sql);
598 if (result.is_err()) {
599 return result;
600 }
601
602 return record_migration(db, 3, "Add remote_nodes table for PACS client");
603}
604
605auto migration_runner::migrate_v4(sqlite3* db) -> VoidResult {
606 // V4: Add jobs table for async DICOM operations
607 const char* sql = R"(
608 -- =====================================================================
609 -- JOBS TABLE (for async DICOM operations - Job Manager)
610 -- =====================================================================
611 CREATE TABLE IF NOT EXISTS jobs (
612 pk INTEGER PRIMARY KEY AUTOINCREMENT,
613 job_id TEXT NOT NULL UNIQUE,
614 type TEXT NOT NULL,
615 status TEXT NOT NULL DEFAULT 'pending',
616 priority INTEGER NOT NULL DEFAULT 1,
617 source_node_id TEXT,
618 destination_node_id TEXT,
619 patient_id TEXT,
620 study_uid TEXT,
621 series_uid TEXT,
622 sop_instance_uid TEXT,
623 instance_uids_json TEXT DEFAULT '[]',
624 total_items INTEGER DEFAULT 0,
625 completed_items INTEGER DEFAULT 0,
626 failed_items INTEGER DEFAULT 0,
627 skipped_items INTEGER DEFAULT 0,
628 bytes_transferred INTEGER DEFAULT 0,
629 current_item TEXT,
630 current_item_description TEXT,
631 error_message TEXT,
632 error_details TEXT,
633 retry_count INTEGER DEFAULT 0,
634 max_retries INTEGER DEFAULT 3,
635 created_by TEXT,
636 metadata_json TEXT DEFAULT '{}',
637 created_at TEXT NOT NULL DEFAULT (datetime('now')),
638 queued_at TEXT,
639 started_at TEXT,
640 completed_at TEXT,
641 CHECK (type IN ('query', 'retrieve', 'store', 'export', 'import', 'prefetch', 'sync')),
642 CHECK (status IN ('pending', 'queued', 'running', 'completed', 'failed', 'cancelled', 'paused')),
643 CHECK (priority >= 0 AND priority <= 3)
644 );
645
646 CREATE INDEX IF NOT EXISTS idx_jobs_status ON jobs(status);
647 CREATE INDEX IF NOT EXISTS idx_jobs_type ON jobs(type);
648 CREATE INDEX IF NOT EXISTS idx_jobs_priority ON jobs(priority DESC);
649 CREATE INDEX IF NOT EXISTS idx_jobs_created_at ON jobs(created_at DESC);
650 CREATE INDEX IF NOT EXISTS idx_jobs_source_node ON jobs(source_node_id);
651 CREATE INDEX IF NOT EXISTS idx_jobs_destination_node ON jobs(destination_node_id);
652 CREATE INDEX IF NOT EXISTS idx_jobs_study ON jobs(study_uid);
653 CREATE INDEX IF NOT EXISTS idx_jobs_patient ON jobs(patient_id);
654 )";
655
656 auto result = execute_sql(db, sql);
657 if (result.is_err()) {
658 return result;
659 }
660
661 return record_migration(db, 4, "Add jobs table for async DICOM operations");
662}
663
664auto migration_runner::migrate_v5(sqlite3* db) -> VoidResult {
665 // V5: Add routing_rules table for auto-forwarding
666 const char* sql = R"(
667 -- =====================================================================
668 -- ROUTING_RULES TABLE (for auto-forwarding - Routing Manager)
669 -- =====================================================================
670 CREATE TABLE IF NOT EXISTS routing_rules (
671 pk INTEGER PRIMARY KEY AUTOINCREMENT,
672 rule_id TEXT NOT NULL UNIQUE,
673 name TEXT NOT NULL,
674 description TEXT,
675 enabled INTEGER NOT NULL DEFAULT 1,
676 priority INTEGER NOT NULL DEFAULT 0,
677 conditions_json TEXT NOT NULL DEFAULT '[]',
678 actions_json TEXT NOT NULL DEFAULT '[]',
679 schedule_cron TEXT,
680 effective_from TEXT,
681 effective_until TEXT,
682 triggered_count INTEGER DEFAULT 0,
683 success_count INTEGER DEFAULT 0,
684 failure_count INTEGER DEFAULT 0,
685 last_triggered TEXT,
686 created_at TEXT NOT NULL DEFAULT (datetime('now')),
687 updated_at TEXT NOT NULL DEFAULT (datetime('now'))
688 );
689
690 CREATE INDEX IF NOT EXISTS idx_routing_rules_enabled ON routing_rules(enabled);
691 CREATE INDEX IF NOT EXISTS idx_routing_rules_priority ON routing_rules(priority DESC);
692 )";
693
694 auto result = execute_sql(db, sql);
695 if (result.is_err()) {
696 return result;
697 }
698
699 return record_migration(db, 5, "Add routing_rules table for auto-forwarding");
700}
701
702auto migration_runner::migrate_v6(sqlite3* db) -> VoidResult {
703 // V6: Add sync tables for bidirectional synchronization
704 const char* sql = R"(
705 -- =====================================================================
706 -- SYNC_CONFIGS TABLE (for Sync Manager)
707 -- =====================================================================
708 CREATE TABLE IF NOT EXISTS sync_configs (
709 pk INTEGER PRIMARY KEY AUTOINCREMENT,
710 config_id TEXT NOT NULL UNIQUE,
711 source_node_id TEXT NOT NULL,
712 name TEXT NOT NULL,
713 enabled INTEGER NOT NULL DEFAULT 1,
714 lookback_hours INTEGER NOT NULL DEFAULT 24,
715 modalities_json TEXT DEFAULT '[]',
716 patient_patterns_json TEXT DEFAULT '[]',
717 sync_direction TEXT NOT NULL DEFAULT 'pull',
718 delete_missing INTEGER NOT NULL DEFAULT 0,
719 overwrite_existing INTEGER NOT NULL DEFAULT 0,
720 sync_metadata_only INTEGER NOT NULL DEFAULT 0,
721 schedule_cron TEXT,
722 last_sync TEXT,
723 last_successful_sync TEXT,
724 total_syncs INTEGER DEFAULT 0,
725 studies_synced INTEGER DEFAULT 0,
726 created_at TEXT NOT NULL DEFAULT (datetime('now')),
727 updated_at TEXT NOT NULL DEFAULT (datetime('now')),
728 CHECK (sync_direction IN ('pull', 'push', 'bidirectional'))
729 );
730
731 CREATE INDEX IF NOT EXISTS idx_sync_configs_enabled ON sync_configs(enabled);
732 CREATE INDEX IF NOT EXISTS idx_sync_configs_source ON sync_configs(source_node_id);
733
734 -- =====================================================================
735 -- SYNC_CONFLICTS TABLE (for conflict tracking)
736 -- =====================================================================
737 CREATE TABLE IF NOT EXISTS sync_conflicts (
738 pk INTEGER PRIMARY KEY AUTOINCREMENT,
739 config_id TEXT NOT NULL,
740 study_uid TEXT NOT NULL,
741 patient_id TEXT,
742 conflict_type TEXT NOT NULL,
743 local_modified TEXT,
744 remote_modified TEXT,
745 local_instance_count INTEGER DEFAULT 0,
746 remote_instance_count INTEGER DEFAULT 0,
747 resolved INTEGER NOT NULL DEFAULT 0,
748 resolution TEXT,
749 detected_at TEXT NOT NULL DEFAULT (datetime('now')),
750 resolved_at TEXT,
751 UNIQUE (config_id, study_uid),
752 CHECK (conflict_type IN ('missing_local', 'missing_remote', 'modified', 'count_mismatch')),
753 CHECK (resolution IS NULL OR resolution IN ('prefer_local', 'prefer_remote', 'prefer_newer'))
754 );
755
756 CREATE INDEX IF NOT EXISTS idx_sync_conflicts_config ON sync_conflicts(config_id);
757 CREATE INDEX IF NOT EXISTS idx_sync_conflicts_resolved ON sync_conflicts(resolved);
758 CREATE INDEX IF NOT EXISTS idx_sync_conflicts_study ON sync_conflicts(study_uid);
759
760 -- =====================================================================
761 -- SYNC_HISTORY TABLE (for sync operation history)
762 -- =====================================================================
763 CREATE TABLE IF NOT EXISTS sync_history (
764 pk INTEGER PRIMARY KEY AUTOINCREMENT,
765 config_id TEXT NOT NULL,
766 job_id TEXT NOT NULL,
767 success INTEGER NOT NULL DEFAULT 0,
768 studies_checked INTEGER DEFAULT 0,
769 studies_synced INTEGER DEFAULT 0,
770 conflicts_found INTEGER DEFAULT 0,
771 errors_json TEXT DEFAULT '[]',
772 started_at TEXT NOT NULL,
773 completed_at TEXT NOT NULL
774 );
775
776 CREATE INDEX IF NOT EXISTS idx_sync_history_config ON sync_history(config_id);
777 CREATE INDEX IF NOT EXISTS idx_sync_history_started ON sync_history(started_at DESC);
778 )";
779
780 auto result = execute_sql(db, sql);
781 if (result.is_err()) {
782 return result;
783 }
784
785 return record_migration(db, 6, "Add sync tables for bidirectional synchronization");
786}
787
788auto migration_runner::migrate_v7(sqlite3* db) -> VoidResult {
789 // V7: Add annotation and measurement tables for viewer functionality
790 const char* sql = R"(
791 -- =====================================================================
792 -- ANNOTATIONS TABLE (for image annotations)
793 -- =====================================================================
794 CREATE TABLE IF NOT EXISTS annotations (
795 pk INTEGER PRIMARY KEY AUTOINCREMENT,
796 annotation_id TEXT NOT NULL UNIQUE,
797 study_uid TEXT NOT NULL,
798 series_uid TEXT,
799 sop_instance_uid TEXT,
800 frame_number INTEGER,
801 user_id TEXT NOT NULL,
802 annotation_type TEXT NOT NULL,
803 geometry_json TEXT NOT NULL,
804 text TEXT,
805 style_json TEXT,
806 created_at TEXT NOT NULL DEFAULT (datetime('now')),
807 updated_at TEXT NOT NULL DEFAULT (datetime('now')),
808 CHECK (annotation_type IN ('arrow', 'line', 'rectangle', 'ellipse', 'polygon', 'freehand', 'text', 'angle', 'roi'))
809 );
810
811 CREATE INDEX IF NOT EXISTS idx_annotations_study ON annotations(study_uid);
812 CREATE INDEX IF NOT EXISTS idx_annotations_instance ON annotations(sop_instance_uid);
813 CREATE INDEX IF NOT EXISTS idx_annotations_user ON annotations(user_id);
814
815 -- =====================================================================
816 -- MEASUREMENTS TABLE (for image measurements)
817 -- =====================================================================
818 CREATE TABLE IF NOT EXISTS measurements (
819 pk INTEGER PRIMARY KEY AUTOINCREMENT,
820 measurement_id TEXT NOT NULL UNIQUE,
821 sop_instance_uid TEXT NOT NULL,
822 frame_number INTEGER,
823 user_id TEXT NOT NULL,
824 measurement_type TEXT NOT NULL,
825 geometry_json TEXT NOT NULL,
826 value REAL NOT NULL,
827 unit TEXT NOT NULL,
828 label TEXT,
829 created_at TEXT NOT NULL DEFAULT (datetime('now')),
830 CHECK (measurement_type IN ('length', 'area', 'angle', 'hounsfield', 'suv', 'ellipse_area', 'polygon_area'))
831 );
832
833 CREATE INDEX IF NOT EXISTS idx_measurements_instance ON measurements(sop_instance_uid);
834 CREATE INDEX IF NOT EXISTS idx_measurements_user ON measurements(user_id);
835
836 -- =====================================================================
837 -- KEY_IMAGES TABLE (for key image markers)
838 -- =====================================================================
839 CREATE TABLE IF NOT EXISTS key_images (
840 pk INTEGER PRIMARY KEY AUTOINCREMENT,
841 key_image_id TEXT NOT NULL UNIQUE,
842 study_uid TEXT NOT NULL,
843 sop_instance_uid TEXT NOT NULL,
844 frame_number INTEGER,
845 user_id TEXT NOT NULL,
846 reason TEXT,
847 document_title TEXT,
848 created_at TEXT NOT NULL DEFAULT (datetime('now'))
849 );
850
851 CREATE INDEX IF NOT EXISTS idx_key_images_study ON key_images(study_uid);
852
853 -- =====================================================================
854 -- VIEWER_STATES TABLE (for saved viewer configurations)
855 -- =====================================================================
856 CREATE TABLE IF NOT EXISTS viewer_states (
857 pk INTEGER PRIMARY KEY AUTOINCREMENT,
858 state_id TEXT NOT NULL UNIQUE,
859 study_uid TEXT NOT NULL,
860 user_id TEXT NOT NULL,
861 state_json TEXT NOT NULL,
862 created_at TEXT NOT NULL DEFAULT (datetime('now')),
863 updated_at TEXT NOT NULL DEFAULT (datetime('now'))
864 );
865
866 CREATE INDEX IF NOT EXISTS idx_viewer_states_study ON viewer_states(study_uid);
867 CREATE INDEX IF NOT EXISTS idx_viewer_states_user ON viewer_states(user_id);
868
869 -- =====================================================================
870 -- RECENT_STUDIES TABLE (for tracking user study access)
871 -- =====================================================================
872 CREATE TABLE IF NOT EXISTS recent_studies (
873 pk INTEGER PRIMARY KEY AUTOINCREMENT,
874 user_id TEXT NOT NULL,
875 study_uid TEXT NOT NULL,
876 accessed_at TEXT NOT NULL DEFAULT (datetime('now')),
877 UNIQUE (user_id, study_uid)
878 );
879
880 CREATE INDEX IF NOT EXISTS idx_recent_studies_user ON recent_studies(user_id, accessed_at DESC);
881 )";
882
883 auto result = execute_sql(db, sql);
884 if (result.is_err()) {
885 return result;
886 }
887
888 return record_migration(db, 7, "Add annotation and measurement tables");
889}
890
891auto migration_runner::migrate_v8(sqlite3* db) -> VoidResult {
892 // V8: Add Storage Commitment tracking tables
893 const char* sql = R"(
894 -- =====================================================================
895 -- STORAGE_COMMITMENT TABLE (transaction-level tracking)
896 -- =====================================================================
897 CREATE TABLE IF NOT EXISTS storage_commitment (
898 transaction_uid TEXT PRIMARY KEY,
899 requesting_ae TEXT NOT NULL,
900 request_time TEXT NOT NULL,
901 completion_time TEXT,
902 status TEXT NOT NULL DEFAULT 'pending',
903 total_instances INTEGER NOT NULL DEFAULT 0,
904 success_count INTEGER NOT NULL DEFAULT 0,
905 failure_count INTEGER NOT NULL DEFAULT 0,
906 CHECK (status IN ('pending', 'success', 'partial', 'failed'))
907 );
908
909 CREATE INDEX IF NOT EXISTS idx_commitment_status
910 ON storage_commitment(status);
911 CREATE INDEX IF NOT EXISTS idx_commitment_request_time
912 ON storage_commitment(request_time);
913
914 -- =====================================================================
915 -- COMMITMENT_REFERENCES TABLE (per-instance tracking)
916 -- =====================================================================
917 CREATE TABLE IF NOT EXISTS commitment_references (
918 transaction_uid TEXT NOT NULL
919 REFERENCES storage_commitment(transaction_uid)
920 ON DELETE CASCADE,
921 sop_class_uid TEXT NOT NULL,
922 sop_instance_uid TEXT NOT NULL,
923 status TEXT NOT NULL DEFAULT 'pending',
924 failure_reason INTEGER,
925 PRIMARY KEY (transaction_uid, sop_instance_uid),
926 CHECK (status IN ('pending', 'success', 'failed'))
927 );
928
929 CREATE INDEX IF NOT EXISTS idx_commitment_ref_instance
930 ON commitment_references(sop_instance_uid);
931 )";
932
933 auto result = execute_sql(db, sql);
934 if (result.is_err()) {
935 return result;
936 }
937
938 return record_migration(db, 8, "Add Storage Commitment tracking tables");
939}
940
941auto migration_runner::migrate_v9(sqlite3* db) -> VoidResult {
942 // V9: Add Unified Procedure Step (UPS) tables
943 const char* sql = R"(
944 -- =====================================================================
945 -- UPS WORKITEMS TABLE (Unified Procedure Step - PS3.4 Annex CC)
946 -- =====================================================================
947 CREATE TABLE IF NOT EXISTS ups_workitems (
948 workitem_pk INTEGER PRIMARY KEY AUTOINCREMENT,
949 workitem_uid TEXT NOT NULL UNIQUE,
950 state TEXT NOT NULL DEFAULT 'SCHEDULED',
951 procedure_step_label TEXT,
952 worklist_label TEXT,
953 priority TEXT DEFAULT 'MEDIUM',
954 scheduled_start_datetime TEXT,
955 expected_completion_datetime TEXT,
956 scheduled_station_name TEXT,
957 scheduled_station_class TEXT,
958 scheduled_station_geographic TEXT,
959 scheduled_human_performers TEXT,
960 input_information TEXT,
961 performing_ae TEXT,
962 progress_description TEXT,
963 progress_percent INTEGER DEFAULT 0,
964 output_information TEXT,
965 transaction_uid TEXT,
966 created_at TEXT NOT NULL DEFAULT (datetime('now')),
967 updated_at TEXT NOT NULL DEFAULT (datetime('now')),
968 CHECK (state IN ('SCHEDULED', 'IN PROGRESS', 'COMPLETED', 'CANCELED')),
969 CHECK (priority IN ('LOW', 'MEDIUM', 'HIGH'))
970 );
971
972 CREATE INDEX IF NOT EXISTS idx_ups_state ON ups_workitems(state);
973 CREATE INDEX IF NOT EXISTS idx_ups_priority ON ups_workitems(priority);
974 CREATE INDEX IF NOT EXISTS idx_ups_performing ON ups_workitems(performing_ae);
975 CREATE INDEX IF NOT EXISTS idx_ups_scheduled ON ups_workitems(scheduled_start_datetime);
976 CREATE INDEX IF NOT EXISTS idx_ups_worklist ON ups_workitems(worklist_label);
977
978 -- =====================================================================
979 -- UPS SUBSCRIPTIONS TABLE
980 -- =====================================================================
981 CREATE TABLE IF NOT EXISTS ups_subscriptions (
982 subscription_pk INTEGER PRIMARY KEY AUTOINCREMENT,
983 subscriber_ae TEXT NOT NULL,
984 workitem_uid TEXT,
985 deletion_lock INTEGER DEFAULT 0,
986 filter_criteria TEXT,
987 created_at TEXT NOT NULL DEFAULT (datetime('now')),
988 UNIQUE (subscriber_ae, workitem_uid)
989 );
990
991 CREATE INDEX IF NOT EXISTS idx_ups_sub_ae ON ups_subscriptions(subscriber_ae);
992 CREATE INDEX IF NOT EXISTS idx_ups_sub_workitem ON ups_subscriptions(workitem_uid);
993 )";
994
995 auto result = execute_sql(db, sql);
996 if (result.is_err()) {
997 return result;
998 }
999
1000 return record_migration(db, 9, "Add Unified Procedure Step (UPS) tables");
1001}
1002
1003#ifdef PACS_WITH_DATABASE_SYSTEM
1004// ============================================================================
1005// Migration Operations (pacs_database_adapter)
1006// ============================================================================
1007
auto migration_runner::run_migrations(pacs_database_adapter& db) -> VoidResult {
    // Convenience overload: migrate all the way up to the newest schema
    // version this runner knows about.
    return run_migrations_to(db, LATEST_VERSION);
}
1011
auto migration_runner::run_migrations_to(pacs_database_adapter& db,
                                         int target_version) -> VoidResult {
    // Upgrades the schema one version at a time until target_version is
    // reached. Each migration runs inside its own transaction, so a failure
    // leaves the database at the last successfully applied version rather
    // than in a half-migrated state.
    //
    // Preconditions: the adapter must be connected and target_version must
    // not exceed LATEST_VERSION. Downgrades are not supported: if the
    // current version is already >= target_version this is a no-op.
    if (!db.is_connected()) {
        return make_error<std::monostate>(
            -1, "Database adapter is not connected", "storage");
    }

    if (target_version > LATEST_VERSION) {
        return make_error<std::monostate>(
            -1,
            kcenon::pacs::compat::format("Target version {} exceeds latest version {}",
                                         target_version, LATEST_VERSION),
            "storage");
    }

    // Ensure schema_version table exists (bootstrap for a fresh database)
    auto ensure_result = ensure_schema_version_table(db);
    if (ensure_result.is_err()) {
        return ensure_result;
    }

    auto current_version = get_current_version(db);

    // Nothing to do if already at or past target
    if (current_version >= target_version) {
        return ok();
    }

    // Apply each migration in a transaction
    while (current_version < target_version) {
        auto next_version = current_version + 1;

        // Begin transaction
        auto begin_result = db.begin_transaction();
        if (begin_result.is_err()) {
            return begin_result;
        }

        // Apply migration (also records it in schema_version on success)
        auto migration_result = apply_migration(db, next_version);
        if (migration_result.is_err()) {
            // Rollback on failure so neither the DDL nor the version row
            // from the failed step persists
            (void)db.rollback();
            return migration_result;
        }

        // Commit transaction
        auto commit_result = db.commit();
        if (commit_result.is_err()) {
            // Best-effort rollback; commit already failed so the result of
            // the rollback itself is deliberately ignored
            (void)db.rollback();
            return commit_result;
        }

        current_version = next_version;
    }

    return ok();
}
1070
1071// ============================================================================
1072// Version Information (pacs_database_adapter)
1073// ============================================================================
1074
1075auto migration_runner::get_current_version(pacs_database_adapter& db) const
1076 -> int {
1077 if (!db.is_connected()) {
1078 return 0;
1079 }
1080
1081 // Check if schema_version table exists by querying sqlite_master
1082 const std::string check_sql =
1083 "SELECT name FROM sqlite_master WHERE type='table' AND "
1084 "name='schema_version';";
1085
1086 auto check_result = db.select(check_sql);
1087 if (check_result.is_err() || check_result.value().empty()) {
1088 // Table doesn't exist
1089 return 0;
1090 }
1091
1092 // Get max version from table
1093 const std::string version_sql =
1094 "SELECT MAX(version) AS max_ver FROM schema_version;";
1095 auto version_result = db.select(version_sql);
1096 if (version_result.is_err() || version_result.value().empty()) {
1097 return 0;
1098 }
1099
1100 const auto& row = version_result.value()[0];
1101 auto it = row.find("max_ver");
1102 if (it == row.end()) {
1103 // Try alternative column names
1104 it = row.find("MAX(version)");
1105 if (it == row.end()) {
1106 it = row.find("max(version)");
1107 }
1108 }
1109
1110 if (it != row.end()) {
1111 try {
1112 return std::stoi(it->second);
1113 } catch (...) {
1114 return 0;
1115 }
1116 }
1117
1118 return 0;
1119}
1120
1121auto migration_runner::needs_migration(pacs_database_adapter& db) const
1122 -> bool {
1123 return get_current_version(db) < LATEST_VERSION;
1124}
1125
1126// ============================================================================
1127// Migration History (pacs_database_adapter)
1128// ============================================================================
1129
1130auto migration_runner::get_history(pacs_database_adapter& db) const
1131 -> std::vector<migration_record> {
1132 std::vector<migration_record> history;
1133
1134 if (!db.is_connected()) {
1135 return history;
1136 }
1137
1138 const std::string sql =
1139 "SELECT version, description, applied_at FROM schema_version ORDER BY "
1140 "version;";
1141
1142 auto result = db.select(sql);
1143 if (result.is_err()) {
1144 return history;
1145 }
1146
1147 for (const auto& row : result.value()) {
1148 migration_record record;
1149
1150 // Get version
1151 if (auto it = row.find("version"); it != row.end()) {
1152 try {
1153 record.version = std::stoi(it->second);
1154 } catch (...) {
1155 record.version = 0;
1156 }
1157 }
1158
1159 // Get description
1160 if (auto it = row.find("description"); it != row.end()) {
1161 record.description = it->second;
1162 }
1163
1164 // Get applied_at
1165 if (auto it = row.find("applied_at"); it != row.end()) {
1166 record.applied_at = it->second;
1167 }
1168
1169 history.push_back(std::move(record));
1170 }
1171
1172 return history;
1173}
1174
1175// ============================================================================
1176// Internal Implementation (pacs_database_adapter)
1177// ============================================================================
1178
1179auto migration_runner::ensure_schema_version_table(pacs_database_adapter& db)
1180 -> VoidResult {
1181 const std::string sql = R"(
1182 CREATE TABLE IF NOT EXISTS schema_version (
1183 version INTEGER PRIMARY KEY,
1184 description TEXT NOT NULL,
1185 applied_at TEXT NOT NULL DEFAULT (datetime('now'))
1186 );
1187 )";
1188
1189 return execute_sql(db, sql);
1190}
1191
1192auto migration_runner::apply_migration(pacs_database_adapter& db, int version)
1193 -> VoidResult {
1194 // Find the migration function
1195 for (const auto& [ver, func] : adapter_migrations_) {
1196 if (ver == version) {
1197 return func(db);
1198 }
1199 }
1200
1201 return make_error<std::monostate>(
1202 -1,
1203 kcenon::pacs::compat::format("Migration for version {} not found", version),
1204 "storage");
1205}
1206
1207auto migration_runner::record_migration(pacs_database_adapter& db, int version,
1208 std::string_view description)
1209 -> VoidResult {
1210 // Use raw SQL for INSERT (simpler and more reliable for migrations)
1211 // Note: description is sanitized by the caller and controlled internally
1212 const std::string sql = kcenon::pacs::compat::format(
1213 "INSERT INTO schema_version (version, description) VALUES ({}, '{}');",
1214 version, description);
1215
1216 auto result = db.insert(sql);
1217 if (result.is_err()) {
1218 return make_error<std::monostate>(
1219 result.error().code,
1220 kcenon::pacs::compat::format("Failed to record migration: {}",
1221 result.error().message),
1222 "storage");
1223 }
1224
1225 return ok();
1226}
1227
1228auto migration_runner::execute_sql(pacs_database_adapter& db,
1229 std::string_view sql) -> VoidResult {
1230 auto result = db.execute(std::string(sql));
1231
1232 if (result.is_err()) {
1233 return result;
1234 }
1235
1236 return ok();
1237}
1238
1239// ============================================================================
1240// Migration Implementations (pacs_database_adapter)
1241// ============================================================================
1242
auto migration_runner::migrate_v1(pacs_database_adapter& db) -> VoidResult {
    // V1: Initial schema - Create all base tables
    // Note: Same DDL as SQLite version since pacs_database_adapter supports raw
    // SQL
    //
    // Creates the DICOM information-model hierarchy
    //   patients -> studies -> series -> instances
    // (each child REFERENCES its parent with ON DELETE CASCADE), plus the
    // MPPS and Modality Worklist tables. Four triggers keep the
    // denormalized num_series / num_instances counters on studies and
    // series in step with instance/series inserts and deletes, touching
    // updated_at as they go.
    const std::string sql = R"(
        -- =====================================================================
        -- PATIENTS TABLE
        -- =====================================================================
        CREATE TABLE IF NOT EXISTS patients (
            patient_pk INTEGER PRIMARY KEY AUTOINCREMENT,
            patient_id TEXT NOT NULL UNIQUE,
            patient_name TEXT,
            birth_date TEXT,
            sex TEXT,
            other_ids TEXT,
            ethnic_group TEXT,
            comments TEXT,
            created_at TEXT NOT NULL DEFAULT (datetime('now')),
            updated_at TEXT NOT NULL DEFAULT (datetime('now')),
            CHECK (length(patient_id) <= 64)
        );

        CREATE INDEX IF NOT EXISTS idx_patients_name ON patients(patient_name);
        CREATE INDEX IF NOT EXISTS idx_patients_birth ON patients(birth_date);

        -- =====================================================================
        -- STUDIES TABLE
        -- =====================================================================
        CREATE TABLE IF NOT EXISTS studies (
            study_pk INTEGER PRIMARY KEY AUTOINCREMENT,
            patient_pk INTEGER NOT NULL REFERENCES patients(patient_pk)
                ON DELETE CASCADE,
            study_uid TEXT NOT NULL UNIQUE,
            study_id TEXT,
            study_date TEXT,
            study_time TEXT,
            accession_number TEXT,
            referring_physician TEXT,
            study_description TEXT,
            modalities_in_study TEXT,
            num_series INTEGER DEFAULT 0,
            num_instances INTEGER DEFAULT 0,
            created_at TEXT NOT NULL DEFAULT (datetime('now')),
            updated_at TEXT NOT NULL DEFAULT (datetime('now')),
            CHECK (length(study_uid) <= 64)
        );

        CREATE INDEX IF NOT EXISTS idx_studies_patient ON studies(patient_pk);
        CREATE INDEX IF NOT EXISTS idx_studies_date ON studies(study_date);
        CREATE INDEX IF NOT EXISTS idx_studies_accession ON studies(accession_number);

        -- =====================================================================
        -- SERIES TABLE
        -- =====================================================================
        CREATE TABLE IF NOT EXISTS series (
            series_pk INTEGER PRIMARY KEY AUTOINCREMENT,
            study_pk INTEGER NOT NULL REFERENCES studies(study_pk)
                ON DELETE CASCADE,
            series_uid TEXT NOT NULL UNIQUE,
            series_number INTEGER,
            modality TEXT,
            series_description TEXT,
            body_part_examined TEXT,
            station_name TEXT,
            num_instances INTEGER DEFAULT 0,
            created_at TEXT NOT NULL DEFAULT (datetime('now')),
            updated_at TEXT NOT NULL DEFAULT (datetime('now')),
            CHECK (length(series_uid) <= 64)
        );

        CREATE INDEX IF NOT EXISTS idx_series_study ON series(study_pk);
        CREATE INDEX IF NOT EXISTS idx_series_modality ON series(modality);

        -- =====================================================================
        -- INSTANCES TABLE
        -- =====================================================================
        CREATE TABLE IF NOT EXISTS instances (
            instance_pk INTEGER PRIMARY KEY AUTOINCREMENT,
            series_pk INTEGER NOT NULL REFERENCES series(series_pk)
                ON DELETE CASCADE,
            sop_uid TEXT NOT NULL UNIQUE,
            sop_class_uid TEXT NOT NULL,
            instance_number INTEGER,
            transfer_syntax TEXT,
            content_date TEXT,
            content_time TEXT,
            rows INTEGER,
            columns INTEGER,
            bits_allocated INTEGER,
            number_of_frames INTEGER,
            file_path TEXT NOT NULL,
            file_size INTEGER NOT NULL,
            file_hash TEXT,
            created_at TEXT NOT NULL DEFAULT (datetime('now')),
            CHECK (length(sop_uid) <= 64),
            CHECK (file_size >= 0)
        );

        CREATE INDEX IF NOT EXISTS idx_instances_series ON instances(series_pk);
        CREATE INDEX IF NOT EXISTS idx_instances_sop_class ON instances(sop_class_uid);
        CREATE INDEX IF NOT EXISTS idx_instances_number ON instances(instance_number);
        CREATE INDEX IF NOT EXISTS idx_instances_created ON instances(created_at);

        -- =====================================================================
        -- MPPS TABLE (Modality Performed Procedure Step)
        -- =====================================================================
        CREATE TABLE IF NOT EXISTS mpps (
            mpps_pk INTEGER PRIMARY KEY AUTOINCREMENT,
            mpps_uid TEXT NOT NULL UNIQUE,
            status TEXT NOT NULL,
            start_datetime TEXT,
            end_datetime TEXT,
            station_ae TEXT,
            station_name TEXT,
            modality TEXT,
            study_uid TEXT,
            accession_no TEXT,
            scheduled_step_id TEXT,
            requested_proc_id TEXT,
            performed_series TEXT,
            created_at TEXT NOT NULL DEFAULT (datetime('now')),
            updated_at TEXT NOT NULL DEFAULT (datetime('now')),
            CHECK (status IN ('IN PROGRESS', 'COMPLETED', 'DISCONTINUED'))
        );

        CREATE INDEX IF NOT EXISTS idx_mpps_status ON mpps(status);
        CREATE INDEX IF NOT EXISTS idx_mpps_station ON mpps(station_ae);
        CREATE INDEX IF NOT EXISTS idx_mpps_study ON mpps(study_uid);
        CREATE INDEX IF NOT EXISTS idx_mpps_date ON mpps(start_datetime);

        -- =====================================================================
        -- WORKLIST TABLE (Modality Worklist)
        -- =====================================================================
        CREATE TABLE IF NOT EXISTS worklist (
            worklist_pk INTEGER PRIMARY KEY AUTOINCREMENT,
            step_id TEXT NOT NULL,
            step_status TEXT DEFAULT 'SCHEDULED',
            patient_id TEXT NOT NULL,
            patient_name TEXT,
            birth_date TEXT,
            sex TEXT,
            accession_no TEXT,
            requested_proc_id TEXT,
            study_uid TEXT,
            scheduled_datetime TEXT NOT NULL,
            station_ae TEXT,
            station_name TEXT,
            modality TEXT NOT NULL,
            procedure_desc TEXT,
            protocol_code TEXT,
            referring_phys TEXT,
            referring_phys_id TEXT,
            created_at TEXT NOT NULL DEFAULT (datetime('now')),
            updated_at TEXT NOT NULL DEFAULT (datetime('now')),
            UNIQUE (step_id, accession_no)
        );

        CREATE INDEX IF NOT EXISTS idx_worklist_station ON worklist(station_ae);
        CREATE INDEX IF NOT EXISTS idx_worklist_modality ON worklist(modality);
        CREATE INDEX IF NOT EXISTS idx_worklist_scheduled ON worklist(scheduled_datetime);
        CREATE INDEX IF NOT EXISTS idx_worklist_patient ON worklist(patient_id);
        CREATE INDEX IF NOT EXISTS idx_worklist_accession ON worklist(accession_no);
        CREATE INDEX IF NOT EXISTS idx_worklist_status ON worklist(step_status);
        CREATE INDEX IF NOT EXISTS idx_worklist_station_date_mod
            ON worklist(station_ae, scheduled_datetime, modality);

        -- =====================================================================
        -- TRIGGERS FOR PARENT COUNT UPDATES
        -- =====================================================================
        CREATE TRIGGER IF NOT EXISTS trg_instances_insert
        AFTER INSERT ON instances
        BEGIN
            UPDATE series
            SET num_instances = num_instances + 1,
                updated_at = datetime('now')
            WHERE series_pk = NEW.series_pk;

            UPDATE studies
            SET num_instances = num_instances + 1,
                updated_at = datetime('now')
            WHERE study_pk = (SELECT study_pk FROM series WHERE series_pk = NEW.series_pk);
        END;

        CREATE TRIGGER IF NOT EXISTS trg_instances_delete
        AFTER DELETE ON instances
        BEGIN
            UPDATE series
            SET num_instances = num_instances - 1,
                updated_at = datetime('now')
            WHERE series_pk = OLD.series_pk;

            UPDATE studies
            SET num_instances = num_instances - 1,
                updated_at = datetime('now')
            WHERE study_pk = (SELECT study_pk FROM series WHERE series_pk = OLD.series_pk);
        END;

        CREATE TRIGGER IF NOT EXISTS trg_series_insert
        AFTER INSERT ON series
        BEGIN
            UPDATE studies
            SET num_series = num_series + 1,
                updated_at = datetime('now')
            WHERE study_pk = NEW.study_pk;
        END;

        CREATE TRIGGER IF NOT EXISTS trg_series_delete
        AFTER DELETE ON series
        BEGIN
            UPDATE studies
            SET num_series = num_series - 1,
                updated_at = datetime('now')
            WHERE study_pk = OLD.study_pk;
        END;
    )";

    auto result = execute_sql(db, sql);
    if (result.is_err()) {
        return result;
    }

    // Record this migration in schema_version so it is not re-applied.
    return record_migration(db, 1, "Initial schema creation");
}
1466
1467auto migration_runner::migrate_v2(pacs_database_adapter& db) -> VoidResult {
1468 // V2: Add audit_log table for REST API audit endpoints
1469 const std::string sql = R"(
1470 -- =====================================================================
1471 -- AUDIT_LOG TABLE (for REST API and HIPAA compliance)
1472 -- =====================================================================
1473 CREATE TABLE IF NOT EXISTS audit_log (
1474 audit_pk INTEGER PRIMARY KEY AUTOINCREMENT,
1475 event_type TEXT NOT NULL,
1476 outcome TEXT DEFAULT 'SUCCESS',
1477 timestamp TEXT NOT NULL DEFAULT (datetime('now')),
1478 user_id TEXT,
1479 source_ae TEXT,
1480 target_ae TEXT,
1481 source_ip TEXT,
1482 patient_id TEXT,
1483 study_uid TEXT,
1484 message TEXT,
1485 details TEXT,
1486 CHECK (outcome IN ('SUCCESS', 'FAILURE', 'WARNING'))
1487 );
1488
1489 CREATE INDEX IF NOT EXISTS idx_audit_event_type ON audit_log(event_type);
1490 CREATE INDEX IF NOT EXISTS idx_audit_timestamp ON audit_log(timestamp);
1491 CREATE INDEX IF NOT EXISTS idx_audit_user ON audit_log(user_id);
1492 CREATE INDEX IF NOT EXISTS idx_audit_source_ae ON audit_log(source_ae);
1493 CREATE INDEX IF NOT EXISTS idx_audit_patient ON audit_log(patient_id);
1494 CREATE INDEX IF NOT EXISTS idx_audit_study ON audit_log(study_uid);
1495 CREATE INDEX IF NOT EXISTS idx_audit_outcome ON audit_log(outcome);
1496 )";
1497
1498 auto result = execute_sql(db, sql);
1499 if (result.is_err()) {
1500 return result;
1501 }
1502
1503 return record_migration(db, 2, "Add audit_log table");
1504}
1505
1506auto migration_runner::migrate_v3(pacs_database_adapter& db) -> VoidResult {
1507 // V3: Add remote_nodes table for PACS client remote node management
1508 const std::string sql = R"(
1509 -- =====================================================================
1510 -- REMOTE_NODES TABLE (for PACS client SCU operations)
1511 -- =====================================================================
1512 CREATE TABLE IF NOT EXISTS remote_nodes (
1513 pk INTEGER PRIMARY KEY AUTOINCREMENT,
1514 node_id TEXT NOT NULL UNIQUE,
1515 name TEXT,
1516 ae_title TEXT NOT NULL,
1517 host TEXT NOT NULL,
1518 port INTEGER NOT NULL DEFAULT 104,
1519 supports_find INTEGER NOT NULL DEFAULT 1,
1520 supports_move INTEGER NOT NULL DEFAULT 1,
1521 supports_get INTEGER NOT NULL DEFAULT 0,
1522 supports_store INTEGER NOT NULL DEFAULT 1,
1523 supports_worklist INTEGER NOT NULL DEFAULT 0,
1524 connection_timeout_sec INTEGER NOT NULL DEFAULT 30,
1525 dimse_timeout_sec INTEGER NOT NULL DEFAULT 60,
1526 max_associations INTEGER NOT NULL DEFAULT 4,
1527 status TEXT NOT NULL DEFAULT 'unknown',
1528 last_verified TEXT,
1529 last_error TEXT,
1530 last_error_message TEXT,
1531 created_at TEXT NOT NULL DEFAULT (datetime('now')),
1532 updated_at TEXT NOT NULL DEFAULT (datetime('now')),
1533 CHECK (port > 0 AND port <= 65535),
1534 CHECK (status IN ('unknown', 'online', 'offline', 'error', 'verifying'))
1535 );
1536
1537 CREATE INDEX IF NOT EXISTS idx_remote_nodes_ae_title ON remote_nodes(ae_title);
1538 CREATE INDEX IF NOT EXISTS idx_remote_nodes_host ON remote_nodes(host);
1539 CREATE INDEX IF NOT EXISTS idx_remote_nodes_status ON remote_nodes(status);
1540 )";
1541
1542 auto result = execute_sql(db, sql);
1543 if (result.is_err()) {
1544 return result;
1545 }
1546
1547 return record_migration(db, 3, "Add remote_nodes table for PACS client");
1548}
1549
1550auto migration_runner::migrate_v4(pacs_database_adapter& db) -> VoidResult {
1551 // V4: Add jobs table for async DICOM operations
1552 const std::string sql = R"(
1553 -- =====================================================================
1554 -- JOBS TABLE (for async DICOM operations - Job Manager)
1555 -- =====================================================================
1556 CREATE TABLE IF NOT EXISTS jobs (
1557 pk INTEGER PRIMARY KEY AUTOINCREMENT,
1558 job_id TEXT NOT NULL UNIQUE,
1559 type TEXT NOT NULL,
1560 status TEXT NOT NULL DEFAULT 'pending',
1561 priority INTEGER NOT NULL DEFAULT 1,
1562 source_node_id TEXT,
1563 destination_node_id TEXT,
1564 patient_id TEXT,
1565 study_uid TEXT,
1566 series_uid TEXT,
1567 sop_instance_uid TEXT,
1568 instance_uids_json TEXT DEFAULT '[]',
1569 total_items INTEGER DEFAULT 0,
1570 completed_items INTEGER DEFAULT 0,
1571 failed_items INTEGER DEFAULT 0,
1572 skipped_items INTEGER DEFAULT 0,
1573 bytes_transferred INTEGER DEFAULT 0,
1574 current_item TEXT,
1575 current_item_description TEXT,
1576 error_message TEXT,
1577 error_details TEXT,
1578 retry_count INTEGER DEFAULT 0,
1579 max_retries INTEGER DEFAULT 3,
1580 created_by TEXT,
1581 metadata_json TEXT DEFAULT '{}',
1582 created_at TEXT NOT NULL DEFAULT (datetime('now')),
1583 queued_at TEXT,
1584 started_at TEXT,
1585 completed_at TEXT,
1586 CHECK (type IN ('query', 'retrieve', 'store', 'export', 'import', 'prefetch', 'sync')),
1587 CHECK (status IN ('pending', 'queued', 'running', 'completed', 'failed', 'cancelled', 'paused')),
1588 CHECK (priority >= 0 AND priority <= 3)
1589 );
1590
1591 CREATE INDEX IF NOT EXISTS idx_jobs_status ON jobs(status);
1592 CREATE INDEX IF NOT EXISTS idx_jobs_type ON jobs(type);
1593 CREATE INDEX IF NOT EXISTS idx_jobs_priority ON jobs(priority DESC);
1594 CREATE INDEX IF NOT EXISTS idx_jobs_created_at ON jobs(created_at DESC);
1595 CREATE INDEX IF NOT EXISTS idx_jobs_source_node ON jobs(source_node_id);
1596 CREATE INDEX IF NOT EXISTS idx_jobs_destination_node ON jobs(destination_node_id);
1597 CREATE INDEX IF NOT EXISTS idx_jobs_study ON jobs(study_uid);
1598 CREATE INDEX IF NOT EXISTS idx_jobs_patient ON jobs(patient_id);
1599 )";
1600
1601 auto result = execute_sql(db, sql);
1602 if (result.is_err()) {
1603 return result;
1604 }
1605
1606 return record_migration(db, 4, "Add jobs table for async DICOM operations");
1607}
1608
1609auto migration_runner::migrate_v5(pacs_database_adapter& db) -> VoidResult {
1610 // V5: Add routing_rules table for auto-forwarding
1611 const std::string sql = R"(
1612 -- =====================================================================
1613 -- ROUTING_RULES TABLE (for auto-forwarding - Routing Manager)
1614 -- =====================================================================
1615 CREATE TABLE IF NOT EXISTS routing_rules (
1616 pk INTEGER PRIMARY KEY AUTOINCREMENT,
1617 rule_id TEXT NOT NULL UNIQUE,
1618 name TEXT NOT NULL,
1619 description TEXT,
1620 enabled INTEGER NOT NULL DEFAULT 1,
1621 priority INTEGER NOT NULL DEFAULT 0,
1622 conditions_json TEXT NOT NULL DEFAULT '[]',
1623 actions_json TEXT NOT NULL DEFAULT '[]',
1624 schedule_cron TEXT,
1625 effective_from TEXT,
1626 effective_until TEXT,
1627 triggered_count INTEGER DEFAULT 0,
1628 success_count INTEGER DEFAULT 0,
1629 failure_count INTEGER DEFAULT 0,
1630 last_triggered TEXT,
1631 created_at TEXT NOT NULL DEFAULT (datetime('now')),
1632 updated_at TEXT NOT NULL DEFAULT (datetime('now'))
1633 );
1634
1635 CREATE INDEX IF NOT EXISTS idx_routing_rules_enabled ON routing_rules(enabled);
1636 CREATE INDEX IF NOT EXISTS idx_routing_rules_priority ON routing_rules(priority DESC);
1637 )";
1638
1639 auto result = execute_sql(db, sql);
1640 if (result.is_err()) {
1641 return result;
1642 }
1643
1644 return record_migration(db, 5, "Add routing_rules table for auto-forwarding");
1645}
1646
1647auto migration_runner::migrate_v6(pacs_database_adapter& db) -> VoidResult {
1648 // V6: Add sync tables for bidirectional synchronization
1649 const std::string sql = R"(
1650 -- =====================================================================
1651 -- SYNC_CONFIGS TABLE (for Sync Manager)
1652 -- =====================================================================
1653 CREATE TABLE IF NOT EXISTS sync_configs (
1654 pk INTEGER PRIMARY KEY AUTOINCREMENT,
1655 config_id TEXT NOT NULL UNIQUE,
1656 source_node_id TEXT NOT NULL,
1657 name TEXT NOT NULL,
1658 enabled INTEGER NOT NULL DEFAULT 1,
1659 lookback_hours INTEGER NOT NULL DEFAULT 24,
1660 modalities_json TEXT DEFAULT '[]',
1661 patient_patterns_json TEXT DEFAULT '[]',
1662 sync_direction TEXT NOT NULL DEFAULT 'pull',
1663 delete_missing INTEGER NOT NULL DEFAULT 0,
1664 overwrite_existing INTEGER NOT NULL DEFAULT 0,
1665 sync_metadata_only INTEGER NOT NULL DEFAULT 0,
1666 schedule_cron TEXT,
1667 last_sync TEXT,
1668 last_successful_sync TEXT,
1669 total_syncs INTEGER DEFAULT 0,
1670 studies_synced INTEGER DEFAULT 0,
1671 created_at TEXT NOT NULL DEFAULT (datetime('now')),
1672 updated_at TEXT NOT NULL DEFAULT (datetime('now')),
1673 CHECK (sync_direction IN ('pull', 'push', 'bidirectional'))
1674 );
1675
1676 CREATE INDEX IF NOT EXISTS idx_sync_configs_enabled ON sync_configs(enabled);
1677 CREATE INDEX IF NOT EXISTS idx_sync_configs_source ON sync_configs(source_node_id);
1678
1679 -- =====================================================================
1680 -- SYNC_CONFLICTS TABLE (for conflict tracking)
1681 -- =====================================================================
1682 CREATE TABLE IF NOT EXISTS sync_conflicts (
1683 pk INTEGER PRIMARY KEY AUTOINCREMENT,
1684 config_id TEXT NOT NULL,
1685 study_uid TEXT NOT NULL,
1686 patient_id TEXT,
1687 conflict_type TEXT NOT NULL,
1688 local_modified TEXT,
1689 remote_modified TEXT,
1690 local_instance_count INTEGER DEFAULT 0,
1691 remote_instance_count INTEGER DEFAULT 0,
1692 resolved INTEGER NOT NULL DEFAULT 0,
1693 resolution TEXT,
1694 detected_at TEXT NOT NULL DEFAULT (datetime('now')),
1695 resolved_at TEXT,
1696 UNIQUE (config_id, study_uid),
1697 CHECK (conflict_type IN ('missing_local', 'missing_remote', 'modified', 'count_mismatch')),
1698 CHECK (resolution IS NULL OR resolution IN ('prefer_local', 'prefer_remote', 'prefer_newer'))
1699 );
1700
1701 CREATE INDEX IF NOT EXISTS idx_sync_conflicts_config ON sync_conflicts(config_id);
1702 CREATE INDEX IF NOT EXISTS idx_sync_conflicts_resolved ON sync_conflicts(resolved);
1703 CREATE INDEX IF NOT EXISTS idx_sync_conflicts_study ON sync_conflicts(study_uid);
1704
1705 -- =====================================================================
1706 -- SYNC_HISTORY TABLE (for sync operation history)
1707 -- =====================================================================
1708 CREATE TABLE IF NOT EXISTS sync_history (
1709 pk INTEGER PRIMARY KEY AUTOINCREMENT,
1710 config_id TEXT NOT NULL,
1711 job_id TEXT NOT NULL,
1712 success INTEGER NOT NULL DEFAULT 0,
1713 studies_checked INTEGER DEFAULT 0,
1714 studies_synced INTEGER DEFAULT 0,
1715 conflicts_found INTEGER DEFAULT 0,
1716 errors_json TEXT DEFAULT '[]',
1717 started_at TEXT NOT NULL,
1718 completed_at TEXT NOT NULL
1719 );
1720
1721 CREATE INDEX IF NOT EXISTS idx_sync_history_config ON sync_history(config_id);
1722 CREATE INDEX IF NOT EXISTS idx_sync_history_started ON sync_history(started_at DESC);
1723 )";
1724
1725 auto result = execute_sql(db, sql);
1726 if (result.is_err()) {
1727 return result;
1728 }
1729
1730 return record_migration(db, 6, "Add sync tables for bidirectional synchronization");
1731}
1732
1733auto migration_runner::migrate_v7(pacs_database_adapter& db) -> VoidResult {
1734 // V7: Add annotation and measurement tables for viewer functionality
1735 const std::string sql = R"(
1736 -- =====================================================================
1737 -- ANNOTATIONS TABLE (for image annotations)
1738 -- =====================================================================
1739 CREATE TABLE IF NOT EXISTS annotations (
1740 pk INTEGER PRIMARY KEY AUTOINCREMENT,
1741 annotation_id TEXT NOT NULL UNIQUE,
1742 study_uid TEXT NOT NULL,
1743 series_uid TEXT,
1744 sop_instance_uid TEXT,
1745 frame_number INTEGER,
1746 user_id TEXT NOT NULL,
1747 annotation_type TEXT NOT NULL,
1748 geometry_json TEXT NOT NULL,
1749 text TEXT,
1750 style_json TEXT,
1751 created_at TEXT NOT NULL DEFAULT (datetime('now')),
1752 updated_at TEXT NOT NULL DEFAULT (datetime('now')),
1753 CHECK (annotation_type IN ('arrow', 'line', 'rectangle', 'ellipse', 'polygon', 'freehand', 'text', 'angle', 'roi'))
1754 );
1755
1756 CREATE INDEX IF NOT EXISTS idx_annotations_study ON annotations(study_uid);
1757 CREATE INDEX IF NOT EXISTS idx_annotations_instance ON annotations(sop_instance_uid);
1758 CREATE INDEX IF NOT EXISTS idx_annotations_user ON annotations(user_id);
1759
1760 -- =====================================================================
1761 -- MEASUREMENTS TABLE (for image measurements)
1762 -- =====================================================================
1763 CREATE TABLE IF NOT EXISTS measurements (
1764 pk INTEGER PRIMARY KEY AUTOINCREMENT,
1765 measurement_id TEXT NOT NULL UNIQUE,
1766 sop_instance_uid TEXT NOT NULL,
1767 frame_number INTEGER,
1768 user_id TEXT NOT NULL,
1769 measurement_type TEXT NOT NULL,
1770 geometry_json TEXT NOT NULL,
1771 value REAL NOT NULL,
1772 unit TEXT NOT NULL,
1773 label TEXT,
1774 created_at TEXT NOT NULL DEFAULT (datetime('now')),
1775 CHECK (measurement_type IN ('length', 'area', 'angle', 'hounsfield', 'suv', 'ellipse_area', 'polygon_area'))
1776 );
1777
1778 CREATE INDEX IF NOT EXISTS idx_measurements_instance ON measurements(sop_instance_uid);
1779 CREATE INDEX IF NOT EXISTS idx_measurements_user ON measurements(user_id);
1780
1781 -- =====================================================================
1782 -- KEY_IMAGES TABLE (for key image markers)
1783 -- =====================================================================
1784 CREATE TABLE IF NOT EXISTS key_images (
1785 pk INTEGER PRIMARY KEY AUTOINCREMENT,
1786 key_image_id TEXT NOT NULL UNIQUE,
1787 study_uid TEXT NOT NULL,
1788 sop_instance_uid TEXT NOT NULL,
1789 frame_number INTEGER,
1790 user_id TEXT NOT NULL,
1791 reason TEXT,
1792 document_title TEXT,
1793 created_at TEXT NOT NULL DEFAULT (datetime('now'))
1794 );
1795
1796 CREATE INDEX IF NOT EXISTS idx_key_images_study ON key_images(study_uid);
1797
1798 -- =====================================================================
1799 -- VIEWER_STATES TABLE (for saved viewer configurations)
1800 -- =====================================================================
1801 CREATE TABLE IF NOT EXISTS viewer_states (
1802 pk INTEGER PRIMARY KEY AUTOINCREMENT,
1803 state_id TEXT NOT NULL UNIQUE,
1804 study_uid TEXT NOT NULL,
1805 user_id TEXT NOT NULL,
1806 state_json TEXT NOT NULL,
1807 created_at TEXT NOT NULL DEFAULT (datetime('now')),
1808 updated_at TEXT NOT NULL DEFAULT (datetime('now'))
1809 );
1810
1811 CREATE INDEX IF NOT EXISTS idx_viewer_states_study ON viewer_states(study_uid);
1812 CREATE INDEX IF NOT EXISTS idx_viewer_states_user ON viewer_states(user_id);
1813
1814 -- =====================================================================
1815 -- RECENT_STUDIES TABLE (for tracking user study access)
1816 -- =====================================================================
1817 CREATE TABLE IF NOT EXISTS recent_studies (
1818 pk INTEGER PRIMARY KEY AUTOINCREMENT,
1819 user_id TEXT NOT NULL,
1820 study_uid TEXT NOT NULL,
1821 accessed_at TEXT NOT NULL DEFAULT (datetime('now')),
1822 UNIQUE (user_id, study_uid)
1823 );
1824
1825 CREATE INDEX IF NOT EXISTS idx_recent_studies_user ON recent_studies(user_id, accessed_at DESC);
1826 )";
1827
1828 auto result = execute_sql(db, sql);
1829 if (result.is_err()) {
1830 return result;
1831 }
1832
1833 return record_migration(db, 7, "Add annotation and measurement tables");
1834}
1835
1836auto migration_runner::migrate_v8(pacs_database_adapter& db) -> VoidResult {
1837 // V8: Add Storage Commitment tracking tables
1838 const std::string sql = R"(
1839 CREATE TABLE IF NOT EXISTS storage_commitment (
1840 transaction_uid TEXT PRIMARY KEY,
1841 requesting_ae TEXT NOT NULL,
1842 request_time TEXT NOT NULL,
1843 completion_time TEXT,
1844 status TEXT NOT NULL DEFAULT 'pending',
1845 total_instances INTEGER NOT NULL DEFAULT 0,
1846 success_count INTEGER NOT NULL DEFAULT 0,
1847 failure_count INTEGER NOT NULL DEFAULT 0,
1848 CHECK (status IN ('pending', 'success', 'partial', 'failed'))
1849 );
1850
1851 CREATE INDEX IF NOT EXISTS idx_commitment_status
1852 ON storage_commitment(status);
1853 CREATE INDEX IF NOT EXISTS idx_commitment_request_time
1854 ON storage_commitment(request_time);
1855
1856 CREATE TABLE IF NOT EXISTS commitment_references (
1857 transaction_uid TEXT NOT NULL
1858 REFERENCES storage_commitment(transaction_uid)
1859 ON DELETE CASCADE,
1860 sop_class_uid TEXT NOT NULL,
1861 sop_instance_uid TEXT NOT NULL,
1862 status TEXT NOT NULL DEFAULT 'pending',
1863 failure_reason INTEGER,
1864 PRIMARY KEY (transaction_uid, sop_instance_uid),
1865 CHECK (status IN ('pending', 'success', 'failed'))
1866 );
1867
1868 CREATE INDEX IF NOT EXISTS idx_commitment_ref_instance
1869 ON commitment_references(sop_instance_uid);
1870 )";
1871
1872 auto result = execute_sql(db, sql);
1873 if (result.is_err()) {
1874 return result;
1875 }
1876
1877 return record_migration(db, 8, "Add Storage Commitment tracking tables");
1878}
1879
1880auto migration_runner::migrate_v9(pacs_database_adapter& db) -> VoidResult {
1881 // V9: Add Unified Procedure Step (UPS) tables
1882 const std::string sql = R"(
1883 CREATE TABLE IF NOT EXISTS ups_workitems (
1884 workitem_pk INTEGER PRIMARY KEY AUTOINCREMENT,
1885 workitem_uid TEXT NOT NULL UNIQUE,
1886 state TEXT NOT NULL DEFAULT 'SCHEDULED',
1887 procedure_step_label TEXT,
1888 worklist_label TEXT,
1889 priority TEXT DEFAULT 'MEDIUM',
1890 scheduled_start_datetime TEXT,
1891 expected_completion_datetime TEXT,
1892 scheduled_station_name TEXT,
1893 scheduled_station_class TEXT,
1894 scheduled_station_geographic TEXT,
1895 scheduled_human_performers TEXT,
1896 input_information TEXT,
1897 performing_ae TEXT,
1898 progress_description TEXT,
1899 progress_percent INTEGER DEFAULT 0,
1900 output_information TEXT,
1901 transaction_uid TEXT,
1902 created_at TEXT NOT NULL DEFAULT (datetime('now')),
1903 updated_at TEXT NOT NULL DEFAULT (datetime('now')),
1904 CHECK (state IN ('SCHEDULED', 'IN PROGRESS', 'COMPLETED', 'CANCELED')),
1905 CHECK (priority IN ('LOW', 'MEDIUM', 'HIGH'))
1906 );
1907
1908 CREATE INDEX IF NOT EXISTS idx_ups_state ON ups_workitems(state);
1909 CREATE INDEX IF NOT EXISTS idx_ups_priority ON ups_workitems(priority);
1910 CREATE INDEX IF NOT EXISTS idx_ups_performing ON ups_workitems(performing_ae);
1911 CREATE INDEX IF NOT EXISTS idx_ups_scheduled ON ups_workitems(scheduled_start_datetime);
1912 CREATE INDEX IF NOT EXISTS idx_ups_worklist ON ups_workitems(worklist_label);
1913
1914 CREATE TABLE IF NOT EXISTS ups_subscriptions (
1915 subscription_pk INTEGER PRIMARY KEY AUTOINCREMENT,
1916 subscriber_ae TEXT NOT NULL,
1917 workitem_uid TEXT,
1918 deletion_lock INTEGER DEFAULT 0,
1919 filter_criteria TEXT,
1920 created_at TEXT NOT NULL DEFAULT (datetime('now')),
1921 UNIQUE (subscriber_ae, workitem_uid)
1922 );
1923
1924 CREATE INDEX IF NOT EXISTS idx_ups_sub_ae ON ups_subscriptions(subscriber_ae);
1925 CREATE INDEX IF NOT EXISTS idx_ups_sub_workitem ON ups_subscriptions(workitem_uid);
1926 )";
1927
1928 auto result = execute_sql(db, sql);
1929 if (result.is_err()) {
1930 return result;
1931 }
1932
1933 return record_migration(db, 9, "Add Unified Procedure Step (UPS) tables");
1934}
1935
1936#endif // PACS_WITH_DATABASE_SYSTEM
1937
1938} // namespace kcenon::pacs::storage
auto migrate_v4(sqlite3 *db) -> VoidResult
auto migrate_v9(sqlite3 *db) -> VoidResult
auto migrate_v2(sqlite3 *db) -> VoidResult
auto run_migrations_to(sqlite3 *db, int target_version) -> VoidResult
Run migrations up to a specific version.
auto migrate_v6(sqlite3 *db) -> VoidResult
static constexpr int LATEST_VERSION
Latest schema version (increment when adding migrations)
std::vector< std::pair< int, migration_function > > migrations_
Migration function registry.
auto get_current_version(sqlite3 *db) const -> int
Get the current schema version.
auto migrate_v8(sqlite3 *db) -> VoidResult
auto ensure_schema_version_table(sqlite3 *db) -> VoidResult
Create the schema_version table if it doesn't exist.
auto migrate_v5(sqlite3 *db) -> VoidResult
auto migrate_v1(sqlite3 *db) -> VoidResult
auto get_latest_version() const noexcept -> int
Get the latest available schema version.
auto execute_sql(sqlite3 *db, std::string_view sql) -> VoidResult
Execute SQL statement and handle errors.
auto needs_migration(sqlite3 *db) const -> bool
Check if migration is needed.
auto apply_migration(sqlite3 *db, int version) -> VoidResult
Apply a single migration.
auto record_migration(sqlite3 *db, int version, std::string_view description) -> VoidResult
Record a migration in the schema_version table.
auto get_history(sqlite3 *db) const -> std::vector< migration_record >
Get the migration history.
auto run_migrations(sqlite3 *db) -> VoidResult
Run all pending migrations.
auto migrate_v3(sqlite3 *db) -> VoidResult
auto migrate_v7(sqlite3 *db) -> VoidResult
Compatibility header providing kcenon::pacs::compat::format as an alias for std::format.
Database schema migration runner.
Unified database adapter for PACS system.
Represents a record of an applied database migration.
Result of a migration operation.
Definition hsm_types.h:230