diff --git a/tests/integration_auto_resume_tests.rs b/tests/integration_auto_resume_tests.rs
index 35be010..cd60120 100644
--- a/tests/integration_auto_resume_tests.rs
+++ b/tests/integration_auto_resume_tests.rs
@@ -54,18 +54,28 @@ async fn create_test_app_state() -> Arc<AppState> {
         oidc_client_secret: None,
         oidc_issuer_url: None,
         oidc_redirect_uri: None,
+        s3_enabled: false,
+        s3_config: None,
     };

     let db = Database::new(&config.database_url).await.unwrap();
+
+    // Create file service
+    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
+    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
+    let file_service = Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));
+
     let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(
         db.clone(),
         db.pool.clone(),
         4,
+        file_service.clone(),
     ));

     Arc::new(AppState {
         db: db.clone(),
         config,
+        file_service,
         webdav_scheduler: None,
         source_scheduler: None,
         queue_service,
diff --git a/tests/integration_cancellation_tests.rs b/tests/integration_cancellation_tests.rs
index 4f55209..1ba3f8e 100644
--- a/tests/integration_cancellation_tests.rs
+++ b/tests/integration_cancellation_tests.rs
@@ -52,15 +52,23 @@ async fn create_test_app_state() -> Arc<AppState> {
         oidc_client_secret: None,
         oidc_issuer_url: None,
         oidc_redirect_uri: None,
+        s3_enabled: false,
+        s3_config: None,
     };

     let db = Database::new(&config.database_url).await.unwrap();
-    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 2));
+    // Create file service
+    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
+    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
+    let file_service = Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));
+
+    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 2, file_service.clone()));
     let sync_progress_tracker = Arc::new(readur::services::sync_progress_tracker::SyncProgressTracker::new());

     Arc::new(AppState {
         db,
         config,
+        file_service,
         webdav_scheduler: None,
         source_scheduler: None,
         queue_service,
diff --git a/tests/integration_config_oidc_tests.rs b/tests/integration_config_oidc_tests.rs
index 7f2bef9..c85ce27 100644
--- a/tests/integration_config_oidc_tests.rs
+++ b/tests/integration_config_oidc_tests.rs
@@ -53,8 +53,9 @@ mod tests {
             jwt_secret: "test-secret".to_string(),
             upload_path: "./test-uploads".to_string(),
             watch_folder: "./test-watch".to_string(),
-            user_watch_base_dir: "./user_watch".to_string(),
-            enable_per_user_watch: false, allowed_file_types: vec!["pdf".to_string()],
+            user_watch_base_dir: "./user_watch".to_string(),
+            enable_per_user_watch: false,
+            allowed_file_types: vec!["pdf".to_string()],
             watch_interval_seconds: Some(30),
             file_stability_check_ms: Some(500),
             max_file_age_hours: None,
@@ -69,6 +70,8 @@ mod tests {
             oidc_client_secret: None,
             oidc_issuer_url: None,
             oidc_redirect_uri: None,
+            s3_enabled: false,
+            s3_config: None,
         }
     }

diff --git a/tests/integration_ignored_files_integration_tests.rs b/tests/integration_ignored_files_integration_tests.rs
index 4e8c745..c0c2899 100644
--- a/tests/integration_ignored_files_integration_tests.rs
+++ b/tests/integration_ignored_files_integration_tests.rs
@@ -38,15 +38,24 @@ async fn create_test_app_state() -> Result<Arc<AppState>> {
             oidc_client_secret: None,
             oidc_issuer_url: None,
             oidc_redirect_uri: None,
+            s3_enabled: false,
+            s3_config: None,
         }
     });

     let db = Database::new(&config.database_url).await?;
-    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 1));
+
+    // Create file service
+    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
+    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await?;
+    let file_service = Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));
+
+    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 1, file_service.clone()));

     Ok(Arc::new(AppState {
         db: db.clone(),
         config,
+        file_service,
         webdav_scheduler: None,
         source_scheduler: None,
         queue_service,
diff --git a/tests/integration_ocr_pipeline_integration_test.rs b/tests/integration_ocr_pipeline_integration_test.rs
index 13fd1d1..d27e99a 100644
--- a/tests/integration_ocr_pipeline_integration_test.rs
+++ b/tests/integration_ocr_pipeline_integration_test.rs
@@ -13,14 +13,12 @@ use tracing::{info, warn, error};
 use uuid::Uuid;

 use readur::{
-    config::Config,
     db::Database,
-    models::Document,
     services::file_service::FileService,
     storage::factory::create_storage_backend,
     storage::StorageConfig,
     ocr::enhanced::EnhancedOcrService,
-    ocr::queue::{OcrQueueService, OcrQueueItem},
+    ocr::queue::OcrQueueService,
     db_guardrails_simple::DocumentTransactionManager,
 };

@@ -33,7 +31,7 @@ async fn create_test_file_service(temp_path: &str) -> FileService {
 struct OCRPipelineTestHarness {
     db: Database,
     pool: PgPool,
-    file_service: FileService,
+    file_service: Arc<FileService>,
     ocr_service: EnhancedOcrService,
     queue_service: OcrQueueService,
     transaction_manager: DocumentTransactionManager,
@@ -60,9 +58,9 @@ impl OCRPipelineTestHarness {
             upload_path: upload_path.clone()
         };
         let storage_backend = create_storage_backend(storage_config).await?;
-        let file_service = FileService::with_storage(upload_path, storage_backend);
-        let ocr_service = EnhancedOcrService::new("/tmp".to_string(), file_service.clone());
-        let queue_service = OcrQueueService::new(db.clone(), pool.clone(), 4, std::sync::Arc::new(file_service));
+        let file_service = Arc::new(FileService::with_storage(upload_path, storage_backend));
+        let ocr_service = EnhancedOcrService::new("/tmp".to_string(), (*file_service).clone());
+        let queue_service = OcrQueueService::new(db.clone(), pool.clone(), 4, file_service.clone());
         let transaction_manager = DocumentTransactionManager::new(pool.clone());

         // Ensure test upload directory exists
diff --git a/tests/integration_oidc_tests.rs b/tests/integration_oidc_tests.rs
index 8fbce70..d8f0312 100644
--- a/tests/integration_oidc_tests.rs
+++ b/tests/integration_oidc_tests.rs
@@ -37,6 +37,8 @@ mod tests {
             oidc_client_secret: None,
             oidc_issuer_url: None,
             oidc_redirect_uri: None,
+            s3_enabled: false,
+            s3_config: None,
         };

         let db = readur::db::Database::new(&config.database_url).await.unwrap();
@@ -54,21 +56,28 @@ mod tests {
             }
         }

+        // Create file service
+        let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
+        let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
+        let file_service = Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));
+
         let app = axum::Router::new()
             .nest("/api/auth", readur::routes::auth::router())
             .with_state(Arc::new(AppState {
                 db: db.clone(),
                 config,
+                file_service: file_service.clone(),
                 webdav_scheduler: None,
                 source_scheduler: None,
                 queue_service: Arc::new(readur::ocr::queue::OcrQueueService::new(
                     db.clone(),
                     db.pool.clone(),
-                    2
+                    2,
+                    file_service.clone()
                 )),
                 oidc_client: None,
                 sync_progress_tracker: std::sync::Arc::new(readur::services::sync_progress_tracker::SyncProgressTracker::new()),
-                user_watch_service: None,
+                user_watch_service: None,
             }));

         (app, ())
@@ -120,6 +129,8 @@ mod tests {
             oidc_client_secret: Some("test-client-secret".to_string()),
             oidc_issuer_url: Some(mock_server.uri()),
             oidc_redirect_uri: Some("http://localhost:8000/auth/oidc/callback".to_string()),
+            s3_enabled: false,
+            s3_config: None,
         };

         let oidc_client = match OidcClient::new(&config).await {
@@ -145,22 +156,29 @@ mod tests {
             }
         }

+        // Create file service for OIDC app
+        let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
+        let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
+        let file_service = Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));
+
         // Create app with OIDC configuration
         let app = axum::Router::new()
             .nest("/api/auth", readur::routes::auth::router())
             .with_state(Arc::new(AppState {
                 db: db.clone(),
                 config,
+                file_service: file_service.clone(),
                 webdav_scheduler: None,
                 source_scheduler: None,
                 queue_service: Arc::new(readur::ocr::queue::OcrQueueService::new(
                     db.clone(),
                     db.pool.clone(),
-                    2
+                    2,
+                    file_service.clone()
                 )),
                 oidc_client,
                 sync_progress_tracker: std::sync::Arc::new(readur::services::sync_progress_tracker::SyncProgressTracker::new()),
-                user_watch_service: None,
+                user_watch_service: None,
             }));

         (app, mock_server)
diff --git a/tests/integration_simple_throttling_test.rs b/tests/integration_simple_throttling_test.rs
index 05dc98e..1b7dee4 100644
--- a/tests/integration_simple_throttling_test.rs
+++ b/tests/integration_simple_throttling_test.rs
@@ -48,12 +48,14 @@ impl SimpleThrottleTest {
             .await?;

         let db = Database::new(&db_url).await?;
+        let file_service = Arc::new(create_test_file_service("/tmp/test_throttling").await);

         // Create queue service with throttling (max 15 concurrent jobs)
         let queue_service = Arc::new(OcrQueueService::new(
             db.clone(),
             pool.clone(),
-            15 // This should prevent DB pool exhaustion
+            15, // This should prevent DB pool exhaustion
+            file_service
         ));

         Ok(Self {
diff --git a/tests/integration_source_scheduler_simple_tests.rs b/tests/integration_source_scheduler_simple_tests.rs
index 1a9a1fb..2fac3c4 100644
--- a/tests/integration_source_scheduler_simple_tests.rs
+++ b/tests/integration_source_scheduler_simple_tests.rs
@@ -46,14 +46,22 @@ async fn create_test_app_state() -> Arc<AppState> {
         oidc_client_secret: None,
         oidc_issuer_url: None,
         oidc_redirect_uri: None,
+        s3_enabled: false,
+        s3_config: None,
     };

     let db = Database::new(&config.database_url).await.unwrap();
-    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 2));
+    // Create file service
+    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
+    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
+    let file_service = std::sync::Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));
+
+    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 2, file_service.clone()));

     Arc::new(AppState {
         db: db.clone(),
         config,
+        file_service,
         webdav_scheduler: None,
         source_scheduler: None,
         queue_service,
diff --git a/tests/integration_source_sync_cancellation_workflow_tests.rs b/tests/integration_source_sync_cancellation_workflow_tests.rs
index eb6caff..afddda5 100644
--- a/tests/integration_source_sync_cancellation_workflow_tests.rs
+++ b/tests/integration_source_sync_cancellation_workflow_tests.rs
@@ -28,7 +28,7 @@ use readur::{
     AppState,
     config::Config,
     db::Database,
-    models::{Source, SourceType, SourceStatus, User, CreateSource, CreateUser, UserRole, AuthProvider},
+    models::{Source, SourceType, SourceStatus, User, CreateSource, CreateUser, UserRole},
     auth::Claims,
 };

@@ -60,21 +60,31 @@ async fn create_test_app_state() -> Arc<AppState> {
         oidc_client_secret: None,
         oidc_issuer_url: None,
         oidc_redirect_uri: None,
+        s3_enabled: false,
+        s3_config: None,
     };

     let db = Database::new(&config.database_url).await.unwrap();
+
+    // Create file service
+    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
+    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
+    let file_service = Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));
+
     let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(
         db.clone(),
         db.pool.clone(),
         2,
+        file_service.clone(),
     ));

     let sync_progress_tracker = Arc::new(readur::services::sync_progress_tracker::SyncProgressTracker::new());

     // Create initial app state
-    let mut app_state = AppState {
+    let app_state = AppState {
         db: db.clone(),
         config,
+        file_service: file_service.clone(),
         webdav_scheduler: None,
         source_scheduler: None,
         queue_service,
@@ -95,6 +105,7 @@ async fn create_test_app_state() -> Arc<AppState> {
     Arc::new(AppState {
         db: state_arc.db.clone(),
         config: state_arc.config.clone(),
+        file_service: state_arc.file_service.clone(),
         webdav_scheduler: None,
         source_scheduler: Some(source_scheduler),
         queue_service: state_arc.queue_service.clone(),
diff --git a/tests/integration_source_sync_hash_duplicate_tests.rs b/tests/integration_source_sync_hash_duplicate_tests.rs
index 64a836e..268eb2c 100644
--- a/tests/integration_source_sync_hash_duplicate_tests.rs
+++ b/tests/integration_source_sync_hash_duplicate_tests.rs
@@ -143,16 +143,25 @@ async fn create_test_app_state() -> Result<Arc<AppState>> {
             oidc_client_secret: None,
             oidc_issuer_url: None,
             oidc_redirect_uri: None,
+            s3_enabled: false,
+            s3_config: None,
         }
     });

     let db = Database::new(&config.database_url).await?;
+
+    // Create file service
+    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
+    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await?;
+    let file_service = std::sync::Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));
+
     let queue_service = std::sync::Arc::new(
-        readur::ocr::queue::OcrQueueService::new(db.clone(), db.get_pool().clone(), 1)
+        readur::ocr::queue::OcrQueueService::new(db.clone(), db.get_pool().clone(), 1, file_service.clone())
     );
     Ok(Arc::new(AppState {
         db: db.clone(),
         config,
+        file_service,
         webdav_scheduler: None,
         source_scheduler: None,
         queue_service,
diff --git a/tests/integration_stop_sync_functionality_tests.rs b/tests/integration_stop_sync_functionality_tests.rs
index 1de3b94..461d46e 100644
--- a/tests/integration_stop_sync_functionality_tests.rs
+++ b/tests/integration_stop_sync_functionality_tests.rs
@@ -37,7 +37,8 @@ async fn create_test_app_state() -> Arc<AppState> {
         upload_path: "/tmp/test_uploads".to_string(),
         watch_folder: "/tmp/watch".to_string(),
         user_watch_base_dir: "./user_watch".to_string(),
-        enable_per_user_watch: false, allowed_file_types: vec!["pdf".to_string(), "txt".to_string()],
+        enable_per_user_watch: false,
+        allowed_file_types: vec!["pdf".to_string(), "txt".to_string()],
         watch_interval_seconds: Some(10),
         file_stability_check_ms: Some(1000),
         max_file_age_hours: Some(24),
@@ -52,18 +53,28 @@ async fn create_test_app_state() -> Arc<AppState> {
         oidc_client_secret: None,
         oidc_issuer_url: None,
         oidc_redirect_uri: None,
+        s3_enabled: false,
+        s3_config: None,
     };

     let db = Database::new(&config.database_url).await.unwrap();
+
+    // Create file service
+    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
+    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
+    let file_service = Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));
+
     let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(
         db.clone(),
         db.pool.clone(),
         4,
+        file_service.clone(),
     ));

     Arc::new(AppState {
         db: db.clone(),
         config,
+        file_service,
         webdav_scheduler: None,
         source_scheduler: None,
         queue_service,
diff --git a/tests/integration_universal_source_sync_tests.rs b/tests/integration_universal_source_sync_tests.rs
index 067652f..e1a46ce 100644
--- a/tests/integration_universal_source_sync_tests.rs
+++ b/tests/integration_universal_source_sync_tests.rs
@@ -138,7 +138,8 @@ async fn create_test_app_state() -> Arc<AppState> {
         upload_path: "/tmp/test_uploads".to_string(),
         watch_folder: "/tmp/test_watch".to_string(),
         user_watch_base_dir: "./user_watch".to_string(),
-        enable_per_user_watch: false, allowed_file_types: vec!["pdf".to_string(), "txt".to_string()],
+        enable_per_user_watch: false,
+        allowed_file_types: vec!["pdf".to_string(), "txt".to_string()],
         watch_interval_seconds: Some(30),
         file_stability_check_ms: Some(500),
         max_file_age_hours: None,
@@ -153,14 +154,22 @@ async fn create_test_app_state() -> Arc<AppState> {
         oidc_client_secret: None,
         oidc_issuer_url: None,
         oidc_redirect_uri: None,
+        s3_enabled: false,
+        s3_config: None,
     };

     let db = Database::new(&config.database_url).await.unwrap();
-    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 2));
+    // Create file service
+    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
+    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
+    let file_service = std::sync::Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));
+
+    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 2, file_service.clone()));

     Arc::new(AppState {
         db,
         config,
+        file_service,
         webdav_scheduler: None,
         source_scheduler: None,
         queue_service,
diff --git a/tests/integration_webdav_comprehensive_tests.rs b/tests/integration_webdav_comprehensive_tests.rs
index 0ee335e..656c71e 100644
--- a/tests/integration_webdav_comprehensive_tests.rs
+++ b/tests/integration_webdav_comprehensive_tests.rs
@@ -348,6 +348,8 @@ fn test_webdav_scheduler_creation() {
         oidc_client_secret: None,
         oidc_issuer_url: None,
         oidc_redirect_uri: None,
+        s3_enabled: false,
+        s3_config: None,
     };

     // Note: This is a minimal test since we can't easily mock the database
diff --git a/tests/integration_webdav_hash_duplicate_tests.rs b/tests/integration_webdav_hash_duplicate_tests.rs
index 99862ea..23f4f83 100644
--- a/tests/integration_webdav_hash_duplicate_tests.rs
+++ b/tests/integration_webdav_hash_duplicate_tests.rs
@@ -143,16 +143,25 @@ async fn create_test_app_state() -> Result<Arc<AppState>> {
             oidc_client_secret: None,
             oidc_issuer_url: None,
             oidc_redirect_uri: None,
+            s3_enabled: false,
+            s3_config: None,
         }
     });

     let db = Database::new(&config.database_url).await?;
+
+    // Create file service
+    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
+    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await?;
+    let file_service = std::sync::Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));
+
     let queue_service = std::sync::Arc::new(
-        readur::ocr::queue::OcrQueueService::new(db.clone(), db.get_pool().clone(), 1)
+        readur::ocr::queue::OcrQueueService::new(db.clone(), db.get_pool().clone(), 1, file_service.clone())
     );
     Ok(Arc::new(AppState {
         db: db.clone(),
         config,
+        file_service,
         webdav_scheduler: None,
         source_scheduler: None,
         queue_service,
diff --git a/tests/integration_webdav_integration_tests.rs b/tests/integration_webdav_integration_tests.rs
index 310a7e0..af79606 100644
--- a/tests/integration_webdav_integration_tests.rs
+++ b/tests/integration_webdav_integration_tests.rs
@@ -9,12 +9,11 @@ use serde_json::{json, Value};
 use uuid::Uuid;

 use readur::{
-    db::Database,
-    config::Config,
     models::*,
     routes,
     AppState,
-    test_helpers,
+    config::Config,
+    db::Database,
 };

 // Removed constant - will use environment variables instead
@@ -82,22 +81,42 @@ async fn setup_test_app() -> (Router, Arc<AppState>) {
         .unwrap_or_else(|_| "postgresql://readur:readur@localhost:5432/readur".to_string());

     // Create test configuration with custom database URL
-    let mut config = test_helpers::create_test_config();
-    config.database_url = database_url.clone();
-    config.jwt_secret = "test_jwt_secret_for_integration_tests".to_string();
-    config.allowed_file_types = vec!["pdf".to_string(), "png".to_string()];
-    config.watch_interval_seconds = Some(10);
-    config.file_stability_check_ms = Some(1000);
-    config.max_file_age_hours = Some(24);
-    config.memory_limit_mb = 512;
-    config.concurrent_ocr_jobs = 4;
-    config.max_file_size_mb = 50;
-    config.ocr_timeout_seconds = 300;
+    let config = Config {
+        database_url: database_url.clone(),
+        server_address: "127.0.0.1:0".to_string(),
+        jwt_secret: "test_jwt_secret_for_integration_tests".to_string(),
+        upload_path: "/tmp/test_uploads".to_string(),
+        watch_folder: "/tmp/test_watch".to_string(),
+        user_watch_base_dir: "/tmp/user_watch".to_string(),
+        enable_per_user_watch: false,
+        allowed_file_types: vec!["pdf".to_string(), "png".to_string()],
+        watch_interval_seconds: Some(10),
+        file_stability_check_ms: Some(1000),
+        max_file_age_hours: Some(24),
+        memory_limit_mb: 512,
+        concurrent_ocr_jobs: 4,
+        max_file_size_mb: 50,
+        ocr_timeout_seconds: 300,
+        ocr_language: "eng".to_string(),
+        cpu_priority: "normal".to_string(),
+        oidc_enabled: false,
+        oidc_client_id: None,
+        oidc_client_secret: None,
+        oidc_issuer_url: None,
+        oidc_redirect_uri: None,
+        s3_enabled: false,
+        s3_config: None,
+    };

     // Create test services
-    let db = test_helpers::create_test_database().await;
-    let file_service = test_helpers::create_test_file_service(Some("/tmp/test_uploads")).await;
-    let queue_service = test_helpers::create_test_queue_service(db.clone(), db.pool.clone(), file_service.clone());
+    let db = Database::new(&database_url).await.unwrap();
+
+    // Create file service
+    let storage_config = readur::storage::StorageConfig::Local { upload_path: "/tmp/test_uploads".to_string() };
+    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
+    let file_service = std::sync::Arc::new(readur::services::file_service::FileService::with_storage("/tmp/test_uploads".to_string(), storage_backend));
+
+    let queue_service = std::sync::Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 4, file_service.clone()));

     // Create AppState
     let state = Arc::new(AppState {