From 24269ea51323271b61a14f208e060cd839fd5319 Mon Sep 17 00:00:00 2001 From: perf3ct Date: Sun, 27 Jul 2025 20:36:54 +0000 Subject: [PATCH] feat(tests): resolve duplicated test coverage for webdav functionality --- .../webdav/tests/atomic_operations_tests.rs | 259 ----------- .../webdav/tests/critical_fixes_tests.rs | 372 ---------------- src/services/webdav/tests/mod.rs | 2 - src/services/webdav/url_construction_tests.rs | 35 +- ...egration_webdav_atomic_operations_tests.rs | 309 +++++++++++++ ...integration_webdav_critical_fixes_tests.rs | 412 ++++++++++++++++++ 6 files changed, 731 insertions(+), 658 deletions(-) delete mode 100644 src/services/webdav/tests/atomic_operations_tests.rs delete mode 100644 src/services/webdav/tests/critical_fixes_tests.rs create mode 100644 tests/integration_webdav_atomic_operations_tests.rs create mode 100644 tests/integration_webdav_critical_fixes_tests.rs diff --git a/src/services/webdav/tests/atomic_operations_tests.rs b/src/services/webdav/tests/atomic_operations_tests.rs deleted file mode 100644 index 1308e44..0000000 --- a/src/services/webdav/tests/atomic_operations_tests.rs +++ /dev/null @@ -1,259 +0,0 @@ -use std::sync::Arc; -use uuid::Uuid; -use tokio; -use crate::models::CreateWebDAVDirectory; -use crate::test_utils::TestContext; -use crate::db::Database; - -#[cfg(test)] -mod tests { - use super::*; - - async fn setup_test_database() -> Arc { - let ctx = TestContext::new().await; - Arc::new(ctx.state.db.clone()) - } - - #[tokio::test] - async fn test_bulk_create_or_update_atomic() { - let db = setup_test_database().await; - let user_id = Uuid::new_v4(); - - let directories = vec![ - CreateWebDAVDirectory { - user_id, - directory_path: "/test/dir1".to_string(), - directory_etag: "etag1".to_string(), - file_count: 0, - total_size_bytes: 0, - }, - CreateWebDAVDirectory { - user_id, - directory_path: "/test/dir2".to_string(), - directory_etag: "etag2".to_string(), - file_count: 0, - total_size_bytes: 0, - }, - CreateWebDAVDirectory { - user_id, - directory_path: "/test/dir3".to_string(), - directory_etag: "etag3".to_string(), - file_count: 0, - total_size_bytes: 0, - }, - ]; - - // Test bulk operation - let result = db.bulk_create_or_update_webdav_directories(&directories).await; - assert!(result.is_ok()); - - let saved_directories = result.unwrap(); - assert_eq!(saved_directories.len(), 3); - - // Verify all directories were saved with correct ETags - for (original, saved) in directories.iter().zip(saved_directories.iter()) { - assert_eq!(original.directory_path, saved.directory_path); - assert_eq!(original.directory_etag, saved.directory_etag); - assert_eq!(original.user_id, saved.user_id); - } - } - - #[tokio::test] - async fn test_sync_webdav_directories_atomic() { - let db = setup_test_database().await; - let user_id = Uuid::new_v4(); - - // First, create some initial directories - let initial_directories = vec![ - CreateWebDAVDirectory { - user_id, - directory_path: "/test/dir1".to_string(), - directory_etag: "etag1".to_string(), - file_count: 0, - total_size_bytes: 0, - }, - CreateWebDAVDirectory { - user_id, - directory_path: "/test/dir2".to_string(), - directory_etag: "etag2".to_string(), - file_count: 0, - total_size_bytes: 0, - }, - ]; - - let _ = db.bulk_create_or_update_webdav_directories(&initial_directories).await.unwrap(); - - // Now sync with a new set that has one update, one delete, and one new - let sync_directories = vec![ - CreateWebDAVDirectory { - user_id, - directory_path: "/test/dir1".to_string(), - directory_etag: 
"etag1_updated".to_string(), // Updated - file_count: 5, - total_size_bytes: 1024, - }, - CreateWebDAVDirectory { - user_id, - directory_path: "/test/dir3".to_string(), // New - directory_etag: "etag3".to_string(), - file_count: 0, - total_size_bytes: 0, - }, - // dir2 is missing, should be deleted - ]; - - let result = db.sync_webdav_directories(user_id, &sync_directories).await; - assert!(result.is_ok()); - - let (updated_directories, deleted_count) = result.unwrap(); - - // Should have 2 directories (dir1 updated, dir3 new) - assert_eq!(updated_directories.len(), 2); - - // Should have deleted 1 directory (dir2) - assert_eq!(deleted_count, 1); - - // Verify the updated directory has the new ETag - let dir1 = updated_directories.iter() - .find(|d| d.directory_path == "/test/dir1") - .unwrap(); - assert_eq!(dir1.directory_etag, "etag1_updated"); - assert_eq!(dir1.file_count, 5); - assert_eq!(dir1.total_size_bytes, 1024); - - // Verify the new directory exists - let dir3 = updated_directories.iter() - .find(|d| d.directory_path == "/test/dir3") - .unwrap(); - assert_eq!(dir3.directory_etag, "etag3"); - } - - #[tokio::test] - async fn test_delete_missing_directories() { - let db = setup_test_database().await; - let user_id = Uuid::new_v4(); - - // Create some directories - let directories = vec![ - CreateWebDAVDirectory { - user_id, - directory_path: "/test/dir1".to_string(), - directory_etag: "etag1".to_string(), - file_count: 0, - total_size_bytes: 0, - }, - CreateWebDAVDirectory { - user_id, - directory_path: "/test/dir2".to_string(), - directory_etag: "etag2".to_string(), - file_count: 0, - total_size_bytes: 0, - }, - CreateWebDAVDirectory { - user_id, - directory_path: "/test/dir3".to_string(), - directory_etag: "etag3".to_string(), - file_count: 0, - total_size_bytes: 0, - }, - ]; - - let _ = db.bulk_create_or_update_webdav_directories(&directories).await.unwrap(); - - // Delete directories not in this list (should delete dir2 and dir3) - let existing_paths = vec!["/test/dir1".to_string()]; - let deleted_count = db.delete_missing_webdav_directories(user_id, &existing_paths).await.unwrap(); - - assert_eq!(deleted_count, 2); - - // Verify only dir1 remains - let remaining_directories = db.list_webdav_directories(user_id).await.unwrap(); - assert_eq!(remaining_directories.len(), 1); - assert_eq!(remaining_directories[0].directory_path, "/test/dir1"); - } - - #[tokio::test] - async fn test_atomic_rollback_on_failure() { - let db = setup_test_database().await; - let user_id = Uuid::new_v4(); - - // Create a directory that would conflict - let initial_dir = CreateWebDAVDirectory { - user_id, - directory_path: "/test/dir1".to_string(), - directory_etag: "etag1".to_string(), - file_count: 0, - total_size_bytes: 0, - }; - - let _ = db.create_or_update_webdav_directory(&initial_dir).await.unwrap(); - - // Try to bulk insert with one invalid entry that should cause rollback - let directories_with_invalid = vec![ - CreateWebDAVDirectory { - user_id, - directory_path: "/test/dir2".to_string(), - directory_etag: "etag2".to_string(), - file_count: 0, - total_size_bytes: 0, - }, - CreateWebDAVDirectory { - user_id: Uuid::nil(), // Invalid user ID should cause failure - directory_path: "/test/dir3".to_string(), - directory_etag: "etag3".to_string(), - file_count: 0, - total_size_bytes: 0, - }, - ]; - - // This should fail and rollback - let result = db.bulk_create_or_update_webdav_directories(&directories_with_invalid).await; - assert!(result.is_err()); - - // Verify that no partial changes were made 
(only original dir1 should exist) - let directories = db.list_webdav_directories(user_id).await.unwrap(); - assert_eq!(directories.len(), 1); - assert_eq!(directories[0].directory_path, "/test/dir1"); - } - - #[tokio::test] - async fn test_concurrent_directory_updates() { - let db = setup_test_database().await; - let user_id = Uuid::new_v4(); - - // Spawn multiple concurrent tasks that try to update the same directory - let mut handles = vec![]; - - for i in 0..10 { - let db_clone = db.clone(); - let handle = tokio::spawn(async move { - let directory = CreateWebDAVDirectory { - user_id, - directory_path: "/test/concurrent".to_string(), - directory_etag: format!("etag_{}", i), - file_count: i as i64, - total_size_bytes: (i * 1024) as i64, - }; - - db_clone.create_or_update_webdav_directory(&directory).await - }); - handles.push(handle); - } - - // Wait for all tasks to complete - let results: Vec<_> = futures::future::join_all(handles).await; - - // All operations should succeed (last writer wins) - for result in results { - assert!(result.is_ok()); - assert!(result.unwrap().is_ok()); - } - - // Verify final state - let directories = db.list_webdav_directories(user_id).await.unwrap(); - assert_eq!(directories.len(), 1); - assert_eq!(directories[0].directory_path, "/test/concurrent"); - // ETag should be from one of the concurrent updates - assert!(directories[0].directory_etag.starts_with("etag_")); - } -} \ No newline at end of file diff --git a/src/services/webdav/tests/critical_fixes_tests.rs b/src/services/webdav/tests/critical_fixes_tests.rs deleted file mode 100644 index bab8542..0000000 --- a/src/services/webdav/tests/critical_fixes_tests.rs +++ /dev/null @@ -1,372 +0,0 @@ -use std::sync::Arc; -use std::time::{Duration, Instant}; -use uuid::Uuid; -use tokio; -use crate::models::CreateWebDAVDirectory; -use crate::db::Database; -use crate::test_utils::TestContext; - -#[cfg(test)] -mod tests { - use super::*; - - /// Integration test that validates the race condition fix - /// Tests that concurrent directory updates are atomic and consistent - #[tokio::test] - async fn test_race_condition_fix_atomic_updates() { - let db = setup_test_database().await; - let user_id = Uuid::new_v4(); - - // Create initial directories - let initial_directories = vec![ - CreateWebDAVDirectory { - user_id, - directory_path: "/test/dir1".to_string(), - directory_etag: "initial_etag1".to_string(), - file_count: 5, - total_size_bytes: 1024, - }, - CreateWebDAVDirectory { - user_id, - directory_path: "/test/dir2".to_string(), - directory_etag: "initial_etag2".to_string(), - file_count: 10, - total_size_bytes: 2048, - }, - ]; - - let _ = db.bulk_create_or_update_webdav_directories(&initial_directories).await.unwrap(); - - // Simulate race condition: multiple tasks trying to update directories simultaneously - let mut handles = vec![]; - - for i in 0..5 { - let db_clone = Arc::clone(&db); - let handle = tokio::spawn(async move { - let updated_directories = vec![ - CreateWebDAVDirectory { - user_id, - directory_path: "/test/dir1".to_string(), - directory_etag: format!("race_etag1_{}", i), - file_count: 5 + i as i64, - total_size_bytes: 1024 + (i * 100) as i64, - }, - CreateWebDAVDirectory { - user_id, - directory_path: "/test/dir2".to_string(), - directory_etag: format!("race_etag2_{}", i), - file_count: 10 + i as i64, - total_size_bytes: 2048 + (i * 200) as i64, - }, - CreateWebDAVDirectory { - user_id, - directory_path: format!("/test/new_dir_{}", i), - directory_etag: format!("new_etag_{}", i), - file_count: i 
as i64, - total_size_bytes: (i * 512) as i64, - }, - ]; - - // Use the atomic sync operation - db_clone.sync_webdav_directories(user_id, &updated_directories).await - }); - handles.push(handle); - } - - // Wait for all operations to complete - let results: Vec<_> = futures::future::join_all(handles).await; - - // All operations should succeed (transactions ensure atomicity) - for result in results { - assert!(result.is_ok()); - let sync_result = result.unwrap(); - assert!(sync_result.is_ok()); - } - - // Final state should be consistent - let final_directories = db.list_webdav_directories(user_id).await.unwrap(); - - // Should have 3 directories (dir1, dir2, and one of the new_dir_X) - assert_eq!(final_directories.len(), 3); - - // All ETags should be from one consistent transaction - let dir1 = final_directories.iter().find(|d| d.directory_path == "/test/dir1").unwrap(); - let dir2 = final_directories.iter().find(|d| d.directory_path == "/test/dir2").unwrap(); - - // ETags should be from the same transaction (both should end with same number) - let etag1_suffix = dir1.directory_etag.chars().last().unwrap(); - let etag2_suffix = dir2.directory_etag.chars().last().unwrap(); - assert_eq!(etag1_suffix, etag2_suffix, "ETags should be from same atomic transaction"); - } - - /// Test that validates directory deletion detection works correctly - #[tokio::test] - async fn test_deletion_detection_fix() { - let db = setup_test_database().await; - let user_id = Uuid::new_v4(); - - // Create initial directories - let initial_directories = vec![ - CreateWebDAVDirectory { - user_id, - directory_path: "/documents/folder1".to_string(), - directory_etag: "etag1".to_string(), - file_count: 5, - total_size_bytes: 1024, - }, - CreateWebDAVDirectory { - user_id, - directory_path: "/documents/folder2".to_string(), - directory_etag: "etag2".to_string(), - file_count: 3, - total_size_bytes: 512, - }, - CreateWebDAVDirectory { - user_id, - directory_path: "/documents/folder3".to_string(), - directory_etag: "etag3".to_string(), - file_count: 8, - total_size_bytes: 2048, - }, - ]; - - let _ = db.bulk_create_or_update_webdav_directories(&initial_directories).await.unwrap(); - - // Verify all 3 directories exist - let directories_before = db.list_webdav_directories(user_id).await.unwrap(); - assert_eq!(directories_before.len(), 3); - - // Simulate sync where folder2 and folder3 are deleted from WebDAV server - let current_directories = vec![ - CreateWebDAVDirectory { - user_id, - directory_path: "/documents/folder1".to_string(), - directory_etag: "etag1_updated".to_string(), // Updated - file_count: 6, - total_size_bytes: 1200, - }, - // folder2 and folder3 are missing (deleted from server) - ]; - - // Use atomic sync which should detect and remove deleted directories - let (updated_directories, deleted_count) = db.sync_webdav_directories(user_id, ¤t_directories).await.unwrap(); - - // Should have 1 updated directory and 2 deletions - assert_eq!(updated_directories.len(), 1); - assert_eq!(deleted_count, 2); - - // Verify only folder1 remains with updated ETag - let final_directories = db.list_webdav_directories(user_id).await.unwrap(); - assert_eq!(final_directories.len(), 1); - assert_eq!(final_directories[0].directory_path, "/documents/folder1"); - assert_eq!(final_directories[0].directory_etag, "etag1_updated"); - assert_eq!(final_directories[0].file_count, 6); - } - - /// Test that validates proper ETag comparison handling - #[tokio::test] - async fn test_etag_comparison_fix() { - use 
crate::webdav_xml_parser::{compare_etags, weak_compare_etags, strong_compare_etags}; - - // Test weak vs strong ETag comparison - let strong_etag = "\"abc123\""; - let weak_etag = "W/\"abc123\""; - let different_etag = "\"def456\""; - - // Smart comparison should handle weak/strong equivalence - assert!(compare_etags(strong_etag, weak_etag), "Smart comparison should match weak and strong with same content"); - assert!(!compare_etags(strong_etag, different_etag), "Smart comparison should reject different content"); - - // Weak comparison should match regardless of weak/strong - assert!(weak_compare_etags(strong_etag, weak_etag), "Weak comparison should match"); - assert!(weak_compare_etags(weak_etag, strong_etag), "Weak comparison should be symmetrical"); - - // Strong comparison should reject weak ETags - assert!(!strong_compare_etags(strong_etag, weak_etag), "Strong comparison should reject weak ETags"); - assert!(!strong_compare_etags(weak_etag, strong_etag), "Strong comparison should reject weak ETags"); - assert!(strong_compare_etags(strong_etag, "\"abc123\""), "Strong comparison should match strong ETags"); - - // Test case sensitivity (ETags should be case-sensitive per RFC) - assert!(!compare_etags("\"ABC123\"", "\"abc123\""), "ETags should be case-sensitive"); - - // Test various real-world formats - let nextcloud_etag = "\"5f3e7e8a9b2c1d4\""; - let apache_etag = "\"1234-567-890abcdef\""; - let nginx_weak = "W/\"5f3e7e8a\""; - - assert!(!compare_etags(nextcloud_etag, apache_etag), "Different ETag values should not match"); - assert!(weak_compare_etags(nginx_weak, "\"5f3e7e8a\""), "Weak and strong with same content should match in weak comparison"); - } - - /// Test performance of bulk operations vs individual operations - #[tokio::test] - async fn test_bulk_operations_performance() { - let db = setup_test_database().await; - let user_id = Uuid::new_v4(); - - // Create test data - let test_directories: Vec<_> = (0..100).map(|i| CreateWebDAVDirectory { - user_id, - directory_path: format!("/test/perf/dir{}", i), - directory_etag: format!("etag{}", i), - file_count: i as i64, - total_size_bytes: (i * 1024) as i64, - }).collect(); - - // Test individual operations (old way) - let start_individual = Instant::now(); - for directory in &test_directories { - let _ = db.create_or_update_webdav_directory(directory).await; - } - let individual_duration = start_individual.elapsed(); - - // Clear data - let _ = db.clear_webdav_directories(user_id).await; - - // Test bulk operation (new way) - let start_bulk = Instant::now(); - let _ = db.bulk_create_or_update_webdav_directories(&test_directories).await; - let bulk_duration = start_bulk.elapsed(); - - // Bulk should be faster - assert!(bulk_duration < individual_duration, - "Bulk operations should be faster than individual operations. 
Bulk: {:?}, Individual: {:?}", - bulk_duration, individual_duration); - - // Verify all data was saved correctly - let saved_directories = db.list_webdav_directories(user_id).await.unwrap(); - assert_eq!(saved_directories.len(), 100); - } - - /// Test transaction rollback behavior - #[tokio::test] - async fn test_transaction_rollback_consistency() { - let db = setup_test_database().await; - let user_id = Uuid::new_v4(); - - // Create some initial data - let initial_directory = CreateWebDAVDirectory { - user_id, - directory_path: "/test/initial".to_string(), - directory_etag: "initial_etag".to_string(), - file_count: 1, - total_size_bytes: 100, - }; - - let _ = db.create_or_update_webdav_directory(&initial_directory).await.unwrap(); - - // Try to create directories where one has invalid data that should cause rollback - let directories_with_failure = vec![ - CreateWebDAVDirectory { - user_id, - directory_path: "/test/valid1".to_string(), - directory_etag: "valid_etag1".to_string(), - file_count: 2, - total_size_bytes: 200, - }, - CreateWebDAVDirectory { - user_id: Uuid::nil(), // This should cause a constraint violation - directory_path: "/test/invalid".to_string(), - directory_etag: "invalid_etag".to_string(), - file_count: 3, - total_size_bytes: 300, - }, - CreateWebDAVDirectory { - user_id, - directory_path: "/test/valid2".to_string(), - directory_etag: "valid_etag2".to_string(), - file_count: 4, - total_size_bytes: 400, - }, - ]; - - // This should fail and rollback - let result = db.bulk_create_or_update_webdav_directories(&directories_with_failure).await; - assert!(result.is_err(), "Transaction should fail due to invalid user_id"); - - // Verify that no partial changes were made - only initial directory should exist - let final_directories = db.list_webdav_directories(user_id).await.unwrap(); - assert_eq!(final_directories.len(), 1); - assert_eq!(final_directories[0].directory_path, "/test/initial"); - assert_eq!(final_directories[0].directory_etag, "initial_etag"); - } - - /// Integration test simulating real WebDAV sync scenario - #[tokio::test] - async fn test_full_sync_integration() { - use crate::services::webdav::SmartSyncService; - - let app_state = Arc::new(setup_test_app_state().await); - let smart_sync = SmartSyncService::new(app_state.clone()); - let user_id = Uuid::new_v4(); - - // Simulate initial sync with some directories - let initial_directories = vec![ - CreateWebDAVDirectory { - user_id, - directory_path: "/documents".to_string(), - directory_etag: "docs_etag_v1".to_string(), - file_count: 10, - total_size_bytes: 10240, - }, - CreateWebDAVDirectory { - user_id, - directory_path: "/pictures".to_string(), - directory_etag: "pics_etag_v1".to_string(), - file_count: 5, - total_size_bytes: 51200, - }, - ]; - - let (saved_dirs, _) = app_state.db.sync_webdav_directories(user_id, &initial_directories).await.unwrap(); - assert_eq!(saved_dirs.len(), 2); - - // Simulate second sync with changes - let updated_directories = vec![ - CreateWebDAVDirectory { - user_id, - directory_path: "/documents".to_string(), - directory_etag: "docs_etag_v2".to_string(), // Changed - file_count: 12, - total_size_bytes: 12288, - }, - CreateWebDAVDirectory { - user_id, - directory_path: "/videos".to_string(), // New directory - directory_etag: "videos_etag_v1".to_string(), - file_count: 3, - total_size_bytes: 102400, - }, - // /pictures directory was deleted from server - ]; - - let (updated_dirs, deleted_count) = app_state.db.sync_webdav_directories(user_id, &updated_directories).await.unwrap(); 
- - // Should have 2 directories (updated documents + new videos) and 1 deletion (pictures) - assert_eq!(updated_dirs.len(), 2); - assert_eq!(deleted_count, 1); - - // Verify final state - let final_dirs = app_state.db.list_webdav_directories(user_id).await.unwrap(); - assert_eq!(final_dirs.len(), 2); - - let docs_dir = final_dirs.iter().find(|d| d.directory_path == "/documents").unwrap(); - assert_eq!(docs_dir.directory_etag, "docs_etag_v2"); - assert_eq!(docs_dir.file_count, 12); - - let videos_dir = final_dirs.iter().find(|d| d.directory_path == "/videos").unwrap(); - assert_eq!(videos_dir.directory_etag, "videos_etag_v1"); - assert_eq!(videos_dir.file_count, 3); - } - - // Helper functions - async fn setup_test_database() -> Arc { - let ctx = TestContext::new().await; - Arc::new(ctx.state.db.clone()) - } - - async fn setup_test_app_state() -> crate::AppState { - let ctx = TestContext::new().await; - (*ctx.state).clone() - } -} \ No newline at end of file diff --git a/src/services/webdav/tests/mod.rs b/src/services/webdav/tests/mod.rs index 3572feb..50595e6 100644 --- a/src/services/webdav/tests/mod.rs +++ b/src/services/webdav/tests/mod.rs @@ -1,4 +1,2 @@ -pub mod critical_fixes_tests; pub mod etag_comparison_tests; -pub mod atomic_operations_tests; pub mod deletion_detection_tests; \ No newline at end of file diff --git a/src/services/webdav/url_construction_tests.rs b/src/services/webdav/url_construction_tests.rs index b22afba..0f905d9 100644 --- a/src/services/webdav/url_construction_tests.rs +++ b/src/services/webdav/url_construction_tests.rs @@ -382,10 +382,7 @@ async fn test_connection_get_url_for_path_normalization() { }; let service = WebDAVService::new(config).unwrap(); - let connection = super::super::connection::WebDAVConnection::new( - service.get_config().clone(), - super::super::config::RetryConfig::default() - ).unwrap(); + // Connection functionality is now integrated into WebDAVService // Test various path scenarios let test_cases = vec![ @@ -396,7 +393,7 @@ async fn test_connection_get_url_for_path_normalization() { ]; for (input_path, expected_url) in test_cases { - let result_url = connection.get_url_for_path(input_path); + let result_url = service.relative_path_to_url(input_path); // Verify the URL matches expected assert_eq!(result_url, expected_url, "URL construction failed for path: {}", input_path); @@ -490,10 +487,7 @@ async fn test_service_download_file_url_construction() { }; let service = WebDAVService::new(config).unwrap(); - let connection = super::super::connection::WebDAVConnection::new( - service.get_config().clone(), - super::super::config::RetryConfig::default() - ).unwrap(); + // Connection functionality is now integrated into WebDAVService // These are the actual paths that would come from XML parser responses let xml_parser_paths = vec![ @@ -513,7 +507,7 @@ async fn test_service_download_file_url_construction() { for (xml_path, expected_url) in xml_parser_paths.iter().zip(expected_urls.iter()) { // Test the conversion from full XML path to relative path (the correct approach) let relative_path = service.convert_to_relative_path(xml_path); - let constructed_url = connection.get_url_for_path(&relative_path); + let constructed_url = service.relative_path_to_url(&relative_path); println!("XML path: {}", xml_path); println!("Relative path: {}", relative_path); @@ -557,10 +551,7 @@ async fn test_file_fetch_url_construction_with_convert_to_relative_path() { }; let service = WebDAVService::new(config).unwrap(); - let connection = 
super::super::connection::WebDAVConnection::new( - service.get_config().clone(), - super::super::config::RetryConfig::default() - ).unwrap(); + // Connection functionality is now integrated into WebDAVService // XML parser returns this full WebDAV path let xml_full_path = "/remote.php/dav/files/testuser/Documents/TestFolder/file.pdf"; @@ -574,7 +565,7 @@ async fn test_file_fetch_url_construction_with_convert_to_relative_path() { let correct_url = format!("{}{}", base_webdav_url, relative_path); // Method 3: Using get_url_for_path with relative path (the correct way) - let connection_url = connection.get_url_for_path(&relative_path); + let connection_url = service.relative_path_to_url(&relative_path); println!("XML full path: {}", xml_full_path); println!("Base WebDAV URL: {}", base_webdav_url); @@ -613,10 +604,7 @@ async fn test_file_fetch_real_world_error_scenario() { }; let service = WebDAVService::new(config).unwrap(); - let connection = super::super::connection::WebDAVConnection::new( - service.get_config().clone(), - super::super::config::RetryConfig::default() - ).unwrap(); + // Connection functionality is now integrated into WebDAVService // This is the exact path from the error message let problematic_path = "/remote.php/dav/files/Alex/Photos/PC%20Screenshots/zjoQcWqldv.png"; @@ -627,7 +615,7 @@ async fn test_file_fetch_real_world_error_scenario() { let corrected_url = format!("{}{}", base_url, relative_path); // Also test using connection with relative path - let connection_url = connection.get_url_for_path(&relative_path); + let connection_url = service.relative_path_to_url(&relative_path); println!("Problematic path: {}", problematic_path); println!("Relative path: {}", relative_path); @@ -691,14 +679,11 @@ async fn test_file_fetch_different_server_types() { }; let service = WebDAVService::new(config).unwrap(); - let connection = super::super::connection::WebDAVConnection::new( - service.get_config().clone(), - super::super::config::RetryConfig::default() - ).unwrap(); + // Connection functionality is now integrated into WebDAVService // Test the CORRECT approach: convert to relative path first let relative_path = service.convert_to_relative_path(xml_path); - let download_url = connection.get_url_for_path(&relative_path); + let download_url = service.relative_path_to_url(&relative_path); println!("Server type: {}", server_type); println!("XML path: {}", xml_path); diff --git a/tests/integration_webdav_atomic_operations_tests.rs b/tests/integration_webdav_atomic_operations_tests.rs new file mode 100644 index 0000000..f4f447c --- /dev/null +++ b/tests/integration_webdav_atomic_operations_tests.rs @@ -0,0 +1,309 @@ +use std::sync::Arc; +use uuid::Uuid; +use tokio; +use futures::future::join_all; +use readur::{ + models::{CreateWebDAVDirectory, CreateUser, UserRole}, + test_utils::TestContext, +}; + +#[tokio::test] +async fn test_bulk_create_or_update_atomic() { + let test_context = TestContext::new().await; + let db = &test_context.state.db; + + // Create a test user first + let create_user = CreateUser { + username: "testuser".to_string(), + email: "test@example.com".to_string(), + password: "password123".to_string(), + role: Some(UserRole::User), + }; + let user = db.create_user(create_user).await + .expect("Failed to create test user"); + let user_id = user.id; + + let directories = vec![ + CreateWebDAVDirectory { + user_id, + directory_path: "/test/dir1".to_string(), + directory_etag: "etag1".to_string(), + file_count: 0, + total_size_bytes: 0, + }, + CreateWebDAVDirectory { + 
user_id, + directory_path: "/test/dir2".to_string(), + directory_etag: "etag2".to_string(), + file_count: 0, + total_size_bytes: 0, + }, + CreateWebDAVDirectory { + user_id, + directory_path: "/test/dir3".to_string(), + directory_etag: "etag3".to_string(), + file_count: 0, + total_size_bytes: 0, + }, + ]; + + // Test bulk operation + let result = db.bulk_create_or_update_webdav_directories(&directories).await; + if let Err(e) = &result { + eprintln!("Error in bulk_create_or_update_webdav_directories: {}", e); + } + assert!(result.is_ok()); + + let saved_directories = result.unwrap(); + assert_eq!(saved_directories.len(), 3); + + // Verify all directories were saved with correct ETags + for (original, saved) in directories.iter().zip(saved_directories.iter()) { + assert_eq!(original.directory_path, saved.directory_path); + assert_eq!(original.directory_etag, saved.directory_etag); + assert_eq!(original.user_id, saved.user_id); + } +} + +#[tokio::test] +async fn test_sync_webdav_directories_atomic() { + let test_context = TestContext::new().await; + let db = &test_context.state.db; + + // Create a test user first + let create_user = CreateUser { + username: "testuser2".to_string(), + email: "test2@example.com".to_string(), + password: "password123".to_string(), + role: Some(UserRole::User), + }; + let user = db.create_user(create_user).await + .expect("Failed to create test user"); + let user_id = user.id; + + // First, create some initial directories + let initial_directories = vec![ + CreateWebDAVDirectory { + user_id, + directory_path: "/test/dir1".to_string(), + directory_etag: "etag1".to_string(), + file_count: 0, + total_size_bytes: 0, + }, + CreateWebDAVDirectory { + user_id, + directory_path: "/test/dir2".to_string(), + directory_etag: "etag2".to_string(), + file_count: 0, + total_size_bytes: 0, + }, + ]; + + let _ = db.bulk_create_or_update_webdav_directories(&initial_directories).await.unwrap(); + + // Now sync with a new set that has one update, one delete, and one new + let sync_directories = vec![ + CreateWebDAVDirectory { + user_id, + directory_path: "/test/dir1".to_string(), + directory_etag: "etag1_updated".to_string(), // Updated + file_count: 5, + total_size_bytes: 1024, + }, + CreateWebDAVDirectory { + user_id, + directory_path: "/test/dir3".to_string(), // New + directory_etag: "etag3".to_string(), + file_count: 0, + total_size_bytes: 0, + }, + // dir2 is missing, should be deleted + ]; + + let result = db.sync_webdav_directories(user_id, &sync_directories).await; + assert!(result.is_ok()); + + let (updated_directories, deleted_count) = result.unwrap(); + + // Should have 2 directories (dir1 updated, dir3 new) + assert_eq!(updated_directories.len(), 2); + + // Should have deleted 1 directory (dir2) + assert_eq!(deleted_count, 1); + + // Verify the updated directory has the new ETag + let dir1 = updated_directories.iter() + .find(|d| d.directory_path == "/test/dir1") + .unwrap(); + assert_eq!(dir1.directory_etag, "etag1_updated"); + assert_eq!(dir1.file_count, 5); + assert_eq!(dir1.total_size_bytes, 1024); + + // Verify the new directory exists + let dir3 = updated_directories.iter() + .find(|d| d.directory_path == "/test/dir3") + .unwrap(); + assert_eq!(dir3.directory_etag, "etag3"); +} + +#[tokio::test] +async fn test_delete_missing_directories() { + let test_context = TestContext::new().await; + let db = &test_context.state.db; + + // Create a test user first + let create_user = CreateUser { + username: "testuser3".to_string(), + email: "test3@example.com".to_string(), 
+ password: "password123".to_string(), + role: Some(UserRole::User), + }; + let user = db.create_user(create_user).await + .expect("Failed to create test user"); + let user_id = user.id; + + // Create some directories + let directories = vec![ + CreateWebDAVDirectory { + user_id, + directory_path: "/test/dir1".to_string(), + directory_etag: "etag1".to_string(), + file_count: 0, + total_size_bytes: 0, + }, + CreateWebDAVDirectory { + user_id, + directory_path: "/test/dir2".to_string(), + directory_etag: "etag2".to_string(), + file_count: 0, + total_size_bytes: 0, + }, + CreateWebDAVDirectory { + user_id, + directory_path: "/test/dir3".to_string(), + directory_etag: "etag3".to_string(), + file_count: 0, + total_size_bytes: 0, + }, + ]; + + let _ = db.bulk_create_or_update_webdav_directories(&directories).await.unwrap(); + + // Delete directories not in this list (should delete dir2 and dir3) + let existing_paths = vec!["/test/dir1".to_string()]; + let deleted_count = db.delete_missing_webdav_directories(user_id, &existing_paths).await.unwrap(); + + assert_eq!(deleted_count, 2); + + // Verify only dir1 remains + let remaining_directories = db.list_webdav_directories(user_id).await.unwrap(); + assert_eq!(remaining_directories.len(), 1); + assert_eq!(remaining_directories[0].directory_path, "/test/dir1"); +} + +#[tokio::test] +async fn test_atomic_rollback_on_failure() { + let test_context = TestContext::new().await; + let db = &test_context.state.db; + + // Create a test user first + let create_user = CreateUser { + username: "testuser4".to_string(), + email: "test4@example.com".to_string(), + password: "password123".to_string(), + role: Some(UserRole::User), + }; + let user = db.create_user(create_user).await + .expect("Failed to create test user"); + let user_id = user.id; + + // Create a directory that would conflict + let initial_dir = CreateWebDAVDirectory { + user_id, + directory_path: "/test/dir1".to_string(), + directory_etag: "etag1".to_string(), + file_count: 0, + total_size_bytes: 0, + }; + + let _ = db.create_or_update_webdav_directory(&initial_dir).await.unwrap(); + + // Try to bulk insert with one invalid entry that should cause rollback + let directories_with_invalid = vec![ + CreateWebDAVDirectory { + user_id, + directory_path: "/test/dir2".to_string(), + directory_etag: "etag2".to_string(), + file_count: 0, + total_size_bytes: 0, + }, + CreateWebDAVDirectory { + user_id: Uuid::nil(), // Invalid user ID should cause failure + directory_path: "/test/dir3".to_string(), + directory_etag: "etag3".to_string(), + file_count: 0, + total_size_bytes: 0, + }, + ]; + + // This should fail and rollback + let result = db.bulk_create_or_update_webdav_directories(&directories_with_invalid).await; + assert!(result.is_err()); + + // Verify that no partial changes were made (only original dir1 should exist) + let directories = db.list_webdav_directories(user_id).await.unwrap(); + assert_eq!(directories.len(), 1); + assert_eq!(directories[0].directory_path, "/test/dir1"); +} + +#[tokio::test] +async fn test_concurrent_directory_updates() { + let test_context = TestContext::new().await; + let db = Arc::new(test_context.state.db.clone()); + + // Create a test user first + let create_user = CreateUser { + username: "testuser5".to_string(), + email: "test5@example.com".to_string(), + password: "password123".to_string(), + role: Some(UserRole::User), + }; + let user = db.create_user(create_user).await + .expect("Failed to create test user"); + let user_id = user.id; + + // Spawn multiple concurrent 
tasks that try to update the same directory + let mut handles = vec![]; + + for i in 0..10 { + let db_clone = db.clone(); + let handle = tokio::spawn(async move { + let directory = CreateWebDAVDirectory { + user_id, + directory_path: "/test/concurrent".to_string(), + directory_etag: format!("etag_{}", i), + file_count: i as i64, + total_size_bytes: (i * 1024) as i64, + }; + + db_clone.create_or_update_webdav_directory(&directory).await + }); + handles.push(handle); + } + + // Wait for all tasks to complete + let results: Vec<_> = join_all(handles).await; + + // All operations should succeed (last writer wins) + for result in results { + assert!(result.is_ok()); + assert!(result.unwrap().is_ok()); + } + + // Verify final state + let directories = db.list_webdav_directories(user_id).await.unwrap(); + assert_eq!(directories.len(), 1); + assert_eq!(directories[0].directory_path, "/test/concurrent"); + // ETag should be from one of the concurrent updates + assert!(directories[0].directory_etag.starts_with("etag_")); +} \ No newline at end of file diff --git a/tests/integration_webdav_critical_fixes_tests.rs b/tests/integration_webdav_critical_fixes_tests.rs new file mode 100644 index 0000000..2db0234 --- /dev/null +++ b/tests/integration_webdav_critical_fixes_tests.rs @@ -0,0 +1,412 @@ +use std::sync::Arc; +use std::time::Instant; +use uuid::Uuid; +use tokio; +use futures::future::join_all; +use readur::{ + models::{CreateWebDAVDirectory, CreateUser, UserRole}, + db::Database, + test_utils::TestContext, + AppState, +}; + +/// Integration test that validates the race condition fix +/// Tests that concurrent directory updates are atomic and consistent +#[tokio::test] +async fn test_race_condition_fix_atomic_updates() { + let test_context = TestContext::new().await; + let db = Arc::new(test_context.state.db.clone()); + + // Create a test user first + let create_user = CreateUser { + username: "race_testuser".to_string(), + email: "race@example.com".to_string(), + password: "password123".to_string(), + role: Some(UserRole::User), + }; + let user = db.create_user(create_user).await + .expect("Failed to create test user"); + let user_id = user.id; + + // Create initial directories + let initial_directories = vec![ + CreateWebDAVDirectory { + user_id, + directory_path: "/test/dir1".to_string(), + directory_etag: "initial_etag1".to_string(), + file_count: 5, + total_size_bytes: 1024, + }, + CreateWebDAVDirectory { + user_id, + directory_path: "/test/dir2".to_string(), + directory_etag: "initial_etag2".to_string(), + file_count: 10, + total_size_bytes: 2048, + }, + ]; + + let _ = db.bulk_create_or_update_webdav_directories(&initial_directories).await.unwrap(); + + // Simulate race condition: multiple tasks trying to update directories simultaneously + let mut handles = vec![]; + + for i in 0..5 { + let db_clone = Arc::clone(&db); + let handle = tokio::spawn(async move { + let updated_directories = vec![ + CreateWebDAVDirectory { + user_id, + directory_path: "/test/dir1".to_string(), + directory_etag: format!("race_etag1_{}", i), + file_count: 5 + i as i64, + total_size_bytes: 1024 + (i * 100) as i64, + }, + CreateWebDAVDirectory { + user_id, + directory_path: "/test/dir2".to_string(), + directory_etag: format!("race_etag2_{}", i), + file_count: 10 + i as i64, + total_size_bytes: 2048 + (i * 200) as i64, + }, + CreateWebDAVDirectory { + user_id, + directory_path: format!("/test/new_dir_{}", i), + directory_etag: format!("new_etag_{}", i), + file_count: i as i64, + total_size_bytes: (i * 512) as i64, + }, 
+ ]; + + // Use the atomic sync operation + db_clone.sync_webdav_directories(user_id, &updated_directories).await + }); + handles.push(handle); + } + + // Wait for all operations to complete + let results: Vec<_> = join_all(handles).await; + + // All operations should succeed (transactions ensure atomicity) + for result in results { + assert!(result.is_ok()); + let sync_result = result.unwrap(); + assert!(sync_result.is_ok()); + } + + // Final state should be consistent + let final_directories = db.list_webdav_directories(user_id).await.unwrap(); + + // Should have 3 directories (dir1, dir2, and one of the new_dir_X) + assert_eq!(final_directories.len(), 3); + + // All ETags should be from one consistent transaction + let dir1 = final_directories.iter().find(|d| d.directory_path == "/test/dir1").unwrap(); + let dir2 = final_directories.iter().find(|d| d.directory_path == "/test/dir2").unwrap(); + + // ETags should be from the same transaction (both should end with same number) + let etag1_suffix = dir1.directory_etag.chars().last().unwrap(); + let etag2_suffix = dir2.directory_etag.chars().last().unwrap(); + assert_eq!(etag1_suffix, etag2_suffix, "ETags should be from same atomic transaction"); +} + +/// Test that validates directory deletion detection works correctly +#[tokio::test] +async fn test_deletion_detection_fix() { + let test_context = TestContext::new().await; + let db = &test_context.state.db; + + // Create a test user first + let create_user = CreateUser { + username: "deletion_testuser".to_string(), + email: "deletion@example.com".to_string(), + password: "password123".to_string(), + role: Some(UserRole::User), + }; + let user = db.create_user(create_user).await + .expect("Failed to create test user"); + let user_id = user.id; + + // Create initial directories + let initial_directories = vec![ + CreateWebDAVDirectory { + user_id, + directory_path: "/documents/folder1".to_string(), + directory_etag: "etag1".to_string(), + file_count: 5, + total_size_bytes: 1024, + }, + CreateWebDAVDirectory { + user_id, + directory_path: "/documents/folder2".to_string(), + directory_etag: "etag2".to_string(), + file_count: 3, + total_size_bytes: 512, + }, + CreateWebDAVDirectory { + user_id, + directory_path: "/documents/folder3".to_string(), + directory_etag: "etag3".to_string(), + file_count: 8, + total_size_bytes: 2048, + }, + ]; + + let _ = db.bulk_create_or_update_webdav_directories(&initial_directories).await.unwrap(); + + // Verify all 3 directories exist + let directories_before = db.list_webdav_directories(user_id).await.unwrap(); + assert_eq!(directories_before.len(), 3); + + // Simulate sync where folder2 and folder3 are deleted from WebDAV server + let current_directories = vec![ + CreateWebDAVDirectory { + user_id, + directory_path: "/documents/folder1".to_string(), + directory_etag: "etag1_updated".to_string(), // Updated + file_count: 6, + total_size_bytes: 1200, + }, + // folder2 and folder3 are missing (deleted from server) + ]; + + // Use atomic sync which should detect and remove deleted directories + let (updated_directories, deleted_count) = db.sync_webdav_directories(user_id, ¤t_directories).await.unwrap(); + + // Should have 1 updated directory and 2 deletions + assert_eq!(updated_directories.len(), 1); + assert_eq!(deleted_count, 2); + + // Verify only folder1 remains with updated ETag + let final_directories = db.list_webdav_directories(user_id).await.unwrap(); + assert_eq!(final_directories.len(), 1); + assert_eq!(final_directories[0].directory_path, 
"/documents/folder1"); + assert_eq!(final_directories[0].directory_etag, "etag1_updated"); + assert_eq!(final_directories[0].file_count, 6); +} + +/// Test that validates proper ETag comparison handling +#[tokio::test] +async fn test_etag_comparison_fix() { + use readur::webdav_xml_parser::{compare_etags, weak_compare_etags, strong_compare_etags}; + + // Test weak vs strong ETag comparison + let strong_etag = "\"abc123\""; + let weak_etag = "W/\"abc123\""; + let different_etag = "\"def456\""; + + // Smart comparison should handle weak/strong equivalence + assert!(compare_etags(strong_etag, weak_etag), "Smart comparison should match weak and strong with same content"); + assert!(!compare_etags(strong_etag, different_etag), "Smart comparison should reject different content"); + + // Weak comparison should match regardless of weak/strong + assert!(weak_compare_etags(strong_etag, weak_etag), "Weak comparison should match"); + assert!(weak_compare_etags(weak_etag, strong_etag), "Weak comparison should be symmetrical"); + + // Strong comparison should reject weak ETags + assert!(!strong_compare_etags(strong_etag, weak_etag), "Strong comparison should reject weak ETags"); + assert!(!strong_compare_etags(weak_etag, strong_etag), "Strong comparison should reject weak ETags"); + assert!(strong_compare_etags(strong_etag, "\"abc123\""), "Strong comparison should match strong ETags"); + + // Test case sensitivity (ETags should be case-sensitive per RFC) + assert!(!compare_etags("\"ABC123\"", "\"abc123\""), "ETags should be case-sensitive"); + + // Test various real-world formats + let nextcloud_etag = "\"5f3e7e8a9b2c1d4\""; + let apache_etag = "\"1234-567-890abcdef\""; + let nginx_weak = "W/\"5f3e7e8a\""; + + assert!(!compare_etags(nextcloud_etag, apache_etag), "Different ETag values should not match"); + assert!(weak_compare_etags(nginx_weak, "\"5f3e7e8a\""), "Weak and strong with same content should match in weak comparison"); +} + +/// Test performance of bulk operations vs individual operations +#[tokio::test] +async fn test_bulk_operations_performance() { + let test_context = TestContext::new().await; + let db = &test_context.state.db; + + // Create a test user first + let create_user = CreateUser { + username: "perf_testuser".to_string(), + email: "perf@example.com".to_string(), + password: "password123".to_string(), + role: Some(UserRole::User), + }; + let user = db.create_user(create_user).await + .expect("Failed to create test user"); + let user_id = user.id; + + // Create test data + let test_directories: Vec<_> = (0..100).map(|i| CreateWebDAVDirectory { + user_id, + directory_path: format!("/test/perf/dir{}", i), + directory_etag: format!("etag{}", i), + file_count: i as i64, + total_size_bytes: (i * 1024) as i64, + }).collect(); + + // Test individual operations (old way) + let start_individual = Instant::now(); + for directory in &test_directories { + let _ = db.create_or_update_webdav_directory(directory).await; + } + let individual_duration = start_individual.elapsed(); + + // Clear data + let _ = db.clear_webdav_directories(user_id).await; + + // Test bulk operation (new way) + let start_bulk = Instant::now(); + let _ = db.bulk_create_or_update_webdav_directories(&test_directories).await; + let bulk_duration = start_bulk.elapsed(); + + // Bulk should be faster + assert!(bulk_duration < individual_duration, + "Bulk operations should be faster than individual operations. 
Bulk: {:?}, Individual: {:?}", + bulk_duration, individual_duration); + + // Verify all data was saved correctly + let saved_directories = db.list_webdav_directories(user_id).await.unwrap(); + assert_eq!(saved_directories.len(), 100); +} + +/// Test transaction rollback behavior +#[tokio::test] +async fn test_transaction_rollback_consistency() { + let test_context = TestContext::new().await; + let db = &test_context.state.db; + + // Create a test user first + let create_user = CreateUser { + username: "rollback_testuser".to_string(), + email: "rollback@example.com".to_string(), + password: "password123".to_string(), + role: Some(UserRole::User), + }; + let user = db.create_user(create_user).await + .expect("Failed to create test user"); + let user_id = user.id; + + // Create some initial data + let initial_directory = CreateWebDAVDirectory { + user_id, + directory_path: "/test/initial".to_string(), + directory_etag: "initial_etag".to_string(), + file_count: 1, + total_size_bytes: 100, + }; + + let _ = db.create_or_update_webdav_directory(&initial_directory).await.unwrap(); + + // Try to create directories where one has invalid data that should cause rollback + let directories_with_failure = vec![ + CreateWebDAVDirectory { + user_id, + directory_path: "/test/valid1".to_string(), + directory_etag: "valid_etag1".to_string(), + file_count: 2, + total_size_bytes: 200, + }, + CreateWebDAVDirectory { + user_id: Uuid::nil(), // This should cause a constraint violation + directory_path: "/test/invalid".to_string(), + directory_etag: "invalid_etag".to_string(), + file_count: 3, + total_size_bytes: 300, + }, + CreateWebDAVDirectory { + user_id, + directory_path: "/test/valid2".to_string(), + directory_etag: "valid_etag2".to_string(), + file_count: 4, + total_size_bytes: 400, + }, + ]; + + // This should fail and rollback + let result = db.bulk_create_or_update_webdav_directories(&directories_with_failure).await; + assert!(result.is_err(), "Transaction should fail due to invalid user_id"); + + // Verify that no partial changes were made - only initial directory should exist + let final_directories = db.list_webdav_directories(user_id).await.unwrap(); + assert_eq!(final_directories.len(), 1); + assert_eq!(final_directories[0].directory_path, "/test/initial"); + assert_eq!(final_directories[0].directory_etag, "initial_etag"); +} + +/// Integration test simulating real WebDAV sync scenario +#[tokio::test] +async fn test_full_sync_integration() { + let test_context = TestContext::new().await; + let app_state = &test_context.state; + + // Create a test user first + let create_user = CreateUser { + username: "sync_testuser".to_string(), + email: "sync@example.com".to_string(), + password: "password123".to_string(), + role: Some(UserRole::User), + }; + let user = app_state.db.create_user(create_user).await + .expect("Failed to create test user"); + let user_id = user.id; + + // Simulate initial sync with some directories + let initial_directories = vec![ + CreateWebDAVDirectory { + user_id, + directory_path: "/documents".to_string(), + directory_etag: "docs_etag_v1".to_string(), + file_count: 10, + total_size_bytes: 10240, + }, + CreateWebDAVDirectory { + user_id, + directory_path: "/pictures".to_string(), + directory_etag: "pics_etag_v1".to_string(), + file_count: 5, + total_size_bytes: 51200, + }, + ]; + + let (saved_dirs, _) = app_state.db.sync_webdav_directories(user_id, &initial_directories).await.unwrap(); + assert_eq!(saved_dirs.len(), 2); + + // Simulate second sync with changes + let 
updated_directories = vec![ + CreateWebDAVDirectory { + user_id, + directory_path: "/documents".to_string(), + directory_etag: "docs_etag_v2".to_string(), // Changed + file_count: 12, + total_size_bytes: 12288, + }, + CreateWebDAVDirectory { + user_id, + directory_path: "/videos".to_string(), // New directory + directory_etag: "videos_etag_v1".to_string(), + file_count: 3, + total_size_bytes: 102400, + }, + // /pictures directory was deleted from server + ]; + + let (updated_dirs, deleted_count) = app_state.db.sync_webdav_directories(user_id, &updated_directories).await.unwrap(); + + // Should have 2 directories (updated documents + new videos) and 1 deletion (pictures) + assert_eq!(updated_dirs.len(), 2); + assert_eq!(deleted_count, 1); + + // Verify final state + let final_dirs = app_state.db.list_webdav_directories(user_id).await.unwrap(); + assert_eq!(final_dirs.len(), 2); + + let docs_dir = final_dirs.iter().find(|d| d.directory_path == "/documents").unwrap(); + assert_eq!(docs_dir.directory_etag, "docs_etag_v2"); + assert_eq!(docs_dir.file_count, 12); + + let videos_dir = final_dirs.iter().find(|d| d.directory_path == "/videos").unwrap(); + assert_eq!(videos_dir.directory_etag, "videos_etag_v1"); + assert_eq!(videos_dir.file_count, 3); +} \ No newline at end of file
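
Note: every migrated integration test in this patch follows the same setup pattern — build a `TestContext`, create a real user through `Database::create_user`, then drive the WebDAV directory APIs with that user's id. The sketch below illustrates that shared pattern using only the `readur` items already imported in this diff (`test_utils::TestContext`, `models::{CreateUser, CreateWebDAVDirectory, UserRole}`, `db::Database`); the `seed_user` helper is hypothetical and illustrative, not code introduced by this patch.

// Sketch of the shared setup pattern used by the new tests in `tests/`.
// `seed_user` is an illustrative helper, not part of this patch.
use readur::{
    db::Database,
    models::{CreateUser, CreateWebDAVDirectory, UserRole},
    test_utils::TestContext,
};
use uuid::Uuid;

async fn seed_user(db: &Database, name: &str) -> Uuid {
    let user = db
        .create_user(CreateUser {
            username: name.to_string(),
            email: format!("{}@example.com", name),
            password: "password123".to_string(),
            role: Some(UserRole::User),
        })
        .await
        .expect("Failed to create test user");
    user.id
}

#[tokio::test]
async fn test_directory_roundtrip_sketch() {
    let ctx = TestContext::new().await;
    let db = &ctx.state.db;
    let user_id = seed_user(db, "sketch_user").await;

    // Upsert one directory record and read it back, mirroring the
    // assertions used throughout the migrated tests.
    let dirs = vec![CreateWebDAVDirectory {
        user_id,
        directory_path: "/sketch".to_string(),
        directory_etag: "sketch_etag".to_string(),
        file_count: 0,
        total_size_bytes: 0,
    }];
    db.bulk_create_or_update_webdav_directories(&dirs)
        .await
        .expect("bulk upsert should succeed");

    let saved = db.list_webdav_directories(user_id).await.unwrap();
    assert_eq!(saved.len(), 1);
    assert_eq!(saved[0].directory_path, "/sketch");
}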