feat(tests): resolve compilation errors in integration tests due to new test context

perf3ct 2025-07-28 19:07:46 +00:00
parent 4e14ca5eb7
commit f141f0460b
5 changed files with 209 additions and 45 deletions
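
For context, the compilation errors come from type inference: each test wraps its body in an async block whose result is checked afterwards, and without an explicit annotation the compiler cannot infer the error type used by `?` and the final `Ok(())` inside that block. A minimal sketch of the pattern the changed tests presumably follow (the closing `.await`, the `Ok(())`, and the cleanup/unwrap steps are not visible in the hunks below and are assumptions):

use anyhow::Result;
use readur::test_utils::{TestContext, TestAuthHelper};

#[tokio::test]
async fn example_label_test() {
    let ctx = TestContext::new().await;

    // Ensure cleanup happens even if the test fails: run the body in an inner
    // async block and inspect its result afterwards. The `Result<()>` annotation
    // is what this commit adds; it gives `?` and the final `Ok(())` a concrete
    // type to resolve against.
    let result: Result<()> = async {
        let auth_helper = TestAuthHelper::new(ctx.app.clone());
        let _user = auth_helper.create_test_user().await;
        // ... assertions and fallible calls using `?` ...
        Ok(())
    }
    .await;

    // Assumed follow-up (not shown in the hunks): tear down the TestContext,
    // then surface any test failure.
    result.expect("test body failed");
}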

View File

@@ -1,5 +1,6 @@
 #[cfg(test)]
 mod tests {
+    use anyhow::Result;
     use super::*;
     use readur::models::UserRole;
     use readur::routes::labels::{CreateLabel, UpdateLabel, LabelAssignment, Label};
@@ -17,7 +18,7 @@ mod tests {
         let ctx = TestContext::new().await;
         // Ensure cleanup happens even if test fails
-        let result = async {
+        let result: Result<()> = async {
             let auth_helper = TestAuthHelper::new(ctx.app.clone());
             let user = auth_helper.create_test_user().await;
@@ -79,7 +80,7 @@ mod tests {
         let ctx = TestContext::new().await;
         // Ensure cleanup happens even if test fails
-        let result = async {
+        let result: Result<()> = async {
             let auth_helper = TestAuthHelper::new(ctx.app.clone());
             let user = auth_helper.create_test_user().await;
@@ -129,7 +130,7 @@ mod tests {
         let ctx = TestContext::new().await;
         // Ensure cleanup happens even if test fails
-        let result = async {
+        let result: Result<()> = async {
             let auth_helper = TestAuthHelper::new(ctx.app.clone());
             let user = auth_helper.create_test_user().await;
@@ -203,7 +204,7 @@ mod tests {
         let ctx = TestContext::new().await;
         // Ensure cleanup happens even if test fails
-        let result = async {
+        let result: Result<()> = async {
             let auth_helper = TestAuthHelper::new(ctx.app.clone());
             let user = auth_helper.create_test_user().await;
@@ -261,7 +262,7 @@ mod tests {
         let ctx = TestContext::new().await;
         // Ensure cleanup happens even if test fails
-        let result = async {
+        let result: Result<()> = async {
             let auth_helper = TestAuthHelper::new(ctx.app.clone());
             let user = auth_helper.create_test_user().await;
@@ -327,7 +328,7 @@ mod tests {
         let ctx = TestContext::new().await;
         // Ensure cleanup happens even if test fails
-        let result = async {
+        let result: Result<()> = async {
             let auth_helper = TestAuthHelper::new(ctx.app.clone());
             let user = auth_helper.create_test_user().await;
@@ -420,7 +421,7 @@ mod tests {
         let ctx = TestContext::new().await;
         // Ensure cleanup happens even if test fails
-        let result = async {
+        let result: Result<()> = async {
             let auth_helper = TestAuthHelper::new(ctx.app.clone());
             let user = auth_helper.create_test_user().await;
@@ -514,7 +515,7 @@ mod tests {
         let ctx = TestContext::new().await;
         // Ensure cleanup happens even if test fails
-        let result = async {
+        let result: Result<()> = async {
             let auth_helper = TestAuthHelper::new(ctx.app.clone());
             let user = auth_helper.create_test_user().await;
@@ -614,7 +615,7 @@ mod tests {
         let ctx = TestContext::new().await;
         // Ensure cleanup happens even if test fails
-        let result = async {
+        let result: Result<()> = async {
             let auth_helper = TestAuthHelper::new(ctx.app.clone());
             let user = auth_helper.create_test_user().await;
@@ -707,7 +708,7 @@ mod tests {
         let ctx = TestContext::new().await;
         // Ensure cleanup happens even if test fails
-        let result = async {
+        let result: Result<()> = async {
             let auth_helper = TestAuthHelper::new(ctx.app.clone());
             let user = auth_helper.create_test_user().await;
@@ -744,7 +745,7 @@ mod tests {
         let ctx = TestContext::new().await;
         // Ensure cleanup happens even if test fails
-        let result = async {
+        let result: Result<()> = async {
             // Check that system labels were created by migration
             let system_labels = sqlx::query(
@@ -788,7 +789,7 @@ mod tests {
         let ctx = TestContext::new().await;
         // Ensure cleanup happens even if test fails
-        let result = async {
+        let result: Result<()> = async {
             let auth_helper = TestAuthHelper::new(ctx.app.clone());
             let user = auth_helper.create_test_user().await;

View File

@@ -306,15 +306,34 @@ mod tests {
         // Integration Test: Deep scan should perform well even with large numbers of directories
         // This tests the scalability of the deep scan reset operation
+        let test_start_time = std::time::Instant::now();
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Test starting", test_start_time.elapsed());
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Creating test setup...", test_start_time.elapsed());
+        let setup_start = std::time::Instant::now();
         let (state, user, test_context) = create_test_setup().await;
         let _cleanup_guard = TestCleanupGuard::new(test_context);
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Test setup completed in {:?}", test_start_time.elapsed(), setup_start.elapsed());
+        eprintln!("[DEEP_SCAN_TEST] {:?} - User ID: {}", test_start_time.elapsed(), user.id);
         // Create a large number of old directories
         let num_old_dirs = 250;
         let mut old_directories = Vec::new();
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Starting creation of {} old directories", test_start_time.elapsed(), num_old_dirs);
         let create_start = std::time::Instant::now();
         for i in 0..num_old_dirs {
+            if i % 50 == 0 || i < 10 {
+                eprintln!("[DEEP_SCAN_TEST] {:?} - Creating directory {}/{} ({}%)",
+                    test_start_time.elapsed(),
+                    i + 1,
+                    num_old_dirs,
+                    ((i + 1) * 100) / num_old_dirs
+                );
+            }
+            let dir_create_start = std::time::Instant::now();
             let dir = CreateWebDAVDirectory {
                 user_id: user.id,
                 directory_path: format!("/Documents/Old{:03}", i),
@@ -323,33 +342,137 @@ mod tests {
                 total_size_bytes: (i as i64 + 1) * 4000, // Varying sizes
             };
-            state.db.create_or_update_webdav_directory(&dir).await
-                .expect("Failed to create old directory");
+            eprintln!("[DEEP_SCAN_TEST] {:?} - About to call create_or_update_webdav_directory for dir {}", test_start_time.elapsed(), i);
+            match state.db.create_or_update_webdav_directory(&dir).await {
+                Ok(_) => {
+                    if i < 10 || dir_create_start.elapsed().as_millis() > 100 {
+                        eprintln!("[DEEP_SCAN_TEST] {:?} - Successfully created directory {} in {:?}",
+                            test_start_time.elapsed(), i, dir_create_start.elapsed());
+                    }
+                }
+                Err(e) => {
+                    eprintln!("[DEEP_SCAN_TEST] {:?} - ERROR: Failed to create old directory {}: {}", test_start_time.elapsed(), i, e);
+                    panic!("Failed to create old directory {}: {}", i, e);
+                }
+            }
             old_directories.push(dir);
+            // Check for potential infinite loops by timing individual operations
+            if dir_create_start.elapsed().as_secs() > 5 {
+                eprintln!("[DEEP_SCAN_TEST] {:?} - WARNING: Directory creation {} took {:?} (> 5s)",
+                    test_start_time.elapsed(), i, dir_create_start.elapsed());
+            }
         }
         let create_duration = create_start.elapsed();
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Completed creation of {} directories in {:?}",
+            test_start_time.elapsed(), num_old_dirs, create_duration);
         // Verify old directories were created
-        let before_count = state.db.list_webdav_directories(user.id).await.unwrap().len();
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Verifying old directories were created...", test_start_time.elapsed());
+        let list_start = std::time::Instant::now();
+        let before_count = match state.db.list_webdav_directories(user.id).await {
+            Ok(dirs) => {
+                eprintln!("[DEEP_SCAN_TEST] {:?} - Successfully listed {} directories in {:?}",
+                    test_start_time.elapsed(), dirs.len(), list_start.elapsed());
+                dirs.len()
+            }
+            Err(e) => {
+                eprintln!("[DEEP_SCAN_TEST] {:?} - ERROR: Failed to list directories: {}", test_start_time.elapsed(), e);
+                panic!("Failed to list directories: {}", e);
+            }
+        };
         assert_eq!(before_count, num_old_dirs, "Should have created {} old directories", num_old_dirs);
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Verification passed: {} directories found", test_start_time.elapsed(), before_count);
         // Simulate deep scan reset - delete all existing
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Starting deletion phase...", test_start_time.elapsed());
         let delete_start = std::time::Instant::now();
-        let dirs_to_delete = state.db.list_webdav_directories(user.id).await.unwrap();
-        for dir in &dirs_to_delete {
-            state.db.delete_webdav_directory(user.id, &dir.directory_path).await
-                .expect("Failed to delete directory during deep scan");
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Fetching directories to delete...", test_start_time.elapsed());
+        let fetch_delete_start = std::time::Instant::now();
+        let dirs_to_delete = match state.db.list_webdav_directories(user.id).await {
+            Ok(dirs) => {
+                eprintln!("[DEEP_SCAN_TEST] {:?} - Fetched {} directories to delete in {:?}",
+                    test_start_time.elapsed(), dirs.len(), fetch_delete_start.elapsed());
+                dirs
+            }
+            Err(e) => {
+                eprintln!("[DEEP_SCAN_TEST] {:?} - ERROR: Failed to fetch directories for deletion: {}", test_start_time.elapsed(), e);
+                panic!("Failed to fetch directories for deletion: {}", e);
+            }
+        };
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Beginning deletion of {} directories...", test_start_time.elapsed(), dirs_to_delete.len());
+        for (idx, dir) in dirs_to_delete.iter().enumerate() {
+            if idx % 50 == 0 || idx < 10 {
+                eprintln!("[DEEP_SCAN_TEST] {:?} - Deleting directory {}/{} ({}%): {}",
+                    test_start_time.elapsed(),
+                    idx + 1,
+                    dirs_to_delete.len(),
+                    ((idx + 1) * 100) / dirs_to_delete.len(),
+                    dir.directory_path
+                );
+            }
+            let delete_item_start = std::time::Instant::now();
+            eprintln!("[DEEP_SCAN_TEST] {:?} - About to delete directory: {}", test_start_time.elapsed(), dir.directory_path);
+            match state.db.delete_webdav_directory(user.id, &dir.directory_path).await {
+                Ok(_) => {
+                    if idx < 10 || delete_item_start.elapsed().as_millis() > 100 {
+                        eprintln!("[DEEP_SCAN_TEST] {:?} - Successfully deleted directory {} in {:?}",
+                            test_start_time.elapsed(), dir.directory_path, delete_item_start.elapsed());
+                    }
+                }
+                Err(e) => {
+                    eprintln!("[DEEP_SCAN_TEST] {:?} - ERROR: Failed to delete directory {}: {}",
+                        test_start_time.elapsed(), dir.directory_path, e);
+                    panic!("Failed to delete directory during deep scan: {}", e);
+                }
+            }
+            // Check for potential infinite loops
+            if delete_item_start.elapsed().as_secs() > 5 {
+                eprintln!("[DEEP_SCAN_TEST] {:?} - WARNING: Directory deletion {} took {:?} (> 5s)",
+                    test_start_time.elapsed(), dir.directory_path, delete_item_start.elapsed());
+            }
         }
         let delete_duration = delete_start.elapsed();
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Completed deletion of {} directories in {:?}",
+            test_start_time.elapsed(), dirs_to_delete.len(), delete_duration);
         // Verify cleanup
-        let cleared_count = state.db.list_webdav_directories(user.id).await.unwrap().len();
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Verifying cleanup...", test_start_time.elapsed());
+        let verify_cleanup_start = std::time::Instant::now();
+        let cleared_count = match state.db.list_webdav_directories(user.id).await {
+            Ok(dirs) => {
+                eprintln!("[DEEP_SCAN_TEST] {:?} - Cleanup verification: {} directories remaining in {:?}",
+                    test_start_time.elapsed(), dirs.len(), verify_cleanup_start.elapsed());
+                dirs.len()
+            }
+            Err(e) => {
+                eprintln!("[DEEP_SCAN_TEST] {:?} - ERROR: Failed to verify cleanup: {}", test_start_time.elapsed(), e);
+                panic!("Failed to verify cleanup: {}", e);
+            }
+        };
         assert_eq!(cleared_count, 0, "Should have cleared all directories");
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Cleanup verification passed: 0 directories remaining", test_start_time.elapsed());
         // Create new directories (simulating rediscovery)
         let num_new_dirs = 300; // Slightly different number
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Starting recreation of {} new directories", test_start_time.elapsed(), num_new_dirs);
         let recreate_start = std::time::Instant::now();
         for i in 0..num_new_dirs {
+            if i % 50 == 0 || i < 10 {
+                eprintln!("[DEEP_SCAN_TEST] {:?} - Creating new directory {}/{} ({}%)",
+                    test_start_time.elapsed(),
+                    i + 1,
+                    num_new_dirs,
+                    ((i + 1) * 100) / num_new_dirs
+                );
+            }
+            let recreate_item_start = std::time::Instant::now();
             let dir = CreateWebDAVDirectory {
                 user_id: user.id,
                 directory_path: format!("/Documents/New{:03}", i),
@@ -358,26 +481,65 @@ mod tests {
                 total_size_bytes: (i as i64 + 1) * 5000, // Different sizing
             };
-            state.db.create_or_update_webdav_directory(&dir).await
-                .expect("Failed to create new directory");
+            eprintln!("[DEEP_SCAN_TEST] {:?} - About to create new directory {}", test_start_time.elapsed(), i);
+            match state.db.create_or_update_webdav_directory(&dir).await {
+                Ok(_) => {
+                    if i < 10 || recreate_item_start.elapsed().as_millis() > 100 {
+                        eprintln!("[DEEP_SCAN_TEST] {:?} - Successfully created new directory {} in {:?}",
+                            test_start_time.elapsed(), i, recreate_item_start.elapsed());
+                    }
+                }
+                Err(e) => {
+                    eprintln!("[DEEP_SCAN_TEST] {:?} - ERROR: Failed to create new directory {}: {}", test_start_time.elapsed(), i, e);
+                    panic!("Failed to create new directory {}: {}", i, e);
+                }
+            }
+            // Check for potential infinite loops
+            if recreate_item_start.elapsed().as_secs() > 5 {
+                eprintln!("[DEEP_SCAN_TEST] {:?} - WARNING: New directory creation {} took {:?} (> 5s)",
+                    test_start_time.elapsed(), i, recreate_item_start.elapsed());
+            }
         }
         let recreate_duration = recreate_start.elapsed();
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Completed recreation of {} directories in {:?}",
+            test_start_time.elapsed(), num_new_dirs, recreate_duration);
         // Verify final state
-        let final_count = state.db.list_webdav_directories(user.id).await.unwrap().len();
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Verifying final state...", test_start_time.elapsed());
+        let final_verify_start = std::time::Instant::now();
+        let final_count = match state.db.list_webdav_directories(user.id).await {
+            Ok(dirs) => {
+                eprintln!("[DEEP_SCAN_TEST] {:?} - Final verification: {} directories found in {:?}",
+                    test_start_time.elapsed(), dirs.len(), final_verify_start.elapsed());
+                dirs.len()
+            }
+            Err(e) => {
+                eprintln!("[DEEP_SCAN_TEST] {:?} - ERROR: Failed to verify final state: {}", test_start_time.elapsed(), e);
+                panic!("Failed to verify final state: {}", e);
+            }
+        };
         assert_eq!(final_count, num_new_dirs, "Should have created {} new directories", num_new_dirs);
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Final verification passed: {} directories found", test_start_time.elapsed(), final_count);
         // Performance assertions - should complete within reasonable time
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Running performance assertions...", test_start_time.elapsed());
         assert!(create_duration.as_secs() < 30, "Creating {} directories should take < 30s, took {:?}", num_old_dirs, create_duration);
         assert!(delete_duration.as_secs() < 15, "Deleting {} directories should take < 15s, took {:?}", num_old_dirs, delete_duration);
         assert!(recreate_duration.as_secs() < 30, "Recreating {} directories should take < 30s, took {:?}", num_new_dirs, recreate_duration);
         let total_duration = create_duration + delete_duration + recreate_duration;
+        let overall_test_duration = test_start_time.elapsed();
+        eprintln!("[DEEP_SCAN_TEST] {:?} - All performance assertions passed", test_start_time.elapsed());
         println!("✅ Deep scan performance test completed successfully");
         println!(" Created {} old directories in {:?}", num_old_dirs, create_duration);
         println!(" Deleted {} directories in {:?}", num_old_dirs, delete_duration);
         println!(" Created {} new directories in {:?}", num_new_dirs, recreate_duration);
         println!(" Total deep scan simulation time: {:?}", total_duration);
+        println!(" Overall test duration: {:?}", overall_test_duration);
+        eprintln!("[DEEP_SCAN_TEST] {:?} - Test completed successfully!", test_start_time.elapsed());
     }
 }
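
The instrumentation added above repeats one pattern for every database call: take an Instant before the call, log the outcome, and warn when a single operation runs longer than five seconds. As a hedged sketch only (this helper is not part of the commit; the name `timed` and its signature are illustrative), the same checks could be factored into a reusable wrapper:

use std::future::Future;
use std::time::{Duration, Instant};

// Illustrative helper, not in the commit: awaits one fallible operation,
// logs an error on failure, and warns when it exceeds `warn_after`,
// mirroring the per-directory checks in the deep scan test above.
async fn timed<T, E, F>(label: &str, warn_after: Duration, op: F) -> Result<T, E>
where
    F: Future<Output = Result<T, E>>,
    E: std::fmt::Display,
{
    let start = Instant::now();
    let result = op.await;
    if start.elapsed() > warn_after {
        eprintln!("[DEEP_SCAN_TEST] WARNING: {} took {:?} (> {:?})", label, start.elapsed(), warn_after);
    }
    if let Err(e) = &result {
        eprintln!("[DEEP_SCAN_TEST] ERROR: {} failed: {}", label, e);
    }
    result
}

A call site would then read roughly `timed("create dir", Duration::from_secs(5), state.db.create_or_update_webdav_directory(&dir)).await.expect("Failed to create old directory")`, assuming that method returns a `Result` as the `match` arms above suggest.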

View File

@@ -1,5 +1,6 @@
 #[cfg(test)]
 mod tests {
+    use anyhow::Result;
     use readur::models::{CreateUser, UpdateUser, UserResponse, AuthProvider, UserRole};
     use readur::test_utils::{TestContext, TestAuthHelper};
     use axum::http::StatusCode;
@@ -12,7 +13,7 @@ mod tests {
         let ctx = TestContext::new().await;
         // Ensure cleanup happens even if test fails
-        let result = async {
+        let result: Result<()> = async {
             // Create admin user using TestAuthHelper for unique credentials
             let auth_helper = TestAuthHelper::new(ctx.app.clone());
             let admin = auth_helper.create_admin_user().await;
@@ -61,7 +62,7 @@ mod tests {
         let ctx = TestContext::new().await;
         // Ensure cleanup happens even if test fails
-        let result = async {
+        let result: Result<()> = async {
             let auth_helper = TestAuthHelper::new(ctx.app.clone());
             let admin = auth_helper.create_admin_user().await;
             let token = auth_helper.login_user(&admin.username, "adminpass123").await;

View File

@@ -10,11 +10,11 @@ use readur::{
 #[tokio::test]
 async fn test_bulk_create_or_update_atomic() {
-    let test_context = TestContext::new().await;
+    let ctx = TestContext::new().await;
     // Ensure cleanup happens even if test fails
     let result: Result<()> = async {
-        let db = &test_context.state.db;
+        let db = &ctx.state.db;
         // Create a test user first
         let create_user = CreateUser {
@@ -81,11 +81,11 @@ async fn test_bulk_create_or_update_atomic() {
 #[tokio::test]
 async fn test_sync_webdav_directories_atomic() {
-    let test_context = TestContext::new().await;
+    let ctx = TestContext::new().await;
     // Ensure cleanup happens even if test fails
     let result: Result<()> = async {
-        let db = &test_context.state.db;
+        let db = &ctx.state.db;
         // Create a test user first
         let create_user = CreateUser {
@@ -175,11 +175,11 @@ async fn test_sync_webdav_directories_atomic() {
 #[tokio::test]
 async fn test_delete_missing_directories() {
-    let test_context = TestContext::new().await;
+    let ctx = TestContext::new().await;
     // Ensure cleanup happens even if test fails
     let result: Result<()> = async {
-        let db = &test_context.state.db;
+        let db = &ctx.state.db;
         // Create a test user first
         let create_user = CreateUser {
@@ -243,11 +243,11 @@ async fn test_delete_missing_directories() {
 #[tokio::test]
 async fn test_atomic_rollback_on_failure() {
-    let test_context = TestContext::new().await;
+    let ctx = TestContext::new().await;
     // Ensure cleanup happens even if test fails
     let result: Result<()> = async {
-        let db = &test_context.state.db;
+        let db = &ctx.state.db;
         // Create a test user first
         let create_user = CreateUser {
@@ -311,11 +311,11 @@ async fn test_atomic_rollback_on_failure() {
 #[tokio::test]
 async fn test_concurrent_directory_updates() {
-    let test_context = TestContext::new().await;
+    let ctx = TestContext::new().await;
     // Ensure cleanup happens even if test fails
     let result: Result<()> = async {
-        let db = Arc::new(test_context.state.db.clone());
+        let db = Arc::new(ctx.state.db.clone());
         // Create a test user first
         let create_user = CreateUser {

View File

@@ -15,11 +15,11 @@ use readur::{
 /// Tests that concurrent directory updates are atomic and consistent
 #[tokio::test]
 async fn test_race_condition_fix_atomic_updates() {
-    let test_context = TestContext::new().await;
+    let ctx = TestContext::new().await;
     // Ensure cleanup happens even if test fails
     let result: Result<()> = async {
-        let db = Arc::new(test_context.state.db.clone());
+        let db = Arc::new(ctx.state.db.clone());
         // Create a test user first
         let create_user = CreateUser {
@@ -127,11 +127,11 @@ async fn test_race_condition_fix_atomic_updates() {
 /// Test that validates directory deletion detection works correctly
 #[tokio::test]
 async fn test_deletion_detection_fix() {
-    let test_context = TestContext::new().await;
+    let ctx = TestContext::new().await;
     // Ensure cleanup happens even if test fails
     let result: Result<()> = async {
-        let db = &test_context.state.db;
+        let db = &ctx.state.db;
         // Create a test user first
         let create_user = CreateUser {
@@ -250,11 +250,11 @@ async fn test_etag_comparison_fix() {
 /// Test performance of bulk operations vs individual operations
 #[tokio::test]
 async fn test_bulk_operations_performance() {
-    let test_context = TestContext::new().await;
+    let ctx = TestContext::new().await;
     // Ensure cleanup happens even if test fails
     let result: Result<()> = async {
-        let db = &test_context.state.db;
+        let db = &ctx.state.db;
         // Create a test user first
         let create_user = CreateUser {
@@ -314,11 +314,11 @@ async fn test_bulk_operations_performance() {
 /// Test transaction rollback behavior
 #[tokio::test]
 async fn test_transaction_rollback_consistency() {
-    let test_context = TestContext::new().await;
+    let ctx = TestContext::new().await;
     // Ensure cleanup happens even if test fails
     let result: Result<()> = async {
-        let db = &test_context.state.db;
+        let db = &ctx.state.db;
         // Create a test user first
         let create_user = CreateUser {
@@ -391,11 +391,11 @@ async fn test_transaction_rollback_consistency() {
 /// Integration test simulating real WebDAV sync scenario
 #[tokio::test]
 async fn test_full_sync_integration() {
-    let test_context = TestContext::new().await;
+    let ctx = TestContext::new().await;
     // Ensure cleanup happens even if test fails
     let result: Result<()> = async {
-        let app_state = &test_context.state;
+        let app_state = &ctx.state;
         // Create a test user first
         let create_user = CreateUser {