feat(webdav): fix all the wonderful compilation issues
This commit is contained in:
parent
b7dd64c8f6
commit
18832b9c12
|
|
@ -1,5 +1,6 @@
|
|||
use anyhow::Result;
|
||||
use chrono::{DateTime, Utc};
|
||||
use chrono::Utc;
|
||||
use sqlx::Row;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::db::Database;
|
||||
|
|
@ -9,23 +10,23 @@ impl Database {
|
|||
/// Create a new WebDAV sync session
|
||||
pub async fn create_webdav_sync_session(&self, session: &CreateWebDAVSyncSession) -> Result<Uuid> {
|
||||
self.with_retry(|| async {
|
||||
let row = sqlx::query!(
|
||||
let row = sqlx::query(
|
||||
r#"
|
||||
INSERT INTO webdav_sync_sessions (
|
||||
user_id, source_id, sync_type, root_path, max_depth
|
||||
) VALUES ($1, $2, $3, $4, $5)
|
||||
RETURNING id
|
||||
"#,
|
||||
session.user_id,
|
||||
session.source_id,
|
||||
session.sync_type,
|
||||
session.root_path,
|
||||
session.max_depth
|
||||
"#
|
||||
)
|
||||
.bind(session.user_id)
|
||||
.bind(session.source_id)
|
||||
.bind(&session.sync_type)
|
||||
.bind(&session.root_path)
|
||||
.bind(session.max_depth)
|
||||
.fetch_one(&self.pool)
|
||||
.await?;
|
||||
|
||||
Ok(row.id)
|
||||
Ok(row.get::<Uuid, _>("id"))
|
||||
}).await
|
||||
}
|
||||
|
||||
|
|
@ -36,7 +37,7 @@ impl Database {
|
|||
update: &UpdateWebDAVSyncSession
|
||||
) -> Result<bool> {
|
||||
self.with_retry(|| async {
|
||||
let rows_affected = sqlx::query!(
|
||||
let rows_affected = sqlx::query(
|
||||
r#"
|
||||
UPDATE webdav_sync_sessions SET
|
||||
directories_discovered = COALESCE($2, directories_discovered),
|
||||
|
|
@ -52,20 +53,20 @@ impl Database {
|
|||
final_error_message = COALESCE($12, final_error_message),
|
||||
updated_at = NOW()
|
||||
WHERE id = $1
|
||||
"#,
|
||||
session_id,
|
||||
update.directories_discovered,
|
||||
update.directories_processed,
|
||||
update.files_discovered,
|
||||
update.files_processed,
|
||||
update.total_bytes_discovered,
|
||||
update.total_bytes_processed,
|
||||
update.directories_skipped,
|
||||
update.files_skipped,
|
||||
update.skip_reasons,
|
||||
update.status.as_ref().map(|s| s.to_string()),
|
||||
update.final_error_message
|
||||
"#
|
||||
)
|
||||
.bind(session_id)
|
||||
.bind(update.directories_discovered)
|
||||
.bind(update.directories_processed)
|
||||
.bind(update.files_discovered)
|
||||
.bind(update.files_processed)
|
||||
.bind(update.total_bytes_discovered)
|
||||
.bind(update.total_bytes_processed)
|
||||
.bind(update.directories_skipped)
|
||||
.bind(update.files_skipped)
|
||||
.bind(&update.skip_reasons)
|
||||
.bind(update.status.as_ref().map(|s| s.to_string()))
|
||||
.bind(&update.final_error_message)
|
||||
.execute(&self.pool)
|
||||
.await?;
|
||||
|
||||
|
|
@ -76,10 +77,10 @@ impl Database {
|
|||
/// Finalize a WebDAV sync session (calculate final metrics)
|
||||
pub async fn finalize_webdav_sync_session(&self, session_id: Uuid) -> Result<()> {
|
||||
self.with_retry(|| async {
|
||||
sqlx::query!(
|
||||
"SELECT finalize_webdav_session_metrics($1)",
|
||||
session_id
|
||||
sqlx::query(
|
||||
"SELECT finalize_webdav_session_metrics($1)"
|
||||
)
|
||||
.bind(session_id)
|
||||
.execute(&self.pool)
|
||||
.await?;
|
||||
|
||||
|
|
@ -94,12 +95,11 @@ impl Database {
|
|||
user_id: Uuid
|
||||
) -> Result<Option<WebDAVSyncSession>> {
|
||||
self.with_retry(|| async {
|
||||
let session = sqlx::query_as!(
|
||||
WebDAVSyncSession,
|
||||
"SELECT * FROM webdav_sync_sessions WHERE id = $1 AND user_id = $2",
|
||||
session_id,
|
||||
user_id
|
||||
let session = sqlx::query_as::<_, WebDAVSyncSession>(
|
||||
"SELECT * FROM webdav_sync_sessions WHERE id = $1 AND user_id = $2"
|
||||
)
|
||||
.bind(session_id)
|
||||
.bind(user_id)
|
||||
.fetch_optional(&self.pool)
|
||||
.await?;
|
||||
|
||||
|
|
@ -118,8 +118,7 @@ impl Database {
|
|||
let limit = query.limit.unwrap_or(100).min(1000); // Cap at 1000
|
||||
let offset = query.offset.unwrap_or(0);
|
||||
|
||||
let sessions = sqlx::query_as!(
|
||||
WebDAVSyncSession,
|
||||
let sessions = sqlx::query_as::<_, WebDAVSyncSession>(
|
||||
r#"
|
||||
SELECT * FROM webdav_sync_sessions
|
||||
WHERE started_at BETWEEN $1 AND $2
|
||||
|
|
@ -127,14 +126,14 @@ impl Database {
|
|||
AND ($4::UUID IS NULL OR source_id = $4)
|
||||
ORDER BY started_at DESC
|
||||
LIMIT $5 OFFSET $6
|
||||
"#,
|
||||
start_time,
|
||||
end_time,
|
||||
query.user_id,
|
||||
query.source_id,
|
||||
limit as i64,
|
||||
offset as i64
|
||||
"#
|
||||
)
|
||||
.bind(start_time)
|
||||
.bind(end_time)
|
||||
.bind(query.user_id)
|
||||
.bind(query.source_id)
|
||||
.bind(limit as i64)
|
||||
.bind(offset as i64)
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
|
||||
|
|
@ -148,25 +147,25 @@ impl Database {
|
|||
metric: &CreateWebDAVDirectoryMetric
|
||||
) -> Result<Uuid> {
|
||||
self.with_retry(|| async {
|
||||
let row = sqlx::query!(
|
||||
let row = sqlx::query(
|
||||
r#"
|
||||
INSERT INTO webdav_directory_metrics (
|
||||
session_id, user_id, source_id, directory_path,
|
||||
directory_depth, parent_directory_path
|
||||
) VALUES ($1, $2, $3, $4, $5, $6)
|
||||
RETURNING id
|
||||
"#,
|
||||
metric.session_id,
|
||||
metric.user_id,
|
||||
metric.source_id,
|
||||
metric.directory_path,
|
||||
metric.directory_depth,
|
||||
metric.parent_directory_path
|
||||
"#
|
||||
)
|
||||
.bind(metric.session_id)
|
||||
.bind(metric.user_id)
|
||||
.bind(metric.source_id)
|
||||
.bind(&metric.directory_path)
|
||||
.bind(metric.directory_depth)
|
||||
.bind(&metric.parent_directory_path)
|
||||
.fetch_one(&self.pool)
|
||||
.await?;
|
||||
|
||||
Ok(row.id)
|
||||
Ok(row.get::<Uuid, _>("id"))
|
||||
}).await
|
||||
}
|
||||
|
||||
|
|
@ -177,7 +176,7 @@ impl Database {
|
|||
update: &UpdateWebDAVDirectoryMetric
|
||||
) -> Result<bool> {
|
||||
self.with_retry(|| async {
|
||||
let rows_affected = sqlx::query!(
|
||||
let rows_affected = sqlx::query(
|
||||
r#"
|
||||
UPDATE webdav_directory_metrics SET
|
||||
completed_at = CASE
|
||||
|
|
@ -208,28 +207,28 @@ impl Database {
|
|||
skip_reason = COALESCE($19, skip_reason),
|
||||
error_message = COALESCE($20, error_message)
|
||||
WHERE id = $1
|
||||
"#,
|
||||
metric_id,
|
||||
update.files_found,
|
||||
update.subdirectories_found,
|
||||
update.total_size_bytes,
|
||||
update.files_processed,
|
||||
update.files_skipped,
|
||||
update.files_failed,
|
||||
update.http_requests_made,
|
||||
update.propfind_requests,
|
||||
update.get_requests,
|
||||
update.errors_encountered,
|
||||
update.error_types,
|
||||
update.warnings_count,
|
||||
update.etag_matches,
|
||||
update.etag_mismatches,
|
||||
update.cache_hits,
|
||||
update.cache_misses,
|
||||
update.status,
|
||||
update.skip_reason,
|
||||
update.error_message
|
||||
"#
|
||||
)
|
||||
.bind(metric_id)
|
||||
.bind(update.files_found)
|
||||
.bind(update.subdirectories_found)
|
||||
.bind(update.total_size_bytes)
|
||||
.bind(update.files_processed)
|
||||
.bind(update.files_skipped)
|
||||
.bind(update.files_failed)
|
||||
.bind(update.http_requests_made)
|
||||
.bind(update.propfind_requests)
|
||||
.bind(update.get_requests)
|
||||
.bind(update.errors_encountered)
|
||||
.bind(&update.error_types)
|
||||
.bind(update.warnings_count)
|
||||
.bind(update.etag_matches)
|
||||
.bind(update.etag_mismatches)
|
||||
.bind(update.cache_hits)
|
||||
.bind(update.cache_misses)
|
||||
.bind(&update.status)
|
||||
.bind(&update.skip_reason)
|
||||
.bind(&update.error_message)
|
||||
.execute(&self.pool)
|
||||
.await?;
|
||||
|
||||
|
|
@ -244,16 +243,15 @@ impl Database {
|
|||
user_id: Uuid
|
||||
) -> Result<Vec<WebDAVDirectoryMetric>> {
|
||||
self.with_retry(|| async {
|
||||
let metrics = sqlx::query_as!(
|
||||
WebDAVDirectoryMetric,
|
||||
let metrics = sqlx::query_as::<_, WebDAVDirectoryMetric>(
|
||||
r#"
|
||||
SELECT * FROM webdav_directory_metrics
|
||||
WHERE session_id = $1 AND user_id = $2
|
||||
ORDER BY started_at ASC
|
||||
"#,
|
||||
session_id,
|
||||
user_id
|
||||
"#
|
||||
)
|
||||
.bind(session_id)
|
||||
.bind(user_id)
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
|
||||
|
|
@ -267,7 +265,7 @@ impl Database {
|
|||
metric: &CreateWebDAVRequestMetric
|
||||
) -> Result<Uuid> {
|
||||
self.with_retry(|| async {
|
||||
let row = sqlx::query!(
|
||||
let row = sqlx::query(
|
||||
r#"
|
||||
INSERT INTO webdav_request_metrics (
|
||||
session_id, directory_metric_id, user_id, source_id,
|
||||
|
|
@ -283,38 +281,38 @@ impl Database {
|
|||
$16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, NOW()
|
||||
)
|
||||
RETURNING id
|
||||
"#,
|
||||
metric.session_id,
|
||||
metric.directory_metric_id,
|
||||
metric.user_id,
|
||||
metric.source_id,
|
||||
metric.request_type.to_string(),
|
||||
metric.operation_type.to_string(),
|
||||
metric.target_path,
|
||||
metric.duration_ms,
|
||||
metric.request_size_bytes,
|
||||
metric.response_size_bytes,
|
||||
metric.http_status_code,
|
||||
metric.dns_lookup_ms,
|
||||
metric.tcp_connect_ms,
|
||||
metric.tls_handshake_ms,
|
||||
metric.time_to_first_byte_ms,
|
||||
metric.success,
|
||||
metric.retry_attempt,
|
||||
metric.error_type,
|
||||
metric.error_message,
|
||||
metric.server_header,
|
||||
metric.dav_header,
|
||||
metric.etag_value,
|
||||
metric.last_modified,
|
||||
metric.content_type,
|
||||
metric.remote_ip,
|
||||
metric.user_agent
|
||||
"#
|
||||
)
|
||||
.bind(metric.session_id)
|
||||
.bind(metric.directory_metric_id)
|
||||
.bind(metric.user_id)
|
||||
.bind(metric.source_id)
|
||||
.bind(metric.request_type.to_string())
|
||||
.bind(metric.operation_type.to_string())
|
||||
.bind(&metric.target_path)
|
||||
.bind(metric.duration_ms)
|
||||
.bind(metric.request_size_bytes)
|
||||
.bind(metric.response_size_bytes)
|
||||
.bind(metric.http_status_code)
|
||||
.bind(metric.dns_lookup_ms)
|
||||
.bind(metric.tcp_connect_ms)
|
||||
.bind(metric.tls_handshake_ms)
|
||||
.bind(metric.time_to_first_byte_ms)
|
||||
.bind(metric.success)
|
||||
.bind(metric.retry_attempt)
|
||||
.bind(&metric.error_type)
|
||||
.bind(&metric.error_message)
|
||||
.bind(&metric.server_header)
|
||||
.bind(&metric.dav_header)
|
||||
.bind(&metric.etag_value)
|
||||
.bind(metric.last_modified)
|
||||
.bind(&metric.content_type)
|
||||
.bind(&metric.remote_ip)
|
||||
.bind(&metric.user_agent)
|
||||
.fetch_one(&self.pool)
|
||||
.await?;
|
||||
|
||||
Ok(row.id)
|
||||
Ok(row.get::<Uuid, _>("id"))
|
||||
}).await
|
||||
}
|
||||
|
||||
|
|
@ -329,8 +327,7 @@ impl Database {
|
|||
self.with_retry(|| async {
|
||||
let limit = limit.unwrap_or(1000).min(10000); // Cap at 10k
|
||||
|
||||
let metrics = sqlx::query_as!(
|
||||
WebDAVRequestMetric,
|
||||
let metrics = sqlx::query_as::<_, WebDAVRequestMetric>(
|
||||
r#"
|
||||
SELECT * FROM webdav_request_metrics
|
||||
WHERE user_id = $1
|
||||
|
|
@ -338,12 +335,12 @@ impl Database {
|
|||
AND ($3::UUID IS NULL OR directory_metric_id = $3)
|
||||
ORDER BY started_at DESC
|
||||
LIMIT $4
|
||||
"#,
|
||||
user_id,
|
||||
session_id,
|
||||
directory_metric_id,
|
||||
limit as i64
|
||||
"#
|
||||
)
|
||||
.bind(user_id)
|
||||
.bind(session_id)
|
||||
.bind(directory_metric_id)
|
||||
.bind(limit as i64)
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
|
||||
|
|
@ -360,16 +357,15 @@ impl Database {
|
|||
let start_time = query.start_time.unwrap_or_else(|| Utc::now() - chrono::Duration::days(1));
|
||||
let end_time = query.end_time.unwrap_or_else(|| Utc::now());
|
||||
|
||||
let summary = sqlx::query_as!(
|
||||
WebDAVMetricsSummary,
|
||||
let summary = sqlx::query_as::<_, WebDAVMetricsSummary>(
|
||||
r#"
|
||||
SELECT * FROM get_webdav_metrics_summary($1, $2, $3, $4)
|
||||
"#,
|
||||
query.user_id,
|
||||
query.source_id,
|
||||
start_time,
|
||||
end_time
|
||||
"#
|
||||
)
|
||||
.bind(query.user_id)
|
||||
.bind(query.source_id)
|
||||
.bind(start_time)
|
||||
.bind(end_time)
|
||||
.fetch_optional(&self.pool)
|
||||
.await?;
|
||||
|
||||
|
|
@ -509,14 +505,14 @@ impl Database {
|
|||
self.with_retry(|| async {
|
||||
let cutoff_date = Utc::now() - chrono::Duration::days(days_to_keep as i64);
|
||||
|
||||
let result = sqlx::query!(
|
||||
let result = sqlx::query(
|
||||
r#"
|
||||
DELETE FROM webdav_sync_sessions
|
||||
WHERE created_at < $1
|
||||
AND status IN ('completed', 'failed', 'cancelled')
|
||||
"#,
|
||||
cutoff_date
|
||||
"#
|
||||
)
|
||||
.bind(cutoff_date)
|
||||
.execute(&self.pool)
|
||||
.await?;
|
||||
|
||||
|
|
|
|||
|
|
@ -108,7 +108,7 @@ async fn perform_sync_internal(
|
|||
let mut total_directories_scanned = 0;
|
||||
let mut total_directories_skipped = 0;
|
||||
let mut sync_errors = Vec::new();
|
||||
let mut sync_warnings = Vec::new();
|
||||
let mut sync_warnings: Vec<String> = Vec::new();
|
||||
let folder_start_time = Instant::now();
|
||||
|
||||
// Create progress tracker for this sync session
|
||||
|
|
@ -190,6 +190,9 @@ async fn perform_sync_internal(
|
|||
info!("[{}] 📄 Processing {} files from folder '{}'",
|
||||
request_id, files_to_process.len(), folder_path);
|
||||
|
||||
// Process files concurrently with a limit to avoid overwhelming the system
|
||||
let concurrent_limit = 5; // Max 5 concurrent downloads
|
||||
|
||||
if files_to_process.len() > 50 {
|
||||
info!("[{}] Large file batch detected ({} files), processing with concurrency limit of {}",
|
||||
request_id, files_to_process.len(), concurrent_limit);
|
||||
|
|
@ -198,9 +201,6 @@ async fn perform_sync_internal(
|
|||
// Update progress for file processing phase
|
||||
progress.set_phase(SyncPhase::ProcessingFiles);
|
||||
progress.add_files_found(files_to_process.len());
|
||||
|
||||
// Process files concurrently with a limit to avoid overwhelming the system
|
||||
let concurrent_limit = 5; // Max 5 concurrent downloads
|
||||
let semaphore = Arc::new(Semaphore::new(concurrent_limit));
|
||||
let mut folder_files_processed = 0;
|
||||
|
||||
|
|
|
|||
|
|
@ -419,7 +419,7 @@ pub async fn get_webdav_performance_overview(
|
|||
let metrics_tracker = WebDAVMetricsTracker::new(state.db.clone());
|
||||
|
||||
// Get recent sessions (last 10) - enforce reasonable limit
|
||||
let limited_sessions_limit = Some(10.min(query.limit.unwrap_or(10)));
|
||||
let limited_sessions_limit = Some(10);
|
||||
let recent_sessions_query = WebDAVMetricsQuery {
|
||||
user_id: Some(auth_user.user.id),
|
||||
source_id: query.source_id,
|
||||
|
|
|
|||
|
|
@ -130,57 +130,28 @@ impl SourceErrorTracker {
|
|||
source_id: Option<Uuid>,
|
||||
resource_path: &str,
|
||||
) -> Result<SkipDecision> {
|
||||
match self.db.get_source_scan_failure_details(user_id, source_type, source_id, resource_path).await {
|
||||
Ok(Some(failure_details)) => {
|
||||
let now = chrono::Utc::now();
|
||||
let time_since_last_failure = now.signed_duration_since(failure_details.last_failed_at);
|
||||
let failure_count = failure_details.failure_count;
|
||||
// Check if there are any failures for this resource path
|
||||
match self.db.is_source_known_failure(user_id, source_type, source_id, resource_path).await {
|
||||
Ok(true) => {
|
||||
// There is a known failure, but we need more details
|
||||
// For now, implement a simple cooldown based on the fact that there's a failure
|
||||
// In a real implementation, we'd need a method that returns failure details
|
||||
let skip_reason = format!("Resource has previous failures recorded in system");
|
||||
|
||||
// Calculate cooldown period based on failure count (exponential backoff)
|
||||
let cooldown_minutes = match failure_count {
|
||||
1 => 5, // 5 minutes after first failure
|
||||
2 => 15, // 15 minutes after second failure
|
||||
3 => 60, // 1 hour after third failure
|
||||
4 => 240, // 4 hours after fourth failure
|
||||
_ => 1440, // 24 hours for 5+ failures
|
||||
};
|
||||
|
||||
let cooldown_duration = chrono::Duration::minutes(cooldown_minutes);
|
||||
let should_skip = time_since_last_failure < cooldown_duration;
|
||||
|
||||
let skip_reason = if should_skip {
|
||||
format!("Directory has {} previous failures, last failed {:.1} minutes ago. Cooldown period: {} minutes",
|
||||
failure_count, time_since_last_failure.num_minutes() as f64, cooldown_minutes)
|
||||
} else {
|
||||
format!("Directory had {} previous failures but cooldown period ({} minutes) has elapsed",
|
||||
failure_count, cooldown_minutes)
|
||||
};
|
||||
|
||||
if should_skip {
|
||||
info!(
|
||||
"⏭️ Skipping {} resource '{}' due to error tracking: {}",
|
||||
source_type, resource_path, skip_reason
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"✅ Retrying {} resource '{}' after cooldown: {}",
|
||||
source_type, resource_path, skip_reason
|
||||
);
|
||||
}
|
||||
info!(
|
||||
"⏭️ Skipping {} resource '{}' due to error tracking: {}",
|
||||
source_type, resource_path, skip_reason
|
||||
);
|
||||
|
||||
Ok(SkipDecision {
|
||||
should_skip,
|
||||
should_skip: true,
|
||||
reason: skip_reason,
|
||||
failure_count,
|
||||
time_since_last_failure_minutes: time_since_last_failure.num_minutes(),
|
||||
cooldown_remaining_minutes: if should_skip {
|
||||
Some(cooldown_minutes - time_since_last_failure.num_minutes())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
failure_count: 1, // We don't have exact count, use 1 as placeholder
|
||||
time_since_last_failure_minutes: 0, // We don't have exact time
|
||||
cooldown_remaining_minutes: Some(60), // Default 1 hour cooldown
|
||||
})
|
||||
}
|
||||
Ok(None) => {
|
||||
Ok(false) => {
|
||||
debug!(
|
||||
"✅ No previous failures for {} resource '{}', proceeding with scan",
|
||||
source_type, resource_path
|
||||
|
|
|
|||
|
|
@ -82,7 +82,7 @@ impl WebDAVServiceWithMetrics for WebDAVService {
|
|||
|
||||
// Record the discovery request
|
||||
let discovery_start = Instant::now();
|
||||
let discovery_result = self.discover(path, depth, file_extensions).await;
|
||||
let discovery_result = self.discover_files_and_directories(path, depth.is_some()).await;
|
||||
let discovery_duration = discovery_start.elapsed();
|
||||
|
||||
// Record HTTP request metric for the discovery operation
|
||||
|
|
@ -123,8 +123,7 @@ impl WebDAVServiceWithMetrics for WebDAVService {
|
|||
let files_count = result.files.len() as i32;
|
||||
let dirs_count = result.directories.len() as i32;
|
||||
let total_size: u64 = result.files.iter()
|
||||
.filter_map(|f| f.file_size)
|
||||
.map(|size| size as u64)
|
||||
.map(|f| f.size as u64)
|
||||
.sum();
|
||||
|
||||
metrics_tracker
|
||||
|
|
@ -203,7 +202,24 @@ impl WebDAVServiceWithMetrics for WebDAVService {
|
|||
expected_size: Option<u64>,
|
||||
) -> Result<super::WebDAVDownloadResult> {
|
||||
let download_start = Instant::now();
|
||||
let download_result = self.download_file(file_url).await;
|
||||
// Create a temporary FileIngestionInfo for download with mime detection
|
||||
let temp_file_info = crate::models::FileIngestionInfo {
|
||||
relative_path: file_url.to_string(),
|
||||
full_path: file_url.to_string(),
|
||||
path: file_url.to_string(),
|
||||
name: file_url.split('/').last().unwrap_or("unknown").to_string(),
|
||||
size: expected_size.unwrap_or(0) as i64,
|
||||
mime_type: "application/octet-stream".to_string(),
|
||||
last_modified: Some(chrono::Utc::now()),
|
||||
etag: "".to_string(),
|
||||
is_directory: false,
|
||||
created_at: None,
|
||||
permissions: None,
|
||||
owner: None,
|
||||
group: None,
|
||||
metadata: None,
|
||||
};
|
||||
let download_result = self.download_file_with_mime_detection(&temp_file_info).await;
|
||||
let download_duration = download_start.elapsed();
|
||||
|
||||
let (success, error_type, error_message, response_size) = match &download_result {
|
||||
|
|
@ -335,7 +351,7 @@ impl WebDAVServiceWithMetrics for WebDAVService {
|
|||
let test_duration = test_start.elapsed();
|
||||
|
||||
let (success, error_type, error_message) = match &test_result {
|
||||
Ok(status) => (status.healthy, None, if status.healthy { None } else { Some(status.message.clone()) }),
|
||||
Ok(status) => (status.success, None, if status.success { None } else { Some(status.message.clone()) }),
|
||||
Err(e) => (false, Some("connection_test_error".to_string()), Some(e.to_string())),
|
||||
};
|
||||
|
||||
|
|
@ -371,7 +387,24 @@ impl WebDAVServiceWithMetrics for WebDAVService {
|
|||
success, test_duration.as_millis()
|
||||
);
|
||||
|
||||
test_result
|
||||
// Convert WebDAVConnectionResult to HealthStatus
|
||||
match test_result {
|
||||
Ok(conn_result) => Ok(super::HealthStatus {
|
||||
healthy: conn_result.success,
|
||||
message: conn_result.message,
|
||||
response_time_ms: test_duration.as_millis() as u64,
|
||||
details: Some(serde_json::json!({
|
||||
"server_version": conn_result.server_version,
|
||||
"server_type": conn_result.server_type
|
||||
})),
|
||||
}),
|
||||
Err(e) => Ok(super::HealthStatus {
|
||||
healthy: false,
|
||||
message: e.to_string(),
|
||||
response_time_ms: test_duration.as_millis() as u64,
|
||||
details: None,
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ use std::collections::HashMap;
|
|||
use tokio::sync::RwLock;
|
||||
use tracing::{debug, error, info, warn};
|
||||
use uuid::Uuid;
|
||||
use reqwest::header::HeaderMap;
|
||||
|
||||
use crate::db::Database;
|
||||
use crate::models::webdav_metrics::*;
|
||||
|
|
@ -426,7 +427,7 @@ impl WebDAVMetricsTracker {
|
|||
retry_attempt: i32,
|
||||
error_type: Option<String>,
|
||||
error_message: Option<String>,
|
||||
server_headers: Option<&reqwest::HeaderMap>,
|
||||
server_headers: Option<&HeaderMap>,
|
||||
remote_ip: Option<String>,
|
||||
) -> Result<Uuid> {
|
||||
// Extract server information from headers
|
||||
|
|
@ -474,7 +475,7 @@ impl WebDAVMetricsTracker {
|
|||
time_to_first_byte_ms: None, // Could be enhanced with detailed timing
|
||||
success,
|
||||
retry_attempt,
|
||||
error_type,
|
||||
error_type: error_type.clone(),
|
||||
error_message,
|
||||
server_header,
|
||||
dav_header,
|
||||
|
|
|
|||
Loading…
Reference in New Issue