feat(webdav): resolve failing migration tests and improve error handling

perf3ct 2025-08-23 18:52:52 +00:00
parent 08cce05d1a
commit 1b4573f658
No known key found for this signature in database
GPG Key ID: 569C4EEC436F5232
17 changed files with 1393 additions and 128 deletions

View File

@@ -1552,6 +1552,831 @@
],
"title": "Document OCR Status",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 36
},
"id": 24,
"panels": [],
"title": "WebDAV Operations",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 4,
"x": 0,
"y": 37
},
"id": 25,
"options": {
"colorMode": "value",
"graphMode": "none",
"justifyMode": "center",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"text": {},
"textMode": "auto"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "readur_webdav_sessions_total",
"refId": "A"
}
],
"title": "Total WebDAV Sessions",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "red",
"value": null
},
{
"color": "yellow",
"value": 80
},
{
"color": "green",
"value": 95
}
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 4,
"x": 4,
"y": 37
},
"id": 26,
"options": {
"orientation": "auto",
"reduceOptions": {
"values": false,
"calcs": [
"lastNotNull"
]
},
"showThresholdLabels": false,
"showThresholdMarkers": true
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "readur_webdav_success_rate",
"refId": "A"
}
],
"title": "WebDAV Success Rate",
"type": "gauge"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 4,
"x": 8,
"y": 37
},
"id": 27,
"options": {
"colorMode": "value",
"graphMode": "none",
"justifyMode": "center",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"text": {},
"textMode": "auto"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "readur_webdav_files_processed",
"refId": "A"
}
],
"title": "Files Processed",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 5000
},
{
"color": "red",
"value": 10000
}
]
},
"unit": "ms"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 4,
"x": 12,
"y": 37
},
"id": 28,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "center",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"text": {},
"textMode": "auto"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "readur_webdav_avg_request_duration_ms",
"refId": "A"
}
],
"title": "Avg Request Duration",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 5
},
{
"color": "red",
"value": 15
}
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 4,
"x": 16,
"y": 37
},
"id": 29,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "center",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"text": {},
"textMode": "auto"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "readur_webdav_error_rate_last_hour",
"refId": "A"
}
],
"title": "Error Rate (Last Hour)",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "decbytes"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 4,
"x": 20,
"y": 37
},
"id": 30,
"options": {
"colorMode": "value",
"graphMode": "none",
"justifyMode": "center",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"text": {},
"textMode": "auto"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "readur_webdav_bytes_processed",
"refId": "A"
}
],
"title": "Bytes Processed",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 41
},
"id": 31,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "readur_webdav_sessions_successful",
"legendFormat": "Successful",
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "readur_webdav_sessions_failed",
"legendFormat": "Failed",
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "readur_webdav_sessions_active_last_hour",
"legendFormat": "Active (Last Hour)",
"refId": "C"
}
],
"title": "WebDAV Session Status",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 5000
}
]
},
"unit": "ms"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 41
},
"id": 32,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "readur_webdav_avg_request_duration_ms",
"legendFormat": "Avg Request Duration",
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "readur_webdav_avg_session_duration_seconds * 1000",
"legendFormat": "Avg Session Duration",
"refId": "B"
}
],
"title": "WebDAV Performance Metrics",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 49
},
"id": 33,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "readur_webdav_success_rate",
"legendFormat": "Session Success Rate",
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "readur_webdav_request_success_rate",
"legendFormat": "Request Success Rate",
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "readur_webdav_error_rate_last_hour",
"legendFormat": "Error Rate (Last Hour)",
"refId": "C"
}
],
"title": "WebDAV Success & Error Rates",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 49
},
"id": 34,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "readur_webdav_http_requests_total",
"legendFormat": "Total HTTP Requests",
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "readur_webdav_avg_processing_rate",
"legendFormat": "Processing Rate (files/sec)",
"refId": "B"
}
],
"title": "WebDAV Throughput Metrics",
"type": "timeseries"
}
],
"refresh": "30s",

View File

@@ -1,26 +1,36 @@
-- WebDAV Metrics Collection System
-- This migration adds tables for tracking detailed WebDAV sync performance metrics
-- Enum for WebDAV operation types
CREATE TYPE webdav_operation_type AS ENUM (
'discovery', -- Directory/file discovery operations
'download', -- File download operations
'metadata_fetch', -- Getting file metadata (properties)
'connection_test', -- Testing connection/authentication
'validation', -- Directory validation operations
'full_sync' -- Complete sync session
);
-- Create enum for WebDAV operation types
-- Use DO block to handle existing type gracefully
DO $$ BEGIN
CREATE TYPE webdav_operation_type AS ENUM (
'discovery', -- Directory/file discovery operations
'download', -- File download operations
'metadata_fetch', -- Getting file metadata (properties)
'connection_test', -- Testing connection/authentication
'validation', -- Directory validation operations
'full_sync' -- Complete sync session
);
EXCEPTION
WHEN duplicate_object THEN null;
END $$;
-- Enum for WebDAV request types (HTTP methods)
CREATE TYPE webdav_request_type AS ENUM (
'PROPFIND',
'GET',
'HEAD',
'OPTIONS',
'POST',
'PUT',
'DELETE'
);
-- Create enum for WebDAV request types (HTTP methods)
-- Use DO block to handle existing type gracefully
DO $$ BEGIN
CREATE TYPE webdav_request_type AS ENUM (
'PROPFIND',
'GET',
'HEAD',
'OPTIONS',
'POST',
'PUT',
'DELETE'
);
EXCEPTION
WHEN duplicate_object THEN null;
END $$;
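The DO/EXCEPTION wrapper above is the standard workaround for Postgres lacking CREATE TYPE IF NOT EXISTS: re-running the migration raises duplicate_object, which the block swallows. A minimal sketch of exercising this idempotency from Rust via sqlx (the ensure_webdav_enums helper is hypothetical, not part of the codebase):

use sqlx::{Executor, PgPool};

// Safe to run repeatedly: the DO block swallows duplicate_object.
async fn ensure_webdav_enums(pool: &PgPool) -> Result<(), sqlx::Error> {
    pool.execute(
        r#"
        DO $$ BEGIN
            CREATE TYPE webdav_operation_type AS ENUM (
                'discovery', 'download', 'metadata_fetch',
                'connection_test', 'validation', 'full_sync'
            );
        EXCEPTION
            WHEN duplicate_object THEN null;
        END $$;
        "#,
    )
    .await?;
    Ok(())
}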
-- Table for tracking overall WebDAV sync sessions
CREATE TABLE IF NOT EXISTS webdav_sync_sessions (
@@ -48,7 +58,7 @@ CREATE TABLE IF NOT EXISTS webdav_sync_sessions (
total_bytes_discovered BIGINT NOT NULL DEFAULT 0,
total_bytes_processed BIGINT NOT NULL DEFAULT 0,
avg_file_size_bytes BIGINT,
processing_rate_files_per_sec DECIMAL(10,2),
processing_rate_files_per_sec FLOAT8,
-- Request statistics
total_http_requests INTEGER NOT NULL DEFAULT 0,
@@ -114,7 +124,7 @@ CREATE TABLE IF NOT EXISTS webdav_directory_metrics (
warnings_count INTEGER NOT NULL DEFAULT 0,
-- Performance characteristics
avg_response_time_ms DECIMAL(10,2),
avg_response_time_ms FLOAT8,
slowest_request_ms BIGINT,
fastest_request_ms BIGINT,
@@ -217,7 +227,6 @@ CREATE INDEX IF NOT EXISTS idx_webdav_request_metrics_session_id ON webdav_reque
CREATE INDEX IF NOT EXISTS idx_webdav_request_metrics_user_id ON webdav_request_metrics(user_id);
CREATE INDEX IF NOT EXISTS idx_webdav_request_metrics_source_id ON webdav_request_metrics(source_id);
CREATE INDEX IF NOT EXISTS idx_webdav_request_metrics_started_at ON webdav_request_metrics(started_at);
CREATE INDEX IF NOT EXISTS idx_webdav_request_metrics_request_type ON webdav_request_metrics(request_type);
CREATE INDEX IF NOT EXISTS idx_webdav_request_metrics_operation_type ON webdav_request_metrics(operation_type);
CREATE INDEX IF NOT EXISTS idx_webdav_request_metrics_success ON webdav_request_metrics(success);
@@ -230,6 +239,7 @@ BEGIN
END;
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS webdav_sync_sessions_updated_at ON webdav_sync_sessions;
CREATE TRIGGER webdav_sync_sessions_updated_at
BEFORE UPDATE ON webdav_sync_sessions
FOR EACH ROW
@@ -252,30 +262,38 @@ BEGIN
SELECT * INTO v_session FROM webdav_sync_sessions WHERE id = p_session_id;
IF NOT FOUND THEN
RAISE NOTICE 'Session not found: %', p_session_id;
RETURN;
END IF;
-- Calculate request statistics from webdav_request_metrics
-- Use explicit casting to avoid any type issues
SELECT
COUNT(*),
COUNT(*) FILTER (WHERE success = true),
COUNT(*) FILTER (WHERE success = false),
COUNT(*) FILTER (WHERE retry_attempt > 0),
COALESCE(SUM(duration_ms), 0),
MAX(duration_ms),
target_path
CAST(COUNT(*) AS INTEGER),
CAST(COUNT(CASE WHEN success = true THEN 1 END) AS INTEGER),
CAST(COUNT(CASE WHEN success = false THEN 1 END) AS INTEGER),
CAST(COUNT(CASE WHEN retry_attempt > 0 THEN 1 END) AS INTEGER),
CAST(COALESCE(SUM(duration_ms), 0) AS BIGINT)
INTO
v_total_requests,
v_successful_requests,
v_failed_requests,
v_retry_attempts,
v_network_time_ms,
v_network_time_ms
FROM webdav_request_metrics
WHERE session_id = p_session_id;
-- Get the slowest operation separately
SELECT
duration_ms,
target_path
INTO
v_slowest_operation_ms,
v_slowest_operation_path
FROM webdav_request_metrics
WHERE session_id = p_session_id
GROUP BY target_path
ORDER BY MAX(duration_ms) DESC
ORDER BY duration_ms DESC
LIMIT 1;
-- Update session with final metrics
@@ -305,6 +323,8 @@ BEGIN
END,
updated_at = NOW()
WHERE id = p_session_id;
RAISE NOTICE 'Session % finalized with % total requests, % successful', p_session_id, v_total_requests, v_successful_requests;
END;
$$ LANGUAGE plpgsql;
@@ -321,11 +341,11 @@ RETURNS TABLE (
failed_sessions INTEGER,
total_files_processed BIGINT,
total_bytes_processed BIGINT,
avg_session_duration_sec DECIMAL,
avg_processing_rate DECIMAL,
avg_session_duration_sec DOUBLE PRECISION,
avg_processing_rate DOUBLE PRECISION,
total_http_requests BIGINT,
request_success_rate DECIMAL,
avg_request_duration_ms DECIMAL,
request_success_rate DOUBLE PRECISION,
avg_request_duration_ms DOUBLE PRECISION,
common_error_types JSONB
) AS $$
BEGIN
@@ -334,23 +354,23 @@ BEGIN
COUNT(*)::INTEGER as total_sessions,
COUNT(*) FILTER (WHERE s.status = 'completed')::INTEGER as successful_sessions,
COUNT(*) FILTER (WHERE s.status = 'failed')::INTEGER as failed_sessions,
COALESCE(SUM(s.files_processed), 0) as total_files_processed,
COALESCE(SUM(s.total_bytes_processed), 0) as total_bytes_processed,
COALESCE(AVG(s.duration_ms / 1000.0), 0)::DECIMAL as avg_session_duration_sec,
COALESCE(AVG(s.processing_rate_files_per_sec), 0)::DECIMAL as avg_processing_rate,
COALESCE(SUM(s.total_http_requests), 0) as total_http_requests,
COALESCE(SUM(s.files_processed), 0)::BIGINT as total_files_processed,
COALESCE(SUM(s.total_bytes_processed), 0)::BIGINT as total_bytes_processed,
COALESCE(AVG(s.duration_ms / 1000.0), 0.0)::DOUBLE PRECISION as avg_session_duration_sec,
COALESCE(AVG(s.processing_rate_files_per_sec), 0.0)::DOUBLE PRECISION as avg_processing_rate,
COALESCE(SUM(s.total_http_requests), 0)::BIGINT as total_http_requests,
CASE
WHEN SUM(s.total_http_requests) > 0
THEN (SUM(s.successful_requests)::DECIMAL / SUM(s.total_http_requests) * 100)
ELSE 0
END as request_success_rate,
THEN (SUM(s.successful_requests)::DOUBLE PRECISION / SUM(s.total_http_requests) * 100.0)
ELSE 0.0
END::DOUBLE PRECISION as request_success_rate,
COALESCE(
(SELECT AVG(duration_ms) FROM webdav_request_metrics r
(SELECT AVG(duration_ms)::DOUBLE PRECISION FROM webdav_request_metrics r
WHERE r.started_at BETWEEN p_start_time AND p_end_time
AND (p_user_id IS NULL OR r.user_id = p_user_id)
AND (p_source_id IS NULL OR r.source_id = p_source_id)),
0
)::DECIMAL as avg_request_duration_ms,
0.0
)::DOUBLE PRECISION as avg_request_duration_ms,
COALESCE(
(SELECT jsonb_agg(jsonb_build_object('error_type', error_type, 'count', error_count))
FROM (

View File

@@ -77,13 +77,94 @@ impl Database {
/// Finalize a WebDAV sync session (calculate final metrics)
pub async fn finalize_webdav_sync_session(&self, session_id: Uuid) -> Result<()> {
self.with_retry(|| async {
sqlx::query(
"SELECT finalize_webdav_session_metrics($1)"
// Debug: Check how many requests exist for this session before finalizing
let request_count: (i64,) = sqlx::query_as(
"SELECT COUNT(*) FROM webdav_request_metrics WHERE session_id = $1"
)
.bind(session_id)
.fetch_one(&self.pool)
.await?;
tracing::debug!("Finalizing session {}: found {} HTTP requests", session_id, request_count.0);
// Instead of using the PostgreSQL function, do the aggregation in Rust
// to avoid transaction isolation issues
let (successful_requests, failed_requests, total_requests, network_time_ms): (i64, i64, i64, i64) = sqlx::query_as(
r#"
SELECT
COUNT(*) FILTER (WHERE success = true),
COUNT(*) FILTER (WHERE success = false),
COUNT(*),
CAST(COALESCE(SUM(duration_ms), 0) AS BIGINT)
FROM webdav_request_metrics
WHERE session_id = $1
"#
)
.bind(session_id)
.fetch_one(&self.pool)
.await?;
tracing::debug!("Direct aggregation - total: {}, successful: {}, failed: {}", total_requests, successful_requests, failed_requests);
// Get the slowest operation
let slowest_operation: Option<(i64, String)> = sqlx::query_as(
"SELECT duration_ms, target_path FROM webdav_request_metrics WHERE session_id = $1 ORDER BY duration_ms DESC LIMIT 1"
)
.bind(session_id)
.fetch_optional(&self.pool)
.await?;
// Update the session directly with Rust-calculated values
sqlx::query(
r#"
UPDATE webdav_sync_sessions SET
completed_at = NOW(),
duration_ms = EXTRACT(EPOCH FROM (NOW() - started_at)) * 1000,
total_http_requests = $2,
successful_requests = $3,
failed_requests = $4,
retry_attempts = 0,
network_time_ms = $5,
slowest_operation_ms = $6,
slowest_operation_path = $7,
processing_rate_files_per_sec = CASE
WHEN files_processed > 0 AND EXTRACT(EPOCH FROM (NOW() - started_at)) > 0
THEN files_processed / EXTRACT(EPOCH FROM (NOW() - started_at))
ELSE 0
END,
avg_file_size_bytes = CASE
WHEN files_processed > 0
THEN total_bytes_processed / files_processed
ELSE 0
END,
status = CASE
WHEN status = 'in_progress' THEN 'completed'
ELSE status
END,
updated_at = NOW()
WHERE id = $1
"#
)
.bind(session_id)
.bind(total_requests as i32)
.bind(successful_requests as i32)
.bind(failed_requests as i32)
.bind(network_time_ms)
.bind(slowest_operation.as_ref().map(|(ms, _)| *ms))
.bind(slowest_operation.as_ref().map(|(_, path)| path.as_str()))
.execute(&self.pool)
.await?;
// Check the session after finalization
let session_after: (i32, i32, i32) = sqlx::query_as(
"SELECT total_http_requests, successful_requests, failed_requests FROM webdav_sync_sessions WHERE id = $1"
)
.bind(session_id)
.fetch_one(&self.pool)
.await?;
tracing::debug!("After finalization - total: {}, successful: {}, failed: {}", session_after.0, session_after.1, session_after.2);
Ok(())
}).await
}
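Since the finalizer now reads back and logs the updated counters, a natural invariant to assert in tests is that every recorded request is counted exactly once. A hedged sketch, assuming a connected Database and an existing session with recorded requests (the helper name is illustrative):

async fn assert_finalized_counts(db: &readur::db::Database, session_id: uuid::Uuid) -> anyhow::Result<()> {
    db.finalize_webdav_sync_session(session_id).await?;
    let (total, ok, failed): (i32, i32, i32) = sqlx::query_as(
        "SELECT total_http_requests, successful_requests, failed_requests
         FROM webdav_sync_sessions WHERE id = $1",
    )
    .bind(session_id)
    .fetch_one(&db.pool)
    .await?;
    // Every request is either a success or a failure, so the split must sum up.
    assert_eq!(total, ok + failed);
    Ok(())
}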
@@ -277,7 +358,7 @@ impl Database {
content_type, remote_ip, user_agent,
completed_at
) VALUES (
$1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15,
$1, $2, $3, $4, $5::webdav_request_type, $6::webdav_operation_type, $7, $8, $9, $10, $11, $12, $13, $14, $15,
$16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, NOW()
)
RETURNING id
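The explicit ::webdav_request_type and ::webdav_operation_type casts matter because sqlx binds Rust strings as TEXT, which Postgres will not implicitly coerce into an enum parameter in a prepared statement. A self-contained sketch of the pattern, assuming the enum from the migration above (cast_demo is an illustrative name):

async fn cast_demo(pool: &sqlx::PgPool) -> Result<(), sqlx::Error> {
    // Without the ::webdav_operation_type cast, Postgres rejects the TEXT
    // bind with a type mismatch for the enum parameter.
    sqlx::query("SELECT $1::webdav_operation_type")
        .bind("discovery")
        .execute(pool)
        .await?;
    Ok(())
}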
@@ -287,8 +368,8 @@ impl Database {
.bind(metric.directory_metric_id)
.bind(metric.user_id)
.bind(metric.source_id)
.bind(metric.request_type.to_string())
.bind(metric.operation_type.to_string())
.bind(metric.request_type.to_string().as_str())
.bind(metric.operation_type.to_string().as_str())
.bind(&metric.target_path)
.bind(metric.duration_ms)
.bind(metric.request_size_bytes)
@@ -329,7 +410,17 @@ impl Database {
let metrics = sqlx::query_as::<_, WebDAVRequestMetric>(
r#"
SELECT * FROM webdav_request_metrics
SELECT
id, session_id, directory_metric_id, user_id, source_id,
request_type::TEXT as request_type,
operation_type::TEXT as operation_type,
target_path, started_at, completed_at, duration_ms,
request_size_bytes, response_size_bytes, http_status_code,
dns_lookup_ms, tcp_connect_ms, tls_handshake_ms, time_to_first_byte_ms,
success, retry_attempt, error_type, error_message,
server_header, dav_header, etag_value, last_modified,
content_type, remote_ip, user_agent
FROM webdav_request_metrics
WHERE user_id = $1
AND ($2::UUID IS NULL OR session_id = $2)
AND ($3::UUID IS NULL OR directory_metric_id = $3)
@@ -357,9 +448,22 @@ impl Database {
let start_time = query.start_time.unwrap_or_else(|| Utc::now() - chrono::Duration::days(1));
let end_time = query.end_time.unwrap_or_else(|| Utc::now());
let summary = sqlx::query_as::<_, WebDAVMetricsSummary>(
// Try the SQL function first; fall back to a manual query if the call fails
let summary = match sqlx::query_as::<_, WebDAVMetricsSummary>(
r#"
SELECT * FROM get_webdav_metrics_summary($1, $2, $3, $4)
SELECT
total_sessions,
successful_sessions,
failed_sessions,
total_files_processed,
total_bytes_processed,
avg_session_duration_sec,
avg_processing_rate,
total_http_requests,
request_success_rate,
avg_request_duration_ms,
common_error_types
FROM get_webdav_metrics_summary($1, $2, $3, $4)
"#
)
.bind(query.user_id)
@@ -367,7 +471,58 @@ impl Database {
.bind(start_time)
.bind(end_time)
.fetch_optional(&self.pool)
.await?;
.await {
Ok(result) => result,
Err(e) => {
tracing::error!("Failed to call get_webdav_metrics_summary function: {}", e);
// Fall back to manual query if function fails
sqlx::query_as::<_, WebDAVMetricsSummary>(
r#"
SELECT
COALESCE(COUNT(*)::INTEGER, 0) as total_sessions,
COALESCE(COUNT(*) FILTER (WHERE s.status = 'completed')::INTEGER, 0) as successful_sessions,
COALESCE(COUNT(*) FILTER (WHERE s.status = 'failed')::INTEGER, 0) as failed_sessions,
COALESCE(SUM(s.files_processed), 0)::BIGINT as total_files_processed,
COALESCE(SUM(s.total_bytes_processed), 0)::BIGINT as total_bytes_processed,
COALESCE(AVG(s.duration_ms / 1000.0), 0.0)::DOUBLE PRECISION as avg_session_duration_sec,
COALESCE(AVG(s.processing_rate_files_per_sec), 0.0)::DOUBLE PRECISION as avg_processing_rate,
COALESCE(SUM(s.total_http_requests), 0)::BIGINT as total_http_requests,
CASE
WHEN SUM(s.total_http_requests) > 0
THEN (SUM(s.successful_requests)::DOUBLE PRECISION / SUM(s.total_http_requests) * 100.0)
ELSE 0.0
END as request_success_rate,
COALESCE((SELECT AVG(duration_ms) FROM webdav_request_metrics r
WHERE r.started_at BETWEEN $3 AND $4
AND ($1 IS NULL OR r.user_id = $1)
AND ($2 IS NULL OR r.source_id = $2)), 0.0)::DOUBLE PRECISION as avg_request_duration_ms,
COALESCE((SELECT jsonb_agg(jsonb_build_object('error_type', error_type, 'count', error_count))
FROM (
SELECT error_type, COUNT(*) as error_count
FROM webdav_request_metrics r
WHERE r.started_at BETWEEN $3 AND $4
AND r.success = false
AND r.error_type IS NOT NULL
AND ($1 IS NULL OR r.user_id = $1)
AND ($2 IS NULL OR r.source_id = $2)
GROUP BY error_type
ORDER BY error_count DESC
LIMIT 10
) error_summary), '[]'::jsonb) as common_error_types
FROM webdav_sync_sessions s
WHERE s.started_at BETWEEN $3 AND $4
AND ($1 IS NULL OR s.user_id = $1)
AND ($2 IS NULL OR s.source_id = $2)
"#
)
.bind(query.user_id)
.bind(query.source_id)
.bind(start_time)
.bind(end_time)
.fetch_optional(&self.pool)
.await?
}
};
Ok(summary)
}).await

View File

@@ -244,7 +244,6 @@ pub struct WebDAVRequestMetric {
pub content_type: Option<String>,
pub remote_ip: Option<String>,
pub user_agent: Option<String>,
pub created_at: DateTime<Utc>,
}
/// Summary metrics for WebDAV operations

View File

@@ -39,14 +39,15 @@ pub async fn get_prometheus_metrics(
tracing::debug!("Prometheus: Starting to collect all metrics");
// Collect all metrics
let (document_metrics, ocr_metrics, user_metrics, database_metrics, system_metrics, storage_metrics, security_metrics) = tokio::try_join!(
let (document_metrics, ocr_metrics, user_metrics, database_metrics, system_metrics, storage_metrics, security_metrics, webdav_metrics) = tokio::try_join!(
collect_document_metrics(&state),
collect_ocr_metrics(&state),
collect_user_metrics(&state),
collect_database_metrics(&state),
collect_system_metrics(&state),
collect_storage_metrics(&state),
collect_security_metrics(&state)
collect_security_metrics(&state),
collect_webdav_metrics(&state)
).map_err(|e| {
tracing::error!("Prometheus: Failed to collect metrics: {:?}", e);
e
@@ -193,6 +194,59 @@ pub async fn get_prometheus_metrics(
writeln!(&mut output, "# TYPE readur_document_access_today counter").unwrap();
writeln!(&mut output, "readur_document_access_today {} {}", security_metrics.document_access_today, timestamp).unwrap();
// WebDAV metrics
writeln!(&mut output, "# HELP readur_webdav_sessions_total Total number of WebDAV sync sessions").unwrap();
writeln!(&mut output, "# TYPE readur_webdav_sessions_total counter").unwrap();
writeln!(&mut output, "readur_webdav_sessions_total {} {}", webdav_metrics.total_sessions, timestamp).unwrap();
writeln!(&mut output, "# HELP readur_webdav_sessions_successful Successful WebDAV sync sessions").unwrap();
writeln!(&mut output, "# TYPE readur_webdav_sessions_successful counter").unwrap();
writeln!(&mut output, "readur_webdav_sessions_successful {} {}", webdav_metrics.successful_sessions, timestamp).unwrap();
writeln!(&mut output, "# HELP readur_webdav_sessions_failed Failed WebDAV sync sessions").unwrap();
writeln!(&mut output, "# TYPE readur_webdav_sessions_failed counter").unwrap();
writeln!(&mut output, "readur_webdav_sessions_failed {} {}", webdav_metrics.failed_sessions, timestamp).unwrap();
writeln!(&mut output, "# HELP readur_webdav_success_rate WebDAV sync success rate (percentage)").unwrap();
writeln!(&mut output, "# TYPE readur_webdav_success_rate gauge").unwrap();
writeln!(&mut output, "readur_webdav_success_rate {} {}", webdav_metrics.success_rate, timestamp).unwrap();
writeln!(&mut output, "# HELP readur_webdav_files_processed Total files processed by WebDAV syncs").unwrap();
writeln!(&mut output, "# TYPE readur_webdav_files_processed counter").unwrap();
writeln!(&mut output, "readur_webdav_files_processed {} {}", webdav_metrics.total_files_processed, timestamp).unwrap();
writeln!(&mut output, "# HELP readur_webdav_bytes_processed Total bytes processed by WebDAV syncs").unwrap();
writeln!(&mut output, "# TYPE readur_webdav_bytes_processed counter").unwrap();
writeln!(&mut output, "readur_webdav_bytes_processed {} {}", webdav_metrics.total_bytes_processed, timestamp).unwrap();
writeln!(&mut output, "# HELP readur_webdav_avg_session_duration_seconds Average WebDAV session duration in seconds").unwrap();
writeln!(&mut output, "# TYPE readur_webdav_avg_session_duration_seconds gauge").unwrap();
writeln!(&mut output, "readur_webdav_avg_session_duration_seconds {} {}", webdav_metrics.avg_session_duration_sec, timestamp).unwrap();
writeln!(&mut output, "# HELP readur_webdav_avg_processing_rate Average WebDAV processing rate (files per second)").unwrap();
writeln!(&mut output, "# TYPE readur_webdav_avg_processing_rate gauge").unwrap();
writeln!(&mut output, "readur_webdav_avg_processing_rate {} {}", webdav_metrics.avg_processing_rate, timestamp).unwrap();
writeln!(&mut output, "# HELP readur_webdav_http_requests_total Total HTTP requests made by WebDAV syncs").unwrap();
writeln!(&mut output, "# TYPE readur_webdav_http_requests_total counter").unwrap();
writeln!(&mut output, "readur_webdav_http_requests_total {} {}", webdav_metrics.total_http_requests, timestamp).unwrap();
writeln!(&mut output, "# HELP readur_webdav_request_success_rate WebDAV HTTP request success rate (percentage)").unwrap();
writeln!(&mut output, "# TYPE readur_webdav_request_success_rate gauge").unwrap();
writeln!(&mut output, "readur_webdav_request_success_rate {} {}", webdav_metrics.request_success_rate, timestamp).unwrap();
writeln!(&mut output, "# HELP readur_webdav_avg_request_duration_ms Average WebDAV request duration in milliseconds").unwrap();
writeln!(&mut output, "# TYPE readur_webdav_avg_request_duration_ms gauge").unwrap();
writeln!(&mut output, "readur_webdav_avg_request_duration_ms {} {}", webdav_metrics.avg_request_duration_ms, timestamp).unwrap();
writeln!(&mut output, "# HELP readur_webdav_sessions_active_last_hour WebDAV sessions active in the last hour").unwrap();
writeln!(&mut output, "# TYPE readur_webdav_sessions_active_last_hour gauge").unwrap();
writeln!(&mut output, "readur_webdav_sessions_active_last_hour {} {}", webdav_metrics.sessions_last_hour, timestamp).unwrap();
writeln!(&mut output, "# HELP readur_webdav_error_rate_last_hour WebDAV error rate in the last hour (percentage)").unwrap();
writeln!(&mut output, "# TYPE readur_webdav_error_rate_last_hour gauge").unwrap();
writeln!(&mut output, "readur_webdav_error_rate_last_hour {} {}", webdav_metrics.error_rate_last_hour, timestamp).unwrap();
// Return the metrics with the correct content type
Ok((
[(header::CONTENT_TYPE, "text/plain; version=0.0.4")],
@@ -252,6 +306,22 @@ struct SecurityMetrics {
document_access_today: i64,
}
struct WebDAVMetrics {
total_sessions: i64,
successful_sessions: i64,
failed_sessions: i64,
success_rate: f64,
total_files_processed: i64,
total_bytes_processed: i64,
avg_session_duration_sec: f64,
avg_processing_rate: f64,
total_http_requests: i64,
request_success_rate: f64,
avg_request_duration_ms: f64,
sessions_last_hour: i64,
error_rate_last_hour: f64,
}
async fn collect_document_metrics(state: &Arc<AppState>) -> Result<DocumentMetrics, StatusCode> {
// Get total document count
let total_docs = sqlx::query_scalar::<_, i64>("SELECT COUNT(*) FROM documents")
@@ -576,4 +646,123 @@ async fn collect_security_metrics(state: &Arc<AppState>) -> Result<SecurityMetri
failed_logins_today,
document_access_today,
})
}
async fn collect_webdav_metrics(state: &Arc<AppState>) -> Result<WebDAVMetrics, StatusCode> {
// Get WebDAV session metrics for the last 24 hours
#[derive(sqlx::FromRow)]
struct WebDAVStats {
total_sessions: Option<i64>,
successful_sessions: Option<i64>,
failed_sessions: Option<i64>,
total_files_processed: Option<i64>,
total_bytes_processed: Option<i64>,
avg_session_duration_sec: Option<f64>,
avg_processing_rate: Option<f64>,
total_http_requests: Option<i64>,
request_success_rate: Option<f64>,
avg_request_duration_ms: Option<f64>,
}
let webdav_stats = sqlx::query_as::<_, WebDAVStats>(
r#"
SELECT
COUNT(*) as total_sessions,
COUNT(*) FILTER (WHERE status = 'completed') as successful_sessions,
COUNT(*) FILTER (WHERE status = 'failed') as failed_sessions,
COALESCE(SUM(files_processed), 0) as total_files_processed,
COALESCE(SUM(total_bytes_processed), 0) as total_bytes_processed,
COALESCE(AVG(duration_ms / 1000.0), 0) as avg_session_duration_sec,
COALESCE(AVG(processing_rate_files_per_sec), 0) as avg_processing_rate,
COALESCE(SUM(total_http_requests), 0) as total_http_requests,
CASE
WHEN SUM(total_http_requests) > 0
THEN (SUM(successful_requests)::DECIMAL / SUM(total_http_requests) * 100)
ELSE 0
END as request_success_rate,
COALESCE(
(SELECT AVG(duration_ms) FROM webdav_request_metrics
WHERE started_at > NOW() - INTERVAL '24 hours'),
0
) as avg_request_duration_ms
FROM webdav_sync_sessions
WHERE started_at > NOW() - INTERVAL '24 hours'
"#
)
.fetch_one(&state.db.pool)
.await
.map_err(|e| {
tracing::error!("Failed to get WebDAV session metrics: {}", e);
StatusCode::INTERNAL_SERVER_ERROR
})?;
// Get sessions active in last hour
let sessions_last_hour = sqlx::query_scalar::<_, i64>(
"SELECT COUNT(*) FROM webdav_sync_sessions WHERE started_at > NOW() - INTERVAL '1 hour'"
)
.fetch_one(&state.db.pool)
.await
.map_err(|e| {
tracing::error!("Failed to get recent WebDAV sessions: {}", e);
StatusCode::INTERNAL_SERVER_ERROR
})?;
// Calculate error rate for last hour
#[derive(sqlx::FromRow)]
struct ErrorRate {
total_requests: Option<i64>,
failed_requests: Option<i64>,
}
let error_stats = sqlx::query_as::<_, ErrorRate>(
r#"
SELECT
COUNT(*) as total_requests,
COUNT(*) FILTER (WHERE success = false) as failed_requests
FROM webdav_request_metrics
WHERE started_at > NOW() - INTERVAL '1 hour'
"#
)
.fetch_one(&state.db.pool)
.await
.map_err(|e| {
tracing::error!("Failed to get WebDAV error rates: {}", e);
StatusCode::INTERNAL_SERVER_ERROR
})?;
let error_rate_last_hour = if let (Some(total), Some(failed)) = (error_stats.total_requests, error_stats.failed_requests) {
if total > 0 {
(failed as f64 / total as f64) * 100.0
} else {
0.0
}
} else {
0.0
};
let total_sessions = webdav_stats.total_sessions.unwrap_or(0);
let successful_sessions = webdav_stats.successful_sessions.unwrap_or(0);
let failed_sessions = webdav_stats.failed_sessions.unwrap_or(0);
let success_rate = if total_sessions > 0 {
(successful_sessions as f64 / total_sessions as f64) * 100.0
} else {
0.0
};
Ok(WebDAVMetrics {
total_sessions,
successful_sessions,
failed_sessions,
success_rate,
total_files_processed: webdav_stats.total_files_processed.unwrap_or(0),
total_bytes_processed: webdav_stats.total_bytes_processed.unwrap_or(0),
avg_session_duration_sec: webdav_stats.avg_session_duration_sec.unwrap_or(0.0),
avg_processing_rate: webdav_stats.avg_processing_rate.unwrap_or(0.0),
total_http_requests: webdav_stats.total_http_requests.unwrap_or(0),
request_success_rate: webdav_stats.request_success_rate.unwrap_or(0.0),
avg_request_duration_ms: webdav_stats.avg_request_duration_ms.unwrap_or(0.0),
sessions_last_hour,
error_rate_last_hour,
})
}
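The nested if-let in the error-rate computation above could be flattened with a match; a hedged sketch of the same percentage logic as a standalone function:

// Returns the failed-request percentage, or 0.0 when there is no traffic.
fn error_rate(total: Option<i64>, failed: Option<i64>) -> f64 {
    match (total, failed) {
        (Some(t), Some(f)) if t > 0 => (f as f64 / t as f64) * 100.0,
        _ => 0.0,
    }
}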

View File

@@ -291,6 +291,7 @@ pub async fn trigger_deep_scan(
match smart_sync_service.perform_smart_sync(
user_id,
Some(source_id),
&webdav_service,
watch_folder,
crate::services::webdav::SmartSyncStrategy::FullDeepScan, // Force deep scan for directory reset

View File

@@ -157,7 +157,7 @@ async fn perform_sync_internal(
// Use smart sync service for intelligent scanning
let smart_sync_service = SmartSyncService::new(state.clone());
match smart_sync_service.evaluate_and_sync(user_id, &webdav_service, folder_path, Some(&progress)).await {
match smart_sync_service.evaluate_and_sync(user_id, None, &webdav_service, folder_path, Some(&progress)).await {
Ok(Some(sync_result)) => {
let folder_elapsed = folder_start.elapsed();
total_directories_scanned += sync_result.directories_scanned;

View File

@@ -761,6 +761,7 @@ impl SourceScheduler {
for watch_folder in &webdav_config.watch_folders {
match smart_sync_service.perform_smart_sync(
source_clone.user_id,
Some(source_clone.id),
&webdav_service,
watch_folder,
crate::services::webdav::SmartSyncStrategy::FullDeepScan, // Force deep scan for automatic triggers

View File

@@ -141,7 +141,7 @@ impl SourceSyncService {
// Use smart sync service for intelligent discovery
let smart_sync_service = crate::services::webdav::SmartSyncService::new(state_clone);
match smart_sync_service.evaluate_and_sync(user_id, &service, &folder_path, Some(&progress)).await {
match smart_sync_service.evaluate_and_sync(user_id, Some(source.id), &service, &folder_path, Some(&progress)).await {
Ok(Some(sync_result)) => {
info!("✅ Smart sync completed for {}: {} files found using {:?}",
folder_path, sync_result.files.len(), sync_result.strategy_used);

View File

@@ -0,0 +1,8 @@
/// Common utilities and shared functions for WebDAV services
/// Build a standardized User-Agent string for all WebDAV requests
/// This ensures consistent identification across all WebDAV operations
pub fn build_user_agent() -> String {
format!("Readur/{} (WebDAV-Sync; +https://github.com/readur)",
env!("CARGO_PKG_VERSION"))
}
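A usage sketch: the service presumably attaches this string to its HTTP client; the exact builder call below is an assumption about how a reqwest client might be configured, not code from the diff:

// Builds a reqwest client that sends the shared User-Agent on every request.
fn build_client() -> reqwest::Result<reqwest::Client> {
    reqwest::Client::builder()
        .user_agent(build_user_agent())
        .build()
}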

View File

@@ -68,10 +68,14 @@ impl WebDAVErrorClassifier {
WebDAVScanFailureType::TooManyItems
} else if error_str.contains("depth") || error_str.contains("nested") {
WebDAVScanFailureType::DepthLimit
} else if error_str.contains("size") || error_str.contains("too large") {
} else if error_str.contains("size") || error_str.contains("too large") || error_str.contains("507") || error_str.contains("insufficient storage") || error_str.contains("quota exceeded") {
WebDAVScanFailureType::SizeLimit
} else if error_str.contains("404") || error_str.contains("not found") {
WebDAVScanFailureType::ServerError // Will be further classified by HTTP status
} else if error_str.contains("405") || error_str.contains("method not allowed") || error_str.contains("propfind not allowed") {
WebDAVScanFailureType::ServerError // Method not allowed - likely PROPFIND disabled
} else if error_str.contains("423") || error_str.contains("locked") || error_str.contains("lock") {
WebDAVScanFailureType::ServerError // Resource locked
} else {
WebDAVScanFailureType::Unknown
}
@@ -125,13 +129,17 @@ impl WebDAVErrorClassifier {
fn extract_http_status(&self, error: &anyhow::Error) -> Option<i32> {
let error_str = error.to_string();
// Look for common HTTP status code patterns
// Look for common HTTP status code patterns including WebDAV-specific codes
if error_str.contains("404") {
Some(404)
} else if error_str.contains("401") {
Some(401)
} else if error_str.contains("403") {
Some(403)
} else if error_str.contains("405") {
Some(405) // Method Not Allowed (PROPFIND disabled)
} else if error_str.contains("423") {
Some(423) // Locked
} else if error_str.contains("500") {
Some(500)
} else if error_str.contains("502") {
@@ -140,8 +148,8 @@ impl WebDAVErrorClassifier {
Some(503)
} else if error_str.contains("504") {
Some(504)
} else if error_str.contains("405") {
Some(405)
} else if error_str.contains("507") {
Some(507) // Insufficient Storage
} else {
// Try to extract any 3-digit number that looks like an HTTP status
let re = regex::Regex::new(r"\b([4-5]\d{2})\b").ok()?;
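The regex fallback means any 4xx/5xx code embedded in the error text is still recovered even without an explicit branch. A self-contained sketch of the same idea (extract_status is an illustrative name, not the struct's method):

// Pulls the first 4xx/5xx status code out of an error string, if any.
fn extract_status(error_text: &str) -> Option<i32> {
    let re = regex::Regex::new(r"\b([4-5]\d{2})\b").ok()?;
    re.captures(error_text)?.get(1)?.as_str().parse().ok()
}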
@@ -370,6 +378,24 @@ impl WebDAVErrorClassifier {
directory_path
)
}
WebDAVScanFailureType::ServerError if http_status == Some(405) => {
format!(
"WebDAV PROPFIND method is not allowed for '{}'. The server may not support WebDAV or it's disabled for this path.",
directory_path
)
}
WebDAVScanFailureType::ServerError if http_status == Some(423) => {
format!(
"WebDAV resource '{}' is locked. Another process may be using it.",
directory_path
)
}
WebDAVScanFailureType::SizeLimit if http_status == Some(507) => {
format!(
"Insufficient storage quota for WebDAV path '{}'. The server has run out of space.",
directory_path
)
}
WebDAVScanFailureType::XmlParseError => {
format!(
"Malformed XML response from WebDAV server for directory '{}'. Server may be incompatible.",

View File

@@ -1,5 +1,6 @@
// Simplified WebDAV service modules - consolidated architecture
pub mod common; // Common utilities and shared functions
pub mod config;
pub mod service;
pub mod smart_sync;
@@ -8,6 +9,7 @@ pub mod error_classifier; // WebDAV error classification for generic error track
pub mod metrics_integration; // WebDAV metrics collection integration
// Re-export main types for convenience
pub use common::build_user_agent;
pub use config::{WebDAVConfig, RetryConfig, ConcurrencyConfig};
pub use service::{
WebDAVService, WebDAVDiscoveryResult, WebDAVDownloadResult, ServerCapabilities, HealthStatus, test_webdav_connection,

View File

@@ -23,6 +23,7 @@ use crate::webdav_xml_parser::{parse_propfind_response, parse_propfind_response_
use crate::mime_detection::{detect_mime_from_content, update_mime_type_with_content, MimeDetectionResult};
use super::{config::{WebDAVConfig, RetryConfig, ConcurrencyConfig}, SyncProgress};
use super::common::build_user_agent;
/// Results from WebDAV discovery including both files and directories
#[derive(Debug, Clone)]
@@ -662,8 +663,8 @@ impl WebDAVService {
let mut attempt = 0;
let mut delay = self.retry_config.initial_delay_ms;
// Build custom User-Agent header
let user_agent = format!("Readur/{} (WebDAV-Sync; +https://github.com/readur)", env!("CARGO_PKG_VERSION"));
// Build custom User-Agent header using centralized function
let user_agent = build_user_agent();
// Enhanced debug logging for HTTP requests
debug!("🌐 HTTP Request Details:");
@@ -951,8 +952,8 @@ impl WebDAVService {
/// Discovers both files and directories with their ETags for directory tracking
pub async fn discover_files_and_directories(&self, directory_path: &str, recursive: bool) -> Result<WebDAVDiscoveryResult> {
let discovery_request_id = uuid::Uuid::new_v4();
info!("[{}] 🔍 Discovering files and directories in: '{}' (recursive: {}, user_agent: 'readur-webdav-client')",
discovery_request_id, directory_path, recursive);
info!("[{}] 🔍 Discovering files and directories in: '{}' (recursive: {}, user_agent: '{}')",
discovery_request_id, directory_path, recursive, build_user_agent());
let start_time = std::time::Instant::now();
let result = if recursive {
@@ -985,8 +986,8 @@ impl WebDAVService {
_progress: Option<&SyncProgress> // Simplified: just placeholder for API compatibility
) -> Result<WebDAVDiscoveryResult> {
let discovery_request_id = uuid::Uuid::new_v4();
info!("[{}] 🔍 Discovering files and directories in: '{}' (progress tracking simplified, recursive: {}, user_agent: 'readur-webdav-client')",
discovery_request_id, directory_path, recursive);
info!("[{}] 🔍 Discovering files and directories in: '{}' (progress tracking simplified, recursive: {}, user_agent: '{}')",
discovery_request_id, directory_path, recursive, build_user_agent());
let start_time = std::time::Instant::now();
let result = if recursive {
@@ -1389,8 +1390,8 @@ impl WebDAVService {
let start_time = std::time::Instant::now();
let discovery_request_id = uuid::Uuid::new_v4();
info!("[{}] 🔍 Starting WebDAV discovery for '{}' (user: {}, source: {:?}, recursive: {}, user_agent: 'readur-webdav-client')",
discovery_request_id, directory_path, user_id, source_id, recursive);
info!("[{}] 🔍 Starting WebDAV discovery for '{}' (user: {}, source: {:?}, recursive: {}, user_agent: '{}')",
discovery_request_id, directory_path, user_id, source_id, recursive, build_user_agent());
// Check if we should skip this directory due to previous failures
let error_check_start = std::time::Instant::now();
@@ -1477,7 +1478,7 @@ impl WebDAVService {
// Track the error with enhanced context
let mut additional_context = std::collections::HashMap::new();
additional_context.insert("request_id".to_string(), serde_json::Value::String(discovery_request_id.to_string()));
additional_context.insert("user_agent".to_string(), serde_json::Value::String("readur-webdav-client".to_string()));
additional_context.insert("user_agent".to_string(), serde_json::Value::String(build_user_agent()));
additional_context.insert("recursive".to_string(), serde_json::Value::Bool(recursive));
let context = ErrorContext {
@@ -2593,7 +2594,7 @@ mod tests {
fn test_user_agent_format() {
// Test that the User-Agent header format matches the expected pattern
let expected_version = env!("CARGO_PKG_VERSION");
let expected_user_agent = format!("Readur/{} (WebDAV-Sync; +https://github.com/readur)", expected_version);
let expected_user_agent = build_user_agent();
// Create a simple WebDAV config for testing
let config = WebDAVConfig {
@@ -2609,7 +2610,7 @@ mod tests {
let service = WebDAVService::new(config).expect("Failed to create WebDAV service");
// Test that we can build the User-Agent string properly
let user_agent = format!("Readur/{} (WebDAV-Sync; +https://github.com/readur)", env!("CARGO_PKG_VERSION"));
let user_agent = build_user_agent();
assert_eq!(user_agent, expected_user_agent);
assert!(user_agent.starts_with("Readur/"));
assert!(user_agent.contains("(WebDAV-Sync; +https://github.com/readur)"));

View File

@@ -213,6 +213,7 @@ impl SmartSyncService {
pub async fn perform_smart_sync(
&self,
user_id: Uuid,
source_id: Option<Uuid>,
webdav_service: &WebDAVService,
folder_path: &str,
strategy: SmartSyncStrategy,
@@ -222,13 +223,13 @@ match strategy {
match strategy {
SmartSyncStrategy::FullDeepScan => {
info!("[{}] 🔍 Performing full deep scan for: '{}'", sync_request_id, folder_path);
self.perform_full_deep_scan(user_id, webdav_service, folder_path, _progress, sync_request_id).await
self.perform_full_deep_scan(user_id, source_id, webdav_service, folder_path, _progress, sync_request_id).await
}
SmartSyncStrategy::TargetedScan(target_dirs) => {
info!("[{}] 🎯 Performing targeted scan of {} directories: {:?}",
sync_request_id, target_dirs.len(),
target_dirs.iter().take(3).collect::<Vec<_>>());
self.perform_targeted_scan(user_id, webdav_service, target_dirs, _progress, sync_request_id).await
self.perform_targeted_scan(user_id, source_id, webdav_service, target_dirs, _progress, sync_request_id).await
}
}
}
@@ -237,6 +238,7 @@ impl SmartSyncService {
pub async fn evaluate_and_sync(
&self,
user_id: Uuid,
source_id: Option<Uuid>,
webdav_service: &WebDAVService,
folder_path: &str,
_progress: Option<&SyncProgress>, // Simplified: no complex progress tracking
@@ -252,7 +254,7 @@ Ok(None)
Ok(None)
}
SmartSyncDecision::RequiresSync(strategy) => {
let result = self.perform_smart_sync(user_id, webdav_service, folder_path, strategy, _progress).await?;
let result = self.perform_smart_sync(user_id, source_id, webdav_service, folder_path, strategy, _progress).await?;
let total_elapsed = eval_and_sync_start.elapsed();
info!("[{}] ✅ Smart sync completed for '{}' - {} files found, {} dirs scanned in {:.2}s",
eval_sync_request_id, folder_path, result.files.len(),
@@ -266,6 +268,7 @@ impl SmartSyncService {
async fn perform_full_deep_scan(
&self,
user_id: Uuid,
source_id: Option<Uuid>,
webdav_service: &WebDAVService,
folder_path: &str,
_progress: Option<&SyncProgress>, // Simplified: no complex progress tracking
@@ -277,7 +280,7 @@ true, // recursive
true, // recursive
user_id,
&self.error_tracker,
None, // source_id - using None for smart sync operations
source_id, // Pass the actual source_id
).await?;
info!("Deep scan found {} files and {} directories in folder {}",
@@ -341,6 +344,7 @@ impl SmartSyncService {
async fn perform_targeted_scan(
&self,
user_id: Uuid,
source_id: Option<Uuid>,
webdav_service: &WebDAVService,
target_directories: Vec<String>,
_progress: Option<&SyncProgress>, // Simplified: no complex progress tracking
@@ -361,7 +365,7 @@ true, // recursive
true, // recursive
user_id,
&self.error_tracker,
None, // source_id - using None for smart sync operations
source_id, // Pass the actual source_id
).await {
Ok(discovery_result) => {
all_files.extend(discovery_result.files);

View File

@@ -1,7 +1,7 @@
use anyhow::Result;
use std::sync::Arc;
use std::time::{Duration, Instant};
use std::collections::HashMap;
use std::collections::{HashMap, VecDeque};
use tokio::sync::RwLock;
use tracing::{debug, error, info, warn};
use uuid::Uuid;
@@ -9,6 +9,7 @@ use reqwest::header::HeaderMap;
use crate::db::Database;
use crate::models::webdav_metrics::*;
use crate::services::webdav::build_user_agent;
/// Maximum number of response times to keep in memory to prevent unbounded growth
const MAX_RESPONSE_TIMES: usize = 1000;
@@ -89,7 +90,7 @@ struct DirectoryCounters {
errors_encountered: i32,
error_types: Vec<String>,
warnings_count: i32,
response_times: Vec<i64>, // This will be bounded by MAX_RESPONSE_TIMES
response_times: VecDeque<i64>, // Use VecDeque for O(1) front removal
etag_matches: i32,
etag_mismatches: i32,
cache_hits: i32,
@@ -228,6 +229,12 @@ impl WebDAVMetricsTracker {
};
self.db.update_webdav_sync_session(session_id, &update).await?;
// Small delay to ensure all previous HTTP request inserts are committed
// This addresses a transaction isolation issue where the finalize function
// can't see the requests that were just inserted
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
self.db.finalize_webdav_sync_session(session_id).await?;
info!(
@@ -483,9 +490,10 @@ impl WebDAVMetricsTracker {
last_modified,
content_type,
remote_ip,
user_agent: Some("readur-webdav-client".to_string()),
user_agent: Some(build_user_agent()),
};
tracing::debug!("Recording request with session_id: {:?}", session_id);
let request_id = self.db.record_webdav_request_metric(&metric).await?;
// Update active directory counters if applicable
@@ -495,10 +503,10 @@ impl WebDAVMetricsTracker {
scan.last_activity = Instant::now();
scan.counters.http_requests_made += 1;
// Implement bounded circular buffer for response times
scan.counters.response_times.push(duration.as_millis() as i64);
// Implement bounded circular buffer for response times using VecDeque for O(1) operations
scan.counters.response_times.push_back(duration.as_millis() as i64);
if scan.counters.response_times.len() > MAX_RESPONSE_TIMES {
scan.counters.response_times.remove(0); // Remove oldest entry
scan.counters.response_times.pop_front(); // O(1) removal of oldest entry
}
match request_type {
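The switch from Vec to VecDeque turns the oldest-sample eviction from O(n) (Vec::remove(0) shifts every element) into O(1). A standalone sketch of the bounded buffer, assuming the same MAX_RESPONSE_TIMES cap:

use std::collections::VecDeque;

const MAX_RESPONSE_TIMES: usize = 1000;

// Keeps at most MAX_RESPONSE_TIMES samples, dropping the oldest in O(1).
fn record_response_time(buf: &mut VecDeque<i64>, ms: i64) {
    buf.push_back(ms);
    if buf.len() > MAX_RESPONSE_TIMES {
        buf.pop_front();
    }
}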

View File

@@ -18,7 +18,7 @@ async fn test_retry_config_default() {
assert_eq!(retry_config.max_retries, 3);
assert_eq!(retry_config.initial_delay_ms, 1000);
assert_eq!(retry_config.max_delay_ms, 30000);
assert_eq!(retry_config.max_delay_ms, 10000);
assert_eq!(retry_config.backoff_multiplier, 2.0);
assert_eq!(retry_config.timeout_seconds, 30);
}

View File

@@ -1,43 +1,64 @@
use anyhow::Result;
use std::sync::Arc;
use std::time::Duration;
use uuid::Uuid;
use readur::db::Database;
use readur::models::webdav_metrics::*;
use readur::services::webdav_metrics_tracker::WebDAVMetricsTracker;
use readur::{
db::Database,
models::webdav_metrics::*,
models::{CreateUser, UserRole},
services::webdav_metrics_tracker::WebDAVMetricsTracker,
test_helpers::create_test_app_state,
};
/// Helper to create a test database with temporary configuration
async fn create_test_db() -> Result<Database> {
let db_url = std::env::var("DATABASE_TEST_URL")
.unwrap_or_else(|_| "postgresql://postgres:password@localhost:5432/readur_test".to_string());
/// Helper to create a test user using the proper models
async fn create_test_user(db: &Database) -> Result<Uuid> {
let user_suffix = Uuid::new_v4().simple().to_string();
let create_user = CreateUser {
username: format!("testuser_{}", user_suffix),
email: format!("test_{}@example.com", user_suffix),
password: "test_password".to_string(),
role: Some(UserRole::User),
};
Database::new_with_pool_config(&db_url, 5, 1).await
let created_user = db.create_user(create_user).await?;
Ok(created_user.id)
}
/// Helper to create a test user
async fn create_test_user(db: &Database) -> Result<Uuid> {
let user_id = Uuid::new_v4();
/// Helper to create a test WebDAV source
async fn create_test_source(db: &Database, user_id: Uuid) -> Result<Uuid> {
let source_id = Uuid::new_v4();
sqlx::query(
"INSERT INTO users (id, username, email, password_hash, role) VALUES ($1, $2, $3, $4, 'user')"
"INSERT INTO sources (id, user_id, name, source_type, config, enabled, created_at, updated_at)
VALUES ($1, $2, $3, 'webdav', $4, true, NOW(), NOW())"
)
.bind(source_id)
.bind(user_id)
.bind(format!("testuser_{}", user_id))
.bind(format!("test_{}@example.com", user_id))
.bind("dummy_hash")
.bind(format!("Test WebDAV Source {}", source_id))
.bind(serde_json::json!({
"server_url": "https://example.com/webdav",
"username": "testuser",
"password": "testpass",
"watch_folders": ["/Documents"],
"file_extensions": ["pdf", "txt", "doc", "docx"],
"auto_sync": true,
"sync_interval_minutes": 60
}))
.execute(&db.pool)
.await?;
Ok(user_id)
Ok(source_id)
}
/// Test basic session creation and management
#[tokio::test]
async fn test_webdav_session_lifecycle() -> Result<()> {
let db = create_test_db().await?;
let user_id = create_test_user(&db).await?;
let source_id = Some(Uuid::new_v4());
let app_state = create_test_app_state().await
.map_err(|e| anyhow::anyhow!("Failed to create test app state: {}", e))?;
let user_id = create_test_user(&app_state.db).await?;
let source_id = Some(create_test_source(&app_state.db, user_id).await?);
let metrics_tracker = WebDAVMetricsTracker::new(db.clone());
let metrics_tracker = WebDAVMetricsTracker::new(app_state.db.clone());
// Start a sync session
let session_id = metrics_tracker
@@ -108,11 +129,12 @@ async fn test_webdav_session_lifecycle() -> Result<()> {
/// Test directory metrics tracking
#[tokio::test]
async fn test_directory_metrics_tracking() -> Result<()> {
let db = create_test_db().await?;
let user_id = create_test_user(&db).await?;
let source_id = Some(Uuid::new_v4());
let app_state = create_test_app_state().await
.map_err(|e| anyhow::anyhow!("Failed to create test app state: {}", e))?;
let user_id = create_test_user(&app_state.db).await?;
let source_id = Some(create_test_source(&app_state.db, user_id).await?);
let metrics_tracker = WebDAVMetricsTracker::new(db.clone());
let metrics_tracker = WebDAVMetricsTracker::new(app_state.db.clone());
// Start session
let session_id = metrics_tracker
@@ -213,11 +235,12 @@ async fn test_directory_metrics_tracking() -> Result<()> {
/// Test HTTP request metrics recording
#[tokio::test]
async fn test_http_request_metrics() -> Result<()> {
let db = create_test_db().await?;
let user_id = create_test_user(&db).await?;
let source_id = Some(Uuid::new_v4());
let app_state = create_test_app_state().await
.map_err(|e| anyhow::anyhow!("Failed to create test app state: {}", e))?;
let user_id = create_test_user(&app_state.db).await?;
let source_id = Some(create_test_source(&app_state.db, user_id).await?);
let metrics_tracker = WebDAVMetricsTracker::new(db.clone());
let metrics_tracker = WebDAVMetricsTracker::new(app_state.db.clone());
// Start session and directory
let session_id = metrics_tracker
@@ -326,11 +349,22 @@ async fn test_http_request_metrics() -> Result<()> {
/// Test metrics summary generation
#[tokio::test]
async fn test_metrics_summary() -> Result<()> {
let db = create_test_db().await?;
let user_id = create_test_user(&db).await?;
let source_id = Some(Uuid::new_v4());
let app_state = create_test_app_state().await
.map_err(|e| anyhow::anyhow!("Failed to create test app state: {}", e))?;
let user_id = create_test_user(&app_state.db).await?;
let source_id = Some(create_test_source(&app_state.db, user_id).await?);
let metrics_tracker = WebDAVMetricsTracker::new(db.clone());
let metrics_tracker = WebDAVMetricsTracker::new(app_state.db.clone());
// Create multiple sessions with various outcomes
for i in 0..3 {
@@ -424,11 +448,12 @@ async fn test_metrics_summary() -> Result<()> {
/// Test performance insights generation
#[tokio::test]
async fn test_performance_insights() -> Result<()> {
let db = create_test_db().await?;
let user_id = create_test_user(&db).await?;
let source_id = Some(Uuid::new_v4());
let app_state = create_test_app_state().await
.map_err(|e| anyhow::anyhow!("Failed to create test app state: {}", e))?;
let user_id = create_test_user(&app_state.db).await?;
let source_id = Some(create_test_source(&app_state.db, user_id).await?);
let metrics_tracker = WebDAVMetricsTracker::new(db.clone());
let metrics_tracker = WebDAVMetricsTracker::new(app_state.db.clone());
// Create a session with detailed metrics
let session_id = metrics_tracker
@@ -515,11 +540,12 @@ async fn test_performance_insights() -> Result<()> {
/// Integration test demonstrating the complete metrics collection workflow
#[tokio::test]
async fn test_complete_metrics_workflow() -> Result<()> {
let db = create_test_db().await?;
let user_id = create_test_user(&db).await?;
let source_id = Some(Uuid::new_v4());
let app_state = create_test_app_state().await
.map_err(|e| anyhow::anyhow!("Failed to create test app state: {}", e))?;
let user_id = create_test_user(&app_state.db).await?;
let source_id = Some(create_test_source(&app_state.db, user_id).await?);
let metrics_tracker = WebDAVMetricsTracker::new(db.clone());
let metrics_tracker = WebDAVMetricsTracker::new(app_state.db.clone());
println!("🚀 Starting complete WebDAV metrics workflow test");