diff --git a/.gitignore b/.gitignore
index df61bc3..ff1ce92 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,4 @@ node_modules/
 assets/
 frontend/dist/
 .claude/
+uploads/
diff --git a/src/main.rs b/src/main.rs
index 46b468d..ad77b49 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,6 +1,6 @@
 use axum::{
     http::StatusCode,
-    response::{Json, Html},
+    response::Html,
     routing::get,
     Router,
 };
@@ -144,8 +144,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
         .nest("/api/users", readur::routes::users::router())
         .nest("/api/webdav", readur::routes::webdav::router())
         .merge(readur::swagger::create_swagger_router())
-        .nest_service("/", ServeDir::new("/app/frontend").fallback(ServeFile::new("/app/frontend/index.html")))
-        .fallback(serve_spa)
+        .fallback_service(ServeDir::new("frontend/dist").fallback(ServeFile::new("frontend/dist/index.html")))
         .layer(CorsLayer::permissive())
         .with_state(state.clone());
 
@@ -215,7 +214,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
 }
 
 async fn serve_spa() -> Result<Html<String>, StatusCode> {
-    match tokio::fs::read_to_string("/app/frontend/index.html").await {
+    match tokio::fs::read_to_string("frontend/dist/index.html").await {
         Ok(html) => Ok(Html(html)),
         Err(_) => Err(StatusCode::NOT_FOUND),
     }
diff --git a/src/routes/documents.rs b/src/routes/documents.rs
index 216112d..913cd14 100644
--- a/src/routes/documents.rs
+++ b/src/routes/documents.rs
@@ -27,10 +27,10 @@ pub fn router() -> Router<Arc<AppState>> {
     Router::new()
         .route("/", post(upload_document))
         .route("/", get(list_documents))
-        .route("/:id/download", get(download_document))
-        .route("/:id/view", get(view_document))
-        .route("/:id/thumbnail", get(get_document_thumbnail))
-        .route("/:id/ocr", get(get_document_ocr))
+        .route("/{id}/download", get(download_document))
+        .route("/{id}/view", get(view_document))
+        .route("/{id}/thumbnail", get(get_document_thumbnail))
+        .route("/{id}/ocr", get(get_document_ocr))
 }
 
 #[utoipa::path(
diff --git a/src/routes/notifications.rs b/src/routes/notifications.rs
index 1e38071..4e6efbf 100644
--- a/src/routes/notifications.rs
+++ b/src/routes/notifications.rs
@@ -24,9 +24,9 @@ pub fn router() -> Router<Arc<AppState>> {
     Router::new()
         .route("/", get(get_notifications))
         .route("/summary", get(get_notification_summary))
-        .route("/:id/read", post(mark_notification_read))
+        .route("/{id}/read", post(mark_notification_read))
         .route("/read-all", post(mark_all_notifications_read))
-        .route("/:id", delete(delete_notification))
+        .route("/{id}", delete(delete_notification))
 }
 
 #[utoipa::path(
diff --git a/src/routes/users.rs b/src/routes/users.rs
index 96de24d..8a456cc 100644
--- a/src/routes/users.rs
+++ b/src/routes/users.rs
@@ -17,7 +17,7 @@ use crate::{
 pub fn router() -> Router<Arc<AppState>> {
     Router::new()
         .route("/", get(list_users).post(create_user))
-        .route("/:id", get(get_user).put(update_user).delete(delete_user))
+        .route("/{id}", get(get_user).put(update_user).delete(delete_user))
 }
 
 #[utoipa::path(
diff --git a/src/routes/webdav.rs b/src/routes/webdav.rs
index b672d3b..b5c147e 100644
--- a/src/routes/webdav.rs
+++ b/src/routes/webdav.rs
@@ -64,7 +64,7 @@ async fn get_user_webdav_config(state: &Arc<AppState>, user_id: uuid::Uuid) -> R
         password,
         watch_folders: settings.webdav_watch_folders,
         file_extensions: settings.webdav_file_extensions,
-        timeout_seconds: 30, // Default timeout
+        timeout_seconds: 300, // 5 minutes timeout for crawl estimation
         server_type: Some("nextcloud".to_string()), // Default to Nextcloud
     })
 }
@@ -99,7 +99,7 @@ async fn test_webdav_connection(
         password: test_config.password.clone(),
         watch_folders: Vec::new(),
         file_extensions: Vec::new(),
-        timeout_seconds: 30,
+        timeout_seconds: 300, // 5 minutes timeout for crawl estimation
         server_type: test_config.server_type.clone(),
     };
 
diff --git a/src/webdav_service.rs b/src/webdav_service.rs
index 6d42b58..1abafba 100644
--- a/src/webdav_service.rs
+++ b/src/webdav_service.rs
@@ -39,7 +39,7 @@ impl Default for RetryConfig {
             initial_delay_ms: 1000, // 1 second
             max_delay_ms: 30000, // 30 seconds
             backoff_multiplier: 2.0,
-            timeout_seconds: 120, // 2 minutes total timeout
+            timeout_seconds: 300, // 5 minutes total timeout for crawl operations
         }
     }
 }
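Side note on the route changes above: axum 0.8 switched path captures from `/:id` to `/{id}`, and the SPA is now served straight out of `frontend/dist` through `fallback_service` rather than a root `nest_service`. Below is a minimal sketch of the combined pattern, assuming axum 0.8 and tower-http with the `fs` feature; the handler name and its body are illustrative placeholders, not readur's actual code.

```rust
use axum::{extract::Path, routing::get, Router};
use tower_http::services::{ServeDir, ServeFile};

// Placeholder handler: "{id}" in the route binds the captured segment here.
async fn download_document(Path(id): Path<String>) -> String {
    format!("would stream document {id}")
}

fn app() -> Router {
    Router::new()
        // axum 0.8 capture syntax; the old "/:id" form now panics when the
        // router is built, which is why every route file in this diff changes.
        .route("/api/documents/{id}/download", get(download_document))
        // Requests matching no API route fall through to the built frontend;
        // unknown paths get index.html so client-side routing can take over.
        .fallback_service(
            ServeDir::new("frontend/dist")
                .fallback(ServeFile::new("frontend/dist/index.html")),
        )
}
```

Using `fallback_service` instead of `nest_service("/", ...)` keeps API routes at full precedence: the static file service only runs when nothing else matched.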