feat(webdav): add some stress test utilities
parent 7863b9100f
commit 88c376f655

@@ -0,0 +1,401 @@
name: WebDAV Stress Tests

on:
  push:
    branches:
      - master
      - main
    paths:
      - 'src/services/webdav/**'
      - 'src/routes/webdav/**'
      - 'src/webdav_xml_parser.rs'
      - 'tests/stress/**'
      - '.github/workflows/webdav-stress-test.yml'
  pull_request:
    branches:
      - master
      - main
    paths:
      - 'src/services/webdav/**'
      - 'src/routes/webdav/**'
      - 'src/webdav_xml_parser.rs'
      - 'tests/stress/**'
      - '.github/workflows/webdav-stress-test.yml'
  schedule:
    # Run stress tests daily at 2 AM UTC
    - cron: '0 2 * * *'
  workflow_dispatch:
    inputs:
      stress_level:
        description: 'Stress test intensity level'
        required: true
        default: 'medium'
        type: choice
        options:
          - light
          - medium
          - heavy
          - extreme
      timeout_minutes:
        description: 'Test timeout in minutes'
        required: false
        default: '30'
        type: string

env:
  CARGO_TERM_COLOR: always
  RUST_LOG: debug,webdav_stress=trace
  RUST_BACKTRACE: full
  DATABASE_URL: postgresql://readur_test:readur_test@localhost:5433/readur_test

jobs:
  webdav-stress-tests:
    name: WebDAV Stress Testing
    runs-on: ubuntu-latest
    timeout-minutes: ${{ fromJson(github.event.inputs.timeout_minutes || '45') }}

    services:
      postgres:
        image: postgres:17-alpine
        credentials:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
        env:
          POSTGRES_USER: readur_test
          POSTGRES_PASSWORD: readur_test
          POSTGRES_DB: readur_test
        ports:
          - 5433:5432
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
          --tmpfs /var/lib/postgresql/data:rw,noexec,nosuid,size=512m

    steps:
      - name: Free disk space
        run: |
          echo "=== Initial disk usage ==="
          df -h

          # Remove unnecessary packages to free up space
          sudo rm -rf /usr/share/dotnet
          sudo rm -rf /opt/ghc
          sudo rm -rf "/usr/local/share/boost"
          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
          sudo rm -rf /usr/local/lib/android
          sudo rm -rf /opt/hostedtoolcache/CodeQL
          sudo rm -rf /usr/share/swift
          sudo apt-get clean
          sudo docker system prune -af --volumes

          # Set up efficient temp directories
          echo "TMPDIR=${{ runner.temp }}" >> $GITHUB_ENV
          echo "WEBDAV_TEST_ROOT=${{ runner.temp }}/webdav-stress" >> $GITHUB_ENV
          mkdir -p ${{ runner.temp }}/webdav-stress

          echo "=== Disk usage after cleanup ==="
          df -h

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Checkout code
        uses: actions/checkout@v5

      - name: Setup Rust
        uses: dtolnay/rust-toolchain@stable
        with:
          components: rustfmt, clippy

      - name: Cache cargo registry
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
          key: ${{ runner.os }}-cargo-stress-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-stress-

      - name: Cache target directory
        uses: actions/cache@v4
        with:
          path: target
          key: ${{ runner.os }}-cargo-target-stress-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('**/*.rs') }}
          restore-keys: |
            ${{ runner.os }}-cargo-target-stress-${{ hashFiles('**/Cargo.lock') }}-
            ${{ runner.os }}-cargo-target-stress-

      - name: Setup Dufs WebDAV server
        run: |
          # Install Dufs (Rust-based WebDAV server)
          cargo install dufs --features webdav

          # Create WebDAV test directory structure
          mkdir -p ${{ env.WEBDAV_TEST_ROOT }}/webdav-server

          # Start Dufs server in background
          dufs ${{ env.WEBDAV_TEST_ROOT }}/webdav-server \
            --bind 0.0.0.0:8080 \
            --enable-cors \
            --allow-all \
            --auth ${{ secrets.WEBDAV_TEST_USERNAME || 'testuser' }}:${{ secrets.WEBDAV_TEST_PASSWORD || 'securepassword123' }} \
            --log-level debug > dufs.log 2>&1 &

          echo $! > dufs.pid
          echo "DUFS_PID=$(cat dufs.pid)" >> $GITHUB_ENV

          # Store credentials in environment for reuse
          echo "WEBDAV_TEST_USERNAME=${{ secrets.WEBDAV_TEST_USERNAME || 'testuser' }}" >> $GITHUB_ENV
          echo "WEBDAV_TEST_PASSWORD=${{ secrets.WEBDAV_TEST_PASSWORD || 'securepassword123' }}" >> $GITHUB_ENV

          # Wait for the server to start, backing off between attempts
          attempt=1
          max_attempts=30
          base_delay=1

          while [ $attempt -le $max_attempts ]; do
            if curl -f "http://${{ secrets.WEBDAV_TEST_USERNAME || 'testuser' }}:${{ secrets.WEBDAV_TEST_PASSWORD || 'securepassword123' }}@localhost:8080/" > /dev/null 2>&1; then
              echo "Dufs WebDAV server is ready after $attempt attempts"
              break
            fi

            # Grow the delay linearly with a little jitter
            delay=$(( base_delay * attempt + RANDOM % 3 ))
            echo "Waiting for Dufs server... (attempt $attempt/$max_attempts, delay ${delay}s)"
            sleep $delay
            attempt=$(( attempt + 1 ))
          done

          # Verify server with proper credentials
          if ! curl -f "http://${{ secrets.WEBDAV_TEST_USERNAME || 'testuser' }}:${{ secrets.WEBDAV_TEST_PASSWORD || 'securepassword123' }}@localhost:8080/" > /dev/null 2>&1; then
            echo "ERROR: Dufs server failed to start!"
            cat dufs.log
            exit 1
          fi

          echo "WebDAV server ready at http://localhost:8080"

      - name: Generate complex test data structures
        run: |
          chmod +x scripts/generate-webdav-test-data.sh
          ./scripts/generate-webdav-test-data.sh \
            --webdav-root "${{ env.WEBDAV_TEST_ROOT }}/webdav-server" \
            --stress-level "${{ github.event.inputs.stress_level || 'medium' }}" \
            --include-git-repos \
            --include-permission-issues \
            --include-symlinks \
            --include-large-directories \
            --include-unicode-names \
            --include-problematic-files
        env:
          STRESS_LEVEL: ${{ github.event.inputs.stress_level || 'medium' }}

      - name: Build readur with stress testing features
        run: |
          cargo build --release --features stress-testing

      - name: Start readur server for stress testing
        run: |
          # Set up directories
          mkdir -p uploads watch stress-test-logs

          # Start server with stress testing configuration
          ./target/release/readur > readur-stress.log 2>&1 &
          echo $! > readur.pid
          echo "READUR_PID=$(cat readur.pid)" >> $GITHUB_ENV

          # Wait for readur to start
          for i in {1..30}; do
            if curl -f http://localhost:8000/api/health > /dev/null 2>&1; then
              echo "Readur server is ready for stress testing"
              break
            fi
            echo "Waiting for readur server... ($i/30)"
            sleep 2
          done

          if ! curl -f http://localhost:8000/api/health > /dev/null 2>&1; then
            echo "ERROR: Readur server failed to start!"
            cat readur-stress.log
            exit 1
          fi
        env:
          DATABASE_URL: ${{ env.DATABASE_URL }}
          JWT_SECRET: stress-test-secret
          SERVER_ADDRESS: 0.0.0.0:8000
          UPLOAD_PATH: ./uploads
          WATCH_FOLDER: ./watch
          RUST_LOG: debug,webdav=trace
          WEBDAV_STRESS_TESTING: "true"
          WEBDAV_LOOP_DETECTION_ENABLED: "true"
          WEBDAV_MAX_SCAN_DEPTH: "50"
          WEBDAV_SCAN_TIMEOUT_SECONDS: "300"

      - name: Run WebDAV infinite loop detection tests
        id: loop_detection
        run: |
          echo "=== Starting WebDAV Loop Detection Stress Tests ==="

          # Run the stress tests with loop monitoring
          timeout 1800s cargo test --release --test webdav_stress_tests \
            --features stress-testing -- \
            --test-threads=4 \
            --nocapture \
            test_infinite_loop_detection || test_exit_code=$?

          # Check if tests passed or timed out due to infinite loops
          if [ "${test_exit_code:-0}" -eq 124 ]; then
            echo "::error::Tests timed out - possible infinite loop detected!"
            echo "INFINITE_LOOP_DETECTED=true" >> $GITHUB_ENV
            exit 1
          elif [ "${test_exit_code:-0}" -ne 0 ]; then
            echo "::error::Stress tests failed with exit code: ${test_exit_code}"
            exit $test_exit_code
          fi

          echo "Loop detection tests completed successfully"
        env:
          WEBDAV_SERVER_URL: http://localhost:8080
          WEBDAV_USERNAME: ${{ secrets.WEBDAV_TEST_USERNAME || 'testuser' }}
          WEBDAV_PASSWORD: ${{ secrets.WEBDAV_TEST_PASSWORD || 'securepassword123' }}
          STRESS_LEVEL: ${{ github.event.inputs.stress_level || 'medium' }}
          TEST_TIMEOUT_SECONDS: 1800

      - name: Run WebDAV directory scanning stress tests
        run: |
          echo "=== Starting Directory Scanning Stress Tests ==="

          cargo test --release --test webdav_stress_tests \
            --features stress-testing -- \
            --test-threads=2 \
            --nocapture \
            test_directory_scanning_stress
        env:
          WEBDAV_SERVER_URL: http://localhost:8080
          WEBDAV_USERNAME: ${{ secrets.WEBDAV_TEST_USERNAME || 'testuser' }}
          WEBDAV_PASSWORD: ${{ secrets.WEBDAV_TEST_PASSWORD || 'securepassword123' }}

      - name: Run WebDAV concurrent access stress tests
        run: |
          echo "=== Starting Concurrent Access Stress Tests ==="

          cargo test --release --test webdav_stress_tests \
            --features stress-testing -- \
            --test-threads=8 \
            --nocapture \
            test_concurrent_webdav_access
        env:
          WEBDAV_SERVER_URL: http://localhost:8080
          WEBDAV_USERNAME: ${{ secrets.WEBDAV_TEST_USERNAME || 'testuser' }}
          WEBDAV_PASSWORD: ${{ secrets.WEBDAV_TEST_PASSWORD || 'securepassword123' }}

      - name: Run WebDAV edge case handling tests
        run: |
          echo "=== Starting Edge Case Handling Tests ==="

          cargo test --release --test webdav_stress_tests \
            --features stress-testing -- \
            --test-threads=2 \
            --nocapture \
            test_edge_case_handling
        env:
          WEBDAV_SERVER_URL: http://localhost:8080
          WEBDAV_USERNAME: ${{ secrets.WEBDAV_TEST_USERNAME || 'testuser' }}
          WEBDAV_PASSWORD: ${{ secrets.WEBDAV_TEST_PASSWORD || 'securepassword123' }}

      - name: Analyze WebDAV performance metrics
        if: always()
        run: |
          echo "=== WebDAV Performance Analysis ==="

          # Run performance analysis
          if [ -f "stress-test-metrics.json" ]; then
            cargo run --release --bin analyze-webdav-performance -- \
              --metrics-file stress-test-metrics.json \
              --output-format github-summary
          fi

          # Generate summary report
          echo "## WebDAV Stress Test Summary" >> $GITHUB_STEP_SUMMARY
          echo "- **Stress Level**: ${{ github.event.inputs.stress_level || 'medium' }}" >> $GITHUB_STEP_SUMMARY
          echo "- **Test Duration**: $(date -d @$SECONDS -u +%H:%M:%S)" >> $GITHUB_STEP_SUMMARY
          echo "- **Infinite Loop Detection**: ${INFINITE_LOOP_DETECTED:-false}" >> $GITHUB_STEP_SUMMARY

          if [ -f "webdav-performance-report.md" ]; then
            cat webdav-performance-report.md >> $GITHUB_STEP_SUMMARY
          fi

      - name: Collect and analyze logs
        if: always()
        run: |
          echo "=== Collecting logs for analysis ==="

          # Create logs directory
          mkdir -p stress-test-artifacts/logs

          # Collect all relevant logs
          cp readur-stress.log stress-test-artifacts/logs/ || echo "No readur log"
          cp dufs.log stress-test-artifacts/logs/ || echo "No dufs log"

          # Analyze logs for loop patterns
          if [ -f scripts/analyze-webdav-loops.py ]; then
            python3 scripts/analyze-webdav-loops.py \
              --log-file stress-test-artifacts/logs/readur-stress.log \
              --output stress-test-artifacts/loop-analysis.json
          fi

          # Check for problematic patterns
          echo "=== Log Analysis Results ==="
          if grep -q "already scanned directory" stress-test-artifacts/logs/readur-stress.log 2>/dev/null; then
            echo "::warning::Detected repeated directory scanning patterns"
          fi

          if grep -q "timeout" stress-test-artifacts/logs/readur-stress.log 2>/dev/null; then
            echo "::warning::Detected timeout issues during WebDAV operations"
          fi

      - name: Upload stress test artifacts
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: webdav-stress-test-artifacts-${{ github.run_id }}
          path: |
            stress-test-artifacts/
            stress-test-metrics.json
            webdav-performance-report.md
          retention-days: 30

      - name: Report critical issues
        if: failure() && env.INFINITE_LOOP_DETECTED == 'true'
        run: |
          echo "::error title=Infinite Loop Detected::WebDAV sync entered an infinite loop during stress testing"
          echo "::error::Check the uploaded artifacts for detailed analysis"

          # Create GitHub issue for infinite loop detection
          if [ "${{ github.event_name }}" = "schedule" ] || [ "${{ github.event_name }}" = "push" ]; then
            echo "This would create a GitHub issue for infinite loop detection"
          fi

      - name: Cleanup
        if: always()
        run: |
          # Stop servers
          if [ -n "$READUR_PID" ] && kill -0 $READUR_PID 2>/dev/null; then
            kill $READUR_PID || true
          fi

          if [ -n "$DUFS_PID" ] && kill -0 $DUFS_PID 2>/dev/null; then
            kill $DUFS_PID || true
          fi

          # Clean up temp files
          rm -rf ${{ env.WEBDAV_TEST_ROOT }} || true

          echo "=== Final disk usage ==="
          df -h
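The workflow can also be started by hand through its workflow_dispatch inputs. A minimal sketch with the GitHub CLI (assuming the file is committed as .github/workflows/webdav-stress-test.yml, as the paths filter above suggests):

    gh workflow run webdav-stress-test.yml -f stress_level=heavy -f timeout_minutes=60
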
Cargo.toml (16 changed lines)

@@ -11,6 +11,14 @@ path = "src/main.rs"
name = "test_runner"
path = "src/bin/test_runner.rs"

[[bin]]
name = "analyze-webdav-performance"
path = "src/bin/analyze-webdav-performance.rs"

[[bin]]
name = "webdav_loop_detection_stress"
path = "tests/stress/webdav_loop_detection_stress.rs"


[dependencies]

@@ -71,6 +79,7 @@ default = ["ocr", "s3"]
ocr = ["tesseract", "image", "imageproc", "raw-cpuid"]
s3 = ["aws-config", "aws-sdk-s3", "aws-credential-types", "aws-types"]
test-utils = ["testcontainers", "testcontainers-modules"]
stress-testing = ["test-utils"]

[dev-dependencies]
tempfile = "3"

@@ -96,3 +105,10 @@ debug = false
name = "integration_smart_sync_deep_scan"
path = "tests/integration_smart_sync_deep_scan.rs"
harness = true

# WebDAV Stress Testing configuration
[[test]]
name = "webdav_stress_tests"
path = "tests/webdav_stress_tests.rs"
required-features = ["stress-testing"]
harness = true
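Because the webdav_stress_tests target declares required-features = ["stress-testing"], the suite only compiles when that feature is enabled; the invocation used elsewhere in this commit is:

    cargo test --release --features stress-testing --test webdav_stress_tests -- --test-threads=4 --nocapture
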
@@ -0,0 +1,81 @@
# Multi-stage Dockerfile for WebDAV Stress Testing
FROM rust:1.75-alpine AS builder

# Install build dependencies
RUN apk add --no-cache \
    musl-dev \
    openssl-dev \
    pkgconfig \
    git \
    curl \
    build-base

# Set working directory
WORKDIR /app

# Copy manifests
COPY Cargo.toml Cargo.lock ./

# Copy source code
COPY src ./src
COPY tests ./tests
COPY scripts ./scripts

# Build with stress testing features
RUN cargo build --release --features stress-testing

# Runtime stage
FROM alpine:3.18

# Install runtime dependencies
RUN apk add --no-cache \
    ca-certificates \
    curl \
    python3 \
    py3-pip \
    bash \
    jq \
    git

# Install Python dependencies for analysis scripts
RUN pip3 install --no-cache-dir \
    requests \
    python-dateutil \
    pyyaml

# Create app user
RUN addgroup -g 1000 readur && \
    adduser -D -s /bin/bash -u 1000 -G readur readur

# Set working directory
WORKDIR /app

# Copy binary from builder
COPY --from=builder /app/target/release/readur ./readur

# Copy scripts and configurations
COPY --from=builder /app/scripts ./scripts
COPY --from=builder /app/tests/stress ./tests/stress

# Make scripts executable
RUN chmod +x ./scripts/*.sh ./scripts/*.py

# Create directories for test results
RUN mkdir -p /tmp/stress-results /app/logs && \
    chown -R readur:readur /app /tmp/stress-results

# Switch to non-root user
USER readur

# Environment variables for stress testing
ENV RUST_LOG=debug,webdav_stress=trace
ENV RUST_BACKTRACE=1
ENV WEBDAV_STRESS_TESTING=true
ENV WEBDAV_LOOP_DETECTION_ENABLED=true

# Health check for container
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8000/api/health || exit 1

# Default command runs stress tests
CMD ["./scripts/run-stress-tests.sh"]
@@ -129,6 +129,63 @@ services:
    profiles:
      - e2e-tests

  # Dufs WebDAV server for stress testing
  dufs_webdav:
    image: sigoden/dufs:latest
    container_name: readur_dufs_webdav
    command: >
      /data
      --bind 0.0.0.0:8080
      --enable-cors
      --allow-all
      --auth webdav_user:webdav_pass
      --log-level debug
    volumes:
      # Using tmpfs for ephemeral WebDAV test data
      - type: tmpfs
        target: /data
        tmpfs:
          size: 1G
    ports:
      - "8080:8080"
    healthcheck:
      test: ["CMD", "wget", "--spider", "--auth-no-challenge", "--http-user=webdav_user", "--http-password=webdav_pass", "http://localhost:8080/"]
      interval: 5s
      timeout: 3s
      retries: 5
      start_period: 10s
    networks:
      - readur_test_network
    profiles:
      - webdav-stress

  # WebDAV stress test orchestrator
  webdav_stress_orchestrator:
    build:
      context: .
      dockerfile: Dockerfile.stress
    container_name: readur_stress_orchestrator
    environment:
      - RUST_LOG=debug,webdav_stress=trace
      - WEBDAV_DUFS_URL=http://dufs_webdav:8080
      - WEBDAV_USERNAME=webdav_user
      - WEBDAV_PASSWORD=webdav_pass
      - STRESS_TEST_DURATION=300
      - LOOP_DETECTION_TIMEOUT=60
      - MAX_DIRECTORY_DEPTH=20
    volumes:
      - ./tests/stress:/app/tests/stress:ro
      - type: tmpfs
        target: /tmp/stress-results
    depends_on:
      dufs_webdav:
        condition: service_healthy
    networks:
      - readur_test_network
    profiles:
      - webdav-stress

networks:
  readur_test_network:
    name: readur_test_network
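Both new services are gated behind the webdav-stress compose profile, so bringing the stack up locally would look roughly like this (a sketch, assuming the default compose file name is used):

    docker compose --profile webdav-stress up --build dufs_webdav webdav_stress_orchestrator
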
@@ -0,0 +1,341 @@
#!/bin/bash

# WebDAV Test Data Generation Script
# Generates complex directory structures for stress testing WebDAV sync functionality

set -euo pipefail

# Default values
WEBDAV_ROOT=""
STRESS_LEVEL="medium"
INCLUDE_GIT_REPOS=false
INCLUDE_PERMISSION_ISSUES=false
INCLUDE_SYMLINKS=false
INCLUDE_LARGE_DIRECTORIES=false
INCLUDE_UNICODE_NAMES=false
INCLUDE_PROBLEMATIC_FILES=false

# Function to show usage
show_usage() {
    cat << EOF
Usage: $0 --webdav-root <path> [options]

Required:
  --webdav-root <path>          Root directory for WebDAV test data

Options:
  --stress-level <level>        Stress test level: light, medium, heavy, extreme (default: medium)
  --include-git-repos           Include Git repository structures
  --include-permission-issues   Include files with permission problems
  --include-symlinks            Include symbolic links
  --include-large-directories   Include directories with many files
  --include-unicode-names       Include files with Unicode names
  --include-problematic-files   Include files with problematic names
  -h, --help                    Show this help message

Example:
  $0 --webdav-root /tmp/webdav-test --stress-level heavy --include-symlinks --include-unicode-names
EOF
}

# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        --webdav-root)
            WEBDAV_ROOT="$2"
            shift 2
            ;;
        --stress-level)
            STRESS_LEVEL="$2"
            shift 2
            ;;
        --include-git-repos)
            INCLUDE_GIT_REPOS=true
            shift
            ;;
        --include-permission-issues)
            INCLUDE_PERMISSION_ISSUES=true
            shift
            ;;
        --include-symlinks)
            INCLUDE_SYMLINKS=true
            shift
            ;;
        --include-large-directories)
            INCLUDE_LARGE_DIRECTORIES=true
            shift
            ;;
        --include-unicode-names)
            INCLUDE_UNICODE_NAMES=true
            shift
            ;;
        --include-problematic-files)
            INCLUDE_PROBLEMATIC_FILES=true
            shift
            ;;
        -h|--help)
            show_usage
            exit 0
            ;;
        *)
            echo "Error: Unknown option $1" >&2
            show_usage
            exit 1
            ;;
    esac
done

# Validate required parameters
if [[ -z "$WEBDAV_ROOT" ]]; then
    echo "Error: --webdav-root is required" >&2
    show_usage
    exit 1
fi

# Validate stress level
case "$STRESS_LEVEL" in
    light|medium|heavy|extreme)
        ;;
    *)
        echo "Error: Invalid stress level '$STRESS_LEVEL'. Must be: light, medium, heavy, or extreme" >&2
        exit 1
        ;;
esac

echo "Generating WebDAV test data..."
echo "Root directory: $WEBDAV_ROOT"
echo "Stress level: $STRESS_LEVEL"
echo "Git repos: $INCLUDE_GIT_REPOS"
echo "Permission issues: $INCLUDE_PERMISSION_ISSUES"
echo "Symlinks: $INCLUDE_SYMLINKS"
echo "Large directories: $INCLUDE_LARGE_DIRECTORIES"
echo "Unicode names: $INCLUDE_UNICODE_NAMES"
echo "Problematic files: $INCLUDE_PROBLEMATIC_FILES"

# Create root directory
mkdir -p "$WEBDAV_ROOT"
cd "$WEBDAV_ROOT"

# Set parameters based on stress level
case "$STRESS_LEVEL" in
    light)
        MAX_DEPTH=3
        FILES_PER_DIR=5
        DIRS_PER_LEVEL=3
        LARGE_DIR_SIZE=20
        ;;
    medium)
        MAX_DEPTH=5
        FILES_PER_DIR=10
        DIRS_PER_LEVEL=5
        LARGE_DIR_SIZE=50
        ;;
    heavy)
        MAX_DEPTH=8
        FILES_PER_DIR=20
        DIRS_PER_LEVEL=8
        LARGE_DIR_SIZE=100
        ;;
    extreme)
        MAX_DEPTH=12
        FILES_PER_DIR=50
        DIRS_PER_LEVEL=10
        LARGE_DIR_SIZE=500
        ;;
esac

echo "Configuration: max_depth=$MAX_DEPTH, files_per_dir=$FILES_PER_DIR, dirs_per_level=$DIRS_PER_LEVEL"

# Function to create a file with content
create_test_file() {
    local filepath="$1"
    local content="$2"

    mkdir -p "$(dirname "$filepath")"
    echo "$content" > "$filepath"
    echo "$(date): Test file created at $filepath" >> "$filepath"
}

# Function to create directory structure recursively
create_directory_structure() {
    local base_path="$1"
    local current_depth="$2"
    local max_depth="$3"
    local prefix="$4"

    if [[ $current_depth -ge $max_depth ]]; then
        return
    fi

    mkdir -p "$base_path"

    # Create files in current directory
    for ((i=1; i<=FILES_PER_DIR; i++)); do
        local filename="${prefix}_file_${i}.txt"
        create_test_file "$base_path/$filename" "Test file $i in $base_path (depth $current_depth)"
    done

    # Create subdirectories
    for ((i=1; i<=DIRS_PER_LEVEL; i++)); do
        local dirname="${prefix}_subdir_${i}"
        create_directory_structure "$base_path/$dirname" $((current_depth + 1)) $max_depth "${prefix}_${i}"
    done
}

# Create main structure
echo "Creating main directory structure..."
create_directory_structure "main-structure" 0 $MAX_DEPTH "main"

# Create documents structure
echo "Creating documents structure..."
mkdir -p docs-structure
create_test_file "docs-structure/README.md" "# Test Documents\nThis is a test document repository."
create_test_file "docs-structure/manual.pdf" "Fake PDF content for testing"
create_test_file "docs-structure/presentation.pptx" "Fake PowerPoint content"

# Create images structure
echo "Creating images structure..."
mkdir -p images-structure
for i in {1..10}; do
    create_test_file "images-structure/image_${i}.jpg" "Fake JPEG image content $i"
    create_test_file "images-structure/photo_${i}.png" "Fake PNG image content $i"
done

# Create potential loop trap directories
echo "Creating loop trap directories..."
mkdir -p loop-traps/deep-nesting
create_directory_structure "loop-traps/deep-nesting" 0 $((MAX_DEPTH + 2)) "loop"

# Create test repositories if requested
if [[ "$INCLUDE_GIT_REPOS" == "true" ]]; then
    echo "Creating Git repository structures..."

    for i in {1..3}; do
        repo_dir="test-repo-$i"
        mkdir -p "$repo_dir"
        cd "$repo_dir"

        # Initialize git repo (but don't actually use git to avoid dependency)
        mkdir -p .git/objects .git/refs/heads .git/refs/tags
        echo "ref: refs/heads/main" > .git/HEAD

        # Create typical git repo structure
        create_test_file "src/main.rs" "fn main() { println!(\"Hello, world!\"); }"
        create_test_file "Cargo.toml" "[package]\nname = \"test-repo-$i\"\nversion = \"0.1.0\""
        create_test_file "README.md" "# Test Repository $i"

        cd "$WEBDAV_ROOT"
    done
fi

# Create large directories if requested
if [[ "$INCLUDE_LARGE_DIRECTORIES" == "true" ]]; then
    echo "Creating large directories..."

    mkdir -p large-directory
    for ((i=1; i<=LARGE_DIR_SIZE; i++)); do
        create_test_file "large-directory/file_$(printf "%04d" $i).txt" "Content of file $i in large directory"
    done
fi

# Create symlinks if requested
if [[ "$INCLUDE_SYMLINKS" == "true" ]]; then
    echo "Creating symbolic links..."

    mkdir -p symlink-test
    create_test_file "symlink-test/target.txt" "This is the target file"

    # Create various types of symlinks
    cd symlink-test
    ln -sf target.txt link_to_file.txt
    ln -sf ../main-structure link_to_dir
    ln -sf nonexistent.txt broken_link.txt
    ln -sf link_to_file.txt link_to_link.txt  # Link to link
    cd "$WEBDAV_ROOT"
fi

# Create Unicode filenames if requested
if [[ "$INCLUDE_UNICODE_NAMES" == "true" ]]; then
    echo "Creating files with Unicode names..."

    mkdir -p unicode-test
    create_test_file "unicode-test/café.txt" "French café file"
    create_test_file "unicode-test/résumé.pdf" "French résumé file"
    create_test_file "unicode-test/日本語.txt" "Japanese filename"
    create_test_file "unicode-test/emoji_😀.txt" "File with emoji"
    create_test_file "unicode-test/математика.doc" "Russian filename"
fi

# Create problematic files if requested
if [[ "$INCLUDE_PROBLEMATIC_FILES" == "true" ]]; then
    echo "Creating problematic files..."

    mkdir -p problematic-files

    # Files with special characters (properly escaped)
    create_test_file "problematic-files/file with spaces.txt" "File with spaces in name"
    create_test_file "problematic-files/file&with&ampersands.txt" "File with ampersands"
    create_test_file "problematic-files/file[with]brackets.txt" "File with brackets"
    create_test_file "problematic-files/file'with'quotes.txt" "File with single quotes"
    create_test_file 'problematic-files/file"with"doublequotes.txt' "File with double quotes"

    # Very long filename
    long_name=$(printf 'very_long_filename_%.0s' {1..20})
    create_test_file "problematic-files/${long_name}.txt" "File with very long name"

    # File with just dots
    create_test_file "problematic-files/...txt" "File starting with dots"
fi

# Create restricted access files if requested
if [[ "$INCLUDE_PERMISSION_ISSUES" == "true" ]]; then
    echo "Creating permission test files..."

    mkdir -p restricted-access
    create_test_file "restricted-access/readonly.txt" "Read-only file"
    create_test_file "restricted-access/normal.txt" "Normal file"

    # Make one file read-only
    chmod 444 "restricted-access/readonly.txt"

    # Create a directory with restricted permissions
    mkdir -p restricted-access/restricted-dir
    create_test_file "restricted-access/restricted-dir/hidden.txt" "Hidden file"
    chmod 700 "restricted-access/restricted-dir"
fi

# Create summary file
echo "Creating test data summary..."
create_test_file "TEST_DATA_SUMMARY.txt" "WebDAV Test Data Summary
Generated: $(date)
Stress Level: $STRESS_LEVEL
Configuration:
- Max Depth: $MAX_DEPTH
- Files per Directory: $FILES_PER_DIR
- Directories per Level: $DIRS_PER_LEVEL
- Large Directory Size: $LARGE_DIR_SIZE

Features Included:
- Git Repos: $INCLUDE_GIT_REPOS
- Permission Issues: $INCLUDE_PERMISSION_ISSUES
- Symlinks: $INCLUDE_SYMLINKS
- Large Directories: $INCLUDE_LARGE_DIRECTORIES
- Unicode Names: $INCLUDE_UNICODE_NAMES
- Problematic Files: $INCLUDE_PROBLEMATIC_FILES

Total files created: $(find . -type f | wc -l)
Total directories created: $(find . -type d | wc -l)
"

echo "WebDAV test data generation completed!"
echo "Root directory: $WEBDAV_ROOT"
echo "Total files: $(find "$WEBDAV_ROOT" -type f | wc -l)"
echo "Total directories: $(find "$WEBDAV_ROOT" -type d | wc -l)"

# Display directory structure summary
echo ""
echo "Directory structure summary:"
find "$WEBDAV_ROOT" -type d | head -20
if [[ $(find "$WEBDAV_ROOT" -type d | wc -l) -gt 20 ]]; then
    echo "... and $(($(find "$WEBDAV_ROOT" -type d | wc -l) - 20)) more directories"
fi
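For orientation, a rough sketch of the layout the script produces at the default medium level (names follow the prefix scheme in create_directory_structure; the optional trees only appear when their flags are passed):

    main-structure/main_file_1.txt ... main_file_10.txt
    main-structure/main_subdir_1/ ... main_subdir_5/   (recursing to depth 5)
    docs-structure/  images-structure/  loop-traps/deep-nesting/
    large-directory/  symlink-test/  unicode-test/  problematic-files/  restricted-access/   (optional)
    TEST_DATA_SUMMARY.txt
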
@@ -0,0 +1,354 @@
#!/bin/bash

# WebDAV Stress Test Orchestrator
# Coordinates running comprehensive stress tests and collecting results

set -euo pipefail

# Configuration
STRESS_LEVEL="${STRESS_LEVEL:-medium}"
TEST_DURATION="${STRESS_TEST_DURATION:-300}"
WEBDAV_DUFS_URL="${WEBDAV_DUFS_URL:-http://dufs_webdav:8080}"
WEBDAV_USERNAME="${WEBDAV_USERNAME:-webdav_user}"
WEBDAV_PASSWORD="${WEBDAV_PASSWORD:-webdav_pass}"
LOOP_DETECTION_TIMEOUT="${LOOP_DETECTION_TIMEOUT:-60}"
RESULTS_DIR="/tmp/stress-results"

log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*" >&2
}

# Wait for WebDAV servers to be ready
wait_for_webdav_server() {
    local url="$1"
    local name="$2"
    local max_attempts=30
    local attempt=1

    log "Waiting for $name WebDAV server at $url..."

    while [ $attempt -le $max_attempts ]; do
        if curl -f -s --connect-timeout 5 --max-time 10 \
            --user "$WEBDAV_USERNAME:$WEBDAV_PASSWORD" \
            "$url/" > /dev/null 2>&1; then
            log "$name WebDAV server is ready"
            return 0
        fi

        log "Attempt $attempt/$max_attempts: $name server not ready yet..."
        sleep 5
        attempt=$((attempt + 1))
    done

    log "ERROR: $name WebDAV server did not become ready within timeout"
    return 1
}

# Setup test environment
setup_test_environment() {
    log "Setting up WebDAV stress test environment..."

    # Create results directory
    mkdir -p "$RESULTS_DIR/logs" "$RESULTS_DIR/reports"

    # Wait for WebDAV server
    wait_for_webdav_server "$WEBDAV_DUFS_URL" "Dufs"

    # Test WebDAV connectivity
    log "Testing WebDAV connectivity..."
    curl -f --user "$WEBDAV_USERNAME:$WEBDAV_PASSWORD" \
        -X PROPFIND -H "Depth: 0" \
        "$WEBDAV_DUFS_URL/" > /dev/null

    log "WebDAV connectivity test passed"
}

# Generate test data on WebDAV server
generate_webdav_test_data() {
    log "Generating test data on WebDAV server..."

    # Use a temporary directory to generate data, then upload to WebDAV
    local temp_dir="/tmp/webdav-test-data"
    rm -rf "$temp_dir"

    # Generate test data locally
    ./scripts/generate-webdav-test-data.sh \
        --webdav-root "$temp_dir" \
        --stress-level "$STRESS_LEVEL" \
        --include-git-repos \
        --include-symlinks \
        --include-large-directories \
        --include-unicode-names \
        --include-problematic-files

    # Upload test data to WebDAV server using curl
    log "Uploading test data to WebDAV server..."
    upload_directory_to_webdav "$temp_dir" "$WEBDAV_DUFS_URL"

    # Cleanup local data
    rm -rf "$temp_dir"

    log "Test data generation and upload completed"
}

# Upload directory structure to WebDAV server
upload_directory_to_webdav() {
    local source_dir="$1"
    local webdav_base_url="$2"

    # Create directories first
    find "$source_dir" -type d | while read -r dir; do
        local rel_path="${dir#$source_dir}"
        if [ -n "$rel_path" ]; then
            local webdav_url="$webdav_base_url$rel_path"
            curl -f --user "$WEBDAV_USERNAME:$WEBDAV_PASSWORD" \
                -X MKCOL "$webdav_url/" > /dev/null 2>&1 || true
        fi
    done

    # Upload files
    find "$source_dir" -type f | while read -r file; do
        local rel_path="${file#$source_dir}"
        local webdav_url="$webdav_base_url$rel_path"
        curl -f --user "$WEBDAV_USERNAME:$WEBDAV_PASSWORD" \
            -X PUT --data-binary "@$file" \
            "$webdav_url" > /dev/null 2>&1 || true
    done
}

# Run stress tests with monitoring
run_stress_tests() {
    log "Starting WebDAV stress tests..."

    # Set environment variables for tests
    export WEBDAV_DUFS_URL="$WEBDAV_DUFS_URL"
    export WEBDAV_SERVER_URL="$WEBDAV_DUFS_URL"
    export WEBDAV_USERNAME="$WEBDAV_USERNAME"
    export WEBDAV_PASSWORD="$WEBDAV_PASSWORD"
    export STRESS_LEVEL="$STRESS_LEVEL"
    export STRESS_TEST_DURATION="$TEST_DURATION"
    export TEST_TIMEOUT_SECONDS="$TEST_DURATION"
    export LOOP_DETECTION_TIMEOUT="$LOOP_DETECTION_TIMEOUT"
    export CONCURRENT_SYNCS="4"
    export TRIGGER_TEST_LOOPS="true"
    export STRESS_RESULTS_DIR="$RESULTS_DIR"
    export RUST_LOG="info,webdav_loop_detection_stress=debug,readur::services::webdav=debug"
    export RUST_BACKTRACE="full"

    # Start readur server for testing (if needed)
    if [ "${START_READUR_SERVER:-true}" = "true" ]; then
        log "Starting readur server for stress testing..."
        ./readur > "$RESULTS_DIR/logs/readur-server.log" 2>&1 &
        local readur_pid=$!
        echo "$readur_pid" > "$RESULTS_DIR/readur.pid"

        # Wait for server to start
        sleep 5
    fi

    # Run the stress tests
    log "Executing stress test suite..."

    local test_start_time=$(date +%s)
    local test_exit_code=0

    # Run the new instrumented loop detection stress test
    log "Running instrumented WebDAV loop detection stress test..."
    timeout "$((TEST_DURATION + 60))" cargo run --release \
        --bin webdav_loop_detection_stress \
        > "$RESULTS_DIR/logs/loop-detection-stress.log" 2>&1 || test_exit_code=$?

    # Also run the original stress tests for comparison
    log "Running legacy stress tests for comparison..."
    timeout "$((TEST_DURATION + 60))" cargo test --release \
        --features stress-testing \
        --test webdav_stress_tests \
        -- --test-threads=4 --nocapture > "$RESULTS_DIR/logs/legacy-stress-tests.log" 2>&1 || {
        local legacy_exit_code=$?
        log "Legacy stress tests exited with code $legacy_exit_code"
    }

    local test_end_time=$(date +%s)
    local test_duration=$((test_end_time - test_start_time))

    log "Stress tests completed in ${test_duration}s with exit code $test_exit_code"

    # Stop readur server if we started it
    if [ -f "$RESULTS_DIR/readur.pid" ]; then
        local readur_pid=$(cat "$RESULTS_DIR/readur.pid")
        kill "$readur_pid" 2>/dev/null || true
        rm -f "$RESULTS_DIR/readur.pid"
    fi

    return $test_exit_code
}

# Analyze test results and generate reports
analyze_results() {
    log "Analyzing stress test results..."

    # Analyze logs for infinite loop patterns
    if [ -f "$RESULTS_DIR/logs/stress-tests.log" ]; then
        log "Running loop detection analysis..."

        python3 ./scripts/analyze-webdav-loops.py \
            --log-file "$RESULTS_DIR/logs/stress-tests.log" \
            --output "$RESULTS_DIR/reports/loop-analysis.json" \
            --github-actions || true

        # Generate summary report
        if [ -f "$RESULTS_DIR/reports/loop-analysis.json" ]; then
            local health_score=$(jq -r '.health_score // 0' "$RESULTS_DIR/reports/loop-analysis.json")
            local infinite_loops=$(jq -r '.summary.infinite_loops_detected // 0' "$RESULTS_DIR/reports/loop-analysis.json")

            log "WebDAV Health Score: $health_score/100"
            log "Infinite Loops Detected: $infinite_loops"

            if [ "$infinite_loops" -gt 0 ]; then
                log "WARNING: Infinite loop patterns detected!"
                jq -r '.infinite_loops[] | " - \(.path): \(.type) (severity: \(.severity))"' \
                    "$RESULTS_DIR/reports/loop-analysis.json" | while read -r line; do
                    log "$line"
                done
            fi
        fi
    fi

    # Generate performance report
    log "Generating performance analysis..."

    cat > "$RESULTS_DIR/reports/performance-summary.json" << EOF
{
  "test_timestamp": "$(date -Iseconds)",
  "test_configuration": {
    "stress_level": "$STRESS_LEVEL",
    "test_duration_seconds": $TEST_DURATION,
    "webdav_server_url": "$WEBDAV_DUFS_URL",
    "loop_detection_timeout": $LOOP_DETECTION_TIMEOUT
  },
  "test_environment": {
    "container_id": "$(hostname)",
    "rust_version": "$(rustc --version)",
    "available_memory_mb": $(free -m | awk '/^Mem:/ {print $7}'),
    "cpu_cores": $(nproc)
  }
}
EOF

    # Create GitHub Actions summary if running in CI
    if [ "${GITHUB_ACTIONS:-false}" = "true" ]; then
        generate_github_summary
    fi

    log "Result analysis completed"
}

# Generate GitHub Actions summary
generate_github_summary() {
    if [ -z "${GITHUB_STEP_SUMMARY:-}" ]; then
        return
    fi

    log "Generating GitHub Actions summary..."

    cat >> "$GITHUB_STEP_SUMMARY" << EOF
# WebDAV Stress Test Results

## Configuration
- **Stress Level**: $STRESS_LEVEL
- **Test Duration**: ${TEST_DURATION}s
- **WebDAV Server**: $WEBDAV_DUFS_URL

## Results Summary
EOF

    if [ -f "$RESULTS_DIR/reports/loop-analysis.json" ]; then
        local health_score=$(jq -r '.health_score // 0' "$RESULTS_DIR/reports/loop-analysis.json")
        local infinite_loops=$(jq -r '.summary.infinite_loops_detected // 0' "$RESULTS_DIR/reports/loop-analysis.json")
        local total_directories=$(jq -r '.summary.total_directories_scanned // 0' "$RESULTS_DIR/reports/loop-analysis.json")
        local total_errors=$(jq -r '.summary.total_errors // 0' "$RESULTS_DIR/reports/loop-analysis.json")

        cat >> "$GITHUB_STEP_SUMMARY" << EOF
- **Health Score**: $health_score/100
- **Directories Scanned**: $total_directories
- **Infinite Loops Detected**: $infinite_loops
- **Total Errors**: $total_errors

## Recommendations
EOF

        if [ -f "$RESULTS_DIR/reports/loop-analysis.json" ]; then
            jq -r '.recommendations[]?' "$RESULTS_DIR/reports/loop-analysis.json" | while read -r rec; do
                echo "- $rec" >> "$GITHUB_STEP_SUMMARY"
            done
        fi
    else
        echo "- Analysis data not available" >> "$GITHUB_STEP_SUMMARY"
    fi

    cat >> "$GITHUB_STEP_SUMMARY" << EOF

## Artifacts
- Test logs: Available in workflow artifacts
- Analysis reports: Available in workflow artifacts
EOF
}

# Cleanup function
cleanup() {
    log "Cleaning up stress test environment..."

    # Kill any remaining processes
    if [ -f "$RESULTS_DIR/readur.pid" ]; then
        local readur_pid=$(cat "$RESULTS_DIR/readur.pid")
        kill "$readur_pid" 2>/dev/null || true
        rm -f "$RESULTS_DIR/readur.pid"
    fi

    # Create final artifact archive
    if command -v tar > /dev/null; then
        tar -czf "$RESULTS_DIR/stress-test-artifacts.tar.gz" -C "$RESULTS_DIR" . 2>/dev/null || true
        log "Artifacts archived to: $RESULTS_DIR/stress-test-artifacts.tar.gz"
    fi
}

# Main execution
main() {
    local exit_code=0

    log "=== WebDAV Stress Test Orchestrator Starting ==="
    log "Configuration:"
    log "  - Stress Level: $STRESS_LEVEL"
    log "  - Test Duration: ${TEST_DURATION}s"
    log "  - WebDAV Server: $WEBDAV_DUFS_URL"
    log "  - Results Directory: $RESULTS_DIR"

    # Set up trap for cleanup
    trap cleanup EXIT

    # Execute test phases
    setup_test_environment || exit_code=$?

    if [ $exit_code -eq 0 ]; then
        generate_webdav_test_data || exit_code=$?
    fi

    if [ $exit_code -eq 0 ]; then
        run_stress_tests || exit_code=$?
    fi

    # Always run analysis, even if tests failed
    analyze_results

    if [ $exit_code -eq 0 ]; then
        log "=== WebDAV Stress Tests PASSED ==="
    else
        log "=== WebDAV Stress Tests FAILED (exit code: $exit_code) ==="
    fi

    return $exit_code
}

# Run main function
main "$@"
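Outside the container, the same orchestrator can be pointed at a locally running Dufs instance by overriding its environment defaults (a sketch; the path scripts/run-stress-tests.sh matches the Dockerfile's CMD):

    STRESS_LEVEL=heavy \
    WEBDAV_DUFS_URL=http://localhost:8080 \
    WEBDAV_USERNAME=webdav_user \
    WEBDAV_PASSWORD=webdav_pass \
    START_READUR_SERVER=false \
    ./scripts/run-stress-tests.sh
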
@ -0,0 +1,394 @@
|
|||
/*!
|
||||
* WebDAV Performance Analysis Tool
|
||||
*
|
||||
* Analyzes stress test metrics and generates comprehensive reports for CI/CD pipeline
|
||||
*/
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use clap::{Arg, Command};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
struct LoopDetectionStatistics {
|
||||
total_directories_monitored: usize,
|
||||
total_directory_accesses: usize,
|
||||
suspected_loop_count: usize,
|
||||
max_accesses_per_directory: usize,
|
||||
average_accesses_per_directory: f64,
|
||||
suspected_directories: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
struct WebDAVPerformanceMetrics {
|
||||
total_operations: usize,
|
||||
successful_operations: usize,
|
||||
failed_operations: usize,
|
||||
average_operation_duration_ms: f64,
|
||||
max_operation_duration_ms: u64,
|
||||
min_operation_duration_ms: u64,
|
||||
timeout_count: usize,
|
||||
error_patterns: std::collections::HashMap<String, usize>,
|
||||
loop_detection_stats: LoopDetectionStatistics,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
struct StressTestReport {
|
||||
test_suite_version: String,
|
||||
test_timestamp: chrono::DateTime<chrono::Utc>,
|
||||
overall_result: String,
|
||||
test_summary: TestSummary,
|
||||
recommendations: Vec<String>,
|
||||
performance_metrics: Option<WebDAVPerformanceMetrics>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
struct TestSummary {
|
||||
total_tests: usize,
|
||||
passed_tests: usize,
|
||||
failed_tests: usize,
|
||||
skipped_tests: usize,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct PerformanceAnalysis {
|
||||
overall_health: HealthStatus,
|
||||
critical_issues: Vec<String>,
|
||||
warnings: Vec<String>,
|
||||
recommendations: Vec<String>,
|
||||
metrics_summary: MetricsSummary,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum HealthStatus {
|
||||
Healthy,
|
||||
Warning,
|
||||
Critical,
|
||||
Unknown,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct MetricsSummary {
|
||||
success_rate: f64,
|
||||
average_response_time: f64,
|
||||
max_response_time: u64,
|
||||
timeout_rate: f64,
|
||||
loop_detection_triggered: bool,
|
||||
total_operations: usize,
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
let matches = Command::new("WebDAV Performance Analyzer")
|
||||
.version("1.0.0")
|
||||
.about("Analyzes WebDAV stress test metrics and generates reports")
|
||||
.arg(
|
||||
Arg::new("metrics-file")
|
||||
.long("metrics-file")
|
||||
.value_name("FILE")
|
||||
.help("Path to the stress test metrics JSON file")
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("output-format")
|
||||
.long("output-format")
|
||||
.value_name("FORMAT")
|
||||
.help("Output format: json, markdown, github-summary")
|
||||
.default_value("markdown"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("output-file")
|
||||
.long("output-file")
|
||||
.value_name("FILE")
|
||||
.help("Output file path (stdout if not specified)"),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let metrics_file = matches.get_one::<String>("metrics-file").unwrap();
|
||||
let output_format = matches.get_one::<String>("output-format").unwrap();
|
||||
let output_file = matches.get_one::<String>("output-file");
|
||||
|
||||
// Load and parse metrics file
|
||||
let report = load_stress_test_report(metrics_file)?;
|
||||
|
||||
// Analyze performance metrics
|
||||
let analysis = analyze_performance(&report)?;
|
||||
|
||||
// Generate output based on format
|
||||
let output_content = match output_format.as_str() {
|
||||
"json" => generate_json_report(&analysis)?,
|
||||
"markdown" => generate_markdown_report(&analysis, &report)?,
|
||||
"github-summary" => generate_github_summary(&analysis, &report)?,
|
||||
_ => return Err(anyhow!("Unsupported output format: {}", output_format)),
|
||||
};
|
||||
|
||||
// Write output
|
||||
if let Some(output_path) = output_file {
|
||||
fs::write(output_path, &output_content)?;
|
||||
println!("Report written to: {}", output_path);
|
||||
} else {
|
||||
println!("{}", output_content);
|
||||
}
|
||||
|
||||
// Exit with appropriate code
|
||||
match analysis.overall_health {
|
||||
HealthStatus::Critical => std::process::exit(1),
|
||||
HealthStatus::Warning => std::process::exit(0), // Still success, but with warnings
|
||||
HealthStatus::Healthy => std::process::exit(0),
|
||||
HealthStatus::Unknown => std::process::exit(2),
|
||||
}
|
||||
}
|
||||
|
||||
fn load_stress_test_report(file_path: &str) -> Result<StressTestReport> {
|
||||
if !Path::new(file_path).exists() {
|
||||
return Err(anyhow!("Metrics file not found: {}", file_path));
|
||||
}
|
||||
|
||||
let content = fs::read_to_string(file_path)?;
|
||||
let report: StressTestReport = serde_json::from_str(&content)
|
||||
.map_err(|e| anyhow!("Failed to parse metrics file: {}", e))?;
|
||||
|
||||
Ok(report)
|
||||
}
|
||||
|
||||
fn analyze_performance(report: &StressTestReport) -> Result<PerformanceAnalysis> {
|
||||
let mut critical_issues = Vec::new();
|
||||
let mut warnings = Vec::new();
|
||||
let mut recommendations = Vec::new();
|
||||
|
||||
let metrics_summary = if let Some(metrics) = &report.performance_metrics {
|
||||
let success_rate = if metrics.total_operations > 0 {
|
||||
(metrics.successful_operations as f64 / metrics.total_operations as f64) * 100.0
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
|
||||
let timeout_rate = if metrics.total_operations > 0 {
|
||||
(metrics.timeout_count as f64 / metrics.total_operations as f64) * 100.0
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
|
||||
// Analyze critical issues
|
||||
if success_rate < 50.0 {
|
||||
critical_issues.push(format!(
|
||||
"Critical: Very low success rate ({:.1}%) - indicates severe WebDAV connectivity issues",
|
||||
success_rate
|
||||
));
|
||||
} else if success_rate < 80.0 {
|
||||
warnings.push(format!(
|
||||
"Warning: Low success rate ({:.1}%) - investigate WebDAV server performance",
|
||||
success_rate
|
||||
));
|
||||
}
|
||||
|
||||
if metrics.loop_detection_stats.suspected_loop_count > 0 {
|
||||
critical_issues.push(format!(
|
||||
"Critical: {} suspected infinite loops detected - immediate investigation required",
|
||||
metrics.loop_detection_stats.suspected_loop_count
|
||||
));
|
||||
|
||||
for dir in &metrics.loop_detection_stats.suspected_directories {
|
||||
critical_issues.push(format!(" - Suspected loop in directory: {}", dir));
|
||||
}
|
||||
}
|
||||
|
||||
if timeout_rate > 20.0 {
|
||||
critical_issues.push(format!(
|
||||
"Critical: High timeout rate ({:.1}%) - server may be overloaded or unresponsive",
|
||||
timeout_rate
|
||||
));
|
||||
} else if timeout_rate > 10.0 {
|
||||
warnings.push(format!(
|
||||
"Warning: Elevated timeout rate ({:.1}%) - monitor server performance",
|
||||
timeout_rate
|
||||
));
|
||||
}
|
||||
|
||||
if metrics.average_operation_duration_ms > 5000.0 {
|
||||
warnings.push(format!(
|
||||
"Warning: Slow average response time ({:.1}ms) - consider server optimization",
|
||||
metrics.average_operation_duration_ms
|
||||
));
|
||||
}
|
||||
|
||||
// Generate recommendations
|
||||
if success_rate < 90.0 {
|
||||
recommendations.push("Consider increasing retry configuration for WebDAV operations".to_string());
|
||||
}
|
||||
|
||||
if timeout_rate > 5.0 {
|
||||
recommendations.push("Review WebDAV server timeout configuration and network stability".to_string());
|
||||
}
|
||||
|
||||
if metrics.loop_detection_stats.suspected_loop_count > 0 {
|
||||
recommendations.push("Implement additional safeguards against directory loop patterns".to_string());
|
||||
recommendations.push("Review symlink handling and directory structure validation".to_string());
|
||||
}
|
||||
|
||||
if metrics.average_operation_duration_ms > 2000.0 {
|
||||
recommendations.push("Consider implementing caching strategies for frequently accessed directories".to_string());
|
||||
}
|
||||
|
||||
MetricsSummary {
|
||||
success_rate,
|
||||
average_response_time: metrics.average_operation_duration_ms,
|
||||
max_response_time: metrics.max_operation_duration_ms,
|
||||
timeout_rate,
|
||||
loop_detection_triggered: metrics.loop_detection_stats.suspected_loop_count > 0,
|
||||
total_operations: metrics.total_operations,
|
||||
}
|
||||
} else {
|
||||
warnings.push("Warning: No performance metrics available in the report".to_string());
|
||||
MetricsSummary {
|
||||
success_rate: 0.0,
|
||||
average_response_time: 0.0,
|
||||
max_response_time: 0,
|
||||
timeout_rate: 0.0,
|
||||
loop_detection_triggered: false,
|
||||
total_operations: 0,
|
||||
}
|
||||
};
|
||||
|
||||
// Determine overall health
|
||||
let overall_health = if !critical_issues.is_empty() {
|
||||
HealthStatus::Critical
|
||||
} else if !warnings.is_empty() {
|
||||
HealthStatus::Warning
|
||||
} else if metrics_summary.total_operations > 0 {
|
||||
HealthStatus::Healthy
|
||||
} else {
|
||||
HealthStatus::Unknown
|
||||
};
|
||||
|
||||
Ok(PerformanceAnalysis {
|
||||
overall_health,
|
||||
critical_issues,
|
||||
warnings,
|
||||
recommendations,
|
||||
metrics_summary,
|
||||
})
|
||||
}
|
||||
|
||||
fn generate_json_report(analysis: &PerformanceAnalysis) -> Result<String> {
|
||||
let json_report = serde_json::json!({
|
||||
"overall_health": format!("{:?}", analysis.overall_health),
|
||||
"critical_issues": analysis.critical_issues,
|
||||
"warnings": analysis.warnings,
|
||||
"recommendations": analysis.recommendations,
|
||||
"metrics_summary": {
|
||||
"success_rate": analysis.metrics_summary.success_rate,
|
||||
"average_response_time_ms": analysis.metrics_summary.average_response_time,
|
||||
"max_response_time_ms": analysis.metrics_summary.max_response_time,
|
||||
"timeout_rate": analysis.metrics_summary.timeout_rate,
|
||||
"loop_detection_triggered": analysis.metrics_summary.loop_detection_triggered,
|
||||
"total_operations": analysis.metrics_summary.total_operations,
|
||||
}
|
||||
});
|
||||
|
||||
Ok(serde_json::to_string_pretty(&json_report)?)
|
||||
}
|
||||
|
||||
fn generate_markdown_report(analysis: &PerformanceAnalysis, report: &StressTestReport) -> Result<String> {
|
||||
let mut markdown = String::new();
|
||||
|
||||
markdown.push_str("# WebDAV Performance Analysis Report\n\n");
|
||||
|
||||
// Overall status
|
||||
markdown.push_str(&format!("**Overall Health Status:** {:?}\n\n", analysis.overall_health));
|
||||
markdown.push_str(&format!("**Generated:** {}\n\n", chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC")));
|
||||
|
||||
// Test summary
|
||||
markdown.push_str("## Test Summary\n\n");
|
||||
markdown.push_str(&format!("- **Total Tests:** {}\n", report.test_summary.total_tests));
|
||||
markdown.push_str(&format!("- **Passed:** {}\n", report.test_summary.passed_tests));
|
||||
markdown.push_str(&format!("- **Failed:** {}\n", report.test_summary.failed_tests));
|
||||
markdown.push_str(&format!("- **Skipped:** {}\n\n", report.test_summary.skipped_tests));
|
||||
|
||||
// Performance metrics
|
||||
markdown.push_str("## Performance Metrics\n\n");
|
||||
markdown.push_str(&format!("- **Success Rate:** {:.1}%\n", analysis.metrics_summary.success_rate));
|
||||
markdown.push_str(&format!("- **Average Response Time:** {:.1}ms\n", analysis.metrics_summary.average_response_time));
|
||||
markdown.push_str(&format!("- **Max Response Time:** {}ms\n", analysis.metrics_summary.max_response_time));
|
||||
markdown.push_str(&format!("- **Timeout Rate:** {:.1}%\n", analysis.metrics_summary.timeout_rate));
|
||||
markdown.push_str(&format!("- **Total Operations:** {}\n", analysis.metrics_summary.total_operations));
|
||||
markdown.push_str(&format!("- **Loop Detection Triggered:** {}\n\n", analysis.metrics_summary.loop_detection_triggered));
|
||||
|
||||
// Critical issues
|
||||
if !analysis.critical_issues.is_empty() {
|
||||
markdown.push_str("## 🚨 Critical Issues\n\n");
|
||||
for issue in &analysis.critical_issues {
|
||||
markdown.push_str(&format!("- {}\n", issue));
|
||||
}
|
||||
markdown.push_str("\n");
|
||||
}
|
||||
|
||||
// Warnings
|
||||
if !analysis.warnings.is_empty() {
|
||||
markdown.push_str("## ⚠️ Warnings\n\n");
|
||||
for warning in &analysis.warnings {
|
||||
markdown.push_str(&format!("- {}\n", warning));
|
||||
}
|
||||
markdown.push_str("\n");
|
||||
}
|
||||
|
||||
// Recommendations
|
||||
if !analysis.recommendations.is_empty() {
|
||||
markdown.push_str("## 💡 Recommendations\n\n");
|
||||
for recommendation in &analysis.recommendations {
|
||||
markdown.push_str(&format!("- {}\n", recommendation));
|
||||
}
|
||||
markdown.push_str("\n");
|
||||
}
|
||||
|
||||
// Write to file for GitHub Actions
|
||||
fs::write("webdav-performance-report.md", &markdown)?;
|
||||
|
||||
Ok(markdown)
|
||||
}
|
||||
|
||||
fn generate_github_summary(analysis: &PerformanceAnalysis, report: &StressTestReport) -> Result<String> {
|
||||
let mut summary = String::new();
|
||||
|
||||
// Status icon based on health
|
||||
let status_icon = match analysis.overall_health {
|
||||
HealthStatus::Healthy => "✅",
|
||||
HealthStatus::Warning => "⚠️",
|
||||
HealthStatus::Critical => "🚨",
|
||||
HealthStatus::Unknown => "❓",
|
||||
};
|
||||
|
||||
summary.push_str(&format!("{} **WebDAV Stress Test Results**\n\n", status_icon));
|
||||
|
||||
// Quick stats table
|
||||
summary.push_str("| Metric | Value |\n");
|
||||
summary.push_str("|--------|-------|\n");
|
||||
summary.push_str(&format!("| Success Rate | {:.1}% |\n", analysis.metrics_summary.success_rate));
|
||||
summary.push_str(&format!("| Total Operations | {} |\n", analysis.metrics_summary.total_operations));
|
||||
summary.push_str(&format!("| Avg Response Time | {:.1}ms |\n", analysis.metrics_summary.average_response_time));
|
||||
summary.push_str(&format!("| Timeout Rate | {:.1}% |\n", analysis.metrics_summary.timeout_rate));
|
||||
summary.push_str(&format!("| Loop Detection | {} |\n", if analysis.metrics_summary.loop_detection_triggered { "⚠️ TRIGGERED" } else { "✅ OK" }));
|
||||
summary.push_str("\n");
|
||||
|
||||
// Critical issues (collapsed section)
|
||||
if !analysis.critical_issues.is_empty() {
|
||||
summary.push_str("<details>\n");
|
||||
summary.push_str("<summary>🚨 Critical Issues</summary>\n\n");
|
||||
for issue in &analysis.critical_issues {
|
||||
summary.push_str(&format!("- {}\n", issue));
|
||||
}
|
||||
summary.push_str("\n</details>\n\n");
|
||||
}
|
||||
|
||||
// Warnings (collapsed section)
|
||||
if !analysis.warnings.is_empty() {
|
||||
summary.push_str("<details>\n");
|
||||
summary.push_str("<summary>⚠️ Warnings</summary>\n\n");
|
||||
for warning in &analysis.warnings {
|
||||
summary.push_str(&format!("- {}\n", warning));
|
||||
}
|
||||
summary.push_str("\n</details>\n\n");
|
||||
}
|
||||
|
||||
Ok(summary)
|
||||
}
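// Illustrative sketch (not part of the original change): how the string returned by
// `generate_github_summary` could be published to the Actions job summary. It assumes
// the standard GITHUB_STEP_SUMMARY file exposed by the GitHub runner, plus the same
// anyhow-style `Result` and `fs` import already used above; the function name is hypothetical.
fn publish_github_summary(summary: &str) -> Result<()> {
    use std::io::Write;

    if let Ok(path) = std::env::var("GITHUB_STEP_SUMMARY") {
        // Append rather than overwrite so summaries from earlier steps are preserved.
        let mut file = std::fs::OpenOptions::new().append(true).create(true).open(path)?;
        file.write_all(summary.as_bytes())?;
    } else {
        // Outside CI, fall back to a local file next to the markdown report.
        fs::write("webdav-stress-summary.md", summary)?;
    }
    Ok(())
}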
|
||||
|
|
@ -95,6 +95,7 @@ async fn estimate_webdav_crawl_internal(
|
|||
file_extensions: config.file_extensions.clone(),
|
||||
timeout_seconds: 300,
|
||||
server_type: config.server_type.clone(),
|
||||
loop_detection: crate::services::webdav::loop_detection::LoopDetectionConfig::default(),
|
||||
};
|
||||
|
||||
// Create WebDAV service and estimate crawl
|
||||
|
|
|
|||
|
|
@ -240,6 +240,7 @@ pub async fn trigger_deep_scan(
|
|||
file_extensions: config.file_extensions.clone(),
|
||||
timeout_seconds: 600, // 10 minutes for deep scan
|
||||
server_type: config.server_type.clone(),
|
||||
loop_detection: crate::services::webdav::LoopDetectionConfig::default(),
|
||||
};
|
||||
|
||||
let webdav_service = crate::services::webdav::WebDAVService::new(webdav_config.clone())
|
||||
|
|
|
|||
|
|
@ -72,6 +72,7 @@ async fn get_user_webdav_config(state: &Arc<AppState>, user_id: uuid::Uuid) -> R
|
|||
file_extensions: settings.webdav_file_extensions,
|
||||
timeout_seconds: 300, // 5 minutes timeout for crawl estimation
|
||||
server_type: Some("nextcloud".to_string()), // Default to Nextcloud
|
||||
loop_detection: crate::services::webdav::loop_detection::LoopDetectionConfig::default(),
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -107,6 +108,7 @@ async fn test_webdav_connection(
|
|||
file_extensions: Vec::new(),
|
||||
timeout_seconds: 300, // 5 minutes timeout for crawl estimation
|
||||
server_type: test_config.server_type.clone(),
|
||||
loop_detection: crate::services::webdav::loop_detection::LoopDetectionConfig::default(),
|
||||
};
|
||||
|
||||
// Create WebDAV service and test connection
|
||||
|
|
|
|||
|
|
@ -745,6 +745,7 @@ impl SourceScheduler {
|
|||
file_extensions: webdav_config.file_extensions.clone(),
|
||||
timeout_seconds: 600, // 10 minutes for deep scan
|
||||
server_type: webdav_config.server_type.clone(),
|
||||
loop_detection: crate::services::webdav::loop_detection::LoopDetectionConfig::default(),
|
||||
}
|
||||
)?;
|
||||
|
||||
|
|
@ -1032,6 +1033,7 @@ impl SourceScheduler {
|
|||
file_extensions: config.file_extensions.clone(),
|
||||
timeout_seconds: 30, // Quick connectivity test
|
||||
server_type: config.server_type.clone(),
|
||||
loop_detection: crate::services::webdav::loop_detection::LoopDetectionConfig::default(),
|
||||
};
|
||||
|
||||
let webdav_service = crate::services::webdav::WebDAVService::new(webdav_config)
|
||||
|
|
|
|||
|
|
@ -109,6 +109,7 @@ impl SourceSyncService {
|
|||
file_extensions: config.file_extensions,
|
||||
timeout_seconds: 180, // 3 minutes for discover_files_in_folder operations
|
||||
server_type: config.server_type,
|
||||
loop_detection: crate::services::webdav::loop_detection::LoopDetectionConfig::default(),
|
||||
};
|
||||
|
||||
let webdav_service = WebDAVService::new(webdav_config.clone())
|
||||
|
|
|
|||
|
|
@ -275,6 +275,7 @@ impl WebDAVScheduler {
|
|||
file_extensions: settings.webdav_file_extensions.clone(),
|
||||
timeout_seconds: 30,
|
||||
server_type: Some("nextcloud".to_string()),
|
||||
loop_detection: crate::services::webdav::loop_detection::LoopDetectionConfig::default(),
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,6 @@
|
|||
|
||||
use super::loop_detection::LoopDetectionConfig;
|
||||
|
||||
/// WebDAV server configuration
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct WebDAVConfig {
|
||||
|
|
@ -9,6 +11,7 @@ pub struct WebDAVConfig {
|
|||
pub file_extensions: Vec<String>,
|
||||
pub timeout_seconds: u64,
|
||||
pub server_type: Option<String>, // "nextcloud", "owncloud", "generic"
|
||||
pub loop_detection: LoopDetectionConfig,
|
||||
}
|
||||
|
||||
/// Retry configuration for WebDAV operations
|
||||
|
|
@ -100,6 +103,7 @@ impl WebDAVConfig {
|
|||
file_extensions,
|
||||
timeout_seconds: 30,
|
||||
server_type: None,
|
||||
loop_detection: LoopDetectionConfig::default(),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@ -0,0 +1,323 @@
|
|||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::super::*;
|
||||
use super::super::loop_detection::{LoopDetectionService, LoopDetectionConfig, LoopType};
|
||||
use crate::{AppState, config::Config};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
use uuid::Uuid;
|
||||
|
||||
/// Helper to create a test WebDAV service with loop detection enabled
|
||||
async fn create_test_webdav_service_with_loop_detection() -> WebDAVService {
|
||||
let mut config = WebDAVConfig::new(
|
||||
"http://localhost:8080".to_string(),
|
||||
"test_user".to_string(),
|
||||
"test_pass".to_string(),
|
||||
vec!["/test".to_string()],
|
||||
vec!["pdf".to_string(), "txt".to_string()],
|
||||
);
|
||||
|
||||
// Configure loop detection with tight thresholds for testing
|
||||
config.loop_detection = LoopDetectionConfig {
|
||||
enabled: true,
|
||||
max_access_count: 2, // Very low for testing
|
||||
time_window_secs: 10, // Short window
|
||||
max_scan_duration_secs: 5, // Short timeout
|
||||
min_scan_interval_secs: 1, // Short interval
|
||||
max_pattern_depth: 5,
|
||||
max_tracked_directories: 100,
|
||||
enable_pattern_analysis: true,
|
||||
log_level: "debug".to_string(),
|
||||
};
|
||||
|
||||
WebDAVService::new(config).expect("Failed to create WebDAV service")
|
||||
}
|
||||
|
||||
/// Helper to create a mock WebDAV server response for testing
|
||||
fn create_mock_webdav_response(num_files: usize, num_dirs: usize) -> WebDAVDiscoveryResult {
|
||||
let mut files = Vec::new();
|
||||
let mut directories = Vec::new();
|
||||
|
||||
for i in 0..num_files {
|
||||
files.push(crate::models::FileIngestionInfo {
|
||||
uuid: Uuid::new_v4(),
|
||||
filename: format!("file_{}.pdf", i),
|
||||
relative_path: format!("/test/file_{}.pdf", i),
|
||||
absolute_url: format!("http://localhost:8080/test/file_{}.pdf", i),
|
||||
file_size_bytes: 1024 * (i + 1) as i64,
|
||||
last_modified: chrono::Utc::now(),
|
||||
etag: format!("etag_{}", i),
|
||||
content_type: "application/pdf".to_string(),
|
||||
is_directory: false,
|
||||
});
|
||||
}
|
||||
|
||||
for i in 0..num_dirs {
|
||||
directories.push(crate::models::FileIngestionInfo {
|
||||
uuid: Uuid::new_v4(),
|
||||
filename: format!("dir_{}", i),
|
||||
relative_path: format!("/test/dir_{}", i),
|
||||
absolute_url: format!("http://localhost:8080/test/dir_{}/", i),
|
||||
file_size_bytes: 0,
|
||||
last_modified: chrono::Utc::now(),
|
||||
etag: format!("dir_etag_{}", i),
|
||||
content_type: "httpd/unix-directory".to_string(),
|
||||
is_directory: true,
|
||||
});
|
||||
}
|
||||
|
||||
WebDAVDiscoveryResult { files, directories }
|
||||
}
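// Illustrative usage of the mock helper above (assumption, not part of the original change),
// for tests that don't need a live WebDAV server; `WebDAVDiscoveryResult` fields are as
// defined in service.rs:
//
//     let mock = create_mock_webdav_response(3, 1);
//     assert_eq!(mock.files.len(), 3);
//     assert_eq!(mock.directories.len(), 1);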
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_loop_detection_immediate_rescan() {
|
||||
let service = create_test_webdav_service_with_loop_detection().await;
|
||||
|
||||
// First access should succeed
|
||||
let access1 = service.loop_detector.start_access("/test/path", "test_scan").await.unwrap();
|
||||
service.loop_detector.complete_access(access1, Some(5), Some(2), None).await.unwrap();
|
||||
|
||||
// Immediate second access should fail due to min_scan_interval
|
||||
let result = service.loop_detector.start_access("/test/path", "test_scan").await;
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().to_string().contains("re-accessed after only"));
|
||||
|
||||
// Metrics should show the loop detection
|
||||
let metrics = service.get_loop_detection_metrics().await.unwrap();
|
||||
assert_eq!(metrics["total_loops_detected"], 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_loop_detection_concurrent_access() {
|
||||
let service = create_test_webdav_service_with_loop_detection().await;
|
||||
|
||||
// Start first access
|
||||
let _access1 = service.loop_detector.start_access("/test/path", "scan1").await.unwrap();
|
||||
|
||||
// Concurrent access should fail
|
||||
let result = service.loop_detector.start_access("/test/path", "scan2").await;
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().to_string().contains("Concurrent access detected"));
|
||||
|
||||
let metrics = service.get_loop_detection_metrics().await.unwrap();
|
||||
assert_eq!(metrics["total_loops_detected"], 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_loop_detection_frequency_limit() {
|
||||
let service = create_test_webdav_service_with_loop_detection().await;
|
||||
|
||||
// Clear state to start fresh
|
||||
service.clear_loop_detection_state().await.unwrap();
|
||||
|
||||
// Do multiple accesses that complete quickly
|
||||
for i in 0..3 {
|
||||
if i > 0 {
|
||||
// Wait minimum interval to avoid immediate re-scan detection
|
||||
sleep(Duration::from_millis(1100)).await;
|
||||
}
|
||||
|
||||
let access = service.loop_detector.start_access("/test/freq_path", &format!("scan_{}", i));
|
||||
|
||||
if i < 2 {
|
||||
// First two should succeed
|
||||
assert!(access.is_ok());
|
||||
let access_id = access.unwrap();
|
||||
service.loop_detector.complete_access(access_id, Some(i * 2), Some(i), None).await.unwrap();
|
||||
} else {
|
||||
// Third should fail due to frequency limit
|
||||
assert!(access.is_err());
|
||||
assert!(access.unwrap_err().to_string().contains("accessed 2 times"));
|
||||
}
|
||||
}
|
||||
|
||||
let metrics = service.get_loop_detection_metrics().await.unwrap();
|
||||
assert_eq!(metrics["total_loops_detected"], 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_loop_detection_disabled() {
|
||||
let mut config = WebDAVConfig::new(
|
||||
"http://localhost:8080".to_string(),
|
||||
"test_user".to_string(),
|
||||
"test_pass".to_string(),
|
||||
vec!["/test".to_string()],
|
||||
vec!["pdf".to_string()],
|
||||
);
|
||||
|
||||
// Disable loop detection
|
||||
config.loop_detection.enabled = false;
|
||||
|
||||
let service = WebDAVService::new(config).unwrap();
|
||||
|
||||
// Multiple rapid accesses should all succeed when disabled
|
||||
for i in 0..5 {
|
||||
let access = service.loop_detector.start_access("/test/path", &format!("scan_{}", i)).unwrap();
|
||||
service.loop_detector.complete_access(access, Some(i), Some(1), None).await.unwrap();
|
||||
}
|
||||
|
||||
let metrics = service.get_loop_detection_metrics().await.unwrap();
|
||||
assert!(!metrics["enabled"].as_bool().unwrap());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_loop_detection_error_tracking() {
|
||||
let service = create_test_webdav_service_with_loop_detection().await;
|
||||
|
||||
// Test error tracking in loop detection
|
||||
let access = service.loop_detector.start_access("/test/error_path", "error_scan").unwrap();
|
||||
service.loop_detector.complete_access(
|
||||
access,
|
||||
None,
|
||||
None,
|
||||
Some("Test error message".to_string())
|
||||
).await.unwrap();
|
||||
|
||||
let metrics = service.get_loop_detection_metrics().await.unwrap();
|
||||
assert_eq!(metrics["total_accesses"], 1);
|
||||
assert_eq!(metrics["total_loops_detected"], 0); // No loops, just an error
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_loop_detection_cleanup() {
|
||||
let service = create_test_webdav_service_with_loop_detection().await;
|
||||
|
||||
// Add some access data
|
||||
for i in 0..3 {
|
||||
let access = service.loop_detector.start_access(&format!("/test/cleanup_{}", i), "cleanup_scan").await.unwrap();
|
||||
service.loop_detector.complete_access(access, Some(i), Some(1), None).await.unwrap();
|
||||
sleep(Duration::from_millis(100)).await; // Small delay between accesses
|
||||
}
|
||||
|
||||
let metrics_before = service.get_loop_detection_metrics().await.unwrap();
|
||||
assert_eq!(metrics_before["total_accesses"], 3);
|
||||
|
||||
// Clear state
|
||||
service.clear_loop_detection_state().await.unwrap();
|
||||
|
||||
let metrics_after = service.get_loop_detection_metrics().await.unwrap();
|
||||
assert_eq!(metrics_after["active_accesses"], 0);
|
||||
assert_eq!(metrics_after["history_size"], 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_loop_detection_config_update() {
|
||||
let service = create_test_webdav_service_with_loop_detection().await;
|
||||
|
||||
// Update configuration
|
||||
let mut new_config = LoopDetectionConfig::default();
|
||||
new_config.max_access_count = 10; // Much higher limit
|
||||
new_config.log_level = "info".to_string();
|
||||
|
||||
service.update_loop_detection_config(new_config).await.unwrap();
|
||||
|
||||
let metrics = service.get_loop_detection_metrics().await.unwrap();
|
||||
assert_eq!(metrics["config"]["max_access_count"], 10);
|
||||
assert_eq!(metrics["config"]["log_level"], "info");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_pattern_analysis_circular_detection() {
|
||||
let service = create_test_webdav_service_with_loop_detection().await;
|
||||
service.clear_loop_detection_state().await.unwrap();
|
||||
|
||||
// Simulate A -> B -> A pattern with proper timing
|
||||
let paths = ["/test/path_a", "/test/path_b", "/test/path_a"];
|
||||
|
||||
for (i, path) in paths.iter().enumerate() {
|
||||
if i > 0 {
|
||||
sleep(Duration::from_millis(1100)).await; // Wait minimum interval
|
||||
}
|
||||
|
||||
let access = service.loop_detector.start_access(path, &format!("pattern_scan_{}", i)).await;
|
||||
|
||||
if i < 2 {
|
||||
// First two should succeed
|
||||
assert!(access.is_ok());
|
||||
let access_id = access.unwrap();
|
||||
service.loop_detector.complete_access(access_id, Some(1), Some(0), None).await.unwrap();
|
||||
} else {
|
||||
// Third access to path_a might trigger pattern detection
|
||||
// Note: The exact behavior depends on the pattern detection algorithm
|
||||
if let Err(e) = access {
|
||||
println!("Pattern detection triggered: {}", e);
|
||||
} else {
|
||||
let access_id = access.unwrap();
|
||||
service.loop_detector.complete_access(access_id, Some(1), Some(0), None).await.unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let metrics = service.get_loop_detection_metrics().await.unwrap();
|
||||
println!("Pattern analysis metrics: {}", serde_json::to_string_pretty(&metrics).unwrap());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_webdav_service_integration_with_loop_detection() {
|
||||
// This test would ideally connect to a real WebDAV server
|
||||
// For now, we test the integration points
|
||||
let service = create_test_webdav_service_with_loop_detection().await;
|
||||
|
||||
// Test that the service has loop detection enabled
|
||||
let metrics = service.get_loop_detection_metrics().await.unwrap();
|
||||
assert!(metrics["enabled"].as_bool().unwrap());
|
||||
|
||||
// Test configuration access
|
||||
assert_eq!(metrics["config"]["max_access_count"], 2);
|
||||
assert_eq!(metrics["config"]["time_window_secs"], 10);
|
||||
|
||||
// Test that we can update the config
|
||||
let mut new_config = LoopDetectionConfig::default();
|
||||
new_config.enabled = false;
|
||||
service.update_loop_detection_config(new_config).await.unwrap();
|
||||
|
||||
let updated_metrics = service.get_loop_detection_metrics().await.unwrap();
|
||||
assert!(!updated_metrics["enabled"].as_bool().unwrap());
|
||||
}
|
||||
|
||||
/// Integration test with SmartSyncService
|
||||
#[tokio::test]
|
||||
async fn test_smart_sync_loop_detection_integration() {
|
||||
// Create test app state
|
||||
let test_config = Config::test_default();
|
||||
let app_state = Arc::new(AppState::new_for_testing(test_config).await.unwrap());
|
||||
|
||||
let smart_sync = SmartSyncService::new(app_state);
|
||||
let webdav_service = create_test_webdav_service_with_loop_detection().await;
|
||||
|
||||
// Test that SmartSyncService can access loop detection metrics
|
||||
let metrics = smart_sync.get_loop_detection_metrics(&webdav_service).await.unwrap();
|
||||
assert!(metrics["enabled"].as_bool().unwrap());
|
||||
|
||||
// Test that metrics are properly structured
|
||||
assert!(metrics.get("total_accesses").is_some());
|
||||
assert!(metrics.get("total_loops_detected").is_some());
|
||||
assert!(metrics.get("config").is_some());
|
||||
}
|
||||
|
||||
/// Performance test to ensure loop detection doesn't significantly impact performance
|
||||
#[tokio::test]
|
||||
async fn test_loop_detection_performance() {
|
||||
let service = create_test_webdav_service_with_loop_detection().await;
|
||||
|
||||
let start_time = std::time::Instant::now();
|
||||
|
||||
// Perform many operations with different paths to avoid triggering detection
|
||||
for i in 0..100 {
|
||||
let path = format!("/test/perf_path_{}", i);
|
||||
let access = service.loop_detector.start_access(&path, "perf_test").await.unwrap();
|
||||
service.loop_detector.complete_access(access, Some(10), Some(2), None).await.unwrap();
|
||||
}
|
||||
|
||||
let elapsed = start_time.elapsed();
|
||||
println!("100 loop detection operations took: {:?}", elapsed);
|
||||
|
||||
// Should complete quickly (within 1 second for 100 operations)
|
||||
assert!(elapsed < Duration::from_secs(1), "Loop detection performance too slow: {:?}", elapsed);
|
||||
|
||||
let metrics = service.get_loop_detection_metrics().await.unwrap();
|
||||
assert_eq!(metrics["total_accesses"], 100);
|
||||
assert_eq!(metrics["total_loops_detected"], 0);
|
||||
}
|
||||
}
|
||||
|
|
@ -5,6 +5,7 @@ pub mod config;
|
|||
pub mod service;
|
||||
pub mod smart_sync;
|
||||
pub mod progress_shim; // Backward compatibility shim for simplified progress tracking
|
||||
pub mod loop_detection; // Loop detection and monitoring for sync operations
|
||||
|
||||
// Re-export main types for convenience
|
||||
pub use common::build_user_agent;
|
||||
|
|
@ -15,6 +16,7 @@ pub use service::{
|
|||
ValidationRecommendation, ValidationAction, ValidationSummary
|
||||
};
|
||||
pub use smart_sync::{SmartSyncService, SmartSyncDecision, SmartSyncStrategy, SmartSyncResult};
|
||||
pub use loop_detection::{LoopDetectionService, LoopDetectionConfig, LoopDetectionResult, LoopType};
|
||||
|
||||
// Backward compatibility exports for progress tracking (simplified)
|
||||
pub use progress_shim::{SyncProgress, SyncPhase, ProgressStats};
|
||||
|
|
@ -27,4 +29,6 @@ mod subdirectory_edge_cases_tests;
|
|||
#[cfg(test)]
|
||||
mod protocol_detection_tests;
|
||||
#[cfg(test)]
|
||||
mod loop_detection_integration_tests;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
|
@ -23,6 +23,7 @@ use crate::mime_detection::{detect_mime_from_content, MimeDetectionResult};
|
|||
|
||||
use super::{config::{WebDAVConfig, RetryConfig, ConcurrencyConfig}, SyncProgress};
|
||||
use super::common::build_user_agent;
|
||||
use super::loop_detection::LoopDetectionService;
|
||||
|
||||
/// Results from WebDAV discovery including both files and directories
|
||||
#[derive(Debug, Clone)]
|
||||
|
|
@ -153,6 +154,8 @@ pub struct WebDAVService {
|
|||
download_semaphore: Arc<Semaphore>,
|
||||
/// Stores the working protocol (updated after successful protocol detection)
|
||||
working_protocol: Arc<std::sync::RwLock<Option<String>>>,
|
||||
/// Loop detection service for monitoring sync patterns
|
||||
loop_detector: LoopDetectionService,
|
||||
}
|
||||
|
||||
impl WebDAVService {
|
||||
|
|
@ -184,6 +187,9 @@ impl WebDAVService {
|
|||
let scan_semaphore = Arc::new(Semaphore::new(concurrency_config.max_concurrent_scans));
|
||||
let download_semaphore = Arc::new(Semaphore::new(concurrency_config.max_concurrent_downloads));
|
||||
|
||||
// Create loop detector with config from WebDAV config
|
||||
let loop_detector = LoopDetectionService::with_config(config.loop_detection.clone());
|
||||
|
||||
Ok(Self {
|
||||
client,
|
||||
config,
|
||||
|
|
@ -192,9 +198,25 @@ impl WebDAVService {
|
|||
scan_semaphore,
|
||||
download_semaphore,
|
||||
working_protocol: Arc::new(std::sync::RwLock::new(None)),
|
||||
loop_detector,
|
||||
})
|
||||
}
|
||||
|
||||
/// Get loop detection metrics and status
|
||||
pub async fn get_loop_detection_metrics(&self) -> Result<serde_json::Value> {
|
||||
self.loop_detector.get_metrics().await
|
||||
}
|
||||
|
||||
/// Update loop detection configuration
|
||||
pub async fn update_loop_detection_config(&self, config: super::loop_detection::LoopDetectionConfig) -> Result<()> {
|
||||
self.loop_detector.update_config(config).await
|
||||
}
|
||||
|
||||
/// Clear loop detection state (useful for testing)
|
||||
pub async fn clear_loop_detection_state(&self) -> Result<()> {
|
||||
self.loop_detector.clear_state().await
|
||||
}
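// Illustrative usage sketch (assumption, not part of the original change): a caller such
// as a health-check route or the source scheduler could poll the accessors above, e.g.
//
//     let metrics = webdav_service.get_loop_detection_metrics().await?;
//     if metrics["total_loops_detected"].as_u64().unwrap_or(0) > 0 {
//         warn!("WebDAV loop detection has fired: {}", metrics);
//     }
//
// `warn!` here is the tracing macro already imported by this module.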
|
||||
|
||||
// ============================================================================
|
||||
// Protocol Detection Methods
|
||||
// ============================================================================
|
||||
|
|
@ -282,6 +304,7 @@ impl WebDAVService {
|
|||
file_extensions: self.config.file_extensions.clone(),
|
||||
timeout_seconds: self.config.timeout_seconds,
|
||||
server_type: self.config.server_type.clone(),
|
||||
loop_detection: self.config.loop_detection.clone(),
|
||||
};
|
||||
|
||||
// Test basic OPTIONS request
|
||||
|
|
@ -410,6 +433,7 @@ impl WebDAVService {
|
|||
file_extensions: vec![],
|
||||
timeout_seconds: 30,
|
||||
server_type: test_config.server_type.clone(),
|
||||
loop_detection: super::loop_detection::LoopDetectionConfig::default(),
|
||||
};
|
||||
|
||||
let service = Self::new(config)?;
|
||||
|
|
@ -428,6 +452,7 @@ impl WebDAVService {
|
|||
file_extensions: self.config.file_extensions.clone(),
|
||||
timeout_seconds: self.config.timeout_seconds,
|
||||
server_type: self.config.server_type.clone(),
|
||||
loop_detection: self.config.loop_detection.clone(),
|
||||
};
|
||||
let webdav_url = temp_config.webdav_url();
|
||||
|
||||
|
|
@ -821,6 +846,7 @@ impl WebDAVService {
|
|||
file_extensions: self.config.file_extensions.clone(),
|
||||
timeout_seconds: self.config.timeout_seconds,
|
||||
server_type: self.config.server_type.clone(),
|
||||
loop_detection: self.config.loop_detection.clone(),
|
||||
};
|
||||
let base_url = temp_config.webdav_url();
|
||||
let clean_path = path.trim_start_matches('/');
|
||||
|
|
@ -892,6 +918,7 @@ impl WebDAVService {
|
|||
file_extensions: self.config.file_extensions.clone(),
|
||||
timeout_seconds: self.config.timeout_seconds,
|
||||
server_type: self.config.server_type.clone(),
|
||||
loop_detection: self.config.loop_detection.clone(),
|
||||
};
|
||||
let base_url = temp_config.webdav_url();
|
||||
|
||||
|
|
@ -1120,6 +1147,17 @@ impl WebDAVService {
|
|||
|
||||
/// Discovers both files and directories in a single directory
|
||||
async fn discover_files_and_directories_single(&self, directory_path: &str) -> Result<WebDAVDiscoveryResult> {
|
||||
// Start loop detection tracking with graceful degradation
|
||||
let access_id = match self.loop_detector.start_access(directory_path, "single_discovery").await {
|
||||
Ok(id) => id,
|
||||
Err(e) => {
|
||||
// Log the loop detection error but continue with sync
|
||||
warn!("Loop detection failed for '{}': {} - continuing sync without detection", directory_path, e);
|
||||
uuid::Uuid::new_v4() // Use dummy ID to continue
|
||||
}
|
||||
};
|
||||
|
||||
let result = async {
|
||||
// Try the primary URL first, then fallback URLs if we get a 405 error
|
||||
match self.discover_files_and_directories_single_with_url(directory_path, &self.get_url_for_path(directory_path)).await {
|
||||
Ok(result) => Ok(result),
|
||||
|
|
@ -1133,6 +1171,33 @@ impl WebDAVService {
|
|||
}
|
||||
}
|
||||
}
|
||||
}.await;
|
||||
|
||||
// Complete loop detection tracking with graceful degradation
|
||||
match &result {
|
||||
Ok(discovery) => {
|
||||
if let Err(e) = self.loop_detector.complete_access(
|
||||
access_id,
|
||||
Some(discovery.files.len()),
|
||||
Some(discovery.directories.len()),
|
||||
None
|
||||
).await {
|
||||
debug!("Loop detection completion failed for '{}': {} - sync completed successfully", directory_path, e);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
if let Err(completion_err) = self.loop_detector.complete_access(
|
||||
access_id,
|
||||
None,
|
||||
None,
|
||||
Some(e.to_string())
|
||||
).await {
|
||||
debug!("Loop detection completion failed for '{}': {} - original error: {}", directory_path, completion_err, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
/// Tries fallback URLs when the primary WebDAV URL fails with 405
|
||||
|
|
@ -1236,6 +1301,16 @@ impl WebDAVService {
|
|||
|
||||
/// Discovers files and directories recursively
|
||||
async fn discover_files_and_directories_recursive(&self, directory_path: &str) -> Result<WebDAVDiscoveryResult> {
|
||||
// Start loop detection tracking for the root directory with graceful degradation
|
||||
let access_id = match self.loop_detector.start_access(directory_path, "recursive_discovery").await {
|
||||
Ok(id) => id,
|
||||
Err(e) => {
|
||||
// Log the loop detection error but continue with sync
|
||||
warn!("Loop detection failed for recursive discovery '{}': {} - continuing sync without detection", directory_path, e);
|
||||
uuid::Uuid::new_v4() // Use dummy ID to continue
|
||||
}
|
||||
};
|
||||
|
||||
let mut all_files = Vec::new();
|
||||
let mut all_directories = Vec::new();
|
||||
let mut directories_to_scan = vec![directory_path.to_string()];
|
||||
|
|
@ -1305,6 +1380,17 @@ impl WebDAVService {
|
|||
}
|
||||
|
||||
info!("Recursive scan completed. Found {} files and {} directories", all_files.len(), all_directories.len());
|
||||
|
||||
// Complete loop detection tracking with graceful degradation
|
||||
if let Err(e) = self.loop_detector.complete_access(
|
||||
access_id,
|
||||
Some(all_files.len()),
|
||||
Some(all_directories.len()),
|
||||
None
|
||||
).await {
|
||||
debug!("Loop detection completion failed for recursive discovery '{}': {} - sync completed successfully", directory_path, e);
|
||||
}
|
||||
|
||||
Ok(WebDAVDiscoveryResult {
|
||||
files: all_files,
|
||||
directories: all_directories
|
||||
|
|
@ -2237,6 +2323,7 @@ impl WebDAVService {
|
|||
file_extensions: self.config.file_extensions.clone(),
|
||||
timeout_seconds: self.config.timeout_seconds,
|
||||
server_type: self.config.server_type.clone(),
|
||||
loop_detection: self.config.loop_detection.clone(),
|
||||
};
|
||||
|
||||
let options_response = self.authenticated_request(
|
||||
|
|
@ -2575,6 +2662,7 @@ impl Clone for WebDAVService {
|
|||
scan_semaphore: Arc::clone(&self.scan_semaphore),
|
||||
download_semaphore: Arc::clone(&self.download_semaphore),
|
||||
working_protocol: Arc::clone(&self.working_protocol),
|
||||
loop_detector: self.loop_detector.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -2604,6 +2692,7 @@ mod tests {
|
|||
file_extensions: vec![],
|
||||
timeout_seconds: 30,
|
||||
server_type: Some("generic".to_string()),
|
||||
loop_detection: super::loop_detection::LoopDetectionConfig::default(),
|
||||
};
|
||||
|
||||
let service = WebDAVService::new(config).expect("Failed to create WebDAV service");
|
||||
|
|
@ -2629,6 +2718,7 @@ mod tests {
|
|||
file_extensions: vec![],
|
||||
timeout_seconds: 30,
|
||||
server_type: Some("generic".to_string()),
|
||||
loop_detection: super::loop_detection::LoopDetectionConfig::default(),
|
||||
};
|
||||
|
||||
let retry_config = RetryConfig {
|
||||
|
|
|
|||
|
|
@ -59,6 +59,11 @@ impl SmartSyncService {
|
|||
&self.state
|
||||
}
|
||||
|
||||
/// Get loop detection metrics from the WebDAV service
|
||||
pub async fn get_loop_detection_metrics(&self, webdav_service: &WebDAVService) -> Result<serde_json::Value> {
|
||||
webdav_service.get_loop_detection_metrics().await
|
||||
}
|
||||
|
||||
/// Evaluates whether sync is needed and determines the best strategy
|
||||
pub async fn evaluate_sync_need(
|
||||
&self,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,562 @@
|
|||
/*!
|
||||
* WebDAV Loop Detection Stress Test
|
||||
*
|
||||
* This stress test exercises the actual WebDAV sync functionality with loop detection enabled.
|
||||
* It creates scenarios that could cause loops and verifies that they are properly detected
|
||||
* and reported by the instrumented sync code.
|
||||
*/
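// Typical invocation (illustrative; the binary/test name is a placeholder for however
// this file is registered under tests/stress, and every variable below is one that
// `StressTestConfig::default` reads):
//
//     STRESS_TEST_DURATION=60 \
//     WEBDAV_DUFS_URL=http://localhost:8080 \
//     WEBDAV_USERNAME=webdav_user WEBDAV_PASSWORD=webdav_pass \
//     CONCURRENT_SYNCS=4 TRIGGER_TEST_LOOPS=true LOOP_DETECTION_TIMEOUT=60 \
//     cargo run --release --bin <this-stress-binary>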
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use std::collections::HashMap;
|
||||
use tokio::time::sleep;
|
||||
use anyhow::{Result, Context};
|
||||
use tracing::{info, warn, error, debug};
|
||||
use uuid::Uuid;
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use readur::services::webdav::{
|
||||
WebDAVService, WebDAVConfig, SmartSyncService,
|
||||
LoopDetectionConfig, LoopType
|
||||
};
|
||||
use readur::{AppState, config::Config};
|
||||
|
||||
/// Configuration for the stress test
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct StressTestConfig {
|
||||
/// Duration to run the stress test
|
||||
pub duration_secs: u64,
|
||||
/// WebDAV server URL for testing
|
||||
pub webdav_url: String,
|
||||
/// WebDAV username
|
||||
pub username: String,
|
||||
/// WebDAV password
|
||||
pub password: String,
|
||||
/// Number of concurrent sync operations
|
||||
pub concurrent_syncs: usize,
|
||||
/// Directories to test
|
||||
pub test_directories: Vec<String>,
|
||||
/// Whether to intentionally trigger loops for testing
|
||||
pub trigger_test_loops: bool,
|
||||
/// Loop detection timeout
|
||||
pub loop_detection_timeout_secs: u64,
|
||||
}
|
||||
|
||||
impl Default for StressTestConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
duration_secs: std::env::var("STRESS_TEST_DURATION")
|
||||
.unwrap_or_else(|_| "300".to_string())
|
||||
.parse()
|
||||
.unwrap_or(300),
|
||||
webdav_url: std::env::var("WEBDAV_DUFS_URL")
|
||||
.unwrap_or_else(|_| "http://localhost:8080".to_string()),
|
||||
username: std::env::var("WEBDAV_USERNAME")
|
||||
.unwrap_or_else(|_| "webdav_user".to_string()),
|
||||
password: std::env::var("WEBDAV_PASSWORD")
|
||||
.unwrap_or_else(|_| "webdav_pass".to_string()),
|
||||
concurrent_syncs: std::env::var("CONCURRENT_SYNCS")
|
||||
.unwrap_or_else(|_| "4".to_string())
|
||||
.parse()
|
||||
.unwrap_or(4),
|
||||
test_directories: vec![
|
||||
"/stress_test".to_string(),
|
||||
"/stress_test/nested".to_string(),
|
||||
"/stress_test/deep/structure".to_string(),
|
||||
"/stress_test/complex".to_string(),
|
||||
],
|
||||
trigger_test_loops: std::env::var("TRIGGER_TEST_LOOPS")
|
||||
.unwrap_or_else(|_| "true".to_string())
|
||||
.parse()
|
||||
.unwrap_or(true),
|
||||
loop_detection_timeout_secs: std::env::var("LOOP_DETECTION_TIMEOUT")
|
||||
.unwrap_or_else(|_| "60".to_string())
|
||||
.parse()
|
||||
.unwrap_or(60),
|
||||
}
|
||||
}
|
||||
}
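// Minimal sketch (assumed usage, not part of the original change): the env-driven defaults
// above can also be overridden programmatically, e.g. for a short local smoke run.
#[allow(dead_code)]
fn quick_local_config() -> StressTestConfig {
    StressTestConfig {
        duration_secs: 30,         // short run instead of the 300s default
        concurrent_syncs: 1,       // single worker to keep the test server quiet
        trigger_test_loops: false, // don't intentionally provoke loop detection
        ..StressTestConfig::default()
    }
}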
|
||||
|
||||
/// Metrics collected during stress testing
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct StressTestMetrics {
|
||||
pub total_sync_operations: u64,
|
||||
pub successful_syncs: u64,
|
||||
pub failed_syncs: u64,
|
||||
pub loops_detected: u64,
|
||||
pub avg_sync_duration_ms: f64,
|
||||
pub max_sync_duration_ms: u64,
|
||||
pub min_sync_duration_ms: u64,
|
||||
pub files_discovered: u64,
|
||||
pub directories_discovered: u64,
|
||||
pub errors_by_type: HashMap<String, u64>,
|
||||
pub loop_types_detected: HashMap<String, u64>,
|
||||
}
|
||||
|
||||
impl Default for StressTestMetrics {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
total_sync_operations: 0,
|
||||
successful_syncs: 0,
|
||||
failed_syncs: 0,
|
||||
loops_detected: 0,
|
||||
avg_sync_duration_ms: 0.0,
|
||||
max_sync_duration_ms: 0,
|
||||
min_sync_duration_ms: u64::MAX,
|
||||
files_discovered: 0,
|
||||
directories_discovered: 0,
|
||||
errors_by_type: HashMap::new(),
|
||||
loop_types_detected: HashMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Main stress test runner
|
||||
pub struct WebDAVLoopDetectionStressTest {
|
||||
config: StressTestConfig,
|
||||
metrics: Arc<tokio::sync::Mutex<StressTestMetrics>>,
|
||||
}
|
||||
|
||||
impl WebDAVLoopDetectionStressTest {
|
||||
pub fn new(config: StressTestConfig) -> Self {
|
||||
Self {
|
||||
config,
|
||||
metrics: Arc::new(tokio::sync::Mutex::new(StressTestMetrics::default())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a WebDAV service with loop detection configured for stress testing
|
||||
fn create_webdav_service(&self) -> Result<WebDAVService> {
|
||||
let mut webdav_config = WebDAVConfig::new(
|
||||
self.config.webdav_url.clone(),
|
||||
self.config.username.clone(),
|
||||
self.config.password.clone(),
|
||||
self.config.test_directories.clone(),
|
||||
vec!["pdf".to_string(), "txt".to_string(), "doc".to_string(), "docx".to_string()],
|
||||
);
|
||||
|
||||
// Configure loop detection for stress testing
|
||||
webdav_config.loop_detection = LoopDetectionConfig {
|
||||
enabled: true,
|
||||
max_access_count: 5, // Reasonable limit for stress testing
|
||||
time_window_secs: 60, // 1-minute window
|
||||
max_scan_duration_secs: self.config.loop_detection_timeout_secs,
|
||||
min_scan_interval_secs: 2, // 2-second minimum interval
|
||||
max_pattern_depth: 10,
|
||||
max_tracked_directories: 1000,
|
||||
enable_pattern_analysis: true,
|
||||
log_level: "warn".to_string(), // Reduce log noise during stress test
|
||||
};
|
||||
|
||||
WebDAVService::new(webdav_config)
|
||||
.context("Failed to create WebDAV service for stress testing")
|
||||
}
|
||||
|
||||
/// Run the main stress test
|
||||
pub async fn run(&self) -> Result<StressTestMetrics> {
|
||||
info!("🚀 Starting WebDAV Loop Detection Stress Test");
|
||||
info!("Configuration: {:?}", self.config);
|
||||
|
||||
let start_time = Instant::now();
|
||||
let end_time = start_time + Duration::from_secs(self.config.duration_secs);
|
||||
|
||||
// Create WebDAV services for concurrent testing
|
||||
let mut webdav_services = Vec::new();
|
||||
for i in 0..self.config.concurrent_syncs {
|
||||
match self.create_webdav_service() {
|
||||
Ok(service) => {
|
||||
info!("✅ Created WebDAV service #{}", i);
|
||||
webdav_services.push(service);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("❌ Failed to create WebDAV service #{}: {}", i, e);
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create app state for SmartSyncService
|
||||
let test_config = Config::test_default();
|
||||
let app_state = Arc::new(AppState::new_for_testing(test_config).await
|
||||
.context("Failed to create app state for testing")?);
|
||||
|
||||
let smart_sync_service = SmartSyncService::new(app_state.clone());
|
||||
|
||||
info!("🏁 Starting stress test operations...");
|
||||
|
||||
// Launch concurrent sync operations
|
||||
let mut handles = Vec::new();
|
||||
|
||||
for (service_id, webdav_service) in webdav_services.into_iter().enumerate() {
|
||||
let service = Arc::new(webdav_service);
|
||||
let smart_sync = smart_sync_service.clone();
|
||||
let config = self.config.clone();
|
||||
let metrics = Arc::clone(&self.metrics);
|
||||
|
||||
let handle = tokio::spawn(async move {
|
||||
Self::run_sync_operations(
|
||||
service_id,
|
||||
service,
|
||||
smart_sync,
|
||||
config,
|
||||
metrics,
|
||||
end_time
|
||||
).await
|
||||
});
|
||||
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
// Wait for all operations to complete
|
||||
for (i, handle) in handles.into_iter().enumerate() {
|
||||
match handle.await {
|
||||
Ok(result) => {
|
||||
if let Err(e) = result {
|
||||
warn!("Sync operation #{} completed with error: {}", i, e);
|
||||
} else {
|
||||
info!("✅ Sync operation #{} completed successfully", i);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("❌ Failed to join sync operation #{}: {}", i, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let total_duration = start_time.elapsed();
|
||||
info!("🏁 Stress test completed in {:.2}s", total_duration.as_secs_f64());
|
||||
|
||||
// Generate final metrics
|
||||
let final_metrics = self.generate_final_metrics().await;
|
||||
self.print_stress_test_report(&final_metrics, total_duration);
|
||||
|
||||
Ok(final_metrics)
|
||||
}
|
||||
|
||||
/// Run sync operations for a single WebDAV service
|
||||
async fn run_sync_operations(
|
||||
service_id: usize,
|
||||
webdav_service: Arc<WebDAVService>,
|
||||
smart_sync_service: SmartSyncService,
|
||||
config: StressTestConfig,
|
||||
metrics: Arc<tokio::sync::Mutex<StressTestMetrics>>,
|
||||
end_time: Instant,
|
||||
) -> Result<()> {
|
||||
let user_id = Uuid::new_v4();
|
||||
let mut operation_count = 0;
|
||||
|
||||
info!("🔄 Service #{} starting sync operations", service_id);
|
||||
|
||||
while Instant::now() < end_time {
|
||||
operation_count += 1;
|
||||
let op_start = Instant::now();
|
||||
|
||||
// Randomly select a directory to sync
|
||||
let dir_index = operation_count % config.test_directories.len();
|
||||
let target_directory = &config.test_directories[dir_index];
|
||||
|
||||
debug!("Service #{} operation #{}: syncing {}", service_id, operation_count, target_directory);
|
||||
|
||||
// Perform sync operation with loop detection
|
||||
let sync_result = Self::perform_monitored_sync(
|
||||
&*webdav_service,
|
||||
&smart_sync_service,
|
||||
user_id,
|
||||
target_directory,
|
||||
operation_count,
|
||||
).await;
|
||||
|
||||
let op_duration = op_start.elapsed();
|
||||
|
||||
// Update metrics
|
||||
Self::update_metrics(
|
||||
&metrics,
|
||||
&sync_result,
|
||||
op_duration,
|
||||
&*webdav_service,
|
||||
).await;
|
||||
|
||||
// If we're testing loop triggers, occasionally create conditions that might cause loops
|
||||
if config.trigger_test_loops && operation_count % 10 == 0 {
|
||||
Self::trigger_test_loop_scenario(&*webdav_service, target_directory).await;
|
||||
}
|
||||
|
||||
// Brief pause between operations to avoid overwhelming the server
|
||||
sleep(Duration::from_millis(100 + (service_id * 50) as u64)).await;
|
||||
}
|
||||
|
||||
info!("📊 Service #{} completed {} operations", service_id, operation_count);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Perform a single sync operation with comprehensive monitoring
|
||||
async fn perform_monitored_sync(
|
||||
webdav_service: &WebDAVService,
|
||||
smart_sync_service: &SmartSyncService,
|
||||
user_id: Uuid,
|
||||
directory: &str,
|
||||
operation_id: usize,
|
||||
) -> Result<(usize, usize)> {
|
||||
// First evaluate if sync is needed
|
||||
match smart_sync_service.evaluate_sync_need(
|
||||
user_id,
|
||||
webdav_service,
|
||||
directory,
|
||||
None, // No progress tracking for stress test
|
||||
).await {
|
||||
Ok(decision) => {
|
||||
match decision {
|
||||
readur::services::webdav::SmartSyncDecision::SkipSync => {
|
||||
debug!("Operation #{}: Sync skipped for {}", operation_id, directory);
|
||||
Ok((0, 0))
|
||||
}
|
||||
readur::services::webdav::SmartSyncDecision::RequiresSync(strategy) => {
|
||||
// Perform the actual sync
|
||||
match smart_sync_service.perform_smart_sync(
|
||||
user_id,
|
||||
None, // No source ID for stress test
|
||||
webdav_service,
|
||||
directory,
|
||||
strategy,
|
||||
None, // No progress tracking
|
||||
).await {
|
||||
Ok(result) => Ok((result.files.len(), result.directories.len())),
|
||||
Err(e) => {
|
||||
if e.to_string().contains("Loop detected") {
|
||||
debug!("Operation #{}: Loop detected for {} - {}", operation_id, directory, e);
|
||||
Err(e)
|
||||
} else {
|
||||
warn!("Operation #{}: Sync failed for {} - {}", operation_id, directory, e);
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Operation #{}: Sync evaluation failed for {} - {}", operation_id, directory, e);
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Trigger test scenarios that might cause loops (for testing purposes)
|
||||
async fn trigger_test_loop_scenario(webdav_service: &WebDAVService, directory: &str) {
|
||||
debug!("🧪 Triggering test loop scenario for {}", directory);
|
||||
|
||||
// Rapid repeated access to the same directory
|
||||
for i in 0..3 {
|
||||
match webdav_service.discover_files_and_directories(directory, false).await {
|
||||
Ok(_) => debug!("Test loop trigger #{} succeeded for {}", i, directory),
|
||||
Err(e) => {
|
||||
if e.to_string().contains("Loop detected") {
|
||||
debug!("✅ Test loop scenario successfully triggered loop detection: {}", e);
|
||||
return;
|
||||
} else {
|
||||
debug!("Test loop trigger #{} failed for {}: {}", i, directory, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Very short delay to trigger immediate re-scan detection
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Update metrics based on sync operation result
|
||||
async fn update_metrics(
|
||||
metrics: &Arc<tokio::sync::Mutex<StressTestMetrics>>,
|
||||
sync_result: &Result<(usize, usize)>,
|
||||
duration: Duration,
|
||||
webdav_service: &WebDAVService,
|
||||
) {
|
||||
let mut m = metrics.lock().await;
|
||||
m.total_sync_operations += 1;
|
||||
|
||||
let duration_ms = duration.as_millis() as u64;
|
||||
m.max_sync_duration_ms = m.max_sync_duration_ms.max(duration_ms);
|
||||
m.min_sync_duration_ms = m.min_sync_duration_ms.min(duration_ms);
|
||||
|
||||
// Update average duration
|
||||
let total_duration = m.avg_sync_duration_ms * (m.total_sync_operations - 1) as f64;
|
||||
m.avg_sync_duration_ms = (total_duration + duration_ms as f64) / m.total_sync_operations as f64;
|
||||
|
||||
match sync_result {
|
||||
Ok((files, dirs)) => {
|
||||
m.successful_syncs += 1;
|
||||
m.files_discovered += *files as u64;
|
||||
m.directories_discovered += *dirs as u64;
|
||||
}
|
||||
Err(e) => {
|
||||
m.failed_syncs += 1;
|
||||
|
||||
let error_msg = e.to_string();
|
||||
if error_msg.contains("Loop detected") {
|
||||
m.loops_detected += 1;
|
||||
|
||||
// Classify loop types
|
||||
if error_msg.contains("re-accessed after only") {
|
||||
*m.loop_types_detected.entry("ImmediateReScan".to_string()).or_insert(0) += 1;
|
||||
} else if error_msg.contains("Concurrent access detected") {
|
||||
*m.loop_types_detected.entry("ConcurrentAccess".to_string()).or_insert(0) += 1;
|
||||
} else if error_msg.contains("accessed") && error_msg.contains("times") {
|
||||
*m.loop_types_detected.entry("FrequentReAccess".to_string()).or_insert(0) += 1;
|
||||
} else if error_msg.contains("stuck") {
|
||||
*m.loop_types_detected.entry("StuckScan".to_string()).or_insert(0) += 1;
|
||||
} else if error_msg.contains("Circular pattern") {
|
||||
*m.loop_types_detected.entry("CircularPattern".to_string()).or_insert(0) += 1;
|
||||
} else {
|
||||
*m.loop_types_detected.entry("Other".to_string()).or_insert(0) += 1;
|
||||
}
|
||||
} else {
|
||||
// Classify other error types
|
||||
let error_type = if error_msg.contains("timeout") {
|
||||
"Timeout"
|
||||
} else if error_msg.contains("connection") {
|
||||
"Connection"
|
||||
} else if error_msg.contains("404") || error_msg.contains("Not Found") {
|
||||
"NotFound"
|
||||
} else if error_msg.contains("403") || error_msg.contains("Forbidden") {
|
||||
"Forbidden"
|
||||
} else if error_msg.contains("500") || error_msg.contains("Internal Server Error") {
|
||||
"ServerError"
|
||||
} else {
|
||||
"Unknown"
|
||||
};
|
||||
|
||||
*m.errors_by_type.entry(error_type.to_string()).or_insert(0) += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Collect loop detection metrics from the WebDAV service
|
||||
if let Ok(ld_metrics) = webdav_service.get_loop_detection_metrics().await {
|
||||
if let Some(total_loops) = ld_metrics.get("total_loops_detected") {
|
||||
if let Some(loops) = total_loops.as_u64() {
|
||||
// Update our metrics with the actual count from loop detector
|
||||
m.loops_detected = m.loops_detected.max(loops);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate final comprehensive metrics
|
||||
async fn generate_final_metrics(&self) -> StressTestMetrics {
|
||||
self.metrics.lock().await.clone()
|
||||
}
|
||||
|
||||
/// Print a comprehensive stress test report
|
||||
fn print_stress_test_report(&self, metrics: &StressTestMetrics, total_duration: Duration) {
|
||||
println!("\n" + "=".repeat(80).as_str());
|
||||
println!("📊 WEBDAV LOOP DETECTION STRESS TEST REPORT");
|
||||
println!("=".repeat(80));
|
||||
|
||||
println!("\n🕒 Test Duration: {:.2}s", total_duration.as_secs_f64());
|
||||
println!("🔄 Total Sync Operations: {}", metrics.total_sync_operations);
|
||||
println!("✅ Successful Syncs: {} ({:.1}%)",
|
||||
metrics.successful_syncs,
|
||||
metrics.successful_syncs as f64 / metrics.total_sync_operations as f64 * 100.0);
|
||||
println!("❌ Failed Syncs: {} ({:.1}%)",
|
||||
metrics.failed_syncs,
|
||||
metrics.failed_syncs as f64 / metrics.total_sync_operations as f64 * 100.0);
|
||||
|
||||
println!("\n🔄 Loop Detection Results:");
|
||||
println!(" 🚨 Loops Detected: {} ({:.1}%)",
|
||||
metrics.loops_detected,
|
||||
metrics.loops_detected as f64 / metrics.total_sync_operations as f64 * 100.0);
|
||||
|
||||
if !metrics.loop_types_detected.is_empty() {
|
||||
println!(" 📊 Loop Types Detected:");
|
||||
for (loop_type, count) in &metrics.loop_types_detected {
|
||||
println!(" - {}: {}", loop_type, count);
|
||||
}
|
||||
}
|
||||
|
||||
println!("\n⚡ Performance Metrics:");
|
||||
println!(" 📈 Average Sync Duration: {:.2}ms", metrics.avg_sync_duration_ms);
|
||||
println!(" 🏃 Fastest Sync: {}ms", metrics.min_sync_duration_ms);
|
||||
println!(" 🐌 Slowest Sync: {}ms", metrics.max_sync_duration_ms);
|
||||
println!(" 🏁 Operations per Second: {:.2}",
|
||||
metrics.total_sync_operations as f64 / total_duration.as_secs_f64());
|
||||
|
||||
println!("\n📁 Discovery Results:");
|
||||
println!(" 📄 Files Discovered: {}", metrics.files_discovered);
|
||||
println!(" 📂 Directories Discovered: {}", metrics.directories_discovered);
|
||||
|
||||
if !metrics.errors_by_type.is_empty() {
|
||||
println!("\n❌ Error Breakdown:");
|
||||
for (error_type, count) in &metrics.errors_by_type {
|
||||
println!(" - {}: {} ({:.1}%)",
|
||||
error_type, count,
|
||||
*count as f64 / metrics.failed_syncs as f64 * 100.0);
|
||||
}
|
||||
}
|
||||
|
||||
println!("\n" + "=".repeat(80).as_str());
|
||||
|
||||
// Generate JSON report for CI/CD
|
||||
let report = json!({
|
||||
"test_type": "webdav_loop_detection_stress",
|
||||
"duration_secs": total_duration.as_secs_f64(),
|
||||
"total_operations": metrics.total_sync_operations,
|
||||
"successful_operations": metrics.successful_syncs,
|
||||
"failed_operations": metrics.failed_syncs,
|
||||
"success_rate": metrics.successful_syncs as f64 / metrics.total_sync_operations as f64 * 100.0,
|
||||
"loops_detected": metrics.loops_detected,
|
||||
"loop_detection_rate": metrics.loops_detected as f64 / metrics.total_sync_operations as f64 * 100.0,
|
||||
"avg_duration_ms": metrics.avg_sync_duration_ms,
|
||||
"min_duration_ms": metrics.min_sync_duration_ms,
|
||||
"max_duration_ms": metrics.max_sync_duration_ms,
|
||||
"ops_per_second": metrics.total_sync_operations as f64 / total_duration.as_secs_f64(),
|
||||
"files_discovered": metrics.files_discovered,
|
||||
"directories_discovered": metrics.directories_discovered,
|
||||
"loop_types": metrics.loop_types_detected,
|
||||
"error_types": metrics.errors_by_type,
|
||||
});
|
||||
|
||||
// Write JSON report for CI/CD consumption
|
||||
if let Ok(report_dir) = std::env::var("STRESS_RESULTS_DIR") {
|
||||
let report_path = format!("{}/webdav_loop_detection_report.json", report_dir);
|
||||
if let Err(e) = std::fs::write(&report_path, serde_json::to_string_pretty(&report).unwrap()) {
|
||||
warn!("Failed to write JSON report to {}: {}", report_path, e);
|
||||
} else {
|
||||
info!("📋 JSON report written to {}", report_path);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Main entry point for the stress test
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<()> {
|
||||
// Initialize tracing
|
||||
tracing_subscriber::fmt()
|
||||
.with_env_filter(
|
||||
std::env::var("RUST_LOG").unwrap_or_else(|_| "info,webdav_loop_detection_stress=debug".to_string())
|
||||
)
|
||||
.init();
|
||||
|
||||
let config = StressTestConfig::default();
|
||||
let stress_test = WebDAVLoopDetectionStressTest::new(config);
|
||||
|
||||
let metrics = stress_test.run().await
|
||||
.context("Stress test failed")?;
|
||||
|
||||
// Exit with error code if too many loops were detected (indicating a problem)
|
||||
let loop_rate = metrics.loops_detected as f64 / metrics.total_sync_operations as f64 * 100.0;
|
||||
if loop_rate > 50.0 {
|
||||
error!("🚨 CRITICAL: Loop detection rate ({:.1}%) exceeds threshold (50%)", loop_rate);
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
// Exit with error code if success rate is too low
|
||||
let success_rate = metrics.successful_syncs as f64 / metrics.total_sync_operations as f64 * 100.0;
|
||||
if success_rate < 70.0 {
|
||||
error!("🚨 CRITICAL: Success rate ({:.1}%) below threshold (70%)", success_rate);
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
info!("🎉 Stress test completed successfully!");
|
||||
Ok(())
|
||||
}
|
||||
File diff suppressed because it is too large