diff --git a/.github/workflows/dotnet.yml b/.github/workflows/dotnet.yml
index 63c7360..29d02df 100644
--- a/.github/workflows/dotnet.yml
+++ b/.github/workflows/dotnet.yml
@@ -1,117 +1,190 @@
-# This workflow will build a .NET project
-# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-net
-
-name: .NET
-
-on:
- push:
- branches: [ "master" ]
- pull_request:
- branches: [ "master" ]
-
-jobs:
- build:
-
- runs-on: ubuntu-latest
-
- services:
- sftp:
- image: atmoz/sftp:latest
- ports:
- - 2222:22
- options: >-
- --health-cmd "pgrep sshd"
- --health-interval 10s
- --health-timeout 5s
- --health-retries 5
- env:
- SFTP_USERS: testuser:testpass:1001:100:upload
-
- ftp:
- image: fauria/vsftpd:latest
- ports:
- - 21:21
- - 21000-21010:21000-21010
- options: >-
- --health-cmd "pgrep vsftpd"
- --health-interval 10s
- --health-timeout 5s
- --health-retries 5
- env:
- FTP_USER: testuser
- FTP_PASS: testpass
- PASV_ADDRESS: localhost
- PASV_MIN_PORT: 21000
- PASV_MAX_PORT: 21010
-
- localstack:
- image: localstack/localstack:latest
- ports:
- - 4566:4566
- options: >-
- --health-cmd "curl -f http://localhost:4566/_localstack/health || exit 1"
- --health-interval 10s
- --health-timeout 5s
- --health-retries 5
- env:
- SERVICES: s3
- DEBUG: 0
- EDGE_PORT: 4566
-
-
- steps:
- - uses: actions/checkout@v6
-
- - name: Start WebDAV server
- run: |
- echo "=== Starting WebDAV server ==="
- docker run -d --name webdav-server -p 8080:8080 \
- eclipse-temurin:11-jre bash -c "
- apt-get update && apt-get install -y curl wget &&
- wget -O webdav-server.jar https://repo1.maven.org/maven2/io/github/atetzner/webdav-embedded-server/0.2.1/webdav-embedded-server-0.2.1.jar &&
- mkdir -p /webdav &&
- java -jar webdav-server.jar --port 8080 --directory /webdav
- "
-
- - name: Wait for services to be ready
- run: |
- echo "=== Waiting for services to be ready ==="
- sleep 15
- echo "=== Testing WebDAV server ==="
- curl -f http://localhost:8080/ || echo "WebDAV not ready yet, continuing..."
-
- - name: Setup .NET
- uses: actions/setup-dotnet@v5
- with:
- dotnet-version: 8.0.x
- - name: Restore dependencies
- run: dotnet restore
- - name: Check format
- run: dotnet format --verify-no-changes
- - name: Build
- run: dotnet build --no-restore
- - name: Create S3 test bucket
- run: |
- docker exec $(docker ps -q -f ancestor=localstack/localstack:latest) awslocal s3 mb s3://test-bucket
- - name: Test
- run: dotnet test --no-build --verbosity normal
- env:
- SFTP_TEST_HOST: localhost
- SFTP_TEST_PORT: 2222
- SFTP_TEST_USER: testuser
- SFTP_TEST_PASS: testpass
- SFTP_TEST_ROOT: "upload"
- FTP_TEST_HOST: localhost
- FTP_TEST_PORT: 21
- FTP_TEST_USER: testuser
- FTP_TEST_PASS: testpass
- FTP_TEST_ROOT: ""
- S3_TEST_BUCKET: test-bucket
- S3_TEST_ACCESS_KEY: test
- S3_TEST_SECRET_KEY: test
- S3_TEST_ENDPOINT: http://localhost:4566
- S3_TEST_PREFIX: sharpsync-tests
- WEBDAV_TEST_URL: http://localhost:8080/
- WEBDAV_TEST_USER: ""
- WEBDAV_TEST_PASS: ""
- WEBDAV_TEST_ROOT: ""
+# This workflow will build a .NET project
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-net
+
+name: .NET
+
+on:
+ push:
+ branches: [ "master" ]
+ pull_request:
+ branches: [ "master" ]
+
+jobs:
+ build:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v6
+
+ - name: Cleanup previous test services
+ run: |
+ echo "=== Cleaning up any previous test services ==="
+ docker compose -f docker-compose.test.yml down -v --remove-orphans || true
+ sudo rm -rf /tmp/localstack || true
+ echo "=== Removing stale Docker volumes ==="
+ docker volume rm sharp-sync_sftp-data || true
+ docker volume rm sharp-sync_ftp-data || true
+ docker volume rm sharp-sync_webdav-data || true
+
+ - name: Start test services
+ run: |
+ echo "=== Starting test services with docker-compose ==="
+ docker compose -f docker-compose.test.yml up -d
+
+ - name: Wait for services to be ready
+ run: |
+ echo "=== Waiting for services to be healthy ==="
+ docker compose -f docker-compose.test.yml ps
+
+ echo "=== Waiting for SFTP server ==="
+ for i in {1..18}; do
+ nc -z localhost 2222 && echo "SFTP ready" && break
+ echo "SFTP not ready, retrying in 5 seconds... ($i/18)"
+ sleep 5
+ done
+
+ echo "=== Waiting for FTP server ==="
+ for i in {1..18}; do
+ nc -z localhost 21 && echo "FTP ready" && break
+ echo "FTP not ready, retrying in 5 seconds... ($i/18)"
+ sleep 5
+ done
+
+ echo "=== Waiting for LocalStack ==="
+ for i in {1..30}; do
+ curl -sf http://localhost:4566/_localstack/health && echo "LocalStack ready" && break
+ echo "LocalStack not ready, retrying in 10 seconds... ($i/30)"
+ sleep 10
+ done
+
+ echo "=== Waiting for WebDAV server ==="
+ WEBDAV_BASIC_READY=false
+ for i in {1..30}; do
+ if curl -sf -u testuser:testpass http://localhost:8080/ > /dev/null 2>&1; then
+ echo "WebDAV responding to basic requests"
+ WEBDAV_BASIC_READY=true
+ break
+ fi
+ echo "WebDAV not responding, retrying in 5 seconds... ($i/30)"
+ sleep 5
+ done
+
+ if [ "$WEBDAV_BASIC_READY" = "false" ]; then
+ echo "ERROR: WebDAV server not responding after 30 attempts"
+ docker compose -f docker-compose.test.yml logs webdav
+ exit 1
+ fi
+
+ echo "=== Checking WebDAV full functionality (MKCOL) ==="
+ WEBDAV_FULLY_READY=false
+ for i in {1..40}; do
+ # Log each attempt and the resulting status code for debugging
+ echo "Attempt $i: Testing MKCOL operation..."
+ MKCOL_RESULT=$(curl -s -w "%{http_code}" -u testuser:testpass -X MKCOL http://localhost:8080/_health-check-dir/ -o /dev/null 2>&1)
+ echo "MKCOL response code: $MKCOL_RESULT"
+
+ if [ "$MKCOL_RESULT" = "201" ] || [ "$MKCOL_RESULT" = "405" ]; then
+ echo "WebDAV is fully operational (MKCOL working)"
+ # Cleanup test directory
+ curl -sf -u testuser:testpass -X DELETE http://localhost:8080/_health-check-dir/ > /dev/null 2>&1 || true
+ WEBDAV_FULLY_READY=true
+ break
+ fi
+
+ # Every 5th attempt, show container status and recent logs while waiting
+ if [ $((i % 5)) -eq 0 ]; then
+ echo "--- WebDAV container status ---"
+ docker compose -f docker-compose.test.yml ps webdav
+ echo "--- Recent WebDAV logs ---"
+ docker compose -f docker-compose.test.yml logs --tail=10 webdav
+ fi
+
+ echo "MKCOL not working yet, retrying in 5 seconds..."
+ sleep 5
+ done
+
+ if [ "$WEBDAV_FULLY_READY" = "false" ]; then
+ echo "ERROR: WebDAV MKCOL not working after 40 attempts"
+ echo "=== Full WebDAV logs ==="
+ docker compose -f docker-compose.test.yml logs webdav
+ exit 1
+ fi
+
+ echo "=== Final service status ==="
+ docker compose -f docker-compose.test.yml ps
+
+ - name: Setup .NET
+ uses: actions/setup-dotnet@v5
+ with:
+ dotnet-version: 8.0.x
+ - name: Restore dependencies
+ run: dotnet restore
+ - name: Check format
+ run: dotnet format --verify-no-changes
+ - name: Build
+ run: dotnet build --no-restore
+ - name: Create S3 test bucket
+ run: |
+ docker exec sharp-sync-localstack-1 awslocal s3 mb s3://test-bucket
+
+ - name: Debug WebDAV setup
+ run: |
+ echo "=== WebDAV Container Status ==="
+ docker compose -f docker-compose.test.yml ps webdav
+ echo ""
+ echo "=== WebDAV Container Logs ==="
+ docker compose -f docker-compose.test.yml logs webdav
+ echo ""
+ echo "=== Testing WebDAV Operations ==="
+ echo "PROPFIND (list root):"
+ curl -s -w "\nHTTP Status: %{http_code}\n" -u testuser:testpass -X PROPFIND http://localhost:8080/ -H "Depth: 1" | head -30
+ echo ""
+ echo "PUT (write test):"
+ echo "test content" | curl -s -w "\nHTTP Status: %{http_code}\n" -u testuser:testpass -X PUT http://localhost:8080/_debug-test.txt -d @-
+ echo ""
+ echo "DELETE (cleanup):"
+ curl -s -w "\nHTTP Status: %{http_code}\n" -u testuser:testpass -X DELETE http://localhost:8080/_debug-test.txt
+
+ - name: Prepare WebDAV test root
+ run: |
+ echo "=== Creating WebDAV test root directory ==="
+ # Delete existing test root if present
+ curl -sf -u testuser:testpass -X DELETE http://localhost:8080/ci-root/ --output /dev/null 2>&1 || true
+ # Create fresh test root
+ curl -sf -u testuser:testpass -X MKCOL http://localhost:8080/ci-root/
+ echo "WebDAV test root created successfully"
+
+ - name: Test
+ run: dotnet test --no-build --verbosity normal
+ env:
+ SFTP_TEST_HOST: localhost
+ SFTP_TEST_PORT: 2222
+ SFTP_TEST_USER: testuser
+ SFTP_TEST_PASS: testpass
+ SFTP_TEST_ROOT: upload
+ FTP_TEST_HOST: localhost
+ FTP_TEST_PORT: 21
+ FTP_TEST_USER: testuser
+ FTP_TEST_PASS: testpass
+ FTP_TEST_ROOT: ""
+ S3_TEST_BUCKET: test-bucket
+ S3_TEST_ACCESS_KEY: test
+ S3_TEST_SECRET_KEY: test
+ S3_TEST_ENDPOINT: http://localhost:4566
+ S3_TEST_PREFIX: sharpsync-tests
+ WEBDAV_TEST_URL: http://localhost:8080/
+ WEBDAV_TEST_USER: testuser
+ WEBDAV_TEST_PASS: testpass
+ WEBDAV_TEST_ROOT: "ci-root"
+
+ - name: Dump container logs
+ if: failure()
+ run: |
+ echo "=== Container logs for debugging ==="
+ docker compose -f docker-compose.test.yml logs
+
+ - name: Stop test services
+ if: always()
+ run: |
+ docker compose -f docker-compose.test.yml down -v --remove-orphans || true
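Note on the Test step's environment block above: the integration-test fixtures read these variables via Environment.GetEnvironmentVariable and skip themselves when the values are missing (see SkipIfIntegrationTestsDisabled in the test diffs below). A minimal sketch of that pattern, assuming the Xunit.SkippableFact package implied by the [SkippableFact] attributes; the member names here are illustrative only:

    // Sketch only: gating integration tests on the CI environment block.
    private readonly string? _testHost = Environment.GetEnvironmentVariable("SFTP_TEST_HOST");
    private readonly string? _testUser = Environment.GetEnvironmentVariable("SFTP_TEST_USER");

    private void SkipIfIntegrationTestsDisabled() {
        // Skip.If comes from Xunit.SkippableFact and marks the test as skipped rather than failed.
        Skip.If(string.IsNullOrEmpty(_testHost) || string.IsNullOrEmpty(_testUser),
            "SFTP_TEST_HOST/SFTP_TEST_USER not set; skipping SFTP integration tests");
    }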
diff --git a/docker-compose.test.yml b/docker-compose.test.yml
index b92e53c..08a0bf7 100644
--- a/docker-compose.test.yml
+++ b/docker-compose.test.yml
@@ -1,71 +1,61 @@
-services:
- sftp:
- image: atmoz/sftp:latest
- ports:
- - "2222:22"
- volumes:
- - sftp-data:/home/testuser/upload
- command: testuser:testpass:1001:100:upload
- healthcheck:
- test: ["CMD", "pgrep", "sshd"]
- interval: 10s
- timeout: 5s
- retries: 5
-
- ftp:
- image: fauria/vsftpd:latest
- ports:
- - "21:21"
- - "21000-21010:21000-21010"
- environment:
- FTP_USER: testuser
- FTP_PASS: testpass
- PASV_ADDRESS: localhost
- PASV_MIN_PORT: 21000
- PASV_MAX_PORT: 21010
- volumes:
- - ftp-data:/home/vsftpd
- healthcheck:
- test: ["CMD", "pgrep", "vsftpd"]
- interval: 10s
- timeout: 5s
- retries: 5
-
- localstack:
- image: localstack/localstack:latest
- ports:
- - "4566:4566"
- environment:
- SERVICES: s3
- DEBUG: 0
- DATA_DIR: /tmp/localstack/data
- EDGE_PORT: 4566
- volumes:
- - localstack-data:/tmp/localstack
- healthcheck:
- test: ["CMD", "curl", "-f", "http://localhost:4566/_localstack/health"]
- interval: 10s
- timeout: 5s
- retries: 5
-
- webdav:
- image: maltokyo/docker-nginx-webdav:latest
- ports:
- - "8080:80"
- environment:
- USERNAME: testuser
- PASSWORD: testpass
- volumes:
- - webdav-data:/media/data
- healthcheck:
- test: ["CMD", "sh", "-c", "test -f /var/run/nginx.pid"]
- interval: 10s
- timeout: 5s
- retries: 10
- start_period: 60s
-
-volumes:
- sftp-data:
- ftp-data:
- localstack-data:
- webdav-data:
+services:
+ sftp:
+ image: atmoz/sftp:latest
+ ports:
+ - "2222:22"
+ command: ["testuser:testpass:1001:100:upload"]
+ healthcheck:
+ test: ["CMD", "pgrep", "sshd"]
+ interval: 15s
+ timeout: 10s
+ retries: 10
+ start_period: 15s
+
+ ftp:
+ image: fauria/vsftpd:latest
+ ports:
+ - "21:21"
+ - "21000-21010:21000-21010"
+ environment:
+ FTP_USER: testuser
+ FTP_PASS: testpass
+ PASV_ADDRESS: localhost
+ PASV_MIN_PORT: 21000
+ PASV_MAX_PORT: 21010
+ volumes:
+ - ftp-data:/home/vsftpd
+ healthcheck:
+ test: ["CMD", "pgrep", "vsftpd"]
+ interval: 15s
+ timeout: 10s
+ retries: 10
+ start_period: 15s
+
+ localstack:
+ image: localstack/localstack:latest
+ ports:
+ - "4566:4566"
+ environment:
+ SERVICES: s3
+ DEBUG: 0
+ EDGE_PORT: 4566
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:4566/_localstack/health"]
+ interval: 15s
+ timeout: 10s
+ retries: 10
+ start_period: 30s
+
+ webdav:
+ image: hacdias/webdav:latest
+ restart: unless-stopped
+ ports:
+ - "8080:80"
+ volumes:
+ - ./webdav-config.yml:/config.yaml:ro
+ - webdav-data:/data
+
+volumes:
+ sftp-data:
+ ftp-data:
+ webdav-data:
diff --git a/run-webdav-tests.ps1 b/run-webdav-tests.ps1
new file mode 100644
index 0000000..3ee24ec
--- /dev/null
+++ b/run-webdav-tests.ps1
@@ -0,0 +1,8 @@
+# PowerShell script to run WebDAV integration tests
+$env:WEBDAV_TEST_URL = "http://localhost:8080/"
+$env:WEBDAV_TEST_USER = "testuser"
+$env:WEBDAV_TEST_PASS = "testpass"
+$env:WEBDAV_TEST_ROOT = ""
+
+# Run the WebDAV tests
+dotnet test tests/SharpSync.Tests/SharpSync.Tests.csproj --filter "FullyQualifiedName~WebDav" --verbosity normal
\ No newline at end of file
diff --git a/src/SharpSync/Storage/SftpStorage.cs b/src/SharpSync/Storage/SftpStorage.cs
index 8a3d02b..2c62e08 100644
--- a/src/SharpSync/Storage/SftpStorage.cs
+++ b/src/SharpSync/Storage/SftpStorage.cs
@@ -201,90 +201,81 @@ private async Task EnsureConnectedAsync(CancellationToken cancellationToken = de
await Task.Run(() => _client.Connect(), cancellationToken);
- // Detect server path handling (chrooted vs normal) and set effective root
+ // Detect server path handling based on root path configuration
+ // When no root is specified or root doesn't start with "/", assume chrooted environment
+ // and use relative paths. This is the safe default.
var normalizedRoot = string.IsNullOrEmpty(RootPath) ? "" : RootPath.TrimStart('/');
+ bool isChrooted = string.IsNullOrEmpty(RootPath) || !RootPath.StartsWith('/');
if (string.IsNullOrEmpty(normalizedRoot)) {
- // No root path specified - detect whether server is chrooted or normal
- // Chrooted servers require relative paths even with no configured root
- try {
- // Try probing with current directory (relative) vs root (absolute)
- var canAccessRelative = SafeExists(".") || SafeExists("");
- var canAccessAbsolute = SafeExists("/");
-
- if (canAccessRelative && !canAccessAbsolute) {
- // Can access relative but not absolute - chrooted server
- _effectiveRoot = null;
- _useRelativePaths = true;
- } else if (canAccessAbsolute) {
- // Can access absolute paths - normal server
- _effectiveRoot = null;
- _useRelativePaths = false;
- } else {
- // Conservative fallback: assume chrooted to avoid permission errors
- _effectiveRoot = null;
- _useRelativePaths = true;
- }
- } catch (Renci.SshNet.Common.SftpPermissionDeniedException) {
- // Permission error during probing - assume chrooted server
- _effectiveRoot = null;
- _useRelativePaths = true;
- }
+ // No root path specified
+ _effectiveRoot = null;
+ _useRelativePaths = isChrooted;
} else {
try {
- // Try to detect which path form the server accepts
+ // Root path specified - check if it exists or try to create it
string? existingRoot = null;
var absoluteRoot = "/" + normalizedRoot;
- // Try different path forms to detect chroot behavior
- if (SafeExists(normalizedRoot)) {
- // Relative path works - likely chrooted server
- existingRoot = normalizedRoot;
+ // Try different path forms based on server type
+ if (isChrooted) {
+ // Chrooted server - use relative paths
+ if (SafeExists(normalizedRoot)) {
+ existingRoot = normalizedRoot;
+ } else {
+ // Path doesn't exist, try to create it
+ var parts = normalizedRoot.Split('/').Where(p => !string.IsNullOrEmpty(p)).ToList();
+ var currentPath = "";
+
+ foreach (var part in parts) {
+ currentPath = string.IsNullOrEmpty(currentPath) ? part : $"{currentPath}/{part}";
+
+ if (!SafeExists(currentPath)) {
+ try {
+ _client.CreateDirectory(currentPath);
+ } catch (Exception ex) when (ex is Renci.SshNet.Common.SftpPermissionDeniedException ||
+ ex is Renci.SshNet.Common.SftpPathNotFoundException) {
+ // Failed to create - likely at chroot boundary; stop descending and use what exists
+ break;
+ }
+ }
+ }
+ existingRoot = normalizedRoot;
+ }
_useRelativePaths = true;
- } else if (SafeExists(absoluteRoot)) {
- // Absolute path works - normal server
- existingRoot = normalizedRoot;
- _useRelativePaths = false;
} else {
- // Path doesn't exist, try to create it
- // Prefer relative creation for chrooted servers
- var parts = normalizedRoot.Split('/').Where(p => !string.IsNullOrEmpty(p)).ToList();
- var currentPath = "";
- var createdWithRelative = false;
-
- foreach (var part in parts) {
- currentPath = string.IsNullOrEmpty(currentPath) ? part : $"{currentPath}/{part}";
-
- if (!SafeExists(currentPath)) {
- try {
- // Try relative creation first
- _client.CreateDirectory(currentPath);
- createdWithRelative = true;
- } catch (Renci.SshNet.Common.SftpPermissionDeniedException) {
- // Relative failed, try absolute
- var absoluteCandidate = "/" + currentPath;
- if (!SafeExists(absoluteCandidate)) {
- try {
- _client.CreateDirectory(absoluteCandidate);
- createdWithRelative = false;
- } catch (Renci.SshNet.Common.SftpPermissionDeniedException) {
- // Both failed - likely at chroot boundary, continue
- break;
- }
+ // Normal server - use absolute paths
+ if (SafeExists(absoluteRoot)) {
+ existingRoot = normalizedRoot;
+ } else {
+ // Path doesn't exist, try to create it
+ var parts = normalizedRoot.Split('/').Where(p => !string.IsNullOrEmpty(p)).ToList();
+ var currentPath = "";
+
+ foreach (var part in parts) {
+ currentPath = string.IsNullOrEmpty(currentPath) ? part : $"{currentPath}/{part}";
+ var absolutePath = "/" + currentPath;
+
+ if (!SafeExists(absolutePath)) {
+ try {
+ _client.CreateDirectory(absolutePath);
+ } catch (Exception ex) when (ex is Renci.SshNet.Common.SftpPermissionDeniedException ||
+ ex is Renci.SshNet.Common.SftpPathNotFoundException) {
+ // Failed to create
+ break;
}
}
}
+ existingRoot = normalizedRoot;
}
-
- existingRoot = normalizedRoot;
- _useRelativePaths = createdWithRelative;
+ _useRelativePaths = false;
}
_effectiveRoot = existingRoot;
} catch (Renci.SshNet.Common.SftpPermissionDeniedException) {
- // Permission errors during detection - assume chrooted/relative behavior
+ // Permission errors during root path handling - stick with detected server type
_effectiveRoot = normalizedRoot;
- _useRelativePaths = true;
+ _useRelativePaths = isChrooted;
}
}
} finally {
@@ -503,6 +494,8 @@ await ExecuteWithRetry(async () => {
var currentPath = _useRelativePaths ? "" : (fullPath.StartsWith('/') ? "/" : "");
foreach (var part in parts) {
+ cancellationToken.ThrowIfCancellationRequested();
+
if (_useRelativePaths) {
currentPath = string.IsNullOrEmpty(currentPath) ? part : $"{currentPath}/{part}";
} else {
@@ -514,16 +507,21 @@ await ExecuteWithRetry(async () => {
if (!SafeExists(currentPath)) {
try {
await Task.Run(() => _client!.CreateDirectory(currentPath), cancellationToken);
- } catch (Renci.SshNet.Common.SftpPermissionDeniedException) {
+ } catch (Exception ex) when (ex is Renci.SshNet.Common.SftpPermissionDeniedException ||
+ ex is Renci.SshNet.Common.SftpPathNotFoundException) {
// Try alternate path form (relative vs absolute)
var alternatePath = currentPath.StartsWith('/') ? currentPath.TrimStart('/') : "/" + currentPath;
if (!SafeExists(alternatePath)) {
try {
await Task.Run(() => _client!.CreateDirectory(alternatePath), cancellationToken);
- } catch (Renci.SshNet.Common.SftpPermissionDeniedException) {
- // Both forms failed - check if either now exists, otherwise rethrow
+ } catch (Exception ex2) when (ex2 is Renci.SshNet.Common.SftpPermissionDeniedException ||
+ ex2 is Renci.SshNet.Common.SftpPathNotFoundException) {
+ // Both forms failed - check if either now exists
if (!SafeExists(currentPath) && !SafeExists(alternatePath)) {
- throw;
+ // Permission denied or path not found at chroot boundary - skip this segment
+ // and try to continue with remaining path
+ // This handles chrooted servers where certain path prefixes are inaccessible
+ continue;
}
}
}
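The chroot detection above reduces to a single path-form decision: an empty root, or one without a leading "/", is treated as a chrooted login and addressed with relative paths, while a leading "/" selects absolute paths. A standalone sketch of that decision, under those assumptions (hypothetical helper; the real code additionally probes SafeExists and creates missing segments):

    // Sketch: derive the path form from the configured root, mirroring the detection above.
    static (bool useRelativePaths, string normalizedRoot) ResolveRootForm(string? rootPath) {
        var normalizedRoot = string.IsNullOrEmpty(rootPath) ? "" : rootPath.TrimStart('/');
        var isChrooted = string.IsNullOrEmpty(rootPath) || !rootPath.StartsWith('/');
        return (isChrooted, normalizedRoot);
    }

    // ResolveRootForm("upload")    -> (true,  "upload")   // chrooted: send "upload/dir/file.txt"
    // ResolveRootForm("/srv/data") -> (false, "srv/data") // normal:   send "/srv/data/dir/file.txt"
    // ResolveRootForm(null)        -> (true,  "")         // no root: relative to the login directory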
diff --git a/src/SharpSync/Storage/WebDavStorage.cs b/src/SharpSync/Storage/WebDavStorage.cs
index 6fa47a1..fe278ac 100644
--- a/src/SharpSync/Storage/WebDavStorage.cs
+++ b/src/SharpSync/Storage/WebDavStorage.cs
@@ -348,6 +348,11 @@ public async Task WriteFileAsync(string path, Stream content, CancellationToken
var fullPath = GetFullPath(path);
+ // Ensure root path exists first (if configured)
+ if (!string.IsNullOrEmpty(RootPath)) {
+ await EnsureRootPathExistsAsync(cancellationToken);
+ }
+
// Ensure parent directories exist
var directory = Path.GetDirectoryName(path);
if (!string.IsNullOrEmpty(directory)) {
@@ -356,23 +361,57 @@ public async Task WriteFileAsync(string path, Stream content, CancellationToken
// For small files, use regular upload
if (!content.CanSeek || content.Length <= _chunkSize) {
+ // Buffer the content once before the retry loop so each attempt gets a fresh stream
+ if (content.CanSeek) {
+ content.Position = 0;
+ }
+ using var tempStream = new MemoryStream();
+ await content.CopyToAsync(tempStream, cancellationToken);
+ var contentBytes = tempStream.ToArray();
+
await ExecuteWithRetry(async () => {
- var result = await _client.PutFile(fullPath, content, new PutFileParameters {
+ // Create fresh stream for each retry attempt
+ using var contentCopy = new MemoryStream(contentBytes);
+
+ var result = await _client.PutFile(fullPath, contentCopy, new PutFileParameters {
CancellationToken = cancellationToken
});
if (!result.IsSuccessful) {
+ // 409 Conflict on PUT typically means parent directory issue
+ if (result.StatusCode == 409) {
+ // Ensure root path and parent directory exist
+ _rootPathCreated = false; // Force re-check
+ if (!string.IsNullOrEmpty(RootPath)) {
+ await EnsureRootPathExistsAsync(cancellationToken);
+ }
+ var dir = Path.GetDirectoryName(path);
+ if (!string.IsNullOrEmpty(dir)) {
+ await CreateDirectoryAsync(dir, cancellationToken);
+ }
+ // Retry the upload with fresh stream
+ using var retryStream = new MemoryStream(contentBytes);
+ var retryResult = await _client.PutFile(fullPath, retryStream, new PutFileParameters {
+ CancellationToken = cancellationToken
+ });
+ if (retryResult.IsSuccessful) {
+ return true;
+ }
+ }
throw new HttpRequestException($"WebDAV upload failed: {result.StatusCode}");
}
return true;
}, cancellationToken);
+ // Small delay to allow for server-side propagation before callers re-read the file
+ await Task.Delay(50, cancellationToken);
return;
}
// For large files, use chunked upload (if supported by server)
await WriteFileChunkedAsync(fullPath, path, content, cancellationToken);
+
+ // Small delay for server propagation
+ await Task.Delay(50, cancellationToken);
}
///
@@ -397,9 +436,11 @@ private async Task WriteFileChunkedAsync(string fullPath, string relativePath, S
///
private async Task WriteFileGenericAsync(string fullPath, string relativePath, Stream content, CancellationToken cancellationToken) {
var totalSize = content.Length;
- content.Position = 0;
await ExecuteWithRetry(async () => {
+ // Reset position at start of each retry attempt
+ content.Position = 0;
+
// Report initial progress
RaiseProgressChanged(relativePath, 0, totalSize, StorageOperation.Upload);
@@ -408,6 +449,27 @@ await ExecuteWithRetry(async () => {
});
if (!result.IsSuccessful) {
+ // 409 Conflict on PUT typically means parent directory issue
+ if (result.StatusCode == 409) {
+ // Ensure root path and parent directory exist
+ _rootPathCreated = false; // Force re-check
+ if (!string.IsNullOrEmpty(RootPath)) {
+ await EnsureRootPathExistsAsync(cancellationToken);
+ }
+ var dir = Path.GetDirectoryName(relativePath);
+ if (!string.IsNullOrEmpty(dir)) {
+ await CreateDirectoryAsync(dir, cancellationToken);
+ }
+ // Retry the upload
+ content.Position = 0;
+ var retryResult = await _client.PutFile(fullPath, content, new PutFileParameters {
+ CancellationToken = cancellationToken
+ });
+ if (retryResult.IsSuccessful) {
+ RaiseProgressChanged(relativePath, totalSize, totalSize, StorageOperation.Upload);
+ return true;
+ }
+ }
throw new HttpRequestException($"WebDAV upload failed: {result.StatusCode}");
}
@@ -522,6 +584,11 @@ public async Task CreateDirectoryAsync(string path, CancellationToken cancellati
if (!await EnsureAuthenticated(cancellationToken))
throw new UnauthorizedAccessException("Authentication failed");
+ // Ensure root path exists first (if configured)
+ if (!string.IsNullOrEmpty(RootPath)) {
+ await EnsureRootPathExistsAsync(cancellationToken);
+ }
+
// Normalize the path
path = path.Replace('\\', '/').Trim('/');
@@ -537,24 +604,12 @@ public async Task CreateDirectoryAsync(string path, CancellationToken cancellati
for (int i = 0; i < segments.Length; i++) {
currentPath = i == 0 ? segments[i] : $"{currentPath}/{segments[i]}";
var fullPath = GetFullPath(currentPath);
+ var pathToCheck = currentPath; // Capture for lambda
await ExecuteWithRetry(async () => {
- try {
- // Check if directory already exists
- var existsResult = await _client.Propfind(fullPath, new PropfindParameters {
- RequestType = PropfindRequestType.NamedProperties,
- CancellationToken = cancellationToken
- });
-
- if (existsResult.IsSuccessful) {
- // Check if it's actually a collection/directory
- var resource = existsResult.Resources?.FirstOrDefault();
- if (resource != null && resource.IsCollection) {
- return true; // Directory already exists
- }
- }
- } catch {
- // PROPFIND failed, directory probably doesn't exist
+ // Check if directory already exists first
+ if (await ExistsAsync(pathToCheck, cancellationToken)) {
+ return true; // Directory already exists, skip creation
}
// Try to create the directory
@@ -562,21 +617,19 @@ await ExecuteWithRetry(async () => {
CancellationToken = cancellationToken
});
- if (result.IsSuccessful || result.StatusCode == 201) {
- return true; // Created successfully
- }
-
- if (result.StatusCode == 405) {
- // Method Not Allowed - likely means it already exists as a file
- return true;
- }
-
- if (result.StatusCode == 409) {
- // Conflict - parent doesn't exist, but we're creating in order so this shouldn't happen
- throw new HttpRequestException($"Parent directory doesn't exist for {currentPath}");
+ // Treat 201 (Created), 405 (Already exists), and 409 (Conflict/race condition) as success
+ if (result.IsSuccessful || result.StatusCode == 201 || result.StatusCode == 405 || result.StatusCode == 409) {
+ // Verify the directory was actually created (with a short delay for server propagation)
+ await Task.Delay(50, cancellationToken);
+ if (await ExistsAsync(pathToCheck, cancellationToken)) {
+ return true;
+ }
+ // If it doesn't exist yet, give it more time and try again
+ await Task.Delay(100, cancellationToken);
+ return await ExistsAsync(pathToCheck, cancellationToken);
}
- throw new HttpRequestException($"Directory creation failed for {currentPath}: {result.StatusCode} {result.Description}");
+ throw new HttpRequestException($"Directory creation failed for {pathToCheck}: {result.StatusCode} {result.Description}");
}, cancellationToken);
}
}
@@ -654,12 +707,21 @@ public async Task ExistsAsync(string path, CancellationToken cancellationT
try {
return await ExecuteWithRetry(async () => {
var result = await _client.Propfind(fullPath, new PropfindParameters {
- RequestType = PropfindRequestType.NamedProperties,
+ // Use AllProperties for better compatibility with various WebDAV servers
+ RequestType = PropfindRequestType.AllProperties,
CancellationToken = cancellationToken
});
- return result.IsSuccessful && result.StatusCode != 404;
+ // Check if the request was successful and we got at least one resource
+ if (!result.IsSuccessful || result.StatusCode == 404) {
+ return false;
+ }
+
+ // Ensure we actually have resources in the response
+ return result.Resources.Count > 0;
}, cancellationToken);
+ } catch (HttpRequestException ex) when (ex.StatusCode == System.Net.HttpStatusCode.NotFound) {
+ return false;
} catch {
// If PROPFIND fails with an exception, assume the item doesn't exist
return false;
@@ -710,19 +772,15 @@ public async Task GetStorageInfoAsync(CancellationToken cancellatio
///
/// The relative path to the file
/// Cancellation token to cancel the operation
- /// Hash of the file contents (uses ETag if available, server checksum for Nextcloud/OCIS, or SHA256 as fallback)
+ /// SHA256 hash of the file contents (content-based, not ETag)
///
- /// This method optimizes hash computation by using ETags or server-side checksums when available.
- /// Falls back to downloading and computing SHA256 hash for servers that don't support these features.
+ /// This method returns a content-based hash so that files with identical content yield
+ /// identical hash values. For Nextcloud/OCIS servers it first tries the server-side
+ /// checksum to avoid downloading the file; otherwise it downloads the content and
+ /// computes a SHA256 hash. ETags are not used because they are resource-specific
+ /// (they reflect path/inode and modification state) rather than content-based.
///
public async Task ComputeHashAsync(string path, CancellationToken cancellationToken = default) {
- // Use ETag if available for performance (avoids downloading the file)
- var item = await GetItemAsync(path, cancellationToken);
- if (!string.IsNullOrEmpty(item?.ETag)) {
- return item.ETag;
- }
-
- // For Nextcloud/OCIS, try to get checksum from properties
+ // For Nextcloud/OCIS, try to get content-based checksum from properties
var capabilities = await GetServerCapabilitiesAsync(cancellationToken);
if (capabilities.IsNextcloud || capabilities.IsOcis) {
var checksum = await GetServerChecksumAsync(path, cancellationToken);
@@ -730,7 +788,7 @@ public async Task ComputeHashAsync(string path, CancellationToken cancel
return checksum;
}
- // Fallback to downloading and hashing (expensive for large files)
+ // Compute SHA256 hash from file content (content-based, same for identical files)
using var stream = await ReadFileAsync(path, cancellationToken);
using var sha256 = SHA256.Create();
@@ -877,14 +935,66 @@ private string GetFullPath(string relativePath) {
}
private string GetRelativePath(string fullUrl) {
- var prefix = string.IsNullOrEmpty(RootPath) ? _baseUrl : $"{_baseUrl}/{RootPath}";
+ // The fullUrl can be either a full URL (http://server/path) or just a path (/path)
+ // We need to strip the base URL and RootPath to get the relative path
+
+ // Extract the path portion if it's a full URL
+ string path;
+ if (Uri.TryCreate(fullUrl, UriKind.Absolute, out var uri)) {
+ // It's a full URL - get the path component and decode it
+ path = Uri.UnescapeDataString(uri.AbsolutePath);
+ } else {
+ // It's already a path
+ path = fullUrl;
+ }
+
+ // Remove leading slash for consistency
+ path = path.TrimStart('/');
+
+ // If there's no root path, return the path as-is (trimming trailing slashes)
+ if (string.IsNullOrEmpty(RootPath)) {
+ return path.TrimEnd('/');
+ }
+
+ // Normalize the root path (no leading/trailing slashes)
+ var normalizedRoot = RootPath.Trim('/');
+
+ // The path should start with RootPath/
+ if (path.StartsWith($"{normalizedRoot}/")) {
+ return path.Substring(normalizedRoot.Length + 1).TrimEnd('/');
+ }
- if (fullUrl.StartsWith(prefix)) {
- var relativePath = fullUrl.Substring(prefix.Length).Trim('/');
- return string.IsNullOrEmpty(relativePath) ? "/" : relativePath;
+ // If it's exactly the root path itself (directory listing)
+ if (path == normalizedRoot || path == $"{normalizedRoot}/") {
+ return "";
}
- return fullUrl;
+ // Otherwise return as-is (trim trailing slashes)
+ return path.TrimEnd('/');
+ }
+
+ private bool _rootPathCreated;
+
+ private async Task EnsureRootPathExistsAsync(CancellationToken cancellationToken) {
+ if (_rootPathCreated || string.IsNullOrEmpty(RootPath)) {
+ return;
+ }
+
+ var rootUrl = $"{_baseUrl.TrimEnd('/')}/{RootPath.Trim('/')}";
+
+ await ExecuteWithRetry(async () => {
+ var result = await _client.Mkcol(rootUrl, new MkColParameters {
+ CancellationToken = cancellationToken
+ });
+
+ // Treat 201 (Created), 405 (Already exists), and 409 (Conflict) as success
+ if (result.IsSuccessful || result.StatusCode == 201 || result.StatusCode == 405 || result.StatusCode == 409) {
+ _rootPathCreated = true;
+ return true;
+ }
+
+ throw new HttpRequestException($"Failed to create root path: {result.StatusCode} {result.Description}");
+ }, cancellationToken);
}
private async Task EnsureAuthenticated(CancellationToken cancellationToken) {
@@ -903,18 +1013,27 @@ private async Task ExecuteWithRetry(Func> operation, CancellationT
return await operation();
} catch (Exception ex) when (attempt < _maxRetries && IsRetriableException(ex)) {
lastException = ex;
- await Task.Delay(_retryDelay * (attempt + 1), cancellationToken);
+ // Exponential backoff: delay * 2^attempt (e.g., 1s, 2s, 4s, 8s...)
+ var delay = _retryDelay * (1 << attempt);
+ await Task.Delay(delay, cancellationToken);
}
}
- throw lastException ?? new InvalidOperationException("Operation failed");
+ throw lastException ?? new InvalidOperationException("Operation failed after retries");
}
private static bool IsRetriableException(Exception ex) {
- return ex is HttpRequestException ||
- ex is TaskCanceledException ||
- ex is SocketException ||
- (ex is HttpRequestException httpEx && httpEx.Message.Contains('5'));
+ return ex switch {
+ HttpRequestException httpEx => httpEx.StatusCode is null ||
+ (int?)httpEx.StatusCode >= 500 ||
+ httpEx.StatusCode == System.Net.HttpStatusCode.RequestTimeout,
+ TaskCanceledException => true,
+ SocketException => true,
+ IOException => true,
+ TimeoutException => true,
+ _ when ex.InnerException is not null => IsRetriableException(ex.InnerException),
+ _ => false
+ };
}
private void RaiseProgressChanged(string path, long completed, long total, StorageOperation operation) {
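For reference, the retry change above replaces the linear delay with exponential backoff (_retryDelay * 2^attempt) and widens the set of retriable exceptions. A self-contained sketch of the same backoff shape, with assumed parameter values and, unlike the real ExecuteWithRetry, no IsRetriableException filter:

    // Sketch: exponential backoff, mirroring ExecuteWithRetry above (this version retries any exception).
    static async Task<T> RetryWithBackoff<T>(Func<Task<T>> operation, int maxRetries,
            TimeSpan baseDelay, CancellationToken ct) {
        Exception? last = null;
        for (var attempt = 0; attempt <= maxRetries; attempt++) {
            try {
                return await operation();
            } catch (Exception ex) when (attempt < maxRetries) {
                last = ex;
                await Task.Delay(baseDelay * (1 << attempt), ct); // 1s, 2s, 4s... for a 1s base delay
            }
        }
        throw last ?? new InvalidOperationException("Operation failed after retries");
    }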
diff --git a/tests/SharpSync.Tests/Storage/SftpStorageTests.cs b/tests/SharpSync.Tests/Storage/SftpStorageTests.cs
index 9ec0c91..17cef52 100644
--- a/tests/SharpSync.Tests/Storage/SftpStorageTests.cs
+++ b/tests/SharpSync.Tests/Storage/SftpStorageTests.cs
@@ -28,7 +28,11 @@ public SftpStorageTests() {
_testUser = Environment.GetEnvironmentVariable("SFTP_TEST_USER");
_testPass = Environment.GetEnvironmentVariable("SFTP_TEST_PASS");
_testKey = Environment.GetEnvironmentVariable("SFTP_TEST_KEY");
- _testRoot = Environment.GetEnvironmentVariable("SFTP_TEST_ROOT") ?? "/tmp/sharpsync-tests";
+
+ // Use environment variable if set, otherwise default to /tmp/sharpsync-tests
+ // Note: Empty string means "use root of SFTP server" (for chrooted environments)
+ var testRootEnv = Environment.GetEnvironmentVariable("SFTP_TEST_ROOT");
+ _testRoot = testRootEnv ?? "/tmp/sharpsync-tests";
var portStr = Environment.GetEnvironmentVariable("SFTP_TEST_PORT");
_testPort = int.TryParse(portStr, out var port) ? port : 22;
@@ -120,12 +124,18 @@ private void SkipIfIntegrationTestsDisabled() {
private SftpStorage CreateStorage() {
SkipIfIntegrationTestsDisabled();
+ // Create a unique subdirectory for each test to avoid conflicts
+ var testSubdir = Guid.NewGuid().ToString();
+ var rootPath = string.IsNullOrEmpty(_testRoot)
+ ? testSubdir // When root is empty (chrooted env), use relative path
+ : $"{_testRoot}/{testSubdir}"; // Otherwise, append to root
+
if (!string.IsNullOrEmpty(_testKey)) {
// Key-based authentication
- return new SftpStorage(_testHost!, _testPort, _testUser!, privateKeyPath: _testKey, privateKeyPassphrase: null, rootPath: $"{_testRoot}/{Guid.NewGuid()}");
+ return new SftpStorage(_testHost!, _testPort, _testUser!, privateKeyPath: _testKey, privateKeyPassphrase: null, rootPath: rootPath);
} else {
// Password authentication
- return new SftpStorage(_testHost!, _testPort, _testUser!, password: _testPass!, rootPath: $"{_testRoot}/{Guid.NewGuid()}");
+ return new SftpStorage(_testHost!, _testPort, _testUser!, password: _testPass!, rootPath: rootPath);
}
}
diff --git a/tests/SharpSync.Tests/Storage/WebDavStorageTests.cs b/tests/SharpSync.Tests/Storage/WebDavStorageTests.cs
index cc41235..95d2ef7 100644
--- a/tests/SharpSync.Tests/Storage/WebDavStorageTests.cs
+++ b/tests/SharpSync.Tests/Storage/WebDavStorageTests.cs
@@ -258,6 +258,20 @@ private WebDavStorage CreateStorage() {
return new WebDavStorage(_testUrl!, _testUser!, _testPass!, rootPath: $"{_testRoot}/sharpsync-test-{Guid.NewGuid()}");
}
+ ///
+ /// Helper method to wait for an item to exist on the server with retry logic.
+ /// WebDAV servers may have propagation delays.
+ ///
+ private static async Task WaitForExistsAsync(WebDavStorage storage, string path, int maxRetries = 5, int delayMs = 100) {
+ for (int i = 0; i < maxRetries; i++) {
+ if (await storage.ExistsAsync(path)) {
+ return true;
+ }
+ await Task.Delay(delayMs);
+ }
+ return await storage.ExistsAsync(path);
+ }
+
[SkippableFact]
public async Task TestConnectionAsync_ValidCredentials_ReturnsTrue() {
SkipIfIntegrationTestsDisabled();
@@ -294,10 +308,10 @@ public async Task CreateDirectoryAsync_CreatesDirectory() {
// Act
await _storage.CreateDirectoryAsync(dirPath);
- var exists = await _storage.ExistsAsync(dirPath);
+ var exists = await WaitForExistsAsync(_storage, dirPath);
// Assert
- Assert.True(exists);
+ Assert.True(exists, $"Directory '{dirPath}' should exist after creation");
}
[SkippableFact]
@@ -308,7 +322,7 @@ public async Task CreateDirectoryAsync_AlreadyExists_DoesNotThrow() {
// Act
await _storage.CreateDirectoryAsync(dirPath);
- var existsAfterFirstCreate = await _storage.ExistsAsync(dirPath);
+ var existsAfterFirstCreate = await WaitForExistsAsync(_storage, dirPath);
// Ensure the directory exists after the first creation
Assert.True(existsAfterFirstCreate, "Directory should exist after first creation");
@@ -316,8 +330,8 @@ public async Task CreateDirectoryAsync_AlreadyExists_DoesNotThrow() {
await _storage.CreateDirectoryAsync(dirPath); // Create again
// Assert
- var exists = await _storage.ExistsAsync(dirPath);
- Assert.True(exists);
+ var exists = await WaitForExistsAsync(_storage, dirPath);
+ Assert.True(exists, "Directory should still exist after second creation attempt");
}
[SkippableFact]
@@ -332,8 +346,8 @@ public async Task WriteFileAsync_CreatesFile() {
await _storage.WriteFileAsync(filePath, stream);
// Assert
- var exists = await _storage.ExistsAsync(filePath);
- Assert.True(exists);
+ var exists = await WaitForExistsAsync(_storage, filePath);
+ Assert.True(exists, $"File '{filePath}' should exist after writing");
}
[SkippableFact]
@@ -347,9 +361,15 @@ public async Task WriteFileAsync_WithParentDirectory_CreatesParentDirectories()
using var stream = new MemoryStream(Encoding.UTF8.GetBytes(content));
await _storage.WriteFileAsync(filePath, stream);
- // Assert
- var exists = await _storage.ExistsAsync(filePath);
- Assert.True(exists);
+ // Assert - verify parent directories and file were created
+ var parentExists = await WaitForExistsAsync(_storage, "parent");
+ Assert.True(parentExists, "Parent directory 'parent' should exist");
+
+ var childExists = await WaitForExistsAsync(_storage, "parent/child");
+ Assert.True(childExists, "Child directory 'parent/child' should exist");
+
+ var fileExists = await WaitForExistsAsync(_storage, filePath);
+ Assert.True(fileExists, $"File '{filePath}' should exist after writing");
}
[SkippableFact]
@@ -388,11 +408,11 @@ public async Task ExistsAsync_ExistingFile_ReturnsTrue() {
using var stream = new MemoryStream(Encoding.UTF8.GetBytes("test"));
await _storage.WriteFileAsync(filePath, stream);
- // Act
- var result = await _storage.ExistsAsync(filePath);
+ // Act - use retry helper to account for server propagation delay
+ var result = await WaitForExistsAsync(_storage, filePath);
// Assert
- Assert.True(result);
+ Assert.True(result, $"File '{filePath}' should exist after writing");
}
[SkippableFact]
@@ -457,15 +477,17 @@ public async Task MoveAsync_ExistingFile_MovesFile() {
using var stream = new MemoryStream(Encoding.UTF8.GetBytes(content));
await _storage.WriteFileAsync(sourcePath, stream);
+ await WaitForExistsAsync(_storage, sourcePath);
// Act
await _storage.MoveAsync(sourcePath, targetPath);
- // Assert
+ // Assert - give the server time to process the move
+ await Task.Delay(100);
var sourceExists = await _storage.ExistsAsync(sourcePath);
- var targetExists = await _storage.ExistsAsync(targetPath);
- Assert.False(sourceExists);
- Assert.True(targetExists);
+ var targetExists = await WaitForExistsAsync(_storage, targetPath);
+ Assert.False(sourceExists, "Source file should not exist after move");
+ Assert.True(targetExists, "Target file should exist after move");
}
[SkippableFact]
@@ -478,13 +500,14 @@ public async Task MoveAsync_ToNewDirectory_CreatesParentDirectory() {
using var stream = new MemoryStream(Encoding.UTF8.GetBytes(content));
await _storage.WriteFileAsync(sourcePath, stream);
+ await WaitForExistsAsync(_storage, sourcePath);
// Act
await _storage.MoveAsync(sourcePath, targetPath);
// Assert
- var targetExists = await _storage.ExistsAsync(targetPath);
- Assert.True(targetExists);
+ var targetExists = await WaitForExistsAsync(_storage, targetPath);
+ Assert.True(targetExists, "Target file should exist after move to new directory");
}
[SkippableFact]
@@ -517,8 +540,8 @@ public async Task GetItemAsync_ExistingDirectory_ReturnsMetadata() {
// Ensure the directory is created
await _storage.CreateDirectoryAsync(dirPath);
- // Verify directory exists before testing GetItemAsync
- var exists = await _storage.ExistsAsync(dirPath);
+ // Verify directory exists before testing GetItemAsync (with retry for propagation)
+ var exists = await WaitForExistsAsync(_storage, dirPath);
Assert.True(exists, "Directory should exist after creation");
// Act
@@ -547,6 +570,7 @@ public async Task ListItemsAsync_EmptyDirectory_ReturnsEmpty() {
_storage = CreateStorage();
var dirPath = "empty_dir";
await _storage.CreateDirectoryAsync(dirPath);
+ await WaitForExistsAsync(_storage, dirPath);
// Act
var items = await _storage.ListItemsAsync(dirPath);
@@ -561,23 +585,25 @@ public async Task ListItemsAsync_WithFiles_ReturnsAllItems() {
_storage = CreateStorage();
var dirPath = "list_test";
await _storage.CreateDirectoryAsync(dirPath);
+ await WaitForExistsAsync(_storage, dirPath);
// Create test files and subdirectories
await _storage.WriteFileAsync($"{dirPath}/file1.txt", new MemoryStream(Encoding.UTF8.GetBytes("content1")));
await _storage.WriteFileAsync($"{dirPath}/file2.txt", new MemoryStream(Encoding.UTF8.GetBytes("content2")));
await _storage.CreateDirectoryAsync($"{dirPath}/subdir");
- // Verify all items exist before listing
- Assert.True(await _storage.ExistsAsync($"{dirPath}/file1.txt"), "file1.txt should exist");
- Assert.True(await _storage.ExistsAsync($"{dirPath}/file2.txt"), "file2.txt should exist");
- Assert.True(await _storage.ExistsAsync($"{dirPath}/subdir"), "subdir should exist");
-
- // Act
- var items = (await _storage.ListItemsAsync(dirPath)).ToList();
-
- // Debug output
- foreach (var item in items) {
- System.Diagnostics.Debug.WriteLine($"Found item: {item.Path}, IsDirectory: {item.IsDirectory}");
+ // Verify all items exist before listing (with retry for server propagation)
+ Assert.True(await WaitForExistsAsync(_storage, $"{dirPath}/file1.txt"), "file1.txt should exist");
+ Assert.True(await WaitForExistsAsync(_storage, $"{dirPath}/file2.txt"), "file2.txt should exist");
+ Assert.True(await WaitForExistsAsync(_storage, $"{dirPath}/subdir"), "subdir should exist");
+
+ // Act - retry list operation to account for propagation
+ List? items = null;
+ for (int attempt = 0; attempt < 5; attempt++) {
+ items = (await _storage.ListItemsAsync(dirPath)).ToList();
+ if (items.Count >= 3)
+ break;
+ await Task.Delay(100);
}
// Assert
@@ -662,9 +688,7 @@ public async Task WriteFileAsync_LargeFile_RaisesProgressEvents() {
var content = new byte[fileSize];
new Random().NextBytes(content);
- var progressEventRaised = false;
_storage.ProgressChanged += (sender, args) => {
- progressEventRaised = true;
Assert.Equal(filePath, args.Path);
Assert.Equal(StorageOperation.Upload, args.Operation);
};
@@ -674,8 +698,8 @@ public async Task WriteFileAsync_LargeFile_RaisesProgressEvents() {
await _storage.WriteFileAsync(filePath, stream);
// Assert
- var exists = await _storage.ExistsAsync(filePath);
- Assert.True(exists);
+ var exists = await WaitForExistsAsync(_storage, filePath);
+ Assert.True(exists, $"Large file '{filePath}' should exist after writing");
// Note: Progress events may not be raised for all servers/sizes
}
@@ -691,9 +715,7 @@ public async Task ReadFileAsync_LargeFile_RaisesProgressEvents() {
using var writeStream = new MemoryStream(content);
await _storage.WriteFileAsync(filePath, writeStream);
- var progressEventRaised = false;
_storage.ProgressChanged += (sender, args) => {
- progressEventRaised = true;
Assert.Equal(StorageOperation.Download, args.Operation);
};
diff --git a/webdav-config.yml b/webdav-config.yml
new file mode 100644
index 0000000..b214781
--- /dev/null
+++ b/webdav-config.yml
@@ -0,0 +1,8 @@
+address: 0.0.0.0
+port: 80
+prefix: /
+directory: /data
+permissions: CRUD
+users:
+ - username: testuser
+ password: testpass
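This config grants testuser full CRUD access to the /data directory, matching the WEBDAV_TEST_* values exported by the workflow and by run-webdav-tests.ps1. As a quick local smoke test against the compose service, something along these lines should work (constructor shape taken from WebDavStorageTests above; the "ci-root" value is only an example, not a required path):

    // Sketch: exercise the docker-compose WebDAV service with the CI credentials.
    var storage = new WebDavStorage("http://localhost:8080/", "testuser", "testpass", rootPath: "ci-root");
    using var content = new MemoryStream(new byte[] { 1, 2, 3 });
    await storage.WriteFileAsync("smoke-test.txt", content);
    Console.WriteLine(await storage.ExistsAsync("smoke-test.txt")); // expected: True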