Compare commits

...

57 Commits

Author SHA1 Message Date
kodi 54e56ab0d8 Fix context menu positioning and remove unnecessary width 2026-03-28 08:12:29 +01:00
kodi 9778dc6c33 Add Phase 3 remote read-only file operations
Introduce dedicated remote file facade for /Clients paths, add agent read/download endpoints, enable remote view/properties/download/image preview in the web UI, and keep remote write operations disabled.
2026-03-27 15:16:01 +01:00
kodi 2fa4a0b291 Implement Remote Client Shares Phase 2 browse support and unify remote agent HTTP + heartbeat 2026-03-27 14:17:55 +01:00
kodi 4062cbf6c8 Add Phase 2 remote browse scaffolding for /Clients 2026-03-27 11:39:26 +01:00
kodi 841318c9e2 Add Phase 1 remote client registry and heartbeat agent
- add remote client register, heartbeat and list endpoints
- add remote client repository and service
- add minimal macOS remote agent with config, register and heartbeat loop
- keep client_id as leading identity
- keep status fields separated: last_seen, status, last_error, reachable_at
- avoid changes to local storage flows, PathGuard and /Volumes behavior
2026-03-27 10:39:54 +01:00
kodi 684f52be4d feat: remote client deel 1 2026-03-26 19:41:58 +01:00
kodi fc4ec39646 Voor remote client agent 2026-03-25 18:21:54 +01:00
kodi 9537a29de3 feat: feedback verbetering - 06 2026-03-15 15:51:13 +01:00
kodi ae6a9d8c45 feat: feedback verbetering 05 2026-03-15 15:30:55 +01:00
kodi 61d0c8de41 feat: feedback verbetering 04 2026-03-15 14:52:33 +01:00
kodi 3d82699535 feat: feedback verbetering 03 2026-03-15 14:16:17 +01:00
kodi 492082c2b7 feat: feedback verbetering 02 2026-03-15 13:52:48 +01:00
kodi 9a7ca4e2db feat: feedback verbetering 01 2026-03-15 13:44:38 +01:00
kodi 66abf991d8 feat: feedback verbetering 2026-03-15 13:28:11 +01:00
kodi a52493459a feat: annuleren taak toegevoegd 2026-03-15 13:06:48 +01:00
kodi 7d910479f9 feat: voortgang delete in headerbar 2026-03-15 11:52:39 +01:00
kodi 73b09d2802 feat: voortgang copy/duplicate/move in headerbar 2026-03-15 11:40:21 +01:00
kodi 9d5fb5a0c9 feat: favicon en logo toegevoegd 2026-03-15 09:12:11 +01:00
kodi c0bd6b647c fix: navigatie 2026-03-15 07:39:57 +01:00
kodi cc5a978e79 feature: duplicate 02 2026-03-14 17:27:24 +01:00
kodi 7f7665880f feature: duplicate 01 2026-03-14 17:20:36 +01:00
kodi 14600dd5b6 fix: log/tasks posities - 02 2026-03-14 16:39:22 +01:00
kodi a816f71ad5 fix: log/tasks posities 2026-03-14 16:33:46 +01:00
kodi 15c85e874c fix: log/tasks rendering 2026-03-14 16:12:40 +01:00
kodi 90b1828160 task bij logs gezet 2026-03-14 16:08:36 +01:00
kodi 5265d6458c download taken zichtbaar gemaakt 2026-03-14 15:57:45 +01:00
kodi e85e51d64a polish 2026-03-14 15:29:50 +01:00
kodi 3fb8528b0e feat: conf toegevoegd 2026-03-14 15:12:35 +01:00
kodi 8af4b1a6b0 feat: B4 - progressbar bij single file 2026-03-14 14:58:07 +01:00
kodi d459f3c524 feat: B4 - progressbar 2026-03-14 14:49:15 +01:00
kodi 2981ac2796 feat: B3 uit voor veilige archive-downloads - cancel knop toegevoegd 2026-03-14 14:39:57 +01:00
kodi d463b3977d feat: B2 uit voor veilige archive-downloads 2026-03-14 14:24:52 +01:00
kodi 592b10acc2 feat: download - download status aan logs toegevoegd 2026-03-14 13:53:53 +01:00
kodi 8ea2bd1498 feat: download - download dwnload limieten in settings 2026-03-14 13:38:44 +01:00
kodi ea337338e3 feat: download - download safeguard 2026-03-14 13:24:17 +01:00
kodi 7e7c2f3958 feat: download - fase 03 2026-03-14 13:10:52 +01:00
kodi dab87878cc feat: download - fase 02 2026-03-14 12:40:41 +01:00
kodi 610a648fd1 feat: download - fase 01 2026-03-14 12:31:11 +01:00
kodi af1d1eea23 feat: contextmenu Open aangepast 2026-03-14 11:51:29 +01:00
kodi 6b4fb34b40 feat: contextmenu Edit toegevoegd 2026-03-14 11:38:52 +01:00
kodi 3dfbc64913 feat: contextmenu Open toegevoegd 2026-03-14 11:23:52 +01:00
kodi 73c539ba4a feat: contextmenu eigenschappen toegevoegd 2026-03-14 11:11:03 +01:00
kodi d08ca24c87 feat: contextmenu copy multiple folders toegevoegd 2026-03-14 11:01:45 +01:00
kodi 4e1288fe47 feat: contextmenu copy folders toegevoegd 2026-03-14 10:34:31 +01:00
kodi 8908b1dce9 feat: contextmenu deel copy toegevoegd 2026-03-14 09:59:42 +01:00
kodi 84f3eedb74 feat: contextmenu deel move toegevoegd 2026-03-14 09:54:21 +01:00
kodi 054e736aa6 feat: contextmenu deel 3a rename en delete 2026-03-14 09:41:16 +01:00
kodi 7bb59a2b65 feat: contextmenu deel 2 2026-03-14 09:31:01 +01:00
kodi 0615324607 feat: contextmenu deel 1 2026-03-14 09:22:24 +01:00
kodi 3987de27e0 feat: delete multiple non empty folders 2026-03-14 08:36:47 +01:00
kodi d84b3da561 feat: delete non empty folders 2026-03-14 07:48:29 +01:00
kodi f092007998 feat: upload progressbar 2026-03-14 07:28:31 +01:00
kodi f0b04fd4ee feat: folder upload - deel 3 2026-03-14 07:10:49 +01:00
kodi 287dddb7b3 feat: folder upload - deel 2 2026-03-14 06:57:18 +01:00
kodi e2e206573d feat: folder upload - deel 1 2026-03-14 06:52:18 +01:00
kodi 360815498e feat: upload - deel 03.02 - Skipp all toegevoegd 2026-03-13 18:30:10 +01:00
kodi 8fe9d0f436 feat: upload - deel 02 2026-03-13 16:21:51 +01:00
106 changed files with 13905 additions and 416 deletions
+7
View File
@@ -0,0 +1,7 @@
__pycache__/
*.pyc
*.log
.venv/
venv/
.DS_Store
.sqlite3
+1 -1
View File
@@ -18,7 +18,7 @@ RUN mkdir -p /app/backend /app/html /app/conf /Volumes/8TB /Volumes/8TB_RAID1
# Installeer een lichtgewicht Python API framework (FastAPI) # Installeer een lichtgewicht Python API framework (FastAPI)
# We gebruiken --break-system-packages omdat we in een container zitten # We gebruiken --break-system-packages omdat we in een container zitten
RUN pip3 install fastapi uvicorn --break-system-packages RUN pip3 install fastapi uvicorn python-multipart httpx --break-system-packages
# Exposeer de poort voor de webinterface # Exposeer de poort voor de webinterface
EXPOSE 8030 EXPOSE 8030
+67
View File
@@ -0,0 +1,67 @@
# Finder Commander
Lokale webapp die aanvoelt als een Midnight Commander-achtige file manager voor macOS/Linux home directories.
## Wat zit erin
- twee panelen naast elkaar
- actieve panel met duidelijke focusrand
- keyboard shortcuts:
- `Tab` wissel panel
- `↑ / ↓ / PgUp / PgDn` navigatie
- `Enter` open map of view bestand
- `Backspace` omhoog
- `Space` markeer bestand/map
- `F3` view
- `F4` edit
- `Shift+F4` nieuw bestand
- `F5` copy naar ander panel
- `F6` move naar ander panel
- `Shift+F6` rename
- `F7` nieuwe map
- `F8` delete naar `~/.Trash`
- `Ctrl+H` toggle verborgen bestanden
- `Ctrl+R` refresh
- `Alt+X` focus command line
- command line onderin met beperkte veilige commando's:
- `cd <path>`
- `mkdir <name>`
- `touch <name>`
- `select <glob>`
- `help`
- upload per panel
- viewer voor tekst en afbeeldingen
- editor voor tekstbestanden
- padbeveiliging: alles blijft binnen `~`
## Starten
```bash
./run-local.sh
```
Het script kiest automatisch `python3.14` als dat aanwezig is. Bestaat er al een `.venv` met een andere Python minor-versie, dan wordt die automatisch opnieuw aangemaakt.
Open daarna:
```text
http://127.0.0.1:8765/
```
## Python 3.14-notes
Deze build is opgeschoond voor Python 3.14.x:
- minimale Uvicorn-installatie (`uvicorn` i.p.v. `uvicorn[standard]`)
- geen optionele C-extensies nodig om te starten
- dependencies ondersteunen Python 3.14
## Opmerking
Dit is een **MC-like v1**, geen volledige Midnight Commander clone. Bewust niet ingebouwd:
- shell/subshell uitvoering
- chmod/chown dialogs
- archive browsing als directory
- remote FTP/SFTP panels
- diff/compare directories
+360
View File
@@ -0,0 +1,360 @@
from __future__ import annotations
import json
import mimetypes
import os
import struct
from dataclasses import dataclass
from datetime import datetime, timezone
from functools import lru_cache
from pathlib import Path
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import FileResponse, JSONResponse
# Human-readable service name reported by the / and /health endpoints.
APP_NAME = "Finder Commander Remote Agent"
# Fallback port advertised via the health payload ("port_hint").
DEFAULT_PORT = 8765
# Hard upper bound on bytes served by the text-preview endpoint (256 KiB).
TEXT_PREVIEW_MAX_BYTES = 256 * 1024
# Allow-list mapping file extensions to the content type used for text previews;
# anything not listed here (or in SPECIAL_TEXT_FILENAMES) is rejected.
TEXT_CONTENT_TYPES = {
    ".txt": "text/plain",
    ".log": "text/plain",
    ".conf": "text/plain",
    ".ini": "text/plain",
    ".cfg": "text/plain",
    ".md": "text/markdown",
    ".yml": "text/yaml",
    ".yaml": "text/yaml",
    ".json": "application/json",
    ".js": "text/javascript",
    ".py": "text/x-python",
    ".css": "text/css",
    ".html": "text/html",
}
# Extensionless file names (matched on the lowercased full name) that still count as text.
SPECIAL_TEXT_FILENAMES = {
    "dockerfile": "text/plain",
    "containerfile": "text/plain",
}
@dataclass(frozen=True)
class AgentRuntimeConfig:
    """Immutable snapshot of the agent configuration loaded at startup."""

    # Resolved path of the config file that was loaded, or None when none was found.
    config_path: Path | None
    # Bearer token required on agent endpoints; empty string disables auth entirely.
    agent_access_token: str
    # Share key -> root directory string exposed by this agent.
    shares: dict[str, str]
    # Human-readable name for this client (may be empty).
    display_name: str
    # Endpoint URL reported via /health (from "public_endpoint" or "endpoint").
    endpoint: str
    # Stable client identity; empty when not configured.
    client_id: str
    # OS family string; defaults to "macos".
    platform: str
def _now_iso() -> str:
return datetime.now(tz=timezone.utc).isoformat().replace("+00:00", "Z")
def _candidate_config_paths() -> list[Path]:
candidates: list[Path] = []
env_path = os.getenv("FINDER_COMMANDER_REMOTE_AGENT_CONFIG", "").strip()
if env_path:
candidates.append(Path(env_path).expanduser().resolve(strict=False))
base_dir = Path(__file__).resolve().parents[1]
candidates.append(base_dir / "remote_client_agent.launchd.json")
candidates.append(base_dir / "remote_client_agent.example.json")
return candidates
def _load_raw_config() -> tuple[Path | None, dict]:
    """Load the first existing candidate config file.

    Returns (resolved path, parsed JSON object), or (None, {}) when no
    candidate file exists. Raises RuntimeError for invalid JSON or a
    non-object top level.
    """
    for candidate in _candidate_config_paths():
        if candidate.is_file():
            try:
                raw = json.loads(candidate.read_text(encoding="utf-8"))
            except ValueError as exc:
                raise RuntimeError(f"Invalid JSON in config file: {candidate}") from exc
            if not isinstance(raw, dict):
                raise RuntimeError(f"Config file must contain a JSON object: {candidate}")
            return candidate.resolve(strict=False), raw
    return None, {}
@lru_cache(maxsize=1)
def get_runtime_config() -> AgentRuntimeConfig:
    """Build and cache the runtime config (loaded once per process).

    Tests reset this via ``get_runtime_config.cache_clear()``. Share entries
    with an empty key or value are silently dropped.
    """
    config_path, raw = _load_raw_config()
    shares_raw = raw.get("shares", {})
    shares: dict[str, str] = {}
    if isinstance(shares_raw, dict):
        for key, value in shares_raw.items():
            normalized_key = str(key).strip()
            normalized_value = str(value).strip()
            if normalized_key and normalized_value:
                shares[normalized_key] = normalized_value
    return AgentRuntimeConfig(
        config_path=config_path,
        # The environment variable takes precedence over the config file value.
        agent_access_token=os.getenv("FINDER_COMMANDER_AGENT_ACCESS_TOKEN", "").strip()
        or str(raw.get("agent_access_token", "")).strip(),
        shares=shares,
        display_name=str(raw.get("display_name", "")).strip(),
        # "public_endpoint" takes precedence over "endpoint" when both are present.
        endpoint=str(raw.get("public_endpoint", raw.get("endpoint", ""))).strip(),
        client_id=str(raw.get("client_id", "")).strip(),
        platform=str(raw.get("platform", "macos")).strip() or "macos",
    )
def require_agent_auth(request: Request) -> None:
    """Enforce bearer-token authentication on agent endpoints.

    A no-op when no token is configured (auth disabled). Otherwise the
    Authorization header must equal exactly "Bearer <token>"; mismatches raise
    a 403 invalid_agent_token error with identification hints in the detail.
    """
    config = get_runtime_config()
    if not config.agent_access_token:
        return
    authorization = request.headers.get("authorization", "").strip()
    if authorization != f"Bearer {config.agent_access_token}":
        raise_agent_error(
            status_code=403,
            code="invalid_agent_token",
            message="Invalid agent token",
            extra={
                # Help operators see which config/identity rejected the call.
                "config_path": str(config.config_path) if config.config_path else None,
                "client_id": config.client_id or None,
                "display_name": config.display_name or None,
            },
        )
def raise_agent_error(status_code: int, code: str, message: str, *, extra: dict | None = None) -> None:
    """Raise an HTTPException whose detail follows the agent error shape.

    The detail is always ``{"code": ..., "message": ...}``, optionally merged
    with additional diagnostic fields from ``extra``.
    """
    payload = {"code": code, "message": message}
    payload.update(extra or {})
    raise HTTPException(status_code=status_code, detail=payload)
def get_share_root(share: str) -> Path:
    """Resolve a share key to its configured absolute root directory.

    Raises a 404 path_not_found agent error for unknown share keys.
    """
    config = get_runtime_config()
    normalized_share = (share or "").strip()
    if normalized_share not in config.shares:
        raise_agent_error(404, "path_not_found", "Share not found")
    return Path(config.shares[normalized_share]).expanduser().resolve(strict=False)
def ensure_within_root(root: Path, candidate: Path) -> Path:
    """Guard that ``candidate`` stays inside the share ``root``.

    Callers pass already-resolved paths, so symlinks whose targets lie outside
    the root are caught here as well. Raises a 403 path_traversal_detected
    agent error on escape; otherwise returns the candidate unchanged.
    """
    try:
        candidate.relative_to(root)
    except ValueError as exc:
        _ = exc  # discarded: the structured agent error replaces it
        raise_agent_error(403, "path_traversal_detected", "Path escapes share root")
    return candidate
def resolve_share_path(share: str, raw_path: str, *, must_exist: bool = True) -> Path:
    """Resolve a share-relative request path to an absolute filesystem path.

    Backslashes are normalized to forward slashes; absolute paths and any
    ".." component are rejected up front (400 invalid_request). The resolved
    result must stay inside the share root (403) and, when ``must_exist`` is
    true, must exist on disk (404).
    """
    root = get_share_root(share)
    normalized = (raw_path or "").strip().replace("\\", "/")
    if normalized.startswith("/") or any(part == ".." for part in normalized.split("/")):
        raise_agent_error(400, "invalid_request", "Invalid share-relative path")
    # resolve() follows symlinks, so link-based escapes are caught by the root check.
    candidate = (root / normalized).resolve(strict=False)
    candidate = ensure_within_root(root, candidate)
    if must_exist and not candidate.exists():
        raise_agent_error(404, "path_not_found", "Path not found")
    return candidate
def directory_entry_payload(path: Path) -> dict:
    """Serialize one directory child for the /api/list response.

    NOTE(review): size/modified come from lstat() (the link itself) while
    "kind" uses is_dir(), which follows symlinks — a symlink to a directory is
    reported as a directory with the link's own metadata. Confirm this mix is
    intended.
    """
    stat_result = path.lstat()
    return {
        "name": path.name,
        "kind": "directory" if path.is_dir() else "file",
        "size": stat_result.st_size,
        "modified": datetime.fromtimestamp(stat_result.st_mtime, tz=timezone.utc).isoformat().replace("+00:00", "Z"),
    }
def info_payload(path: Path, *, share: str, raw_path: str) -> dict:
    """Build the /api/info response for a file or directory.

    Directories report size=None; image dimensions are probed only for
    regular files (PNG/GIF/BMP, else None/None). owner/group are not resolved
    in this phase and are always None.
    """
    stat_result = path.lstat()
    kind = "directory" if path.is_dir() else "file"
    mime, _ = mimetypes.guess_type(path.name)
    width, height = image_dimensions(path) if path.is_file() else (None, None)
    return {
        "share": share,
        # Echo the request path, normalized to forward slashes without outer slashes.
        "path": raw_path.strip().replace("\\", "/").strip("/"),
        "name": path.name,
        "kind": kind,
        "size": None if path.is_dir() else stat_result.st_size,
        "modified": datetime.fromtimestamp(stat_result.st_mtime, tz=timezone.utc).isoformat().replace("+00:00", "Z"),
        "content_type": mime or "application/octet-stream",
        "extension": path.suffix.lower() or None,
        "width": width,
        "height": height,
        "owner": None,
        "group": None,
        "config_path": str(get_runtime_config().config_path) if get_runtime_config().config_path else None,
    }
def list_directory(path: Path, *, show_hidden: bool) -> list[dict]:
    """Return serialized children of ``path``: directories first, then by name.

    Dotfiles are skipped unless ``show_hidden`` is set; an OS-level
    PermissionError becomes a 403 forbidden agent error.
    """
    try:
        children = list(path.iterdir())
    except PermissionError as exc:
        _ = exc  # discarded: replaced by the structured agent error
        raise_agent_error(403, "forbidden", "Permission denied by operating system")
    filtered = []
    for child in children:
        if not show_hidden and child.name.startswith("."):
            continue
        filtered.append(child)
    # Sort key: directories before files, then case-insensitive name.
    filtered.sort(key=lambda item: (not item.is_dir(), item.name.lower()))
    return [directory_entry_payload(child) for child in filtered]
def text_content_type_for_name(name: str) -> str | None:
    """Map a file name to its text-preview content type, or None when unsupported.

    Extensionless names like "Dockerfile" are matched case-insensitively
    against SPECIAL_TEXT_FILENAMES before falling back to the extension map.
    """
    normalized = (name or "").lower()
    special = SPECIAL_TEXT_FILENAMES.get(normalized)
    return special if special else TEXT_CONTENT_TYPES.get(Path(name).suffix.lower())
def read_text_preview(path: Path, *, max_bytes: int) -> dict:
    """Read up to ``max_bytes`` of UTF-8 text from ``path`` for preview.

    Returns size/modified metadata plus the decoded content and a
    ``truncated`` flag. Binary data (NUL bytes or invalid UTF-8) is rejected
    with a 409 unsupported_type agent error.

    Fix: stat the file once so ``size`` and ``modified`` come from the same
    snapshot — the original called ``path.stat()`` twice, so a concurrent
    writer could make the two fields describe different file states.
    """
    stat_result = path.stat()
    size = int(stat_result.st_size)
    # Clamp the caller's request to [1, TEXT_PREVIEW_MAX_BYTES].
    preview_limit = min(max(1, int(max_bytes)), TEXT_PREVIEW_MAX_BYTES)
    with path.open("rb") as handle:
        # Read one extra byte so truncation is detected even when the stat
        # size is stale.
        raw = handle.read(preview_limit + 1)
    truncated = size > preview_limit or len(raw) > preview_limit
    if truncated:
        raw = raw[:preview_limit]
    if b"\x00" in raw:
        raise_agent_error(409, "unsupported_type", "Binary content is not supported for text preview")
    try:
        content = raw.decode("utf-8")
    except UnicodeDecodeError:
        raise_agent_error(409, "unsupported_type", "Binary content is not supported for text preview")
    return {
        "size": size,
        "modified": datetime.fromtimestamp(stat_result.st_mtime, tz=timezone.utc).isoformat().replace("+00:00", "Z"),
        "encoding": "utf-8",
        "truncated": truncated,
        "content": content,
    }
def image_dimensions(path: Path) -> tuple[int | None, int | None]:
suffix = path.suffix.lower()
try:
if suffix == ".png":
with path.open("rb") as handle:
header = handle.read(24)
if len(header) < 24 or header[:8] != b"\x89PNG\r\n\x1a\n":
return None, None
return struct.unpack(">II", header[16:24])
if suffix == ".gif":
with path.open("rb") as handle:
header = handle.read(10)
if len(header) < 10 or header[:6] not in {b"GIF87a", b"GIF89a"}:
return None, None
return struct.unpack("<HH", header[6:10])
if suffix == ".bmp":
with path.open("rb") as handle:
header = handle.read(26)
if len(header) < 26 or header[:2] != b"BM":
return None, None
width, height = struct.unpack("<ii", header[18:26])
return abs(width), abs(height)
except (OSError, ValueError, struct.error):
return None, None
return None, None
app = FastAPI(title=APP_NAME)


@app.get("/")
def root() -> dict:
    """Unauthenticated service banner: identity, share keys, and auth status."""
    config = get_runtime_config()
    return {
        "ok": True,
        "app": APP_NAME,
        "time": _now_iso(),
        "client_id": config.client_id or None,
        "display_name": config.display_name or None,
        "config_path": str(config.config_path) if config.config_path else None,
        "shares": sorted(config.shares.keys()),
        "auth_enabled": bool(config.agent_access_token),
    }
@app.get("/health")
def health(request: Request) -> dict:
    """Authenticated identity and health probe (richer than the root banner)."""
    require_agent_auth(request)
    config = get_runtime_config()
    return {
        "ok": True,
        "app": APP_NAME,
        "time": _now_iso(),
        "client_id": config.client_id or None,
        "display_name": config.display_name or None,
        "platform": config.platform,
        "endpoint": config.endpoint or None,
        "shares": sorted(config.shares.keys()),
        "config_path": str(config.config_path) if config.config_path else None,
        "port_hint": DEFAULT_PORT,
        "auth_enabled": bool(config.agent_access_token),
    }
@app.get("/api/list")
def api_list(request: Request, share: str, path: str = "", show_hidden: bool = False) -> dict:
    """List the contents of a directory inside a configured share.

    Fix: the non-directory case raised a bare-string HTTPException, unlike
    every other endpoint in this module; it now goes through raise_agent_error
    so the 400 carries the structured {"code", "message"} detail shape.
    """
    require_agent_auth(request)
    target = resolve_share_path(share, path)
    if not target.is_dir():
        raise_agent_error(400, "invalid_request", "Path is not a directory")
    return {
        "share": share.strip(),
        "path": path.strip().replace("\\", "/").strip("/"),
        "entries": list_directory(target, show_hidden=show_hidden),
    }
@app.get("/api/info")
def api_info(request: Request, share: str, path: str = "") -> dict:
    """Return metadata (kind, size, mtime, mime, dimensions) for a share path."""
    require_agent_auth(request)
    target = resolve_share_path(share, path)
    return info_payload(target, share=share.strip(), raw_path=path)
@app.get("/api/read")
def api_read(request: Request, share: str, path: str = "", max_bytes: int = TEXT_PREVIEW_MAX_BYTES) -> dict:
    """Return a UTF-8 text preview of a regular file inside a share.

    Only files on the text allow-list are served; directories and other path
    types raise 409 type_conflict, unsupported types 409 unsupported_type.
    """
    require_agent_auth(request)
    target = resolve_share_path(share, path)
    if target.is_dir():
        raise_agent_error(409, "type_conflict", "Source must be a file")
    if not target.is_file():
        raise_agent_error(409, "type_conflict", "Unsupported path type for read")
    content_type = text_content_type_for_name(target.name)
    if content_type is None:
        raise_agent_error(409, "unsupported_type", "File type is not supported for text preview")
    return {
        "name": target.name,
        "path": path.strip().replace("\\", "/").strip("/"),
        "content_type": content_type,
        # Merge in size/modified/encoding/truncated/content from the preview reader.
        **read_text_preview(target, max_bytes=max_bytes),
    }
@app.get("/api/download")
def api_download(request: Request, share: str, path: str = "") -> FileResponse:
    """Stream a regular file from a share as an attachment download.

    Directories and non-regular paths raise 409 type_conflict; the content
    type is guessed from the file name, falling back to octet-stream.
    """
    require_agent_auth(request)
    target = resolve_share_path(share, path)
    if target.is_dir():
        raise_agent_error(409, "type_conflict", "Source must be a file")
    if not target.is_file():
        raise_agent_error(409, "type_conflict", "Unsupported path type for download")
    return FileResponse(
        path=target,
        media_type=mimetypes.guess_type(target.name)[0] or "application/octet-stream",
        # Setting filename makes FileResponse emit a Content-Disposition attachment.
        filename=target.name,
    )
@app.exception_handler(HTTPException)
async def http_exception_handler(_: Request, exc: HTTPException) -> JSONResponse:
    """Wrap HTTPException details in the agent's {"ok": false, "detail": ...} envelope."""
    return JSONResponse(status_code=exc.status_code, content={"ok": False, "detail": exc.detail})


@app.exception_handler(Exception)
async def unhandled_exception_handler(_: Request, exc: Exception) -> JSONResponse:
    """Last-resort handler: surface unexpected errors as a 500 in the same envelope."""
    return JSONResponse(status_code=500, content={"ok": False, "detail": str(exc)})
@@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.webmanager.remote-client-agent</string>
<key>ProgramArguments</key>
<array>
<string>/usr/bin/python3</string>
<string>-u</string>
<string>/workspace/webmanager-mvp/finder_commander/remote_client_agent.py</string>
<string>--config</string>
<string>/workspace/webmanager-mvp/finder_commander/remote_client_agent.launchd.json</string>
</array>
<key>RunAtLoad</key>
<true/>
<key>KeepAlive</key>
<true/>
<key>StandardOutPath</key>
<string>/tmp/com.webmanager.remote-client-agent.out.log</string>
<key>StandardErrorPath</key>
<string>/tmp/com.webmanager.remote-client-agent.err.log</string>
</dict>
</plist>
@@ -0,0 +1,15 @@
{
"agent_access_token": "change-me-agent-token",
"client_id": "",
"display_name": "MacBook Pro van Jan",
"endpoint": "http://192.168.1.25:8765",
"heartbeat_interval_seconds": 20,
"platform": "macos",
"registration_token": "change-me-registration-token",
"shares": {
"downloads": "/Users/jan/Downloads",
"movies": "/Users/jan/Movies",
"pictures": "/Users/jan/Pictures"
},
"webmanager_base_url": "http://127.0.0.1:8080"
}
@@ -0,0 +1,15 @@
{
"agent_access_token": "change-me-agent-token",
"client_id": "",
"display_name": "MacBook Pro van Jan",
"endpoint": "http://192.168.1.25:8765",
"heartbeat_interval_seconds": 20,
"platform": "macos",
"registration_token": "change-me-registration-token",
"shares": {
"downloads": "/Users/jan/Downloads",
"movies": "/Users/jan/Movies",
"pictures": "/Users/jan/Pictures"
},
"webmanager_base_url": "http://127.0.0.1:8080"
}
+220
View File
@@ -0,0 +1,220 @@
from __future__ import annotations
import argparse
import json
import sys
import threading
import uuid
from dataclasses import dataclass
from pathlib import Path
from typing import Any
from urllib import error, request
from urllib.parse import urlparse
import uvicorn
# Version string reported to the manager in register/heartbeat payloads.
AGENT_VERSION = "1.1.0-phase1"


@dataclass
class AgentConfig:
    """Parsed remote-agent configuration (see remote_client_agent.example.json)."""

    # Path the config was loaded from; rewritten when a client_id is generated.
    config_path: Path
    # Base URL of the WebManager the agent registers with.
    webmanager_base_url: str
    # Bearer token used for register/heartbeat calls to the manager.
    registration_token: str
    # Token the HTTP agent expects from callers of its own endpoints.
    agent_access_token: str
    # Human-readable name for this client.
    display_name: str
    # URL of this agent's HTTP API as advertised to the manager.
    endpoint: str
    # Share key -> absolute root directory.
    shares: dict[str, str]
    # Interval between heartbeats; clamped to >= 5 by load_config.
    heartbeat_interval_seconds: int
    # Stable client identity (UUID4, generated on first run when missing).
    client_id: str
    platform: str = "macos"

    @property
    def normalized_base_url(self) -> str:
        """Base URL without a trailing slash, ready for path concatenation."""
        return self.webmanager_base_url.rstrip("/")
def load_config(config_path: Path) -> AgentConfig:
    """Parse the agent config JSON and normalize its fields.

    Side effect: when no client_id is present, a fresh UUID4 is generated and
    the config file is rewritten so the identity stays stable across restarts.
    Raises ValueError when no usable share is configured.
    """
    raw = json.loads(config_path.read_text(encoding="utf-8"))
    client_id = str(raw.get("client_id", "")).strip()
    if not client_id:
        client_id = str(uuid.uuid4())
        raw["client_id"] = client_id
        # Persist the generated id back to disk immediately.
        config_path.write_text(json.dumps(raw, indent=2, sort_keys=True) + "\n", encoding="utf-8")
    shares_raw = raw.get("shares") or {}
    shares: dict[str, str] = {}
    if isinstance(shares_raw, dict):
        for key, value in shares_raw.items():
            normalized_key = str(key).strip()
            normalized_value = str(value).strip()
            if normalized_key and normalized_value:
                shares[normalized_key] = normalized_value
    if not shares:
        raise ValueError("config requires at least one share")
    return AgentConfig(
        config_path=config_path,
        webmanager_base_url=str(raw.get("webmanager_base_url", "")).strip(),
        registration_token=str(raw.get("registration_token", "")).strip(),
        agent_access_token=str(raw.get("agent_access_token", "")).strip(),
        display_name=str(raw.get("display_name", "")).strip(),
        # "public_endpoint" takes precedence over "endpoint" when both are present.
        endpoint=str(raw.get("public_endpoint", raw.get("endpoint", ""))).strip(),
        shares=shares,
        # Clamp to at least 5 seconds.
        heartbeat_interval_seconds=max(5, int(raw.get("heartbeat_interval_seconds", 20))),
        client_id=client_id,
        platform=str(raw.get("platform", "macos")).strip() or "macos",
    )
def require_non_empty(value: str, field: str) -> str:
    """Strip ``value`` and return it; raise ValueError naming ``field`` when blank."""
    stripped = value.strip()
    if stripped:
        return stripped
    raise ValueError(f"config field '{field}' is required")
def build_register_payload(config: AgentConfig) -> dict[str, Any]:
    """Build the body for POST /api/clients/register.

    Shares are advertised as sorted key/label pairs; the label is simply the
    capitalized key in this phase.
    """
    return {
        "client_id": config.client_id,
        "display_name": config.display_name,
        "platform": config.platform,
        "agent_version": AGENT_VERSION,
        "endpoint": config.endpoint,
        "shares": [{"key": key, "label": key.capitalize()} for key in sorted(config.shares.keys())],
    }
def build_heartbeat_payload(config: AgentConfig) -> dict[str, Any]:
    """Build the minimal body for POST /api/clients/heartbeat."""
    return {
        "client_id": config.client_id,
        "agent_version": AGENT_VERSION,
    }
def post_json(url: str, token: str, payload: dict[str, Any]) -> dict[str, Any]:
    """POST ``payload`` as JSON with a bearer token and return the parsed reply.

    Uses a 10 second timeout; urllib HTTP/URL errors propagate to the caller.
    """
    data = json.dumps(payload).encode("utf-8")
    req = request.Request(
        url,
        method="POST",
        data=data,
        headers={
            "Content-Type": "application/json",
            "Authorization": f"Bearer {token}",
        },
    )
    with request.urlopen(req, timeout=10) as resp:
        return json.loads(resp.read().decode("utf-8"))
def run_heartbeat_loop(config: AgentConfig, stop_event: threading.Event) -> None:
    """Register with the manager, then send heartbeats until ``stop_event`` is set.

    Registration is retried every heartbeat interval until it succeeds once,
    after which the loop switches to periodic heartbeats. Failures are logged
    to stderr and retried; the loop only exits via ``stop_event``. Raises
    ValueError up front when a required config field is blank.
    """
    require_non_empty(config.webmanager_base_url, "webmanager_base_url")
    require_non_empty(config.registration_token, "registration_token")
    require_non_empty(config.agent_access_token, "agent_access_token")
    require_non_empty(config.display_name, "display_name")
    require_non_empty(config.endpoint, "public_endpoint")
    register_url = f"{config.normalized_base_url}/api/clients/register"
    heartbeat_url = f"{config.normalized_base_url}/api/clients/heartbeat"
    print(f"Starting remote client agent for {config.display_name} ({config.client_id})", flush=True)
    print(f"Using config: {config.config_path}", flush=True)
    print("agent_access_token is configured for authenticated agent endpoints", flush=True)
    # Phase 1: retry registration until it succeeds once.
    while not stop_event.is_set():
        try:
            post_json(register_url, config.registration_token, build_register_payload(config))
            print("register ok", flush=True)
            break
        except error.HTTPError as exc:
            print(f"register failed: HTTP {exc.code}", file=sys.stderr, flush=True)
        except error.URLError as exc:
            print(f"register failed: {exc.reason}", file=sys.stderr, flush=True)
        # wait() doubles as an interruptible sleep between attempts.
        if stop_event.wait(config.heartbeat_interval_seconds):
            return
    # Phase 2: periodic heartbeats until stopped.
    while not stop_event.is_set():
        try:
            post_json(heartbeat_url, config.registration_token, build_heartbeat_payload(config))
            print("heartbeat ok", flush=True)
        except error.HTTPError as exc:
            print(f"heartbeat failed: HTTP {exc.code}", file=sys.stderr, flush=True)
        except error.URLError as exc:
            print(f"heartbeat failed: {exc.reason}", file=sys.stderr, flush=True)
        if stop_event.wait(config.heartbeat_interval_seconds):
            return
def resolve_bind_host(config: AgentConfig, requested_host: str | None) -> str:
    """Return the explicit host when given, otherwise bind on all interfaces.

    ``config`` is accepted for signature parity with resolve_bind_port but is
    not consulted.
    """
    host = (requested_host or "").strip()
    return host if host else "0.0.0.0"
def resolve_bind_port(config: AgentConfig, requested_port: int | None) -> int:
    """Pick the HTTP bind port.

    Priority: an explicit positive ``requested_port``, then the port embedded
    in the configured endpoint URL, then the scheme default (443 for https,
    80 for http), and finally 8765.
    """
    if requested_port and requested_port > 0:
        return requested_port
    parsed = urlparse(config.endpoint)
    if parsed.port:
        return parsed.port
    scheme_defaults = {"https": 443, "http": 80}
    return scheme_defaults.get(parsed.scheme, 8765)
def run(config: AgentConfig, requested_host: str | None, requested_port: int | None) -> None:
    """Start the heartbeat thread and serve the HTTP agent until uvicorn exits.

    The config path is handed to the FastAPI app ("app.main:app") via the
    FINDER_COMMANDER_REMOTE_AGENT_CONFIG environment variable, since uvicorn
    imports the app by string. The daemon heartbeat thread is signalled to
    stop and joined briefly when the server returns.
    """
    stop_event = threading.Event()
    heartbeat_thread = threading.Thread(
        target=run_heartbeat_loop,
        args=(config, stop_event),
        daemon=True,
        name="remote-client-heartbeat",
    )
    heartbeat_thread.start()
    bind_host = resolve_bind_host(config, requested_host)
    bind_port = resolve_bind_port(config, requested_port)
    print(f"Starting HTTP agent on {bind_host}:{bind_port}", flush=True)
    print(f"Advertised endpoint: {config.endpoint}", flush=True)
    try:
        # Local import: os is only needed here and is not a module-level dependency.
        import os

        os.environ["FINDER_COMMANDER_REMOTE_AGENT_CONFIG"] = str(config.config_path)
        uvicorn.run("app.main:app", host=bind_host, port=bind_port)
    finally:
        stop_event.set()
        heartbeat_thread.join(timeout=2)
def parse_args() -> argparse.Namespace:
    """Parse CLI arguments; the config default is the example file next to this script."""
    parser = argparse.ArgumentParser(description="Remote client agent Phase 1 for WebManager MVP")
    parser.add_argument(
        "--config",
        default=str(Path(__file__).resolve().with_name("remote_client_agent.example.json")),
        help="Path to remote client agent config JSON",
    )
    parser.add_argument("--host", default="", help="Bind host for the HTTP agent, defaults to 0.0.0.0")
    parser.add_argument("--port", type=int, default=0, help="Bind port for the HTTP agent, defaults to endpoint port")
    return parser.parse_args()
def main() -> int:
    """CLI entry point. Returns 0 on success, 130 on Ctrl-C, 1 on any other error."""
    args = parse_args()
    try:
        config = load_config(Path(args.config).resolve())
        run(config, requested_host=args.host, requested_port=args.port)
    except KeyboardInterrupt:
        return 130
    except Exception as exc:  # top-level boundary: report and exit non-zero
        print(str(exc), file=sys.stderr)
        return 1
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
+4
View File
@@ -0,0 +1,4 @@
fastapi>=0.128.8,<1.0
uvicorn>=0.39,<1.0
jinja2>=3.1.6,<4.0
python-multipart>=0.0.22,<1.0
+27
View File
@@ -0,0 +1,27 @@
#!/usr/bin/env bash
# Bootstrap a local virtualenv and run Finder Commander on 127.0.0.1:8765.
set -euo pipefail
# Work relative to this script's directory.
cd "$(dirname "$0")"
# Prefer python3.14 when installed; fall back to the default python3.
if command -v python3.14 >/dev/null 2>&1; then
  PYTHON_BIN=python3.14
else
  PYTHON_BIN=python3
fi
echo "Using Python: $($PYTHON_BIN --version 2>&1)"
# major.minor of the interpreter we intend to use.
TARGET_MM=$("$PYTHON_BIN" -c 'import sys; print(f"{sys.version_info[0]}.{sys.version_info[1]}")')
CURRENT_MM=""
if [ -x .venv/bin/python ]; then
  CURRENT_MM=$(.venv/bin/python -c 'import sys; print(f"{sys.version_info[0]}.{sys.version_info[1]}")')
fi
# Recreate the venv when missing or built with a different Python minor version.
if [ ! -d .venv ] || [ "$CURRENT_MM" != "$TARGET_MM" ]; then
  rm -rf .venv
  "$PYTHON_BIN" -m venv .venv
fi
source .venv/bin/activate
python -m pip install --upgrade pip
python -m pip install -r requirements.txt
# exec replaces the shell so signals reach uvicorn directly.
exec python -m uvicorn app.main:app --host 127.0.0.1 --port 8765
+35
View File
@@ -0,0 +1,35 @@
from __future__ import annotations
import argparse
import os
from pathlib import Path
import uvicorn
def parse_args() -> argparse.Namespace:
    """Parse CLI arguments for the standalone HTTP-agent runner."""
    parser = argparse.ArgumentParser(description="Run Finder Commander remote agent HTTP API")
    parser.add_argument(
        "--config",
        required=True,
        help="Path to remote agent config JSON",
    )
    parser.add_argument("--host", default="0.0.0.0", help="Listen host")
    parser.add_argument("--port", type=int, default=8765, help="Listen port")
    return parser.parse_args()
def main() -> int:
    """Validate the config path, export it for app.main, and serve with uvicorn."""
    args = parse_args()
    config_path = Path(args.config).expanduser().resolve(strict=False)
    if not config_path.is_file():
        raise SystemExit(f"Config file not found: {config_path}")
    # app.main reads this environment variable when building its runtime config.
    os.environ["FINDER_COMMANDER_REMOTE_AGENT_CONFIG"] = str(config_path)
    print(f"Using config: {config_path}", flush=True)
    uvicorn.run("app.main:app", host=args.host, port=args.port)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
@@ -0,0 +1,79 @@
from __future__ import annotations
import json
import os
import tempfile
import unittest
from pathlib import Path
from fastapi import HTTPException
from starlette.requests import Request
from finder_commander.app import main as agent_main
class AgentFileEndpointsTest(unittest.TestCase):
    """Unit tests for the agent's read-only file endpoints (info/read/download)."""

    def setUp(self) -> None:
        # Build an isolated share root plus a sibling directory outside the
        # share, point the agent at a temp config via the env var, and reset
        # the cached runtime config.
        self.temp_dir = tempfile.TemporaryDirectory()
        self.share_root = Path(self.temp_dir.name) / "Downloads"
        self.share_root.mkdir(parents=True, exist_ok=True)
        self.outside_root = Path(self.temp_dir.name) / "Outside"
        self.outside_root.mkdir(parents=True, exist_ok=True)
        self.config_path = Path(self.temp_dir.name) / "agent.json"
        self.config_path.write_text(
            json.dumps(
                {
                    "agent_access_token": "agent-secret",
                    "client_id": "client-123",
                    "display_name": "Jan MacBook",
                    "shares": {"downloads": str(self.share_root)},
                }
            ),
            encoding="utf-8",
        )
        os.environ["FINDER_COMMANDER_REMOTE_AGENT_CONFIG"] = str(self.config_path)
        agent_main.get_runtime_config.cache_clear()

    def tearDown(self) -> None:
        # Undo the env override and cached config so tests stay independent.
        os.environ.pop("FINDER_COMMANDER_REMOTE_AGENT_CONFIG", None)
        agent_main.get_runtime_config.cache_clear()
        self.temp_dir.cleanup()

    @staticmethod
    def _authorized_request() -> Request:
        # Minimal ASGI scope carrying the bearer token the agent expects.
        return Request({"type": "http", "headers": [(b"authorization", b"Bearer agent-secret")]})

    def test_info_read_and_download_success(self) -> None:
        notes = self.share_root / "notes.md"
        notes.write_text("# title\nhello\n", encoding="utf-8")
        info_response = agent_main.api_info(self._authorized_request(), share="downloads", path="notes.md")
        self.assertEqual(info_response["kind"], "file")
        self.assertEqual(info_response["extension"], ".md")
        # max_bytes=4 forces truncation after "# ti".
        read_response = agent_main.api_read(self._authorized_request(), share="downloads", path="notes.md", max_bytes=4)
        self.assertTrue(read_response["truncated"])
        self.assertEqual(read_response["content"], "# ti")
        download_response = agent_main.api_download(self._authorized_request(), share="downloads", path="notes.md")
        self.assertEqual(download_response.media_type, "text/markdown")
        self.assertIn('attachment; filename="notes.md"', download_response.headers.get("content-disposition", ""))

    def test_unknown_share_and_escape_outside_root_are_rejected(self) -> None:
        # A symlink inside the share pointing outside must be treated as traversal.
        outside_file = self.outside_root / "secret.txt"
        outside_file.write_text("secret", encoding="utf-8")
        (self.share_root / "escape.txt").symlink_to(outside_file)
        with self.assertRaises(HTTPException) as unknown_share:
            agent_main.api_info(self._authorized_request(), share="missing", path="notes.md")
        self.assertEqual(unknown_share.exception.status_code, 404)
        self.assertEqual(unknown_share.exception.detail["code"], "path_not_found")
        with self.assertRaises(HTTPException) as escaped:
            agent_main.api_info(self._authorized_request(), share="downloads", path="escape.txt")
        self.assertEqual(escaped.exception.status_code, 403)
        self.assertEqual(escaped.exception.detail["code"], "path_traversal_detected")


if __name__ == "__main__":
    unittest.main()
+60 -2
View File
@@ -54,9 +54,12 @@ Success:
Conflict (`already_exists`) + invalid name (`invalid_request`) gebruiken dezelfde error-shape als mkdir. Conflict (`already_exists`) + invalid name (`invalid_request`) gebruiken dezelfde error-shape als mkdir.
### `POST /api/files/delete` ### `POST /api/files/delete`
Success: Success (202):
```json ```json
{ "path": "storage1/parent/file_or_empty_dir" } {
"task_id": "<uuid>",
"status": "queued"
}
``` ```
Non-empty directory: Non-empty directory:
@@ -74,6 +77,7 @@ Non-empty directory:
### `POST /api/files/copy` ### `POST /api/files/copy`
### `POST /api/files/move` ### `POST /api/files/move`
### `POST /api/files/delete`
Success (202): Success (202):
```json ```json
{ {
@@ -82,6 +86,13 @@ Success (202):
} }
``` ```
Notes:
- Batch move is supported as one task-based operation via `{ "sources": [...], "destination_base": "..." }`.
- Cross-root batch move is supported for file-only selections.
- Cross-root batch move with any directory in the selection remains unsupported in v1.
- Batch delete is supported as one task-based operation via `{ "paths": [...], "recursive_paths": [...] }`.
- Single delete remains supported via `{ "path": "...", "recursive": true|false }`.
## Tasks read endpoints ## Tasks read endpoints
### `GET /api/tasks` ### `GET /api/tasks`
@@ -125,6 +136,53 @@ Response shape:
} }
``` ```
Voor task-based file-actions `copy`, `move`, `duplicate` en `delete` betekenen progressvelden:
- `done_items`: aantal volledig verwerkte bestanden
- `total_items`: exact aantal te verwerken bestanden in de hele task
- `current_item`: taakrelatief bestandspad als beschikbaar, anders bestandsnaam
Voor `move` geldt een expliciete uitzondering:
- file-gebaseerde move-paden rapporteren file-progress
- same-root directory moves behouden directe rename-semantiek en rapporteren daarom grovere item-progress per directory-operatie
Voor `delete` geldt:
- recursive delete van directorytrees rapporteert file-progress per verwijderd bestand
- lege mappen of directory-only deletes houden `done_items = 0`, `total_items = 0` en gebruiken geen kunstmatige file-teller
### `POST /api/tasks/{task_id}/cancel`
Success for cancellable file-action task:
```json
{
"id": "<uuid>",
"operation": "copy",
"status": "cancelling",
"source": "2 items",
"destination": "storage1/dest",
"done_bytes": null,
"total_bytes": null,
"done_items": 0,
"total_items": 2,
"current_item": "storage1/a.txt",
"failed_item": null,
"error_code": null,
"error_message": null,
"created_at": "2026-03-10T10:00:00Z",
"started_at": "2026-03-10T10:00:01Z",
"finished_at": null
}
```
Not cancellable:
```json
{
"error": {
"code": "task_not_cancellable",
"message": "Task cannot be cancelled",
"details": { "task_id": "<uuid>", "status": "completed" }
}
}
```
Task not found: Task not found:
```json ```json
{ {
+220
View File
@@ -0,0 +1,220 @@
# Folder Upload v1 Design
## 1. Doel
Folder upload voegt waarde toe omdat de huidige uploadflow al bruikbaar is voor losse bestanden en batches, maar niet voor veelvoorkomende workflows waarbij een gebruiker een complete lokale mapstructuur naar de storage wil kopieren. Dat past logisch binnen de bestaande dual-pane workflow: het actieve paneel bepaalt al de doelmap, en upload is al een expliciete actie in de functiebalk.
De kern van v1 is niet "een nieuwe uploadarchitectuur", maar een gecontroleerde uitbreiding van de bestaande uploadflow zodat een lokale map recursief kan worden ingestuurd naar `currentPath` van het actieve paneel.
## 2. Scope
Folder Upload v1 ondersteunt expliciet:
- selectie van precies een lokale map via de browser
- recursieve upload van de inhoud van die map
- behoud van directorystructuur onder het gekozen doelpad
- target = `currentPath` van het actieve paneel
- hergebruik van de bestaande sequentiele uploadflow en bestaande conflictopties
Niet in scope voor v1:
- meerdere lokale mappen tegelijk
- drag & drop
- resumable upload
- chunked upload
- taskmodel-integratie
- rollback
- backendherontwerp buiten wat strikt nodig is om directorystructuur veilig te ondersteunen
Aanbevolen v1-scope met laag regressierisico:
- precies 1 geselecteerde lokale map
- recursieve upload van alle files daaronder
- directorystructuur behouden
- conflictbehandeling alleen op bestandsniveau via bestaande keuzes
## 3. Browserselectie
Browsermatig is folderselectie geen aparte native "map upload API" zoals bij desktop-apps, maar een file input met directory-selectie-attributen zoals `webkitdirectory`. In de praktijk levert dit een lijst bestanden op met relatieve paden binnen de gekozen map.
Dit past redelijk goed bij de bestaande native file picker flow:
- huidige uploadknop opent al een browser file picker
- voor folder upload kan een aparte, kleine flow dezelfde picker gebruiken, maar dan in directory-selectiemodus
- drag & drop is niet nodig voor v1
Aanbeveling:
- v1 gebruikt browser-native directory picker via input-attributen
- geen drag & drop
- geen extra dependency
## 4. Doelstructuur
De veiligste en meest voorspelbare semantiek voor v1 is:
- de geselecteerde mapnaam zelf wordt meegenomen in de doelstructuur
- dus upload van lokale map `Photos/` naar target `/Volumes/8TB/Uploads` resulteert in:
- `/Volumes/8TB/Uploads/Photos/...`
Dit voorkomt ambiguiteit en sluit aan op gebruikersverwachting uit file managers.
Relatieve paden:
- browser levert per bestand een relatief pad onder de gekozen rootmap
- frontend mag dat relatieve pad gebruiken als beschrijving van directorystructuur
- backend mag die structuur nooit blind vertrouwen zonder per segment validatie
Aanbevolen semantiek:
- geselecteerde mapnaam opnemen
- directorystructuur daaronder behouden
- alle relatieve padsegmenten strikt normaliseren en valideren
## 5. Conflictgedrag
Conflictgedrag moet in v1 voortbouwen op de bestaande uploadconflictflow.
### Bestandsconflicten
Bij een bestaand doelbestand:
- `Overwrite`: huidig bestand overschrijven
- `Overwrite all`: huidige en volgende bestandsconflicten overschrijven
- `Skip`: huidig bestand overslaan
- `Skip all`: huidige en volgende bestandsconflicten overslaan
- `Cancel`: resterende upload stoppen
### Directoryconflicten
Directoryconflict is subtieler. Als de doelmap al bestaat en ook een directory is, hoeft dat in v1 geen fout te zijn. Dat is juist het normale mechanisme om inhoud in een bestaande mapstructuur te laten landen.
Aanbevolen v1-regel:
- bestaande doel-directory: toegestaan, geen conflictmodal
- bestaande doel-directory fungeert als containermap voor verdere recursie
### Typeconflicten
Als een padsegment een typeconflict veroorzaakt, bijvoorbeeld:
- lokale structuur verwacht een directory
- maar op bestemming bestaat daar een file
Dan moet dit als conflict/failure behandeld worden. De bestaande conflictknoppen kunnen dan alleen zinnig worden toegepast als overschrijven echt veilig definieerbaar is. Voor v1 is dat te riskant op directoryniveau.
Aanbevolen v1-regel:
- typeconflict directory-versus-file niet proberen slim op te lossen
- behandel als blokkade/failure voor het huidige bestand
- laat bestaande flow stoppen of conflictueel handelen op bestandsniveau, maar niet op "directory vervangen"
## 6. Backend-impact
De bestaande backend uploadbasis is grotendeels herbruikbaar voor de feitelijke bestandsoverdracht, maar folder upload heeft waarschijnlijk extra backendondersteuning nodig voor directorystructuur.
Het bestaande endpoint ondersteunt nu:
- 1 file per request
- `target_path`
- basename-validatie
Voor folder upload is minimaal een van deze routes nodig:
### Route A: frontend maakt directories expliciet aan
- frontend leest relatieve paden
- frontend zorgt eerst dat directories bestaan via bestaand `mkdir` endpoint
- daarna uploadt frontend elk bestand naar het juiste `target_path`
Voordelen:
- weinig nieuw backendcontract
- hergebruik van bestaande `mkdir` en `upload`
Nadelen:
- meer frontendcoordinatie
- meer requests
### Route B: upload-endpoint accepteert veilige relatieve subpath
- per bestand meegeven:
- `target_path`
- `relative_path`
- `file`
- backend maakt ontbrekende directories aan na validatie
Voordelen:
- schonere folder-uploadflow
- minder frontendcomplexiteit
Nadelen:
- nieuw backendcontract
- iets meer validatielogica
Aanbeveling voor laag regressierisico:
- v1 folder upload liever via Route A ontwerpen:
- frontend maakt directories expliciet aan via bestaande of lichte `mkdir`-flow
- frontend uploadt bestanden daarna via bestaand endpoint
- alleen als dat in praktijk te onhandig blijkt, Route B overwegen
Beide varianten moeten blijven leunen op:
- `path_guard`
- bestaande whitelist/root-containment
- bestaande naamvalidatie per segment
## 7. Frontend-impact
De bestaande sequentiele uploadflow kan worden uitgebreid zonder herontwerp:
- browser levert lijst bestanden uit de gekozen map
- frontend groepeert impliciet op relatieve directorystructuur
- frontend zorgt dat doel-directories bestaan
- frontend uploadt daarna de files sequentieel
Voortgang bij veel bestanden:
- huidige compacte progress UI kan blijven
- tonen:
- aantal totaal
- huidig bestand
- doelpad of huidige relatieve submap indien nuttig
- geen zware task-UI nodig in v1
Aanbevolen v1-richting:
- zelfde uploadmodal/progresscomponent als nu
- alleen uitbreiden met "uploading folder X to path Y"
- geen tweede aparte uploadarchitectuur
## 8. Regressierisico
Belangrijkste risico's:
- security: relatieve paden uit browser niet blind vertrouwen
- diepe mapstructuren: veel requests, langzame voortgang
- gedeeltelijke successen/failures: batch kan halverwege stoppen
- conflictcomplexiteit: directoryconflicten versus bestandsconflicten
- UI-complexiteit: folder upload mag bestaande file upload niet verwarren
Specifiek risico:
- een ogenschijnlijk simpele folder-upload kan ongemerkt uitgroeien tot een mini-sync-engine
- dat moet expliciet vermeden worden
## 9. Teststrategie
### Backend golden tests
Als folder upload later gebouwd wordt, minimaal testen:
- create-mkdir-then-upload flow voor nested directorystructuur
- traversal blokkade op relatieve padsegmenten
- invalid filename segment blokkade
- typeconflict file-versus-directory
- conflict op bestaand bestand
- upload naar bestaande directorystructuur
### UI smoke/regressietests
- folder-upload startpunt aanwezig
- progress UI blijft werken
- conflictopties blijven intact
- actieve-paneel target blijft leidend
### Handmatige validatie
- map met alleen files
- map met nested subdirs
- map met enkele conflicten
- map met typeconflict
- lange/brede directorystructuur
## 10. Aanbeveling
De aanbevolen v1-richting met laag regressierisico is:
- ondersteun precies 1 lokale map
- behoud de geselecteerde mapnaam in de doelstructuur
- gebruik browser-native directory picker
- breid de bestaande sequentiele uploadflow uit in plaats van een nieuwe architectuur te bouwen
- houd conflictbehandeling primair op bestandsniveau
- behandel bestaande directories als toegestaan
- vermijd drag & drop, taskintegratie, chunking en resumable uploads
Concreet aanbevolen technische richting:
- eerst proberen met bestaande architectuur en expliciete directorycreatie vanuit frontend
- alleen als dat te fragiel blijkt een kleine backenduitbreiding voor veilige relatieve paden ontwerpen
Dit houdt folder upload klein, bruikbaar en beheersbaar zonder de bestaande uploadflow opnieuw uit te vinden.
@@ -0,0 +1,359 @@
# Remote Client Shares Implementation Phases V1.1
## Status
Per huidige repositorystatus zijn de in dit document beschreven implementatiefases afgerond:
- Phase 1: afgerond
- Phase 2: afgerond
- Phase 3: afgerond
Dit document beschrijft geen Phase 4.
De sectie `Later` hieronder blijft expliciet buiten de beschreven fasering van V1.1 en is geen impliciete volgende fase.
## Doel
Dit document splitst `REMOTE_CLIENT_SHARES_V1_DESIGN.md` op in pragmatische implementatiefases.
Uitgangspunten:
- geen overengineering
- elke fase moet zelfstandig waarde leveren
- WebManager mag nooit blokkeren op remote agents
- bestaande storage-functionaliteit moet intact blijven
- `/Clients` blijft een aparte source, geen uitbreiding van lokale filesystem roots
---
## Overzicht
### Phase 1
Client registry, identiteit en statusmodel.
### Phase 2
Browse van remote client shares via virtuele `Clients` root.
### Phase 3
Info, tekstpreview, eenvoudige image preview en download voor remote shares.
### Later
Alle write-acties, bookmarks/startup paths en cross-source flows.
Opmerking:
- `Later` betekent in dit document: bewust uitgestelde scope, niet een gedefinieerde volgende implementatiefase
---
## Phase 1: Client Registry
## Doel
WebManager moet remote agents kennen, identificeren en hun status betrouwbaar kunnen bijhouden.
## Resultaat
De backend en UI kunnen een lijst van bekende clients tonen, inclusief stabiele identiteit en basisstatus.
## In scope
- remote client registratie
- heartbeat endpoint
- opslag van client metadata
- statusmodel met gescheiden velden
- lijstendpoint voor bekende clients
- registratie-auth
- agent-access-auth contract vastleggen
## Niet in scope
- browsen in shares
- file operations
- download
- rename/delete/mkdir
## Beslissingen
- `client_id` is leidend
- `display_name` is niet leidend
- browse-routing mag niet afhankelijk zijn van alleen displaynaam
- `last_seen`, `status`, `last_error` en `reachable_at` blijven logisch gescheiden
## Backendwerk
Nieuwe onderdelen:
- repository voor remote clients
- service voor registratie en heartbeat
- statusafleiding
- opslag van auth- en endpointmetadata
- routes:
- `POST /api/clients/register`
- `POST /api/clients/heartbeat`
- `GET /api/clients`
Waarschijnlijk te wijzigen:
- [main.py](/workspace/webmanager-mvp/webui/backend/app/main.py)
- [dependencies.py](/workspace/webmanager-mvp/webui/backend/app/dependencies.py)
Waarschijnlijk nieuw:
- `webui/backend/app/api/routes_clients.py`
- `webui/backend/app/services/remote_client_service.py`
- `webui/backend/app/db/remote_client_repository.py`
## Agentwerk
- vaste config inlezen
- `client_id` beheren
- registratie naar WebManager
- periodieke heartbeat
- agent-access-token config toevoegen
## UI-werk
Minimaal:
- geen browse-integratie nodig
- een eenvoudige clientlijst of debug-status is voldoende
## Acceptatiecriteria
- agent kan zich registreren
- client verschijnt in `GET /api/clients`
- `last_seen` wordt bijgewerkt
- `status` wordt afgeleid zonder te flappen
- `last_error` en `reachable_at` bestaan als apart concept
- server blijft normaal werken als er geen agents bestaan
---
## Phase 2: Browse via `/Clients`
## Doel
Remote clients en hun shares moeten zichtbaar worden in dezelfde browse-ervaring als server storage, zonder lokale services te vervormen.
## Resultaat
De gebruiker kan navigeren naar:
- `/Clients`
- `/Clients/<client>`
- `/Clients/<client>/<share>`
## In scope
- virtuele root `/Clients`
- clientlijst als directories
- sharelijst per client als directories
- browse binnen share
- offline foutafhandeling
- agent-auth op browsecalls
## Niet in scope
- view/download
- edit
- rename/delete/mkdir
- bookmarks/startup paths
## Beslissingen
- `/Clients` wordt vroeg in de backend-route afgehandeld
- remote paden mogen niet in gewone lokale `PathGuard` resolution terechtkomen
- lokale browse-services blijven verantwoordelijk voor alleen lokale server sources
## Backendwerk
Waarschijnlijk te wijzigen:
- [routes_browse.py](/workspace/webmanager-mvp/webui/backend/app/api/routes_browse.py)
Liever niet verbreden:
- [path_guard.py](/workspace/webmanager-mvp/webui/backend/app/security/path_guard.py)
Nieuwe onderdelen:
- browse-facade voor remote client paden
- agent HTTP client met korte timeouts en auth
## UI-werk
Waarschijnlijk te wijzigen:
- [app.js](/workspace/webmanager-mvp/webui/html/app.js)
Benodigd:
- rootnavigatie voor `/Clients`
- breadcrumbs voor client/share-paden
- render van client/status/share directories
- nette foutmelding bij offline client
## Agentwerk
Nieuwe browse endpoint(s):
- `GET /health`
- `GET /api/list?share=...&path=...`
## Acceptatiecriteria
- `/Clients` toont bekende clients
- `/Clients/<client>` toont alleen toegestane shares
- `/Clients/<client>/<share>` toont directory-inhoud
- offline client geeft een snelle fout, geen hang
- `/Volumes` gedrag blijft intact
- lokale browse-code blijft logisch gescheiden van remote browse-code
---
## Phase 3: Info, Preview, Download
## Doel
Remote shares moeten read-only bruikbaar worden voor dagelijkse taken.
## Resultaat
Gebruiker kan bestanden in remote shares inspecteren, bekijken en downloaden.
## In scope
- file info
- tekstpreview
- eenvoudige image preview
- download van remote bestanden
- expliciete resource-limieten
## Niet in scope
- edit
- rename/delete/mkdir
- upload
- cross-source copy/move
## Beslissingen
- tekstpreview krijgt een harde limiet
- text/binary-detectie moet expliciet zijn
- downloads worden gestreamd
- geen grote in-memory buffering voor download
## Backendwerk
Nieuwe facade of routes voor remote file actions:
- info
- read/view
- download
Belangrijk:
- backend vertaalt WebManager-pad naar agent-call
- timeouts en foutmapping blijven streng
- source-aware afhandeling blijft gescheiden van lokale file ops
Waarschijnlijk geraakt:
- `routes_files.py` of parallelle remote-fileroute
- aparte service-laag voor remote file proxying
## UI-werk
Waarschijnlijk te wijzigen:
- [app.js](/workspace/webmanager-mvp/webui/html/app.js)
Benodigd:
- source-aware afhandeling voor `View`
- downloadknop moet remote paths ondersteunen
- properties/info moet ook werken voor remote paden
## Agentwerk
Nieuwe endpoints:
- `GET /api/info`
- `GET /api/read`
- `GET /api/download`
## Acceptatiecriteria
- file info werkt voor remote paden
- tekstbestand kan bekeken worden binnen limieten
- afbeelding kan bekeken worden als ondersteund
- download van remote bestand werkt via streaming
- foutafhandeling blijft lokaal tot betreffende pane/actie
---
## Later
Deze onderdelen horen niet in V1.1.
Status:
- deze onderdelen blijven expliciet buiten de afgeronde Phase 1 t/m Phase 3 scope
- voor deze onderdelen bestaat in dit document geen aparte vervolgfase
### Write-acties
- mkdir
- rename
- delete
- upload
### UI-integraties
- bookmarks voor `/Clients/...`
- startup paths voor `/Clients/...`
### Cross-source flows
- `/Volumes/...` naar `/Clients/...`
- `/Clients/...` naar `/Volumes/...`
Dit vereist expliciete transfersemantiek en hoort niet in de eerste read-mostly release.
### Zwaardere netwerkmodellen
- reverse-connect
- tunnelmodel
- relay-infrastructuur
### Sterkere pairing
- pair codes
- per-agent secret rotation
- signed registration
---
## Aanbevolen volgorde
1. Phase 1 volledig afronden.
2. Daarna Phase 2 volledig afronden.
3. Daarna Phase 3 read-only afronden.
4. Alles daarna alleen oppakken als een concrete productbehoefte dat rechtvaardigt.
---
## Beslisadvies
Als er snel waarde geleverd moet worden, is de beste minimale keten:
1. registry
2. browse
3. info/preview/download
Daarmee ontstaat een bruikbare remote client bron zonder write-complexiteit, contractbreuk in lokale services of half-afgewerkte transferlogica.
@@ -0,0 +1,717 @@
# Remote Client Shares V1.1 Design
## Status
Dit document beschrijft de V1.1-doelscope voor Remote Client Shares.
Per huidige repositorystatus valt de beschreven V1.1 read-mostly scope onder afgeronde implementatie van:
- client registry
- browse via `/Clients`
- file info
- tekstpreview
- eenvoudige image preview
- download
De expliciet niet in V1.1 opgenomen onderdelen hieronder blijven buiten scope en vormen in dit document geen aparte vervolgfase.
## Doel
Een gebruiker van WebManager moet naast de bestaande server-side storage-roots ook een beperkte set lokale mappen van zijn eigen client-Mac kunnen benaderen, zonder de hele homefolder bloot te geven.
Voorbeelden van toegestane client-shares:
- `Downloads`
- `Movies`
- `Pictures`
De oplossing moet:
- simpel blijven
- veilig blijven
- de bestaande storage-workflow niet breken
- WebManager niet laten vastlopen als een remote helper-agent offline is
---
## Kernbeslissingen voor V1.1
Deze beslissingen liggen in V1.1 vast.
- Remote client shares worden niet opgenomen in `root_aliases`.
- `/Clients` wordt een aparte virtuele bron naast `/Volumes`.
- Remote paden lopen niet door de bestaande lokale filesystem-resolutie.
- `client_id` is intern de enige leidende identiteit.
- `display_name` is alleen voor UI-weergave.
- De agent werkt alleen met `share key + relatief pad`.
- Alle agent-calls vereisen authenticatie, niet alleen registratie.
- Offline agents mogen alleen hun eigen subtree beïnvloeden, nooit de rest van de app.
- V1 blijft read-mostly: registry, browse, info, preview, download.
---
## Waarom niet als gewone root alias
De huidige backend gaat uit van server-side whitelisted filesystem roots.
Dat model werkt voor:
- `/Volumes/...`
- gemounte server storage
- container-side toegankelijke paden
Dat model werkt niet goed voor:
- de lokale schijf van de browsergebruiker
- een remote Mac die buiten de server draait
- clients die offline kunnen zijn
- clients die dynamische IP-adressen hebben
Daarom mogen remote client shares niet in hetzelfde model worden gestopt als `root_aliases`.
---
## Scope V1.1
### In scope
- beperkte client-shares: `Downloads`, `Movies`, `Pictures`
- lokale helper-agent op macOS
- agent registratie in WebManager
- heartbeat/status tracking
- virtuele `Clients` bron in de WebUI
- browse van remote shares
- bestand-info
- tekstpreview
- image preview waar triviaal
- download van bestanden
- nette offline-afhandeling
### Expliciet niet in V1.1
- hele homefolder
- willekeurige custom paths buiten de toegestane sharelijst
- shell/subprocess execution
- rename
- mkdir
- delete
- upload naar remote share
- bookmarks voor `/Clients/...`
- startup paths voor `/Clients/...`
- cross-source copy of move
- complete taakrunner-integratie zoals server copy/move tasks
- automatische LAN discovery
- multi-user auth met OS user mapping
Status:
- deze lijst blijft expliciet uitgesloten van V1.1
- dit document definieert hiervoor geen Phase 4 of andere vervolgfase
---
## Gewenste gebruikerservaring
In de WebUI komt naast server-storage een extra virtuele bron:
- `/Volumes`
- `/Clients`
Onder `/Clients` ziet de gebruiker geregistreerde clients, bijvoorbeeld:
- `MacBook Pro van Jan`
- `iMac Woonkamer`
Onder een client ziet de gebruiker alleen de toegestane shares:
- `Downloads`
- `Movies`
- `Pictures`
Voor de gebruiker kan dat eruitzien als:
- `/Clients/MacBook-Pro-van-Jan/Downloads`
- `/Clients/MacBook-Pro-van-Jan/Movies`
- `/Clients/MacBook-Pro-van-Jan/Pictures`
Maar intern mag routing niet op `display_name` leunen.
Intern moet WebManager werken met een stabiele client-identiteit en een mappinglaag:
- `client_id` voor routing en opslag
- `display_name` voor weergave
- optioneel een afgeleide slug voor browse-url-presentatie
---
## Architectuuroverzicht
Er zijn drie componenten.
### 1. WebManager backend
Verantwoordelijk voor:
- registry van bekende remote clients
- status- en heartbeat-tracking
- virtuele browse-root `Clients`
- proxying van requests naar agents
- timeouts en foutafhandeling
- scheiding tussen local-source en remote-source afhandeling
### 2. WebUI frontend
Verantwoordelijk voor:
- tonen van `Clients` als extra bron
- navigeren binnen client/share paden
- offline status tonen
- requests afvuren naar gewone WebManager backend-routes
### 3. Remote helper-agent op macOS
Verantwoordelijk voor:
- toegang tot vaste lokale shares
- strikte padvalidatie binnen die shares
- simpele browse/info/read/download endpoints
- zichzelf registreren bij WebManager
- heartbeat sturen
- auth afdwingen op alle agent-endpoints
---
## Bereikbaarheidsmodel
Dit is de eerste harde productbeslissing.
### V1.1-keuze
V1.1 gaat uit van een omgeving waarin WebManager de agent rechtstreeks kan bereiken.
Dat betekent praktisch:
- dezelfde LAN
- of een expliciet configureerbaar agent-endpoint
- of een deployment waar server en client netwerkmatig direct verbonden zijn
### Waarom deze keuze
Dit is het simpelste model dat functioneel klopt zonder reverse tunnels, websockets als transportlaag, of extra relay-infrastructuur.
### Wat V1.1 niet probeert op te lossen
Deze versie garandeert niet dat een agent achter willekeurige NAT/firewall altijd bereikbaar is.
Dus:
- self-registration blijft het discoverymodel
- direct bereikbare agent-endpoint blijft het V1-transportmodel
- reverse-connect of tunnelmodellen zijn uitgesteld
### Fallback
Een handmatige endpoint override blijft toegestaan als operationele fallback, bijvoorbeeld:
- `http://192.168.1.25:8765`
Maar dat is geen hoofdmodel en geen productbelofte.
---
## Hoe de remote agent bekend wordt in WebManager
### Gekozen model: agent registreert zichzelf
De agent meldt zichzelf actief aan bij WebManager. Niet andersom.
Dat betekent:
- geen handmatig client-IP nodig als hoofdmodel
- geen server-naar-client discovery nodig
- geen afhankelijkheid van LAN-broadcasting
- geen probleem als het client-IP wisselt, zolang het geregistreerde endpoint actueel is
### Registratiestroom
Bij starten van de agent:
1. de agent leest lokale config
2. de agent bepaalt:
- `client_id`
- `display_name`
- `shares`
- `endpoint`
3. de agent registreert zich bij WebManager
4. WebManager slaat client-record op of werkt het bij
5. de agent stuurt periodieke heartbeats
### Benodigde velden bij registratie
Voorstel:
```json
{
"client_id": "f4b2c8f8-2b1b-4d89-9ed2-8d6d7b1f3abc",
"display_name": "MacBook Pro van Jan",
"platform": "macos",
"agent_version": "1.1.0",
"endpoint": "http://192.168.1.25:8765",
"shares": [
{ "key": "downloads", "label": "Downloads" },
{ "key": "movies", "label": "Movies" },
{ "key": "pictures", "label": "Pictures" }
]
}
```
### Backend bewaart per client
- `client_id`
- `display_name`
- `platform`
- `agent_version`
- `endpoint`
- `shares`
- `last_seen`
- `status`
- `last_error`
- `reachable_at`
- eventueel `registration_token_id`
### Heartbeat
De agent stuurt elke 15-30 seconden een heartbeat.
Bijvoorbeeld:
```json
{
"client_id": "f4b2c8f8-2b1b-4d89-9ed2-8d6d7b1f3abc",
"agent_version": "1.1.0"
}
```
### Statusmodel
Deze velden moeten logisch gescheiden blijven:
- `last_seen`
Laatste succesvolle heartbeat van de agent.
- `status`
Afgeleide UI-status, bijvoorbeeld `online` of `offline`.
- `last_error`
Laatste connect- of browsefout richting agent.
- `reachable_at`
Laatste moment waarop een directe agent-call echt succesvol was.
Belangrijk:
- een heartbeat bepaalt niet automatisch dat elke browse-call werkt
- een enkele browse-timeout mag niet blind `last_seen` overschrijven
- status mag niet gaan flappen op basis van één los incident
### Aanbevolen statusregels
- `online` als `last_seen` recent is
- `offline` als heartbeat-timeout overschreden is
- extra foutdetails via `last_error`
- optioneel UI-label zoals `online with recent errors` later, maar niet nodig in V1.1
---
## Authenticatie en beveiliging
### Backend registratie-auth
Registratie vereist een bearer token.
Bijvoorbeeld:
- `Authorization: Bearer <registration-token>`
### Agent endpoint-auth
Alle agent-calls vereisen authenticatie. Niet alleen registratie.
Dus ook:
- `/health`
- `/api/list`
- `/api/info`
- `/api/read`
- `/api/download`
moeten beschermd zijn.
### V1.1 minimum
Voor V1.1 volstaat een eenvoudige gedeelde agent-token, bijvoorbeeld:
- WebManager bewaart een secret per client of per installatie
- backend stuurt dat token mee op elke agent-call
- agent weigert requests zonder geldig token
Voorbeeld:
- `Authorization: Bearer <agent-access-token>`
### Niet doen in V1.1
- open agent-HTTP API zonder auth
- browse/download endpoints publiek bereikbaar maken op het LAN
---
## Virtueel padmodel
Remote client shares krijgen een aparte namespace.
Voorstel voor de gebruikersweergave:
- `/Clients`
- `/Clients/<client-display>`
- `/Clients/<client-display>/<share-label>`
- `/Clients/<client-display>/<share-label>/subdir/file.ext`
Intern moet de backend dit mappen naar:
- `client_id`
- `share_key`
- relatief share-pad
Belangrijk:
- dit zijn logische WebManager-paden
- het zijn geen echte lokale backend filesystem-paden
- ze mogen niet door de bestaande lokale `PathGuard` resolved worden
### Consequentie voor de codebasis
`/Clients/...` moet vroeg in routing worden onderschept door een aparte browse- of source-facade.
Dus:
- niet de lokale `PathGuard` uitbreiden tot remote sources
- niet overal `if remote` in bestaande lokale services strooien
- wel een duidelijke scheiding tussen local source en remote source
---
## Share-validatie in de agent
De agent werkt niet met vrije absolute paden.
De agent heeft een vaste share-map, bijvoorbeeld:
```json
{
"downloads": "/Users/jan/Downloads",
"movies": "/Users/jan/Movies",
"pictures": "/Users/jan/Pictures"
}
```
Een request bevat dan:
- `share = downloads`
- `path = Some/Subdir/file.txt`
Niet:
- `/Users/jan/...`
### Validatieregels
- onbekende `share` weigeren
- `..` weigeren
- pad resolven binnen de gekozen share-root
- symlink escape blokkeren
- alleen toegestane bestandshandelingen toestaan
---
## Read, preview en download limieten
V1.1 moet resource-grenzen expliciet vastleggen.
### Tekstpreview
- maximum grootte voor tekstpreview vastleggen
- voorstel: zelfde orde als huidige server-side preview/edit-limieten, of kleiner
- grote tekstbestanden niet volledig in memory laden voor preview
### Binary versus text
- agent moet tekstpreview alleen teruggeven voor ondersteunde teksttypes
- binaire content mag niet per ongeluk als tekst in JSON-responses worden gepusht
### Download
- downloads moeten gestreamd worden
- geen volledige bestand-buffering in memory
### Image preview
- alleen triviale image preview in V1.1
- geen zware thumbnail-pipeline in deze fase
---
## Offline gedrag
Dit is een harde eis.
WebManager mag niet vastlopen als de agent niet draait.
### Backendregels
- alle agent-calls krijgen korte timeouts, bijvoorbeeld 1-3 seconden
- connect- of timeoutfouten worden vertaald naar nette app-fouten
- offline agent blokkeert nooit globale pagina-initialisatie
- browse- en file-fouten blijven lokaal tot betreffende request
### Frontendregels
- `/Clients` mag laden, ook als sommige clients offline zijn
- offline clients mogen zichtbaar blijven in de lijst
- browsen in offline subtree toont foutmelding
- andere panes blijven bruikbaar
- geen endless spinner
---
## API-ontwerp
## 1. Backend registry endpoints
### `POST /api/clients/register`
Registreert of update een remote agent.
### `POST /api/clients/heartbeat`
Werkt `last_seen` bij.
### `GET /api/clients`
Geeft bekende clients terug met:
- `client_id`
- `display_name`
- `status`
- `last_seen`
- `last_error`
- `shares`
---
## 2. Backend browse facade voor UI
De frontend blijft praten met gewone WebManager-routes.
### `GET /api/browse?path=/Clients`
Geeft alle bekende clients terug als directories.
### `GET /api/browse?path=/Clients/<client>/`
Geeft shares van die client terug als directories.
### `GET /api/browse?path=/Clients/<client>/<share>/...`
Backend vertaalt dit naar een agent-call.
Belangrijk:
- browse facade bepaalt eerst of pad onder `/Clients` valt
- alleen niet-remote paden mogen daarna naar bestaande lokale browse-paths
---
## 3. Agent endpoints
Eenvoudig houden. Geen shell.
### `GET /health`
Gezondheidscheck met auth.
### `GET /api/list?share=downloads&path=subdir`
Directory-inhoud binnen een share.
### `GET /api/info?share=downloads&path=file.txt`
Metadata.
### `GET /api/read?share=downloads&path=file.txt`
Tekstpreview.
### `GET /api/download?share=downloads&path=file.txt`
Gestreamde download.
---
## Haalbaarheid
## Goed haalbaar in V1.1
- client registry
- heartbeat online/offline
- virtuele `Clients` root
- browse
- file info
- tekstpreview
- eenvoudige image preview
- gestreamde download
## Bewust uitgesteld
- rename
- mkdir
- delete
- upload
- bookmarks/startup paths
- cross-source copy
- cross-source move
- unified history
- task-runner integratie
---
## Veranderingen per gebied
## Backend
Nieuwe onderdelen:
- client registry repository
- client registry service
- routes voor register/heartbeat/list
- browse/source facade voor `Clients/...`
- agent HTTP client met harde timeouts en auth
Bestaande onderdelen die waarschijnlijk geraakt worden:
- [routes_browse.py](/workspace/webmanager-mvp/webui/backend/app/api/routes_browse.py)
Om `/Clients` vroeg te routeren.
- [dependencies.py](/workspace/webmanager-mvp/webui/backend/app/dependencies.py)
Voor nieuwe registry- en agent-services.
- [app/main.py](/workspace/webmanager-mvp/webui/backend/app/main.py)
Voor nieuwe routers.
Liever niet verbreden:
- [path_guard.py](/workspace/webmanager-mvp/webui/backend/app/security/path_guard.py)
Deze hoort lokaal filesystemgericht te blijven.
- [file_ops_service.py](/workspace/webmanager-mvp/webui/backend/app/services/file_ops_service.py)
Deze service is nu server-filesystemgericht en moet niet vervuild raken met remote transportlogica.
## Frontend
Waarschijnlijk aanpassen:
- [app.js](/workspace/webmanager-mvp/webui/html/app.js)
Voor:
- extra virtuele root
- render van clients en shares
- offline status
- source-aware browse/view/download/info flows
- [index.html](/workspace/webmanager-mvp/webui/html/index.html)
Alleen als extra statuslabels of clientindicatoren nodig zijn
## Remote agent
Te baseren op:
- [finder_commander/app/main.py](/workspace/webmanager-mvp/finder_commander/app/main.py)
- [finder_commander/run-local.sh](/workspace/webmanager-mvp/finder_commander/run-local.sh)
- [finder_commander/requirements.txt](/workspace/webmanager-mvp/finder_commander/requirements.txt)
Maar vereenvoudigd:
- geen shell command endpoint
- geen hele home-root
- alleen `share key + relatief pad`
- registratie en heartbeat toevoegen
- auth afdwingen op alle endpoints
---
## Minimale agent-config
Voorstel lokaal configbestand:
```json
{
"webmanager_base_url": "https://webmanager.example.com",
"registration_token": "registration-secret",
"agent_access_token": "agent-secret",
"client_id": "f4b2c8f8-2b1b-4d89-9ed2-8d6d7b1f3abc",
"display_name": "MacBook Pro van Jan",
"shares": {
"downloads": "/Users/jan/Downloads",
"movies": "/Users/jan/Movies",
"pictures": "/Users/jan/Pictures"
},
"listen_host": "0.0.0.0",
"listen_port": 8765,
"public_endpoint": "http://192.168.1.25:8765"
}
```
Opmerking:
- `public_endpoint` is het endpoint dat WebManager gebruikt
- `listen_host` en `public_endpoint` hoeven niet identiek te zijn
---
## Open keuzes die bewust zijn uitgesteld
Deze keuzes zijn echt later werk, niet meer V1.1:
- reverse-connect of tunnelmodel
- cross-source copy
- cross-source move
- bookmarks/startup paths voor `/Clients/...`
- write-acties op remote shares
- sterkere pairing of key rotation
---
## Beslisadvies
Aanbevolen implementatievolgorde voor V1.1:
1. agent registry + heartbeat
2. virtuele `Clients` root in browse
3. online/offline status met gescheiden statusvelden
4. browse/info/preview/download voor remote shares
Niet in V1.1:
5. write-acties
6. bookmarks/startup paths
7. cross-source flows
---
## Samenvatting
De juiste V1.1-richting is:
- geen hele homefolder
- wel beperkte shares zoals `Downloads`, `Movies`, `Pictures`
- remote helper-agent op macOS
- agent registreert zichzelf bij WebManager
- WebManager bewaart `client_id`-geleide registry en status
- `/Clients` wordt een aparte virtuele bron
- remote paden blijven buiten lokale filesystem services
- alle agent-calls vereisen auth
- offline agents mogen nooit de rest van WebManager verstoren
Dit model is haalbaar, beperkt in scope, en houdt de bestaande lokale storage-architectuur schoon.
+252
View File
@@ -0,0 +1,252 @@
1 analyse
De repo heeft al een bruikbaar taskmodel voor copy, move, download en duplicate, maar de main WebUI gebruikt dat model voor copy/move nog nauwelijks. In de hoofd-UI ziet de gebruiker na start nu vooral een korte statusregel of summary; live voortgang staat feitelijk alleen in `F1 > Settings > Logs`. Daardoor ontbreekt directe, persistente feedback in de hoofd-UI en is er geen zichtbare rem op dubbel starten.
Belangrijkste conclusie:
- Copy en move hebben al echte backend-tasks met progressvelden.
- De bron van truth voor lopende copy/move-taken is al `/api/tasks`.
- Er bestaat nu geen cancel/abort voor copy of move.
- Een eerlijke abortknop voor copy/move kan dus nu niet frontend-only worden toegevoegd.
- De kleinste veilige stap is een compacte live task-indicator in de bestaande header/toolbar-zone, gevoed door de bestaande task-feed.
2 bestaande functionaliteit
A. Taskmodel / backend
- `copy` en `move` gebruiken hetzelfde taskmechanisme via [tasks_runner.py](/workspace/webmanager-mvp/webui/backend/app/tasks_runner.py), [task_repository.py](/workspace/webmanager-mvp/webui/backend/app/db/task_repository.py), [copy_task_service.py](/workspace/webmanager-mvp/webui/backend/app/services/copy_task_service.py) en [move_task_service.py](/workspace/webmanager-mvp/webui/backend/app/services/move_task_service.py).
- Taskstatussen die al bestaan in [task_repository.py](/workspace/webmanager-mvp/webui/backend/app/db/task_repository.py):
- `queued`
- `running`
- `completed`
- `failed`
- daarnaast voor download ook `requested`, `preparing`, `ready`, `cancelled`
- Progressinformatie bestaat al:
- files: `done_bytes`, `total_bytes`, `current_item`
- batch/directory: `done_items`, `total_items`, `current_item`
- Copy:
- file copy gebruikt byte-progress callback
- directory copy is grof: `0/1` naar `1/1`
- batch copy gebruikt item-progress
- Move:
- same-root file move heeft praktisch geen tussentijdse progress, alleen start/einde
- cross-root file move gebruikt copy-progress en delete na afloop
- directory move is grof `0/1` naar `1/1`
- batch move gebruikt item-progress
- Er is al read-API voor tasks:
- `GET /api/tasks`
- `GET /api/tasks/{task_id}`
- Er is geen cancel-API voor copy/move.
- De enige echte cancel in de repo zit nu bij archive-downloads in [archive_download_task_service.py](/workspace/webmanager-mvp/webui/backend/app/services/archive_download_task_service.py) en `POST /api/files/download/archive/{task_id}/cancel`.
- Copy/move workers in [tasks_runner.py](/workspace/webmanager-mvp/webui/backend/app/tasks_runner.py) hebben geen cooperative cancel checks.
- Copy/move history bestaat al via [history_repository.py](/workspace/webmanager-mvp/webui/backend/app/db/history_repository.py): `queued`, `completed`, `failed`.
B. Bestaande frontend feedback
- In de hoofd-UI starten copy en move vanuit [app.js](/workspace/webmanager-mvp/webui/html/app.js):
- `startCopySelected()`
- `executeMoveSelection()`
- Huidige feedback voor copy/move:
- `setStatus(...)` onderin/headerstatus
- `showActionSummary(...)`
- `openFeedbackModal(...)` via `actions-error`
- Die feedback is niet persistent als live taskweergave.
- Er is nu geen compacte taskindicator in de hoofd-UI.
- `state.selectedTaskId` en `refreshTasksSnapshot()` bestaan al in [app.js](/workspace/webmanager-mvp/webui/html/app.js), maar worden voor copy/move alleen gebruikt om een snapshotcount op te halen; er is geen zichtbare hoofd-UI-component die dit toont.
- Buiten download is er geen modal of popover voor actieve taken in de hoofd-UI.
C. Logs / history / settings
- `F1 > Settings > Logs` toont al twee side-by-side secties:
- `Tasks`
- `History`
- Deze UI gebruikt al de bestaande feeds:
- `/api/tasks`
- `/api/history`
- Polling bestaat al in [app.js](/workspace/webmanager-mvp/webui/html/app.js):
- `loadTasksForSettings()`
- `loadHistoryForSettings()`
- `loadLogsAndTasksForSettings()`
- `scheduleSettingsLogsPolling()`
- De UI rendert taskdetails al compact via `formatTaskLine(task)`:
- status
- source/destination
- `done_items/total_items`
- `current_item`
- Dat betekent dat de repo al een bruikbare frontend formatteringslaag heeft die ook buiten Settings herbruikbaar is.
D. Abort/cancel haalbaarheid
- Copy/move kunnen nu technisch niet veilig worden afgebroken via bestaande code.
- Er is geen taskstatus-overgang of API-contract voor copy/move-cancel.
- Er is geen cooperative worker-check in copy/move loops.
- Er is geen rollback.
- Eerlijke cancelsemantiek voor copy/move zou dus moeten zijn:
- stop resterende verwerking zo snel mogelijk op een checkpunt
- reeds verwerkte bestanden blijven zoals ze zijn
- geen rollback
- Maar die semantiek is nog niet geïmplementeerd.
- Conclusie: een abortknop voor copy/move is nu buiten scope zonder backendwerk.
3 scope
Minimale veilige volgende stap, op basis van wat al bestaat:
- frontend-only hoofd-UI verbetering
- geen layoutwijziging van de dual-pane browse-UI
- geen nieuw vast paneel
- wel een compacte task/status chip in bestaande headerbar of function-bar zone
- alleen zichtbaar als er actieve taken zijn (`queued`, `running`, en eventueel download `requested/preparing`)
- klik opent een kleine popover/dropdown met actieve taken
- popover hergebruikt bestaande taskdata en formattering uit `/api/tasks`
- popover bevat link/actie naar `F1 > Settings > Logs`
- geen abortknop voor copy/move in deze fase
Waarom dit binnen scope past:
- gebruikt bestaande task-feed
- gebruikt bestaande taaksemantiek
- verandert de hoofd-layout niet
- geeft persistente feedback zonder modal-first patroon
- is compatibel met de OneDrive-achtige richting: compacte indicator, detail op aanvraag
4 impact
Positief:
- gebruiker ziet direct in de hoofd-UI dat copy/move loopt
- feedback blijft zichtbaar zolang taak actief is
- minder kans op dubbel starten
- geen extra structureel paneel
- F1 Logs blijft intact als detailbron
Beperkingen:
- zonder backendwerk is er nog geen eerlijke cancel voor copy/move
- progress blijft zo nauwkeurig als bestaande taskdata toelaat
- same-root move en directory move blijven qua progress relatief grof
5 risico
Laag tot middel als alleen de voorgestelde frontendstap wordt gebouwd.
Belangrijkste risico's:
- polling in de hoofd-UI kan onrustig worden als hij niet net zo stabiel wordt gebouwd als de bestaande Settings-polling
- een te opvallende indicator kan visueel concurreren met de bestaande headerstatus
- als een abortknop zonder backendsteun zou worden toegevoegd, zou dat misleidend zijn; dat moet expliciet niet gebeuren
Expliciet risico buiten scope:
- copy/move-cancel vereist backend-aanpassing aan taskmodel, runner en waarschijnlijk history
6 testplan
Voor de minimale frontendstap:
- gerichte UI smoke/golden checks voor:
- indicator aanwezig in header/toolbar markup
- indicator alleen bedoeld voor actieve taken
- popover/dropdown markup aanwezig
- link naar bestaande logs-entrypoint aanwezig
- gerichte JS-checks voor:
- actieve taken worden uit `/api/tasks` gefilterd
- `queued`/`running` tonen indicator
- `completed`/`failed` verdwijnen uit de actieve indicator
- polling start/stop logisch zonder extra layoutreset
- geen backend golden updates nodig zolang `/api/tasks` contract ongewijzigd blijft
Niet nu testen:
- abort voor copy/move, want die functionaliteit bestaat nog niet
7 acceptatiecriteria
Voor de voorgestelde minimale stap:
- Een gestart copy- of move-proces is zichtbaar in de hoofd-UI zonder navigatie naar `F1 > Settings / Logs`.
- De oplossing verandert de dual-panel layout niet structureel.
- De feedback blijft zichtbaar zolang de taak actief is.
- De oplossing gebruikt bestaande taskdata als bron van truth.
- Er wordt geen fake progress getoond.
- Er wordt geen fake cancelknop getoond voor copy/move.
- Bestaande task/log/history-functionaliteit blijft intact.
- API-contract blijft ongewijzigd.
Voor abort/cancel:
- Niet acceptabel in deze fase zonder backendsteun.
- Eerst aparte backendfase nodig.
8 codex-uitvoering / voorstel
Huidige stap:
- Alleen analyse uitgevoerd.
- Geen functionele implementatie gedaan.
Waarom:
- `CHANGE_POLICY.md` zegt dat frontend flow aanpassen eerst een voorstel nodig heeft.
- De opdracht vroeg expliciet om eerst grondige repo-inspectie en pas daarna een minimaal voorstel.
- Cancel/abort voor copy/move is niet eerlijk implementeerbaar zonder backendwerk.
Minimaal wijzigingsvoorstel dat ik hierna zou uitvoeren als vervolgstap:
1. Frontend-only compacte task chip
- plaats in `#title-zone-actions` of direct naast `#status`
- toont bijvoorbeeld:
- `1 task running`
- `3 active tasks`
2. Kleine popover/dropdown
- opent op klik op de chip
- toont alleen actieve taken uit `/api/tasks`
- hergebruikt bestaande `formatTaskLine(task)` of een kleine variant daarop
- toont eerlijke status:
- `queued`
- `running`
- eventueel later download `requested/preparing`
3. Polling hergebruik
- hergebruik bestaande `/api/tasks`
- implementeer lichte polling alleen als er actieve taken zijn of als de popover open is
- gebruik stabiele rerender-aanpak zoals in Settings > Logs
4. Doorgang naar detail
- knop of link `View in Logs`
- opent bestaande `F1 > Settings > Logs`
5. Expliciet nog niet doen
- geen cancelknop voor copy/move
- geen extra paneel
- geen fake progressbar
Vervolgvoorstel voor latere backendfase als abort gewenst is:
- copy/move taskstatus uitbreiden met `cancelled`
- cancel-endpoint voor copy/move
- cooperative checks in `TaskRunner` tussen items/chunks
- eerlijke semantiek:
- stop resterende verwerking
- reeds verwerkte bestanden blijven bestaan
- geen rollback
9 gewijzigde bestanden
- [project_docs/UI_FEEDACK.md](/workspace/webmanager-mvp/project_docs/UI_FEEDACK.md)
10 uitgevoerde tests
Wel gedaan:
- code-inspectie van backend taskmodel, runners, services, routes en frontend task/log UI
Niet gedaan:
- geen functionele tests
- geen implementatiechecks
Reden:
- deze stap is bewust alleen analyse + voorstel, geen implementatie
+194
View File
@@ -0,0 +1,194 @@
# Research: Remote Single-File Copy To Host
## Relevante file analysis
### Backend
- [routes_files.py](/workspace/webmanager-mvp/webui/backend/app/api/routes_files.py)
Bevat de bestaande lokale upload-route (`POST /api/files/upload`) en de remote read-only Phase 3 routes (`view`, `info`, `download`, `image`) via `RemoteFileService`.
- [routes_copy.py](/workspace/webmanager-mvp/webui/backend/app/api/routes_copy.py)
Bevat de bestaande copy-route (`POST /api/files/copy`) die volledig uitgaat van host-side source en host-side destination.
- [file_ops_service.py](/workspace/webmanager-mvp/webui/backend/app/services/file_ops_service.py)
Bevat lokale file-acties. Relevant is vooral `upload()`, omdat die host-write doet na `PathGuard`-validatie van een doeldirectory.
- [copy_task_service.py](/workspace/webmanager-mvp/webui/backend/app/services/copy_task_service.py)
Bevat task-opbouw, destination-validatie en taakcreatie voor copy, maar gaat uit van een lokale bron die via `PathGuard` naar een host-pad resolveert.
- [remote_file_service.py](/workspace/webmanager-mvp/webui/backend/app/services/remote_file_service.py)
Bevat al de benodigde remote read-path parsing, share-validatie via registry, agent-auth, error mapping en een gestreamde `prepare_download()` naar de agent.
- [filesystem_adapter.py](/workspace/webmanager-mvp/webui/backend/app/fs/filesystem_adapter.py)
Bevat de feitelijke host-write helpers:
- `write_uploaded_file(path, file_stream, overwrite=False)`
- `copy_file(source, destination, on_progress=None)`
`copy_file` vereist een lokale bron op de host en is dus niet bruikbaar voor remote input. `write_uploaded_file` schrijft een inkomende stream naar een hostpad en is conceptueel het dichtstbij.
- [path_guard.py](/workspace/webmanager-mvp/webui/backend/app/security/path_guard.py)
Houdt host-write validatie strikt lokaal. Dat moet zo blijven; remote paden mogen hier niet als bronsemantiek in terechtkomen.
- [tasks_runner.py](/workspace/webmanager-mvp/webui/backend/app/tasks_runner.py)
Bevat task-based copy/move uitvoering, maar alleen voor host-side bronpaden. Wel relevant als patroon voor een aparte remote-to-host worker.
- [schemas.py](/workspace/webmanager-mvp/webui/backend/app/api/schemas.py)
Bevat bestaande `CopyRequest` en upload/copy response-modellen. Voor een aparte feature is waarschijnlijk een nieuw requestmodel nodig.
### Frontend
- [app.js](/workspace/webmanager-mvp/webui/html/app.js)
Relevante bestaande flows:
- `uploadFileRequest()` gebruikt uitsluitend `/api/files/upload`
- `startCopySelected()` gebruikt uitsluitend `/api/files/copy`
- remote browse/view/download is al source-aware
- remote copy is nu bewust geblokkeerd
Dit bevestigt dat upload-flow en copy-flow momenteel twee losse UI-contracten zijn.
### Agent
- [finder_commander/app/main.py](/workspace/webmanager-mvp/finder_commander/app/main.py)
Agent heeft al wat voor deze feature nodig is:
- strikte `share + relative path` validatie
- `GET /api/info`
- `GET /api/download`
Voor remote single-file copy naar host is geen nieuwe remote write-API nodig.
## Oordeel over hergebruik van upload-internals
### Bestaande upload-functionaliteit aanpassen?
Nee.
Reden:
- de bestaande upload-route, upload-requestvorm en upload-UI werken al goed
- upload is browser -> host via multipart/form-data
- de gewenste feature is agent/remote -> host via backend-proxy/stream
- dat is een ander contract, andere foutbron en andere bronsemantiek
### Interne host-write logica hergebruiken?
Ja, maar alleen op intern helper/service-niveau.
Concreet oordeel:
- `FilesystemAdapter.copy_file()` is niet geschikt voor hergebruik
Reden: vereist een lokale host-bronpad als source.
- `FilesystemAdapter.write_uploaded_file()` is deels relevant
Reden: dit doet precies de host-write van een inkomende stream naar een doelbestand.
- Direct hergebruik van `FileOpsService.upload()` is niet verstandig
Reden: die methode is semantisch en contractueel gekoppeld aan multipart upload en `UploadFile`.
Best passende richting:
- niet hergebruiken via bestaande upload-endpoints of upload-flow
- wel overwegen om de onderliggende stream-naar-bestand write logica te hergebruiken of te veralgemeniseren in `FilesystemAdapter`
- voorkeur: een nieuwe sibling-helper zoals `write_stream_file(...)` of een kleine interne extractie, zodat upload ongewijzigd blijft en remote copy dezelfde veilige host-write primitief kan gebruiken
## Ontwerpvoorstel
### Feature
`Copy remote file to host`
### Scope
- alleen single file
- alleen source onder `/Clients/...`
- alleen destination op host-side lokale map
- geen mappen
- geen overwrite in eerste change request tenzij expliciet gewenst
- geen upload-route hergebruik
- geen brede refactor
### Backendontwerp
Voeg een aparte backend feature toe, niet via `POST /api/files/upload` en niet via bestaande `POST /api/files/copy`.
Voorkeursvorm:
- nieuwe route, bijvoorbeeld `POST /api/files/remote-copy`
- request bevat:
- `source`: remote bestandspad onder `/Clients/...`
- `destination_dir`: host-directory pad
Nieuwe service, bijvoorbeeld:
- `RemoteCopyToHostService`
Verantwoordelijkheden:
1. valideer dat `source` een remote `/Clients/...` file is
2. valideer dat `destination_dir` een host-directory is via bestaande lokale `PathGuard`
3. haal remote metadata op of resolve remote naam via bestaande `RemoteFileService`
4. bouw destination pad als `destination_dir/<remote-filename>`
5. faal op bestaand doelbestand in eerste versie
6. open remote download-stream via aparte interne helper op `RemoteFileService`
7. schrijf gestreamd naar host met een aparte interne host-write helper
8. map fouten strikt:
- remote unavailable blijft lokale actie-fout
- host permission/path-conflict blijft gewone host-fout
### Aanbevolen interne hergebruikslijn
- laat `RemoteFileService` een interne streaming primitive aanbieden, bijvoorbeeld een variant op de huidige remote download-open logica zonder HTTP-response voor browser-download
- laat `FilesystemAdapter` een aparte stream-write helper aanbieden voor generieke inkomende streams
- laat upload zijn bestaande publieke route en flow behouden
### Frontendontwerp
Geen wijziging aan upload-UI.
Kleine aparte UI-feature:
- toon een aparte actie alleen als:
- bronpane een remote file-selectie heeft van exact 1 bestand
- doelpane op een host/local directory staat
- de actie roept de nieuwe backend-route aan
- na succes:
- refresh beide panes
- toon lokale foutmelding bij falen
Voorkeur:
- aparte actie of expliciete source-aware branch voor "Copy remote file to host"
- niet de bestaande upload-flow hergebruiken
### Agentontwerp
Geen nieuwe agent-endpoints nodig in deze scope.
De bestaande `GET /api/download` is voldoende als read-only bron voor streaming.
## Acceptance criteria
- een enkel bestand onder `/Clients/...` kan naar een host-directory worden gekopieerd
- de destination moet een host/local directory zijn
- mappen als remote bron worden geweigerd
- remote -> remote wordt geweigerd
- host -> remote wordt geweigerd
- overwrite gebeurt niet impliciet; bestaand doelbestand geeft een nette fout
- bestaande upload-route, upload-contract en upload-UI blijven ongewijzigd
- bestaande lokale copy-flow blijft ongewijzigd
- remote fouten blijven lokaal tot deze actie
- host-write blijft onder bestaande lokale `PathGuard`-regels vallen
- data wordt gestreamd; geen volledige file-buffer in memory
## Klein plan
1. Voeg een research-backed change request toe voor een aparte route `POST /api/files/remote-copy`.
2. Voeg een kleine service toe die alleen remote single-file source + local destination_dir ondersteunt.
3. Voeg een interne streaming helper toe in `RemoteFileService` voor remote bestand-inname door backend.
4. Voeg een aparte interne host-write helper toe in `FilesystemAdapter` voor generieke stream-naar-bestand writes, zonder upload-API te wijzigen.
5. Voeg minimale frontend wiring toe voor een aparte "Copy remote file to host"-actie.
6. Test stapsgewijs:
- success path remote file -> local dir
- bestaand doelbestand
- remote directory rejected
- remote failure stays local
- upload-regressie: bestaande `/api/files/upload` blijft ongewijzigd
## Expliciete lijst van wat buiten scope blijft
- remote mappen kopiëren
- remote write-acties
- remote -> remote
- host -> remote
- aanpassing van bestaande upload-routes
- aanpassing van upload-requestcontract
- aanpassing van upload-UI
- brede refactor van copy/upload/task-infrastructuur
- bookmarks/startup paths
- remote task-runner verbreding buiten deze ene actie
Binary file not shown.
Binary file not shown.
+39
View File
@@ -0,0 +1,39 @@
from __future__ import annotations

from fastapi import APIRouter, Depends, Header

from backend.app.api.schemas import (
    RemoteClientHeartbeatRequest,
    RemoteClientItem,
    RemoteClientListResponse,
    RemoteClientRegisterRequest,
)
from backend.app.dependencies import get_remote_client_service
from backend.app.services.remote_client_service import RemoteClientService

# Remote-client registry endpoints (Phase 1): agents register themselves and
# send periodic heartbeats; the UI lists known clients and their status.
router = APIRouter(prefix="/clients")


@router.get("", response_model=RemoteClientListResponse)
async def list_clients(
    service: RemoteClientService = Depends(get_remote_client_service),
) -> RemoteClientListResponse:
    """Return all registered remote clients with their current status."""
    # NOTE(review): unlike register/heartbeat below, no Authorization header is
    # taken here — presumably this endpoint is UI-facing only; confirm.
    return service.list_clients()


@router.post("/register", response_model=RemoteClientItem)
async def register_client(
    request: RemoteClientRegisterRequest,
    authorization: str | None = Header(default=None),
    service: RemoteClientService = Depends(get_remote_client_service),
) -> RemoteClientItem:
    """Register (or re-register) a remote agent.

    The raw Authorization header value is forwarded to the service, which is
    responsible for validating the registration token.
    """
    return service.register_client(authorization=authorization, request=request)


@router.post("/heartbeat", response_model=RemoteClientItem)
async def heartbeat(
    request: RemoteClientHeartbeatRequest,
    authorization: str | None = Header(default=None),
    service: RemoteClientService = Depends(get_remote_client_service),
) -> RemoteClientItem:
    """Record a liveness heartbeat for a previously registered agent."""
    return service.record_heartbeat(authorization=authorization, request=request)
+5
View File
@@ -14,4 +14,9 @@ async def copy_file(
request: CopyRequest, request: CopyRequest,
service: CopyTaskService = Depends(get_copy_task_service), service: CopyTaskService = Depends(get_copy_task_service),
) -> TaskCreateResponse: ) -> TaskCreateResponse:
if request.sources is not None:
return service.create_batch_copy_task(
sources=request.sources,
destination_base=request.destination_base,
)
return service.create_copy_task(source=request.source, destination=request.destination) return service.create_copy_task(source=request.source, destination=request.destination)
+17
View File
@@ -0,0 +1,17 @@
from __future__ import annotations

from fastapi import APIRouter, Depends

from backend.app.api.schemas import DuplicateRequest, TaskCreateResponse
from backend.app.dependencies import get_duplicate_task_service
from backend.app.services.duplicate_task_service import DuplicateTaskService

router = APIRouter(prefix="/files")


# 202 Accepted: duplication runs as a background task; callers poll the task
# feed (/api/tasks) for progress.
@router.post("/duplicate", response_model=TaskCreateResponse, status_code=202)
async def duplicate_paths(
    request: DuplicateRequest,
    service: DuplicateTaskService = Depends(get_duplicate_task_service),
) -> TaskCreateResponse:
    """Queue a duplicate task for the given paths and return its task id."""
    return service.create_duplicate_task(paths=request.paths)
+75 -8
View File
@@ -1,11 +1,15 @@
from __future__ import annotations from __future__ import annotations
from fastapi import APIRouter, Depends, File, Form, Request, UploadFile from fastapi import APIRouter, Depends, File, Form, Query, Request, UploadFile
from fastapi.responses import StreamingResponse from fastapi.responses import StreamingResponse
from starlette.background import BackgroundTask
from backend.app.api.schemas import DeleteRequest, DeleteResponse, FileInfoResponse, MkdirRequest, MkdirResponse, RenameRequest, RenameResponse, SaveRequest, SaveResponse, UploadResponse, ViewResponse from backend.app.api.schemas import ArchivePrepareRequest, DeleteRequest, FileInfoResponse, MkdirRequest, MkdirResponse, RenameRequest, RenameResponse, SaveRequest, SaveResponse, TaskCreateResponse, TaskDetailResponse, UploadResponse, ViewResponse
from backend.app.dependencies import get_file_ops_service from backend.app.dependencies import get_archive_download_task_service, get_delete_task_service, get_file_ops_service, get_remote_file_service
from backend.app.services.archive_download_task_service import ArchiveDownloadTaskService
from backend.app.services.delete_task_service import DeleteTaskService
from backend.app.services.file_ops_service import FileOpsService from backend.app.services.file_ops_service import FileOpsService
from backend.app.services.remote_file_service import RemoteFileService
router = APIRouter(prefix="/files") router = APIRouter(prefix="/files")
@@ -26,21 +30,24 @@ async def rename(
return service.rename(path=request.path, new_name=request.new_name) return service.rename(path=request.path, new_name=request.new_name)
@router.post("/delete", response_model=DeleteResponse) @router.post("/delete", response_model=TaskCreateResponse, status_code=202)
async def delete( async def delete(
request: DeleteRequest, request: DeleteRequest,
service: FileOpsService = Depends(get_file_ops_service), service: DeleteTaskService = Depends(get_delete_task_service),
) -> DeleteResponse: ) -> TaskCreateResponse:
return service.delete(path=request.path) if request.paths is not None:
return service.create_batch_delete_task(paths=request.paths, recursive_paths=request.recursive_paths or [])
return service.create_delete_task(path=request.path, recursive=request.recursive)
@router.post("/upload", response_model=UploadResponse) @router.post("/upload", response_model=UploadResponse)
async def upload( async def upload(
target_path: str = Form(...), target_path: str = Form(...),
overwrite: bool = Form(False),
file: UploadFile = File(...), file: UploadFile = File(...),
service: FileOpsService = Depends(get_file_ops_service), service: FileOpsService = Depends(get_file_ops_service),
) -> UploadResponse: ) -> UploadResponse:
return service.upload(target_path=target_path, upload_file=file) return service.upload(target_path=target_path, upload_file=file, overwrite=overwrite)
@router.get("/view", response_model=ViewResponse) @router.get("/view", response_model=ViewResponse)
@@ -48,7 +55,10 @@ async def view(
path: str, path: str,
for_edit: bool = False, for_edit: bool = False,
service: FileOpsService = Depends(get_file_ops_service), service: FileOpsService = Depends(get_file_ops_service),
remote_service: RemoteFileService = Depends(get_remote_file_service),
) -> ViewResponse: ) -> ViewResponse:
if remote_service.handles_path(path):
return remote_service.view(path=path, for_edit=for_edit)
return service.view(path=path, for_edit=for_edit) return service.view(path=path, for_edit=for_edit)
@@ -56,10 +66,59 @@ async def view(
async def info( async def info(
path: str, path: str,
service: FileOpsService = Depends(get_file_ops_service), service: FileOpsService = Depends(get_file_ops_service),
remote_service: RemoteFileService = Depends(get_remote_file_service),
) -> FileInfoResponse: ) -> FileInfoResponse:
if remote_service.handles_path(path):
return remote_service.info(path=path)
return service.info(path=path) return service.info(path=path)
@router.get("/download")
async def download(
    path: list[str] = Query(...),
    service: FileOpsService = Depends(get_file_ops_service),
    remote_service: RemoteFileService = Depends(get_remote_file_service),
) -> StreamingResponse:
    """Stream one or more selected paths as a download.

    Remote (/Clients/...) selections are served through the remote file
    facade; purely local selections go through the local file-ops service.
    """
    # If any requested path is remote, the whole request is delegated to the
    # remote facade — the selection is not split across the two services.
    has_remote = any(remote_service.handles_path(item) for item in path)
    if has_remote:
        payload = remote_service.prepare_download(paths=path)
    else:
        payload = service.prepare_download(paths=path)

    streaming = StreamingResponse(
        payload["content"],
        headers=payload["headers"],
        media_type=payload["content_type"],
    )
    cleanup = payload.get("cleanup")
    if cleanup:
        # Defer cleanup (e.g. temp artifacts) until the response is fully sent.
        streaming.background = BackgroundTask(cleanup)
    return streaming
@router.post("/download/archive-prepare", response_model=TaskCreateResponse, status_code=202)
async def archive_prepare(
    request: ArchivePrepareRequest,
    service: ArchiveDownloadTaskService = Depends(get_archive_download_task_service),
) -> TaskCreateResponse:
    """Queue a background task that packs the given paths into an archive.

    Returns 202 with a task id; the archive is fetched later via
    GET /files/download/archive/{task_id} once the task is ready.
    """
    return service.create_archive_prepare_task(paths=request.paths)
@router.get("/download/archive/{task_id}")
async def archive_download(
    task_id: str,
    service: ArchiveDownloadTaskService = Depends(get_archive_download_task_service),
) -> StreamingResponse:
    """Stream a previously prepared archive for the given task.

    The service resolves the task and provides the stream, headers and
    content type; non-ready tasks are handled (rejected) inside the service.
    """
    prepared = service.prepare_ready_archive_download(task_id=task_id)
    return StreamingResponse(
        prepared["content"],
        headers=prepared["headers"],
        media_type=prepared["content_type"],
    )
@router.post("/download/archive/{task_id}/cancel", response_model=TaskDetailResponse)
async def archive_cancel(
    task_id: str,
    service: ArchiveDownloadTaskService = Depends(get_archive_download_task_service),
) -> TaskDetailResponse:
    """Cancel an archive-preparation task and return its updated state."""
    # The service returns a plain dict; re-wrap it into the response model.
    return TaskDetailResponse(**service.cancel_archive_prepare_task(task_id=task_id))
@router.get("/video") @router.get("/video")
async def video( async def video(
path: str, path: str,
@@ -92,7 +151,15 @@ async def pdf(
async def image( async def image(
path: str, path: str,
service: FileOpsService = Depends(get_file_ops_service), service: FileOpsService = Depends(get_file_ops_service),
remote_service: RemoteFileService = Depends(get_remote_file_service),
) -> StreamingResponse: ) -> StreamingResponse:
if remote_service.handles_path(path):
prepared = remote_service.prepare_image_stream(path=path)
return StreamingResponse(
prepared["content"],
headers=prepared["headers"],
media_type=prepared["content_type"],
)
prepared = service.prepare_image_stream(path=path) prepared = service.prepare_image_stream(path=path)
return StreamingResponse( return StreamingResponse(
prepared["content"], prepared["content"],
+6 -1
View File
@@ -1,6 +1,6 @@
from __future__ import annotations from __future__ import annotations
from fastapi import APIRouter, Depends from fastapi import APIRouter, Depends, status
from backend.app.api.schemas import TaskDetailResponse, TaskListResponse from backend.app.api.schemas import TaskDetailResponse, TaskListResponse
from backend.app.dependencies import get_task_service from backend.app.dependencies import get_task_service
@@ -17,3 +17,8 @@ async def list_tasks(service: TaskService = Depends(get_task_service)) -> TaskLi
@router.get("/{task_id}", response_model=TaskDetailResponse) @router.get("/{task_id}", response_model=TaskDetailResponse)
async def get_task(task_id: str, service: TaskService = Depends(get_task_service)) -> TaskDetailResponse: async def get_task(task_id: str, service: TaskService = Depends(get_task_service)) -> TaskDetailResponse:
return service.get_task(task_id) return service.get_task(task_id)
@router.post("/{task_id}/cancel", response_model=TaskDetailResponse, status_code=status.HTTP_200_OK)
async def cancel_task(task_id: str, service: TaskService = Depends(get_task_service)) -> TaskDetailResponse:
    """Request cancellation of a task and return its updated detail record."""
    return service.cancel_task(task_id)
+63 -3
View File
@@ -51,7 +51,10 @@ class RenameResponse(BaseModel):
class DeleteRequest(BaseModel): class DeleteRequest(BaseModel):
path: str path: str | None = None
recursive: bool = False
paths: list[str] | None = None
recursive_paths: list[str] | None = None
class DeleteResponse(BaseModel): class DeleteResponse(BaseModel):
@@ -87,6 +90,10 @@ class SaveResponse(BaseModel):
modified: str modified: str
class ArchivePrepareRequest(BaseModel):
    """Request body for POST /files/download/archive-prepare."""

    # Paths to include in the archive to be prepared.
    paths: list[str]
class FileInfoResponse(BaseModel): class FileInfoResponse(BaseModel):
name: str name: str
path: str path: str
@@ -102,12 +109,21 @@ class FileInfoResponse(BaseModel):
height: int | None = None height: int | None = None
class ZipDownloadLimitsResponse(BaseModel):
    """Server-side limits applied to zip/archive downloads (read-only)."""

    max_items: int
    max_total_input_bytes: int
    max_individual_file_bytes: int
    scan_timeout_seconds: float
    symlink_policy: str  # named policy string; exact values defined server-side
class SettingsResponse(BaseModel): class SettingsResponse(BaseModel):
show_thumbnails: bool show_thumbnails: bool
preferred_startup_path_left: str | None = None preferred_startup_path_left: str | None = None
preferred_startup_path_right: str | None = None preferred_startup_path_right: str | None = None
selected_theme: str selected_theme: str
selected_color_mode: str selected_color_mode: str
zip_download_limits: ZipDownloadLimitsResponse
class SettingsUpdateRequest(BaseModel): class SettingsUpdateRequest(BaseModel):
@@ -152,8 +168,14 @@ class TaskDetailResponse(BaseModel):
class CopyRequest(BaseModel): class CopyRequest(BaseModel):
source: str source: str | None = None
destination: str destination: str | None = None
sources: list[str] | None = None
destination_base: str | None = None
class DuplicateRequest(BaseModel):
paths: list[str]
class TaskCreateResponse(BaseModel): class TaskCreateResponse(BaseModel):
@@ -216,3 +238,41 @@ class SearchResultItem(BaseModel):
class SearchResponse(BaseModel): class SearchResponse(BaseModel):
items: list[SearchResultItem] items: list[SearchResultItem]
truncated: bool truncated: bool
class RemoteClientShare(BaseModel):
    """A single directory share exposed by a remote agent."""

    key: str    # stable share identifier used in /Clients/... paths
    label: str  # human-readable display name


class RemoteClientRegisterRequest(BaseModel):
    """Payload an agent sends to POST /clients/register."""

    client_id: str
    display_name: str
    platform: str
    agent_version: str
    endpoint: str  # base URL where the backend can reach the agent
    shares: list[RemoteClientShare]


class RemoteClientHeartbeatRequest(BaseModel):
    """Payload an agent sends to POST /clients/heartbeat."""

    client_id: str
    agent_version: str


class RemoteClientItem(BaseModel):
    """A registered remote client as exposed by the registry.

    Status-related fields are deliberately kept separate (last_seen, status,
    last_error, reachable_at) rather than collapsed into one value.
    """

    client_id: str
    display_name: str
    platform: str
    agent_version: str
    endpoint: str
    shares: list[RemoteClientShare]
    last_seen: str | None = None     # timestamp of last heartbeat, if any
    status: str                      # e.g. online/offline — set by the service
    last_error: str | None = None
    reachable_at: str | None = None
    created_at: str
    updated_at: str


class RemoteClientListResponse(BaseModel):
    """Response body for GET /clients."""

    items: list[RemoteClientItem]
+20 -1
View File
@@ -9,6 +9,11 @@ from pathlib import Path
class Settings: class Settings:
root_aliases: dict[str, str] root_aliases: dict[str, str]
task_db_path: str task_db_path: str
remote_client_registration_token: str
remote_client_offline_timeout_seconds: int
remote_client_agent_auth_header: str
remote_client_agent_auth_scheme: str
remote_client_agent_auth_token: str
DEFAULT_ROOT_ALIASES = { DEFAULT_ROOT_ALIASES = {
@@ -40,4 +45,18 @@ def get_settings() -> Settings:
task_db_path = os.getenv("WEBMANAGER_TASK_DB_PATH", default_task_db_path).strip() task_db_path = os.getenv("WEBMANAGER_TASK_DB_PATH", default_task_db_path).strip()
if not task_db_path: if not task_db_path:
task_db_path = default_task_db_path task_db_path = default_task_db_path
return Settings(root_aliases=_load_root_aliases(), task_db_path=task_db_path) raw_offline_timeout = os.getenv("WEBMANAGER_REMOTE_CLIENT_OFFLINE_TIMEOUT_SECONDS", "60").strip()
try:
remote_client_offline_timeout_seconds = max(1, int(raw_offline_timeout))
except ValueError:
remote_client_offline_timeout_seconds = 60
return Settings(
root_aliases=_load_root_aliases(),
task_db_path=task_db_path,
remote_client_registration_token=os.getenv("WEBMANAGER_REMOTE_CLIENT_REGISTRATION_TOKEN", "").strip(),
remote_client_offline_timeout_seconds=remote_client_offline_timeout_seconds,
remote_client_agent_auth_header=os.getenv("WEBMANAGER_REMOTE_CLIENT_AGENT_AUTH_HEADER", "Authorization").strip()
or "Authorization",
remote_client_agent_auth_scheme=os.getenv("WEBMANAGER_REMOTE_CLIENT_AGENT_AUTH_SCHEME", "Bearer").strip() or "Bearer",
remote_client_agent_auth_token=os.getenv("WEBMANAGER_REMOTE_CLIENT_AGENT_AUTH_TOKEN", "").strip(),
)
+23 -2
View File
@@ -6,8 +6,8 @@ from contextlib import contextmanager
from datetime import datetime, timezone from datetime import datetime, timezone
from pathlib import Path from pathlib import Path
VALID_HISTORY_STATUSES = {"queued", "completed", "failed"} VALID_HISTORY_STATUSES = {"queued", "completed", "failed", "requested", "ready", "preflight_failed", "cancelled"}
VALID_HISTORY_OPERATIONS = {"mkdir", "rename", "delete", "copy", "move", "upload"} VALID_HISTORY_OPERATIONS = {"mkdir", "rename", "delete", "copy", "move", "upload", "download", "duplicate"}
class HistoryRepository: class HistoryRepository:
@@ -119,6 +119,27 @@ class HistoryRepository:
), ),
) )
def reconcile_entries_failed(
    self,
    entry_ids: list[str],
    *,
    error_code: str = "task_interrupted",
    error_message: str = "Task was interrupted before completion",
) -> None:
    """Mark the given history entries as failed in a single UPDATE.

    Used when reconciling work left behind by an interrupted process:
    every listed entry gets status ``"failed"``, the supplied error
    code/message, and ``finished_at`` set to now. No-op for an empty list.
    """
    if not entry_ids:
        return
    finished_at = self._now_iso()
    # One positional "?" per id for the IN clause; values are still bound
    # as parameters, only the placeholder list is interpolated.
    placeholders = ", ".join("?" for _ in entry_ids)
    with self._connection() as conn:
        conn.execute(
            f"""
            UPDATE history
            SET status = ?, error_code = ?, error_message = ?, finished_at = ?
            WHERE id IN ({placeholders})
            """,
            ("failed", error_code, error_message, finished_at, *entry_ids),
        )
def _ensure_schema(self) -> None: def _ensure_schema(self) -> None:
db_path = Path(self._db_path) db_path = Path(self._db_path)
if db_path.parent and str(db_path.parent) not in {"", "."}: if db_path.parent and str(db_path.parent) not in {"", "."}:
@@ -0,0 +1,201 @@
from __future__ import annotations
import json
import sqlite3
from contextlib import contextmanager
from datetime import datetime, timezone
from pathlib import Path
class RemoteClientRepository:
    """SQLite-backed registry of remote client agents.

    One row per client, keyed by ``client_id``. Shares are stored as a
    JSON array in ``shares_json``; liveness is tracked via ``last_seen``
    and ``status``; ``last_error`` and ``reachable_at`` hold error
    bookkeeping. All timestamps are ISO-8601 UTC strings (see
    :meth:`now_iso`). Each public method opens its own short-lived
    connection, so instances hold no open handles between calls.
    """

    def __init__(self, db_path: str):
        # Create the schema eagerly so the repository is usable
        # immediately after construction.
        self._db_path = db_path
        self._ensure_schema()

    def upsert_client(
        self,
        *,
        client_id: str,
        display_name: str,
        platform: str,
        agent_version: str,
        endpoint: str,
        shares: list[dict[str, str]],
        now_iso: str,
    ) -> dict:
        """Insert or refresh a client registration; return the stored row.

        On conflict the descriptive fields, shares, last_seen and status
        are replaced and ``last_error`` is cleared. ``created_at`` keeps
        its original value. NOTE(review): ``reachable_at`` is NOT reset on
        re-registration and keeps its previous value -- confirm that is
        intended.
        """
        shares_json = self._encode_shares(shares)
        with self._connection() as conn:
            conn.execute(
                """
                INSERT INTO remote_clients (
                    client_id, display_name, platform, agent_version, endpoint, shares_json,
                    last_seen, status, last_error, reachable_at, created_at, updated_at
                ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                ON CONFLICT(client_id) DO UPDATE SET
                    display_name = excluded.display_name,
                    platform = excluded.platform,
                    agent_version = excluded.agent_version,
                    endpoint = excluded.endpoint,
                    shares_json = excluded.shares_json,
                    last_seen = excluded.last_seen,
                    status = excluded.status,
                    last_error = NULL,
                    updated_at = excluded.updated_at
                """,
                (
                    client_id,
                    display_name,
                    platform,
                    agent_version,
                    endpoint,
                    shares_json,
                    now_iso,
                    "online",
                    None,
                    None,
                    now_iso,
                    now_iso,
                ),
            )
            row = conn.execute("SELECT * FROM remote_clients WHERE client_id = ?", (client_id,)).fetchone()
            return self._to_dict(row)

    def record_heartbeat(self, *, client_id: str, agent_version: str, now_iso: str) -> dict | None:
        """Refresh liveness for a known client; None if it is not registered.

        Updates agent_version, last_seen and updated_at and forces status
        back to "online". Does not create rows: unknown client_ids yield
        None so the caller can demand a (re-)registration.
        """
        with self._connection() as conn:
            cursor = conn.execute(
                """
                UPDATE remote_clients
                SET agent_version = ?, last_seen = ?, status = ?, updated_at = ?
                WHERE client_id = ?
                """,
                (agent_version, now_iso, "online", now_iso, client_id),
            )
            # rowcount == 0 means no such client_id.
            if cursor.rowcount <= 0:
                return None
            row = conn.execute("SELECT * FROM remote_clients WHERE client_id = ?", (client_id,)).fetchone()
            return self._to_dict(row)

    def mark_stale_clients_offline(self, *, cutoff_iso: str, now_iso: str) -> None:
        """Flip clients with last_seen older than *cutoff_iso* to offline.

        Relies on lexicographic comparison of the ISO timestamp strings,
        which is correct because all stored timestamps share the same
        UTC "Z" format (see now_iso). Clients that never reported a
        heartbeat (last_seen IS NULL) are left untouched.
        """
        with self._connection() as conn:
            conn.execute(
                """
                UPDATE remote_clients
                SET status = ?, updated_at = ?
                WHERE status != ? AND last_seen IS NOT NULL AND last_seen < ?
                """,
                ("offline", now_iso, "offline", cutoff_iso),
            )

    def list_clients(self) -> list[dict]:
        """Return all clients ordered case-insensitively by display name."""
        with self._connection() as conn:
            rows = conn.execute(
                """
                SELECT *
                FROM remote_clients
                ORDER BY LOWER(display_name) ASC, client_id ASC
                """
            ).fetchall()
            return [self._to_dict(row) for row in rows]

    def get_client(self, client_id: str) -> dict | None:
        """Return one client as a dict, or None when unknown."""
        with self._connection() as conn:
            row = conn.execute(
                """
                SELECT *
                FROM remote_clients
                WHERE client_id = ?
                """,
                (client_id,),
            ).fetchone()
            if row is None:
                return None
            return self._to_dict(row)

    def _ensure_schema(self) -> None:
        """Create the table and indexes if they do not exist yet."""
        db_path = Path(self._db_path)
        # Make sure the parent directory exists for file-based databases;
        # skip for bare filenames / special paths.
        if db_path.parent and str(db_path.parent) not in {"", "."}:
            db_path.parent.mkdir(parents=True, exist_ok=True)
        with self._connection() as conn:
            conn.execute(
                """
                CREATE TABLE IF NOT EXISTS remote_clients (
                    client_id TEXT PRIMARY KEY,
                    display_name TEXT NOT NULL,
                    platform TEXT NOT NULL,
                    agent_version TEXT NOT NULL,
                    endpoint TEXT NOT NULL,
                    shares_json TEXT NOT NULL,
                    last_seen TEXT NULL,
                    status TEXT NOT NULL,
                    last_error TEXT NULL,
                    reachable_at TEXT NULL,
                    created_at TEXT NOT NULL,
                    updated_at TEXT NOT NULL
                )
                """
            )
            conn.execute(
                """
                CREATE INDEX IF NOT EXISTS idx_remote_clients_display_name
                ON remote_clients(display_name)
                """
            )
            conn.execute(
                """
                CREATE INDEX IF NOT EXISTS idx_remote_clients_last_seen
                ON remote_clients(last_seen)
                """
            )

    @contextmanager
    def _connection(self):
        """Yield a Row-factory connection; commit on success, rollback on error."""
        conn = sqlite3.connect(self._db_path)
        conn.row_factory = sqlite3.Row
        try:
            yield conn
            conn.commit()
        except Exception:
            conn.rollback()
            raise
        finally:
            conn.close()

    @classmethod
    def _to_dict(cls, row: sqlite3.Row) -> dict:
        """Map a DB row to the plain-dict shape used by the service layer."""
        return {
            "client_id": row["client_id"],
            "display_name": row["display_name"],
            "platform": row["platform"],
            "agent_version": row["agent_version"],
            "endpoint": row["endpoint"],
            "shares": cls._decode_shares(row["shares_json"]),
            "last_seen": row["last_seen"],
            "status": row["status"],
            "last_error": row["last_error"],
            "reachable_at": row["reachable_at"],
            "created_at": row["created_at"],
            "updated_at": row["updated_at"],
        }

    @staticmethod
    def _encode_shares(shares: list[dict[str, str]]) -> str:
        """Serialize shares deterministically (sorted keys, no whitespace)."""
        return json.dumps(shares, separators=(",", ":"), sort_keys=True)

    @staticmethod
    def _decode_shares(raw: str) -> list[dict[str, str]]:
        """Parse shares JSON defensively.

        Returns [] for empty/None input or a non-list payload, and drops
        entries that are not dicts or lack a non-blank key AND label.
        """
        parsed = json.loads(raw or "[]")
        if not isinstance(parsed, list):
            return []
        normalized: list[dict[str, str]] = []
        for item in parsed:
            if not isinstance(item, dict):
                continue
            key = str(item.get("key", "")).strip()
            label = str(item.get("label", "")).strip()
            if key and label:
                normalized.append({"key": key, "label": label})
        return normalized

    @staticmethod
    def now_iso() -> str:
        """Current UTC time as ISO-8601 with a trailing 'Z'."""
        return datetime.now(tz=timezone.utc).isoformat().replace("+00:00", "Z")
+249 -13
View File
@@ -6,8 +6,9 @@ from contextlib import contextmanager
from datetime import datetime, timezone from datetime import datetime, timezone
from pathlib import Path from pathlib import Path
VALID_STATUSES = {"queued", "running", "completed", "failed"} VALID_STATUSES = {"queued", "running", "cancelling", "completed", "failed", "requested", "preparing", "ready", "cancelled"}
VALID_OPERATIONS = {"copy", "move"} VALID_OPERATIONS = {"copy", "move", "download", "duplicate", "delete"}
NON_TERMINAL_STATUSES = ("queued", "running", "cancelling", "requested", "preparing")
TASK_MIGRATION_COLUMNS: dict[str, str] = { TASK_MIGRATION_COLUMNS: dict[str, str] = {
"operation": "TEXT NOT NULL DEFAULT 'copy'", "operation": "TEXT NOT NULL DEFAULT 'copy'",
"status": "TEXT NOT NULL DEFAULT 'queued'", "status": "TEXT NOT NULL DEFAULT 'queued'",
@@ -32,9 +33,18 @@ class TaskRepository:
self._db_path = db_path self._db_path = db_path
self._ensure_schema() self._ensure_schema()
def create_task(self, operation: str, source: str, destination: str, task_id: str | None = None) -> dict: def create_task(
self,
operation: str,
source: str,
destination: str,
task_id: str | None = None,
status: str = "queued",
) -> dict:
if operation not in VALID_OPERATIONS: if operation not in VALID_OPERATIONS:
raise ValueError("invalid operation") raise ValueError("invalid operation")
if status not in VALID_STATUSES:
raise ValueError("invalid status")
task_id = task_id or str(uuid.uuid4()) task_id = task_id or str(uuid.uuid4())
created_at = self._now_iso() created_at = self._now_iso()
@@ -52,7 +62,7 @@ class TaskRepository:
( (
task_id, task_id,
operation, operation,
"queued", status,
source, source,
destination, destination,
None, None,
@@ -133,17 +143,37 @@ class TaskRepository:
done_items: int | None = None, done_items: int | None = None,
total_items: int | None = None, total_items: int | None = None,
current_item: str | None = None, current_item: str | None = None,
) -> None: ) -> bool:
started_at = self._now_iso() started_at = self._now_iso()
with self._connection() as conn: with self._connection() as conn:
conn.execute( cursor = conn.execute(
""" """
UPDATE tasks UPDATE tasks
SET status = ?, started_at = ?, done_bytes = ?, total_bytes = ?, done_items = ?, total_items = ?, current_item = ? SET status = ?, started_at = ?, done_bytes = ?, total_bytes = ?, done_items = ?, total_items = ?, current_item = ?
WHERE id = ? WHERE id = ? AND status = ?
""", """,
("running", started_at, done_bytes, total_bytes, done_items, total_items, current_item, task_id), ("running", started_at, done_bytes, total_bytes, done_items, total_items, current_item, task_id, "queued"),
) )
return cursor.rowcount > 0
def mark_preparing(
    self,
    task_id: str,
    done_items: int | None = None,
    total_items: int | None = None,
    current_item: str | None = None,
) -> bool:
    """Transition a task from "requested" to "preparing".

    The WHERE clause guards on the current status, so the transition is
    atomic and only fires for tasks still in "requested"; returns True
    when the row was updated. ``started_at`` is only set if not already
    present (COALESCE), preserving an earlier start time.
    """
    started_at = self._now_iso()
    with self._connection() as conn:
        cursor = conn.execute(
            """
            UPDATE tasks
            SET status = ?, started_at = COALESCE(started_at, ?), done_items = ?, total_items = ?, current_item = ?
            WHERE id = ? AND status = ?
            """,
            ("preparing", started_at, done_items, total_items, current_item, task_id, "requested"),
        )
        return cursor.rowcount > 0
def update_progress( def update_progress(
self, self,
@@ -171,17 +201,36 @@ class TaskRepository:
total_bytes: int | None = None, total_bytes: int | None = None,
done_items: int | None = None, done_items: int | None = None,
total_items: int | None = None, total_items: int | None = None,
) -> None: ) -> bool:
finished_at = self._now_iso() finished_at = self._now_iso()
with self._connection() as conn: with self._connection() as conn:
conn.execute( cursor = conn.execute(
""" """
UPDATE tasks UPDATE tasks
SET status = ?, finished_at = ?, done_bytes = ?, total_bytes = ?, done_items = ?, total_items = ? SET status = ?, finished_at = ?, done_bytes = ?, total_bytes = ?, done_items = ?, total_items = ?, current_item = NULL
WHERE id = ? WHERE id = ? AND status = ?
""", """,
("completed", finished_at, done_bytes, total_bytes, done_items, total_items, task_id), ("completed", finished_at, done_bytes, total_bytes, done_items, total_items, task_id, "running"),
) )
return cursor.rowcount > 0
def mark_ready(
    self,
    task_id: str,
    done_items: int | None = None,
    total_items: int | None = None,
) -> bool:
    """Transition a task from "preparing" to "ready" (artifact available).

    Sets ``finished_at`` and clears ``current_item``; guarded on the
    "preparing" status so a concurrent cancel wins the race. Returns
    True when the row was updated.
    """
    finished_at = self._now_iso()
    with self._connection() as conn:
        cursor = conn.execute(
            """
            UPDATE tasks
            SET status = ?, finished_at = ?, done_items = ?, total_items = ?, current_item = NULL
            WHERE id = ? AND status = ?
            """,
            ("ready", finished_at, done_items, total_items, task_id, "preparing"),
        )
        return cursor.rowcount > 0
def mark_failed( def mark_failed(
self, self,
@@ -216,6 +265,97 @@ class TaskRepository:
), ),
) )
def mark_failed_if_not_cancelled(
    self,
    task_id: str,
    error_code: str,
    error_message: str,
    failed_item: str | None,
    done_bytes: int | None,
    total_bytes: int | None,
    done_items: int | None = None,
    total_items: int | None = None,
) -> bool:
    """Mark a task failed unless it has already been cancelled.

    Records the error details, final counters and ``finished_at``, and
    clears ``current_item``. Returns True when the row was updated.
    NOTE(review): the guard is only ``status != 'cancelled'``, so a task
    already in "completed" (or any other terminal state) could still be
    flipped to "failed" -- confirm callers never race that way.
    """
    finished_at = self._now_iso()
    with self._connection() as conn:
        cursor = conn.execute(
            """
            UPDATE tasks
            SET status = ?, finished_at = ?, error_code = ?, error_message = ?, failed_item = ?, done_bytes = ?, total_bytes = ?, done_items = ?, total_items = ?, current_item = NULL
            WHERE id = ? AND status != ?
            """,
            (
                "failed",
                finished_at,
                error_code,
                error_message,
                failed_item,
                done_bytes,
                total_bytes,
                done_items,
                total_items,
                task_id,
                "cancelled",
            ),
        )
        return cursor.rowcount > 0
def mark_cancelled(self, task_id: str) -> bool:
    """Cancel a task that has not started running yet.

    Only applies to tasks in "requested" or "preparing"; sets
    ``finished_at`` and clears ``current_item``. Returns True when a row
    was updated (i.e. the task was actually cancelled here).
    """
    finished_at = self._now_iso()
    with self._connection() as conn:
        cursor = conn.execute(
            """
            UPDATE tasks
            SET status = ?, finished_at = ?, current_item = NULL
            WHERE id = ? AND status IN (?, ?)
            """,
            ("cancelled", finished_at, task_id, "requested", "preparing"),
        )
        return cursor.rowcount > 0
def request_cancellation(self, task_id: str) -> dict | None:
    """Ask for a task to be cancelled; return its row afterwards.

    Two status-guarded updates in one transaction:
    - "queued" tasks are cancelled immediately (finished_at set,
      current_item cleared) since no worker has picked them up;
    - "running" tasks are flipped to "cancelling" so the worker can
      observe the request and finalize via finalize_cancelled().
    Returns the (possibly unchanged) task dict, or None if the id does
    not exist.
    """
    finished_at = self._now_iso()
    with self._connection() as conn:
        conn.execute(
            """
            UPDATE tasks
            SET status = ?, finished_at = ?, current_item = NULL
            WHERE id = ? AND status = ?
            """,
            ("cancelled", finished_at, task_id, "queued"),
        )
        conn.execute(
            """
            UPDATE tasks
            SET status = ?
            WHERE id = ? AND status = ?
            """,
            ("cancelling", task_id, "running"),
        )
        row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone()
        return self._to_dict(row) if row else None
def finalize_cancelled(
    self,
    task_id: str,
    *,
    done_bytes: int | None = None,
    total_bytes: int | None = None,
    done_items: int | None = None,
    total_items: int | None = None,
) -> bool:
    """Complete a cancellation: move the task into the "cancelled" state.

    Records the final progress counters, sets ``finished_at`` and clears
    ``current_item``. Guarded on "cancelling" or "queued". Returns True
    when a row was updated. NOTE(review): request_cancellation() already
    cancels "queued" tasks directly, so the "queued" guard here looks
    like a belt-and-braces fallback -- confirm.
    """
    finished_at = self._now_iso()
    with self._connection() as conn:
        cursor = conn.execute(
            """
            UPDATE tasks
            SET status = ?, finished_at = ?, done_bytes = ?, total_bytes = ?, done_items = ?, total_items = ?, current_item = NULL
            WHERE id = ? AND status IN (?, ?)
            """,
            ("cancelled", finished_at, done_bytes, total_bytes, done_items, total_items, task_id, "cancelling", "queued"),
        )
        return cursor.rowcount > 0
def _ensure_schema(self) -> None: def _ensure_schema(self) -> None:
db_path = Path(self._db_path) db_path = Path(self._db_path)
if db_path.parent and str(db_path.parent) not in {"", "."}: if db_path.parent and str(db_path.parent) not in {"", "."}:
@@ -244,14 +384,100 @@ class TaskRepository:
) )
""" """
) )
conn.execute(
"""
CREATE TABLE IF NOT EXISTS task_artifacts (
task_id TEXT PRIMARY KEY,
file_path TEXT NOT NULL,
file_name TEXT NOT NULL,
expires_at TEXT NOT NULL,
created_at TEXT NOT NULL
)
"""
)
conn.execute( conn.execute(
""" """
CREATE INDEX IF NOT EXISTS idx_tasks_created_at_desc CREATE INDEX IF NOT EXISTS idx_tasks_created_at_desc
ON tasks(created_at DESC) ON tasks(created_at DESC)
""" """
) )
conn.execute(
"""
CREATE INDEX IF NOT EXISTS idx_task_artifacts_expires_at
ON task_artifacts(expires_at ASC)
"""
)
self._migrate_tasks_columns(conn) self._migrate_tasks_columns(conn)
def upsert_artifact(self, *, task_id: str, file_path: str, file_name: str, expires_at: str) -> dict:
    """Insert or replace the artifact record for a task; return it as a dict.

    One artifact per task (task_id is the primary key). On conflict the
    path, download name and expiry are replaced while ``created_at``
    keeps its original value.
    """
    created_at = self._now_iso()
    with self._connection() as conn:
        conn.execute(
            """
            INSERT INTO task_artifacts (task_id, file_path, file_name, expires_at, created_at)
            VALUES (?, ?, ?, ?, ?)
            ON CONFLICT(task_id) DO UPDATE SET
                file_path = excluded.file_path,
                file_name = excluded.file_name,
                expires_at = excluded.expires_at
            """,
            (task_id, file_path, file_name, expires_at, created_at),
        )
        row = conn.execute("SELECT * FROM task_artifacts WHERE task_id = ?", (task_id,)).fetchone()
        return self._artifact_to_dict(row)
def get_artifact(self, task_id: str) -> dict | None:
    """Return the artifact record for *task_id*, or None when absent."""
    with self._connection() as conn:
        row = conn.execute("SELECT * FROM task_artifacts WHERE task_id = ?", (task_id,)).fetchone()
        return self._artifact_to_dict(row) if row else None
def list_artifacts(self) -> list[dict]:
    """Return all artifact records, oldest first (by created_at)."""
    with self._connection() as conn:
        rows = conn.execute("SELECT * FROM task_artifacts ORDER BY created_at ASC").fetchall()
        return [self._artifact_to_dict(row) for row in rows]
def delete_artifact(self, task_id: str) -> None:
    """Delete the artifact record for *task_id* (DB row only; the file
    on disk, if any, is the caller's responsibility)."""
    with self._connection() as conn:
        conn.execute("DELETE FROM task_artifacts WHERE task_id = ?", (task_id,))
def reconcile_incomplete_tasks(
    self,
    *,
    error_code: str = "task_interrupted",
    error_message: str = "Task was interrupted before completion",
) -> list[str]:
    """Fail every task left in a non-terminal state and return their ids.

    Intended for startup recovery after a crash/restart: any task still
    in one of NON_TERMINAL_STATUSES is marked "failed" with the supplied
    error details, and its artifact record is deleted. Returns the list
    of affected task ids (empty when there was nothing to reconcile).
    NOTE(review): only the task_artifacts rows are deleted here; any
    artifact files on disk are not removed by this method.
    """
    finished_at = self._now_iso()
    placeholders = ", ".join("?" for _ in NON_TERMINAL_STATUSES)
    with self._connection() as conn:
        rows = conn.execute(
            f"""
            SELECT id
            FROM tasks
            WHERE status IN ({placeholders})
            """,
            NON_TERMINAL_STATUSES,
        ).fetchall()
        task_ids = [row["id"] for row in rows]
        if not task_ids:
            return []
        task_placeholders = ", ".join("?" for _ in task_ids)
        conn.execute(
            f"""
            UPDATE tasks
            SET status = ?, finished_at = ?, error_code = ?, error_message = ?, current_item = NULL
            WHERE id IN ({task_placeholders})
            """,
            ("failed", finished_at, error_code, error_message, *task_ids),
        )
        conn.execute(
            f"""
            DELETE FROM task_artifacts
            WHERE task_id IN ({task_placeholders})
            """,
            task_ids,
        )
        return task_ids
def _migrate_tasks_columns(self, conn: sqlite3.Connection) -> None: def _migrate_tasks_columns(self, conn: sqlite3.Connection) -> None:
rows = conn.execute("PRAGMA table_info(tasks)").fetchall() rows = conn.execute("PRAGMA table_info(tasks)").fetchall()
existing_columns = {row["name"] for row in rows} existing_columns = {row["name"] for row in rows}
@@ -298,6 +524,16 @@ class TaskRepository:
"finished_at": row["finished_at"], "finished_at": row["finished_at"],
} }
@staticmethod
def _artifact_to_dict(row: sqlite3.Row) -> dict:
    """Map a task_artifacts row to the plain-dict shape used by callers."""
    return {
        "task_id": row["task_id"],
        "file_path": row["file_path"],
        "file_name": row["file_name"],
        "expires_at": row["expires_at"],
        "created_at": row["created_at"],
    }
@staticmethod @staticmethod
def _now_iso() -> str: def _now_iso() -> str:
return datetime.now(tz=timezone.utc).isoformat().replace("+00:00", "Z") return datetime.now(tz=timezone.utc).isoformat().replace("+00:00", "Z")
+88 -2
View File
@@ -1,10 +1,12 @@
from __future__ import annotations from __future__ import annotations
from functools import lru_cache from functools import lru_cache
from pathlib import Path
from backend.app.config import Settings, get_settings from backend.app.config import Settings, get_settings
from backend.app.db.bookmark_repository import BookmarkRepository from backend.app.db.bookmark_repository import BookmarkRepository
from backend.app.db.history_repository import HistoryRepository from backend.app.db.history_repository import HistoryRepository
from backend.app.db.remote_client_repository import RemoteClientRepository
from backend.app.db.settings_repository import SettingsRepository from backend.app.db.settings_repository import SettingsRepository
from backend.app.db.task_repository import TaskRepository from backend.app.db.task_repository import TaskRepository
from backend.app.fs.filesystem_adapter import FilesystemAdapter from backend.app.fs.filesystem_adapter import FilesystemAdapter
@@ -12,9 +14,15 @@ from backend.app.security.path_guard import PathGuard
from backend.app.services.bookmark_service import BookmarkService from backend.app.services.bookmark_service import BookmarkService
from backend.app.services.browse_service import BrowseService from backend.app.services.browse_service import BrowseService
from backend.app.services.copy_task_service import CopyTaskService from backend.app.services.copy_task_service import CopyTaskService
from backend.app.services.archive_download_task_service import ArchiveDownloadTaskService
from backend.app.services.delete_task_service import DeleteTaskService
from backend.app.services.duplicate_task_service import DuplicateTaskService
from backend.app.services.file_ops_service import FileOpsService from backend.app.services.file_ops_service import FileOpsService
from backend.app.services.history_service import HistoryService from backend.app.services.history_service import HistoryService
from backend.app.services.move_task_service import MoveTaskService from backend.app.services.move_task_service import MoveTaskService
from backend.app.services.remote_browse_service import RemoteBrowseService
from backend.app.services.remote_client_service import RemoteClientService
from backend.app.services.remote_file_service import RemoteFileService
from backend.app.services.search_service import SearchService from backend.app.services.search_service import SearchService
from backend.app.services.settings_service import SettingsService from backend.app.services.settings_service import SettingsService
from backend.app.services.task_service import TaskService from backend.app.services.task_service import TaskService
@@ -55,6 +63,12 @@ def get_settings_repository() -> SettingsRepository:
return SettingsRepository(db_path=settings.task_db_path) return SettingsRepository(db_path=settings.task_db_path)
@lru_cache(maxsize=1)
def get_remote_client_repository() -> RemoteClientRepository:
    """Process-wide singleton repository; shares the task DB file."""
    settings: Settings = get_settings()
    return RemoteClientRepository(db_path=settings.task_db_path)
@lru_cache(maxsize=1) @lru_cache(maxsize=1)
def get_task_runner() -> TaskRunner: def get_task_runner() -> TaskRunner:
return TaskRunner( return TaskRunner(
@@ -64,8 +78,18 @@ def get_task_runner() -> TaskRunner:
) )
@lru_cache(maxsize=1)
def get_archive_artifact_root() -> str:
    """Directory for temporary archive artifacts, placed next to the task DB."""
    settings: Settings = get_settings()
    return str(Path(settings.task_db_path).resolve().parent / "archive_tmp")
async def get_browse_service() -> BrowseService: async def get_browse_service() -> BrowseService:
return BrowseService(path_guard=get_path_guard(), filesystem=get_filesystem_adapter()) return BrowseService(
path_guard=get_path_guard(),
filesystem=get_filesystem_adapter(),
remote_browse_service=await get_remote_browse_service(),
)
async def get_file_ops_service() -> FileOpsService: async def get_file_ops_service() -> FileOpsService:
@@ -76,8 +100,23 @@ async def get_file_ops_service() -> FileOpsService:
) )
async def get_archive_download_task_service() -> ArchiveDownloadTaskService:
    """Build the archive-download task service with its collaborators.

    A fresh instance per request, like the other service factories here.
    NOTE(review): this constructs its own FileOpsService instead of
    reusing the get_file_ops_service() factory -- consider unifying so
    both are wired identically.
    """
    return ArchiveDownloadTaskService(
        path_guard=get_path_guard(),
        repository=get_task_repository(),
        runner=get_task_runner(),
        history_repository=get_history_repository(),
        file_ops_service=FileOpsService(
            path_guard=get_path_guard(),
            filesystem=get_filesystem_adapter(),
            history_repository=get_history_repository(),
        ),
        artifact_root=Path(get_archive_artifact_root()),
    )
async def get_task_service() -> TaskService: async def get_task_service() -> TaskService:
return TaskService(repository=get_task_repository()) return TaskService(repository=get_task_repository(), history_repository=get_history_repository())
async def get_copy_task_service() -> CopyTaskService: async def get_copy_task_service() -> CopyTaskService:
@@ -89,6 +128,24 @@ async def get_copy_task_service() -> CopyTaskService:
) )
async def get_delete_task_service() -> DeleteTaskService:
    """FastAPI dependency: delete-task service wired to the shared singletons."""
    return DeleteTaskService(
        path_guard=get_path_guard(),
        repository=get_task_repository(),
        runner=get_task_runner(),
        history_repository=get_history_repository(),
    )
async def get_duplicate_task_service() -> DuplicateTaskService:
    """FastAPI dependency: duplicate-task service wired to the shared singletons."""
    return DuplicateTaskService(
        path_guard=get_path_guard(),
        repository=get_task_repository(),
        runner=get_task_runner(),
        history_repository=get_history_repository(),
    )
async def get_move_task_service() -> MoveTaskService: async def get_move_task_service() -> MoveTaskService:
return MoveTaskService( return MoveTaskService(
path_guard=get_path_guard(), path_guard=get_path_guard(),
@@ -112,3 +169,32 @@ async def get_search_service() -> SearchService:
async def get_settings_service() -> SettingsService: async def get_settings_service() -> SettingsService:
return SettingsService(repository=get_settings_repository(), path_guard=get_path_guard()) return SettingsService(repository=get_settings_repository(), path_guard=get_path_guard())
async def get_remote_client_service() -> RemoteClientService:
    """FastAPI dependency: remote client registry service.

    Pulls the registration token and offline timeout from settings; the
    repository itself is the cached singleton.
    """
    settings: Settings = get_settings()
    return RemoteClientService(
        repository=get_remote_client_repository(),
        registration_token=settings.remote_client_registration_token,
        offline_timeout_seconds=settings.remote_client_offline_timeout_seconds,
    )
async def get_remote_browse_service() -> RemoteBrowseService:
    """FastAPI dependency: read-only browsing of remote client shares.

    Shares the agent auth header/scheme/token configuration with
    get_remote_file_service() so both talk to agents identically.
    """
    settings: Settings = get_settings()
    return RemoteBrowseService(
        remote_client_service=await get_remote_client_service(),
        agent_auth_header=settings.remote_client_agent_auth_header,
        agent_auth_scheme=settings.remote_client_agent_auth_scheme,
        agent_auth_token=settings.remote_client_agent_auth_token,
    )
async def get_remote_file_service() -> RemoteFileService:
    """FastAPI dependency: remote file operations against client agents.

    Mirrors get_remote_browse_service() wiring; only the service class
    differs.
    """
    settings: Settings = get_settings()
    return RemoteFileService(
        remote_client_service=await get_remote_client_service(),
        agent_auth_header=settings.remote_client_agent_auth_header,
        agent_auth_scheme=settings.remote_client_agent_auth_scheme,
        agent_auth_token=settings.remote_client_agent_auth_token,
    )
+9 -2
View File
@@ -104,6 +104,9 @@ class FilesystemAdapter:
def delete_empty_directory(self, path: Path) -> None: def delete_empty_directory(self, path: Path) -> None:
path.rmdir() path.rmdir()
def delete_directory_recursive(self, path: Path) -> None:
    """Remove *path* together with all files and subdirectories beneath it.

    Thin wrapper around shutil.rmtree; raises whatever rmtree raises
    (e.g. FileNotFoundError, PermissionError).
    """
    doomed_tree = path
    shutil.rmtree(doomed_tree)
def copy_file(self, source: str, destination: str, on_progress: callable | None = None) -> None: def copy_file(self, source: str, destination: str, on_progress: callable | None = None) -> None:
src = Path(source) src = Path(source)
dst = Path(destination) dst = Path(destination)
@@ -117,6 +120,9 @@ class FilesystemAdapter:
on_progress(out_f.tell()) on_progress(out_f.tell())
shutil.copystat(src, dst, follow_symlinks=False) shutil.copystat(src, dst, follow_symlinks=False)
def copy_directory(self, source: str, destination: str) -> None:
    """Recursively copy *source* into a new directory at *destination*.

    Symlinks are preserved as symlinks and file metadata is carried over
    via shutil.copy2. The destination must not already exist (copytree
    semantics).
    """
    src_tree = source
    dst_tree = destination
    shutil.copytree(src_tree, dst_tree, symlinks=True, copy_function=shutil.copy2)
def read_text_preview(self, path: Path, max_bytes: int, encoding: str = "utf-8") -> dict: def read_text_preview(self, path: Path, max_bytes: int, encoding: str = "utf-8") -> dict:
size = int(path.stat().st_size) size = int(path.stat().st_size)
limit = max_bytes + 1 limit = max_bytes + 1
@@ -140,8 +146,9 @@ class FilesystemAdapter:
"modified": self.modified_iso(path), "modified": self.modified_iso(path),
} }
def write_uploaded_file(self, path: Path, file_stream, chunk_size: int = 1024 * 1024) -> dict: def write_uploaded_file(self, path: Path, file_stream, chunk_size: int = 1024 * 1024, overwrite: bool = False) -> dict:
with path.open("xb") as handle: mode = "wb" if overwrite else "xb"
with path.open(mode) as handle:
while True: while True:
chunk = file_stream.read(chunk_size) chunk = file_stream.read(chunk_size)
if not chunk: if not chunk:
+14
View File
@@ -10,13 +10,17 @@ from backend.app.api.errors import AppError
from backend.app.api.routes_bookmarks import router as bookmarks_router from backend.app.api.routes_bookmarks import router as bookmarks_router
from backend.app.api.routes_browse import router as browse_router from backend.app.api.routes_browse import router as browse_router
from backend.app.api.routes_copy import router as copy_router from backend.app.api.routes_copy import router as copy_router
from backend.app.api.routes_clients import router as clients_router
from backend.app.api.routes_duplicate import router as duplicate_router
from backend.app.api.routes_files import router as files_router from backend.app.api.routes_files import router as files_router
from backend.app.api.routes_history import router as history_router from backend.app.api.routes_history import router as history_router
from backend.app.api.routes_move import router as move_router from backend.app.api.routes_move import router as move_router
from backend.app.api.routes_search import router as search_router from backend.app.api.routes_search import router as search_router
from backend.app.api.routes_settings import router as settings_router from backend.app.api.routes_settings import router as settings_router
from backend.app.api.routes_tasks import router as tasks_router from backend.app.api.routes_tasks import router as tasks_router
from backend.app.dependencies import get_history_repository, get_task_repository
from backend.app.logging import configure_logging from backend.app.logging import configure_logging
from backend.app.services.task_recovery_service import reconcile_persisted_incomplete_tasks
configure_logging() configure_logging()
@@ -30,6 +34,8 @@ app.mount("/ui", StaticFiles(directory=str(UI_DIR), html=True), name="ui")
app.include_router(browse_router, prefix="/api") app.include_router(browse_router, prefix="/api")
app.include_router(files_router, prefix="/api") app.include_router(files_router, prefix="/api")
app.include_router(copy_router, prefix="/api") app.include_router(copy_router, prefix="/api")
app.include_router(clients_router, prefix="/api")
app.include_router(duplicate_router, prefix="/api")
app.include_router(move_router, prefix="/api") app.include_router(move_router, prefix="/api")
app.include_router(search_router, prefix="/api") app.include_router(search_router, prefix="/api")
app.include_router(settings_router, prefix="/api") app.include_router(settings_router, prefix="/api")
@@ -38,6 +44,14 @@ app.include_router(history_router, prefix="/api")
app.include_router(tasks_router, prefix="/api") app.include_router(tasks_router, prefix="/api")
@app.on_event("startup")
async def reconcile_incomplete_tasks_on_startup() -> None:
    """Fail over tasks left non-terminal by a previous process.

    Runs once at application startup and delegates to the recovery
    service with the shared repositories.
    NOTE(review): @app.on_event is deprecated in newer FastAPI releases
    in favour of lifespan handlers -- confirm against the pinned version.
    """
    reconcile_persisted_incomplete_tasks(
        task_repository=get_task_repository(),
        history_repository=get_history_repository(),
    )
@app.exception_handler(AppError) @app.exception_handler(AppError)
async def handle_app_error(_: Request, exc: AppError) -> JSONResponse: async def handle_app_error(_: Request, exc: AppError) -> JSONResponse:
return JSONResponse( return JSONResponse(
@@ -0,0 +1,363 @@
from __future__ import annotations
import os
import uuid
import zipfile
from datetime import datetime, timedelta, timezone
from pathlib import Path
from backend.app.api.errors import AppError
from backend.app.api.schemas import TaskCreateResponse
from backend.app.db.history_repository import HistoryRepository
from backend.app.db.task_repository import TaskRepository
from backend.app.security.path_guard import PathGuard
from backend.app.services.file_ops_service import FileOpsService
from backend.app.tasks_runner import TaskRunner
# How long a prepared archive artifact stays downloadable (30 minutes).
ARCHIVE_DOWNLOAD_TTL_SECONDS = 30 * 60


class ArchivePrepareCancelled(Exception):
    """Signals that archive preparation was cancelled before completion."""
    pass
class ArchiveDownloadTaskService:
def __init__(
    self,
    path_guard: PathGuard,
    repository: TaskRepository,
    runner: TaskRunner,
    history_repository: HistoryRepository | None,
    file_ops_service: FileOpsService,
    artifact_root: Path,
    artifact_ttl_seconds: int = ARCHIVE_DOWNLOAD_TTL_SECONDS,
):
    """Wire the service and prepare the artifact directory.

    history_repository may be None (history recording is optional).
    The artifact root is created if missing, and expired artifacts are
    swept immediately so stale files from a previous run don't linger.
    """
    self._path_guard = path_guard
    self._repository = repository
    self._runner = runner
    self._history_repository = history_repository
    self._file_ops_service = file_ops_service
    self._artifact_root = artifact_root
    self._artifact_ttl_seconds = artifact_ttl_seconds
    self._artifact_root.mkdir(parents=True, exist_ok=True)
    # Eager cleanup on construction.
    self.sweep_artifacts()
def create_archive_prepare_task(self, paths: list[str]) -> TaskCreateResponse:
if not paths:
raise AppError(
code="invalid_request",
message="At least one path is required",
status_code=400,
)
self.sweep_artifacts()
resolved_targets = [self._path_guard.resolve_existing_path(path) for path in paths]
mode = self._file_ops_service._download_mode_from_resolved_targets(resolved_targets)
if mode == "single_file":
raise AppError(
code="invalid_request",
message="Single file downloads must use direct download",
status_code=400,
)
summary = self._file_ops_service._summarize_download_targets([target.relative for target in resolved_targets])
archive_name = self._file_ops_service._download_name_for_targets(resolved_targets)
task_id = str(uuid.uuid4())
task = self._repository.create_task(
operation="download",
source=summary,
destination=archive_name,
task_id=task_id,
status="requested",
)
self._record_history(
entry_id=task_id,
operation="download",
status="requested",
source=mode,
destination=archive_name,
path=summary,
)
target_paths = [target.relative for target in resolved_targets]
self._runner.enqueue_archive_prepare(
lambda: self._run_archive_prepare_task(
task_id=task_id,
target_paths=target_paths,
archive_name=archive_name,
history_mode=mode,
history_path=summary,
)
)
return TaskCreateResponse(task_id=task["id"], status=task["status"])
def prepare_ready_archive_download(self, task_id: str) -> dict:
self.sweep_artifacts()
task = self._repository.get_task(task_id)
if not task:
raise AppError(
code="task_not_found",
message="Task was not found",
status_code=404,
details={"task_id": task_id},
)
if task["operation"] != "download":
raise AppError(
code="invalid_request",
message="Task is not an archive download",
status_code=400,
details={"task_id": task_id},
)
if task["status"] == "cancelled":
raise AppError(
code="download_cancelled",
message="Archive download was cancelled",
status_code=409,
details={"task_id": task_id},
)
if task["status"] != "ready":
raise AppError(
code="download_not_ready",
message="Archive download is not ready",
status_code=409,
details={"task_id": task_id, "status": task["status"]},
)
artifact = self._repository.get_artifact(task_id)
if not artifact:
raise AppError(
code="archive_not_found",
message="Prepared archive was not found",
status_code=404,
details={"task_id": task_id},
)
if self._is_expired(artifact["expires_at"]):
self._delete_artifact_record_and_file(task_id, artifact["file_path"])
raise AppError(
code="archive_expired",
message="Prepared archive expired",
status_code=410,
details={"task_id": task_id},
)
artifact_path = Path(artifact["file_path"])
if not artifact_path.exists():
self._repository.delete_artifact(task_id)
raise AppError(
code="archive_not_found",
message="Prepared archive was not found",
status_code=404,
details={"task_id": task_id},
)
return {
"content": self._file_ops_service._filesystem.stream_file(artifact_path),
"headers": {
"Content-Disposition": f'attachment; filename="{artifact["file_name"]}"',
"Content-Length": str(int(artifact_path.stat().st_size)),
},
"content_type": "application/zip",
}
def cancel_archive_prepare_task(self, task_id: str) -> dict:
self.sweep_artifacts()
task = self._repository.get_task(task_id)
if not task:
raise AppError(
code="task_not_found",
message="Task was not found",
status_code=404,
details={"task_id": task_id},
)
if task["operation"] != "download":
raise AppError(
code="invalid_request",
message="Task is not an archive download",
status_code=400,
details={"task_id": task_id},
)
if task["status"] == "ready":
raise AppError(
code="download_not_cancellable",
message="Archive download is already ready",
status_code=409,
details={"task_id": task_id, "status": task["status"]},
)
if task["status"] in {"failed", "cancelled"}:
raise AppError(
code="download_not_cancellable",
message="Archive download cannot be cancelled",
status_code=409,
details={"task_id": task_id, "status": task["status"]},
)
if not self._repository.mark_cancelled(task_id):
current = self._repository.get_task(task_id)
current_status = current["status"] if current else task["status"]
raise AppError(
code="download_not_cancellable",
message="Archive download cannot be cancelled",
status_code=409,
details={"task_id": task_id, "status": current_status},
)
self._cleanup_task_artifacts(task_id)
self._update_history_cancelled(task_id)
cancelled_task = self._repository.get_task(task_id)
if not cancelled_task:
raise AppError(
code="task_not_found",
message="Task was not found",
status_code=404,
details={"task_id": task_id},
)
return cancelled_task
def sweep_artifacts(self) -> None:
self._artifact_root.mkdir(parents=True, exist_ok=True)
referenced_paths: set[Path] = set()
for artifact in self._repository.list_artifacts():
artifact_path = Path(artifact["file_path"])
referenced_paths.add(artifact_path)
if self._is_expired(artifact["expires_at"]) or not artifact_path.exists():
self._delete_artifact_record_and_file(artifact["task_id"], artifact["file_path"])
for candidate in self._artifact_root.iterdir():
if candidate.is_file() and candidate not in referenced_paths:
try:
candidate.unlink()
except FileNotFoundError:
pass
def _run_archive_prepare_task(
self,
*,
task_id: str,
target_paths: list[str],
archive_name: str,
history_mode: str,
history_path: str,
) -> None:
partial_path = self._artifact_root / f"{task_id}.partial.zip"
final_path = self._artifact_root / f"{task_id}.zip"
total_items = len(target_paths)
try:
self._raise_if_cancelled(task_id)
if not self._repository.mark_preparing(
task_id=task_id,
done_items=0,
total_items=total_items,
current_item=target_paths[0] if target_paths else None,
):
self._raise_if_cancelled(task_id)
return
resolved_targets = [self._path_guard.resolve_existing_path(path) for path in target_paths]
self._raise_if_cancelled(task_id)
self._file_ops_service._validate_zip_download_archive_names(resolved_targets)
self._file_ops_service._run_zip_download_preflight(resolved_targets)
self._raise_if_cancelled(task_id)
with zipfile.ZipFile(partial_path, "w", compression=zipfile.ZIP_DEFLATED) as archive:
for index, resolved_target in enumerate(resolved_targets):
self._raise_if_cancelled(task_id)
self._repository.update_progress(
task_id=task_id,
done_items=index,
total_items=total_items,
current_item=resolved_target.relative,
)
self._file_ops_service._write_download_target_to_zip(
archive,
resolved_target,
on_each_item=lambda: self._raise_if_cancelled(task_id),
)
self._raise_if_cancelled(task_id)
os.replace(partial_path, final_path)
self._raise_if_cancelled(task_id)
self._repository.upsert_artifact(
task_id=task_id,
file_path=str(final_path),
file_name=archive_name,
expires_at=self._expires_at_iso(),
)
if not self._repository.mark_ready(
task_id=task_id,
done_items=total_items,
total_items=total_items,
):
self._cleanup_task_artifacts(task_id)
self._raise_if_cancelled(task_id)
return
self._update_history_ready(task_id)
except ArchivePrepareCancelled:
self._cleanup_task_artifacts(task_id)
except AppError as exc:
self._cleanup_task_artifacts(task_id)
if self._repository.mark_failed_if_not_cancelled(
task_id=task_id,
error_code=exc.code,
error_message=exc.message,
failed_item=history_path,
done_bytes=None,
total_bytes=None,
done_items=0,
total_items=total_items,
):
self._update_history_failed(task_id, exc.code, exc.message)
except OSError as exc:
self._cleanup_task_artifacts(task_id)
if self._repository.mark_failed_if_not_cancelled(
task_id=task_id,
error_code="io_error",
error_message=str(exc),
failed_item=history_path,
done_bytes=None,
total_bytes=None,
done_items=0,
total_items=total_items,
):
self._update_history_failed(task_id, "io_error", str(exc))
def _cleanup_task_artifacts(self, task_id: str) -> None:
self._delete_artifact_record_and_file(task_id, str(self._artifact_root / f"{task_id}.partial.zip"))
self._delete_artifact_record_and_file(task_id, str(self._artifact_root / f"{task_id}.zip"))
def _delete_artifact_record_and_file(self, task_id: str, file_path: str) -> None:
self._repository.delete_artifact(task_id)
path = Path(file_path)
try:
path.unlink()
except FileNotFoundError:
pass
def _update_history_ready(self, task_id: str) -> None:
if self._history_repository:
self._history_repository.update_entry(entry_id=task_id, status="ready")
def _update_history_failed(self, task_id: str, error_code: str, error_message: str) -> None:
if self._history_repository:
self._history_repository.update_entry(
entry_id=task_id,
status="failed",
error_code=error_code,
error_message=error_message,
)
def _update_history_cancelled(self, task_id: str) -> None:
if self._history_repository:
self._history_repository.update_entry(entry_id=task_id, status="cancelled")
def _record_history(self, **kwargs) -> None:
if self._history_repository:
self._history_repository.create_entry(**kwargs)
def _expires_at_iso(self) -> str:
return (datetime.now(timezone.utc) + timedelta(seconds=self._artifact_ttl_seconds)).replace(microsecond=0).isoformat().replace("+00:00", "Z")
@staticmethod
def _is_expired(expires_at: str) -> bool:
return datetime.now(timezone.utc) >= datetime.fromisoformat(expires_at.replace("Z", "+00:00"))
def _raise_if_cancelled(self, task_id: str) -> None:
task = self._repository.get_task(task_id)
if task and task["status"] == "cancelled":
raise ArchivePrepareCancelled()
+11 -1
View File
@@ -3,14 +3,24 @@ from __future__ import annotations
from backend.app.api.schemas import BrowseResponse, DirectoryEntry, FileEntry from backend.app.api.schemas import BrowseResponse, DirectoryEntry, FileEntry
from backend.app.fs.filesystem_adapter import FilesystemAdapter from backend.app.fs.filesystem_adapter import FilesystemAdapter
from backend.app.security.path_guard import PathGuard from backend.app.security.path_guard import PathGuard
from backend.app.services.remote_browse_service import RemoteBrowseService
class BrowseService: class BrowseService:
def __init__(self, path_guard: PathGuard, filesystem: FilesystemAdapter): def __init__(
self,
path_guard: PathGuard,
filesystem: FilesystemAdapter,
remote_browse_service: RemoteBrowseService | None = None,
):
self._path_guard = path_guard self._path_guard = path_guard
self._filesystem = filesystem self._filesystem = filesystem
self._remote_browse_service = remote_browse_service
def browse(self, path: str, show_hidden: bool) -> BrowseResponse: def browse(self, path: str, show_hidden: bool) -> BrowseResponse:
if self._remote_browse_service and self._remote_browse_service.handles_path(path):
return self._remote_browse_service.browse(path=path, show_hidden=show_hidden)
if self._path_guard.is_virtual_volumes_path(path): if self._path_guard.is_virtual_volumes_path(path):
directories = [ directories = [
DirectoryEntry(name=item["name"], path=item["path"], modified="") DirectoryEntry(name=item["name"], path=item["path"], modified="")
+242 -43
View File
@@ -1,5 +1,6 @@
from __future__ import annotations from __future__ import annotations
import os
from pathlib import Path from pathlib import Path
import uuid import uuid
@@ -8,7 +9,7 @@ from backend.app.api.errors import AppError
from backend.app.api.schemas import TaskCreateResponse from backend.app.api.schemas import TaskCreateResponse
from backend.app.db.history_repository import HistoryRepository from backend.app.db.history_repository import HistoryRepository
from backend.app.db.task_repository import TaskRepository from backend.app.db.task_repository import TaskRepository
from backend.app.security.path_guard import PathGuard from backend.app.security.path_guard import PathGuard, ResolvedPath
from backend.app.tasks_runner import TaskRunner from backend.app.tasks_runner import TaskRunner
@@ -20,62 +21,38 @@ class CopyTaskService:
self._history_repository = history_repository self._history_repository = history_repository
def create_copy_task(self, source: str, destination: str) -> TaskCreateResponse: def create_copy_task(self, source: str, destination: str) -> TaskCreateResponse:
if not source or not destination:
raise AppError(
code="invalid_request",
message="Source and destination are required",
status_code=400,
)
try: try:
resolved_source = self._path_guard.resolve_existing_path(source) item = self._build_copy_item(source=source, destination=destination)
_, _, lexical_source, _ = self._path_guard.resolve_lexical_path(source)
if lexical_source.is_symlink():
raise AppError(
code="type_conflict",
message="Source must be a regular file",
status_code=409,
details={"path": source},
)
if not resolved_source.absolute.is_file():
raise AppError(
code="type_conflict",
message="Source must be a file",
status_code=409,
details={"path": source},
)
resolved_destination = self._path_guard.resolve_path(destination)
destination_parent = resolved_destination.absolute.parent
parent_relative = self._path_guard.entry_relative_path(
resolved_destination.alias,
destination_parent,
display_style=resolved_destination.display_style,
)
self._map_directory_validation(parent_relative)
if resolved_destination.absolute.exists():
raise AppError(
code="already_exists",
message="Target path already exists",
status_code=409,
details={"path": resolved_destination.relative},
)
total_bytes = int(resolved_source.absolute.stat().st_size)
task_id = str(uuid.uuid4()) task_id = str(uuid.uuid4())
task = self._repository.create_task( task = self._repository.create_task(
operation="copy", operation="copy",
source=resolved_source.relative, source=item["source_relative"],
destination=resolved_destination.relative, destination=item["destination_relative"],
task_id=task_id, task_id=task_id,
) )
self._record_history( self._record_history(
entry_id=task_id, entry_id=task_id,
operation="copy", operation="copy",
status="queued", status="queued",
source=resolved_source.relative, source=item["source_relative"],
destination=resolved_destination.relative, destination=item["destination_relative"],
) )
if item["kind"] == "directory":
self._runner.enqueue_copy_directory(task_id=task["id"], item=item)
else:
self._runner.enqueue_copy_file( self._runner.enqueue_copy_file(
task_id=task["id"], task_id=task["id"],
source=str(resolved_source.absolute), source=item["source_absolute"],
destination=str(resolved_destination.absolute), destination=item["destination_absolute"],
total_bytes=total_bytes, total_bytes=item["total_bytes"],
current_item=item["files"][0]["label"],
) )
return TaskCreateResponse(task_id=task["id"], status=task["status"]) return TaskCreateResponse(task_id=task["id"], status=task["status"])
@@ -91,6 +68,152 @@ class CopyTaskService:
) )
raise raise
def create_batch_copy_task(self, sources: list[str] | None, destination_base: str | None) -> TaskCreateResponse:
    """Create one copy task covering 2+ sources, each copied into *destination_base*.

    Raises AppError(invalid_request, 400) for fewer than 2 sources or a
    missing destination base; per-item validation errors propagate from
    ``_build_copy_item``.
    """
    if not sources or len(sources) < 2:
        raise AppError(
            code="invalid_request",
            message="Batch copy requires at least 2 sources",
            status_code=400,
        )
    if not destination_base:
        raise AppError(
            code="invalid_request",
            message="Destination base is required",
            status_code=400,
        )
    resolved_destination_base = self._path_guard.resolve_directory_path(destination_base)
    items: list[dict] = []
    for source in sources:
        # NOTE(review): the source is resolved here for its basename and again
        # inside _build_copy_item — confirm the double resolution is intended.
        destination = self._join_destination_base(destination_base, self._path_guard.resolve_existing_path(source).absolute.name)
        item = self._build_copy_item(
            source=source,
            destination=destination,
            resolved_destination=resolved_destination_base,
            destination_base=destination_base,
            include_root_prefix=True,
        )
        items.append(item)
    task_id = str(uuid.uuid4())
    task = self._repository.create_task(
        operation="copy",
        source=f"{len(items)} items",
        destination=resolved_destination_base.relative,
        task_id=task_id,
    )
    self._record_history(
        entry_id=task_id,
        operation="copy",
        status="queued",
        source=f"{len(items)} items",
        destination=resolved_destination_base.relative,
    )
    self._runner.enqueue_copy_batch(
        task_id=task["id"],
        items=[
            {
                "source": item["source_absolute"],
                "destination": item["destination_absolute"],
                "kind": item["kind"],
                "files": item["files"],
                "directories": item["directories"],
            }
            for item in items
        ],
    )
    return TaskCreateResponse(task_id=task["id"], status=task["status"])
def _build_copy_item(
    self,
    source: str,
    destination: str,
    resolved_destination: ResolvedPath | None = None,
    destination_base: str | None = None,
    include_root_prefix: bool = False,
) -> dict:
    """Validate one copy source/destination pair and build its work plan.

    Returns a dict with resolved absolute/relative paths, the item kind
    ("file" or "directory"), ``total_bytes`` (files only, else None) and the
    flat ``files``/``directories`` plans consumed by the task runner.

    Raises AppError: type_conflict (409) for symlinks and unsupported path
    types, already_exists (409) when the target exists, invalid_request (400)
    when a directory would be copied into itself.
    """
    resolved_source = self._path_guard.resolve_existing_path(source)
    _, _, lexical_source, _ = self._path_guard.resolve_lexical_path(source)
    if lexical_source.is_symlink():
        raise AppError(
            code="type_conflict",
            message="Source must not be a symlink",
            status_code=409,
            details={"path": source},
        )
    source_is_file = resolved_source.absolute.is_file()
    source_is_directory = resolved_source.absolute.is_dir()
    if not source_is_file and not source_is_directory:
        raise AppError(
            code="type_conflict",
            message="Unsupported source path type",
            status_code=409,
            details={"path": source},
        )
    resolved_destination = resolved_destination or self._path_guard.resolve_path(destination)
    # Batch mode (destination_base given): destination is the base directory,
    # so the source's own name is appended to it.
    destination_absolute = (
        resolved_destination.absolute / resolved_source.absolute.name
        if destination_base is not None
        else resolved_destination.absolute
    )
    destination_relative = self._path_guard.entry_relative_path(
        resolved_destination.alias,
        destination_absolute,
        display_style=resolved_destination.display_style,
    )
    destination_parent = destination_absolute.parent
    parent_relative = self._path_guard.entry_relative_path(
        resolved_destination.alias,
        destination_parent,
        display_style=resolved_destination.display_style,
    )
    self._map_directory_validation(parent_relative)
    if destination_absolute.exists():
        raise AppError(
            code="already_exists",
            message="Target path already exists",
            status_code=409,
            details={"path": destination_relative},
        )
    if source_is_directory and self._is_nested_destination(resolved_source.absolute, destination_absolute):
        raise AppError(
            code="invalid_request",
            message="Destination cannot be inside source",
            status_code=400,
            details={"path": source, "destination": destination_relative},
        )
    if source_is_directory:
        directories, files = self._build_directory_plan(
            resolved_source=resolved_source,
            destination_root=destination_absolute,
            include_root_prefix=include_root_prefix,
        )
    else:
        files = [
            {
                "source": str(resolved_source.absolute),
                "destination": str(destination_absolute),
                "label": resolved_source.absolute.name,
            }
        ]
        directories = []
    return {
        "source_relative": resolved_source.relative,
        "destination_relative": destination_relative,
        "source_absolute": str(resolved_source.absolute),
        "destination_absolute": str(destination_absolute),
        "kind": "directory" if source_is_directory else "file",
        "total_bytes": int(resolved_source.absolute.stat().st_size) if source_is_file else None,
        "files": files,
        "directories": directories,
    }
def _map_directory_validation(self, relative_path: str) -> None: def _map_directory_validation(self, relative_path: str) -> None:
try: try:
self._path_guard.resolve_directory_path(relative_path) self._path_guard.resolve_directory_path(relative_path)
@@ -104,6 +227,82 @@ class CopyTaskService:
) )
raise raise
def _build_directory_plan(
    self,
    *,
    resolved_source: ResolvedPath,
    destination_root: Path,
    include_root_prefix: bool,
) -> tuple[list[dict[str, str]], list[dict[str, str]]]:
    """Flatten a source directory into (directories, files) copy plans.

    The root directory itself is the first directory entry so the runner can
    create parents before copying children. Any symlink in the tree aborts
    with AppError(type_conflict); ``os.walk`` never follows links.
    """
    directories: list[dict[str, str]] = [
        {
            "source": str(resolved_source.absolute),
            "destination": str(destination_root),
        }
    ]
    files: list[dict[str, str]] = []
    for root, dirnames, filenames in os.walk(resolved_source.absolute, followlinks=False):
        root_path = Path(root)
        # In-place sort of dirnames also fixes os.walk's traversal order.
        dirnames.sort(key=str.lower)
        filenames.sort(key=str.lower)
        for name in dirnames:
            entry = root_path / name
            if entry.is_symlink():
                raise AppError(
                    code="type_conflict",
                    message="Source directory must not contain symlinks",
                    status_code=409,
                    details={"path": resolved_source.relative},
                )
            relative = entry.relative_to(resolved_source.absolute)
            directories.append(
                {
                    "source": str(entry),
                    "destination": str(destination_root / relative),
                }
            )
        for name in filenames:
            entry = root_path / name
            if entry.is_symlink():
                raise AppError(
                    code="type_conflict",
                    message="Source directory must not contain symlinks",
                    status_code=409,
                    details={"path": resolved_source.relative},
                )
            relative = entry.relative_to(resolved_source.absolute)
            files.append(
                {
                    "source": str(entry),
                    "destination": str(destination_root / relative),
                    "label": self._progress_label(
                        top_level_name=resolved_source.absolute.name,
                        relative_path=relative,
                        include_root_prefix=include_root_prefix,
                    ),
                }
            )
    return directories, files
@staticmethod
def _progress_label(*, top_level_name: str, relative_path: Path, include_root_prefix: bool) -> str:
relative_value = relative_path.as_posix()
if not relative_value:
return top_level_name
return f"{top_level_name}/{relative_value}" if include_root_prefix else relative_value
@staticmethod
def _join_destination_base(destination_base: str, name: str) -> str:
return f"{destination_base.rstrip('/')}/{name}" if destination_base.rstrip("/") else f"/{name}"
@staticmethod
def _is_nested_destination(source: Path, destination: Path) -> bool:
try:
destination.relative_to(source)
return True
except ValueError:
return False
def _record_history(self, **kwargs) -> None: def _record_history(self, **kwargs) -> None:
if self._history_repository: if self._history_repository:
self._history_repository.create_entry(**kwargs) self._history_repository.create_entry(**kwargs)
@@ -0,0 +1,221 @@
from __future__ import annotations
import uuid
from datetime import datetime, timezone
from pathlib import Path
from backend.app.api.errors import AppError
from backend.app.api.schemas import TaskCreateResponse
from backend.app.db.history_repository import HistoryRepository
from backend.app.db.task_repository import TaskRepository
from backend.app.security.path_guard import PathGuard
from backend.app.tasks_runner import TaskRunner
class DeleteTaskService:
    """Creates "delete" tasks (single and batch) and their flat work plans."""

    def __init__(
        self,
        path_guard: PathGuard,
        repository: TaskRepository,
        runner: TaskRunner,
        history_repository: HistoryRepository | None = None,
    ):
        self._path_guard = path_guard
        self._repository = repository
        self._runner = runner
        self._history_repository = history_repository

    def create_delete_task(self, path: str | None, recursive: bool = False) -> TaskCreateResponse:
        """Validate *path*, create a "delete" task and enqueue its execution.

        Validation failures (AppError) and filesystem errors (OSError →
        io_error AppError) are recorded as failed history entries before
        being re-raised.
        """
        if not path:
            raise AppError(
                code="invalid_request",
                message="Query parameter 'path' is required",
                status_code=400,
            )
        try:
            item = self._build_delete_item(path=path, recursive=recursive)
            task_id = str(uuid.uuid4())
            task = self._repository.create_task(
                operation="delete",
                source=item["relative_path"],
                destination="",
                task_id=task_id,
            )
            self._record_history(
                entry_id=task_id,
                operation="delete",
                status="queued",
                path=item["relative_path"],
            )
            self._runner.enqueue_delete_path(task_id=task["id"], item=item)
            return TaskCreateResponse(task_id=task["id"], status=task["status"])
        except AppError as exc:
            self._record_history(
                operation="delete",
                status="failed",
                path=path,
                error_code=exc.code,
                error_message=exc.message,
                finished_at=self._now_iso(),
            )
            raise
        except OSError as exc:
            # Wrap unexpected filesystem errors as a 500 io_error.
            error = AppError(
                code="io_error",
                message="Filesystem operation failed",
                status_code=500,
                details={"reason": str(exc)},
            )
            self._record_history(
                operation="delete",
                status="failed",
                path=path,
                error_code=error.code,
                error_message=error.message,
                finished_at=self._now_iso(),
            )
            raise error

    def create_batch_delete_task(self, paths: list[str] | None, recursive_paths: list[str] | None = None) -> TaskCreateResponse:
        """Create one delete task covering 2+ paths; *recursive_paths* opt in per path.

        Raises AppError(invalid_request, 400) for fewer than 2 paths or when a
        recursive path is not part of the selection.
        """
        if not paths or len(paths) < 2:
            raise AppError(
                code="invalid_request",
                message="Batch delete requires at least 2 paths",
                status_code=400,
            )
        recursive_paths_set = set(recursive_paths or [])
        invalid_recursive = sorted(path for path in recursive_paths_set if path not in paths)
        if invalid_recursive:
            raise AppError(
                code="invalid_request",
                message="Recursive delete paths must be included in the batch selection",
                status_code=400,
                details={"path": invalid_recursive[0]},
            )
        try:
            items = [
                self._build_delete_item(
                    path=path,
                    recursive=path in recursive_paths_set,
                    include_root_prefix=True,
                )
                for path in paths
            ]
            task_id = str(uuid.uuid4())
            task = self._repository.create_task(
                operation="delete",
                source=f"{len(items)} items",
                destination="",
                task_id=task_id,
            )
            self._record_history(
                entry_id=task_id,
                operation="delete",
                status="queued",
                path=f"{len(items)} items",
            )
            self._runner.enqueue_delete_batch(task_id=task["id"], items=items)
            return TaskCreateResponse(task_id=task["id"], status=task["status"])
        except AppError as exc:
            self._record_history(
                operation="delete",
                status="failed",
                path=f"{len(paths or [])} items",
                error_code=exc.code,
                error_message=exc.message,
                finished_at=self._now_iso(),
            )
            raise
        except OSError as exc:
            error = AppError(
                code="io_error",
                message="Filesystem operation failed",
                status_code=500,
                details={"reason": str(exc)},
            )
            self._record_history(
                operation="delete",
                status="failed",
                path=f"{len(paths or [])} items",
                error_code=error.code,
                error_message=error.message,
                finished_at=self._now_iso(),
            )
            raise error

    def _build_delete_item(self, path: str, recursive: bool, include_root_prefix: bool = False) -> dict:
        """Resolve *path* and build the delete work plan for the task runner.

        Raises AppError: directory_not_empty (409) for a non-recursive delete
        of a populated directory, type_conflict (409) for unsupported types.
        """
        resolved_target = self._path_guard.resolve_existing_path(path)
        if resolved_target.absolute.is_file():
            label = resolved_target.absolute.name
            files = [{"path": str(resolved_target.absolute), "label": label}]
            directories: list[str] = []
            kind = "file"
        elif resolved_target.absolute.is_dir():
            kind = "directory"
            if not recursive and any(resolved_target.absolute.iterdir()):
                raise AppError(
                    code="directory_not_empty",
                    message="Directory is not empty",
                    status_code=409,
                    details={"path": resolved_target.relative},
                )
            if recursive:
                files, directories = self._build_recursive_delete_plan(
                    resolved_target.absolute,
                    include_root_prefix=include_root_prefix,
                )
            else:
                files = []
                directories = [str(resolved_target.absolute)]
        else:
            raise AppError(
                code="type_conflict",
                message="Unsupported path type for delete",
                status_code=409,
                details={"path": resolved_target.relative},
            )
        return {
            "target": str(resolved_target.absolute),
            "relative_path": resolved_target.relative,
            "kind": kind,
            "recursive": recursive,
            "files": files,
            "directories": directories,
            "progress_total_items": len(files),
            "progress_label": files[0]["label"] if files else None,
        }

    def _build_recursive_delete_plan(self, root: Path, include_root_prefix: bool = False) -> tuple[list[dict[str, str]], list[str]]:
        """Depth-first delete plan: files (with labels), then directories post-order.

        Directories are appended after their contents so the runner can remove
        them empty; *root* itself is appended last.
        """
        files: list[dict[str, str]] = []
        directories: list[str] = []
        start_prefix = Path(root.name) if include_root_prefix else Path()

        def walk(path: Path, relative_prefix: Path) -> None:
            # Deterministic, case-insensitive ordering for stable progress.
            for entry in sorted(path.iterdir(), key=lambda child: child.name.lower()):
                relative_path = relative_prefix / entry.name
                if entry.is_symlink():
                    # Symlinks are unlinked like files — never followed.
                    files.append({"path": str(entry), "label": relative_path.as_posix()})
                    continue
                if entry.is_dir():
                    walk(entry, relative_path)
                    directories.append(str(entry))
                    continue
                files.append({"path": str(entry), "label": relative_path.as_posix()})

        walk(root, start_prefix)
        directories.append(str(root))
        return files, directories

    def _record_history(self, **kwargs) -> None:
        # History is optional; skip silently when no repository is configured.
        if self._history_repository:
            self._history_repository.create_entry(**kwargs)

    @staticmethod
    def _now_iso() -> str:
        """Current UTC time as a 'Z'-suffixed ISO-8601 string (second precision)."""
        return datetime.now(timezone.utc).replace(microsecond=0).isoformat().replace("+00:00", "Z")
@@ -0,0 +1,266 @@
from __future__ import annotations
import os
import uuid
from datetime import datetime, timezone
from pathlib import Path
from backend.app.api.errors import AppError
from backend.app.api.schemas import TaskCreateResponse
from backend.app.db.history_repository import HistoryRepository
from backend.app.db.task_repository import TaskRepository
from backend.app.security.path_guard import PathGuard, ResolvedPath
from backend.app.tasks_runner import TaskRunner
class DuplicateTaskService:
    """Creates "duplicate" tasks: copy items beside themselves with " copy" names."""

    def __init__(self, path_guard: PathGuard, repository: TaskRepository, runner: TaskRunner, history_repository: HistoryRepository | None = None):
        self._path_guard = path_guard
        self._repository = repository
        self._runner = runner
        self._history_repository = history_repository

    def create_duplicate_task(self, paths: list[str] | None) -> TaskCreateResponse:
        """Validate *paths*, create a "duplicate" task and enqueue its execution.

        AppError failures are recorded as failed history entries before being
        re-raised. Entries whose name starts with "._" are silently skipped.
        """
        if not paths:
            raise AppError(
                code="invalid_request",
                message="At least 1 path is required",
                status_code=400,
            )
        try:
            items: list[dict[str, str]] = []
            # Destinations chosen so far — avoids name collisions within one batch.
            reserved_destinations: set[str] = set()
            for input_path in paths:
                item = self._build_duplicate_item(
                    input_path,
                    reserved_destinations,
                    include_root_prefix=len(paths) > 1,
                )
                if item is None:
                    continue
                reserved_destinations.add(item["destination_absolute"])
                items.append(item)
            source_summary = self._source_summary(paths, items)
            destination_summary = self._destination_summary(items)
            task_id = str(uuid.uuid4())
            task = self._repository.create_task(
                operation="duplicate",
                source=source_summary,
                destination=destination_summary,
                task_id=task_id,
            )
            self._record_history(
                entry_id=task_id,
                operation="duplicate",
                status="queued",
                source=source_summary,
                destination=destination_summary,
            )
            self._runner.enqueue_duplicate_batch(
                task_id=task["id"],
                items=[
                    {
                        "source": item["source_absolute"],
                        "destination": item["destination_absolute"],
                        "kind": item["kind"],
                        "files": item["files"],
                        "directories": item["directories"],
                    }
                    for item in items
                ],
            )
            return TaskCreateResponse(task_id=task["id"], status=task["status"])
        except AppError as exc:
            self._record_history(
                operation="duplicate",
                status="failed",
                source=paths[0] if len(paths) == 1 else f"{len(paths)} items",
                destination="same directory",
                error_code=exc.code,
                error_message=exc.message,
                finished_at=self._now_iso(),
            )
            raise

    def _build_duplicate_item(
        self,
        source: str,
        reserved_destinations: set[str],
        *,
        include_root_prefix: bool,
    ) -> dict[str, str] | None:
        """Build the work plan for one duplicate; None when the name is skipped.

        Raises AppError(type_conflict, 409) for symlinks and unsupported
        path types.
        """
        resolved_source = self._path_guard.resolve_existing_path(source)
        _, _, lexical_source, _ = self._path_guard.resolve_lexical_path(source)
        if self._should_skip_name(lexical_source.name):
            return None
        if lexical_source.is_symlink():
            raise AppError(
                code="type_conflict",
                message="Source must not be a symlink",
                status_code=409,
                details={"path": source},
            )
        source_is_file = resolved_source.absolute.is_file()
        source_is_directory = resolved_source.absolute.is_dir()
        if not source_is_file and not source_is_directory:
            raise AppError(
                code="type_conflict",
                message="Unsupported source path type",
                status_code=409,
                details={"path": source},
            )
        destination_absolute = self._next_duplicate_destination(resolved_source.absolute, reserved_destinations)
        destination_relative = self._path_guard.entry_relative_path(
            resolved_source.alias,
            destination_absolute,
            display_style=resolved_source.display_style,
        )
        if source_is_directory:
            directories, files = self._build_directory_plan(
                resolved_source=resolved_source,
                destination_root=destination_absolute,
                include_root_prefix=include_root_prefix,
            )
        else:
            files = [
                {
                    "source": str(resolved_source.absolute),
                    "destination": str(destination_absolute),
                    "label": resolved_source.absolute.name,
                }
            ]
            directories = []
        return {
            "source_relative": resolved_source.relative,
            "destination_relative": destination_relative,
            "source_absolute": str(resolved_source.absolute),
            "destination_absolute": str(destination_absolute),
            "kind": "directory" if source_is_directory else "file",
            "files": files,
            "directories": directories,
        }

    def _build_directory_plan(
        self,
        *,
        resolved_source: ResolvedPath,
        destination_root: Path,
        include_root_prefix: bool,
    ) -> tuple[list[dict[str, str]], list[dict[str, str]]]:
        """Flatten a directory into (directories, files) plans, skipping "._*" names.

        Any symlink in the tree aborts with AppError(type_conflict);
        ``os.walk`` never follows links.
        """
        directories: list[dict[str, str]] = [
            {
                "source": str(resolved_source.absolute),
                "destination": str(destination_root),
            }
        ]
        files: list[dict[str, str]] = []
        for root, dirnames, filenames in os.walk(resolved_source.absolute, followlinks=False):
            # Prune skipped directories in place so os.walk never descends into them.
            dirnames[:] = [name for name in dirnames if not self._should_skip_name(name)]
            dirnames.sort(key=str.lower)
            filenames = sorted(filenames, key=str.lower)
            root_path = Path(root)
            for name in dirnames:
                entry = root_path / name
                if entry.is_symlink():
                    raise AppError(
                        code="type_conflict",
                        message="Source directory must not contain symlinks",
                        status_code=409,
                        details={"path": resolved_source.relative},
                    )
                relative = entry.relative_to(resolved_source.absolute)
                directories.append(
                    {
                        "source": str(entry),
                        "destination": str(destination_root / relative),
                    }
                )
            for name in filenames:
                if self._should_skip_name(name):
                    continue
                entry = root_path / name
                if entry.is_symlink():
                    raise AppError(
                        code="type_conflict",
                        message="Source directory must not contain symlinks",
                        status_code=409,
                        details={"path": resolved_source.relative},
                    )
                relative = entry.relative_to(resolved_source.absolute)
                files.append(
                    {
                        "source": str(entry),
                        "destination": str(destination_root / relative),
                        "label": self._progress_label(
                            top_level_name=resolved_source.absolute.name,
                            relative_path=relative,
                            include_root_prefix=include_root_prefix,
                        ),
                    }
                )
        return directories, files

    @staticmethod
    def _progress_label(*, top_level_name: str, relative_path: Path, include_root_prefix: bool) -> str:
        """Progress label: "root/rel" in multi-item batches, "rel" otherwise."""
        relative_value = relative_path.as_posix()
        if not relative_value:
            return top_level_name
        return f"{top_level_name}/{relative_value}" if include_root_prefix else relative_value

    @classmethod
    def _next_duplicate_destination(cls, source: Path, reserved_destinations: set[str]) -> Path:
        """First free " copy"/" copy N" sibling neither on disk nor reserved."""
        parent = source.parent
        candidate_index = 1
        while True:
            candidate_name = cls._duplicate_name(source.name, source.is_file(), candidate_index)
            candidate = parent / candidate_name
            if not candidate.exists() and str(candidate) not in reserved_destinations:
                return candidate
            candidate_index += 1

    @classmethod
    def _duplicate_name(cls, original_name: str, is_file: bool, index: int) -> str:
        """Insert " copy"/" copy N" before the full (multi-part) file suffix.

        E.g. "a.tar.gz" → "a copy.tar.gz"; directories get the suffix at the
        end of the name.
        """
        if not is_file:
            suffix = ""
            base_name = original_name
        else:
            suffixes = Path(original_name).suffixes
            suffix = "".join(suffixes)
            base_name = original_name[: -len(suffix)] if suffix else original_name
        copy_suffix = " copy" if index == 1 else f" copy {index}"
        return f"{base_name}{copy_suffix}{suffix}"

    @staticmethod
    def _should_skip_name(name: str) -> bool:
        # AppleDouble ("._*") companions are never duplicated.
        return name.startswith("._")

    @staticmethod
    def _source_summary(paths: list[str], items: list[dict[str, str]]) -> str:
        """History "source" text: the single path, or an item count."""
        if len(paths) == 1:
            return paths[0]
        if not items:
            return "0 items"
        return DuplicateTaskService._items_label(len(items))

    @staticmethod
    def _destination_summary(items: list[dict[str, str]]) -> str:
        """History "destination" text: the single destination, else "same directory"."""
        if len(items) == 1:
            return items[0]["destination_relative"]
        return "same directory"

    @staticmethod
    def _items_label(count: int) -> str:
        return "1 item" if count == 1 else f"{count} items"

    def _record_history(self, **kwargs) -> None:
        # History is optional; skip silently when no repository is configured.
        if self._history_repository:
            self._history_repository.create_entry(**kwargs)

    @staticmethod
    def _now_iso() -> str:
        """Current UTC time as a 'Z'-suffixed ISO-8601 string (second precision)."""
        return datetime.now(timezone.utc).replace(microsecond=0).isoformat().replace("+00:00", "Z")
+407 -6
View File
@@ -1,6 +1,13 @@
from __future__ import annotations from __future__ import annotations
import os
import time
import zipfile
from dataclasses import dataclass
from datetime import datetime, timezone
from io import BytesIO
from pathlib import Path from pathlib import Path
from typing import Callable
from backend.app.api.errors import AppError from backend.app.api.errors import AppError
from backend.app.api.schemas import DeleteResponse, FileInfoResponse, MkdirResponse, RenameResponse, SaveResponse, UploadResponse, ViewResponse from backend.app.api.schemas import DeleteResponse, FileInfoResponse, MkdirResponse, RenameResponse, SaveResponse, UploadResponse, ViewResponse
@@ -13,6 +20,7 @@ TEXT_EDIT_MAX_BYTES = 256 * 1024
TEXT_CONTENT_TYPES = { TEXT_CONTENT_TYPES = {
".txt": "text/plain", ".txt": "text/plain",
".log": "text/plain", ".log": "text/plain",
".conf": "text/plain",
".md": "text/markdown", ".md": "text/markdown",
".yml": "text/yaml", ".yml": "text/yaml",
".yaml": "text/yaml", ".yaml": "text/yaml",
@@ -50,11 +58,37 @@ PDF_CONTENT_TYPES = {
} }
@dataclass(frozen=True)
class ZipDownloadPreflightLimits:
max_items: int = 1000
max_total_input_bytes: int = 2 * 1024 * 1024 * 1024
max_individual_file_bytes: int = 500 * 1024 * 1024
scan_timeout_seconds: float = 10.0
@dataclass
class ZipDownloadPreflightState:
item_count: int = 0
total_input_bytes: int = 0
ZIP_DOWNLOAD_PREFLIGHT_LIMITS = ZipDownloadPreflightLimits()
class FileOpsService: class FileOpsService:
def __init__(self, path_guard: PathGuard, filesystem: FilesystemAdapter, history_repository: HistoryRepository | None = None): def __init__(
self,
path_guard: PathGuard,
filesystem: FilesystemAdapter,
history_repository: HistoryRepository | None = None,
zip_download_preflight_limits: ZipDownloadPreflightLimits = ZIP_DOWNLOAD_PREFLIGHT_LIMITS,
monotonic: Callable[[], float] | None = None,
):
self._path_guard = path_guard self._path_guard = path_guard
self._filesystem = filesystem self._filesystem = filesystem
self._history_repository = history_repository self._history_repository = history_repository
self._zip_download_preflight_limits = zip_download_preflight_limits
self._monotonic = monotonic or time.monotonic
def mkdir(self, parent_path: str, name: str) -> MkdirResponse: def mkdir(self, parent_path: str, name: str) -> MkdirResponse:
try: try:
@@ -158,7 +192,7 @@ class FileOpsService:
self._record_history_error(operation="rename", source=path, destination=new_name, path=path, error=error) self._record_history_error(operation="rename", source=path, destination=new_name, path=path, error=error)
raise error raise error
def delete(self, path: str) -> DeleteResponse: def delete(self, path: str, recursive: bool = False) -> DeleteResponse:
try: try:
resolved_target = self._path_guard.resolve_existing_path(path) resolved_target = self._path_guard.resolve_existing_path(path)
@@ -166,12 +200,15 @@ class FileOpsService:
self._filesystem.delete_file(resolved_target.absolute) self._filesystem.delete_file(resolved_target.absolute)
elif resolved_target.absolute.is_dir(): elif resolved_target.absolute.is_dir():
if not self._filesystem.is_directory_empty(resolved_target.absolute): if not self._filesystem.is_directory_empty(resolved_target.absolute):
if not recursive:
raise AppError( raise AppError(
code="directory_not_empty", code="directory_not_empty",
message="Directory is not empty", message="Directory is not empty",
status_code=409, status_code=409,
details={"path": resolved_target.relative}, details={"path": resolved_target.relative},
) )
self._filesystem.delete_directory_recursive(resolved_target.absolute)
else:
self._filesystem.delete_empty_directory(resolved_target.absolute) self._filesystem.delete_empty_directory(resolved_target.absolute)
else: else:
raise AppError( raise AppError(
@@ -204,7 +241,7 @@ class FileOpsService:
self._record_history_error(operation="delete", path=path, error=error) self._record_history_error(operation="delete", path=path, error=error)
raise error raise error
def upload(self, target_path: str, upload_file) -> UploadResponse: def upload(self, target_path: str, upload_file, overwrite: bool = False) -> UploadResponse:
destination_relative = None destination_relative = None
history_path = target_path history_path = target_path
try: try:
@@ -216,14 +253,26 @@ class FileOpsService:
resolved_destination = self._path_guard.resolve_path(destination_relative) resolved_destination = self._path_guard.resolve_path(destination_relative)
if resolved_destination.absolute.exists(): if resolved_destination.absolute.exists():
if not overwrite:
raise AppError( raise AppError(
code="already_exists", code="already_exists",
message="Target path already exists", message="Target path already exists",
status_code=409, status_code=409,
details={"path": resolved_destination.relative}, details={"path": resolved_destination.relative},
) )
if resolved_destination.absolute.is_dir():
raise AppError(
code="type_conflict",
message="Cannot overwrite an existing directory",
status_code=409,
details={"path": resolved_destination.relative},
)
saved = self._filesystem.write_uploaded_file(resolved_destination.absolute, upload_file.file) saved = self._filesystem.write_uploaded_file(
resolved_destination.absolute,
upload_file.file,
overwrite=overwrite,
)
self._record_history( self._record_history(
operation="upload", operation="upload",
status="completed", status="completed",
@@ -338,6 +387,81 @@ class FileOpsService:
height=metadata["height"], height=metadata["height"],
) )
def prepare_download(self, paths: list[str]) -> dict:
    """Prepare a download response for *paths*, recording it in history.

    Only the single-file mode is served directly here; zip-style downloads
    (multiple paths or a directory) must go through the separate prepare
    flow and are rejected with ``invalid_request``. Every outcome
    (requested / ready / failed) is mirrored into the history repository.

    Raises ``AppError`` for empty input, non-single-file selections, path
    resolution failures, and wraps ``OSError`` as ``io_error`` (500).
    """
    history_entry_id: str | None = None
    # Provisional history metadata derived from the raw request;
    # refined below once the paths have been resolved.
    history_mode = self._download_mode_from_request_paths(paths)
    history_path = self._summarize_download_targets(paths)
    history_download_name: str | None = None
    if not paths:
        error = AppError(
            code="invalid_request",
            message="At least one path is required",
            status_code=400,
        )
        self._record_download_failure(
            mode=history_mode,
            path_summary=history_path,
            download_name=None,
            error=error,
            history_entry_id=None,
        )
        raise error
    try:
        resolved_targets = [self._path_guard.resolve_existing_path(path) for path in paths]
        # Re-derive history fields from the resolved targets (accurate mode/name).
        history_mode = self._download_mode_from_resolved_targets(resolved_targets)
        history_path = self._summarize_download_targets([target.relative for target in resolved_targets])
        history_download_name = self._download_name_for_targets(resolved_targets)
        if history_mode != "single_file":
            # Archive (zip) downloads go through the dedicated prepare endpoint.
            raise AppError(
                code="invalid_request",
                message="Archive downloads must be prepared first",
                status_code=400,
            )
        history_entry_id = self._record_download_status(
            status="requested",
            mode=history_mode,
            path_summary=history_path,
            download_name=history_download_name,
        )
        prepared = self._prepare_single_file_download(resolved_targets[0])
        self._record_download_status(
            status="ready",
            mode=history_mode,
            path_summary=history_path,
            download_name=history_download_name,
            history_entry_id=history_entry_id,
        )
        return prepared
    except AppError as error:
        self._record_download_failure(
            mode=history_mode,
            path_summary=history_path,
            download_name=history_download_name,
            error=error,
            history_entry_id=history_entry_id,
        )
        raise
    except OSError as exc:
        # Wrap low-level filesystem failures in the generic io_error shape.
        error = AppError(
            code="io_error",
            message="Filesystem operation failed",
            status_code=500,
            details={"reason": str(exc)},
        )
        self._record_download_failure(
            mode=history_mode,
            path_summary=history_path,
            download_name=history_download_name,
            error=error,
            history_entry_id=history_entry_id,
        )
        raise error
def save(self, path: str, content: str, expected_modified: str) -> SaveResponse: def save(self, path: str, content: str, expected_modified: str) -> SaveResponse:
resolved_target = self._path_guard.resolve_existing_path(path) resolved_target = self._path_guard.resolve_existing_path(path)
@@ -619,9 +743,286 @@ class FileOpsService:
@staticmethod @staticmethod
def _now_iso() -> str: def _now_iso() -> str:
from datetime import datetime, timezone
return datetime.now(timezone.utc).replace(microsecond=0).isoformat().replace("+00:00", "Z") return datetime.now(timezone.utc).replace(microsecond=0).isoformat().replace("+00:00", "Z")
def _prepare_single_file_download(self, resolved_target) -> dict:
    """Build the streaming response dict for one regular file.

    Re-checks the lexical path for a symlink before serving so a link
    cannot redirect the download outside the guarded roots. Returns a dict
    with ``content`` (filesystem stream), ``headers`` and ``content_type``.
    """
    _, _, lexical_source, _ = self._path_guard.resolve_lexical_path(resolved_target.relative)
    if lexical_source.is_symlink():
        raise AppError(
            code="type_conflict",
            message="Source must not be a symlink",
            status_code=409,
            details={"path": resolved_target.relative},
        )
    return {
        "content": self._filesystem.stream_file(resolved_target.absolute),
        "headers": {
            # Quoted filename keeps names containing spaces intact.
            "Content-Disposition": f'attachment; filename="{resolved_target.absolute.name}"',
        },
        # Fall back to the generic binary type when no mapping exists.
        "content_type": self._content_type_for(resolved_target.absolute) or "application/octet-stream",
    }
def _prepare_zip_download(self, resolved_targets: list, download_name: str) -> dict:
    """Build a zip archive of *resolved_targets* and return a response dict.

    Validates distinct top-level names and runs the size/count/timeout
    preflight first. The archive is assembled fully in memory (BytesIO) —
    acceptable because the preflight caps total input size — and streamed
    back as a single chunk via an async generator.
    """
    self._validate_zip_download_archive_names(resolved_targets)
    self._run_zip_download_preflight(resolved_targets)
    buffer = BytesIO()
    with zipfile.ZipFile(buffer, "w", compression=zipfile.ZIP_DEFLATED) as archive:
        for resolved_target in resolved_targets:
            self._write_download_target_to_zip(archive, resolved_target)
    payload = buffer.getvalue()
    # Single-chunk async generator so the caller can treat this like any stream.
    async def _stream_zip():
        yield payload
    return {
        "content": _stream_zip(),
        "headers": {
            "Content-Disposition": f'attachment; filename="{download_name}"',
        },
        "content_type": "application/zip",
    }
def _validate_zip_download_archive_names(self, resolved_targets: list) -> None:
    """Reject selections whose top-level names would collide inside the zip."""
    seen: set[str] = set()
    for target in resolved_targets:
        top_level = target.absolute.name
        if top_level in seen:
            raise AppError(
                code="invalid_request",
                message="Selected items must have distinct top-level names",
                status_code=400,
            )
        seen.add(top_level)
def _download_name_for_targets(self, resolved_targets: list) -> str:
    """Pick the download filename: the file's own name, "<dir>.zip", or a timestamped zip."""
    if len(resolved_targets) == 1:
        sole = resolved_targets[0].absolute
        if sole.is_file():
            return sole.name
        if sole.is_dir():
            return f"{sole.name}.zip"
    stamp = datetime.now(timezone.utc).strftime('%Y%m%d-%H%M%S')
    return f"kodidownload-{stamp}.zip"
@staticmethod
def _download_mode_from_request_paths(paths: list[str]) -> str:
return "multi_zip" if len(paths) > 1 else "single_file"
@staticmethod
def _download_mode_from_resolved_targets(resolved_targets: list) -> str:
if len(resolved_targets) == 1 and resolved_targets[0].absolute.is_file():
return "single_file"
if len(resolved_targets) == 1 and resolved_targets[0].absolute.is_dir():
return "single_directory_zip"
return "multi_zip"
@staticmethod
def _summarize_download_targets(paths: list[str]) -> str:
if not paths:
return "-"
if len(paths) == 1:
return paths[0]
if len(paths) == 2:
return f"{paths[0]}, {paths[1]}"
return f"{paths[0]}, {paths[1]}, +{len(paths) - 2} more"
def _record_download_status(
    self,
    *,
    status: str,
    mode: str,
    path_summary: str,
    download_name: str | None,
    history_entry_id: str | None = None,
) -> str | None:
    """Create or update the history entry for a download and return its id.

    When *history_entry_id* is given the existing entry is updated in
    place (clearing any error fields); otherwise a new "download" entry is
    created. Returns the entry id so callers can chain later updates, or
    the input id unchanged when no history repository is configured.
    """
    if not self._history_repository:
        return history_entry_id
    if history_entry_id:
        self._history_repository.update_entry(
            entry_id=history_entry_id,
            status=status,
            error_code=None,
            error_message=None,
            finished_at=self._now_iso(),
        )
        return history_entry_id
    created = self._history_repository.create_entry(
        operation="download",
        status=status,
        source=mode,
        destination=download_name,
        path=path_summary,
        # "requested" entries stay open; any other status closes them now.
        finished_at=self._now_iso() if status != "requested" else None,
    )
    return created["id"]
def _record_download_failure(
    self,
    *,
    mode: str,
    path_summary: str,
    download_name: str | None,
    error: AppError,
    history_entry_id: str | None,
) -> None:
    """Record a failed download in history, updating an existing entry when known.

    Preflight failures get the dedicated "preflight_failed" status so they
    can be distinguished from generic failures. No-op without a history
    repository.
    """
    if not self._history_repository:
        return
    failure_status = "preflight_failed" if error.code == "download_preflight_failed" else "failed"
    if history_entry_id:
        self._history_repository.update_entry(
            entry_id=history_entry_id,
            status=failure_status,
            error_code=error.code,
            error_message=error.message,
            finished_at=self._now_iso(),
        )
        return
    self._history_repository.create_entry(
        operation="download",
        status=failure_status,
        source=mode,
        destination=download_name,
        path=path_summary,
        error_code=error.code,
        error_message=error.message,
        finished_at=self._now_iso(),
    )
def _run_zip_download_preflight(self, resolved_targets: list) -> None:
    """Validate all zip targets against the configured limits before archiving.

    Enforces (via ``_raise_zip_download_preflight_error``): no symlinks,
    supported path types only, max item count, per-file and total byte
    caps, and an overall scan time budget measured from a single start
    timestamp across all targets.
    """
    started_at = self._monotonic()
    state = ZipDownloadPreflightState()
    for resolved_target in resolved_targets:
        self._ensure_zip_download_preflight_within_timeout(started_at)
        self._validate_zip_download_root_target(resolved_target)
        if resolved_target.absolute.is_file():
            self._record_zip_download_file(
                state=state,
                entry_path=resolved_target.absolute,
                entry_relative=resolved_target.relative,
            )
            continue
        # Directory root counts as one item, then its contents are scanned.
        self._increment_zip_download_item_count(
            state=state,
            entry_relative=resolved_target.relative,
        )
        self._scan_zip_download_directory(
            state=state,
            resolved_target=resolved_target,
            started_at=started_at,
        )
def _validate_zip_download_root_target(self, resolved_target) -> None:
    """Ensure a selected root is a plain file or directory.

    Rejects symlinks (checked on the lexical, unresolved path) and any
    other path type (sockets, fifos, missing paths) via the preflight error.
    """
    _, _, lexical_source, _ = self._path_guard.resolve_lexical_path(resolved_target.relative)
    if lexical_source.is_symlink():
        self._raise_zip_download_preflight_error(
            reason="symlink_detected",
            details={"path": resolved_target.relative},
        )
    if resolved_target.absolute.is_file() or resolved_target.absolute.is_dir():
        return
    self._raise_zip_download_preflight_error(
        reason="unsupported_path_type",
        details={"path": resolved_target.relative},
    )
def _scan_zip_download_directory(self, state: ZipDownloadPreflightState, resolved_target, started_at: float) -> None:
    """Walk a directory target, accumulating preflight counters for every entry.

    Symlinks anywhere in the tree abort the preflight; directories only
    bump the item count while files also go through the size checks.
    The timeout is re-checked per entry so huge trees cannot stall the scan.
    """
    for root, dirnames, filenames in os.walk(resolved_target.absolute, followlinks=False):
        root_path = Path(root)
        # Deterministic traversal order for reproducible error reporting.
        dirnames.sort()
        filenames.sort()
        for name in [*dirnames, *filenames]:
            self._ensure_zip_download_preflight_within_timeout(started_at)
            entry_path = root_path / name
            relative_suffix = entry_path.relative_to(resolved_target.absolute).as_posix()
            entry_relative = self._join_relative(resolved_target.relative, relative_suffix)
            if entry_path.is_symlink():
                self._raise_zip_download_preflight_error(
                    reason="symlink_detected",
                    details={"path": entry_relative},
                )
            if entry_path.is_dir():
                self._increment_zip_download_item_count(state=state, entry_relative=entry_relative)
                continue
            self._record_zip_download_file(
                state=state,
                entry_path=entry_path,
                entry_relative=entry_relative,
            )
def _record_zip_download_file(
    self,
    *,
    state: ZipDownloadPreflightState,
    entry_path: Path,
    entry_relative: str,
) -> None:
    """Count one file toward the preflight totals and enforce size limits.

    Bumps the item count, then checks the file's size against the
    per-file cap and the running total against the overall input cap,
    raising the preflight error when either is exceeded.
    """
    self._increment_zip_download_item_count(state=state, entry_relative=entry_relative)
    file_size = int(entry_path.stat().st_size)
    if file_size > self._zip_download_preflight_limits.max_individual_file_bytes:
        self._raise_zip_download_preflight_error(
            reason="max_individual_file_size_exceeded",
            details={
                "path": entry_relative,
                "limit_bytes": str(self._zip_download_preflight_limits.max_individual_file_bytes),
                "actual_bytes": str(file_size),
            },
        )
    state.total_input_bytes += file_size
    if state.total_input_bytes > self._zip_download_preflight_limits.max_total_input_bytes:
        self._raise_zip_download_preflight_error(
            reason="max_total_input_bytes_exceeded",
            details={
                "limit_bytes": str(self._zip_download_preflight_limits.max_total_input_bytes),
                "actual_bytes": str(state.total_input_bytes),
            },
        )
def _increment_zip_download_item_count(self, *, state: ZipDownloadPreflightState, entry_relative: str) -> None:
    """Bump the preflight item counter; fail when the max-items cap is exceeded."""
    state.item_count += 1
    if state.item_count > self._zip_download_preflight_limits.max_items:
        self._raise_zip_download_preflight_error(
            reason="max_items_exceeded",
            details={
                # entry_relative identifies the item that tipped over the limit.
                "path": entry_relative,
                "limit": str(self._zip_download_preflight_limits.max_items),
                "actual": str(state.item_count),
            },
        )
def _ensure_zip_download_preflight_within_timeout(self, started_at: float) -> None:
    """Abort the preflight scan once it exceeds the configured time budget."""
    budget = self._zip_download_preflight_limits.scan_timeout_seconds
    elapsed = self._monotonic() - started_at
    if elapsed > budget:
        self._raise_zip_download_preflight_error(
            reason="preflight_timeout",
            details={"timeout_seconds": str(budget)},
        )
@staticmethod
def _raise_zip_download_preflight_error(reason: str, details: dict[str, str]) -> None:
    """Raise the canonical 409 preflight failure with *reason* merged into details."""
    merged_details = {"reason": reason, **details}
    raise AppError(
        code="download_preflight_failed",
        message="Zip download preflight failed",
        status_code=409,
        details=merged_details,
    )
def _write_download_target_to_zip(self, archive: zipfile.ZipFile, resolved_target, on_each_item=None) -> None:
    """Write one resolved target (file or directory tree) into *archive*.

    Files are stored under their own name; directories are stored under
    their name as a prefix, with explicit entries for subdirectories.
    *on_each_item* (optional callback) is invoked once per written item,
    e.g. for progress accounting. Sorting rglob output guarantees parent
    directory entries precede their children.
    """
    root_name = resolved_target.absolute.name
    if resolved_target.absolute.is_file():
        if on_each_item:
            on_each_item()
        archive.write(resolved_target.absolute, arcname=root_name)
        return
    # Explicit directory entry so empty directories survive the round-trip.
    archive.writestr(f"{root_name}/", b"")
    for child in sorted(resolved_target.absolute.rglob("*")):
        if on_each_item:
            on_each_item()
        arcname = f"{root_name}/{child.relative_to(resolved_target.absolute).as_posix()}"
        if child.is_dir():
            archive.writestr(f"{arcname}/", b"")
        else:
            archive.write(child, arcname=arcname)
@staticmethod @staticmethod
def _parse_range_header(range_header: str, file_size: int) -> tuple[int, int]: def _parse_range_header(range_header: str, file_size: int) -> tuple[int, int]:
def invalid_range() -> AppError: def invalid_range() -> AppError:
@@ -45,11 +45,7 @@ class MoveTaskService:
) )
if item["kind"] == "directory": if item["kind"] == "directory":
self._runner.enqueue_move_directory( self._runner.enqueue_move_directory(task_id=task["id"], item=item)
task_id=task["id"],
source=item["source_absolute"],
destination=item["destination_absolute"],
)
else: else:
self._runner.enqueue_move_file( self._runner.enqueue_move_file(
task_id=task["id"], task_id=task["id"],
@@ -57,6 +53,7 @@ class MoveTaskService:
destination=item["destination_absolute"], destination=item["destination_absolute"],
total_bytes=item["total_bytes"], total_bytes=item["total_bytes"],
same_root=item["same_root"], same_root=item["same_root"],
current_item=item["files"][0]["label"],
) )
return TaskCreateResponse(task_id=task["id"], status=task["status"]) return TaskCreateResponse(task_id=task["id"], status=task["status"])
@@ -98,10 +95,11 @@ class MoveTaskService:
) )
root_alias = next(iter(source_aliases)) root_alias = next(iter(source_aliases))
if root_alias != resolved_destination_base.alias: has_directory = any(resolved_source.absolute.is_dir() for resolved_source in resolved_sources)
if root_alias != resolved_destination_base.alias and has_directory:
raise AppError( raise AppError(
code="invalid_request", code="invalid_request",
message="Cross-root batch directory move is not supported in v1", message="Cross-root batch move with directories is not supported in v1",
status_code=400, status_code=400,
details={"destination_base": destination_base}, details={"destination_base": destination_base},
) )
@@ -113,6 +111,7 @@ class MoveTaskService:
destination=destination, destination=destination,
resolved_destination=resolved_destination_base, resolved_destination=resolved_destination_base,
destination_base=destination_base, destination_base=destination_base,
include_root_prefix=True,
) )
items.append(item) items.append(item)
@@ -137,6 +136,11 @@ class MoveTaskService:
"source": item["source_absolute"], "source": item["source_absolute"],
"destination": item["destination_absolute"], "destination": item["destination_absolute"],
"kind": item["kind"], "kind": item["kind"],
"same_root": item["same_root"],
"files": item["files"],
"directories": item["directories"],
"progress_total_items": item["progress_total_items"],
"progress_label": item["progress_label"],
} }
for item in items for item in items
], ],
@@ -149,6 +153,7 @@ class MoveTaskService:
destination: str, destination: str,
resolved_destination: ResolvedPath | None = None, resolved_destination: ResolvedPath | None = None,
destination_base: str | None = None, destination_base: str | None = None,
include_root_prefix: bool = False,
) -> dict: ) -> dict:
resolved_source = self._path_guard.resolve_existing_path(source) resolved_source = self._path_guard.resolve_existing_path(source)
_, _, lexical_source, _ = self._path_guard.resolve_lexical_path(source) _, _, lexical_source, _ = self._path_guard.resolve_lexical_path(source)
@@ -224,6 +229,23 @@ class MoveTaskService:
details={"path": source, "destination": destination_relative}, details={"path": source, "destination": destination_relative},
) )
progress_label = resolved_source.absolute.name
if source_is_directory:
files = []
directories = []
if include_root_prefix:
progress_label = resolved_source.absolute.name
else:
files = [
{
"source": str(resolved_source.absolute),
"destination": str(destination_absolute),
"label": resolved_source.absolute.name,
}
]
directories = []
progress_label = files[0]["label"]
return { return {
"source_relative": resolved_source.relative, "source_relative": resolved_source.relative,
"destination_relative": destination_relative, "destination_relative": destination_relative,
@@ -232,6 +254,10 @@ class MoveTaskService:
"kind": "directory" if source_is_directory else "file", "kind": "directory" if source_is_directory else "file",
"same_root": same_root, "same_root": same_root,
"total_bytes": int(resolved_source.absolute.stat().st_size) if source_is_file else None, "total_bytes": int(resolved_source.absolute.stat().st_size) if source_is_file else None,
"files": files,
"directories": directories,
"progress_total_items": 1,
"progress_label": progress_label,
} }
def _map_directory_validation(self, relative_path: str) -> None: def _map_directory_validation(self, relative_path: str) -> None:
@@ -0,0 +1,201 @@
from __future__ import annotations
from urllib.parse import urlencode
import httpx
from backend.app.api.errors import AppError
from backend.app.api.schemas import BrowseResponse, DirectoryEntry, FileEntry, RemoteClientItem
from backend.app.services.remote_client_service import RemoteClientService
class RemoteBrowseService:
    """Read-only browse facade for the virtual /Clients tree.

    Three levels are served: ``/Clients`` lists the registered remote
    clients, ``/Clients/<client_id>`` lists that client's shares, and any
    deeper path is proxied to the client's agent over HTTP. All transport
    and agent errors are mapped onto ``AppError`` codes so callers see the
    same error shape as local browsing.
    """
    # Virtual root under which all remote clients are exposed.
    ROOT_PATH = "/Clients"
    def __init__(
        self,
        remote_client_service: RemoteClientService,
        agent_auth_header: str,
        agent_auth_scheme: str,
        agent_auth_token: str,
        agent_timeout_seconds: float = 2.0,
    ):
        self._remote_client_service = remote_client_service
        # Blank config values fall back to conventional HTTP auth defaults.
        self._agent_auth_header = (agent_auth_header or "Authorization").strip() or "Authorization"
        self._agent_auth_scheme = (agent_auth_scheme or "Bearer").strip() or "Bearer"
        self._agent_auth_token = (agent_auth_token or "").strip()
        # Clamp to 0.1s so a zero/negative config cannot disable the timeout.
        self._agent_timeout_seconds = max(0.1, float(agent_timeout_seconds))
    @classmethod
    def handles_path(cls, path: str) -> bool:
        """True when *path* is the /Clients root or any path beneath it."""
        normalized = (path or "").strip()
        return normalized == cls.ROOT_PATH or normalized.startswith(f"{cls.ROOT_PATH}/")
    def browse(self, path: str, show_hidden: bool) -> BrowseResponse:
        """Dispatch to the right level: clients root, one client, or a remote share."""
        parts = self._path_parts(path)
        if not parts:
            return self._browse_clients_root()
        if len(parts) == 1:
            return self._browse_client(parts[0])
        return self._browse_remote_share(parts[0], parts[1], parts[2:], show_hidden)
    @classmethod
    def _path_parts(cls, path: str) -> list[str]:
        """Split a /Clients path into [client_id, share_key, *relative_parts]."""
        normalized = (path or "").strip().rstrip("/")
        if normalized == cls.ROOT_PATH:
            return []
        return normalized[len(cls.ROOT_PATH) + 1 :].split("/")
    def _browse_clients_root(self) -> BrowseResponse:
        """List every registered remote client as a virtual directory."""
        clients = self._remote_client_service.list_clients().items
        directories = [
            DirectoryEntry(
                name=client.display_name,
                path=f"{self.ROOT_PATH}/{client.client_id}",
                # Prefer the live heartbeat timestamp over the record update time.
                modified=client.last_seen or client.updated_at,
            )
            for client in clients
        ]
        return BrowseResponse(path=self.ROOT_PATH, directories=directories, files=[])
    def _browse_client(self, client_id: str) -> BrowseResponse:
        """List one client's shares as virtual directories."""
        client = self._remote_client_service.get_client(client_id)
        directories = [
            DirectoryEntry(
                name=share.label,
                path=f"{self.ROOT_PATH}/{client.client_id}/{share.key}",
                modified=client.last_seen or client.updated_at,
            )
            for share in client.shares
        ]
        return BrowseResponse(path=f"{self.ROOT_PATH}/{client.client_id}", directories=directories, files=[])
    def _browse_remote_share(
        self,
        client_id: str,
        share_key: str,
        relative_parts: list[str],
        show_hidden: bool,
    ) -> BrowseResponse:
        """Proxy a listing request for a path inside a share to the client's agent.

        Validates client status, share existence and agent auth config
        before making the HTTP call, then sanitizes the agent's entries
        (unknown kinds and empty names are dropped; sizes are clamped >= 0).
        """
        client = self._remote_client_service.get_client(client_id)
        if client.status != "online":
            raise AppError(
                code="remote_client_unavailable",
                message=f"Remote client '{client.display_name}' is offline",
                status_code=503,
                details={"client_id": client.client_id, "status": client.status},
            )
        share = next((item for item in client.shares if item.key == share_key), None)
        if share is None:
            raise AppError(
                code="path_not_found",
                message="Remote share was not found",
                status_code=404,
                details={"client_id": client.client_id, "share_key": share_key},
            )
        if not self._agent_auth_token:
            raise AppError(
                code="remote_client_agent_auth_not_configured",
                message="Remote client agent auth token is not configured",
                status_code=503,
                details={"client_id": client.client_id},
            )
        base_path = f"{self.ROOT_PATH}/{client.client_id}/{share.key}"
        relative_path = "/".join(relative_parts)
        agent_payload = self._fetch_remote_listing(client=client, share_key=share.key, relative_path=relative_path, show_hidden=show_hidden)
        directories: list[DirectoryEntry] = []
        files: list[FileEntry] = []
        for entry in agent_payload.get("entries", []):
            # Defensive parsing: the agent payload is untrusted input.
            if not isinstance(entry, dict):
                continue
            name = str(entry.get("name", "")).strip()
            kind = str(entry.get("kind", "")).strip()
            if not name or kind not in {"directory", "file"}:
                continue
            child_path = f"{base_path}/{name}"
            modified = str(entry.get("modified", "") or "")
            if kind == "directory":
                directories.append(DirectoryEntry(name=name, path=child_path, modified=modified))
                continue
            size = entry.get("size", 0)
            try:
                normalized_size = max(0, int(size))
            except (TypeError, ValueError):
                normalized_size = 0
            files.append(FileEntry(name=name, path=child_path, size=normalized_size, modified=modified))
        response_path = base_path if not relative_path else f"{base_path}/{relative_path}"
        return BrowseResponse(path=response_path, directories=directories, files=files)
    def _fetch_remote_listing(
        self,
        *,
        client: RemoteClientItem,
        share_key: str,
        relative_path: str,
        show_hidden: bool,
    ) -> dict:
        """Call the agent's /api/list endpoint and return its JSON payload.

        Maps transport failures (timeout → 504, other HTTP errors → 502),
        agent-side 404 → path_not_found, 401/403 → remote_client_forbidden,
        any other >=400 status and malformed JSON → remote_client_error.
        """
        normalized_endpoint = client.endpoint.rstrip("/")
        query = urlencode({"share": share_key, "path": relative_path, "show_hidden": str(show_hidden).lower()})
        url = f"{normalized_endpoint}/api/list?{query}"
        headers = {self._agent_auth_header: f"{self._agent_auth_scheme} {self._agent_auth_token}"}
        # Same budget for connect and total so a dead host fails fast.
        timeout = httpx.Timeout(self._agent_timeout_seconds, connect=self._agent_timeout_seconds)
        try:
            with httpx.Client(timeout=timeout, headers=headers) as client_http:
                response = client_http.get(url)
        except httpx.TimeoutException as exc:
            raise AppError(
                code="remote_client_timeout",
                message=f"Remote client '{client.display_name}' timed out",
                status_code=504,
                details={"client_id": client.client_id, "endpoint": client.endpoint},
            ) from exc
        except httpx.HTTPError as exc:
            raise AppError(
                code="remote_client_unreachable",
                message=f"Remote client '{client.display_name}' is unreachable",
                status_code=502,
                details={"client_id": client.client_id, "endpoint": client.endpoint},
            ) from exc
        if response.status_code == 404:
            raise AppError(
                code="path_not_found",
                message="Remote path was not found",
                status_code=404,
                details={"client_id": client.client_id, "share_key": share_key},
            )
        if response.status_code in {401, 403}:
            # Auth failures surface as 502: misconfiguration, not a client error.
            raise AppError(
                code="remote_client_forbidden",
                message=f"Remote client '{client.display_name}' rejected authentication",
                status_code=502,
                details={"client_id": client.client_id, "endpoint": client.endpoint},
            )
        if response.status_code >= 400:
            raise AppError(
                code="remote_client_error",
                message=f"Remote client '{client.display_name}' browse failed",
                status_code=502,
                details={"client_id": client.client_id, "endpoint": client.endpoint, "status_code": str(response.status_code)},
            )
        try:
            payload = response.json()
        except ValueError as exc:
            raise AppError(
                code="remote_client_error",
                message=f"Remote client '{client.display_name}' returned invalid JSON",
                status_code=502,
                details={"client_id": client.client_id, "endpoint": client.endpoint},
            ) from exc
        if not isinstance(payload, dict):
            raise AppError(
                code="remote_client_error",
                message=f"Remote client '{client.display_name}' returned an invalid response",
                status_code=502,
                details={"client_id": client.client_id, "endpoint": client.endpoint},
            )
        return payload
@@ -0,0 +1,151 @@
from __future__ import annotations
from datetime import datetime, timedelta, timezone
from typing import Callable
from backend.app.api.errors import AppError
from backend.app.api.schemas import (
RemoteClientHeartbeatRequest,
RemoteClientItem,
RemoteClientListResponse,
RemoteClientRegisterRequest,
)
from backend.app.db.remote_client_repository import RemoteClientRepository
class RemoteClientService:
def __init__(
    self,
    repository: RemoteClientRepository,
    registration_token: str,
    offline_timeout_seconds: int,
    now: Callable[[], datetime] | None = None,
):
    self._repository = repository
    # Empty token leaves registration disabled (checked on every auth).
    self._registration_token = registration_token.strip()
    # Minimum of one second so stale detection can never be instantaneous.
    self._offline_timeout_seconds = max(1, int(offline_timeout_seconds))
    # Injectable clock for deterministic tests; defaults to aware UTC now.
    self._now = now or (lambda: datetime.now(tz=timezone.utc))
def list_clients(self) -> RemoteClientListResponse:
    """Return all registered remote clients, refreshing stale statuses first."""
    self._refresh_stale_statuses()
    items = [RemoteClientItem(**row) for row in self._repository.list_clients()]
    return RemoteClientListResponse(items=items)
def get_client(self, client_id: str) -> RemoteClientItem:
    """Look up one remote client by id, refreshing stale statuses first.

    Raises ``invalid_request`` (400) for a blank id and ``path_not_found``
    (404) when no such client is registered.
    """
    normalized_client_id = (client_id or "").strip()
    if not normalized_client_id:
        raise AppError(
            code="invalid_request",
            message="client_id is required",
            status_code=400,
            details={"client_id": client_id},
        )
    self._refresh_stale_statuses()
    item = self._repository.get_client(normalized_client_id)
    if item is None:
        raise AppError(
            code="path_not_found",
            message="Remote client was not found",
            status_code=404,
            details={"client_id": normalized_client_id},
        )
    return RemoteClientItem(**item)
def register_client(self, authorization: str | None, request: RemoteClientRegisterRequest) -> RemoteClientItem:
    """Register (or re-register) a remote client after bearer-token auth.

    Upserts by client_id so repeat registrations update the existing record.
    """
    self._require_registration_auth(authorization)
    payload = self._normalize_register_request(request)
    now_iso = self._to_iso(self._now())
    item = self._repository.upsert_client(now_iso=now_iso, **payload)
    return RemoteClientItem(**item)
def record_heartbeat(self, authorization: str | None, request: RemoteClientHeartbeatRequest) -> RemoteClientItem:
self._require_registration_auth(authorization)
client_id = (request.client_id or "").strip()
agent_version = (request.agent_version or "").strip()
if not client_id:
raise AppError(
code="invalid_request",
message="client_id is required",
status_code=400,
details={"client_id": request.client_id},
)
if not agent_version:
raise AppError(
code="invalid_request",
message="agent_version is required",
status_code=400,
details={"agent_version": request.agent_version},
)
item = self._repository.record_heartbeat(
client_id=client_id,
agent_version=agent_version,
now_iso=self._to_iso(self._now()),
)
if item is None:
raise AppError(
code="path_not_found",
message="Remote client was not found",
status_code=404,
details={"client_id": client_id},
)
return RemoteClientItem(**item)
def _require_registration_auth(self, authorization: str | None) -> None:
if not self._registration_token:
raise AppError(
code="remote_client_registration_disabled",
message="Remote client registration is not configured",
status_code=503,
)
expected = f"Bearer {self._registration_token}"
if (authorization or "").strip() != expected:
raise AppError(
code="forbidden",
message="Invalid remote client registration token",
status_code=403,
)
def _normalize_register_request(self, request: RemoteClientRegisterRequest) -> dict:
client_id = (request.client_id or "").strip()
display_name = (request.display_name or "").strip()
platform = (request.platform or "").strip()
agent_version = (request.agent_version or "").strip()
endpoint = (request.endpoint or "").strip()
shares = [
{"key": (item.key or "").strip(), "label": (item.label or "").strip()}
for item in request.shares
]
shares = [item for item in shares if item["key"] and item["label"]]
if not client_id:
raise AppError("invalid_request", "client_id is required", 400, {"client_id": request.client_id})
if not display_name:
raise AppError("invalid_request", "display_name is required", 400, {"display_name": request.display_name})
if not platform:
raise AppError("invalid_request", "platform is required", 400, {"platform": request.platform})
if not agent_version:
raise AppError("invalid_request", "agent_version is required", 400, {"agent_version": request.agent_version})
if not endpoint:
raise AppError("invalid_request", "endpoint is required", 400, {"endpoint": request.endpoint})
if not shares:
raise AppError("invalid_request", "at least one share is required", 400, {"shares": "[]"})
return {
"client_id": client_id,
"display_name": display_name,
"platform": platform,
"agent_version": agent_version,
"endpoint": endpoint,
"shares": shares,
}
def _refresh_stale_statuses(self) -> None:
now = self._now()
self._repository.mark_stale_clients_offline(
cutoff_iso=self._to_iso(now - timedelta(seconds=self._offline_timeout_seconds)),
now_iso=self._to_iso(now),
)
@staticmethod
def _to_iso(value: datetime) -> str:
return value.astimezone(timezone.utc).isoformat().replace("+00:00", "Z")
@@ -0,0 +1,432 @@
from __future__ import annotations
from dataclasses import dataclass
from pathlib import PurePosixPath
from urllib.parse import urlencode
import httpx
from backend.app.api.errors import AppError
from backend.app.api.schemas import FileInfoResponse, RemoteClientItem, ViewResponse
from backend.app.services.remote_browse_service import RemoteBrowseService
from backend.app.services.remote_client_service import RemoteClientService
# Cap for remote text previews; the agent is asked for at most this many
# bytes and oversized replies are rejected as invalid.
REMOTE_TEXT_PREVIEW_MAX_BYTES = 256 * 1024
# Default connect/read timeout for small JSON agent calls (info/read).
REMOTE_AGENT_TIMEOUT_SECONDS = 2.0
# More generous read timeout used while streaming downloads from the agent.
REMOTE_DOWNLOAD_READ_TIMEOUT_SECONDS = 5.0
# Chunk size used when relaying an agent download stream to the caller.
REMOTE_STREAM_CHUNK_BYTES = 64 * 1024
# Extension -> content type for files eligible for text preview.
TEXT_CONTENT_TYPES = {
    ".txt": "text/plain",
    ".log": "text/plain",
    ".conf": "text/plain",
    ".ini": "text/plain",
    ".cfg": "text/plain",
    ".md": "text/markdown",
    ".yml": "text/yaml",
    ".yaml": "text/yaml",
    ".json": "application/json",
    ".js": "text/javascript",
    ".py": "text/x-python",
    ".css": "text/css",
    ".html": "text/html",
}
# Well-known extensionless filenames that are still text (lower-cased lookup).
SPECIAL_TEXT_FILENAMES = {
    "dockerfile": "text/plain",
    "containerfile": "text/plain",
}
# Extension -> content type for inline image preview support.
IMAGE_CONTENT_TYPES = {
    ".jpg": "image/jpeg",
    ".jpeg": "image/jpeg",
    ".png": "image/png",
    ".webp": "image/webp",
    ".gif": "image/gif",
    ".bmp": "image/bmp",
    ".avif": "image/avif",
}
@dataclass(frozen=True)
class RemoteResolvedPath:
    """Parsed, validated form of a virtual /Clients/... request path."""

    raw_path: str       # normalized original path (trailing slash stripped)
    client: RemoteClientItem  # registry entry; the resolver only yields online clients
    share_key: str      # share segment of the path
    relative_path: str  # path inside the share ("" at the share root)
    name: str           # display name: share label at share root, else last segment
    root_path: str      # virtual root: /Clients/<client_id>/<share_key>
class RemoteFileService:
    """Read-only file operations for the virtual /Clients tree.

    Translates /Clients/<client_id>/<share>/<path> paths into HTTP calls
    against the owning remote agent. Only info, text view, download and image
    preview are supported; write operations are intentionally absent.
    """

    def __init__(
        self,
        remote_client_service: RemoteClientService,
        agent_auth_header: str,
        agent_auth_scheme: str,
        agent_auth_token: str,
        agent_timeout_seconds: float = REMOTE_AGENT_TIMEOUT_SECONDS,
        text_preview_max_bytes: int = REMOTE_TEXT_PREVIEW_MAX_BYTES,
        download_read_timeout_seconds: float = REMOTE_DOWNLOAD_READ_TIMEOUT_SECONDS,
        stream_chunk_bytes: int = REMOTE_STREAM_CHUNK_BYTES,
    ):
        """Store agent auth settings and clamp numeric limits to safe minimums."""
        self._remote_client_service = remote_client_service
        # Blank config values fall back to conventional defaults.
        self._agent_auth_header = (agent_auth_header or "Authorization").strip() or "Authorization"
        self._agent_auth_scheme = (agent_auth_scheme or "Bearer").strip() or "Bearer"
        self._agent_auth_token = (agent_auth_token or "").strip()
        self._agent_timeout_seconds = max(0.1, float(agent_timeout_seconds))
        self._text_preview_max_bytes = max(1024, int(text_preview_max_bytes))
        self._download_read_timeout_seconds = max(0.1, float(download_read_timeout_seconds))
        self._stream_chunk_bytes = max(4096, int(stream_chunk_bytes))

    def handles_path(self, path: str) -> bool:
        """True when the path belongs to the virtual /Clients tree."""
        return RemoteBrowseService.handles_path(path)

    def info(self, path: str) -> FileInfoResponse:
        """Fetch file/directory metadata from the owning agent.

        Share roots are allowed so properties work on a share itself.
        """
        resolved = self._resolve_remote_path(path, allow_share_root=True)
        payload = self._request_json(
            client=resolved.client,
            endpoint_path="/api/info",
            params={"share": resolved.share_key, "path": resolved.relative_path},
        )
        kind = str(payload.get("kind", "")).strip()
        if kind not in {"file", "directory"}:
            raise self._invalid_agent_payload(resolved.client, "Remote file info response was invalid")
        # Prefer the agent-reported extension, fall back to the name's suffix.
        extension = str(payload.get("extension", "") or "").strip() or PurePosixPath(resolved.name).suffix.lower() or None
        return FileInfoResponse(
            name=str(payload.get("name", resolved.name)).strip() or resolved.name,
            path=resolved.raw_path,
            type=kind,
            size=self._normalize_optional_int(payload.get("size")),
            modified=str(payload.get("modified", "")).strip(),
            root=resolved.root_path,
            extension=extension,
            content_type=self._normalize_optional_string(payload.get("content_type")),
            owner=self._normalize_optional_string(payload.get("owner")),
            group=self._normalize_optional_string(payload.get("group")),
            width=self._normalize_optional_int(payload.get("width")),
            height=self._normalize_optional_int(payload.get("height")),
        )

    def view(self, path: str, *, for_edit: bool = False) -> ViewResponse:
        """Fetch a bounded text preview of a remote file.

        Editing is rejected (409) because remote shares are read-only.
        """
        if for_edit:
            raise AppError(
                code="unsupported_type",
                message="Remote files are not supported for edit",
                status_code=409,
                details={"path": path},
            )
        resolved = self._resolve_remote_path(path)
        payload = self._request_json(
            client=resolved.client,
            endpoint_path="/api/read",
            params={
                "share": resolved.share_key,
                "path": resolved.relative_path,
                "max_bytes": str(self._text_preview_max_bytes),
            },
        )
        content = str(payload.get("content", ""))
        # Defend against an agent that ignores the max_bytes parameter.
        if len(content.encode("utf-8")) > self._text_preview_max_bytes:
            raise self._invalid_agent_payload(resolved.client, "Remote text preview exceeded the configured limit")
        return ViewResponse(
            path=resolved.raw_path,
            name=str(payload.get("name", resolved.name)).strip() or resolved.name,
            content_type=str(payload.get("content_type", self._content_type_for_name(resolved.name) or "text/plain")).strip(),
            encoding=str(payload.get("encoding", "utf-8")).strip() or "utf-8",
            truncated=bool(payload.get("truncated", False)),
            size=max(0, int(payload.get("size", 0))),
            modified=str(payload.get("modified", "")).strip(),
            content=content,
        )

    def prepare_download(self, paths: list[str]) -> dict:
        """Open a streaming download for exactly one remote file.

        Returns a dict with a chunk iterator, response headers and content
        type, ready to be wrapped in a streaming HTTP response.
        """
        if len(paths) != 1:
            raise AppError(
                code="invalid_request",
                message="Remote downloads support exactly one file per request",
                status_code=400,
            )
        resolved = self._resolve_remote_path(paths[0])
        stream = self._open_stream(
            client=resolved.client,
            endpoint_path="/api/download",
            params={"share": resolved.share_key, "path": resolved.relative_path},
        )
        # Reuse the agent's disposition when provided, otherwise synthesize one.
        content_disposition = stream.headers.get("content-disposition") or f'attachment; filename="{resolved.name}"'
        headers = {"Content-Disposition": content_disposition}
        if stream.headers.get("content-length"):
            headers["Content-Length"] = stream.headers["content-length"]
        return {
            "content": self._iter_remote_stream(stream),
            "headers": headers,
            "content_type": stream.headers.get("content-type", "application/octet-stream"),
        }

    def prepare_image_stream(self, path: str) -> dict:
        """Open a streaming image preview for a supported remote image file."""
        resolved = self._resolve_remote_path(path)
        content_type = self._image_content_type_for_name(resolved.name)
        if content_type is None:
            raise AppError(
                code="unsupported_type",
                message="File type is not supported for image viewing",
                status_code=409,
                details={"path": path},
            )
        stream = self._open_stream(
            client=resolved.client,
            endpoint_path="/api/download",
            params={"share": resolved.share_key, "path": resolved.relative_path},
        )
        headers: dict[str, str] = {}
        if stream.headers.get("content-length"):
            headers["Content-Length"] = stream.headers["content-length"]
        return {
            "content": self._iter_remote_stream(stream),
            "headers": headers,
            "content_type": content_type,
        }

    def _resolve_remote_path(self, path: str, *, allow_share_root: bool = False) -> RemoteResolvedPath:
        """Parse and validate a /Clients path into its addressing parts.

        Verifies the client exists and is online and that the share is
        registered; rejects paths that stop above the required depth.
        """
        normalized = (path or "").strip().rstrip("/")
        if not self.handles_path(normalized):
            raise AppError(
                code="invalid_request",
                message="Remote path must be under /Clients",
                status_code=400,
                details={"path": path},
            )
        parts = normalized[len(RemoteBrowseService.ROOT_PATH) + 1 :].split("/") if normalized != RemoteBrowseService.ROOT_PATH else []
        # info() may target a share root (2 parts); file ops need a path inside it.
        min_parts = 2 if allow_share_root else 3
        if len(parts) < min_parts:
            raise AppError(
                code="type_conflict",
                message="Remote path must reference a file or directory inside a share",
                status_code=409,
                details={"path": path},
            )
        client = self._remote_client_service.get_client(parts[0])
        if client.status != "online":
            raise AppError(
                code="remote_client_unavailable",
                message=f"Remote client '{client.display_name}' is offline",
                status_code=503,
                details={"client_id": client.client_id, "status": client.status},
            )
        share_key = parts[1]
        if not any(share.key == share_key for share in client.shares):
            raise AppError(
                code="path_not_found",
                message="Remote share was not found",
                status_code=404,
                details={"client_id": client.client_id, "share_key": share_key},
            )
        relative_path = "/".join(parts[2:])
        if not relative_path and not allow_share_root:
            raise AppError(
                code="type_conflict",
                message="Remote file operation requires a path inside the share",
                status_code=409,
                details={"path": path},
            )
        name = parts[-1]
        # At the share root, display the share's label instead of its key.
        if allow_share_root and len(parts) == 2:
            share = next((item for item in client.shares if item.key == share_key), None)
            if share is not None:
                name = share.label
        return RemoteResolvedPath(
            raw_path=normalized,
            client=client,
            share_key=share_key,
            relative_path=relative_path,
            name=name,
            root_path=f"{RemoteBrowseService.ROOT_PATH}/{client.client_id}/{share_key}",
        )

    def _request_json(self, *, client: RemoteClientItem, endpoint_path: str, params: dict[str, str]) -> dict:
        """GET a small JSON payload from the agent, mapping transport failures
        to AppErrors (504 timeout, 502 unreachable/invalid payload)."""
        url = self._build_url(client.endpoint, endpoint_path, params)
        timeout = httpx.Timeout(self._agent_timeout_seconds, connect=self._agent_timeout_seconds)
        try:
            with httpx.Client(timeout=timeout, headers=self._auth_headers()) as client_http:
                response = client_http.get(url)
        except httpx.TimeoutException as exc:
            raise self._timeout_error(client) from exc
        except httpx.HTTPError as exc:
            raise self._unreachable_error(client) from exc
        self._raise_for_agent_error(client=client, response=response)
        try:
            payload = response.json()
        except ValueError as exc:
            raise self._invalid_agent_payload(client, "Remote client returned invalid JSON") from exc
        if not isinstance(payload, dict):
            raise self._invalid_agent_payload(client, "Remote client returned an invalid response")
        return payload

    def _open_stream(self, *, client: RemoteClientItem, endpoint_path: str, params: dict[str, str]) -> httpx.Response:
        """Open a streaming GET against the agent and return the live response.

        The httpx.Client that owns the connection is stashed on the response's
        extensions so _iter_remote_stream can close both when iteration ends.
        """
        url = self._build_url(client.endpoint, endpoint_path, params)
        timeout = httpx.Timeout(
            connect=self._agent_timeout_seconds,
            read=self._download_read_timeout_seconds,
            write=self._agent_timeout_seconds,
            pool=self._agent_timeout_seconds,
        )
        client_http = httpx.Client(timeout=timeout, headers=self._auth_headers())
        try:
            # NOTE: httpx.Client.stream() returns a context manager, not a
            # Response, so build/send with stream=True to obtain the Response
            # directly; it must outlive this call and is closed later in
            # _iter_remote_stream.
            request = client_http.build_request("GET", url)
            response = client_http.send(request, stream=True)
        except httpx.TimeoutException as exc:
            client_http.close()
            raise self._timeout_error(client) from exc
        except httpx.HTTPError as exc:
            client_http.close()
            raise self._unreachable_error(client) from exc
        try:
            if response.status_code >= 400:
                # Error bodies are small JSON; buffer them so the handler can
                # parse — a streaming response cannot be .json()'d before read().
                response.read()
            self._raise_for_agent_error(client=client, response=response)
        except Exception:
            response.close()
            client_http.close()
            raise
        response.extensions["remote_client_http_client"] = client_http
        return response

    def _iter_remote_stream(self, response: httpx.Response):
        """Yield body chunks, closing the response and its client afterwards."""
        client_http = response.extensions.get("remote_client_http_client")
        try:
            for chunk in response.iter_bytes(chunk_size=self._stream_chunk_bytes):
                if chunk:
                    yield chunk
        finally:
            response.close()
            if client_http is not None:
                client_http.close()

    def _raise_for_agent_error(self, *, client: RemoteClientItem, response: httpx.Response) -> None:
        """Translate agent HTTP error responses into AppErrors.

        4xx statuses are forwarded with agent-provided code/message where
        available; anything else becomes a 502 remote_client_error.
        """
        if response.status_code < 400:
            return
        code = None
        message = None
        detail_payload = None
        try:
            payload = response.json()
        except ValueError:
            payload = None
        if isinstance(payload, dict):
            detail = payload.get("detail")
            if isinstance(detail, dict):
                detail_payload = detail
                code = self._normalize_optional_string(detail.get("code"))
                message = self._normalize_optional_string(detail.get("message"))
            elif isinstance(detail, str):
                message = detail.strip() or None
        if response.status_code == 400:
            raise AppError(
                code=code or "invalid_request",
                message=message or "Remote request was rejected",
                status_code=400,
                details={"client_id": client.client_id},
            )
        if response.status_code == 403:
            agent_code = code or "forbidden"
            # A rejected agent token is a server-side misconfiguration, not a
            # user error, so surface it as a 502 gateway problem.
            if agent_code == "invalid_agent_token":
                raise AppError(
                    code="remote_client_forbidden",
                    message=f"Remote client '{client.display_name}' rejected authentication",
                    status_code=502,
                    details={"client_id": client.client_id, "endpoint": client.endpoint},
                )
            raise AppError(
                code=agent_code,
                message=message or "Remote access was denied",
                status_code=403,
                details={"client_id": client.client_id},
            )
        if response.status_code == 404:
            raise AppError(
                code=code or "path_not_found",
                message=message or "Remote path was not found",
                status_code=404,
                details={"client_id": client.client_id},
            )
        if response.status_code == 409:
            raise AppError(
                code=code or "type_conflict",
                message=message or "Remote file operation could not be completed",
                status_code=409,
                details={"client_id": client.client_id},
            )
        raise AppError(
            code="remote_client_error",
            message=message or f"Remote client '{client.display_name}' request failed",
            status_code=502,
            details={
                "client_id": client.client_id,
                "endpoint": client.endpoint,
                "status_code": str(response.status_code),
                "agent_code": code or "",
                "agent_detail": str(detail_payload or ""),
            },
        )

    def _auth_headers(self) -> dict[str, str]:
        """Return the agent auth header; 503 when no token is configured."""
        if not self._agent_auth_token:
            raise AppError(
                code="remote_client_agent_auth_not_configured",
                message="Remote client agent auth token is not configured",
                status_code=503,
            )
        return {self._agent_auth_header: f"{self._agent_auth_scheme} {self._agent_auth_token}"}

    @staticmethod
    def _build_url(endpoint: str, endpoint_path: str, params: dict[str, str]) -> str:
        """Join the agent base endpoint, path and query string."""
        return f"{endpoint.rstrip('/')}{endpoint_path}?{urlencode(params)}"

    @staticmethod
    def _timeout_error(client: RemoteClientItem) -> AppError:
        """504 error for an agent that did not answer in time."""
        return AppError(
            code="remote_client_timeout",
            message=f"Remote client '{client.display_name}' timed out",
            status_code=504,
            details={"client_id": client.client_id, "endpoint": client.endpoint},
        )

    @staticmethod
    def _unreachable_error(client: RemoteClientItem) -> AppError:
        """502 error for an agent that could not be reached at all."""
        return AppError(
            code="remote_client_unreachable",
            message=f"Remote client '{client.display_name}' is unreachable",
            status_code=502,
            details={"client_id": client.client_id, "endpoint": client.endpoint},
        )

    @staticmethod
    def _invalid_agent_payload(client: RemoteClientItem, message: str) -> AppError:
        """502 error for a reachable agent that answered with garbage."""
        return AppError(
            code="remote_client_error",
            message=message,
            status_code=502,
            details={"client_id": client.client_id, "endpoint": client.endpoint},
        )

    @staticmethod
    def _normalize_optional_string(value) -> str | None:
        """Coerce to a stripped string; blank or None becomes None."""
        normalized = str(value).strip() if value is not None else ""
        return normalized or None

    @staticmethod
    def _normalize_optional_int(value) -> int | None:
        """Coerce to a non-negative int; None/blank/unparseable becomes None."""
        if value is None or value == "":
            return None
        try:
            return max(0, int(value))
        except (TypeError, ValueError):
            return None

    @staticmethod
    def _content_type_for_name(name: str) -> str | None:
        """Text content type for a filename, or None when not previewable."""
        special_name = SPECIAL_TEXT_FILENAMES.get((name or "").lower())
        if special_name:
            return special_name
        return TEXT_CONTENT_TYPES.get(PurePosixPath(name).suffix.lower())

    @staticmethod
    def _image_content_type_for_name(name: str) -> str | None:
        """Image content type for a filename, or None when unsupported."""
        return IMAGE_CONTENT_TYPES.get(PurePosixPath(name).suffix.lower())
@@ -1,9 +1,10 @@
from __future__ import annotations from __future__ import annotations
from backend.app.api.errors import AppError from backend.app.api.errors import AppError
from backend.app.api.schemas import SettingsResponse, SettingsUpdateRequest from backend.app.api.schemas import SettingsResponse, SettingsUpdateRequest, ZipDownloadLimitsResponse
from backend.app.db.settings_repository import SettingsRepository from backend.app.db.settings_repository import SettingsRepository
from backend.app.security.path_guard import PathGuard from backend.app.security.path_guard import PathGuard
from backend.app.services.file_ops_service import ZIP_DOWNLOAD_PREFLIGHT_LIMITS
VALID_THEMES = { VALID_THEMES = {
@@ -38,6 +39,13 @@ class SettingsService:
preferred_startup_path_right=preferred_right, preferred_startup_path_right=preferred_right,
selected_theme=selected_theme, selected_theme=selected_theme,
selected_color_mode=selected_color_mode, selected_color_mode=selected_color_mode,
zip_download_limits=ZipDownloadLimitsResponse(
max_items=ZIP_DOWNLOAD_PREFLIGHT_LIMITS.max_items,
max_total_input_bytes=ZIP_DOWNLOAD_PREFLIGHT_LIMITS.max_total_input_bytes,
max_individual_file_bytes=ZIP_DOWNLOAD_PREFLIGHT_LIMITS.max_individual_file_bytes,
scan_timeout_seconds=ZIP_DOWNLOAD_PREFLIGHT_LIMITS.scan_timeout_seconds,
symlink_policy="not_allowed",
),
) )
def update_settings(self, request: SettingsUpdateRequest) -> SettingsResponse: def update_settings(self, request: SettingsUpdateRequest) -> SettingsResponse:
@@ -0,0 +1,14 @@
from __future__ import annotations
from backend.app.db.history_repository import HistoryRepository
from backend.app.db.task_repository import TaskRepository
def reconcile_persisted_incomplete_tasks(
    task_repository: TaskRepository,
    history_repository: HistoryRepository,
) -> list[str]:
    """Reconcile tasks left incomplete by a previous process run.

    Asks the task repository to reconcile all incomplete tasks and, when any
    were found, marks the matching history entries as failed. Returns the
    reconciled task ids exactly as reported by the task repository.
    """
    stalled_task_ids = task_repository.reconcile_incomplete_tasks()
    if not stalled_task_ids:
        return stalled_task_ids
    history_repository.reconcile_entries_failed(stalled_task_ids)
    return stalled_task_ids
+43 -1
View File
@@ -2,12 +2,16 @@ from __future__ import annotations
from backend.app.api.errors import AppError from backend.app.api.errors import AppError
from backend.app.api.schemas import TaskDetailResponse, TaskListItem, TaskListResponse from backend.app.api.schemas import TaskDetailResponse, TaskListItem, TaskListResponse
from backend.app.db.history_repository import HistoryRepository
from backend.app.db.task_repository import TaskRepository from backend.app.db.task_repository import TaskRepository
FILE_ACTION_CANCELLABLE_OPERATIONS = {"copy", "move", "duplicate", "delete"}
class TaskService: class TaskService:
def __init__(self, repository: TaskRepository): def __init__(self, repository: TaskRepository, history_repository: HistoryRepository | None = None):
self._repository = repository self._repository = repository
self._history_repository = history_repository
def create_task(self, operation: str, source: str, destination: str) -> TaskDetailResponse: def create_task(self, operation: str, source: str, destination: str) -> TaskDetailResponse:
task = self._repository.create_task(operation=operation, source=source, destination=destination) task = self._repository.create_task(operation=operation, source=source, destination=destination)
@@ -40,3 +44,41 @@ class TaskService:
for task in tasks for task in tasks
] ]
) )
def cancel_task(self, task_id: str) -> TaskDetailResponse:
    """Request cooperative cancellation of a queued/running file task.

    Raises:
        AppError(task_not_found): unknown task id (404).
        AppError(task_not_cancellable): the operation is not a file action,
            the task is already in a terminal status, or the cancellation
            request lost a race with task completion (409).
    """

    def _not_cancellable(status: str) -> AppError:
        # Single construction point for the three identical 409 rejections.
        return AppError(
            code="task_not_cancellable",
            message="Task cannot be cancelled",
            status_code=409,
            details={"task_id": task_id, "status": status},
        )

    task = self._repository.get_task(task_id)
    if not task:
        raise AppError(
            code="task_not_found",
            message="Task was not found",
            status_code=404,
            details={"task_id": task_id},
        )
    # Only file-action operations support cooperative cancellation.
    if task["operation"] not in FILE_ACTION_CANCELLABLE_OPERATIONS:
        raise _not_cancellable(task["status"])
    if task["status"] not in {"queued", "running", "cancelling"}:
        raise _not_cancellable(task["status"])
    # The repository update can still lose the race with task completion.
    updated = self._repository.request_cancellation(task_id)
    if not updated:
        raise _not_cancellable(task["status"])
    # Keep the history entry in sync when the task went straight to cancelled.
    if updated["status"] == "cancelled" and self._history_repository:
        self._history_repository.update_entry(entry_id=task_id, status="cancelled")
    return TaskDetailResponse(**updated)
File diff suppressed because it is too large Load Diff
Binary file not shown.
@@ -1,6 +1,7 @@
from __future__ import annotations from __future__ import annotations
import asyncio import asyncio
import os
import sys import sys
import tempfile import tempfile
import unittest import unittest
@@ -11,11 +12,43 @@ import httpx
sys.path.insert(0, str(Path(__file__).resolve().parents[3])) sys.path.insert(0, str(Path(__file__).resolve().parents[3]))
from backend.app.api.errors import AppError
from backend.app.dependencies import get_browse_service from backend.app.dependencies import get_browse_service
from backend.app.db.remote_client_repository import RemoteClientRepository
from backend.app.fs.filesystem_adapter import FilesystemAdapter from backend.app.fs.filesystem_adapter import FilesystemAdapter
from backend.app.main import app from backend.app.main import app
from backend.app.security.path_guard import PathGuard from backend.app.security.path_guard import PathGuard
from backend.app.services.browse_service import BrowseService from backend.app.services.browse_service import BrowseService
from backend.app.services.remote_browse_service import RemoteBrowseService
from backend.app.services.remote_client_service import RemoteClientService
class _StubRemoteBrowseService(RemoteBrowseService):
    """RemoteBrowseService with the agent HTTP fetch replaced by canned data.

    `listings` maps (client_id, share_key, relative_path) to the listing
    payload to return; clients in `failing_client_ids` raise the same
    AppError an unreachable agent would produce.
    """

    def __init__(
        self,
        remote_client_service: RemoteClientService,
        listings: dict[tuple[str, str, str], dict],
        failing_client_ids: set[str],
    ):
        # Real service wiring with dummy agent-auth settings; only the
        # network fetch below is stubbed out.
        super().__init__(
            remote_client_service=remote_client_service,
            agent_auth_header="Authorization",
            agent_auth_scheme="Bearer",
            agent_auth_token="agent-secret",
            agent_timeout_seconds=0.25,
        )
        self._listings = listings
        self._failing_client_ids = failing_client_ids

    def _fetch_remote_listing(self, *, client, share_key: str, relative_path: str, show_hidden: bool) -> dict:
        # Simulate an unreachable agent for the configured client ids.
        if client.client_id in self._failing_client_ids:
            raise AppError(
                code="remote_client_unreachable",
                message=f"Remote client '{client.display_name}' is unreachable",
                status_code=502,
                details={"client_id": client.client_id, "endpoint": client.endpoint},
            )
        # Serve the canned listing; a KeyError here indicates a test bug.
        return self._listings[(client.client_id, share_key, relative_path)]
class BrowseApiGoldenTest(unittest.TestCase): class BrowseApiGoldenTest(unittest.TestCase):
@@ -36,6 +69,12 @@ class BrowseApiGoldenTest(unittest.TestCase):
file_path.write_bytes(b"abc") file_path.write_bytes(b"abc")
second_file = self.second_root / "archive.txt" second_file = self.second_root / "archive.txt"
second_file.write_text("z", encoding="utf-8") second_file.write_text("z", encoding="utf-8")
remote_root = Path(self.temp_dir.name) / "remote-downloads"
remote_root.mkdir(parents=True, exist_ok=True)
remote_dir = remote_root / "Series"
remote_dir.mkdir()
remote_file = remote_root / "episode.mkv"
remote_file.write_bytes(b"remote")
hidden_dir = self.root / ".hidden_dir" hidden_dir = self.root / ".hidden_dir"
hidden_dir.mkdir() hidden_dir.mkdir()
@@ -43,15 +82,70 @@ class BrowseApiGoldenTest(unittest.TestCase):
hidden_file.write_bytes(b"x") hidden_file.write_bytes(b"x")
mtime = 1710000000 mtime = 1710000000
for path in [folder, file_path, hidden_dir, hidden_file, second_file]: for path in [folder, file_path, hidden_dir, hidden_file, second_file, remote_dir, remote_file]:
Path(path).touch() Path(path).touch()
Path(path).chmod(0o755) Path(path).chmod(0o755)
import os
os.utime(path, (mtime, mtime)) os.utime(path, (mtime, mtime))
repository = RemoteClientRepository(str(Path(self.temp_dir.name) / "remote-clients.db"))
now_iso = "2026-03-26T12:00:00Z"
repository.upsert_client(
client_id="client-123",
display_name="Jan MacBook",
platform="macos",
agent_version="1.1.0",
endpoint="http://agent.test",
shares=[{"key": "downloads", "label": "Downloads"}],
now_iso=now_iso,
)
repository.upsert_client(
client_id="broken-client",
display_name="Offline iMac",
platform="macos",
agent_version="1.1.0",
endpoint="http://127.0.0.1:1",
shares=[{"key": "downloads", "label": "Downloads"}],
now_iso=now_iso,
)
service = BrowseService( service = BrowseService(
path_guard=PathGuard({"storage1": str(self.root), "storage2": str(self.second_root)}), path_guard=PathGuard({"storage1": str(self.root), "storage2": str(self.second_root)}),
filesystem=FilesystemAdapter(), filesystem=FilesystemAdapter(),
remote_browse_service=_StubRemoteBrowseService(
remote_client_service=RemoteClientService(
repository=repository,
registration_token="secret-token",
offline_timeout_seconds=60,
now=lambda: datetime(2026, 3, 26, 12, 0, 0, tzinfo=timezone.utc),
),
listings={
(
"client-123",
"downloads",
"",
): {
"entries": [
{
"name": "Series",
"kind": "directory",
"size": remote_dir.stat().st_size,
"modified": datetime.fromtimestamp(remote_dir.stat().st_mtime, tz=timezone.utc)
.isoformat()
.replace("+00:00", "Z"),
},
{
"name": "episode.mkv",
"kind": "file",
"size": remote_file.stat().st_size,
"modified": datetime.fromtimestamp(remote_file.stat().st_mtime, tz=timezone.utc)
.isoformat()
.replace("+00:00", "Z"),
},
]
}
},
failing_client_ids={"broken-client"},
),
) )
async def _override_browse_service() -> BrowseService: async def _override_browse_service() -> BrowseService:
return service return service
@@ -151,6 +245,80 @@ class BrowseApiGoldenTest(unittest.TestCase):
}, },
) )
def test_browse_virtual_clients_and_remote_share(self) -> None:
    """Golden test of the three /Clients browse levels.

    Level 1 lists registered clients, level 2 lists a client's shares by
    label, level 3 returns the stubbed remote listing — no real agent HTTP.
    """
    # Level 1: virtual root lists every registered client as a directory.
    clients_response = self._get("/Clients")
    self.assertEqual(clients_response.status_code, 200)
    self.assertEqual(
        clients_response.json(),
        {
            "path": "/Clients",
            "directories": [
                {
                    "name": "Jan MacBook",
                    "path": "/Clients/client-123",
                    "modified": "2026-03-26T12:00:00Z",
                },
                {
                    "name": "Offline iMac",
                    "path": "/Clients/broken-client",
                    "modified": "2026-03-26T12:00:00Z",
                },
            ],
            "files": [],
        },
    )
    # Level 2: a client directory lists its shares using the share labels.
    shares_response = self._get("/Clients/client-123")
    self.assertEqual(shares_response.status_code, 200)
    self.assertEqual(
        shares_response.json(),
        {
            "path": "/Clients/client-123",
            "directories": [
                {
                    "name": "Downloads",
                    "path": "/Clients/client-123/downloads",
                    "modified": "2026-03-26T12:00:00Z",
                }
            ],
            "files": [],
        },
    )
    # Level 3: inside a share, entries come from the stubbed remote listing.
    browse_response = self._get("/Clients/client-123/downloads")
    self.assertEqual(browse_response.status_code, 200)
    # Matches the mtime the fixtures were pinned to in setUp.
    modified = datetime.fromtimestamp(1710000000, tz=timezone.utc).isoformat().replace("+00:00", "Z")
    self.assertEqual(
        browse_response.json(),
        {
            "path": "/Clients/client-123/downloads",
            "directories": [
                {
                    "name": "Series",
                    "path": "/Clients/client-123/downloads/Series",
                    "modified": modified,
                }
            ],
            "files": [
                {
                    "name": "episode.mkv",
                    "path": "/Clients/client-123/downloads/episode.mkv",
                    "size": 6,
                    "modified": modified,
                }
            ],
        },
    )
def test_remote_client_failure_stays_local_to_remote_subtree(self) -> None:
    """A dead remote agent yields a 502 only inside its /Clients subtree,
    while local /Volumes browsing keeps working untouched."""
    remote_response = self._get("/Clients/broken-client/downloads")
    self.assertEqual(remote_response.status_code, 502)
    self.assertEqual(remote_response.json()["error"]["code"], "remote_client_unreachable")
    local_response = self._get("/Volumes")
    self.assertEqual(local_response.status_code, 200)
    self.assertEqual(local_response.json()["path"], "/Volumes")
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()
@@ -0,0 +1,139 @@
from __future__ import annotations
import asyncio
import sys
import tempfile
import unittest
from datetime import datetime, timedelta, timezone
from pathlib import Path
import httpx
sys.path.insert(0, str(Path(__file__).resolve().parents[3]))
from backend.app.dependencies import get_remote_client_service
from backend.app.db.remote_client_repository import RemoteClientRepository
from backend.app.main import app
from backend.app.services.remote_client_service import RemoteClientService
class _Clock:
def __init__(self, current: datetime):
self.current = current
def now(self) -> datetime:
return self.current
def advance(self, *, seconds: int) -> None:
self.current += timedelta(seconds=seconds)
class RemoteClientsApiGoldenTest(unittest.TestCase):
def setUp(self) -> None:
self.temp_dir = tempfile.TemporaryDirectory()
self.clock = _Clock(datetime(2026, 3, 26, 12, 0, 0, tzinfo=timezone.utc))
repository = RemoteClientRepository(str(Path(self.temp_dir.name) / "remote-clients.db"))
service = RemoteClientService(
repository=repository,
registration_token="secret-token",
offline_timeout_seconds=60,
now=self.clock.now,
)
async def _override_remote_client_service() -> RemoteClientService:
return service
app.dependency_overrides[get_remote_client_service] = _override_remote_client_service
def tearDown(self) -> None:
app.dependency_overrides.clear()
self.temp_dir.cleanup()
def _request(self, method: str, url: str, payload: dict | None = None, token: str | None = None) -> httpx.Response:
async def _run() -> httpx.Response:
transport = httpx.ASGITransport(app=app)
headers = {}
if token is not None:
headers["Authorization"] = f"Bearer {token}"
async with httpx.AsyncClient(transport=transport, base_url="http://testserver") as client:
if method == "GET":
return await client.get(url, headers=headers)
return await client.post(url, json=payload, headers=headers)
return asyncio.run(_run())
@staticmethod
def _register_payload() -> dict:
return {
"client_id": "client-123",
"display_name": "Jan MacBook",
"platform": "macos",
"agent_version": "1.1.0",
"endpoint": "http://192.168.1.25:8765",
"shares": [{"key": "downloads", "label": "Downloads"}],
}
def test_list_is_empty_by_default(self) -> None:
response = self._request("GET", "/api/clients")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {"items": []})
def test_register_then_list_then_heartbeat_and_status_timeout(self) -> None:
register_response = self._request(
"POST",
"/api/clients/register",
self._register_payload(),
token="secret-token",
)
self.assertEqual(register_response.status_code, 200)
register_body = register_response.json()
self.assertEqual(register_body["client_id"], "client-123")
self.assertEqual(register_body["display_name"], "Jan MacBook")
self.assertEqual(register_body["status"], "online")
self.assertEqual(register_body["last_seen"], "2026-03-26T12:00:00Z")
self.assertIsNone(register_body["last_error"])
self.assertIsNone(register_body["reachable_at"])
list_response = self._request("GET", "/api/clients")
self.assertEqual(list_response.status_code, 200)
self.assertEqual(len(list_response.json()["items"]), 1)
self.assertEqual(list_response.json()["items"][0]["status"], "online")
self.clock.advance(seconds=30)
heartbeat_response = self._request(
"POST",
"/api/clients/heartbeat",
{"client_id": "client-123", "agent_version": "1.1.1"},
token="secret-token",
)
self.assertEqual(heartbeat_response.status_code, 200)
heartbeat_body = heartbeat_response.json()
self.assertEqual(heartbeat_body["agent_version"], "1.1.1")
self.assertEqual(heartbeat_body["last_seen"], "2026-03-26T12:00:30Z")
self.assertEqual(heartbeat_body["status"], "online")
self.clock.advance(seconds=61)
timed_out_list = self._request("GET", "/api/clients")
self.assertEqual(timed_out_list.status_code, 200)
timed_out_item = timed_out_list.json()["items"][0]
self.assertEqual(timed_out_item["status"], "offline")
self.assertEqual(timed_out_item["last_seen"], "2026-03-26T12:00:30Z")
self.assertIsNone(timed_out_item["last_error"])
self.assertIsNone(timed_out_item["reachable_at"])
def test_register_rejects_invalid_token(self) -> None:
    """Registration carrying a wrong bearer token is refused with 403/forbidden."""
    payload = self._register_payload()
    response = self._request("POST", "/api/clients/register", payload, token="wrong-token")
    self.assertEqual(response.status_code, 403)
    error = response.json()["error"]
    self.assertEqual(error["code"], "forbidden")
# Allow running this golden-test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
@@ -3,6 +3,7 @@ from __future__ import annotations
import asyncio import asyncio
import sys import sys
import tempfile import tempfile
import threading
import time import time
import unittest import unittest
from pathlib import Path from pathlib import Path
@@ -25,6 +26,21 @@ class FailingFilesystemAdapter(FilesystemAdapter):
def copy_file(self, source: str, destination: str, on_progress: callable | None = None) -> None: def copy_file(self, source: str, destination: str, on_progress: callable | None = None) -> None:
raise OSError("forced copy failure") raise OSError("forced copy failure")
def copy_directory(self, source: str, destination: str) -> None:
    """Always fail with the same OSError as copy_file so directory copies hit the error path."""
    raise OSError("forced copy failure")
class BlockingCopyFilesystemAdapter(FilesystemAdapter):
    """Filesystem adapter whose copy_file blocks until the test releases it.

    Tests wait on `entered` to know a copy is in flight, then set `release`
    to let the copy proceed (used to exercise cancellation mid-batch).
    """

    def __init__(self) -> None:
        super().__init__()
        # entered: set by copy_file on entry; release: set by the test to unblock it.
        self.entered = threading.Event()
        self.release = threading.Event()

    def copy_file(self, source: str, destination: str, on_progress: callable | None = None) -> None:
        """Signal entry, wait (bounded) for the release gate, then delegate to the real copy."""
        self.entered.set()
        self.release.wait(timeout=2.0)
        return super().copy_file(source=source, destination=destination, on_progress=on_progress)
class CopyApiGoldenTest(unittest.TestCase): class CopyApiGoldenTest(unittest.TestCase):
def setUp(self) -> None: def setUp(self) -> None:
@@ -69,11 +85,21 @@ class CopyApiGoldenTest(unittest.TestCase):
while time.time() < deadline: while time.time() < deadline:
response = self._request("GET", f"/api/tasks/{task_id}") response = self._request("GET", f"/api/tasks/{task_id}")
body = response.json() body = response.json()
if body["status"] in {"completed", "failed"}: if body["status"] in {"completed", "failed", "cancelled"}:
return body return body
time.sleep(0.02) time.sleep(0.02)
self.fail("task did not reach terminal state in time") self.fail("task did not reach terminal state in time")
def _wait_for_status(self, task_id: str, statuses: set[str], timeout_s: float = 2.0) -> dict:
    """Poll the task detail endpoint until its status lands in *statuses*; fail on timeout."""
    stop_at = time.time() + timeout_s
    while time.time() < stop_at:
        snapshot = self._request("GET", f"/api/tasks/{task_id}").json()
        if snapshot["status"] in statuses:
            return snapshot
        time.sleep(0.02)
    self.fail(f"task did not reach one of {sorted(statuses)} in time")
def test_copy_success_create_task_shape(self) -> None: def test_copy_success_create_task_shape(self) -> None:
src = self.root / "source.txt" src = self.root / "source.txt"
src.write_text("hello", encoding="utf-8") src.write_text("hello", encoding="utf-8")
@@ -91,11 +117,137 @@ class CopyApiGoldenTest(unittest.TestCase):
detail = self._wait_task(body["task_id"]) detail = self._wait_task(body["task_id"])
self.assertEqual(detail["status"], "completed") self.assertEqual(detail["status"], "completed")
self.assertEqual(detail["done_items"], 1)
self.assertEqual(detail["total_items"], 1)
self.assertEqual(detail["total_bytes"], 5) self.assertEqual(detail["total_bytes"], 5)
self.assertEqual(detail["done_bytes"], 5) self.assertEqual(detail["done_bytes"], 5)
self.assertTrue((self.root / "copy.txt").exists()) self.assertTrue((self.root / "copy.txt").exists())
self.assertEqual((self.root / "copy.txt").read_text(encoding="utf-8"), "hello") self.assertEqual((self.root / "copy.txt").read_text(encoding="utf-8"), "hello")
def test_copy_batch_multi_file_success(self) -> None:
    """Batch-copying two files into a destination directory completes with both items copied."""
    for name, content in (("a.txt", "A"), ("b.txt", "B")):
        (self.root / name).write_text(content, encoding="utf-8")
    (self.root / "dest").mkdir()
    payload = {
        "sources": ["storage1/a.txt", "storage1/b.txt"],
        "destination_base": "storage1/dest",
    }
    response = self._request("POST", "/api/files/copy", payload)
    self.assertEqual(response.status_code, 202)
    detail = self._wait_task(response.json()["task_id"])
    self.assertEqual(detail["status"], "completed")
    self.assertEqual(detail["done_items"], 2)
    self.assertEqual(detail["total_items"], 2)
    self.assertEqual((self.root / "dest" / "a.txt").read_text(encoding="utf-8"), "A")
    self.assertEqual((self.root / "dest" / "b.txt").read_text(encoding="utf-8"), "B")
def test_copy_single_directory_success(self) -> None:
    """Copying one directory recursively reproduces its nested tree at the destination."""
    src = self.root / "photos"
    (src / "nested").mkdir(parents=True)
    (src / "cover.jpg").write_text("img", encoding="utf-8")
    (src / "nested" / "a.txt").write_text("nested", encoding="utf-8")
    response = self._request(
        "POST",
        "/api/files/copy",
        {"source": "storage1/photos", "destination": "storage1/photos-copy"},
    )
    self.assertEqual(response.status_code, 202)
    detail = self._wait_task(response.json()["task_id"])
    self.assertEqual(detail["status"], "completed")
    # Item counts cover the two files inside the tree, not the directories.
    self.assertEqual(detail["done_items"], 2)
    self.assertEqual(detail["total_items"], 2)
    self.assertTrue((self.root / "photos-copy").is_dir())
    self.assertEqual((self.root / "photos-copy" / "cover.jpg").read_text(encoding="utf-8"), "img")
    self.assertEqual((self.root / "photos-copy" / "nested" / "a.txt").read_text(encoding="utf-8"), "nested")
def test_copy_batch_multi_directory_success(self) -> None:
    """Batch-copying two directories into a destination base keeps each tree intact."""
    (self.root / "dir1" / "sub").mkdir(parents=True)
    (self.root / "dir2").mkdir()
    (self.root / "dir1" / "sub" / "a.txt").write_text("A", encoding="utf-8")
    (self.root / "dir2" / "b.txt").write_text("B", encoding="utf-8")
    (self.root / "dest").mkdir()
    response = self._request(
        "POST",
        "/api/files/copy",
        {
            "sources": ["storage1/dir1", "storage1/dir2"],
            "destination_base": "storage1/dest",
        },
    )
    self.assertEqual(response.status_code, 202)
    detail = self._wait_task(response.json()["task_id"])
    self.assertEqual(detail["status"], "completed")
    # One file per directory => two items in total.
    self.assertEqual(detail["done_items"], 2)
    self.assertEqual(detail["total_items"], 2)
    self.assertEqual((self.root / "dest" / "dir1" / "sub" / "a.txt").read_text(encoding="utf-8"), "A")
    self.assertEqual((self.root / "dest" / "dir2" / "b.txt").read_text(encoding="utf-8"), "B")
def test_copy_batch_mixed_file_and_directory_success(self) -> None:
    """A batch mixing a plain file and a directory copies both under the destination base."""
    (self.root / "file.txt").write_text("F", encoding="utf-8")
    (self.root / "docs" / "nested").mkdir(parents=True)
    (self.root / "docs" / "nested" / "note.txt").write_text("N", encoding="utf-8")
    (self.root / "dest").mkdir()
    response = self._request(
        "POST",
        "/api/files/copy",
        {
            "sources": ["storage1/file.txt", "storage1/docs"],
            "destination_base": "storage1/dest",
        },
    )
    self.assertEqual(response.status_code, 202)
    detail = self._wait_task(response.json()["task_id"])
    self.assertEqual(detail["status"], "completed")
    self.assertEqual(detail["done_items"], 2)
    self.assertEqual(detail["total_items"], 2)
    self.assertEqual((self.root / "dest" / "file.txt").read_text(encoding="utf-8"), "F")
    self.assertEqual((self.root / "dest" / "docs" / "nested" / "note.txt").read_text(encoding="utf-8"), "N")
def test_copy_batch_cancelled_after_current_file_finishes(self) -> None:
    """Cancelling mid-batch lets the in-flight file finish and skips the remaining ones."""
    blocking_fs = BlockingCopyFilesystemAdapter()
    path_guard = PathGuard({"storage1": str(self.root), "storage2": str(self.root)})
    self._set_services(path_guard=path_guard, filesystem=blocking_fs)
    (self.root / "a.txt").write_text("A", encoding="utf-8")
    (self.root / "b.txt").write_text("B", encoding="utf-8")
    (self.root / "dest").mkdir()
    response = self._request(
        "POST",
        "/api/files/copy",
        {
            "sources": ["storage1/a.txt", "storage1/b.txt"],
            "destination_base": "storage1/dest",
        },
    )
    task_id = response.json()["task_id"]
    # Wait until the first copy is actually in flight before cancelling.
    self.assertTrue(blocking_fs.entered.wait(timeout=2.0))
    running = self._wait_for_status(task_id, {"running"})
    self.assertEqual(running["current_item"], "a.txt")
    cancel_response = self._request("POST", f"/api/tasks/{task_id}/cancel")
    self.assertEqual(cancel_response.status_code, 200)
    # Cancel is acknowledged as "cancelling" while the current file still runs.
    self.assertEqual(cancel_response.json()["status"], "cancelling")
    blocking_fs.release.set()
    detail = self._wait_task(task_id)
    self.assertEqual(detail["status"], "cancelled")
    # a.txt completed before the cancel took effect; b.txt was never started.
    self.assertEqual(detail["done_items"], 1)
    self.assertEqual(detail["total_items"], 2)
    self.assertTrue((self.root / "dest" / "a.txt").exists())
    self.assertFalse((self.root / "dest" / "b.txt").exists())
def test_copy_source_not_found(self) -> None: def test_copy_source_not_found(self) -> None:
response = self._request( response = self._request(
"POST", "POST",
@@ -115,18 +267,6 @@ class CopyApiGoldenTest(unittest.TestCase):
}, },
) )
def test_copy_source_is_directory_type_conflict(self) -> None:
    """Copying a directory source via the single-file shape is rejected as a type conflict."""
    (self.root / "dir").mkdir()
    payload = {"source": "storage1/dir", "destination": "storage1/out.txt"}
    response = self._request("POST", "/api/files/copy", payload)
    self.assertEqual(response.status_code, 409)
    error = response.json()["error"]
    self.assertEqual(error["code"], "type_conflict")
def test_copy_destination_exists_already_exists(self) -> None: def test_copy_destination_exists_already_exists(self) -> None:
(self.root / "source.txt").write_text("x", encoding="utf-8") (self.root / "source.txt").write_text("x", encoding="utf-8")
(self.root / "exists.txt").write_text("y", encoding="utf-8") (self.root / "exists.txt").write_text("y", encoding="utf-8")
@@ -149,6 +289,38 @@ class CopyApiGoldenTest(unittest.TestCase):
}, },
) )
def test_copy_directory_destination_exists_already_exists(self) -> None:
    """Copying a directory onto an existing directory yields 409 already_exists."""
    source_dir = self.root / "src"
    source_dir.mkdir()
    (source_dir / "a.txt").write_text("x", encoding="utf-8")
    (self.root / "exists").mkdir()
    payload = {"source": "storage1/src", "destination": "storage1/exists"}
    response = self._request("POST", "/api/files/copy", payload)
    self.assertEqual(response.status_code, 409)
    self.assertEqual(response.json()["error"]["code"], "already_exists")
def test_copy_batch_destination_exists_already_exists(self) -> None:
    """A batch copy is rejected up-front when any destination name already exists."""
    (self.root / "a.txt").write_text("A", encoding="utf-8")
    (self.root / "dest").mkdir()
    # Pre-existing dest/a.txt collides with the first batch source.
    (self.root / "dest" / "a.txt").write_text("exists", encoding="utf-8")
    (self.root / "b.txt").write_text("B", encoding="utf-8")
    response = self._request(
        "POST",
        "/api/files/copy",
        {
            "sources": ["storage1/a.txt", "storage1/b.txt"],
            "destination_base": "storage1/dest",
        },
    )
    self.assertEqual(response.status_code, 409)
    self.assertEqual(response.json()["error"]["code"], "already_exists")
def test_copy_traversal_source(self) -> None: def test_copy_traversal_source(self) -> None:
response = self._request( response = self._request(
"POST", "POST",
@@ -171,6 +343,31 @@ class CopyApiGoldenTest(unittest.TestCase):
self.assertEqual(response.status_code, 403) self.assertEqual(response.status_code, 403)
self.assertEqual(response.json()["error"]["code"], "path_traversal_detected") self.assertEqual(response.json()["error"]["code"], "path_traversal_detected")
def test_copy_invalid_root_alias(self) -> None:
    """A destination under an unknown root alias is refused with 403/invalid_root_alias."""
    (self.root / "source.txt").write_text("x", encoding="utf-8")
    payload = {"source": "storage1/source.txt", "destination": "unknown/out.txt"}
    response = self._request("POST", "/api/files/copy", payload)
    self.assertEqual(response.status_code, 403)
    error = response.json()["error"]
    self.assertEqual(error["code"], "invalid_root_alias")
def test_copy_destination_inside_directory_source_blocked(self) -> None:
    """Copying a directory into its own subtree is rejected as an invalid request."""
    source_dir = self.root / "src"
    source_dir.mkdir()
    (source_dir / "a.txt").write_text("x", encoding="utf-8")
    payload = {"source": "storage1/src", "destination": "storage1/src/child"}
    response = self._request("POST", "/api/files/copy", payload)
    self.assertEqual(response.status_code, 400)
    self.assertEqual(response.json()["error"]["code"], "invalid_request")
def test_copy_source_symlink_rejected(self) -> None: def test_copy_source_symlink_rejected(self) -> None:
target = self.root / "real.txt" target = self.root / "real.txt"
target.write_text("x", encoding="utf-8") target.write_text("x", encoding="utf-8")
@@ -0,0 +1,344 @@
from __future__ import annotations
import asyncio
import sys
import tempfile
import threading
import time
import unittest
import zipfile
from io import BytesIO
from pathlib import Path
import httpx
sys.path.insert(0, str(Path(__file__).resolve().parents[3]))
from backend.app.dependencies import get_archive_download_task_service, get_file_ops_service, get_task_service
from backend.app.db.history_repository import HistoryRepository
from backend.app.db.task_repository import TaskRepository
from backend.app.fs.filesystem_adapter import FilesystemAdapter
from backend.app.main import app
from backend.app.security.path_guard import PathGuard
from backend.app.services.archive_download_task_service import ArchiveDownloadTaskService
from backend.app.services.file_ops_service import FileOpsService, ZipDownloadPreflightLimits
from backend.app.services.task_service import TaskService
from backend.app.tasks_runner import TaskRunner
class BlockingArchiveFileOpsService(FileOpsService):
    """File-ops service that pauses after preflight until the test opens the gate.

    Lets a test observe the task in its pre-ready state before the archive is built.
    """

    def __init__(self, *args, gate: threading.Event, **kwargs):
        super().__init__(*args, **kwargs)
        self._gate = gate

    def _run_zip_download_preflight(self, resolved_targets: list) -> None:
        """Run the real preflight, then block (bounded) on the gate."""
        super()._run_zip_download_preflight(resolved_targets)
        self._gate.wait(timeout=2.0)
class FailingArchiveFileOpsService(FileOpsService):
    """File-ops service that writes a partial zip entry and then fails.

    Used to verify the partial artifact is cleaned up after an archive failure.
    """

    def _write_download_target_to_zip(self, archive: zipfile.ZipFile, resolved_target, on_each_item=None) -> None:
        # Leave a partial entry behind so cleanup has something real to remove.
        archive.writestr("partial.txt", b"partial")
        raise OSError("forced archive failure")
class BlockingArchiveBuildFileOpsService(FileOpsService):
    """File-ops service that blocks mid-archive-build until the test releases it.

    Writes one partial entry, signals `entered`, waits on `release`, then
    finishes the real build — lets tests cancel while status is "preparing".
    """

    def __init__(self, *args, entered: threading.Event, release: threading.Event, **kwargs):
        super().__init__(*args, **kwargs)
        self._entered = entered
        self._release = release

    def _write_download_target_to_zip(self, archive: zipfile.ZipFile, resolved_target, on_each_item=None) -> None:
        archive.writestr("partial.txt", b"partial")
        self._entered.set()
        self._release.wait(timeout=2.0)
        if on_each_item:
            on_each_item()
        super()._write_download_target_to_zip(archive, resolved_target, on_each_item=on_each_item)
class DownloadApiGoldenTest(unittest.TestCase):
    """Golden tests for direct file download and zip archive-prepare endpoints."""

    def setUp(self) -> None:
        """Create an isolated storage root, task DB and artifact directory per test."""
        self.temp_dir = tempfile.TemporaryDirectory()
        self.root = Path(self.temp_dir.name) / "root"
        self.root.mkdir(parents=True, exist_ok=True)
        self.db_path = str(Path(self.temp_dir.name) / "tasks.db")
        self.artifact_root = Path(self.temp_dir.name) / "archive_tmp"
        self.path_guard = PathGuard({"storage1": str(self.root), "storage2": str(self.root)})
        self.filesystem = FilesystemAdapter()
        self.task_repo = TaskRepository(self.db_path)
        self.history_repo = HistoryRepository(self.db_path)
        self._override_services()

    def tearDown(self) -> None:
        """Drop dependency overrides and remove all temporary files."""
        app.dependency_overrides.clear()
        self.temp_dir.cleanup()

    def _override_services(
        self,
        *,
        file_ops_service: FileOpsService | None = None,
        artifact_ttl_seconds: int = 1800,
    ) -> None:
        """Install file-ops/archive/task services into the app's dependency overrides.

        Pass a custom *file_ops_service* (e.g. a blocking/failing subclass) or a
        short *artifact_ttl_seconds* to exercise specific behaviors.
        """
        file_ops_service = file_ops_service or FileOpsService(
            path_guard=self.path_guard,
            filesystem=self.filesystem,
            history_repository=self.history_repo,
            zip_download_preflight_limits=ZipDownloadPreflightLimits(),
        )
        runner = TaskRunner(repository=self.task_repo, filesystem=self.filesystem, history_repository=self.history_repo)
        archive_service = ArchiveDownloadTaskService(
            path_guard=self.path_guard,
            repository=self.task_repo,
            runner=runner,
            history_repository=self.history_repo,
            file_ops_service=file_ops_service,
            artifact_root=self.artifact_root,
            artifact_ttl_seconds=artifact_ttl_seconds,
        )
        task_service = TaskService(repository=self.task_repo)

        async def _override_file_ops_service() -> FileOpsService:
            return file_ops_service

        async def _override_archive_service() -> ArchiveDownloadTaskService:
            return archive_service

        async def _override_task_service() -> TaskService:
            return task_service

        app.dependency_overrides[get_file_ops_service] = _override_file_ops_service
        app.dependency_overrides[get_archive_download_task_service] = _override_archive_service
        app.dependency_overrides[get_task_service] = _override_task_service

    def _request(self, method: str, url: str, payload: dict | None = None) -> httpx.Response:
        """Issue an in-process GET/POST against the ASGI app and return the response."""
        async def _run() -> httpx.Response:
            transport = httpx.ASGITransport(app=app)
            async with httpx.AsyncClient(transport=transport, base_url="http://testserver") as client:
                if method == "GET":
                    return await client.get(url)
                return await client.post(url, json=payload)
        return asyncio.run(_run())

    def _wait_for_task_status(self, task_id: str, statuses: set[str], timeout_s: float = 2.0) -> dict:
        """Poll the task endpoint until its status lands in *statuses*; fail on timeout."""
        deadline = time.time() + timeout_s
        while time.time() < deadline:
            response = self._request("GET", f"/api/tasks/{task_id}")
            body = response.json()
            if body["status"] in statuses:
                return body
            time.sleep(0.02)
        # Fix: name the awaited statuses in the failure message, consistent with
        # the sibling wait helpers in the other golden-test classes.
        self.fail(f"task did not reach one of {sorted(statuses)} in time")

    def test_download_success_for_allowed_file(self) -> None:
        """Direct download of an allowed file streams its bytes as an attachment."""
        src = self.root / "report.txt"
        src.write_text("hello download", encoding="utf-8")
        response = self._request("GET", "/api/files/download?path=storage1/report.txt")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b"hello download")
        self.assertIn('attachment; filename="report.txt"', response.headers.get("content-disposition", ""))
        self.assertEqual(response.headers.get("content-type"), "text/plain; charset=utf-8")

    def test_archive_prepare_single_directory_ends_ready(self) -> None:
        """Preparing an archive for one directory reaches "ready" and names the zip after it."""
        (self.root / "docs").mkdir()
        (self.root / "docs" / "a.txt").write_text("a", encoding="utf-8")
        created = self._request("POST", "/api/files/download/archive-prepare", {"paths": ["storage1/docs"]})
        self.assertEqual(created.status_code, 202)
        task = self._wait_for_task_status(created.json()["task_id"], {"ready"})
        self.assertEqual(task["operation"], "download")
        self.assertEqual(task["status"], "ready")
        self.assertEqual(task["destination"], "docs.zip")

    def test_archive_prepare_multi_mixed_selection_ends_ready(self) -> None:
        """A mixed multi-selection archives under a timestamped kodidownload-*.zip name."""
        (self.root / "readme.txt").write_text("R", encoding="utf-8")
        (self.root / "photos").mkdir()
        (self.root / "photos" / "img.txt").write_text("P", encoding="utf-8")
        created = self._request(
            "POST",
            "/api/files/download/archive-prepare",
            {"paths": ["storage1/readme.txt", "storage1/photos"]},
        )
        self.assertEqual(created.status_code, 202)
        task = self._wait_for_task_status(created.json()["task_id"], {"ready"})
        self.assertEqual(task["status"], "ready")
        self.assertEqual(task["source"], "storage1/readme.txt, storage1/photos")
        self.assertRegex(task["destination"], r'^kodidownload-\d{8}-\d{6}\.zip$')

    def test_archive_retrieval_from_ready_task_works(self) -> None:
        """A ready task's archive downloads as a zip containing the directory tree."""
        (self.root / "docs").mkdir()
        (self.root / "docs" / "a.txt").write_text("a", encoding="utf-8")
        created = self._request("POST", "/api/files/download/archive-prepare", {"paths": ["storage1/docs"]})
        task = self._wait_for_task_status(created.json()["task_id"], {"ready"})
        response = self._request("GET", f"/api/files/download/archive/{task['id']}")
        self.assertEqual(response.status_code, 200)
        self.assertIn('attachment; filename="docs.zip"', response.headers.get("content-disposition", ""))
        with zipfile.ZipFile(BytesIO(response.content)) as archive:
            self.assertIn("docs/", archive.namelist())
            self.assertIn("docs/a.txt", archive.namelist())
            self.assertEqual(archive.read("docs/a.txt"), b"a")

    def test_archive_retrieval_before_ready_rejected(self) -> None:
        """Fetching the archive while the task is still preparing yields 409 download_not_ready."""
        gate = threading.Event()
        file_ops_service = BlockingArchiveFileOpsService(
            path_guard=self.path_guard,
            filesystem=self.filesystem,
            history_repository=self.history_repo,
            zip_download_preflight_limits=ZipDownloadPreflightLimits(),
            gate=gate,
        )
        self._override_services(file_ops_service=file_ops_service)
        (self.root / "docs").mkdir()
        (self.root / "docs" / "a.txt").write_text("a", encoding="utf-8")
        created = self._request("POST", "/api/files/download/archive-prepare", {"paths": ["storage1/docs"]})
        task = self._wait_for_task_status(created.json()["task_id"], {"requested", "preparing"})
        response = self._request("GET", f"/api/files/download/archive/{task['id']}")
        gate.set()
        self.assertEqual(response.status_code, 409)
        self.assertEqual(response.json()["error"]["code"], "download_not_ready")

    def test_archive_preflight_failure_sets_failed_and_error_code(self) -> None:
        """A symlink inside the selection makes preflight fail the task."""
        target = self.root / "real.txt"
        target.write_text("x", encoding="utf-8")
        (self.root / "docs").mkdir()
        (self.root / "docs" / "link.txt").symlink_to(target)
        created = self._request("POST", "/api/files/download/archive-prepare", {"paths": ["storage1/docs"]})
        task = self._wait_for_task_status(created.json()["task_id"], {"failed"})
        self.assertEqual(task["status"], "failed")
        self.assertEqual(task["error_code"], "download_preflight_failed")

    def test_archive_failure_removes_partial_artifact(self) -> None:
        """A build failure reports io_error and leaves no partial artifact behind."""
        file_ops_service = FailingArchiveFileOpsService(
            path_guard=self.path_guard,
            filesystem=self.filesystem,
            history_repository=self.history_repo,
            zip_download_preflight_limits=ZipDownloadPreflightLimits(),
        )
        self._override_services(file_ops_service=file_ops_service)
        (self.root / "docs").mkdir()
        (self.root / "docs" / "a.txt").write_text("a", encoding="utf-8")
        created = self._request("POST", "/api/files/download/archive-prepare", {"paths": ["storage1/docs"]})
        task = self._wait_for_task_status(created.json()["task_id"], {"failed"})
        self.assertEqual(task["error_code"], "io_error")
        self.assertEqual(list(self.artifact_root.glob("*")), [])

    def test_archive_cancel_during_preparing_sets_cancelled_and_removes_partial_artifact(self) -> None:
        """Cancelling while preparing ends the task cancelled and removes the partial zip."""
        entered = threading.Event()
        release = threading.Event()
        file_ops_service = BlockingArchiveBuildFileOpsService(
            path_guard=self.path_guard,
            filesystem=self.filesystem,
            history_repository=self.history_repo,
            zip_download_preflight_limits=ZipDownloadPreflightLimits(),
            entered=entered,
            release=release,
        )
        self._override_services(file_ops_service=file_ops_service)
        (self.root / "docs").mkdir()
        (self.root / "docs" / "a.txt").write_text("a", encoding="utf-8")
        created = self._request("POST", "/api/files/download/archive-prepare", {"paths": ["storage1/docs"]})
        self.assertEqual(created.status_code, 202)
        self.assertTrue(entered.wait(timeout=2.0))
        response = self._request("POST", f"/api/files/download/archive/{created.json()['task_id']}/cancel")
        release.set()
        task = self._wait_for_task_status(created.json()["task_id"], {"cancelled"})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json()["status"], "cancelled")
        self.assertEqual(task["status"], "cancelled")
        self.assertEqual(list(self.artifact_root.glob("*")), [])

    def test_archive_retrieval_for_cancelled_task_rejected(self) -> None:
        """Fetching the archive of a cancelled task yields 409 download_cancelled."""
        entered = threading.Event()
        release = threading.Event()
        file_ops_service = BlockingArchiveBuildFileOpsService(
            path_guard=self.path_guard,
            filesystem=self.filesystem,
            history_repository=self.history_repo,
            zip_download_preflight_limits=ZipDownloadPreflightLimits(),
            entered=entered,
            release=release,
        )
        self._override_services(file_ops_service=file_ops_service)
        (self.root / "docs").mkdir()
        (self.root / "docs" / "a.txt").write_text("a", encoding="utf-8")
        created = self._request("POST", "/api/files/download/archive-prepare", {"paths": ["storage1/docs"]})
        self.assertTrue(entered.wait(timeout=2.0))
        cancel_response = self._request("POST", f"/api/files/download/archive/{created.json()['task_id']}/cancel")
        release.set()
        response = self._request("GET", f"/api/files/download/archive/{created.json()['task_id']}")
        self.assertEqual(cancel_response.status_code, 200)
        self.assertEqual(response.status_code, 409)
        self.assertEqual(response.json()["error"]["code"], "download_cancelled")

    def test_archive_cancel_after_ready_rejected(self) -> None:
        """Cancelling a task that already reached "ready" yields 409 download_not_cancellable."""
        (self.root / "docs").mkdir()
        (self.root / "docs" / "a.txt").write_text("a", encoding="utf-8")
        created = self._request("POST", "/api/files/download/archive-prepare", {"paths": ["storage1/docs"]})
        task = self._wait_for_task_status(created.json()["task_id"], {"ready"})
        response = self._request("POST", f"/api/files/download/archive/{task['id']}/cancel")
        self.assertEqual(response.status_code, 409)
        self.assertEqual(response.json()["error"]["code"], "download_not_cancellable")

    def test_expired_artifact_rejected_and_removed(self) -> None:
        """An expired artifact is rejected with 410 and deleted from disk and the repo."""
        (self.root / "docs").mkdir()
        (self.root / "docs" / "a.txt").write_text("a", encoding="utf-8")
        self._override_services(artifact_ttl_seconds=1)
        created = self._request("POST", "/api/files/download/archive-prepare", {"paths": ["storage1/docs"]})
        task = self._wait_for_task_status(created.json()["task_id"], {"ready"})
        artifact = self.task_repo.get_artifact(task["id"])
        # Force expiry by rewriting the artifact's expires_at into the past.
        self.task_repo.upsert_artifact(
            task_id=task["id"],
            file_path=artifact["file_path"],
            file_name=artifact["file_name"],
            expires_at="2000-01-01T00:00:00Z",
        )
        response = self._request("GET", f"/api/files/download/archive/{task['id']}")
        self.assertEqual(response.status_code, 410)
        self.assertEqual(response.json()["error"]["code"], "archive_expired")
        self.assertIsNone(self.task_repo.get_artifact(task["id"]))
        self.assertFalse(Path(artifact["file_path"]).exists())

    def test_archive_prepare_rejects_single_file(self) -> None:
        """archive-prepare on a single plain file is an invalid request (use direct download)."""
        (self.root / "report.txt").write_text("hello download", encoding="utf-8")
        response = self._request("POST", "/api/files/download/archive-prepare", {"paths": ["storage1/report.txt"]})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.json()["error"]["code"], "invalid_request")

    def test_direct_archive_download_route_rejected(self) -> None:
        """Directly downloading a directory via /api/files/download is an invalid request."""
        (self.root / "docs").mkdir()
        (self.root / "docs" / "a.txt").write_text("a", encoding="utf-8")
        response = self._request("GET", "/api/files/download?path=storage1/docs")
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.json()["error"]["code"], "invalid_request")
# Allow running this golden-test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
@@ -0,0 +1,255 @@
from __future__ import annotations
import asyncio
import sys
import tempfile
import threading
import time
import unittest
from pathlib import Path
import httpx
sys.path.insert(0, str(Path(__file__).resolve().parents[3]))
from backend.app.dependencies import get_duplicate_task_service, get_task_service
from backend.app.db.task_repository import TaskRepository
from backend.app.fs.filesystem_adapter import FilesystemAdapter
from backend.app.main import app
from backend.app.security.path_guard import PathGuard
from backend.app.services.duplicate_task_service import DuplicateTaskService
from backend.app.services.task_service import TaskService
from backend.app.tasks_runner import TaskRunner
class FailOnSecondCopyFilesystemAdapter(FilesystemAdapter):
    """Adapter that lets the first copy succeed and fails the second one.

    Used to verify partial-failure handling in batch duplicates.
    """

    def __init__(self) -> None:
        super().__init__()
        # Counts copy_file invocations to target exactly the second call.
        self._call_count = 0

    def copy_file(self, source: str, destination: str, on_progress: callable | None = None) -> None:
        """Delegate to the real copy, except on call number two, which raises OSError."""
        self._call_count += 1
        if self._call_count == 2:
            raise OSError("forced duplicate failure")
        super().copy_file(source=source, destination=destination, on_progress=on_progress)
class BlockingDuplicateFilesystemAdapter(FilesystemAdapter):
    """Adapter whose copy_file blocks until the test releases it.

    Tests wait on `entered` to know a copy is in flight and set `release`
    to let it proceed (used for cancel-mid-duplicate scenarios).
    """

    def __init__(self) -> None:
        super().__init__()
        # entered: set by copy_file on entry; release: set by the test to unblock it.
        self.entered = threading.Event()
        self.release = threading.Event()

    def copy_file(self, source: str, destination: str, on_progress: callable | None = None) -> None:
        """Signal entry, wait (bounded) for the release gate, then perform the real copy."""
        self.entered.set()
        self.release.wait(timeout=2.0)
        super().copy_file(source=source, destination=destination, on_progress=on_progress)
class DuplicateApiGoldenTest(unittest.TestCase):
def setUp(self) -> None:
    """Create an isolated storage root and task repository, then wire services."""
    self.temp_dir = tempfile.TemporaryDirectory()
    self.root = Path(self.temp_dir.name) / "root"
    self.root.mkdir(parents=True, exist_ok=True)
    self.repo = TaskRepository(str(Path(self.temp_dir.name) / "tasks.db"))
    # Both aliases point at the same root; tests only use storage1.
    path_guard = PathGuard({"storage1": str(self.root), "storage2": str(self.root)})
    self._set_services(path_guard=path_guard, filesystem=FilesystemAdapter())
def tearDown(self) -> None:
    """Drop dependency overrides and remove temporary files."""
    app.dependency_overrides.clear()
    self.temp_dir.cleanup()
def _set_services(self, path_guard: PathGuard, filesystem: FilesystemAdapter) -> None:
    """Install duplicate/task services backed by *filesystem* into the app overrides."""
    runner = TaskRunner(repository=self.repo, filesystem=filesystem)
    duplicate_service = DuplicateTaskService(path_guard=path_guard, repository=self.repo, runner=runner)
    task_service = TaskService(repository=self.repo)

    async def _override_duplicate_service() -> DuplicateTaskService:
        return duplicate_service

    async def _override_task_service() -> TaskService:
        return task_service

    app.dependency_overrides[get_duplicate_task_service] = _override_duplicate_service
    app.dependency_overrides[get_task_service] = _override_task_service
def _request(self, method: str, url: str, payload: dict | None = None) -> httpx.Response:
    """Issue an in-process GET/POST against the ASGI app and return the response."""
    async def _dispatch() -> httpx.Response:
        transport = httpx.ASGITransport(app=app)
        async with httpx.AsyncClient(transport=transport, base_url="http://testserver") as client:
            if method == "POST":
                response = await client.post(url, json=payload)
            else:
                response = await client.get(url)
            return response
    return asyncio.run(_dispatch())
def _wait_task(self, task_id: str, timeout_s: float = 2.0) -> dict:
    """Poll until the task reaches a terminal state and return its detail payload."""
    terminal = {"completed", "failed", "cancelled"}
    stop_at = time.time() + timeout_s
    while time.time() < stop_at:
        detail = self._request("GET", f"/api/tasks/{task_id}").json()
        if detail["status"] in terminal:
            return detail
        time.sleep(0.02)
    self.fail("task did not reach terminal state in time")
def _wait_for_status(self, task_id: str, statuses: set[str], timeout_s: float = 2.0) -> dict:
    """Poll the task detail endpoint until its status is in *statuses*; fail on timeout."""
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        response = self._request("GET", f"/api/tasks/{task_id}")
        body = response.json()
        if body["status"] in statuses:
            return body
        time.sleep(0.02)
    self.fail(f"task did not reach one of {sorted(statuses)} in time")
def test_duplicate_single_file_success(self) -> None:
    """Duplicating one file creates "<name> copy.<ext>" alongside the original."""
    (self.root / "note.txt").write_text("hello", encoding="utf-8")
    response = self._request("POST", "/api/files/duplicate", {"paths": ["storage1/note.txt"]})
    self.assertEqual(response.status_code, 202)
    detail = self._wait_task(response.json()["task_id"])
    self.assertEqual(detail["operation"], "duplicate")
    self.assertEqual(detail["status"], "completed")
    self.assertEqual(detail["done_items"], 1)
    self.assertEqual(detail["total_items"], 1)
    self.assertEqual(detail["source"], "storage1/note.txt")
    self.assertEqual(detail["destination"], "storage1/note copy.txt")
    self.assertEqual((self.root / "note copy.txt").read_text(encoding="utf-8"), "hello")
def test_duplicate_single_directory_success(self) -> None:
    """Duplicating a directory recreates its full tree under "<name> copy"."""
    (self.root / "Folder" / "nested").mkdir(parents=True)
    (self.root / "Folder" / "alpha.txt").write_text("A", encoding="utf-8")
    (self.root / "Folder" / "nested" / "beta.txt").write_text("B", encoding="utf-8")
    response = self._request("POST", "/api/files/duplicate", {"paths": ["storage1/Folder"]})
    self.assertEqual(response.status_code, 202)
    detail = self._wait_task(response.json()["task_id"])
    self.assertEqual(detail["status"], "completed")
    # Item counts cover the two files inside the tree.
    self.assertEqual(detail["done_items"], 2)
    self.assertEqual(detail["total_items"], 2)
    self.assertTrue((self.root / "Folder copy").is_dir())
    self.assertEqual((self.root / "Folder copy" / "alpha.txt").read_text(encoding="utf-8"), "A")
    self.assertEqual((self.root / "Folder copy" / "nested" / "beta.txt").read_text(encoding="utf-8"), "B")
def test_duplicate_multi_select_success(self) -> None:
    """Duplicating a file and a directory together summarizes source/destination labels."""
    (self.root / "a.txt").write_text("A", encoding="utf-8")
    (self.root / "docs" / "nested").mkdir(parents=True)
    (self.root / "docs" / "nested" / "b.txt").write_text("B", encoding="utf-8")
    response = self._request(
        "POST",
        "/api/files/duplicate",
        {"paths": ["storage1/a.txt", "storage1/docs"]},
    )
    self.assertEqual(response.status_code, 202)
    detail = self._wait_task(response.json()["task_id"])
    self.assertEqual(detail["status"], "completed")
    self.assertEqual(detail["done_items"], 2)
    self.assertEqual(detail["total_items"], 2)
    # Multi-select collapses to summary labels rather than concrete paths.
    self.assertEqual(detail["source"], "2 items")
    self.assertEqual(detail["destination"], "same directory")
    self.assertEqual((self.root / "a copy.txt").read_text(encoding="utf-8"), "A")
    self.assertEqual((self.root / "docs copy" / "nested" / "b.txt").read_text(encoding="utf-8"), "B")
def test_duplicate_multi_select_cancelled_after_current_item_finishes(self) -> None:
    """Cancelling mid-duplicate lets the in-flight item finish and skips the rest."""
    blocking_fs = BlockingDuplicateFilesystemAdapter()
    path_guard = PathGuard({"storage1": str(self.root), "storage2": str(self.root)})
    self._set_services(path_guard=path_guard, filesystem=blocking_fs)
    (self.root / "a.txt").write_text("A", encoding="utf-8")
    (self.root / "b.txt").write_text("B", encoding="utf-8")
    response = self._request(
        "POST",
        "/api/files/duplicate",
        {"paths": ["storage1/a.txt", "storage1/b.txt"]},
    )
    task_id = response.json()["task_id"]
    # Wait until the first copy is actually in flight before cancelling.
    self.assertTrue(blocking_fs.entered.wait(timeout=2.0))
    running = self._wait_for_status(task_id, {"running"})
    self.assertEqual(running["current_item"], "a.txt")
    cancel_response = self._request("POST", f"/api/tasks/{task_id}/cancel")
    self.assertEqual(cancel_response.status_code, 200)
    # Cancel is acknowledged as "cancelling" while the current item still runs.
    self.assertEqual(cancel_response.json()["status"], "cancelling")
    blocking_fs.release.set()
    detail = self._wait_task(task_id)
    self.assertEqual(detail["status"], "cancelled")
    # a.txt was duplicated before the cancel took effect; b.txt never started.
    self.assertEqual(detail["done_items"], 1)
    self.assertEqual(detail["total_items"], 2)
    self.assertTrue((self.root / "a copy.txt").exists())
    self.assertFalse((self.root / "b copy.txt").exists())
def test_duplicate_collision_resolution_for_files_and_directories(self) -> None:
(self.root / "report.txt").write_text("R", encoding="utf-8")
(self.root / "report copy.txt").write_text("existing", encoding="utf-8")
(self.root / "report copy 2.txt").write_text("existing", encoding="utf-8")
(self.root / "Album").mkdir()
(self.root / "Album copy").mkdir()
(self.root / "Album copy 2").mkdir()
file_response = self._request("POST", "/api/files/duplicate", {"paths": ["storage1/report.txt"]})
dir_response = self._request("POST", "/api/files/duplicate", {"paths": ["storage1/Album"]})
self.assertEqual(self._wait_task(file_response.json()["task_id"])["destination"], "storage1/report copy 3.txt")
self.assertEqual(self._wait_task(dir_response.json()["task_id"])["destination"], "storage1/Album copy 3")
self.assertTrue((self.root / "report copy 3.txt").exists())
self.assertTrue((self.root / "Album copy 3").is_dir())
def test_duplicate_skips_top_level_macos_sidecar(self) -> None:
(self.root / "._note.txt").write_text("sidecar", encoding="utf-8")
response = self._request("POST", "/api/files/duplicate", {"paths": ["storage1/._note.txt"]})
self.assertEqual(response.status_code, 202)
detail = self._wait_task(response.json()["task_id"])
self.assertEqual(detail["status"], "completed")
self.assertEqual(detail["done_items"], 0)
self.assertEqual(detail["total_items"], 0)
self.assertFalse((self.root / "._note copy.txt").exists())
def test_duplicate_skips_macos_sidecars_inside_directory_tree(self) -> None:
(self.root / "docs" / "nested").mkdir(parents=True)
(self.root / "docs" / "keep.txt").write_text("keep", encoding="utf-8")
(self.root / "docs" / "._skip.txt").write_text("skip", encoding="utf-8")
(self.root / "docs" / "nested" / "._nested.txt").write_text("skip", encoding="utf-8")
(self.root / "docs" / "nested" / "real.txt").write_text("real", encoding="utf-8")
response = self._request("POST", "/api/files/duplicate", {"paths": ["storage1/docs"]})
self.assertEqual(response.status_code, 202)
self._wait_task(response.json()["task_id"])
self.assertTrue((self.root / "docs copy" / "keep.txt").exists())
self.assertTrue((self.root / "docs copy" / "nested" / "real.txt").exists())
self.assertFalse((self.root / "docs copy" / "._skip.txt").exists())
self.assertFalse((self.root / "docs copy" / "nested" / "._nested.txt").exists())
def test_duplicate_failure_removes_partial_directory_artifact(self) -> None:
(self.root / "docs").mkdir()
(self.root / "docs" / "a.txt").write_text("A", encoding="utf-8")
(self.root / "docs" / "b.txt").write_text("B", encoding="utf-8")
path_guard = PathGuard({"storage1": str(self.root), "storage2": str(self.root)})
self._set_services(path_guard=path_guard, filesystem=FailOnSecondCopyFilesystemAdapter())
response = self._request("POST", "/api/files/duplicate", {"paths": ["storage1/docs"]})
self.assertEqual(response.status_code, 202)
detail = self._wait_task(response.json()["task_id"])
self.assertEqual(detail["status"], "failed")
self.assertEqual(detail["error_code"], "io_error")
self.assertFalse((self.root / "docs copy").exists())
def test_duplicate_path_safety_stays_intact(self) -> None:
response = self._request("POST", "/api/files/duplicate", {"paths": ["storage1/../escape.txt"]})
self.assertEqual(response.status_code, 403)
self.assertEqual(response.json()["error"]["code"], "path_traversal_detected")
if __name__ == "__main__":
unittest.main()
@@ -80,6 +80,19 @@ class EditApiGoldenTest(unittest.TestCase):
self.assertEqual(body["content_type"], "text/x-python") self.assertEqual(body["content_type"], "text/x-python")
self.assertEqual(body["content"], "print('hello')\n") self.assertEqual(body["content"], "print('hello')\n")
def test_edit_view_conf_success(self) -> None:
file_path = self.root / "app.conf"
file_path.write_text("enabled=true\n", encoding="utf-8")
response = self._request("GET", "/api/files/view", params={"path": "storage1/app.conf", "for_edit": "true"})
self.assertEqual(response.status_code, 200)
body = response.json()
self.assertEqual(body["path"], "storage1/app.conf")
self.assertEqual(body["name"], "app.conf")
self.assertEqual(body["content_type"], "text/plain")
self.assertEqual(body["content"], "enabled=true\n")
def test_save_success(self) -> None: def test_save_success(self) -> None:
file_path = self.root / "notes.txt" file_path = self.root / "notes.txt"
file_path.write_text("hello", encoding="utf-8") file_path.write_text("hello", encoding="utf-8")
@@ -3,6 +3,8 @@ from __future__ import annotations
import asyncio import asyncio
import sys import sys
import tempfile import tempfile
import threading
import time
import unittest import unittest
from pathlib import Path from pathlib import Path
@@ -10,11 +12,27 @@ import httpx
sys.path.insert(0, str(Path(__file__).resolve().parents[3])) sys.path.insert(0, str(Path(__file__).resolve().parents[3]))
from backend.app.dependencies import get_file_ops_service from backend.app.dependencies import get_delete_task_service, get_file_ops_service, get_task_service
from backend.app.db.task_repository import TaskRepository
from backend.app.fs.filesystem_adapter import FilesystemAdapter from backend.app.fs.filesystem_adapter import FilesystemAdapter
from backend.app.main import app from backend.app.main import app
from backend.app.security.path_guard import PathGuard from backend.app.security.path_guard import PathGuard
from backend.app.services.delete_task_service import DeleteTaskService
from backend.app.services.file_ops_service import FileOpsService from backend.app.services.file_ops_service import FileOpsService
from backend.app.services.task_service import TaskService
from backend.app.tasks_runner import TaskRunner
class BlockingDeleteFilesystemAdapter(FilesystemAdapter):
def __init__(self) -> None:
super().__init__()
self.entered = threading.Event()
self.release = threading.Event()
def delete_file(self, path: Path) -> None:
self.entered.set()
self.release.wait(timeout=2.0)
super().delete_file(path)
class FileOpsApiGoldenTest(unittest.TestCase): class FileOpsApiGoldenTest(unittest.TestCase):
@@ -22,21 +40,37 @@ class FileOpsApiGoldenTest(unittest.TestCase):
self.temp_dir = tempfile.TemporaryDirectory() self.temp_dir = tempfile.TemporaryDirectory()
self.root = Path(self.temp_dir.name) / "root" self.root = Path(self.temp_dir.name) / "root"
self.root.mkdir(parents=True, exist_ok=True) self.root.mkdir(parents=True, exist_ok=True)
self.repo = TaskRepository(str(Path(self.temp_dir.name) / "tasks.db"))
self.scope = self.root / "scope" self.scope = self.root / "scope"
self.scope.mkdir(parents=True, exist_ok=True) self.scope.mkdir(parents=True, exist_ok=True)
(self.scope / "old.txt").write_text("x", encoding="utf-8") (self.scope / "old.txt").write_text("x", encoding="utf-8")
(self.scope / "existing.txt").write_text("y", encoding="utf-8") (self.scope / "existing.txt").write_text("y", encoding="utf-8")
path_guard = PathGuard({"storage1": str(self.root)})
service = FileOpsService( service = FileOpsService(
path_guard=PathGuard({"storage1": str(self.root)}), path_guard=path_guard,
filesystem=FilesystemAdapter(), filesystem=FilesystemAdapter(),
) )
delete_service = DeleteTaskService(
path_guard=path_guard,
repository=self.repo,
runner=TaskRunner(repository=self.repo, filesystem=FilesystemAdapter()),
)
task_service = TaskService(repository=self.repo)
async def _override_file_ops_service() -> FileOpsService: async def _override_file_ops_service() -> FileOpsService:
return service return service
async def _override_delete_task_service() -> DeleteTaskService:
return delete_service
async def _override_task_service() -> TaskService:
return task_service
app.dependency_overrides[get_file_ops_service] = _override_file_ops_service app.dependency_overrides[get_file_ops_service] = _override_file_ops_service
app.dependency_overrides[get_delete_task_service] = _override_delete_task_service
app.dependency_overrides[get_task_service] = _override_task_service
def tearDown(self) -> None: def tearDown(self) -> None:
app.dependency_overrides.clear() app.dependency_overrides.clear()
@@ -50,6 +84,34 @@ class FileOpsApiGoldenTest(unittest.TestCase):
return asyncio.run(_run()) return asyncio.run(_run())
def _get(self, url: str) -> httpx.Response:
async def _run() -> httpx.Response:
transport = httpx.ASGITransport(app=app)
async with httpx.AsyncClient(transport=transport, base_url="http://testserver") as client:
return await client.get(url)
return asyncio.run(_run())
def _wait_task(self, task_id: str, timeout_s: float = 2.0) -> dict:
deadline = time.time() + timeout_s
while time.time() < deadline:
response = self._get(f"/api/tasks/{task_id}")
body = response.json()
if body["status"] in {"completed", "failed", "cancelled"}:
return body
time.sleep(0.02)
self.fail("task did not reach terminal state in time")
def _wait_for_status(self, task_id: str, statuses: set[str], timeout_s: float = 2.0) -> dict:
deadline = time.time() + timeout_s
while time.time() < deadline:
response = self._get(f"/api/tasks/{task_id}")
body = response.json()
if body["status"] in statuses:
return body
time.sleep(0.02)
self.fail(f"task did not reach one of {sorted(statuses)} in time")
def test_mkdir_success(self) -> None: def test_mkdir_success(self) -> None:
response = self._post( response = self._post(
"/api/files/mkdir", "/api/files/mkdir",
@@ -225,8 +287,63 @@ class FileOpsApiGoldenTest(unittest.TestCase):
{"path": "storage1/scope/delete_me.txt"}, {"path": "storage1/scope/delete_me.txt"},
) )
self.assertEqual(response.status_code, 200) self.assertEqual(response.status_code, 202)
self.assertEqual(response.json(), {"path": "storage1/scope/delete_me.txt"}) body = response.json()
self.assertEqual(body["status"], "queued")
detail = self._wait_task(body["task_id"])
self.assertEqual(detail["operation"], "delete")
self.assertEqual(detail["status"], "completed")
self.assertEqual(detail["done_items"], 1)
self.assertEqual(detail["total_items"], 1)
self.assertIsNone(detail["current_item"])
self.assertFalse(target.exists())
def test_delete_file_cancelled_after_current_delete_finishes(self) -> None:
blocking_fs = BlockingDeleteFilesystemAdapter()
path_guard = PathGuard({"storage1": str(self.root)})
service = FileOpsService(path_guard=path_guard, filesystem=blocking_fs)
delete_service = DeleteTaskService(
path_guard=path_guard,
repository=self.repo,
runner=TaskRunner(repository=self.repo, filesystem=blocking_fs),
)
task_service = TaskService(repository=self.repo)
async def _override_file_ops_service() -> FileOpsService:
return service
async def _override_delete_task_service() -> DeleteTaskService:
return delete_service
async def _override_task_service() -> TaskService:
return task_service
app.dependency_overrides[get_file_ops_service] = _override_file_ops_service
app.dependency_overrides[get_delete_task_service] = _override_delete_task_service
app.dependency_overrides[get_task_service] = _override_task_service
target = self.scope / "delete_later.txt"
target.write_text("z", encoding="utf-8")
response = self._post(
"/api/files/delete",
{"path": "storage1/scope/delete_later.txt"},
)
task_id = response.json()["task_id"]
self.assertTrue(blocking_fs.entered.wait(timeout=2.0))
running = self._wait_for_status(task_id, {"running"})
self.assertEqual(running["current_item"], "delete_later.txt")
cancel_response = self._post(f"/api/tasks/{task_id}/cancel", {})
self.assertEqual(cancel_response.status_code, 200)
self.assertEqual(cancel_response.json()["status"], "cancelling")
blocking_fs.release.set()
detail = self._wait_task(task_id)
self.assertEqual(detail["status"], "cancelled")
self.assertEqual(detail["done_items"], 1)
self.assertEqual(detail["total_items"], 1)
self.assertFalse(target.exists()) self.assertFalse(target.exists())
def test_delete_empty_directory_success(self) -> None: def test_delete_empty_directory_success(self) -> None:
@@ -238,8 +355,15 @@ class FileOpsApiGoldenTest(unittest.TestCase):
{"path": "storage1/scope/empty_dir"}, {"path": "storage1/scope/empty_dir"},
) )
self.assertEqual(response.status_code, 200) self.assertEqual(response.status_code, 202)
self.assertEqual(response.json(), {"path": "storage1/scope/empty_dir"}) body = response.json()
self.assertEqual(body["status"], "queued")
detail = self._wait_task(body["task_id"])
self.assertEqual(detail["operation"], "delete")
self.assertEqual(detail["status"], "completed")
self.assertEqual(detail["done_items"], 0)
self.assertEqual(detail["total_items"], 0)
self.assertIsNone(detail["current_item"])
self.assertFalse(target.exists()) self.assertFalse(target.exists())
def test_delete_not_found(self) -> None: def test_delete_not_found(self) -> None:
@@ -300,6 +424,188 @@ class FileOpsApiGoldenTest(unittest.TestCase):
}, },
) )
def test_delete_non_empty_directory_recursive_success(self) -> None:
target = self.scope / "non_empty_recursive"
target.mkdir()
nested = target / "nested"
nested.mkdir()
(nested / "a.txt").write_text("a", encoding="utf-8")
(target / "b.txt").write_text("b", encoding="utf-8")
response = self._post(
"/api/files/delete",
{"path": "storage1/scope/non_empty_recursive", "recursive": True},
)
self.assertEqual(response.status_code, 202)
body = response.json()
self.assertEqual(body["status"], "queued")
detail = self._wait_task(body["task_id"])
self.assertEqual(detail["operation"], "delete")
self.assertEqual(detail["status"], "completed")
self.assertEqual(detail["done_items"], 2)
self.assertEqual(detail["total_items"], 2)
self.assertIsNone(detail["current_item"])
self.assertFalse(target.exists())
def test_delete_non_empty_directory_recursive_cancelled_after_current_file_finishes(self) -> None:
blocking_fs = BlockingDeleteFilesystemAdapter()
path_guard = PathGuard({"storage1": str(self.root)})
service = FileOpsService(path_guard=path_guard, filesystem=blocking_fs)
delete_service = DeleteTaskService(
path_guard=path_guard,
repository=self.repo,
runner=TaskRunner(repository=self.repo, filesystem=blocking_fs),
)
task_service = TaskService(repository=self.repo)
async def _override_file_ops_service() -> FileOpsService:
return service
async def _override_delete_task_service() -> DeleteTaskService:
return delete_service
async def _override_task_service() -> TaskService:
return task_service
app.dependency_overrides[get_file_ops_service] = _override_file_ops_service
app.dependency_overrides[get_delete_task_service] = _override_delete_task_service
app.dependency_overrides[get_task_service] = _override_task_service
target = self.scope / "delete_recursive_later"
target.mkdir()
nested = target / "nested"
nested.mkdir()
(target / "a.txt").write_text("a", encoding="utf-8")
(nested / "b.txt").write_text("b", encoding="utf-8")
response = self._post(
"/api/files/delete",
{"path": "storage1/scope/delete_recursive_later", "recursive": True},
)
task_id = response.json()["task_id"]
self.assertTrue(blocking_fs.entered.wait(timeout=2.0))
running = self._wait_for_status(task_id, {"running"})
self.assertEqual(running["current_item"], "a.txt")
self.assertEqual(running["done_items"], 0)
self.assertEqual(running["total_items"], 2)
cancel_response = self._post(f"/api/tasks/{task_id}/cancel", {})
self.assertEqual(cancel_response.status_code, 200)
self.assertEqual(cancel_response.json()["status"], "cancelling")
blocking_fs.release.set()
detail = self._wait_task(task_id)
self.assertEqual(detail["status"], "cancelled")
self.assertEqual(detail["done_items"], 1)
self.assertEqual(detail["total_items"], 2)
self.assertFalse(target.joinpath("a.txt").exists())
self.assertTrue(target.joinpath("nested", "b.txt").exists())
self.assertTrue(target.exists())
def test_delete_batch_multi_select_starts_one_task_and_completes(self) -> None:
first = self.scope / "batch-a.txt"
second_dir = self.scope / "batch-dir"
second_nested = second_dir / "nested.txt"
first.write_text("a", encoding="utf-8")
second_dir.mkdir()
second_nested.write_text("b", encoding="utf-8")
response = self._post(
"/api/files/delete",
{
"paths": ["storage1/scope/batch-a.txt", "storage1/scope/batch-dir"],
"recursive_paths": ["storage1/scope/batch-dir"],
},
)
self.assertEqual(response.status_code, 202)
detail = self._wait_task(response.json()["task_id"])
self.assertEqual(detail["operation"], "delete")
self.assertEqual(detail["status"], "completed")
self.assertEqual(detail["source"], "2 items")
self.assertEqual(detail["done_items"], 2)
self.assertEqual(detail["total_items"], 2)
self.assertFalse(first.exists())
self.assertFalse(second_dir.exists())
def test_delete_batch_cancelled_after_current_delete_finishes(self) -> None:
blocking_fs = BlockingDeleteFilesystemAdapter()
path_guard = PathGuard({"storage1": str(self.root)})
service = FileOpsService(path_guard=path_guard, filesystem=blocking_fs)
delete_service = DeleteTaskService(
path_guard=path_guard,
repository=self.repo,
runner=TaskRunner(repository=self.repo, filesystem=blocking_fs),
)
task_service = TaskService(repository=self.repo)
async def _override_file_ops_service() -> FileOpsService:
return service
async def _override_delete_task_service() -> DeleteTaskService:
return delete_service
async def _override_task_service() -> TaskService:
return task_service
app.dependency_overrides[get_file_ops_service] = _override_file_ops_service
app.dependency_overrides[get_delete_task_service] = _override_delete_task_service
app.dependency_overrides[get_task_service] = _override_task_service
first = self.scope / "cancel-a.txt"
second = self.scope / "cancel-b.txt"
first.write_text("a", encoding="utf-8")
second.write_text("b", encoding="utf-8")
response = self._post(
"/api/files/delete",
{
"paths": ["storage1/scope/cancel-a.txt", "storage1/scope/cancel-b.txt"],
},
)
task_id = response.json()["task_id"]
self.assertTrue(blocking_fs.entered.wait(timeout=2.0))
running = self._wait_for_status(task_id, {"running"})
self.assertEqual(running["done_items"], 0)
self.assertEqual(running["total_items"], 2)
cancel_response = self._post(f"/api/tasks/{task_id}/cancel", {})
self.assertEqual(cancel_response.status_code, 200)
self.assertEqual(cancel_response.json()["status"], "cancelling")
blocking_fs.release.set()
detail = self._wait_task(task_id)
self.assertEqual(detail["status"], "cancelled")
self.assertEqual(detail["done_items"], 1)
self.assertEqual(detail["total_items"], 2)
self.assertFalse(first.exists())
self.assertTrue(second.exists())
def test_delete_batch_directory_only_empty_dirs_remains_honestly_coarse(self) -> None:
first = self.scope / "empty-a"
second = self.scope / "empty-b"
first.mkdir()
second.mkdir()
response = self._post(
"/api/files/delete",
{
"paths": ["storage1/scope/empty-a", "storage1/scope/empty-b"],
},
)
self.assertEqual(response.status_code, 202)
detail = self._wait_task(response.json()["task_id"])
self.assertEqual(detail["status"], "completed")
self.assertEqual(detail["done_items"], 0)
self.assertEqual(detail["total_items"], 0)
self.assertIsNone(detail["current_item"])
self.assertFalse(first.exists())
self.assertFalse(second.exists())
def test_delete_invalid_path(self) -> None: def test_delete_invalid_path(self) -> None:
response = self._post( response = self._post(
"/api/files/delete", "/api/files/delete",
@@ -3,6 +3,7 @@ from __future__ import annotations
import asyncio import asyncio
import sys import sys
import tempfile import tempfile
import threading
import time import time
import unittest import unittest
from pathlib import Path from pathlib import Path
@@ -11,13 +12,16 @@ import httpx
sys.path.insert(0, str(Path(__file__).resolve().parents[3])) sys.path.insert(0, str(Path(__file__).resolve().parents[3]))
from backend.app.dependencies import get_copy_task_service, get_file_ops_service, get_history_service, get_move_task_service, get_task_service from backend.app.dependencies import get_archive_download_task_service, get_copy_task_service, get_delete_task_service, get_duplicate_task_service, get_file_ops_service, get_history_service, get_move_task_service, get_task_service
from backend.app.db.history_repository import HistoryRepository from backend.app.db.history_repository import HistoryRepository
from backend.app.db.task_repository import TaskRepository from backend.app.db.task_repository import TaskRepository
from backend.app.fs.filesystem_adapter import FilesystemAdapter from backend.app.fs.filesystem_adapter import FilesystemAdapter
from backend.app.main import app from backend.app.main import app
from backend.app.security.path_guard import PathGuard from backend.app.security.path_guard import PathGuard
from backend.app.services.archive_download_task_service import ArchiveDownloadTaskService
from backend.app.services.copy_task_service import CopyTaskService from backend.app.services.copy_task_service import CopyTaskService
from backend.app.services.delete_task_service import DeleteTaskService
from backend.app.services.duplicate_task_service import DuplicateTaskService
from backend.app.services.file_ops_service import FileOpsService from backend.app.services.file_ops_service import FileOpsService
from backend.app.services.history_service import HistoryService from backend.app.services.history_service import HistoryService
from backend.app.services.move_task_service import MoveTaskService from backend.app.services.move_task_service import MoveTaskService
@@ -30,6 +34,33 @@ class FailingCopyFilesystemAdapter(FilesystemAdapter):
raise OSError('forced copy failure') raise OSError('forced copy failure')
class BlockingArchiveBuildFileOpsService(FileOpsService):
def __init__(self, *args, entered: threading.Event, release: threading.Event, **kwargs):
super().__init__(*args, **kwargs)
self._entered = entered
self._release = release
def _write_download_target_to_zip(self, archive, resolved_target, on_each_item=None) -> None:
archive.writestr("partial.txt", b"partial")
self._entered.set()
self._release.wait(timeout=2.0)
if on_each_item:
on_each_item()
super()._write_download_target_to_zip(archive, resolved_target, on_each_item=on_each_item)
class BlockingCopyFilesystemAdapter(FilesystemAdapter):
def __init__(self) -> None:
super().__init__()
self.entered = threading.Event()
self.release = threading.Event()
def copy_file(self, source: str, destination: str, on_progress=None) -> None:
self.entered.set()
self.release.wait(timeout=2.0)
return super().copy_file(source=source, destination=destination, on_progress=on_progress)
class HistoryApiGoldenTest(unittest.TestCase): class HistoryApiGoldenTest(unittest.TestCase):
def setUp(self) -> None: def setUp(self) -> None:
self.temp_dir = tempfile.TemporaryDirectory() self.temp_dir = tempfile.TemporaryDirectory()
@@ -38,6 +69,7 @@ class HistoryApiGoldenTest(unittest.TestCase):
self.root1.mkdir(parents=True, exist_ok=True) self.root1.mkdir(parents=True, exist_ok=True)
self.root2.mkdir(parents=True, exist_ok=True) self.root2.mkdir(parents=True, exist_ok=True)
db_path = str(Path(self.temp_dir.name) / 'tasks.db') db_path = str(Path(self.temp_dir.name) / 'tasks.db')
self.artifact_root = Path(self.temp_dir.name) / "archive_tmp"
self.task_repo = TaskRepository(db_path) self.task_repo = TaskRepository(db_path)
self.history_repo = HistoryRepository(db_path) self.history_repo = HistoryRepository(db_path)
self.path_guard = PathGuard({'storage1': str(self.root1), 'storage2': str(self.root2)}) self.path_guard = PathGuard({'storage1': str(self.root1), 'storage2': str(self.root2)})
@@ -47,20 +79,39 @@ class HistoryApiGoldenTest(unittest.TestCase):
app.dependency_overrides.clear() app.dependency_overrides.clear()
self.temp_dir.cleanup() self.temp_dir.cleanup()
def _set_services(self, filesystem: FilesystemAdapter) -> None: def _set_services(self, filesystem: FilesystemAdapter, file_ops_service: FileOpsService | None = None) -> None:
runner = TaskRunner(repository=self.task_repo, filesystem=filesystem, history_repository=self.history_repo) runner = TaskRunner(repository=self.task_repo, filesystem=filesystem, history_repository=self.history_repo)
file_ops_service = FileOpsService(path_guard=self.path_guard, filesystem=filesystem, history_repository=self.history_repo) file_ops_service = file_ops_service or FileOpsService(path_guard=self.path_guard, filesystem=filesystem, history_repository=self.history_repo)
archive_service = ArchiveDownloadTaskService(
path_guard=self.path_guard,
repository=self.task_repo,
runner=runner,
history_repository=self.history_repo,
file_ops_service=file_ops_service,
artifact_root=self.artifact_root,
)
copy_service = CopyTaskService(path_guard=self.path_guard, repository=self.task_repo, runner=runner, history_repository=self.history_repo) copy_service = CopyTaskService(path_guard=self.path_guard, repository=self.task_repo, runner=runner, history_repository=self.history_repo)
delete_service = DeleteTaskService(path_guard=self.path_guard, repository=self.task_repo, runner=runner, history_repository=self.history_repo)
duplicate_service = DuplicateTaskService(path_guard=self.path_guard, repository=self.task_repo, runner=runner, history_repository=self.history_repo)
move_service = MoveTaskService(path_guard=self.path_guard, repository=self.task_repo, runner=runner, history_repository=self.history_repo) move_service = MoveTaskService(path_guard=self.path_guard, repository=self.task_repo, runner=runner, history_repository=self.history_repo)
task_service = TaskService(repository=self.task_repo) task_service = TaskService(repository=self.task_repo, history_repository=self.history_repo)
history_service = HistoryService(repository=self.history_repo) history_service = HistoryService(repository=self.history_repo)
async def _override_file_ops_service() -> FileOpsService: async def _override_file_ops_service() -> FileOpsService:
return file_ops_service return file_ops_service
async def _override_archive_service() -> ArchiveDownloadTaskService:
return archive_service
async def _override_copy_service() -> CopyTaskService: async def _override_copy_service() -> CopyTaskService:
return copy_service return copy_service
async def _override_duplicate_service() -> DuplicateTaskService:
return duplicate_service
async def _override_delete_service() -> DeleteTaskService:
return delete_service
async def _override_move_service() -> MoveTaskService: async def _override_move_service() -> MoveTaskService:
return move_service return move_service
@@ -71,7 +122,10 @@ class HistoryApiGoldenTest(unittest.TestCase):
return history_service return history_service
app.dependency_overrides[get_file_ops_service] = _override_file_ops_service app.dependency_overrides[get_file_ops_service] = _override_file_ops_service
app.dependency_overrides[get_archive_download_task_service] = _override_archive_service
app.dependency_overrides[get_copy_task_service] = _override_copy_service app.dependency_overrides[get_copy_task_service] = _override_copy_service
app.dependency_overrides[get_delete_task_service] = _override_delete_service
app.dependency_overrides[get_duplicate_task_service] = _override_duplicate_service
app.dependency_overrides[get_move_task_service] = _override_move_service app.dependency_overrides[get_move_task_service] = _override_move_service
app.dependency_overrides[get_task_service] = _override_task_service app.dependency_overrides[get_task_service] = _override_task_service
app.dependency_overrides[get_history_service] = _override_history_service app.dependency_overrides[get_history_service] = _override_history_service
@@ -91,11 +145,21 @@ class HistoryApiGoldenTest(unittest.TestCase):
while time.time() < deadline: while time.time() < deadline:
response = self._request('GET', f'/api/tasks/{task_id}') response = self._request('GET', f'/api/tasks/{task_id}')
body = response.json() body = response.json()
if body['status'] in {'completed', 'failed'}: if body['status'] in {'completed', 'failed', 'ready', 'cancelled'}:
return body return body
time.sleep(0.02) time.sleep(0.02)
self.fail('task did not reach terminal state in time') self.fail('task did not reach terminal state in time')
def _wait_for_status(self, task_id: str, statuses: set[str], timeout_s: float = 2.0) -> dict:
deadline = time.time() + timeout_s
while time.time() < deadline:
response = self._request('GET', f'/api/tasks/{task_id}')
body = response.json()
if body['status'] in statuses:
return body
time.sleep(0.02)
self.fail(f"task did not reach one of {sorted(statuses)} in time")
def test_get_history_empty_list(self) -> None: def test_get_history_empty_list(self) -> None:
response = self._request('GET', '/api/history') response = self._request('GET', '/api/history')
self.assertEqual(response.status_code, 200) self.assertEqual(response.status_code, 200)
@@ -165,6 +229,35 @@ class HistoryApiGoldenTest(unittest.TestCase):
self.assertEqual(history[0]['source'], 'storage1/source.txt') self.assertEqual(history[0]['source'], 'storage1/source.txt')
self.assertEqual(history[0]['destination'], 'storage1/copied.txt') self.assertEqual(history[0]['destination'], 'storage1/copied.txt')
def test_copy_cancelled_history_item(self) -> None:
blocking_fs = BlockingCopyFilesystemAdapter()
self._set_services(blocking_fs)
(self.root1 / 'a.txt').write_text('A', encoding='utf-8')
(self.root1 / 'b.txt').write_text('B', encoding='utf-8')
(self.root1 / 'dest').mkdir()
response = self._request(
'POST',
'/api/files/copy',
{'sources': ['storage1/a.txt', 'storage1/b.txt'], 'destination_base': 'storage1/dest'},
)
task_id = response.json()['task_id']
self.assertTrue(blocking_fs.entered.wait(timeout=2.0))
self._wait_for_status(task_id, {'running'})
cancel_response = self._request('POST', f'/api/tasks/{task_id}/cancel')
self.assertEqual(cancel_response.status_code, 200)
self.assertEqual(cancel_response.json()['status'], 'cancelling')
blocking_fs.release.set()
detail = self._wait_task(task_id)
self.assertEqual(detail['status'], 'cancelled')
history = self._request('GET', '/api/history').json()['items']
self.assertEqual(history[0]['operation'], 'copy')
self.assertEqual(history[0]['status'], 'cancelled')
self.assertEqual(history[0]['source'], '2 items')
self.assertEqual(history[0]['destination'], 'storage1/dest')
def test_move_failed_history_item(self) -> None: def test_move_failed_history_item(self) -> None:
src = self.root1 / 'source.txt' src = self.root1 / 'source.txt'
src.write_text('hello', encoding='utf-8') src.write_text('hello', encoding='utf-8')
@@ -178,3 +271,152 @@ class HistoryApiGoldenTest(unittest.TestCase):
self.assertEqual(history[0]['operation'], 'move') self.assertEqual(history[0]['operation'], 'move')
self.assertEqual(history[0]['status'], 'failed') self.assertEqual(history[0]['status'], 'failed')
self.assertEqual(history[0]['error_code'], 'io_error') self.assertEqual(history[0]['error_code'], 'io_error')
def test_duplicate_completed_history_item(self) -> None:
(self.root1 / 'report.txt').write_text('hello', encoding='utf-8')
response = self._request('POST', '/api/files/duplicate', {'paths': ['storage1/report.txt']})
self.assertEqual(response.status_code, 202)
self._wait_task(response.json()['task_id'])
history = self._request('GET', '/api/history').json()['items']
self.assertEqual(history[0]['operation'], 'duplicate')
self.assertEqual(history[0]['status'], 'completed')
self.assertEqual(history[0]['source'], 'storage1/report.txt')
self.assertEqual(history[0]['destination'], 'storage1/report copy.txt')
def test_delete_completed_history_item(self) -> None:
(self.root1 / 'trash.txt').write_text('bye', encoding='utf-8')
response = self._request('POST', '/api/files/delete', {'path': 'storage1/trash.txt'})
self.assertEqual(response.status_code, 202)
self._wait_task(response.json()['task_id'])
history = self._request('GET', '/api/history').json()['items']
self.assertEqual(history[0]['operation'], 'delete')
self.assertEqual(history[0]['status'], 'completed')
self.assertEqual(history[0]['path'], 'storage1/trash.txt')
def test_delete_batch_completed_history_item(self) -> None:
(self.root1 / 'trash-a.txt').write_text('a', encoding='utf-8')
(self.root1 / 'trash-b.txt').write_text('b', encoding='utf-8')
response = self._request('POST', '/api/files/delete', {'paths': ['storage1/trash-a.txt', 'storage1/trash-b.txt']})
self.assertEqual(response.status_code, 202)
self._wait_task(response.json()['task_id'])
history = self._request('GET', '/api/history').json()['items']
self.assertEqual(history[0]['operation'], 'delete')
self.assertEqual(history[0]['status'], 'completed')
self.assertEqual(history[0]['path'], '2 items')
def test_single_file_download_writes_ready_history_item(self) -> None:
    """A direct single-file download writes a 'ready' download history item."""
    (self.root1 / 'report.txt').write_text('hello download', encoding='utf-8')
    response = self._request('GET', '/api/files/download?path=storage1/report.txt')
    self.assertEqual(response.status_code, 200)
    history = self._request('GET', '/api/history').json()['items']
    # Success path: source kind, requested path and suggested client
    # filename are recorded; both error fields stay null.
    self.assertEqual(history[0]['operation'], 'download')
    self.assertEqual(history[0]['status'], 'ready')
    self.assertEqual(history[0]['source'], 'single_file')
    self.assertEqual(history[0]['path'], 'storage1/report.txt')
    self.assertEqual(history[0]['destination'], 'report.txt')
    self.assertEqual(history[0]['error_code'], None)
    self.assertEqual(history[0]['error_message'], None)
def test_single_directory_zip_download_writes_ready_history_item(self) -> None:
    """Preparing a zip for one directory yields a ready 'single_directory_zip' entry."""
    docs_dir = self.root1 / 'docs'
    docs_dir.mkdir()
    (docs_dir / 'a.txt').write_text('A', encoding='utf-8')
    prepare_response = self._request('POST', '/api/files/download/archive-prepare', {'paths': ['storage1/docs']})
    self.assertEqual(prepare_response.status_code, 202)
    self._wait_task(prepare_response.json()['task_id'])
    newest = self._request('GET', '/api/history').json()['items'][0]
    self.assertEqual(newest['operation'], 'download')
    self.assertEqual(newest['status'], 'ready')
    self.assertEqual(newest['source'], 'single_directory_zip')
    self.assertEqual(newest['path'], 'storage1/docs')
    self.assertEqual(newest['destination'], 'docs.zip')
def test_multi_mixed_zip_download_writes_ready_history_item(self) -> None:
    """A mixed multi-selection (file + directory) zip records a 'multi_zip' entry."""
    (self.root1 / 'readme.txt').write_text('R', encoding='utf-8')
    (self.root1 / 'photos').mkdir()
    (self.root1 / 'photos' / 'img.txt').write_text('P', encoding='utf-8')
    response = self._request('POST', '/api/files/download/archive-prepare', {'paths': ['storage1/readme.txt', 'storage1/photos']})
    self.assertEqual(response.status_code, 202)
    self._wait_task(response.json()['task_id'])
    history = self._request('GET', '/api/history').json()['items']
    self.assertEqual(history[0]['operation'], 'download')
    self.assertEqual(history[0]['status'], 'ready')
    self.assertEqual(history[0]['source'], 'multi_zip')
    # The path field joins all selected inputs; the archive name is
    # server-generated as kodidownload-YYYYMMDD-HHMMSS.zip.
    self.assertEqual(history[0]['path'], 'storage1/readme.txt, storage1/photos')
    self.assertRegex(history[0]['destination'], r'^kodidownload-\d{8}-\d{6}\.zip$')
def test_download_preflight_failure_writes_preflight_failed_history_item(self) -> None:
    """A symlink inside the requested directory must fail the zip preflight.

    The task still reaches a terminal state, and the history item carries the
    dedicated download_preflight_failed error code with its message.
    """
    target = self.root1 / 'real.txt'
    target.write_text('x', encoding='utf-8')
    (self.root1 / 'docs').mkdir()
    # Symlinks are disallowed by the zip preflight (symlink_policy: not_allowed).
    (self.root1 / 'docs' / 'link.txt').symlink_to(target)
    response = self._request('POST', '/api/files/download/archive-prepare', {'paths': ['storage1/docs']})
    self.assertEqual(response.status_code, 202)
    self._wait_task(response.json()['task_id'])
    history = self._request('GET', '/api/history').json()['items']
    self.assertEqual(history[0]['operation'], 'download')
    self.assertEqual(history[0]['status'], 'failed')
    self.assertEqual(history[0]['source'], 'single_directory_zip')
    self.assertEqual(history[0]['path'], 'storage1/docs')
    self.assertEqual(history[0]['destination'], 'docs.zip')
    self.assertEqual(history[0]['error_code'], 'download_preflight_failed')
    self.assertEqual(history[0]['error_message'], 'Zip download preflight failed')
def test_download_cancellation_writes_cancelled_history_item(self) -> None:
    """Cancelling an in-flight archive build records a 'cancelled' history item.

    A blocking file-ops service parks the archive build on an event so the
    test can issue the cancel while the task is provably mid-build.
    """
    entered = threading.Event()   # set by the service when the build starts
    release = threading.Event()   # set by the test to let the build proceed
    file_ops_service = BlockingArchiveBuildFileOpsService(
        path_guard=self.path_guard,
        filesystem=FilesystemAdapter(),
        history_repository=self.history_repo,
        entered=entered,
        release=release,
    )
    self._set_services(FilesystemAdapter(), file_ops_service=file_ops_service)
    (self.root1 / 'docs').mkdir()
    (self.root1 / 'docs' / 'a.txt').write_text('A', encoding='utf-8')
    response = self._request('POST', '/api/files/download/archive-prepare', {'paths': ['storage1/docs']})
    self.assertEqual(response.status_code, 202)
    # Wait until the build is actually running before cancelling.
    self.assertTrue(entered.wait(timeout=2.0))
    cancel = self._request('POST', f"/api/files/download/archive/{response.json()['task_id']}/cancel")
    release.set()
    self._wait_task(response.json()['task_id'])
    # Small settle delay, presumably to let the history write land after the
    # task reaches its terminal state — TODO confirm against the service.
    time.sleep(0.05)
    history = self._request('GET', '/api/history').json()['items']
    self.assertEqual(cancel.status_code, 200)
    self.assertEqual(history[0]['operation'], 'download')
    self.assertEqual(history[0]['status'], 'cancelled')
    self.assertEqual(history[0]['source'], 'single_directory_zip')
    self.assertEqual(history[0]['path'], 'storage1/docs')
    self.assertEqual(history[0]['destination'], 'docs.zip')
    self.assertEqual(history[0]['error_code'], None)
    self.assertEqual(history[0]['error_message'], None)
def test_download_history_uses_server_certain_statuses_only(self) -> None:
    """Download history statuses stay within the server-certain vocabulary."""
    (self.root1 / 'report.txt').write_text('hello download', encoding='utf-8')
    download_response = self._request('GET', '/api/files/download?path=storage1/report.txt')
    self.assertEqual(download_response.status_code, 200)
    newest = self._request('GET', '/api/history').json()['items'][0]
    allowed_statuses = {'requested', 'ready', 'preflight_failed', 'failed', 'cancelled'}
    client_side_statuses = {'completed', 'downloaded', 'saved'}
    self.assertIn(newest['status'], allowed_statuses)
    self.assertNotIn(newest['status'], client_side_statuses)
@@ -1,8 +1,10 @@
from __future__ import annotations from __future__ import annotations
import asyncio import asyncio
import errno
import sys import sys
import tempfile import tempfile
import threading
import time import time
import unittest import unittest
from pathlib import Path from pathlib import Path
@@ -28,7 +30,8 @@ class FailingDeleteFilesystemAdapter(FilesystemAdapter):
class FailingBatchFilesystemAdapter(FilesystemAdapter): class FailingBatchFilesystemAdapter(FilesystemAdapter):
def move_file(self, source: str, destination: str) -> None: def move_file(self, source: str, destination: str) -> None:
if Path(source).name == "fail-file.txt": source_path = Path(source)
if source_path.name == "fail-file.txt" or "fail-dir" in source_path.parts:
raise OSError("forced batch move failure") raise OSError("forced batch move failure")
super().move_file(source, destination) super().move_file(source, destination)
@@ -38,6 +41,23 @@ class FailingBatchFilesystemAdapter(FilesystemAdapter):
super().move_directory(source, destination) super().move_directory(source, destination)
class BlockingMoveFilesystemAdapter(FilesystemAdapter):
    """Filesystem adapter whose move_file parks until the test releases it.

    Lets a test cancel a batch task while the first item is provably
    mid-flight: ``entered`` is set when move_file is reached, and the move
    then waits on ``release`` before delegating to the real adapter.
    """
    def __init__(self) -> None:
        super().__init__()
        self.entered = threading.Event()  # signalled when move_file starts
        self.release = threading.Event()  # set by the test to unblock the move
    def move_file(self, source: str, destination: str) -> None:
        self.entered.set()
        # Bounded wait so a forgotten release cannot hang the suite.
        self.release.wait(timeout=2.0)
        super().move_file(source, destination)
class CrossDeviceMoveFilesystemAdapter(FilesystemAdapter):
    """Adapter whose move_file always raises EXDEV, forcing the caller's
    cross-device (copy + delete) fallback path to be exercised."""
    def move_file(self, source: str, destination: str) -> None:
        raise OSError(errno.EXDEV, "Invalid cross-device link")
class MoveApiGoldenTest(unittest.TestCase): class MoveApiGoldenTest(unittest.TestCase):
def setUp(self) -> None: def setUp(self) -> None:
self.temp_dir = tempfile.TemporaryDirectory() self.temp_dir = tempfile.TemporaryDirectory()
@@ -83,11 +103,21 @@ class MoveApiGoldenTest(unittest.TestCase):
while time.time() < deadline: while time.time() < deadline:
response = self._request("GET", f"/api/tasks/{task_id}") response = self._request("GET", f"/api/tasks/{task_id}")
body = response.json() body = response.json()
if body["status"] in {"completed", "failed"}: if body["status"] in {"completed", "failed", "cancelled"}:
return body return body
time.sleep(0.02) time.sleep(0.02)
self.fail("task did not reach terminal state in time") self.fail("task did not reach terminal state in time")
def _wait_for_status(self, task_id: str, statuses: set[str], timeout_s: float = 2.0) -> dict:
    """Poll the task endpoint until its status is one of *statuses*.

    Args:
        task_id: identifier returned by the task-creating endpoint.
        statuses: set of acceptable status strings.
        timeout_s: maximum time to poll before failing the test.

    Returns:
        The task detail body once a matching status is observed.
    """
    # time.monotonic() instead of time.time(): the deadline must not be
    # affected by wall-clock adjustments during the poll loop.
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        response = self._request("GET", f"/api/tasks/{task_id}")
        body = response.json()
        if body["status"] in statuses:
            return body
        time.sleep(0.02)
    self.fail(f"task did not reach one of {sorted(statuses)} in time")
def test_move_success_same_root_create_task_shape_and_completed(self) -> None: def test_move_success_same_root_create_task_shape_and_completed(self) -> None:
src = self.root1 / "source.txt" src = self.root1 / "source.txt"
src.write_text("hello", encoding="utf-8") src.write_text("hello", encoding="utf-8")
@@ -105,6 +135,8 @@ class MoveApiGoldenTest(unittest.TestCase):
detail = self._wait_task(body["task_id"]) detail = self._wait_task(body["task_id"])
self.assertEqual(detail["status"], "completed") self.assertEqual(detail["status"], "completed")
self.assertEqual(detail["done_items"], 1)
self.assertEqual(detail["total_items"], 1)
self.assertTrue((self.root1 / "moved.txt").exists()) self.assertTrue((self.root1 / "moved.txt").exists())
self.assertFalse(src.exists()) self.assertFalse(src.exists())
@@ -135,6 +167,31 @@ class MoveApiGoldenTest(unittest.TestCase):
self.assertTrue((self.root1 / "target-parent" / "moved-dir" / "nested.txt").exists()) self.assertTrue((self.root1 / "target-parent" / "moved-dir" / "nested.txt").exists())
self.assertFalse(src_dir.exists()) self.assertFalse(src_dir.exists())
def test_move_directory_success_same_root_with_nested_symlink_keeps_direct_move_semantics(self) -> None:
    """A same-root directory move must keep a nested symlink as a symlink.

    The directory is moved as one unit (done/total = 1) and the contained
    directory symlink survives the move unresolved.
    """
    src_dir = self.root1 / "source-dir"
    src_dir.mkdir()
    real_dir = self.root1 / "real-dir"
    real_dir.mkdir()
    (real_dir / "nested.txt").write_text("hello", encoding="utf-8")
    # Directory symlink inside the moved tree; must not be dereferenced.
    (src_dir / "link-dir").symlink_to(real_dir, target_is_directory=True)
    target_parent = self.root1 / "target-parent"
    target_parent.mkdir()
    response = self._request(
        "POST",
        "/api/files/move",
        {"source": "storage1/source-dir", "destination": "storage1/target-parent/moved-dir"},
    )
    self.assertEqual(response.status_code, 202)
    detail = self._wait_task(response.json()["task_id"])
    self.assertEqual(detail["status"], "completed")
    self.assertEqual(detail["done_items"], 1)
    self.assertEqual(detail["total_items"], 1)
    self.assertTrue((self.root1 / "target-parent" / "moved-dir").is_dir())
    self.assertTrue((self.root1 / "target-parent" / "moved-dir" / "link-dir").is_symlink())
    self.assertFalse(src_dir.exists())
def test_move_success_cross_root_create_task_shape_and_completed(self) -> None: def test_move_success_cross_root_create_task_shape_and_completed(self) -> None:
src = self.root1 / "source.txt" src = self.root1 / "source.txt"
src.write_text("hello", encoding="utf-8") src.write_text("hello", encoding="utf-8")
@@ -225,6 +282,95 @@ class MoveApiGoldenTest(unittest.TestCase):
self.assertFalse(source_file.exists()) self.assertFalse(source_file.exists())
self.assertFalse(source_dir.exists()) self.assertFalse(source_dir.exists())
def test_move_batch_cancelled_after_current_file_finishes(self) -> None:
    """Cancelling a batch move lets the in-flight file finish, then stops.

    A blocking adapter parks the first move on an event so the test can
    cancel while item 1 is mid-flight; item 2 must never be moved.
    """
    blocking_fs = BlockingMoveFilesystemAdapter()
    path_guard = PathGuard({"storage1": str(self.root1), "storage2": str(self.root2)})
    self._set_services(path_guard=path_guard, filesystem=blocking_fs)
    (self.root1 / "a.txt").write_text("A", encoding="utf-8")
    (self.root1 / "b.txt").write_text("B", encoding="utf-8")
    target = self.root1 / "target"
    target.mkdir()
    response = self._request(
        "POST",
        "/api/files/move",
        {
            "sources": ["storage1/a.txt", "storage1/b.txt"],
            "destination_base": "storage1/target",
        },
    )
    # Consistency with the sibling move tests: assert the create call
    # succeeded before using the task id (was previously unchecked).
    self.assertEqual(response.status_code, 202)
    task_id = response.json()["task_id"]
    self.assertTrue(blocking_fs.entered.wait(timeout=2.0))
    running = self._wait_for_status(task_id, {"running"})
    self.assertEqual(running["current_item"], "a.txt")
    cancel_response = self._request("POST", f"/api/tasks/{task_id}/cancel")
    self.assertEqual(cancel_response.status_code, 200)
    self.assertEqual(cancel_response.json()["status"], "cancelling")
    blocking_fs.release.set()
    detail = self._wait_task(task_id)
    # The in-flight item completes; the second never starts.
    self.assertEqual(detail["status"], "cancelled")
    self.assertEqual(detail["done_items"], 1)
    self.assertEqual(detail["total_items"], 2)
    self.assertTrue((target / "a.txt").exists())
    self.assertTrue((self.root1 / "b.txt").exists())
    self.assertFalse((target / "b.txt").exists())
def test_move_batch_cross_root_files_success(self) -> None:
    """Two files batch-moved across roots both arrive and vanish from the source."""
    source_files = {"first.txt": "a", "second.txt": "b"}
    for file_name, payload in source_files.items():
        (self.root1 / file_name).write_text(payload, encoding="utf-8")
    response = self._request(
        "POST",
        "/api/files/move",
        {
            "sources": ["storage1/first.txt", "storage1/second.txt"],
            "destination_base": "storage2",
        },
    )
    self.assertEqual(response.status_code, 202)
    detail = self._wait_task(response.json()["task_id"])
    self.assertEqual(detail["status"], "completed")
    self.assertEqual(detail["done_items"], 2)
    self.assertEqual(detail["total_items"], 2)
    for file_name in source_files:
        self.assertTrue((self.root2 / file_name).exists())
        self.assertFalse((self.root1 / file_name).exists())
def test_move_batch_cross_root_files_falls_back_from_exdev(self) -> None:
    """EXDEV from the adapter's move must trigger the copy+delete fallback.

    The injected adapter raises OSError(EXDEV) on every move_file call, so a
    completed cross-root batch proves the service fell back rather than failed.
    """
    first = self.root1 / "first.txt"
    second = self.root1 / "second.txt"
    first.write_text("a", encoding="utf-8")
    second.write_text("b", encoding="utf-8")
    path_guard = PathGuard({"storage1": str(self.root1), "storage2": str(self.root2)})
    self._set_services(path_guard=path_guard, filesystem=CrossDeviceMoveFilesystemAdapter())
    response = self._request(
        "POST",
        "/api/files/move",
        {
            "sources": ["storage1/first.txt", "storage1/second.txt"],
            "destination_base": "storage2",
        },
    )
    self.assertEqual(response.status_code, 202)
    detail = self._wait_task(response.json()["task_id"])
    self.assertEqual(detail["status"], "completed")
    self.assertEqual(detail["done_items"], 2)
    self.assertEqual(detail["total_items"], 2)
    self.assertTrue((self.root2 / "first.txt").exists())
    self.assertTrue((self.root2 / "second.txt").exists())
    self.assertFalse(first.exists())
    self.assertFalse(second.exists())
def test_move_batch_cross_root_directories_blocked(self) -> None: def test_move_batch_cross_root_directories_blocked(self) -> None:
first = self.root1 / "first-dir" first = self.root1 / "first-dir"
second = self.root1 / "second-dir" second = self.root1 / "second-dir"
@@ -242,6 +388,26 @@ class MoveApiGoldenTest(unittest.TestCase):
self.assertEqual(response.status_code, 400) self.assertEqual(response.status_code, 400)
self.assertEqual(response.json()["error"]["code"], "invalid_request") self.assertEqual(response.json()["error"]["code"], "invalid_request")
self.assertEqual(response.json()["error"]["message"], "Cross-root batch move with directories is not supported in v1")
def test_move_batch_cross_root_mixed_files_and_directories_blocked(self) -> None:
    """A cross-root batch mixing a file and a directory is rejected as invalid_request."""
    (self.root1 / "first.txt").write_text("a", encoding="utf-8")
    (self.root1 / "second-dir").mkdir()
    response = self._request(
        "POST",
        "/api/files/move",
        {
            "sources": ["storage1/first.txt", "storage1/second-dir"],
            "destination_base": "storage2",
        },
    )
    self.assertEqual(response.status_code, 400)
    error_body = response.json()["error"]
    self.assertEqual(error_body["code"], "invalid_request")
    self.assertEqual(error_body["message"], "Cross-root batch move with directories is not supported in v1")
def test_move_batch_mixed_root_selection_blocked(self) -> None: def test_move_batch_mixed_root_selection_blocked(self) -> None:
first = self.root1 / "first-dir" first = self.root1 / "first-dir"
@@ -328,8 +494,10 @@ class MoveApiGoldenTest(unittest.TestCase):
def test_move_batch_runtime_io_error_failed_task_shape(self) -> None: def test_move_batch_runtime_io_error_failed_task_shape(self) -> None:
first = self.root1 / "ok-dir" first = self.root1 / "ok-dir"
first.mkdir() first.mkdir()
(first / "a.txt").write_text("A", encoding="utf-8")
second = self.root1 / "fail-dir" second = self.root1 / "fail-dir"
second.mkdir() second.mkdir()
(second / "b.txt").write_text("B", encoding="utf-8")
target = self.root1 / "target" target = self.root1 / "target"
target.mkdir() target.mkdir()
@@ -0,0 +1,269 @@
from __future__ import annotations
import asyncio
import base64
import os
import sys
import tempfile
import unittest
from datetime import datetime, timezone
from pathlib import Path
import httpx
sys.path.insert(0, str(Path(__file__).resolve().parents[3]))
from backend.app.api.errors import AppError
from backend.app.dependencies import get_browse_service, get_remote_file_service
from backend.app.db.remote_client_repository import RemoteClientRepository
from backend.app.fs.filesystem_adapter import FilesystemAdapter
from backend.app.main import app
from backend.app.security.path_guard import PathGuard
from backend.app.services.browse_service import BrowseService
from backend.app.services.remote_client_service import RemoteClientService
from backend.app.services.remote_file_service import RemoteFileService
PNG_1X1 = base64.b64decode(
"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVR4nGP4z8AAAAMBAQDJ/pLvAAAAAElFTkSuQmCC"
)
class _StubRemoteFileService(RemoteFileService):
    """RemoteFileService test double driven by canned payloads and streams.

    ``payloads`` maps (client_id, endpoint_path, share, path) to JSON bodies;
    ``streams`` maps (client_id, share, relative_path) to raw byte responses.
    Any client id in ``failing_client_ids`` raises remote_client_unreachable.
    """
    def __init__(
        self,
        remote_client_service: RemoteClientService,
        *,
        payloads: dict[tuple[str, str, str, str], dict],
        streams: dict[tuple[str, str, str], dict],
        failing_client_ids: set[str],
    ):
        super().__init__(
            remote_client_service=remote_client_service,
            agent_auth_header="Authorization",
            agent_auth_scheme="Bearer",
            agent_auth_token="agent-secret",
        )
        self._payloads = payloads
        self._streams = streams
        self._failing_client_ids = failing_client_ids
    def _request_json(self, *, client, endpoint_path: str, params: dict[str, str]) -> dict:
        """Return the canned JSON body, or raise 502 for a failing client."""
        if client.client_id in self._failing_client_ids:
            raise AppError(
                code="remote_client_unreachable",
                message=f"Remote client '{client.display_name}' is unreachable",
                status_code=502,
                details={"client_id": client.client_id, "endpoint": client.endpoint},
            )
        return self._payloads[(client.client_id, endpoint_path, params["share"], params.get("path", ""))]
    def prepare_download(self, paths: list[str]) -> dict:
        """Build a download response from the canned stream for the first path."""
        resolved = self._resolve_remote_path(paths[0])
        item = self._stream_item(resolved.client.client_id, resolved.share_key, resolved.relative_path, resolved.name)
        return {
            "content": self._bytes_iter(item["content"]),
            "headers": {"Content-Disposition": item["headers"]["content-disposition"]},
            "content_type": item["headers"]["content-type"],
        }
    def prepare_image_stream(self, path: str) -> dict:
        """Build an image-stream response from the canned stream for *path*."""
        resolved = self._resolve_remote_path(path)
        item = self._stream_item(resolved.client.client_id, resolved.share_key, resolved.relative_path, resolved.name)
        return {
            "content": self._bytes_iter(item["content"]),
            "headers": {"Content-Length": item["headers"]["content-length"]},
            "content_type": item["headers"]["content-type"],
        }
    def _stream_item(self, client_id: str, share_key: str, relative_path: str, default_name: str) -> dict:
        """Look up a canned stream entry, or raise 502 for a failing client."""
        if client_id in self._failing_client_ids:
            raise AppError(
                code="remote_client_unreachable",
                message=f"Remote client '{default_name}' is unreachable",
                status_code=502,
                details={"client_id": client_id},
            )
        return self._streams[(client_id, share_key, relative_path)]
    @staticmethod
    async def _bytes_iter(payload: bytes):
        """Yield the whole payload once, mimicking a streaming body."""
        yield payload
class RemoteFileOpsApiGoldenTest(unittest.TestCase):
    """Golden tests for read-only remote file operations under /Clients paths.

    Wires a stub remote file service (one healthy client, one failing client)
    and a local browse service into the app via dependency overrides, then
    checks info/view/image/download for remote paths and that local /Volumes
    browsing is unaffected by remote failures.
    """
    def setUp(self) -> None:
        # Local /Volumes fixture so the local browse path can be verified too.
        self.temp_dir = tempfile.TemporaryDirectory()
        self.volumes_root = Path(self.temp_dir.name) / "Volumes"
        self.volumes_root.mkdir(parents=True, exist_ok=True)
        self.storage_root = self.volumes_root / "8TB"
        self.storage_root.mkdir(parents=True, exist_ok=True)
        local_file = self.storage_root / "local.txt"
        local_file.write_text("local", encoding="utf-8")
        mtime = 1710000000
        os.utime(local_file, (mtime, mtime))
        # Registry with one reachable client and one that the stub treats as down.
        repository = RemoteClientRepository(str(Path(self.temp_dir.name) / "remote-clients.db"))
        now_iso = "2026-03-26T12:00:00Z"
        repository.upsert_client(
            client_id="client-123",
            display_name="Jan MacBook",
            platform="macos",
            agent_version="1.1.0",
            endpoint="http://agent.test",
            shares=[{"key": "downloads", "label": "Downloads"}],
            now_iso=now_iso,
        )
        repository.upsert_client(
            client_id="broken-client",
            display_name="Offline iMac",
            platform="macos",
            agent_version="1.1.0",
            endpoint="http://broken.test",
            shares=[{"key": "downloads", "label": "Downloads"}],
            now_iso=now_iso,
        )
        # Frozen clock keeps heartbeat/offline computations deterministic.
        remote_client_service = RemoteClientService(
            repository=repository,
            registration_token="secret-token",
            offline_timeout_seconds=60,
            now=lambda: datetime(2026, 3, 26, 12, 0, 0, tzinfo=timezone.utc),
        )
        # Canned agent responses keyed by (client, endpoint, share, path).
        remote_file_service = _StubRemoteFileService(
            remote_client_service,
            payloads={
                (
                    "client-123",
                    "/api/info",
                    "downloads",
                    "notes.md",
                ): {
                    "name": "notes.md",
                    "kind": "file",
                    "size": 13,
                    "modified": "2026-03-26T12:00:00Z",
                    "content_type": "text/markdown",
                    "extension": ".md",
                    "width": None,
                    "height": None,
                    "owner": None,
                    "group": None,
                },
                (
                    "client-123",
                    "/api/read",
                    "downloads",
                    "notes.md",
                ): {
                    "name": "notes.md",
                    "content_type": "text/markdown",
                    "encoding": "utf-8",
                    "truncated": False,
                    "size": 13,
                    "modified": "2026-03-26T12:00:00Z",
                    "content": "# title\nhello",
                },
            },
            streams={
                (
                    "client-123",
                    "downloads",
                    "notes.md",
                ): {
                    "headers": {
                        "content-type": "text/markdown; charset=utf-8",
                        "content-disposition": 'attachment; filename="notes.md"',
                        "content-length": "13",
                    },
                    "content": b"# title\nhello",
                },
                (
                    "client-123",
                    "downloads",
                    "pixel.png",
                ): {
                    "headers": {
                        "content-type": "image/png",
                        "content-disposition": 'attachment; filename="pixel.png"',
                        "content-length": str(len(PNG_1X1)),
                    },
                    "content": PNG_1X1,
                },
            },
            failing_client_ids={"broken-client"},
        )
        browse_service = BrowseService(
            path_guard=PathGuard({"storage1": str(self.storage_root)}),
            filesystem=FilesystemAdapter(),
        )
        async def _override_remote_file_service() -> RemoteFileService:
            return remote_file_service
        async def _override_browse_service() -> BrowseService:
            return browse_service
        app.dependency_overrides[get_remote_file_service] = _override_remote_file_service
        app.dependency_overrides[get_browse_service] = _override_browse_service
    def tearDown(self) -> None:
        # Undo the overrides so other test classes see the real services.
        app.dependency_overrides.clear()
        self.temp_dir.cleanup()
    def _request(self, method: str, url: str, *, params: dict | list[tuple[str, str]] | None = None) -> httpx.Response:
        """Issue a request against the app over an in-process ASGI transport."""
        async def _run() -> httpx.Response:
            transport = httpx.ASGITransport(app=app)
            async with httpx.AsyncClient(transport=transport, base_url="http://testserver") as client:
                return await client.request(method, url, params=params)
        return asyncio.run(_run())
    def test_remote_info_view_image_and_download_work(self) -> None:
        """info, view, image and download all resolve against the healthy remote client."""
        info_response = self._request("GET", "/api/files/info", params={"path": "/Clients/client-123/downloads/notes.md"})
        self.assertEqual(info_response.status_code, 200)
        self.assertEqual(
            info_response.json(),
            {
                "name": "notes.md",
                "path": "/Clients/client-123/downloads/notes.md",
                "type": "file",
                "size": 13,
                "modified": "2026-03-26T12:00:00Z",
                "root": "/Clients/client-123/downloads",
                "extension": ".md",
                "content_type": "text/markdown",
                "owner": None,
                "group": None,
                "width": None,
                "height": None,
            },
        )
        view_response = self._request("GET", "/api/files/view", params={"path": "/Clients/client-123/downloads/notes.md"})
        self.assertEqual(view_response.status_code, 200)
        self.assertEqual(view_response.json()["content"], "# title\nhello")
        self.assertEqual(view_response.json()["content_type"], "text/markdown")
        image_response = self._request("GET", "/api/files/image", params={"path": "/Clients/client-123/downloads/pixel.png"})
        self.assertEqual(image_response.status_code, 200)
        self.assertEqual(image_response.headers.get("content-type"), "image/png")
        self.assertEqual(image_response.content, PNG_1X1)
        download_response = self._request("GET", "/api/files/download", params=[("path", "/Clients/client-123/downloads/notes.md")])
        self.assertEqual(download_response.status_code, 200)
        self.assertEqual(download_response.content, b"# title\nhello")
        self.assertIn('attachment; filename="notes.md"', download_response.headers.get("content-disposition", ""))
    def test_remote_failure_stays_local_and_volumes_behavior_is_unchanged(self) -> None:
        """An unreachable client yields 502 without disturbing local /Volumes browsing."""
        failed_response = self._request("GET", "/api/files/info", params={"path": "/Clients/broken-client/downloads/notes.md"})
        self.assertEqual(failed_response.status_code, 502)
        self.assertEqual(failed_response.json()["error"]["code"], "remote_client_unreachable")
        volumes_response = self._request("GET", "/api/browse", params={"path": "/Volumes/8TB"})
        self.assertEqual(volumes_response.status_code, 200)
        self.assertEqual(volumes_response.json()["path"], "/Volumes/8TB")
        self.assertEqual([item["name"] for item in volumes_response.json()["files"]], ["local.txt"])
if __name__ == "__main__":
unittest.main()
@@ -49,6 +49,16 @@ class SettingsApiGoldenTest(unittest.TestCase):
return asyncio.run(_run()) return asyncio.run(_run())
@staticmethod
def _default_zip_download_limits() -> dict:
    """Expected server defaults for the read-only zip_download_limits block."""
    return dict(
        max_items=1000,
        max_total_input_bytes=2147483648,
        max_individual_file_bytes=524288000,
        scan_timeout_seconds=10.0,
        symlink_policy="not_allowed",
    )
def test_settings_default_response(self) -> None: def test_settings_default_response(self) -> None:
response = self._request("GET", "/api/settings") response = self._request("GET", "/api/settings")
@@ -61,6 +71,7 @@ class SettingsApiGoldenTest(unittest.TestCase):
"preferred_startup_path_right": None, "preferred_startup_path_right": None,
"selected_theme": "default", "selected_theme": "default",
"selected_color_mode": "dark", "selected_color_mode": "dark",
"zip_download_limits": self._default_zip_download_limits(),
}, },
) )
@@ -79,6 +90,7 @@ class SettingsApiGoldenTest(unittest.TestCase):
"preferred_startup_path_right": None, "preferred_startup_path_right": None,
"selected_theme": "default", "selected_theme": "default",
"selected_color_mode": "dark", "selected_color_mode": "dark",
"zip_download_limits": self._default_zip_download_limits(),
}, },
) )
@@ -102,6 +114,7 @@ class SettingsApiGoldenTest(unittest.TestCase):
"preferred_startup_path_right": "storage1/docs", "preferred_startup_path_right": "storage1/docs",
"selected_theme": "default", "selected_theme": "default",
"selected_color_mode": "dark", "selected_color_mode": "dark",
"zip_download_limits": self._default_zip_download_limits(),
}, },
) )
self.assertEqual( self.assertEqual(
@@ -112,6 +125,7 @@ class SettingsApiGoldenTest(unittest.TestCase):
"preferred_startup_path_right": "storage1/docs", "preferred_startup_path_right": "storage1/docs",
"selected_theme": "default", "selected_theme": "default",
"selected_color_mode": "dark", "selected_color_mode": "dark",
"zip_download_limits": self._default_zip_download_limits(),
}, },
) )
@@ -123,6 +137,7 @@ class SettingsApiGoldenTest(unittest.TestCase):
self.assertEqual(response.json()["preferred_startup_path_right"], None) self.assertEqual(response.json()["preferred_startup_path_right"], None)
self.assertEqual(response.json()["selected_theme"], "default") self.assertEqual(response.json()["selected_theme"], "default")
self.assertEqual(response.json()["selected_color_mode"], "dark") self.assertEqual(response.json()["selected_color_mode"], "dark")
self.assertEqual(response.json()["zip_download_limits"], self._default_zip_download_limits())
def test_settings_preferred_startup_path_right_persistence(self) -> None: def test_settings_preferred_startup_path_right_persistence(self) -> None:
response = self._request("POST", "/api/settings", {"preferred_startup_path_right": "storage1/docs"}) response = self._request("POST", "/api/settings", {"preferred_startup_path_right": "storage1/docs"})
@@ -132,6 +147,7 @@ class SettingsApiGoldenTest(unittest.TestCase):
self.assertEqual(response.json()["preferred_startup_path_right"], "storage1/docs") self.assertEqual(response.json()["preferred_startup_path_right"], "storage1/docs")
self.assertEqual(response.json()["selected_theme"], "default") self.assertEqual(response.json()["selected_theme"], "default")
self.assertEqual(response.json()["selected_color_mode"], "dark") self.assertEqual(response.json()["selected_color_mode"], "dark")
self.assertEqual(response.json()["zip_download_limits"], self._default_zip_download_limits())
def test_settings_preferred_startup_path_empty_string_resets_only_left_to_null(self) -> None: def test_settings_preferred_startup_path_empty_string_resets_only_left_to_null(self) -> None:
self._request( self._request(
@@ -149,6 +165,7 @@ class SettingsApiGoldenTest(unittest.TestCase):
self.assertEqual(response.json()["preferred_startup_path_right"], "storage1/docs") self.assertEqual(response.json()["preferred_startup_path_right"], "storage1/docs")
self.assertEqual(response.json()["selected_theme"], "default") self.assertEqual(response.json()["selected_theme"], "default")
self.assertEqual(response.json()["selected_color_mode"], "dark") self.assertEqual(response.json()["selected_color_mode"], "dark")
self.assertEqual(response.json()["zip_download_limits"], self._default_zip_download_limits())
def test_settings_selected_theme_persistence(self) -> None: def test_settings_selected_theme_persistence(self) -> None:
response = self._request("POST", "/api/settings", {"selected_theme": "midnight"}) response = self._request("POST", "/api/settings", {"selected_theme": "midnight"})
@@ -156,6 +173,7 @@ class SettingsApiGoldenTest(unittest.TestCase):
self.assertEqual(response.status_code, 200) self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["selected_theme"], "midnight") self.assertEqual(response.json()["selected_theme"], "midnight")
self.assertEqual(response.json()["selected_color_mode"], "dark") self.assertEqual(response.json()["selected_color_mode"], "dark")
self.assertEqual(response.json()["zip_download_limits"], self._default_zip_download_limits())
def test_settings_selected_theme_accepts_new_built_in_family(self) -> None: def test_settings_selected_theme_accepts_new_built_in_family(self) -> None:
response = self._request("POST", "/api/settings", {"selected_theme": "commander-electric"}) response = self._request("POST", "/api/settings", {"selected_theme": "commander-electric"})
@@ -163,6 +181,7 @@ class SettingsApiGoldenTest(unittest.TestCase):
self.assertEqual(response.status_code, 200) self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["selected_theme"], "commander-electric") self.assertEqual(response.json()["selected_theme"], "commander-electric")
self.assertEqual(response.json()["selected_color_mode"], "dark") self.assertEqual(response.json()["selected_color_mode"], "dark")
self.assertEqual(response.json()["zip_download_limits"], self._default_zip_download_limits())
def test_settings_selected_color_mode_persistence(self) -> None: def test_settings_selected_color_mode_persistence(self) -> None:
response = self._request("POST", "/api/settings", {"selected_color_mode": "light"}) response = self._request("POST", "/api/settings", {"selected_color_mode": "light"})
@@ -170,6 +189,13 @@ class SettingsApiGoldenTest(unittest.TestCase):
self.assertEqual(response.status_code, 200) self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["selected_theme"], "default") self.assertEqual(response.json()["selected_theme"], "default")
self.assertEqual(response.json()["selected_color_mode"], "light") self.assertEqual(response.json()["selected_color_mode"], "light")
self.assertEqual(response.json()["zip_download_limits"], self._default_zip_download_limits())
def test_settings_includes_read_only_zip_download_limits(self) -> None:
    """GET /api/settings exposes the zip download limits with their defaults."""
    settings_response = self._request("GET", "/api/settings")
    self.assertEqual(settings_response.status_code, 200)
    self.assertEqual(settings_response.json()["zip_download_limits"], self._default_zip_download_limits())
def test_settings_rejects_invalid_selected_theme(self) -> None: def test_settings_rejects_invalid_selected_theme(self) -> None:
response = self._request("POST", "/api/settings", {"selected_theme": "unknown"}) response = self._request("POST", "/api/settings", {"selected_theme": "unknown"})
@@ -40,6 +40,14 @@ class TasksApiGoldenTest(unittest.TestCase):
return asyncio.run(_run()) return asyncio.run(_run())
def _post(self, url: str, payload: dict | None = None) -> httpx.Response:
    """POST *payload* as JSON to the app over an in-process ASGI transport."""
    async def _run() -> httpx.Response:
        transport = httpx.ASGITransport(app=app)
        async with httpx.AsyncClient(transport=transport, base_url="http://testserver") as client:
            return await client.post(url, json=payload)
    return asyncio.run(_run())
def _insert_task( def _insert_task(
self, self,
*, *,
@@ -241,6 +249,215 @@ class TasksApiGoldenTest(unittest.TestCase):
self.assertEqual(body["error_code"], "io_error") self.assertEqual(body["error_code"], "io_error")
self.assertEqual(body["error_message"], "write failed") self.assertEqual(body["error_message"], "write failed")
def test_get_task_detail_delete_running(self) -> None:
    """The detail endpoint echoes progress fields for a running delete task."""
    fields = {
        "task_id": "task-delete",
        "operation": "delete",
        "status": "running",
        "source": "storage1/trash.txt",
        "destination": "",
        "created_at": "2026-03-10T10:00:00Z",
        "started_at": "2026-03-10T10:00:01Z",
        "done_items": 0,
        "total_items": 1,
        "current_item": "trash.txt",
    }
    self._insert_task(**fields)
    resp = self._get("/api/tasks/task-delete")
    self.assertEqual(resp.status_code, 200)
    detail = resp.json()
    for key, expected in {
        "operation": "delete",
        "status": "running",
        "done_items": 0,
        "total_items": 1,
        "current_item": "trash.txt",
    }.items():
        self.assertEqual(detail[key], expected)
def test_cancel_running_delete_task_returns_cancelling(self) -> None:
    """Cancelling a running delete task flips its status to cancelling."""
    fields = {
        "task_id": "task-delete",
        "operation": "delete",
        "status": "running",
        "source": "storage1/trash.txt",
        "destination": "",
        "created_at": "2026-03-10T10:00:00Z",
        "started_at": "2026-03-10T10:00:01Z",
        "done_items": 0,
        "total_items": 1,
        "current_item": "trash.txt",
    }
    self._insert_task(**fields)
    resp = self._post("/api/tasks/task-delete/cancel")
    self.assertEqual(resp.status_code, 200)
    detail = resp.json()
    for key, expected in {
        "operation": "delete",
        "status": "cancelling",
        "current_item": "trash.txt",
    }.items():
        self.assertEqual(detail[key], expected)
def test_cancel_completed_task_rejected(self) -> None:
    """Cancelling a completed task is rejected with a 409 task_not_cancellable error."""
    fields = {
        "task_id": "task-copy",
        "operation": "copy",
        "status": "completed",
        "source": "storage1/a.txt",
        "destination": "storage2/a.txt",
        "created_at": "2026-03-10T10:00:00Z",
        "finished_at": "2026-03-10T10:00:04Z",
    }
    self._insert_task(**fields)
    resp = self._post("/api/tasks/task-copy/cancel")
    self.assertEqual(resp.status_code, 409)
    expected_error = {
        "error": {
            "code": "task_not_cancellable",
            "message": "Task cannot be cancelled",
            "details": {"task_id": "task-copy", "status": "completed"},
        }
    }
    self.assertEqual(resp.json(), expected_error)
def test_cancel_download_task_rejected(self) -> None:
    """Download tasks cannot be cancelled: a preparing download yields 409."""
    fields = {
        "task_id": "task-download",
        "operation": "download",
        "status": "preparing",
        "source": "single_directory_zip",
        "destination": "docs.zip",
        "created_at": "2026-03-10T10:00:00Z",
        "started_at": "2026-03-10T10:00:01Z",
    }
    self._insert_task(**fields)
    resp = self._post("/api/tasks/task-download/cancel")
    self.assertEqual(resp.status_code, 409)
    expected_error = {
        "error": {
            "code": "task_not_cancellable",
            "message": "Task cannot be cancelled",
            "details": {"task_id": "task-download", "status": "preparing"},
        }
    }
    self.assertEqual(resp.json(), expected_error)
def test_get_task_detail_ready_archive_download(self) -> None:
    """A finished archive download reports status 'ready' and its zip destination."""
    fields = {
        "task_id": "task-download-ready",
        "operation": "download",
        "status": "ready",
        "source": "storage1/docs",
        "destination": "docs.zip",
        "created_at": "2026-03-10T10:00:00Z",
        "started_at": "2026-03-10T10:00:01Z",
        "finished_at": "2026-03-10T10:00:05Z",
        "done_items": 1,
        "total_items": 1,
    }
    self._insert_task(**fields)
    resp = self._get("/api/tasks/task-download-ready")
    self.assertEqual(resp.status_code, 200)
    detail = resp.json()
    for key, expected in {
        "operation": "download",
        "status": "ready",
        "destination": "docs.zip",
    }.items():
        self.assertEqual(detail[key], expected)
def test_get_task_detail_duplicate_completed(self) -> None:
    """A completed duplicate task reports its operation, status and item counts."""
    fields = {
        "task_id": "task-duplicate",
        "operation": "duplicate",
        "status": "completed",
        "source": "storage1/report.txt",
        "destination": "storage1/report copy.txt",
        "created_at": "2026-03-10T10:00:00Z",
        "started_at": "2026-03-10T10:00:01Z",
        "finished_at": "2026-03-10T10:00:03Z",
        "done_items": 1,
        "total_items": 1,
        "current_item": "storage1/report.txt",
    }
    self._insert_task(**fields)
    resp = self._get("/api/tasks/task-duplicate")
    self.assertEqual(resp.status_code, 200)
    detail = resp.json()
    for key, expected in {
        "operation": "duplicate",
        "status": "completed",
        "done_items": 1,
        "total_items": 1,
    }.items():
        self.assertEqual(detail[key], expected)
def test_get_task_detail_requested_archive_download(self) -> None:
    """A not-yet-started archive download reports status 'requested' with zero progress."""
    fields = {
        "task_id": "task-download-requested",
        "operation": "download",
        "status": "requested",
        "source": "storage1/docs",
        "destination": "docs.zip",
        "created_at": "2026-03-10T10:00:00Z",
        "done_items": 0,
        "total_items": 1,
    }
    self._insert_task(**fields)
    resp = self._get("/api/tasks/task-download-requested")
    self.assertEqual(resp.status_code, 200)
    detail = resp.json()
    for key, expected in {
        "operation": "download",
        "status": "requested",
        "done_items": 0,
        "total_items": 1,
    }.items():
        self.assertEqual(detail[key], expected)
def test_get_task_detail_preparing_archive_download_with_current_item(self) -> None:
    """A preparing archive download exposes partial progress and the current item."""
    fields = {
        "task_id": "task-download-preparing",
        "operation": "download",
        "status": "preparing",
        "source": "storage1/docs",
        "destination": "docs.zip",
        "created_at": "2026-03-10T10:00:00Z",
        "started_at": "2026-03-10T10:00:01Z",
        "done_items": 1,
        "total_items": 3,
        "current_item": "storage1/docs/b.txt",
    }
    self._insert_task(**fields)
    resp = self._get("/api/tasks/task-download-preparing")
    self.assertEqual(resp.status_code, 200)
    detail = resp.json()
    for key, expected in {
        "operation": "download",
        "status": "preparing",
        "done_items": 1,
        "total_items": 3,
        "current_item": "storage1/docs/b.txt",
    }.items():
        self.assertEqual(detail[key], expected)
def test_get_task_detail_cancelled_archive_download(self) -> None:
    """A cancelled archive download still reports its destination and status."""
    fields = {
        "task_id": "task-download-cancelled",
        "operation": "download",
        "status": "cancelled",
        "source": "storage1/docs",
        "destination": "docs.zip",
        "created_at": "2026-03-10T10:00:00Z",
        "started_at": "2026-03-10T10:00:01Z",
        "finished_at": "2026-03-10T10:00:03Z",
        "done_items": 0,
        "total_items": 1,
    }
    self._insert_task(**fields)
    resp = self._get("/api/tasks/task-download-cancelled")
    self.assertEqual(resp.status_code, 200)
    detail = resp.json()
    for key, expected in {
        "operation": "download",
        "status": "cancelled",
        "destination": "docs.zip",
    }.items():
        self.assertEqual(detail[key], expected)
def test_get_task_not_found(self) -> None: def test_get_task_not_found(self) -> None:
response = self._get("/api/tasks/task-missing") response = self._get("/api/tasks/task-missing")
@@ -49,13 +49,13 @@ class UploadApiGoldenTest(unittest.TestCase):
app.dependency_overrides.clear() app.dependency_overrides.clear()
self.temp_dir.cleanup() self.temp_dir.cleanup()
def _upload(self, *, target_path: str, filename: str, content: bytes) -> httpx.Response: def _upload(self, *, target_path: str, filename: str, content: bytes, overwrite: bool = False) -> httpx.Response:
async def _run() -> httpx.Response: async def _run() -> httpx.Response:
transport = httpx.ASGITransport(app=app) transport = httpx.ASGITransport(app=app)
async with httpx.AsyncClient(transport=transport, base_url="http://testserver") as client: async with httpx.AsyncClient(transport=transport, base_url="http://testserver") as client:
return await client.post( return await client.post(
"/api/files/upload", "/api/files/upload",
data={"target_path": target_path}, data={"target_path": target_path, "overwrite": "true" if overwrite else "false"},
files={"file": (filename, content, "application/octet-stream")}, files={"file": (filename, content, "application/octet-stream")},
) )
@@ -184,3 +184,21 @@ class UploadApiGoldenTest(unittest.TestCase):
self.assertEqual(history[0]["operation"], "upload") self.assertEqual(history[0]["operation"], "upload")
self.assertEqual(history[0]["status"], "failed") self.assertEqual(history[0]["status"], "failed")
self.assertEqual(history[0]["error_code"], "already_exists") self.assertEqual(history[0]["error_code"], "already_exists")
def test_upload_overwrite_existing_file_success(self) -> None:
    """Uploading with overwrite=True replaces an existing file and logs a completed upload."""
    target = self.uploads_dir / "hello.txt"
    target.write_text("existing", encoding="utf-8")
    resp = self._upload(
        target_path="storage1/uploads",
        filename="hello.txt",
        content=b"replacement",
        overwrite=True,
    )
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(target.read_bytes(), b"replacement")
    entry = self._get_history()[0]
    self.assertEqual(entry["operation"], "upload")
    self.assertEqual(entry["status"], "completed")
File diff suppressed because it is too large. [Load Diff]
@@ -0,0 +1,146 @@
from __future__ import annotations
import sys
import tempfile
import unittest
from pathlib import Path
sys.path.insert(0, str(Path(__file__).resolve().parents[3]))
from backend.app.db.history_repository import HistoryRepository
from backend.app.db.task_repository import TaskRepository
from backend.app.services.task_recovery_service import reconcile_persisted_incomplete_tasks
class TaskRecoveryServiceTest(unittest.TestCase):
    """Tests for reconcile_persisted_incomplete_tasks.

    On startup the service must mark stale non-terminal tasks (running,
    cancelling) as failed with error code 'task_interrupted', keep any
    matching history entry in sync, and leave terminal tasks untouched.
    """

    def setUp(self) -> None:
        """Create a throwaway SQLite database shared by the task and history repositories."""
        self.temp_dir = tempfile.TemporaryDirectory()
        self.db_path = str(Path(self.temp_dir.name) / "tasks.db")
        self.task_repo = TaskRepository(self.db_path)
        self.history_repo = HistoryRepository(self.db_path)

    def tearDown(self) -> None:
        """Remove the temporary database directory."""
        self.temp_dir.cleanup()

    def test_reconcile_persisted_incomplete_tasks_marks_old_non_terminal_tasks_failed(self) -> None:
        """A 'running' task is failed (with its history entry) while a 'ready' task survives."""
        self.task_repo.insert_task_for_testing(
            {
                "id": "task-running",
                "operation": "copy",
                "status": "running",
                "source": "storage1/a.txt",
                "destination": "storage2/a.txt",
                "created_at": "2026-03-10T10:00:00Z",
                "started_at": "2026-03-10T10:00:01Z",
                "current_item": "storage1/a.txt",
            }
        )
        # Matching history entry; reconciliation must update it alongside the task.
        self.history_repo.create_entry(
            entry_id="task-running",
            operation="copy",
            status="queued",
            source="storage1/a.txt",
            destination="storage2/a.txt",
            created_at="2026-03-10T10:00:00Z",
        )
        # Control case: 'ready' is treated as terminal and must not change.
        self.task_repo.insert_task_for_testing(
            {
                "id": "task-ready",
                "operation": "download",
                "status": "ready",
                "source": "single_directory_zip",
                "destination": "docs.zip",
                "created_at": "2026-03-10T10:02:00Z",
                "finished_at": "2026-03-10T10:03:00Z",
            }
        )
        changed = reconcile_persisted_incomplete_tasks(self.task_repo, self.history_repo)
        self.assertEqual(changed, ["task-running"])
        task = self.task_repo.get_task("task-running")
        self.assertEqual(task["status"], "failed")
        self.assertEqual(task["error_code"], "task_interrupted")
        self.assertEqual(task["error_message"], "Task was interrupted before completion")
        # Stale progress pointer must be cleared on failure.
        self.assertIsNone(task["current_item"])
        history = self.history_repo.list_history(limit=5)[0]
        self.assertEqual(history["id"], "task-running")
        self.assertEqual(history["status"], "failed")
        self.assertEqual(history["error_code"], "task_interrupted")
        ready_task = self.task_repo.get_task("task-ready")
        self.assertEqual(ready_task["status"], "ready")

    def test_reconcile_persisted_incomplete_tasks_is_noop_when_all_tasks_terminal(self) -> None:
        """With only terminal tasks present, reconciliation reports no changes."""
        self.task_repo.insert_task_for_testing(
            {
                "id": "task-completed",
                "operation": "move",
                "status": "completed",
                "source": "storage1/a.txt",
                "destination": "storage2/a.txt",
                "created_at": "2026-03-10T10:00:00Z",
                "finished_at": "2026-03-10T10:00:02Z",
            }
        )
        changed = reconcile_persisted_incomplete_tasks(self.task_repo, self.history_repo)
        self.assertEqual(changed, [])
        self.assertEqual(self.task_repo.get_task("task-completed")["status"], "completed")

    def test_reconcile_persisted_incomplete_tasks_marks_stale_delete_task_failed(self) -> None:
        """A stale running delete task (no history entry) is failed as interrupted."""
        self.task_repo.insert_task_for_testing(
            {
                "id": "task-delete",
                "operation": "delete",
                "status": "running",
                "source": "storage1/trash.txt",
                "destination": "",
                "created_at": "2026-03-10T10:00:00Z",
                "started_at": "2026-03-10T10:00:01Z",
                "current_item": "storage1/trash.txt",
            }
        )
        changed = reconcile_persisted_incomplete_tasks(self.task_repo, self.history_repo)
        self.assertEqual(changed, ["task-delete"])
        task = self.task_repo.get_task("task-delete")
        self.assertEqual(task["status"], "failed")
        self.assertEqual(task["error_code"], "task_interrupted")

    def test_reconcile_persisted_incomplete_tasks_marks_stale_cancelling_task_failed(self) -> None:
        """'cancelling' counts as non-terminal: task and history both end up failed."""
        self.task_repo.insert_task_for_testing(
            {
                "id": "task-cancelling",
                "operation": "duplicate",
                "status": "cancelling",
                "source": "2 items",
                "destination": "same directory",
                "created_at": "2026-03-10T10:00:00Z",
                "started_at": "2026-03-10T10:00:01Z",
                "current_item": "storage1/report.txt",
            }
        )
        self.history_repo.create_entry(
            entry_id="task-cancelling",
            operation="duplicate",
            status="queued",
            source="2 items",
            destination="same directory",
            created_at="2026-03-10T10:00:00Z",
        )
        changed = reconcile_persisted_incomplete_tasks(self.task_repo, self.history_repo)
        self.assertEqual(changed, ["task-cancelling"])
        task = self.task_repo.get_task("task-cancelling")
        self.assertEqual(task["status"], "failed")
        self.assertEqual(task["error_code"], "task_interrupted")
        history = self.history_repo.list_history(limit=5)[0]
        self.assertEqual(history["id"], "task-cancelling")
        self.assertEqual(history["status"], "failed")
# Allow running this test module directly without a test runner.
if __name__ == "__main__":
    unittest.main()

Some files were not shown because too many files have changed in this diff. [Show More]