Compare commits


2 Commits

Author SHA1 Message Date
adamlamers 9064d3b7ea make endpoint names more consistent with their purpose 2026-05-04 17:40:35 -04:00
  Continuous Integration / backend-tests (push) Successful in 30s
  Continuous Integration / frontend-check (push) Successful in 16s
  Continuous Integration / e2e-tests (push) Successful in 12m27s
adamlamers 8336805ee2 natural sort for filebrowser 2026-05-04 16:44:05 -04:00
19 changed files with 593 additions and 484 deletions
+394
@@ -0,0 +1,394 @@
import json
import os
from datetime import datetime, timezone
from typing import List, Optional
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy import text
from sqlalchemy.orm import Session
from app.api.schemas import ItemMetadataSchema, TreeNodeSchema
from app.db import models
from app.db.database import get_db
router = APIRouter(prefix="/archive", tags=["Archive Index"])
def get_source_roots(db_session: Session) -> List[str]:
"""Retrieves the list of configured root paths from system settings."""
setting = (
db_session.query(models.SystemSetting)
.filter(models.SystemSetting.key == "source_roots")
.first()
)
if not setting:
# Fallback to scan_paths for legacy compatibility
setting = (
db_session.query(models.SystemSetting)
.filter(models.SystemSetting.key == "scan_paths")
.first()
)
if not setting:
return []
try:
return json.loads(setting.value)
except Exception:
return [setting.value] if setting.value else []
@router.get("/browse", operation_id="archive_browse")
def browse(path: str = "ROOT", db_session: Session = Depends(get_db)):
"""Browses the archived file index at a specific path."""
if path == "ROOT":
# Root level: show source roots that have at least one protected file
source_roots = get_source_roots(db_session)
results = []
for root in source_roots:
# Check if this root contains ANY protected file
# total: count files that are either not ignored OR already have a version
# protected: count files that have a version
prot_check = text("""
SELECT
SUM(CASE WHEN fs.is_ignored = 0 OR EXISTS(SELECT 1 FROM file_versions fv2 WHERE fv2.filesystem_state_id = fs.id) THEN 1 ELSE 0 END) as total,
SUM(CASE WHEN EXISTS(SELECT 1 FROM file_versions fv WHERE fv.filesystem_state_id = fs.id) THEN 1 ELSE 0 END) as protected,
(SELECT GROUP_CONCAT(DISTINCT sm.identifier)
FROM file_versions fv
JOIN storage_media sm ON sm.id = fv.media_id
JOIN filesystem_state fs2 ON fs2.id = fv.filesystem_state_id
WHERE (fs2.file_path = :r OR fs2.file_path LIKE :prefix)) as media_list,
SUM(CASE WHEN EXISTS(SELECT 1 FROM restore_cart rc WHERE rc.filesystem_state_id = fs.id) THEN 1 ELSE 0 END) as selected_count,
SUM(fs.size) as total_size
FROM filesystem_state fs
WHERE (fs.file_path = :r OR fs.file_path LIKE :prefix)
""")
stats = db_session.execute(
prot_check, {"r": root, "prefix": f"{root}/%"}
).fetchone()
total = 0
protected = 0
media_list = []
selected_count = 0
total_size = 0
if stats:
total = stats[0] or 0
protected = stats[1] or 0
media_list = stats[2].split(",") if stats[2] else []
selected_count = stats[3] or 0
total_size = stats[4] or 0
if protected > 0:
results.append(
{
"name": root,
"path": root,
"type": "directory",
"size": total_size,
"vulnerable": (protected < total),
"selected": (
selected_count > 0 and selected_count == protected
),
"indeterminate": (
selected_count > 0 and selected_count < protected
),
"media": media_list,
}
)
return results
query_path = path if path.endswith("/") else path + "/"
# Find directories and their protection stats (Optimized: Single Pass)
dir_sql = text("""
SELECT
SUBSTR(file_path, LENGTH(:prefix) + 1, INSTR(SUBSTR(file_path, LENGTH(:prefix) + 1), '/') - 1) as dir_name,
SUM(CASE WHEN is_ignored = 0 OR EXISTS(SELECT 1 FROM file_versions fv3 WHERE fv3.filesystem_state_id = filesystem_state.id) THEN 1 ELSE 0 END) as total,
SUM(CASE WHEN EXISTS(SELECT 1 FROM file_versions fv WHERE fv.filesystem_state_id = filesystem_state.id) THEN 1 ELSE 0 END) as protected,
(SELECT GROUP_CONCAT(DISTINCT sm.identifier)
FROM file_versions fv
JOIN storage_media sm ON sm.id = fv.media_id
JOIN filesystem_state fs2 ON fs2.id = fv.filesystem_state_id
WHERE fs2.file_path LIKE :prefix || SUBSTR(file_path, LENGTH(:prefix) + 1, INSTR(SUBSTR(file_path, LENGTH(:prefix) + 1), '/') - 1) || '/%') as media_list,
SUM(CASE WHEN EXISTS(SELECT 1 FROM restore_cart rc WHERE rc.filesystem_state_id = filesystem_state.id) THEN 1 ELSE 0 END) as selected_count,
SUM(size) as total_size
FROM filesystem_state
WHERE file_path LIKE :prefix_wildcard
AND file_path != :prefix
AND INSTR(SUBSTR(file_path, LENGTH(:prefix) + 1), '/') > 0
GROUP BY dir_name
""")
dirs = db_session.execute(
dir_sql, {"prefix": query_path, "prefix_wildcard": f"{query_path}%"}
).fetchall()
# Find files (immediate children) with their media locations
file_sql = text("""
SELECT
fs.id, fs.file_path, fs.size, fs.mtime,
EXISTS(SELECT 1 FROM file_versions fv WHERE fv.filesystem_state_id = fs.id) as has_version,
(SELECT GROUP_CONCAT(sm.identifier)
FROM file_versions fv
JOIN storage_media sm ON sm.id = fv.media_id
WHERE fv.filesystem_state_id = fs.id) as media_list,
EXISTS(SELECT 1 FROM restore_cart rc WHERE rc.filesystem_state_id = fs.id) as is_selected
FROM filesystem_state fs
WHERE fs.file_path LIKE :prefix_wildcard
AND fs.file_path != :prefix
AND INSTR(SUBSTR(fs.file_path, LENGTH(:prefix) + 1), '/') = 0
""")
files = db_session.execute(
file_sql, {"prefix": query_path, "prefix_wildcard": f"{query_path}%"}
).fetchall()
results = []
for d in dirs:
if not d[0] or d[0] == "/":
continue
total = d[1] or 0
protected = d[2] or 0
media_list = d[3].split(",") if d[3] else []
selected_count = d[4] or 0
total_size = d[5] or 0
# Only show directories that have at least one protected file
if protected == 0:
continue
full_dir_path = query_path + d[0]
results.append(
{
"name": d[0],
"path": full_dir_path,
"type": "directory",
"size": total_size,
"vulnerable": (protected < total),
"selected": (selected_count > 0 and selected_count == protected),
"indeterminate": (selected_count > 0 and selected_count < protected),
"media": media_list,
}
)
for f in files:
# Only show files that actually have at least one version on media
if not f[4]: # f[4] is has_version
continue
results.append(
{
"name": os.path.basename(f[1]),
"path": f[1],
"type": "file",
"size": f[2],
"mtime": datetime.fromtimestamp(f[3], tz=timezone.utc),
"vulnerable": False,
"selected": bool(f[6]),
"media": f[5].split(",") if f[5] else [],
}
)
# Deduplicate by path to prevent frontend keyed each block errors
seen_paths: set[str] = set()
deduped_results: list[dict] = []
for r in results:
if r["path"] not in seen_paths:
seen_paths.add(r["path"])
deduped_results.append(r)
results = deduped_results
return results
@router.get("/search", operation_id="archive_search")
def search(q: str, path: Optional[str] = None, db_session: Session = Depends(get_db)):
"""Performs FTS5 search across the indexed file paths, optionally scoped by path."""
if len(q) < 2:
return []
search_sql = text(
"""
SELECT
fs.id, fs.file_path, fs.size, fs.mtime,
EXISTS(SELECT 1 FROM file_versions fv WHERE fv.filesystem_state_id = fs.id) as has_version,
(SELECT GROUP_CONCAT(sm.identifier)
FROM file_versions fv
JOIN storage_media sm ON sm.id = fv.media_id
WHERE fv.filesystem_state_id = fs.id) as media_list,
EXISTS(SELECT 1 FROM restore_cart rc WHERE rc.filesystem_state_id = fs.id) as is_selected
FROM filesystem_fts fts
JOIN filesystem_state fs ON fs.id = fts.rowid
WHERE filesystem_fts MATCH :query
AND fs.file_path LIKE :path_prefix
ORDER BY rank
LIMIT 100
"""
)
path_prefix = f"{path}%" if path and path != "ROOT" else "%"
query_params = {"query": q, "path_prefix": path_prefix}
rows = db_session.execute(search_sql, query_params).fetchall()
return [
{
"name": os.path.basename(r[1]),
"path": r[1],
"type": "file",
"size": r[2],
"mtime": datetime.fromtimestamp(r[3], tz=timezone.utc),
"vulnerable": False,
"selected": bool(r[6]),
"media": r[5].split(",") if r[5] else [],
}
for r in rows
if r[4] # Only show if has_version is True
]
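# Editorial note: FTS5's MATCH treats the raw query as full FTS5 query syntax
# (quoted phrases, AND/OR/NOT, column filters), so user input containing stray
# quotes or operators can raise an "fts5: syntax error"; callers may want to
# quote or sanitize terms before passing them through.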
@router.get("/tree", response_model=List[TreeNodeSchema], operation_id="archive_tree")
def tree(path: Optional[str] = None, db_session: Session = Depends(get_db)):
"""Returns a recursive tree view of the virtual archive index."""
if path is None or path == "ROOT":
# Root level: show source roots that have at least one protected file
source_roots = get_source_roots(db_session)
results = []
for root in source_roots:
# Check if this root contains ANY protected file
prot_check = text("""
SELECT 1 FROM filesystem_state fs
WHERE (fs.file_path = :r OR fs.file_path LIKE :prefix)
AND EXISTS(SELECT 1 FROM file_versions fv WHERE fv.filesystem_state_id = fs.id)
LIMIT 1
""")
has_prot = db_session.execute(
prot_check, {"r": root, "prefix": f"{root}/%"}
).fetchone()
if has_prot:
results.append(TreeNodeSchema(name=root, path=root, has_children=True))
return results
query_path = path if path.endswith("/") else path + "/"
# Find subdirectories that contain at least one protected file (ignoring current is_ignored state)
dir_sql = text("""
SELECT DISTINCT
SUBSTR(file_path, LENGTH(:prefix) + 1, INSTR(SUBSTR(file_path, LENGTH(:prefix) + 1), '/') - 1) as dir_name
FROM filesystem_state fs
WHERE file_path LIKE :prefix_wildcard
AND file_path != :prefix
AND INSTR(SUBSTR(file_path, LENGTH(:prefix) + 1), '/') > 0
AND EXISTS(SELECT 1 FROM file_versions fv WHERE fv.filesystem_state_id = fs.id)
""")
path_prefix = query_path
dirs = db_session.execute(
dir_sql, {"prefix": path_prefix, "prefix_wildcard": f"{path_prefix}%"}
).fetchall()
results = []
for d in dirs:
if not d[0] or d[0] == "/":
continue
results.append(
TreeNodeSchema(name=d[0], path=query_path + d[0], has_children=True)
)
results.sort(key=lambda x: x.name.lower())
return results
@router.get(
"/metadata", response_model=ItemMetadataSchema, operation_id="archive_metadata"
)
def metadata(path: str, db_session: Session = Depends(get_db)):
"""Retrieves full version history and location details for an indexed file or directory."""
item = (
db_session.query(models.FilesystemState)
.filter(models.FilesystemState.file_path == path)
.first()
)
if item:
# Exact file match
versions = []
for v in item.versions:
versions.append(
{
"media_id": v.media.identifier,
"media_type": v.media.media_type,
"archive_id": v.file_number,
"created_at": v.created_at,
"is_split": v.is_split,
"offset": v.offset_start,
}
)
return ItemMetadataSchema(
id=item.id,
path=item.file_path,
type="file",
size=item.size,
mtime=datetime.fromtimestamp(item.mtime, tz=timezone.utc),
last_seen_timestamp=item.last_seen_timestamp,
sha256_hash=item.sha256_hash,
is_ignored=item.is_ignored,
versions=versions,
)
# No exact match — check if this is a directory with archived children
prefix = path if path.endswith("/") else path + "/"
dir_stats = db_session.execute(
text("""
SELECT
COUNT(*) as child_count,
SUM(size) as total_size,
MAX(mtime) as latest_mtime,
MAX(last_seen_timestamp) as latest_seen
FROM filesystem_state
WHERE file_path LIKE :prefix
"""),
{"prefix": f"{prefix}%"},
).fetchone()
if not dir_stats or dir_stats[0] == 0:
raise HTTPException(status_code=404, detail="File not found in index.")
# Aggregate unique media locations for all children
media_rows = db_session.execute(
text("""
SELECT DISTINCT
sm.identifier as media_id,
sm.media_type,
MIN(fv.created_at) as earliest_created
FROM file_versions fv
JOIN storage_media sm ON sm.id = fv.media_id
JOIN filesystem_state fs ON fs.id = fv.filesystem_state_id
WHERE fs.file_path LIKE :prefix
GROUP BY sm.identifier, sm.media_type
"""),
{"prefix": f"{prefix}%"},
).fetchall()
versions = []
for row in media_rows:
versions.append(
{
"media_id": row[0],
"media_type": row[1],
"archive_id": "",
"created_at": row[2],
"is_split": False,
"offset": 0,
}
)
return ItemMetadataSchema(
id=-1,
path=path,
type="directory",
size=dir_stats[1] or 0,
mtime=datetime.fromtimestamp(dir_stats[2] or 0, tz=timezone.utc),
last_seen_timestamp=dir_stats[3],
child_count=dir_stats[0],
versions=versions,
)
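For orientation, a minimal sketch of how these endpoints are reached from the regenerated TypeScript client (archiveBrowse, archiveSearch, and archiveMetadata are the names produced by the operation_id changes, as the frontend diffs below show; the concrete paths and the cast are hypothetical, since /archive/browse is typed as unknown in the generated bindings):

import { archiveBrowse, archiveSearch, archiveMetadata } from "$lib/api";

// Browse the virtual archive root; entries mirror the dicts built in browse() above.
const roots = await archiveBrowse({ query: { path: "ROOT" } });
const entries = (roots.data ?? []) as Array<{ name: string; path: string; type: string }>;

// Scoped FTS5 search; the endpoint returns [] for queries shorter than 2 characters.
const hits = await archiveSearch({ query: { q: "report", path: entries[0]?.path } });

// Full version history and media locations for one indexed file (path is hypothetical).
const meta = await archiveMetadata({ query: { path: "/mnt/data/report.pdf" } });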
+2 -384
@@ -1,7 +1,6 @@
 import json
-import os
 from datetime import datetime, timezone
-from typing import List, Optional
+from typing import List
 import psutil
 from fastapi import APIRouter, Depends, HTTPException
@@ -10,13 +9,12 @@ from pydantic import BaseModel
 from sqlalchemy import text
 from sqlalchemy.orm import Session
+from app.api.archive import get_source_roots
 from app.api.schemas import (
-    ItemMetadataSchema,
     MediaCreateSchema,
     MediaSchema,
     MediaUpdateSchema,
     StorageProviderSchema,
-    TreeNodeSchema,
 )
 from app.db import models
 from app.db.database import get_db
@@ -33,28 +31,6 @@ class ReorderMediaRequest(BaseModel):
 # --- Core Logic ---
-def get_source_roots(db_session: Session) -> List[str]:
-    """Retrieves the list of configured root paths from system settings."""
-    setting = (
-        db_session.query(models.SystemSetting)
-        .filter(models.SystemSetting.key == "source_roots")
-        .first()
-    )
-    if not setting:
-        # Fallback to scan_paths for legacy compatibility
-        setting = (
-            db_session.query(models.SystemSetting)
-            .filter(models.SystemSetting.key == "scan_paths")
-            .first()
-        )
-    if not setting:
-        return []
-    try:
-        return json.loads(setting.value)
-    except Exception:
-        return [setting.value] if setting.value else []
 @router.get("/providers", response_model=List[StorageProviderSchema])
 def list_storage_providers():
     """Returns a registry of all available storage providers and their configurations."""
@@ -693,361 +669,3 @@ def detect_unregistered_media(db_session: Session = Depends(get_db)):
         )
     return detected
@router.get("/browse")
def browse_archive_index(path: str = "ROOT", db_session: Session = Depends(get_db)):
"""Browses the archived file index at a specific path."""
if path == "ROOT":
# Root level: show source roots that have at least one protected file
source_roots = get_source_roots(db_session)
results = []
for root in source_roots:
# Check if this root contains ANY protected file
# total: count files that are either not ignored OR already have a version
# protected: count files that have a version
prot_check = text("""
SELECT
SUM(CASE WHEN fs.is_ignored = 0 OR EXISTS(SELECT 1 FROM file_versions fv2 WHERE fv2.filesystem_state_id = fs.id) THEN 1 ELSE 0 END) as total,
SUM(CASE WHEN EXISTS(SELECT 1 FROM file_versions fv WHERE fv.filesystem_state_id = fs.id) THEN 1 ELSE 0 END) as protected,
(SELECT GROUP_CONCAT(DISTINCT sm.identifier)
FROM file_versions fv
JOIN storage_media sm ON sm.id = fv.media_id
JOIN filesystem_state fs2 ON fs2.id = fv.filesystem_state_id
WHERE (fs2.file_path = :r OR fs2.file_path LIKE :prefix)) as media_list,
SUM(CASE WHEN EXISTS(SELECT 1 FROM restore_cart rc WHERE rc.filesystem_state_id = fs.id) THEN 1 ELSE 0 END) as selected_count,
SUM(fs.size) as total_size
FROM filesystem_state fs
WHERE (fs.file_path = :r OR fs.file_path LIKE :prefix)
""")
stats = db_session.execute(
prot_check, {"r": root, "prefix": f"{root}/%"}
).fetchone()
total = 0
protected = 0
media_list = []
selected_count = 0
total_size = 0
if stats:
total = stats[0] or 0
protected = stats[1] or 0
media_list = stats[2].split(",") if stats[2] else []
selected_count = stats[3] or 0
total_size = stats[4] or 0
if protected > 0:
results.append(
{
"name": root,
"path": root,
"type": "directory",
"size": total_size,
"vulnerable": (protected < total),
"selected": (
selected_count > 0 and selected_count == protected
),
"indeterminate": (
selected_count > 0 and selected_count < protected
),
"media": media_list,
}
)
return results
query_path = path if path.endswith("/") else path + "/"
# Find directories and their protection stats (Optimized: Single Pass)
dir_sql = text("""
SELECT
SUBSTR(file_path, LENGTH(:prefix) + 1, INSTR(SUBSTR(file_path, LENGTH(:prefix) + 1), '/') - 1) as dir_name,
SUM(CASE WHEN is_ignored = 0 OR EXISTS(SELECT 1 FROM file_versions fv3 WHERE fv3.filesystem_state_id = filesystem_state.id) THEN 1 ELSE 0 END) as total,
SUM(CASE WHEN EXISTS(SELECT 1 FROM file_versions fv WHERE fv.filesystem_state_id = filesystem_state.id) THEN 1 ELSE 0 END) as protected,
(SELECT GROUP_CONCAT(DISTINCT sm.identifier)
FROM file_versions fv
JOIN storage_media sm ON sm.id = fv.media_id
JOIN filesystem_state fs2 ON fs2.id = fv.filesystem_state_id
WHERE fs2.file_path LIKE :prefix || SUBSTR(file_path, LENGTH(:prefix) + 1, INSTR(SUBSTR(file_path, LENGTH(:prefix) + 1), '/') - 1) || '/%') as media_list,
SUM(CASE WHEN EXISTS(SELECT 1 FROM restore_cart rc WHERE rc.filesystem_state_id = filesystem_state.id) THEN 1 ELSE 0 END) as selected_count,
SUM(size) as total_size
FROM filesystem_state
WHERE file_path LIKE :prefix_wildcard
AND file_path != :prefix
AND INSTR(SUBSTR(file_path, LENGTH(:prefix) + 1), '/') > 0
GROUP BY dir_name
""")
dirs = db_session.execute(
dir_sql, {"prefix": query_path, "prefix_wildcard": f"{query_path}%"}
).fetchall()
# Find files (immediate children) with their media locations
file_sql = text("""
SELECT
fs.id, fs.file_path, fs.size, fs.mtime,
EXISTS(SELECT 1 FROM file_versions fv WHERE fv.filesystem_state_id = fs.id) as has_version,
(SELECT GROUP_CONCAT(sm.identifier)
FROM file_versions fv
JOIN storage_media sm ON sm.id = fv.media_id
WHERE fv.filesystem_state_id = fs.id) as media_list,
EXISTS(SELECT 1 FROM restore_cart rc WHERE rc.filesystem_state_id = fs.id) as is_selected
FROM filesystem_state fs
WHERE fs.file_path LIKE :prefix_wildcard
AND fs.file_path != :prefix
AND INSTR(SUBSTR(fs.file_path, LENGTH(:prefix) + 1), '/') = 0
""")
files = db_session.execute(
file_sql, {"prefix": query_path, "prefix_wildcard": f"{query_path}%"}
).fetchall()
results = []
for d in dirs:
if not d[0] or d[0] == "/":
continue
total = d[1] or 0
protected = d[2] or 0
media_list = d[3].split(",") if d[3] else []
selected_count = d[4] or 0
total_size = d[5] or 0
# Only show directories that have at least one protected file
if protected == 0:
continue
full_dir_path = query_path + d[0]
results.append(
{
"name": d[0],
"path": full_dir_path,
"type": "directory",
"size": total_size,
"vulnerable": (protected < total),
"selected": (selected_count > 0 and selected_count == protected),
"indeterminate": (selected_count > 0 and selected_count < protected),
"media": media_list,
}
)
for f in files:
# Only show files that actually have at least one version on media
if not f[4]: # f[4] is has_version
continue
results.append(
{
"name": os.path.basename(f[1]),
"path": f[1],
"type": "file",
"size": f[2],
"mtime": datetime.fromtimestamp(f[3], tz=timezone.utc),
"vulnerable": False,
"selected": bool(f[6]),
"media": f[5].split(",") if f[5] else [],
}
)
# Deduplicate by path to prevent frontend keyed each block errors
seen_paths: set[str] = set()
deduped_results: list[dict] = []
for r in results:
if r["path"] not in seen_paths:
seen_paths.add(r["path"])
deduped_results.append(r)
results = deduped_results
return results
@router.get("/search")
def search_archive_index(
q: str, path: Optional[str] = None, db_session: Session = Depends(get_db)
):
"""Performs FTS5 search across the indexed file paths, optionally scoped by path."""
if len(q) < 2:
return []
search_sql = text(
"""
SELECT
fs.id, fs.file_path, fs.size, fs.mtime,
EXISTS(SELECT 1 FROM file_versions fv WHERE fv.filesystem_state_id = fs.id) as has_version,
(SELECT GROUP_CONCAT(sm.identifier)
FROM file_versions fv
JOIN storage_media sm ON sm.id = fv.media_id
WHERE fv.filesystem_state_id = fs.id) as media_list,
EXISTS(SELECT 1 FROM restore_cart rc WHERE rc.filesystem_state_id = fs.id) as is_selected
FROM filesystem_fts fts
JOIN filesystem_state fs ON fs.id = fts.rowid
WHERE filesystem_fts MATCH :query
AND fs.file_path LIKE :path_prefix
ORDER BY rank
LIMIT 100
"""
)
path_prefix = f"{path}%" if path and path != "ROOT" else "%"
query_params = {"query": q, "path_prefix": path_prefix}
rows = db_session.execute(search_sql, query_params).fetchall()
return [
{
"name": os.path.basename(r[1]),
"path": r[1],
"type": "file",
"size": r[2],
"mtime": datetime.fromtimestamp(r[3], tz=timezone.utc),
"vulnerable": False,
"selected": bool(r[6]),
"media": r[5].split(",") if r[5] else [],
}
for r in rows
if r[4] # Only show if has_version is True
]
@router.get("/tree", response_model=List[TreeNodeSchema])
def get_archive_tree(path: Optional[str] = None, db_session: Session = Depends(get_db)):
"""Returns a recursive tree view of the virtual archive index."""
if path is None or path == "ROOT":
# Root level: show source roots that have at least one protected file
source_roots = get_source_roots(db_session)
results = []
for root in source_roots:
# Check if this root contains ANY protected file
prot_check = text("""
SELECT 1 FROM filesystem_state fs
WHERE (fs.file_path = :r OR fs.file_path LIKE :prefix)
AND EXISTS(SELECT 1 FROM file_versions fv WHERE fv.filesystem_state_id = fs.id)
LIMIT 1
""")
has_prot = db_session.execute(
prot_check, {"r": root, "prefix": f"{root}/%"}
).fetchone()
if has_prot:
results.append(TreeNodeSchema(name=root, path=root, has_children=True))
return results
query_path = path if path.endswith("/") else path + "/"
# Find subdirectories that contain at least one protected file (ignoring current is_ignored state)
dir_sql = text("""
SELECT DISTINCT
SUBSTR(file_path, LENGTH(:prefix) + 1, INSTR(SUBSTR(file_path, LENGTH(:prefix) + 1), '/') - 1) as dir_name
FROM filesystem_state fs
WHERE file_path LIKE :prefix_wildcard
AND file_path != :prefix
AND INSTR(SUBSTR(file_path, LENGTH(:prefix) + 1), '/') > 0
AND EXISTS(SELECT 1 FROM file_versions fv WHERE fv.filesystem_state_id = fs.id)
""")
path_prefix = query_path
dirs = db_session.execute(
dir_sql, {"prefix": path_prefix, "prefix_wildcard": f"{path_prefix}%"}
).fetchall()
results = []
for d in dirs:
if not d[0] or d[0] == "/":
continue
results.append(
TreeNodeSchema(name=d[0], path=query_path + d[0], has_children=True)
)
results.sort(key=lambda x: x.name.lower())
return results
@router.get("/metadata", response_model=ItemMetadataSchema)
def get_archive_item_metadata(path: str, db_session: Session = Depends(get_db)):
"""Retrieves full version history and location details for an indexed file or directory."""
item = (
db_session.query(models.FilesystemState)
.filter(models.FilesystemState.file_path == path)
.first()
)
if item:
# Exact file match
versions = []
for v in item.versions:
versions.append(
{
"media_id": v.media.identifier,
"media_type": v.media.media_type,
"archive_id": v.file_number,
"created_at": v.created_at,
"is_split": v.is_split,
"offset": v.offset_start,
}
)
return ItemMetadataSchema(
id=item.id,
path=item.file_path,
type="file",
size=item.size,
mtime=datetime.fromtimestamp(item.mtime, tz=timezone.utc),
last_seen_timestamp=item.last_seen_timestamp,
sha256_hash=item.sha256_hash,
is_ignored=item.is_ignored,
versions=versions,
)
# No exact match — check if this is a directory with archived children
prefix = path if path.endswith("/") else path + "/"
dir_stats = db_session.execute(
text("""
SELECT
COUNT(*) as child_count,
SUM(size) as total_size,
MAX(mtime) as latest_mtime,
MAX(last_seen_timestamp) as latest_seen
FROM filesystem_state
WHERE file_path LIKE :prefix
"""),
{"prefix": f"{prefix}%"},
).fetchone()
if not dir_stats or dir_stats[0] == 0:
raise HTTPException(status_code=404, detail="File not found in index.")
# Aggregate unique media locations for all children
media_rows = db_session.execute(
text("""
SELECT DISTINCT
sm.identifier as media_id,
sm.media_type,
MIN(fv.created_at) as earliest_created
FROM file_versions fv
JOIN storage_media sm ON sm.id = fv.media_id
JOIN filesystem_state fs ON fs.id = fv.filesystem_state_id
WHERE fs.file_path LIKE :prefix
GROUP BY sm.identifier, sm.media_type
"""),
{"prefix": f"{prefix}%"},
).fetchall()
versions = []
for row in media_rows:
versions.append(
{
"media_id": row[0],
"media_type": row[1],
"archive_id": "",
"created_at": row[2],
"is_split": False,
"offset": 0,
}
)
return ItemMetadataSchema(
id=-1,
path=path,
type="directory",
size=dir_stats[1] or 0,
mtime=datetime.fromtimestamp(dir_stats[2] or 0, tz=timezone.utc),
last_seen_timestamp=dir_stats[3],
child_count=dir_stats[0],
versions=versions,
)
+20 -5
@@ -640,7 +640,9 @@ def _get_last_scan_time(db_session: Session) -> Optional[datetime]:
     return last_scan.completed_at if last_scan else None
-@router.get("/browse", response_model=BrowseResponseSchema)
+@router.get(
+    "/browse", response_model=BrowseResponseSchema, operation_id="filesystem_browse"
+)
 def browse_system_path(
     path: Optional[str] = None, db_session: Session = Depends(get_db)
 ):
@@ -729,6 +731,15 @@ def browse_system_path(
         except OSError:
             pass
+    # Aggregate sizes for directories from indexed rows
+    dir_sizes: dict[str, int] = {}
+    for file_path, size, _mtime, _sha256_hash, _is_ignored in rows:
+        relative = file_path[len(target_prefix) :]
+        if "/" in relative:
+            immediate_name = relative.split("/")[0]
+            child_path = target_prefix + immediate_name
+            dir_sizes[child_path] = dir_sizes.get(child_path, 0) + (size or 0)
     results = []
     seen = set()
@@ -747,6 +758,7 @@ def browse_system_path(
                     name=immediate_name,
                     path=child_path,
                     type="directory",
+                    size=dir_sizes.get(child_path, 0),
                     ignored=dir_ignored,
                 )
             )
@@ -769,7 +781,9 @@ def browse_system_path(
     return BrowseResponseSchema(files=results, last_scan_time=last_scan_time)
-@router.get("/search", response_model=List[FileItemSchema])
+@router.get(
+    "/search", response_model=List[FileItemSchema], operation_id="filesystem_search"
+)
 def search_system_index(
     q: str,
     path: Optional[str] = None,
@@ -1167,10 +1181,11 @@ async def import_database_index(file: Any, db_session: Session = Depends(get_db)
return {"message": "Import logic restricted for safety."} return {"message": "Import logic restricted for safety."}
@router.get("/tree", response_model=List[TreeNodeSchema]) @router.get(
"/tree", response_model=List[TreeNodeSchema], operation_id="filesystem_tree"
)
def get_system_tree(path: Optional[str] = None, db_session: Session = Depends(get_db)): def get_system_tree(path: Optional[str] = None, db_session: Session = Depends(get_db)):
"""Returns a recursive tree view of the system for configuration.""" """Returns a recursive tree view of the system for configuration."""
from app.api.inventory import TreeNodeSchema
roots = get_source_roots(db_session) roots = get_source_roots(db_session)
if path is None or path == "ROOT": if path is None or path == "ROOT":
@@ -1432,7 +1447,7 @@ def get_discrepancies_tree(
     db_session: Session = Depends(get_db),
 ):
     """Returns tree of directories that contain discrepancy files, grouped by source root."""
-    from app.api.inventory import get_source_roots
+    from app.api.archive import get_source_roots
     # Get source roots
     roots = get_source_roots(db_session)
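Since directory rows now carry an aggregated size, the frontend can format them exactly like files; a minimal sketch using the generated client and the existing formatSize helper (the item fields are assumed from FileItemSchema, which this diff does not show):

import { filesystemBrowse } from "$lib/api";
import { formatSize } from "$lib/utils";

const resp = await filesystemBrowse({ query: { path: "/mnt/data" } });
for (const item of resp.data?.files ?? []) {
    // Directories previously reported no size; they now carry the summed size
    // of their indexed children, so one rendering path covers both row types.
    console.log(`${item.type}\t${formatSize(item.size)}\t${item.name}`);
}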
+2 -1
@@ -6,7 +6,7 @@ from fastapi.staticfiles import StaticFiles
 from fastapi.responses import FileResponse, JSONResponse
 from loguru import logger
-from app.api import backups, inventory, restores, system
+from app.api import archive, backups, inventory, restores, system
 @asynccontextmanager
@@ -44,6 +44,7 @@ app.add_middleware(
 # Register API Routers
 app.include_router(system.router)
 app.include_router(inventory.router)
+app.include_router(archive.router)
 app.include_router(backups.router)
 app.include_router(restores.router)
+3 -3
@@ -110,7 +110,7 @@ def test_browse_index_root(client, db_session):
     db_session.commit()
     # Root should show source_data if it has versions
-    response = client.get("/inventory/browse?path=ROOT")
+    response = client.get("/archive/browse?path=ROOT")
     assert response.status_code == 200
     data = response.json()
     assert len(data) > 0
@@ -153,7 +153,7 @@ def test_search_index(client, db_session):
     # but conftest uses a real temp file.
     db_session.commit()
-    response = client.get("/inventory/search?q=important")
+    response = client.get("/archive/search?q=important")
     assert response.status_code == 200
     # If FTS5 is working, it should return results.
@@ -169,6 +169,6 @@ def test_get_metadata(client, db_session):
     db_session.add(file1)
     db_session.commit()
-    response = client.get("/inventory/metadata?path=data/meta.txt")
+    response = client.get("/archive/metadata?path=data/meta.txt")
    assert response.status_code == 200
    assert response.json()["path"] == "data/meta.txt"
+3 -3
@@ -36,13 +36,13 @@ export default defineConfig({
   /* Run your local dev server before starting the tests */
   webServer: [
     {
-      command: 'cd ../backend && rm -f e2e_test.db* && DATABASE_URL="sqlite:///e2e_test.db" TAPEHOARD_TEST_MODE="true" TAPEHOARD_CORS_ORIGINS="*,http://localhost:5174" uv run python -m app.start_test_server --host 0.0.0.0 --port 8001',
+      command: 'cd ../backend && rm -f e2e_test.db* && DATABASE_URL="sqlite:///e2e_test.db" TAPEHOARD_TEST_MODE="true" TAPEHOARD_CORS_ORIGINS="*,http://localhost:5174,http://127.0.0.1:5174" uv run python -m app.start_test_server --host 127.0.0.1 --port 8001',
-      url: 'http://localhost:8001/health',
+      url: 'http://127.0.0.1:8001/health',
       reuseExistingServer: !process.env.CI,
       timeout: 120 * 1000,
     },
     {
-      command: 'VITE_API_URL=http://localhost:8001 npm run dev -- --port 5174',
+      command: 'VITE_API_URL=http://127.0.0.1:8001 npm run dev -- --port 5174',
       url: 'http://localhost:5174',
       reuseExistingServer: !process.env.CI,
       timeout: 120 * 1000,
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
+40 -40
@@ -1064,7 +1064,7 @@ export type GetScanStatusSystemScanStatusGetResponses = {
 export type GetScanStatusSystemScanStatusGetResponse = GetScanStatusSystemScanStatusGetResponses[keyof GetScanStatusSystemScanStatusGetResponses];
-export type BrowseSystemPathSystemBrowseGetData = {
+export type FilesystemBrowseData = {
     body?: never;
     path?: never;
     query?: {
@@ -1076,25 +1076,25 @@ export type BrowseSystemPathSystemBrowseGetData = {
     url: '/system/browse';
 };
-export type BrowseSystemPathSystemBrowseGetErrors = {
+export type FilesystemBrowseErrors = {
     /**
      * Validation Error
      */
     422: HttpValidationError;
 };
-export type BrowseSystemPathSystemBrowseGetError = BrowseSystemPathSystemBrowseGetErrors[keyof BrowseSystemPathSystemBrowseGetErrors];
+export type FilesystemBrowseError = FilesystemBrowseErrors[keyof FilesystemBrowseErrors];
-export type BrowseSystemPathSystemBrowseGetResponses = {
+export type FilesystemBrowseResponses = {
     /**
      * Successful Response
      */
     200: BrowseResponseSchema;
 };
-export type BrowseSystemPathSystemBrowseGetResponse = BrowseSystemPathSystemBrowseGetResponses[keyof BrowseSystemPathSystemBrowseGetResponses];
+export type FilesystemBrowseResponse = FilesystemBrowseResponses[keyof FilesystemBrowseResponses];
-export type SearchSystemIndexSystemSearchGetData = {
+export type FilesystemSearchData = {
     body?: never;
     path?: never;
     query: {
@@ -1114,25 +1114,25 @@ export type SearchSystemIndexSystemSearchGetData = {
     url: '/system/search';
 };
-export type SearchSystemIndexSystemSearchGetErrors = {
+export type FilesystemSearchErrors = {
     /**
      * Validation Error
      */
     422: HttpValidationError;
 };
-export type SearchSystemIndexSystemSearchGetError = SearchSystemIndexSystemSearchGetErrors[keyof SearchSystemIndexSystemSearchGetErrors];
+export type FilesystemSearchError = FilesystemSearchErrors[keyof FilesystemSearchErrors];
-export type SearchSystemIndexSystemSearchGetResponses = {
+export type FilesystemSearchResponses = {
     /**
-     * Response Search System Index System Search Get
+     * Response Filesystem Search
      *
      * Successful Response
      */
     200: Array<FileItemSchema>;
 };
-export type SearchSystemIndexSystemSearchGetResponse = SearchSystemIndexSystemSearchGetResponses[keyof SearchSystemIndexSystemSearchGetResponses];
+export type FilesystemSearchResponse = FilesystemSearchResponses[keyof FilesystemSearchResponses];
 export type BatchUpdateTrackingSystemTrackBatchPostData = {
     body: BatchTrackRequest;
@@ -1330,7 +1330,7 @@ export type ImportDatabaseIndexSystemDatabaseImportPostResponses = {
     200: unknown;
 };
-export type GetSystemTreeSystemTreeGetData = {
+export type FilesystemTreeData = {
     body?: never;
     path?: never;
     query?: {
@@ -1342,25 +1342,25 @@ export type GetSystemTreeSystemTreeGetData = {
     url: '/system/tree';
 };
-export type GetSystemTreeSystemTreeGetErrors = {
+export type FilesystemTreeErrors = {
     /**
      * Validation Error
      */
     422: HttpValidationError;
 };
-export type GetSystemTreeSystemTreeGetError = GetSystemTreeSystemTreeGetErrors[keyof GetSystemTreeSystemTreeGetErrors];
+export type FilesystemTreeError = FilesystemTreeErrors[keyof FilesystemTreeErrors];
-export type GetSystemTreeSystemTreeGetResponses = {
+export type FilesystemTreeResponses = {
     /**
-     * Response Get System Tree System Tree Get
+     * Response Filesystem Tree
      *
      * Successful Response
      */
     200: Array<TreeNodeSchema>;
 };
-export type GetSystemTreeSystemTreeGetResponse = GetSystemTreeSystemTreeGetResponses[keyof GetSystemTreeSystemTreeGetResponses];
+export type FilesystemTreeResponse = FilesystemTreeResponses[keyof FilesystemTreeResponses];
 export type ListDiscrepanciesSystemDiscrepanciesGetData = {
     body?: never;
@@ -1862,7 +1862,7 @@ export type DetectUnregisteredMediaInventoryDetectGetResponses = {
     200: unknown;
 };
-export type BrowseArchiveIndexInventoryBrowseGetData = {
+export type ArchiveBrowseData = {
     body?: never;
     path?: never;
     query?: {
@@ -1871,26 +1871,26 @@ export type BrowseArchiveIndexInventoryBrowseGetData = {
          */
         path?: string;
     };
-    url: '/inventory/browse';
+    url: '/archive/browse';
 };
-export type BrowseArchiveIndexInventoryBrowseGetErrors = {
+export type ArchiveBrowseErrors = {
     /**
      * Validation Error
      */
     422: HttpValidationError;
 };
-export type BrowseArchiveIndexInventoryBrowseGetError = BrowseArchiveIndexInventoryBrowseGetErrors[keyof BrowseArchiveIndexInventoryBrowseGetErrors];
+export type ArchiveBrowseError = ArchiveBrowseErrors[keyof ArchiveBrowseErrors];
-export type BrowseArchiveIndexInventoryBrowseGetResponses = {
+export type ArchiveBrowseResponses = {
     /**
      * Successful Response
      */
     200: unknown;
 };
-export type SearchArchiveIndexInventorySearchGetData = {
+export type ArchiveSearchData = {
     body?: never;
     path?: never;
     query: {
@@ -1903,26 +1903,26 @@ export type SearchArchiveIndexInventorySearchGetData = {
          */
         path?: string | null;
     };
-    url: '/inventory/search';
+    url: '/archive/search';
 };
-export type SearchArchiveIndexInventorySearchGetErrors = {
+export type ArchiveSearchErrors = {
     /**
      * Validation Error
      */
     422: HttpValidationError;
 };
-export type SearchArchiveIndexInventorySearchGetError = SearchArchiveIndexInventorySearchGetErrors[keyof SearchArchiveIndexInventorySearchGetErrors];
+export type ArchiveSearchError = ArchiveSearchErrors[keyof ArchiveSearchErrors];
-export type SearchArchiveIndexInventorySearchGetResponses = {
+export type ArchiveSearchResponses = {
     /**
      * Successful Response
      */
     200: unknown;
 };
-export type GetArchiveTreeInventoryTreeGetData = {
+export type ArchiveTreeData = {
     body?: never;
     path?: never;
     query?: {
@@ -1931,30 +1931,30 @@ export type GetArchiveTreeInventoryTreeGetData = {
          */
         path?: string | null;
     };
-    url: '/inventory/tree';
+    url: '/archive/tree';
 };
-export type GetArchiveTreeInventoryTreeGetErrors = {
+export type ArchiveTreeErrors = {
     /**
      * Validation Error
      */
     422: HttpValidationError;
 };
-export type GetArchiveTreeInventoryTreeGetError = GetArchiveTreeInventoryTreeGetErrors[keyof GetArchiveTreeInventoryTreeGetErrors];
+export type ArchiveTreeError = ArchiveTreeErrors[keyof ArchiveTreeErrors];
-export type GetArchiveTreeInventoryTreeGetResponses = {
+export type ArchiveTreeResponses = {
     /**
-     * Response Get Archive Tree Inventory Tree Get
+     * Response Archive Tree
      *
      * Successful Response
      */
     200: Array<TreeNodeSchema>;
 };
-export type GetArchiveTreeInventoryTreeGetResponse = GetArchiveTreeInventoryTreeGetResponses[keyof GetArchiveTreeInventoryTreeGetResponses];
+export type ArchiveTreeResponse = ArchiveTreeResponses[keyof ArchiveTreeResponses];
-export type GetArchiveItemMetadataInventoryMetadataGetData = {
+export type ArchiveMetadataData = {
     body?: never;
     path?: never;
     query: {
@@ -1963,26 +1963,26 @@ export type GetArchiveItemMetadataInventoryMetadataGetData = {
          */
         path: string;
     };
-    url: '/inventory/metadata';
+    url: '/archive/metadata';
 };
-export type GetArchiveItemMetadataInventoryMetadataGetErrors = {
+export type ArchiveMetadataErrors = {
     /**
      * Validation Error
      */
     422: HttpValidationError;
 };
-export type GetArchiveItemMetadataInventoryMetadataGetError = GetArchiveItemMetadataInventoryMetadataGetErrors[keyof GetArchiveItemMetadataInventoryMetadataGetErrors];
+export type ArchiveMetadataError = ArchiveMetadataErrors[keyof ArchiveMetadataErrors];
-export type GetArchiveItemMetadataInventoryMetadataGetResponses = {
+export type ArchiveMetadataResponses = {
     /**
      * Successful Response
      */
     200: ItemMetadataSchema;
 };
-export type GetArchiveItemMetadataInventoryMetadataGetResponse = GetArchiveItemMetadataInventoryMetadataGetResponses[keyof GetArchiveItemMetadataInventoryMetadataGetResponses];
+export type ArchiveMetadataResponse = ArchiveMetadataResponses[keyof ArchiveMetadataResponses];
 export type TriggerAutoBackupBackupsTriggerAutoPostData = {
     body?: never;
@@ -18,12 +18,12 @@
     import FileBrowserTreeItem from "./FileBrowserTreeItem.svelte";
     import FileBrowserRowItem from "./FileBrowserRowItem.svelte";
     import type { FileItem, TreeNode, Breadcrumb } from "$lib/types";
-    import { cn } from "$lib/utils";
+    import { cn, naturalSortCompare } from "$lib/utils";
     import {
-        getSystemTreeSystemTreeGet,
-        getArchiveTreeInventoryTreeGet,
-        browseSystemPathSystemBrowseGet,
-        browseArchiveIndexInventoryBrowseGet,
+        filesystemTree,
+        archiveTree,
+        filesystemBrowse,
+        archiveBrowse,
         getDiscrepanciesTreeSystemDiscrepanciesTreeGet,
         browseDiscrepanciesSystemDiscrepanciesBrowseGet,
     } from "$lib/api";
@@ -264,12 +264,24 @@
         });
         result.sort((a: FileItem, b: FileItem) => {
+            let cmp = 0;
+            if (sortColumn === "name") {
+                // Directories always sort before files, then natural sort by name
+                if (a.type !== b.type) {
+                    cmp = a.type === "directory" ? -1 : 1;
+                } else {
+                    cmp = naturalSortCompare(a.name, b.name);
+                }
+            } else {
                 const valA = sortColumn === "type" ? a.type : a[sortColumn as keyof FileItem] || 0;
                 const valB = sortColumn === "type" ? b.type : b[sortColumn as keyof FileItem] || 0;
-                if (valA < (valB as any)) return sortDirection === "asc" ? -1 : 1;
-                if (valA > (valB as any)) return sortDirection === "asc" ? 1 : -1;
-                return 0;
+                if (valA < (valB as any)) cmp = -1;
+                else if (valA > (valB as any)) cmp = 1;
+            }
+            return sortDirection === "asc" ? cmp : -cmp;
         });
         return result;
@@ -5,7 +5,7 @@
     import type { TreeNode } from "$lib/types";
     import { cn } from "$lib/utils";
     import FileBrowserTreeItem from "./FileBrowserTreeItem.svelte";
-    import { getSystemTreeSystemTreeGet, getArchiveTreeInventoryTreeGet, getDiscrepanciesTreeSystemDiscrepanciesTreeGet } from "$lib/api";
+    import { filesystemTree, archiveTree, getDiscrepanciesTreeSystemDiscrepanciesTreeGet } from "$lib/api";
     let {
         node,
@@ -66,7 +66,7 @@
                 query: { path: node.path }
             });
         } else {
-            const fetchFn = (mode === "host" || mode === "live") ? getSystemTreeSystemTreeGet : getArchiveTreeInventoryTreeGet;
+            const fetchFn = (mode === "host" || mode === "live") ? filesystemTree : archiveTree;
             response = await fetchFn({
                 query: { path: node.path }
             });
+67
@@ -55,3 +55,70 @@ export function formatSize(bytes: number | null | undefined): string {
     }
     return `${size.toFixed(1)} ${units[unitIndex]}`;
 }
/**
* Natural sort comparator mimicking Windows Explorer's StrCmpLogicalW.
*
* Rules:
* 1. Directories always sort before files (the caller applies this rule by comparing item types before delegating here).
* 2. Case-insensitive alphanumeric comparison.
* 3. Multi-digit numbers are compared as whole integers (1, 2, 10 not 1, 10, 2).
* 4. Falls back to locale-aware comparison for non-ASCII characters.
*/
export function naturalSortCompare(aName: string, bName: string): number {
const aLower = aName.toLowerCase();
const bLower = bName.toLowerCase();
const len = Math.min(aLower.length, bLower.length);
let i = 0;
while (i < len) {
const aChar = aLower[i];
const bChar = bLower[i];
// If both are digits, extract the full number and compare numerically
if (isDigit(aChar) && isDigit(bChar)) {
let aNum = 0;
let bNum = 0;
let j = i;
while (j < aLower.length && isDigit(aLower[j])) {
aNum = aNum * 10 + (aLower.charCodeAt(j) - 48);
j++;
}
const aEnd = j;
j = i;
while (j < bLower.length && isDigit(bLower[j])) {
bNum = bNum * 10 + (bLower.charCodeAt(j) - 48);
j++;
}
const bEnd = j;
if (aNum !== bNum) {
return aNum - bNum;
}
// Numbers are equal but one may have leading zeros; shorter run first
if (aEnd !== bEnd) {
return aEnd - bEnd;
}
i = aEnd;
continue;
}
// Simple character comparison (locale-aware fallback for non-ASCII)
if (aChar !== bChar) {
return aChar.localeCompare(bChar);
}
i++;
}
return aLower.length - bLower.length;
}
function isDigit(c: string): boolean {
const code = c.charCodeAt(0);
return code >= 48 && code <= 57;
}
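A quick check of the comparator on hypothetical names (as noted above, the directories-before-files rule is enforced by the caller in FileBrowser.svelte; this function only ever sees names):

import { naturalSortCompare } from "$lib/utils";

const names = ["file10.txt", "file2.txt", "File1.txt", "file02.txt"];
names.sort(naturalSortCompare);
// -> ["File1.txt", "file2.txt", "file02.txt", "file10.txt"]
// 2 < 10 when compared as whole integers, and "file2" precedes "file02"
// because equal values tie-break toward the shorter digit run.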
+4 -4
@@ -7,11 +7,11 @@
     import FileBrowser from '$lib/components/file-browser/FileBrowser.svelte';
     import type { FileItem } from '$lib/types';
     import {
-        browseSystemPathSystemBrowseGet,
+        filesystemBrowse,
         batchUpdateTrackingSystemTrackBatchPost,
         triggerScanSystemScanPost,
         getScanStatusSystemScanStatusGet,
-        searchSystemIndexSystemSearchGet,
+        filesystemSearch,
         type ScanStatusSchema
     } from '$lib/api';
     import { toast } from "svelte-sonner";
@@ -40,7 +40,7 @@
         if (searchQuery.trim().length >= 3) return;
         loading = true;
         try {
-            const response = await browseSystemPathSystemBrowseGet({
+            const response = await filesystemBrowse({
                 query: { path }
             });
             if (response.data) {
@@ -66,7 +66,7 @@
     async function searchFiles(query: string) {
         searchLoading = true;
         try {
-            const response = await searchSystemIndexSystemSearchGet({
+            const response = await filesystemSearch({
                 query: { q: query, path: currentPath }
             });
             if (response.data) {
@@ -21,13 +21,13 @@
     import FileBrowser from '$lib/components/file-browser/FileBrowser.svelte';
     import type { FileItem } from '$lib/types';
     import {
-        browseArchiveIndexInventoryBrowseGet,
-        getArchiveItemMetadataInventoryMetadataGet,
+        archiveBrowse,
+        archiveMetadata,
         listRecoveryQueueRestoresQueueGet,
         addFileToRecoveryQueueRestoresQueueFileFileIdPost,
         removeFromRecoveryQueueRestoresQueueItemItemIdDelete,
         addDirectoryToRecoveryQueueRestoresQueueDirectoryPost,
-        searchArchiveIndexInventorySearchGet,
+        archiveSearch,
         type ItemMetadataSchema,
         type CartItemSchema
     } from '$lib/api';
@@ -76,7 +76,7 @@
         if (searchQuery.trim().length >= 3) return;
         loading = true;
         try {
-            const response = await browseArchiveIndexInventoryBrowseGet({
+            const response = await archiveBrowse({
                 query: { path }
             });
             if (response.data) {
@@ -103,7 +103,7 @@
     async function searchFiles(query: string) {
         searchLoading = true;
         try {
-            const response = await searchArchiveIndexInventorySearchGet({
+            const response = await archiveSearch({
                 query: { q: query, path: currentPath }
             });
             if (response.data) {
@@ -147,7 +147,7 @@
     async function fetchMetadata(item: FileItem) {
         metadataLoading = true;
         try {
-            const response = await getArchiveItemMetadataInventoryMetadataGet({
+            const response = await archiveMetadata({
                 query: { path: item.path }
             });
             if (response.data) {
@@ -188,7 +188,7 @@
         } else {
             if (item.type === 'file') {
                 // Fetch metadata to get the DB ID
-                const metaResponse = await getArchiveItemMetadataInventoryMetadataGet({
+                const metaResponse = await archiveMetadata({
                     query: { path: item.path }
                 });
+4 -4
@@ -49,7 +49,7 @@ test.describe('Backup & Restore', () => {
       expect(backupJob.status).toBe('COMPLETED');
     }).toPass({ timeout: 30000 });
-    const metaResp = await requestContext.get(`${API_URL}/inventory/metadata`, {
+    const metaResp = await requestContext.get(`${API_URL}/archive/metadata`, {
       params: { path: path.join(SOURCE_ROOT, 'backup_test.txt') }
     });
     expect(metaResp.ok()).toBe(true);
@@ -132,7 +132,7 @@ test.describe('Backup & Restore', () => {
       expect(backupJob.status).toBe('COMPLETED');
     }).toPass({ timeout: 30000 });
-    const metaResp = await requestContext.get(`${API_URL}/inventory/metadata`, {
+    const metaResp = await requestContext.get(`${API_URL}/archive/metadata`, {
       params: { path: path.join(SOURCE_ROOT, 'backup_test.txt') }
     });
     expect(metaResp.ok()).toBe(true);
@@ -187,7 +187,7 @@ test.describe('Backup & Restore', () => {
       expect(backupJob.status).toBe('COMPLETED');
     }).toPass({ timeout: 30000 });
-    const metaResp = await requestContext.get(`${API_URL}/inventory/metadata`, {
+    const metaResp = await requestContext.get(`${API_URL}/archive/metadata`, {
       params: { path: path.join(SOURCE_ROOT, 'backup_test.txt') }
     });
     expect(metaResp.ok()).toBe(true);
@@ -236,7 +236,7 @@ test.describe('Backup & Restore', () => {
       expect(backupJob.status).toBe('COMPLETED');
     }).toPass({ timeout: 30000 });
-    const metaResp = await requestContext.get(`${API_URL}/inventory/metadata`, {
+    const metaResp = await requestContext.get(`${API_URL}/archive/metadata`, {
       params: { path: path.join(SOURCE_ROOT, 'backup_test.txt') }
     });
     expect(metaResp.ok()).toBe(true);
+4 -4
@@ -49,7 +49,7 @@ test.describe('Discrepancies', () => {
     for (const f of [...testFiles, 'ui_missing.txt']) {
       const filePath = path.join(SOURCE_ROOT, f);
       const encodedPath = encodeURIComponent(filePath);
-      const metaResp = await requestContext.get(`${API_URL}/inventory/metadata?path=${encodedPath}`);
+      const metaResp = await requestContext.get(`${API_URL}/archive/metadata?path=${encodedPath}`);
       if (metaResp.ok()) {
         const meta = await metaResp.json();
         fileIds[f] = meta.id;
@@ -161,7 +161,7 @@ test.describe('Discrepancies', () => {
     for (const f of files) {
       const filePath = path.join(SOURCE_ROOT, f);
       const encodedPath = encodeURIComponent(filePath);
-      const metaResp = await requestContext.get(`${API_URL}/inventory/metadata?path=${encodedPath}`);
+      const metaResp = await requestContext.get(`${API_URL}/archive/metadata?path=${encodedPath}`);
       if (metaResp.ok()) {
         const meta = await metaResp.json();
         ids.push(meta.id);
@@ -218,7 +218,7 @@ test.describe('Discrepancies', () => {
     for (const f of files) {
       const filePath = path.join(SOURCE_ROOT, f);
       const encodedPath = encodeURIComponent(filePath);
-      const metaResp = await requestContext.get(`${API_URL}/inventory/metadata?path=${encodedPath}`);
+      const metaResp = await requestContext.get(`${API_URL}/archive/metadata?path=${encodedPath}`);
       if (metaResp.ok()) {
         const meta = await metaResp.json();
         ids.push(meta.id);
@@ -274,7 +274,7 @@ test.describe('Discrepancies', () => {
     for (const f of files) {
       const filePath = path.join(SOURCE_ROOT, f);
       const encodedPath = encodeURIComponent(filePath);
-      const metaResp = await requestContext.get(`${API_URL}/inventory/metadata?path=${encodedPath}`);
+      const metaResp = await requestContext.get(`${API_URL}/archive/metadata?path=${encodedPath}`);
       if (metaResp.ok()) {
         const meta = await metaResp.json();
         ids.push(meta.id);
+1 -1
@@ -192,7 +192,7 @@ test.describe('TapeHoard Golden Path', () => {
     // Get the file ID from the metadata endpoint
     const encodedPath = encodeURIComponent(testFilePath);
-    const metaResp = await requestContext.get(`${API_URL}/inventory/metadata?path=${encodedPath}`);
+    const metaResp = await requestContext.get(`${API_URL}/archive/metadata?path=${encodedPath}`);
     expect(metaResp.ok()).toBe(true);
     const meta = await metaResp.json();
     const fileId = meta.id;
+1 -1
@@ -1,6 +1,6 @@
 import { expect, request } from '@playwright/test';
-export const API_URL = 'http://localhost:8001';
+export const API_URL = 'http://127.0.0.1:8001';
 export const SOURCE_ROOT = '/tmp/tapehoard_e2e_source';
 export const MOCK_LTO_PATH = '/tmp/tapehoard_e2e_mock_lto';
 export const RESTORE_DEST = '/tmp/tapehoard_e2e_restore';
+2
@@ -33,6 +33,8 @@ lint:
     cd backend && uv run ty check
     @echo "Type checking Svelte..."
     cd frontend && npm run check
+    @echo "Running pre-commit hooks..."
+    git hook run pre-commit
 # Run all backend tests
 pytest: