From 3ea0dd8fc9e4c97e8f5c71834d0a5bdef4e7eef1 Mon Sep 17 00:00:00 2001
From: Adam Lamers
Date: Sat, 25 Apr 2026 18:07:52 -0400
Subject: [PATCH] improve recovery queue insertion speed

Add an index on filesystem_state.file_path so directory prefix matches
can use it, and rewrite the recovery-queue bulk insert to join
file_versions and restore_cart instead of running EXISTS / NOT IN
subqueries per candidate row. Enable SQLite WAL mode with a busy
timeout so reads are not blocked while the insert runs.

Also migrate deprecated Query.get() calls to Session.get(), track job
cancellation in the database instead of process memory, pin CORS to
explicit dev/server origins, and make the container port and data
volumes configurable.
---
 .../versions/51c095df4615_add_path_index.py  | 26 +++++++++++++++++
 backend/app/api/backups.py                   |  2 +-
 backend/app/api/inventory.py                 |  6 ++--
 backend/app/api/restores.py                  | 13 ++++++---
 backend/app/db/database.py                   | 14 ++++++++--
 backend/app/main.py                          | 11 +++++++-
 backend/app/services/archiver.py             | 28 +++++++------------
 backend/app/services/scanner.py              | 23 ++++++++-------
 docker/Dockerfile                            | 13 +++++++--
 docker/docker-compose.yml                    | 14 ++++------
 docker/entrypoint.sh                         | 28 ++++++++++++++++---
 frontend/src/lib/api/index.ts                | 15 ++++++++--
 frontend/src/routes/restores/+page.svelte    | 22 ++++++++-------
 justfile                                     |  6 ++--
 14 files changed, 152 insertions(+), 69 deletions(-)
 create mode 100644 backend/alembic/versions/51c095df4615_add_path_index.py

diff --git a/backend/alembic/versions/51c095df4615_add_path_index.py b/backend/alembic/versions/51c095df4615_add_path_index.py
new file mode 100644
index 0000000..84ba4c3
--- /dev/null
+++ b/backend/alembic/versions/51c095df4615_add_path_index.py
@@ -0,0 +1,26 @@
+"""add_path_index
+
+Revision ID: 51c095df4615
+Revises: ac51f5e25832
+Create Date: 2026-04-25 22:15:00.000000
+
+"""
+
+from typing import Sequence, Union
+from alembic import op
+
+
+# revision identifiers, used by Alembic.
+revision: str = "51c095df4615"
+down_revision: Union[str, Sequence[str], None] = "ac51f5e25832"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    # Add index on file_path to speed up directory prefix matching
+    op.create_index("ix_filesystem_state_file_path", "filesystem_state", ["file_path"])
+
+
+def downgrade() -> None:
+    op.drop_index("ix_filesystem_state_file_path", table_name="filesystem_state")
diff --git a/backend/app/api/backups.py b/backend/app/api/backups.py
index bae0777..5ba3283 100644
--- a/backend/app/api/backups.py
+++ b/backend/app/api/backups.py
@@ -12,7 +12,7 @@ router = APIRouter(prefix="/backups", tags=["Backups"])
 def trigger_backup(
     media_id: int, background_tasks: BackgroundTasks, db: Session = Depends(get_db)
 ):
-    media = db.query(models.StorageMedia).get(media_id)
+    media = db.get(models.StorageMedia, media_id)
     if not media:
         raise HTTPException(status_code=404, detail="Media not found")
 
diff --git a/backend/app/api/inventory.py b/backend/app/api/inventory.py
index b71fea2..a82d8b4 100644
--- a/backend/app/api/inventory.py
+++ b/backend/app/api/inventory.py
@@ -174,7 +174,7 @@ def register_media(req: MediaCreateSchema, db: Session = Depends(get_db)):
 
 @router.patch("/media/{media_id}", response_model=MediaSchema)
 def update_media(media_id: int, req: MediaUpdateSchema, db: Session = Depends(get_db)):
-    media = db.query(models.StorageMedia).get(media_id)
+    media = db.get(models.StorageMedia, media_id)
     if not media:
         raise HTTPException(status_code=404, detail="Media not found")
 
@@ -206,7 +206,7 @@ def update_media(media_id: int, req: MediaUpdateSchema, db: Session = Depends(ge
 
 @router.delete("/media/{media_id}")
 def delete_media(media_id: int, db: Session = Depends(get_db)):
-    media = db.query(models.StorageMedia).get(media_id)
+    media = db.get(models.StorageMedia, media_id)
     if not media:
         raise HTTPException(status_code=404, detail="Media not found")
     if media.versions:
@@ -220,7 +220,7 @@
 def initialize_media(media_id: int, db: Session = Depends(get_db)):
     from app.services.archiver import archiver_manager
 
-    media = db.query(models.StorageMedia).get(media_id)
+    media = db.get(models.StorageMedia, media_id)
     if not media:
         raise HTTPException(status_code=404, detail="Media not found")
 
diff --git a/backend/app/api/restores.py b/backend/app/api/restores.py
index 06960dd..9b77c4c 100644
--- a/backend/app/api/restores.py
+++ b/backend/app/api/restores.py
@@ -255,13 +255,18 @@ def add_directory_to_cart(req: DirectoryCartRequest, db: Session = Depends(get_d
 
     logger.info(f"Adding directory to queue: {path} (prefix: {prefix_query})")
 
+    # A single set-based INSERT keeps the bulk add fast:
+    # 1. Matches the path prefix (using the new index)
+    # 2. Joins file_versions to ensure the file is restorable
+    # 3. Left joins restore_cart to skip already-queued items
     insert_sql = text("""
         INSERT INTO restore_cart (filesystem_state_id, created_at)
         SELECT DISTINCT fs.id, :now
         FROM filesystem_state fs
+        JOIN file_versions fv ON fv.filesystem_state_id = fs.id
+        LEFT JOIN restore_cart rc ON rc.filesystem_state_id = fs.id
         WHERE (fs.file_path = :path OR fs.file_path LIKE :prefix)
-        AND EXISTS (SELECT 1 FROM file_versions fv WHERE fv.filesystem_state_id = fs.id)
-        AND fs.id NOT IN (SELECT filesystem_state_id FROM restore_cart)
+        AND rc.id IS NULL
     """)
 
     db.execute(
@@ -291,7 +296,7 @@ def add_to_cart(file_id: int, db: Session = Depends(get_db)):
     if existing:
         return {"message": "Already in recovery queue"}
 
-    file_state = db.query(models.FilesystemState).get(file_id)
+    file_state = db.get(models.FilesystemState, file_id)
     if not file_state or not file_state.versions:
         raise HTTPException(status_code=400, detail="File has no backed up versions")
 
@@ -303,7 +308,7 @@
 
 @router.delete("/cart/{item_id}")
 def remove_from_cart(item_id: int, db: Session = Depends(get_db)):
-    item = db.query(models.RestoreCart).get(item_id)
+    item = db.get(models.RestoreCart, item_id)
     if item:
         db.delete(item)
         db.commit()
diff --git a/backend/app/db/database.py b/backend/app/db/database.py
index b6927ca..289c4de 100644
--- a/backend/app/db/database.py
+++ b/backend/app/db/database.py
@@ -1,5 +1,5 @@
 import os
-from sqlalchemy import create_engine
+from sqlalchemy import create_engine, event
 from sqlalchemy.orm import sessionmaker
 
 # Dependency mapping for FastAPI
@@ -9,12 +9,22 @@ SQLALCHEMY_DATABASE_URL = os.getenv("DATABASE_URL", "sqlite:///tapehoard.db")
 
 # connect_args={"check_same_thread": False} is required for SQLite in FastAPI
 engine = create_engine(
     SQLALCHEMY_DATABASE_URL,
-    connect_args={"check_same_thread": False},
+    connect_args={"check_same_thread": False, "timeout": 30},
     pool_size=20,
     max_overflow=10,
     pool_timeout=30,
 )
 
+
+# Enable WAL mode for SQLite to allow concurrent reads and writes
+@event.listens_for(engine, "connect")
+def set_sqlite_pragma(dbapi_connection, connection_record):
+    cursor = dbapi_connection.cursor()
+    cursor.execute("PRAGMA journal_mode=WAL")
+    cursor.execute("PRAGMA synchronous=NORMAL")
+    cursor.close()
+
+
 SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
diff --git a/backend/app/main.py b/backend/app/main.py
index 3b028e3..fd42720 100644
--- a/backend/app/main.py
+++ b/backend/app/main.py
@@ -32,7 +32,16 @@ app = FastAPI(
 # Configure CORS
 app.add_middleware(
     CORSMiddleware,
-    allow_origins=["*"],  # In production, this should be restricted
+    allow_origins=[
+        "http://localhost:5173",
+        "http://localhost:5174",
+        "http://localhost:5175",
+        "http://localhost:8000",
+        "http://127.0.0.1:5173",
+        "http://127.0.0.1:5174",
+        "http://127.0.0.1:5175",
+        "http://127.0.0.1:8000",
+    ],
     allow_credentials=True,
     allow_methods=["*"],
     allow_headers=["*"],
diff --git a/backend/app/services/archiver.py b/backend/app/services/archiver.py
index 62be483..a8accd7 100644
--- a/backend/app/services/archiver.py
+++ b/backend/app/services/archiver.py
@@ -96,7 +96,10 @@
         )
 
         return (
-            db.query(models.FilesystemState)
+            db.query(
+                models.FilesystemState,
+                func.coalesce(subquery.c.covered_size, 0).label("covered_size"),
+            )
             .outerjoin(
                 subquery, models.FilesystemState.id == subquery.c.filesystem_state_id
             )
@@ -113,7 +116,7 @@
         self, db: Session, media_id: int, max_bytes: Optional[int] = None
     ) -> List[Dict[str, Any]]:
         """Selects a batch of files/chunks that fit on the media's remaining capacity"""
-        media = db.query(models.StorageMedia).get(media_id)
+        media = db.get(models.StorageMedia, media_id)
         if not media:
             return []
 
@@ -129,29 +132,18 @@
         # We need at least some space to make it worthwhile
        MIN_CHUNK_SIZE = 100 * 1024 * 1024  # 100MB
 
-        for f in eligible:
+        for f, covered_size in eligible:
             if current_size >= remaining_capacity:
                 break
-            # Calculate how much of this file is already backed up
-            # For simplicity, we assume we always backup from the end of the last chunk
-            covered_size = (
-                db.query(
-                    func.sum(
-                        models.FileVersion.offset_end - models.FileVersion.offset_start
-                    )
-                )
-                .filter(models.FileVersion.filesystem_state_id == f.id)
-                .scalar()
-                or 0
-            )
-
             remaining_file_size = f.size - covered_size
 
             # Allow 0-byte files if they have no versions yet
             if remaining_file_size <= 0 and f.size > 0:
                 continue
 
             if f.size == 0:
+                if covered_size > 0:
+                    continue
                 # Check if it already has a version to avoid infinite loop
                 has_version = (
                     db.query(models.FileVersion)
@@ -193,7 +185,7 @@
         return backup_set
 
     def run_backup(self, db: Session, media_id: int, job_id: int):
-        media = db.query(models.StorageMedia).get(media_id)
+        media = db.get(models.StorageMedia, media_id)
         if not media:
             JobManager.fail_job(job_id, "Media not found")
             return
@@ -447,7 +439,7 @@
             if JobManager.is_cancelled(job_id):
                 break
 
-            media = db.query(models.StorageMedia).get(media_id)
+            media = db.get(models.StorageMedia, media_id)
             if not media:
                 continue
 
diff --git a/backend/app/services/scanner.py b/backend/app/services/scanner.py
index 6bb1c32..35fbc49 100644
--- a/backend/app/services/scanner.py
+++ b/backend/app/services/scanner.py
@@ -1,7 +1,7 @@
 import os
 import hashlib
 from datetime import datetime, timezone
-from typing import Dict, List, Optional, Set, Tuple, Any, cast
+from typing import Dict, List, Optional, Tuple, Any, cast
 from loguru import logger
 from sqlalchemy.orm import Session
 from app.db import models
@@ -12,9 +12,6 @@ import json
 
 
 class JobManager:
-    # Set of job IDs that have been requested to cancel
-    _cancelled_jobs: Set[int] = set()
-
     @staticmethod
     def create_job(db: Session, job_type: str) -> models.Job:
         job = models.Job(job_type=job_type, status="PENDING")
@@ -57,8 +54,6 @@
             job.progress = 100.0
             job.completed_at = datetime.now(timezone.utc)
             db.commit()
-            if job_id in JobManager._cancelled_jobs:
-                JobManager._cancelled_jobs.remove(job_id)
         finally:
             db.close()
 
@@ -72,14 +67,11 @@
             job.error_message = error_message
             job.completed_at = datetime.now(timezone.utc)
             db.commit()
-            if job_id in JobManager._cancelled_jobs:
-                JobManager._cancelled_jobs.remove(job_id)
        finally:
            db.close()

    @staticmethod
    def cancel_job(job_id: int):
-        JobManager._cancelled_jobs.add(job_id)
         db = SessionLocal()
         try:
             job = db.get(models.Job, job_id)
@@ -93,7 +85,18 @@
 
     @staticmethod
     def is_cancelled(job_id: int) -> bool:
-        return job_id in JobManager._cancelled_jobs
+        db = SessionLocal()
+        try:
+            job = db.get(models.Job, job_id)
+            if (
+                job
+                and job.status == "FAILED"
+                and job.error_message == "Cancelled by user"
+            ):
+                return True
+            return False
+        finally:
+            db.close()
 
 
 class ScannerService:
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 9855438..d455915 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -15,6 +15,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
     mt-st \
     tar \
     sqlite3 \
+    passwd \
+    util-linux \
     build-essential \
     autoconf \
     automake \
@@ -44,12 +46,19 @@ COPY backend/ ./
 COPY --from=frontend-builder /app/frontend/build /app/backend/static
 
 # Setup volumes and permissions
-RUN mkdir -p /app/data /staging /source_data /restores
-ENV DATABASE_URL=sqlite:////app/data/tapehoard.db
+RUN mkdir -p /database /staging /source_data /restores /home/appuser /uv-cache && chmod 777 /uv-cache
+ENV DATABASE_URL=sqlite:////database/tapehoard.db
+ENV HOME=/home/appuser
+ENV UV_CACHE_DIR=/uv-cache
+ENV XDG_CACHE_HOME=/uv-cache
 
 # Entrypoint
 COPY docker/entrypoint.sh /app/entrypoint.sh
 RUN chmod +x /app/entrypoint.sh
 
+# Mount points for persistent data (made writable in the entrypoint)
+VOLUME ["/database", "/staging", "/restores"]
+
+ENV PORT=8000
 EXPOSE 8000
 ENTRYPOINT ["/app/entrypoint.sh"]
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 86caa03..cb74d39 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -5,17 +5,15 @@ services:
       dockerfile: docker/Dockerfile
     container_name: tapehoard
     ports:
-      - "8000:8000"
+      - "9000:8000"
     volumes:
-      # - ./data:/app/data
+      - ./db:/database
       - ./staging:/staging
-      - /Users/alamers/git/:/source_data:ro
-      - /Users/alamers/restore/:/restores
-      - /Users/alamers/backup/:/mnt/HDD-001
-      # - /mnt/storage:/source_data:ro
-      # - /mnt/restores:/restores
+      - /Users/alamers/git/ditchcord:/source_data:ro
+      - /Users/alamers/restore:/restores
+    # Uncomment to enable LTO tape drive support
     # devices:
-    # - /dev/nst0:/dev/nst0
+    #   - /dev/nst0:/dev/nst0
     environment:
       - PUID=1000
       - PGID=1000
diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh
index 80bc1cf..4347da3 100644
--- a/docker/entrypoint.sh
+++ b/docker/entrypoint.sh
@@ -1,15 +1,35 @@
 #!/bin/bash
 set -e
 
+# Point uv's home, cache, and config at writable locations
+export HOME=/home/appuser
+export UV_CACHE_DIR=/uv-cache
+export UV_CONFIG_DIR=/uv-cache/config
+export UV_DATA_DIR=/uv-cache/data
+
 echo "Starting TapeHoard..."
 
+# Handle PUID/PGID for volume permissions
+if [ "$(id -u)" = '0' ] && [ -n "$PUID" ] && [ -n "$PGID" ]; then
+    echo "Adjusting permissions for PUID:PGID $PUID:$PGID..."
+
+    groupmod -g "$PGID" appuser 2>/dev/null || groupadd -g "$PGID" appuser
+    usermod -u "$PUID" -g "$PGID" -d /home/appuser appuser 2>/dev/null || useradd -m -d /home/appuser -u "$PUID" -g "$PGID" -s /bin/bash appuser
+
+    # Ensure all volumes are writable by the mapped user
+    chown -R "$PUID:$PGID" /database /staging /restores /app/backend /home/appuser /uv-cache
+
+    echo "Switching to appuser..."
+    exec setpriv --reuid="$PUID" --regid="$PGID" --init-groups "$0" "$@"
+fi
+
 # Change to backend directory
 cd /app/backend
 
-# Run database migrations
+# Run everything through uv, using the shared cache
 echo "Running database migrations..."
-uv run alembic upgrade head
+uv --cache-dir /uv-cache run alembic upgrade head
 
 # Start the application
-echo "Starting application server..."
-exec uv run uvicorn app.main:app --host 0.0.0.0 --port 8000
+echo "Starting application server on port ${PORT:-8000}..."
+exec uv --cache-dir /uv-cache run uvicorn app.main:app --host 0.0.0.0 --port "${PORT:-8000}"
diff --git a/frontend/src/lib/api/index.ts b/frontend/src/lib/api/index.ts
index 0b416bb..851651a 100644
--- a/frontend/src/lib/api/index.ts
+++ b/frontend/src/lib/api/index.ts
@@ -1,4 +1,13 @@
-// This file is auto-generated by @hey-api/openapi-ts
+import { client } from './client.gen';
 
-export { addDirectoryToCartRestoresCartDirectoryPost, addToCartRestoresCartFileIdPost, browseCartRestoresCartBrowseGet, browseIndexInventoryBrowseGet, browsePathSystemBrowseGet, cancelJobSystemJobsJobIdCancelPost, clearCartRestoresCartClearPost, deleteMediaInventoryMediaMediaIdDelete, exportDatabaseSystemDatabaseExportGet, getCartTreeRestoresCartTreeGet, getDashboardStatsSystemDashboardStatsGet, getIndexTreeInventoryTreeGet, getItemMetadataInventoryMetadataGet, getManifestRestoresManifestGet, getScanStatusSystemScanStatusGet, getSettingsSystemSettingsGet, getTreeSystemTreeGet, healthCheckHealthGet, importDatabaseSystemDatabaseImportPost, initializeMediaInventoryMediaMediaIdInitializePost, listBackupsBackupsGet, listCartRestoresCartGet, listInventoryInventoryGet, listJobsSystemJobsGet, listMediaInventoryMediaGet, type Options, registerMediaInventoryMediaPost, removeFromCartRestoresCartItemIdDelete, searchIndexInventorySearchGet, searchSystemSystemSearchGet, streamJobsSystemJobsStreamGet, testNotificationSystemNotificationsTestPost, trackBatchSystemTrackBatchPost, triggerBackupBackupsTriggerMediaIdPost, triggerRestoreRestoresTriggerPost, triggerScanSystemScanPost, updateMediaInventoryMediaMediaIdPatch, updateSettingSystemSettingsPost } from './sdk.gen';
-export type { AddDirectoryToCartRestoresCartDirectoryPostData, AddDirectoryToCartRestoresCartDirectoryPostError, AddDirectoryToCartRestoresCartDirectoryPostErrors, AddDirectoryToCartRestoresCartDirectoryPostResponses, AddToCartRestoresCartFileIdPostData, AddToCartRestoresCartFileIdPostError, AddToCartRestoresCartFileIdPostErrors, AddToCartRestoresCartFileIdPostResponses, AppApiInventoryFileItemSchema, AppApiSystemFileItemSchema, BatchTrackRequest, BodyImportDatabaseSystemDatabaseImportPost, BrowseCartRestoresCartBrowseGetData, BrowseCartRestoresCartBrowseGetError, BrowseCartRestoresCartBrowseGetErrors, BrowseCartRestoresCartBrowseGetResponse, BrowseCartRestoresCartBrowseGetResponses, BrowseIndexInventoryBrowseGetData, BrowseIndexInventoryBrowseGetError, BrowseIndexInventoryBrowseGetErrors, BrowseIndexInventoryBrowseGetResponse, BrowseIndexInventoryBrowseGetResponses, BrowsePathSystemBrowseGetData, BrowsePathSystemBrowseGetError, BrowsePathSystemBrowseGetErrors, BrowsePathSystemBrowseGetResponse, BrowsePathSystemBrowseGetResponses, CancelJobSystemJobsJobIdCancelPostData, CancelJobSystemJobsJobIdCancelPostError, CancelJobSystemJobsJobIdCancelPostErrors, CancelJobSystemJobsJobIdCancelPostResponses, CartFileItemSchema, CartItemSchema, CartTreeNodeSchema, ClearCartRestoresCartClearPostData, ClearCartRestoresCartClearPostResponses, ClientOptions, DashboardStatsSchema, DeleteMediaInventoryMediaMediaIdDeleteData, DeleteMediaInventoryMediaMediaIdDeleteError, DeleteMediaInventoryMediaMediaIdDeleteErrors, DeleteMediaInventoryMediaMediaIdDeleteResponses, DirectoryCartRequest, ExportDatabaseSystemDatabaseExportGetData, ExportDatabaseSystemDatabaseExportGetResponses, FileVersionSchema, GetCartTreeRestoresCartTreeGetData, GetCartTreeRestoresCartTreeGetError, GetCartTreeRestoresCartTreeGetErrors, GetCartTreeRestoresCartTreeGetResponse, GetCartTreeRestoresCartTreeGetResponses, GetDashboardStatsSystemDashboardStatsGetData, GetDashboardStatsSystemDashboardStatsGetResponse, GetDashboardStatsSystemDashboardStatsGetResponses, GetIndexTreeInventoryTreeGetData, GetIndexTreeInventoryTreeGetError, GetIndexTreeInventoryTreeGetErrors, GetIndexTreeInventoryTreeGetResponse, GetIndexTreeInventoryTreeGetResponses, GetItemMetadataInventoryMetadataGetData, GetItemMetadataInventoryMetadataGetError, GetItemMetadataInventoryMetadataGetErrors, GetItemMetadataInventoryMetadataGetResponse, GetItemMetadataInventoryMetadataGetResponses, GetManifestRestoresManifestGetData, GetManifestRestoresManifestGetResponse, GetManifestRestoresManifestGetResponses, GetScanStatusSystemScanStatusGetData, GetScanStatusSystemScanStatusGetResponse, GetScanStatusSystemScanStatusGetResponses, GetSettingsSystemSettingsGetData, GetSettingsSystemSettingsGetResponse, GetSettingsSystemSettingsGetResponses, GetTreeSystemTreeGetData, GetTreeSystemTreeGetError, GetTreeSystemTreeGetErrors, GetTreeSystemTreeGetResponses, HealthCheckHealthGetData, HealthCheckHealthGetResponses, HttpValidationError, ImportDatabaseSystemDatabaseImportPostData, ImportDatabaseSystemDatabaseImportPostError, ImportDatabaseSystemDatabaseImportPostErrors, ImportDatabaseSystemDatabaseImportPostResponses, InitializeMediaInventoryMediaMediaIdInitializePostData, InitializeMediaInventoryMediaMediaIdInitializePostError, InitializeMediaInventoryMediaMediaIdInitializePostErrors, InitializeMediaInventoryMediaMediaIdInitializePostResponses, ItemMetadataSchema, JobSchema, ListBackupsBackupsGetData, ListBackupsBackupsGetResponses, ListCartRestoresCartGetData, ListCartRestoresCartGetResponse, ListCartRestoresCartGetResponses, ListInventoryInventoryGetData, ListInventoryInventoryGetResponses, ListJobsSystemJobsGetData, ListJobsSystemJobsGetError, ListJobsSystemJobsGetErrors, ListJobsSystemJobsGetResponse, ListJobsSystemJobsGetResponses, ListMediaInventoryMediaGetData, ListMediaInventoryMediaGetResponse, ListMediaInventoryMediaGetResponses, ManifestMediaRequirement, MediaCreateSchema, MediaSchema, MediaUpdateSchema, RegisterMediaInventoryMediaPostData, RegisterMediaInventoryMediaPostError, RegisterMediaInventoryMediaPostErrors, RegisterMediaInventoryMediaPostResponse, RegisterMediaInventoryMediaPostResponses, RemoveFromCartRestoresCartItemIdDeleteData, RemoveFromCartRestoresCartItemIdDeleteError, RemoveFromCartRestoresCartItemIdDeleteErrors, RemoveFromCartRestoresCartItemIdDeleteResponses, RestoreManifestSchema, RestoreRequest, ScanStatusSchema, SearchIndexInventorySearchGetData, SearchIndexInventorySearchGetError, SearchIndexInventorySearchGetErrors, SearchIndexInventorySearchGetResponse, SearchIndexInventorySearchGetResponses, SearchSystemSystemSearchGetData, SearchSystemSystemSearchGetError, SearchSystemSystemSearchGetErrors, SearchSystemSystemSearchGetResponse, SearchSystemSystemSearchGetResponses, SettingSchema, StreamJobsSystemJobsStreamGetData, StreamJobsSystemJobsStreamGetResponses, TestNotificationRequest, TestNotificationSystemNotificationsTestPostData, TestNotificationSystemNotificationsTestPostError, TestNotificationSystemNotificationsTestPostErrors, TestNotificationSystemNotificationsTestPostResponses, TrackBatchSystemTrackBatchPostData, TrackBatchSystemTrackBatchPostError, TrackBatchSystemTrackBatchPostErrors, TrackBatchSystemTrackBatchPostResponses, TreeNodeSchema, TriggerBackupBackupsTriggerMediaIdPostData, TriggerBackupBackupsTriggerMediaIdPostError, TriggerBackupBackupsTriggerMediaIdPostErrors, TriggerBackupBackupsTriggerMediaIdPostResponses, TriggerRestoreRestoresTriggerPostData, TriggerRestoreRestoresTriggerPostError, TriggerRestoreRestoresTriggerPostErrors, TriggerRestoreRestoresTriggerPostResponses, TriggerScanSystemScanPostData, TriggerScanSystemScanPostResponses, UpdateMediaInventoryMediaMediaIdPatchData, UpdateMediaInventoryMediaMediaIdPatchError, UpdateMediaInventoryMediaMediaIdPatchErrors, UpdateMediaInventoryMediaMediaIdPatchResponse, UpdateMediaInventoryMediaMediaIdPatchResponses, UpdateSettingSystemSettingsPostData, UpdateSettingSystemSettingsPostError, UpdateSettingSystemSettingsPostErrors, UpdateSettingSystemSettingsPostResponses, ValidationError } from './types.gen';
+// In production, we use relative paths because the frontend is served by the backend.
+// In development, we can override this via an environment variable.
+const BASE_URL = import.meta.env.VITE_API_URL || '';
+
+client.setConfig({
+  baseUrl: BASE_URL,
+});
+
+export * from './client.gen';
+export * from './sdk.gen';
+export * from './types.gen';
diff --git a/frontend/src/routes/restores/+page.svelte b/frontend/src/routes/restores/+page.svelte
index f6d4f41..8a3d3c7 100644
--- a/frontend/src/routes/restores/+page.svelte
+++ b/frontend/src/routes/restores/+page.svelte
@@ -196,17 +196,19 @@
 {#if (manifest?.total_files || 0) === 0 && !loading}
-
-
-
+
+
+
+
+
+

Recovery Queue is Empty

+

+ You haven't selected any files for restoration yet. Use the Index Browser to find and queue the items you need to recover from your fleet. +

+
-

Recovery Queue is Empty

-

- You haven't selected any files for restoration yet. Use the Index Browser to find and queue the items you need to recover from your fleet. -

-
{:else}
diff --git a/justfile b/justfile
index 2219f8b..ae26039 100644
--- a/justfile
+++ b/justfile
@@ -12,12 +12,12 @@ default:
 dev:
     @echo "Starting Backend (FastAPI) and Frontend (SvelteKit)..."
     @trap 'kill %1' SIGINT; \
-    (cd backend && uv run uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload) & \
-    (cd frontend && npm run dev)
+    (cd backend && uv run uvicorn app.main:app --host 0.0.0.0 --port ${PORT:-8000} --reload) & \
+    (cd frontend && VITE_API_URL=http://localhost:${PORT:-8000} npm run dev)
 
 # Run just the backend
 backend:
-    cd backend && uv run uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload
+    cd backend && uv run uvicorn app.main:app --host 0.0.0.0 --port ${PORT:-8000} --reload
 
 # Run just the frontend
 frontend: