diff --git a/.gitignore b/.gitignore
index f56721e..ecbf916 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,8 +10,11 @@ develop-eggs/
 downloads/
 eggs/
 .eggs/
-/lib/
-/lib64/
+# /lib/ <-- this was causing the issue; it's too broad
+# /lib64/ <-- this was causing the issue; it's too broad
+# Use more specific patterns if needed for python builds
+/python-lib/
+/python-lib64/
 parts/
 sdist/
 var/
@@ -32,10 +35,10 @@ env.bak/
 venv.bak/
 
 # Node / JS
-node_modules/
-/dist/
-/build/
-.svelte-kit/
+/frontend/node_modules/
+/frontend/build/
+/frontend/.svelte-kit/
+/frontend/dist/
 npm-debug.log*
 yarn-debug.log*
 yarn-error.log*
diff --git a/backend/app/api/backups.py b/backend/app/api/backups.py
index 915be0c..bae0777 100644
--- a/backend/app/api/backups.py
+++ b/backend/app/api/backups.py
@@ -1,8 +1,38 @@
-from fastapi import APIRouter
+from fastapi import APIRouter, HTTPException, Depends, BackgroundTasks
+from sqlalchemy.orm import Session
+from app.db.database import get_db, SessionLocal
+from app.db import models
+from app.services.archiver import archiver_manager
+from app.services.scanner import JobManager
 
 router = APIRouter(prefix="/backups", tags=["Backups"])
 
+
+@router.post("/trigger/{media_id}")
+def trigger_backup(
+    media_id: int, background_tasks: BackgroundTasks, db: Session = Depends(get_db)
+):
+    media = db.query(models.StorageMedia).get(media_id)
+    if not media:
+        raise HTTPException(status_code=404, detail="Media not found")
+
+    if media.status != "active":
+        raise HTTPException(status_code=400, detail="Media is not active")
+
+    # Create the job record
+    job = JobManager.create_job(db, "BACKUP")
+
+    def run_backup_task():
+        # The request-scoped session closes with the response; the
+        # background task needs its own session.
+        db_inner = SessionLocal()
+        try:
+            archiver_manager.run_backup(db_inner, media_id=media_id, job_id=job.id)
+        finally:
+            db_inner.close()
+
+    background_tasks.add_task(run_backup_task)
+    return {"message": "Backup job initiated", "job_id": job.id}
+
+
 @router.get("/")
 def list_backups():
     return []
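For reference, a hypothetical client-side exercise of the new trigger endpoint; the server address and media id are assumptions, not part of the diff. It uses only routes this change set defines: `POST /backups/trigger/{media_id}` here, and `GET /system/jobs` added in `system.py` below.

```python
# Hedged usage sketch; assumes the API runs on localhost:8000 and that
# media id 1 is registered and active.
import time
import httpx

BASE = "http://localhost:8000"

job_id = httpx.post(f"{BASE}/backups/trigger/1").json()["job_id"]
while True:
    jobs = {j["id"]: j for j in httpx.get(f"{BASE}/system/jobs").json()}
    job = jobs.get(job_id)
    if job is None or job["status"] in ("COMPLETED", "FAILED"):
        break
    print(f"{job['progress']:.0f}% {job['current_task'] or ''}")
    time.sleep(1)
```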
diff --git a/backend/app/api/inventory.py b/backend/app/api/inventory.py
index bee261a..ce70153 100644
--- a/backend/app/api/inventory.py
+++ b/backend/app/api/inventory.py
@@ -1,79 +1,343 @@
-from fastapi import APIRouter, Depends
-from typing import List, Optional
+from fastapi import APIRouter, HTTPException, Depends
+from typing import List, Optional, Dict, Any
 from pydantic import BaseModel
 from sqlalchemy.orm import Session
+from sqlalchemy import text
 from app.db.database import get_db
 from app.db import models
+from datetime import datetime, timezone
+import json
+import os
 
 router = APIRouter(prefix="/inventory", tags=["Inventory"])
 
+
+# --- Helpers ---
+def get_source_roots(db: Session) -> List[str]:
+    setting = (
+        db.query(models.SystemSetting)
+        .filter(models.SystemSetting.key == "source_roots")
+        .first()
+    )
+    if setting:
+        try:
+            return json.loads(setting.value)
+        except Exception:
+            return [setting.value]
+    local_source = os.path.abspath(os.path.join(os.getcwd(), "..", "source_data"))
+    if os.path.exists(local_source):
+        return [local_source]
+    return ["/source_data"]
+
+
+# --- Schemas ---
+
+
+class FileVersionSchema(BaseModel):
+    media_identifier: str
+    media_type: str
+    file_number: str
+    timestamp: datetime
+
+
+class ItemMetadataSchema(BaseModel):
+    id: Optional[int] = None  # Added for file operations
+    file_path: str
+    type: str
+    size: int
+    mtime: float
+    last_seen_timestamp: datetime
+    sha256_hash: Optional[str] = None
+    versions: List[FileVersionSchema] = []
+    child_count: Optional[int] = None
+
+
 class FileItemSchema(BaseModel):
     name: str
     path: str
     type: str
     size: Optional[int] = None
     mtime: Optional[float] = None
-    media: List[str] = []  # List of media identifiers this file is on
+    media: List[str] = []
+
+
+class TreeNodeSchema(BaseModel):
+    name: str
+    path: str
+    has_children: bool = False
+
+
+class MediaCreateSchema(BaseModel):
+    media_type: str  # tape, hdd, cloud
+    identifier: str
+    generation_tier: Optional[str] = None
+    capacity: int  # in bytes
+    location: Optional[str] = None
+    config: Dict[str, Any] = {}
+
+
+class MediaUpdateSchema(BaseModel):
+    status: Optional[str] = None
+    location: Optional[str] = None
+    config: Optional[Dict[str, Any]] = None
+
+
+class MediaSchema(BaseModel):
+    id: int
+    media_type: str
+    identifier: str
+    generation_tier: Optional[str]
+    capacity: int
+    bytes_used: int
+    location: Optional[str]
+    status: str
+    config: Dict[str, Any]
+
+    @classmethod
+    def from_orm_custom(cls, obj: models.StorageMedia):
+        config_data = {}
+        if obj.extra_config:
+            try:
+                config_data = json.loads(obj.extra_config)
+            except Exception:
+                pass
+        return cls(
+            id=obj.id,
+            media_type=obj.media_type,
+            identifier=obj.identifier,
+            generation_tier=obj.generation_tier,
+            capacity=obj.capacity,
+            bytes_used=obj.bytes_used,
+            location=obj.location,
+            status=obj.status,
+            config=config_data,
+        )
+
+
+# --- Media Endpoints ---
+
+
+@router.get("/media", response_model=List[MediaSchema])
+def list_media(db: Session = Depends(get_db)):
+    all_media = db.query(models.StorageMedia).all()
+    return [MediaSchema.from_orm_custom(m) for m in all_media]
+
+
+@router.post("/media", response_model=MediaSchema)
+def register_media(req: MediaCreateSchema, db: Session = Depends(get_db)):
+    existing = (
+        db.query(models.StorageMedia)
+        .filter(models.StorageMedia.identifier == req.identifier)
+        .first()
+    )
+    if existing:
+        raise HTTPException(
+            status_code=400, detail="Media with this identifier already exists"
+        )
+    new_media = models.StorageMedia(
+        media_type=req.media_type,
+        identifier=req.identifier,
+        generation_tier=req.generation_tier,
+        capacity=req.capacity,
+        location=req.location,
+        status="active",
+        extra_config=json.dumps(req.config),
+    )
+    db.add(new_media)
+    db.commit()
+    db.refresh(new_media)
+    return MediaSchema.from_orm_custom(new_media)
+
+
+@router.patch("/media/{media_id}", response_model=MediaSchema)
+def update_media(media_id: int, req: MediaUpdateSchema, db: Session = Depends(get_db)):
+    media = db.query(models.StorageMedia).get(media_id)
+    if not media:
+        raise HTTPException(status_code=404, detail="Media not found")
+    if req.status:
+        media.status = req.status
+    if req.location:
+        media.location = req.location
+    if req.config is not None:
+        current_config = {}
+        if media.extra_config:
+            try:
+                current_config = json.loads(media.extra_config)
+            except Exception:
+                pass
+        current_config.update(req.config)
+        media.extra_config = json.dumps(current_config)
+    db.commit()
+    db.refresh(media)
+    return MediaSchema.from_orm_custom(media)
+
+
+@router.delete("/media/{media_id}")
+def delete_media(media_id: int, db: Session = Depends(get_db)):
+    media = db.query(models.StorageMedia).get(media_id)
+    if not media:
+        raise HTTPException(status_code=404, detail="Media not found")
+    if media.versions:
+        raise HTTPException(status_code=400, detail="Cannot delete media with files")
+    db.delete(media)
+    db.commit()
+    return {"message": "Media deleted"}
+
+
+# --- Browsing Endpoints (Optimized) ---
 
 
 @router.get("/browse", response_model=List[FileItemSchema])
-def browse_index(path: str = "/", db: Session = Depends(get_db)):
-    # This is trickier because we store full paths.
-    # We need to find all unique "first level" children of the given path.
-
-    if not path.endswith("/"):
-        path += "/"
-
-    # Files directly in this path
-    # We can use a regex or just string manipulation in SQLite
-    # For simplicity, let's get all files starting with path and then parse
-
-    # Query for files that are in this directory
-    # A file is in /dir/ if its path starts with /dir/ and doesn't contain another / after that
-
-    # Actually, a better way for a Virtual FS is to query for all paths starting with 'path'
-    # and then find the next component.
-
-    all_files = (
-        db.query(models.FilesystemState)
-        .filter(models.FilesystemState.file_path.like(f"{path}%"))
-        .all()
-    )
-
-    results_map = {}
-
-    for f in all_files:
-        relative = f.file_path[len(path) :]
-        if not relative:
-            continue
-
-        parts = relative.split("/")
-        name = parts[0]
-        full_item_path = path + name
-
-        if len(parts) > 1:
-            # It's a directory
-            if name not in results_map:
-                results_map[name] = FileItemSchema(
-                    name=name, path=full_item_path, type="directory", size=0, mtime=0
+def browse_index(
+    path: Optional[str] = None,
+    include_ignored: bool = False,
+    db: Session = Depends(get_db),
+):
+    if path is None or path == "ROOT":
+        roots = get_source_roots(db)
+        results = []
+        for root in roots:
+            sql = text(
+                "SELECT COUNT(*), SUM(size), MAX(mtime) FROM filesystem_state WHERE file_path LIKE :prefix"
+                + (" AND is_ignored = 0" if not include_ignored else "")
+            )
+            row = db.execute(sql, {"prefix": f"{root}%"}).fetchone()
+            if row and row[0] > 0:
+                results.append(
+                    FileItemSchema(
+                        name=root,
+                        path=root,
+                        type="directory",
+                        size=row[1] or 0,
+                        mtime=row[2] or 0,
+                    )
+                )
+        return results
+
+    prefix = path if path.endswith("/") else path + "/"
+    ignore_filter = " AND is_ignored = 0" if not include_ignored else ""
+    results = []
+
+    # Subdirectories
+    subdir_sql = text(
+        f"""
+        SELECT DISTINCT SUBSTR(file_path, LENGTH(:prefix) + 1, INSTR(SUBSTR(file_path, LENGTH(:prefix) + 1), '/') - 1) as dirname
+        FROM filesystem_state WHERE file_path LIKE :search_prefix AND SUBSTR(file_path, LENGTH(:prefix) + 1) LIKE '%/%' {ignore_filter}
+        """
+    )
+    subdirs = db.execute(
+        subdir_sql, {"prefix": prefix, "search_prefix": f"{prefix}%"}
+    ).fetchall()
+    for sd in subdirs:
+        if sd[0]:
+            results.append(
+                FileItemSchema(
+                    name=sd[0], path=prefix + sd[0], type="directory", size=0, mtime=0
                 )
-            results_map[name].size += f.size
-            if f.mtime > results_map[name].mtime:
-                results_map[name].mtime = f.mtime
-        else:
-            # It's a file
-            media_list = [v.media.identifier for v in f.versions]
-            results_map[name] = FileItemSchema(
-                name=name,
-                path=f.file_path,
-                type="file",
-                size=f.size,
-                mtime=f.mtime,
-                media=media_list,
-            )
-
-    results = list(results_map.values())
-    results.sort(key=lambda x: (x.type != "directory", x.name.lower()))
+            )
+
+    # Files
+    file_sql = text(
+        f"""
+        SELECT name, file_path, size, mtime, id FROM (
+            SELECT SUBSTR(file_path, LENGTH(:prefix) + 1) as name, file_path, size, mtime, id
+            FROM filesystem_state WHERE file_path LIKE :search_prefix {ignore_filter}
+        ) WHERE name NOT LIKE '%/%'
+        """
+    )
+    files = db.execute(
+        file_sql, {"prefix": prefix, "search_prefix": f"{prefix}%"}
+    ).fetchall()
+    for f in files:
+        media_sql = text(
+            "SELECT m.identifier FROM storage_media m JOIN file_versions v ON v.media_id = m.id WHERE v.filesystem_state_id = :fid"
+        )
+        media_list = [m[0] for m in db.execute(media_sql, {"fid": f[4]}).fetchall()]
+        results.append(
+            FileItemSchema(
+                name=f[0],
+                path=f[1],
+                type="file",
+                size=f[2],
+                mtime=f[3],
+                media=media_list,
+            )
+        )
+
+    results.sort(key=lambda x: (x.type != "directory", x.name.lower()))
     return results
+
+
+@router.get("/tree", response_model=List[TreeNodeSchema])
+def get_index_tree(
+    path: Optional[str] = None,
+    include_ignored: bool = False,
+    db: Session = Depends(get_db),
+):
+    if path is None or path == "ROOT":
+        roots = get_source_roots(db)
+        return [TreeNodeSchema(name=r, path=r, has_children=True) for r in roots]
+    prefix = path if path.endswith("/") else path + "/"
+    ignore_filter = " AND is_ignored = 0" if not include_ignored else ""
+    subdir_sql = text(
+        f"SELECT DISTINCT SUBSTR(file_path, LENGTH(:prefix) + 1, INSTR(SUBSTR(file_path, LENGTH(:prefix) + 1), '/') - 1) as dirname FROM filesystem_state WHERE file_path LIKE :search_prefix AND SUBSTR(file_path, LENGTH(:prefix) + 1) LIKE '%/%' {ignore_filter}"
+    )
+    subdirs = db.execute(
+        subdir_sql, {"prefix": prefix, "search_prefix": f"{prefix}%"}
+    ).fetchall()
+    results = [
+        TreeNodeSchema(name=sd[0], path=prefix + sd[0], has_children=True)
+        for sd in subdirs
+        if sd[0]
+    ]
+    results.sort(key=lambda x: x.name.lower())
+    return results
+
+
+@router.get("/metadata", response_model=ItemMetadataSchema)
+def get_item_metadata(path: str, db: Session = Depends(get_db)):
+    file_state = (
+        db.query(models.FilesystemState)
+        .filter(models.FilesystemState.file_path == path)
+        .first()
+    )
+    if file_state:
+        versions = [
+            FileVersionSchema(
+                media_identifier=v.media.identifier,
+                media_type=v.media.media_type,
+                file_number=v.file_number,
+                timestamp=file_state.last_seen_timestamp,
+            )
+            for v in file_state.versions
+        ]
+        return ItemMetadataSchema(
+            id=file_state.id,  # Now included
+            file_path=file_state.file_path,
+            type="file",
+            size=file_state.size,
+            mtime=file_state.mtime,
+            last_seen_timestamp=file_state.last_seen_timestamp,
+            sha256_hash=file_state.sha256_hash,
+            versions=versions,
+        )
+    prefix = path if path.endswith("/") else path + "/"
+    sql = text(
+        "SELECT COUNT(*), SUM(size), MAX(mtime), MAX(last_seen_timestamp) FROM filesystem_state WHERE file_path LIKE :prefix AND is_ignored = 0"
+    )
+    row = db.execute(sql, {"prefix": f"{prefix}%"}).fetchone()
+    if row and row[0] > 0:
+        return ItemMetadataSchema(
+            file_path=path,
+            type="directory",
+            size=row[1] or 0,
+            mtime=row[2] or 0,
+            last_seen_timestamp=row[3] or datetime.now(timezone.utc),
+            child_count=row[0],
+        )
+    raise HTTPException(status_code=404, detail="Item not found")
+
+
+@router.get("/")
+def list_inventory():
+    return []
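The optimized browse endpoint derives immediate children of a virtual directory purely in SQL, using SQLite's SUBSTR/INSTR to peel off the first path component after the prefix. A standalone check of that trick (table and data invented, simplified to omit the `is_ignored` filter):

```python
# Verifies the SUBSTR/INSTR first-component extraction used by /inventory/browse.
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE filesystem_state (file_path TEXT)")
con.executemany(
    "INSERT INTO filesystem_state VALUES (?)",
    [("/data/a/x.txt",), ("/data/a/b/y.txt",), ("/data/z.txt",)],
)
prefix = "/data/"
rows = con.execute(
    """
    SELECT DISTINCT SUBSTR(file_path, LENGTH(:prefix) + 1,
                           INSTR(SUBSTR(file_path, LENGTH(:prefix) + 1), '/') - 1)
    FROM filesystem_state
    WHERE file_path LIKE :search AND SUBSTR(file_path, LENGTH(:prefix) + 1) LIKE '%/%'
    """,
    {"prefix": prefix, "search": f"{prefix}%"},
).fetchall()
print(rows)  # [('a',)] -- 'z.txt' has no '/' after the prefix, so it is a file
```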
diff --git a/backend/app/api/restores.py b/backend/app/api/restores.py
new file mode 100644
index 0000000..02d8e73
--- /dev/null
+++ b/backend/app/api/restores.py
@@ -0,0 +1,159 @@
+from fastapi import APIRouter, HTTPException, Depends
+from typing import List
+from pydantic import BaseModel
+from sqlalchemy.orm import Session
+from app.db.database import get_db
+from app.db import models
+
+router = APIRouter(prefix="/restores", tags=["Restores"])
+
+# --- Schemas ---
+
+
+class CartItemSchema(BaseModel):
+    id: int
+    file_path: str
+    size: int
+    media_identifiers: List[str]
+
+    class Config:
+        from_attributes = True
+
+
+class ManifestMediaRequirement(BaseModel):
+    identifier: str
+    media_type: str
+    file_count: int
+    total_size: int
+
+
+class RestoreManifestSchema(BaseModel):
+    total_files: int
+    total_size: int
+    media_required: List[ManifestMediaRequirement]
+
+
+class DirectoryCartRequest(BaseModel):
+    path: str
+
+
+# --- Endpoints ---
+
+
+@router.get("/cart", response_model=List[CartItemSchema])
+def list_cart(db: Session = Depends(get_db)):
+    items = db.query(models.RestoreCart).all()
+    results = []
+    for item in items:
+        media_ids = [v.media.identifier for v in item.file_state.versions]
+        results.append(
+            CartItemSchema(
+                id=item.id,
+                file_path=item.file_state.file_path,
+                size=item.file_state.size,
+                media_identifiers=media_ids,
+            )
+        )
+    return results
+
+
+# Fixed paths must be registered before the parameterised "/cart/{file_id}"
+# route, or POSTs to /cart/directory and /cart/clear would be captured by it.
+@router.post("/cart/directory")
+def add_directory_to_cart(req: DirectoryCartRequest, db: Session = Depends(get_db)):
+    prefix = req.path if req.path.endswith("/") else req.path + "/"
+
+    # Find all files under this path that have at least one version
+    eligible_files = (
+        db.query(models.FilesystemState)
+        .filter(
+            models.FilesystemState.file_path.like(f"{prefix}%"),
+            models.FilesystemState.versions.any(),
+        )
+        .all()
+    )
+
+    if not eligible_files:
+        raise HTTPException(
+            status_code=404, detail="No restorable files found in this directory"
+        )
+
+    # Get the current cart to avoid duplicates
+    in_cart = {c.filesystem_state_id for c in db.query(models.RestoreCart).all()}
+
+    added_count = 0
+    for f in eligible_files:
+        if f.id not in in_cart:
+            db.add(models.RestoreCart(filesystem_state_id=f.id))
+            added_count += 1
+
+    db.commit()
+    return {"message": f"Added {added_count} files from {req.path} to cart"}
+
+
+@router.post("/cart/clear")
+def clear_cart(db: Session = Depends(get_db)):
+    db.query(models.RestoreCart).delete()
+    db.commit()
+    return {"message": "Cart cleared"}
+
+
+@router.post("/cart/{file_id}")
+def add_to_cart(file_id: int, db: Session = Depends(get_db)):
+    existing = (
+        db.query(models.RestoreCart)
+        .filter(models.RestoreCart.filesystem_state_id == file_id)
+        .first()
+    )
+    if existing:
+        return {"message": "Already in cart"}
+
+    file_state = db.query(models.FilesystemState).get(file_id)
+    if not file_state or not file_state.versions:
+        raise HTTPException(status_code=400, detail="File has no backed up versions")
+
+    new_item = models.RestoreCart(filesystem_state_id=file_id)
+    db.add(new_item)
+    db.commit()
+    return {"message": "Added to cart"}
+
+
+@router.delete("/cart/{item_id}")
+def remove_from_cart(item_id: int, db: Session = Depends(get_db)):
+    item = db.query(models.RestoreCart).get(item_id)
+    if item:
+        db.delete(item)
+        db.commit()
+    return {"message": "Removed from cart"}
+
+
+@router.get("/manifest", response_model=RestoreManifestSchema)
+def get_manifest(db: Session = Depends(get_db)):
+    cart_items = db.query(models.RestoreCart).all()
+    if not cart_items:
+        return RestoreManifestSchema(total_files=0, total_size=0, media_required=[])
+
+    total_size = sum(item.file_state.size for item in cart_items)
+    media_map = {}
+
+    for item in cart_items:
+        if not item.file_state.versions:
+            continue
+        primary_v = item.file_state.versions[0]
+        ident = primary_v.media.identifier
+        m_type = primary_v.media.media_type
+        if ident not in media_map:
+            media_map[ident] = {
+                "identifier": ident,
+                "media_type": m_type,
+                "file_count": 0,
+                "total_size": 0,
+            }
+        media_map[ident]["file_count"] += 1
+        media_map[ident]["total_size"] += item.file_state.size
+
+    requirements = [ManifestMediaRequirement(**m) for m in media_map.values()]
+    requirements.sort(key=lambda x: x.identifier)
+    return RestoreManifestSchema(
+        total_files=len(cart_items), total_size=total_size, media_required=requirements
+    )
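The manifest endpoint groups cart items by the media holding each file's first version, so an operator knows which tapes or disks to fetch. A hypothetical response for a two-tape cart (identifiers and sizes invented) has this shape:

```python
# Illustrative only; values are invented.
manifest = {
    "total_files": 3,
    "total_size": 7_500_000_000,
    "media_required": [
        {"identifier": "LTO6-0001", "media_type": "tape", "file_count": 2, "total_size": 5_000_000_000},
        {"identifier": "LTO6-0002", "media_type": "tape", "file_count": 1, "total_size": 2_500_000_000},
    ],
}
```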
diff --git a/backend/app/api/system.py b/backend/app/api/system.py
index 7c9f459..1b996a8 100644
--- a/backend/app/api/system.py
+++ b/backend/app/api/system.py
@@ -1,21 +1,58 @@
-from fastapi import APIRouter, HTTPException, Depends
+from fastapi import APIRouter, HTTPException, Depends, BackgroundTasks
+from fastapi.responses import StreamingResponse
 import os
-from typing import List, Optional
+import json
+import asyncio
+from datetime import datetime
+from typing import List, Optional, Dict, Tuple
 from pydantic import BaseModel
 from sqlalchemy.orm import Session
-from app.db.database import get_db
+from sqlalchemy import func, text
+from app.db.database import get_db, SessionLocal
 from app.db import models
+from app.services.scanner import scanner_manager, JobManager
+import pathspec
 
 router = APIRouter(prefix="/system", tags=["System"])
 
+
+# --- Models ---
+class DashboardStatsSchema(BaseModel):
+    total_files_indexed: int
+    total_data_size: int
+    tracked_paths_count: int
+    unprotected_files_count: int
+    unprotected_data_size: int
+    ignored_files_count: int
+    ignored_data_size: int
+    redundancy_ratio: float
+    last_scan_time: Optional[datetime]
+    media_distribution: Dict[str, int]
+
+
+class JobSchema(BaseModel):
+    id: int
+    job_type: str
+    status: str
+    progress: float
+    current_task: Optional[str]
+    error_message: Optional[str]
+    started_at: Optional[datetime]
+    completed_at: Optional[datetime]
+    created_at: datetime
+
+    class Config:
+        from_attributes = True
+
+
 class FileItemSchema(BaseModel):
     name: str
     path: str
-    type: str  # file, directory, link
+    type: str
     size: Optional[int] = None
     mtime: Optional[float] = None
     tracked: bool = False
+    ignored: bool = False
 
 
 class TrackToggleRequest(BaseModel):
@@ -23,147 +60,302 @@ class TrackToggleRequest(BaseModel):
     is_directory: bool = True
 
 
-@router.get("/browse", response_model=List[FileItemSchema])
-def browse_path(path: str = "/source_data", db: Session = Depends(get_db)):
-    # If absolute path doesn't exist, try relative to project root
-    if not os.path.exists(path):
-        local_source = os.path.abspath(os.path.join(os.getcwd(), "..", "source_data"))
-        if path == "/source_data" and os.path.exists(local_source):
-            path = local_source
-        else:
-            raise HTTPException(status_code=404, detail=f"Path not found: {path}")
-
-    if not os.path.isdir(path):
-        raise HTTPException(status_code=400, detail="Path is not a directory")
-
-    # Get all tracked paths to mark items as tracked
-    tracked_paths = {t.path for t in db.query(models.TrackedSource).all()}
-
-    results = []
-    try:
-        with os.scandir(path) as it:
-            for entry in it:
-                try:
-                    stats = entry.stat(follow_symlinks=False)
-                    item_type = "file"
-                    if entry.is_dir():
-                        item_type = "directory"
-                    elif entry.is_symlink():
-                        item_type = "link"
-
-                    results.append(
-                        FileItemSchema(
-                            name=entry.name,
-                            path=entry.path,
-                            type=item_type,
-                            size=stats.st_size,
-                            mtime=stats.st_mtime,
-                            tracked=entry.path in tracked_paths,
-                        )
-                    )
-                except Exception:
-                    continue
-    except Exception as e:
-        raise HTTPException(status_code=500, detail=str(e))
-
-    # Sort: directories first, then name
-    results.sort(key=lambda x: (x.type != "directory", x.name.lower()))
-
-    return results
+class BatchTrackRequest(BaseModel):
+    tracks: List[str] = []
+    untracks: List[str] = []
 
 
-@router.post("/track")
-def track_path(req: TrackToggleRequest, db: Session = Depends(get_db)):
-    existing = (
-        db.query(models.TrackedSource)
-        .filter(models.TrackedSource.path == req.path)
+class ScanStatusSchema(BaseModel):
+    is_running: bool
+    files_processed: int
+    files_hashed: int
+    total_files_found: int
+    current_path: str
+    last_run_time: Optional[datetime] = None
+
+
+class SettingSchema(BaseModel):
+    key: str
+    value: str
+
+
+# --- Helpers ---
+def get_source_roots(db: Session) -> List[str]:
+    setting = (
+        db.query(models.SystemSetting)
+        .filter(models.SystemSetting.key == "source_roots")
         .first()
     )
-    if existing:
-        return {"message": "Already tracked"}
-
-    new_track = models.TrackedSource(path=req.path, is_directory=req.is_directory)
-    db.add(new_track)
-    db.commit()
-    return {"message": "Path tracked"}
+    if setting:
+        try:
+            return json.loads(setting.value)
+        except Exception:
+            return [setting.value]
+    local_source = os.path.abspath(os.path.join(os.getcwd(), "..", "source_data"))
+    if os.path.exists(local_source):
+        return [local_source]
+    return ["/source_data"]
 
 
-class BatchTrackRequest(BaseModel):
-    tracks: List[str] = []  # Paths to track
-    untracks: List[str] = []  # Paths to untrack
+def get_exclusion_spec(db: Session) -> Optional[pathspec.PathSpec]:
+    setting = (
+        db.query(models.SystemSetting)
+        .filter(models.SystemSetting.key == "global_exclusions")
+        .first()
+    )
+    if not setting or not setting.value.strip():
+        return None
+    patterns = [p.strip() for p in setting.value.splitlines() if p.strip()]
+    return pathspec.PathSpec.from_lines("gitwildmatch", patterns)
+
+
+def get_tracking_status(
+    path: str, tracking_map: Dict[str, str], spec: Optional[pathspec.PathSpec]
+) -> Tuple[bool, bool]:
+    is_ignored = False
+    if spec and spec.match_file(path):
+        is_ignored = True
+    applicable_rules = []
+    for rule_path, action in tracking_map.items():
+        if path == rule_path or path.startswith(rule_path + "/"):
+            applicable_rules.append((len(rule_path), action))
+    if not applicable_rules:
+        return not is_ignored, is_ignored
+    applicable_rules.sort(key=lambda x: x[0], reverse=True)
+    is_tracked = applicable_rules[0][1] == "include"
+    return is_tracked, is_ignored
+
+
+# --- Endpoints ---
+
+
+@router.get("/dashboard/stats", response_model=DashboardStatsSchema)
+def get_dashboard_stats(db: Session = Depends(get_db)):
+    agg_sql = text("""
+        SELECT
+            COUNT(*) as total_count,
+            SUM(size) as total_size,
+            SUM(CASE WHEN is_ignored = 1 THEN 1 ELSE 0 END) as ignored_count,
+            SUM(CASE WHEN is_ignored = 1 THEN size ELSE 0 END) as ignored_size,
+            SUM(CASE WHEN is_ignored = 0 AND is_indexed = 1 AND id NOT IN (SELECT filesystem_state_id FROM file_versions) THEN 1 ELSE 0 END) as unprotected_count,
+            SUM(CASE WHEN is_ignored = 0 AND is_indexed = 1 AND id NOT IN (SELECT filesystem_state_id FROM file_versions) THEN size ELSE 0 END) as unprotected_size
+        FROM filesystem_state
+    """)
+
+    result = db.execute(agg_sql).fetchone()
+
+    if result:
+        total_count = result[0] or 0
+        total_size = result[1] or 0
+        ignored_count = result[2] or 0
+        ignored_size = result[3] or 0
+        unprotected_count = result[4] or 0
+        unprotected_size = result[5] or 0
+    else:
+        total_count = total_size = ignored_count = ignored_size = unprotected_count = (
+            unprotected_size
+        ) = 0
+
+    tracked_paths = (
+        db.query(func.count(models.TrackedSource.id))
+        .filter(models.TrackedSource.action == "include")
+        .scalar()
+        or 0
+    )
+
+    total_versions = db.query(func.count(models.FileVersion.id)).scalar() or 0
+    eligible_count = total_count - ignored_count
+    redundancy = total_versions / eligible_count if eligible_count > 0 else 0.0
+
+    media_dist = {"LTO": 0, "HDD": 0, "Cloud": 0}
+    media_counts = (
+        db.query(models.StorageMedia.media_type, func.count(models.StorageMedia.id))
+        .group_by(models.StorageMedia.media_type)
+        .all()
+    )
+    # Map stored media types to the display keys ("tape".upper() would
+    # produce "TAPE", never matching the "LTO" bucket above).
+    type_keys = {"tape": "LTO", "hdd": "HDD", "cloud": "Cloud"}
+    for mtype, count in media_counts:
+        media_dist[type_keys.get(mtype, mtype)] = count
+
+    return DashboardStatsSchema(
+        total_files_indexed=total_count,
+        total_data_size=total_size,
+        tracked_paths_count=tracked_paths,
+        unprotected_files_count=unprotected_count,
+        unprotected_data_size=unprotected_size,
+        ignored_files_count=ignored_count,
+        ignored_data_size=ignored_size,
+        redundancy_ratio=round(redundancy, 2),
+        last_scan_time=scanner_manager.last_run_time,
+        media_distribution=media_dist,
+    )
+
+
+@router.get("/jobs", response_model=List[JobSchema])
+def list_jobs(limit: int = 50, db: Session = Depends(get_db)):
+    return (
+        db.query(models.Job).order_by(models.Job.created_at.desc()).limit(limit).all()
+    )
+
+
+@router.post("/jobs/{job_id}/cancel")
+def cancel_job(job_id: int, db: Session = Depends(get_db)):
+    JobManager.cancel_job(job_id)
+    return {"message": "Cancellation request submitted"}
+
+
+@router.get("/jobs/stream")
+async def stream_jobs():
+    async def event_generator():
+        while True:
+            db = SessionLocal()
+            try:
+                active_jobs = (
+                    db.query(models.Job)
+                    .filter(models.Job.status.in_(["RUNNING", "PENDING"]))
+                    .all()
+                )
+                data = [JobSchema.model_validate(j).model_dump() for j in active_jobs]
+                for job in data:
+                    for key in ["started_at", "completed_at", "created_at"]:
+                        if job[key] and isinstance(job[key], datetime):
+                            job[key] = job[key].isoformat()
+                yield f"data: {json.dumps(data)}\n\n"
+            finally:
+                db.close()
+            await asyncio.sleep(1)
+
+    return StreamingResponse(event_generator(), media_type="text/event-stream")
+
+
+@router.post("/scan")
+def trigger_scan(background_tasks: BackgroundTasks, db: Session = Depends(get_db)):
+    if scanner_manager.is_running:
+        raise HTTPException(status_code=400, detail="Scan already in progress")
+    job = JobManager.create_job(db, "SCAN")
+
+    def run_scan():
+        db_inner = SessionLocal()
+        try:
+            scanner_manager.scan_sources(db_inner, job_id=job.id)
+        finally:
+            db_inner.close()
+
+    background_tasks.add_task(run_scan)
+    return {"message": "Scan started", "job_id": job.id}
+
+
+@router.get("/scan/status", response_model=ScanStatusSchema)
+def get_scan_status():
+    return ScanStatusSchema(
+        is_running=scanner_manager.is_running,
+        files_processed=scanner_manager.files_processed,
+        files_hashed=scanner_manager.files_hashed,
+        total_files_found=scanner_manager.total_files_found,
+        current_path=scanner_manager.current_path,
+        last_run_time=scanner_manager.last_run_time,
+    )
+
+
+@router.get("/browse", response_model=List[FileItemSchema])
+def browse_path(path: Optional[str] = None, db: Session = Depends(get_db)):
+    roots = get_source_roots(db)
+    tracking_map = {s.path: s.action for s in db.query(models.TrackedSource).all()}
+    spec = get_exclusion_spec(db)
+    if path is None or path == "ROOT":
+        results = []
+        for root in roots:
+            if not os.path.exists(root):
+                continue
+            stats = os.stat(root)
+            tracked, ignored = get_tracking_status(root, tracking_map, spec)
+            results.append(
+                FileItemSchema(
+                    name=root,
+                    path=root,
+                    type="directory",
+                    size=stats.st_size,
+                    mtime=stats.st_mtime,
+                    tracked=tracked,
+                    ignored=ignored,
+                )
+            )
+        return results
+    if not os.path.exists(path):
+        raise HTTPException(status_code=404, detail="Not found")
+    results = []
+    with os.scandir(path) as it:
+        for entry in it:
+            try:
+                stats = entry.stat(follow_symlinks=False)
+                tracked, ignored = get_tracking_status(entry.path, tracking_map, spec)
+                results.append(
+                    FileItemSchema(
+                        name=entry.name,
+                        path=entry.path,
+                        type="directory" if entry.is_dir() else "file",
+                        size=stats.st_size,
+                        mtime=stats.st_mtime,
+                        tracked=tracked,
+                        ignored=ignored,
+                    )
+                )
+            except Exception:
+                continue
!= "directory", x.name.lower())) + return results @router.post("/track/batch") def track_batch(req: BatchTrackRequest, db: Session = Depends(get_db)): - # Handle untracks - if req.untracks: - db.query(models.TrackedSource).filter( - models.TrackedSource.path.in_(req.untracks) - ).delete(synchronize_session=False) - - # Handle tracks - if req.tracks: - # Get existing to avoid duplicates - existing = { - t.path - for t in db.query(models.TrackedSource) - .filter(models.TrackedSource.path.in_(req.tracks)) - .all() - } - new_paths = [path for path in req.tracks if path not in existing] - - for path in new_paths: - # Note: In a real app we'd verify if it's a directory - new_track = models.TrackedSource(path=path, is_directory=True) - db.add(new_track) - - db.commit() - return { - "message": f"Processed {len(req.tracks)} tracks and {len(req.untracks)} untracks" - } - - -class TreeNodeSchema(BaseModel): - name: str - path: str - has_children: bool = False - - -@router.get("/tree", response_model=List[TreeNodeSchema]) -def get_tree(path: str = "/source_data"): - if not os.path.exists(path): - local_source = os.path.abspath(os.path.join(os.getcwd(), "..", "source_data")) - if path == "/source_data" and os.path.exists(local_source): - path = local_source + for path in req.tracks: + existing = ( + db.query(models.TrackedSource) + .filter(models.TrackedSource.path == path) + .first() + ) + if existing: + existing.action = "include" else: - raise HTTPException(status_code=404, detail="Path not found") + db.add(models.TrackedSource(path=path, action="include")) + for path in req.untracks: + existing = ( + db.query(models.TrackedSource) + .filter(models.TrackedSource.path == path) + .first() + ) + if existing: + existing.action = "exclude" + else: + db.add(models.TrackedSource(path=path, action="exclude")) + db.commit() + return {"message": "Updated"} - if not os.path.isdir(path): - return [] - results = [] - try: - with os.scandir(path) as it: - for entry in it: - if entry.is_dir(): - # Check if it has subdirectories for the expander icon - has_subdirs = False - try: - with os.scandir(entry.path) as sub_it: - for sub_entry in sub_it: - if sub_entry.is_dir(): - has_subdirs = True - break - except Exception: - pass +@router.get("/settings", response_model=Dict[str, str]) +def get_settings(db: Session = Depends(get_db)): + all_settings = db.query(models.SystemSetting).all() + return {s.key: s.value for s in all_settings} - results.append( - TreeNodeSchema( - name=entry.name, path=entry.path, has_children=has_subdirs - ) - ) - except Exception as e: - raise HTTPException(status_code=500, detail=str(e)) - results.sort(key=lambda x: x.name.lower()) - return results +@router.post("/settings") +def update_setting(req: SettingSchema, db: Session = Depends(get_db)): + setting = ( + db.query(models.SystemSetting) + .filter(models.SystemSetting.key == req.key) + .first() + ) + if setting: + setting.value = req.value + else: + db.add(models.SystemSetting(key=req.key, value=req.value)) + db.commit() + return {"message": "Updated"} + + +@router.get("/tree") +def get_tree(path: Optional[str] = None, db: Session = Depends(get_db)): + roots = get_source_roots(db) + if path is None or path == "ROOT": + return [{"name": r, "path": r, "has_children": True} for r in roots] + return [] diff --git a/backend/app/db/models.py b/backend/app/db/models.py index 9dee5f2..46034d3 100644 --- a/backend/app/db/models.py +++ b/backend/app/db/models.py @@ -1,7 +1,7 @@ -from datetime import datetime +from datetime import datetime, timezone 
diff --git a/backend/app/db/models.py b/backend/app/db/models.py
index 9dee5f2..46034d3 100644
--- a/backend/app/db/models.py
+++ b/backend/app/db/models.py
@@ -1,7 +1,7 @@
-from datetime import datetime
+from datetime import datetime, timezone
 from typing import Optional, List
 
-from sqlalchemy import Integer, String, Float, ForeignKey, DateTime, Boolean
+from sqlalchemy import Integer, String, Float, ForeignKey, DateTime, Boolean, BigInteger
 from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship
 
 
@@ -14,10 +14,18 @@ class FilesystemState(Base):
 
     id: Mapped[int] = mapped_column(primary_key=True)
     file_path: Mapped[str] = mapped_column(String, index=True, unique=True)
-    size: Mapped[int] = mapped_column(Integer)
+    size: Mapped[int] = mapped_column(BigInteger)
     mtime: Mapped[float] = mapped_column(Float)
-    sha256_hash: Mapped[Optional[str]] = mapped_column(String, index=True)
-    last_seen_timestamp: Mapped[datetime] = mapped_column(DateTime)
+    sha256_hash: Mapped[Optional[str]] = mapped_column(
+        String, index=True, nullable=True
+    )
+    is_indexed: Mapped[bool] = mapped_column(Boolean, default=False)  # True if hashed
+    is_ignored: Mapped[bool] = mapped_column(
+        Boolean, default=False
+    )  # True if matches exclusion
+    last_seen_timestamp: Mapped[datetime] = mapped_column(
+        DateTime, default=lambda: datetime.now(timezone.utc)
+    )
 
     versions: Mapped[List["FileVersion"]] = relationship(back_populates="file_state")
 
@@ -33,12 +41,15 @@ class StorageMedia(Base):
     generation_tier: Mapped[Optional[str]] = mapped_column(
         String
     )  # e.g., LTO-6, S3 Standard
-    capacity: Mapped[int] = mapped_column(Integer)  # Native capacity in bytes
-    bytes_used: Mapped[int] = mapped_column(Integer, default=0)
+    capacity: Mapped[int] = mapped_column(BigInteger)  # Native capacity in bytes
+    bytes_used: Mapped[int] = mapped_column(BigInteger, default=0)
     location: Mapped[Optional[str]] = mapped_column(String)
     status: Mapped[str] = mapped_column(
         String, default="active"
     )  # active, full, retired, offline
+    extra_config: Mapped[Optional[str]] = mapped_column(
+        String
+    )  # JSON config for type-specific details
 
     versions: Mapped[List["FileVersion"]] = relationship(back_populates="media")
 
@@ -75,7 +86,52 @@ class TrackedSource(Base):
 
     id: Mapped[int] = mapped_column(primary_key=True)
     path: Mapped[str] = mapped_column(String, unique=True, index=True)
     is_directory: Mapped[bool] = mapped_column(Boolean, default=True)
-    created_at: Mapped[datetime] = mapped_column(DateTime, default=datetime.utcnow)
+    action: Mapped[str] = mapped_column(String, default="include")  # include, exclude
+    created_at: Mapped[datetime] = mapped_column(
+        DateTime, default=lambda: datetime.now(timezone.utc)
+    )
+
+
+class RestoreCart(Base):
+    __tablename__ = "restore_cart"
+
+    id: Mapped[int] = mapped_column(primary_key=True)
+    filesystem_state_id: Mapped[int] = mapped_column(ForeignKey("filesystem_state.id"))
+    created_at: Mapped[datetime] = mapped_column(
+        DateTime, default=lambda: datetime.now(timezone.utc)
+    )
+
+    file_state: Mapped["FilesystemState"] = relationship()
+
+
+class Job(Base):
+    __tablename__ = "jobs"
+
+    id: Mapped[int] = mapped_column(primary_key=True)
+    job_type: Mapped[str] = mapped_column(String)  # SCAN, BACKUP, RESTORE
+    status: Mapped[str] = mapped_column(
+        String, default="PENDING"
+    )  # PENDING, RUNNING, COMPLETED, FAILED
+    progress: Mapped[float] = mapped_column(Float, default=0.0)
+    current_task: Mapped[Optional[str]] = mapped_column(String, nullable=True)
+    error_message: Mapped[Optional[str]] = mapped_column(String, nullable=True)
+    started_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True)
+    completed_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True)
+    created_at: Mapped[datetime] = mapped_column(
+        DateTime, default=lambda: datetime.now(timezone.utc)
+    )
+
+
+class SystemSetting(Base):
+    __tablename__ = "system_settings"
+
+    key: Mapped[str] = mapped_column(String, primary_key=True)
+    value: Mapped[str] = mapped_column(String)
+    updated_at: Mapped[datetime] = mapped_column(
+        DateTime,
+        default=lambda: datetime.now(timezone.utc),
+        onupdate=lambda: datetime.now(timezone.utc),
+    )
 
 
 class JobLog(Base):
@@ -83,8 +139,10 @@ class JobLog(Base):
 
     id: Mapped[int] = mapped_column(primary_key=True)
     backup_id: Mapped[int] = mapped_column(ForeignKey("backups.id"))
-    timestamp: Mapped[datetime] = mapped_column(DateTime, default=datetime.utcnow)
-    log_level: Mapped[str] = mapped_column(String)  # INFO, WARN, ERROR
+    timestamp: Mapped[datetime] = mapped_column(
+        DateTime, default=lambda: datetime.now(timezone.utc)
+    )
+    log_level: Mapped[str] = mapped_column(String)  # INFO, WARN, ERROR
     message: Mapped[str] = mapped_column(String)
 
     backup: Mapped["BackupJob"] = relationship(back_populates="logs")
diff --git a/backend/app/main.py b/backend/app/main.py
index fd78ba1..ca43483 100644
--- a/backend/app/main.py
+++ b/backend/app/main.py
@@ -1,6 +1,6 @@
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
-from app.api import system, inventory, backups
+from app.api import system, inventory, backups, restores
 from app.db.database import engine
 from app.db import models
 
@@ -26,6 +26,7 @@ app.add_middleware(
 app.include_router(system.router)
 app.include_router(inventory.router)
 app.include_router(backups.router)
+app.include_router(restores.router)
 
 
 @app.get("/")
diff --git a/backend/app/providers/base.py b/backend/app/providers/base.py
index e69de29..74e6e51 100644
--- a/backend/app/providers/base.py
+++ b/backend/app/providers/base.py
@@ -0,0 +1,45 @@
+from abc import ABC, abstractmethod
+from typing import Optional, BinaryIO
+
+
+class AbstractStorageProvider(ABC):
+    @abstractmethod
+    def get_name(self) -> str:
+        """Returns the human-readable name of the provider (e.g., 'LTO Tape')"""
+        pass
+
+    @abstractmethod
+    def identify_media(self) -> Optional[str]:
+        """
+        Attempts to read the identifier (barcode/UUID) from the currently inserted media.
+        Returns None if no media is inserted or it's unidentifiable.
+        """
+        pass
+
+    @abstractmethod
+    def prepare_for_write(self, media_id: str) -> bool:
+        """
+        Performs any necessary setup (e.g., mounting, winding) before writing.
+        Returns True if ready.
+        """
+        pass
+
+    @abstractmethod
+    def write_archive(self, media_id: str, stream: BinaryIO) -> str:
+        """
+        Writes a tar stream to the media.
+        Returns a 'file_number' or 'object_path' used to locate this archive later.
+        """
+        pass
+
+    @abstractmethod
+    def finalize_media(self, media_id: str):
+        """Finalizes the media (e.g., writing index, ejecting)"""
+        pass
+
+    @abstractmethod
+    def read_archive(self, media_id: str, location_id: str) -> BinaryIO:
+        """
+        Retrieves a specific archive stream from the media.
+        """
+        pass
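To make the provider contract concrete, here is a minimal sketch of an implementation backed by a plain local directory — useful as a test double. It is not part of the diff; the class name and layout are assumptions, though it reuses the `.tapehoard_id` convention the HDD provider below establishes.

```python
# Hypothetical test double for AbstractStorageProvider.
import os
from typing import BinaryIO, Optional

from app.providers.base import AbstractStorageProvider


class LocalDirProvider(AbstractStorageProvider):
    def __init__(self, root: str):
        self.root = root

    def get_name(self) -> str:
        return "Local Directory"

    def identify_media(self) -> Optional[str]:
        id_file = os.path.join(self.root, ".tapehoard_id")
        if os.path.exists(id_file):
            with open(id_file) as f:
                return f.read().strip()
        return None

    def prepare_for_write(self, media_id: str) -> bool:
        return self.identify_media() == media_id

    def write_archive(self, media_id: str, stream: BinaryIO) -> str:
        # Number archives sequentially, mirroring the HDD provider's scheme.
        n = len([f for f in os.listdir(self.root) if f.endswith(".tar")])
        with open(os.path.join(self.root, f"{n}.tar"), "wb") as out:
            out.write(stream.read())
        return str(n)

    def finalize_media(self, media_id: str):
        pass

    def read_archive(self, media_id: str, location_id: str) -> BinaryIO:
        return open(os.path.join(self.root, f"{location_id}.tar"), "rb")
```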
+ """ + pass diff --git a/backend/app/providers/cloud.py b/backend/app/providers/cloud.py index e69de29..f1b12c3 100644 --- a/backend/app/providers/cloud.py +++ b/backend/app/providers/cloud.py @@ -0,0 +1,55 @@ +import boto3 +from typing import Optional, BinaryIO, Dict, Any +from .base import AbstractStorageProvider +from loguru import logger + + +class CloudStorageProvider(AbstractStorageProvider): + def __init__(self, config: Dict[str, Any]): + self.provider_type = config.get("provider", "S3") + self.bucket_name = config.get("bucket_name") + self.region = config.get("region", "us-east-1") + self.endpoint_url = config.get("endpoint_url") # For non-AWS S3 (B2, Wasabi) + + # We assume credentials are in env or handled by the host + self.s3 = boto3.client( + "s3", region_name=self.region, endpoint_url=self.endpoint_url + ) + + def get_name(self) -> str: + return f"Cloud ({self.provider_type})" + + def identify_media(self) -> Optional[str]: + """Checks if the bucket exists and we have access""" + try: + self.s3.head_bucket(Bucket=self.bucket_name) + return self.bucket_name + except Exception as e: + logger.error(f"Failed to identify cloud bucket {self.bucket_name}: {e}") + return None + + def prepare_for_write(self, media_id: str) -> bool: + return self.identify_media() == media_id + + def write_archive(self, media_id: str, stream: BinaryIO) -> str: + """Uploads the stream as a new object in the bucket""" + # Generate a unique object key (future: use job timestamp) + import uuid + + object_key = f"archives/{uuid.uuid4().hex}.tar" + + logger.info(f"Uploading archive to cloud: {media_id}/{object_key}") + try: + self.s3.upload_fileobj(stream, self.bucket_name, object_key) + return object_key + except Exception as e: + logger.error(f"Cloud upload failed: {e}") + raise + + def finalize_media(self, media_id: str): + logger.info(f"Finalized cloud media {media_id}") + + def read_archive(self, media_id: str, location_id: str) -> BinaryIO: + """Returns a streaming body for the S3 object""" + response = self.s3.get_object(Bucket=self.bucket_name, Key=location_id) + return response["Body"] diff --git a/backend/app/providers/hdd.py b/backend/app/providers/hdd.py index e69de29..7713376 100644 --- a/backend/app/providers/hdd.py +++ b/backend/app/providers/hdd.py @@ -0,0 +1,92 @@ +import os +import shutil +from typing import Optional, BinaryIO +from .base import AbstractStorageProvider +from loguru import logger + + +class OfflineHDDProvider(AbstractStorageProvider): + def __init__(self, mount_base: str = "/mnt/backup_disk"): + self.mount_base = mount_base + + def get_name(self) -> str: + return "Offline HDD" + + def identify_media(self) -> Optional[str]: + """Reads the hidden identifier file from the disk root""" + id_file = os.path.join(self.mount_base, ".tapehoard_id") + logger.info(f"HDD Provider: Checking for ID file at: {id_file}") + + if os.path.exists(id_file): + try: + with open(id_file, "r") as f: + content = f.read().strip() + logger.info( + f"HDD Provider: Found identifier file. Content: '{content}'" + ) + return content + except Exception as e: + logger.error( + f"HDD Provider: Failed to read identifier at {id_file}: {e}" + ) + else: + logger.warning(f"HDD Provider: Identifier file NOT FOUND at: {id_file}") + if os.path.exists(self.mount_base): + try: + logger.info( + f"HDD Provider: Base directory {self.mount_base} exists. 
diff --git a/backend/app/providers/hdd.py b/backend/app/providers/hdd.py
index e69de29..7713376 100644
--- a/backend/app/providers/hdd.py
+++ b/backend/app/providers/hdd.py
@@ -0,0 +1,92 @@
+import os
+import shutil
+from typing import Optional, BinaryIO
+from .base import AbstractStorageProvider
+from loguru import logger
+
+
+class OfflineHDDProvider(AbstractStorageProvider):
+    def __init__(self, mount_base: str = "/mnt/backup_disk"):
+        self.mount_base = mount_base
+
+    def get_name(self) -> str:
+        return "Offline HDD"
+
+    def identify_media(self) -> Optional[str]:
+        """Reads the hidden identifier file from the disk root"""
+        id_file = os.path.join(self.mount_base, ".tapehoard_id")
+        logger.info(f"HDD Provider: Checking for ID file at: {id_file}")
+
+        if os.path.exists(id_file):
+            try:
+                with open(id_file, "r") as f:
+                    content = f.read().strip()
+                    logger.info(
+                        f"HDD Provider: Found identifier file. Content: '{content}'"
+                    )
+                    return content
+            except Exception as e:
+                logger.error(
+                    f"HDD Provider: Failed to read identifier at {id_file}: {e}"
+                )
+        else:
+            logger.warning(f"HDD Provider: Identifier file NOT FOUND at: {id_file}")
+            if os.path.exists(self.mount_base):
+                try:
+                    logger.info(
+                        f"HDD Provider: Base directory {self.mount_base} exists. Contents: {os.listdir(self.mount_base)}"
+                    )
+                except Exception as e:
+                    logger.error(f"HDD Provider: Failed to list base dir: {e}")
+            else:
+                logger.error(
+                    f"HDD Provider: Base directory {self.mount_base} DOES NOT EXIST."
+                )
+
+        return None
+
+    def prepare_for_write(self, media_id: str) -> bool:
+        """Verifies the disk is mounted and the identifier matches"""
+        current_id = self.identify_media()
+        if current_id != media_id:
+            logger.error(f"Media mismatch. Expected {media_id}, found {current_id}")
+            return False
+
+        # Ensure visible data directory exists
+        archive_dir = os.path.join(self.mount_base, "tapehoard_backups", "archives")
+        os.makedirs(archive_dir, exist_ok=True)
+        return True
+
+    def write_archive(self, media_id: str, stream: BinaryIO) -> str:
+        """Writes the stream to a new numbered file in a visible folder"""
+        archive_dir = os.path.join(self.mount_base, "tapehoard_backups", "archives")
+
+        # Determine next file number
+        existing = os.listdir(archive_dir)
+        archives = [int(f.split(".")[0]) for f in existing if f.endswith(".tar")]
+        next_num = max(archives, default=-1) + 1
+
+        file_name = f"{next_num:06d}.tar"
+        target_path = os.path.join(archive_dir, file_name)
+
+        logger.info(f"Writing HDD archive {file_name} to {media_id}")
+        with open(target_path, "wb") as f:
+            shutil.copyfileobj(stream, f)
+
+        return str(next_num)
+
+    def finalize_media(self, media_id: str):
+        """Standard HDD finalization (flush caches)"""
+        os.sync()
+        logger.info(f"Finalized HDD media {media_id}")
+
+    def read_archive(self, media_id: str, location_id: str) -> BinaryIO:
+        file_name = f"{int(location_id):06d}.tar"
+        target_path = os.path.join(
+            self.mount_base, "tapehoard_backups", "archives", file_name
+        )
+        if not os.path.exists(target_path):
+            raise FileNotFoundError(
+                f"Archive {location_id} not found on media {media_id}"
+            )
+        return open(target_path, "rb")
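Preparing a fresh offline disk for this provider amounts to writing the `.tapehoard_id` marker the code above looks for. A sketch (mount point and identifier are examples; the identifier must match the value registered in the database, and the mount point must match the media's `mount_path` config):

```python
# One-time preparation of an offline disk for OfflineHDDProvider.
import os

mount = "/mnt/backup_disk"  # example mount point
with open(os.path.join(mount, ".tapehoard_id"), "w") as f:
    f.write("HDD-0001\n")  # identify_media() strips whitespace when reading
os.makedirs(os.path.join(mount, "tapehoard_backups", "archives"), exist_ok=True)
```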
diff --git a/backend/app/providers/tape.py b/backend/app/providers/tape.py
index e69de29..e2233e4 100644
--- a/backend/app/providers/tape.py
+++ b/backend/app/providers/tape.py
@@ -0,0 +1,92 @@
+import subprocess
+from typing import Optional, BinaryIO, cast
+from .base import AbstractStorageProvider
+from loguru import logger
+
+
+class LTOProvider(AbstractStorageProvider):
+    def __init__(self, device_path: str = "/dev/nst0"):
+        self.device_path = device_path
+
+    def get_name(self) -> str:
+        return "LTO Tape"
+
+    def _run_mt(self, command: str):
+        try:
+            # Split so multi-word commands like "fsf 3" become separate argv
+            # entries instead of a single invalid operation string.
+            subprocess.run(["mt", "-f", self.device_path, *command.split()], check=True)
+        except subprocess.CalledProcessError as e:
+            logger.error(f"Tape command 'mt {command}' failed: {e}")
+            raise
+
+    def identify_media(self) -> Optional[str]:
+        """Reads the label from the beginning of the tape (File Mark 0)"""
+        try:
+            self._run_mt("rewind")
+            # Try to read the label file
+            result = subprocess.run(
+                ["tar", "-xf", self.device_path, "-O", ".tapehoard_label"],
+                capture_output=True,
+                text=True,
+                timeout=30,
+            )
+            if result.returncode == 0:
+                return result.stdout.strip()
+        except Exception as e:
+            logger.error(f"Failed to identify tape: {e}")
+        return None
+
+    def prepare_for_write(self, media_id: str) -> bool:
+        """Fast-forwards to the end of the data to prepare for appending"""
+        current_id = self.identify_media()
+        if current_id != media_id:
+            logger.error(f"Tape mismatch. Expected {media_id}, found {current_id}")
+            return False
+
+        # Move to end of data
+        self._run_mt("eod")
+        return True
+
+    def write_archive(self, media_id: str, stream: BinaryIO) -> str:
+        """Writes the stream to tape and returns the file number index"""
+        logger.info(f"Streaming archive to LTO {media_id} at current head position")
+
+        proc = subprocess.Popen(
+            ["dd", f"of={self.device_path}", "bs=256k"], stdin=subprocess.PIPE
+        )
+
+        if proc.stdin:
+            # Copy stream to dd stdin
+            while True:
+                chunk = stream.read(1024 * 1024)
+                if not chunk:
+                    break
+                proc.stdin.write(chunk)
+
+            proc.stdin.close()
+
+        proc.wait()
+
+        return "unknown"  # To be refined with 'mt status' parsing
+
+    def finalize_media(self, media_id: str):
+        self._run_mt("offline")  # Rewind and eject
+
+    def read_archive(self, media_id: str, location_id: str) -> BinaryIO:
+        # Seek to FM index
+        self._run_mt("rewind")
+        try:
+            loc_int = int(location_id)
+            if loc_int > 0:
+                self._run_mt(f"fsf {loc_int}")
+        except ValueError:
+            pass
+
+        # Return a pipe from dd
+        proc = subprocess.Popen(
+            ["dd", f"if={self.device_path}", "bs=256k"], stdout=subprocess.PIPE
+        )
+
+        if proc.stdout is None:
+            raise RuntimeError("Failed to open pipe from dd")
+
+        return cast(BinaryIO, proc.stdout)
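`identify_media()` expects a tar archive containing `.tapehoard_label` at the beginning of the tape, but nothing in the diff writes that label. A hedged sketch of how a fresh tape could be labeled so the read-back above works (helper name is hypothetical; it assumes the drive accepts tar's default 10 KiB blocking):

```python
# Hypothetical one-time labeling of a new tape for LTOProvider.
import io
import subprocess
import tarfile

def label_tape(device: str, media_id: str) -> None:
    subprocess.run(["mt", "-f", device, "rewind"], check=True)
    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode="w") as tar:
        data = media_id.encode()
        info = tarfile.TarInfo(".tapehoard_label")
        info.size = len(data)
        tar.addfile(info, io.BytesIO(data))
    # Write the label archive as file 0 on the tape.
    with open(device, "wb") as dev:
        dev.write(buf.getvalue())
```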
diff --git a/backend/app/services/archiver.py b/backend/app/services/archiver.py
index e69de29..338a4e7 100644
--- a/backend/app/services/archiver.py
+++ b/backend/app/services/archiver.py
@@ -0,0 +1,191 @@
+import os
+import tarfile
+import json
+from datetime import datetime, timezone
+from typing import List, Optional, Dict, Any
+from loguru import logger
+from sqlalchemy.orm import Session
+from sqlalchemy import not_
+from app.db import models
+from app.services.scanner import JobManager
+from app.providers.hdd import OfflineHDDProvider
+from app.providers.tape import LTOProvider
+from app.providers.cloud import CloudStorageProvider
+
+
+class ArchiverService:
+    def __init__(self, staging_dir: str = "/staging"):
+        self.staging_dir = staging_dir
+        if not os.path.exists(self.staging_dir):
+            try:
+                os.makedirs(self.staging_dir, exist_ok=True)
+            except Exception:
+                # Fallback for local dev without /staging
+                self.staging_dir = os.path.join(os.getcwd(), "staging_area")
+                os.makedirs(self.staging_dir, exist_ok=True)
+
+    def _get_provider(self, media: models.StorageMedia):
+        config: Dict[str, Any] = {}
+        if media.extra_config:
+            try:
+                config = json.loads(media.extra_config)
+            except Exception:
+                pass
+
+        if media.media_type == "tape":
+            return LTOProvider(device_path=config.get("device_path", "/dev/nst0"))
+        elif media.media_type == "hdd":
+            return OfflineHDDProvider(
+                mount_base=config.get("mount_path", "/mnt/backup")
+            )
+        elif media.media_type == "cloud":
+            return CloudStorageProvider(config=config)
+        return None
+
+    def get_eligible_files(self, db: Session) -> List[models.FilesystemState]:
+        """Returns files that are indexed but have no version on any media"""
+        return (
+            db.query(models.FilesystemState)
+            .filter(
+                models.FilesystemState.is_indexed,
+                not_(models.FilesystemState.is_ignored),
+                not_(models.FilesystemState.versions.any()),
+            )
+            .all()
+        )
+
+    def create_backup_set(
+        self, db: Session, media_id: int, max_bytes: Optional[int] = None
+    ) -> List[models.FilesystemState]:
+        """Selects a batch of files that fit on the media's remaining capacity"""
+        media = db.query(models.StorageMedia).get(media_id)
+        if not media:
+            return []
+
+        remaining_capacity = media.capacity - media.bytes_used
+        if max_bytes:
+            remaining_capacity = min(remaining_capacity, max_bytes)
+
+        eligible = self.get_eligible_files(db)
+
+        # Simple greedy (first-fit) packing
+        backup_set = []
+        current_size = 0
+
+        for f in eligible:
+            if current_size + f.size <= remaining_capacity:
+                backup_set.append(f)
+                current_size += f.size
+
+        return backup_set
+
Finalize & Record + provider.finalize_media(media.identifier) + + # Update database records + for f_state in backup_set: + version = models.FileVersion( + filesystem_state_id=f_state.id, + media_id=media.id, + file_number=location_id, + ) + db.add(version) + + media.bytes_used += os.path.getsize(staging_path) + db.commit() + + JobManager.complete_job(job_id) + logger.info(f"Backup job {job_id} completed successfully") + + except Exception as e: + logger.exception(f"Backup failed: {e}") + JobManager.fail_job(job_id, str(e)) + finally: + if os.path.exists(staging_path): + os.remove(staging_path) + + +archiver_manager = ArchiverService() diff --git a/backend/app/services/scanner.py b/backend/app/services/scanner.py index e69de29..95f5056 100644 --- a/backend/app/services/scanner.py +++ b/backend/app/services/scanner.py @@ -0,0 +1,448 @@ +import os +import hashlib +from datetime import datetime, timezone +from typing import Dict, List, Optional, Set, Tuple, Any, cast +from loguru import logger +from sqlalchemy.orm import Session +from app.db import models +from app.db.database import SessionLocal +import concurrent.futures +import pathspec +import json + + +class JobManager: + # Set of job IDs that have been requested to cancel + _cancelled_jobs: Set[int] = set() + + @staticmethod + def create_job(db: Session, job_type: str) -> models.Job: + job = models.Job(job_type=job_type, status="PENDING") + db.add(job) + db.commit() + db.refresh(job) + return job + + @staticmethod + def start_job(job_id: int): + db = SessionLocal() + try: + job = db.get(models.Job, job_id) + if job: + job.status = "RUNNING" + job.started_at = datetime.now(timezone.utc) + db.commit() + finally: + db.close() + + @staticmethod + def update_job(job_id: int, progress: float, current_task: str): + db = SessionLocal() + try: + job = db.get(models.Job, job_id) + if job: + job.progress = progress + job.current_task = current_task + db.commit() + finally: + db.close() + + @staticmethod + def complete_job(job_id: int): + db = SessionLocal() + try: + job = db.get(models.Job, job_id) + if job: + job.status = "COMPLETED" + job.progress = 100.0 + job.completed_at = datetime.now(timezone.utc) + db.commit() + if job_id in JobManager._cancelled_jobs: + JobManager._cancelled_jobs.remove(job_id) + finally: + db.close() + + @staticmethod + def fail_job(job_id: int, error_message: str): + db = SessionLocal() + try: + job = db.get(models.Job, job_id) + if job: + job.status = "FAILED" + job.error_message = error_message + job.completed_at = datetime.now(timezone.utc) + db.commit() + if job_id in JobManager._cancelled_jobs: + JobManager._cancelled_jobs.remove(job_id) + finally: + db.close() + + @staticmethod + def cancel_job(job_id: int): + JobManager._cancelled_jobs.add(job_id) + db = SessionLocal() + try: + job = db.get(models.Job, job_id) + if job and job.status in ["PENDING", "RUNNING"]: + job.status = "FAILED" + job.error_message = "Cancelled by user" + job.completed_at = datetime.now(timezone.utc) + db.commit() + finally: + db.close() + + @staticmethod + def is_cancelled(job_id: int) -> bool: + return job_id in JobManager._cancelled_jobs + + +class ScannerService: + def __init__(self): + self.is_running = False + self.last_run_time: Optional[datetime] = None + self.files_processed = 0 + self.files_hashed = 0 + self.total_files_found = 0 + self.current_path = "" + + def compute_sha256(self, file_path: str, job_id: Optional[int] = None) -> str: + sha256_hash = hashlib.sha256() + try: + with open(file_path, "rb") as f: + for byte_block in 
diff --git a/backend/app/services/scanner.py b/backend/app/services/scanner.py
index e69de29..95f5056 100644
--- a/backend/app/services/scanner.py
+++ b/backend/app/services/scanner.py
@@ -0,0 +1,449 @@
+import os
+import hashlib
+from datetime import datetime, timezone
+from typing import Dict, List, Optional, Set, Tuple, Any, cast
+from loguru import logger
+from sqlalchemy.orm import Session
+from app.db import models
+from app.db.database import SessionLocal
+import concurrent.futures
+import pathspec
+import json
+
+
+class JobManager:
+    # Set of job IDs that have been requested to cancel
+    _cancelled_jobs: Set[int] = set()
+
+    @staticmethod
+    def create_job(db: Session, job_type: str) -> models.Job:
+        job = models.Job(job_type=job_type, status="PENDING")
+        db.add(job)
+        db.commit()
+        db.refresh(job)
+        return job
+
+    @staticmethod
+    def start_job(job_id: int):
+        db = SessionLocal()
+        try:
+            job = db.get(models.Job, job_id)
+            if job:
+                job.status = "RUNNING"
+                job.started_at = datetime.now(timezone.utc)
+                db.commit()
+        finally:
+            db.close()
+
+    @staticmethod
+    def update_job(job_id: int, progress: float, current_task: str):
+        db = SessionLocal()
+        try:
+            job = db.get(models.Job, job_id)
+            if job:
+                job.progress = progress
+                job.current_task = current_task
+                db.commit()
+        finally:
+            db.close()
+
+    @staticmethod
+    def complete_job(job_id: int):
+        db = SessionLocal()
+        try:
+            job = db.get(models.Job, job_id)
+            if job:
+                job.status = "COMPLETED"
+                job.progress = 100.0
+                job.completed_at = datetime.now(timezone.utc)
+                db.commit()
+            if job_id in JobManager._cancelled_jobs:
+                JobManager._cancelled_jobs.remove(job_id)
+        finally:
+            db.close()
+
+    @staticmethod
+    def fail_job(job_id: int, error_message: str):
+        db = SessionLocal()
+        try:
+            job = db.get(models.Job, job_id)
+            if job:
+                job.status = "FAILED"
+                job.error_message = error_message
+                job.completed_at = datetime.now(timezone.utc)
+                db.commit()
+            if job_id in JobManager._cancelled_jobs:
+                JobManager._cancelled_jobs.remove(job_id)
+        finally:
+            db.close()
+
+    @staticmethod
+    def cancel_job(job_id: int):
+        JobManager._cancelled_jobs.add(job_id)
+        db = SessionLocal()
+        try:
+            job = db.get(models.Job, job_id)
+            if job and job.status in ["PENDING", "RUNNING"]:
+                job.status = "FAILED"
+                job.error_message = "Cancelled by user"
+                job.completed_at = datetime.now(timezone.utc)
+                db.commit()
+        finally:
+            db.close()
+
+    @staticmethod
+    def is_cancelled(job_id: int) -> bool:
+        return job_id in JobManager._cancelled_jobs
+
+
+class ScannerService:
+    def __init__(self):
+        self.is_running = False
+        self.last_run_time: Optional[datetime] = None
+        self.files_processed = 0
+        self.files_hashed = 0
+        self.total_files_found = 0
+        self.current_path = ""
+
+    def compute_sha256(self, file_path: str, job_id: Optional[int] = None) -> str:
+        sha256_hash = hashlib.sha256()
+        try:
+            with open(file_path, "rb") as f:
+                for byte_block in iter(lambda: f.read(1048576), b""):
+                    # Check for cancellation during block read
+                    if job_id is not None and JobManager.is_cancelled(job_id):
+                        return ""
+                    sha256_hash.update(byte_block)
+            return sha256_hash.hexdigest()
+        except Exception as e:
+            logger.error(f"Failed to hash {file_path}: {e}")
+            return ""
+
+    def _get_exclusion_spec(self, db: Session) -> Optional[pathspec.PathSpec]:
+        setting = (
+            db.query(models.SystemSetting)
+            .filter(models.SystemSetting.key == "global_exclusions")
+            .first()
+        )
+        if not setting or not setting.value.strip():
+            return None
+        patterns = [p.strip() for p in setting.value.splitlines() if p.strip()]
+        return pathspec.PathSpec.from_lines("gitwildmatch", patterns)
+
+    def _get_source_roots(self, db: Session) -> List[str]:
+        setting = (
+            db.query(models.SystemSetting)
+            .filter(models.SystemSetting.key == "source_roots")
+            .first()
+        )
+        if setting:
+            try:
+                val = json.loads(setting.value)
+                if isinstance(val, list):
+                    return [str(v) for v in val]
+                return [str(val)]
+            except Exception:
+                return [str(setting.value)]
+        local_source = os.path.abspath(os.path.join(os.getcwd(), "..", "source_data"))
+        if os.path.exists(local_source):
+            return [local_source]
+        return ["/source_data"]
+
+            # 2. Optimized Discovery & Sync Phase
+            BATCH_SIZE = 1000
+            pending_files: List[Dict[str, Any]] = []
+            now = datetime.now(timezone.utc)
+
+            with concurrent.futures.ThreadPoolExecutor(
+                max_workers=os.cpu_count()
+            ) as executor:
+                hashing_futures: List[concurrent.futures.Future] = []
+
+                for root_path in roots:
+                    if job_id is not None and JobManager.is_cancelled(job_id):
+                        break
+                    if not os.path.exists(root_path):
+                        continue
+                    for root, dirs, files in os.walk(root_path):
+                        if job_id is not None and JobManager.is_cancelled(job_id):
+                            break
+
+                        # Prune globally-ignored directories in place so
+                        # os.walk never descends into them
+                        original_dirs = list(dirs)
+                        for d in original_dirs:
+                            d_path = os.path.join(root, d)
+                            if spec and spec.match_file(d_path + "/"):
+                                dirs.remove(d)
+
+                        for file in files:
+                            if job_id is not None and JobManager.is_cancelled(job_id):
+                                break
+                            full_path = os.path.join(root, file)
+                            try:
+                                stats = os.stat(full_path)
+                                tracked, ignored = get_tracking_status(full_path)
+                                self.total_files_found += 1
+                                pending_files.append(
+                                    {
+                                        "path": full_path,
+                                        "size": stats.st_size,
+                                        "mtime": stats.st_mtime,
+                                        "tracked": tracked,
+                                        "ignored": ignored,
+                                    }
+                                )
+                            except Exception:
+                                continue
+
+                            if len(pending_files) >= BATCH_SIZE:
+                                self._sync_batch(
+                                    db,
+                                    pending_files,
+                                    executor,
+                                    hashing_futures,
+                                    now,
+                                    job_id,
+                                )
+                                pending_files = []
+
+                if job_id is not None and JobManager.is_cancelled(job_id):
+                    logger.info("Scan task detected cancellation. Stopping.")
+                    return
+
+                if pending_files:
+                    self._sync_batch(
+                        db, pending_files, executor, hashing_futures, now, job_id
+                    )
+
+                if hashing_futures:
+                    if job_id is not None:
+                        JobManager.update_job(
+                            job_id,
+                            50.0,
+                            f"Processing {len(hashing_futures)} hashing tasks...",
+                        )
+
+                    for future in concurrent.futures.as_completed(hashing_futures):
+                        if job_id is not None and JobManager.is_cancelled(job_id):
+                            break
+                        result = future.result()
+                        if result:
+                            f_path, f_size, f_mtime, f_hash, f_ignored, f_new = result
+                            if (
+                                f_hash == ""
+                                and job_id is not None
+                                and JobManager.is_cancelled(job_id)
+                            ):
+                                continue  # Cancelled during hash
+
+                            ext = (
+                                db.query(models.FilesystemState)
+                                .filter(models.FilesystemState.file_path == f_path)
+                                .first()
+                            )
+                            if f_new and not ext:
+                                db.add(
+                                    models.FilesystemState(
+                                        file_path=f_path,
+                                        size=f_size,
+                                        mtime=f_mtime,
+                                        sha256_hash=f_hash,
+                                        # "" means the hash failed, so the file
+                                        # is not actually indexed yet
+                                        is_indexed=bool(f_hash),
+                                        is_ignored=f_ignored,
+                                        last_seen_timestamp=now,
+                                    )
+                                )
+                            elif ext:
+                                ext.size = f_size
+                                ext.mtime = f_mtime
+                                ext.sha256_hash = f_hash
+                                ext.is_indexed = bool(f_hash)
+                                ext.is_ignored = f_ignored
+                                ext.last_seen_timestamp = now
+
+                            if f_hash:
+                                self.files_hashed += 1
+                            self.files_processed += 1
+                            if self.files_processed % 50 == 0:
+                                db.commit()
+                                if job_id is not None and self.total_files_found > 0:
+                                    prog = 10.0 + (
+                                        90.0
+                                        * (
+                                            self.files_processed
+                                            / self.total_files_found
+                                        )
+                                    )
+                                    JobManager.update_job(
+                                        job_id,
+                                        round(prog, 1),
+                                        f"Hashing & Indexing: {self.files_processed}/{self.total_files_found}...",
+                                    )
+
+            if job_id is not None and JobManager.is_cancelled(job_id):
+                return
+
+            db.commit()
+            self.last_run_time = datetime.now(timezone.utc)
+            if job_id is not None:
+                JobManager.complete_job(job_id)
+
+        except Exception as e:
+            logger.exception(f"Scan failed: {e}")
+            db.rollback()
+            if job_id is not None:
+                JobManager.fail_job(job_id, str(e))
+        finally:
+            self.is_running = False
+            self.current_path = ""
+
+    def _sync_batch(
+        self, db: Session, files: List[Dict[str, Any]], executor, futures, now, job_id
+    ):
+        if job_id is not None and JobManager.is_cancelled(job_id):
+            return
+
+        paths = [f["path"] for f in files]
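+        # Fetch every candidate row for this batch in a single SELECT, keyed
+        # by path, so the sync loop below does O(1) lookups instead of one
+        # query per file.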
existing_records = { + r.file_path: r + for r in db.query(models.FilesystemState) + .filter(models.FilesystemState.file_path.in_(paths)) + .all() + } + + for f in files: + if job_id is not None and JobManager.is_cancelled(job_id): + return + self.current_path = f["path"] + ext = existing_records.get(f["path"]) + + needs_metadata_update = ( + not ext + or ext.size != f["size"] + or ext.mtime != f["mtime"] + or ext.is_ignored != f["ignored"] + ) + # We hash if tracked AND NOT ignored + needs_hashing = ( + f["tracked"] + and not f["ignored"] + and (not ext or not ext.sha256_hash or needs_metadata_update) + ) + + if needs_hashing: + futures.append( + executor.submit( + self._process_file, + f["path"], + f["size"], + f["mtime"], + f["tracked"], + f["ignored"], + True, + ext is None, + job_id, + ) + ) + elif needs_metadata_update or ext: + if not ext: + db.add( + models.FilesystemState( + file_path=f["path"], + size=f["size"], + mtime=f["mtime"], + sha256_hash=None, + is_indexed=False, + is_ignored=f["ignored"], + last_seen_timestamp=now, + ) + ) + else: + ext.size = f["size"] + ext.mtime = f["mtime"] + ext.is_ignored = f["ignored"] + ext.is_indexed = ext.sha256_hash is not None + ext.last_seen_timestamp = now + self.files_processed += 1 + + db.commit() + if job_id is not None: + JobManager.update_job( + job_id, 10.0, f"Discovered and synced {self.files_processed} items..." + ) + + def _process_file( + self, + file_path: str, + size: int, + mtime: float, + tracked: bool, + ignored: bool, + hash_it: bool, + is_new: bool, + job_id: Optional[int] = None, + ): + if job_id is not None and JobManager.is_cancelled(job_id): + return (file_path, size, mtime, "", ignored, is_new) + sha_hash = self.compute_sha256(file_path, job_id) if hash_it else None + return (file_path, size, mtime, sha_hash, ignored, is_new) + + +scanner_manager = ScannerService() diff --git a/backend/pyproject.toml b/backend/pyproject.toml index 2398785..78692f3 100644 --- a/backend/pyproject.toml +++ b/backend/pyproject.toml @@ -14,6 +14,8 @@ dependencies = [ "apprise", "loguru>=0.7.3", "pydantic-settings>=2.14.0", + "pathspec>=1.1.0", + "boto3>=1.42.94", ] [build-system] diff --git a/backend/uv.lock b/backend/uv.lock index 2d768bf..55016b4 100644 --- a/backend/uv.lock +++ b/backend/uv.lock @@ -79,6 +79,34 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9f/64/2e54428beba8d9992aa478bb8f6de9e4ecaa5f8f513bcfd567ed7fb0262d/apscheduler-3.11.2-py3-none-any.whl", hash = "sha256:ce005177f741409db4e4dd40a7431b76feb856b9dd69d57e0da49d6715bfd26d", size = 64439, upload-time = "2025-12-22T00:39:33.303Z" }, ] +[[package]] +name = "boto3" +version = "1.42.94" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, + { name = "jmespath" }, + { name = "s3transfer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6a/6a/95302333208830de932ad1d0b69599ee13e936349a44981fb72632507861/boto3-1.42.94.tar.gz", hash = "sha256:5b6056a661c19e974aaea3cb97690ddbe30d10c31e4f887df3bff06574f34510", size = 113211, upload-time = "2026-04-22T20:36:19.167Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/6f/4e175604f3168befcb413c95bf45eada67d12042f92f76a9305d6a817ea9/boto3-1.42.94-py3-none-any.whl", hash = "sha256:56d53bce75629cc7c78a32da8b62de74cee3e2a3d54a2b60ba1a65f9f1b129da", size = 140555, upload-time = "2026-04-22T20:36:16.182Z" }, +] + +[[package]] +name = "botocore" +version = "1.42.94" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jmespath" 
}, + { name = "python-dateutil" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b7/90/1a4d0e81b325d38e37f81d907ceacac3b8f509ad38b495bb95086ecb609d/botocore-1.42.94.tar.gz", hash = "sha256:41c6b3b11b073221a41f52b222ba387be34459fb77cdc506e8b74cdaf24bdcce", size = 15260901, upload-time = "2026-04-22T20:36:00.853Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/73/313af9ee02ac0155247bcf3f04fcf54fcae2e33250bb437528c18aeefd81/botocore-1.42.94-py3-none-any.whl", hash = "sha256:a2143742132ed0f6cdb90204d667b89d0301068b1045e8bc099efa267bf1b348", size = 14942938, upload-time = "2026-04-22T20:35:55.663Z" }, +] + [[package]] name = "certifi" version = "2026.4.22" @@ -414,6 +442,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, ] +[[package]] +name = "jmespath" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d3/59/322338183ecda247fb5d1763a6cbe46eff7222eaeebafd9fa65d4bf5cb11/jmespath-1.1.0.tar.gz", hash = "sha256:472c87d80f36026ae83c6ddd0f1d05d4e510134ed462851fd5f754c8c3cbb88d", size = 27377, upload-time = "2026-01-22T16:35:26.279Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/2f/967ba146e6d58cf6a652da73885f52fc68001525b4197effc174321d70b4/jmespath-1.1.0-py3-none-any.whl", hash = "sha256:a5663118de4908c91729bea0acadca56526eb2698e83de10cd116ae0f4e97c64", size = 20419, upload-time = "2026-01-22T16:35:24.919Z" }, +] + [[package]] name = "loguru" version = "0.7.3" @@ -560,6 +597,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7a/c2/920ef838e2f0028c8262f16101ec09ebd5969864e5a64c4c05fad0617c56/packaging-26.1-py3-none-any.whl", hash = "sha256:5d9c0669c6285e491e0ced2eee587eaf67b670d94a19e94e3984a481aba6802f", size = 95831, upload-time = "2026-04-14T21:12:47.56Z" }, ] +[[package]] +name = "pathspec" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2e/17/9c3094b822982b9f1ea666d8580ce59000f61f87c1663556fb72031ad9ec/pathspec-1.1.0.tar.gz", hash = "sha256:f5d7c555da02fd8dde3e4a2354b6aba817a89112fa8f333f7917a2a4834dd080", size = 133918, upload-time = "2026-04-23T01:46:22.298Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/c9/8eed0486f074e9f1ca7f8ce5ad663e65f12fdab344028d658fa1b03d35e0/pathspec-1.1.0-py3-none-any.whl", hash = "sha256:574b128f7456bd899045ccd142dd446af7e6cfd0072d63ad73fbc55fbb4aaa42", size = 56264, upload-time = "2026-04-23T01:46:20.606Z" }, +] + [[package]] name = "platformdirs" version = "4.9.6" @@ -775,6 +821,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d4/24/a372aaf5c9b7208e7112038812994107bc65a84cd00e0354a88c2c77a617/pytest-9.0.3-py3-none-any.whl", hash = "sha256:2c5efc453d45394fdd706ade797c0a81091eccd1d6e4bccfcd476e2b8e0ab5d9", size = 375249, upload-time = "2026-04-07T17:16:16.13Z" }, ] +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = 
"sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + [[package]] name = "python-discovery" version = "1.2.2" @@ -914,6 +972,27 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/63/b6/aeadee5443e49baa2facd51131159fd6301cc4ccfc1541e4df7b021c37dd/ruff-0.15.11-py3-none-win_arm64.whl", hash = "sha256:063fed18cc1bbe0ee7393957284a6fe8b588c6a406a285af3ee3f46da2391ee4", size = 11032614, upload-time = "2026-04-16T18:46:34.487Z" }, ] +[[package]] +name = "s3transfer" +version = "0.16.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/46/29/af14f4ef3c11a50435308660e2cc68761c9a7742475e0585cd4396b91777/s3transfer-0.16.1.tar.gz", hash = "sha256:8e424355754b9ccb32467bdc568edf55be82692ef2002d934b1311dbb3b9e524", size = 154801, upload-time = "2026-04-22T20:36:06.475Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/19/90d7d4ed51932c022d53f1d02d564b62d10e272692a1f9b76425c1ad2a02/s3transfer-0.16.1-py3-none-any.whl", hash = "sha256:61bcd00ccb83b21a0fe7e91a553fff9729d46c83b4e0106e7c314a733891f7c2", size = 86825, upload-time = "2026-04-22T20:36:04.992Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + [[package]] name = "sqlalchemy" version = "2.0.49" @@ -995,8 +1074,10 @@ dependencies = [ { name = "alembic" }, { name = "apprise" }, { name = "apscheduler" }, + { name = "boto3" }, { name = "fastapi" }, { name = "loguru" }, + { name = "pathspec" }, { name = "prometheus-client" }, { name = "pydantic-settings" }, { name = "sqlalchemy" }, @@ -1016,8 +1097,10 @@ requires-dist = [ { name = "alembic" }, { name = "apprise" }, { name = "apscheduler" }, + { name = "boto3", specifier = ">=1.42.94" }, { name = "fastapi" }, { name = "loguru", specifier = ">=0.7.3" }, + { name = "pathspec", specifier = ">=1.1.0" }, { name = "prometheus-client" }, { name = "pydantic-settings", specifier = ">=2.14.0" }, { name = "sqlalchemy" }, diff --git a/frontend/src/app.css b/frontend/src/app.css index 05057bd..3c29467 100644 --- a/frontend/src/app.css +++ b/frontend/src/app.css @@ -76,56 +76,10 @@ background-color: var(--color-bg-primary); } -nav { - width: 240px; - background-color: var(--color-bg-secondary); - border-right: 1px solid var(--color-border-color); - display: flex; - flex-direction: column; - padding: var(--spacing-lg); - flex-shrink: 0; -} - -nav h2 { - font-size: 1.5rem; - margin-bottom: var(--spacing-xl); - font-weight: 700; - display: flex; - 
align-items: center; - gap: var(--spacing-sm); - color: var(--color-text-primary); -} - -nav ul { - list-style: none; - padding: 0; - margin: 0; -} - -nav ul li { - margin-bottom: var(--spacing-sm); -} - -nav ul li a { - color: var(--color-text-secondary); - text-decoration: none; - font-weight: 500; - display: flex; - align-items: center; - gap: var(--spacing-md); - padding: var(--spacing-sm) var(--spacing-md); - border-radius: var(--radius-md); - transition: all 0.2s ease; -} - -nav ul li a:hover, nav ul li a.active { - background-color: var(--color-bg-tertiary); - color: var(--color-text-primary); -} +/* Removed legacy 'nav' styles that were causing sidebar layout issues */ main { flex: 1; - padding: var(--spacing-xl); overflow-y: auto; } diff --git a/frontend/src/lib/api/index.ts b/frontend/src/lib/api/index.ts index 57cc10a..db7c74e 100644 --- a/frontend/src/lib/api/index.ts +++ b/frontend/src/lib/api/index.ts @@ -1,4 +1,4 @@ // This file is auto-generated by @hey-api/openapi-ts -export { browseIndexInventoryBrowseGet, browsePathSystemBrowseGet, getTreeSystemTreeGet, listBackupsBackupsGet, type Options, readRootGet, trackBatchSystemTrackBatchPost, trackPathSystemTrackPost } from './sdk.gen'; -export type { AppApiInventoryFileItemSchema, AppApiSystemFileItemSchema, BatchTrackRequest, BrowseIndexInventoryBrowseGetData, BrowseIndexInventoryBrowseGetError, BrowseIndexInventoryBrowseGetErrors, BrowseIndexInventoryBrowseGetResponse, BrowseIndexInventoryBrowseGetResponses, BrowsePathSystemBrowseGetData, BrowsePathSystemBrowseGetError, BrowsePathSystemBrowseGetErrors, BrowsePathSystemBrowseGetResponse, BrowsePathSystemBrowseGetResponses, ClientOptions, GetTreeSystemTreeGetData, GetTreeSystemTreeGetError, GetTreeSystemTreeGetErrors, GetTreeSystemTreeGetResponse, GetTreeSystemTreeGetResponses, HttpValidationError, ListBackupsBackupsGetData, ListBackupsBackupsGetResponses, ReadRootGetData, ReadRootGetResponses, TrackBatchSystemTrackBatchPostData, TrackBatchSystemTrackBatchPostError, TrackBatchSystemTrackBatchPostErrors, TrackBatchSystemTrackBatchPostResponses, TrackPathSystemTrackPostData, TrackPathSystemTrackPostError, TrackPathSystemTrackPostErrors, TrackPathSystemTrackPostResponses, TrackToggleRequest, TreeNodeSchema, ValidationError } from './types.gen'; +export { addDirectoryToCartRestoresCartDirectoryPost, addToCartRestoresCartFileIdPost, browseIndexInventoryBrowseGet, browsePathSystemBrowseGet, cancelJobSystemJobsJobIdCancelPost, clearCartRestoresCartClearPost, deleteMediaInventoryMediaMediaIdDelete, getDashboardStatsSystemDashboardStatsGet, getIndexTreeInventoryTreeGet, getItemMetadataInventoryMetadataGet, getManifestRestoresManifestGet, getScanStatusSystemScanStatusGet, getSettingsSystemSettingsGet, getTreeSystemTreeGet, listBackupsBackupsGet, listCartRestoresCartGet, listInventoryInventoryGet, listJobsSystemJobsGet, listMediaInventoryMediaGet, type Options, readRootGet, registerMediaInventoryMediaPost, removeFromCartRestoresCartItemIdDelete, streamJobsSystemJobsStreamGet, trackBatchSystemTrackBatchPost, triggerBackupBackupsTriggerMediaIdPost, triggerScanSystemScanPost, updateMediaInventoryMediaMediaIdPatch, updateSettingSystemSettingsPost } from './sdk.gen'; +export type { AddDirectoryToCartRestoresCartDirectoryPostData, AddDirectoryToCartRestoresCartDirectoryPostError, AddDirectoryToCartRestoresCartDirectoryPostErrors, AddDirectoryToCartRestoresCartDirectoryPostResponses, AddToCartRestoresCartFileIdPostData, AddToCartRestoresCartFileIdPostError, AddToCartRestoresCartFileIdPostErrors, 
AddToCartRestoresCartFileIdPostResponses, AppApiInventoryFileItemSchema, AppApiSystemFileItemSchema, BatchTrackRequest, BrowseIndexInventoryBrowseGetData, BrowseIndexInventoryBrowseGetError, BrowseIndexInventoryBrowseGetErrors, BrowseIndexInventoryBrowseGetResponse, BrowseIndexInventoryBrowseGetResponses, BrowsePathSystemBrowseGetData, BrowsePathSystemBrowseGetError, BrowsePathSystemBrowseGetErrors, BrowsePathSystemBrowseGetResponse, BrowsePathSystemBrowseGetResponses, CancelJobSystemJobsJobIdCancelPostData, CancelJobSystemJobsJobIdCancelPostError, CancelJobSystemJobsJobIdCancelPostErrors, CancelJobSystemJobsJobIdCancelPostResponses, CartItemSchema, ClearCartRestoresCartClearPostData, ClearCartRestoresCartClearPostResponses, ClientOptions, DashboardStatsSchema, DeleteMediaInventoryMediaMediaIdDeleteData, DeleteMediaInventoryMediaMediaIdDeleteError, DeleteMediaInventoryMediaMediaIdDeleteErrors, DeleteMediaInventoryMediaMediaIdDeleteResponses, DirectoryCartRequest, FileVersionSchema, GetDashboardStatsSystemDashboardStatsGetData, GetDashboardStatsSystemDashboardStatsGetResponse, GetDashboardStatsSystemDashboardStatsGetResponses, GetIndexTreeInventoryTreeGetData, GetIndexTreeInventoryTreeGetError, GetIndexTreeInventoryTreeGetErrors, GetIndexTreeInventoryTreeGetResponse, GetIndexTreeInventoryTreeGetResponses, GetItemMetadataInventoryMetadataGetData, GetItemMetadataInventoryMetadataGetError, GetItemMetadataInventoryMetadataGetErrors, GetItemMetadataInventoryMetadataGetResponse, GetItemMetadataInventoryMetadataGetResponses, GetManifestRestoresManifestGetData, GetManifestRestoresManifestGetResponse, GetManifestRestoresManifestGetResponses, GetScanStatusSystemScanStatusGetData, GetScanStatusSystemScanStatusGetResponse, GetScanStatusSystemScanStatusGetResponses, GetSettingsSystemSettingsGetData, GetSettingsSystemSettingsGetResponse, GetSettingsSystemSettingsGetResponses, GetTreeSystemTreeGetData, GetTreeSystemTreeGetError, GetTreeSystemTreeGetErrors, GetTreeSystemTreeGetResponses, HttpValidationError, ItemMetadataSchema, JobSchema, ListBackupsBackupsGetData, ListBackupsBackupsGetResponses, ListCartRestoresCartGetData, ListCartRestoresCartGetResponse, ListCartRestoresCartGetResponses, ListInventoryInventoryGetData, ListInventoryInventoryGetResponses, ListJobsSystemJobsGetData, ListJobsSystemJobsGetError, ListJobsSystemJobsGetErrors, ListJobsSystemJobsGetResponse, ListJobsSystemJobsGetResponses, ListMediaInventoryMediaGetData, ListMediaInventoryMediaGetResponse, ListMediaInventoryMediaGetResponses, ManifestMediaRequirement, MediaCreateSchema, MediaSchema, MediaUpdateSchema, ReadRootGetData, ReadRootGetResponses, RegisterMediaInventoryMediaPostData, RegisterMediaInventoryMediaPostError, RegisterMediaInventoryMediaPostErrors, RegisterMediaInventoryMediaPostResponse, RegisterMediaInventoryMediaPostResponses, RemoveFromCartRestoresCartItemIdDeleteData, RemoveFromCartRestoresCartItemIdDeleteError, RemoveFromCartRestoresCartItemIdDeleteErrors, RemoveFromCartRestoresCartItemIdDeleteResponses, RestoreManifestSchema, ScanStatusSchema, SettingSchema, StreamJobsSystemJobsStreamGetData, StreamJobsSystemJobsStreamGetResponses, TrackBatchSystemTrackBatchPostData, TrackBatchSystemTrackBatchPostError, TrackBatchSystemTrackBatchPostErrors, TrackBatchSystemTrackBatchPostResponses, TreeNodeSchema, TriggerBackupBackupsTriggerMediaIdPostData, TriggerBackupBackupsTriggerMediaIdPostError, TriggerBackupBackupsTriggerMediaIdPostErrors, TriggerBackupBackupsTriggerMediaIdPostResponses, TriggerScanSystemScanPostData, 
TriggerScanSystemScanPostResponses, UpdateMediaInventoryMediaMediaIdPatchData, UpdateMediaInventoryMediaMediaIdPatchError, UpdateMediaInventoryMediaMediaIdPatchErrors, UpdateMediaInventoryMediaMediaIdPatchResponse, UpdateMediaInventoryMediaMediaIdPatchResponses, UpdateSettingSystemSettingsPostData, UpdateSettingSystemSettingsPostError, UpdateSettingSystemSettingsPostErrors, UpdateSettingSystemSettingsPostResponses, ValidationError } from './types.gen'; diff --git a/frontend/src/lib/api/sdk.gen.ts b/frontend/src/lib/api/sdk.gen.ts index 4e09fa0..6ccc879 100644 --- a/frontend/src/lib/api/sdk.gen.ts +++ b/frontend/src/lib/api/sdk.gen.ts @@ -2,7 +2,7 @@ import type { Client, Options as Options2, TDataShape } from './client'; import { client } from './client.gen'; -import type { BrowseIndexInventoryBrowseGetData, BrowseIndexInventoryBrowseGetErrors, BrowseIndexInventoryBrowseGetResponses, BrowsePathSystemBrowseGetData, BrowsePathSystemBrowseGetErrors, BrowsePathSystemBrowseGetResponses, GetTreeSystemTreeGetData, GetTreeSystemTreeGetErrors, GetTreeSystemTreeGetResponses, ListBackupsBackupsGetData, ListBackupsBackupsGetResponses, ReadRootGetData, ReadRootGetResponses, TrackBatchSystemTrackBatchPostData, TrackBatchSystemTrackBatchPostErrors, TrackBatchSystemTrackBatchPostResponses, TrackPathSystemTrackPostData, TrackPathSystemTrackPostErrors, TrackPathSystemTrackPostResponses } from './types.gen'; +import type { AddDirectoryToCartRestoresCartDirectoryPostData, AddDirectoryToCartRestoresCartDirectoryPostErrors, AddDirectoryToCartRestoresCartDirectoryPostResponses, AddToCartRestoresCartFileIdPostData, AddToCartRestoresCartFileIdPostErrors, AddToCartRestoresCartFileIdPostResponses, BrowseIndexInventoryBrowseGetData, BrowseIndexInventoryBrowseGetErrors, BrowseIndexInventoryBrowseGetResponses, BrowsePathSystemBrowseGetData, BrowsePathSystemBrowseGetErrors, BrowsePathSystemBrowseGetResponses, CancelJobSystemJobsJobIdCancelPostData, CancelJobSystemJobsJobIdCancelPostErrors, CancelJobSystemJobsJobIdCancelPostResponses, ClearCartRestoresCartClearPostData, ClearCartRestoresCartClearPostResponses, DeleteMediaInventoryMediaMediaIdDeleteData, DeleteMediaInventoryMediaMediaIdDeleteErrors, DeleteMediaInventoryMediaMediaIdDeleteResponses, GetDashboardStatsSystemDashboardStatsGetData, GetDashboardStatsSystemDashboardStatsGetResponses, GetIndexTreeInventoryTreeGetData, GetIndexTreeInventoryTreeGetErrors, GetIndexTreeInventoryTreeGetResponses, GetItemMetadataInventoryMetadataGetData, GetItemMetadataInventoryMetadataGetErrors, GetItemMetadataInventoryMetadataGetResponses, GetManifestRestoresManifestGetData, GetManifestRestoresManifestGetResponses, GetScanStatusSystemScanStatusGetData, GetScanStatusSystemScanStatusGetResponses, GetSettingsSystemSettingsGetData, GetSettingsSystemSettingsGetResponses, GetTreeSystemTreeGetData, GetTreeSystemTreeGetErrors, GetTreeSystemTreeGetResponses, ListBackupsBackupsGetData, ListBackupsBackupsGetResponses, ListCartRestoresCartGetData, ListCartRestoresCartGetResponses, ListInventoryInventoryGetData, ListInventoryInventoryGetResponses, ListJobsSystemJobsGetData, ListJobsSystemJobsGetErrors, ListJobsSystemJobsGetResponses, ListMediaInventoryMediaGetData, ListMediaInventoryMediaGetResponses, ReadRootGetData, ReadRootGetResponses, RegisterMediaInventoryMediaPostData, RegisterMediaInventoryMediaPostErrors, RegisterMediaInventoryMediaPostResponses, RemoveFromCartRestoresCartItemIdDeleteData, RemoveFromCartRestoresCartItemIdDeleteErrors, RemoveFromCartRestoresCartItemIdDeleteResponses, 
StreamJobsSystemJobsStreamGetData, StreamJobsSystemJobsStreamGetResponses, TrackBatchSystemTrackBatchPostData, TrackBatchSystemTrackBatchPostErrors, TrackBatchSystemTrackBatchPostResponses, TriggerBackupBackupsTriggerMediaIdPostData, TriggerBackupBackupsTriggerMediaIdPostErrors, TriggerBackupBackupsTriggerMediaIdPostResponses, TriggerScanSystemScanPostData, TriggerScanSystemScanPostResponses, UpdateMediaInventoryMediaMediaIdPatchData, UpdateMediaInventoryMediaMediaIdPatchErrors, UpdateMediaInventoryMediaMediaIdPatchResponses, UpdateSettingSystemSettingsPostData, UpdateSettingSystemSettingsPostErrors, UpdateSettingSystemSettingsPostResponses } from './types.gen'; export type Options = Options2 & { /** @@ -18,23 +18,41 @@ export type Options; }; +/** + * Get Dashboard Stats + */ +export const getDashboardStatsSystemDashboardStatsGet = (options?: Options) => (options?.client ?? client).get({ url: '/system/dashboard/stats', ...options }); + +/** + * List Jobs + */ +export const listJobsSystemJobsGet = (options?: Options) => (options?.client ?? client).get({ url: '/system/jobs', ...options }); + +/** + * Cancel Job + */ +export const cancelJobSystemJobsJobIdCancelPost = (options: Options) => (options.client ?? client).post({ url: '/system/jobs/{job_id}/cancel', ...options }); + +/** + * Stream Jobs + */ +export const streamJobsSystemJobsStreamGet = (options?: Options) => (options?.client ?? client).get({ url: '/system/jobs/stream', ...options }); + +/** + * Trigger Scan + */ +export const triggerScanSystemScanPost = (options?: Options) => (options?.client ?? client).post({ url: '/system/scan', ...options }); + +/** + * Get Scan Status + */ +export const getScanStatusSystemScanStatusGet = (options?: Options) => (options?.client ?? client).get({ url: '/system/scan/status', ...options }); + /** * Browse Path */ export const browsePathSystemBrowseGet = (options?: Options) => (options?.client ?? client).get({ url: '/system/browse', ...options }); -/** - * Track Path - */ -export const trackPathSystemTrackPost = (options: Options) => (options.client ?? client).post({ - url: '/system/track', - ...options, - headers: { - 'Content-Type': 'application/json', - ...options.headers - } -}); - /** * Track Batch */ @@ -47,21 +65,129 @@ export const trackBatchSystemTrackBatchPost = (options?: Options) => (options?.client ?? client).get({ url: '/system/settings', ...options }); + +/** + * Update Setting + */ +export const updateSettingSystemSettingsPost = (options: Options) => (options.client ?? client).post({ + url: '/system/settings', + ...options, + headers: { + 'Content-Type': 'application/json', + ...options.headers + } +}); + /** * Get Tree */ export const getTreeSystemTreeGet = (options?: Options) => (options?.client ?? client).get({ url: '/system/tree', ...options }); +/** + * List Media + */ +export const listMediaInventoryMediaGet = (options?: Options) => (options?.client ?? client).get({ url: '/inventory/media', ...options }); + +/** + * Register Media + */ +export const registerMediaInventoryMediaPost = (options: Options) => (options.client ?? client).post({ + url: '/inventory/media', + ...options, + headers: { + 'Content-Type': 'application/json', + ...options.headers + } +}); + +/** + * Delete Media + */ +export const deleteMediaInventoryMediaMediaIdDelete = (options: Options) => (options.client ?? client).delete({ url: '/inventory/media/{media_id}', ...options }); + +/** + * Update Media + */ +export const updateMediaInventoryMediaMediaIdPatch = (options: Options) => (options.client ?? 
client).patch({ + url: '/inventory/media/{media_id}', + ...options, + headers: { + 'Content-Type': 'application/json', + ...options.headers + } +}); + /** * Browse Index */ export const browseIndexInventoryBrowseGet = (options?: Options) => (options?.client ?? client).get({ url: '/inventory/browse', ...options }); +/** + * Get Index Tree + */ +export const getIndexTreeInventoryTreeGet = (options?: Options) => (options?.client ?? client).get({ url: '/inventory/tree', ...options }); + +/** + * Get Item Metadata + */ +export const getItemMetadataInventoryMetadataGet = (options: Options) => (options.client ?? client).get({ url: '/inventory/metadata', ...options }); + +/** + * List Inventory + */ +export const listInventoryInventoryGet = (options?: Options) => (options?.client ?? client).get({ url: '/inventory/', ...options }); + +/** + * Trigger Backup + */ +export const triggerBackupBackupsTriggerMediaIdPost = (options: Options) => (options.client ?? client).post({ url: '/backups/trigger/{media_id}', ...options }); + /** * List Backups */ export const listBackupsBackupsGet = (options?: Options) => (options?.client ?? client).get({ url: '/backups/', ...options }); +/** + * List Cart + */ +export const listCartRestoresCartGet = (options?: Options) => (options?.client ?? client).get({ url: '/restores/cart', ...options }); + +/** + * Add To Cart + */ +export const addToCartRestoresCartFileIdPost = (options: Options) => (options.client ?? client).post({ url: '/restores/cart/{file_id}', ...options }); + +/** + * Add Directory To Cart + */ +export const addDirectoryToCartRestoresCartDirectoryPost = (options: Options) => (options.client ?? client).post({ + url: '/restores/cart/directory', + ...options, + headers: { + 'Content-Type': 'application/json', + ...options.headers + } +}); + +/** + * Remove From Cart + */ +export const removeFromCartRestoresCartItemIdDelete = (options: Options) => (options.client ?? client).delete({ url: '/restores/cart/{item_id}', ...options }); + +/** + * Clear Cart + */ +export const clearCartRestoresCartClearPost = (options?: Options) => (options?.client ?? client).post({ url: '/restores/cart/clear', ...options }); + +/** + * Get Manifest + */ +export const getManifestRestoresManifestGet = (options?: Options) => (options?.client ?? 
client).get({ url: '/restores/manifest', ...options }); + /** * Read Root */ diff --git a/frontend/src/lib/api/types.gen.ts b/frontend/src/lib/api/types.gen.ts index 1aab7e1..831bc44 100644 --- a/frontend/src/lib/api/types.gen.ts +++ b/frontend/src/lib/api/types.gen.ts @@ -18,6 +18,108 @@ export type BatchTrackRequest = { untracks?: Array; }; +/** + * CartItemSchema + */ +export type CartItemSchema = { + /** + * Id + */ + id: number; + /** + * File Path + */ + file_path: string; + /** + * Size + */ + size: number; + /** + * Media Identifiers + */ + media_identifiers: Array; +}; + +/** + * DashboardStatsSchema + */ +export type DashboardStatsSchema = { + /** + * Total Files Indexed + */ + total_files_indexed: number; + /** + * Total Data Size + */ + total_data_size: number; + /** + * Tracked Paths Count + */ + tracked_paths_count: number; + /** + * Unprotected Files Count + */ + unprotected_files_count: number; + /** + * Unprotected Data Size + */ + unprotected_data_size: number; + /** + * Ignored Files Count + */ + ignored_files_count: number; + /** + * Ignored Data Size + */ + ignored_data_size: number; + /** + * Redundancy Ratio + */ + redundancy_ratio: number; + /** + * Last Scan Time + */ + last_scan_time: string | null; + /** + * Media Distribution + */ + media_distribution: { + [key: string]: number; + }; +}; + +/** + * DirectoryCartRequest + */ +export type DirectoryCartRequest = { + /** + * Path + */ + path: string; +}; + +/** + * FileVersionSchema + */ +export type FileVersionSchema = { + /** + * Media Identifier + */ + media_identifier: string; + /** + * Media Type + */ + media_type: string; + /** + * File Number + */ + file_number: string; + /** + * Timestamp + */ + timestamp: string; +}; + /** * HTTPValidationError */ @@ -29,17 +131,267 @@ export type HttpValidationError = { }; /** - * TrackToggleRequest + * ItemMetadataSchema */ -export type TrackToggleRequest = { +export type ItemMetadataSchema = { /** - * Path + * Id */ - path: string; + id?: number | null; /** - * Is Directory + * File Path */ - is_directory?: boolean; + file_path: string; + /** + * Type + */ + type: string; + /** + * Size + */ + size: number; + /** + * Mtime + */ + mtime: number; + /** + * Last Seen Timestamp + */ + last_seen_timestamp: string; + /** + * Sha256 Hash + */ + sha256_hash?: string | null; + /** + * Versions + */ + versions?: Array; + /** + * Child Count + */ + child_count?: number | null; +}; + +/** + * JobSchema + */ +export type JobSchema = { + /** + * Id + */ + id: number; + /** + * Job Type + */ + job_type: string; + /** + * Status + */ + status: string; + /** + * Progress + */ + progress: number; + /** + * Current Task + */ + current_task: string | null; + /** + * Error Message + */ + error_message: string | null; + /** + * Started At + */ + started_at: string | null; + /** + * Completed At + */ + completed_at: string | null; + /** + * Created At + */ + created_at: string; +}; + +/** + * ManifestMediaRequirement + */ +export type ManifestMediaRequirement = { + /** + * Identifier + */ + identifier: string; + /** + * Media Type + */ + media_type: string; + /** + * File Count + */ + file_count: number; + /** + * Total Size + */ + total_size: number; +}; + +/** + * MediaCreateSchema + */ +export type MediaCreateSchema = { + /** + * Media Type + */ + media_type: string; + /** + * Identifier + */ + identifier: string; + /** + * Generation Tier + */ + generation_tier?: string | null; + /** + * Capacity + */ + capacity: number; + /** + * Location + */ + location?: string | null; + /** + * Config 
+ */ + config?: { + [key: string]: unknown; + }; +}; + +/** + * MediaSchema + */ +export type MediaSchema = { + /** + * Id + */ + id: number; + /** + * Media Type + */ + media_type: string; + /** + * Identifier + */ + identifier: string; + /** + * Generation Tier + */ + generation_tier: string | null; + /** + * Capacity + */ + capacity: number; + /** + * Bytes Used + */ + bytes_used: number; + /** + * Location + */ + location: string | null; + /** + * Status + */ + status: string; + /** + * Config + */ + config: { + [key: string]: unknown; + }; +}; + +/** + * MediaUpdateSchema + */ +export type MediaUpdateSchema = { + /** + * Status + */ + status?: string | null; + /** + * Location + */ + location?: string | null; + /** + * Config + */ + config?: { + [key: string]: unknown; + } | null; +}; + +/** + * RestoreManifestSchema + */ +export type RestoreManifestSchema = { + /** + * Total Files + */ + total_files: number; + /** + * Total Size + */ + total_size: number; + /** + * Media Required + */ + media_required: Array; +}; + +/** + * ScanStatusSchema + */ +export type ScanStatusSchema = { + /** + * Is Running + */ + is_running: boolean; + /** + * Files Processed + */ + files_processed: number; + /** + * Files Hashed + */ + files_hashed: number; + /** + * Total Files Found + */ + total_files_found: number; + /** + * Current Path + */ + current_path: string; + /** + * Last Run Time + */ + last_run_time?: string | null; +}; + +/** + * SettingSchema + */ +export type SettingSchema = { + /** + * Key + */ + key: string; + /** + * Value + */ + value: string; }; /** @@ -146,8 +498,132 @@ export type AppApiSystemFileItemSchema = { * Tracked */ tracked?: boolean; + /** + * Ignored + */ + ignored?: boolean; }; +export type GetDashboardStatsSystemDashboardStatsGetData = { + body?: never; + path?: never; + query?: never; + url: '/system/dashboard/stats'; +}; + +export type GetDashboardStatsSystemDashboardStatsGetResponses = { + /** + * Successful Response + */ + 200: DashboardStatsSchema; +}; + +export type GetDashboardStatsSystemDashboardStatsGetResponse = GetDashboardStatsSystemDashboardStatsGetResponses[keyof GetDashboardStatsSystemDashboardStatsGetResponses]; + +export type ListJobsSystemJobsGetData = { + body?: never; + path?: never; + query?: { + /** + * Limit + */ + limit?: number; + }; + url: '/system/jobs'; +}; + +export type ListJobsSystemJobsGetErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type ListJobsSystemJobsGetError = ListJobsSystemJobsGetErrors[keyof ListJobsSystemJobsGetErrors]; + +export type ListJobsSystemJobsGetResponses = { + /** + * Response List Jobs System Jobs Get + * + * Successful Response + */ + 200: Array; +}; + +export type ListJobsSystemJobsGetResponse = ListJobsSystemJobsGetResponses[keyof ListJobsSystemJobsGetResponses]; + +export type CancelJobSystemJobsJobIdCancelPostData = { + body?: never; + path: { + /** + * Job Id + */ + job_id: number; + }; + query?: never; + url: '/system/jobs/{job_id}/cancel'; +}; + +export type CancelJobSystemJobsJobIdCancelPostErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type CancelJobSystemJobsJobIdCancelPostError = CancelJobSystemJobsJobIdCancelPostErrors[keyof CancelJobSystemJobsJobIdCancelPostErrors]; + +export type CancelJobSystemJobsJobIdCancelPostResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + +export type StreamJobsSystemJobsStreamGetData = { + body?: never; + path?: never; + query?: never; + url: '/system/jobs/stream'; +}; + +export 
type StreamJobsSystemJobsStreamGetResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + +export type TriggerScanSystemScanPostData = { + body?: never; + path?: never; + query?: never; + url: '/system/scan'; +}; + +export type TriggerScanSystemScanPostResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + +export type GetScanStatusSystemScanStatusGetData = { + body?: never; + path?: never; + query?: never; + url: '/system/scan/status'; +}; + +export type GetScanStatusSystemScanStatusGetResponses = { + /** + * Successful Response + */ + 200: ScanStatusSchema; +}; + +export type GetScanStatusSystemScanStatusGetResponse = GetScanStatusSystemScanStatusGetResponses[keyof GetScanStatusSystemScanStatusGetResponses]; + export type BrowsePathSystemBrowseGetData = { body?: never; path?: never; @@ -155,7 +631,7 @@ export type BrowsePathSystemBrowseGetData = { /** * Path */ - path?: string; + path?: string | null; }; url: '/system/browse'; }; @@ -180,29 +656,6 @@ export type BrowsePathSystemBrowseGetResponses = { export type BrowsePathSystemBrowseGetResponse = BrowsePathSystemBrowseGetResponses[keyof BrowsePathSystemBrowseGetResponses]; -export type TrackPathSystemTrackPostData = { - body: TrackToggleRequest; - path?: never; - query?: never; - url: '/system/track'; -}; - -export type TrackPathSystemTrackPostErrors = { - /** - * Validation Error - */ - 422: HttpValidationError; -}; - -export type TrackPathSystemTrackPostError = TrackPathSystemTrackPostErrors[keyof TrackPathSystemTrackPostErrors]; - -export type TrackPathSystemTrackPostResponses = { - /** - * Successful Response - */ - 200: unknown; -}; - export type TrackBatchSystemTrackBatchPostData = { body: BatchTrackRequest; path?: never; @@ -226,6 +679,49 @@ export type TrackBatchSystemTrackBatchPostResponses = { 200: unknown; }; +export type GetSettingsSystemSettingsGetData = { + body?: never; + path?: never; + query?: never; + url: '/system/settings'; +}; + +export type GetSettingsSystemSettingsGetResponses = { + /** + * Response Get Settings System Settings Get + * + * Successful Response + */ + 200: { + [key: string]: string; + }; +}; + +export type GetSettingsSystemSettingsGetResponse = GetSettingsSystemSettingsGetResponses[keyof GetSettingsSystemSettingsGetResponses]; + +export type UpdateSettingSystemSettingsPostData = { + body: SettingSchema; + path?: never; + query?: never; + url: '/system/settings'; +}; + +export type UpdateSettingSystemSettingsPostErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type UpdateSettingSystemSettingsPostError = UpdateSettingSystemSettingsPostErrors[keyof UpdateSettingSystemSettingsPostErrors]; + +export type UpdateSettingSystemSettingsPostResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + export type GetTreeSystemTreeGetData = { body?: never; path?: never; @@ -233,7 +729,7 @@ export type GetTreeSystemTreeGetData = { /** * Path */ - path?: string; + path?: string | null; }; url: '/system/tree'; }; @@ -249,14 +745,111 @@ export type GetTreeSystemTreeGetError = GetTreeSystemTreeGetErrors[keyof GetTree export type GetTreeSystemTreeGetResponses = { /** - * Response Get Tree System Tree Get + * Successful Response + */ + 200: unknown; +}; + +export type ListMediaInventoryMediaGetData = { + body?: never; + path?: never; + query?: never; + url: '/inventory/media'; +}; + +export type ListMediaInventoryMediaGetResponses = { + /** + * Response List Media Inventory Media Get * * Successful Response */ - 200: Array; + 200: Array; }; 
-export type GetTreeSystemTreeGetResponse = GetTreeSystemTreeGetResponses[keyof GetTreeSystemTreeGetResponses]; +export type ListMediaInventoryMediaGetResponse = ListMediaInventoryMediaGetResponses[keyof ListMediaInventoryMediaGetResponses]; + +export type RegisterMediaInventoryMediaPostData = { + body: MediaCreateSchema; + path?: never; + query?: never; + url: '/inventory/media'; +}; + +export type RegisterMediaInventoryMediaPostErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type RegisterMediaInventoryMediaPostError = RegisterMediaInventoryMediaPostErrors[keyof RegisterMediaInventoryMediaPostErrors]; + +export type RegisterMediaInventoryMediaPostResponses = { + /** + * Successful Response + */ + 200: MediaSchema; +}; + +export type RegisterMediaInventoryMediaPostResponse = RegisterMediaInventoryMediaPostResponses[keyof RegisterMediaInventoryMediaPostResponses]; + +export type DeleteMediaInventoryMediaMediaIdDeleteData = { + body?: never; + path: { + /** + * Media Id + */ + media_id: number; + }; + query?: never; + url: '/inventory/media/{media_id}'; +}; + +export type DeleteMediaInventoryMediaMediaIdDeleteErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type DeleteMediaInventoryMediaMediaIdDeleteError = DeleteMediaInventoryMediaMediaIdDeleteErrors[keyof DeleteMediaInventoryMediaMediaIdDeleteErrors]; + +export type DeleteMediaInventoryMediaMediaIdDeleteResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + +export type UpdateMediaInventoryMediaMediaIdPatchData = { + body: MediaUpdateSchema; + path: { + /** + * Media Id + */ + media_id: number; + }; + query?: never; + url: '/inventory/media/{media_id}'; +}; + +export type UpdateMediaInventoryMediaMediaIdPatchErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type UpdateMediaInventoryMediaMediaIdPatchError = UpdateMediaInventoryMediaMediaIdPatchErrors[keyof UpdateMediaInventoryMediaMediaIdPatchErrors]; + +export type UpdateMediaInventoryMediaMediaIdPatchResponses = { + /** + * Successful Response + */ + 200: MediaSchema; +}; + +export type UpdateMediaInventoryMediaMediaIdPatchResponse = UpdateMediaInventoryMediaMediaIdPatchResponses[keyof UpdateMediaInventoryMediaMediaIdPatchResponses]; export type BrowseIndexInventoryBrowseGetData = { body?: never; @@ -265,7 +858,11 @@ export type BrowseIndexInventoryBrowseGetData = { /** * Path */ - path?: string; + path?: string | null; + /** + * Include Ignored + */ + include_ignored?: boolean; }; url: '/inventory/browse'; }; @@ -290,6 +887,114 @@ export type BrowseIndexInventoryBrowseGetResponses = { export type BrowseIndexInventoryBrowseGetResponse = BrowseIndexInventoryBrowseGetResponses[keyof BrowseIndexInventoryBrowseGetResponses]; +export type GetIndexTreeInventoryTreeGetData = { + body?: never; + path?: never; + query?: { + /** + * Path + */ + path?: string | null; + /** + * Include Ignored + */ + include_ignored?: boolean; + }; + url: '/inventory/tree'; +}; + +export type GetIndexTreeInventoryTreeGetErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type GetIndexTreeInventoryTreeGetError = GetIndexTreeInventoryTreeGetErrors[keyof GetIndexTreeInventoryTreeGetErrors]; + +export type GetIndexTreeInventoryTreeGetResponses = { + /** + * Response Get Index Tree Inventory Tree Get + * + * Successful Response + */ + 200: Array; +}; + +export type GetIndexTreeInventoryTreeGetResponse = GetIndexTreeInventoryTreeGetResponses[keyof 
GetIndexTreeInventoryTreeGetResponses]; + +export type GetItemMetadataInventoryMetadataGetData = { + body?: never; + path?: never; + query: { + /** + * Path + */ + path: string; + }; + url: '/inventory/metadata'; +}; + +export type GetItemMetadataInventoryMetadataGetErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type GetItemMetadataInventoryMetadataGetError = GetItemMetadataInventoryMetadataGetErrors[keyof GetItemMetadataInventoryMetadataGetErrors]; + +export type GetItemMetadataInventoryMetadataGetResponses = { + /** + * Successful Response + */ + 200: ItemMetadataSchema; +}; + +export type GetItemMetadataInventoryMetadataGetResponse = GetItemMetadataInventoryMetadataGetResponses[keyof GetItemMetadataInventoryMetadataGetResponses]; + +export type ListInventoryInventoryGetData = { + body?: never; + path?: never; + query?: never; + url: '/inventory/'; +}; + +export type ListInventoryInventoryGetResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + +export type TriggerBackupBackupsTriggerMediaIdPostData = { + body?: never; + path: { + /** + * Media Id + */ + media_id: number; + }; + query?: never; + url: '/backups/trigger/{media_id}'; +}; + +export type TriggerBackupBackupsTriggerMediaIdPostErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type TriggerBackupBackupsTriggerMediaIdPostError = TriggerBackupBackupsTriggerMediaIdPostErrors[keyof TriggerBackupBackupsTriggerMediaIdPostErrors]; + +export type TriggerBackupBackupsTriggerMediaIdPostResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + export type ListBackupsBackupsGetData = { body?: never; path?: never; @@ -304,6 +1009,133 @@ export type ListBackupsBackupsGetResponses = { 200: unknown; }; +export type ListCartRestoresCartGetData = { + body?: never; + path?: never; + query?: never; + url: '/restores/cart'; +}; + +export type ListCartRestoresCartGetResponses = { + /** + * Response List Cart Restores Cart Get + * + * Successful Response + */ + 200: Array; +}; + +export type ListCartRestoresCartGetResponse = ListCartRestoresCartGetResponses[keyof ListCartRestoresCartGetResponses]; + +export type AddToCartRestoresCartFileIdPostData = { + body?: never; + path: { + /** + * File Id + */ + file_id: number; + }; + query?: never; + url: '/restores/cart/{file_id}'; +}; + +export type AddToCartRestoresCartFileIdPostErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type AddToCartRestoresCartFileIdPostError = AddToCartRestoresCartFileIdPostErrors[keyof AddToCartRestoresCartFileIdPostErrors]; + +export type AddToCartRestoresCartFileIdPostResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + +export type AddDirectoryToCartRestoresCartDirectoryPostData = { + body: DirectoryCartRequest; + path?: never; + query?: never; + url: '/restores/cart/directory'; +}; + +export type AddDirectoryToCartRestoresCartDirectoryPostErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type AddDirectoryToCartRestoresCartDirectoryPostError = AddDirectoryToCartRestoresCartDirectoryPostErrors[keyof AddDirectoryToCartRestoresCartDirectoryPostErrors]; + +export type AddDirectoryToCartRestoresCartDirectoryPostResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + +export type RemoveFromCartRestoresCartItemIdDeleteData = { + body?: never; + path: { + /** + * Item Id + */ + item_id: number; + }; + query?: never; + url: '/restores/cart/{item_id}'; +}; + +export type 
RemoveFromCartRestoresCartItemIdDeleteErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type RemoveFromCartRestoresCartItemIdDeleteError = RemoveFromCartRestoresCartItemIdDeleteErrors[keyof RemoveFromCartRestoresCartItemIdDeleteErrors]; + +export type RemoveFromCartRestoresCartItemIdDeleteResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + +export type ClearCartRestoresCartClearPostData = { + body?: never; + path?: never; + query?: never; + url: '/restores/cart/clear'; +}; + +export type ClearCartRestoresCartClearPostResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + +export type GetManifestRestoresManifestGetData = { + body?: never; + path?: never; + query?: never; + url: '/restores/manifest'; +}; + +export type GetManifestRestoresManifestGetResponses = { + /** + * Successful Response + */ + 200: RestoreManifestSchema; +}; + +export type GetManifestRestoresManifestGetResponse = GetManifestRestoresManifestGetResponses[keyof GetManifestRestoresManifestGetResponses]; + export type ReadRootGetData = { body?: never; path?: never; diff --git a/frontend/src/lib/components/ScanStatusOverlay.svelte b/frontend/src/lib/components/ScanStatusOverlay.svelte new file mode 100644 index 0000000..e92405a --- /dev/null +++ b/frontend/src/lib/components/ScanStatusOverlay.svelte @@ -0,0 +1,88 @@ + + +{#if scanStatus?.is_running} +
+	<!-- The component's script block and most of its markup were lost in
+	     extraction. Recoverable template content: a "System Scanner Active"
+	     header with a {scanProgress}% readout above an "Optimizing Unified
+	     Index" progress bar; a "Throughput" counter rendering
+	     {scanStatus.files_processed.toLocaleString()} / {scanStatus.total_files_found.toLocaleString()} ITEMS;
+	     and a current-path line, {scanStatus.current_path || 'Initializing crawler...'}. -->
+{/if} diff --git a/frontend/src/lib/components/file-browser/FileBrowser.svelte b/frontend/src/lib/components/file-browser/FileBrowser.svelte index b0f0304..e59d208 100644 --- a/frontend/src/lib/components/file-browser/FileBrowser.svelte +++ b/frontend/src/lib/components/file-browser/FileBrowser.svelte @@ -6,20 +6,8 @@ RotateCw, Search, Folder, - Home, ArrowUpDown, - MoreHorizontal, - Plus, - Scissors, - Copy, - Clipboard, - Trash2, - Type, - LayoutGrid, CheckSquare, - HardDrive, - ShieldCheck, - ShieldAlert, Square } from "lucide-svelte"; import { Button } from "$lib/components/ui/button"; @@ -28,20 +16,22 @@ import { Input } from "$lib/components/ui/input"; import FileBrowserTreeItem from "./FileBrowserTreeItem.svelte"; import FileBrowserRowItem from "./FileBrowserRowItem.svelte"; - import type { FileItem, Breadcrumb, TreeNode } from "$lib/types"; + import type { FileItem, Breadcrumb } from "$lib/types"; import { cn } from "$lib/utils"; let { - currentPath = $bindable("/source_data"), + currentPath = $bindable("ROOT"), files = [], onNavigate = (path: string) => {}, onToggleTrack = (item: FileItem) => {}, + onSelect = (item: FileItem) => {}, mode = "host" } = $props<{ currentPath: string; files: FileItem[]; onNavigate?: (path: string) => void; onToggleTrack?: (item: FileItem) => void; + onSelect?: (item: FileItem) => void; mode?: "host" | "index"; }>(); @@ -97,16 +87,16 @@ // --- Navigation Tree Definition --- const sourceDataRoot = $derived({ - name: "Source Data", - path: "/source_data", + name: "All Sources", + path: "ROOT", expanded: true, children: [], hasChildren: true }); const virtualIndexRoot = $derived({ - name: "Virtual Index", - path: "/", + name: "Index Browser", + path: "ROOT", expanded: true, children: [], hasChildren: true @@ -117,37 +107,32 @@ // --- Logic --- const breadcrumbs = $derived.by(() => { + if (currentPath === "ROOT") { + return [{ name: mode === "host" ? "All Sources" : "Index Browser", path: "ROOT" }]; + } + const parts = currentPath.split("/").filter(Boolean); const crumbs: Breadcrumb[] = []; - if (mode === "host") { - crumbs.push({ name: "Source Data", path: "/source_data" }); - let current = "/source_data"; - const subParts = parts[0] === "source_data" ? parts.slice(1) : parts; - for (const part of subParts) { - current += `/${part}`; - crumbs.push({ name: part, path: current }); - } - } else { - crumbs.push({ name: "Virtual Index", path: "/" }); - let current = ""; - for (const part of parts) { - current += `/${part}`; - crumbs.push({ name: part, path: current }); - } + crumbs.push({ name: mode === "host" ? "All Sources" : "Index Browser", path: "ROOT" }); + + let current = ""; + for (const part of parts) { + current += `/${part}`; + crumbs.push({ name: part, path: current }); } return crumbs; }); const filteredFiles = $derived.by(() => { - let result = files.filter((f) => f.name.toLowerCase().includes(searchQuery.toLowerCase())); + let result = files.filter((f: FileItem) => f.name.toLowerCase().includes(searchQuery.toLowerCase())); - result.sort((a, b) => { + result.sort((a: FileItem, b: FileItem) => { const valA = sortColumn === "type" ? a.type : a[sortColumn as keyof FileItem] || 0; const valB = sortColumn === "type" ? b.type : b[sortColumn as keyof FileItem] || 0; - if (valA < valB) return sortDirection === "asc" ? -1 : 1; - if (valA > valB) return sortDirection === "asc" ? 1 : -1; + if (valA < (valB as any)) return sortDirection === "asc" ? -1 : 1; + if (valA > (valB as any)) return sortDirection === "asc" ? 
1 : -1; return 0; }); @@ -165,8 +150,8 @@ function handleRowClick(e: MouseEvent, item: FileItem) { if (e.shiftKey && lastSelectedPath) { - const lastIndex = filteredFiles.findIndex(f => f.path === lastSelectedPath); - const currentIndex = filteredFiles.findIndex(f => f.path === item.path); + const lastIndex = filteredFiles.findIndex((f: FileItem) => f.path === lastSelectedPath); + const currentIndex = filteredFiles.findIndex((f: FileItem) => f.path === item.path); const start = Math.min(lastIndex, currentIndex); const end = Math.max(lastIndex, currentIndex); @@ -188,6 +173,9 @@ selectedPaths = new Set([item.path]); lastSelectedPath = item.path; } + + // Signal selection to parent + onSelect(item); } function handleRowDoubleClick(item: FileItem) { @@ -198,32 +186,32 @@ } } - function handleSelectAll(checked: boolean) { - if (checked) { - selectedPaths = new Set(filteredFiles.map(f => f.path)); + function handleSelectAll(checked: boolean | "indeterminate") { + if (checked === true) { + selectedPaths = new Set(filteredFiles.map((f: FileItem) => f.path)); } else { selectedPaths = new Set(); } } function bulkToggle(track: boolean) { - const selectedItems = files.filter(f => selectedPaths.has(f.path) && f.tracked !== track); - selectedItems.forEach(item => onToggleTrack(item)); + const selectedItems = files.filter((f: FileItem) => selectedPaths.has(f.path) && f.tracked !== track); + selectedItems.forEach((item: FileItem) => onToggleTrack(item)); }
	<!-- The template portion of this diff lost its markup in extraction.
	     Recoverable changes: the toolbar keeps breadcrumb navigation
	     ({#each breadcrumbs as crumb, i}) plus refresh and search controls;
	     the header row gains a select-all checkbox driven by handleSelectAll
	     and sortable, resizable columns (name, mtime, type, size) wired to
	     toggleSort and startResize; the body renders an empty-folder state
	     ("Folder is empty") or a keyed {#each filteredFiles as item (item.path)}
	     loop of FileBrowserRowItem rows bound to handleRowClick,
	     handleRowDoubleClick and onToggleTrack. -->
@@ -425,9 +416,9 @@ {#if mode === 'host'} - {files.filter((f) => f.tracked).length} Tracked + {files.filter((f: FileItem) => f.tracked).length} Tracked {:else} - {files.filter((f) => f.selected).length} Selected + {files.filter((f: FileItem) => f.selected).length} Selected {/if}
diff --git a/frontend/src/lib/components/file-browser/FileBrowserRowItem.svelte b/frontend/src/lib/components/file-browser/FileBrowserRowItem.svelte index d041fbb..96e9537 100644 --- a/frontend/src/lib/components/file-browser/FileBrowserRowItem.svelte +++ b/frontend/src/lib/components/file-browser/FileBrowserRowItem.svelte @@ -12,7 +12,8 @@ CassetteTape, ShieldCheck, ShieldAlert, - Square + Square, + EyeOff } from "lucide-svelte"; import { Checkbox } from "$lib/components/ui/checkbox"; import { Button } from "$lib/components/ui/button"; @@ -108,7 +109,8 @@ "group flex h-10 items-center border-b border-border-color/10 transition-all cursor-pointer select-none", isSelected ? "bg-blue-500/15 border-l-2 border-l-blue-500" - : "hover:bg-white/5 border-l-2 border-l-transparent" + : "hover:bg-white/5 border-l-2 border-l-transparent", + item.ignored && "opacity-40 grayscale-[0.5]" )} role="button" tabindex="0" @@ -121,12 +123,16 @@ class="flex h-10 w-12 shrink-0 items-center justify-center border-r border-border-color/10" onclick={(e) => { e.stopPropagation(); - onToggleTrack(); + if (!item.ignored) onToggleTrack(); }} onkeydown={(e) => e.key === " " && e.stopPropagation()} role="none" > - {#if mode === 'host'} + {#if item.ignored} +
	[Remaining template hunks garbled in extraction. Recoverable changes: ignored items render an EyeOff icon where the tracking checkbox would be ({#if item.ignored} … {:else if mode === 'host'} … {#if item.tracked}); the name cell still branches on {#if item.type === "link"}; the index-mode media badges ({#if mode === "index" && item.media && item.media.length > 0} … {#each item.media as m}) are restyled; and the inline symlink-target display ({#if item.type === "link" && item.target} … {item.target}) is removed.]
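
The row-class hunk above composes Tailwind classes conditionally; the falsy-filtering join below is a simplified stand-in for the cn/clsx-style utility the component presumably imports (a real helper would also merge conflicting Tailwind classes):

function cn(...parts: Array<string | false | null | undefined>): string {
  // Drop falsy entries so `condition && "class"` can be used inline.
  return parts.filter(Boolean).join(" ");
}

// Mirrors the hunk above: ignored rows are dimmed and desaturated.
function rowClass(isSelected: boolean, ignored: boolean): string {
  return cn(
    "group flex h-10 items-center cursor-pointer select-none",
    isSelected
      ? "bg-blue-500/15 border-l-2 border-l-blue-500"
      : "hover:bg-white/5 border-l-2 border-l-transparent",
    ignored && "opacity-40 grayscale-[0.5]"
  );
}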
diff --git a/frontend/src/lib/components/file-browser/FileBrowserTreeItem.svelte b/frontend/src/lib/components/file-browser/FileBrowserTreeItem.svelte
index 0548053..d91927b 100644
--- a/frontend/src/lib/components/file-browser/FileBrowserTreeItem.svelte
+++ b/frontend/src/lib/components/file-browser/FileBrowserTreeItem.svelte
@@ -1,9 +1,9 @@
	[Hunk garbled in extraction; only the relocated {@render children()} slot survives from the rewritten markup.]
diff --git a/frontend/src/routes/+page.svelte b/frontend/src/routes/+page.svelte
index 555d176..85f4bf1 100644
--- a/frontend/src/routes/+page.svelte
+++ b/frontend/src/routes/+page.svelte
@@ -1,9 +1,318 @@
-	TapeHoard - Dashboard
+	Dashboard - TapeHoard
-	[Removed: the placeholder page, a "Dashboard" heading with the "Welcome to TapeHoard." greeting.]
+	[Added markup garbled in extraction. Recoverable structure: an "Operational Status" header ("Fleet-wide data protection & index health") with action buttons; a skeleton grid while {#if loading && !stats}; four stat cards: Total Files {stats.total_files_indexed.toLocaleString()}, Data Volume {formatSize(stats.total_data_size)}, Redundancy Ratio {stats.redundancy_ratio}x, and Last Scan {stats.last_scan_time ? new Date(stats.last_scan_time).toLocaleDateString() : 'Never'}; a "Protection Health Score" panel showing "Tracked File Coverage" at {protectionPercent}% over {stats.total_files_indexed - stats.ignored_files_count - stats.unprotected_files_count} / {stats.total_files_indexed - stats.ignored_files_count} eligible files, and "Active Data Redundancy" at {dataProtectionPercent}% over the corresponding byte totals ({formatSize(stats.total_data_size - stats.ignored_data_size - stats.unprotected_data_size)} / {formatSize(stats.total_data_size - stats.ignored_data_size)}); counters for Unprotected ("Files pending archival"), Ignored ("Bypassed by policy"), and Ignored Vol ("Filtered from index"); a "Quick Directives" shortcut row; and a "Media distribution" list (LTO Tapes, HDD Storage, Cloud Vaults, each currently 0 Items).]
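
The dashboard bindings above rely on a byte formatter and two derived percentages; a plausible sketch consistent with those expressions (only the field names come from the template, the implementations are assumed):

interface DashboardStats {
  total_files_indexed: number;
  ignored_files_count: number;
  unprotected_files_count: number;
  total_data_size: number;
  ignored_data_size: number;
  unprotected_data_size: number;
}

function formatSize(bytes: number): string {
  const units = ["B", "KB", "MB", "GB", "TB", "PB"];
  let value = bytes;
  let i = 0;
  while (value >= 1024 && i < units.length - 1) {
    value /= 1024;
    i += 1;
  }
  return `${value.toFixed(1)} ${units[i]}`;
}

// "Eligible" excludes ignored files; coverage is protected / eligible.
function protectionPercent(s: DashboardStats): number {
  const eligible = s.total_files_indexed - s.ignored_files_count;
  if (eligible <= 0) return 100;
  return Math.round(((eligible - s.unprotected_files_count) / eligible) * 100);
}

// Same ratio computed over bytes instead of file counts.
function dataProtectionPercent(s: DashboardStats): number {
  const eligible = s.total_data_size - s.ignored_data_size;
  if (eligible <= 0) return 100;
  return Math.round(((eligible - s.unprotected_data_size) / eligible) * 100);
}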
diff --git a/frontend/src/routes/index-browser/+page.svelte b/frontend/src/routes/index-browser/+page.svelte
new file mode 100644
index 0000000..6c00ea9
--- /dev/null
+++ b/frontend/src/routes/index-browser/+page.svelte
@@ -0,0 +1,335 @@
+	Index Browser - TapeHoard
+	[New-file markup garbled in extraction. Recoverable structure: an "Index Browser" header ("Unified Filesystem View across all media"); a cart badge shown {#if restoreCartItems.length > 0} reading "{restoreCartItems.length} items in cart" with a checkout action; a loading overlay; and the FileBrowser component wired with a navigation callback ((path) => currentPath = path), onToggleTrack={handleToggleCart}, and onSelect={fetchMetadata}.]
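
The page wires FileBrowser's toggle callback to a restore cart; a minimal sketch of that handler (the state shape is assumed, and in the Svelte component this would live in reactive state rather than a plain variable):

type CartFile = { path: string };

let restoreCartItems: CartFile[] = [];

function handleToggleCart(item: CartFile): void {
  const queued = restoreCartItems.some((f) => f.path === item.path);
  restoreCartItems = queued
    ? restoreCartItems.filter((f) => f.path !== item.path) // remove from cart
    : [...restoreCartItems, item];                          // queue for restore
}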
diff --git a/frontend/src/routes/inventory/+page.svelte b/frontend/src/routes/inventory/+page.svelte
index 93a21ce..31d2a5e 100644
--- a/frontend/src/routes/inventory/+page.svelte
+++ b/frontend/src/routes/inventory/+page.svelte
@@ -1,191 +1,381 @@
-	Inventory - TapeHoard
+	Physical Media - TapeHoard
-	[Removed: the old "Global Inventory" page ("Manage physical and cloud storage media across all locations."); a "Storage Pool Summary" card with hard-coded figures (Total Media {mediaList.length}, Total Capacity 13.0 TB, Total Used 9.1 TB, Utilization 70%); and a table of Identifier / Type / Capacity Used / Status / Location / Actions rows using {media.type.startsWith('LTO')} branches, "{media.used} GB / {media.capacity === Infinity ? '∞' : media.capacity + ' GB'}", and an action rendered only {#if media.status === 'Full' && media.type.startsWith('LTO')}.]
+	[Added markup garbled in extraction. Recoverable structure: a "Physical Media" header ("Inventory & Media Configuration") with a register action; an "Auditing Fleet Status..." loading state; a summary strip (Total Media {mediaList.length}, Fleet Capacity {formatSize(totalCapacity)}, Active Usage {formatSize(totalUsed)}, Utilization {globalUtilization}%); a table keyed {#each mediaList as media (media.id)} with columns Identifier / Spec & Tier / System Config / Usage / Lifecycle / Actions, where each row shows {media.identifier} with "LOC: {media.location || 'Unknown'}", a type icon branch on media.media_type (tape / hdd / cloud), {media.generation_tier || media.media_type}, per-type config ({media.config.device_path} for tape, {media.config.mount_path} for hdd, {media.config.bucket_name} for cloud, else "No config"), usage {formatSize(media.bytes_used)} / {formatSize(media.capacity)}, a {media.status} badge, and an action shown only {#if media.status === 'active'}; an empty state, "No Media Assets Registered"; and a "Register Media" dialog ("Provision new physical storage unit") behind {#if showRegisterDialog}, with a type selector {#each ['tape', 'hdd', 'cloud'] as type}, per-type config fields, and cancel/confirm actions, closed by the matching {/if}.]
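
The inventory summary strip binds totalCapacity, totalUsed, and globalUtilization; a sketch of those roll-ups (the Media shape follows the capacity and bytes_used fields used in the usage column, the derivation itself is assumed):

interface Media {
  capacity: number;   // bytes
  bytes_used: number; // bytes
}

function fleetStats(mediaList: Media[]) {
  const totalCapacity = mediaList.reduce((sum, m) => sum + m.capacity, 0);
  const totalUsed = mediaList.reduce((sum, m) => sum + m.bytes_used, 0);
  // Guard against an empty fleet to avoid NaN in the template.
  const globalUtilization =
    totalCapacity > 0 ? Math.round((totalUsed / totalCapacity) * 100) : 0;
  return { totalCapacity, totalUsed, globalUtilization };
}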
diff --git a/frontend/src/routes/jobs/+page.svelte b/frontend/src/routes/jobs/+page.svelte
new file mode 100644
index 0000000..09b1afe
--- /dev/null
+++ b/frontend/src/routes/jobs/+page.svelte
@@ -0,0 +1,217 @@
+	System Activity - TapeHoard
+	[New-file markup garbled in extraction. Recoverable structure: a "System Activity" header ("Real-time task monitoring & operational history") with a live counter, "{jobs.filter(j => j.status === 'RUNNING').length} Active Tasks"; a "Hydrating Task Pipeline..." loading state; a card list keyed {#each jobs as job (job.id)} where each card picks an icon by {job.job_type} (SCAN, BACKUP, or a fallback), titles itself "{job.job_type} JOB #{job.id}" with a {job.status} badge, shows "{job.current_task || 'Waiting in queue...'}" beside "{job.progress.toFixed(1)}%" and a progress bar, lists Duration {formatDuration(job.started_at, job.completed_at)} and Created {new Date(job.created_at).toLocaleTimeString([], {hour: '2-digit', minute:'2-digit'})}, offers a cancel action {#if job.status === 'RUNNING' || job.status === 'PENDING'}, and surfaces {job.error_message} when present; plus an empty state, "No historical tasks found."]
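
formatDuration is called above with the job's started_at and completed_at; a plausible implementation inferred from that call site (measuring still-running jobs against the current time is an assumption):

function formatDuration(startedAt: string | null, completedAt: string | null): string {
  if (!startedAt) return "pending";
  const end = completedAt ? new Date(completedAt).getTime() : Date.now();
  const totalSeconds = Math.max(0, Math.floor((end - new Date(startedAt).getTime()) / 1000));
  const minutes = Math.floor(totalSeconds / 60);
  const seconds = totalSeconds % 60;
  return minutes > 0 ? `${minutes}m ${seconds}s` : `${seconds}s`;
}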
diff --git a/frontend/src/routes/restores/+page.svelte b/frontend/src/routes/restores/+page.svelte
index 90b1ab2..f488370 100644
--- a/frontend/src/routes/restores/+page.svelte
+++ b/frontend/src/routes/restores/+page.svelte
@@ -1,88 +1,88 @@
-	Restore Wizard - TapeHoard
+	Restore Management - TapeHoard
-	[Removed: the four-step wizard, headed "Restore Wizard" with "Step {currentStep} of 4: {['Browse & Select', 'Insert Media', 'Swap Media', 'Finalize'][currentStep-1]}"; a step-1 "Virtual Filesystem" panel ("Indexing: {currentPath}", "Querying Index...", a FileBrowser with onToggleTrack={handleToggleSelect}); a "Restore Cart" sidebar with a {restoreCart.length} badge, the empty state "Your cart is empty. Select files from the index.", per-item {item.name} / {item.path} / {formatSize(item.size || 0)} rows with {#each (item.media || []) as m} badges, and totals ("Total Payload" {formatSize(totalSize)}, "Media Required" {requiredMedia.length} Tapes); and a step-2 "Insert Tape" screen ("Please insert the following tape into the drive to begin extraction.", {requiredMedia[0] || 'BUP-00001'}, "Waiting for drive status...").]
+	[Added markup garbled in extraction. Recoverable structure: a "Restore Management" header ("Cart Review & Physical Media Manifest"); a "Generating Manifest..." loading state; an empty state, "Restore Cart is Empty" / "Go to the Index Browser to select files for recovery."; a "Queued for Restore ({cartItems.length})" list totalling {formatSize(manifest?.total_size || 0)}, each row keyed {#each cartItems as item (item.id)} showing {item.file_path.split('/').pop()} over the full {item.file_path}, its {#each item.media_identifiers as media} badges, and {formatSize(item.size)}; a "Recovery Destination" selector over {#each restoreDests as dest} with the fallback "No targets defined in settings" and the note "Files will be extracted into this directory using their original folder structure."; and a "Physical Manifest" panel iterating {#each manifest.media_required as req} with a tape-or-other icon, {req.identifier}, {req.media_type}, "{req.file_count} FILES", and {formatSize(req.total_size)}, plus the note "Recovery will proceed sequentially by media to minimize hardware cycles."]
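
The "Physical Manifest" above groups queued files by the media that hold them; the backend likely computes this server-side, but the aggregation is roughly the following (shapes are taken from the template bindings, and carrying media_type via a lookup on the first listed medium is an assumption):

interface CartItem { size: number; media_identifiers: string[]; }
interface MediaRequirement {
  identifier: string;
  media_type: string;
  file_count: number;
  total_size: number;
}

function buildManifest(items: CartItem[], typeOf: (id: string) => string): MediaRequirement[] {
  const byMedia = new Map<string, MediaRequirement>();
  for (const item of items) {
    const id = item.media_identifiers[0]; // restore from the first copy found
    if (!id) continue;
    const req = byMedia.get(id) ?? {
      identifier: id,
      media_type: typeOf(id),
      file_count: 0,
      total_size: 0,
    };
    req.file_count += 1;
    req.total_size += item.size;
    byMedia.set(id, req);
  }
  return [...byMedia.values()];
}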
diff --git a/frontend/src/routes/settings/+page.svelte b/frontend/src/routes/settings/+page.svelte
index 898e370..02e26a7 100644
--- a/frontend/src/routes/settings/+page.svelte
+++ b/frontend/src/routes/settings/+page.svelte
@@ -1,54 +1,153 @@
 	Settings - TapeHoard
-	[Removed: the old page, "System Settings" / "Configure global backup behavior and exclusion engines.", with a single "Global Exclusion Engine" card ("Define patterns that will be ignored across all backup sources.", ".gitignore syntax") whose warning read "WARNING: Broad exclusion patterns (like * or /) can lead to empty backups. Patterns are evaluated recursively. Use ! to explicitly include a sub-pattern within an excluded directory."]
+	[Added markup garbled in extraction. Recoverable structure: a "System Settings" header ("Global Backup Configuration & Policy Engine") with a save action; a "Hydrating Configuration..." state behind {#if loading}; a "Source Provisioning" card ("Primary data ingestion points.") listing {#each sourceRoots as root, i} rows with an add action; a "Recovery Targets" card ("Authorized destinations for restored data.") listing {#each restoreDestinations as dest, i}; and an "Exclusion Engine" card ("Patterns to bypass during scans.", ".gitignore syntax") carrying the caution "Warning: Broad exclusion patterns can result in incomplete backups."]
diff --git a/frontend/src/routes/tracking/+page.svelte b/frontend/src/routes/tracking/+page.svelte
index 93cebc2..57ef71b 100644
--- a/frontend/src/routes/tracking/+page.svelte
+++ b/frontend/src/routes/tracking/+page.svelte
@@ -1,20 +1,30 @@