diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..12f1895 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,18 @@ +node_modules +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.db +.sqlite +.git +.gitignore +.ruff_cache +.svelte-kit +build +static +staging_area +data +tmp +source_data diff --git a/backend/alembic/env.py b/backend/alembic/env.py index 60db6b8..68f36d5 100644 --- a/backend/alembic/env.py +++ b/backend/alembic/env.py @@ -1,3 +1,4 @@ +import os import sys from os.path import dirname, abspath from logging.config import fileConfig @@ -16,6 +17,11 @@ from app.db.models import Base # noqa: E402 # access to the values within the .ini file in use. config = context.config +# Override sqlalchemy.url if environment variable is set +database_url = os.getenv("DATABASE_URL") +if database_url: + config.set_main_option("sqlalchemy.url", database_url) + # Interpret the config file for Python logging. # This line sets up loggers basically. if config.config_file_name is not None: diff --git a/backend/alembic/versions/193bb204c677_add_fts_table.py b/backend/alembic/versions/193bb204c677_add_fts_table.py new file mode 100644 index 0000000..332d5d4 --- /dev/null +++ b/backend/alembic/versions/193bb204c677_add_fts_table.py @@ -0,0 +1,55 @@ +"""add_fts_table + +Revision ID: 193bb204c677 +Revises: 9a6e70fabf7b +Create Date: 2026-04-23 19:04:51.158603 + +""" + +from typing import Sequence, Union + +from alembic import op + + +# revision identifiers, used by Alembic. +revision: str = "193bb204c677" +down_revision: Union[str, Sequence[str], None] = "9a6e70fabf7b" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + """Upgrade schema.""" + op.execute(""" + CREATE VIRTUAL TABLE filesystem_fts USING fts5( + file_path, + tokenize='trigram' + ) + """) + + op.execute(""" + CREATE TRIGGER fts_ai AFTER INSERT ON filesystem_state BEGIN + INSERT INTO filesystem_fts(rowid, file_path) VALUES (new.id, new.file_path); + END; + """) + + op.execute(""" + CREATE TRIGGER fts_ad AFTER DELETE ON filesystem_state BEGIN + INSERT INTO filesystem_fts(filesystem_fts, rowid, file_path) VALUES('delete', old.id, old.file_path); + END; + """) + + op.execute(""" + CREATE TRIGGER fts_au AFTER UPDATE ON filesystem_state BEGIN + INSERT INTO filesystem_fts(filesystem_fts, rowid, file_path) VALUES('delete', old.id, old.file_path); + INSERT INTO filesystem_fts(rowid, file_path) VALUES (new.id, new.file_path); + END; + """) + + +def downgrade() -> None: + """Downgrade schema.""" + op.execute("DROP TRIGGER IF EXISTS fts_ai") + op.execute("DROP TRIGGER IF EXISTS fts_ad") + op.execute("DROP TRIGGER IF EXISTS fts_au") + op.execute("DROP TABLE IF EXISTS filesystem_fts") diff --git a/backend/alembic/versions/33d682d2c089_add_splitting_support.py b/backend/alembic/versions/33d682d2c089_add_splitting_support.py new file mode 100644 index 0000000..fee21f4 --- /dev/null +++ b/backend/alembic/versions/33d682d2c089_add_splitting_support.py @@ -0,0 +1,65 @@ +"""add_splitting_support + +Revision ID: 33d682d2c089 +Revises: 5e867c5b4930 +Create Date: 2026-04-23 20:47:53.406493 + +""" + +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision: str = "33d682d2c089" +down_revision: Union[str, Sequence[str], None] = "5e867c5b4930" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + """Upgrade schema.""" + # Columns might exist due to failed previous run + conn = op.get_bind() + columns = [c["name"] for c in sa.inspect(conn).get_columns("file_versions")] + + if "is_split" not in columns: + op.add_column( + "file_versions", + sa.Column("is_split", sa.Boolean(), nullable=False, server_default="0"), + ) + if "split_id" not in columns: + op.add_column( + "file_versions", sa.Column("split_id", sa.String(), nullable=True) + ) + if "offset_start" not in columns: + op.add_column( + "file_versions", + sa.Column( + "offset_start", sa.BigInteger(), nullable=False, server_default="0" + ), + ) + if "offset_end" not in columns: + op.add_column( + "file_versions", + sa.Column( + "offset_end", sa.BigInteger(), nullable=False, server_default="0" + ), + ) + if "created_at" not in columns: + op.add_column( + "file_versions", sa.Column("created_at", sa.DateTime(), nullable=True) + ) + + +def downgrade() -> None: + """Downgrade schema.""" + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("file_versions", "created_at") + op.drop_column("file_versions", "offset_end") + op.drop_column("file_versions", "offset_start") + op.drop_column("file_versions", "split_id") + op.drop_column("file_versions", "is_split") + # ### end Alembic commands ### diff --git a/backend/alembic/versions/38cb9df7a18c_fix_fts_triggers.py b/backend/alembic/versions/38cb9df7a18c_fix_fts_triggers.py new file mode 100644 index 0000000..7251f3c --- /dev/null +++ b/backend/alembic/versions/38cb9df7a18c_fix_fts_triggers.py @@ -0,0 +1,70 @@ +"""fix_fts_triggers + +Revision ID: 38cb9df7a18c +Revises: 33d682d2c089 +Create Date: 2026-04-23 21:50:00.000000 + +""" + +from typing import Sequence, Union +from alembic import op + + +# revision identifiers, used by Alembic. 
+revision: str = "38cb9df7a18c" +down_revision: Union[str, Sequence[str], None] = "33d682d2c089" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # Drop old invalid triggers + op.execute("DROP TRIGGER IF EXISTS fts_ai") + op.execute("DROP TRIGGER IF EXISTS fts_ad") + op.execute("DROP TRIGGER IF EXISTS fts_au") + + # Create correct triggers for standalone FTS5 table + op.execute(""" + CREATE TRIGGER fts_ai AFTER INSERT ON filesystem_state BEGIN + INSERT INTO filesystem_fts(rowid, file_path) VALUES (new.id, new.file_path); + END; + """) + + op.execute(""" + CREATE TRIGGER fts_ad AFTER DELETE ON filesystem_state BEGIN + DELETE FROM filesystem_fts WHERE rowid = old.id; + END; + """) + + # Only update FTS if the file_path actually changes + op.execute(""" + CREATE TRIGGER fts_au AFTER UPDATE OF file_path ON filesystem_state BEGIN + UPDATE filesystem_fts SET file_path = new.file_path WHERE rowid = old.id; + END; + """) + + +def downgrade() -> None: + op.execute("DROP TRIGGER IF EXISTS fts_ai") + op.execute("DROP TRIGGER IF EXISTS fts_ad") + op.execute("DROP TRIGGER IF EXISTS fts_au") + + # Recreate the old (invalid but matches previous state) triggers if needed for rollback + op.execute(""" + CREATE TRIGGER fts_ai AFTER INSERT ON filesystem_state BEGIN + INSERT INTO filesystem_fts(rowid, file_path) VALUES (new.id, new.file_path); + END; + """) + + op.execute(""" + CREATE TRIGGER fts_ad AFTER DELETE ON filesystem_state BEGIN + INSERT INTO filesystem_fts(filesystem_fts, rowid, file_path) VALUES('delete', old.id, old.file_path); + END; + """) + + op.execute(""" + CREATE TRIGGER fts_au AFTER UPDATE ON filesystem_state BEGIN + INSERT INTO filesystem_fts(filesystem_fts, rowid, file_path) VALUES('delete', old.id, old.file_path); + INSERT INTO filesystem_fts(rowid, file_path) VALUES (new.id, new.file_path); + END; + """) diff --git a/backend/alembic/versions/5e867c5b4930_sync_models.py b/backend/alembic/versions/5e867c5b4930_sync_models.py new file mode 100644 index 0000000..25f9da2 --- /dev/null +++ b/backend/alembic/versions/5e867c5b4930_sync_models.py @@ -0,0 +1,96 @@ +"""sync_models + +Revision ID: 5e867c5b4930 +Revises: 193bb204c677 +Create Date: 2026-04-23 19:10:26.093824 + +""" + +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision: str = "5e867c5b4930" +down_revision: Union[str, Sequence[str], None] = "193bb204c677" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + """Upgrade schema.""" + # ### commands auto generated by Alembic - please adjust! 
### + op.add_column( + "filesystem_state", + sa.Column("is_indexed", sa.Boolean(), nullable=False, server_default="0"), + ) + op.add_column( + "filesystem_state", + sa.Column("is_ignored", sa.Boolean(), nullable=False, server_default="0"), + ) + op.add_column( + "storage_media", sa.Column("extra_config", sa.String(), nullable=True) + ) + + # We must also create missing tables: TrackedSource, RestoreCart, Job, SystemSetting + op.create_table( + "jobs", + sa.Column("id", sa.Integer(), nullable=False), + sa.Column("job_type", sa.String(), nullable=False), + sa.Column("status", sa.String(), nullable=False), + sa.Column("progress", sa.Float(), nullable=False), + sa.Column("current_task", sa.String(), nullable=True), + sa.Column("error_message", sa.String(), nullable=True), + sa.Column("started_at", sa.DateTime(), nullable=True), + sa.Column("completed_at", sa.DateTime(), nullable=True), + sa.Column("created_at", sa.DateTime(), nullable=False), + sa.PrimaryKeyConstraint("id"), + ) + op.create_table( + "restore_cart", + sa.Column("id", sa.Integer(), nullable=False), + sa.Column("filesystem_state_id", sa.Integer(), nullable=False), + sa.Column("created_at", sa.DateTime(), nullable=False), + sa.ForeignKeyConstraint( + ["filesystem_state_id"], + ["filesystem_state.id"], + ), + sa.PrimaryKeyConstraint("id"), + ) + op.create_table( + "system_settings", + sa.Column("key", sa.String(), nullable=False), + sa.Column("value", sa.String(), nullable=False), + sa.Column("updated_at", sa.DateTime(), nullable=False), + sa.PrimaryKeyConstraint("key"), + ) + op.create_table( + "tracked_sources", + sa.Column("id", sa.Integer(), nullable=False), + sa.Column("path", sa.String(), nullable=False), + sa.Column("is_directory", sa.Boolean(), nullable=False), + sa.Column("action", sa.String(), nullable=False), + sa.Column("created_at", sa.DateTime(), nullable=False), + sa.PrimaryKeyConstraint("id"), + ) + op.create_index( + op.f("ix_tracked_sources_path"), "tracked_sources", ["path"], unique=True + ) + # ### end Alembic commands ### + + +def downgrade() -> None: + """Downgrade schema.""" + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index(op.f("ix_tracked_sources_path"), table_name="tracked_sources") + op.drop_table("tracked_sources") + op.drop_table("system_settings") + op.drop_table("restore_cart") + op.drop_table("jobs") + + op.drop_column("storage_media", "extra_config") + op.drop_column("filesystem_state", "is_ignored") + op.drop_column("filesystem_state", "is_indexed") + # ### end Alembic commands ### diff --git a/backend/alembic/versions/ac51f5e25832_add_missing_indexes.py b/backend/alembic/versions/ac51f5e25832_add_missing_indexes.py new file mode 100644 index 0000000..f9bc4b2 --- /dev/null +++ b/backend/alembic/versions/ac51f5e25832_add_missing_indexes.py @@ -0,0 +1,31 @@ +"""add_missing_indexes + +Revision ID: ac51f5e25832 +Revises: 38cb9df7a18c +Create Date: 2026-04-23 23:00:00.000000 + +""" + +from typing import Sequence, Union +from alembic import op + + +# revision identifiers, used by Alembic. 
+revision: str = "ac51f5e25832" +down_revision: Union[str, Sequence[str], None] = "38cb9df7a18c" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.create_index( + "ix_file_versions_filesystem_state_id", "file_versions", ["filesystem_state_id"] + ) + op.create_index( + "ix_restore_cart_filesystem_state_id", "restore_cart", ["filesystem_state_id"] + ) + + +def downgrade() -> None: + op.drop_index("ix_restore_cart_filesystem_state_id", table_name="restore_cart") + op.drop_index("ix_file_versions_filesystem_state_id", table_name="file_versions") diff --git a/backend/app/api/inventory.py b/backend/app/api/inventory.py index ce70153..b71fea2 100644 --- a/backend/app/api/inventory.py +++ b/backend/app/api/inventory.py @@ -50,6 +50,9 @@ class ItemMetadataSchema(BaseModel): sha256_hash: Optional[str] = None versions: List[FileVersionSchema] = [] child_count: Optional[int] = None + vulnerable: bool = False + selected: bool = False + indeterminate: bool = False class FileItemSchema(BaseModel): @@ -59,6 +62,9 @@ class FileItemSchema(BaseModel): size: Optional[int] = None mtime: Optional[float] = None media: List[str] = [] + vulnerable: bool = False + selected: bool = False + indeterminate: bool = False class TreeNodeSchema(BaseModel): @@ -68,10 +74,10 @@ class TreeNodeSchema(BaseModel): class MediaCreateSchema(BaseModel): - media_type: str # tape, hdd, cloud + media_type: str identifier: str generation_tier: Optional[str] = None - capacity: int # in bytes + capacity: int location: Optional[str] = None config: Dict[str, Any] = {} @@ -93,34 +99,38 @@ class MediaSchema(BaseModel): status: str config: Dict[str, Any] - @classmethod - def from_orm_custom(cls, obj: models.StorageMedia): - config_data = {} - if obj.extra_config: - try: - config_data = json.loads(obj.extra_config) - except Exception: - pass - return cls( - id=obj.id, - media_type=obj.media_type, - identifier=obj.identifier, - generation_tier=obj.generation_tier, - capacity=obj.capacity, - bytes_used=obj.bytes_used, - location=obj.location, - status=obj.status, - config=config_data, - ) + class Config: + from_attributes = True -# --- Media Endpoints --- +# --- Media Management --- @router.get("/media", response_model=List[MediaSchema]) def list_media(db: Session = Depends(get_db)): - all_media = db.query(models.StorageMedia).all() - return [MediaSchema.from_orm_custom(m) for m in all_media] + media = db.query(models.StorageMedia).all() + results = [] + for m in media: + config = {} + if m.extra_config: + try: + config = json.loads(m.extra_config) + except Exception: + pass + results.append( + MediaSchema( + id=m.id, + media_type=m.media_type, + identifier=m.identifier, + generation_tier=m.generation_tier, + capacity=m.capacity, + bytes_used=m.bytes_used, + location=m.location, + status=m.status, + config=config, + ) + ) + return results @router.post("/media", response_model=MediaSchema) @@ -131,22 +141,35 @@ def register_media(req: MediaCreateSchema, db: Session = Depends(get_db)): .first() ) if existing: - raise HTTPException( - status_code=400, detail="Media with this identifier already exists" - ) + raise HTTPException(status_code=400, detail="Media already exists") + new_media = models.StorageMedia( media_type=req.media_type, identifier=req.identifier, generation_tier=req.generation_tier, capacity=req.capacity, location=req.location, - status="active", extra_config=json.dumps(req.config), ) db.add(new_media) db.commit() db.refresh(new_media) - return 
MediaSchema.from_orm_custom(new_media) + + config = {} + if new_media.extra_config: + config = json.loads(new_media.extra_config) + + return MediaSchema( + id=new_media.id, + media_type=new_media.media_type, + identifier=new_media.identifier, + generation_tier=new_media.generation_tier, + capacity=new_media.capacity, + bytes_used=new_media.bytes_used, + location=new_media.location, + status=new_media.status, + config=config, + ) @router.patch("/media/{media_id}", response_model=MediaSchema) @@ -154,22 +177,31 @@ def update_media(media_id: int, req: MediaUpdateSchema, db: Session = Depends(ge media = db.query(models.StorageMedia).get(media_id) if not media: raise HTTPException(status_code=404, detail="Media not found") + if req.status: media.status = req.status if req.location: media.location = req.location - if req.config is not None: - current_config = {} - if media.extra_config: - try: - current_config = json.loads(media.extra_config) - except Exception: - pass - current_config.update(req.config) - media.extra_config = json.dumps(current_config) + if req.config: + media.extra_config = json.dumps(req.config) + db.commit() db.refresh(media) - return MediaSchema.from_orm_custom(media) + config = {} + if media.extra_config: + config = json.loads(media.extra_config) + + return MediaSchema( + id=media.id, + media_type=media.media_type, + identifier=media.identifier, + generation_tier=media.generation_tier, + capacity=media.capacity, + bytes_used=media.bytes_used, + location=media.location, + status=media.status, + config=config, + ) @router.delete("/media/{media_id}") @@ -184,7 +216,25 @@ def delete_media(media_id: int, db: Session = Depends(get_db)): return {"message": "Media deleted"} -# --- Browsing Endpoints (Optimized) --- +@router.post("/media/{media_id}/initialize") +def initialize_media(media_id: int, db: Session = Depends(get_db)): + from app.services.archiver import archiver_manager + + media = db.query(models.StorageMedia).get(media_id) + if not media: + raise HTTPException(status_code=404, detail="Media not found") + + provider = archiver_manager._get_provider(media) + if not provider: + raise HTTPException(status_code=400, detail="Unsupported media type") + + if provider.initialize_media(media.identifier): + return {"message": "Media initialized successfully"} + else: + raise HTTPException(status_code=500, detail="Failed to initialize media") + + +# --- Browsing Endpoints (Highly Optimized) --- @router.get("/browse", response_model=List[FileItemSchema]) @@ -193,74 +243,124 @@ def browse_index( include_ignored: bool = False, db: Session = Depends(get_db), ): + roots = get_source_roots(db) if path is None or path == "ROOT": - roots = get_source_roots(db) + # OPTIMIZED: Fetch all root stats in a single complex SQL aggregate results = [] for root in roots: - sql = text( - "SELECT COUNT(*), SUM(size), MAX(mtime) FROM filesystem_state WHERE file_path LIKE :prefix" - + (" AND is_ignored = 0" if not include_ignored else "") - ) - row = db.execute(sql, {"prefix": f"{root}%"}).fetchone() - if row and row[0] > 0: - results.append( - FileItemSchema( - name=root, - path=root, - type="directory", - size=row[1] or 0, - mtime=row[2] or 0, - ) + prefix = root if root.endswith("/") else root + "/" + stats_sql = text(""" + SELECT + MAX(CASE WHEN fv.id IS NULL AND fs.is_ignored = 0 THEN 1 ELSE 0 END) as is_vulnerable, + COUNT(DISTINCT CASE WHEN fv.id IS NOT NULL THEN fs.id END) as restorable_count, + COUNT(DISTINCT CASE WHEN fv.id IS NOT NULL AND rc.id IS NOT NULL THEN fs.id END) as queued_count + 
FROM filesystem_state fs + LEFT JOIN file_versions fv ON fv.filesystem_state_id = fs.id + LEFT JOIN restore_cart rc ON rc.filesystem_state_id = fs.id + WHERE fs.file_path LIKE :prefix + """) + stats = db.execute(stats_sql, {"prefix": f"{prefix}%"}).fetchone() + + is_vuln = stats[0] if stats else 0 + restorable = stats[1] if stats else 0 + queued = stats[2] if stats else 0 + + is_selected = restorable > 0 and queued == restorable + is_indeterminate = 0 < queued < restorable + + results.append( + FileItemSchema( + name=root, + path=root, + type="directory", + size=0, + mtime=0, + vulnerable=bool(is_vuln), + selected=is_selected, + indeterminate=is_indeterminate, ) + ) return results prefix = path if path.endswith("/") else path + "/" - ignore_filter = " AND is_ignored = 0" if not include_ignored else "" + ignore_filter = " AND fs.is_ignored = 0" if not include_ignored else "" results = [] - # Subdirectories - subdir_sql = text( - f""" - SELECT DISTINCT SUBSTR(file_path, LENGTH(:prefix) + 1, INSTR(SUBSTR(file_path, LENGTH(:prefix) + 1), '/') - 1) as dirname - FROM filesystem_state WHERE file_path LIKE :search_prefix AND SUBSTR(file_path, LENGTH(:prefix) + 1) LIKE '%/%' {ignore_filter} - """ - ) - subdirs = db.execute( - subdir_sql, {"prefix": prefix, "search_prefix": f"{prefix}%"} - ).fetchall() - for sd in subdirs: - if sd[0]: - results.append( - FileItemSchema( - name=sd[0], path=prefix + sd[0], type="directory", size=0, mtime=0 - ) - ) + # OPTIMIZED: Fetch ALL subdirectory metadata in a SINGLE aggregate query + # This replaces the N+1 query pattern that was killing performance + subdir_agg_sql = text(f""" + SELECT + SUBSTR(fs.file_path, LENGTH(:prefix) + 1, INSTR(SUBSTR(fs.file_path, LENGTH(:prefix) + 1), '/') - 1) as dirname, + MAX(CASE WHEN fv.id IS NULL AND fs.is_ignored = 0 THEN 1 ELSE 0 END) as is_vulnerable, + COUNT(DISTINCT CASE WHEN fv.id IS NOT NULL THEN fs.id END) as restorable_count, + COUNT(DISTINCT CASE WHEN fv.id IS NOT NULL AND rc.id IS NOT NULL THEN fs.id END) as queued_count + FROM filesystem_state fs + LEFT JOIN file_versions fv ON fv.filesystem_state_id = fs.id + LEFT JOIN restore_cart rc ON rc.filesystem_state_id = fs.id + WHERE fs.file_path LIKE :search_prefix + AND SUBSTR(fs.file_path, LENGTH(:prefix) + 1) LIKE '%/%' {ignore_filter} + GROUP BY dirname + """) + + subdirs = db.execute( + subdir_agg_sql, {"prefix": prefix, "search_prefix": f"{prefix}%"} + ).fetchall() + + for sd in subdirs: + name = sd[0] + is_vuln = sd[1] + restorable = sd[2] + queued = sd[3] + + is_selected = restorable > 0 and queued == restorable + is_indeterminate = 0 < queued < restorable + + results.append( + FileItemSchema( + name=name, + path=prefix + name, + type="directory", + size=0, + mtime=0, + vulnerable=bool(is_vuln), + selected=is_selected, + indeterminate=is_indeterminate, + ) + ) + + # OPTIMIZED: Fetch files with version/cart status in a single joined query + file_sql = text(f""" + SELECT + fs.file_path, fs.size, fs.mtime, fs.id, + MAX(CASE WHEN fv.id IS NOT NULL THEN 1 ELSE 0 END) as has_version, + MAX(CASE WHEN rc.id IS NOT NULL THEN 1 ELSE 0 END) as is_selected, + GROUP_CONCAT(DISTINCT sm.identifier) as media_list + FROM filesystem_state fs + LEFT JOIN file_versions fv ON fv.filesystem_state_id = fs.id + LEFT JOIN storage_media sm ON sm.id = fv.media_id + LEFT JOIN restore_cart rc ON rc.filesystem_state_id = fs.id + WHERE fs.file_path LIKE :search_prefix + AND SUBSTR(fs.file_path, LENGTH(:prefix) + 1) NOT LIKE '%/%' {ignore_filter} + GROUP BY fs.id + """) - # Files - file_sql = 
text( - f""" - SELECT name, file_path, size, mtime, id FROM ( - SELECT SUBSTR(file_path, LENGTH(:prefix) + 1) as name, file_path, size, mtime, id - FROM filesystem_state WHERE file_path LIKE :search_prefix {ignore_filter} - ) WHERE name NOT LIKE '%/%' - """ - ) files = db.execute( file_sql, {"prefix": prefix, "search_prefix": f"{prefix}%"} ).fetchall() + for f in files: - media_sql = text( - "SELECT m.identifier FROM storage_media m JOIN file_versions v ON v.media_id = m.id WHERE v.filesystem_state_id = :fid" - ) - media_list = [m[0] for m in db.execute(media_sql, {"fid": f[4]}).fetchall()] + media_list = f[6].split(",") if f[6] else [] + name = f[0].split("/")[-1] results.append( FileItemSchema( - name=f[0], - path=f[1], + name=name, + path=f[0], type="file", - size=f[2], - mtime=f[3], + size=f[1], + mtime=f[2], media=media_list, + vulnerable=not bool(f[4]), + selected=bool(f[5]), ) ) @@ -268,6 +368,55 @@ def browse_index( return results +@router.get("/search", response_model=List[FileItemSchema]) +def search_index(q: str, include_ignored: bool = False, db: Session = Depends(get_db)): + if not q or len(q) < 3: + return [] + + ignore_filter = " AND fs.is_ignored = 0" if not include_ignored else "" + + # Use FTS5 for instantaneous full-text search + sql = text(f""" + SELECT + fs.file_path, fs.size, fs.mtime, fs.id, + MAX(CASE WHEN fv.id IS NOT NULL THEN 1 ELSE 0 END) as has_version, + MAX(CASE WHEN rc.id IS NOT NULL THEN 1 ELSE 0 END) as is_selected, + GROUP_CONCAT(DISTINCT sm.identifier) as media_list + FROM filesystem_fts + JOIN filesystem_state fs ON fs.id = filesystem_fts.rowid + LEFT JOIN file_versions fv ON fv.filesystem_state_id = fs.id + LEFT JOIN storage_media sm ON sm.id = fv.media_id + LEFT JOIN restore_cart rc ON rc.filesystem_state_id = fs.id + WHERE filesystem_fts MATCH :query {ignore_filter} + GROUP BY fs.id + LIMIT 200 + """) + + safe_query = f'"{q}"' + files = db.execute(sql, {"query": safe_query}).fetchall() + + results = [] + for f in files: + name = f[0].split("/")[-1] + media_list = f[6].split(",") if f[6] else [] + + results.append( + FileItemSchema( + name=name, + path=f[0], + type="file", + size=f[1], + mtime=f[2], + media=media_list, + vulnerable=not bool(f[4]), + selected=bool(f[5]), + ) + ) + + results.sort(key=lambda x: x.name.lower()) + return results + + @router.get("/tree", response_model=List[TreeNodeSchema]) def get_index_tree( path: Optional[str] = None, @@ -311,6 +460,14 @@ def get_item_metadata(path: str, db: Session = Depends(get_db)): ) for v in file_state.versions ] + + is_selected = ( + db.query(models.RestoreCart) + .filter(models.RestoreCart.filesystem_state_id == file_state.id) + .first() + is not None + ) + return ItemMetadataSchema( id=file_state.id, # Now included file_path=file_state.file_path, @@ -320,8 +477,30 @@ def get_item_metadata(path: str, db: Session = Depends(get_db)): last_seen_timestamp=file_state.last_seen_timestamp, sha256_hash=file_state.sha256_hash, versions=versions, + vulnerable=not bool(file_state.versions), + selected=is_selected, ) prefix = path if path.endswith("/") else path + "/" + # Check recursive vulnerability and selection + stats_sql = text(""" + SELECT + MAX(CASE WHEN fv.id IS NULL AND fs.is_ignored = 0 THEN 1 ELSE 0 END) as is_vulnerable, + COUNT(DISTINCT CASE WHEN fv.id IS NOT NULL THEN fs.id END) as restorable_count, + COUNT(DISTINCT CASE WHEN fv.id IS NOT NULL AND rc.id IS NOT NULL THEN fs.id END) as queued_count + FROM filesystem_state fs + LEFT JOIN file_versions fv ON fv.filesystem_state_id = fs.id + LEFT 
JOIN restore_cart rc ON rc.filesystem_state_id = fs.id + WHERE fs.file_path LIKE :prefix + """) + stats = db.execute(stats_sql, {"prefix": f"{prefix}%"}).fetchone() + + is_vuln = stats[0] if stats else 0 + restorable = stats[1] if stats else 0 + queued = stats[2] if stats else 0 + + is_selected = restorable > 0 and queued == restorable + is_indeterminate = 0 < queued < restorable + sql = text( "SELECT COUNT(*), SUM(size), MAX(mtime), MAX(last_seen_timestamp) FROM filesystem_state WHERE file_path LIKE :prefix AND is_ignored = 0" ) @@ -334,6 +513,9 @@ def get_item_metadata(path: str, db: Session = Depends(get_db)): mtime=row[2] or 0, last_seen_timestamp=row[3] or datetime.now(timezone.utc), child_count=row[0], + vulnerable=bool(is_vuln), + selected=is_selected, + indeterminate=is_indeterminate, ) raise HTTPException(status_code=404, detail="Item not found") diff --git a/backend/app/api/restores.py b/backend/app/api/restores.py index 02d8e73..06960dd 100644 --- a/backend/app/api/restores.py +++ b/backend/app/api/restores.py @@ -1,9 +1,13 @@ -from fastapi import APIRouter, HTTPException, Depends -from typing import List +from fastapi import APIRouter, HTTPException, Depends, BackgroundTasks +from typing import List, Optional from pydantic import BaseModel -from sqlalchemy.orm import Session -from app.db.database import get_db +from sqlalchemy.orm import Session, joinedload +from sqlalchemy import text +from app.db.database import get_db, SessionLocal from app.db import models +from datetime import datetime, timezone +from app.services.archiver import archiver_manager +from app.services.scanner import JobManager router = APIRouter(prefix="/restores", tags=["Restores"]) @@ -33,16 +37,185 @@ class RestoreManifestSchema(BaseModel): media_required: List[ManifestMediaRequirement] +class RestoreRequest(BaseModel): + destination: str + + class DirectoryCartRequest(BaseModel): path: str +class CartFileItemSchema(BaseModel): + name: str + path: str + type: str + size: Optional[int] = None + media: List[str] = [] + + +class CartTreeNodeSchema(BaseModel): + name: str + path: str + has_children: bool = False + + # --- Endpoints --- +@router.post("/trigger") +def trigger_restore( + req: RestoreRequest, + background_tasks: BackgroundTasks, + db: Session = Depends(get_db), +): + cart_items = db.query(models.RestoreCart).all() + if not cart_items: + raise HTTPException(status_code=400, detail="Recovery queue is empty") + + job = JobManager.create_job(db, "RESTORE") + + def run_restore_task(): + db_inner = SessionLocal() + try: + archiver_manager.run_restore( + db_inner, destination=req.destination, job_id=job.id + ) + finally: + db_inner.close() + + background_tasks.add_task(run_restore_task) + return {"message": "Restore job initiated", "job_id": job.id} + + +@router.get("/cart/browse", response_model=List[CartFileItemSchema]) +def browse_cart(path: Optional[str] = None, db: Session = Depends(get_db)): + from app.api.inventory import get_source_roots + + roots = get_source_roots(db) + + if path is None or path == "ROOT": + results = [] + for root in roots: + # Check if any file in the cart is under this root + prefix = root if root.endswith("/") else root + "/" + sql = text(""" + SELECT EXISTS ( + SELECT 1 FROM filesystem_state fs + JOIN restore_cart rc ON rc.filesystem_state_id = fs.id + WHERE fs.file_path LIKE :prefix + ) + """) + if db.execute(sql, {"prefix": f"{prefix}%"}).scalar(): + results.append( + CartFileItemSchema(name=root, path=root, type="directory") + ) + return results + + prefix = path if 
path.endswith("/") else path + "/" + results = [] + + # Subdirectories in cart + subdir_sql = text(""" + SELECT DISTINCT SUBSTR(fs.file_path, LENGTH(:prefix) + 1, INSTR(SUBSTR(fs.file_path, LENGTH(:prefix) + 1), '/') - 1) as dirname + FROM filesystem_state fs + JOIN restore_cart rc ON rc.filesystem_state_id = fs.id + WHERE fs.file_path LIKE :search_prefix + AND SUBSTR(fs.file_path, LENGTH(:prefix) + 1) LIKE '%/%' + """) + subdirs = db.execute( + subdir_sql, {"prefix": prefix, "search_prefix": f"{prefix}%"} + ).fetchall() + for sd in subdirs: + if sd[0]: + results.append( + CartFileItemSchema(name=sd[0], path=prefix + sd[0], type="directory") + ) + + # Files in cart + file_sql = text(""" + SELECT fs.file_path, fs.size, fs.id, GROUP_CONCAT(sm.identifier) as media_list + FROM filesystem_state fs + JOIN restore_cart rc ON rc.filesystem_state_id = fs.id + JOIN file_versions fv ON fv.filesystem_state_id = fs.id + JOIN storage_media sm ON sm.id = fv.media_id + WHERE fs.file_path LIKE :search_prefix + AND SUBSTR(fs.file_path, LENGTH(:prefix) + 1) NOT LIKE '%/%' + GROUP BY fs.id + """) + files = db.execute( + file_sql, {"prefix": prefix, "search_prefix": f"{prefix}%"} + ).fetchall() + for f in files: + results.append( + CartFileItemSchema( + name=f[0].split("/")[-1], + path=f[0], + type="file", + size=f[1], + media=f[3].split(",") if f[3] else [], + ) + ) + + results.sort(key=lambda x: (x.type != "directory", x.name.lower())) + return results + + +@router.get("/cart/tree", response_model=List[CartTreeNodeSchema]) +def get_cart_tree(path: Optional[str] = None, db: Session = Depends(get_db)): + from app.api.inventory import get_source_roots + + roots = get_source_roots(db) + + if path is None or path == "ROOT": + results = [] + for root in roots: + prefix = root if root.endswith("/") else root + "/" + sql = text(""" + SELECT EXISTS ( + SELECT 1 FROM filesystem_state fs + JOIN restore_cart rc ON rc.filesystem_state_id = fs.id + WHERE fs.file_path LIKE :prefix + ) + """) + if db.execute(sql, {"prefix": f"{prefix}%"}).scalar(): + results.append( + CartTreeNodeSchema(name=root, path=root, has_children=True) + ) + return results + + prefix = path if path.endswith("/") else path + "/" + subdir_sql = text(""" + SELECT DISTINCT SUBSTR(fs.file_path, LENGTH(:prefix) + 1, INSTR(SUBSTR(fs.file_path, LENGTH(:prefix) + 1), '/') - 1) as dirname + FROM filesystem_state fs + JOIN restore_cart rc ON rc.filesystem_state_id = fs.id + WHERE fs.file_path LIKE :search_prefix + AND SUBSTR(fs.file_path, LENGTH(:prefix) + 1) LIKE '%/%' + """) + subdirs = db.execute( + subdir_sql, {"prefix": prefix, "search_prefix": f"{prefix}%"} + ).fetchall() + results = [ + CartTreeNodeSchema(name=sd[0], path=prefix + sd[0], has_children=True) + for sd in subdirs + if sd[0] + ] + results.sort(key=lambda x: x.name.lower()) + return results + + @router.get("/cart", response_model=List[CartItemSchema]) def list_cart(db: Session = Depends(get_db)): - items = db.query(models.RestoreCart).all() + # OPTIMIZED: Use joinedload to fetch all versions in a single query + items = ( + db.query(models.RestoreCart) + .options( + joinedload(models.RestoreCart.file_state) + .joinedload(models.FilesystemState.versions) + .joinedload(models.FileVersion.media) + ) + .all() + ) + results = [] for item in items: media_ids = [v.media.identifier for v in item.file_state.versions] @@ -57,6 +230,57 @@ def list_cart(db: Session = Depends(get_db)): return results +# NOTE: Static routes MUST come before parameterized ones like /cart/{file_id} + + 
+@router.post("/cart/clear") +def clear_cart(db: Session = Depends(get_db)): + db.query(models.RestoreCart).delete(synchronize_session=False) + db.commit() + return {"message": "Recovery queue cleared"} + + +@router.post("/cart/directory") +def add_directory_to_cart(req: DirectoryCartRequest, db: Session = Depends(get_db)): + from loguru import logger + + path = req.path + if path == "ROOT": + prefix_query = "%" + exact_path = "ROOT" + else: + prefix = path if path.endswith("/") else path + "/" + prefix_query = f"{prefix}%" + exact_path = path + + logger.info(f"Adding directory to queue: {path} (prefix: {prefix_query})") + + insert_sql = text(""" + INSERT INTO restore_cart (filesystem_state_id, created_at) + SELECT DISTINCT fs.id, :now + FROM filesystem_state fs + WHERE (fs.file_path = :path OR fs.file_path LIKE :prefix) + AND EXISTS (SELECT 1 FROM file_versions fv WHERE fv.filesystem_state_id = fs.id) + AND fs.id NOT IN (SELECT filesystem_state_id FROM restore_cart) + """) + + db.execute( + insert_sql, + { + "path": exact_path, + "prefix": prefix_query, + "now": datetime.now(timezone.utc).isoformat(), + }, + ) + + db.commit() + + total_in_cart = db.query(models.RestoreCart).count() + logger.info(f"Directory add complete. Total in cart: {total_in_cart}") + + return {"message": f"Added restorable items from {path} to recovery queue"} + + @router.post("/cart/{file_id}") def add_to_cart(file_id: int, db: Session = Depends(get_db)): existing = ( @@ -65,7 +289,7 @@ def add_to_cart(file_id: int, db: Session = Depends(get_db)): .first() ) if existing: - return {"message": "Already in cart"} + return {"message": "Already in recovery queue"} file_state = db.query(models.FilesystemState).get(file_id) if not file_state or not file_state.versions: @@ -74,39 +298,7 @@ def add_to_cart(file_id: int, db: Session = Depends(get_db)): new_item = models.RestoreCart(filesystem_state_id=file_id) db.add(new_item) db.commit() - return {"message": "Added to cart"} - - -@router.post("/cart/directory") -def add_directory_to_cart(req: DirectoryCartRequest, db: Session = Depends(get_db)): - prefix = req.path if req.path.endswith("/") else req.path + "/" - - # Find all files under this path that have at least one version - eligible_files = ( - db.query(models.FilesystemState) - .filter( - models.FilesystemState.file_path.like(f"{prefix}%"), - models.FilesystemState.versions.any(), - ) - .all() - ) - - if not eligible_files: - raise HTTPException( - status_code=404, detail="No restorable files found in this directory" - ) - - # Get current cart to avoid duplicates - in_cart = {c.filesystem_state_id for c in db.query(models.RestoreCart).all()} - - added_count = 0 - for f in eligible_files: - if f.id not in in_cart: - db.add(models.RestoreCart(filesystem_state_id=f.id)) - added_count += 1 - - db.commit() - return {"message": f"Added {added_count} files from {req.path} to cart"} + return {"message": "Added to recovery queue"} @router.delete("/cart/{item_id}") @@ -115,43 +307,49 @@ def remove_from_cart(item_id: int, db: Session = Depends(get_db)): if item: db.delete(item) db.commit() - return {"message": "Removed from cart"} - - -@router.post("/cart/clear") -def clear_cart(db: Session = Depends(get_db)): - db.query(models.RestoreCart).delete() - db.commit() - return {"message": "Cart cleared"} + return {"message": "Removed from recovery queue"} @router.get("/manifest", response_model=RestoreManifestSchema) def get_manifest(db: Session = Depends(get_db)): - cart_items = db.query(models.RestoreCart).all() - if not cart_items: 
- return RestoreManifestSchema(total_files=0, total_size=0, media_required=[]) + # OPTIMIZED: Use a single raw SQL query to calculate the entire manifest + # This completely avoids loading thousands of ORM objects into memory + sql = text(""" + SELECT + sm.identifier, + sm.media_type, + COUNT(DISTINCT fs.id) as file_count, + SUM(fv.offset_end - fv.offset_start) as total_size + FROM filesystem_state fs + JOIN restore_cart rc ON rc.filesystem_state_id = fs.id + JOIN file_versions fv ON fv.filesystem_state_id = fs.id + JOIN storage_media sm ON sm.id = fv.media_id + GROUP BY sm.id + """) - total_size = sum(item.file_state.size for item in cart_items) - media_map = {} + rows = db.execute(sql).fetchall() - for item in cart_items: - if not item.file_state.versions: - continue - primary_v = item.file_state.versions[0] - ident = primary_v.media.identifier - m_type = primary_v.media.media_type - if ident not in media_map: - media_map[ident] = { - "identifier": ident, - "media_type": m_type, - "file_count": 0, - "total_size": 0, - } - media_map[ident]["file_count"] += 1 - media_map[ident]["total_size"] += item.file_state.size + requirements = [] + total_size = 0 + + # We also need the total unique files in the cart (a file might be on multiple media) + total_unique_files = db.query(models.RestoreCart).count() + + for row in rows: + requirements.append( + ManifestMediaRequirement( + identifier=row[0], + media_type=row[1], + file_count=row[2], + total_size=row[3], + ) + ) + total_size += row[3] - requirements = [ManifestMediaRequirement(**m) for m in media_map.values()] requirements.sort(key=lambda x: x.identifier) + return RestoreManifestSchema( - total_files=len(cart_items), total_size=total_size, media_required=requirements + total_files=total_unique_files, + total_size=total_size, + media_required=requirements, ) diff --git a/backend/app/api/system.py b/backend/app/api/system.py index 1b996a8..feffb1c 100644 --- a/backend/app/api/system.py +++ b/backend/app/api/system.py @@ -1,6 +1,8 @@ -from fastapi import APIRouter, HTTPException, Depends, BackgroundTasks -from fastapi.responses import StreamingResponse +from fastapi import APIRouter, HTTPException, Depends, BackgroundTasks, File, UploadFile +from fastapi.responses import StreamingResponse, FileResponse import os +import shutil +import sqlite3 import json import asyncio from datetime import datetime @@ -79,6 +81,10 @@ class SettingSchema(BaseModel): value: str +class TestNotificationRequest(BaseModel): + url: str + + # --- Helpers --- def get_source_roots(db: Session) -> List[str]: setting = ( @@ -176,6 +182,15 @@ def get_dashboard_stats(db: Session = Depends(get_db)): for mtype, count in media_counts: media_dist[mtype.upper()] = count + # Get last successful scan time from jobs history + last_scan = ( + db.query(models.Job) + .filter(models.Job.job_type == "SCAN", models.Job.status == "COMPLETED") + .order_by(models.Job.completed_at.desc()) + .first() + ) + last_scan_time = last_scan.completed_at if last_scan else None + return DashboardStatsSchema( total_files_indexed=total_count, total_data_size=total_size, @@ -185,7 +200,7 @@ def get_dashboard_stats(db: Session = Depends(get_db)): ignored_files_count=ignored_count, ignored_data_size=ignored_size, redundancy_ratio=round(redundancy, 2), - last_scan_time=scanner_manager.last_run_time, + last_scan_time=last_scan_time, media_distribution=media_dist, ) @@ -224,7 +239,15 @@ async def stream_jobs(): db.close() await asyncio.sleep(1) - return StreamingResponse(event_generator(), 
media_type="text/event-stream") + return StreamingResponse( + event_generator(), + media_type="text/event-stream", + headers={ + "Cache-Control": "no-cache", + "Connection": "keep-alive", + "X-Accel-Buffering": "no", # Critical for Nginx + }, + ) @router.post("/scan") @@ -305,6 +328,50 @@ def browse_path(path: Optional[str] = None, db: Session = Depends(get_db)): return results +@router.get("/search", response_model=List[FileItemSchema]) +def search_system(q: str, include_ignored: bool = False, db: Session = Depends(get_db)): + if not q or len(q) < 3: + return [] + + ignore_filter = " AND fs.is_ignored = 0" if not include_ignored else "" + + # Use FTS5 for instantaneous full-text search + sql = text(f""" + SELECT fs.file_path, fs.size, fs.mtime, fs.id, fs.is_ignored + FROM filesystem_fts + JOIN filesystem_state fs ON fs.id = filesystem_fts.rowid + WHERE filesystem_fts MATCH :query {ignore_filter} + LIMIT 200 + """) + + safe_query = f'"{q}"' + files = db.execute(sql, {"query": safe_query}).fetchall() + + tracking_map = {s.path: s.action for s in db.query(models.TrackedSource).all()} + spec = get_exclusion_spec(db) + + results = [] + for f in files: + path = f[0] + name = path.split("/")[-1] + tracked, _ = get_tracking_status(path, tracking_map, spec) + + results.append( + FileItemSchema( + name=name, + path=path, + type="file", + size=f[1], + mtime=f[2], + tracked=tracked, + ignored=f[4], + ) + ) + + results.sort(key=lambda x: x.name.lower()) + return results + + @router.post("/track/batch") def track_batch(req: BatchTrackRequest, db: Session = Depends(get_db)): for path in req.tracks: @@ -339,6 +406,8 @@ def get_settings(db: Session = Depends(get_db)): @router.post("/settings") def update_setting(req: SettingSchema, db: Session = Depends(get_db)): + from app.services.scheduler import scheduler_manager + setting = ( db.query(models.SystemSetting) .filter(models.SystemSetting.key == req.key) @@ -349,12 +418,135 @@ def update_setting(req: SettingSchema, db: Session = Depends(get_db)): else: db.add(models.SystemSetting(key=req.key, value=req.value)) db.commit() + + # Update scheduler if it's a schedule setting + if req.key == "schedule_scan": + scheduler_manager.add_job( + "system_scan", scheduler_manager.run_system_scan, req.value + ) + elif req.key == "schedule_archival": + scheduler_manager.add_job( + "system_archival", scheduler_manager.run_system_archival, req.value + ) + return {"message": "Updated"} +@router.post("/notifications/test") +def test_notification(req: TestNotificationRequest): + from app.services.notifications import notification_manager + + success = notification_manager.test_notification(req.url) + if success: + return {"message": "Test notification sent successfully"} + else: + raise HTTPException( + status_code=500, + detail="Failed to send test notification. 
Check your Apprise URL.", + ) + + +@router.get("/database/export") +def export_database(): + db_path = "tapehoard.db" + if not os.path.exists(db_path): + raise HTTPException(status_code=404, detail="Database file not found") + + # We create a temporary copy to ensure we don't return a partially locked file + export_path = "tapehoard_export.db" + try: + # Use sqlite3 backup API for a clean copy of the live DB + src = sqlite3.connect(db_path) + dest = sqlite3.connect(export_path) + with dest: + src.backup(dest) + src.close() + dest.close() + + return FileResponse( + export_path, + filename=f"tapehoard_{datetime.now().strftime('%Y%m%d_%H%M%S')}.db", + background=BackgroundTasks().add_task( + lambda: os.remove(export_path) if os.path.exists(export_path) else None + ), + ) + except Exception as e: + if os.path.exists(export_path): + os.remove(export_path) + raise HTTPException(status_code=500, detail=f"Export failed: {str(e)}") + + +@router.post("/database/import") +async def import_database(file: UploadFile = File(...), db: Session = Depends(get_db)): + # Validate it's a sqlite file + filename = file.filename or "" + if not filename.endswith(".db"): + raise HTTPException( + status_code=400, detail="Invalid file type. Must be a .db file." + ) + + temp_path = "tapehoard_import.db" + try: + with open(temp_path, "wb") as buffer: + shutil.copyfileobj(file.file, buffer) + + # Verify it's a valid SQLite DB + conn = sqlite3.connect(temp_path) + conn.execute("SELECT name FROM sqlite_master WHERE type='table'") + conn.close() + + # Replace the live DB + # To do this safely while running, we use the backup API to overwrite our own live DB + db_path = "tapehoard.db" + src = sqlite3.connect(temp_path) + dest = sqlite3.connect(db_path) + with dest: + src.backup(dest) + src.close() + dest.close() + + return { + "message": "Database restored successfully. Application state has been updated." 
+ } + except Exception as e: + raise HTTPException(status_code=500, detail=f"Import failed: {str(e)}") + finally: + if os.path.exists(temp_path): + os.remove(temp_path) + + @router.get("/tree") def get_tree(path: Optional[str] = None, db: Session = Depends(get_db)): roots = get_source_roots(db) if path is None or path == "ROOT": return [{"name": r, "path": r, "has_children": True} for r in roots] - return [] + + if not os.path.exists(path) or not os.path.isdir(path): + return [] + + results = [] + try: + with os.scandir(path) as it: + for entry in it: + if entry.is_dir(): + # Check if it has any children to determine has_children + has_children = False + try: + with os.scandir(entry.path) as sub_it: + if any(sub_entry.is_dir() for sub_entry in sub_it): + has_children = True + except Exception: + pass + + results.append( + { + "name": entry.name, + "path": entry.path, + "has_children": has_children, + } + ) + except Exception: + return [] + + results.sort(key=lambda x: x["name"].lower()) + return results diff --git a/backend/app/db/database.py b/backend/app/db/database.py index 8acba40..b6927ca 100644 --- a/backend/app/db/database.py +++ b/backend/app/db/database.py @@ -1,13 +1,18 @@ +import os from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker # Dependency mapping for FastAPI # Using standard relative path, but easily overridden with env vars later -SQLALCHEMY_DATABASE_URL = "sqlite:///tapehoard.db" +SQLALCHEMY_DATABASE_URL = os.getenv("DATABASE_URL", "sqlite:///tapehoard.db") # connect_args={"check_same_thread": False} is required for SQLite in FastAPI engine = create_engine( - SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False} + SQLALCHEMY_DATABASE_URL, + connect_args={"check_same_thread": False}, + pool_size=20, + max_overflow=10, + pool_timeout=30, ) SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) diff --git a/backend/app/db/models.py b/backend/app/db/models.py index 46034d3..37fb98e 100644 --- a/backend/app/db/models.py +++ b/backend/app/db/models.py @@ -76,6 +76,17 @@ class FileVersion(Base): file_number: Mapped[str] = mapped_column(String) # Tape position or object path offset_in_tar: Mapped[Optional[int]] = mapped_column(Integer) + # Split File Support + is_split: Mapped[bool] = mapped_column(Boolean, default=False) + split_id: Mapped[Optional[str]] = mapped_column( + String, nullable=True + ) # UUID grouping parts + offset_start: Mapped[int] = mapped_column(BigInteger, default=0) + offset_end: Mapped[int] = mapped_column(BigInteger, default=0) + created_at: Mapped[datetime] = mapped_column( + DateTime, default=lambda: datetime.now(timezone.utc) + ) + file_state: Mapped["FilesystemState"] = relationship(back_populates="versions") media: Mapped["StorageMedia"] = relationship(back_populates="versions") diff --git a/backend/app/main.py b/backend/app/main.py index ca43483..3b028e3 100644 --- a/backend/app/main.py +++ b/backend/app/main.py @@ -1,16 +1,32 @@ +import os +from contextlib import asynccontextmanager from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware +from fastapi.staticfiles import StaticFiles +from fastapi.responses import FileResponse, JSONResponse from app.api import system, inventory, backups, restores from app.db.database import engine from app.db import models +from app.services.scheduler import scheduler_manager -# Create tables +# Create standard tables models.Base.metadata.create_all(bind=engine) + +@asynccontextmanager +async def lifespan(app: FastAPI): + # Startup + 
scheduler_manager.start() + yield + # Shutdown + scheduler_manager.stop() + + app = FastAPI( title="TapeHoard API", description="A robust, index-driven Tape Backup Manager", version="0.1.0", + lifespan=lifespan, ) # Configure CORS @@ -28,7 +44,24 @@ app.include_router(inventory.router) app.include_router(backups.router) app.include_router(restores.router) +# Mount frontend static files +# We expect the 'build' directory to exist at the root level of the app +static_path = "static" +if os.path.exists(static_path): + app.mount("/", StaticFiles(directory=static_path, html=True), name="static") -@app.get("/") -def read_root(): - return {"message": "Welcome to TapeHoard API"} + # Add catch-all route for SPA (SvelteKit) + @app.exception_handler(404) + async def spa_catch_all(request, exc): + # If the request is for an API endpoint, return 404 normally + if request.url.path.startswith( + ("/system", "/inventory", "/backups", "/restores") + ): + return JSONResponse(status_code=404, content={"detail": "Not Found"}) + # Otherwise, serve the SPA index + return FileResponse(os.path.join(static_path, "index.html")) + + +@app.get("/health") +def health_check(): + return {"status": "healthy"} diff --git a/backend/app/providers/base.py b/backend/app/providers/base.py index 74e6e51..d3a100d 100644 --- a/backend/app/providers/base.py +++ b/backend/app/providers/base.py @@ -16,6 +16,13 @@ class AbstractStorageProvider(ABC): """ pass + @abstractmethod + def initialize_media(self, media_id: str) -> bool: + """ + Initializes raw media by writing the tapehoard identifier/label. + """ + pass + @abstractmethod def prepare_for_write(self, media_id: str) -> bool: """ diff --git a/backend/app/providers/cloud.py b/backend/app/providers/cloud.py index f1b12c3..8026fd2 100644 --- a/backend/app/providers/cloud.py +++ b/backend/app/providers/cloud.py @@ -28,6 +28,21 @@ class CloudStorageProvider(AbstractStorageProvider): logger.error(f"Failed to identify cloud bucket {self.bucket_name}: {e}") return None + def initialize_media(self, media_id: str) -> bool: + """Initializes cloud media by writing a dummy object to verify access""" + try: + self.s3.head_bucket(Bucket=self.bucket_name) + self.s3.put_object( + Bucket=self.bucket_name, + Key=".tapehoard_id", + Body=media_id.encode("utf-8"), + ) + logger.info(f"Initialized Cloud bucket {media_id}") + return True + except Exception as e: + logger.error(f"Failed to initialize cloud bucket {self.bucket_name}: {e}") + return False + def prepare_for_write(self, media_id: str) -> bool: return self.identify_media() == media_id diff --git a/backend/app/providers/hdd.py b/backend/app/providers/hdd.py index 7713376..a4e6a1b 100644 --- a/backend/app/providers/hdd.py +++ b/backend/app/providers/hdd.py @@ -45,6 +45,21 @@ class OfflineHDDProvider(AbstractStorageProvider): return None + def initialize_media(self, media_id: str) -> bool: + """Initializes HDD by writing the .tapehoard_id file""" + try: + os.makedirs(self.mount_base, exist_ok=True) + id_file = os.path.join(self.mount_base, ".tapehoard_id") + with open(id_file, "w") as f: + f.write(media_id) + archive_dir = os.path.join(self.mount_base, "tapehoard_backups", "archives") + os.makedirs(archive_dir, exist_ok=True) + logger.info(f"Initialized HDD media {media_id} at {self.mount_base}") + return True + except Exception as e: + logger.error(f"HDD Provider: Failed to initialize media: {e}") + return False + def prepare_for_write(self, media_id: str) -> bool: """Verifies the disk is mounted and the identifier matches""" current_id = 
self.identify_media() diff --git a/backend/app/providers/tape.py b/backend/app/providers/tape.py index e2233e4..5cbe8b7 100644 --- a/backend/app/providers/tape.py +++ b/backend/app/providers/tape.py @@ -5,8 +5,11 @@ from loguru import logger class LTOProvider(AbstractStorageProvider): - def __init__(self, device_path: str = "/dev/nst0"): + def __init__( + self, device_path: str = "/dev/nst0", encryption_key: Optional[str] = None + ): self.device_path = device_path + self.encryption_key = encryption_key def get_name(self) -> str: return "LTO Tape" @@ -18,9 +21,48 @@ class LTOProvider(AbstractStorageProvider): logger.error(f"Tape command 'mt {command}' failed: {e}") raise + def _setup_encryption(self): + """Configures hardware encryption on the drive using stenc""" + if not self.encryption_key: + # Explicitly disable encryption if no key provided + try: + subprocess.run( + ["stenc", "-f", self.device_path, "--off"], capture_output=True + ) + except Exception: + pass + return + + try: + logger.info(f"Setting LTO hardware encryption key for {self.device_path}") + # stenc expects a 32-byte hex key (256-bit) + # We use a pipe to avoid leaving the key in the process list + proc = subprocess.Popen( + ["stenc", "-f", self.device_path, "--import", "-k", "-"], + stdin=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + _, stderr = proc.communicate(input=self.encryption_key) + + if proc.returncode != 0: + logger.error(f"Failed to load encryption key: {stderr}") + raise RuntimeError(f"LTO Encryption Setup Failed: {stderr}") + + # Verify encryption is on + subprocess.run(["stenc", "-f", self.device_path, "--on"], check=True) + logger.info("LTO Hardware Encryption ENABLED and LOCKED") + + except Exception as e: + logger.error(f"Hardware encryption error: {e}") + raise + def identify_media(self) -> Optional[str]: """Reads the label from the beginning of the tape (File Mark 0)""" try: + # We must set up encryption BEFORE trying to read the label if it's an encrypted tape + self._setup_encryption() + self._run_mt("rewind") # Try to read the label file result = subprocess.run( @@ -35,6 +77,43 @@ class LTOProvider(AbstractStorageProvider): logger.error(f"Failed to identify tape: {e}") return None + def initialize_media(self, media_id: str) -> bool: + """Writes the identifier to File Mark 0 on the tape""" + try: + self._run_mt("rewind") + self._run_mt("weof") # Ensure we are starting clean + self._run_mt("rewind") + + import tempfile + import tarfile + + with tempfile.NamedTemporaryFile("w") as tmp_lbl: + tmp_lbl.write(media_id) + tmp_lbl.flush() + + with tempfile.NamedTemporaryFile("wb") as tmp_tar: + with tarfile.open(tmp_tar.name, "w") as tar: + tar.add(tmp_lbl.name, arcname=".tapehoard_label") + + # Write to tape + with open(tmp_tar.name, "rb") as f: + proc = subprocess.Popen( + ["dd", f"of={self.device_path}", "bs=256k"], + stdin=subprocess.PIPE, + ) + if proc.stdin: + proc.stdin.write(f.read()) + proc.stdin.close() + proc.wait() + + self._run_mt("weof") + self._run_mt("rewind") + logger.info(f"Initialized LTO tape with label {media_id}") + return True + except Exception as e: + logger.error(f"Failed to initialize tape: {e}") + return False + def prepare_for_write(self, media_id: str) -> bool: """Fast-forwards to the end of the data to prepare for appending""" current_id = self.identify_media() @@ -46,10 +125,42 @@ class LTOProvider(AbstractStorageProvider): self._run_mt("eod") return True + def _get_current_file_number(self) -> str: + """Parses 'mt status' to get the current tape file 
position""" + try: + result = subprocess.run( + ["mt", "-f", self.device_path, "status"], + capture_output=True, + text=True, + check=True, + ) + # mt status output varies by OS/Driver, but usually contains 'File number=X' + # We look for a line like 'File number=2, block number=0' + import re + + match = re.search(r"File number=(\d+)", result.stdout) + if match: + return match.group(1) + + # Alternative format + match = re.search(r"file number (\d+)", result.stdout) + if match: + return match.group(1) + + logger.warning( + f"Could not parse file number from mt status: {result.stdout}" + ) + except Exception as e: + logger.error(f"Failed to get tape status: {e}") + return "0" + def write_archive(self, media_id: str, stream: BinaryIO) -> str: """Writes the stream to tape and returns the file number index""" logger.info(f"Streaming archive to LTO {media_id} at current head position") + # Get position BEFORE writing + file_num = self._get_current_file_number() + proc = subprocess.Popen( ["dd", f"of={self.device_path}", "bs=256k"], stdin=subprocess.PIPE ) @@ -66,7 +177,10 @@ class LTOProvider(AbstractStorageProvider): proc.wait() - return "unknown" # To be refined with 'mt status' parsing + # After writing, we should be at the NEXT file mark. + # But tar/dd usually leaves us at the end of the written data. + # We'll return the position we started at as the 'location_id' + return file_num def finalize_media(self, media_id: str): self._run_mt("offline") # Rewind and eject diff --git a/backend/app/services/archiver.py b/backend/app/services/archiver.py index 338a4e7..62be483 100644 --- a/backend/app/services/archiver.py +++ b/backend/app/services/archiver.py @@ -1,11 +1,12 @@ import os import tarfile import json +import uuid from datetime import datetime, timezone from typing import List, Optional, Dict, Any from loguru import logger from sqlalchemy.orm import Session -from sqlalchemy import not_ +from sqlalchemy import not_, func from app.db import models from app.services.scanner import JobManager from app.providers.hdd import OfflineHDDProvider @@ -13,6 +14,39 @@ from app.providers.tape import LTOProvider from app.providers.cloud import CloudStorageProvider +class RangeFile: + """A file-like object that only reads a specific range of a file.""" + + def __init__(self, file_path: str, offset_start: int, length: int): + self.file_path = file_path + self.offset_start = offset_start + self.length = length + self.remaining = length + self.file = open(file_path, "rb") + self.file.seek(offset_start) + + def read(self, size: int = -1) -> bytes: + if self.remaining <= 0: + return b"" + + to_read = self.remaining + if size > 0: + to_read = min(size, self.remaining) + + data = self.file.read(to_read) + self.remaining -= len(data) + return data + + def close(self): + self.file.close() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + class ArchiverService: def __init__(self, staging_dir: str = "/staging"): self.staging_dir = staging_dir @@ -33,7 +67,10 @@ class ArchiverService: pass if media.media_type == "tape": - return LTOProvider(device_path=config.get("device_path", "/dev/nst0")) + return LTOProvider( + device_path=config.get("device_path", "/dev/nst0"), + encryption_key=config.get("encryption_key"), + ) elif media.media_type == "hdd": return OfflineHDDProvider( mount_base=config.get("mount_path", "/mnt/backup") @@ -42,22 +79,40 @@ class ArchiverService: return CloudStorageProvider(config=config) return None - def get_eligible_files(self, db: 
Session) -> List[models.FilesystemState]: - """Returns files that are indexed but have no version on any media""" + def get_eligible_files(self, db: Session): + """Returns files that are indexed but have no version, or are partially backed up""" + # Optimized query to find files that are not fully covered by their versions + # A file is eligible if sum(offset_end - offset_start) < size + + subquery = ( + db.query( + models.FileVersion.filesystem_state_id, + func.sum( + models.FileVersion.offset_end - models.FileVersion.offset_start + ).label("covered_size"), + ) + .group_by(models.FileVersion.filesystem_state_id) + .subquery() + ) + return ( db.query(models.FilesystemState) + .outerjoin( + subquery, models.FilesystemState.id == subquery.c.filesystem_state_id + ) .filter( models.FilesystemState.is_indexed, not_(models.FilesystemState.is_ignored), - not_(models.FilesystemState.versions.any()), + (subquery.c.covered_size.is_(None)) + | (subquery.c.covered_size < models.FilesystemState.size), ) - .all() + .yield_per(1000) ) def create_backup_set( self, db: Session, media_id: int, max_bytes: Optional[int] = None - ) -> List[models.FilesystemState]: - """Selects a batch of files that fit on the media's remaining capacity""" + ) -> List[Dict[str, Any]]: + """Selects a batch of files/chunks that fit on the media's remaining capacity""" media = db.query(models.StorageMedia).get(media_id) if not media: return [] @@ -68,14 +123,72 @@ class ArchiverService: eligible = self.get_eligible_files(db) - # Simple Greedy Bin-Packing backup_set = [] current_size = 0 + # We need at least some space to make it worthwhile + MIN_CHUNK_SIZE = 100 * 1024 * 1024 # 100MB + for f in eligible: - if current_size + f.size <= remaining_capacity: - backup_set.append(f) - current_size += f.size + if current_size >= remaining_capacity: + break + + # Calculate how much of this file is already backed up + # For simplicity, we assume we always backup from the end of the last chunk + covered_size = ( + db.query( + func.sum( + models.FileVersion.offset_end - models.FileVersion.offset_start + ) + ) + .filter(models.FileVersion.filesystem_state_id == f.id) + .scalar() + or 0 + ) + + remaining_file_size = f.size - covered_size + + # Allow 0-byte files if they have no versions yet + if remaining_file_size <= 0 and f.size > 0: + continue + if f.size == 0: + # Check if it already has a version to avoid infinite loop + has_version = ( + db.query(models.FileVersion) + .filter(models.FileVersion.filesystem_state_id == f.id) + .first() + is not None + ) + if has_version: + continue + + space_left = remaining_capacity - current_size + + if remaining_file_size <= space_left: + # Entire remaining file fits + backup_set.append( + { + "file_state": f, + "offset_start": covered_size, + "offset_end": f.size, + "is_split": covered_size + > 0, # It's a split if we already had parts + } + ) + current_size += remaining_file_size + elif space_left >= MIN_CHUNK_SIZE: + # Only part of it fits + backup_set.append( + { + "file_state": f, + "offset_start": covered_size, + "offset_end": covered_size + space_left, + "is_split": True, + } + ) + current_size += space_left + # Once we split a file to fill the media, we are done with this set + break return backup_set @@ -96,11 +209,15 @@ class ArchiverService: logger.info("No eligible files for backup") return - total_bytes = sum(f.size for f in backup_set) + total_bytes = sum( + item["offset_end"] - item["offset_start"] for item in backup_set + ) + divisor = max(total_bytes, 1) + JobManager.update_job( job_id, 
10.0, - f"Backing up {len(backup_set)} files ({total_bytes / 1e9:.2f} GB)...", + f"Backing up {len(backup_set)} items ({total_bytes / 1e9:.2f} GB)...", ) provider = self._get_provider(media) @@ -122,7 +239,6 @@ class ArchiverService: return # 2. Create Archive in Staging - # For now, we package everything into one tar for this job archive_name = ( f"backup_{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')}.tar" ) @@ -130,60 +246,310 @@ class ArchiverService: try: processed_bytes = 0 - with tarfile.open(staging_path, "w") as tar: - for f_state in backup_set: - if JobManager.is_cancelled(job_id): - break + # Identify files that can be deduplicated (hash already exists on media) + deduped_items = [] + remaining_backup_set = [] - JobManager.update_job( - job_id, - 15.0 + (70.0 * (processed_bytes / total_bytes)), - f"Archiving: {os.path.basename(f_state.file_path)}", + for item in backup_set: + f_state = item["file_state"] + # Look for an existing version with the same hash + if f_state.sha256_hash: + existing_v = ( + db.query(models.FileVersion) + .join(models.FilesystemState) + .filter( + models.FilesystemState.sha256_hash == f_state.sha256_hash, + models.FileVersion.offset_start == item["offset_start"], + models.FileVersion.offset_end == item["offset_end"], + ) + .first() ) - if os.path.exists(f_state.file_path): - tar.add( - f_state.file_path, arcname=f_state.file_path.lstrip("/") + if existing_v: + logger.info( + f"Deduplicating {f_state.file_path} -> existing version on {existing_v.media_id}" ) + deduped_items.append( + { + "file_state": f_state, + "existing_v": existing_v, + "item": item, + } + ) + continue - processed_bytes += f_state.size + remaining_backup_set.append(item) + + if remaining_backup_set: + with tarfile.open(staging_path, "w") as tar: + for item in remaining_backup_set: + if JobManager.is_cancelled(job_id): + break + + f_state = item["file_state"] + start = item["offset_start"] + end = item["offset_end"] + chunk_size = end - start + + JobManager.update_job( + job_id, + 15.0 + (70.0 * (processed_bytes / divisor)), + f"Archiving: {os.path.basename(f_state.file_path)} (Part {start}-{end})", + ) + if os.path.exists(f_state.file_path): + # Use RangeFile to stream only the requested part + arcname = f_state.file_path.lstrip("/") + if item["is_split"]: + # Append part info to arcname if it's split + arcname = f"{arcname}.part_{start}_{end}" + + tarinfo = tar.gettarinfo(f_state.file_path, arcname=arcname) + tarinfo.size = chunk_size # Override size + + with RangeFile(f_state.file_path, start, chunk_size) as rf: + tar.addfile(tarinfo, rf) + + processed_bytes += chunk_size if JobManager.is_cancelled(job_id): if os.path.exists(staging_path): os.remove(staging_path) return - # 3. Stream to Provider - JobManager.update_job( - job_id, 85.0, f"Streaming archive to {media.media_type}..." - ) - with open(staging_path, "rb") as archive_stream: - location_id = provider.write_archive(media.identifier, archive_stream) + # 3. Stream to Provider (if there's anything to stream) + location_id = "DEDUPLICATED" + if remaining_backup_set: + JobManager.update_job( + job_id, 85.0, f"Streaming archive to {media.media_type}..." + ) + with open(staging_path, "rb") as archive_stream: + location_id = provider.write_archive( + media.identifier, archive_stream + ) + media.bytes_used += os.path.getsize(staging_path) # 4. 
Finalize & Record provider.finalize_media(media.identifier) - # Update database records - for f_state in backup_set: + # Update database records for written files + split_id = str(uuid.uuid4()) + for item in remaining_backup_set: + f_state = item["file_state"] version = models.FileVersion( filesystem_state_id=f_state.id, media_id=media.id, file_number=location_id, + is_split=item["is_split"], + split_id=split_id if item["is_split"] else None, + offset_start=item["offset_start"], + offset_end=item["offset_end"], + ) + db.add(version) + + # Update database records for deduped files + for dedup in deduped_items: + f_state = dedup["file_state"] + existing_v = dedup["existing_v"] + item = dedup["item"] + + version = models.FileVersion( + filesystem_state_id=f_state.id, + media_id=existing_v.media_id, + file_number=existing_v.file_number, + is_split=existing_v.is_split, + split_id=existing_v.split_id, + offset_start=item["offset_start"], + offset_end=item["offset_end"], ) db.add(version) - media.bytes_used += os.path.getsize(staging_path) db.commit() JobManager.complete_job(job_id) logger.info(f"Backup job {job_id} completed successfully") + from app.services.notifications import notification_manager + + notification_manager.notify( + "Archival Completed", + f"Archival job to {media.identifier} finished. {len(backup_set)} items written.", + "success", + ) except Exception as e: logger.exception(f"Backup failed: {e}") JobManager.fail_job(job_id, str(e)) + from app.services.notifications import notification_manager + + notification_manager.notify( + "Archival Failed", + f"Archival job to {media.identifier} failed: {str(e)}", + "failure", + ) finally: if os.path.exists(staging_path): os.remove(staging_path) + def run_restore(self, db: Session, destination: str, job_id: int): + JobManager.start_job(job_id) + JobManager.update_job(job_id, 2.0, "Preparing restore manifest...") + + cart_items = db.query(models.RestoreCart).all() + if not cart_items: + JobManager.complete_job(job_id) + logger.info("No items in restore cart") + return + + total_bytes = sum(item.file_state.size for item in cart_items) + divisor = max(total_bytes, 1) + + JobManager.update_job( + job_id, 5.0, f"Restoring {len(cart_items)} files to {destination}..." + ) + + # Ensure destination exists + os.makedirs(destination, exist_ok=True) + + # Group by media -> location_id -> [FileVersion] + # We need FileVersion objects to know the offsets + media_tasks = {} # media_id -> {location_id: [FileVersion]} + + for item in cart_items: + if not item.file_state.versions: + continue + + # Fetch every recorded version for this file, newest first; + # a split file contributes one FileVersion row per part. + versions = ( + db.query(models.FileVersion) + .filter(models.FileVersion.filesystem_state_id == item.file_state.id) + .order_by(models.FileVersion.created_at.desc()) + .all() + ) + + if not versions: + continue + + # A split file needs all of its parts, so restore every version we have on record for it.
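Illustrative aside (not part of the patch): the restore loop that follows reassembles split files purely from the recorded offset_start/offset_end ranges and does not verify that the recorded parts actually cover the whole file. A minimal sketch of such a coverage check, assuming parts is any iterable of FileVersion-like objects exposing offset_start and offset_end:

def ranges_cover_file(parts, file_size: int) -> bool:
    # True if the recorded chunk ranges cover [0, file_size) without gaps.
    # Overlapping or duplicate ranges (e.g. from deduplicated versions) are fine.
    covered = 0
    for start, end in sorted((p.offset_start, p.offset_end) for p in parts):
        if start > covered:
            return False  # gap before this chunk
        covered = max(covered, end)
    return covered >= file_size

A version group for a split file passes this check only once every part recorded under its split_id has been written, which is the point at which a restore can be expected to produce a complete file.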
+ for v in versions: + if v.media_id not in media_tasks: + media_tasks[v.media_id] = {} + if v.file_number not in media_tasks[v.media_id]: + media_tasks[v.media_id][v.file_number] = [] + media_tasks[v.media_id][v.file_number].append(v) + + processed_bytes = 0 + + try: + for media_id, locations in media_tasks.items(): + if JobManager.is_cancelled(job_id): + break + + media = db.query(models.StorageMedia).get(media_id) + if not media: + continue + + provider = self._get_provider(media) + if not provider: + continue + + # Check media + JobManager.update_job( + job_id, + 10.0 + (80.0 * (processed_bytes / divisor)), + f"Waiting for {media.identifier}...", + ) + current_id = provider.identify_media() + if current_id != media.identifier: + raise Exception( + f"Media mismatch! Insert {media.identifier} (Found: {current_id})" + ) + + # Sort location IDs for sequential access + for loc_id in sorted(locations.keys()): + v_list = locations[loc_id] + if JobManager.is_cancelled(job_id): + break + + JobManager.update_job( + job_id, + 10.0 + (80.0 * (processed_bytes / divisor)), + f"Extracting from {media.identifier} (Archive {loc_id})...", + ) + + archive_stream = provider.read_archive(media.identifier, loc_id) + + # Extract using tarfile + with tarfile.open(fileobj=archive_stream, mode="r|*") as tar: + # Build a map of what's in this tar that we want + # We use part names for split files + wanted_map = {} # arcname -> FileVersion + for v in v_list: + arcname = v.file_state.file_path.lstrip("/") + if v.is_split: + arcname = ( + f"{arcname}.part_{v.offset_start}_{v.offset_end}" + ) + wanted_map[arcname] = v + + for member in tar: + if JobManager.is_cancelled(job_id): + break + + if member.name in wanted_map: + v = wanted_map[member.name] + final_path = os.path.join( + destination, v.file_state.file_path.lstrip("/") + ) + + # Ensure dir exists + os.makedirs(os.path.dirname(final_path), exist_ok=True) + + if v.is_split: + # Atomic reassembly: Write to specific offset + # Use 'r+b' to allow seeking if file exists, 'wb' if not + mode = "r+b" if os.path.exists(final_path) else "wb" + with open(final_path, mode) as f: + if mode == "wb": + # Pre-allocate if possible or just seek + f.truncate(v.file_state.size) + f.seek(v.offset_start) + # Extract the member bytes + f_in = tar.extractfile(member) + if f_in: + f.write(f_in.read()) + else: + # Standard extraction + tar.extract(member, path=destination) + + processed_bytes += v.offset_end - v.offset_start + + if not JobManager.is_cancelled(job_id): + JobManager.complete_job(job_id) + # Clear cart + db.query(models.RestoreCart).delete() + db.commit() + logger.info(f"Restore job {job_id} completed successfully") + from app.services.notifications import notification_manager + + notification_manager.notify( + "Recovery Completed", + f"Data recovery to {destination} finished successfully.", + "success", + ) + + except Exception as e: + logger.exception(f"Restore failed: {e}") + JobManager.fail_job(job_id, str(e)) + from app.services.notifications import notification_manager + + notification_manager.notify( + "Recovery Failed", + f"Data recovery to {destination} failed: {str(e)}", + "failure", + ) + archiver_manager = ArchiverService() diff --git a/backend/app/services/notifications.py b/backend/app/services/notifications.py new file mode 100644 index 0000000..35cab01 --- /dev/null +++ b/backend/app/services/notifications.py @@ -0,0 +1,64 @@ +import apprise +from loguru import logger +from sqlalchemy.orm import Session +from app.db.database import SessionLocal +from 
app.db import models +import json + + +class NotificationService: + def __init__(self): + self.apobj = apprise.Apprise() + + def _load_urls(self, db: Session): + """Loads notification URLs from settings""" + self.apobj.clear() + setting = ( + db.query(models.SystemSetting) + .filter(models.SystemSetting.key == "notification_urls") + .first() + ) + if setting and setting.value: + try: + urls = json.loads(setting.value) + for url in urls: + if url.strip(): + self.apobj.add(url.strip()) + except Exception as e: + logger.error(f"Failed to parse notification URLs: {e}") + + def notify(self, title: str, body: str, notify_type: str = "info"): + """Sends a notification to all configured services""" + db = SessionLocal() + try: + self._load_urls(db) + if len(self.apobj) == 0: + logger.debug("No notification services configured, skipping.") + return + + self.apobj.notify( + title=f"[TapeHoard] {title}", body=body, notify_type=notify_type + ) + logger.info(f"Sent notification: {title}") + except Exception as e: + logger.error(f"Failed to send notification: {e}") + finally: + db.close() + + def test_notification(self, url: str) -> bool: + """Tests a single Apprise URL""" + try: + ap = apprise.Apprise() + ap.add(url) + result = ap.notify( + title="[TapeHoard] Test Notification", + body="This is a test notification from your TapeHoard instance. If you see this, your configuration is correct!", + notify_type="info", + ) + return bool(result) + except Exception as e: + logger.error(f"Test notification failed for {url}: {e}") + return False + + +notification_manager = NotificationService() diff --git a/backend/app/services/scanner.py b/backend/app/services/scanner.py index 95f5056..6bb1c32 100644 --- a/backend/app/services/scanner.py +++ b/backend/app/services/scanner.py @@ -323,18 +323,24 @@ class ScannerService: self.files_processed += 1 if self.files_processed % 50 == 0: db.commit() - if job_id is not None and self.total_files_found > 0: - prog = 10.0 + ( - 90.0 - * ( - self.files_processed - / self.total_files_found + if job_id is not None: + if self.total_files_found > 0: + prog = 10.0 + ( + 90.0 + * ( + self.files_processed + / self.total_files_found + ) ) - ) + status_text = f"Indexing: {self.files_processed}/{self.total_files_found} items" + else: + prog = ( + 10.0 # Keep it at a steady "working" phase + ) + status_text = f"Scanning: {self.files_processed} items discovered..." + JobManager.update_job( - job_id, - round(prog, 1), - f"Hashing & Indexing: {self.files_processed}/{self.total_files_found}...", + job_id, round(prog, 1), status_text ) if job_id is not None and JobManager.is_cancelled(job_id): @@ -344,12 +350,24 @@ class ScannerService: self.last_run_time = datetime.now(timezone.utc) if job_id is not None: JobManager.complete_job(job_id) + from app.services.notifications import notification_manager + + notification_manager.notify( + "Scan Completed", + f"System scan finished. 
{self.files_processed} files processed, {self.files_hashed} new hashes computed.", + "success", + ) except Exception as e: logger.exception(f"Scan failed: {e}") db.rollback() if job_id is not None: JobManager.fail_job(job_id, str(e)) + from app.services.notifications import notification_manager + + notification_manager.notify( + "Scan Failed", f"System scan failed: {str(e)}", "failure" + ) finally: self.is_running = False self.current_path = "" diff --git a/backend/app/services/scheduler.py b/backend/app/services/scheduler.py index e69de29..beaed60 100644 --- a/backend/app/services/scheduler.py +++ b/backend/app/services/scheduler.py @@ -0,0 +1,119 @@ +from apscheduler.schedulers.background import BackgroundScheduler +from apscheduler.triggers.cron import CronTrigger +from loguru import logger +from sqlalchemy.orm import Session +from app.db.database import SessionLocal +from app.db import models +from app.services.scanner import scanner_manager, JobManager +from app.services.archiver import archiver_manager + + +class SchedulerService: + def __init__(self): + self.scheduler = BackgroundScheduler() + self.jobs = {} + + def start(self): + if not self.scheduler.running: + self.scheduler.start() + logger.info("Scheduler service started") + self.load_schedules() + + def stop(self): + if self.scheduler.running: + self.scheduler.shutdown() + logger.info("Scheduler service stopped") + + def load_schedules(self): + """Loads and schedules jobs from database settings""" + db = SessionLocal() + try: + # 1. Scan Schedule + scan_cron = self._get_setting(db, "schedule_scan") + if scan_cron: + self.add_job("system_scan", self.run_system_scan, scan_cron) + + # 2. Archival Schedule + # Note: This would typically pick the first active media or a designated 'auto' media + archival_cron = self._get_setting(db, "schedule_archival") + if archival_cron: + self.add_job("system_archival", self.run_system_archival, archival_cron) + finally: + db.close() + + def _get_setting(self, db: Session, key: str) -> str: + setting = ( + db.query(models.SystemSetting) + .filter(models.SystemSetting.key == key) + .first() + ) + return setting.value if setting else "" + + def add_job(self, job_id, func, cron_expression): + """Adds or updates a job with a cron expression""" + try: + # Remove existing if it exists + if self.scheduler.get_job(job_id): + self.scheduler.remove_job(job_id) + + if cron_expression.strip(): + self.scheduler.add_job( + func, + CronTrigger.from_crontab(cron_expression), + id=job_id, + replace_existing=True, + ) + logger.info(f"Scheduled job {job_id} with cron: {cron_expression}") + except Exception as e: + logger.error(f"Failed to schedule job {job_id}: {e}") + + def run_system_scan(self): + logger.info("Starting scheduled system scan...") + db = SessionLocal() + try: + if not scanner_manager.is_running: + job = JobManager.create_job(db, "SCAN") + scanner_manager.scan_sources(db, job_id=job.id) + except Exception as e: + logger.error(f"Scheduled scan failed: {e}") + finally: + db.close() + + def run_system_archival(self): + logger.info("Starting scheduled archival job...") + db = SessionLocal() + try: + # Look for a designated primary target + primary_id = self._get_setting(db, "primary_archival_target") + + media = None + if primary_id: + media = ( + db.query(models.StorageMedia) + .filter( + models.StorageMedia.id == int(primary_id), + models.StorageMedia.status == "active", + ) + .first() + ) + + if not media: + # Fallback: pick first available 'active' media if no primary set + media = ( + 
db.query(models.StorageMedia) + .filter(models.StorageMedia.status == "active") + .first() + ) + + if media: + job = JobManager.create_job(db, "BACKUP") + archiver_manager.run_backup(db, media.id, job_id=job.id) + else: + logger.warning("No suitable media found for scheduled archival") + except Exception as e: + logger.error(f"Scheduled archival failed: {e}") + finally: + db.close() + + +scheduler_manager = SchedulerService() diff --git a/backend/pyproject.toml b/backend/pyproject.toml index 78692f3..14a74a8 100644 --- a/backend/pyproject.toml +++ b/backend/pyproject.toml @@ -16,6 +16,7 @@ dependencies = [ "pydantic-settings>=2.14.0", "pathspec>=1.1.0", "boto3>=1.42.94", + "python-multipart>=0.0.26", ] [build-system] diff --git a/backend/uv.lock b/backend/uv.lock index 55016b4..7f95fe2 100644 --- a/backend/uv.lock +++ b/backend/uv.lock @@ -855,6 +855,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0b/d7/1959b9648791274998a9c3526f6d0ec8fd2233e4d4acce81bbae76b44b2a/python_dotenv-1.2.2-py3-none-any.whl", hash = "sha256:1d8214789a24de455a8b8bd8ae6fe3c6b69a5e3d64aa8a8e5d68e694bbcb285a", size = 22101, upload-time = "2026-03-01T16:00:25.09Z" }, ] +[[package]] +name = "python-multipart" +version = "0.0.26" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/88/71/b145a380824a960ebd60e1014256dbb7d2253f2316ff2d73dfd8928ec2c3/python_multipart-0.0.26.tar.gz", hash = "sha256:08fadc45918cd615e26846437f50c5d6d23304da32c341f289a617127b081f17", size = 43501, upload-time = "2026-04-10T14:09:59.473Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/22/f1925cdda983ab66fc8ec6ec8014b959262747e58bdca26a4e3d1da29d56/python_multipart-0.0.26-py3-none-any.whl", hash = "sha256:c0b169f8c4484c13b0dcf2ef0ec3a4adb255c4b7d18d8e420477d2b1dd03f185", size = 28847, upload-time = "2026-04-10T14:09:58.131Z" }, +] + [[package]] name = "pyyaml" version = "6.0.3" @@ -1080,6 +1089,7 @@ dependencies = [ { name = "pathspec" }, { name = "prometheus-client" }, { name = "pydantic-settings" }, + { name = "python-multipart" }, { name = "sqlalchemy" }, { name = "uvicorn", extra = ["standard"] }, ] @@ -1103,6 +1113,7 @@ requires-dist = [ { name = "pathspec", specifier = ">=1.1.0" }, { name = "prometheus-client" }, { name = "pydantic-settings", specifier = ">=2.14.0" }, + { name = "python-multipart", specifier = ">=0.0.26" }, { name = "sqlalchemy" }, { name = "uvicorn", extras = ["standard"] }, ] diff --git a/docker/Dockerfile b/docker/Dockerfile index e69de29..9855438 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -0,0 +1,55 @@ +# Stage 1: Build Frontend +FROM node:20-slim AS frontend-builder +WORKDIR /app/frontend +COPY frontend/package*.json ./ +RUN npm install +COPY frontend/ ./ +RUN npm run build + +# Stage 2: Backend & Runtime +FROM ghcr.io/astral-sh/uv:python3.13-bookworm-slim AS runtime +WORKDIR /app/backend + +# Install system dependencies and build stenc from source +RUN apt-get update && apt-get install -y --no-install-recommends \ + mt-st \ + tar \ + sqlite3 \ + build-essential \ + autoconf \ + automake \ + libtool \ + pkg-config \ + pandoc \ + git \ + liblzo2-dev \ + zlib1g-dev \ + && git clone https://github.com/scsitape/stenc.git /tmp/stenc \ + && cd /tmp/stenc \ + && ./autogen.sh \ + && ./configure \ + && make && make install \ + && apt-get purge -y build-essential autoconf automake libtool pkg-config pandoc git \ + && apt-get autoremove -y \ + && rm -rf /var/lib/apt/lists/* /tmp/stenc + +# Copy backend files +COPY 
README.md /app/ +COPY backend/pyproject.toml backend/uv.lock ./ +RUN uv sync --frozen --no-dev + +COPY backend/ ./ + +# Copy built frontend assets +COPY --from=frontend-builder /app/frontend/build /app/backend/static + +# Setup volumes and permissions +RUN mkdir -p /app/data /staging /source_data /restores +ENV DATABASE_URL=sqlite:////app/data/tapehoard.db + +# Entrypoint +COPY docker/entrypoint.sh /app/entrypoint.sh +RUN chmod +x /app/entrypoint.sh + +EXPOSE 8000 +ENTRYPOINT ["/app/entrypoint.sh"] diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index e69de29..86caa03 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -0,0 +1,23 @@ +services: + tapehoard: + build: + context: .. + dockerfile: docker/Dockerfile + container_name: tapehoard + ports: + - "8000:8000" + volumes: + # - ./data:/app/data + - ./staging:/staging + - /Users/alamers/git/:/source_data:ro + - /Users/alamers/restore/:/restores + - /Users/alamers/backup/:/mnt/HDD-001 + # - /mnt/storage:/source_data:ro + # - /mnt/restores:/restores + # devices: + # - /dev/nst0:/dev/nst0 + environment: + - PUID=1000 + - PGID=1000 + - TZ=UTC + restart: unless-stopped diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index e69de29..80bc1cf 100644 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +echo "Starting TapeHoard..." + +# Change to backend directory +cd /app/backend + +# Run database migrations +echo "Running database migrations..." +uv run alembic upgrade head + +# Start the application +echo "Starting application server..." +exec uv run uvicorn app.main:app --host 0.0.0.0 --port 8000 diff --git a/frontend/src/lib/api/index.ts b/frontend/src/lib/api/index.ts index db7c74e..0b416bb 100644 --- a/frontend/src/lib/api/index.ts +++ b/frontend/src/lib/api/index.ts @@ -1,4 +1,4 @@ // This file is auto-generated by @hey-api/openapi-ts -export { addDirectoryToCartRestoresCartDirectoryPost, addToCartRestoresCartFileIdPost, browseIndexInventoryBrowseGet, browsePathSystemBrowseGet, cancelJobSystemJobsJobIdCancelPost, clearCartRestoresCartClearPost, deleteMediaInventoryMediaMediaIdDelete, getDashboardStatsSystemDashboardStatsGet, getIndexTreeInventoryTreeGet, getItemMetadataInventoryMetadataGet, getManifestRestoresManifestGet, getScanStatusSystemScanStatusGet, getSettingsSystemSettingsGet, getTreeSystemTreeGet, listBackupsBackupsGet, listCartRestoresCartGet, listInventoryInventoryGet, listJobsSystemJobsGet, listMediaInventoryMediaGet, type Options, readRootGet, registerMediaInventoryMediaPost, removeFromCartRestoresCartItemIdDelete, streamJobsSystemJobsStreamGet, trackBatchSystemTrackBatchPost, triggerBackupBackupsTriggerMediaIdPost, triggerScanSystemScanPost, updateMediaInventoryMediaMediaIdPatch, updateSettingSystemSettingsPost } from './sdk.gen'; -export type { AddDirectoryToCartRestoresCartDirectoryPostData, AddDirectoryToCartRestoresCartDirectoryPostError, AddDirectoryToCartRestoresCartDirectoryPostErrors, AddDirectoryToCartRestoresCartDirectoryPostResponses, AddToCartRestoresCartFileIdPostData, AddToCartRestoresCartFileIdPostError, AddToCartRestoresCartFileIdPostErrors, AddToCartRestoresCartFileIdPostResponses, AppApiInventoryFileItemSchema, AppApiSystemFileItemSchema, BatchTrackRequest, BrowseIndexInventoryBrowseGetData, BrowseIndexInventoryBrowseGetError, BrowseIndexInventoryBrowseGetErrors, BrowseIndexInventoryBrowseGetResponse, BrowseIndexInventoryBrowseGetResponses, BrowsePathSystemBrowseGetData, BrowsePathSystemBrowseGetError, 
BrowsePathSystemBrowseGetErrors, BrowsePathSystemBrowseGetResponse, BrowsePathSystemBrowseGetResponses, CancelJobSystemJobsJobIdCancelPostData, CancelJobSystemJobsJobIdCancelPostError, CancelJobSystemJobsJobIdCancelPostErrors, CancelJobSystemJobsJobIdCancelPostResponses, CartItemSchema, ClearCartRestoresCartClearPostData, ClearCartRestoresCartClearPostResponses, ClientOptions, DashboardStatsSchema, DeleteMediaInventoryMediaMediaIdDeleteData, DeleteMediaInventoryMediaMediaIdDeleteError, DeleteMediaInventoryMediaMediaIdDeleteErrors, DeleteMediaInventoryMediaMediaIdDeleteResponses, DirectoryCartRequest, FileVersionSchema, GetDashboardStatsSystemDashboardStatsGetData, GetDashboardStatsSystemDashboardStatsGetResponse, GetDashboardStatsSystemDashboardStatsGetResponses, GetIndexTreeInventoryTreeGetData, GetIndexTreeInventoryTreeGetError, GetIndexTreeInventoryTreeGetErrors, GetIndexTreeInventoryTreeGetResponse, GetIndexTreeInventoryTreeGetResponses, GetItemMetadataInventoryMetadataGetData, GetItemMetadataInventoryMetadataGetError, GetItemMetadataInventoryMetadataGetErrors, GetItemMetadataInventoryMetadataGetResponse, GetItemMetadataInventoryMetadataGetResponses, GetManifestRestoresManifestGetData, GetManifestRestoresManifestGetResponse, GetManifestRestoresManifestGetResponses, GetScanStatusSystemScanStatusGetData, GetScanStatusSystemScanStatusGetResponse, GetScanStatusSystemScanStatusGetResponses, GetSettingsSystemSettingsGetData, GetSettingsSystemSettingsGetResponse, GetSettingsSystemSettingsGetResponses, GetTreeSystemTreeGetData, GetTreeSystemTreeGetError, GetTreeSystemTreeGetErrors, GetTreeSystemTreeGetResponses, HttpValidationError, ItemMetadataSchema, JobSchema, ListBackupsBackupsGetData, ListBackupsBackupsGetResponses, ListCartRestoresCartGetData, ListCartRestoresCartGetResponse, ListCartRestoresCartGetResponses, ListInventoryInventoryGetData, ListInventoryInventoryGetResponses, ListJobsSystemJobsGetData, ListJobsSystemJobsGetError, ListJobsSystemJobsGetErrors, ListJobsSystemJobsGetResponse, ListJobsSystemJobsGetResponses, ListMediaInventoryMediaGetData, ListMediaInventoryMediaGetResponse, ListMediaInventoryMediaGetResponses, ManifestMediaRequirement, MediaCreateSchema, MediaSchema, MediaUpdateSchema, ReadRootGetData, ReadRootGetResponses, RegisterMediaInventoryMediaPostData, RegisterMediaInventoryMediaPostError, RegisterMediaInventoryMediaPostErrors, RegisterMediaInventoryMediaPostResponse, RegisterMediaInventoryMediaPostResponses, RemoveFromCartRestoresCartItemIdDeleteData, RemoveFromCartRestoresCartItemIdDeleteError, RemoveFromCartRestoresCartItemIdDeleteErrors, RemoveFromCartRestoresCartItemIdDeleteResponses, RestoreManifestSchema, ScanStatusSchema, SettingSchema, StreamJobsSystemJobsStreamGetData, StreamJobsSystemJobsStreamGetResponses, TrackBatchSystemTrackBatchPostData, TrackBatchSystemTrackBatchPostError, TrackBatchSystemTrackBatchPostErrors, TrackBatchSystemTrackBatchPostResponses, TreeNodeSchema, TriggerBackupBackupsTriggerMediaIdPostData, TriggerBackupBackupsTriggerMediaIdPostError, TriggerBackupBackupsTriggerMediaIdPostErrors, TriggerBackupBackupsTriggerMediaIdPostResponses, TriggerScanSystemScanPostData, TriggerScanSystemScanPostResponses, UpdateMediaInventoryMediaMediaIdPatchData, UpdateMediaInventoryMediaMediaIdPatchError, UpdateMediaInventoryMediaMediaIdPatchErrors, UpdateMediaInventoryMediaMediaIdPatchResponse, UpdateMediaInventoryMediaMediaIdPatchResponses, UpdateSettingSystemSettingsPostData, UpdateSettingSystemSettingsPostError, UpdateSettingSystemSettingsPostErrors, 
UpdateSettingSystemSettingsPostResponses, ValidationError } from './types.gen'; +export { addDirectoryToCartRestoresCartDirectoryPost, addToCartRestoresCartFileIdPost, browseCartRestoresCartBrowseGet, browseIndexInventoryBrowseGet, browsePathSystemBrowseGet, cancelJobSystemJobsJobIdCancelPost, clearCartRestoresCartClearPost, deleteMediaInventoryMediaMediaIdDelete, exportDatabaseSystemDatabaseExportGet, getCartTreeRestoresCartTreeGet, getDashboardStatsSystemDashboardStatsGet, getIndexTreeInventoryTreeGet, getItemMetadataInventoryMetadataGet, getManifestRestoresManifestGet, getScanStatusSystemScanStatusGet, getSettingsSystemSettingsGet, getTreeSystemTreeGet, healthCheckHealthGet, importDatabaseSystemDatabaseImportPost, initializeMediaInventoryMediaMediaIdInitializePost, listBackupsBackupsGet, listCartRestoresCartGet, listInventoryInventoryGet, listJobsSystemJobsGet, listMediaInventoryMediaGet, type Options, registerMediaInventoryMediaPost, removeFromCartRestoresCartItemIdDelete, searchIndexInventorySearchGet, searchSystemSystemSearchGet, streamJobsSystemJobsStreamGet, testNotificationSystemNotificationsTestPost, trackBatchSystemTrackBatchPost, triggerBackupBackupsTriggerMediaIdPost, triggerRestoreRestoresTriggerPost, triggerScanSystemScanPost, updateMediaInventoryMediaMediaIdPatch, updateSettingSystemSettingsPost } from './sdk.gen'; +export type { AddDirectoryToCartRestoresCartDirectoryPostData, AddDirectoryToCartRestoresCartDirectoryPostError, AddDirectoryToCartRestoresCartDirectoryPostErrors, AddDirectoryToCartRestoresCartDirectoryPostResponses, AddToCartRestoresCartFileIdPostData, AddToCartRestoresCartFileIdPostError, AddToCartRestoresCartFileIdPostErrors, AddToCartRestoresCartFileIdPostResponses, AppApiInventoryFileItemSchema, AppApiSystemFileItemSchema, BatchTrackRequest, BodyImportDatabaseSystemDatabaseImportPost, BrowseCartRestoresCartBrowseGetData, BrowseCartRestoresCartBrowseGetError, BrowseCartRestoresCartBrowseGetErrors, BrowseCartRestoresCartBrowseGetResponse, BrowseCartRestoresCartBrowseGetResponses, BrowseIndexInventoryBrowseGetData, BrowseIndexInventoryBrowseGetError, BrowseIndexInventoryBrowseGetErrors, BrowseIndexInventoryBrowseGetResponse, BrowseIndexInventoryBrowseGetResponses, BrowsePathSystemBrowseGetData, BrowsePathSystemBrowseGetError, BrowsePathSystemBrowseGetErrors, BrowsePathSystemBrowseGetResponse, BrowsePathSystemBrowseGetResponses, CancelJobSystemJobsJobIdCancelPostData, CancelJobSystemJobsJobIdCancelPostError, CancelJobSystemJobsJobIdCancelPostErrors, CancelJobSystemJobsJobIdCancelPostResponses, CartFileItemSchema, CartItemSchema, CartTreeNodeSchema, ClearCartRestoresCartClearPostData, ClearCartRestoresCartClearPostResponses, ClientOptions, DashboardStatsSchema, DeleteMediaInventoryMediaMediaIdDeleteData, DeleteMediaInventoryMediaMediaIdDeleteError, DeleteMediaInventoryMediaMediaIdDeleteErrors, DeleteMediaInventoryMediaMediaIdDeleteResponses, DirectoryCartRequest, ExportDatabaseSystemDatabaseExportGetData, ExportDatabaseSystemDatabaseExportGetResponses, FileVersionSchema, GetCartTreeRestoresCartTreeGetData, GetCartTreeRestoresCartTreeGetError, GetCartTreeRestoresCartTreeGetErrors, GetCartTreeRestoresCartTreeGetResponse, GetCartTreeRestoresCartTreeGetResponses, GetDashboardStatsSystemDashboardStatsGetData, GetDashboardStatsSystemDashboardStatsGetResponse, GetDashboardStatsSystemDashboardStatsGetResponses, GetIndexTreeInventoryTreeGetData, GetIndexTreeInventoryTreeGetError, GetIndexTreeInventoryTreeGetErrors, GetIndexTreeInventoryTreeGetResponse, 
GetIndexTreeInventoryTreeGetResponses, GetItemMetadataInventoryMetadataGetData, GetItemMetadataInventoryMetadataGetError, GetItemMetadataInventoryMetadataGetErrors, GetItemMetadataInventoryMetadataGetResponse, GetItemMetadataInventoryMetadataGetResponses, GetManifestRestoresManifestGetData, GetManifestRestoresManifestGetResponse, GetManifestRestoresManifestGetResponses, GetScanStatusSystemScanStatusGetData, GetScanStatusSystemScanStatusGetResponse, GetScanStatusSystemScanStatusGetResponses, GetSettingsSystemSettingsGetData, GetSettingsSystemSettingsGetResponse, GetSettingsSystemSettingsGetResponses, GetTreeSystemTreeGetData, GetTreeSystemTreeGetError, GetTreeSystemTreeGetErrors, GetTreeSystemTreeGetResponses, HealthCheckHealthGetData, HealthCheckHealthGetResponses, HttpValidationError, ImportDatabaseSystemDatabaseImportPostData, ImportDatabaseSystemDatabaseImportPostError, ImportDatabaseSystemDatabaseImportPostErrors, ImportDatabaseSystemDatabaseImportPostResponses, InitializeMediaInventoryMediaMediaIdInitializePostData, InitializeMediaInventoryMediaMediaIdInitializePostError, InitializeMediaInventoryMediaMediaIdInitializePostErrors, InitializeMediaInventoryMediaMediaIdInitializePostResponses, ItemMetadataSchema, JobSchema, ListBackupsBackupsGetData, ListBackupsBackupsGetResponses, ListCartRestoresCartGetData, ListCartRestoresCartGetResponse, ListCartRestoresCartGetResponses, ListInventoryInventoryGetData, ListInventoryInventoryGetResponses, ListJobsSystemJobsGetData, ListJobsSystemJobsGetError, ListJobsSystemJobsGetErrors, ListJobsSystemJobsGetResponse, ListJobsSystemJobsGetResponses, ListMediaInventoryMediaGetData, ListMediaInventoryMediaGetResponse, ListMediaInventoryMediaGetResponses, ManifestMediaRequirement, MediaCreateSchema, MediaSchema, MediaUpdateSchema, RegisterMediaInventoryMediaPostData, RegisterMediaInventoryMediaPostError, RegisterMediaInventoryMediaPostErrors, RegisterMediaInventoryMediaPostResponse, RegisterMediaInventoryMediaPostResponses, RemoveFromCartRestoresCartItemIdDeleteData, RemoveFromCartRestoresCartItemIdDeleteError, RemoveFromCartRestoresCartItemIdDeleteErrors, RemoveFromCartRestoresCartItemIdDeleteResponses, RestoreManifestSchema, RestoreRequest, ScanStatusSchema, SearchIndexInventorySearchGetData, SearchIndexInventorySearchGetError, SearchIndexInventorySearchGetErrors, SearchIndexInventorySearchGetResponse, SearchIndexInventorySearchGetResponses, SearchSystemSystemSearchGetData, SearchSystemSystemSearchGetError, SearchSystemSystemSearchGetErrors, SearchSystemSystemSearchGetResponse, SearchSystemSystemSearchGetResponses, SettingSchema, StreamJobsSystemJobsStreamGetData, StreamJobsSystemJobsStreamGetResponses, TestNotificationRequest, TestNotificationSystemNotificationsTestPostData, TestNotificationSystemNotificationsTestPostError, TestNotificationSystemNotificationsTestPostErrors, TestNotificationSystemNotificationsTestPostResponses, TrackBatchSystemTrackBatchPostData, TrackBatchSystemTrackBatchPostError, TrackBatchSystemTrackBatchPostErrors, TrackBatchSystemTrackBatchPostResponses, TreeNodeSchema, TriggerBackupBackupsTriggerMediaIdPostData, TriggerBackupBackupsTriggerMediaIdPostError, TriggerBackupBackupsTriggerMediaIdPostErrors, TriggerBackupBackupsTriggerMediaIdPostResponses, TriggerRestoreRestoresTriggerPostData, TriggerRestoreRestoresTriggerPostError, TriggerRestoreRestoresTriggerPostErrors, TriggerRestoreRestoresTriggerPostResponses, TriggerScanSystemScanPostData, TriggerScanSystemScanPostResponses, UpdateMediaInventoryMediaMediaIdPatchData, 
UpdateMediaInventoryMediaMediaIdPatchError, UpdateMediaInventoryMediaMediaIdPatchErrors, UpdateMediaInventoryMediaMediaIdPatchResponse, UpdateMediaInventoryMediaMediaIdPatchResponses, UpdateSettingSystemSettingsPostData, UpdateSettingSystemSettingsPostError, UpdateSettingSystemSettingsPostErrors, UpdateSettingSystemSettingsPostResponses, ValidationError } from './types.gen'; diff --git a/frontend/src/lib/api/sdk.gen.ts b/frontend/src/lib/api/sdk.gen.ts index 6ccc879..f9f07f8 100644 --- a/frontend/src/lib/api/sdk.gen.ts +++ b/frontend/src/lib/api/sdk.gen.ts @@ -1,8 +1,8 @@ // This file is auto-generated by @hey-api/openapi-ts -import type { Client, Options as Options2, TDataShape } from './client'; +import { type Client, formDataBodySerializer, type Options as Options2, type TDataShape } from './client'; import { client } from './client.gen'; -import type { AddDirectoryToCartRestoresCartDirectoryPostData, AddDirectoryToCartRestoresCartDirectoryPostErrors, AddDirectoryToCartRestoresCartDirectoryPostResponses, AddToCartRestoresCartFileIdPostData, AddToCartRestoresCartFileIdPostErrors, AddToCartRestoresCartFileIdPostResponses, BrowseIndexInventoryBrowseGetData, BrowseIndexInventoryBrowseGetErrors, BrowseIndexInventoryBrowseGetResponses, BrowsePathSystemBrowseGetData, BrowsePathSystemBrowseGetErrors, BrowsePathSystemBrowseGetResponses, CancelJobSystemJobsJobIdCancelPostData, CancelJobSystemJobsJobIdCancelPostErrors, CancelJobSystemJobsJobIdCancelPostResponses, ClearCartRestoresCartClearPostData, ClearCartRestoresCartClearPostResponses, DeleteMediaInventoryMediaMediaIdDeleteData, DeleteMediaInventoryMediaMediaIdDeleteErrors, DeleteMediaInventoryMediaMediaIdDeleteResponses, GetDashboardStatsSystemDashboardStatsGetData, GetDashboardStatsSystemDashboardStatsGetResponses, GetIndexTreeInventoryTreeGetData, GetIndexTreeInventoryTreeGetErrors, GetIndexTreeInventoryTreeGetResponses, GetItemMetadataInventoryMetadataGetData, GetItemMetadataInventoryMetadataGetErrors, GetItemMetadataInventoryMetadataGetResponses, GetManifestRestoresManifestGetData, GetManifestRestoresManifestGetResponses, GetScanStatusSystemScanStatusGetData, GetScanStatusSystemScanStatusGetResponses, GetSettingsSystemSettingsGetData, GetSettingsSystemSettingsGetResponses, GetTreeSystemTreeGetData, GetTreeSystemTreeGetErrors, GetTreeSystemTreeGetResponses, ListBackupsBackupsGetData, ListBackupsBackupsGetResponses, ListCartRestoresCartGetData, ListCartRestoresCartGetResponses, ListInventoryInventoryGetData, ListInventoryInventoryGetResponses, ListJobsSystemJobsGetData, ListJobsSystemJobsGetErrors, ListJobsSystemJobsGetResponses, ListMediaInventoryMediaGetData, ListMediaInventoryMediaGetResponses, ReadRootGetData, ReadRootGetResponses, RegisterMediaInventoryMediaPostData, RegisterMediaInventoryMediaPostErrors, RegisterMediaInventoryMediaPostResponses, RemoveFromCartRestoresCartItemIdDeleteData, RemoveFromCartRestoresCartItemIdDeleteErrors, RemoveFromCartRestoresCartItemIdDeleteResponses, StreamJobsSystemJobsStreamGetData, StreamJobsSystemJobsStreamGetResponses, TrackBatchSystemTrackBatchPostData, TrackBatchSystemTrackBatchPostErrors, TrackBatchSystemTrackBatchPostResponses, TriggerBackupBackupsTriggerMediaIdPostData, TriggerBackupBackupsTriggerMediaIdPostErrors, TriggerBackupBackupsTriggerMediaIdPostResponses, TriggerScanSystemScanPostData, TriggerScanSystemScanPostResponses, UpdateMediaInventoryMediaMediaIdPatchData, UpdateMediaInventoryMediaMediaIdPatchErrors, UpdateMediaInventoryMediaMediaIdPatchResponses, 
UpdateSettingSystemSettingsPostData, UpdateSettingSystemSettingsPostErrors, UpdateSettingSystemSettingsPostResponses } from './types.gen'; +import type { AddDirectoryToCartRestoresCartDirectoryPostData, AddDirectoryToCartRestoresCartDirectoryPostErrors, AddDirectoryToCartRestoresCartDirectoryPostResponses, AddToCartRestoresCartFileIdPostData, AddToCartRestoresCartFileIdPostErrors, AddToCartRestoresCartFileIdPostResponses, BrowseCartRestoresCartBrowseGetData, BrowseCartRestoresCartBrowseGetErrors, BrowseCartRestoresCartBrowseGetResponses, BrowseIndexInventoryBrowseGetData, BrowseIndexInventoryBrowseGetErrors, BrowseIndexInventoryBrowseGetResponses, BrowsePathSystemBrowseGetData, BrowsePathSystemBrowseGetErrors, BrowsePathSystemBrowseGetResponses, CancelJobSystemJobsJobIdCancelPostData, CancelJobSystemJobsJobIdCancelPostErrors, CancelJobSystemJobsJobIdCancelPostResponses, ClearCartRestoresCartClearPostData, ClearCartRestoresCartClearPostResponses, DeleteMediaInventoryMediaMediaIdDeleteData, DeleteMediaInventoryMediaMediaIdDeleteErrors, DeleteMediaInventoryMediaMediaIdDeleteResponses, ExportDatabaseSystemDatabaseExportGetData, ExportDatabaseSystemDatabaseExportGetResponses, GetCartTreeRestoresCartTreeGetData, GetCartTreeRestoresCartTreeGetErrors, GetCartTreeRestoresCartTreeGetResponses, GetDashboardStatsSystemDashboardStatsGetData, GetDashboardStatsSystemDashboardStatsGetResponses, GetIndexTreeInventoryTreeGetData, GetIndexTreeInventoryTreeGetErrors, GetIndexTreeInventoryTreeGetResponses, GetItemMetadataInventoryMetadataGetData, GetItemMetadataInventoryMetadataGetErrors, GetItemMetadataInventoryMetadataGetResponses, GetManifestRestoresManifestGetData, GetManifestRestoresManifestGetResponses, GetScanStatusSystemScanStatusGetData, GetScanStatusSystemScanStatusGetResponses, GetSettingsSystemSettingsGetData, GetSettingsSystemSettingsGetResponses, GetTreeSystemTreeGetData, GetTreeSystemTreeGetErrors, GetTreeSystemTreeGetResponses, HealthCheckHealthGetData, HealthCheckHealthGetResponses, ImportDatabaseSystemDatabaseImportPostData, ImportDatabaseSystemDatabaseImportPostErrors, ImportDatabaseSystemDatabaseImportPostResponses, InitializeMediaInventoryMediaMediaIdInitializePostData, InitializeMediaInventoryMediaMediaIdInitializePostErrors, InitializeMediaInventoryMediaMediaIdInitializePostResponses, ListBackupsBackupsGetData, ListBackupsBackupsGetResponses, ListCartRestoresCartGetData, ListCartRestoresCartGetResponses, ListInventoryInventoryGetData, ListInventoryInventoryGetResponses, ListJobsSystemJobsGetData, ListJobsSystemJobsGetErrors, ListJobsSystemJobsGetResponses, ListMediaInventoryMediaGetData, ListMediaInventoryMediaGetResponses, RegisterMediaInventoryMediaPostData, RegisterMediaInventoryMediaPostErrors, RegisterMediaInventoryMediaPostResponses, RemoveFromCartRestoresCartItemIdDeleteData, RemoveFromCartRestoresCartItemIdDeleteErrors, RemoveFromCartRestoresCartItemIdDeleteResponses, SearchIndexInventorySearchGetData, SearchIndexInventorySearchGetErrors, SearchIndexInventorySearchGetResponses, SearchSystemSystemSearchGetData, SearchSystemSystemSearchGetErrors, SearchSystemSystemSearchGetResponses, StreamJobsSystemJobsStreamGetData, StreamJobsSystemJobsStreamGetResponses, TestNotificationSystemNotificationsTestPostData, TestNotificationSystemNotificationsTestPostErrors, TestNotificationSystemNotificationsTestPostResponses, TrackBatchSystemTrackBatchPostData, TrackBatchSystemTrackBatchPostErrors, TrackBatchSystemTrackBatchPostResponses, TriggerBackupBackupsTriggerMediaIdPostData, 
TriggerBackupBackupsTriggerMediaIdPostErrors, TriggerBackupBackupsTriggerMediaIdPostResponses, TriggerRestoreRestoresTriggerPostData, TriggerRestoreRestoresTriggerPostErrors, TriggerRestoreRestoresTriggerPostResponses, TriggerScanSystemScanPostData, TriggerScanSystemScanPostResponses, UpdateMediaInventoryMediaMediaIdPatchData, UpdateMediaInventoryMediaMediaIdPatchErrors, UpdateMediaInventoryMediaMediaIdPatchResponses, UpdateSettingSystemSettingsPostData, UpdateSettingSystemSettingsPostErrors, UpdateSettingSystemSettingsPostResponses } from './types.gen'; export type Options = Options2 & { /** @@ -53,6 +53,11 @@ export const getScanStatusSystemScanStatusGet = (options?: Options) => (options?.client ?? client).get({ url: '/system/browse', ...options }); +/** + * Search System + */ +export const searchSystemSystemSearchGet = (options: Options) => (options.client ?? client).get({ url: '/system/search', ...options }); + /** * Track Batch */ @@ -82,6 +87,36 @@ export const updateSettingSystemSettingsPost = (options: Options) => (options.client ?? client).post({ + url: '/system/notifications/test', + ...options, + headers: { + 'Content-Type': 'application/json', + ...options.headers + } +}); + +/** + * Export Database + */ +export const exportDatabaseSystemDatabaseExportGet = (options?: Options) => (options?.client ?? client).get({ url: '/system/database/export', ...options }); + +/** + * Import Database + */ +export const importDatabaseSystemDatabaseImportPost = (options: Options) => (options.client ?? client).post({ + ...formDataBodySerializer, + url: '/system/database/import', + ...options, + headers: { + 'Content-Type': null, + ...options.headers + } +}); + /** * Get Tree */ @@ -121,11 +156,21 @@ export const updateMediaInventoryMediaMediaIdPatch = (options: Options) => (options.client ?? client).post({ url: '/inventory/media/{media_id}/initialize', ...options }); + /** * Browse Index */ export const browseIndexInventoryBrowseGet = (options?: Options) => (options?.client ?? client).get({ url: '/inventory/browse', ...options }); +/** + * Search Index + */ +export const searchIndexInventorySearchGet = (options: Options) => (options.client ?? client).get({ url: '/inventory/search', ...options }); + /** * Get Index Tree */ @@ -151,15 +196,37 @@ export const triggerBackupBackupsTriggerMediaIdPost = (options?: Options) => (options?.client ?? client).get({ url: '/backups/', ...options }); +/** + * Trigger Restore + */ +export const triggerRestoreRestoresTriggerPost = (options: Options) => (options.client ?? client).post({ + url: '/restores/trigger', + ...options, + headers: { + 'Content-Type': 'application/json', + ...options.headers + } +}); + +/** + * Browse Cart + */ +export const browseCartRestoresCartBrowseGet = (options?: Options) => (options?.client ?? client).get({ url: '/restores/cart/browse', ...options }); + +/** + * Get Cart Tree + */ +export const getCartTreeRestoresCartTreeGet = (options?: Options) => (options?.client ?? client).get({ url: '/restores/cart/tree', ...options }); + /** * List Cart */ export const listCartRestoresCartGet = (options?: Options) => (options?.client ?? client).get({ url: '/restores/cart', ...options }); /** - * Add To Cart + * Clear Cart */ -export const addToCartRestoresCartFileIdPost = (options: Options) => (options.client ?? client).post({ url: '/restores/cart/{file_id}', ...options }); +export const clearCartRestoresCartClearPost = (options?: Options) => (options?.client ?? 
client).post({ url: '/restores/cart/clear', ...options }); /** * Add Directory To Cart @@ -173,22 +240,22 @@ export const addDirectoryToCartRestoresCartDirectoryPost = (options: Options) => (options.client ?? client).post({ url: '/restores/cart/{file_id}', ...options }); + /** * Remove From Cart */ export const removeFromCartRestoresCartItemIdDelete = (options: Options) => (options.client ?? client).delete({ url: '/restores/cart/{item_id}', ...options }); -/** - * Clear Cart - */ -export const clearCartRestoresCartClearPost = (options?: Options) => (options?.client ?? client).post({ url: '/restores/cart/clear', ...options }); - /** * Get Manifest */ export const getManifestRestoresManifestGet = (options?: Options) => (options?.client ?? client).get({ url: '/restores/manifest', ...options }); /** - * Read Root + * Health Check */ -export const readRootGet = (options?: Options) => (options?.client ?? client).get({ url: '/', ...options }); +export const healthCheckHealthGet = (options?: Options) => (options?.client ?? client).get({ url: '/health', ...options }); diff --git a/frontend/src/lib/api/types.gen.ts b/frontend/src/lib/api/types.gen.ts index 831bc44..5fb91f7 100644 --- a/frontend/src/lib/api/types.gen.ts +++ b/frontend/src/lib/api/types.gen.ts @@ -18,6 +18,42 @@ export type BatchTrackRequest = { untracks?: Array; }; +/** + * Body_import_database_system_database_import_post + */ +export type BodyImportDatabaseSystemDatabaseImportPost = { + /** + * File + */ + file: Blob | File; +}; + +/** + * CartFileItemSchema + */ +export type CartFileItemSchema = { + /** + * Name + */ + name: string; + /** + * Path + */ + path: string; + /** + * Type + */ + type: string; + /** + * Size + */ + size?: number | null; + /** + * Media + */ + media?: Array; +}; + /** * CartItemSchema */ @@ -40,6 +76,24 @@ export type CartItemSchema = { media_identifiers: Array; }; +/** + * CartTreeNodeSchema + */ +export type CartTreeNodeSchema = { + /** + * Name + */ + name: string; + /** + * Path + */ + path: string; + /** + * Has Children + */ + has_children?: boolean; +}; + /** * DashboardStatsSchema */ @@ -170,6 +224,18 @@ export type ItemMetadataSchema = { * Child Count */ child_count?: number | null; + /** + * Vulnerable + */ + vulnerable?: boolean; + /** + * Selected + */ + selected?: boolean; + /** + * Indeterminate + */ + indeterminate?: boolean; }; /** @@ -350,6 +416,16 @@ export type RestoreManifestSchema = { media_required: Array; }; +/** + * RestoreRequest + */ +export type RestoreRequest = { + /** + * Destination + */ + destination: string; +}; + /** * ScanStatusSchema */ @@ -394,6 +470,16 @@ export type SettingSchema = { value: string; }; +/** + * TestNotificationRequest + */ +export type TestNotificationRequest = { + /** + * Url + */ + url: string; +}; + /** * TreeNodeSchema */ @@ -468,6 +554,18 @@ export type AppApiInventoryFileItemSchema = { * Media */ media?: Array; + /** + * Vulnerable + */ + vulnerable?: boolean; + /** + * Selected + */ + selected?: boolean; + /** + * Indeterminate + */ + indeterminate?: boolean; }; /** @@ -656,6 +754,42 @@ export type BrowsePathSystemBrowseGetResponses = { export type BrowsePathSystemBrowseGetResponse = BrowsePathSystemBrowseGetResponses[keyof BrowsePathSystemBrowseGetResponses]; +export type SearchSystemSystemSearchGetData = { + body?: never; + path?: never; + query: { + /** + * Q + */ + q: string; + /** + * Include Ignored + */ + include_ignored?: boolean; + }; + url: '/system/search'; +}; + +export type SearchSystemSystemSearchGetErrors = { + /** + * Validation 
Error + */ + 422: HttpValidationError; +}; + +export type SearchSystemSystemSearchGetError = SearchSystemSystemSearchGetErrors[keyof SearchSystemSystemSearchGetErrors]; + +export type SearchSystemSystemSearchGetResponses = { + /** + * Response Search System System Search Get + * + * Successful Response + */ + 200: Array; +}; + +export type SearchSystemSystemSearchGetResponse = SearchSystemSystemSearchGetResponses[keyof SearchSystemSystemSearchGetResponses]; + export type TrackBatchSystemTrackBatchPostData = { body: BatchTrackRequest; path?: never; @@ -722,6 +856,66 @@ export type UpdateSettingSystemSettingsPostResponses = { 200: unknown; }; +export type TestNotificationSystemNotificationsTestPostData = { + body: TestNotificationRequest; + path?: never; + query?: never; + url: '/system/notifications/test'; +}; + +export type TestNotificationSystemNotificationsTestPostErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type TestNotificationSystemNotificationsTestPostError = TestNotificationSystemNotificationsTestPostErrors[keyof TestNotificationSystemNotificationsTestPostErrors]; + +export type TestNotificationSystemNotificationsTestPostResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + +export type ExportDatabaseSystemDatabaseExportGetData = { + body?: never; + path?: never; + query?: never; + url: '/system/database/export'; +}; + +export type ExportDatabaseSystemDatabaseExportGetResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + +export type ImportDatabaseSystemDatabaseImportPostData = { + body: BodyImportDatabaseSystemDatabaseImportPost; + path?: never; + query?: never; + url: '/system/database/import'; +}; + +export type ImportDatabaseSystemDatabaseImportPostErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type ImportDatabaseSystemDatabaseImportPostError = ImportDatabaseSystemDatabaseImportPostErrors[keyof ImportDatabaseSystemDatabaseImportPostErrors]; + +export type ImportDatabaseSystemDatabaseImportPostResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + export type GetTreeSystemTreeGetData = { body?: never; path?: never; @@ -851,6 +1045,34 @@ export type UpdateMediaInventoryMediaMediaIdPatchResponses = { export type UpdateMediaInventoryMediaMediaIdPatchResponse = UpdateMediaInventoryMediaMediaIdPatchResponses[keyof UpdateMediaInventoryMediaMediaIdPatchResponses]; +export type InitializeMediaInventoryMediaMediaIdInitializePostData = { + body?: never; + path: { + /** + * Media Id + */ + media_id: number; + }; + query?: never; + url: '/inventory/media/{media_id}/initialize'; +}; + +export type InitializeMediaInventoryMediaMediaIdInitializePostErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type InitializeMediaInventoryMediaMediaIdInitializePostError = InitializeMediaInventoryMediaMediaIdInitializePostErrors[keyof InitializeMediaInventoryMediaMediaIdInitializePostErrors]; + +export type InitializeMediaInventoryMediaMediaIdInitializePostResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + export type BrowseIndexInventoryBrowseGetData = { body?: never; path?: never; @@ -887,6 +1109,42 @@ export type BrowseIndexInventoryBrowseGetResponses = { export type BrowseIndexInventoryBrowseGetResponse = BrowseIndexInventoryBrowseGetResponses[keyof BrowseIndexInventoryBrowseGetResponses]; +export type SearchIndexInventorySearchGetData = { + body?: never; + path?: never; + query: { + /** + * Q + */ + q: string; + /** + * 
Include Ignored + */ + include_ignored?: boolean; + }; + url: '/inventory/search'; +}; + +export type SearchIndexInventorySearchGetErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type SearchIndexInventorySearchGetError = SearchIndexInventorySearchGetErrors[keyof SearchIndexInventorySearchGetErrors]; + +export type SearchIndexInventorySearchGetResponses = { + /** + * Response Search Index Inventory Search Get + * + * Successful Response + */ + 200: Array; +}; + +export type SearchIndexInventorySearchGetResponse = SearchIndexInventorySearchGetResponses[keyof SearchIndexInventorySearchGetResponses]; + export type GetIndexTreeInventoryTreeGetData = { body?: never; path?: never; @@ -1009,6 +1267,93 @@ export type ListBackupsBackupsGetResponses = { 200: unknown; }; +export type TriggerRestoreRestoresTriggerPostData = { + body: RestoreRequest; + path?: never; + query?: never; + url: '/restores/trigger'; +}; + +export type TriggerRestoreRestoresTriggerPostErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type TriggerRestoreRestoresTriggerPostError = TriggerRestoreRestoresTriggerPostErrors[keyof TriggerRestoreRestoresTriggerPostErrors]; + +export type TriggerRestoreRestoresTriggerPostResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + +export type BrowseCartRestoresCartBrowseGetData = { + body?: never; + path?: never; + query?: { + /** + * Path + */ + path?: string | null; + }; + url: '/restores/cart/browse'; +}; + +export type BrowseCartRestoresCartBrowseGetErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type BrowseCartRestoresCartBrowseGetError = BrowseCartRestoresCartBrowseGetErrors[keyof BrowseCartRestoresCartBrowseGetErrors]; + +export type BrowseCartRestoresCartBrowseGetResponses = { + /** + * Response Browse Cart Restores Cart Browse Get + * + * Successful Response + */ + 200: Array; +}; + +export type BrowseCartRestoresCartBrowseGetResponse = BrowseCartRestoresCartBrowseGetResponses[keyof BrowseCartRestoresCartBrowseGetResponses]; + +export type GetCartTreeRestoresCartTreeGetData = { + body?: never; + path?: never; + query?: { + /** + * Path + */ + path?: string | null; + }; + url: '/restores/cart/tree'; +}; + +export type GetCartTreeRestoresCartTreeGetErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type GetCartTreeRestoresCartTreeGetError = GetCartTreeRestoresCartTreeGetErrors[keyof GetCartTreeRestoresCartTreeGetErrors]; + +export type GetCartTreeRestoresCartTreeGetResponses = { + /** + * Response Get Cart Tree Restores Cart Tree Get + * + * Successful Response + */ + 200: Array; +}; + +export type GetCartTreeRestoresCartTreeGetResponse = GetCartTreeRestoresCartTreeGetResponses[keyof GetCartTreeRestoresCartTreeGetResponses]; + export type ListCartRestoresCartGetData = { body?: never; path?: never; @@ -1027,28 +1372,14 @@ export type ListCartRestoresCartGetResponses = { export type ListCartRestoresCartGetResponse = ListCartRestoresCartGetResponses[keyof ListCartRestoresCartGetResponses]; -export type AddToCartRestoresCartFileIdPostData = { +export type ClearCartRestoresCartClearPostData = { body?: never; - path: { - /** - * File Id - */ - file_id: number; - }; + path?: never; query?: never; - url: '/restores/cart/{file_id}'; + url: '/restores/cart/clear'; }; -export type AddToCartRestoresCartFileIdPostErrors = { - /** - * Validation Error - */ - 422: HttpValidationError; -}; - -export type AddToCartRestoresCartFileIdPostError = 
AddToCartRestoresCartFileIdPostErrors[keyof AddToCartRestoresCartFileIdPostErrors]; - -export type AddToCartRestoresCartFileIdPostResponses = { +export type ClearCartRestoresCartClearPostResponses = { /** * Successful Response */ @@ -1078,6 +1409,34 @@ export type AddDirectoryToCartRestoresCartDirectoryPostResponses = { 200: unknown; }; +export type AddToCartRestoresCartFileIdPostData = { + body?: never; + path: { + /** + * File Id + */ + file_id: number; + }; + query?: never; + url: '/restores/cart/{file_id}'; +}; + +export type AddToCartRestoresCartFileIdPostErrors = { + /** + * Validation Error + */ + 422: HttpValidationError; +}; + +export type AddToCartRestoresCartFileIdPostError = AddToCartRestoresCartFileIdPostErrors[keyof AddToCartRestoresCartFileIdPostErrors]; + +export type AddToCartRestoresCartFileIdPostResponses = { + /** + * Successful Response + */ + 200: unknown; +}; + export type RemoveFromCartRestoresCartItemIdDeleteData = { body?: never; path: { @@ -1106,20 +1465,6 @@ export type RemoveFromCartRestoresCartItemIdDeleteResponses = { 200: unknown; }; -export type ClearCartRestoresCartClearPostData = { - body?: never; - path?: never; - query?: never; - url: '/restores/cart/clear'; -}; - -export type ClearCartRestoresCartClearPostResponses = { - /** - * Successful Response - */ - 200: unknown; -}; - export type GetManifestRestoresManifestGetData = { body?: never; path?: never; @@ -1136,14 +1481,14 @@ export type GetManifestRestoresManifestGetResponses = { export type GetManifestRestoresManifestGetResponse = GetManifestRestoresManifestGetResponses[keyof GetManifestRestoresManifestGetResponses]; -export type ReadRootGetData = { +export type HealthCheckHealthGetData = { body?: never; path?: never; query?: never; - url: '/'; + url: '/health'; }; -export type ReadRootGetResponses = { +export type HealthCheckHealthGetResponses = { /** * Successful Response */ diff --git a/frontend/src/lib/components/ScanStatusOverlay.svelte b/frontend/src/lib/components/ScanStatusOverlay.svelte index e92405a..158f867 100644 --- a/frontend/src/lib/components/ScanStatusOverlay.svelte +++ b/frontend/src/lib/components/ScanStatusOverlay.svelte @@ -51,7 +51,7 @@
System Scanner Active - {scanProgress}% + INDEXING
@@ -61,18 +61,14 @@
-
-
-
-
- Throughput + Progress - {scanStatus.files_processed.toLocaleString()} / {scanStatus.total_files_found.toLocaleString()} ITEMS + {scanStatus.files_processed.toLocaleString()} ITEMS SCANNED
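The overlay hunk above swaps the percentage readout for a running item count, since no reliable total exists while the scanner is still walking the tree. A minimal sketch of the same label logic, with the ScanStatus shape assumed rather than taken from the component:

// Assumed subset of the scan status the overlay receives via props.
interface ScanStatus {
  files_processed: number;
}

// Mirrors the new overlay text, e.g. "12,345 ITEMS SCANNED".
function scanProgressLabel(status: ScanStatus): string {
  return `${status.files_processed.toLocaleString()} ITEMS SCANNED`;
}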
diff --git a/frontend/src/lib/components/file-browser/FileBrowser.svelte b/frontend/src/lib/components/file-browser/FileBrowser.svelte index e59d208..dce2922 100644 --- a/frontend/src/lib/components/file-browser/FileBrowser.svelte +++ b/frontend/src/lib/components/file-browser/FileBrowser.svelte @@ -21,21 +21,24 @@ let { currentPath = $bindable("ROOT"), + searchQuery = $bindable(""), files = [], onNavigate = (path: string) => {}, onToggleTrack = (item: FileItem) => {}, onSelect = (item: FileItem) => {}, - mode = "host" + mode = "host", + isSearching = false } = $props<{ - currentPath: string; - files: FileItem[]; + currentPath?: string; + searchQuery?: string; + files?: FileItem[]; onNavigate?: (path: string) => void; onToggleTrack?: (item: FileItem) => void; onSelect?: (item: FileItem) => void; - mode?: "host" | "index"; + mode?: "host" | "index" | "cart"; + isSearching?: boolean; }>(); - let searchQuery = $state(""); let selectedPaths = $state>(new Set()); let lastSelectedPath = $state(null); let sortColumn = $state<"name" | "size" | "mtime" | "type">("name"); @@ -102,19 +105,37 @@ hasChildren: true }); - const activeRoot = $derived(mode === "host" ? sourceDataRoot : virtualIndexRoot); + const recoveryQueueRoot = $derived({ + name: "Recovery Queue", + path: "ROOT", + expanded: true, + children: [], + hasChildren: true + }); + + const activeRoot = $derived( + mode === "host" ? sourceDataRoot : + mode === "index" ? virtualIndexRoot : + recoveryQueueRoot + ); // --- Logic --- const breadcrumbs = $derived.by(() => { if (currentPath === "ROOT") { - return [{ name: mode === "host" ? "All Sources" : "Index Browser", path: "ROOT" }]; + let name = "All Sources"; + if (mode === "index") name = "Index Browser"; + if (mode === "cart") name = "Recovery Queue"; + return [{ name, path: "ROOT" }]; } const parts = currentPath.split("/").filter(Boolean); const crumbs: Breadcrumb[] = []; - crumbs.push({ name: mode === "host" ? "All Sources" : "Index Browser", path: "ROOT" }); + let rootName = "All Sources"; + if (mode === "index") rootName = "Index Browser"; + if (mode === "cart") rootName = "Recovery Queue"; + crumbs.push({ name: rootName, path: "ROOT" }); let current = ""; for (const part of parts) { @@ -125,7 +146,10 @@ }); const filteredFiles = $derived.by(() => { - let result = files.filter((f: FileItem) => f.name.toLowerCase().includes(searchQuery.toLowerCase())); + // When doing backend search, the parent feeds us already-filtered results. + // We'll still do a light local filter to ensure things like name matching, + // but we should match on the full path just in case. + let result = files.filter((f: FileItem) => f.path.toLowerCase().includes((searchQuery || "").toLowerCase())); result.sort((a: FileItem, b: FileItem) => { const valA = sortColumn === "type" ? 
a.type : a[sortColumn as keyof FileItem] || 0; @@ -194,14 +218,55 @@ } } - function bulkToggle(track: boolean) { - const selectedItems = files.filter((f: FileItem) => selectedPaths.has(f.path) && f.tracked !== track); - selectedItems.forEach((item: FileItem) => onToggleTrack(item)); + let isEditingPath = $state(false); + let pathInputValue = $state(""); + + function handleAddressClick() { + pathInputValue = currentPath; + isEditingPath = true; + } + + function handlePathSubmit() { + onNavigate(pathInputValue); + isEditingPath = false; + } + + function handleKeyDown(e: KeyboardEvent) { + if (isEditingPath) { + if (e.key === "Enter") handlePathSubmit(); + if (e.key === "Escape") isEditingPath = false; + return; + } + + if (e.key === "Enter" && selectedPaths.size === 1) { + const item = files.find((f: FileItem) => f.path === Array.from(selectedPaths)[0]); + if (item && item.type === "directory") { + handleRowDoubleClick(item); + } + } + if (e.key === "Backspace") { + if (currentPath === "ROOT") return; + const parts = currentPath.split("/").filter(Boolean); + if (parts.length === 1) { + onNavigate("ROOT"); + } else { + onNavigate("/" + parts.slice(0, -1).join("/")); + } + } + if ((e.ctrlKey || e.metaKey) && e.key === "f") { + e.preventDefault(); + const searchInput = document.getElementById("browser-search") as HTMLInputElement; + searchInput?.focus(); + } }
@@ -233,25 +298,42 @@
-
+
-
- {#each breadcrumbs as crumb, i} - {#if i > 0} - - {/if} - - {/each} -
- + {/each} +
+ {/if} + +
@@ -260,11 +342,16 @@
- + {#if isSearching} + + {:else} + + {/if} {#if mode === 'host'} {files.filter((f: FileItem) => f.tracked).length} Tracked - {:else} + {:else if mode === 'index'} {files.filter((f: FileItem) => f.selected).length} Selected + {:else} + {files.length} Queued {/if}
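The filteredFiles change in this file keeps a light client-side filter on top of backend search results, matching on the full path rather than the name so nested hits still surface. A standalone sketch of that filter, with the FileItem fields assumed from frontend/src/lib/types.ts:

// Assumed subset of FileItem; only the fields the filter touches.
interface FilterableItem {
  name: string;
  path: string;
}

// Case-insensitive match on the full path; an empty query passes everything.
function filterByPath<T extends FilterableItem>(files: T[], searchQuery: string): T[] {
  const q = (searchQuery || "").toLowerCase();
  return files.filter((f) => f.path.toLowerCase().includes(q));
}

// e.g. filterByPath(results, "reports/2024") keeps a hypothetical
// "/media/reports/2024/q1.pdf" even though its name alone would not match.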
diff --git a/frontend/src/lib/components/file-browser/FileBrowserRowItem.svelte b/frontend/src/lib/components/file-browser/FileBrowserRowItem.svelte index 96e9537..af90d18 100644 --- a/frontend/src/lib/components/file-browser/FileBrowserRowItem.svelte +++ b/frontend/src/lib/components/file-browser/FileBrowserRowItem.svelte @@ -110,7 +110,8 @@ isSelected ? "bg-blue-500/15 border-l-2 border-l-blue-500" : "hover:bg-white/5 border-l-2 border-l-transparent", - item.ignored && "opacity-40 grayscale-[0.5]" + item.ignored && "opacity-40 grayscale-[0.5]", + (mode === 'index' && item.type === 'file' && item.vulnerable) && "opacity-60 cursor-not-allowed" )} role="button" tabindex="0" @@ -123,7 +124,9 @@ class="flex h-10 w-12 shrink-0 items-center justify-center border-r border-border-color/10" onclick={(e) => { e.stopPropagation(); - if (!item.ignored) onToggleTrack(); + if (item.ignored) return; + if (mode === 'index' && item.type === 'file' && item.vulnerable) return; + onToggleTrack(); }} onkeydown={(e) => e.key === " " && e.stopPropagation()} role="none" @@ -143,7 +146,12 @@
{/if} {:else} - + {/if}
@@ -181,15 +189,32 @@ > {item.name} - {#if mode === "index" && item.media && item.media.length > 0} -
- {#each item.media as m} - - - {m} - - {/each} -
+ {#if mode === "index"} + {#if item.media && item.media.length > 0} +
+ {#each item.media as m} + + + {m} + + {/each} +
+ {:else if item.vulnerable} + + + Vulnerable + + {:else if item.type === 'directory'} + + + Protected + + {:else if item.type === 'file'} + + + Vulnerable + + {/if} {/if}
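The row-item changes above block tracking of vulnerable files in index mode and pick one badge per row: media labels first, then Vulnerable, then Protected for directories, with plain files falling back to Vulnerable. A small sketch of that precedence as pure functions, with the relevant FileItem fields assumed:

// Assumed subset of FileItem used by the badge and toggle-guard logic.
interface IndexRow {
  type: "file" | "directory";
  media?: string[];
  vulnerable?: boolean;
  ignored?: boolean;
}

type Badge = "media" | "vulnerable" | "protected" | null;

// Mirrors the template's branch order in index mode.
function badgeFor(item: IndexRow): Badge {
  if (item.media && item.media.length > 0) return "media";
  if (item.vulnerable) return "vulnerable";
  if (item.type === "directory") return "protected";
  return item.type === "file" ? "vulnerable" : null;
}

// Guard applied before calling onToggleTrack when browsing the index.
function canToggleTrackInIndex(item: IndexRow): boolean {
  if (item.ignored) return false;
  return !(item.type === "file" && item.vulnerable);
}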
diff --git a/frontend/src/lib/components/file-browser/FileBrowserTreeItem.svelte b/frontend/src/lib/components/file-browser/FileBrowserTreeItem.svelte index d91927b..7645b45 100644 --- a/frontend/src/lib/components/file-browser/FileBrowserTreeItem.svelte +++ b/frontend/src/lib/components/file-browser/FileBrowserTreeItem.svelte @@ -1,4 +1,5 @@ @@ -103,7 +113,7 @@
["variant"]; type Size = VariantProps["size"]; -type Props = ButtonPrimitive.Props & { - variant?: Variant; - size?: Size; +type Props = any & { + variant?: Variant; + size?: Size; }; - export { Root, type Props, diff --git a/frontend/src/lib/components/ui/checkbox/checkbox.svelte b/frontend/src/lib/components/ui/checkbox/checkbox.svelte index 2221f21..0996d78 100644 --- a/frontend/src/lib/components/ui/checkbox/checkbox.svelte +++ b/frontend/src/lib/components/ui/checkbox/checkbox.svelte @@ -1,26 +1,30 @@ + class: className, + checked = $bindable(false), + indeterminate = false, + ...rest + }: any = $props(); - - {#if checked} -
- -
+ > + {#if checked === true} +
+ +
+ {:else if indeterminate || checked === "indeterminate"} +
+ +
{/if} -
+ diff --git a/frontend/src/lib/types.ts b/frontend/src/lib/types.ts index 3d218d1..5741bf2 100644 --- a/frontend/src/lib/types.ts +++ b/frontend/src/lib/types.ts @@ -10,6 +10,8 @@ export interface FileItem { media?: string[]; // Media it's on (for index browsing) selected?: boolean; // For restore cart sha256_hash?: string | null; + vulnerable?: boolean; + indeterminate?: boolean; } export interface TreeNode { diff --git a/frontend/src/routes/+layout.svelte b/frontend/src/routes/+layout.svelte index 17ecd03..2e647f5 100644 --- a/frontend/src/routes/+layout.svelte +++ b/frontend/src/routes/+layout.svelte @@ -1,7 +1,8 @@ - + + + + +{#if showShortcuts} +
showShortcuts = false} role="presentation"> +
e.stopPropagation()} role="dialog"> +
+

+ + Fleet Command Shortcuts +

+

Universal system navigation & control.

+
+ +
+
+

Navigation

+
Dashboard D
+
Index Browser I
+
Tracking Policy T
+
System Activity A
+
+
+

Operations

+
Media Fleet M
+
Data Recovery R
+
System Settings S
+
Close Menu ESC
+
+
+ +
+

Press '?' at any time to toggle this command set.

+
+
+
+{/if} +
-
- -
- -
-
- {@render children()} -
+
+
+ {@render children()}
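The layout now documents single-key navigation (D, I, T, A, M, R, S, ESC, and '?'), though the listener itself sits outside this hunk. A minimal sketch of what such a global handler could look like in SvelteKit; the route paths and the use of goto are assumptions, not taken from the patch:

import { goto } from "$app/navigation";

// Assumed route map; the real targets are not shown in this diff.
const shortcutRoutes: Record<string, string> = {
  d: "/",          // Dashboard
  i: "/index",     // Index Browser
  t: "/tracking",  // Tracking Policy
  a: "/activity",  // System Activity
  m: "/media",     // Media Fleet
  r: "/restores",  // Data Recovery
  s: "/settings",  // System Settings
};

function handleGlobalKeydown(e: KeyboardEvent, toggleShortcuts: () => void) {
  // Ignore keystrokes while the user is typing in a form field.
  const target = e.target as HTMLElement;
  if (
    target.tagName === "INPUT" ||
    target.tagName === "TEXTAREA" ||
    target.isContentEditable
  ) {
    return;
  }

  if (e.key === "?") {
    toggleShortcuts(); // show or hide the shortcuts overlay
    return;
  }

  const route = shortcutRoutes[e.key.toLowerCase()];
  if (route) goto(route);
}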