Check that the staging area has enough capacity before archiving
Continuous Integration / backend-tests (push) Successful in 39s
Continuous Integration / frontend-check (push) Successful in 20s
Continuous Integration / e2e-tests (push) Successful in 5m17s

This commit is contained in:
2026-05-05 21:33:44 -04:00
parent 32fc9e4506
commit 65860e0408
7 changed files with 157 additions and 27 deletions
+7
View File
@@ -159,6 +159,13 @@ class DashboardStatsSchema(BaseModel):
redundancy_ratio: float
class StagingInfoSchema(BaseModel):
    """Disk-usage snapshot for the backup staging directory."""
    # Configured staging directory path (the endpoint reports this path even
    # when usage figures were read from a fallback parent directory).
    path: str
    # Total capacity of the filesystem holding the staging area, in bytes.
    total_bytes: int
    # Bytes currently in use on that filesystem.
    used_bytes: int
    # Bytes still available on that filesystem.
    free_bytes: int
class JobSchema(BaseModel):
model_config = ConfigDict(from_attributes=True)
+38 -1
View File
@@ -1,7 +1,10 @@
import shutil
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from app.db.database import get_db
from app.api.common import DashboardStatsSchema
from app.api.common import DashboardStatsSchema, StagingInfoSchema
from app.core.config import settings
from sqlalchemy import func, text
from app.db import models
@@ -113,3 +116,37 @@ def get_dashboard_stats(db_session: Session = Depends(get_db)):
last_scan_time=last_scan.completed_at if last_scan else None,
redundancy_ratio=round(redundancy_percentage, 1),
)
@router.get(
    "/staging/info", response_model=StagingInfoSchema, operation_id="get_staging_info"
)
def get_staging_info():
    """Returns disk usage information for the backup staging directory.

    Tries the configured path first; if it cannot be read (e.g. the
    directory has not been created yet), falls back to its parent, and
    finally reports zeroed figures so the endpoint never errors out.

    Returns:
        StagingInfoSchema: disk usage for the staging filesystem. The
        ``path`` field is always the configured path, even when usage was
        read from the parent fallback.
    """
    path = settings.staging_directory

    def _usage_schema(target: str) -> StagingInfoSchema:
        # May raise OSError when `target` does not exist or is unreadable;
        # callers treat that as "try the next candidate".
        usage = shutil.disk_usage(target)
        return StagingInfoSchema(
            path=path,
            total_bytes=usage.total,
            used_bytes=usage.used,
            free_bytes=usage.free,
        )

    # Parent of the configured path ("/" stays "/"); a relative path with no
    # separator falls back to itself, which simply repeats the failed probe.
    parent = path if path == "/" else path.rsplit("/", 1)[0] or "/"
    for candidate in (path, parent):
        try:
            return _usage_schema(candidate)
        except OSError:
            continue
    # Neither the path nor its parent is readable: report zeros rather
    # than surfacing a 500 to the dashboard.
    return StagingInfoSchema(path=path, total_bytes=0, used_bytes=0, free_bytes=0)
+25
View File
@@ -374,6 +374,31 @@ class ArchiverService:
if current_chunk:
chunks.append(current_chunk)
# --- Staging Space Validation ---
# Sequential media (tape) requires staging the full tarfile before writing.
# Ensure the staging directory has enough free space for the largest chunk.
if not storage_provider.capabilities.get("supports_random_access"):
# Largest chunk = max over chunks of the summed byte extents of its items.
# NOTE(review): assumes offset_end >= offset_start for every item — confirm
# that invariant upstream; also assumes `chunks` is non-empty here (max() on
# an empty sequence would raise) — the random-access guard presumably
# implies at least one chunk; verify.
largest_chunk_size = max(
sum(i["offset_end"] - i["offset_start"] for i in chunk)
for chunk in chunks
)
# Best-effort check: an unreadable staging path only logs a warning below
# rather than failing the job.
try:
usage = shutil.disk_usage(self.staging_directory)
# Require 110% of chunk size to leave headroom for tar overhead
required = int(largest_chunk_size * 1.1)
if usage.free < required:
free_gb = usage.free / (1024**3)
req_gb = required / (1024**3)
# Fail fast with an actionable message instead of running out of disk
# mid-archive.
JobManager.fail_job(
job_id,
f"Staging area at {self.staging_directory} has only {free_gb:.1f} GB free, "
f"but the largest archive chunk requires {req_gb:.1f} GB. "
f"Free up space or reduce the backup set.",
)
return
except OSError as e:
logger.warning(f"Could not check staging disk usage: {e}")
JobManager.add_job_log(job_id, f"Packed into {len(chunks)} archive(s)")
for chunk_index, chunk_items in enumerate(chunks):