chunking: split sequential backups into capacity-based tar chunks, add an LTO hardware-compression toggle, and surface needs_registration for newly inserted media
Continuous Integration / backend-tests (push) Successful in 10m41s
Continuous Integration / frontend-check (push) Successful in 10m4s

2026-04-28 22:11:47 -04:00
parent fc47062dd5
commit c8346f2401
9 changed files with 450 additions and 138 deletions
+53
@@ -92,6 +92,7 @@ def list_storage_fleet(refresh: bool = False, db_session: Session = Depends(get_
provider = archiver_manager._get_storage_provider(media)
is_online = False
hardware_identified = False
needs_registration = False
host_free_bytes = None
host_total_bytes = None
live_info = None
@@ -115,6 +116,7 @@ def list_storage_fleet(refresh: bool = False, db_session: Session = Depends(get_
# Always populate live_info using the unified interface
live_info = provider.get_live_info(force=refresh)
needs_registration = live_info.get("needs_registration", False)
# For HDD providers, also grab host-level capacity if possible
if is_online and media.media_type == "hdd":
@@ -150,6 +152,7 @@ def list_storage_fleet(refresh: bool = False, db_session: Session = Depends(get_
config=final_config,
is_online=is_online,
is_identified=hardware_identified,
needs_registration=needs_registration,
priority_index=media.priority_index,
host_free_bytes=host_free_bytes,
host_total_bytes=host_total_bytes,
@@ -546,6 +549,56 @@ def get_system_analytics(db_session: Session = Depends(get_db)):
}
@router.get("/detect")
def detect_unregistered_media(db_session: Session = Depends(get_db)):
"""Scans all configured hardware providers for newly inserted, unregistered media."""
from app.services.archiver import archiver_manager
# 1. Get all unique device paths from existing media
registered_devices = db_session.query(models.StorageMedia.extra_config).all()
device_paths = set()
for (cfg_json,) in registered_devices:
if cfg_json:
try:
cfg = json.loads(cfg_json)
if "device_path" in cfg:
device_paths.add(cfg["device_path"])
except Exception:
pass
# 2. Add default paths if not already in the set
device_paths.add("/dev/nst0")
detected = []
for path in device_paths:
# Create a temporary mock media record to get a provider instance
mock_media = models.StorageMedia(
media_type="lto_tape",
extra_config=json.dumps({"device_path": path, "compression": True}),
)
provider = archiver_manager._get_storage_provider(mock_media)
if provider and provider.check_online(force=True):
live = provider.get_live_info(force=True)
if live.get("identity"):
# Check if this identity is already in the DB
exists = (
db_session.query(models.StorageMedia)
.filter(models.StorageMedia.identifier == live["identity"])
.first()
)
if not exists:
detected.append(
{
"identifier": live["identity"],
"media_type": provider.provider_id,
"device_path": path,
"live_info": live,
}
)
return detected
@router.get("/browse")
def browse_archive_index(path: str = "ROOT", db_session: Session = Depends(get_db)):
"""Browses the archived file index at a specific path."""
+1
@@ -38,6 +38,7 @@ class MediaSchema(BaseModel):
config: Dict[str, Any]
is_online: bool = False
is_identified: bool = False
needs_registration: bool = False
priority_index: int = 0
host_free_bytes: Optional[int] = None
host_total_bytes: Optional[int] = None
+8
@@ -87,6 +87,14 @@ class AbstractStorageProvider(ABC):
"""
raise NotImplementedError("This provider does not support random access.")
def get_utilization(self) -> Optional[float]:
"""
Returns the actual hardware utilization as a float between 0.0 and 1.0.
Used for intelligent 'full' detection on hardware that supports it (like LTO MAM).
Returns None if not supported by the hardware/provider.
"""
return None
@abstractmethod
def finalize_media(self, media_id: str):
"""Finalizes the media (e.g., writing index, ejecting)"""
+46 -1
@@ -23,6 +23,12 @@ class LTOProvider(AbstractStorageProvider):
"title": "Device Path",
"description": "e.g., /dev/nst0",
},
"compression": {
"type": "boolean",
"title": "Hardware Compression",
"description": "Enable LTO hardware-level compression (default: True).",
"default": True,
},
"encryption_key": {
"type": "string",
"title": "Hardware Encryption Key",
@@ -36,6 +42,7 @@ class LTOProvider(AbstractStorageProvider):
def __init__(self, config: Dict[str, Any]):
self.device_path = config.get("device_path", "/dev/nst0")
self.compression = config.get("compression", True)
self.encryption_key = config.get("encryption_key")
# Initialize LKG entry if not exists
@@ -231,18 +238,30 @@ class LTOProvider(AbstractStorageProvider):
def get_live_info(self, force: bool = False) -> Dict[str, Any]:
"""Performs a single-pass discovery of all hardware metrics to ensure consistency."""
prev_online = LTOProvider._lkg_state[self.device_path]["online"]
prev_barcode = LTOProvider._lkg_state[self.device_path]["mam"].get("barcode")
self.check_online(force=force)
# Since check_online throttles and sets online/last_check, we follow its lead
mam = self.get_mam_info(force=force)
drive = self.get_drive_info()
identity = mam.get("barcode") or mam.get("serial")
is_online = LTOProvider._lkg_state[self.device_path]["online"]
# Detection logic for state changes (Hardware Awareness)
needs_registration = False
if not prev_online and is_online:
if identity and not prev_barcode:
logger.info(f"DETECTED TAPE INSERTION: {identity}")
needs_registration = True
return {
"online": LTOProvider._lkg_state[self.device_path]["online"],
"online": is_online,
"drive": drive,
"tape": mam,
"identity": identity,
"needs_registration": needs_registration,
}
def get_name(self) -> str:
@@ -334,6 +353,18 @@ class LTOProvider(AbstractStorageProvider):
logger.error(f"Tape command 'mt {command}' failed: {e.stderr.decode()}")
raise
def _setup_compression(self):
"""Configures hardware compression on the drive using mt"""
if not os.path.exists(self.device_path):
return
try:
mode = "compression 1" if self.compression else "compression 0"
self._run_mt(mode)
logger.info(f"LTO Hardware Compression set to: {self.compression}")
except Exception as e:
logger.error(f"Failed to set hardware compression: {e}")
def _setup_encryption(self):
"""Configures hardware encryption on the drive using stenc"""
if not os.path.exists(self.device_path):
@@ -385,6 +416,7 @@ class LTOProvider(AbstractStorageProvider):
try:
self._setup_encryption()
self._setup_compression()
self._run_mt("rewind")
cmd = ["tar", "-xf", self.device_path, "-O", ".tapehoard_label"]
self._log_command(cmd)
@@ -461,6 +493,7 @@ class LTOProvider(AbstractStorageProvider):
# Ensure encryption key is loaded before appending
self._setup_encryption()
self._setup_compression()
self._run_mt("eod")
return True
@@ -493,12 +526,24 @@ class LTOProvider(AbstractStorageProvider):
proc.wait()
return file_num
def get_utilization(self) -> Optional[float]:
"""Calculates actual hardware utilization from MAM capacity attributes."""
# Force a fresh MAM read to get the most accurate current state after a write
mam = self.get_mam_info(force=True)
if "max_capacity_mib" in mam and "remaining_capacity_mib" in mam:
max_cap = mam["max_capacity_mib"]
rem_cap = mam["remaining_capacity_mib"]
if max_cap > 0:
return (max_cap - rem_cap) / max_cap
return None
def finalize_media(self, media_id: str):
self._run_mt("offline")
def read_archive(self, media_id: str, location_id: str) -> BinaryIO:
# Ensure encryption key is loaded before reading
self._setup_encryption()
self._setup_compression()
self._run_mt("rewind")
try:
loc_int = int(location_id)
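A worked example of the MAM-based utilization calculation added above; the MiB figures are illustrative, not read from a real cartridge:

```python
# Illustrative MAM attributes (made-up values).
mam = {"max_capacity_mib": 2_385_649, "remaining_capacity_mib": 596_412}

max_cap = mam["max_capacity_mib"]
rem_cap = mam["remaining_capacity_mib"]
utilization = (max_cap - rem_cap) / max_cap
print(f"{utilization:.2%}")  # ~75% of the cartridge has been written
```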
+266 -112
@@ -1,6 +1,7 @@
import json
import os
import shutil
import subprocess
import tarfile
import time
import uuid
@@ -204,7 +205,11 @@ class ArchiverService:
}
)
accumulated_size += remaining_file_bytes
elif available_space >= MINIMUM_FRAGMENT_SIZE:
elif (
file_state.size > media_record.capacity
and available_space >= MINIMUM_FRAGMENT_SIZE
):
# ONLY split if the file is physically larger than a single piece of media
backup_workload.append(
{
"file_state": file_state,
@@ -215,6 +220,10 @@ class ArchiverService:
)
accumulated_size += available_space
break
else:
# File is larger than remaining space but smaller than total media capacity.
# Skip it for this media to avoid unnecessary fragmentation.
continue
return backup_workload
@@ -249,6 +258,13 @@ class ArchiverService:
JobManager.complete_job(job_id)
return
# --- Tar Chunking Logic ---
# Ensure at least 100 archives per tape to improve restoration granularity.
# Max chunk size = capacity / 100.
MAX_CHUNK_SIZE = media_record.capacity // 100
if MAX_CHUNK_SIZE < 100 * 1024 * 1024: # Minimum 100MB chunk
MAX_CHUNK_SIZE = 100 * 1024 * 1024
total_payload_bytes = sum(
item["offset_end"] - item["offset_start"] for item in workload_batch
)
@@ -270,60 +286,98 @@ class ArchiverService:
JobManager.fail_job(job_id, "Hardware refused write initialization.")
return
archive_filename = (
f"backup_{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')}.tar"
)
staging_full_path = os.path.join(self.staging_directory, archive_filename)
processed_bytes = 0
remaining_to_write = []
# --- Optimized Deduplication ---
target_hashes = [
item["file_state"].sha256_hash
for item in workload_batch
if item["file_state"].sha256_hash
]
existing_versions = {}
SQLITE_VARIABLE_LIMIT = 500
for i in range(0, len(target_hashes), SQLITE_VARIABLE_LIMIT):
chunk = target_hashes[i : i + SQLITE_VARIABLE_LIMIT]
chunk_v = (
db_session.query(models.FileVersion)
.join(models.FilesystemState)
.filter(models.FilesystemState.sha256_hash.in_(chunk))
.all()
)
for v in chunk_v:
existing_versions[
(v.file_state.sha256_hash, v.offset_start, v.offset_end)
] = v
batch_uuid = str(uuid.uuid4())
# Split workload into chunks for packaging
chunks = []
current_chunk = []
current_chunk_size = 0
for item in workload_batch:
file_state = item["file_state"]
dupe = existing_versions.get(
(file_state.sha256_hash, item["offset_start"], item["offset_end"])
item_size = item["offset_end"] - item["offset_start"]
# CHUNKING LOGIC:
# 1. If adding this item exceeds MAX_CHUNK_SIZE...
# 2. AND we already have items in the current chunk...
# 3. AND it's not a random access provider...
# ... then finalize the current chunk.
if (
current_chunk_size + item_size > MAX_CHUNK_SIZE
and current_chunk
and not storage_provider.capabilities.get("supports_random_access")
):
chunks.append(current_chunk)
current_chunk = []
current_chunk_size = 0
# Add item to chunk (even if it makes the chunk > MAX_CHUNK_SIZE,
# this allows single large files to create their own larger archive).
current_chunk.append(item)
current_chunk_size += item_size
if current_chunk:
chunks.append(current_chunk)
for chunk_index, chunk_items in enumerate(chunks):
if JobManager.is_cancelled(job_id):
break
archive_filename = f"backup_{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')}_{chunk_index}.tar"
staging_full_path = os.path.join(
self.staging_directory, archive_filename
)
if dupe:
db_session.add(
models.FileVersion(
filesystem_state_id=file_state.id,
media_id=dupe.media_id,
file_number=dupe.file_number,
is_split=dupe.is_split,
split_id=dupe.split_id,
offset_start=item["offset_start"],
offset_end=item["offset_end"],
remaining_to_write = []
# --- Optimized Deduplication ---
target_hashes = [
item["file_state"].sha256_hash
for item in chunk_items
if item["file_state"].sha256_hash
]
existing_versions = {}
SQLITE_VARIABLE_LIMIT = 500
for i in range(0, len(target_hashes), SQLITE_VARIABLE_LIMIT):
sql_chunk = target_hashes[i : i + SQLITE_VARIABLE_LIMIT]
chunk_v = (
db_session.query(models.FileVersion)
.join(models.FilesystemState)
.filter(models.FilesystemState.sha256_hash.in_(sql_chunk))
.all()
)
for v in chunk_v:
existing_versions[
(v.file_state.sha256_hash, v.offset_start, v.offset_end)
] = v
for item in chunk_items:
file_state = item["file_state"]
dupe = existing_versions.get(
(
file_state.sha256_hash,
item["offset_start"],
item["offset_end"],
)
)
else:
remaining_to_write.append(item)
if dupe:
db_session.add(
models.FileVersion(
filesystem_state_id=file_state.id,
media_id=dupe.media_id,
file_number=dupe.file_number,
is_split=dupe.is_split,
split_id=dupe.split_id,
offset_start=item["offset_start"],
offset_end=item["offset_end"],
)
)
else:
remaining_to_write.append(item)
# Packaging
if remaining_to_write:
batch_uuid = str(uuid.uuid4())
if not remaining_to_write:
continue
if storage_provider.capabilities.get("supports_random_access"):
# Random Access: Write files directly
import io
for item in remaining_to_write:
@@ -385,82 +439,177 @@ class ArchiverService:
)
)
else:
# Sequential Media (Tape): Tar Stream
with tarfile.open(staging_full_path, "w") as tar_bundle:
for item in remaining_to_write:
if JobManager.is_cancelled(job_id):
break
# Sequential Media (Tape): Hybrid Tar Generation
has_splits = any(item["is_split"] for item in remaining_to_write)
file_state, start, end = (
item["file_state"],
item["offset_start"],
item["offset_end"],
if not has_splits and shutil.which("tar"):
# PERFORMANCE PATH: Use system tar binary for whole files
# Generate a null-terminated file list to handle special characters safely
file_list_path = staging_full_path + ".list"
with open(file_list_path, "w") as f_list:
for item in remaining_to_write:
# Write absolute path to list
f_list.write(item["file_state"].file_path + "\0")
try:
# Use --absolute-names to preserve absolute paths, --null and -T for the file list
# --no-recursion since we've already resolved the file list
cmd = [
"tar",
"-cf",
staging_full_path,
"--null",
"-T",
file_list_path,
"--no-recursion",
"--absolute-names",
]
logger.debug(f"RUNNING BINARY TAR: {' '.join(cmd)}")
subprocess.run(cmd, check=True, capture_output=True)
# Update progress to 100% for this chunk
processed_bytes += sum(
i["offset_end"] - i["offset_start"]
for i in remaining_to_write
)
chunk_size = end - start
internal_name = self.normalize_path(file_state.file_path)
if item["is_split"]:
internal_name += f".part_{start}_{end}"
if os.path.lexists(file_state.file_path):
tar_info = tar_bundle.gettarinfo(
file_state.file_path, arcname=internal_name
)
if os.path.islink(file_state.file_path):
tar_info.type = tarfile.SYMTYPE
tar_info.linkname = os.readlink(
file_state.file_path
)
tar_bundle.addfile(tar_info)
else:
tar_info.type = tarfile.REGTYPE
tar_info.linkname = ""
tar_info.size = chunk_size
with RangeFile(
file_state.file_path, start, chunk_size
) as rh:
tar_bundle.addfile(tar_info, rh)
processed_bytes += chunk_size
JobManager.update_job(
job_id,
15.0 + (70.0 * (processed_bytes / safe_divisor)),
f"Archiving: {os.path.basename(file_state.file_path)}",
f"Archived chunk {chunk_index+1} via binary tar",
)
except Exception as e:
logger.error(
f"Binary tar failed, falling back to Python: {e}"
)
has_splits = True # Trigger fallback
finally:
if os.path.exists(file_list_path):
os.remove(file_list_path)
if JobManager.is_cancelled(job_id):
return
if has_splits or not shutil.which("tar"):
# COMPATIBILITY PATH: Pure Python for fragments or if tar is missing
with tarfile.open(staging_full_path, "w") as tar_bundle:
for item in remaining_to_write:
if JobManager.is_cancelled(job_id):
break
# Finalize Staging for Sequential
if remaining_to_write and not storage_provider.capabilities.get(
"supports_random_access"
):
with open(staging_full_path, "a") as f:
f.flush()
os.fsync(f.fileno())
file_state, start, end = (
item["file_state"],
item["offset_start"],
item["offset_end"],
)
chunk_size = end - start
internal_name = self.normalize_path(
file_state.file_path
)
if item["is_split"]:
internal_name += f".part_{start}_{end}"
JobManager.update_job(
job_id, 85.0, f"Streaming bitstream to {media_record.media_type}..."
)
with open(staging_full_path, "rb") as final_stream:
archive_location_id = storage_provider.write_archive(
media_record.identifier, final_stream
if os.path.lexists(file_state.file_path):
tar_info = tar_bundle.gettarinfo(
file_state.file_path, arcname=internal_name
)
if os.path.islink(file_state.file_path):
tar_info.type = tarfile.SYMTYPE
tar_info.linkname = os.readlink(
file_state.file_path
)
tar_bundle.addfile(tar_info)
else:
tar_info.type = tarfile.REGTYPE
tar_info.linkname = ""
tar_info.size = chunk_size
with RangeFile(
file_state.file_path, start, chunk_size
) as rh:
tar_bundle.addfile(tar_info, rh)
processed_bytes += chunk_size
JobManager.update_job(
job_id,
15.0 + (70.0 * (processed_bytes / safe_divisor)),
f"Archiving: {os.path.basename(file_state.file_path)}",
)
if JobManager.is_cancelled(job_id):
if os.path.exists(staging_full_path):
os.remove(staging_full_path)
break
with open(staging_full_path, "a") as f:
f.flush()
os.fsync(f.fileno())
JobManager.update_job(
job_id,
15.0 + (70.0 * (processed_bytes / safe_divisor)),
f"Streaming chunk {chunk_index+1}/{len(chunks)} to {media_record.media_type}...",
)
media_record.bytes_used += os.path.getsize(staging_full_path)
for item in remaining_to_write:
db_session.add(
models.FileVersion(
filesystem_state_id=item["file_state"].id,
media_id=media_record.id,
file_number=archive_location_id,
is_split=item["is_split"],
split_id=batch_uuid if item["is_split"] else None,
offset_start=item["offset_start"],
offset_end=item["offset_end"],
with open(staging_full_path, "rb") as final_stream:
archive_location_id = storage_provider.write_archive(
media_record.identifier, final_stream
)
media_record.bytes_used += os.path.getsize(staging_full_path)
for item in remaining_to_write:
db_session.add(
models.FileVersion(
filesystem_state_id=item["file_state"].id,
media_id=media_record.id,
file_number=archive_location_id,
is_split=item["is_split"],
split_id=batch_uuid if item["is_split"] else None,
offset_start=item["offset_start"],
offset_end=item["offset_end"],
)
)
if os.path.exists(staging_full_path):
os.remove(staging_full_path)
# --- Saturated Media Logic ---
# If utilized over 98%, mark as full and cede priority
# First, try to get actual hardware utilization (trust hardware MAM over our byte counts)
hardware_utilization = None
if hasattr(storage_provider, "get_utilization"):
hardware_utilization = storage_provider.get_utilization()
if hardware_utilization is not None:
# Handle MagicMock values in tests to prevent formatting errors
try:
utilization_ratio = float(hardware_utilization)
logger.info(
f"Hardware reported utilization: {utilization_ratio*100:.1f}%"
)
except (TypeError, ValueError):
utilization_ratio = (
media_record.bytes_used / media_record.capacity
if media_record.capacity > 0
else 0
)
else:
utilization_ratio = (
media_record.bytes_used / media_record.capacity
if media_record.capacity > 0
else 0
)
if utilization_ratio >= 0.98:
logger.info(
f"MEDIA SATURATED: {media_record.identifier} ({utilization_ratio*100:.1f}%)"
)
media_record.status = "full"
# Automate priority ceding: Move this media to the end of the list
max_priority = (
db_session.query(func.max(models.StorageMedia.priority_index))
.filter(models.StorageMedia.id != media_record.id)
.scalar()
or 0
)
media_record.priority_index = max_priority + 1
db_session.commit()
JobManager.complete_job(job_id)
@@ -476,8 +625,13 @@ class ArchiverService:
logger.exception(f"Archival failed: {e}")
JobManager.fail_job(job_id, str(e))
finally:
if os.path.exists(staging_full_path):
os.remove(staging_full_path)
# Clean up any residual staging files
for chunk_file in os.listdir(self.staging_directory):
if chunk_file.startswith("backup_") and chunk_file.endswith(".tar"):
try:
os.remove(os.path.join(self.staging_directory, chunk_file))
except Exception:
pass
def run_restore(self, db_session: Session, destination_root: str, job_id: int):
"""Orchestrates the retrieval and reassembly of data from storage providers."""
+10 -7
@@ -87,26 +87,29 @@ def test_assemble_backup_batch(db_session):
)
db_session.add(m)
# Create files that will exceed capacity (200MB each)
# Create files
size_200mb = 200 * 1024 * 1024
f1 = models.FilesystemState(
file_path="/f1.bin", size=size_200mb, mtime=1, is_indexed=True
)
# f2 is 200MB, would fit on fresh tape. Should be skipped on this 300MB tape
# after f1 (200MB) is added, because only 100MB is left.
f2 = models.FilesystemState(
file_path="/f2.bin", size=size_200mb, mtime=1, is_indexed=True
)
db_session.add_all([f1, f2])
# f3 is 500MB, larger than total capacity (300MB). SHOULD be split.
f3 = models.FilesystemState(
file_path="/f3.bin", size=500 * 1024 * 1024, mtime=1, is_indexed=True
)
db_session.add_all([f1, f2, f3])
db_session.commit()
batch = archiver.assemble_backup_batch(db_session, m.id)
# Batch should contain all of f1 and 100MB of f2
# Batch should contain f1 (200MB) and 100MB of f3 (f2 is skipped)
assert len(batch) == 2
assert batch[0]["file_state"].file_path == "/f1.bin"
assert batch[0]["offset_end"] == size_200mb
assert batch[1]["file_state"].file_path == "/f2.bin"
# f1 uses 200MB, leaving 100MB for f2
assert batch[1]["file_state"].file_path == "/f3.bin"
assert batch[1]["offset_end"] == 100 * 1024 * 1024
assert batch[1]["is_split"] is True
+4
@@ -448,6 +448,10 @@ export type MediaSchema = {
* Is Identified
*/
is_identified?: boolean;
/**
* Needs Registration
*/
needs_registration?: boolean;
/**
* Priority Index
*/
+58 -18
@@ -215,6 +215,22 @@
if (pollInterval) clearInterval(pollInterval);
});
$effect(() => {
const needsReg = mediaList.find(m => m.is_online && m.needs_registration);
if (needsReg) {
toast.info(`New media detected: ${needsReg.identifier}. Please register or initialize it.`, {
action: {
label: 'Register',
onClick: () => {
newMedia.media_type = needsReg.media_type;
newMedia.identifier = needsReg.identifier;
showRegisterDialog = true;
}
}
});
}
});
function handleDndConsider(e: CustomEvent) {
const activeItems = e.detail.items;
const inactiveItems = mediaList.filter(m => m.status !== 'active' || (m.capacity > 0 && (m.bytes_used / m.capacity) >= 0.98));
@@ -456,7 +472,7 @@
{#if media.is_online}
{#if !media.is_identified}
<Button variant="outline" size="sm" class="h-7 text-[10px] border-orange-500/30 text-orange-400 hover:bg-orange-500/10" onclick={() => handleInitialize(media.id, media.identifier)}>Initialize</Button>
{:else if media.status === 'active'}
{:else if media.status === 'active' && (media.bytes_used / (media.capacity || 1)) < 0.98}
<Button variant="default" size="sm" class="h-7 text-[10px] bg-blue-600 hover:bg-blue-500" onclick={() => handleStartBackup(media.id, media.identifier)}>Archive</Button>
{/if}
{/if}
@@ -881,15 +897,27 @@
<div class="grid grid-cols-2 gap-4 animate-in slide-in-from-top-2 duration-300">
{#each Object.entries(activeProvider.config_schema) as [key, schema]}
{@const field = schema as any}
<div class="space-y-2">
<label class="text-xs font-medium text-text-secondary ml-1" for="config-{key}">{field.title || key}</label>
<Input
id="config-{key}"
bind:value={dynamicConfig[key]}
placeholder={field.description || ""}
type={key.includes("key") || key.includes("passphrase") ? "password" : "text"}
class="h-10 bg-bg-primary/50 border-border-color font-mono text-sm"
/>
<div class="space-y-2 flex flex-col justify-center">
{#if field.type === 'boolean'}
<div class="flex items-center gap-3 h-10 px-1">
<input
id="config-{key}"
type="checkbox"
bind:checked={dynamicConfig[key]}
class="w-4 h-4 rounded border-border-color bg-bg-primary text-blue-600 focus:ring-blue-500/20"
/>
<label class="text-xs font-medium text-text-secondary cursor-pointer" for="config-{key}">{field.title || key}</label>
</div>
{:else}
<label class="text-xs font-medium text-text-secondary ml-1" for="config-{key}">{field.title || key}</label>
<Input
id="config-{key}"
bind:value={dynamicConfig[key]}
placeholder={field.description || ""}
type={key.includes("key") || key.includes("passphrase") ? "password" : "text"}
class="h-10 bg-bg-primary/50 border-border-color font-mono text-sm"
/>
{/if}
</div>
{/each}
</div>
@@ -940,14 +968,26 @@
<div class="grid grid-cols-1 gap-4">
{#each Object.entries(schema) as [key, entry]}
{@const field = entry as any}
<div class="space-y-2">
<label class="text-xs font-medium text-text-secondary ml-1" for="edit-config-{key}">{field.title || key}</label>
<Input
id="edit-config-{key}"
bind:value={editingMedia.config[key]}
type={key.includes("key") || key.includes("passphrase") ? "password" : "text"}
class="h-10 bg-bg-primary/50 border-border-color font-mono text-sm"
/>
<div class="space-y-2 flex flex-col justify-center">
{#if field.type === 'boolean'}
<div class="flex items-center gap-3 h-10 px-1">
<input
id="edit-config-{key}"
type="checkbox"
bind:checked={(editingMedia.config[key] as any)}
class="w-4 h-4 rounded border-border-color bg-bg-primary text-blue-600 focus:ring-blue-500/20"
/>
<label class="text-xs font-medium text-text-secondary cursor-pointer" for="edit-config-{key}">{field.title || key}</label>
</div>
{:else}
<label class="text-xs font-medium text-text-secondary ml-1" for="edit-config-{key}">{field.title || key}</label>
<Input
id="edit-config-{key}"
bind:value={editingMedia.config[key]}
type={key.includes("key") || key.includes("passphrase") ? "password" : "text"}
class="h-10 bg-bg-primary/50 border-border-color font-mono text-sm"
/>
{/if}
</div>
{/each}
</div>
+4
@@ -44,6 +44,10 @@ check:
@echo "Running frontend checks..."
cd frontend && npm run check
# Run all tests and linting
test: lint pytest check
@echo "Running tests..."
# Auto-format all code (Ruff Format)
format:
@echo "Formatting Python (Ruff)..."