Add channel params to commitlog compressor (#3254)

# Description of Changes

<!-- Please describe your change, mention any related tickets, and so on
here. -->

In order to facilitate commitlog and snapshot archival, this patch adds
channel params to `snapshot_watching_commitlog_compressor`.

# API and ABI breaking changes

<!-- If this is an API or ABI breaking change, please apply the
corresponding GitHub label. -->

None

# Expected complexity level and risk

<!--
How complicated do you think these changes are? Grade on a scale from 1
to 5,
where 1 is a trivial change, and 5 is a deep-reaching and complex
change.

This complexity rating applies not only to the complexity apparent in
the diff,
but also to its interactions with existing and future code.

If you answered more than a 2, explain what is complex about the PR,
and what other components it interacts with in potentially concerning
ways. -->

1

# Testing

<!-- Describe any testing you've done, and any testing you'd like your
reviewers to do,
so that you're confident that all the changes work as expected! -->
This commit is contained in:
joshua-spacetime
2025-09-18 14:35:40 -07:00
committed by GitHub
parent 321e4302ef
commit 2f9554c702
2 changed files with 29 additions and 6 deletions
+27 -6
View File
@@ -1625,13 +1625,22 @@ pub async fn local_durability(commitlog_dir: CommitLogDir) -> io::Result<(LocalD
/// [DurabilityProvider]: crate::host::host_controller::DurabilityProvider
pub async fn snapshot_watching_commitlog_compressor(
mut snapshot_rx: watch::Receiver<u64>,
mut clog_tx: Option<tokio::sync::mpsc::Sender<u64>>,
mut snap_tx: Option<tokio::sync::mpsc::Sender<u64>>,
durability: LocalDurability,
) {
let mut prev_snapshot_offset = *snapshot_rx.borrow_and_update();
while snapshot_rx.changed().await.is_ok() {
let snapshot_offset = *snapshot_rx.borrow_and_update();
let durability = durability.clone();
let res = asyncify(move || {
if let Some(snap_tx) = &mut snap_tx {
if let Err(err) = snap_tx.try_send(snapshot_offset) {
tracing::warn!("failed to send offset {snapshot_offset} after snapshot creation: {err}");
}
}
let res: io::Result<_> = asyncify(move || {
let segment_offsets = durability.existing_segment_offsets()?;
let start_idx = segment_offsets
.binary_search(&prev_snapshot_offset)
@@ -1645,15 +1654,27 @@ pub async fn snapshot_watching_commitlog_compressor(
// in this case, segment_offsets[end_idx] is the segment that contains the snapshot,
// which we don't want to compress, so an exclusive range is correct.
let segment_offsets = &segment_offsets[..end_idx];
durability.compress_segments(segment_offsets)
durability.compress_segments(segment_offsets)?;
let n = segment_offsets.len();
let last_compressed_segment = if n > 0 { Some(segment_offsets[n - 1]) } else { None };
Ok(last_compressed_segment)
})
.await;
if let Err(e) = res {
tracing::warn!("failed to compress segments: {e}");
continue;
}
let last_compressed_segment = match res {
Ok(opt_offset) => opt_offset,
Err(err) => {
tracing::warn!("failed to compress segments: {err}");
continue;
}
};
prev_snapshot_offset = snapshot_offset;
if let Some((clog_tx, last_compressed_segment)) = clog_tx.as_mut().zip(last_compressed_segment) {
if let Err(err) = clog_tx.try_send(last_compressed_segment) {
tracing::warn!("failed to send offset {last_compressed_segment} after compression: {err}");
}
}
}
}
+2
View File
@@ -127,6 +127,8 @@ impl DurabilityProvider for StandaloneDurabilityProvider {
|snapshot_rx| {
tokio::spawn(relational_db::snapshot_watching_commitlog_compressor(
snapshot_rx,
None,
None,
durability,
));
}