-
Notifications
You must be signed in to change notification settings - Fork 483
feat: strategized plan compaction #5233
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from 1 commit
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -205,9 +205,157 @@ impl AddAssign for CompactionMetrics { | |||||
| } | ||||||
| } | ||||||
|
|
||||||
| /// Trait for implementing custom compaction planning strategies. | ||||||
| /// | ||||||
| /// This trait allows users to define their own compaction strategies by implementing | ||||||
| /// the `plan` method. The default implementation is provided by [`DefaultCompactionPlanner`]. | ||||||
| #[async_trait::async_trait] | ||||||
| pub trait CompactionPlanner: Send + Sync { | ||||||
| // get all fragments by default | ||||||
| fn get_fragments(&self, dataset: &Dataset, _options: &CompactionOptions) -> Vec<FileFragment> { | ||||||
| // get_fragments should be returning fragments in sorted order (by id) | ||||||
| // and fragment ids should be unique | ||||||
| dataset.get_fragments() | ||||||
| } | ||||||
|
|
||||||
| // no filter by default | ||||||
| async fn filter_fragments( | ||||||
| &self, | ||||||
| _dataset: &Dataset, | ||||||
| fragments: Vec<FileFragment>, | ||||||
| _options: &CompactionOptions, | ||||||
| ) -> Result<Vec<FileFragment>> { | ||||||
| Ok(fragments) | ||||||
| } | ||||||
|
||||||
|
|
||||||
| async fn plan(&self, dataset: &Dataset, options: &CompactionOptions) -> Result<CompactionPlan>; | ||||||
|
||||||
| } | ||||||
|
|
||||||
| /// Formulate a plan to compact the files in a dataset | ||||||
| /// | ||||||
| /// The compaction plan will contain a list of tasks to execute. Each task | ||||||
| /// will contain approximately `target_rows_per_fragment` rows and will be | ||||||
| /// rewriting fragments that are adjacent in the dataset's fragment list. Some | ||||||
| /// tasks may contain a single fragment when that fragment has deletions that | ||||||
| /// are being materialized and doesn't have any neighbors that need to be | ||||||
| /// compacted. | ||||||
| #[derive(Debug, Clone, Default)] | ||||||
| pub struct DefaultCompactionPlanner; | ||||||
|
|
||||||
| #[async_trait::async_trait] | ||||||
| impl CompactionPlanner for DefaultCompactionPlanner { | ||||||
| async fn plan(&self, dataset: &Dataset, options: &CompactionOptions) -> Result<CompactionPlan> { | ||||||
| let fragments = self.get_fragments(dataset, options); | ||||||
| debug_assert!( | ||||||
| fragments.windows(2).all(|w| w[0].id() < w[1].id()), | ||||||
| "fragments in manifest are not sorted" | ||||||
| ); | ||||||
| let mut fragment_metrics = futures::stream::iter(fragments) | ||||||
| .map(|fragment| async move { | ||||||
| match collect_metrics(&fragment).await { | ||||||
| Ok(metrics) => Ok((fragment.metadata, metrics)), | ||||||
| Err(e) => Err(e), | ||||||
| } | ||||||
| }) | ||||||
| .buffered(dataset.object_store().io_parallelism()); | ||||||
|
|
||||||
| let index_fragmaps = load_index_fragmaps(dataset).await?; | ||||||
| let indices_containing_frag = |frag_id: u32| { | ||||||
| index_fragmaps | ||||||
| .iter() | ||||||
| .enumerate() | ||||||
| .filter(|(_, bitmap)| bitmap.contains(frag_id)) | ||||||
| .map(|(pos, _)| pos) | ||||||
| .collect::<Vec<_>>() | ||||||
| }; | ||||||
|
|
||||||
| let mut candidate_bins: Vec<CandidateBin> = Vec::new(); | ||||||
| let mut current_bin: Option<CandidateBin> = None; | ||||||
| let mut i = 0; | ||||||
|
|
||||||
| while let Some(res) = fragment_metrics.next().await { | ||||||
| let (fragment, metrics) = res?; | ||||||
|
|
||||||
| let candidacy = if options.materialize_deletions | ||||||
| && metrics.deletion_percentage() > options.materialize_deletions_threshold | ||||||
| { | ||||||
| Some(CompactionCandidacy::CompactItself) | ||||||
| } else if metrics.physical_rows < options.target_rows_per_fragment { | ||||||
 | // Only want to compact if there are neighbors to compact such that | ||||||
| // we can get a larger fragment. | ||||||
| Some(CompactionCandidacy::CompactWithNeighbors) | ||||||
| } else { | ||||||
| // Not a candidate | ||||||
| None | ||||||
| }; | ||||||
|
|
||||||
| let indices = indices_containing_frag(fragment.id as u32); | ||||||
|
|
||||||
| match (candidacy, &mut current_bin) { | ||||||
| (None, None) => {} // keep searching | ||||||
| (Some(candidacy), None) => { | ||||||
| // Start a new bin | ||||||
| current_bin = Some(CandidateBin { | ||||||
| fragments: vec![fragment], | ||||||
| pos_range: i..(i + 1), | ||||||
| candidacy: vec![candidacy], | ||||||
| row_counts: vec![metrics.num_rows()], | ||||||
| indices, | ||||||
| }); | ||||||
| } | ||||||
| (Some(candidacy), Some(bin)) => { | ||||||
| // We cannot mix "indexed" and "non-indexed" fragments and so we only consider | ||||||
| // the existing bin if it contains the same indices | ||||||
| if bin.indices == indices { | ||||||
| // Add to current bin | ||||||
| bin.fragments.push(fragment); | ||||||
| bin.pos_range.end += 1; | ||||||
| bin.candidacy.push(candidacy); | ||||||
| bin.row_counts.push(metrics.num_rows()); | ||||||
| } else { | ||||||
| // Index set is different. Complete previous bin and start new one | ||||||
| candidate_bins.push(current_bin.take().unwrap()); | ||||||
| current_bin = Some(CandidateBin { | ||||||
| fragments: vec![fragment], | ||||||
| pos_range: i..(i + 1), | ||||||
| candidacy: vec![candidacy], | ||||||
| row_counts: vec![metrics.num_rows()], | ||||||
| indices, | ||||||
| }); | ||||||
| } | ||||||
| } | ||||||
| (None, Some(_)) => { | ||||||
| // Bin is complete | ||||||
| candidate_bins.push(current_bin.take().unwrap()); | ||||||
| } | ||||||
| } | ||||||
|
|
||||||
| i += 1; | ||||||
| } | ||||||
|
|
||||||
| // Flush the last bin | ||||||
| if let Some(bin) = current_bin { | ||||||
| candidate_bins.push(bin); | ||||||
| } | ||||||
|
|
||||||
| let final_bins = candidate_bins | ||||||
| .into_iter() | ||||||
| .filter(|bin| !bin.is_noop()) | ||||||
| .flat_map(|bin| bin.split_for_size(options.target_rows_per_fragment)) | ||||||
| .map(|bin| TaskData { | ||||||
| fragments: bin.fragments, | ||||||
| }); | ||||||
|
|
||||||
| let mut compaction_plan = CompactionPlan::new(dataset.manifest.version, options.clone()); | ||||||
| compaction_plan.extend_tasks(final_bins); | ||||||
|
|
||||||
| Ok(compaction_plan) | ||||||
| } | ||||||
| } | ||||||
|
|
||||||
| /// Compacts the files in the dataset without reordering them. | ||||||
| /// | ||||||
| /// This does a few things: | ||||||
 | /// By default, this does a few things: | ||||||
|
||||||
| /// By default, his does a few things: | |
| /// By default, this does a few things: |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
changed.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
👍
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Was already commented on, but do we need this? It seems like individual implementations can just call
dataset.get_fragments()and then do whatever filtering they would like.