feat: implement GroupArrayAggAccumulator attempt 3 #17915
```rust
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

//! Dedicated implementation of `GroupsAccumulator` for `array_agg`

use std::iter::repeat_n;
use std::sync::Arc;

use arrow::array::{new_empty_array, Array, GenericListArray};
use arrow::array::{ArrayRef, AsArray, BooleanArray};
use arrow::buffer::OffsetBuffer;
use arrow::compute::kernels;
use arrow::datatypes::Field;
use datafusion_common::{internal_datafusion_err, Result};
use datafusion_expr_common::groups_accumulator::{EmitTo, GroupsAccumulator};

#[derive(Default)]
pub struct AggGroupAccumulator {
    // Input batches stacked as-is, e.g. [1,2,3], [4,5,6]
    stacked_batches: Vec<ArrayRef>,
    // Locates the items of each group within `stacked_batches` as
    // (batch_index, offset) pairs; maintained so the output can be
    // assembled with `kernels::interleave`.
    stacked_group_indices: Vec<Vec<(usize, usize)>>,
}

impl AggGroupAccumulator {
    pub fn new() -> Self {
        Self {
            stacked_batches: vec![],
            stacked_group_indices: vec![],
        }
    }

    fn consume_stacked_batches(
        &mut self,
        emit_to: EmitTo,
    ) -> Result<GenericListArray<i32>> {
        let stacked_batches = self
            .stacked_batches
            .iter()
            .map(|arr| arr.as_ref())
            .collect::<Vec<_>>();

        let group_indices = emit_to.take_needed(&mut self.stacked_group_indices);
        let lengths = group_indices.iter().map(|v| v.len());

        let offsets_buffer = OffsetBuffer::from_lengths(lengths);

        // Given group indices like [1,1,1,2,2,2] and a backing array like
        // [a,b,c,d,e,f], the offsets are [0,3,6] and the result is
        // [a,b,c], [d,e,f].

        // `backend_array` is a flattened list of the individual values
        // before aggregation.
        let backend_array = kernels::interleave::interleave(
            &stacked_batches,
            group_indices
                .into_iter()
                .flatten()
                .collect::<Vec<_>>()
                .as_slice(),
        )?;
        let dt = backend_array.data_type();
        let field = Arc::new(Field::new_list_field(dt.clone(), true));

        let arr =
            GenericListArray::<i32>::new(field, offsets_buffer, backend_array, None);
        Ok(arr)
    }
}

impl GroupsAccumulator for AggGroupAccumulator {
    // Given stacked batches such as:
    // - batch1 [1,4,5,6,7]
    // - batch2 [5,1,1,1,1]
    //
    // and group indices such as:
    // - group 1: [(0,0), (1,1), (1,2), ...]
    // - group 2: []
    // - group 3: []
    // - group 4: [(0,1)]
    //
    // each tuple is (batch_index, offset within that batch), for example:
    // - (0,0) is the 0th item of batch1, which is `1`
    // - (1,1) is the 1st item of batch2, which is `1`
    fn update_batch(
        &mut self,
        values: &[ArrayRef],
        group_indices: &[usize],
        opt_filter: Option<&BooleanArray>,
        total_num_groups: usize,
    ) -> Result<()> {
        let singular_col = values
            .first()
            .ok_or(internal_datafusion_err!("invalid agg input"))?;
        if self.stacked_group_indices.len() < total_num_groups {
            self.stacked_group_indices
                .resize(total_num_groups, Vec::new());
        }

        self.stacked_batches.push(Arc::clone(singular_col));
        let batch_index = self.stacked_batches.len() - 1;

        if let Some(filter) = opt_filter {
            for (array_offset, (group_index, filter_value)) in
                group_indices.iter().zip(filter.iter()).enumerate()
            {
                if let Some(true) = filter_value {
                    self.stacked_group_indices[*group_index]
                        .push((batch_index, array_offset));
                }
            }
        } else {
            for (array_offset, group_index) in group_indices.iter().enumerate() {
                self.stacked_group_indices[*group_index]
                    .push((batch_index, array_offset));
            }
        }

        Ok(())
    }

    fn evaluate(&mut self, emit_to: EmitTo) -> Result<ArrayRef> {
        let arr = self.consume_stacked_batches(emit_to)?;
        Ok(Arc::new(arr) as ArrayRef)
    }

    fn state(&mut self, emit_to: EmitTo) -> Result<Vec<ArrayRef>> {
        Ok(vec![self.evaluate(emit_to)?])
    }

    fn merge_batch(
        &mut self,
        values: &[ArrayRef],
        group_indices: &[usize],
        opt_filter: Option<&BooleanArray>,
        total_num_groups: usize,
    ) -> Result<()> {
        // TODO: every caller of this function passes `opt_filter` as `None`,
        // since the aggregate filter is applied in the partial stage; in the
        // final stage there should be no filter.
        // ...
```
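To make the assembly step in `consume_stacked_batches` concrete, here is a small self-contained sketch (not part of the PR) of the same technique: `interleave` gathers the selected values into one flat array, and an `OffsetBuffer` built from the per-group lengths slices that flat array into one list per group. The batch contents and indices below are invented for illustration.

```rust
use std::sync::Arc;

use arrow::array::{GenericListArray, Int32Array};
use arrow::buffer::OffsetBuffer;
use arrow::compute::kernels;
use arrow::datatypes::Field;
use arrow::error::ArrowError;

fn main() -> Result<(), ArrowError> {
    // Two stacked "batches" of plain values.
    let batch1 = Int32Array::from(vec![1, 4, 5]);
    let batch2 = Int32Array::from(vec![6, 7, 8]);
    let batches: Vec<&dyn arrow::array::Array> = vec![&batch1, &batch2];

    // Group 0 owns three elements, group 1 owns two; each entry is
    // (batch_index, offset within that batch).
    let indices = [(0, 0), (1, 0), (1, 2), (0, 1), (0, 2)];
    let values = kernels::interleave::interleave(&batches, &indices)?;

    // Lengths [3, 2] become offsets [0, 3, 5], splitting the flattened
    // values into [1, 6, 8] and [4, 5].
    let offsets = OffsetBuffer::from_lengths([3, 2]);
    let field = Arc::new(Field::new_list_field(values.data_type().clone(), true));
    let lists = GenericListArray::<i32>::new(field, offsets, values, None);
    println!("{lists:?}");
    Ok(())
}
```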
There is some error on memory reservation. I'm not sure whether the calculation in `fn size` is wrong, but as I understand it, we only account for the buffers newly created by the implementation, not the buffers we borrow from elsewhere (i.e. the `ArrayRef`s stacked every time we receive input in `fn merge_batch` or `fn update_batch`). Maybe it's a good chance for me to learn how memory reservation/spilling works.
I took a quick look at this code -- one thing we probably need to account for is the memory in the held `ArrayRef`s, specifically by calling https://docs.rs/arrow/latest/arrow/array/trait.Array.html#tymethod.get_array_memory_size on all the stacked arrays.
However, I bet the fuzz test failure is actually due to better accounting of memory. Maybe we need to readjust the parameters or something.
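For illustration, here is a minimal sketch of what such accounting might look like, assuming the accumulator keeps the `stacked_batches` and `stacked_group_indices` fields from the diff above; the helper name `estimate_accumulator_size` is hypothetical and not part of the PR.

```rust
use std::mem::size_of;

use arrow::array::{Array, ArrayRef};

/// Hypothetical helper: estimate the memory held by the accumulator's
/// state, including the borrowed input batches.
fn estimate_accumulator_size(
    stacked_batches: &[ArrayRef],
    stacked_group_indices: &[Vec<(usize, usize)>],
) -> usize {
    // Buffers kept alive by the stacked ArrayRefs.
    let batches: usize = stacked_batches
        .iter()
        .map(|a| a.get_array_memory_size())
        .sum();
    // Heap used by the per-group (batch_index, offset) vectors.
    let indices: usize = stacked_group_indices
        .iter()
        .map(|v| v.capacity() * size_of::<(usize, usize)>())
        .sum();
    batches + indices + stacked_group_indices.len() * size_of::<Vec<(usize, usize)>>()
}
```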
The stacked arrays' size will grow over time, so accounting for their size will likely make this implementation infeasible under memory pressure. I wonder: should we introduce an option for users to choose between the old `GroupsAccumulatorAdapter` implementation and this one?
Actually, after reducing the batch_size of the fuzz test it passed; the failure happens during the spill merge. Interestingly, the size that makes the merge overflow is the size of the output (not the size of the accumulator), so how does the new implementation cause the output to use more memory? 🤔
```rust
for batch in batches {
    in_progress_file.append_batch(&batch)?;
    max_record_batch_size = max_record_batch_size.max(batch.get_sliced_size()?);
}
```
`max_record_batch_size` decides the allocation during the spill merge; maybe this is the difference.
As written, I think this effectively has an extra allocation per group.
If you saved the internal state as a single `Vec<usize>` that records the group for each element in the `ArrayRef`s, you could then sort it (remembering the original index as well) to determine the final arguments to `interleave`.
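A rough sketch of that idea (hypothetical, not code from the PR), assuming the state were reduced to one group id per stacked element plus the length of each stacked batch; the function `plan_interleave` and its signature are invented for illustration:

```rust
/// Hypothetical sketch: derive the `interleave` indices and per-group
/// lengths from a single flat list of group ids, sorting once per emit
/// instead of keeping one Vec per group. Assumes every stacked batch is
/// non-empty.
fn plan_interleave(
    group_ids: &[usize],     // group id per stacked element, in input order
    batch_lengths: &[usize], // length of each stacked batch, in push order
    num_groups: usize,
) -> (Vec<(usize, usize)>, Vec<usize>) {
    // Precompute the flat starting position of each batch so a flat
    // element position can be mapped back to (batch_index, offset).
    let mut batch_starts = Vec::with_capacity(batch_lengths.len());
    let mut start = 0usize;
    for &len in batch_lengths {
        batch_starts.push(start);
        start += len;
    }
    let locate = |flat: usize| -> (usize, usize) {
        let batch = match batch_starts.binary_search(&flat) {
            Ok(i) => i,
            Err(i) => i - 1,
        };
        (batch, flat - batch_starts[batch])
    };

    // Stable-sort element positions by group id, remembering the original
    // index, so elements within a group keep their input order.
    let mut order: Vec<usize> = (0..group_ids.len()).collect();
    order.sort_by_key(|&i| group_ids[i]);

    // Arguments for `kernels::interleave::interleave`.
    let interleave_indices: Vec<(usize, usize)> =
        order.iter().map(|&i| locate(i)).collect();

    // Per-group lengths, later fed to `OffsetBuffer::from_lengths`.
    let mut lengths = vec![0usize; num_groups];
    for &g in group_ids {
        lengths[g] += 1;
    }
    (interleave_indices, lengths)
}
```

The trade-off is replacing the per-group `Vec` allocations with a single index sort per emit, at the cost of an O(n log n) pass over the stacked elements.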