Support array aggregate sum function #7242

Closed · wants to merge 4 commits
31 changes: 31 additions & 0 deletions datafusion/common/src/scalar.rs
@@ -682,6 +682,37 @@ impl ScalarValue {
}
}

/// Return a new `ScalarValue::List` from a `Vec` of primitive values,
/// using `d` as the element data type.
pub fn new_primitives<T: ArrowPrimitiveType>(
values: Vec<Option<T::Native>>,
d: &DataType,
) -> Result<Self> {
if values.is_empty() {
// An empty input collapses to a null scalar of the requested type.
return d.try_into();
}

// Collect the raw values alongside a validity bitmap; null entries are
// backed by `T::Native::default()` and marked invalid.
let mut array = Vec::with_capacity(values.len());
let mut nulls = Vec::with_capacity(values.len());

for a in values {
match a {
Some(v) => {
array.push(v);
nulls.push(true);
}
None => {
array.push(T::Native::default());
nulls.push(false);
}
}
}

let arr = PrimitiveArray::<T>::new(array.into(), Some(NullBuffer::from(nulls)))
.with_data_type(d.clone());

Ok(ScalarValue::List(Arc::new(arr)))
}

/// Create a decimal Scalar from value/precision and scale.
pub fn try_new_decimal128(value: i128, precision: u8, scale: i8) -> Result<Self> {
// make sure the precision and scale is valid
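For orientation, here is a minimal sketch of how the new `ScalarValue::new_primitives` constructor above could be exercised. The choice of `Float64Type`, the sample values, and the wrapper function are illustrative assumptions, not code from this PR:

```rust
use arrow_array::types::Float64Type;
use arrow_schema::DataType;
use datafusion_common::{Result, ScalarValue};

// Build a single list scalar from Float64 values, including one null element.
fn list_scalar_example() -> Result<ScalarValue> {
    let values = vec![Some(1.0_f64), None, Some(3.0_f64)];
    ScalarValue::new_primitives::<Float64Type>(values, &DataType::Float64)
}
```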
4 changes: 4 additions & 0 deletions datafusion/core/Cargo.toml
@@ -167,3 +167,7 @@ name = "sort"
[[bench]]
harness = false
name = "topk_aggregate"

[[bench]]
harness = false
name = "aggregate_sum"
126 changes: 126 additions & 0 deletions datafusion/core/benches/aggregate_sum.rs
@@ -0,0 +1,126 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#[macro_use]
extern crate criterion;
extern crate arrow;
extern crate datafusion;

mod data_utils;
use crate::criterion::Criterion;
use arrow_array::{ArrayRef, Float64Array, RecordBatch};
use arrow_schema::{Field, Schema};
use data_utils::create_table_provider;
use datafusion::error::Result;
use datafusion::execution::context::SessionContext;
use datafusion_common::ScalarValue;
use datafusion_expr::AggregateFunction;
use datafusion_expr::type_coercion::aggregates::coerce_types;
use datafusion_physical_expr::{expressions::{create_aggregate_expr, try_cast, col}, AggregateExpr};
use parking_lot::Mutex;
use std::sync::Arc;
use tokio::runtime::Runtime;

/// Evaluate `agg` against a single `RecordBatch` and return the final scalar.
pub fn aggregate(
batch: &RecordBatch,
agg: Arc<dyn AggregateExpr>,
) -> Result<ScalarValue> {
let mut accum = agg.create_accumulator()?;
let expr = agg.expressions();
let values = expr
.iter()
.map(|e| {
e.evaluate(batch)
.and_then(|v| v.into_array(batch.num_rows()))
})
.collect::<Result<Vec<_>>>()?;
accum.update_batch(&values)?;
accum.evaluate()
}

/// Coerce `array` to the aggregate's input type, evaluate `function` over it,
/// and assert that the result equals `expected`.
pub fn assert_aggregate(
array: ArrayRef,
function: AggregateFunction,
distinct: bool,
expected: ScalarValue,
) {
let data_type = array.data_type();
let sig = function.signature();
let coerced = coerce_types(&function, &[data_type.clone()], &sig).unwrap();

let input_schema = Schema::new(vec![Field::new("a", data_type.clone(), true)]);
let batch =
RecordBatch::try_new(Arc::new(input_schema.clone()), vec![array]).unwrap();

let input = try_cast(
col("a", &input_schema).unwrap(),
&input_schema,
coerced[0].clone(),
)
.unwrap();

let schema = Schema::new(vec![Field::new("a", coerced[0].clone(), true)]);
let agg =
create_aggregate_expr(&function, distinct, &[input], &[], &schema, "agg")
.unwrap();

let result = aggregate(&batch, agg).unwrap();
assert_eq!(expected, result);
}

// Only used by the context-based benchmark that is currently commented out below.
#[allow(dead_code)]
fn query(ctx: Arc<Mutex<SessionContext>>, sql: &str) {
let rt = Runtime::new().unwrap();
let df = rt.block_on(ctx.lock().sql(sql)).unwrap();
criterion::black_box(rt.block_on(df.collect()).unwrap());
}

// Only used by the context-based benchmark that is currently commented out below.
#[allow(dead_code)]
fn create_context(
partitions_len: usize,
array_len: usize,
batch_size: usize,
) -> Result<Arc<Mutex<SessionContext>>> {
let ctx = SessionContext::new();
let provider = create_table_provider(partitions_len, array_len, batch_size)?;
ctx.register_table("t", provider)?;
Ok(Arc::new(Mutex::new(ctx)))
}

fn criterion_benchmark(c: &mut Criterion) {
// let partitions_len = 8;
// let array_len = 32768 * 2; // 2^16
// let batch_size = 2048; // 2^11
// let ctx = create_context(partitions_len, array_len, batch_size).unwrap();

let n = 1_000_000_000;
// Inclusive range: n + 1 elements of 1.0, so the expected SUM is n + 1.
let vec_of_f64: Vec<f64> = (0..=n as usize).map(|_| 1.0_f64).collect();
let a: ArrayRef = Arc::new(Float64Array::from(vec_of_f64));
// c.bench_function("sum 1e9", |b| b.iter(|| assert_aggregate(a.clone(), AggregateFunction::Sum, false, criterion::black_box(ScalarValue::List(Arc::new(Float64Array::from(vec![1000000001_f64])))))));
c.bench_function("sum 1e9", |b| {
b.iter(|| {
assert_aggregate(
a.clone(),
AggregateFunction::Sum,
false,
criterion::black_box(ScalarValue::from(1000000001_f64)),
)
})
});

// c.bench_function("aggregate_query_no_group_by 15 12", |b| {
// b.iter(|| {
// query(
// ctx.clone(),
// "SELECT MIN(f64), AVG(f64), COUNT(f64) \
// FROM t",
// )
// })
// });
}

criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
13 changes: 12 additions & 1 deletion datafusion/expr/src/built_in_function.rs
@@ -128,6 +128,8 @@ pub enum BuiltinScalarFunction {
Cot,

// array functions
/// array_aggregate
ArrayAggregate,
/// array_append
ArrayAppend,
/// array_concat
@@ -389,6 +391,7 @@ impl BuiltinScalarFunction {
BuiltinScalarFunction::Tanh => Volatility::Immutable,
BuiltinScalarFunction::Trunc => Volatility::Immutable,
BuiltinScalarFunction::ArrayAppend => Volatility::Immutable,
BuiltinScalarFunction::ArrayAggregate => Volatility::Immutable,
BuiltinScalarFunction::ArrayConcat => Volatility::Immutable,
BuiltinScalarFunction::ArrayEmpty => Volatility::Immutable,
BuiltinScalarFunction::ArrayHasAll => Volatility::Immutable,
@@ -534,6 +537,7 @@ impl BuiltinScalarFunction {
Ok(data_type)
}
BuiltinScalarFunction::ArrayAppend => Ok(input_expr_types[0].clone()),
BuiltinScalarFunction::ArrayAggregate => unimplemented!(
"ArrayAggregate is based on an aggregate function, so it has no return type of its own."
),
BuiltinScalarFunction::ArrayConcat => {
let mut expr_type = Null;
let mut max_dims = 0;
@@ -882,7 +886,8 @@ impl BuiltinScalarFunction {

// for now, the list is small, as we do not have many built-in functions.
match self {
BuiltinScalarFunction::ArrayAppend => Signature::any(2, self.volatility()),
BuiltinScalarFunction::ArrayAggregate
| BuiltinScalarFunction::ArrayAppend => Signature::any(2, self.volatility()),
BuiltinScalarFunction::ArrayPopFront => Signature::any(1, self.volatility()),
BuiltinScalarFunction::ArrayPopBack => Signature::any(1, self.volatility()),
BuiltinScalarFunction::ArrayConcat => {
@@ -1509,6 +1514,12 @@ fn aliases(func: &BuiltinScalarFunction) -> &'static [&'static str] {
BuiltinScalarFunction::ArrowTypeof => &["arrow_typeof"],

// array functions
BuiltinScalarFunction::ArrayAggregate => &[
"array_aggregate",
"list_aggregate",
"array_aggr",
"list_aggr",
],
BuiltinScalarFunction::ArrayAppend => &[
"array_append",
"list_append",
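As a sanity check on the aliases, a hedged sketch of how they would resolve through the existing `FromStr` lookup for `BuiltinScalarFunction`, assuming the new variant participates in that lookup like the other array functions:

```rust
use std::str::FromStr;

use datafusion_expr::BuiltinScalarFunction;

// Every alias registered above should map to the same enum variant.
fn aliases_resolve() -> bool {
    ["array_aggregate", "list_aggregate", "array_aggr", "list_aggr"]
        .iter()
        .all(|name| {
            BuiltinScalarFunction::from_str(name).ok()
                == Some(BuiltinScalarFunction::ArrayAggregate)
        })
}
```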
7 changes: 7 additions & 0 deletions datafusion/expr/src/expr_fn.rs
@@ -597,6 +597,13 @@ scalar_expr!(
"returns the array without the first element."
);

scalar_expr!(
ArrayAggregate,
array_aggregate,
array name,
"allows the execution of arbitrary existing aggregate functions `name` on the elements of a list"
);

nary_scalar_expr!(ArrayConcat, array_concat, "concatenates arrays.");
scalar_expr!(
ArrayHas,
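For context, a hedged sketch of how the generated `array_aggregate` helper above might be used from the expression API, assuming it is re-exported from `datafusion_expr` like the other array functions; the column name `"values"` and the `"sum"` argument are illustrative only:

```rust
use datafusion_expr::{array_aggregate, col, lit, Expr};

// Hypothetical usage: run SUM over the elements of each list in column "values".
fn sum_each_list() -> Expr {
    array_aggregate(col("values"), lit("sum"))
}
```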
28 changes: 13 additions & 15 deletions datafusion/expr/src/type_coercion/aggregates.rs
@@ -16,12 +16,12 @@
// under the License.

use arrow::datatypes::{
DataType, TimeUnit, DECIMAL128_MAX_PRECISION, DECIMAL128_MAX_SCALE,
DataType, Field, TimeUnit, DECIMAL128_MAX_PRECISION, DECIMAL128_MAX_SCALE,
DECIMAL256_MAX_PRECISION, DECIMAL256_MAX_SCALE,
};

use datafusion_common::{internal_err, plan_err, DataFusionError, Result};
use std::ops::Deref;
use std::{ops::Deref, sync::Arc};

use crate::{AggregateFunction, Signature, TypeSignature};

@@ -118,6 +118,16 @@ pub fn coerce_types(
Dictionary(_, v) => {
return coerce_types(agg_fun, &[v.as_ref().clone()], signature)
}
List(field) => {
let coerced_types =
coerce_types(agg_fun, &[field.data_type().clone()], signature)?;
let data_type = coerced_types[0].clone();
List(Arc::new(Field::new(
field.name(),
data_type,
field.is_nullable(),
)))
}
_ => {
return plan_err!(
"The function {:?} does not support inputs of type {:?}.",
@@ -411,6 +421,7 @@ pub fn sum_return_type(arg_type: &DataType) -> Result<DataType> {
let new_precision = DECIMAL256_MAX_PRECISION.min(*precision + 10);
Ok(DataType::Decimal256(new_precision, *scale))
}
DataType::List(field) => sum_return_type(field.data_type()),
other => plan_err!("SUM does not support type \"{other:?}\""),
}
}
@@ -505,19 +516,6 @@ pub fn is_bool_and_or_support_arg_type(arg_type: &DataType) -> bool {
matches!(arg_type, DataType::Boolean)
}

pub fn is_sum_support_arg_type(arg_type: &DataType) -> bool {
match arg_type {
DataType::Dictionary(_, dict_value_type) => {
is_sum_support_arg_type(dict_value_type.as_ref())
}
_ => matches!(
arg_type,
arg_type if NUMERICS.contains(arg_type)
|| matches!(arg_type, DataType::Decimal128(_, _) | DataType::Decimal256(_, _))
),
}
}

pub fn is_avg_support_arg_type(arg_type: &DataType) -> bool {
match arg_type {
DataType::Dictionary(_, dict_value_type) => {
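To make the new `List` handling in `coerce_types` and `sum_return_type` concrete, a small sketch of the behaviour this diff appears to enable; the assertion reflects the expected effect of the change rather than a test taken from the PR:

```rust
use std::sync::Arc;

use arrow::datatypes::{DataType, Field};
use datafusion_common::Result;
use datafusion_expr::type_coercion::aggregates::sum_return_type;

// SUM over a List(Float64) now recurses into the element type,
// so the resulting type is plain Float64.
fn list_sum_type() -> Result<()> {
    let list_ty = DataType::List(Arc::new(Field::new("item", DataType::Float64, true)));
    assert_eq!(sum_return_type(&list_ty)?, DataType::Float64);
    Ok(())
}
```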