// Copyright 2021 Datafuse Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

| 15 | +use std::sync::Arc; |
| 16 | + |
| 17 | +use databend_common_catalog::table_context::TableContext; |
| 18 | +use databend_common_exception::Result; |
| 19 | +use databend_common_expression::types::DataType; |
| 20 | + |
| 21 | +use crate::optimizer::SExpr; |
| 22 | +use crate::plans::Aggregate; |
| 23 | +use crate::plans::ConstantExpr; |
| 24 | +use crate::plans::DummyTableScan; |
| 25 | +use crate::plans::EvalScalar; |
| 26 | +use crate::plans::Operator; |
| 27 | +use crate::plans::RelOp; |
| 28 | +use crate::plans::RelOperator; |
| 29 | +use crate::plans::ScalarExpr; |
| 30 | +use crate::plans::ScalarItem; |
| 31 | +use crate::MetadataRef; |
| 32 | + |
| 33 | +// Replace aggregate function with scalar from table's accurate stats function |
| 34 | +pub struct RuleStatsAggregateOptimizer { |
| 35 | + metadata: MetadataRef, |
| 36 | + ctx: Arc<dyn TableContext>, |
| 37 | +} |
| 38 | + |
| 39 | +impl RuleStatsAggregateOptimizer { |
| 40 | + pub fn new(ctx: Arc<dyn TableContext>, metadata: MetadataRef) -> Self { |
| 41 | + RuleStatsAggregateOptimizer { metadata, ctx } |
| 42 | + } |
| 43 | + |
| 44 | + #[async_recursion::async_recursion(#[recursive::recursive])] |
| 45 | + pub async fn run(&self, s_expr: &SExpr) -> Result<SExpr> { |
| 46 | + let mut children = Vec::with_capacity(s_expr.arity()); |
| 47 | + for child in s_expr.children() { |
| 48 | + let child = self.run(child).await?; |
| 49 | + children.push(Arc::new(child)); |
| 50 | + } |
| 51 | + let s_expr = s_expr.replace_children(children); |
| 52 | + if let RelOperator::Aggregate(_) = s_expr.plan.as_ref() { |
| 53 | + self.normalize_aggregate(&s_expr).await |
| 54 | + } else { |
| 55 | + Ok(s_expr) |
| 56 | + } |
| 57 | + } |
| 58 | + |
| 59 | + async fn normalize_aggregate(&self, s_expr: &SExpr) -> Result<SExpr> { |
| 60 | + let agg: Aggregate = s_expr.plan().clone().try_into()?; |
| 61 | + if s_expr.arity() != 1 || agg.grouping_sets.is_some() || !agg.group_items.is_empty() { |
| 62 | + return Ok(s_expr.clone()); |
| 63 | + } |
| 64 | + |
| 65 | + // agg --> eval scalar --> scan |
| 66 | + let arg_eval_scalar = s_expr.child(0)?; |
| 67 | + if arg_eval_scalar.arity() != 1 |
| 68 | + || arg_eval_scalar.plan.as_ref().rel_op() != RelOp::EvalScalar |
| 69 | + { |
| 70 | + return Ok(s_expr.clone()); |
| 71 | + } |
| 72 | + |
| 73 | + let child = arg_eval_scalar.child(0)?; |
| 74 | + if child.arity() != 0 { |
| 75 | + return Ok(s_expr.clone()); |
| 76 | + } |
| 77 | + |
| 78 | + if let RelOperator::Scan(scan) = child.plan.as_ref() { |
| 79 | + if scan.prewhere.is_none() && scan.push_down_predicates.is_none() { |
| 80 | + let table = self.metadata.read().table(scan.table_index).table(); |
| 81 | + let schema = table.schema(); |
| 82 | + |
| 83 | + let mut column_ids = Vec::with_capacity(agg.aggregate_functions.len()); |
| 84 | + let mut need_rewrite_aggs = Vec::with_capacity(agg.aggregate_functions.len()); |
| 85 | + |
| 86 | + for item in agg.aggregate_functions.iter() { |
| 87 | + if let ScalarExpr::AggregateFunction(function) = &item.scalar { |
| 88 | + if ["min", "max"].contains(&function.func_name.as_str()) |
| 89 | + && function.args.len() == 1 |
| 90 | + && !function.distinct |
| 91 | + && Self::supported_stat_type(&function.args[0].data_type()?) |
| 92 | + { |
| 93 | + if let ScalarExpr::BoundColumnRef(b) = &function.args[0] { |
| 94 | + if let Ok(col_id) = |
| 95 | + schema.column_id_of(b.column.column_name.as_str()) |
| 96 | + { |
| 97 | + column_ids.push(col_id); |
| 98 | + need_rewrite_aggs |
| 99 | + .push(Some((col_id, function.func_name.clone()))); |
| 100 | + |
| 101 | + continue; |
| 102 | + } |
| 103 | + } |
| 104 | + } |
| 105 | + } |
| 106 | + need_rewrite_aggs.push(None); |
| 107 | + } |
| 108 | + |
| 109 | + if column_ids.is_empty() { |
| 110 | + return Ok(s_expr.clone()); |
| 111 | + } |
| 112 | + |
| 113 | + let mut eval_scalar_results = Vec::with_capacity(agg.aggregate_functions.len()); |
| 114 | + let mut agg_results = Vec::with_capacity(agg.aggregate_functions.len()); |
| 115 | + |
| 116 | + if let Some(stats) = table |
| 117 | + .accurate_columns_ranges(self.ctx.clone(), &column_ids) |
| 118 | + .await? |
| 119 | + { |
| 120 | + for (need_rewrite_agg, agg) in |
| 121 | + need_rewrite_aggs.iter().zip(agg.aggregate_functions.iter()) |
| 122 | + { |
| 123 | + if let Some((col_id, name)) = need_rewrite_agg { |
| 124 | + if let Some(stat) = stats.get(col_id) { |
| 125 | + if name.eq_ignore_ascii_case("min") && !stat.min.may_be_truncated { |
| 126 | + eval_scalar_results.push(ScalarItem { |
| 127 | + index: agg.index, |
| 128 | + scalar: ScalarExpr::ConstantExpr(ConstantExpr { |
| 129 | + value: stat.min.value.clone(), |
| 130 | + span: None, |
| 131 | + }), |
| 132 | + }); |
| 133 | + continue; |
| 134 | + } else if !stat.max.may_be_truncated { |
| 135 | + eval_scalar_results.push(ScalarItem { |
| 136 | + index: agg.index, |
| 137 | + scalar: ScalarExpr::ConstantExpr(ConstantExpr { |
| 138 | + value: stat.max.value.clone(), |
| 139 | + span: None, |
| 140 | + }), |
| 141 | + }); |
| 142 | + continue; |
| 143 | + } |
| 144 | + } |
| 145 | + } |
| 146 | + agg_results.push(agg.clone()); |
| 147 | + } |
| 148 | + } |
| 149 | + if eval_scalar_results.is_empty() { |
| 150 | + return Ok(s_expr.clone()); |
| 151 | + } |
| 152 | + |
| 153 | + let eval_scalar = EvalScalar { |
| 154 | + items: eval_scalar_results, |
| 155 | + }; |
| 156 | + |
| 157 | + if agg_results.is_empty() { |
| 158 | + let leaf = SExpr::create_leaf(Arc::new(DummyTableScan.into())); |
| 159 | + return Ok(SExpr::create_unary( |
| 160 | + Arc::new(eval_scalar.into()), |
| 161 | + Arc::new(leaf), |
| 162 | + )); |
| 163 | + } else { |
| 164 | + let agg = Aggregate { |
| 165 | + aggregate_functions: agg_results, |
| 166 | + ..agg.clone() |
| 167 | + }; |
| 168 | + let child = SExpr::create_unary( |
| 169 | + Arc::new(agg.into()), |
| 170 | + Arc::new(arg_eval_scalar.clone()), |
| 171 | + ); |
| 172 | + return Ok(SExpr::create_unary( |
| 173 | + Arc::new(eval_scalar.into()), |
| 174 | + Arc::new(child), |
| 175 | + )); |
| 176 | + } |
| 177 | + } |
| 178 | + } |
| 179 | + Ok(s_expr.clone()) |
| 180 | + } |
| 181 | + |
| 182 | + // from RangeIndex::supported_stat_type |
| 183 | + fn supported_stat_type(data_type: &DataType) -> bool { |
| 184 | + let inner_type = data_type.remove_nullable(); |
| 185 | + matches!( |
| 186 | + inner_type, |
| 187 | + DataType::Number(_) |
| 188 | + | DataType::Date |
| 189 | + | DataType::Timestamp |
| 190 | + | DataType::String |
| 191 | + | DataType::Decimal(_) |
| 192 | + ) |
| 193 | + } |
| 194 | +} |