
Add an example of embedding indexes inside a parquet file #16395


Open

wants to merge 28 commits into main

Changes from 6 commits

Commits
793abb9
Add an example of embedding indexes inside a parquet file
zhuqi-lucas Jun 13, 2025
1f480ee
Add page image
zhuqi-lucas Jun 13, 2025
2a0ecac
Add prune file example
zhuqi-lucas Jun 13, 2025
4e61d0e
Fix clippy
zhuqi-lucas Jun 13, 2025
a8da658
polish code
zhuqi-lucas Jun 13, 2025
c1ab4b9
Fmt
zhuqi-lucas Jun 13, 2025
baf0311
address comments
zhuqi-lucas Jun 13, 2025
18e7028
Add debug
zhuqi-lucas Jun 13, 2025
66dc5e4
Add new example, but it will fail with page index
zhuqi-lucas Jun 14, 2025
eb9b62e
add debug
zhuqi-lucas Jun 14, 2025
310576e
add debug
zhuqi-lucas Jun 14, 2025
fbeecfe
polish
zhuqi-lucas Jun 15, 2025
88fc6a6
debug
zhuqi-lucas Jun 15, 2025
32abcb9
Using low level API to support
zhuqi-lucas Jun 18, 2025
284510c
polish
zhuqi-lucas Jun 18, 2025
0410bd8
fix
zhuqi-lucas Jun 18, 2025
56ad7f6
Merge remote-tracking branch 'upstream/main' into embedding_indexes
zhuqi-lucas Jun 18, 2025
12ce9c2
merge
zhuqi-lucas Jun 18, 2025
0c093ac
fix
zhuqi-lucas Jun 18, 2025
a789084
Merge remote-tracking branch 'upstream/main' into embedding_indexes
zhuqi-lucas Jun 18, 2025
9c75814
complte solution
zhuqi-lucas Jun 19, 2025
13c1706
Merge remote-tracking branch 'upstream/main' into embedding_indexes
zhuqi-lucas Jun 19, 2025
06d6f08
polish comments
zhuqi-lucas Jun 19, 2025
6bd7d3e
adjust image
zhuqi-lucas Jun 19, 2025
23d7125
add comments part 1
zhuqi-lucas Jun 20, 2025
13b74ac
pin to new arrow-rs
zhuqi-lucas Jun 21, 2025
1b0501c
pin to new arrow-rs
zhuqi-lucas Jun 21, 2025
c344843
add comments part 2
zhuqi-lucas Jun 21, 2025
1 change: 1 addition & 0 deletions Cargo.lock

(Generated file; diff not rendered.)

2 changes: 2 additions & 0 deletions datafusion-examples/Cargo.toml
@@ -81,3 +81,5 @@ uuid = "1.17"

[target.'cfg(not(target_os = "windows"))'.dev-dependencies]
nix = { version = "0.30.1", features = ["fs"] }
[dependencies]
base64 = "0.22.1"
243 changes: 243 additions & 0 deletions datafusion-examples/examples/embedding_parquet_indexes.rs
@@ -0,0 +1,243 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! Example: embedding a "distinct values" index in a Parquet file's metadata
//!
//! 1. Read existing Parquet files
//! 2. Compute distinct values for a target column using DataFusion
//! 3. Serialize the distinct index to bytes and write to the new Parquet file
//! with these encoded bytes appended as a custom metadata entry
//! 4. Read each new parquet file, extract and deserialize the index from footer
//! 5. Use the distinct index to prune files when querying
use arrow::array::{ArrayRef, StringArray};
use arrow::record_batch::RecordBatch;
use arrow_schema::{DataType, Field, Schema, SchemaRef};
use async_trait::async_trait;
use base64::engine::general_purpose;
use base64::Engine;
use datafusion::catalog::{Session, TableProvider};
use datafusion::common::{HashMap, HashSet, Result};
use datafusion::datasource::listing::PartitionedFile;
use datafusion::datasource::memory::DataSourceExec;
use datafusion::datasource::physical_plan::{FileScanConfigBuilder, ParquetSource};
use datafusion::datasource::TableType;
use datafusion::execution::object_store::ObjectStoreUrl;
use datafusion::logical_expr::{Operator, TableProviderFilterPushDown};
use datafusion::parquet::arrow::ArrowWriter;
use datafusion::parquet::file::metadata::KeyValue;
use datafusion::parquet::file::properties::WriterProperties;
use datafusion::parquet::file::reader::{FileReader, SerializedFileReader};
use datafusion::physical_plan::ExecutionPlan;
use datafusion::prelude::*;
use datafusion::scalar::ScalarValue;
use std::fs::{create_dir_all, read_dir, File};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tempfile::TempDir;

/// Example creating parquet file that
/// contains specialized indexes that
/// are ignored by other readers
///
/// ```text
///         ┌──────────────────────┐
///         │┌───────────────────┐ │
///         ││     DataPage      │ │      Standard Parquet
///         │└───────────────────┘ │      Data / pages
///         │┌───────────────────┐ │
///         ││     DataPage      │ │
///         │└───────────────────┘ │
///         │        ...           │
///         │                      │
///         │┌───────────────────┐ │
///         ││     DataPage      │ │
///         │└───────────────────┘ │
///         │┏━━━━━━━━━━━━━━━━━━━┓ │
///         │┃                   ┃ │        key/value metadata
///         │┃   Special Index   ┃◀┼────    that points at the
///         │┃                   ┃ │     │  special index
///         │┗━━━━━━━━━━━━━━━━━━━┛ │
///         │╔═══════════════════╗ │     │
///         │║                   ║ │
///         │║  Parquet Footer   ║ │     │  Footer includes
///         │║                   ║ ┼──────  thrift-encoded
///         │║                   ║ │        ParquetMetadata
///         │╚═══════════════════╝ │
///         └──────────────────────┘
///
///               Parquet File
/// ```
/// DistinctIndexTable is a custom TableProvider that reads Parquet files
#[derive(Debug)]
struct DistinctIndexTable {
    schema: SchemaRef,
    index: HashMap<String, HashSet<String>>,
    dir: PathBuf,
}

impl DistinctIndexTable {
    /// Scan a directory, read each file's footer metadata into a map
    fn try_new(dir: impl Into<PathBuf>, schema: SchemaRef) -> Result<Self> {
        let dir = dir.into();
        let mut index = HashMap::new();
        for entry in read_dir(&dir)? {
            let p = entry?.path();
            if p.extension().and_then(|s| s.to_str()) != Some("parquet") {
                continue;
            }
            let name = p.file_name().unwrap().to_string_lossy().into_owned();
            let reader = SerializedFileReader::new(File::open(&p)?)?;
            if let Some(kv) = reader.metadata().file_metadata().key_value_metadata() {
                if let Some(e) = kv.iter().find(|kv| kv.key == "distinct_index_data") {
                    let raw = general_purpose::STANDARD_NO_PAD
                        .decode(e.value.as_deref().unwrap())
                        .unwrap();
                    let s = String::from_utf8(raw).unwrap();
                    let set = s.lines().map(|l| l.to_string()).collect();
                    println!("Inserting File: {name}, Distinct Values: {set:?}");
                    index.insert(name, set);
                }
            }
        }
        Ok(Self { schema, index, dir })
    }
}

// Write a Parquet file and embed its distinct "category" values in footer metadata
fn write_file_with_index(path: &Path, values: &[&str]) -> Result<()> {
    let field = Field::new("category", DataType::Utf8, false);
    let schema = Arc::new(Schema::new(vec![field.clone()]));
    let arr: ArrayRef = Arc::new(StringArray::from(values.to_vec()));
    let batch = RecordBatch::try_new(schema.clone(), vec![arr])?;

    // Compute distinct values, serialize & Base64‑encode
    let distinct: HashSet<_> = values.iter().copied().collect();
    let serialized = distinct.iter().cloned().collect::<Vec<_>>().join("\n");
    let b64 = general_purpose::STANDARD_NO_PAD.encode(serialized.as_bytes());
Contributor:

I think this writes the index into the footer itself (as an opaque string)

This has at least 2 downsides

  1. The footer metadata will be much larger / longer to parse
  2. A binary index must be converted to/from strings (as you are doing here with b64)

Is it possible to write the binary data directly into the parquet file?

Specifically, so then the metadata looks something like

// Find out where the current write position is
let offset_to_index_in_file = file.current_position()
file.write_all(distinct_index)?;

// now, finalize the file with the parquet metadata:
let props = WriterProperties::builder()
        .set_key_value_metadata(Some(vec![KeyValue::new(
            "distinct_index_data".into(),
            offset_to_index_in_file.to_string(),
        )]))
        .build();

I am not sure how easy this would be to do with the current API

Contributor Author (zhuqi-lucas):

Very good point @alamb! Thank you.

I will try to find a better solution; I agree with the downsides you listed.

This has at least 2 downsides

  1. The footer metadata will be much larger / longer to parse
  2. A binary index must be converted to/from strings (as you are doing here with b64)

Contributor Author (zhuqi-lucas):

I tried today, but found it's hard for the current API to support this; I will try again.

Contributor Author (zhuqi-lucas):

I tried using the low-level API, but it only works when we disable the page index. If we enable the page index, it is written after the real row group data, and that conflicts with our embedded index.

Contributor Author (@zhuqi-lucas), Jun 15, 2025:

The code is here:

// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

//! Example: embedding a "distinct values" index in a Parquet file's metadata
//!
//! 1. Read existing Parquet files
//! 2. Compute distinct values for a target column using DataFusion
//! 3. Serialize the distinct index to bytes and write to the new Parquet file
//!    with these encoded bytes appended as a custom metadata entry
//! 4. Read each new parquet file, extract and deserialize the index from footer
//! 5. Use the distinct index to prune files when querying

use arrow::array::{ArrayRef, StringArray};
use arrow::record_batch::RecordBatch;
use arrow_schema::{DataType, Field, Schema, SchemaRef};
use async_trait::async_trait;
use datafusion::catalog::{Session, TableProvider};
use datafusion::common::{HashMap, HashSet, Result};
use datafusion::datasource::listing::PartitionedFile;
use datafusion::datasource::memory::DataSourceExec;
use datafusion::datasource::physical_plan::{FileScanConfigBuilder, ParquetSource};
use datafusion::datasource::TableType;
use datafusion::execution::object_store::ObjectStoreUrl;
use datafusion::logical_expr::{Operator, TableProviderFilterPushDown};
use datafusion::parquet::arrow::ArrowSchemaConverter;
use datafusion::parquet::data_type::{ByteArray, ByteArrayType};
use datafusion::parquet::errors::ParquetError;
use datafusion::parquet::file::metadata::KeyValue;
use datafusion::parquet::file::properties::WriterProperties;
use datafusion::parquet::file::reader::{FileReader, SerializedFileReader};
use datafusion::parquet::file::writer::SerializedFileWriter;
use datafusion::physical_plan::ExecutionPlan;
use datafusion::prelude::*;
use datafusion::scalar::ScalarValue;
use futures::AsyncWriteExt;
use std::fs::{create_dir_all, read_dir, File};
use std::io::{Read, Seek, SeekFrom, Write};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tempfile::TempDir;

/// We should disable page index support in the Parquet reader
/// when we enable this feature, since we are using a custom index.
///
/// Example creating parquet file that
/// contains specialized indexes that
/// are ignored by other readers
///
/// ```text
///         ┌──────────────────────┐
///         │┌───────────────────┐ │
///         ││     DataPage      │ │      Standard Parquet
///         │└───────────────────┘ │      Data / pages
///         │┌───────────────────┐ │
///         ││     DataPage      │ │
///         │└───────────────────┘ │
///         │        ...           │
///         │                      │
///         │┌───────────────────┐ │
///         ││     DataPage      │ │
///         │└───────────────────┘ │
///         │┏━━━━━━━━━━━━━━━━━━━┓ │
///         │┃                   ┃ │        key/value metadata
///         │┃   Special Index   ┃◀┼────    that points at the
///         │┃                   ┃ │     │  special index
///         │┗━━━━━━━━━━━━━━━━━━━┛ │
///         │╔═══════════════════╗ │     │
///         │║                   ║ │
///         │║  Parquet Footer   ║ │     │  Footer includes
///         │║                   ║ ┼──────  thrift-encoded
///         │║                   ║ │        ParquetMetadata
///         │╚═══════════════════╝ │
///         └──────────────────────┘
///
///               Parquet File
/// ```
/// DistinctIndexTable is a custom TableProvider that reads Parquet files
#[derive(Debug)]
struct DistinctIndexTable {
    schema: SchemaRef,
    index: HashMap<String, HashSet<String>>,
    dir: PathBuf,
}

impl DistinctIndexTable {
    /// Scan a directory, read each file's footer metadata into a map
    fn try_new(dir: impl Into<PathBuf>, schema: SchemaRef) -> Result<Self> {
        let dir = dir.into();
        let mut index = HashMap::new();

        for entry in read_dir(&dir)? {
            let path = entry?.path();
            if path.extension().and_then(|s| s.to_str()) != Some("parquet") {
                continue;
            }
            let file_name = path.file_name().unwrap().to_string_lossy().to_string();

            let distinct_set = read_distinct_index(&path)?;

            println!("Read distinct index for {}: {:?}", file_name, distinct_set);
            index.insert(file_name, distinct_set);
        }

        Ok(Self { schema, index, dir })
    }
}

pub struct IndexedParquetWriter<W: Write + Seek> {
    writer: SerializedFileWriter<W>,
}

impl<W: Write + Seek + Send> IndexedParquetWriter<W> {
    pub fn try_new(
        sink: W,
        schema: Arc<Schema>,
        props: WriterProperties,
    ) -> Result<Self> {
        let schema_desc = ArrowSchemaConverter::new().convert(schema.as_ref())?;
        let props_ptr = Arc::new(props);
        let writer =
            SerializedFileWriter::new(sink, schema_desc.root_schema_ptr(), props_ptr)?;
        Ok(Self { writer })
    }
}

const INDEX_MAGIC: &[u8] = b"IDX1";

fn write_file_with_index(path: &Path, values: &[&str]) -> Result<()> {
    let field = Field::new("category", DataType::Utf8, false);
    let schema = Arc::new(Schema::new(vec![field.clone()]));
    let arr: ArrayRef = Arc::new(StringArray::from(values.to_vec()));
    let batch = RecordBatch::try_new(schema.clone(), vec![arr])?;

    let distinct: HashSet<_> = values.iter().copied().collect();
    let serialized = distinct.into_iter().collect::<Vec<_>>().join("\n");
    let index_bytes = serialized.into_bytes();

    let props = WriterProperties::builder().build();
    let file = File::create(path)?;

    let mut writer = IndexedParquetWriter::try_new(file, schema.clone(), props)?;

    {
        let mut rg_writer = writer.writer.next_row_group()?;
        let mut ser_col_writer = rg_writer
            .next_column()?
            .ok_or_else(|| ParquetError::General("No column writer".into()))?;

        let col_writer = ser_col_writer.typed::<ByteArrayType>();
        let values_bytes: Vec<ByteArray> = batch
            .column(0)
            .as_any()
            .downcast_ref::<StringArray>()
            .unwrap()
            .iter()
            .map(|opt| ByteArray::from(opt.unwrap()))
            .collect();

        println!("Writing values: {:?}", values_bytes);
        col_writer.write_batch(&values_bytes, None, None)?;
        ser_col_writer.close()?;
        rg_writer.close()?;
    }

    let offset = writer.writer.inner().seek(SeekFrom::Current(0))?;

    let index_len = index_bytes.len() as u64;
    writer.writer.inner().write_all(b"IDX1")?;
    writer.writer.inner().write_all(&index_len.to_le_bytes())?;

    writer.writer.inner().write_all(&index_bytes)?;

    writer.writer.append_key_value_metadata(KeyValue::new(
        "distinct_index_offset".to_string(),
        offset.to_string(),
    ));
    writer.writer.append_key_value_metadata(KeyValue::new(
        "distinct_index_length".to_string(),
        index_bytes.len().to_string(),
    ));

    writer.writer.close()?;

    println!("Finished writing file to {}", path.display());
    Ok(())
}

fn read_distinct_index(path: &Path) -> Result<HashSet<String>, ParquetError> {
    let mut file = File::open(path)?;

    let file_size = file.metadata()?.len();
    println!(
        "Reading index from {} (size: {})",
        path.display(),
        file_size
    );

    let reader = SerializedFileReader::new(file.try_clone()?)?;
    let meta = reader.metadata().file_metadata();

    let offset = meta
        .key_value_metadata()
        .and_then(|kvs| kvs.iter().find(|kv| kv.key == "distinct_index_offset"))
        .and_then(|kv| kv.value.as_ref())
        .ok_or_else(|| ParquetError::General("Missing index offset".into()))?
        .parse::<u64>()
        .map_err(|e| ParquetError::General(e.to_string()))?;

    let length = meta
        .key_value_metadata()
        .and_then(|kvs| kvs.iter().find(|kv| kv.key == "distinct_index_length"))
        .and_then(|kv| kv.value.as_ref())
        .ok_or_else(|| ParquetError::General("Missing index length".into()))?
        .parse::<usize>()
        .map_err(|e| ParquetError::General(e.to_string()))?;

    println!("Reading index at offset: {}, length: {}", offset, length);

    file.seek(SeekFrom::Start(offset))?;

    let mut magic_buf = [0u8; 4];
    file.read_exact(&mut magic_buf)?;
    if &magic_buf != INDEX_MAGIC {
        return Err(ParquetError::General("Invalid index magic".into()));
    }

    let mut len_buf = [0u8; 8];
    file.read_exact(&mut len_buf)?;
    let stored_len = u64::from_le_bytes(len_buf) as usize;

    if stored_len != length {
        return Err(ParquetError::General("Index length mismatch".into()));
    }

    let mut index_buf = vec![0u8; length];
    file.read_exact(&mut index_buf)?;

    let s =
        String::from_utf8(index_buf).map_err(|e| ParquetError::General(e.to_string()))?;

    Ok(s.lines().map(|s| s.to_string()).collect())
}

/// Implement TableProvider for DistinctIndexTable, using the distinct index to prune files
#[async_trait]
impl TableProvider for DistinctIndexTable {
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }
    fn table_type(&self) -> TableType {
        TableType::Base
    }

    /// Prune files before reading: only keep files whose distinct set contains the filter value
    async fn scan(
        &self,
        _ctx: &dyn Session,
        _proj: Option<&Vec<usize>>,
        filters: &[Expr],
        _limit: Option<usize>,
    ) -> Result<Arc<dyn ExecutionPlan>> {
        // Look for a single `category = 'X'` filter
        let mut target: Option<String> = None;

        if filters.len() == 1 {
            if let Expr::BinaryExpr(expr) = &filters[0] {
                if expr.op == Operator::Eq {
                    if let (
                        Expr::Column(c),
                        Expr::Literal(ScalarValue::Utf8(Some(v)), _),
                    ) = (&*expr.left, &*expr.right)
                    {
                        if c.name == "category" {
                            println!("Filtering for category: {v}");
                            target = Some(v.clone());
                        }
                    }
                }
            }
        }
        // Determine which files to scan
        let keep: Vec<String> = self
            .index
            .iter()
            .filter(|(_f, set)| target.as_ref().is_none_or(|v| set.contains(v)))
            .map(|(f, _)| f.clone())
            .collect();

        println!("Pruned files: {:?}", keep.clone());

        // Build ParquetSource for kept files
        let url = ObjectStoreUrl::parse("file://")?;
        let source = Arc::new(ParquetSource::default());
        let mut builder = FileScanConfigBuilder::new(url, self.schema.clone(), source);
        for file in keep {
            let path = self.dir.join(&file);
            let len = std::fs::metadata(&path)?.len();
            builder = builder.with_file(PartitionedFile::new(
                path.to_str().unwrap().to_string(),
                len,
            ));
        }
        Ok(DataSourceExec::from_data_source(builder.build()))
    }

    fn supports_filters_pushdown(
        &self,
        fs: &[&Expr],
    ) -> Result<Vec<TableProviderFilterPushDown>> {
        // Mark as inexact since pruning is file‑granular
        Ok(vec![TableProviderFilterPushDown::Inexact; fs.len()])
    }
}

#[tokio::main]
async fn main() -> Result<()> {
    // 1. Create temp dir and write 3 Parquet files with different category sets
    let tmp = TempDir::new()?;
    let dir = tmp.path();
    create_dir_all(dir)?;
    write_file_with_index(&dir.join("a.parquet"), &["foo", "bar", "foo"])?;
    write_file_with_index(&dir.join("b.parquet"), &["baz", "qux"])?;
    write_file_with_index(&dir.join("c.parquet"), &["foo", "quux", "quux"])?;

    // 2. Register our custom TableProvider
    let field = Field::new("category", DataType::Utf8, false);
    let schema_ref = Arc::new(Schema::new(vec![field]));
    let provider = Arc::new(DistinctIndexTable::try_new(dir, schema_ref.clone())?);
    let ctx = SessionContext::new();

    ctx.register_table("t", provider)?;

    // 3. Run a query with no filter: all files are scanned
    let df = ctx.sql("SELECT * FROM t").await?;
    df.show().await?;

    // 4. Run a query with a filter: only files containing 'foo' get scanned
    let df = ctx.sql("SELECT * FROM t WHERE category = 'foo'").await?;
    df.show().await?;

    Ok(())
}

Contributor:

This looks super cool @zhuqi-lucas

I tried using the low-level API, but it only works when we disable the page index. If we enable the page index, it is written after the real row group data, and that conflicts with our embedded index.

I don't fully understand this concern -- I would probably have to play around with it some more

Are you willing to update this PR with this new example? I have some ideas on the various APIs we could use (like we could potentially encapsulate the index writing some more)

We could also then file a ticket upstream in arrow-rs with a description of what wasn't working with page indexes.
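A rough sketch (not from this PR) of what "encapsulating the index writing some more" could look like, reusing the same SerializedFileWriter calls the example above already makes; DistinctIndexWriter and append_index are hypothetical names, and the error conversion relies on ParquetError implementing From<std::io::Error>:

use datafusion::parquet::errors::ParquetError;
use datafusion::parquet::file::metadata::KeyValue;
use datafusion::parquet::file::writer::SerializedFileWriter;
use std::io::{Seek, SeekFrom, Write};

const INDEX_MAGIC: &[u8] = b"IDX1";

pub struct DistinctIndexWriter<W: Write + Seek> {
    writer: SerializedFileWriter<W>,
}

impl<W: Write + Seek + Send> DistinctIndexWriter<W> {
    /// Write the index bytes after the row groups, framed as
    /// magic + length + payload, and record the offset and length in the
    /// footer key/value metadata so readers can locate the index later.
    pub fn append_index(&mut self, index_bytes: &[u8]) -> Result<(), ParquetError> {
        // Remember where the index starts so the footer can point at it
        let offset = self.writer.inner().seek(SeekFrom::Current(0))?;

        self.writer.inner().write_all(INDEX_MAGIC)?;
        self.writer
            .inner()
            .write_all(&(index_bytes.len() as u64).to_le_bytes())?;
        self.writer.inner().write_all(index_bytes)?;

        self.writer.append_key_value_metadata(KeyValue::new(
            "distinct_index_offset".to_string(),
            offset.to_string(),
        ));
        self.writer.append_key_value_metadata(KeyValue::new(
            "distinct_index_length".to_string(),
            index_bytes.len().to_string(),
        ));
        Ok(())
    }
}

With something like this, write_file_with_index would only need to build the index bytes and call append_index once, after the row groups are written and before closing the writer.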

Contributor Author (zhuqi-lucas):

Thank you @alamb. I updated the code to write the index using the low-level API (without the page index); I will continue debugging the case where our self-defined index coexists with the page index.


    let props = WriterProperties::builder()
        .set_key_value_metadata(Some(vec![KeyValue::new(
            "distinct_index_data".into(),
            b64,
        )]))
        .build();

    let file = File::create(path)?;
    let mut writer = ArrowWriter::try_new(file, schema, Some(props))?;
    writer.write(&batch)?;
    writer.finish()?;
    Ok(())
}

/// Implement TableProvider for DistinctIndexTable, using the distinct index to prune files
#[async_trait]
impl TableProvider for DistinctIndexTable {
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }
    fn table_type(&self) -> TableType {
        TableType::Base
    }

    /// Prune files before reading: only keep files whose distinct set contains the filter value
    async fn scan(
        &self,
        _ctx: &dyn Session,
        _proj: Option<&Vec<usize>>,
        filters: &[Expr],
        _limit: Option<usize>,
    ) -> Result<Arc<dyn ExecutionPlan>> {
        // Look for a single `category = 'X'` filter
        let mut target: Option<String> = None;

        if filters.len() == 1 {
Contributor:

You can also potentially use PruningPredicate::literal_guarantee to do this analysis rather than repeating it here

However, doing this walk explicitly in the example might also be a good idea, to show how it could be done in general.
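A rough sketch (not from this PR) of how the literal-guarantee analysis mentioned above could replace the manual walk. The module path, the Session::create_physical_expr helper, and the LiteralGuarantee::analyze signature are assumptions that may differ across DataFusion versions:

use datafusion::common::DFSchema;
use datafusion::physical_optimizer::pruning::{Guarantee, LiteralGuarantee};

/// Hypothetical helper: find the single value `category` is guaranteed to
/// equal (if any), letting DataFusion analyze the filters instead of
/// pattern-matching the Expr tree by hand.
fn category_target(
    ctx: &dyn Session,
    filters: &[Expr],
    schema: &SchemaRef,
) -> Result<Option<String>> {
    let df_schema = DFSchema::try_from(schema.as_ref().clone())?;
    for filter in filters {
        // Convert the logical filter to a physical expression, then ask which
        // literal values the `category` column is guaranteed to take.
        let physical = ctx.create_physical_expr(filter.clone(), &df_schema)?;
        for guarantee in LiteralGuarantee::analyze(&physical) {
            if guarantee.column.name() == "category"
                && matches!(guarantee.guarantee, Guarantee::In)
            {
                if let Some(ScalarValue::Utf8(Some(v))) = guarantee.literals.iter().next() {
                    return Ok(Some(v.clone()));
                }
            }
        }
    }
    Ok(None)
}

The scan method could then call this helper and keep the rest of the file-pruning logic unchanged.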

            if let Expr::BinaryExpr(expr) = &filters[0] {
                if expr.op == Operator::Eq {
                    if let (
                        Expr::Column(c),
                        Expr::Literal(ScalarValue::Utf8(Some(v)), _),
                    ) = (&*expr.left, &*expr.right)
                    {
                        if c.name == "category" {
                            println!("Filtering for category: {v}");
                            target = Some(v.clone());
                        }
                    }
                }
            }
        }
        // Determine which files to scan
        let keep: Vec<String> = self
            .index
            .iter()
            .filter(|(_f, set)| target.as_ref().is_none_or(|v| set.contains(v)))
            .map(|(f, _)| f.clone())
            .collect();

        println!("Pruned files: {:?}", keep.clone());

        // Build ParquetSource for kept files
        let url = ObjectStoreUrl::parse("file://")?;
        let source = Arc::new(ParquetSource::default());
        let mut builder = FileScanConfigBuilder::new(url, self.schema.clone(), source);
        for file in keep {
            let path = self.dir.join(&file);
            let len = std::fs::metadata(&path)?.len();
            builder = builder.with_file(PartitionedFile::new(
                path.to_str().unwrap().to_string(),
                len,
            ));
        }
        Ok(DataSourceExec::from_data_source(builder.build()))
    }

    fn supports_filters_pushdown(
        &self,
        fs: &[&Expr],
    ) -> Result<Vec<TableProviderFilterPushDown>> {
        // Mark as inexact since pruning is file‑granular
        Ok(vec![TableProviderFilterPushDown::Inexact; fs.len()])
    }
}

#[tokio::main]
async fn main() -> Result<()> {
    // 1. Create temp dir and write 3 Parquet files with different category sets
    let tmp = TempDir::new()?;
    let dir = tmp.path();
    create_dir_all(dir)?;
    write_file_with_index(&dir.join("a.parquet"), &["foo", "bar", "foo"])?;
    write_file_with_index(&dir.join("b.parquet"), &["baz", "qux"])?;
    write_file_with_index(&dir.join("c.parquet"), &["foo", "quux", "quux"])?;

    // 2. Register our custom TableProvider
    let field = Field::new("category", DataType::Utf8, false);
    let schema_ref = Arc::new(Schema::new(vec![field]));
    let provider = Arc::new(DistinctIndexTable::try_new(dir, schema_ref.clone())?);
    let ctx = SessionContext::new();
    ctx.register_table("t", provider)?;

    // 3. Run a query: only files containing 'foo' get scanned
    let df = ctx.sql("SELECT * FROM t WHERE category = 'foo'").await?;
Contributor:

that is very cool

    df.show().await?;

    Ok(())
}