Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
252 changes: 252 additions & 0 deletions crates/fluss/src/client/table/lookup.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,252 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use crate::bucketing::BucketingFunction;
use crate::client::connection::FlussConnection;
use crate::client::metadata::Metadata;
use crate::error::{Error, Result};
use crate::metadata::{RowType, TableBucket, TableInfo};
use crate::row::InternalRow;
use crate::row::compacted::CompactedRow;
use crate::row::encode::KeyEncoder;
use crate::rpc::ApiError;
use crate::rpc::message::LookupRequest;
use std::sync::Arc;

/// The result of a lookup operation.
///
/// Contains the rows returned from a lookup. For primary key lookups,
/// this will contain at most one row. For prefix key lookups (future),
/// this may contain multiple rows.
pub struct LookupResult<'a> {
    // Raw encoded row bytes exactly as returned by the tablet server.
    // Decoding into `CompactedRow` views is deferred to the accessors.
    rows: Vec<Vec<u8>>,
    // Schema used to interpret `rows`; borrowed from the table info, which
    // is why a `LookupResult` cannot outlive the lookuper that produced it.
    row_type: &'a RowType,
}

impl<'a> LookupResult<'a> {
/// Creates a new LookupResult from a list of row bytes.
fn new(rows: Vec<Vec<u8>>, row_type: &'a RowType) -> Self {
Self { rows, row_type }
}

/// Creates an empty LookupResult.
fn empty(row_type: &'a RowType) -> Self {
Self {
rows: Vec::new(),
row_type,
}
}

/// Returns the only row in the result set as a [`CompactedRow`].
///
/// This method provides a zero-copy view of the row data, which means the returned
/// `CompactedRow` borrows from this result set and cannot outlive it.
///
/// # Returns
/// - `Ok(Some(row))`: If exactly one row exists.
/// - `Ok(None)`: If the result set is empty.
/// - `Err(Error::UnexpectedError)`: If the result set contains more than one row.
///
pub fn get_single_row(&self) -> Result<Option<CompactedRow<'_>>> {
match self.rows.len() {
0 => Ok(None),
1 => Ok(Some(CompactedRow::from_bytes(self.row_type, &self.rows[0]))),
_ => Err(Error::UnexpectedError {
message: "LookupResult contains multiple rows, use get_rows() instead".to_string(),
source: None,
}),
}
}

/// Returns all rows as CompactedRows.
pub fn get_rows(&self) -> Vec<CompactedRow<'_>> {
self.rows
.iter()
.map(|bytes| CompactedRow::from_bytes(self.row_type, bytes))
.collect()
}
}

/// Configuration and factory struct for creating lookup operations.
///
/// `TableLookup` follows the same pattern as `TableScan` and `TableAppend`,
/// providing a builder-style API for configuring lookup operations before
/// creating the actual `Lookuper`.
///
/// # Example
/// ```ignore
/// let table = conn.get_table(&table_path).await?;
/// let lookuper = table.new_lookup()?.create_lookuper()?;
/// let result = lookuper.lookup(&row).await?;
/// if let Some(value) = result.get_single_row() {
///     println!("Found: {:?}", value);
/// }
/// ```
// TODO: Add lookup_by(column_names) for prefix key lookups (PrefixKeyLookuper)
// TODO: Add create_typed_lookuper<T>() for typed lookups with POJO mapping
pub struct TableLookup<'a> {
    // Connection used to reach tablet servers; the lookup objects borrow it
    // rather than owning it, tying their lifetime to the connection.
    conn: &'a FlussConnection,
    // Snapshot of the table's metadata (schema, table id, bucket count).
    table_info: TableInfo,
    // Cluster metadata cache used later to locate bucket leaders.
    metadata: Arc<Metadata>,
}

impl<'a> TableLookup<'a> {
    /// Bundles the connection, table metadata, and cluster metadata needed
    /// to build a `Lookuper`.
    pub(super) fn new(
        conn: &'a FlussConnection,
        table_info: TableInfo,
        metadata: Arc<Metadata>,
    ) -> Self {
        Self {
            conn,
            table_info,
            metadata,
        }
    }

    /// Creates a `Lookuper` for performing key-based lookups.
    ///
    /// The lookuper will automatically encode the key and compute the bucket
    /// for each lookup using the appropriate bucketing function.
    pub fn create_lookuper(self) -> Result<Lookuper<'a>> {
        // The data lake format (if configured) drives both the bucketing
        // scheme and the key encoding, so resolve it once up front.
        let lake_format = self.table_info.get_table_config().get_datalake_format()?;

        // The bucketing function only needs to inspect the format...
        let bucketing_function = <dyn BucketingFunction>::of(lake_format.as_ref());

        // ...while the key encoder consumes it, together with the table's
        // physical primary key fields.
        let primary_key_fields = self.table_info.get_physical_primary_keys().to_vec();
        let key_encoder =
            <dyn KeyEncoder>::of(self.table_info.row_type(), primary_key_fields, lake_format)?;

        let num_buckets = self.table_info.get_num_buckets();

        Ok(Lookuper {
            conn: self.conn,
            table_info: self.table_info,
            metadata: self.metadata,
            bucketing_function,
            key_encoder,
            num_buckets,
        })
    }
}

/// Performs key-based lookups against a primary key table.
///
/// The `Lookuper` automatically encodes the lookup key, computes the target
/// bucket, finds the appropriate tablet server, and retrieves the value.
///
/// # Example
/// ```ignore
/// let lookuper = table.new_lookup()?.create_lookuper()?;
/// let row = GenericRow::new(vec![Datum::Int32(42)]); // lookup key
/// let result = lookuper.lookup(&row).await?;
/// ```
// TODO: Support partitioned tables (extract partition from key)
pub struct Lookuper<'a> {
    // Connection used to reach tablet servers.
    conn: &'a FlussConnection,
    // Table metadata: supplies the table id and the row type used to decode
    // returned values.
    table_info: TableInfo,
    // Cluster metadata cache used to resolve the leader for a bucket.
    metadata: Arc<Metadata>,
    // Maps an encoded key to a bucket id in [0, num_buckets).
    bucketing_function: Box<dyn BucketingFunction>,
    // Encodes the primary key fields of a row into lookup key bytes.
    key_encoder: Box<dyn KeyEncoder>,
    // Total bucket count of the table, cached from `table_info` at creation.
    num_buckets: i32,
}

impl<'a> Lookuper<'a> {
    /// Looks up a value by its primary key.
    ///
    /// The key is encoded and the bucket is automatically computed using
    /// the table's bucketing function. The request is then routed to the
    /// leader tablet server for that bucket.
    ///
    /// # Arguments
    /// * `row` - The row containing the primary key field values
    ///
    /// # Returns
    /// * `Ok(LookupResult)` - The lookup result (may be empty if key not found)
    /// * `Err(Error)` - If the lookup fails (no leader, connection error,
    ///   or a non-zero error code in the server response)
    pub async fn lookup(&mut self, row: &dyn InternalRow) -> Result<LookupResult<'_>> {
        // todo: support batch lookup
        // Encode the key from the row
        let encoded_key = self.key_encoder.encode_key(row)?;
        let key_bytes = encoded_key.to_vec();

        // Compute bucket from encoded key
        let bucket_id = self
            .bucketing_function
            .bucketing(&key_bytes, self.num_buckets)?;

        let table_id = self.table_info.get_table_id();
        let table_bucket = TableBucket::new(table_id, bucket_id);

        // Find the leader for this bucket
        let cluster = self.metadata.get_cluster();
        let leader =
            cluster
                .leader_for(&table_bucket)
                .ok_or_else(|| Error::LeaderNotAvailable {
                    message: format!("No leader found for table bucket: {table_bucket}"),
                })?;

        // Get connection to the tablet server
        let tablet_server =
            cluster
                .get_tablet_server(leader.id())
                .ok_or_else(|| Error::LeaderNotAvailable {
                    message: format!(
                        "Tablet server {} is not found in metadata cache",
                        leader.id()
                    ),
                })?;

        let connections = self.conn.get_connections();
        let connection = connections.get_connection(tablet_server).await?;

        // Send lookup request (single key; partition_id is None — partitioned
        // tables are not supported yet, see the TODO on the struct)
        let request = LookupRequest::new(table_id, None, bucket_id, vec![key_bytes]);
        let response = connection.request(request).await?;

        // Extract the values from the (single-bucket) response
        if let Some(bucket_resp) = response.buckets_resp.into_iter().next() {
            // Surface a server-side error, if any. A code of 0 means success,
            // so filter it out before treating the code as an error.
            if let Some(code) = bucket_resp.error_code.filter(|&c| c != 0) {
                return Err(Error::FlussAPIError {
                    api_error: ApiError {
                        code,
                        message: bucket_resp.error_message.unwrap_or_default(),
                    },
                });
            }

            // Collect all present values; absent values (key not found) are skipped
            let rows: Vec<Vec<u8>> = bucket_resp
                .values
                .into_iter()
                .filter_map(|pb_value| pb_value.values)
                .collect();

            return Ok(LookupResult::new(rows, self.table_info.row_type()));
        }

        // No per-bucket response at all: treat as "not found"
        Ok(LookupResult::empty(self.table_info.row_type()))
    }

    /// Returns a reference to the table info.
    pub fn table_info(&self) -> &TableInfo {
        &self.table_info
    }
}
38 changes: 36 additions & 2 deletions crates/fluss/src/client/table/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,21 +17,22 @@

use crate::client::connection::FlussConnection;
use crate::client::metadata::Metadata;
use crate::error::{Error, Result};
use crate::metadata::{TableInfo, TablePath};
use std::sync::Arc;

use crate::error::Result;

pub const EARLIEST_OFFSET: i64 = -2;

mod append;
mod lookup;

mod log_fetch_buffer;
mod remote_log;
mod scanner;
mod writer;

pub use append::{AppendWriter, TableAppend};
pub use lookup::{LookupResult, Lookuper, TableLookup};
pub use scanner::{LogScanner, RecordBatchLogScanner, TableScan};

#[allow(dead_code)]
Expand Down Expand Up @@ -85,6 +86,39 @@ impl<'a> FlussTable<'a> {
pub fn has_primary_key(&self) -> bool {
self.has_primary_key
}

/// Creates a new `TableLookup` for configuring lookup operations.
///
/// This follows the same pattern as `new_scan()` and `new_append()`,
/// returning a configuration object that can be used to create a `Lookuper`.
///
/// The table must have a primary key (be a primary key table).
///
/// # Returns
/// * `Ok(TableLookup)` - A lookup configuration object
/// * `Err(Error)` - If the table doesn't have a primary key
///
/// # Example
/// ```ignore
/// let table = conn.get_table(&table_path).await?;
/// let mut lookuper = table.new_lookup()?.create_lookuper()?;
/// // `key_row` holds the primary key field values
/// let result = lookuper.lookup(&key_row).await?;
/// if let Some(row) = result.get_single_row()? {
///     println!("Found row: {:?}", row);
/// }
/// ```
pub fn new_lookup(&self) -> Result<TableLookup<'_>> {
    // Lookups are key-based; log-only tables have no key to look up by.
    if !self.has_primary_key {
        return Err(Error::UnsupportedOperation {
            message: "Lookup is only supported for primary key tables".to_string(),
        });
    }
    Ok(TableLookup::new(
        self.conn,
        self.table_info.clone(),
        self.metadata.clone(),
    ))
}
}

impl<'a> Drop for FlussTable<'a> {
Expand Down
4 changes: 2 additions & 2 deletions crates/fluss/src/client/write/write_format.rs
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ impl WriteFormat {
match self {
WriteFormat::CompactedKv => Ok(KvFormat::COMPACTED),
other => Err(IllegalArgument {
message: format!("WriteFormat `{}` is not a KvFormat", other),
message: format!("WriteFormat `{other}` is not a KvFormat"),
}),
}
}
Expand All @@ -48,7 +48,7 @@ impl WriteFormat {
match kv_format {
KvFormat::COMPACTED => Ok(WriteFormat::CompactedKv),
other => Err(IllegalArgument {
message: format!("Unknown KvFormat: `{}`", other),
message: format!("Unknown KvFormat: `{other}`"),
}),
}
}
Expand Down
1 change: 1 addition & 0 deletions crates/fluss/src/metadata/table.rs
Original file line number Diff line number Diff line change
Expand Up @@ -729,6 +729,7 @@ impl TableConfig {
ArrowCompressionInfo::from_conf(&self.properties)
}

/// Returns the data lake format if configured, or None if not set.
pub fn get_datalake_format(&self) -> Result<Option<DataLakeFormat>> {
self.properties
.get("table.datalake.format")
Expand Down
28 changes: 28 additions & 0 deletions crates/fluss/src/proto/fluss_api.proto
Original file line number Diff line number Diff line change
Expand Up @@ -317,4 +317,32 @@ message GetFileSystemSecurityTokenResponse {
required bytes token = 2;
optional int64 expiration_time = 3;
repeated PbKeyValue addition_info = 4;
}

// lookup request and response
// A lookup request targets a single table and may address multiple buckets,
// each carrying its own set of keys.
message LookupRequest {
  // Id of the table to look up in.
  required int64 table_id = 1;
  // Per-bucket key batches.
  repeated PbLookupReqForBucket buckets_req = 2;
}

message LookupResponse {
  // One entry per requested bucket, carrying either values or an error.
  repeated PbLookupRespForBucket buckets_resp = 1;
}

message PbLookupReqForBucket {
  // Partition id for partitioned tables; absent for non-partitioned tables.
  optional int64 partition_id = 1;
  // Target bucket within the table (or partition).
  required int32 bucket_id = 2;
  // Encoded primary-key bytes to look up in this bucket.
  repeated bytes key = 3;
}

message PbLookupRespForBucket {
  // Partition id echoed back for partitioned tables; absent otherwise.
  optional int64 partition_id = 1;
  // Bucket this response corresponds to.
  required int32 bucket_id = 2;
  // Server-side error code; 0 (or absent) means success.
  optional int32 error_code = 3;
  // Human-readable detail accompanying a non-zero error_code.
  optional string error_message = 4;
  // One PbValue per requested key, in request order.
  repeated PbValue values = 5;
}

message PbValue {
  // Encoded row bytes for a key; absent when the key was not found
  // (the client skips absent values when assembling results).
  optional bytes values = 1;
}
Loading
Loading