From eef9b3457e17eed3f8de25db0e5fd79fd06df90f Mon Sep 17 00:00:00 2001
From: Yuri Astrakhan
Date: Sat, 22 Feb 2025 20:20:34 -0500
Subject: [PATCH] a few more fixes

Signed-off-by: Yuri Astrakhan
---
 crates/core/src/operations/optimize.rs        |  3 +--
 crates/core/src/operations/write/execution.rs |  4 ++--
 crates/core/src/schema/partitions.rs          | 12 ++++++------
 crates/gcp/tests/context.rs                   |  2 +-
 crates/hdfs/tests/context.rs                  |  2 +-
 crates/lakefs/src/client.rs                   | 14 +++++++-------
 crates/lakefs/tests/context.rs                |  2 +-
 crates/mount/tests/context.rs                 |  3 +--
 crates/test/src/concurrent.rs                 |  2 +-
 crates/test/src/read.rs                       |  4 ++--
 proofs/src/main.rs                            |  6 ++----
 python/src/schema.rs                          |  4 +---
 12 files changed, 26 insertions(+), 32 deletions(-)

diff --git a/crates/core/src/operations/optimize.rs b/crates/core/src/operations/optimize.rs
index 4eeb295693..44820dd88e 100644
--- a/crates/core/src/operations/optimize.rs
+++ b/crates/core/src/operations/optimize.rs
@@ -680,9 +680,8 @@ impl MergePlan {
                 })
                 .map(|(partition, files)| {
                     debug!(
-                        "merging a group of {} files in partition {:?}",
+                        "merging a group of {} files in partition {partition:?}",
                         files.len(),
-                        partition,
                     );
                     for file in files.iter() {
                         debug!(" file {}", file.path);
diff --git a/crates/core/src/operations/write/execution.rs b/crates/core/src/operations/write/execution.rs
index 74fd18719b..e2340f9573 100644
--- a/crates/core/src/operations/write/execution.rs
+++ b/crates/core/src/operations/write/execution.rs
@@ -69,7 +69,7 @@ pub(crate) async fn write_execution_plan_cdc(
             Action::Add(add) => {
                 Action::Cdc(AddCDCFile {
                     // This is a gnarly hack, but the action needs the nested path, not the
-                    // path isnide the prefixed store
+                    // path inside the prefixed store
                     path: format!("_change_data/{}", add.path),
                     size: add.size,
                     partition_values: add.partition_values,
@@ -421,7 +421,7 @@ pub(crate) async fn write_execution_plan_v2(
                 {
                     Action::Cdc(AddCDCFile {
                         // This is a gnarly hack, but the action needs the nested path, not the
-                        // path isnide the prefixed store
+                        // path inside the prefixed store
                         path: format!("_change_data/{}", add.path),
                         size: add.size,
                         partition_values: add.partition_values,
diff --git a/crates/core/src/schema/partitions.rs b/crates/core/src/schema/partitions.rs
index d65bf82d23..c55a065340 100644
--- a/crates/core/src/schema/partitions.rs
+++ b/crates/core/src/schema/partitions.rs
@@ -170,12 +170,12 @@ impl Serialize for PartitionFilter {
         S: Serializer,
     {
         let s = match &self.value {
-            PartitionValue::Equal(value) => format!("{} = '{}'", self.key, value),
-            PartitionValue::NotEqual(value) => format!("{} != '{}'", self.key, value),
-            PartitionValue::GreaterThan(value) => format!("{} > '{}'", self.key, value),
-            PartitionValue::GreaterThanOrEqual(value) => format!("{} >= '{}'", self.key, value),
-            PartitionValue::LessThan(value) => format!("{} < '{}'", self.key, value),
-            PartitionValue::LessThanOrEqual(value) => format!("{} <= '{}'", self.key, value),
+            PartitionValue::Equal(value) => format!("{} = '{value}'", self.key),
+            PartitionValue::NotEqual(value) => format!("{} != '{value}'", self.key),
+            PartitionValue::GreaterThan(value) => format!("{} > '{value}'", self.key),
+            PartitionValue::GreaterThanOrEqual(value) => format!("{} >= '{value}'", self.key),
+            PartitionValue::LessThan(value) => format!("{} < '{value}'", self.key),
+            PartitionValue::LessThanOrEqual(value) => format!("{} <= '{value}'", self.key),
             // used upper case for IN and NOT similar to SQL
             PartitionValue::In(values) => {
                 let quoted_values: Vec<String> = values.iter().map(|v| format!("'{v}'")).collect();
diff --git a/crates/gcp/tests/context.rs b/crates/gcp/tests/context.rs
index 1e6d7f7171..c095a27e3f 100644
--- a/crates/gcp/tests/context.rs
+++ b/crates/gcp/tests/context.rs
@@ -95,7 +95,7 @@ impl StorageIntegration for GcpIntegration {
     fn copy_directory(&self, source: &str, destination: &str) -> std::io::Result<ExitStatus> {
         use futures::executor::block_on;
 
-        let to = format!("{}/{}", self.root_uri(), destination);
+        let to = format!("{}/{destination}", self.root_uri());
         let _ = block_on(copy_table(source.to_owned(), None, to, None, true));
         Ok(ExitStatus::default())
     }
diff --git a/crates/hdfs/tests/context.rs b/crates/hdfs/tests/context.rs
index 156328abaf..f1447394fc 100644
--- a/crates/hdfs/tests/context.rs
+++ b/crates/hdfs/tests/context.rs
@@ -52,7 +52,7 @@ impl StorageIntegration for HdfsIntegration {
                 "-copyFromLocal",
                 "-p",
                 source,
-                &format!("{}/{}", self.root_uri(), destination),
+                &format!("{}/{destination}", self.root_uri()),
             ])
             .status()
             .unwrap())
diff --git a/crates/lakefs/src/client.rs b/crates/lakefs/src/client.rs
index 63db236669..9647d086c8 100644
--- a/crates/lakefs/src/client.rs
+++ b/crates/lakefs/src/client.rs
@@ -55,7 +55,7 @@ impl LakeFSClient {
     ) -> DeltaResult<(Url, String)> {
         let (repo, source_branch, table) = self.decompose_url(source_url.to_string());
 
-        let request_url = format!("{}/api/v1/repositories/{}/branches", self.config.host, repo);
+        let request_url = format!("{}/api/v1/repositories/{repo}/branches", self.config.host);
         let transaction_branch = format!("delta-tx-{operation_id}");
 
         let body = json!({
@@ -102,8 +102,8 @@ impl LakeFSClient {
         branch: String,
     ) -> Result<(), TransactionError> {
         let request_url = format!(
-            "{}/api/v1/repositories/{}/branches/{}",
-            self.config.host, repo, branch
+            "{}/api/v1/repositories/{repo}/branches/{branch}",
+            self.config.host,
         );
         let response = self
             .http_client
@@ -139,8 +139,8 @@ impl LakeFSClient {
         allow_empty: bool,
     ) -> DeltaResult<()> {
         let request_url = format!(
-            "{}/api/v1/repositories/{}/branches/{}/commits",
-            self.config.host, repo, branch
+            "{}/api/v1/repositories/{repo}/branches/{branch}/commits",
+            self.config.host,
         );
 
         let body = json!({
@@ -185,8 +185,8 @@ impl LakeFSClient {
         allow_empty: bool,
     ) -> Result<(), TransactionError> {
         let request_url = format!(
-            "{}/api/v1/repositories/{}/refs/{}/merge/{}",
-            self.config.host, repo, transaction_branch, target_branch
+            "{}/api/v1/repositories/{repo}/refs/{transaction_branch}/merge/{target_branch}",
+            self.config.host,
         );
 
         let body = json!({
diff --git a/crates/lakefs/tests/context.rs b/crates/lakefs/tests/context.rs
index e4835896e2..cb04db8bdf 100644
--- a/crates/lakefs/tests/context.rs
+++ b/crates/lakefs/tests/context.rs
@@ -45,7 +45,7 @@ impl StorageIntegration for LakeFSIntegration {
     fn copy_directory(&self, source: &str, destination: &str) -> std::io::Result<ExitStatus> {
         println!(
             "Copy directory called with {source} {}",
-            format!("{}/{}", self.root_uri(), destination)
+            format!("{}/{destination}", self.root_uri())
         );
 
         let lakectl = which("lakectl").expect("Failed to find lakectl executable");
diff --git a/crates/mount/tests/context.rs b/crates/mount/tests/context.rs
index d7977b36de..28bef2a339 100644
--- a/crates/mount/tests/context.rs
+++ b/crates/mount/tests/context.rs
@@ -74,9 +74,8 @@ impl StorageIntegration for DbfsIntegration {
         let mut options = CopyOptions::new();
         options.content_only = true;
         let dest_path = format!(
-            "/dbfs{}/{}",
+            "/dbfs{}/{destination}",
             self.tmp_dir.as_ref().to_str().unwrap(),
-            destination
         );
         std::fs::create_dir_all(&dest_path)?;
         copy(source, &dest_path, &options).expect("Failed to copy");
diff --git a/crates/test/src/concurrent.rs b/crates/test/src/concurrent.rs
index ac147789c9..aed4576925 100644
--- a/crates/test/src/concurrent.rs
+++ b/crates/test/src/concurrent.rs
@@ -108,7 +108,7 @@ impl Worker {
     async fn commit_sequence(&mut self, n: i64) -> HashMap<i64, String> {
         let mut result = HashMap::new();
         for i in 0..n {
-            let name = format!("{}-{}", self.name, i);
+            let name = format!("{}-{i}", self.name);
             let v = self.commit_file(&name).await;
             result.insert(v, name);
             tokio::time::sleep(Duration::from_millis(100)).await;
diff --git a/crates/test/src/read.rs b/crates/test/src/read.rs
index 9b1e51e22c..e6f8b6f7eb 100644
--- a/crates/test/src/read.rs
+++ b/crates/test/src/read.rs
@@ -137,7 +137,7 @@ pub async fn read_golden(integration: &IntegrationContext) -> TestResult {
 }
 
 async fn verify_store(integration: &IntegrationContext, root_path: &str) -> TestResult {
-    let table_uri = format!("{}/{}", integration.root_uri(), root_path);
+    let table_uri = format!("{}/{root_path}", integration.root_uri());
 
     let storage = DeltaTableBuilder::from_uri(table_uri.clone())
         .with_allow_http(true)
@@ -158,7 +158,7 @@ async fn verify_store(integration: &IntegrationContext, root_path: &str) -> Test
 }
 
 async fn read_encoded_table(integration: &IntegrationContext, root_path: &str) -> TestResult {
-    let table_uri = format!("{}/{}", integration.root_uri(), root_path);
+    let table_uri = format!("{}/{root_path}", integration.root_uri());
 
     let table = DeltaTableBuilder::from_uri(table_uri)
         .with_allow_http(true)
diff --git a/proofs/src/main.rs b/proofs/src/main.rs
index 964687e721..32511d6057 100644
--- a/proofs/src/main.rs
+++ b/proofs/src/main.rs
@@ -519,16 +519,14 @@ impl Model for AtomicRenameSys {
 
         let writer_versions = next_state.writer_versions();
         lines.push(format!(
-            "writer_versions({}): {:?}",
+            "writer_versions({}): {writer_versions:?}",
             writer_versions.len(),
-            writer_versions,
         ));
 
         let blob_store_obj_keys = next_state.blob_store_obj_keys();
         lines.push(format!(
-            "blob_store_obj_keys({}): {:?}",
+            "blob_store_obj_keys({}): {blob_store_obj_keys:?}",
             blob_store_obj_keys.len(),
-            blob_store_obj_keys,
         ));
 
         lines.join("\n")
diff --git a/python/src/schema.rs b/python/src/schema.rs
index c256c06c29..b93fdfc219 100644
--- a/python/src/schema.rs
+++ b/python/src/schema.rs
@@ -497,15 +497,13 @@ impl Field {
             format!(", metadata={metadata_repr}")
         };
         Ok(format!(
-            "Field({}, {}, nullable={}{})",
+            "Field({}, {type_repr}, nullable={}{maybe_metadata})",
             self.inner.name(),
-            type_repr,
             if self.inner.is_nullable() {
                 "True"
             } else {
                 "False"
             },
-            maybe_metadata,
        ))
    }
 
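
All of the hunks above make the same mechanical change: positional format arguments are moved
into the format string as inline captured identifiers, the rewrite suggested by clippy's
`uninlined_format_args` lint. A minimal sketch of the before/after, using hypothetical values
rather than code from the crates touched here:

    fn main() {
        // Hypothetical stand-ins for the kinds of values formatted in the patch.
        let host = "https://lakefs.example";
        let repo = "my-repo";
        let branch = "delta-tx-42";

        // Before: positional `{}` slots matched against a trailing argument list.
        let old = format!("{}/api/v1/repositories/{}/branches/{}", host, repo, branch);
        // After: bare identifiers captured directly inside the format string (Rust 1.58+).
        let new = format!("{host}/api/v1/repositories/{repo}/branches/{branch}");

        assert_eq!(old, new);
        println!("{new}");
    }

Only bare identifiers can be captured this way; expressions such as `self.config.host`,
`self.key`, or `files.len()` cannot, which is why those hunks keep a positional `{}` slot
alongside the inlined names.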