Skip to content

Commit c1bbc4c

Browse files
alamb
authored and roeap committed
fix: fix clippy warnings on main
Signed-off-by: Andrew Lamb <[email protected]>
1 parent 4be81fb commit c1bbc4c

File tree

9 files changed

+23
-22
lines changed

9 files changed

+23
-22
lines changed

crates/catalog-unity/src/datafusion.rs

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -193,7 +193,7 @@ impl UnitySchemaProvider {
193193
.map(|resp| match resp {
194194
Ok(TableTempCredentialsResponse::Success(temp_creds)) => Ok(temp_creds),
195195
Ok(TableTempCredentialsResponse::Error(err)) => Err(err.into()),
196-
Err(err) => Err(err.into()),
196+
Err(err) => Err(err),
197197
})
198198
.await
199199
}

crates/core/src/delta_datafusion/mod.rs

Lines changed: 2 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -910,7 +910,7 @@ impl TableProvider for LazyTableProvider {
910910
if projection != &current_projection {
911911
let execution_props = &ExecutionProps::new();
912912
let fields: DeltaResult<Vec<(Arc<dyn PhysicalExpr>, String)>> = projection
913-
.into_iter()
913+
.iter()
914914
.map(|i| {
915915
let (table_ref, field) = df_schema.qualified_field(*i);
916916
create_physical_expr(
@@ -2865,6 +2865,7 @@ mod tests {
28652865
let expected = vec![
28662866
ObjectStoreOperation::GetRange(LocationType::Data, 4920..4928),
28672867
ObjectStoreOperation::GetRange(LocationType::Data, 2399..4920),
2868+
#[expect(clippy::single_range_in_vec_init)]
28682869
ObjectStoreOperation::GetRanges(LocationType::Data, vec![4..58]),
28692870
];
28702871
let mut actual = Vec::new();

crates/core/src/kernel/snapshot/log_segment.rs

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -831,7 +831,7 @@ pub(super) mod tests {
831831

832832
assert_eq!(commit.metrics.num_retries, 0);
833833
assert_eq!(commit.metrics.num_log_files_cleaned_up, 0);
834-
assert_eq!(commit.metrics.new_checkpoint_created, false);
834+
assert!(!commit.metrics.new_checkpoint_created);
835835

836836
let batches = LogSegment::try_new(
837837
&Path::default(),

crates/core/src/operations/load_cdf.rs

Lines changed: 3 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -104,11 +104,10 @@ impl CdfLoadBuilder {
104104
for v in 0..self.snapshot.version() {
105105
if let Ok(Some(bytes)) = self.log_store.read_commit_entry(v).await {
106106
if let Ok(actions) = get_actions(v, bytes).await {
107-
if actions.iter().any(|action| match action {
108-
Action::CommitInfo(CommitInfo {
107+
if actions.iter().any(|action| {
108+
matches!(action, Action::CommitInfo(CommitInfo {
109109
timestamp: Some(t), ..
110-
}) if ts.timestamp_millis() < *t => true,
111-
_ => false,
110+
}) if ts.timestamp_millis() < *t)
112111
}) {
113112
return Ok(v);
114113
}

crates/core/src/operations/merge/mod.rs

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -1462,7 +1462,7 @@ fn modify_schema(
14621462
return Err(DeltaTableError::Arrow { source: error });
14631463
}
14641464

1465-
if let Some(target_field) = target_schema.field_from_column(columns).ok() {
1465+
if let Ok(target_field) = target_schema.field_from_column(columns) {
14661466
// for nested data types we need to first merge then see if there a change then replace the pre-existing field
14671467
let new_field = merge_arrow_field(target_field, source_field, true)?;
14681468
if &new_field == target_field {

crates/core/src/protocol/checkpoints.rs

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -698,8 +698,8 @@ mod tests {
698698
0, finalized_commit.metrics.num_log_files_cleaned_up,
699699
"Expected no log files cleaned up"
700700
);
701-
assert_eq!(
702-
false, finalized_commit.metrics.new_checkpoint_created,
701+
assert!(
702+
!finalized_commit.metrics.new_checkpoint_created,
703703
"Expected checkpoint created."
704704
);
705705
table.load().await.expect("Failed to reload table");

crates/core/tests/integration_datafusion.rs

Lines changed: 0 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -194,8 +194,6 @@ mod local {
194194
// We want to emulate that this occurs on another node, so that all we have access to is the
195195
// plan byte serialization.
196196
let source_scan_bytes = {
197-
let ctx = SessionContext::new();
198-
let state = ctx.state();
199197
let source_table = open_table("../test/tests/data/delta-0.8.0-date").await?;
200198

201199
let target_provider = provider_as_source(Arc::new(source_table));
@@ -1185,8 +1183,6 @@ async fn simple_query(context: &IntegrationContext) -> TestResult {
11851183
}
11861184

11871185
mod date_partitions {
1188-
use tempfile::TempDir;
1189-
11901186
use super::*;
11911187

11921188
async fn setup_test(table_uri: &str) -> Result<DeltaTable, Box<dyn Error>> {

crates/gcp/tests/context.rs

Lines changed: 1 addition & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -101,11 +101,7 @@ impl StorageIntegration for GcpIntegration {
101101
}
102102
}
103103

104-
impl GcpIntegration {
105-
fn delete_bucket(&self) -> std::io::Result<ExitStatus> {
106-
gs_cli::delete_bucket(self.bucket_name.clone())
107-
}
108-
}
104+
impl GcpIntegration {}
109105

110106
/// small wrapper around google api
111107
pub mod gs_cli {

python/src/lib.rs

Lines changed: 12 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -327,6 +327,7 @@ impl RawDeltaTable {
327327
/// This will acquire the internal lock since it is a mutating operation!
328328
pub fn load_version(&self, py: Python, version: i64) -> PyResult<()> {
329329
py.allow_threads(|| {
330+
#[allow(clippy::await_holding_lock)]
330331
rt().block_on(async {
331332
let mut table = self
332333
._table
@@ -344,6 +345,7 @@ impl RawDeltaTable {
344345
/// Retrieve the latest version from the internally loaded table state
345346
pub fn get_latest_version(&self, py: Python) -> PyResult<i64> {
346347
py.allow_threads(|| {
348+
#[allow(clippy::await_holding_lock)]
347349
rt().block_on(async {
348350
match self._table.lock() {
349351
Ok(table) => table
@@ -359,6 +361,7 @@ impl RawDeltaTable {
359361

360362
pub fn get_earliest_version(&self, py: Python) -> PyResult<i64> {
361363
py.allow_threads(|| {
364+
#[allow(clippy::await_holding_lock)]
362365
rt().block_on(async {
363366
match self._table.lock() {
364367
Ok(table) => table
@@ -397,6 +400,7 @@ impl RawDeltaTable {
397400
DateTime::<Utc>::from(DateTime::<FixedOffset>::parse_from_rfc3339(ds).map_err(
398401
|err| PyValueError::new_err(format!("Failed to parse datetime string: {err}")),
399402
)?);
403+
#[allow(clippy::await_holding_lock)]
400404
rt().block_on(async {
401405
let mut table = self
402406
._table
@@ -1041,6 +1045,7 @@ impl RawDeltaTable {
10411045
/// Run the History command on the Delta Table: Returns provenance information, including the operation, user, and so on, for each write to a table.
10421046
#[pyo3(signature = (limit=None))]
10431047
pub fn history(&self, limit: Option<usize>) -> PyResult<Vec<String>> {
1048+
#[allow(clippy::await_holding_lock)]
10441049
let history = rt().block_on(async {
10451050
match self._table.lock() {
10461051
Ok(table) => table
@@ -1058,6 +1063,7 @@ impl RawDeltaTable {
10581063
}
10591064

10601065
pub fn update_incremental(&self) -> PyResult<()> {
1066+
#[allow(clippy::await_holding_lock)]
10611067
#[allow(deprecated)]
10621068
Ok(rt()
10631069
.block_on(async {
@@ -1102,10 +1108,9 @@ impl RawDeltaTable {
11021108
}
11031109
fields
11041110
} else {
1105-
return Err(DeltaTableError::generic(
1111+
return Err(PythonError::from(DeltaTableError::generic(
11061112
"Couldn't construct list of fields for file stats expression gatherings",
1107-
))
1108-
.map_err(PythonError::from)?;
1113+
)))?;
11091114
};
11101115

11111116
self.cloned_state()?
@@ -1368,6 +1373,7 @@ impl RawDeltaTable {
13681373

13691374
// Runs lakefs pre-execution
13701375
if store.name() == "LakeFSLogStore" {
1376+
#[allow(clippy::await_holding_lock)]
13711377
rt().block_on(async {
13721378
handle
13731379
.before_post_commit_hook(store, true, operation_id)
@@ -1376,6 +1382,7 @@ impl RawDeltaTable {
13761382
.map_err(PythonError::from)?;
13771383
}
13781384

1385+
#[allow(clippy::await_holding_lock)]
13791386
let result = rt().block_on(async {
13801387
match self._table.lock() {
13811388
Ok(table) => create_checkpoint(&table, Some(operation_id))
@@ -1409,6 +1416,7 @@ impl RawDeltaTable {
14091416

14101417
// Runs lakefs pre-execution
14111418
if store.name() == "LakeFSLogStore" {
1419+
#[allow(clippy::await_holding_lock)]
14121420
rt().block_on(async {
14131421
handle
14141422
.before_post_commit_hook(store, true, operation_id)
@@ -1417,6 +1425,7 @@ impl RawDeltaTable {
14171425
.map_err(PythonError::from)?;
14181426
}
14191427

1428+
#[allow(clippy::await_holding_lock)]
14201429
let result = rt().block_on(async {
14211430
match self._table.lock() {
14221431
Ok(table) => {

0 commit comments

Comments
 (0)