Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 8 additions & 25 deletions akd/src/append_only_zks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -264,8 +264,7 @@ impl AzksParallelismOption {
let parallel_levels = (parallelism as f32).log2().ceil() as u8;

info!(
"Parallel levels requested (parallelism: {}, parallel levels: {})",
parallelism, parallel_levels
"Parallel levels requested (parallelism: {parallelism}, parallel levels: {parallel_levels})",
);
Some(parallel_levels)
}
Expand Down Expand Up @@ -348,15 +347,9 @@ impl Azks {
tic_toc(self.preload_nodes(storage, &azks_element_set, parallelism_config)).await;
let load_count = fallible_load_count?;
if let Some(time) = time_s {
info!(
"Preload of nodes for insert ({} objects loaded), took {} s",
load_count, time,
);
info!("Preload of nodes for insert ({load_count} objects loaded), took {time} s",);
} else {
info!(
"Preload of nodes for insert ({} objects loaded) completed.",
load_count
);
info!("Preload of nodes for insert ({load_count} objects loaded) completed.",);
}

// increment the current epoch
Expand All @@ -378,7 +371,7 @@ impl Azks {
// update the number of nodes
self.num_nodes += num_inserted;

info!("Batch insert completed ({} new nodes)", num_inserted);
info!("Batch insert completed ({num_inserted} new nodes)");
}

Ok(())
Expand Down Expand Up @@ -652,11 +645,7 @@ impl Azks {
TreeNode::batch_get_from_storage(storage, &children, self.latest_epoch).await?;
count += children.len() as u64;

log::info!(
"Greedy lookup proof preloading loaded {} of {} nodes",
count,
requested_count
);
log::info!("Greedy lookup proof preloading loaded {count} of {requested_count} nodes");

Ok(count)
}
Expand Down Expand Up @@ -719,7 +708,7 @@ impl Azks {
)
.await?;

debug!("Preload of tree ({} nodes) completed", load_count);
debug!("Preload of tree ({load_count} nodes) completed");

Ok(load_count)
}
Expand Down Expand Up @@ -919,15 +908,9 @@ impl Azks {
.await;
let load_count = fallible_load_count?;
if let Some(time) = time_s {
info!(
"Preload of nodes for audit ({} objects loaded), took {} s",
load_count, time,
);
info!("Preload of nodes for audit ({load_count} objects loaded), took {time} s",);
} else {
info!(
"Preload of nodes for audit ({} objects loaded) completed.",
load_count
);
info!("Preload of nodes for audit ({load_count} objects loaded) completed.");
}
storage.log_metrics().await;

Expand Down
2 changes: 1 addition & 1 deletion akd/src/directory.rs
Original file line number Diff line number Diff line change
Expand Up @@ -248,7 +248,7 @@ where
info!("Committing transaction");
match self.storage.commit_transaction().await {
Ok(num_records) => {
info!("Transaction committed ({} records)", num_records);
info!("Transaction committed ({num_records} records)");
}
Err(err) => {
error!("Failed to commit transaction, rolling back");
Expand Down
12 changes: 12 additions & 0 deletions akd/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,18 @@
//! # });
//! ```
//!
//! ### VRF Setup
//!
//! In order to use the directory, we need to set up a VRF key that will be used throughout the lifetime
//! of the directory. This can be done by creating a struct that implements the [`ecvrf::VRFKeyStorage`] trait, which
//! requires an implementation of the `retrieve` method. The `retrieve` method should return
//! the VRF private key as a vector of bytes.
//!
//! Initially (during directory setup), we use the bytes of an `ed25519-dalek` signing key as the VRF private key. You can
//! refer to the [ed25519-dalek documentation](https://docs.rs/ed25519-dalek/latest/ed25519_dalek/struct.SigningKey.html)
//! for more information on how to generate a signing key. These bytes will need to be stored in a secure and persistent
//! location, and the `retrieve` method of [`ecvrf::VRFKeyStorage`] should return these bytes when called.
//!
//! For more information on setting configurations, see the [Configurations](#configurations) section.
//!
//! ## Publishing
Expand Down
4 changes: 2 additions & 2 deletions akd/src/storage/cache/high_parallelism.rs
Original file line number Diff line number Diff line change
Expand Up @@ -82,8 +82,8 @@ impl TimedCache {
}
});

info!("Removed {} expired elements from the cache", num_removed);
debug!("Retained cache size is {} bytes", retained_size);
info!("Removed {num_removed} expired elements from the cache");
debug!("Retained cache size is {retained_size} bytes");

if retained_size > memory_limit_bytes {
info!("Retained cache size has exceeded the predefined limit, cleaning old entries");
Expand Down
1 change: 1 addition & 0 deletions akd/src/storage/memory.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ impl AsyncInMemoryDatabase {
}

#[cfg(test)]
/// Used to clear the in-memory database for testing purposes
pub fn clear(&self) {
self.db.clear();
self.user_info.clear();
Expand Down
6 changes: 3 additions & 3 deletions akd/src/storage/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ async fn test_get_and_set_item<Ns: Database>(storage: &Ns) {

let get_result = storage.get::<PvTreeNode>(&key2).await;
if let Err(err) = get_result {
panic!("Failed to retrieve history tree node (2) {:?}", err)
panic!("Failed to retrieve history tree node (2) {err:?}")
}

// === ValueState storage === //
Expand Down Expand Up @@ -222,7 +222,7 @@ async fn test_batch_get_items<Ns: Database>(storage: &Ns) {
.await;
// should be the same thing as the previous get
match got_all_min_states {
Err(err) => panic!("Failed to retrieve batch of user at min epochs: {:?}", err),
Err(err) => panic!("Failed to retrieve batch of user at min epochs: {err:?}"),
Ok(lst) if lst.len() != rand_users.len() => {
panic!(
"Retrieved list length does not match input length {} != {}",
Expand Down Expand Up @@ -263,7 +263,7 @@ async fn test_batch_get_items<Ns: Database>(storage: &Ns) {
.await;
// should be the same thing as the previous get
match got_all_max_states {
Err(err) => panic!("Failed to retrieve batch of user at min epochs: {:?}", err),
Err(err) => panic!("Failed to retrieve batch of user at min epochs: {err:?}"),
Ok(lst) if lst.len() != rand_users.len() => {
panic!(
"Retrieved list length does not match input length {} != {}",
Expand Down
3 changes: 1 addition & 2 deletions akd_core/src/proto/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -142,8 +142,7 @@ impl TryFrom<&specs::types::NodeLabel> for crate::NodeLabel {

if label_len > 256 {
return Err(ConversionError::Deserialization(format!(
"Label length is too long, should be at most 256: {len}",
len = label_len
"Label length is too long, should be at most 256: {label_len}",
)));
}

Expand Down
9 changes: 3 additions & 6 deletions akd_core/src/verify/history.rs
Original file line number Diff line number Diff line change
Expand Up @@ -125,8 +125,7 @@ fn verify_with_history_params(
// Make sure the start version is 1
if start_version != 1 {
return Err(VerificationError::HistoryProof(format!(
"Expected start version to be 1 given that it is a complete history, but got start_version = {}",
start_version
"Expected start version to be 1 given that it is a complete history, but got start_version = {start_version}",
)));
}
}
Expand All @@ -135,15 +134,13 @@ fn verify_with_history_params(
match num_proofs.cmp(&recency) {
Ordering::Greater => {
return Err(VerificationError::HistoryProof(format!(
"Expected at most {} update proofs, but got {} of them",
recency, num_proofs
"Expected at most {recency} update proofs, but got {num_proofs} of them",
)))
}
Ordering::Less => {
if start_version != 1 {
return Err(VerificationError::HistoryProof(format!(
"Expected at most {} update proofs, but got {} of them",
recency, num_proofs
"Expected at most {recency} update proofs, but got {num_proofs} of them",
)));
}
}
Expand Down
2 changes: 1 addition & 1 deletion examples/src/fixture_generator/writer/yaml.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ impl<T: Write> YamlWriter<T> {

impl<T: Write> Writer for YamlWriter<T> {
fn write_object(&mut self, object: impl Serialize) {
writeln!(self.out, "{}", YAML_SEPARATOR).unwrap();
writeln!(self.out, "{YAML_SEPARATOR}").unwrap();
serde_yaml::to_writer(&mut self.out, &object).unwrap();
}

Expand Down
20 changes: 10 additions & 10 deletions examples/src/mysql_demo/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -201,7 +201,7 @@ async fn pre_process_input(cli: &CliArgs, db: Option<&AsyncMySqlDatabase>) -> Op
println!("======= Dropping database ======= ");
if let Some(mysql_db) = db {
if let Err(error) = mysql_db.drop_tables().await {
error!("Error dropping database: {}", error);
error!("Error dropping database: {error}");
} else {
info!("Database dropped.");
}
Expand Down Expand Up @@ -308,15 +308,15 @@ async fn process_input(
match rpc_rx.await {
Err(err) => code = Some(format!("{err}")),
Ok(Err(dir_err)) => code = Some(dir_err),
Ok(Ok(msg)) => info!("{}", msg),
Ok(Ok(msg)) => info!("{msg}"),
}
if code.is_some() {
break;
}
}

if let Some(err) = code {
error!("Benchmark operation error {}", err);
error!("Benchmark operation error {err}");
} else {
let toc = tic.elapsed();

Expand Down Expand Up @@ -356,7 +356,7 @@ async fn process_input(
})
.collect();

info!("Inserting {} users", num_users);
info!("Inserting {num_users} users");
let (rpc_tx, _) = tokio::sync::oneshot::channel();
let rpc = directory_host::Rpc(
directory_host::DirectoryCommand::PublishBatch(user_data.clone()),
Expand Down Expand Up @@ -388,11 +388,11 @@ async fn process_input(
break;
}
}
info!("LOOKUP of {} users complete (iteration {})", num_users, i);
info!("LOOKUP of {num_users} users complete (iteration {i})");
}

if let Some(err) = code {
error!("Benchmark operation error {}", err);
error!("Benchmark operation error {err}");
} else {
let toc = tic.elapsed();

Expand All @@ -410,7 +410,7 @@ async fn process_input(
println!("======= One-off flushing of the database ======= ");
if let Some(mysql_db) = db {
if let Err(error) = mysql_db.get_db().delete_data().await {
error!("Error flushing database: {}", error);
error!("Error flushing database: {error}");
} else {
info!("Database flushed.");
}
Expand All @@ -420,7 +420,7 @@ async fn process_input(
println!("======= Dropping database ======= ");
if let Some(mysql_db) = db {
if let Err(error) = mysql_db.get_db().drop_tables().await {
error!("Error dropping database: {}", error);
error!("Error dropping database: {error}");
} else {
info!("Database dropped.");
}
Expand Down Expand Up @@ -488,7 +488,7 @@ async fn process_input(
println!("Response: {success}");
}
Ok(Err(dir_err)) => {
error!("Error in directory processing command: {}", dir_err);
error!("Error in directory processing command: {dir_err}");
}
Err(_) => {
error!("Failed to receive result from directory");
Expand All @@ -500,7 +500,7 @@ async fn process_input(
println!("Response: {success}");
}
Ok(Ok(Err(dir_err))) => {
error!("Error in directory processing command: {}", dir_err);
error!("Error in directory processing command: {dir_err}");
}
Ok(Err(_)) => {
error!("Failed to receive result from directory");
Expand Down
Loading
Loading