Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
70 changes: 44 additions & 26 deletions akd/src/auditor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
//! Code for an auditor of an authenticated key directory

use akd_core::configuration::Configuration;
use akd_core::AzksElement;

use crate::append_only_zks::AzksParallelismConfig;
use crate::AzksValue;
Expand Down Expand Up @@ -55,47 +56,64 @@ pub async fn audit_verify<TC: Configuration>(
}

/// Helper for audit, verifies an append-only proof.
///
/// This function first creates a new AZKS instance with the unchanged nodes from the proof,
/// then it verifies the start hash against the root hash of this AZKS instance.
/// Next, it creates another AZKS instance with the unchanged nodes and inserted nodes,
/// and verifies the end hash against the root hash of this second AZKS instance.
#[cfg_attr(feature = "tracing_instrument", tracing::instrument(skip_all))]
pub async fn verify_consecutive_append_only<TC: Configuration>(
proof: &SingleAppendOnlyProof,
start_hash: Digest,
end_hash: Digest,
end_epoch: u64,
) -> Result<(), AkdError> {
let db = AsyncInMemoryDatabase::new();
let manager = StorageManager::new_no_cache(db);
verify_append_only_hash::<TC>(proof.unchanged_nodes.clone(), start_hash, None).await?;

let mut unchanged_with_inserted_nodes = proof.unchanged_nodes.clone();
unchanged_with_inserted_nodes.extend(proof.inserted.iter().map(|x| {
let mut y = *x;
y.value = AzksValue(TC::hash_leaf_with_commitment(x.value, end_epoch).0);
y
}));

verify_append_only_hash::<TC>(unchanged_with_inserted_nodes, end_hash, Some(end_epoch - 1))
.await?;
Ok(())
}

/// This function verifies the root hash of an AZKS instance against an expected hash.
/// It creates an AZKS instance from a set of nodes, and checks if the computed root
/// hash matches the expected hash. The optional latest_epoch parameter allows for
/// specifying the latest epoch for the AZKS instance.
async fn verify_append_only_hash<TC: Configuration>(
nodes: Vec<AzksElement>,
expected_hash: Digest,
latest_epoch: Option<u64>,
) -> Result<(), AkdError> {
let manager = StorageManager::new_no_cache(
AsyncInMemoryDatabase::new_with_remove_child_nodes_on_insertion(),
);
let mut azks = Azks::new::<TC, _>(&manager).await?;
if let Some(epoch) = latest_epoch {
azks.latest_epoch = epoch;
}
azks.batch_insert_nodes::<TC, _>(
&manager,
proof.unchanged_nodes.clone(),
InsertMode::Auditor,
AzksParallelismConfig::default(),
)
.await?;
let computed_start_root_hash: Digest = azks.get_root_hash::<TC, _>(&manager).await?;
let mut verified = computed_start_root_hash == start_hash;
azks.latest_epoch = end_epoch - 1;
let updated_inserted = proof
.inserted
.iter()
.map(|x| {
let mut y = *x;
y.value = AzksValue(TC::hash_leaf_with_commitment(x.value, end_epoch).0);
y
})
.collect();
azks.batch_insert_nodes::<TC, _>(
&manager,
updated_inserted,
nodes,
InsertMode::Auditor,
AzksParallelismConfig::default(),
)
.await?;
let computed_end_root_hash: Digest = azks.get_root_hash::<TC, _>(&manager).await?;
verified = verified && (computed_end_root_hash == end_hash);
if !verified {
return Err(AkdError::AzksErr(AzksError::VerifyAppendOnlyProof));
let computed_hash: Digest = azks.get_root_hash::<TC, _>(&manager).await?;
if computed_hash != expected_hash {
return Err(AkdError::AzksErr(AzksError::VerifyAppendOnlyProof(
format!(
"Expected hash {} does not match computed root hash {}",
hex::encode(expected_hash),
hex::encode(computed_hash)
),
)));
}
Ok(())
}
6 changes: 3 additions & 3 deletions akd/src/errors.rs
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,7 @@ pub enum AzksError {
/// Membership proof did not verify
VerifyMembershipProof(String),
/// Append-only proof did not verify
VerifyAppendOnlyProof,
VerifyAppendOnlyProof(String),
/// Thrown when a place where an epoch is needed wasn't provided one.
NoEpochGiven,
}
Expand All @@ -208,8 +208,8 @@ impl fmt::Display for AzksError {
Self::VerifyMembershipProof(error_string) => {
write!(f, "{error_string}")
}
Self::VerifyAppendOnlyProof => {
write!(f, "Append only proof did not verify!")
Self::VerifyAppendOnlyProof(error_string) => {
write!(f, "Append only proof did not verify: {error_string}")
}
Self::NoEpochGiven => {
write!(f, "An epoch was required but not supplied")
Expand Down
46 changes: 45 additions & 1 deletion akd/src/storage/memory.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ use crate::storage::types::{
DbRecord, KeyData, StorageType, ValueState, ValueStateKey, ValueStateRetrievalFlag,
};
use crate::storage::{Database, Storable, StorageUtil};
use crate::tree_node::{NodeKey, TreeNodeWithPreviousValue};
use crate::{AkdLabel, AkdValue};
use async_trait::async_trait;
use dashmap::DashMap;
Expand All @@ -30,15 +31,42 @@ type UserValueMap = HashMap<Epoch, ValueState>;
pub struct AsyncInMemoryDatabase {
db: Arc<DashMap<Vec<u8>, DbRecord>>,
user_info: Arc<DashMap<Vec<u8>, UserValueMap>>,
/// This flag is used to determine whether the database will automatically
/// (and aggressively) remove entries corresponding to left and right
/// children of a tree node when the node is inserted. The purpose behind this
is to reduce the size of the in-memory database by culling the child entries
/// once the parent node's hash has been calculated. The primary use case for this
/// is to improve auditing memory usage and time (since during auditing, we no longer
care about the child node hashes once their parent has been computed). Note that this
/// technique takes advantage of the way batch insertion of nodes into the tree works,
/// since we always process all of the children of a particular subtree before processing
/// the root of that subtree.
remove_child_nodes_on_insertion: bool,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm generally not the biggest fan of using a boolean to differentiate behavior, as I'd like to use something like a different type to reflect that the in-memory store we're using doesn't store everything, but I think that's a bit more of a rework than what we have now.

As such, I think what we have here is sufficient since we need to influence how inner workings of batch_set and using something like a newtype isn't necessarily going to make that easy given that we're not calling something before or after existing functionality. Additionally, you've commented this really well so it's pretty clear + the associated function to instantiate is super clear 👍

}

unsafe impl Send for AsyncInMemoryDatabase {}
unsafe impl Sync for AsyncInMemoryDatabase {}

impl AsyncInMemoryDatabase {
/// Returns the size of the in-memory database
pub fn size_of_db(&self) -> usize {
self.db.len()
}

/// Creates a new in memory db
pub fn new() -> Self {
Self::default()
Self {
remove_child_nodes_on_insertion: false,
..Self::default()
}
}

/// Creates a new in memory db with the flag set to remove child nodes on insertion
pub fn new_with_remove_child_nodes_on_insertion() -> Self {
Self {
remove_child_nodes_on_insertion: true,
..Self::default()
}
}

#[cfg(test)]
Expand Down Expand Up @@ -103,6 +131,22 @@ impl Database for AsyncInMemoryDatabase {
}
}
} else {
if self.remove_child_nodes_on_insertion {
if let DbRecord::TreeNode(node) = record.clone() {
if let Some(left_child) = node.latest_node.left_child {
self.db
.remove(&TreeNodeWithPreviousValue::get_full_binary_key_id(
&NodeKey(left_child),
));
}
if let Some(right_child) = node.latest_node.right_child {
self.db
.remove(&TreeNodeWithPreviousValue::get_full_binary_key_id(
&NodeKey(right_child),
));
}
}
}
self.db.insert(record.get_full_binary_id(), record);
}
}
Expand Down
2 changes: 1 addition & 1 deletion akd_core/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ paste = { version = "1", optional = true }
bincode = "1"
itertools = "0.13"
proptest = "1"
proptest-derive = "0.4"
proptest-derive = "0.6"
rand = "0.8"
serde = { version = "1", features = ["derive"] }
criterion = "0.5"
Expand Down
2 changes: 1 addition & 1 deletion examples/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "examples"
version = "0.12.0-pre.11"
version = "0.12.0-pre.12"
authors = ["akd contributors"]
license = "MIT OR Apache-2.0"
edition = "2021"
Expand Down
Loading