From 9b5a8d917633850b28c8e25520fb27f87c0bcf31 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Wed, 5 Oct 2022 10:12:59 +0200 Subject: [PATCH 01/17] move ledger's protocol module into shared crate --- apps/src/lib/node/ledger/mod.rs | 1 - shared/src/ledger/mod.rs | 2 ++ {apps/src/lib/node => shared/src}/ledger/protocol/mod.rs | 0 3 files changed, 2 insertions(+), 1 deletion(-) rename {apps/src/lib/node => shared/src}/ledger/protocol/mod.rs (100%) diff --git a/apps/src/lib/node/ledger/mod.rs b/apps/src/lib/node/ledger/mod.rs index dc0dfc184c8..965275cc03a 100644 --- a/apps/src/lib/node/ledger/mod.rs +++ b/apps/src/lib/node/ledger/mod.rs @@ -1,7 +1,6 @@ mod abortable; mod broadcaster; pub mod events; -pub mod protocol; pub mod rpc; mod shell; mod shims; diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index ef92b1e2d9c..22d1b175251 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -7,6 +7,8 @@ pub mod ibc; pub mod native_vp; pub mod parameters; pub mod pos; +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +pub mod protocol; pub mod slash_fund; pub mod storage; pub mod storage_api; diff --git a/apps/src/lib/node/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs similarity index 100% rename from apps/src/lib/node/ledger/protocol/mod.rs rename to shared/src/ledger/protocol/mod.rs From 84777a2cfe9fde4c9c364b3b375643b51802cdb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 6 Oct 2022 12:14:34 +0200 Subject: [PATCH 02/17] protocol: update imports and add missing rustdoc --- apps/Cargo.toml | 4 +- .../lib/node/ledger/shell/finalize_block.rs | 1 + apps/src/lib/node/ledger/shell/mod.rs | 4 +- apps/src/lib/node/ledger/shell/queries.rs | 8 ++-- shared/Cargo.toml | 2 + shared/src/ledger/protocol/mod.rs | 47 ++++++++++--------- 6 files changed, 36 insertions(+), 30 deletions(-) diff --git a/apps/Cargo.toml b/apps/Cargo.toml index 45cdb9aa5eb..50c1f5b065f 100644 --- a/apps/Cargo.toml +++ b/apps/Cargo.toml @@ -65,7 +65,7 @@ abciplus = [ ] [dependencies] -namada = {path = "../shared", features = ["wasm-runtime", "ferveo-tpke", "rand", "secp256k1-sign-verify"]} +namada = {path = "../shared", features = ["wasm-runtime", "ferveo-tpke", "rand", "tendermint-rpc", "secp256k1-sign-verify"]} ark-serialize = "0.3.0" ark-std = "0.3.0" # branch = "bat/arse-merkle-tree" @@ -104,7 +104,7 @@ prost = "0.9.0" prost-types = "0.9.0" rand = {version = "0.8", default-features = false} rand_core = {version = "0.6", default-features = false} -rayon = "=1.5.1" +rayon = "=1.5.3" regex = "1.4.5" reqwest = "0.11.4" rlimit = "0.5.4" diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 2080b8d23de..92e06c96b97 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -1,5 +1,6 @@ //! 
Implementation of the `FinalizeBlock` ABCI++ method for the Shell +use namada::ledger::protocol; use namada::types::storage::{BlockHash, Header}; use super::governance::execute_governance_proposals; diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 28167d4ebac..ab3b131c377 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -30,7 +30,7 @@ use namada::ledger::storage::write_log::WriteLog; use namada::ledger::storage::{ DBIter, Sha256Hasher, Storage, StorageHasher, DB, }; -use namada::ledger::{ibc, pos}; +use namada::ledger::{ibc, pos, protocol}; use namada::proto::{self, Tx}; use namada::types::chain::ChainId; use namada::types::key::*; @@ -60,7 +60,7 @@ use crate::facade::tower_abci::{request, response}; use crate::node::ledger::events::Event; use crate::node::ledger::shims::abcipp_shim_types::shim; use crate::node::ledger::shims::abcipp_shim_types::shim::response::TxResult; -use crate::node::ledger::{protocol, storage, tendermint_node}; +use crate::node::ledger::{storage, tendermint_node}; #[allow(unused_imports)] use crate::wallet::ValidatorData; use crate::{config, wallet}; diff --git a/apps/src/lib/node/ledger/shell/queries.rs b/apps/src/lib/node/ledger/shell/queries.rs index 53587d5ebfc..a9435d565a1 100644 --- a/apps/src/lib/node/ledger/shell/queries.rs +++ b/apps/src/lib/node/ledger/shell/queries.rs @@ -2,11 +2,11 @@ use borsh::{BorshDeserialize, BorshSerialize}; use ferveo_common::TendermintValidator; +use namada::ledger::queries::{RequestCtx, ResponseQuery}; +use namada::ledger::storage_api; use namada::types::address::Address; -use namada::types::key; use namada::types::key::dkg_session_keys::DkgPublicKey; -use namada::types::storage::{Key, PrefixValue}; -use namada::types::token::{self, Amount}; +use namada::types::{key, token}; use super::*; use crate::facade::tendermint_proto::crypto::{ProofOp, ProofOps}; @@ -70,7 +70,7 @@ where &self, token: &Address, owner: &Address, - ) -> std::result::Result { + ) -> std::result::Result { let height = self.storage.get_block_height().0; let query_resp = self.read_storage_value( &token::balance_key(token, owner), diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 6469694ea9b..f763f1f7838 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -37,6 +37,7 @@ wasm-runtime = [ "loupe", "parity-wasm", "pwasm-utils", + "rayon", "wasmer-cache", "wasmer-compiler-singlepass", "wasmer-engine-dylib", @@ -101,6 +102,7 @@ pwasm-utils = {version = "0.18.0", optional = true} rand = {version = "0.8", optional = true} # TODO proptest rexports the RngCore trait but the re-implementations only work for version `0.8`. 
*sigh* rand_core = {version = "0.6", optional = true} +rayon = {version = "=1.5.3", optional = true} rust_decimal = "1.14.3" serde = {version = "1.0.125", features = ["derive"]} serde_json = "1.0.62" diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index ed776fe21af..86b2a302908 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -2,29 +2,31 @@ use std::collections::BTreeSet; use std::panic; -use namada::ledger::eth_bridge::vp::EthBridge; -use namada::ledger::gas::{self, BlockGasMeter, VpGasMeter}; -use namada::ledger::governance::GovernanceVp; -use namada::ledger::ibc::vp::{Ibc, IbcToken}; -use namada::ledger::native_vp::{self, NativeVp}; -use namada::ledger::parameters::{self, ParametersVp}; -use namada::ledger::pos::{self, PosVP}; -use namada::ledger::slash_fund::SlashFundVp; -use namada::ledger::storage::write_log::WriteLog; -use namada::ledger::storage::{DBIter, Storage, StorageHasher, DB}; -use namada::proto::{self, Tx}; -use namada::types::address::{Address, InternalAddress}; -use namada::types::storage; -use namada::types::transaction::{DecryptedTx, TxResult, TxType, VpsResult}; -use namada::vm::wasm::{TxCache, VpCache}; -use namada::vm::{self, wasm, WasmCacheAccess}; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use thiserror::Error; +use crate::ledger::eth_bridge::vp::EthBridge; +use crate::ledger::gas::{self, BlockGasMeter, VpGasMeter}; +use crate::ledger::governance::GovernanceVp; +use crate::ledger::ibc::vp::{Ibc, IbcToken}; +use crate::ledger::native_vp::{self, NativeVp}; +use crate::ledger::parameters::{self, ParametersVp}; +use crate::ledger::pos::{self, PosVP}; +use crate::ledger::slash_fund::SlashFundVp; +use crate::ledger::storage::write_log::WriteLog; +use crate::ledger::storage::{DBIter, Storage, StorageHasher, DB}; +use crate::proto::{self, Tx}; +use crate::types::address::{Address, InternalAddress}; +use crate::types::storage; +use crate::types::transaction::{DecryptedTx, TxResult, TxType, VpsResult}; +use crate::vm::wasm::{TxCache, VpCache}; +use crate::vm::{self, wasm, WasmCacheAccess}; + +#[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { #[error("Storage error: {0}")] - StorageError(namada::ledger::storage::Error), + StorageError(crate::ledger::storage::Error), #[error("Error decoding a transaction from bytes: {0}")] TxDecodingError(proto::Error), #[error("Transaction runner error: {0}")] @@ -38,7 +40,7 @@ pub enum Error { #[error("The address {0} doesn't exist")] MissingAddress(Address), #[error("IBC native VP: {0}")] - IbcNativeVpError(namada::ledger::ibc::vp::Error), + IbcNativeVpError(crate::ledger::ibc::vp::Error), #[error("PoS native VP: {0}")] PosNativeVpError(pos::vp::Error), #[error("PoS native VP panicked")] @@ -46,17 +48,18 @@ pub enum Error { #[error("Parameters native VP: {0}")] ParametersNativeVpError(parameters::Error), #[error("IBC Token native VP: {0}")] - IbcTokenNativeVpError(namada::ledger::ibc::vp::IbcTokenError), + IbcTokenNativeVpError(crate::ledger::ibc::vp::IbcTokenError), #[error("Governance native VP error: {0}")] - GovernanceNativeVpError(namada::ledger::governance::vp::Error), + GovernanceNativeVpError(crate::ledger::governance::vp::Error), #[error("SlashFund native VP error: {0}")] - SlashFundNativeVpError(namada::ledger::slash_fund::Error), + SlashFundNativeVpError(crate::ledger::slash_fund::Error), #[error("Ethereum bridge native VP error: {0}")] - EthBridgeNativeVpError(namada::ledger::eth_bridge::vp::Error), + 
EthBridgeNativeVpError(crate::ledger::eth_bridge::vp::Error), #[error("Access to an internal address {0} is forbidden")] AccessForbidden(InternalAddress), } +/// Result of applying a transaction pub type Result = std::result::Result; /// Apply a given transaction From 2b3653176771fb9dd5aaff6788781b2607a3913d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 6 Oct 2022 12:18:19 +0200 Subject: [PATCH 03/17] add deps for router macro and update `Cargo.lock`s This adds a new feature "async-client" and "tendermint-rpc" to shared crate that if enabled generate async client code for all router's handler methods, and implements it for tendermint-rpc client, respectively. --- Cargo.lock | 10 ++++++++-- shared/Cargo.toml | 16 ++++++++++++++++ wasm/Cargo.lock | 3 +++ wasm_for_tests/wasm_source/Cargo.lock | 3 +++ 4 files changed, 30 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 34ab8ab550d..445e824d8e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2917,6 +2917,7 @@ dependencies = [ "ark-ec", "ark-serialize", "assert_matches", + "async-trait", "bech32", "borsh", "byte-unit", @@ -2938,6 +2939,7 @@ dependencies = [ "loupe", "namada_proof_of_stake", "parity-wasm", + "paste", "pretty_assertions", "proptest", "prost", @@ -2945,6 +2947,7 @@ dependencies = [ "pwasm-utils", "rand 0.8.5", "rand_core 0.6.4", + "rayon", "rust_decimal", "serde 1.0.145", "serde_json", @@ -2955,8 +2958,11 @@ dependencies = [ "tendermint 0.23.6", "tendermint-proto 0.23.5", "tendermint-proto 0.23.6", + "tendermint-rpc 0.23.5", + "tendermint-rpc 0.23.6", "test-log", "thiserror", + "tokio", "tonic-build", "tracing 0.1.37", "tracing-subscriber 0.3.16", @@ -4083,9 +4089,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.5.1" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" +checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" dependencies = [ "autocfg 1.1.0", "crossbeam-deque", diff --git a/shared/Cargo.toml b/shared/Cargo.toml index f763f1f7838..d05e002230a 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -27,6 +27,7 @@ ibc-mocks-abcipp = [ ] # for integration tests and test utilies testing = [ + "async-client", "proptest", "rand", "rand_core", @@ -50,6 +51,15 @@ wasm-runtime = [ secp256k1-sign-verify = [ "libsecp256k1/hmac", ] +# Enable queries support for an async client +async-client = [ + "async-trait", +] +# tendermint-rpc support +tendermint-rpc = [ + "async-client", + "dep:tendermint-rpc", +] abcipp = [ "ibc-proto-abcipp", @@ -73,6 +83,7 @@ ark-serialize = "0.3" # We switch off "blake2b" because it cannot be compiled to wasm # branch = "bat/arse-merkle-tree" arse-merkle-tree = {package = "sparse-merkle-tree", git = "https://github.com/heliaxdev/sparse-merkle-tree", rev = "04ad1eeb28901b57a7599bbe433b3822965dabe8", default-features = false, features = ["std", "borsh"]} +async-trait = {version = "0.1.51", optional = true} bech32 = "0.8.0" borsh = "0.9.0" chrono = {version = "0.4.22", default-features = false, features = ["clock", "std"]} @@ -94,6 +105,7 @@ itertools = "0.10.0" loupe = {version = "0.1.3", optional = true} libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9", default-features = false, features = ["std", "static-context"]} parity-wasm = {version = "0.42.2", optional = true} +paste = 
"1.0.9" # A fork with state machine testing proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm", optional = true} prost = "0.9.0" @@ -111,8 +123,10 @@ sha2 = "0.9.3" tempfile = {version = "3.2.0", optional = true} # temporarily using fork work-around for https://github.com/informalsystems/tendermint-rs/issues/971 tendermint-abcipp = {package = "tendermint", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} +tendermint-rpc-abcipp = {package = "tendermint-rpc", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", features = ["http-client"], optional = true} tendermint-proto-abcipp = {package = "tendermint-proto", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} tendermint = {version = "0.23.6", optional = true} +tendermint-rpc = {version = "0.23.6", features = ["http-client"], optional = true} tendermint-proto = {version = "0.23.6", optional = true} thiserror = "1.0.30" tracing = "0.1.30" @@ -127,12 +141,14 @@ zeroize = "1.5.5" [dev-dependencies] assert_matches = "1.5.0" +async-trait = {version = "0.1.51"} byte-unit = "4.0.13" libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9"} pretty_assertions = "0.7.2" # A fork with state machine testing proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm"} test-log = {version = "0.2.7", default-features = false, features = ["trace"]} +tokio = {version = "1.8.2", default-features = false, features = ["rt", "macros"]} tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} [build-dependencies] diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index f9030a471a9..b83a53b664e 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -1357,6 +1357,7 @@ version = "0.8.1" dependencies = [ "ark-bls12-381", "ark-serialize", + "async-trait", "bech32", "borsh", "chrono", @@ -1373,12 +1374,14 @@ dependencies = [ "loupe", "namada_proof_of_stake", "parity-wasm", + "paste", "proptest", "prost", "prost-types", "pwasm-utils", "rand", "rand_core 0.6.4", + "rayon", "rust_decimal", "serde", "serde_json", diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index b82f3b3d599..1259bd5cc4f 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -1357,6 +1357,7 @@ version = "0.8.1" dependencies = [ "ark-bls12-381", "ark-serialize", + "async-trait", "bech32", "borsh", "chrono", @@ -1373,12 +1374,14 @@ dependencies = [ "loupe", "namada_proof_of_stake", "parity-wasm", + "paste", "proptest", "prost", "prost-types", "pwasm-utils", "rand", "rand_core 0.6.4", + "rayon", "rust_decimal", "serde", "serde_json", From 0c9b0bce9307555782742db0a106cbc38447730f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 6 Oct 2022 12:20:45 +0200 Subject: [PATCH 04/17] shared: add new queries router macro to replicate handwritten RPC paths --- shared/src/ledger/mod.rs | 1 + shared/src/ledger/queries/mod.rs | 519 ++++++++++++++++ shared/src/ledger/queries/router.rs | 881 ++++++++++++++++++++++++++++ shared/src/ledger/queries/types.rs | 171 ++++++ 4 files changed, 1572 insertions(+) create mode 100644 shared/src/ledger/queries/mod.rs 
create mode 100644 shared/src/ledger/queries/router.rs create mode 100644 shared/src/ledger/queries/types.rs diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index 22d1b175251..cbe2528b76d 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -9,6 +9,7 @@ pub mod parameters; pub mod pos; #[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] pub mod protocol; +pub mod queries; pub mod slash_fund; pub mod storage; pub mod storage_api; diff --git a/shared/src/ledger/queries/mod.rs b/shared/src/ledger/queries/mod.rs new file mode 100644 index 00000000000..0c66faca1e6 --- /dev/null +++ b/shared/src/ledger/queries/mod.rs @@ -0,0 +1,519 @@ +//! Ledger read-only queries can be handled and dispatched via the [`RPC`] +//! defined via `router!` macro. + +use tendermint_proto::crypto::{ProofOp, ProofOps}; +#[cfg(any(test, feature = "async-client"))] +pub use types::Client; +pub use types::{ + EncodedResponseQuery, RequestCtx, RequestQuery, ResponseQuery, Router, +}; + +use super::storage::{DBIter, StorageHasher, DB}; +use super::storage_api::{self, ResultExt, StorageRead}; +use crate::types::storage::{self, Epoch, PrefixValue}; +use crate::types::transaction::TxResult; +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +use crate::types::transaction::{DecryptedTx, TxType}; + +#[macro_use] +mod router; +mod types; + +// Most commonly expected patterns should be declared first +router! {RPC, + // Epoch of the last committed block + ( "epoch" ) -> Epoch = epoch, + + // Raw storage access - read value + ( "value" / [storage_key: storage::Key] ) + -> Option> = storage_value, + + // Dry run a transaction + ( "dry_run_tx" ) -> TxResult = dry_run_tx, + + // Raw storage access - prefix iterator + ( "prefix" / [storage_key: storage::Key] ) + -> Vec = storage_prefix, + + // Raw storage access - is given storage key present? + ( "has_key" / [storage_key: storage::Key] ) + -> bool = storage_has_key, +} + +/// Handle RPC query request in the ledger. On success, returns response with +/// borsh-encoded data. +pub fn handle_path( + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + RPC.handle(ctx, request) +} + +// Handler helpers: + +/// For queries that only support latest height, check that the given height is +/// not different from latest height, otherwise return an error. +pub fn require_latest_height( + ctx: &RequestCtx<'_, D, H>, + request: &RequestQuery, +) -> storage_api::Result<()> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + if request.height != ctx.storage.last_height { + return Err(storage_api::Error::new_const( + "This query doesn't support arbitrary block heights, only the \ + latest committed block height ('0' can be used as a special \ + value that means the latest block height)", + )); + } + Ok(()) +} + +/// For queries that only support latest height, check that the given height is +/// not different from latest height, otherwise return an error. 
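+/// Likewise, for queries that don't support proofs, check that the request
+/// doesn't ask for one, otherwise return an error.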
+pub fn require_no_proof(request: &RequestQuery) -> storage_api::Result<()> { + if request.prove { + return Err(storage_api::Error::new_const( + "This query doesn't support proofs", + )); + } + Ok(()) +} + +// Handlers: + +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +fn dry_run_tx( + mut ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, +) -> storage_api::Result> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + use super::gas::BlockGasMeter; + use super::storage::write_log::WriteLog; + use crate::proto::Tx; + + let mut gas_meter = BlockGasMeter::default(); + let mut write_log = WriteLog::default(); + let tx = Tx::try_from(&request.data[..]).into_storage_result()?; + let tx = TxType::Decrypted(DecryptedTx::Decrypted(tx)); + let data = super::protocol::apply_tx( + tx, + request.data.len(), + &mut gas_meter, + &mut write_log, + ctx.storage, + &mut ctx.vp_wasm_cache, + &mut ctx.tx_wasm_cache, + ) + .into_storage_result()?; + Ok(ResponseQuery { + data, + ..ResponseQuery::default() + }) +} + +#[cfg(not(all(feature = "wasm-runtime", feature = "ferveo-tpke")))] +fn dry_run_tx( + _ctx: RequestCtx<'_, D, H>, + _request: &RequestQuery, +) -> storage_api::Result> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + unimplemented!( + "dry_run_tx request handler requires \"wasm-runtime\" and \ + \"ferveo-tpke\" features enabled." + ) +} + +fn epoch( + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, +) -> storage_api::Result> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + require_latest_height(&ctx, request)?; + require_no_proof(request)?; + + let data = ctx.storage.last_epoch; + Ok(ResponseQuery { + data, + ..Default::default() + }) +} + +fn storage_value( + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, + storage_key: storage::Key, +) -> storage_api::Result>>> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + match ctx + .storage + .read_with_height(&storage_key, request.height) + .into_storage_result()? 
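    // When `request.prove` is set, the response carries an existence proof for
    // a present value, or a non-existence proof when the key has no value.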
+ { + (Some(value), _gas) => { + let proof = if request.prove { + let proof = ctx + .storage + .get_existence_proof( + &storage_key, + value.clone().into(), + request.height, + ) + .into_storage_result()?; + Some(proof.into()) + } else { + None + }; + Ok(ResponseQuery { + data: Some(value), + proof_ops: proof, + ..Default::default() + }) + } + (None, _gas) => { + let proof = if request.prove { + let proof = ctx + .storage + .get_non_existence_proof(&storage_key, request.height) + .into_storage_result()?; + Some(proof.into()) + } else { + None + }; + Ok(ResponseQuery { + data: None, + proof_ops: proof, + info: format!("No value found for key: {}", storage_key), + }) + } + } +} + +fn storage_prefix( + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, + storage_key: storage::Key, +) -> storage_api::Result>> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + require_latest_height(&ctx, request)?; + + let (iter, _gas) = ctx.storage.iter_prefix(&storage_key); + let data: storage_api::Result> = iter + .map(|(key, value, _gas)| { + let key = storage::Key::parse(key).into_storage_result()?; + Ok(PrefixValue { key, value }) + }) + .collect(); + let data = data?; + let proof_ops = if request.prove { + let mut ops = vec![]; + for PrefixValue { key, value } in &data { + let proof = ctx + .storage + .get_existence_proof(key, value.clone().into(), request.height) + .into_storage_result()?; + let mut cur_ops: Vec = + proof.ops.into_iter().map(|op| op.into()).collect(); + ops.append(&mut cur_ops); + } + // ops is not empty in this case + Some(ProofOps { ops }) + } else { + None + }; + Ok(ResponseQuery { + data, + proof_ops, + ..Default::default() + }) +} + +fn storage_has_key( + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, + storage_key: storage::Key, +) -> storage_api::Result> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + require_latest_height(&ctx, request)?; + require_no_proof(request)?; + + let data = StorageRead::has_key(ctx.storage, &storage_key)?; + Ok(ResponseQuery { + data, + ..Default::default() + }) +} + +#[cfg(any(test, feature = "tendermint-rpc"))] +/// Provides [`Client`] implementation for Tendermint RPC client +pub mod tm { + use thiserror::Error; + + use super::*; + use crate::types::storage::BlockHeight; + + #[allow(missing_docs)] + #[derive(Error, Debug)] + pub enum Error { + #[error("{0}")] + Tendermint(#[from] tendermint_rpc::Error), + #[error("Decoding error: {0}")] + Decoding(#[from] std::io::Error), + #[error("Info log: {0}, error code: {1}")] + Query(String, u32), + #[error("Invalid block height: {0} (overflown i64)")] + InvalidHeight(BlockHeight), + } + + #[async_trait::async_trait] + impl Client for tendermint_rpc::HttpClient { + type Error = Error; + + async fn request( + &self, + path: String, + data: Option>, + height: Option, + prove: bool, + ) -> Result { + let data = data.unwrap_or_default(); + let height = height + .map(|height| { + tendermint::block::Height::try_from(height.0) + .map_err(|_err| Error::InvalidHeight(height)) + }) + .transpose()?; + let response = tendermint_rpc::Client::abci_query( + self, + // TODO open the private Path constructor in tendermint-rpc + Some(std::str::FromStr::from_str(&path).unwrap()), + data, + height, + prove, + ) + .await?; + match response.code { + tendermint::abci::Code::Ok => Ok(EncodedResponseQuery { + data: response.value, + info: response.info, + proof_ops: response.proof.map(Into::into), + }), + 
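                // Non-zero ABCI response codes are surfaced as `Error::Query`,
                // keeping the node's `info` log string and the numeric code.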
tendermint::abci::Code::Err(code) => { + Err(Error::Query(response.info, code)) + } + } + } + } +} + +/// Queries testing helpers +#[cfg(any(test, feature = "testing"))] +mod testing { + use tempfile::TempDir; + + use super::*; + use crate::ledger::storage::testing::TestStorage; + use crate::types::storage::BlockHeight; + use crate::vm::wasm::{self, TxCache, VpCache}; + use crate::vm::WasmCacheRoAccess; + + /// A test client that has direct access to the storage + pub struct TestClient + where + RPC: Router, + { + /// RPC router + pub rpc: RPC, + /// storage + pub storage: TestStorage, + /// VP wasm compilation cache + pub vp_wasm_cache: VpCache, + /// tx wasm compilation cache + pub tx_wasm_cache: TxCache, + /// VP wasm compilation cache directory + pub vp_cache_dir: TempDir, + /// tx wasm compilation cache directory + pub tx_cache_dir: TempDir, + } + + impl TestClient + where + RPC: Router, + { + #[allow(dead_code)] + /// Initialize a test client for the given root RPC router + pub fn new(rpc: RPC) -> Self { + // Initialize the `TestClient` + let storage = TestStorage::default(); + let (vp_wasm_cache, vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + let (tx_wasm_cache, tx_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + Self { + rpc, + storage, + vp_wasm_cache: vp_wasm_cache.read_only(), + tx_wasm_cache: tx_wasm_cache.read_only(), + vp_cache_dir, + tx_cache_dir, + } + } + } + + #[async_trait::async_trait] + impl Client for TestClient + where + RPC: Router + Sync, + { + type Error = std::io::Error; + + async fn request( + &self, + path: String, + data: Option>, + height: Option, + prove: bool, + ) -> Result { + let data = data.unwrap_or_default(); + let height = height.unwrap_or_default(); + // Handle a path by invoking the `RPC.handle` directly with the + // borrowed storage + let request = RequestQuery { + data, + path, + height, + prove, + }; + let ctx = RequestCtx { + storage: &self.storage, + vp_wasm_cache: self.vp_wasm_cache.clone(), + tx_wasm_cache: self.tx_wasm_cache.clone(), + }; + let response = self.rpc.handle(ctx, &request).unwrap(); + Ok(response) + } + } +} + +#[cfg(test)] +mod test { + use borsh::BorshDeserialize; + + use super::testing::TestClient; + use super::*; + use crate::ledger::storage_api::StorageWrite; + use crate::proto::Tx; + use crate::types::{address, token}; + + const TX_NO_OP_WASM: &str = "../wasm_for_tests/tx_no_op.wasm"; + + #[test] + fn test_queries_router_paths() { + let path = RPC.epoch_path(); + assert_eq!("/epoch", path); + + let token_addr = address::testing::established_address_1(); + let owner = address::testing::established_address_2(); + let key = token::balance_key(&token_addr, &owner); + let path = RPC.storage_value_path(&key); + assert_eq!(format!("/value/{}", key), path); + + let path = RPC.dry_run_tx_path(); + assert_eq!("/dry_run_tx", path); + + let path = RPC.storage_prefix_path(&key); + assert_eq!(format!("/prefix/{}", key), path); + + let path = RPC.storage_has_key_path(&key); + assert_eq!(format!("/has_key/{}", key), path); + } + + #[tokio::test] + async fn test_queries_router_with_client() -> storage_api::Result<()> { + // Initialize the `TestClient` + let mut client = TestClient::new(RPC); + + // Request last committed epoch + let read_epoch = RPC.epoch(&client).await.unwrap(); + let current_epoch = client.storage.last_epoch; + assert_eq!(current_epoch, read_epoch); + + // Request dry run tx + let tx_no_op = std::fs::read(TX_NO_OP_WASM).expect("cannot load wasm"); + let tx = Tx::new(tx_no_op, 
None); + let tx_bytes = tx.to_bytes(); + let result = RPC + .dry_run_tx_with_options(&client, Some(tx_bytes), None, false) + .await + .unwrap(); + assert!(result.data.is_accepted()); + + // Request storage value for a balance key ... + let token_addr = address::testing::established_address_1(); + let owner = address::testing::established_address_2(); + let balance_key = token::balance_key(&token_addr, &owner); + // ... there should be no value yet. + let read_balance = + RPC.storage_value(&client, &balance_key).await.unwrap(); + assert!(read_balance.is_none()); + + // Request storage prefix iterator + let balance_prefix = token::balance_prefix(&token_addr); + let read_balances = + RPC.storage_prefix(&client, &balance_prefix).await.unwrap(); + assert!(read_balances.is_empty()); + + // Request storage has key + let has_balance_key = + RPC.storage_has_key(&client, &balance_key).await.unwrap(); + assert!(!has_balance_key); + + // Then write some balance ... + let balance = token::Amount::from(1000); + StorageWrite::write(&mut client.storage, &balance_key, balance)?; + // ... there should be the same value now + let read_balance = + RPC.storage_value(&client, &balance_key).await.unwrap(); + assert_eq!( + balance, + token::Amount::try_from_slice(&read_balance.unwrap()).unwrap() + ); + + // Request storage prefix iterator + let balance_prefix = token::balance_prefix(&token_addr); + let read_balances = + RPC.storage_prefix(&client, &balance_prefix).await.unwrap(); + assert_eq!(read_balances.len(), 1); + + // Request storage has key + let has_balance_key = + RPC.storage_has_key(&client, &balance_key).await.unwrap(); + assert!(has_balance_key); + + Ok(()) + } +} diff --git a/shared/src/ledger/queries/router.rs b/shared/src/ledger/queries/router.rs new file mode 100644 index 00000000000..69888cb9356 --- /dev/null +++ b/shared/src/ledger/queries/router.rs @@ -0,0 +1,881 @@ +//! The main export of this module is the `router!` macro, which can be used to +//! define compile time tree patterns for a router in which the terminal leaves +//! are connected to the given handler functions. +//! +//! Note that for debugging pattern matching issue, you can uncomment +//! all the `println!`s in this module. + +use thiserror::Error; + +/// Router error. +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum Error { + #[error("Found no matching pattern for the given path {0}")] + WrongPath(String), +} + +/// Find the index of a next slash after the given `start` index in the path. +/// When there are no more slashes, returns the index after the end of the path. +/// +/// # Panics +/// The given `start` must be < `path.len()`. +pub fn find_next_slash_index(path: &str, start: usize) -> usize { + path[start..] + .find('/') + // Offset by the starting position + .map(|i| start + i) + // If not found, go to the end of path + .unwrap_or(path.len()) +} + +/// Invoke the sub-handler or call the handler function with the matched +/// arguments generated by `try_match_segments`. +macro_rules! handle_match { + // Nested router + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + (sub $router:tt), ( $( $matched_args:ident, )* ), + ) => { + // not used anymore - silence the warning + let _ = $end; + // Undo last '/' advance, the next pattern has to start with `/`. 
+ // This cannot underflow because path cannot be empty and must start + // with `/` + $start -= 1; + // Invoke `handle` on the sub router + return $router.internal_handle($ctx, $request, $start) + }; + + // Handler function + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + $handle:tt, ( $( $matched_args:ident, )* ), + ) => { + // check that we're at the end of the path - trailing slash is optional + if !($end == $request.path.len() || + // ignore trailing slashes + $end == $request.path.len() - 1 && &$request.path[$end..] == "/") { + // we're not at the end, no match + println!("Not fully matched"); + break + } + // If you get a compile error from here with `expected function, found + // queries::Storage`, you're probably missing the marker `(sub _)` + let result = $handle($ctx, $request, $( $matched_args ),* )?; + let data = borsh::BorshSerialize::try_to_vec(&result.data).into_storage_result()?; + return Ok($crate::ledger::queries::EncodedResponseQuery { + data, + info: result.info, + proof_ops: result.proof_ops, + }); + }; +} + +/// Using TT muncher pattern on the `$tail` pattern, this macro recursively +/// generates path matching logic that `break`s if some parts are unmatched. +macro_rules! try_match_segments { + // sub-pattern handle - this should only be invoked if the current + // $pattern is already matched + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + { $( $sub_pattern:tt $( -> $_sub_return_ty:path )? = $handle:tt, )* }, + $matched_args:tt, + () + ) => { + // Try to match each sub-patten + $( + // This loop never repeats, it's only used for a breaking + // mechanism when a $pattern is not matched to skip to the + // next one, if any + loop { + #[allow(unused_mut)] + let mut $start = $start; + let mut $end = $end; + // Try to match, parse args and invoke $handle, will + // break the `loop` not matched + try_match_segments!($ctx, $request, $start, $end, + $handle, $matched_args, $sub_pattern + ); + } + )* + }; + + // Terminal tail call, invoked after when all the args in the current + // pattern are matched and the $handle is not sub-pattern + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:tt, + ( $( $matched_args:ident, )* ), + () + ) => { + handle_match!($ctx, $request, $start, $end, $handle, ( $( $matched_args, )* ), ); + }; + + // Try to match an untyped argument, declares the expected $arg as &str + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:ident, + ( $( $matched_args:ident, )* ), + ( + [$arg:ident] + $( / $( $tail:tt)/ * )? + ) + ) => { + let $arg = &$request.path[$start..$end]; + // Advanced index past the matched arg + $start = $end; + // advance past next '/', if any + if $start + 1 < $request.path.len() { + $start += 1; + } + $end = find_next_slash_index(&$request.path, $start); + try_match_segments!($ctx, $request, $start, $end, $handle, + ( $( $matched_args, )* $arg, ), ( $( $( $tail )/ * )? ) ); + }; + + // Try to match and parse a typed argument like the case below, but with + // the argument optional. + // Declares the expected $arg into type $t, if it can be parsed. + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:tt, + ( $( $matched_args:ident, )* ), + ( + [$arg:ident : opt $arg_ty:ty] + $( / $( $tail:tt)/ * )? + ) + ) => { + let $arg: Option<$arg_ty> = match $request.path[$start..$end].parse::<$arg_ty>() { + Ok(parsed) => { + // Only advance if optional argument is present, otherwise stay + // in the same position for the next match, if any. 
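                // (e.g. for `[a3: opt token::Amount] / "xyz"`, the path
                // `/123/xyz` consumes `123` here and goes on to match `xyz`)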
+ + $start = $end; + // advance past next '/', if any + if $start + 1 < $request.path.len() { + $start += 1; + } + $end = find_next_slash_index(&$request.path, $start); + + Some(parsed) + }, + Err(_) => + { + // If arg cannot be parsed, ignore it because it's optional + None + } + }; + try_match_segments!($ctx, $request, $start, $end, $handle, + ( $( $matched_args, )* $arg, ), ( $( $( $tail )/ * )? ) ); + }; + + // Special case of the pattern below. When there are no more args in the + // tail and the handle isn't a sub-router (its fragment is ident), we try + // to match the rest of the path till the end. This is specifically needed + // for storage methods, which have `storage::Key` param that includes + // path-like slashes. + // + // Try to match and parse a typed argument, declares the expected $arg into + // type $t, if it can be parsed + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:ident, + ( $( $matched_args:ident, )* ), + ( + [$arg:ident : $arg_ty:ty] + ) + ) => { + let $arg: $arg_ty; + $end = $request.path.len(); + match $request.path[$start..$end].parse::<$arg_ty>() { + Ok(parsed) => { + println!("Parsed {}", parsed); + $arg = parsed + }, + Err(_) => + { + println!("Cannot parse {} from {}", stringify!($arg_ty), &$request.path[$start..$end]); + // If arg cannot be parsed, try to skip to next pattern + break + } + } + // Invoke the terminal pattern + try_match_segments!($ctx, $request, $start, $end, $handle, + ( $( $matched_args, )* $arg, ), () ); + }; + + // Try to match and parse a typed argument, declares the expected $arg into + // type $t, if it can be parsed + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:tt, + ( $( $matched_args:ident, )* ), + ( + [$arg:ident : $arg_ty:ty] + $( / $( $tail:tt)/ * )? + ) + ) => { + let $arg: $arg_ty; + match $request.path[$start..$end].parse::<$arg_ty>() { + Ok(parsed) => { + $arg = parsed + }, + Err(_) => + { + println!("Cannot parse {} from {}", stringify!($arg_ty), &$request.path[$start..$end]); + // If arg cannot be parsed, try to skip to next pattern + break + } + } + $start = $end; + // advance past next '/', if any + if $start + 1 < $request.path.len() { + $start += 1; + } + $end = find_next_slash_index(&$request.path, $start); + try_match_segments!($ctx, $request, $start, $end, $handle, + ( $( $matched_args, )* $arg, ), ( $( $( $tail )/ * )? ) ); + }; + + // Try to match an expected string literal + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:tt, + ( $( $matched_args:ident, )* ), + ( + $expected:literal + $( / $( $tail:tt)/ * )? + ) + ) => { + if &$request.path[$start..$end] == $expected { + // Advanced index past the matched arg + println!("Matched literal {}", $expected); + $start = $end; + } else { + println!("{} doesn't match literal {}", &$request.path[$start..$end], $expected); + // Try to skip to next pattern + break; + } + // advance past next '/', if any + if $start + 1 < $request.path.len() { + $start += 1; + } + $end = find_next_slash_index(&$request.path, $start); + try_match_segments!($ctx, $request, $start, $end, $handle, + ( $( $matched_args, )* ), ( $( $( $tail )/ * )? ) ); + }; +} + +/// Generate a function that tries to match the given pattern and `break`s if +/// any of its parts are unmatched. This layer will check that the path starts +/// with `/` and then invoke `try_match_segments` TT muncher that goes through +/// the patterns. +macro_rules! 
try_match { + ($ctx:ident, $request:ident, $start:ident, $handle:tt, $segments:tt) => { + // check that the initial char is '/' + if $request.path.is_empty() || &$request.path[..1] != "/" { + println!("Missing initial slash"); + break; + } + // advance past initial '/' + $start += 1; + // Path is too short to match + if $start >= $request.path.len() { + println!("Path is too short"); + break; + } + let mut end = find_next_slash_index(&$request.path, $start); + try_match_segments!( + $ctx, + $request, + $start, + end, + $handle, + (), + $segments + ); + }; +} + +/// Convert literal pattern into a `&[&'static str]` +// TODO sub router pattern is not yet used +#[allow(unused_macros)] +macro_rules! pattern_to_prefix { + ( ( $( $pattern:literal )/ * ) ) => { + &[$( $pattern ),*] + }; + ( $pattern:tt ) => { + compile_error!("sub-router cannot have non-literal prefix patterns") + }; +} + +/// Turn patterns and their handlers into methods for the router, where each +/// dynamic pattern is turned into a parameter for the method. +macro_rules! pattern_and_handler_to_method { + // terminal rule + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + // $( $return_type:path )?, + $return_type:path, + $handle:tt, + () + ) => { + // paste! used to construct the `fn $handle_path`'s name. + paste::paste! { + #[allow(dead_code)] + #[doc = "Get a path to query `" $handle "`."] + pub fn [<$handle _path>](&self, $( $param: &$param_ty ),* ) -> String { + itertools::join( + [ Some(std::borrow::Cow::from(&self.prefix)), $( $prefix ),* ] + .into_iter() + .filter_map(|x| x), "/") + } + + #[allow(dead_code)] + #[allow(clippy::too_many_arguments)] + #[cfg(any(test, feature = "async-client"))] + #[doc = "Request a simple borsh-encoded value from `" $handle "`, \ + without any additional request data, specified block height or \ + proof."] + pub async fn $handle(&self, client: &CLIENT, + $( $param: &$param_ty ),* + ) + -> std::result::Result< + $return_type, + ::Error + > + where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + let path = self.[<$handle _path>]( $( $param ),* ); + + let data = client.simple_request(path).await?; + + let decoded: $return_type = + borsh::BorshDeserialize::try_from_slice(&data[..])?; + Ok(decoded) + } + + #[allow(dead_code)] + #[allow(clippy::too_many_arguments)] + #[cfg(any(test, feature = "async-client"))] + #[doc = "Request value with optional data (used for e.g. \ + `dry_run_tx`), optionally specified height (supported for \ + `storage_value`) and optional proof (supported for \ + `storage_value` and `storage_prefix`) from `" $handle "`."] + pub async fn [<$handle _with_options>](&self, client: &CLIENT, + data: Option>, + height: Option<$crate::types::storage::BlockHeight>, + prove: bool, + $( $param: &$param_ty ),* + ) + -> std::result::Result< + $crate::ledger::queries::ResponseQuery<$return_type>, + ::Error + > + where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + let path = self.[<$handle _path>]( $( $param ),* ); + + let $crate::ledger::queries::ResponseQuery { + data, info, proof_ops + } = client.request(path, data, height, prove).await?; + + let decoded: $return_type = + borsh::BorshDeserialize::try_from_slice(&data[..])?; + + Ok($crate::ledger::queries::ResponseQuery { + data: decoded, + info, + proof_ops, + }) + } + } + }; + + // sub-pattern + ( + $param:tt + $prefix:tt + $( $_return_type:path )?, + { $( $sub_pattern:tt $( -> $sub_return_ty:path )? 
= $handle:tt, )* }, + $pattern:tt + ) => { + $( + // join pattern with each sub-pattern + pattern_and_handler_to_method!( + $param + $prefix + $( $sub_return_ty )?, $handle, $pattern, $sub_pattern + ); + )* + }; + + // literal string arg + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $( $return_type:path )?, + $handle:tt, + ( $pattern:literal $( / $tail:tt )* ) + ) => { + pattern_and_handler_to_method!( + ( $( $param: $param_ty ),* ) + [ $( { $prefix }, )* { std::option::Option::Some(std::borrow::Cow::from($pattern)) } ] + $( $return_type )?, $handle, ( $( $tail )/ * ) + ); + }; + + // untyped arg + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $( $return_type:path )?, + $handle:tt, + ( [$name:tt] $( / $tail:tt )* ) + ) => { + pattern_and_handler_to_method!( + ( $( $param: $param_ty, )* $name: str ) + [ $( { $prefix }, )* { std::option::Option::Some(std::borrow::Cow::from($name)) } ] + $( $return_type )?, $handle, ( $( $tail )/ * ) + ); + }; + + // typed arg + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $( $return_type:path )?, + $handle:tt, + ( [$name:tt: $type:ty] $( / $tail:tt )* ) + ) => { + pattern_and_handler_to_method!( + ( $( $param: $param_ty, )* $name: $type ) + [ $( { $prefix }, )* { std::option::Option::Some(std::borrow::Cow::from($name.to_string())) } ] + $( $return_type )?, $handle, ( $( $tail )/ * ) + ); + }; + + // opt typed arg + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $( $return_type:path )?, + $handle:tt, + ( [$name:tt: opt $type:ty] $( / $tail:tt )* ) + ) => { + pattern_and_handler_to_method!( + ( $( $param: $param_ty, )* $name: std::option::Option<$type> ) + [ $( { $prefix }, )* { $name.map(|arg| std::borrow::Cow::from(arg.to_string())) } ] + $( $return_type )?, $handle, ( $( $tail )/ * ) + ); + }; + + // join pattern with sub-pattern + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $( $return_type:path )?, + $handle:tt, + ( $( $pattern:tt )/ * ), ( $( $sub_pattern:tt )/ * ) + ) => { + pattern_and_handler_to_method!( + ( $( $param: $param_ty ),* ) + [ $( { $prefix }, )* ] + $( $return_type )?, + $handle, ( $( $pattern / )* $( $sub_pattern )/ * ) + ); + }; +} + +/// TT muncher macro that generates a `struct $name` with methods for all its +/// handlers. +macro_rules! router_type { + // terminal rule + ($name:ident { $( $methods:item )* }, ) => { + paste::paste! { + #[doc = "`" $name "`path router type"] + pub struct $name { + prefix: String, + } + + impl $name { + #[doc = "Construct this router as a root router"] + const fn new() -> Self { + Self { + prefix: String::new(), + } + } + + #[allow(dead_code)] + #[doc = "Construct this router as a sub-router at the given prefix path"] + const fn sub(prefix: String) -> Self { + Self { + prefix, + } + } + + // paste the generated methods + $( $methods )* + } + } + }; + + // a sub router - recursion + ( + $name:ident { $( $methods:item )* }, + $pattern:tt = (sub $router:ident) + $( ,$tail_pattern:tt $( -> $tail_return_type:path )? = $tail:tt )* + ) => { + paste::paste! { + router_type!{ + $name { + #[doc = "`" $name "` sub-router"] + pub fn [<$router:camel:snake>](&self) -> [<$router:camel>] { + // prefix for a sub can only contain literals + let current_prefix: &[&'static str] = pattern_to_prefix!($pattern); + let path = [&[self.prefix.as_str()][..], current_prefix].concat().join("/"); + [<$router:camel>]::sub(path) + } + $( $methods )* + }, + $( $tail_pattern $( -> $tail_return_type )? 
= $tail ),* + } + } + }; + + // a sub-pattern - add a method for each handle inside it + ( + $name:ident + { $( $methods:item )* }, + $pattern:tt = { $( $sub_pattern:tt $( -> $sub_return_ty:path )? = $handle:tt, )* } + $( ,$tail_pattern:tt $( -> $tail_return_type:path )? = $tail:tt )* + ) => { + router_type!{ + $name { + $( + // join pattern with each sub-pattern + pattern_and_handler_to_method!( () [] $( $sub_return_ty )?, $handle, + $pattern, $sub_pattern + ); + )* + $( $methods )* + }, + $( $tail_pattern $( -> $tail_return_type )? = $tail ),* + } + }; + + // pattern with a handle - add a method for the handle + ( + $name:ident + { $( $methods:item )* }, + $pattern:tt -> $return_type:path = $handle:tt + $( ,$tail_pattern:tt $( -> $tail_return_type:path )? = $tail:tt )* + ) => { + router_type!{ + $name { + pattern_and_handler_to_method!( () [] $return_type, $handle, $pattern ); + $( $methods )* + }, + $( $tail_pattern $( -> $tail_return_type )? = $tail ),* + } + }; +} + +/// Compile time tree patterns router with type-safe dynamic parameter parsing, +/// automatic routing, type-safe path constructors and optional client query +/// methods (enabled with `feature = "async-client"`). +/// +/// The `router!` macro implements greedy matching algorithm. +#[macro_export] +macro_rules! router { + { $name:ident, $( $pattern:tt $( -> $return_type:path )? = $handle:tt , )* } => ( + + // `paste!` is used to convert the $name cases for a derived type and function name + paste::paste! { + + router_type!{[<$name:camel>] {}, $( $pattern $( -> $return_type )? = $handle ),* } + + impl $crate::ledger::queries::Router for [<$name:camel>] { + // TODO: for some patterns, there's unused assignment of `$end` + #[allow(unused_assignments)] + fn internal_handle( + &self, + ctx: $crate::ledger::queries::RequestCtx<'_, D, H>, + request: &$crate::ledger::queries::RequestQuery, + start: usize + ) -> $crate::ledger::storage_api::Result<$crate::ledger::queries::EncodedResponseQuery> + where + D: 'static + $crate::ledger::storage::DB + for<'iter> $crate::ledger::storage::DBIter<'iter> + Sync, + H: 'static + $crate::ledger::storage::StorageHasher + Sync, + { + + // Import for `.into_storage_result()` + use $crate::ledger::storage_api::ResultExt; + + // Import helper from this crate used inside the macros + use $crate::ledger::queries::router::find_next_slash_index; + + $( + // This loop never repeats, it's only used for a breaking + // mechanism when a $pattern is not matched to skip to the + // next one, if any + loop { + let mut start = start; + // Try to match, parse args and invoke $handle, will + // break the `loop` not matched + try_match!(ctx, request, start, $handle, $pattern); + } + )* + + return Err( + $crate::ledger::queries::router::Error::WrongPath(request.path.clone())) + .into_storage_result(); + } + } + + #[doc = "`" $name "` path router"] + pub const $name: [<$name:camel>] = [<$name:camel>]::new(); + } + + ); +} + +/// You can expand the `handlers!` macro invocation with e.g.: +/// ```shell +/// cargo expand ledger::queries::router::test_rpc_handlers --features "ferveo-tpke, ibc-mocks, testing, wasm-runtime, tendermint-rpc" --tests --lib +/// ``` +#[cfg(test)] +mod test_rpc_handlers { + use crate::ledger::queries::{RequestCtx, RequestQuery, ResponseQuery}; + use crate::ledger::storage::{DBIter, StorageHasher, DB}; + use crate::ledger::storage_api; + use crate::types::storage::Epoch; + use crate::types::token; + + /// A little macro to generate boilerplate fo RPC handler functions. 
+ /// These are implemented to return their name as a String, joined by + /// slashes with their argument values turned `to_string()`, if any. + macro_rules! handlers { + ( + // name and params, if any + $( $name:ident $( ( $( $param:ident: $param_ty:ty ),* ) )? ),* + // optional trailing comma + $(,)? ) => { + $( + pub fn $name( + _ctx: RequestCtx<'_, D, H>, + _request: &RequestQuery, + $( $( $param: $param_ty ),* )? + ) -> storage_api::Result> + where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + { + let data = stringify!($name).to_owned(); + $( $( + let data = format!("{data}/{}", $param); + )* )? + Ok(ResponseQuery { + data, + ..ResponseQuery::default() + }) + } + )* + }; + } + + // Generate handler functions for the router below + handlers!( + a, + b0i, + b0ii, + b1, + b2i(balance: token::Amount), + b3(a1: token::Amount, a2: token::Amount, a3: token::Amount), + b3i(a1: token::Amount, a2: token::Amount, a3: token::Amount), + b3ii(a1: token::Amount, a2: token::Amount, a3: token::Amount), + x, + y(untyped_arg: &str), + z(untyped_arg: &str), + ); + + /// This handler is hand-written, because the test helper macro doesn't + /// support optional args. + pub fn b3iii( + _ctx: RequestCtx<'_, D, H>, + _request: &RequestQuery, + a1: token::Amount, + a2: token::Amount, + a3: Option, + ) -> storage_api::Result> + where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + { + let data = "b3iii".to_owned(); + let data = format!("{data}/{}", a1); + let data = format!("{data}/{}", a2); + let data = a3.map(|a3| format!("{data}/{}", a3)).unwrap_or(data); + Ok(ResponseQuery { + data, + ..ResponseQuery::default() + }) + } + + /// This handler is hand-written, because the test helper macro doesn't + /// support optional args. + pub fn b3iiii( + _ctx: RequestCtx<'_, D, H>, + _request: &RequestQuery, + a1: token::Amount, + a2: token::Amount, + a3: Option, + a4: Option, + ) -> storage_api::Result> + where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + { + let data = "b3iiii".to_owned(); + let data = format!("{data}/{}", a1); + let data = format!("{data}/{}", a2); + let data = a3.map(|a3| format!("{data}/{}", a3)).unwrap_or(data); + let data = a4.map(|a4| format!("{data}/{}", a4)).unwrap_or(data); + Ok(ResponseQuery { + data, + ..ResponseQuery::default() + }) + } +} + +/// You can expand the `router!` macro invocation with e.g.: +/// ```shell +/// cargo expand ledger::queries::router::test_rpc --features "ferveo-tpke, ibc-mocks, testing, wasm-runtime, tendermint-rpc" --tests --lib +/// ``` +#[cfg(test)] +mod test_rpc { + use super::test_rpc_handlers::*; + use crate::types::storage::Epoch; + use crate::types::token; + + // Setup an RPC router for testing + router! {TEST_RPC, + ( "sub" ) = (sub TEST_SUB_RPC), + ( "a" ) -> String = a, + ( "b" ) = { + ( "0" ) = { + ( "i" ) -> String = b0i, + ( "ii" ) -> String = b0ii, + }, + ( "1" ) -> String = b1, + ( "2" ) = { + ( "i" / [balance: token::Amount] ) -> String = b2i, + }, + ( "3" / [a1: token::Amount] / [a2: token::Amount] ) = { + ( "i" / [a3: token:: Amount] ) -> String = b3i, + ( [a3: token:: Amount] ) -> String = b3, + ( [a3: token:: Amount] / "ii" ) -> String = b3ii, + ( [a3: opt token::Amount] / "iii" ) -> String = b3iii, + ( "iiii" / [a3: opt token::Amount] / "xyz" / [a4: opt Epoch] ) -> String = b3iiii, + }, + }, + } + + router! 
{TEST_SUB_RPC, + ( "x" ) -> String = x, + ( "y" / [untyped_arg] ) -> String = y, + ( "z" / [untyped_arg] ) -> String = z, + } +} + +#[cfg(test)] +mod test { + use super::test_rpc::TEST_RPC; + use crate::ledger::queries::testing::TestClient; + use crate::ledger::queries::{RequestCtx, RequestQuery, Router}; + use crate::ledger::storage_api; + use crate::types::storage::Epoch; + use crate::types::token; + + /// Test all the possible paths in `TEST_RPC` router. + #[tokio::test] + async fn test_router_macro() -> storage_api::Result<()> { + let client = TestClient::new(TEST_RPC); + + // Test request with an invalid path + let request = RequestQuery { + path: "/invalid".to_owned(), + ..RequestQuery::default() + }; + let ctx = RequestCtx { + storage: &client.storage, + vp_wasm_cache: client.vp_wasm_cache.clone(), + tx_wasm_cache: client.tx_wasm_cache.clone(), + }; + let result = TEST_RPC.handle(ctx, &request); + assert!(result.is_err()); + + // Test requests to valid paths using the router's methods + + let result = TEST_RPC.a(&client).await.unwrap(); + assert_eq!(result, "a"); + + let result = TEST_RPC.b0i(&client).await.unwrap(); + assert_eq!(result, "b0i"); + + let result = TEST_RPC.b0ii(&client).await.unwrap(); + assert_eq!(result, "b0ii"); + + let result = TEST_RPC.b1(&client).await.unwrap(); + assert_eq!(result, "b1"); + + let balance = token::Amount::from(123_000_000); + let result = TEST_RPC.b2i(&client, &balance).await.unwrap(); + assert_eq!(result, format!("b2i/{balance}")); + + let a1 = token::Amount::from(345); + let a2 = token::Amount::from(123_000); + let a3 = token::Amount::from(1_000_999); + let result = TEST_RPC.b3(&client, &a1, &a2, &a3).await.unwrap(); + assert_eq!(result, format!("b3/{a1}/{a2}/{a3}")); + + let result = TEST_RPC.b3i(&client, &a1, &a2, &a3).await.unwrap(); + assert_eq!(result, format!("b3i/{a1}/{a2}/{a3}")); + + let result = TEST_RPC.b3ii(&client, &a1, &a2, &a3).await.unwrap(); + assert_eq!(result, format!("b3ii/{a1}/{a2}/{a3}")); + + let result = + TEST_RPC.b3iii(&client, &a1, &a2, &Some(a3)).await.unwrap(); + assert_eq!(result, format!("b3iii/{a1}/{a2}/{a3}")); + + let result = TEST_RPC.b3iii(&client, &a1, &a2, &None).await.unwrap(); + assert_eq!(result, format!("b3iii/{a1}/{a2}")); + + let result = TEST_RPC + .b3iiii(&client, &a1, &a2, &Some(a3), &None) + .await + .unwrap(); + assert_eq!(result, format!("b3iiii/{a1}/{a2}/{a3}")); + + let a4 = Epoch::from(10); + let result = TEST_RPC + .b3iiii(&client, &a1, &a2, &Some(a3), &Some(a4)) + .await + .unwrap(); + assert_eq!(result, format!("b3iiii/{a1}/{a2}/{a3}/{a4}")); + + let result = TEST_RPC + .b3iiii(&client, &a1, &a2, &None, &None) + .await + .unwrap(); + assert_eq!(result, format!("b3iiii/{a1}/{a2}")); + + let result = TEST_RPC.test_sub_rpc().x(&client).await.unwrap(); + assert_eq!(result, format!("x")); + + let arg = "test123"; + let result = TEST_RPC.test_sub_rpc().y(&client, arg).await.unwrap(); + assert_eq!(result, format!("y/{arg}")); + + let arg = "test321"; + let result = TEST_RPC.test_sub_rpc().z(&client, arg).await.unwrap(); + assert_eq!(result, format!("z/{arg}")); + + Ok(()) + } +} diff --git a/shared/src/ledger/queries/types.rs b/shared/src/ledger/queries/types.rs new file mode 100644 index 00000000000..00cff84ed9c --- /dev/null +++ b/shared/src/ledger/queries/types.rs @@ -0,0 +1,171 @@ +use tendermint_proto::crypto::ProofOps; + +use crate::ledger::storage::{DBIter, Storage, StorageHasher, DB}; +use crate::ledger::storage_api; +use crate::types::storage::BlockHeight; +#[cfg(feature = 
"wasm-runtime")] +use crate::vm::wasm::{TxCache, VpCache}; +#[cfg(feature = "wasm-runtime")] +use crate::vm::WasmCacheRoAccess; + +/// A request context provides read-only access to storage and WASM compilation +/// caches to request handlers. +#[derive(Debug, Clone)] +pub struct RequestCtx<'a, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + /// Storage access + pub storage: &'a Storage, + /// VP WASM compilation cache + #[cfg(feature = "wasm-runtime")] + pub vp_wasm_cache: VpCache, + /// tx WASM compilation cache + #[cfg(feature = "wasm-runtime")] + pub tx_wasm_cache: TxCache, +} + +/// A `Router` handles parsing read-only query requests and dispatching them to +/// their handler functions. A valid query returns a borsh-encoded result. +pub trait Router { + /// Handle a given request using the provided context. This must be invoked + /// on the root `Router` to be able to match the `request.path` fully. + fn handle( + &self, + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, + ) -> storage_api::Result + where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + { + self.internal_handle(ctx, request, 0) + } + + /// Internal method which shouldn't be invoked directly. Instead, you may + /// want to call `self.handle()`. + /// + /// Handle a given request using the provided context, starting to + /// try to match `request.path` against the `Router`'s patterns at the + /// given `start` offset. + fn internal_handle( + &self, + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, + start: usize, + ) -> storage_api::Result + where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync; +} + +/// A client with async request dispatcher method, which can be used to invoke +/// type-safe methods from a root [`Router`], generated via `router!` macro. +#[cfg(any(test, feature = "async-client"))] +#[async_trait::async_trait] +pub trait Client { + /// `std::io::Error` can happen in decoding with + /// `BorshDeserialize::try_from_slice` + type Error: From; + + /// Send a simple query request at the given path. For more options, use the + /// `request` method. + async fn simple_request( + &self, + path: String, + ) -> Result, Self::Error> { + self.request(path, None, None, false) + .await + .map(|response| response.data) + } + + /// Send a query request at the given path. + async fn request( + &self, + path: String, + data: Option>, + height: Option, + prove: bool, + ) -> Result; +} + +/// Temporary domain-type for `tendermint_proto::abci::RequestQuery`, copied +/// from +/// until we are on a branch that has it included. +#[derive(Clone, PartialEq, Eq, Debug, Default)] +pub struct RequestQuery { + /// Raw query bytes. + /// + /// Can be used with or in lieu of `path`. + pub data: Vec, + /// Path of the request, like an HTTP `GET` path. + /// + /// Can be used with or in lieu of `data`. + /// + /// Applications MUST interpret `/store` as a query by key on the + /// underlying store. The key SHOULD be specified in the Data field. + /// Applications SHOULD allow queries over specific types like + /// `/accounts/...` or `/votes/...`. + pub path: String, + /// The block height for which the query should be executed. + /// + /// The default `0` returns data for the latest committed block. Note that + /// this is the height of the block containing the application's Merkle + /// root hash, which represents the state as it was after committing + /// the block at `height - 1`. 
+ pub height: BlockHeight, + /// Whether to return a Merkle proof with the response, if possible. + pub prove: bool, +} + +/// Generic response from a query +#[derive(Clone, Debug, Default)] +pub struct ResponseQuery { + /// Response data to be borsh encoded + pub data: T, + /// Non-deterministic log of the request execution + pub info: String, + /// Optional proof - used for storage value reads which request `prove` + pub proof_ops: Option, +} + +/// [`ResponseQuery`] with borsh-encoded `data` field +pub type EncodedResponseQuery = ResponseQuery>; + +impl RequestQuery { + /// Try to convert tendermint RequestQuery into our [`RequestQuery`] + /// domain type. This tries to convert the block height into our + /// [`BlockHeight`] type, where `0` is treated as a special value to signal + /// to use the latest committed block height as per tendermint ABCI Query + /// spec. A negative block height will cause an error. + pub fn try_from_tm( + storage: &Storage, + tendermint_proto::abci::RequestQuery { + data, + path, + height, + prove, + }: tendermint_proto::abci::RequestQuery, + ) -> Result + where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, + { + let height = match height { + 0 => { + // `0` means last committed height + storage.last_height + } + _ => BlockHeight(height.try_into().map_err(|_| { + format!("Query height cannot be negative, got: {}", height) + })?), + }; + Ok(Self { + data, + path, + height, + prove, + }) + } +} From c5939176b9837fe842b1fef53a58238082aa616e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 6 Oct 2022 12:44:53 +0200 Subject: [PATCH 05/17] apps: replace RPC module and its handlers with new queries mod --- apps/src/lib/client/rpc.rs | 245 ++++++----------- apps/src/lib/node/ledger/mod.rs | 1 - apps/src/lib/node/ledger/rpc.rs | 104 ------- apps/src/lib/node/ledger/shell/governance.rs | 1 + apps/src/lib/node/ledger/shell/mod.rs | 39 --- .../lib/node/ledger/shell/process_proposal.rs | 5 +- apps/src/lib/node/ledger/shell/queries.rs | 260 +++--------------- 7 files changed, 117 insertions(+), 538 deletions(-) delete mode 100644 apps/src/lib/node/ledger/rpc.rs diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index 6c1e3fb5f31..488f20f8f9e 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -23,6 +23,7 @@ use namada::ledger::pos::types::{ use namada::ledger::pos::{ self, is_validator_slashes_key, BondId, Bonds, PosParams, Slash, Unbonds, }; +use namada::ledger::queries::{self, RPC}; use namada::types::address::Address; use namada::types::governance::{ OfflineProposal, OfflineVote, ProposalResult, ProposalVote, TallyResult, @@ -35,63 +36,30 @@ use namada::types::{address, storage, token}; use crate::cli::{self, args, Context}; use crate::client::tendermint_rpc_types::TxResponse; -use crate::facade::tendermint::abci::Code; use crate::facade::tendermint_config::net::Address as TendermintAddress; use crate::facade::tendermint_rpc::error::Error as TError; use crate::facade::tendermint_rpc::query::Query; use crate::facade::tendermint_rpc::{ Client, HttpClient, Order, SubscriptionClient, WebSocketClient, }; -use crate::node::ledger::rpc::Path; /// Query the epoch of the last committed block pub async fn query_epoch(args: args::Query) -> Epoch { let client = HttpClient::new(args.ledger_address).unwrap(); - let path = Path::Epoch; - let data = vec![]; - let response = client - .abci_query(Some(path.into()), data, None, false) - .await - .unwrap(); - match response.code { - Code::Ok => match 
Epoch::try_from_slice(&response.value[..]) { - Ok(epoch) => { - println!("Last committed epoch: {}", epoch); - return epoch; - } - - Err(err) => { - eprintln!("Error decoding the epoch value: {}", err) - } - }, - Code::Err(err) => eprintln!( - "Error in the query {} (error code {})", - response.info, err - ), - } - cli::safe_exit(1) + let epoch = unwrap_client_response(RPC.epoch(&client).await); + println!("Last committed epoch: {}", epoch); + epoch } /// Query the raw bytes of given storage key pub async fn query_raw_bytes(_ctx: Context, args: args::QueryRawBytes) { let client = HttpClient::new(args.query.ledger_address).unwrap(); - let path = Path::Value(args.storage_key); - let data = vec![]; - let response = client - .abci_query(Some(path.into()), data, None, false) - .await - .unwrap(); - match response.code { - Code::Ok => { - println!("{}", HEXLOWER.encode(&response.value)); - } - Code::Err(err) => { - eprintln!( - "Error in the query {} (error code {})", - response.info, err - ); - cli::safe_exit(1) - } + let bytes = unwrap_client_response( + RPC.storage_value(&client, &args.storage_key).await, + ); + match bytes { + Some(bytes) => println!("Found data: 0x{}", HEXLOWER.encode(&bytes)), + None => println!("No data found for key {}", args.storage_key), } } @@ -135,11 +103,9 @@ pub async fn query_balance(ctx: Context, args: args::QueryBalance) { let owner = ctx.get(&owner); for (token, _) in tokens { let prefix = token.to_db_key().into(); - let balances = query_storage_prefix::( - client.clone(), - prefix, - ) - .await; + let balances = + query_storage_prefix::(&client, &prefix) + .await; if let Some(balances) = balances { print_balances(&ctx, balances, &token, Some(&owner)); } @@ -149,7 +115,7 @@ pub async fn query_balance(ctx: Context, args: args::QueryBalance) { let token = ctx.get(&token); let prefix = token.to_db_key().into(); let balances = - query_storage_prefix::(client, prefix).await; + query_storage_prefix::(&client, &prefix).await; if let Some(balances) = balances { print_balances(&ctx, balances, &token, None); } @@ -158,8 +124,7 @@ pub async fn query_balance(ctx: Context, args: args::QueryBalance) { for (token, _) in tokens { let key = token::balance_prefix(&token); let balances = - query_storage_prefix::(client.clone(), key) - .await; + query_storage_prefix::(&client, &key).await; if let Some(balances) = balances { print_balances(&ctx, balances, &token, None); } @@ -660,18 +625,14 @@ pub async fn query_bonds(ctx: Context, args: args::QueryBonds) { let owner = ctx.get(&owner); // Find owner's bonds to any validator let bonds_prefix = pos::bonds_for_source_prefix(&owner); - let bonds = query_storage_prefix::( - client.clone(), - bonds_prefix, - ) - .await; + let bonds = + query_storage_prefix::(&client, &bonds_prefix) + .await; // Find owner's unbonds to any validator let unbonds_prefix = pos::unbonds_for_source_prefix(&owner); - let unbonds = query_storage_prefix::( - client.clone(), - unbonds_prefix, - ) - .await; + let unbonds = + query_storage_prefix::(&client, &unbonds_prefix) + .await; let mut total: token::Amount = 0.into(); let mut total_active: token::Amount = 0.into(); @@ -780,18 +741,14 @@ pub async fn query_bonds(ctx: Context, args: args::QueryBonds) { (None, None) => { // Find all the bonds let bonds_prefix = pos::bonds_prefix(); - let bonds = query_storage_prefix::( - client.clone(), - bonds_prefix, - ) - .await; + let bonds = + query_storage_prefix::(&client, &bonds_prefix) + .await; // Find all the unbonds let unbonds_prefix = pos::unbonds_prefix(); - let 
unbonds = query_storage_prefix::( - client.clone(), - unbonds_prefix, - ) - .await; + let unbonds = + query_storage_prefix::(&client, &unbonds_prefix) + .await; let mut total: token::Amount = 0.into(); let mut total_active: token::Amount = 0.into(); @@ -1032,11 +989,9 @@ pub async fn query_slashes(ctx: Context, args: args::QuerySlashes) { None => { // Iterate slashes for all validators let slashes_prefix = pos::slashes_prefix(); - let slashes = query_storage_prefix::( - client.clone(), - slashes_prefix, - ) - .await; + let slashes = + query_storage_prefix::(&client, &slashes_prefix) + .await; match slashes { Some(slashes) => { @@ -1075,12 +1030,13 @@ pub async fn query_slashes(ctx: Context, args: args::QuerySlashes) { /// Dry run a transaction pub async fn dry_run_tx(ledger_address: &TendermintAddress, tx_bytes: Vec) { let client = HttpClient::new(ledger_address.clone()).unwrap(); - let path = Path::DryRunTx; - let response = client - .abci_query(Some(path.into()), tx_bytes, None, false) - .await - .unwrap(); - println!("{:#?}", response); + let (data, height, prove) = (Some(tx_bytes), None, false); + let result = unwrap_client_response( + RPC.dry_run_tx_with_options(&client, data, height, prove) + .await, + ) + .data; + println!("Dry-run result: {}", result); } /// Get account's public key stored in its storage sub-space @@ -1113,7 +1069,7 @@ pub async fn is_delegator( let client = HttpClient::new(ledger_address).unwrap(); let bonds_prefix = pos::bonds_for_source_prefix(address); let bonds = - query_storage_prefix::(client.clone(), bonds_prefix).await; + query_storage_prefix::(&client, &bonds_prefix).await; bonds.is_some() && bonds.unwrap().count() > 0 } @@ -1123,8 +1079,7 @@ pub async fn is_delegator_at( epoch: Epoch, ) -> bool { let key = pos::bonds_for_source_prefix(address); - let bonds_iter = - query_storage_prefix::(client.clone(), key).await; + let bonds_iter = query_storage_prefix::(client, &key).await; if let Some(mut bonds) = bonds_iter { bonds.any(|(_, bond)| bond.get(epoch).is_some()) } else { @@ -1144,7 +1099,7 @@ pub async fn known_address( Address::Established(_) => { // Established account exists if it has a VP let key = storage::Key::validity_predicate(address); - query_has_storage_key(client, key).await + query_has_storage_key(&client, &key).await } Address::Implicit(_) | Address::Internal(_) => true, } @@ -1293,100 +1248,52 @@ pub async fn query_storage_value( where T: BorshDeserialize, { - let path = Path::Value(key.to_owned()); - let data = vec![]; - let response = client - .abci_query(Some(path.into()), data, None, false) - .await - .unwrap(); - match response.code { - Code::Ok => match T::try_from_slice(&response.value[..]) { - Ok(value) => return Some(value), - Err(err) => eprintln!("Error decoding the value: {}", err), - }, - Code::Err(err) => { - if err == 1 { - return None; - } else { - eprintln!( - "Error in the query {} (error code {})", - response.info, err - ) - } - } - } - cli::safe_exit(1) + let bytes = unwrap_client_response(RPC.storage_value(client, key).await); + bytes.map(|bytes| { + T::try_from_slice(&bytes[..]).unwrap_or_else(|err| { + eprintln!("Error decoding the value: {}", err); + cli::safe_exit(1) + }) + }) } /// Query a range of storage values with a matching prefix and decode them with /// [`BorshDeserialize`]. Returns an iterator of the storage keys paired with /// their associated values. 
pub async fn query_storage_prefix( - client: HttpClient, - key: storage::Key, + client: &HttpClient, + key: &storage::Key, ) -> Option> where T: BorshDeserialize, { - let path = Path::Prefix(key); - let data = vec![]; - let response = client - .abci_query(Some(path.into()), data, None, false) - .await - .unwrap(); - match response.code { - Code::Ok => { - match Vec::::try_from_slice(&response.value[..]) { - Ok(values) => { - let decode = |PrefixValue { key, value }: PrefixValue| { - match T::try_from_slice(&value[..]) { - Err(_) => None, - Ok(value) => Some((key, value)), - } - }; - return Some(values.into_iter().filter_map(decode)); - } - Err(err) => eprintln!("Error decoding the values: {}", err), - } - } - Code::Err(err) => { - if err == 1 { - return None; - } else { + let values = unwrap_client_response(RPC.storage_prefix(client, key).await); + let decode = + |PrefixValue { key, value }: PrefixValue| match T::try_from_slice( + &value[..], + ) { + Err(err) => { eprintln!( - "Error in the query {} (error code {})", - response.info, err - ) + "Skipping a value for key {}. Error in decoding: {}", + key, err + ); + None } - } + Ok(value) => Some((key, value)), + }; + if values.is_empty() { + None + } else { + Some(values.into_iter().filter_map(decode)) } - cli::safe_exit(1) } /// Query to check if the given storage key exists. pub async fn query_has_storage_key( - client: HttpClient, - key: storage::Key, + client: &HttpClient, + key: &storage::Key, ) -> bool { - let path = Path::HasKey(key); - let data = vec![]; - let response = client - .abci_query(Some(path.into()), data, None, false) - .await - .unwrap(); - match response.code { - Code::Ok => match bool::try_from_slice(&response.value[..]) { - Ok(value) => return value, - Err(err) => eprintln!("Error decoding the value: {}", err), - }, - Code::Err(err) => { - eprintln!( - "Error in the query {} (error code {})", - response.info, err - ) - } - } - cli::safe_exit(1) + unwrap_client_response(RPC.storage_has_key(client, key).await) } /// Represents a query for an event pertaining to the specified transaction @@ -1556,8 +1463,7 @@ pub async fn get_proposal_votes( let vote_prefix_key = gov_storage::get_proposal_vote_prefix_key(proposal_id); let vote_iter = - query_storage_prefix::(client.clone(), vote_prefix_key) - .await; + query_storage_prefix::(client, &vote_prefix_key).await; let mut yay_validators: HashMap = HashMap::new(); let mut yay_delegators: HashMap> = @@ -1662,7 +1568,7 @@ pub async fn get_proposal_offline_votes( { let key = pos::bonds_for_source_prefix(&proposal_vote.address); let bonds_iter = - query_storage_prefix::(client.clone(), key).await; + query_storage_prefix::(client, &key).await; if let Some(bonds) = bonds_iter { for (key, epoched_bonds) in bonds { // Look-up slashes for the validator in this key and @@ -1907,8 +1813,7 @@ pub async fn get_delegators_delegation( _epoch: Epoch, ) -> Vec
<Address> { let key = pos::bonds_for_source_prefix(address); - let bonds_iter = - query_storage_prefix::(client.clone(), key).await; + let bonds_iter = query_storage_prefix::(client, &key).await; + let mut delegation_addresses: Vec<Address>
= Vec::new(); if let Some(bonds) = bonds_iter { @@ -1970,3 +1875,11 @@ fn lookup_alias(ctx: &Context, addr: &Address) -> String { None => format!("{}", addr), } } + +/// A helper to unwrap client's response. Will shut down process on error. +fn unwrap_client_response(response: Result) -> T { + response.unwrap_or_else(|err| { + eprintln!("Error in the query {}", err); + cli::safe_exit(1) + }) +} diff --git a/apps/src/lib/node/ledger/mod.rs b/apps/src/lib/node/ledger/mod.rs index 965275cc03a..9796c18fce8 100644 --- a/apps/src/lib/node/ledger/mod.rs +++ b/apps/src/lib/node/ledger/mod.rs @@ -1,7 +1,6 @@ mod abortable; mod broadcaster; pub mod events; -pub mod rpc; mod shell; mod shims; pub mod storage; diff --git a/apps/src/lib/node/ledger/rpc.rs b/apps/src/lib/node/ledger/rpc.rs deleted file mode 100644 index ad3d2f5fcb7..00000000000 --- a/apps/src/lib/node/ledger/rpc.rs +++ /dev/null @@ -1,104 +0,0 @@ -//! RPC endpoint is used for ledger state queries - -use std::fmt::Display; -use std::str::FromStr; - -use namada::types::address::Address; -use namada::types::storage; -use thiserror::Error; - -use crate::facade::tendermint::abci::Path as AbciPath; - -/// RPC query path -#[derive(Debug, Clone)] -pub enum Path { - /// Dry run a transaction - DryRunTx, - /// Epoch of the last committed block - Epoch, - /// Read a storage value with exact storage key - Value(storage::Key), - /// Read a range of storage values with a matching key prefix - Prefix(storage::Key), - /// Check if the given storage key exists - HasKey(storage::Key), -} - -#[derive(Debug, Clone)] -pub struct BalanceQuery { - #[allow(dead_code)] - owner: Option
<Address>, - #[allow(dead_code)] - token: Option<Address>
, -} - -const DRY_RUN_TX_PATH: &str = "dry_run_tx"; -const EPOCH_PATH: &str = "epoch"; -const VALUE_PREFIX: &str = "value"; -const PREFIX_PREFIX: &str = "prefix"; -const HAS_KEY_PREFIX: &str = "has_key"; - -impl Display for Path { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Path::DryRunTx => write!(f, "{}", DRY_RUN_TX_PATH), - Path::Epoch => write!(f, "{}", EPOCH_PATH), - Path::Value(storage_key) => { - write!(f, "{}/{}", VALUE_PREFIX, storage_key) - } - Path::Prefix(storage_key) => { - write!(f, "{}/{}", PREFIX_PREFIX, storage_key) - } - Path::HasKey(storage_key) => { - write!(f, "{}/{}", HAS_KEY_PREFIX, storage_key) - } - } - } -} - -impl FromStr for Path { - type Err = PathParseError; - - fn from_str(s: &str) -> Result { - match s { - DRY_RUN_TX_PATH => Ok(Self::DryRunTx), - EPOCH_PATH => Ok(Self::Epoch), - _ => match s.split_once('/') { - Some((VALUE_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::Value(key)) - } - Some((PREFIX_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::Prefix(key)) - } - Some((HAS_KEY_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::HasKey(key)) - } - _ => Err(PathParseError::InvalidPath(s.to_string())), - }, - } - } -} - -impl From for AbciPath { - fn from(path: Path) -> Self { - let path = path.to_string(); - // TODO: update in tendermint-rs to allow to construct this from owned - // string. It's what `from_str` does anyway - AbciPath::from_str(&path).unwrap() - } -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum PathParseError { - #[error("Unrecognized query path: {0}")] - InvalidPath(String), - #[error("Invalid storage key: {0}")] - InvalidStorageKey(storage::Error), -} diff --git a/apps/src/lib/node/ledger/shell/governance.rs b/apps/src/lib/node/ledger/shell/governance.rs index 979c3c0df15..5ddcaf16739 100644 --- a/apps/src/lib/node/ledger/shell/governance.rs +++ b/apps/src/lib/node/ledger/shell/governance.rs @@ -3,6 +3,7 @@ use namada::ledger::governance::utils::{ compute_tally, get_proposal_votes, ProposalEvent, }; use namada::ledger::governance::vp::ADDRESS as gov_address; +use namada::ledger::protocol; use namada::ledger::slash_fund::ADDRESS as slash_fund_address; use namada::ledger::storage::types::encode; use namada::ledger::storage::{DBIter, StorageHasher, DB}; diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index ab3b131c377..52026f40d11 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -18,7 +18,6 @@ use std::mem; use std::path::{Path, PathBuf}; #[allow(unused_imports)] use std::rc::Rc; -use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; use namada::ledger::gas::BlockGasMeter; @@ -48,7 +47,6 @@ use num_traits::{FromPrimitive, ToPrimitive}; use thiserror::Error; use tokio::sync::mpsc::UnboundedSender; -use super::rpc; use crate::config::{genesis, TendermintMode}; #[cfg(feature = "abcipp")] use crate::facade::tendermint_proto::abci::response_verify_vote_extension::VerifyStatus; @@ -559,43 +557,6 @@ where response } - /// Simulate validation and application of a transaction. 
- fn dry_run_tx(&self, tx_bytes: &[u8]) -> response::Query { - let mut response = response::Query::default(); - let mut gas_meter = BlockGasMeter::default(); - let mut write_log = WriteLog::default(); - let mut vp_wasm_cache = self.vp_wasm_cache.read_only(); - let mut tx_wasm_cache = self.tx_wasm_cache.read_only(); - match Tx::try_from(tx_bytes) { - Ok(tx) => { - let tx = TxType::Decrypted(DecryptedTx::Decrypted(tx)); - match protocol::apply_tx( - tx, - tx_bytes.len(), - &mut gas_meter, - &mut write_log, - &self.storage, - &mut vp_wasm_cache, - &mut tx_wasm_cache, - ) - .map_err(Error::TxApply) - { - Ok(result) => response.info = result.to_string(), - Err(error) => { - response.code = 1; - response.log = format!("{}", error); - } - } - response - } - Err(err) => { - response.code = 1; - response.log = format!("{}", Error::TxDecoding(err)); - response - } - } - } - /// Lookup a validator's keypair for their established account from their /// wallet. If the node is not validator, this function returns None #[allow(dead_code)] diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 5572e00495d..5fa3e92763b 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -150,9 +150,8 @@ where } } else { // check that the fee payer has sufficient balance - let balance = self - .get_balance(&tx.fee.token, &tx.fee_payer()) - .unwrap_or_default(); + let balance = + self.get_balance(&tx.fee.token, &tx.fee_payer()); if tx.fee.amount <= balance { TxResult { diff --git a/apps/src/lib/node/ledger/shell/queries.rs b/apps/src/lib/node/ledger/shell/queries.rs index a9435d565a1..e53ea914177 100644 --- a/apps/src/lib/node/ledger/shell/queries.rs +++ b/apps/src/lib/node/ledger/shell/queries.rs @@ -9,7 +9,6 @@ use namada::types::key::dkg_session_keys::DkgPublicKey; use namada::types::{key, token}; use super::*; -use crate::facade::tendermint_proto::crypto::{ProofOp, ProofOps}; use crate::node::ledger::response; impl Shell @@ -22,39 +21,39 @@ where /// the default if `path` is not a supported string. /// INVARIANT: This method must be stateless. pub fn query(&self, query: request::Query) -> response::Query { - use rpc::Path; - let height = match query.height { - 0 => self.storage.get_block_height().0, - 1.. 
=> BlockHeight(query.height as u64), - _ => { + let ctx = RequestCtx { + storage: &self.storage, + vp_wasm_cache: self.vp_wasm_cache.read_only(), + tx_wasm_cache: self.tx_wasm_cache.read_only(), + }; + + // Convert request to domain-type + let request = match namada::ledger::queries::RequestQuery::try_from_tm( + &self.storage, + query, + ) { + Ok(request) => request, + Err(err) => { return response::Query { code: 1, - info: format!( - "The query height is invalid: {}", - query.height - ), + info: format!("Unexpected query: {}", err), ..Default::default() }; } }; - match Path::from_str(&query.path) { - Ok(path) => match path { - Path::DryRunTx => self.dry_run_tx(&query.data), - Path::Epoch => { - let (epoch, _gas) = self.storage.get_last_epoch(); - let value = namada::ledger::storage::types::encode(&epoch); - response::Query { - value, - ..Default::default() - } - } - Path::Value(storage_key) => { - self.read_storage_value(&storage_key, height, query.prove) - } - Path::Prefix(storage_key) => { - self.read_storage_prefix(&storage_key, height, query.prove) - } - Path::HasKey(storage_key) => self.has_storage_key(&storage_key), + + // Invoke the root RPC handler - returns borsh-encoded data on success + let result = namada::ledger::queries::handle_path(ctx, &request); + match result { + Ok(ResponseQuery { + data, + info, + proof_ops, + }) => response::Query { + value: data, + info, + proof_ops, + ..Default::default() }, Err(err) => response::Query { code: 1, @@ -70,205 +69,16 @@ where &self, token: &Address, owner: &Address, - ) -> std::result::Result { - let height = self.storage.get_block_height().0; - let query_resp = self.read_storage_value( + ) -> token::Amount { + let balance = storage_api::StorageRead::read( + &self.storage, &token::balance_key(token, owner), - height, - false, ); - if query_resp.code != 0 { - Err(format!( - "Unable to read token {} balance of the given address {}", - token, owner - )) - } else { - BorshDeserialize::try_from_slice(&query_resp.value[..]).map_err( - |_| { - "Unable to deserialize the balance of the given address" - .into() - }, - ) - } - } - - /// Query to read a value from storage - pub fn read_storage_value( - &self, - key: &Key, - height: BlockHeight, - is_proven: bool, - ) -> response::Query { - match self.storage.read_with_height(key, height) { - Ok((Some(value), _gas)) => { - let proof_ops = if is_proven { - match self.storage.get_existence_proof( - key, - value.clone().into(), - height, - ) { - Ok(proof) => Some(proof.into()), - Err(err) => { - return response::Query { - code: 2, - info: format!("Storage error: {}", err), - ..Default::default() - }; - } - } - } else { - None - }; - response::Query { - value, - proof_ops, - ..Default::default() - } - } - Ok((None, _gas)) => { - let proof_ops = if is_proven { - match self.storage.get_non_existence_proof(key, height) { - Ok(proof) => Some(proof.into()), - Err(err) => { - return response::Query { - code: 2, - info: format!("Storage error: {}", err), - ..Default::default() - }; - } - } - } else { - None - }; - response::Query { - code: 1, - info: format!("No value found for key: {}", key), - proof_ops, - ..Default::default() - } - } - Err(err) => response::Query { - code: 2, - info: format!("Storage error: {}", err), - ..Default::default() - }, - } - } - - /// Query to read a range of values from storage with a matching prefix. The - /// value in successful response is a [`Vec`] encoded with - /// [`BorshSerialize`]. 
- pub fn read_storage_prefix( - &self, - key: &Key, - height: BlockHeight, - is_proven: bool, - ) -> response::Query { - if height != self.storage.get_block_height().0 { - return response::Query { - code: 2, - info: format!( - "Prefix read works with only the latest height: height {}", - height - ), - ..Default::default() - }; - } - let (iter, _gas) = self.storage.iter_prefix(key); - let mut iter = iter.peekable(); - if iter.peek().is_none() { - response::Query { - code: 1, - info: format!("No value found for key: {}", key), - ..Default::default() - } - } else { - let values: std::result::Result< - Vec, - namada::types::storage::Error, - > = iter - .map(|(key, value, _gas)| { - let key = Key::parse(key)?; - Ok(PrefixValue { key, value }) - }) - .collect(); - match values { - Ok(values) => { - let proof_ops = if is_proven { - let mut ops = vec![]; - for PrefixValue { key, value } in &values { - match self.storage.get_existence_proof( - key, - value.clone().into(), - height, - ) { - Ok(p) => { - let mut cur_ops: Vec = p - .ops - .into_iter() - .map(|op| { - #[cfg(feature = "abcipp")] - { - ProofOp { - r#type: op.field_type, - key: op.key, - data: op.data, - } - } - #[cfg(not(feature = "abcipp"))] - { - op.into() - } - }) - .collect(); - ops.append(&mut cur_ops); - } - Err(err) => { - return response::Query { - code: 2, - info: format!("Storage error: {}", err), - ..Default::default() - }; - } - } - } - // ops is not empty in this case - Some(ProofOps { ops }) - } else { - None - }; - let value = values.try_to_vec().unwrap(); - response::Query { - value, - proof_ops, - ..Default::default() - } - } - Err(err) => response::Query { - code: 1, - info: format!( - "Error parsing a storage key {}: {}", - key, err - ), - ..Default::default() - }, - } - } - } - - /// Query to check if a storage key exists. - fn has_storage_key(&self, key: &Key) -> response::Query { - match self.storage.has_key(key) { - Ok((has_key, _gas)) => response::Query { - value: has_key.try_to_vec().unwrap(), - ..Default::default() - }, - Err(err) => response::Query { - code: 2, - info: format!("Storage error: {}", err), - ..Default::default() - }, - } + // Storage read must not fail, but there might be no value, in which + // case default (0) is returned + balance + .expect("Storage read in the protocol must not fail") + .unwrap_or_default() } /// Lookup data about a validator from their protocol signing key From 66c632c2ff9e050f06c1ddcb1eeacc591439e7b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 7 Oct 2022 15:04:14 +0200 Subject: [PATCH 06/17] changelog: add #553 --- .changelog/unreleased/improvements/553-rpc-queries-router.md | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 .changelog/unreleased/improvements/553-rpc-queries-router.md diff --git a/.changelog/unreleased/improvements/553-rpc-queries-router.md b/.changelog/unreleased/improvements/553-rpc-queries-router.md new file mode 100644 index 00000000000..877ac77c20e --- /dev/null +++ b/.changelog/unreleased/improvements/553-rpc-queries-router.md @@ -0,0 +1,4 @@ +- Replace the handcrafted RPC paths with a new `router!` macro RPC queries + definition that handles dynamic path matching, type-safe handler function + dispatch and also generates type-safe client methods for the queries. 
+ ([#553](https://github.com/anoma/namada/pull/553)) \ No newline at end of file From 68135260ad7488fe752a6c62d06625f2da7211da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 24 Oct 2022 12:34:12 +0200 Subject: [PATCH 07/17] wasm: update checksums --- wasm/checksums.json | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/wasm/checksums.json b/wasm/checksums.json index 01140354ba3..f3e11c7ea5f 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,18 +1,18 @@ { - "tx_bond.wasm": "tx_bond.38c037a51f9215c2be9c1b01f647251ffdc96a02a0c958c5d3db4ee36ccde43b.wasm", - "tx_ibc.wasm": "tx_ibc.5f86477029d987073ebfec66019dc991b0bb8b80717d4885b860f910916cbcdd.wasm", - "tx_init_account.wasm": "tx_init_account.8d901bce15d1ab63a591def00421183a651d4d5e09ace4291bf0a9044692741d.wasm", - "tx_init_nft.wasm": "tx_init_nft.1991808f44c1c24d4376a3d46b602bed27575f6c0359095c53f37b9225050ffc.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.716cd08d59b26bd75815511f03e141e6ac27bc0b7d7be10a71b04559244722c2.wasm", - "tx_init_validator.wasm": "tx_init_validator.611edff2746f71cdaa7547a84a96676b555821f00af8375a28f8dab7ae9fc9fa.wasm", - "tx_mint_nft.wasm": "tx_mint_nft.3f20f1a86da43cc475ccc127428944bd177d40fbe2d2d1588c6fadd069cbe4b2.wasm", - "tx_transfer.wasm": "tx_transfer.5653340103a32e6685f9668ec24855f65ae17bcc43035c2559a13f5c47bb67af.wasm", - "tx_unbond.wasm": "tx_unbond.71e66ac6f792123a2aaafd60b3892d74a7d0e7a03c3ea34f15fea9089010b810.wasm", - "tx_update_vp.wasm": "tx_update_vp.6d291dadb43545a809ba33fe26582b7984c67c65f05e363a93dbc62e06a33484.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.ff3def7b4bb0c46635bd6d544ac1745362757ce063feb8142d2ed9ab207f2a12.wasm", - "tx_withdraw.wasm": "tx_withdraw.ba1a743cf8914a353d7706777e0b1a37e20cd271b16e022fd3b50ad28971291f.wasm", - "vp_nft.wasm": "vp_nft.4471284b5c5f3e28c973f0a2ad2dde52ebe4a1dcd5dc15e93b380706fd0e35ea.wasm", - "vp_testnet_faucet.wasm": "vp_testnet_faucet.7d7eb09cddc7ae348417da623e21ec4a4f8c78f15ae12de5abe7087eeab1e0db.wasm", - "vp_token.wasm": "vp_token.4a5436f7519de15c80103557add57e8d06e766e1ec1f7a642ffca252be01c5d0.wasm", - "vp_user.wasm": "vp_user.729b18aab60e8ae09b75b5f067658f30459a5ccfcd34f909b88da96523681019.wasm" + "tx_bond.wasm": "tx_bond.99da6abae7acd0a67341b8581bc2b9667cadd8d8c947086a6ed62cb8e5ab9f01.wasm", + "tx_ibc.wasm": "tx_ibc.449da8289b55d2e8ee4b80c4a271f0f82c14bc52f5f1cc141fc2fc25d7c379dc.wasm", + "tx_init_account.wasm": "tx_init_account.c7cf064e8d03315763d0a1c75b9fc0d44d98eab6a2943a82053a8d555861a14e.wasm", + "tx_init_nft.wasm": "tx_init_nft.7e7a5a2678da391ee89b02f08c75fab8b9f48a40e9d4d871d159694080ca41c0.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.be6c8b24bc0419a7242df6ffada532a17730fe13180fce16a91b310892d6ebad.wasm", + "tx_init_validator.wasm": "tx_init_validator.419c2cd5ddfdcc728ea41903e92ed05e60679086babf0feb8146a1a5c1c7ad79.wasm", + "tx_mint_nft.wasm": "tx_mint_nft.d5dcf0139e3fc332474db7ad9fd74f03af3c50433833a222a8ecf9880faedc1e.wasm", + "tx_transfer.wasm": "tx_transfer.15a74bbc4093bb0fd3e7943f597f88a444d6e7ea6e3a47401430e01945fe9ceb.wasm", + "tx_unbond.wasm": "tx_unbond.64ac67930786fc9d18631ed2d3a225a261114129a9ff57986347c904367efac5.wasm", + "tx_update_vp.wasm": "tx_update_vp.7148b2fef2f9a438ec8e3bf42d1e120ce690f0f69bb2b1c481711ee8b22cef54.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.5488e66b41ea1c45efdb6152fe8897c37e731ae97958db024bf1905651e0f54c.wasm", + "tx_withdraw.wasm": 
"tx_withdraw.976687bb02cde635a97de500ea72631f37b50516d34f72d5da3ca82b9617fe57.wasm", + "vp_nft.wasm": "vp_nft.92e1c20e54e67a8baa00bbeb61b3712cca32f34bd7e63e4f7f5da23bc303529a.wasm", + "vp_testnet_faucet.wasm": "vp_testnet_faucet.ddbda2d7f226d40a337eb5176f96050501996d6db8f71fa99c21382f6a631b41.wasm", + "vp_token.wasm": "vp_token.60879bfd767808fe6400096cb1527fe44c81e1893a4ff9ce593a3d36e89a45f6.wasm", + "vp_user.wasm": "vp_user.a51b5650c3303789857d4af18d1d4b342bfa5974fcb2b8d6eca906be998168c5.wasm" } \ No newline at end of file From 3b0117e6e957f18d229e673b63355940a2baad89 Mon Sep 17 00:00:00 2001 From: brentstone Date: Tue, 25 Oct 2022 14:08:33 -0400 Subject: [PATCH 08/17] some doc comment edits --- shared/src/ledger/queries/router.rs | 4 ++-- shared/src/ledger/queries/types.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/shared/src/ledger/queries/router.rs b/shared/src/ledger/queries/router.rs index 69888cb9356..da4cbbb9004 100644 --- a/shared/src/ledger/queries/router.rs +++ b/shared/src/ledger/queries/router.rs @@ -15,7 +15,7 @@ pub enum Error { WrongPath(String), } -/// Find the index of a next slash after the given `start` index in the path. +/// Find the index of a next forward slash after the given `start` index in the path. /// When there are no more slashes, returns the index after the end of the path. /// /// # Panics @@ -646,7 +646,7 @@ mod test_rpc_handlers { use crate::types::storage::Epoch; use crate::types::token; - /// A little macro to generate boilerplate fo RPC handler functions. + /// A little macro to generate boilerplate for RPC handler functions. /// These are implemented to return their name as a String, joined by /// slashes with their argument values turned `to_string()`, if any. macro_rules! handlers { diff --git a/shared/src/ledger/queries/types.rs b/shared/src/ledger/queries/types.rs index 00cff84ed9c..c7b349ddc03 100644 --- a/shared/src/ledger/queries/types.rs +++ b/shared/src/ledger/queries/types.rs @@ -27,7 +27,7 @@ where } /// A `Router` handles parsing read-only query requests and dispatching them to -/// their handler functions. A valid query returns a borsh-encoded result. +/// their handler functions. A valid query returns a borsh-encoded result. pub trait Router { /// Handle a given request using the provided context. This must be invoked /// on the root `Router` to be able to match the `request.path` fully. From c1d8f04549bee772d43549abfcf0ffa162294e06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 27 Oct 2022 11:37:30 +0200 Subject: [PATCH 09/17] ledger/queries: fix require_no_proof doc-string --- shared/src/ledger/queries/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/shared/src/ledger/queries/mod.rs b/shared/src/ledger/queries/mod.rs index 0c66faca1e6..bf3d490b5c4 100644 --- a/shared/src/ledger/queries/mod.rs +++ b/shared/src/ledger/queries/mod.rs @@ -75,8 +75,8 @@ where Ok(()) } -/// For queries that only support latest height, check that the given height is -/// not different from latest height, otherwise return an error. +/// For queries that do not support proofs, check that proof is not requested, +/// otherwise return an error. 
pub fn require_no_proof(request: &RequestQuery) -> storage_api::Result<()> { if request.prove { return Err(storage_api::Error::new_const( From ddc62be79c685552075883d387887012d775577e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 27 Oct 2022 11:37:52 +0200 Subject: [PATCH 10/17] ledger/queries: comment out `println`s for router path matching dbg --- shared/src/ledger/queries/router.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/shared/src/ledger/queries/router.rs b/shared/src/ledger/queries/router.rs index da4cbbb9004..257cf61ad23 100644 --- a/shared/src/ledger/queries/router.rs +++ b/shared/src/ledger/queries/router.rs @@ -57,7 +57,7 @@ macro_rules! handle_match { // ignore trailing slashes $end == $request.path.len() - 1 && &$request.path[$end..] == "/") { // we're not at the end, no match - println!("Not fully matched"); + // println!("Not fully matched"); break } // If you get a compile error from here with `expected function, found @@ -186,12 +186,12 @@ macro_rules! try_match_segments { $end = $request.path.len(); match $request.path[$start..$end].parse::<$arg_ty>() { Ok(parsed) => { - println!("Parsed {}", parsed); + // println!("Parsed {}", parsed); $arg = parsed }, Err(_) => { - println!("Cannot parse {} from {}", stringify!($arg_ty), &$request.path[$start..$end]); + // println!("Cannot parse {} from {}", stringify!($arg_ty), &$request.path[$start..$end]); // If arg cannot be parsed, try to skip to next pattern break } @@ -218,7 +218,7 @@ macro_rules! try_match_segments { }, Err(_) => { - println!("Cannot parse {} from {}", stringify!($arg_ty), &$request.path[$start..$end]); + // println!("Cannot parse {} from {}", stringify!($arg_ty), &$request.path[$start..$end]); // If arg cannot be parsed, try to skip to next pattern break } @@ -244,10 +244,10 @@ macro_rules! try_match_segments { ) => { if &$request.path[$start..$end] == $expected { // Advanced index past the matched arg - println!("Matched literal {}", $expected); + // println!("Matched literal {}", $expected); $start = $end; } else { - println!("{} doesn't match literal {}", &$request.path[$start..$end], $expected); + // println!("{} doesn't match literal {}", &$request.path[$start..$end], $expected); // Try to skip to next pattern break; } @@ -269,14 +269,14 @@ macro_rules! try_match { ($ctx:ident, $request:ident, $start:ident, $handle:tt, $segments:tt) => { // check that the initial char is '/' if $request.path.is_empty() || &$request.path[..1] != "/" { - println!("Missing initial slash"); + // println!("Missing initial slash"); break; } // advance past initial '/' $start += 1; // Path is too short to match if $start >= $request.path.len() { - println!("Path is too short"); + // println!("Path is too short"); break; } let mut end = find_next_slash_index(&$request.path, $start); From a16180f9b9744f4d778a57fc8444f54682062a02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 28 Oct 2022 18:55:48 +0200 Subject: [PATCH 11/17] make fmt --- shared/src/ledger/queries/router.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/shared/src/ledger/queries/router.rs b/shared/src/ledger/queries/router.rs index 257cf61ad23..317c532cb78 100644 --- a/shared/src/ledger/queries/router.rs +++ b/shared/src/ledger/queries/router.rs @@ -15,8 +15,9 @@ pub enum Error { WrongPath(String), } -/// Find the index of a next forward slash after the given `start` index in the path. 
-/// When there are no more slashes, returns the index after the end of the path. +/// Find the index of a next forward slash after the given `start` index in the +/// path. When there are no more slashes, returns the index after the end of the +/// path. /// /// # Panics /// The given `start` must be < `path.len()`. From e71c7752878d88fedb6567404bf1eb43c7d88ea7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 7 Oct 2022 16:04:16 +0200 Subject: [PATCH 12/17] move the current RPC patterns under "shell" sub-router --- apps/src/lib/client/rpc.rs | 15 +- shared/src/ledger/queries/mod.rs | 309 +------------------------- shared/src/ledger/queries/router.rs | 4 +- shared/src/ledger/queries/shell.rs | 332 ++++++++++++++++++++++++++++ 4 files changed, 348 insertions(+), 312 deletions(-) create mode 100644 shared/src/ledger/queries/shell.rs diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index 488f20f8f9e..26f60313f0b 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -46,7 +46,7 @@ use crate::facade::tendermint_rpc::{ /// Query the epoch of the last committed block pub async fn query_epoch(args: args::Query) -> Epoch { let client = HttpClient::new(args.ledger_address).unwrap(); - let epoch = unwrap_client_response(RPC.epoch(&client).await); + let epoch = unwrap_client_response(RPC.shell().epoch(&client).await); println!("Last committed epoch: {}", epoch); epoch } @@ -55,7 +55,7 @@ pub async fn query_epoch(args: args::Query) -> Epoch { pub async fn query_raw_bytes(_ctx: Context, args: args::QueryRawBytes) { let client = HttpClient::new(args.query.ledger_address).unwrap(); let bytes = unwrap_client_response( - RPC.storage_value(&client, &args.storage_key).await, + RPC.shell().storage_value(&client, &args.storage_key).await, ); match bytes { Some(bytes) => println!("Found data: 0x{}", HEXLOWER.encode(&bytes)), @@ -1032,7 +1032,8 @@ pub async fn dry_run_tx(ledger_address: &TendermintAddress, tx_bytes: Vec) { let client = HttpClient::new(ledger_address.clone()).unwrap(); let (data, height, prove) = (Some(tx_bytes), None, false); let result = unwrap_client_response( - RPC.dry_run_tx_with_options(&client, data, height, prove) + RPC.shell() + .dry_run_tx_with_options(&client, data, height, prove) .await, ) .data; @@ -1248,7 +1249,8 @@ pub async fn query_storage_value( where T: BorshDeserialize, { - let bytes = unwrap_client_response(RPC.storage_value(client, key).await); + let bytes = + unwrap_client_response(RPC.shell().storage_value(client, key).await); bytes.map(|bytes| { T::try_from_slice(&bytes[..]).unwrap_or_else(|err| { eprintln!("Error decoding the value: {}", err); @@ -1267,7 +1269,8 @@ pub async fn query_storage_prefix( where T: BorshDeserialize, { - let values = unwrap_client_response(RPC.storage_prefix(client, key).await); + let values = + unwrap_client_response(RPC.shell().storage_prefix(client, key).await); let decode = |PrefixValue { key, value }: PrefixValue| match T::try_from_slice( &value[..], @@ -1293,7 +1296,7 @@ pub async fn query_has_storage_key( client: &HttpClient, key: &storage::Key, ) -> bool { - unwrap_client_response(RPC.storage_has_key(client, key).await) + unwrap_client_response(RPC.shell().storage_has_key(client, key).await) } /// Represents a query for an event pertaining to the specified transaction diff --git a/shared/src/ledger/queries/mod.rs b/shared/src/ledger/queries/mod.rs index bf3d490b5c4..4d160dd9e09 100644 --- a/shared/src/ledger/queries/mod.rs +++ 
b/shared/src/ledger/queries/mod.rs @@ -1,7 +1,7 @@ //! Ledger read-only queries can be handled and dispatched via the [`RPC`] //! defined via `router!` macro. -use tendermint_proto::crypto::{ProofOp, ProofOps}; +use shell::{Shell, SHELL}; #[cfg(any(test, feature = "async-client"))] pub use types::Client; pub use types::{ @@ -9,35 +9,17 @@ pub use types::{ }; use super::storage::{DBIter, StorageHasher, DB}; -use super::storage_api::{self, ResultExt, StorageRead}; -use crate::types::storage::{self, Epoch, PrefixValue}; -use crate::types::transaction::TxResult; -#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] -use crate::types::transaction::{DecryptedTx, TxType}; +use super::storage_api; #[macro_use] mod router; +mod shell; mod types; // Most commonly expected patterns should be declared first router! {RPC, - // Epoch of the last committed block - ( "epoch" ) -> Epoch = epoch, - - // Raw storage access - read value - ( "value" / [storage_key: storage::Key] ) - -> Option> = storage_value, - - // Dry run a transaction - ( "dry_run_tx" ) -> TxResult = dry_run_tx, - - // Raw storage access - prefix iterator - ( "prefix" / [storage_key: storage::Key] ) - -> Vec = storage_prefix, - - // Raw storage access - is given storage key present? - ( "has_key" / [storage_key: storage::Key] ) - -> bool = storage_has_key, + // Shell provides storage read access, block metadata and can dry-run a tx + ( "shell" ) = (sub SHELL), } /// Handle RPC query request in the ledger. On success, returns response with @@ -86,188 +68,6 @@ pub fn require_no_proof(request: &RequestQuery) -> storage_api::Result<()> { Ok(()) } -// Handlers: - -#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] -fn dry_run_tx( - mut ctx: RequestCtx<'_, D, H>, - request: &RequestQuery, -) -> storage_api::Result> -where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, -{ - use super::gas::BlockGasMeter; - use super::storage::write_log::WriteLog; - use crate::proto::Tx; - - let mut gas_meter = BlockGasMeter::default(); - let mut write_log = WriteLog::default(); - let tx = Tx::try_from(&request.data[..]).into_storage_result()?; - let tx = TxType::Decrypted(DecryptedTx::Decrypted(tx)); - let data = super::protocol::apply_tx( - tx, - request.data.len(), - &mut gas_meter, - &mut write_log, - ctx.storage, - &mut ctx.vp_wasm_cache, - &mut ctx.tx_wasm_cache, - ) - .into_storage_result()?; - Ok(ResponseQuery { - data, - ..ResponseQuery::default() - }) -} - -#[cfg(not(all(feature = "wasm-runtime", feature = "ferveo-tpke")))] -fn dry_run_tx( - _ctx: RequestCtx<'_, D, H>, - _request: &RequestQuery, -) -> storage_api::Result> -where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, -{ - unimplemented!( - "dry_run_tx request handler requires \"wasm-runtime\" and \ - \"ferveo-tpke\" features enabled." 
- ) -} - -fn epoch( - ctx: RequestCtx<'_, D, H>, - request: &RequestQuery, -) -> storage_api::Result> -where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, -{ - require_latest_height(&ctx, request)?; - require_no_proof(request)?; - - let data = ctx.storage.last_epoch; - Ok(ResponseQuery { - data, - ..Default::default() - }) -} - -fn storage_value( - ctx: RequestCtx<'_, D, H>, - request: &RequestQuery, - storage_key: storage::Key, -) -> storage_api::Result>>> -where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, -{ - match ctx - .storage - .read_with_height(&storage_key, request.height) - .into_storage_result()? - { - (Some(value), _gas) => { - let proof = if request.prove { - let proof = ctx - .storage - .get_existence_proof( - &storage_key, - value.clone().into(), - request.height, - ) - .into_storage_result()?; - Some(proof.into()) - } else { - None - }; - Ok(ResponseQuery { - data: Some(value), - proof_ops: proof, - ..Default::default() - }) - } - (None, _gas) => { - let proof = if request.prove { - let proof = ctx - .storage - .get_non_existence_proof(&storage_key, request.height) - .into_storage_result()?; - Some(proof.into()) - } else { - None - }; - Ok(ResponseQuery { - data: None, - proof_ops: proof, - info: format!("No value found for key: {}", storage_key), - }) - } - } -} - -fn storage_prefix( - ctx: RequestCtx<'_, D, H>, - request: &RequestQuery, - storage_key: storage::Key, -) -> storage_api::Result>> -where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, -{ - require_latest_height(&ctx, request)?; - - let (iter, _gas) = ctx.storage.iter_prefix(&storage_key); - let data: storage_api::Result> = iter - .map(|(key, value, _gas)| { - let key = storage::Key::parse(key).into_storage_result()?; - Ok(PrefixValue { key, value }) - }) - .collect(); - let data = data?; - let proof_ops = if request.prove { - let mut ops = vec![]; - for PrefixValue { key, value } in &data { - let proof = ctx - .storage - .get_existence_proof(key, value.clone().into(), request.height) - .into_storage_result()?; - let mut cur_ops: Vec = - proof.ops.into_iter().map(|op| op.into()).collect(); - ops.append(&mut cur_ops); - } - // ops is not empty in this case - Some(ProofOps { ops }) - } else { - None - }; - Ok(ResponseQuery { - data, - proof_ops, - ..Default::default() - }) -} - -fn storage_has_key( - ctx: RequestCtx<'_, D, H>, - request: &RequestQuery, - storage_key: storage::Key, -) -> storage_api::Result> -where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, -{ - require_latest_height(&ctx, request)?; - require_no_proof(request)?; - - let data = StorageRead::has_key(ctx.storage, &storage_key)?; - Ok(ResponseQuery { - data, - ..Default::default() - }) -} - #[cfg(any(test, feature = "tendermint-rpc"))] /// Provides [`Client`] implementation for Tendermint RPC client pub mod tm { @@ -418,102 +218,3 @@ mod testing { } } } - -#[cfg(test)] -mod test { - use borsh::BorshDeserialize; - - use super::testing::TestClient; - use super::*; - use crate::ledger::storage_api::StorageWrite; - use crate::proto::Tx; - use crate::types::{address, token}; - - const TX_NO_OP_WASM: &str = "../wasm_for_tests/tx_no_op.wasm"; - - #[test] - fn test_queries_router_paths() { - let path = RPC.epoch_path(); - assert_eq!("/epoch", path); - - let token_addr = address::testing::established_address_1(); - let owner = address::testing::established_address_2(); - let key = 
token::balance_key(&token_addr, &owner); - let path = RPC.storage_value_path(&key); - assert_eq!(format!("/value/{}", key), path); - - let path = RPC.dry_run_tx_path(); - assert_eq!("/dry_run_tx", path); - - let path = RPC.storage_prefix_path(&key); - assert_eq!(format!("/prefix/{}", key), path); - - let path = RPC.storage_has_key_path(&key); - assert_eq!(format!("/has_key/{}", key), path); - } - - #[tokio::test] - async fn test_queries_router_with_client() -> storage_api::Result<()> { - // Initialize the `TestClient` - let mut client = TestClient::new(RPC); - - // Request last committed epoch - let read_epoch = RPC.epoch(&client).await.unwrap(); - let current_epoch = client.storage.last_epoch; - assert_eq!(current_epoch, read_epoch); - - // Request dry run tx - let tx_no_op = std::fs::read(TX_NO_OP_WASM).expect("cannot load wasm"); - let tx = Tx::new(tx_no_op, None); - let tx_bytes = tx.to_bytes(); - let result = RPC - .dry_run_tx_with_options(&client, Some(tx_bytes), None, false) - .await - .unwrap(); - assert!(result.data.is_accepted()); - - // Request storage value for a balance key ... - let token_addr = address::testing::established_address_1(); - let owner = address::testing::established_address_2(); - let balance_key = token::balance_key(&token_addr, &owner); - // ... there should be no value yet. - let read_balance = - RPC.storage_value(&client, &balance_key).await.unwrap(); - assert!(read_balance.is_none()); - - // Request storage prefix iterator - let balance_prefix = token::balance_prefix(&token_addr); - let read_balances = - RPC.storage_prefix(&client, &balance_prefix).await.unwrap(); - assert!(read_balances.is_empty()); - - // Request storage has key - let has_balance_key = - RPC.storage_has_key(&client, &balance_key).await.unwrap(); - assert!(!has_balance_key); - - // Then write some balance ... - let balance = token::Amount::from(1000); - StorageWrite::write(&mut client.storage, &balance_key, balance)?; - // ... there should be the same value now - let read_balance = - RPC.storage_value(&client, &balance_key).await.unwrap(); - assert_eq!( - balance, - token::Amount::try_from_slice(&read_balance.unwrap()).unwrap() - ); - - // Request storage prefix iterator - let balance_prefix = token::balance_prefix(&token_addr); - let read_balances = - RPC.storage_prefix(&client, &balance_prefix).await.unwrap(); - assert_eq!(read_balances.len(), 1); - - // Request storage has key - let has_balance_key = - RPC.storage_has_key(&client, &balance_key).await.unwrap(); - assert!(has_balance_key); - - Ok(()) - } -} diff --git a/shared/src/ledger/queries/router.rs b/shared/src/ledger/queries/router.rs index 317c532cb78..332cad3f936 100644 --- a/shared/src/ledger/queries/router.rs +++ b/shared/src/ledger/queries/router.rs @@ -495,7 +495,7 @@ macro_rules! router_type { impl $name { #[doc = "Construct this router as a root router"] - const fn new() -> Self { + pub const fn new() -> Self { Self { prefix: String::new(), } @@ -503,7 +503,7 @@ macro_rules! 
router_type { #[allow(dead_code)] #[doc = "Construct this router as a sub-router at the given prefix path"] - const fn sub(prefix: String) -> Self { + pub const fn sub(prefix: String) -> Self { Self { prefix, } diff --git a/shared/src/ledger/queries/shell.rs b/shared/src/ledger/queries/shell.rs new file mode 100644 index 00000000000..2304a0421e0 --- /dev/null +++ b/shared/src/ledger/queries/shell.rs @@ -0,0 +1,332 @@ +use tendermint_proto::crypto::{ProofOp, ProofOps}; + +use crate::ledger::queries::types::{RequestCtx, RequestQuery, ResponseQuery}; +use crate::ledger::queries::{require_latest_height, require_no_proof}; +use crate::ledger::storage::{DBIter, StorageHasher, DB}; +use crate::ledger::storage_api::{self, ResultExt, StorageRead}; +use crate::types::storage::{self, Epoch, PrefixValue}; +use crate::types::transaction::TxResult; +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +use crate::types::transaction::{DecryptedTx, TxType}; + +router! {SHELL, + // Epoch of the last committed block + ( "epoch" ) -> Epoch = epoch, + + // Raw storage access - read value + ( "value" / [storage_key: storage::Key] ) + -> Option> = storage_value, + + // Dry run a transaction + ( "dry_run_tx" ) -> TxResult = dry_run_tx, + + // Raw storage access - prefix iterator + ( "prefix" / [storage_key: storage::Key] ) + -> Vec = storage_prefix, + + // Raw storage access - is given storage key present? + ( "has_key" / [storage_key: storage::Key] ) + -> bool = storage_has_key, +} + +// Handlers: + +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +fn dry_run_tx( + mut ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, +) -> storage_api::Result> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + use crate::ledger::gas::BlockGasMeter; + use crate::ledger::protocol; + use crate::ledger::storage::write_log::WriteLog; + use crate::proto::Tx; + + let mut gas_meter = BlockGasMeter::default(); + let mut write_log = WriteLog::default(); + let tx = Tx::try_from(&request.data[..]).into_storage_result()?; + let tx = TxType::Decrypted(DecryptedTx::Decrypted(tx)); + let data = protocol::apply_tx( + tx, + request.data.len(), + &mut gas_meter, + &mut write_log, + ctx.storage, + &mut ctx.vp_wasm_cache, + &mut ctx.tx_wasm_cache, + ) + .into_storage_result()?; + Ok(ResponseQuery { + data, + ..ResponseQuery::default() + }) +} + +#[cfg(not(all(feature = "wasm-runtime", feature = "ferveo-tpke")))] +fn dry_run_tx( + _ctx: RequestCtx<'_, D, H>, + _request: &RequestQuery, +) -> storage_api::Result> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + unimplemented!( + "dry_run_tx request handler requires \"wasm-runtime\" and \ + \"ferveo-tpke\" features enabled." + ) +} + +fn epoch( + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, +) -> storage_api::Result> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + require_latest_height(&ctx, request)?; + require_no_proof(request)?; + + let data = ctx.storage.last_epoch; + Ok(ResponseQuery { + data, + ..Default::default() + }) +} + +fn storage_value( + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, + storage_key: storage::Key, +) -> storage_api::Result>>> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + match ctx + .storage + .read_with_height(&storage_key, request.height) + .into_storage_result()? 
+ { + (Some(data), _gas) => { + let proof = if request.prove { + let proof = ctx + .storage + .get_existence_proof( + &storage_key, + data.clone(), + request.height, + ) + .into_storage_result()?; + Some(proof.into()) + } else { + None + }; + Ok(ResponseQuery { + data: Some(data), + proof_ops: proof, + ..Default::default() + }) + } + (None, _gas) => { + let proof = if request.prove { + let proof = ctx + .storage + .get_non_existence_proof(&storage_key, request.height) + .into_storage_result()?; + Some(proof.into()) + } else { + None + }; + Ok(ResponseQuery { + data: None, + proof_ops: proof, + info: format!("No value found for key: {}", storage_key), + }) + } + } +} + +fn storage_prefix( + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, + storage_key: storage::Key, +) -> storage_api::Result>> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + require_latest_height(&ctx, request)?; + + let (iter, _gas) = ctx.storage.iter_prefix(&storage_key); + let data: storage_api::Result> = iter + .map(|(key, value, _gas)| { + let key = storage::Key::parse(key).into_storage_result()?; + Ok(PrefixValue { key, value }) + }) + .collect(); + let data = data?; + let proof_ops = if request.prove { + let mut ops = vec![]; + for PrefixValue { key, value } in &data { + let proof = ctx + .storage + .get_existence_proof(key, value.clone(), request.height) + .into_storage_result()?; + let mut cur_ops: Vec = + proof.ops.into_iter().map(|op| op.into()).collect(); + ops.append(&mut cur_ops); + } + // ops is not empty in this case + Some(ProofOps { ops }) + } else { + None + }; + Ok(ResponseQuery { + data, + proof_ops, + ..Default::default() + }) +} + +fn storage_has_key( + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, + storage_key: storage::Key, +) -> storage_api::Result> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + require_latest_height(&ctx, request)?; + require_no_proof(request)?; + + let data = StorageRead::has_key(ctx.storage, &storage_key)?; + Ok(ResponseQuery { + data, + ..Default::default() + }) +} + +#[cfg(test)] +mod test { + use borsh::BorshDeserialize; + + use crate::ledger::queries::testing::TestClient; + use crate::ledger::queries::RPC; + use crate::ledger::storage_api::{self, StorageWrite}; + use crate::proto::Tx; + use crate::types::{address, token}; + + const TX_NO_OP_WASM: &str = "../wasm_for_tests/tx_no_op.wasm"; + + #[test] + fn test_shell_queries_router_paths() { + let path = RPC.shell().epoch_path(); + assert_eq!("/shell/epoch", path); + + let token_addr = address::testing::established_address_1(); + let owner = address::testing::established_address_2(); + let key = token::balance_key(&token_addr, &owner); + let path = RPC.shell().storage_value_path(&key); + assert_eq!(format!("/shell/value/{}", key), path); + + let path = RPC.shell().dry_run_tx_path(); + assert_eq!("/shell/dry_run_tx", path); + + let path = RPC.shell().storage_prefix_path(&key); + assert_eq!(format!("/shell/prefix/{}", key), path); + + let path = RPC.shell().storage_has_key_path(&key); + assert_eq!(format!("/shell/has_key/{}", key), path); + } + + #[tokio::test] + async fn test_shell_queries_router_with_client() -> storage_api::Result<()> + { + // Initialize the `TestClient` + let mut client = TestClient::new(RPC); + + // Request last committed epoch + let read_epoch = RPC.shell().epoch(&client).await.unwrap(); + let current_epoch = client.storage.last_epoch; + assert_eq!(current_epoch, read_epoch); + + // 
Request dry run tx + let tx_no_op = std::fs::read(TX_NO_OP_WASM).expect("cannot load wasm"); + let tx = Tx::new(tx_no_op, None); + let tx_bytes = tx.to_bytes(); + let result = RPC + .shell() + .dry_run_tx_with_options(&client, Some(tx_bytes), None, false) + .await + .unwrap(); + assert!(result.data.is_accepted()); + + // Request storage value for a balance key ... + let token_addr = address::testing::established_address_1(); + let owner = address::testing::established_address_2(); + let balance_key = token::balance_key(&token_addr, &owner); + // ... there should be no value yet. + let read_balance = RPC + .shell() + .storage_value(&client, &balance_key) + .await + .unwrap(); + assert!(read_balance.is_none()); + + // Request storage prefix iterator + let balance_prefix = token::balance_prefix(&token_addr); + let read_balances = RPC + .shell() + .storage_prefix(&client, &balance_prefix) + .await + .unwrap(); + assert!(read_balances.is_empty()); + + // Request storage has key + let has_balance_key = RPC + .shell() + .storage_has_key(&client, &balance_key) + .await + .unwrap(); + assert!(!has_balance_key); + + // Then write some balance ... + let balance = token::Amount::from(1000); + StorageWrite::write(&mut client.storage, &balance_key, balance)?; + // ... there should be the same value now + let read_balance = RPC + .shell() + .storage_value(&client, &balance_key) + .await + .unwrap(); + assert_eq!( + balance, + token::Amount::try_from_slice(&read_balance.unwrap()).unwrap() + ); + + // Request storage prefix iterator + let balance_prefix = token::balance_prefix(&token_addr); + let read_balances = RPC + .shell() + .storage_prefix(&client, &balance_prefix) + .await + .unwrap(); + assert_eq!(read_balances.len(), 1); + + // Request storage has key + let has_balance_key = RPC + .shell() + .storage_has_key(&client, &balance_key) + .await + .unwrap(); + assert!(has_balance_key); + + Ok(()) + } +} From e0f3187c3c7ab79bb40be021167019596438abf9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 14 Oct 2022 16:38:56 +0200 Subject: [PATCH 13/17] router: add `with_options` for handlers that use request/response --- apps/src/lib/client/rpc.rs | 32 ++-- shared/src/ledger/queries/mod.rs | 11 ++ shared/src/ledger/queries/router.rs | 249 ++++++++++++++++++++++------ shared/src/ledger/queries/shell.rs | 58 +++---- 4 files changed, 248 insertions(+), 102 deletions(-) diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index 26f60313f0b..f6af362aeb9 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -54,10 +54,12 @@ pub async fn query_epoch(args: args::Query) -> Epoch { /// Query the raw bytes of given storage key pub async fn query_raw_bytes(_ctx: Context, args: args::QueryRawBytes) { let client = HttpClient::new(args.query.ledger_address).unwrap(); - let bytes = unwrap_client_response( - RPC.shell().storage_value(&client, &args.storage_key).await, + let response = unwrap_client_response( + RPC.shell() + .storage_value(&client, None, None, false, &args.storage_key) + .await, ); - match bytes { + match response.data { Some(bytes) => println!("Found data: 0x{}", HEXLOWER.encode(&bytes)), None => println!("No data found for key {}", args.storage_key), } @@ -1032,9 +1034,7 @@ pub async fn dry_run_tx(ledger_address: &TendermintAddress, tx_bytes: Vec) { let client = HttpClient::new(ledger_address.clone()).unwrap(); let (data, height, prove) = (Some(tx_bytes), None, false); let result = unwrap_client_response( - RPC.shell() - 
.dry_run_tx_with_options(&client, data, height, prove) - .await, + RPC.shell().dry_run_tx(&client, data, height, prove).await, ) .data; println!("Dry-run result: {}", result); @@ -1249,9 +1249,12 @@ pub async fn query_storage_value( where T: BorshDeserialize, { - let bytes = - unwrap_client_response(RPC.shell().storage_value(client, key).await); - bytes.map(|bytes| { + let response = unwrap_client_response( + RPC.shell() + .storage_value(client, None, None, false, key) + .await, + ); + response.data.map(|bytes| { T::try_from_slice(&bytes[..]).unwrap_or_else(|err| { eprintln!("Error decoding the value: {}", err); cli::safe_exit(1) @@ -1269,8 +1272,11 @@ pub async fn query_storage_prefix( where T: BorshDeserialize, { - let values = - unwrap_client_response(RPC.shell().storage_prefix(client, key).await); + let values = unwrap_client_response( + RPC.shell() + .storage_prefix(client, None, None, false, key) + .await, + ); let decode = |PrefixValue { key, value }: PrefixValue| match T::try_from_slice( &value[..], @@ -1284,10 +1290,10 @@ where } Ok(value) => Some((key, value)), }; - if values.is_empty() { + if values.data.is_empty() { None } else { - Some(values.into_iter().filter_map(decode)) + Some(values.data.into_iter().filter_map(decode)) } } diff --git a/shared/src/ledger/queries/mod.rs b/shared/src/ledger/queries/mod.rs index 4d160dd9e09..8b31376be47 100644 --- a/shared/src/ledger/queries/mod.rs +++ b/shared/src/ledger/queries/mod.rs @@ -68,6 +68,17 @@ pub fn require_no_proof(request: &RequestQuery) -> storage_api::Result<()> { Ok(()) } +/// For queries that don't use request data, require that there are no data +/// attached. +pub fn require_no_data(request: &RequestQuery) -> storage_api::Result<()> { + if !request.data.is_empty() { + return Err(storage_api::Error::new_const( + "This query doesn't accept request data", + )); + } + Ok(()) +} + #[cfg(any(test, feature = "tendermint-rpc"))] /// Provides [`Client`] implementation for Tendermint RPC client pub mod tm { diff --git a/shared/src/ledger/queries/router.rs b/shared/src/ledger/queries/router.rs index 332cad3f936..33773d6eb4c 100644 --- a/shared/src/ledger/queries/router.rs +++ b/shared/src/ledger/queries/router.rs @@ -48,21 +48,19 @@ macro_rules! handle_match { return $router.internal_handle($ctx, $request, $start) }; - // Handler function + // Handler function that uses a request (`with_options`) ( $ctx:ident, $request:ident, $start:ident, $end:ident, - $handle:tt, ( $( $matched_args:ident, )* ), + (with_options $handle:tt), ( $( $matched_args:ident, )* ), ) => { // check that we're at the end of the path - trailing slash is optional if !($end == $request.path.len() || // ignore trailing slashes $end == $request.path.len() - 1 && &$request.path[$end..] == "/") { // we're not at the end, no match - // println!("Not fully matched"); + println!("Not fully matched"); break } - // If you get a compile error from here with `expected function, found - // queries::Storage`, you're probably missing the marker `(sub _)` let result = $handle($ctx, $request, $( $matched_args ),* )?; let data = borsh::BorshSerialize::try_to_vec(&result.data).into_storage_result()?; return Ok($crate::ledger::queries::EncodedResponseQuery { @@ -71,6 +69,35 @@ macro_rules! 
handle_match { proof_ops: result.proof_ops, }); }; + + // Handler function that doesn't use the request, just the path args, if any + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + $handle:tt, ( $( $matched_args:ident, )* ), + ) => { + // check that we're at the end of the path - trailing slash is optional + if !($end == $request.path.len() || + // ignore trailing slashes + $end == $request.path.len() - 1 && &$request.path[$end..] == "/") { + // we're not at the end, no match + // println!("Not fully matched"); + break + } + // Check that the request is not sent with unsupported non-default + $crate::ledger::queries::require_latest_height(&$ctx, $request)?; + $crate::ledger::queries::require_no_proof($request)?; + $crate::ledger::queries::require_no_data($request)?; + + // If you get a compile error from here with `expected function, found + // queries::Storage`, you're probably missing the marker `(sub _)` + let data = $handle($ctx, $( $matched_args ),* )?; + let data = borsh::BorshSerialize::try_to_vec(&data).into_storage_result()?; + return Ok($crate::ledger::queries::EncodedResponseQuery { + data, + info: Default::default(), + proof_ops: None, + }); + }; } /// Using TT muncher pattern on the `$tail` pattern, this macro recursively @@ -168,16 +195,18 @@ macro_rules! try_match_segments { ( $( $matched_args, )* $arg, ), ( $( $( $tail )/ * )? ) ); }; - // Special case of the pattern below. When there are no more args in the - // tail and the handle isn't a sub-router (its fragment is ident), we try - // to match the rest of the path till the end. This is specifically needed - // for storage methods, which have `storage::Key` param that includes - // path-like slashes. + // Special case of the typed argument pattern below. When there are no more + // args in the tail and the handle isn't a sub-router (its handler is + // ident), we try to match the rest of the path till the end. + // + // This is specifically needed for storage methods, which have + // `storage::Key` param that includes path-like slashes. // // Try to match and parse a typed argument, declares the expected $arg into // type $t, if it can be parsed ( - $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:ident, + $ctx:ident, $request:ident, $start:ident, $end:ident, + $handle:ident, ( $( $matched_args:ident, )* ), ( [$arg:ident : $arg_ty:ty] @@ -202,6 +231,41 @@ macro_rules! try_match_segments { ( $( $matched_args, )* $arg, ), () ); }; + // One more special case of the typed argument pattern below for a handler + // `with_options`, where we try to match the rest of the path till the end. + // + // This is specifically needed for storage methods, which have + // `storage::Key` param that includes path-like slashes. 
+ // + // Try to match and parse a typed argument, declares the expected $arg into + // type $t, if it can be parsed + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + (with_options $handle:ident), + ( $( $matched_args:ident, )* ), + ( + [$arg:ident : $arg_ty:ty] + ) + ) => { + let $arg: $arg_ty; + $end = $request.path.len(); + match $request.path[$start..$end].parse::<$arg_ty>() { + Ok(parsed) => { + println!("Parsed {}", parsed); + $arg = parsed + }, + Err(_) => + { + println!("Cannot parse {} from {}", stringify!($arg_ty), &$request.path[$start..$end]); + // If arg cannot be parsed, try to skip to next pattern + break + } + } + // Invoke the terminal pattern + try_match_segments!($ctx, $request, $start, $end, (with_options $handle), + ( $( $matched_args, )* $arg, ), () ); + }; + // Try to match and parse a typed argument, declares the expected $arg into // type $t, if it can be parsed ( @@ -308,13 +372,12 @@ macro_rules! pattern_to_prefix { /// Turn patterns and their handlers into methods for the router, where each /// dynamic pattern is turned into a parameter for the method. macro_rules! pattern_and_handler_to_method { - // terminal rule + // terminal rule for $handle that uses request (`with_options`) ( ( $( $param:tt: $param_ty:ty ),* ) [ $( { $prefix:expr } ),* ] - // $( $return_type:path )?, $return_type:path, - $handle:tt, + (with_options $handle:tt), () ) => { // paste! used to construct the `fn $handle_path`'s name. @@ -328,29 +391,6 @@ macro_rules! pattern_and_handler_to_method { .filter_map(|x| x), "/") } - #[allow(dead_code)] - #[allow(clippy::too_many_arguments)] - #[cfg(any(test, feature = "async-client"))] - #[doc = "Request a simple borsh-encoded value from `" $handle "`, \ - without any additional request data, specified block height or \ - proof."] - pub async fn $handle(&self, client: &CLIENT, - $( $param: &$param_ty ),* - ) - -> std::result::Result< - $return_type, - ::Error - > - where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { - let path = self.[<$handle _path>]( $( $param ),* ); - - let data = client.simple_request(path).await?; - - let decoded: $return_type = - borsh::BorshDeserialize::try_from_slice(&data[..])?; - Ok(decoded) - } - #[allow(dead_code)] #[allow(clippy::too_many_arguments)] #[cfg(any(test, feature = "async-client"))] @@ -358,7 +398,7 @@ macro_rules! pattern_and_handler_to_method { `dry_run_tx`), optionally specified height (supported for \ `storage_value`) and optional proof (supported for \ `storage_value` and `storage_prefix`) from `" $handle "`."] - pub async fn [<$handle _with_options>](&self, client: &CLIENT, + pub async fn $handle(&self, client: &CLIENT, data: Option>, height: Option<$crate::types::storage::BlockHeight>, prove: bool, @@ -387,6 +427,50 @@ macro_rules! pattern_and_handler_to_method { } }; + // terminal rule that $handle that doesn't use request + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $return_type:path, + $handle:tt, + () + ) => { + // paste! used to construct the `fn $handle_path`'s name. + paste::paste! 
{ + #[allow(dead_code)] + #[doc = "Get a path to query `" $handle "`."] + pub fn [<$handle _path>](&self, $( $param: &$param_ty ),* ) -> String { + itertools::join( + [ Some(std::borrow::Cow::from(&self.prefix)), $( $prefix ),* ] + .into_iter() + .filter_map(|x| x), "/") + } + + #[allow(dead_code)] + #[allow(clippy::too_many_arguments)] + #[cfg(any(test, feature = "async-client"))] + #[doc = "Request a simple borsh-encoded value from `" $handle "`, \ + without any additional request data, specified block height or \ + proof."] + pub async fn $handle(&self, client: &CLIENT, + $( $param: &$param_ty ),* + ) + -> std::result::Result< + $return_type, + ::Error + > + where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + let path = self.[<$handle _path>]( $( $param ),* ); + + let data = client.simple_request(path).await?; + + let decoded: $return_type = + borsh::BorshDeserialize::try_from_slice(&data[..])?; + Ok(decoded) + } + } + }; + // sub-pattern ( $param:tt @@ -581,6 +665,61 @@ macro_rules! router_type { /// methods (enabled with `feature = "async-client"`). /// /// The `router!` macro implements greedy matching algorithm. +/// +/// ## Examples +/// +/// ```rust,ignore +/// router! {ROOT, +/// // This pattern matches `/pattern_a/something`, where `something` can be +/// // parsed with `FromStr` into `ArgType`. +/// ( "pattern_a" / [typed_dynamic_arg: ArgType] ) -> ReturnType = handler, +/// +/// ( "pattern_b" / [optional_dynamic_arg: opt ArgType] ) -> ReturnType = +/// handler, +/// +/// // Untyped dynamic arg is a string slice `&str` +/// ( "pattern_c" / [untyped_dynamic_arg] ) -> ReturnType = handler, +/// +/// // The handler additionally receives the `RequestQuery`, which can have +/// // some data attached, specified block height and ask for a proof. It +/// // returns `ResponseQuery`, which can have some `info` string and a proof. +/// ( "pattern_d" ) -> ReturnType = (with_options handler), +/// +/// ( "another" / "pattern" / "that" / "goes" / "deep" ) -> ReturnType = handler, +/// +/// // Inlined sub-tree +/// ( "subtree" / [this_is_fine: ArgType] ) = { +/// ( "a" ) -> u64 = a_handler, +/// ( "b" / [another_arg] ) -> u64 = b_handler, +/// } +/// +/// // Imported sub-router - The prefix can only have literal segments +/// ( "sub" / "no_dynamic_args" ) = (sub SUB_ROUTER), +/// } +/// +/// router! {SUB_ROUTER, +/// ( "pattern" ) -> ReturnType = handler, +/// } +/// ``` +/// +/// Handler functions used in the patterns should have the expected signature: +/// ```rust,ignore +/// fn handler(ctx: RequestCtx<'_, D, H>, args ...) +/// -> storage_api::Result +/// where +/// D: 'static + DB + for<'iter> DBIter<'iter> + Sync, +/// H: 'static + StorageHasher + Sync; +/// ``` +/// +/// If the handler wants to support request options, it can be defined as +/// `(with_options $handler)` and then the expected signature is: +/// ```rust,ignore +/// fn handler(ctx: RequestCtx<'_, D, H>, request: &RequestQuery, args +/// ...) -> storage_api::Result> +/// where +/// D: 'static + DB + for<'iter> DBIter<'iter> + Sync, +/// H: 'static + StorageHasher + Sync; +/// ``` #[macro_export] macro_rules! router { { $name:ident, $( $pattern:tt $( -> $return_type:path )? = $handle:tt , )* } => ( @@ -659,9 +798,8 @@ mod test_rpc_handlers { $( pub fn $name( _ctx: RequestCtx<'_, D, H>, - _request: &RequestQuery, $( $( $param: $param_ty ),* )? 
- ) -> storage_api::Result> + ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -670,10 +808,7 @@ mod test_rpc_handlers { $( $( let data = format!("{data}/{}", $param); )* )? - Ok(ResponseQuery { - data, - ..ResponseQuery::default() - }) + Ok(data) } )* }; @@ -698,11 +833,10 @@ mod test_rpc_handlers { /// support optional args. pub fn b3iii( _ctx: RequestCtx<'_, D, H>, - _request: &RequestQuery, a1: token::Amount, a2: token::Amount, a3: Option, - ) -> storage_api::Result> + ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -711,22 +845,18 @@ mod test_rpc_handlers { let data = format!("{data}/{}", a1); let data = format!("{data}/{}", a2); let data = a3.map(|a3| format!("{data}/{}", a3)).unwrap_or(data); - Ok(ResponseQuery { - data, - ..ResponseQuery::default() - }) + Ok(data) } /// This handler is hand-written, because the test helper macro doesn't /// support optional args. pub fn b3iiii( _ctx: RequestCtx<'_, D, H>, - _request: &RequestQuery, a1: token::Amount, a2: token::Amount, a3: Option, a4: Option, - ) -> storage_api::Result> + ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -736,6 +866,20 @@ mod test_rpc_handlers { let data = format!("{data}/{}", a2); let data = a3.map(|a3| format!("{data}/{}", a3)).unwrap_or(data); let data = a4.map(|a4| format!("{data}/{}", a4)).unwrap_or(data); + Ok(data) + } + + /// This handler is hand-written, because the test helper macro doesn't + /// support handlers with `with_options`. + pub fn c( + _ctx: RequestCtx<'_, D, H>, + _request: &RequestQuery, + ) -> storage_api::Result> + where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + { + let data = "c".to_owned(); Ok(ResponseQuery { data, ..ResponseQuery::default() @@ -774,6 +918,7 @@ mod test_rpc { ( "iiii" / [a3: opt token::Amount] / "xyz" / [a4: opt Epoch] ) -> String = b3iiii, }, }, + ( "c" ) -> String = (with_options c), } router! {TEST_SUB_RPC, diff --git a/shared/src/ledger/queries/shell.rs b/shared/src/ledger/queries/shell.rs index 2304a0421e0..8ba800023c8 100644 --- a/shared/src/ledger/queries/shell.rs +++ b/shared/src/ledger/queries/shell.rs @@ -1,7 +1,7 @@ use tendermint_proto::crypto::{ProofOp, ProofOps}; +use crate::ledger::queries::require_latest_height; use crate::ledger::queries::types::{RequestCtx, RequestQuery, ResponseQuery}; -use crate::ledger::queries::{require_latest_height, require_no_proof}; use crate::ledger::storage::{DBIter, StorageHasher, DB}; use crate::ledger::storage_api::{self, ResultExt, StorageRead}; use crate::types::storage::{self, Epoch, PrefixValue}; @@ -15,14 +15,14 @@ router! {SHELL, // Raw storage access - read value ( "value" / [storage_key: storage::Key] ) - -> Option> = storage_value, + -> Option> = (with_options storage_value), // Dry run a transaction - ( "dry_run_tx" ) -> TxResult = dry_run_tx, + ( "dry_run_tx" ) -> TxResult = (with_options dry_run_tx), // Raw storage access - prefix iterator ( "prefix" / [storage_key: storage::Key] ) - -> Vec = storage_prefix, + -> Vec = (with_options storage_prefix), // Raw storage access - is given storage key present? 
( "has_key" / [storage_key: storage::Key] ) @@ -80,22 +80,13 @@ where ) } -fn epoch( - ctx: RequestCtx<'_, D, H>, - request: &RequestQuery, -) -> storage_api::Result> +fn epoch(ctx: RequestCtx<'_, D, H>) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - require_latest_height(&ctx, request)?; - require_no_proof(request)?; - let data = ctx.storage.last_epoch; - Ok(ResponseQuery { - data, - ..Default::default() - }) + Ok(data) } fn storage_value( @@ -112,13 +103,13 @@ where .read_with_height(&storage_key, request.height) .into_storage_result()? { - (Some(data), _gas) => { + (Some(value), _gas) => { let proof = if request.prove { let proof = ctx .storage .get_existence_proof( &storage_key, - data.clone(), + value.clone().into(), request.height, ) .into_storage_result()?; @@ -127,7 +118,7 @@ where None }; Ok(ResponseQuery { - data: Some(data), + data: Some(value), proof_ops: proof, ..Default::default() }) @@ -175,7 +166,7 @@ where for PrefixValue { key, value } in &data { let proof = ctx .storage - .get_existence_proof(key, value.clone(), request.height) + .get_existence_proof(key, value.clone().into(), request.height) .into_storage_result()?; let mut cur_ops: Vec = proof.ops.into_iter().map(|op| op.into()).collect(); @@ -195,21 +186,14 @@ where fn storage_has_key( ctx: RequestCtx<'_, D, H>, - request: &RequestQuery, storage_key: storage::Key, -) -> storage_api::Result> +) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - require_latest_height(&ctx, request)?; - require_no_proof(request)?; - let data = StorageRead::has_key(ctx.storage, &storage_key)?; - Ok(ResponseQuery { - data, - ..Default::default() - }) + Ok(data) } #[cfg(test)] @@ -262,7 +246,7 @@ mod test { let tx_bytes = tx.to_bytes(); let result = RPC .shell() - .dry_run_tx_with_options(&client, Some(tx_bytes), None, false) + .dry_run_tx(&client, Some(tx_bytes), None, false) .await .unwrap(); assert!(result.data.is_accepted()); @@ -274,19 +258,19 @@ mod test { // ... there should be no value yet. let read_balance = RPC .shell() - .storage_value(&client, &balance_key) + .storage_value(&client, None, None, false, &balance_key) .await .unwrap(); - assert!(read_balance.is_none()); + assert!(read_balance.data.is_none()); // Request storage prefix iterator let balance_prefix = token::balance_prefix(&token_addr); let read_balances = RPC .shell() - .storage_prefix(&client, &balance_prefix) + .storage_prefix(&client, None, None, false, &balance_prefix) .await .unwrap(); - assert!(read_balances.is_empty()); + assert!(read_balances.data.is_empty()); // Request storage has key let has_balance_key = RPC @@ -302,22 +286,22 @@ mod test { // ... 
there should be the same value now let read_balance = RPC .shell() - .storage_value(&client, &balance_key) + .storage_value(&client, None, None, false, &balance_key) .await .unwrap(); assert_eq!( balance, - token::Amount::try_from_slice(&read_balance.unwrap()).unwrap() + token::Amount::try_from_slice(&read_balance.data.unwrap()).unwrap() ); // Request storage prefix iterator let balance_prefix = token::balance_prefix(&token_addr); let read_balances = RPC .shell() - .storage_prefix(&client, &balance_prefix) + .storage_prefix(&client, None, None, false, &balance_prefix) .await .unwrap(); - assert_eq!(read_balances.len(), 1); + assert_eq!(read_balances.data.len(), 1); // Request storage has key let has_balance_key = RPC From c87d5bf68794e835e4e79c6f31e258820e85d3cc Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 24 Oct 2022 13:50:06 +0000 Subject: [PATCH 14/17] [ci] wasm checksums update --- wasm/checksums.json | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/wasm/checksums.json b/wasm/checksums.json index f3e11c7ea5f..98d4134fd31 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,18 +1,18 @@ { - "tx_bond.wasm": "tx_bond.99da6abae7acd0a67341b8581bc2b9667cadd8d8c947086a6ed62cb8e5ab9f01.wasm", - "tx_ibc.wasm": "tx_ibc.449da8289b55d2e8ee4b80c4a271f0f82c14bc52f5f1cc141fc2fc25d7c379dc.wasm", - "tx_init_account.wasm": "tx_init_account.c7cf064e8d03315763d0a1c75b9fc0d44d98eab6a2943a82053a8d555861a14e.wasm", - "tx_init_nft.wasm": "tx_init_nft.7e7a5a2678da391ee89b02f08c75fab8b9f48a40e9d4d871d159694080ca41c0.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.be6c8b24bc0419a7242df6ffada532a17730fe13180fce16a91b310892d6ebad.wasm", - "tx_init_validator.wasm": "tx_init_validator.419c2cd5ddfdcc728ea41903e92ed05e60679086babf0feb8146a1a5c1c7ad79.wasm", - "tx_mint_nft.wasm": "tx_mint_nft.d5dcf0139e3fc332474db7ad9fd74f03af3c50433833a222a8ecf9880faedc1e.wasm", - "tx_transfer.wasm": "tx_transfer.15a74bbc4093bb0fd3e7943f597f88a444d6e7ea6e3a47401430e01945fe9ceb.wasm", - "tx_unbond.wasm": "tx_unbond.64ac67930786fc9d18631ed2d3a225a261114129a9ff57986347c904367efac5.wasm", - "tx_update_vp.wasm": "tx_update_vp.7148b2fef2f9a438ec8e3bf42d1e120ce690f0f69bb2b1c481711ee8b22cef54.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.5488e66b41ea1c45efdb6152fe8897c37e731ae97958db024bf1905651e0f54c.wasm", - "tx_withdraw.wasm": "tx_withdraw.976687bb02cde635a97de500ea72631f37b50516d34f72d5da3ca82b9617fe57.wasm", - "vp_nft.wasm": "vp_nft.92e1c20e54e67a8baa00bbeb61b3712cca32f34bd7e63e4f7f5da23bc303529a.wasm", - "vp_testnet_faucet.wasm": "vp_testnet_faucet.ddbda2d7f226d40a337eb5176f96050501996d6db8f71fa99c21382f6a631b41.wasm", - "vp_token.wasm": "vp_token.60879bfd767808fe6400096cb1527fe44c81e1893a4ff9ce593a3d36e89a45f6.wasm", - "vp_user.wasm": "vp_user.a51b5650c3303789857d4af18d1d4b342bfa5974fcb2b8d6eca906be998168c5.wasm" + "tx_bond.wasm": "tx_bond.72f7ca706910728e7cd2699d225147634981f2bd82fa5c5e1800f33dd7a9268f.wasm", + "tx_ibc.wasm": "tx_ibc.cf61f60f726b00c4e4e26a2bfbb54d5a9fb0503aeb7ae46d9cfcd269417c6de4.wasm", + "tx_init_account.wasm": "tx_init_account.be35e9136ce7c62236ef40a0ec3a4fbfdd1c1c5999b0943c0495895c574ac01b.wasm", + "tx_init_nft.wasm": "tx_init_nft.b8dd99751cf701dcc04ccdd795a37c84ad6e37c833cf2d83ca674b1a5b8b7246.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.da041bb1412b6d4bb303232aaf6bec9369138d4a94b70e5b2e2b87dadb0a47b9.wasm", + "tx_init_validator.wasm": 
"tx_init_validator.628232b3c034a63d11bb6b75be4f4ed831c41cacf1b710ee7cb6fd94d889d12e.wasm", + "tx_mint_nft.wasm": "tx_mint_nft.9bccf7930e21c59a03ff0aa7c85210bec8a320a87ed3d9c4bf000f98ade0cea2.wasm", + "tx_transfer.wasm": "tx_transfer.22f49259ce8c1534473959d699bbbfecb5b42499e9752785aa597c54f059e54b.wasm", + "tx_unbond.wasm": "tx_unbond.197405a2903fc1bf4a1b8f4bb2d901b9b0c455443d567907bd317d756afb16a5.wasm", + "tx_update_vp.wasm": "tx_update_vp.bb01d77ae24013ba7652c723bb4e446607b34dff10e4f01de4a6640aa80d282a.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.55f84360fc7f4cec4542e53272017ecae22e004bac0faf62550c8711895bbae5.wasm", + "tx_withdraw.wasm": "tx_withdraw.69dfa7f299a28ce25190402b231d2dd184431c5c3b9a691aae7b77a366c6d78b.wasm", + "vp_nft.wasm": "vp_nft.8234618f0a3de3d7a6dd75d1463d42a50a357b9783a83525c0093297a0b69738.wasm", + "vp_testnet_faucet.wasm": "vp_testnet_faucet.92e4bb1ac583963ebe69a818d670c72e0db2370fe7a5ab2216060603f8e18440.wasm", + "vp_token.wasm": "vp_token.34405f1e1568f6478606de9cd8bb3ff1ffb78f1aa14cfc32861b1c2cf4b6eddd.wasm", + "vp_user.wasm": "vp_user.b70ceb1616f51aae27672c1d4c1705392716dca185e0503d61b3457c4e773f78.wasm" } \ No newline at end of file From a966969a107c674cd0a1525a27b06e757853c9de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 28 Oct 2022 18:21:03 +0200 Subject: [PATCH 15/17] RPC: fix storage_value to return data as read w/o re-encoding in Option --- apps/src/lib/client/rpc.rs | 30 ++++++++-- shared/src/ledger/queries/router.rs | 85 +++++++++++++++++++++++++---- shared/src/ledger/queries/shell.rs | 42 ++++++++------ 3 files changed, 123 insertions(+), 34 deletions(-) diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index f6af362aeb9..d3f1303f416 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -59,9 +59,10 @@ pub async fn query_raw_bytes(_ctx: Context, args: args::QueryRawBytes) { .storage_value(&client, None, None, false, &args.storage_key) .await, ); - match response.data { - Some(bytes) => println!("Found data: 0x{}", HEXLOWER.encode(&bytes)), - None => println!("No data found for key {}", args.storage_key), + if !response.data.is_empty() { + println!("Found data: 0x{}", HEXLOWER.encode(&response.data)); + } else { + println!("No data found for key {}", args.storage_key); } } @@ -1249,17 +1250,34 @@ pub async fn query_storage_value( where T: BorshDeserialize, { + // In case `T` is a unit (only thing that encodes to 0 bytes), we have to + // use `storage_has_key` instead of `storage_value`, because `storage_value` + // returns 0 bytes when the key is not found. 
+ let maybe_unit = T::try_from_slice(&[]); + if let Ok(unit) = maybe_unit { + return if unwrap_client_response( + RPC.shell().storage_has_key(client, key).await, + ) { + Some(unit) + } else { + None + }; + } + let response = unwrap_client_response( RPC.shell() .storage_value(client, None, None, false, key) .await, ); - response.data.map(|bytes| { - T::try_from_slice(&bytes[..]).unwrap_or_else(|err| { + if response.data.is_empty() { + return None; + } + T::try_from_slice(&response.data[..]) + .map(Some) + .unwrap_or_else(|err| { eprintln!("Error decoding the value: {}", err); cli::safe_exit(1) }) - }) } /// Query a range of storage values with a matching prefix and decode them with diff --git a/shared/src/ledger/queries/router.rs b/shared/src/ledger/queries/router.rs index 33773d6eb4c..e4823e5ad7b 100644 --- a/shared/src/ledger/queries/router.rs +++ b/shared/src/ledger/queries/router.rs @@ -62,12 +62,10 @@ macro_rules! handle_match { break } let result = $handle($ctx, $request, $( $matched_args ),* )?; - let data = borsh::BorshSerialize::try_to_vec(&result.data).into_storage_result()?; - return Ok($crate::ledger::queries::EncodedResponseQuery { - data, - info: result.info, - proof_ops: result.proof_ops, - }); + // The handle must take care of encoding if needed and return `Vec`. + // This is because for `storage_value` the bytes are returned verbatim + // as read from storage. + return Ok(result); }; // Handler function that doesn't use the request, just the path args, if any @@ -91,6 +89,7 @@ macro_rules! handle_match { // If you get a compile error from here with `expected function, found // queries::Storage`, you're probably missing the marker `(sub _)` let data = $handle($ctx, $( $matched_args ),* )?; + // Encode the returned data with borsh let data = borsh::BorshSerialize::try_to_vec(&data).into_storage_result()?; return Ok($crate::ledger::queries::EncodedResponseQuery { data, @@ -372,6 +371,61 @@ macro_rules! pattern_to_prefix { /// Turn patterns and their handlers into methods for the router, where each /// dynamic pattern is turned into a parameter for the method. macro_rules! pattern_and_handler_to_method { + // Special terminal rule for `storage_value` handle from + // `shared/src/ledger/queries/shell.rs` that returns `Vec` which should + // not be decoded from response.data, but instead return as is + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $return_type:path, + (with_options storage_value), + () + ) => { + // paste! used to construct the `fn $handle_path`'s name. + paste::paste! { + #[allow(dead_code)] + #[doc = "Get a path to query `storage_value`."] + pub fn storage_value_path(&self, $( $param: &$param_ty ),* ) -> String { + itertools::join( + [ Some(std::borrow::Cow::from(&self.prefix)), $( $prefix ),* ] + .into_iter() + .filter_map(|x| x), "/") + } + + #[allow(dead_code)] + #[allow(clippy::too_many_arguments)] + #[cfg(any(test, feature = "async-client"))] + #[doc = "Request value with optional data (used for e.g. 
\ + `dry_run_tx`), optionally specified height (supported for \ + `storage_value`) and optional proof (supported for \ + `storage_value` and `storage_prefix`) from `storage_value`."] + pub async fn storage_value(&self, client: &CLIENT, + data: Option>, + height: Option<$crate::types::storage::BlockHeight>, + prove: bool, + $( $param: &$param_ty ),* + ) + -> std::result::Result< + $crate::ledger::queries::ResponseQuery>, + ::Error + > + where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + println!("IMMA VEC!!!!!!"); + let path = self.storage_value_path( $( $param ),* ); + + let $crate::ledger::queries::ResponseQuery { + data, info, proof_ops + } = client.request(path, data, height, prove).await?; + + Ok($crate::ledger::queries::ResponseQuery { + data, + info, + proof_ops, + }) + } + } + }; + // terminal rule for $handle that uses request (`with_options`) ( ( $( $param:tt: $param_ty:ty ),* ) @@ -409,6 +463,7 @@ macro_rules! pattern_and_handler_to_method { ::Error > where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + println!("IMMA not a VEC!!!!!!"); let path = self.[<$handle _path>]( $( $param ),* ); let $crate::ledger::queries::ResponseQuery { @@ -682,7 +737,8 @@ macro_rules! router_type { /// /// // The handler additionally receives the `RequestQuery`, which can have /// // some data attached, specified block height and ask for a proof. It -/// // returns `ResponseQuery`, which can have some `info` string and a proof. +/// // returns `EncodedResponseQuery` (the `data` must be encoded, if +/// // necessary), which can have some `info` string and a proof. /// ( "pattern_d" ) -> ReturnType = (with_options handler), /// /// ( "another" / "pattern" / "that" / "goes" / "deep" ) -> ReturnType = handler, @@ -780,9 +836,13 @@ macro_rules! 
router { /// ``` #[cfg(test)] mod test_rpc_handlers { - use crate::ledger::queries::{RequestCtx, RequestQuery, ResponseQuery}; + use borsh::BorshSerialize; + + use crate::ledger::queries::{ + EncodedResponseQuery, RequestCtx, RequestQuery, ResponseQuery, + }; use crate::ledger::storage::{DBIter, StorageHasher, DB}; - use crate::ledger::storage_api; + use crate::ledger::storage_api::{self, ResultExt}; use crate::types::storage::Epoch; use crate::types::token; @@ -874,12 +934,12 @@ mod test_rpc_handlers { pub fn c( _ctx: RequestCtx<'_, D, H>, _request: &RequestQuery, - ) -> storage_api::Result> + ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let data = "c".to_owned(); + let data = "c".to_owned().try_to_vec().into_storage_result()?; Ok(ResponseQuery { data, ..ResponseQuery::default() @@ -1011,6 +1071,9 @@ mod test { .unwrap(); assert_eq!(result, format!("b3iiii/{a1}/{a2}")); + let result = TEST_RPC.c(&client, None, None, false).await.unwrap(); + assert_eq!(result.data, format!("c")); + let result = TEST_RPC.test_sub_rpc().x(&client).await.unwrap(); assert_eq!(result, format!("x")); diff --git a/shared/src/ledger/queries/shell.rs b/shared/src/ledger/queries/shell.rs index 8ba800023c8..62f32c0f87b 100644 --- a/shared/src/ledger/queries/shell.rs +++ b/shared/src/ledger/queries/shell.rs @@ -1,7 +1,8 @@ +use borsh::BorshSerialize; use tendermint_proto::crypto::{ProofOp, ProofOps}; -use crate::ledger::queries::require_latest_height; -use crate::ledger::queries::types::{RequestCtx, RequestQuery, ResponseQuery}; +use crate::ledger::queries::types::{RequestCtx, RequestQuery}; +use crate::ledger::queries::{require_latest_height, EncodedResponseQuery}; use crate::ledger::storage::{DBIter, StorageHasher, DB}; use crate::ledger::storage_api::{self, ResultExt, StorageRead}; use crate::types::storage::{self, Epoch, PrefixValue}; @@ -15,7 +16,7 @@ router! {SHELL, // Raw storage access - read value ( "value" / [storage_key: storage::Key] ) - -> Option> = (with_options storage_value), + -> Vec = (with_options storage_value), // Dry run a transaction ( "dry_run_tx" ) -> TxResult = (with_options dry_run_tx), @@ -35,7 +36,7 @@ router! {SHELL, fn dry_run_tx( mut ctx: RequestCtx<'_, D, H>, request: &RequestQuery, -) -> storage_api::Result> +) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -59,9 +60,11 @@ where &mut ctx.tx_wasm_cache, ) .into_storage_result()?; - Ok(ResponseQuery { + let data = data.try_to_vec().into_storage_result()?; + Ok(EncodedResponseQuery { data, - ..ResponseQuery::default() + proof_ops: None, + info: Default::default(), }) } @@ -69,7 +72,7 @@ where fn dry_run_tx( _ctx: RequestCtx<'_, D, H>, _request: &RequestQuery, -) -> storage_api::Result> +) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -89,11 +92,15 @@ where Ok(data) } +/// Returns data with `vec![]` when the storage key is not found. For all +/// borsh-encoded types, it is safe to check `data.is_empty()` to see if the +/// value was found, except for unit - see `fn query_storage_value` in +/// `apps/src/lib/client/rpc.rs` for unit type handling via `storage_has_key`. 
fn storage_value( ctx: RequestCtx<'_, D, H>, request: &RequestQuery, storage_key: storage::Key, -) -> storage_api::Result>>> +) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -117,10 +124,10 @@ where } else { None }; - Ok(ResponseQuery { - data: Some(value), + Ok(EncodedResponseQuery { + data: value, proof_ops: proof, - ..Default::default() + info: Default::default(), }) } (None, _gas) => { @@ -133,8 +140,8 @@ where } else { None }; - Ok(ResponseQuery { - data: None, + Ok(EncodedResponseQuery { + data: vec![], proof_ops: proof, info: format!("No value found for key: {}", storage_key), }) @@ -146,7 +153,7 @@ fn storage_prefix( ctx: RequestCtx<'_, D, H>, request: &RequestQuery, storage_key: storage::Key, -) -> storage_api::Result>> +) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -177,7 +184,8 @@ where } else { None }; - Ok(ResponseQuery { + let data = data.try_to_vec().into_storage_result()?; + Ok(EncodedResponseQuery { data, proof_ops, ..Default::default() @@ -261,7 +269,7 @@ mod test { .storage_value(&client, None, None, false, &balance_key) .await .unwrap(); - assert!(read_balance.data.is_none()); + assert!(read_balance.data.is_empty()); // Request storage prefix iterator let balance_prefix = token::balance_prefix(&token_addr); @@ -291,7 +299,7 @@ mod test { .unwrap(); assert_eq!( balance, - token::Amount::try_from_slice(&read_balance.data.unwrap()).unwrap() + token::Amount::try_from_slice(&read_balance.data).unwrap() ); // Request storage prefix iterator From 5454327eea01ba8b069a3fc99cf82b90a1ac40d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 31 Oct 2022 15:54:52 +0100 Subject: [PATCH 16/17] queries: fix unused import in wasm build --- shared/src/ledger/queries/shell.rs | 35 +++++++++++++++++------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/shared/src/ledger/queries/shell.rs b/shared/src/ledger/queries/shell.rs index 62f32c0f87b..7491af49456 100644 --- a/shared/src/ledger/queries/shell.rs +++ b/shared/src/ledger/queries/shell.rs @@ -6,10 +6,12 @@ use crate::ledger::queries::{require_latest_height, EncodedResponseQuery}; use crate::ledger::storage::{DBIter, StorageHasher, DB}; use crate::ledger::storage_api::{self, ResultExt, StorageRead}; use crate::types::storage::{self, Epoch, PrefixValue}; +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] use crate::types::transaction::TxResult; #[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] use crate::types::transaction::{DecryptedTx, TxType}; +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] router! {SHELL, // Epoch of the last committed block ( "epoch" ) -> Epoch = epoch, @@ -30,6 +32,24 @@ router! {SHELL, -> bool = storage_has_key, } +#[cfg(not(all(feature = "wasm-runtime", feature = "ferveo-tpke")))] +router! {SHELL, + // Epoch of the last committed block + ( "epoch" ) -> Epoch = epoch, + + // Raw storage access - read value + ( "value" / [storage_key: storage::Key] ) + -> Vec = (with_options storage_value), + + // Raw storage access - prefix iterator + ( "prefix" / [storage_key: storage::Key] ) + -> Vec = (with_options storage_prefix), + + // Raw storage access - is given storage key present? 
+    ( "has_key" / [storage_key: storage::Key] )
+        -> bool = storage_has_key,
+}
+
 // Handlers:
 
 #[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))]
 fn dry_run_tx<D, H>(
@@ -68,21 +88,6 @@ where
     })
 }
 
-#[cfg(not(all(feature = "wasm-runtime", feature = "ferveo-tpke")))]
-fn dry_run_tx<D, H>(
-    _ctx: RequestCtx<'_, D, H>,
-    _request: &RequestQuery,
-) -> storage_api::Result<EncodedResponseQuery>
-where
-    D: 'static + DB + for<'iter> DBIter<'iter> + Sync,
-    H: 'static + StorageHasher + Sync,
-{
-    unimplemented!(
-        "dry_run_tx request handler requires \"wasm-runtime\" and \
-         \"ferveo-tpke\" features enabled."
-    )
-}
-
 fn epoch<D, H>(ctx: RequestCtx<'_, D, H>) -> storage_api::Result<Epoch>
 where

From 27f324cfa46898415328ca41242bec3a739834a5 Mon Sep 17 00:00:00 2001
From: "Raymond E. Pasco"
Date: Mon, 31 Oct 2022 12:24:05 -0400
Subject: [PATCH 17/17] changelog: add #569

---
 .changelog/unreleased/improvements/569-rpc-sub-shell.md | 2 ++
 1 file changed, 2 insertions(+)
 create mode 100644 .changelog/unreleased/improvements/569-rpc-sub-shell.md

diff --git a/.changelog/unreleased/improvements/569-rpc-sub-shell.md b/.changelog/unreleased/improvements/569-rpc-sub-shell.md
new file mode 100644
index 00000000000..96f0a8bd3b5
--- /dev/null
+++ b/.changelog/unreleased/improvements/569-rpc-sub-shell.md
@@ -0,0 +1,2 @@
+- Move all shell RPC endpoints under the /shell path. This is a breaking change
+  to RPC consumers. ([#569](https://github.com/anoma/namada/pull/569))
\ No newline at end of file
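
Editor's note (illustrative, not part of the patch series): after these patches, all shell queries go through the generated `RPC.shell()` sub-router, and handlers declared as `(with_options ...)` additionally take optional request data, an optional block height and a `prove` flag. The sketch below is assembled from the calls that appear in the router tests and in `apps/src/lib/client/rpc.rs` above; the helper function, its name and the printing are assumptions, and it presupposes some type `C` implementing the `Client` trait from `namada::ledger::queries` (for example the Tendermint HTTP client used in the apps crate).

```rust
use namada::ledger::queries::{Client, RPC};
use namada::types::storage;

/// Hypothetical helper showing how an RPC consumer might use the
/// `/shell/...` endpoints introduced by this patch series.
async fn shell_queries_example<C: Client + Sync>(
    client: &C,
    key: &storage::Key,
    tx_bytes: Vec<u8>,
) -> Result<(), C::Error> {
    // Simple borsh-decoded value: the last committed epoch
    let epoch = RPC.shell().epoch(client).await?;
    println!("current epoch: {}", epoch);

    // `with_options` handler: optional data, optional height, `prove` flag;
    // the response carries the raw value bytes (empty when the key is absent)
    let value = RPC
        .shell()
        .storage_value(client, None, None, false, key)
        .await?;
    println!("read {} value bytes", value.data.len());

    // Dry-run a transaction through the same router
    let dry_run = RPC
        .shell()
        .dry_run_tx(client, Some(tx_bytes), None, false)
        .await?;
    println!("dry-run result: {}", dry_run.data);

    Ok(())
}
```

As the changelog entry notes, consumers that previously queried the old ABCI paths (e.g. `/value/...`) need to switch to the `/shell/...` prefix or use the generated client methods shown above.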